X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=44c40d45643103d510926235e4ca4f446bdc9bf8;hb=18329a03eee9bae6c0649eb7e35f744d1b9a1476;hp=4b5eab8f119427a69a9cbfe8b223d982e036424a;hpb=cca7bc4357de350a3d795d97dad83221217aaed2;p=libfirm diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c index 4b5eab8f1..44c40d456 100644 --- a/ir/ir/iropt.c +++ b/ir/ir/iropt.c @@ -21,7 +21,6 @@ * @file * @brief iropt --- optimizations intertwined with IR construction. * @author Christian Schaefer, Goetz Lindenmaier, Michael Beck - * @version $Id$ */ #include "config.h" @@ -44,7 +43,6 @@ #include "irhooks.h" #include "irarch.h" #include "hashptr.h" -#include "opt_polymorphy.h" #include "irtools.h" #include "irhooks.h" #include "array_t.h" @@ -53,7 +51,6 @@ #include "bitfiddle.h" #include "be.h" -/* Make types visible to allow most efficient access */ #include "entity_t.h" static bool is_Or_Eor_Add(const ir_node *node) @@ -85,7 +82,6 @@ static ir_tarval *default_value_of(const ir_node *n) value_of_func value_of_ptr = default_value_of; -/* * Set a new value_of function. */ void set_value_of_func(value_of_func func) { if (func != NULL) @@ -588,6 +584,15 @@ ir_relation ir_get_possible_cmp_relations(const ir_node *left, /* Alloc nodes never return null (but throw an exception) */ if (is_Alloc(left) && tarval_is_null(tv_r)) possible &= ~ir_relation_equal; + /* stuff known through confirm nodes */ + if (is_Confirm(left) && get_Confirm_bound(left) == right) { + possible &= get_Confirm_relation(left); + } + if (is_Confirm(right) && get_Confirm_bound(right) == left) { + ir_relation relation = get_Confirm_relation(right); + relation = get_inversed_relation(relation); + possible &= relation; + } return possible; } @@ -609,6 +614,17 @@ static ir_tarval *compute_cmp(const ir_node *cmp) return computed_value_Cmp_Confirm(cmp, left, right, relation); } +/** + * some people want to call compute_cmp directly, in this case we have to + * test the constant folding flag again + */ +static ir_tarval *compute_cmp_ext(const ir_node *cmp) +{ + if (!get_opt_constant_folding()) + return tarval_bad; + return compute_cmp(cmp); +} + /** * Return the value of a Cmp. * @@ -618,7 +634,7 @@ static ir_tarval *compute_cmp(const ir_node *cmp) static ir_tarval *computed_value_Cmp(const ir_node *cmp) { /* we can't construct Constb after lowering mode_b nodes */ - if (is_irg_state(get_irn_irg(cmp), IR_GRAPH_STATE_MODEB_LOWERED)) + if (irg_is_constrained(get_irn_irg(cmp), IR_GRAPH_CONSTRAINT_MODEB_LOWERED)) return tarval_bad; return compute_cmp(cmp); @@ -720,16 +736,7 @@ ir_tarval *computed_value(const ir_node *n) return tarval_bad; } -/** - * Set the default computed_value evaluator in an ir_op_ops. - * - * @param code the opcode for the default operation - * @param ops the operations initialized - * - * @return - * The operations. 
- */ -static ir_op_ops *firm_set_default_computed_value(ir_opcode code, ir_op_ops *ops) +void firm_set_default_computed_value(ir_opcode code, ir_op_ops *ops) { #define CASE(a) \ case iro_##a: \ @@ -768,8 +775,6 @@ static ir_op_ops *firm_set_default_computed_value(ir_opcode code, ir_op_ops *ops /* leave NULL */ break; } - - return ops; #undef CASE_PROJ #undef CASE } @@ -1477,7 +1482,7 @@ static ir_node *equivalent_node_Mux(ir_node *n) if (ts == tarval_bad && is_Cmp(sel)) { /* try again with a direct call to compute_cmp, as we don't care * about the MODEB_LOWERED flag here */ - ts = compute_cmp(sel); + ts = compute_cmp_ext(sel); } /* Mux(true, f, t) == t */ @@ -1608,16 +1613,7 @@ ir_node *equivalent_node(ir_node *n) return n; } -/** - * Sets the default equivalent node operation for an ir_op_ops. - * - * @param code the opcode for the default operation - * @param ops the operations initialized - * - * @return - * The operations. - */ -static ir_op_ops *firm_set_default_equivalent_node(ir_opcode code, ir_op_ops *ops) +void firm_set_default_equivalent_node(ir_opcode code, ir_op_ops *ops) { #define CASE(a) \ case iro_##a: \ @@ -1655,8 +1651,6 @@ static ir_op_ops *firm_set_default_equivalent_node(ir_opcode code, ir_op_ops *op /* leave NULL */ break; } - - return ops; #undef CASE #undef CASE_PROJ } @@ -2375,7 +2369,7 @@ static ir_node *transform_node_bitop_shift(ir_node *n) ir_tarval *tv2; ir_tarval *tv_bitop; - if (!is_irg_state(irg, IR_GRAPH_STATE_NORMALISATION2)) + if (!irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_NORMALISATION2)) return n; assert(is_And(n) || is_Or(n) || is_Eor(n) || is_Or_Eor_Add(n)); @@ -2768,7 +2762,7 @@ static ir_node *transform_node_Add(ir_node *n) ir_graph *irg = get_irn_irg(n); /* the following code leads to endless recursion when Mul are replaced * by a simple instruction chain */ - if (!is_irg_state(irg, IR_GRAPH_STATE_ARCH_DEP) + if (!irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_ARCH_DEP) && a == b && mode_is_int(mode)) { ir_node *block = get_nodes_block(n); @@ -3556,7 +3550,7 @@ static ir_node *transform_node_Cond(ir_node *n) if (ta == tarval_bad && is_Cmp(a)) { /* try again with a direct call to compute_cmp, as we don't care * about the MODEB_LOWERED flag here */ - ta = compute_cmp(a); + ta = compute_cmp_ext(a); } if (ta != tarval_bad && get_irn_mode(a) == mode_b) { @@ -3574,7 +3568,7 @@ static ir_node *transform_node_Cond(ir_node *n) } /* We might generate an endless loop, so keep it alive. 
*/ add_End_keepalive(get_irg_end(irg), blk); - clear_irg_state(irg, IR_GRAPH_STATE_NO_UNREACHABLE_CODE); + clear_irg_properties(irg, IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE); } return n; } @@ -3648,7 +3642,7 @@ static ir_node *transform_node_shift_bitop(ir_node *n) ir_tarval *tv2; ir_tarval *tv_shift; - if (is_irg_state(irg, IR_GRAPH_STATE_NORMALISATION2)) + if (irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_NORMALISATION2)) return n; assert(is_Shrs(n) || is_Shr(n) || is_Shl(n) || is_Rotl(n)); @@ -4080,27 +4074,25 @@ static ir_node *transform_node_Minus(ir_node *n) */ static ir_node *transform_node_Proj_Load(ir_node *proj) { - if (get_opt_ldst_only_null_ptr_exceptions()) { - if (get_irn_mode(proj) == mode_X) { - ir_node *load = get_Proj_pred(proj); + if (get_irn_mode(proj) == mode_X) { + ir_node *load = get_Proj_pred(proj); - /* get the Load address */ - const ir_node *addr = get_Load_ptr(load); - const ir_node *confirm; + /* get the Load address */ + const ir_node *addr = get_Load_ptr(load); + const ir_node *confirm; - if (value_not_null(addr, &confirm)) { - if (confirm == NULL) { - /* this node may float if it did not depend on a Confirm */ - set_irn_pinned(load, op_pin_state_floats); - } - if (get_Proj_proj(proj) == pn_Load_X_except) { - ir_graph *irg = get_irn_irg(proj); - DBG_OPT_EXC_REM(proj); - return new_r_Bad(irg, mode_X); - } else { - ir_node *blk = get_nodes_block(load); - return new_r_Jmp(blk); - } + if (value_not_null(addr, &confirm)) { + if (confirm == NULL) { + /* this node may float if it did not depend on a Confirm */ + set_irn_pinned(load, op_pin_state_floats); + } + if (get_Proj_proj(proj) == pn_Load_X_except) { + ir_graph *irg = get_irn_irg(proj); + DBG_OPT_EXC_REM(proj); + return new_r_Bad(irg, mode_X); + } else { + ir_node *blk = get_nodes_block(load); + return new_r_Jmp(blk); } } } @@ -4112,27 +4104,25 @@ static ir_node *transform_node_Proj_Load(ir_node *proj) */ static ir_node *transform_node_Proj_Store(ir_node *proj) { - if (get_opt_ldst_only_null_ptr_exceptions()) { - if (get_irn_mode(proj) == mode_X) { - ir_node *store = get_Proj_pred(proj); + if (get_irn_mode(proj) == mode_X) { + ir_node *store = get_Proj_pred(proj); - /* get the load/store address */ - const ir_node *addr = get_Store_ptr(store); - const ir_node *confirm; + /* get the load/store address */ + const ir_node *addr = get_Store_ptr(store); + const ir_node *confirm; - if (value_not_null(addr, &confirm)) { - if (confirm == NULL) { - /* this node may float if it did not depend on a Confirm */ - set_irn_pinned(store, op_pin_state_floats); - } - if (get_Proj_proj(proj) == pn_Store_X_except) { - ir_graph *irg = get_irn_irg(proj); - DBG_OPT_EXC_REM(proj); - return new_r_Bad(irg, mode_X); - } else { - ir_node *blk = get_nodes_block(store); - return new_r_Jmp(blk); - } + if (value_not_null(addr, &confirm)) { + if (confirm == NULL) { + /* this node may float if it did not depend on a Confirm */ + set_irn_pinned(store, op_pin_state_floats); + } + if (get_Proj_proj(proj) == pn_Store_X_except) { + ir_graph *irg = get_irn_irg(proj); + DBG_OPT_EXC_REM(proj); + return new_r_Bad(irg, mode_X); + } else { + ir_node *blk = get_nodes_block(store); + return new_r_Jmp(blk); } } } @@ -5138,7 +5128,7 @@ static ir_node *transform_node_Proj(ir_node *proj) /** * Test whether a block is unreachable * Note: That this only returns true when - * IR_GRAPH_STATE_OPTIMIZE_UNREACHABLE_CODE is set. + * IR_GRAPH_CONSTRAINT_OPTIMIZE_UNREACHABLE_CODE is set. 
* This is important, as you easily end up producing invalid constructs in the * unreachable code when optimizing away edges into the unreachable code. * So only set this flag when you iterate localopts to the fixpoint. @@ -5149,7 +5139,7 @@ static ir_node *transform_node_Proj(ir_node *proj) static bool is_block_unreachable(const ir_node *block) { const ir_graph *irg = get_irn_irg(block); - if (!is_irg_state(irg, IR_GRAPH_STATE_OPTIMIZE_UNREACHABLE_CODE)) + if (!irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_OPTIMIZE_UNREACHABLE_CODE)) return false; return get_Block_dom_depth(block) < 0; } @@ -5161,7 +5151,7 @@ static ir_node *transform_node_Block(ir_node *block) ir_node *bad = NULL; int i; - if (!is_irg_state(irg, IR_GRAPH_STATE_OPTIMIZE_UNREACHABLE_CODE)) + if (!irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_OPTIMIZE_UNREACHABLE_CODE)) return block; for (i = 0; i < arity; ++i) { @@ -5916,6 +5906,8 @@ static ir_node *transform_Mux_set(ir_node *n) if (!mode_is_int(mode) && !mode_is_reference(mode)) return n; dest_mode = get_irn_mode(n); + if (!mode_is_int(dest_mode) && !mode_is_reference(dest_mode)) + return n; right = get_Cmp_right(cond); relation = get_Cmp_relation(cond) & ~ir_relation_unordered; if (get_mode_size_bits(mode) >= get_mode_size_bits(dest_mode) @@ -6030,7 +6022,7 @@ static ir_node *transform_node_Mux(ir_node *n) /* the following optimisations create new mode_b nodes, so only do them * before mode_b lowering */ - if (!is_irg_state(irg, IR_GRAPH_STATE_MODEB_LOWERED)) { + if (!irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_MODEB_LOWERED)) { if (is_Mux(t)) { ir_node* block = get_nodes_block(n); ir_node* c0 = sel; @@ -6350,16 +6342,7 @@ static ir_node *transform_node_Call(ir_node *call) return res; } -/** - * Sets the default transform node operation for an ir_op_ops. - * - * @param code the opcode for the default operation - * @param ops the operations initialized - * - * @return - * The operations. - */ -static ir_op_ops *firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops) +void firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops) { #define CASE(a) \ case iro_##a: \ @@ -6393,7 +6376,6 @@ static ir_op_ops *firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops CASE(Phi); CASE(Proj); CASE(Rotl); - CASE(Sel); CASE(Shl); CASE(Shr); CASE(Shrs); @@ -6409,8 +6391,6 @@ static ir_op_ops *firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops default: break; } - - return ops; #undef CASE_PROJ_EX #undef CASE_PROJ #undef CASE @@ -6531,7 +6511,7 @@ static int node_cmp_attr_Call(const ir_node *a, const ir_node *b) { const call_attr *pa = &a->attr.call; const call_attr *pb = &b->attr.call; - if (pa->type != pb->type || pa->tail_call != pb->tail_call) + if (pa->type != pb->type) return 1; return node_cmp_exception(a, b); } @@ -6547,11 +6527,11 @@ static int node_cmp_attr_Sel(const ir_node *a, const ir_node *b) /** Compares the attributes of two Phi nodes. 
*/ static int node_cmp_attr_Phi(const ir_node *a, const ir_node *b) { - /* we can only enter this function if both nodes have the same number of inputs, - hence it is enough to check if one of them is a Phi0 */ - if (is_Phi0(a)) { - /* check the Phi0 pos attribute */ - return a->attr.phi.u.pos != b->attr.phi.u.pos; + (void) b; + /* do not CSE Phi-nodes without any inputs when building new graphs */ + if (get_irn_arity(a) == 0 && + get_irg_phase_state(get_irn_irg(a)) == phase_building) { + return 1; } return 0; } @@ -6657,7 +6637,8 @@ static int node_cmp_attr_Builtin(const ir_node *a, const ir_node *b) /** Compares the attributes of two ASM nodes. */ static int node_cmp_attr_ASM(const ir_node *a, const ir_node *b) { - int i, n; + size_t n; + size_t i; const ir_asm_constraint *ca; const ir_asm_constraint *cb; ident **cla, **clb; @@ -6720,16 +6701,7 @@ static int node_cmp_attr_InstOf(const ir_node *a, const ir_node *b) return node_cmp_exception(a, b); } -/** - * Set the default node attribute compare operation for an ir_op_ops. - * - * @param code the opcode for the default operation - * @param ops the operations initialized - * - * @return - * The operations. - */ -static ir_op_ops *firm_set_default_node_cmp_attr(ir_opcode code, ir_op_ops *ops) +void firm_set_default_node_cmp_attr(ir_opcode code, ir_op_ops *ops) { #define CASE(a) \ case iro_##a: \ @@ -6763,15 +6735,9 @@ static ir_op_ops *firm_set_default_node_cmp_attr(ir_opcode code, ir_op_ops *ops) /* leave NULL */ break; } - - return ops; #undef CASE } -/* - * Compare function for two nodes in the value table. Gets two - * nodes as parameters. Returns 0 if the nodes are a Common Sub Expression. - */ int identities_cmp(const void *elt, const void *key) { ir_node *a = (ir_node *)elt; @@ -6836,17 +6802,11 @@ int identities_cmp(const void *elt, const void *key) return 0; } -/* - * Calculate a hash value of a node. - * - * @param node The IR-node - */ unsigned ir_node_hash(const ir_node *node) { return node->op->ops.hash(node); } - void new_identities(ir_graph *irg) { if (irg->value_table != NULL) @@ -6860,8 +6820,6 @@ void del_identities(ir_graph *irg) del_pset(irg->value_table); } -/* Normalize a node by putting constants (and operands with larger - * node index) on the right (operator side). */ void ir_normalize_node(ir_node *n) { if (is_op_commutative(get_irn_op(n))) { @@ -6880,16 +6838,6 @@ void ir_normalize_node(ir_node *n) } } -/* - * Return the canonical node computing the same value as n. - * Looks up the node in a hash table, enters it in the table - * if it isn't there yet. - * - * @param n the node to look up - * - * @return a node that computes the same value as n or n if no such - * node could be found - */ ir_node *identify_remember(ir_node *n) { ir_graph *irg = get_irn_irg(n); @@ -6930,7 +6878,6 @@ static inline ir_node *identify_cons(ir_node *n) return n; } -/* Add a node to the identities value table. */ void add_identities(ir_node *node) { if (!get_opt_cse()) @@ -6941,7 +6888,6 @@ void add_identities(ir_node *node) identify_remember(node); } -/* Visit each node in the value table of a graph. */ void visit_all_identities(ir_graph *irg, irg_walk_func visit, void *env) { ir_node *node; @@ -6954,13 +6900,6 @@ void visit_all_identities(ir_graph *irg, irg_walk_func visit, void *env) current_ir_graph = rem; } -/** - * These optimizations deallocate nodes from the obstack. - * It can only be called if it is guaranteed that no other nodes - * reference this one, i.e., right after construction of a node. 
- * - * @param n The node to optimize - */ ir_node *optimize_node(ir_node *n) { ir_node *oldn = n; @@ -7053,12 +6992,6 @@ ir_node *optimize_node(ir_node *n) return n; } - -/** - * These optimizations never deallocate nodes (in place). This can cause dead - * nodes lying on the obstack. Remove these by a dead node elimination, - * i.e., a copying garbage collection. - */ ir_node *optimize_in_place_2(ir_node *n) { if (!get_opt_optimize() && !is_Phi(n)) return n; @@ -7087,8 +7020,6 @@ ir_node *optimize_in_place_2(ir_node *n) irn_verify(n); /* Now we have a legal, useful node. Enter it in hash table for cse. - * Blocks should be unique anyways. (Except the successor of start: - * is cse with the start block!) * * Note: This is only necessary because some of the optimisations * operate in-place (set_XXX_bla, turn_into_tuple, ...) which is considered @@ -7104,9 +7035,6 @@ ir_node *optimize_in_place_2(ir_node *n) return n; } -/** - * Wrapper for external use, set proper status bits after optimization. - */ ir_node *optimize_in_place(ir_node *n) { ir_graph *irg = get_irn_irg(n); @@ -7118,7 +7046,7 @@ ir_node *optimize_in_place(ir_node *n) /* FIXME: Maybe we could also test whether optimizing the node can change the control graph. */ - clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE); + clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE); return optimize_in_place_2(n); } @@ -7130,7 +7058,7 @@ static unsigned hash_Const(const ir_node *node) unsigned h; /* special value for const, as they only differ in their tarval. */ - h = HASH_PTR(node->attr.con.tarval); + h = hash_ptr(node->attr.con.tarval); return h; } @@ -7143,21 +7071,12 @@ static unsigned hash_SymConst(const ir_node *node) unsigned h; /* all others are pointers */ - h = HASH_PTR(node->attr.symc.sym.type_p); + h = hash_ptr(node->attr.symc.sym.type_p); return h; } -/** - * Set the default hash operation in an ir_op_ops. - * - * @param code the opcode for the default operation - * @param ops the operations initialized - * - * @return - * The operations. - */ -static ir_op_ops *firm_set_default_hash(unsigned code, ir_op_ops *ops) +void firm_set_default_hash(unsigned code, ir_op_ops *ops) { #define CASE(a) \ case iro_##a: \ @@ -7166,7 +7085,7 @@ static ir_op_ops *firm_set_default_hash(unsigned code, ir_op_ops *ops) /* hash function already set */ if (ops->hash != NULL) - return ops; + return; switch (code) { CASE(Const); @@ -7175,23 +7094,5 @@ static ir_op_ops *firm_set_default_hash(unsigned code, ir_op_ops *ops) /* use input/mode default hash if no function was given */ ops->hash = firm_default_hash; } - - return ops; #undef CASE } - -/* - * Sets the default operation for an ir_ops. - */ -ir_op_ops *firm_set_default_operations(unsigned code, ir_op_ops *ops) -{ - ops = firm_set_default_hash(code, ops); - ops = firm_set_default_computed_value(code, ops); - ops = firm_set_default_equivalent_node(code, ops); - ops = firm_set_default_transform_node(code, ops); - ops = firm_set_default_node_cmp_attr(code, ops); - ops = firm_set_default_get_type_attr(code, ops); - ops = firm_set_default_get_entity_attr(code, ops); - - return ops; -}
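
A minimal, self-contained sketch of how the possible-relation set returned by
ir_get_possible_cmp_relations() (now narrowed by Confirm nodes, see the hunk
above) can be used to decide a Cmp statically. It only mirrors the logic that
computed_value_Cmp_Confirm() applies internally; the helper name
classify_cmp_statically and the <libfirm/firm.h> include path are illustrative
assumptions and not part of this patch, and it presumes
ir_get_possible_cmp_relations() is visible to the caller (it is exported from
this file).

#include <libfirm/firm.h>

/* Sketch only: return tarval_b_true/tarval_b_false if the comparison is
 * decidable from the graph alone, tarval_bad otherwise. */
static ir_tarval *classify_cmp_statically(const ir_node *cmp)
{
	ir_node     *left     = get_Cmp_left(cmp);
	ir_node     *right    = get_Cmp_right(cmp);
	ir_relation  relation = get_Cmp_relation(cmp);
	/* every relation that can still occur between left and right */
	ir_relation  possible = ir_get_possible_cmp_relations(left, right);

	/* no possible outcome satisfies the tested relation: always false */
	if ((possible & relation) == ir_relation_false)
		return tarval_b_false;
	/* every possible outcome satisfies the tested relation: always true */
	if ((possible & ~relation) == ir_relation_false)
		return tarval_b_true;
	return tarval_bad;
}

Note on the new Confirm hunk itself: a Confirm states a relation from its
confirmed value toward its bound, so when the confirmed value is the right
operand of the comparison the stated relation has to be flipped with
get_inversed_relation() before it can narrow the possible set.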