From: Matthias Braun
Date: Wed, 29 Aug 2012 16:41:58 +0000 (+0200)
Subject: set load/store to floating if ptr is never NULL
X-Git-Url: http://nsz.repo.hu/git/?a=commitdiff_plain;h=70de47c4584c90da38d8c7298ba4ea51f3ea26cb;p=libfirm

set load/store to floating if ptr is never NULL
---

diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c
index d37db02cc..9686ac000 100644
--- a/ir/ir/iropt.c
+++ b/ir/ir/iropt.c
@@ -6099,23 +6099,28 @@ static ir_node *transform_node_Sync(ir_node *n)
 
 static ir_node *transform_node_Load(ir_node *n)
 {
+    /* don't touch volatile loads */
+    if (get_Load_volatility(n) == volatility_is_volatile)
+        return n;
+
+    ir_node *ptr = get_Load_ptr(n);
+    const ir_node *confirm;
+    if (value_not_zero(ptr, &confirm) && confirm == NULL) {
+        set_irn_pinned(n, op_pin_state_floats);
+    }
+
     /* if our memory predecessor is a load from the same address, then reuse the
      * previous result */
     ir_node *mem = get_Load_mem(n);
-    ir_node *mem_pred;
-
     if (!is_Proj(mem))
         return n;
-    /* don't touch volatile loads */
-    if (get_Load_volatility(n) == volatility_is_volatile)
-        return n;
-    mem_pred = get_Proj_pred(mem);
+    ir_node *mem_pred = get_Proj_pred(mem);
     if (is_Load(mem_pred)) {
         ir_node *pred_load = mem_pred;
 
         /* conservatively compare the 2 loads. TODO: This could be less strict
          * with fixup code in some situations (like smaller/bigger modes) */
-        if (get_Load_ptr(pred_load) != get_Load_ptr(n))
+        if (get_Load_ptr(pred_load) != ptr)
             return n;
         if (get_Load_mode(pred_load) != get_Load_mode(n))
             return n;
@@ -6136,7 +6141,7 @@ static ir_node *transform_node_Load(ir_node *n)
         ir_node *pred_store = mem_pred;
         ir_node *value      = get_Store_value(pred_store);
 
-        if (get_Store_ptr(pred_store) != get_Load_ptr(n))
+        if (get_Store_ptr(pred_store) != ptr)
             return n;
         if (get_irn_mode(value) != get_Load_mode(n))
             return n;
@@ -6157,6 +6162,20 @@ static ir_node *transform_node_Load(ir_node *n)
     return n;
 }
 
+static ir_node *transform_node_Store(ir_node *n)
+{
+    /* don't touch volatile stores */
+    if (get_Store_volatility(n) == volatility_is_volatile)
+        return n;
+
+    ir_node *ptr = get_Store_ptr(n);
+    const ir_node *confirm;
+    if (value_not_zero(ptr, &confirm) && confirm == NULL) {
+        set_irn_pinned(n, op_pin_state_floats);
+    }
+    return n;
+}
+
 /**
  * optimize a trampoline Call into a direct Call
  */
@@ -6398,6 +6417,7 @@ void ir_register_opt_node_ops(void)
     register_transform_node_func(op_Shl,     transform_node_Shl);
     register_transform_node_func(op_Shrs,    transform_node_Shrs);
     register_transform_node_func(op_Shr,     transform_node_Shr);
+    register_transform_node_func(op_Store,   transform_node_Store);
     register_transform_node_func(op_Sub,     transform_node_Sub);
     register_transform_node_func(op_Switch,  transform_node_Switch);
     register_transform_node_func(op_Sync,    transform_node_Sync);
diff --git a/ir/opt/ifconv.c b/ir/opt/ifconv.c
index 1bc7f13c9..c39e1a263 100644
--- a/ir/opt/ifconv.c
+++ b/ir/opt/ifconv.c
@@ -333,7 +333,9 @@ restart:
             mux_true  = get_Phi_pred(p, i);
             mux_false = get_Phi_pred(p, j);
         }
-        if (!env->allow_ifconv(sel, mux_false, mux_true)) {
+        ir_mode *mode = get_irn_mode(mux_true);
+        if (mode == mode_M
+            || !env->allow_ifconv(sel, mux_false, mux_true)) {
             supported = false;
             break;
         }
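
Background note (not part of the commit): Load and Store nodes are normally pinned to their basic block because dereferencing a possibly-NULL pointer may raise an exception. When value_not_zero() proves the address non-null without relying on a Confirm node (confirm == NULL, i.e. the fact holds unconditionally rather than only on one control-flow path), the operation cannot trap on NULL and may be marked op_pin_state_floats, so later passes such as code placement can move it more freely. A minimal sketch of the shared pattern follows; the helper name is hypothetical and not part of the patch, and it assumes the same iropt.c context in which value_not_zero(), set_irn_pinned() and op_pin_state_floats are already visible:

/* Hypothetical helper (not in the patch): unpin a memory operation whose
 * address is provably non-null.  The non-null fact must not depend on a
 * Confirm node (confirm == NULL), so the node can be placed without regard
 * to any guarding condition. */
static void set_memop_floats_if_nonnull(ir_node *memop, ir_node *ptr)
{
    const ir_node *confirm;
    if (value_not_zero(ptr, &confirm) && confirm == NULL) {
        set_irn_pinned(memop, op_pin_state_floats);
    }
}

With such a helper, transform_node_Load() and transform_node_Store() would each reduce to the volatility check followed by set_memop_floats_if_nonnull(n, get_Load_ptr(n)) or set_memop_floats_if_nonnull(n, get_Store_ptr(n)) respectively; the patch instead inlines the same check in both transform functions.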