return n;
}
+/**
+ * Create a 0 constant of given mode.
+ */
+static ir_node *create_zero_const(ir_graph *irg, ir_mode *mode)
+{
+ ir_tarval *tv = get_mode_null(mode);
+ ir_node *cnst = new_r_Const(irg, tv);
+
+ return cnst;
+}
+
/**
* Transform an And.
*/
ir_mode *mode;
vrp_attr *a_vrp, *b_vrp;
- /* we can combine the relations of two compares with the same operands */
if (is_Cmp(a) && is_Cmp(b)) {
- ir_node *a_left = get_Cmp_left(a);
- ir_node *a_right = get_Cmp_left(a);
- ir_node *b_left = get_Cmp_left(b);
- ir_node *b_right = get_Cmp_right(b);
+ ir_node *a_left = get_Cmp_left(a);
+ ir_node *a_right = get_Cmp_right(a);
+ ir_node *b_left = get_Cmp_left(b);
+ ir_node *b_right = get_Cmp_right(b);
+ ir_relation a_relation = get_Cmp_relation(a);
+ ir_relation b_relation = get_Cmp_relation(b);
+ /* we can combine the relations of two compares with the same
+ * operands */
if (a_left == b_left && b_left == b_right) {
dbg_info *dbgi = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- ir_relation a_relation = get_Cmp_relation(a);
- ir_relation b_relation = get_Cmp_relation(b);
ir_relation new_relation = a_relation & b_relation;
return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation);
}
+ /* Cmp(a==0) and Cmp(b==0) can be optimized to Cmp(a|b==0) */
+ if (is_Const(a_right) && is_Const_null(a_right)
+ && is_Const(b_right) && is_Const_null(b_right)
+ && a_relation == b_relation && a_relation == ir_relation_equal
+ && !mode_is_float(get_irn_mode(a_left))
+ && !mode_is_float(get_irn_mode(b_left))) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_mode *mode = get_irn_mode(a_left);
+ ir_node *n_b_left = get_irn_mode(b_left) != mode ?
+ new_rd_Conv(dbgi, block, b_left, mode) : b_left;
+ ir_node *or = new_rd_Or(dbgi, block, a_left, n_b_left, mode);
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *zero = create_zero_const(irg, mode);
+ return new_rd_Cmp(dbgi, block, or, zero, ir_relation_equal);
+ }
}
mode = get_irn_mode(n);
return false;
}
-/**
- * Create a 0 constant of given mode.
- */
-static ir_node *create_zero_const(ir_graph *irg, ir_mode *mode)
-{
- ir_tarval *tv = get_mode_null(mode);
- ir_node *cnst = new_r_Const(irg, tv);
-
- return cnst;
-}
-
/**
* Normalizes and optimizes Cmp nodes.
*/
return n;
} /* transform_node_Or_Rotl */
+static bool is_cmp_unequal_zero(const ir_node *node)
+{
+ ir_relation relation = get_Cmp_relation(node);
+ ir_node *left = get_Cmp_left(node);
+ ir_node *right = get_Cmp_right(node);
+ ir_mode *mode = get_irn_mode(left);
+
+ if (!is_Const(right) || !is_Const_null(right))
+ return false;
+ if (mode_is_signed(mode)) {
+ return relation == ir_relation_less_greater;
+ } else {
+ return relation == ir_relation_greater;
+ }
+}
+
/**
* Transform an Or.
*/
ir_relation new_relation = a_relation | b_relation;
return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation);
}
+ /* Cmp(a!=0) or Cmp(b!=0) => Cmp(a|b != 0) */
+ if (is_cmp_unequal_zero(a) && is_cmp_unequal_zero(b)
+ && !mode_is_float(get_irn_mode(a_left))
+ && !mode_is_float(get_irn_mode(b_left))) {
+ ir_graph *irg = get_irn_irg(n);
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_mode *mode = get_irn_mode(a_left);
+ ir_node *n_b_left = get_irn_mode(b_left) != mode ?
+ new_rd_Conv(dbgi, block, b_left, mode) : b_left;
+ ir_node *or = new_rd_Or(dbgi, block, a_left, n_b_left, mode);
+ ir_node *zero = create_zero_const(irg, mode);
+ return new_rd_Cmp(dbgi, block, or, zero, ir_relation_less_greater);
+ }
}
mode = get_irn_mode(n);
return n;
} /* transform_node_Sync */
+static ir_node *transform_node_Load(ir_node *n)
+{
+ /* if our memory predecessor is a load from the same address, then reuse the
+ * previous result */
+ ir_node *mem = get_Load_mem(n);
+ ir_node *mem_pred;
+
+ if (!is_Proj(mem))
+ return n;
+ /* don't touch volatile loads */
+ if (get_Load_volatility(n) == volatility_is_volatile)
+ return n;
+ mem_pred = get_Proj_pred(mem);
+ if (is_Load(mem_pred)) {
+ ir_node *pred_load = mem_pred;
+
+ /* conservatively compare the 2 loads. TODO: This could be less strict
+ * with fixup code in some situations (like smaller/bigger modes) */
+ if (get_Load_ptr(pred_load) != get_Load_ptr(n))
+ return n;
+ if (get_Load_mode(pred_load) != get_Load_mode(n))
+ return n;
+ /* all combinations of aligned/unaligned pred/n should be fine so we do
+ * not compare the unaligned attribute */
+ {
+ ir_node *block = get_nodes_block(n);
+ ir_node *jmp = new_r_Jmp(block);
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *bad = new_r_Bad(irg);
+ ir_mode *mode = get_Load_mode(n);
+ ir_node *res = new_r_Proj(pred_load, mode, pn_Load_res);
+ ir_node *in[pn_Load_max] = { mem, jmp, bad, res };
+ ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
+ return tuple;
+ }
+ } else if (is_Store(mem_pred)) {
+ ir_node *pred_store = mem_pred;
+ ir_node *value = get_Store_value(pred_store);
+
+ if (get_Store_ptr(pred_store) != get_Load_ptr(n))
+ return n;
+ if (get_irn_mode(value) != get_Load_mode(n))
+ return n;
+ /* all combinations of aligned/unaligned pred/n should be fine so we do
+ * not compare the unaligned attribute */
+ {
+ ir_node *block = get_nodes_block(n);
+ ir_node *jmp = new_r_Jmp(block);
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *bad = new_r_Bad(irg);
+ ir_node *res = value;
+ ir_node *in[pn_Load_max] = { mem, jmp, bad, res };
+ ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
+ return tuple;
+ }
+ }
+
+ return n;
+}
+
/**
* optimize a trampoline Call into a direct Call
*/
}
var = get_method_variadicity(mtp);
set_method_variadicity(ctp, var);
- if (var == variadicity_variadic) {
- set_method_first_variadic_param_index(ctp, get_method_first_variadic_param_index(mtp) + 1);
- }
/* When we resolve a trampoline, the function must be called by a this-call */
set_method_calling_convention(ctp, get_method_calling_convention(mtp) | cc_this_call);
set_method_additional_properties(ctp, get_method_additional_properties(mtp));
CASE(Sync);
CASE_PROJ(Bound);
CASE_PROJ(CopyB);
- CASE_PROJ(Load);
CASE_PROJ(Store);
CASE_PROJ_EX(Cond);
CASE_PROJ_EX(Div);
+ CASE_PROJ_EX(Load);
CASE_PROJ_EX(Mod);
default:
/* leave NULL */;
/* NEVER do CSE on volatile Loads */
return 1;
/* do not CSE Loads with different alignment. Be conservative. */
- if (get_Load_align(a) != get_Load_align(b))
+ if (get_Load_unaligned(a) != get_Load_unaligned(b))
return 1;
return get_Load_mode(a) != get_Load_mode(b);
static int node_cmp_attr_Store(const ir_node *a, const ir_node *b)
{
/* do not CSE Stores with different alignment. Be conservative. */
- if (get_Store_align(a) != get_Store_align(b))
+ if (get_Store_unaligned(a) != get_Store_unaligned(b))
return 1;
/* NEVER do CSE on volatile Stores */
/* Should we really check the constraints here? Should be better, but is strange. */
n = get_ASM_n_input_constraints(a);
if (n != get_ASM_n_input_constraints(b))
- return 0;
+ return 1;
ca = get_ASM_input_constraints(a);
cb = get_ASM_input_constraints(b);
for (i = 0; i < n; ++i) {
- if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint)
+ if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint
+ || ca[i].mode != cb[i].mode)
return 1;
}
n = get_ASM_n_output_constraints(a);
if (n != get_ASM_n_output_constraints(b))
- return 0;
+ return 1;
ca = get_ASM_output_constraints(a);
cb = get_ASM_output_constraints(b);
for (i = 0; i < n; ++i) {
- if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint)
+ if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint
+ || ca[i].mode != cb[i].mode)
return 1;
}
n = get_ASM_n_clobbers(a);
if (n != get_ASM_n_clobbers(b))
- return 0;
+ return 1;
cla = get_ASM_clobbers(a);
clb = get_ASM_clobbers(b);
if (nn != n) {
/* n is reachable again */
- edges_node_revival(nn, get_irn_irg(nn));
+ edges_node_revival(nn);
}
return nn;
Run always for transformation induced Bads. */
n = gigo(n);
if (n != oldn) {
- edges_node_deleted(oldn, irg);
+ edges_node_deleted(oldn);
/* We found an existing, better node, so we can deallocate the old node. */
irg_kill_node(irg, oldn);
memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0]));
/* note the inplace edges module */
- edges_node_deleted(n, irg);
+ edges_node_deleted(n);
/* evaluation was successful -- replace the node. */
irg_kill_node(irg, n);
n = identify_cons(n);
if (n != oldn) {
- edges_node_deleted(oldn, irg);
+ edges_node_deleted(oldn);
/* We found an existing, better node, so we can deallocate the old node. */
irg_kill_node(irg, oldn);
* @return
* The operations.
*/
-static ir_op_ops *firm_set_default_hash(ir_opcode code, ir_op_ops *ops)
+static ir_op_ops *firm_set_default_hash(unsigned code, ir_op_ops *ops)
{
#define CASE(a) \
case iro_##a: \
/*
* Sets the default operation for an ir_ops.
*/
-ir_op_ops *firm_set_default_operations(ir_opcode code, ir_op_ops *ops)
+ir_op_ops *firm_set_default_operations(unsigned code, ir_op_ops *ops)
{
ops = firm_set_default_hash(code, ops);
ops = firm_set_default_computed_value(code, ops);