* Transforms the standard firm graph into
* an ia32 firm graph
*/
-static void ia32_prepare_graph(void *self) {
- ia32_code_gen_t *cg = self;
+static void ia32_prepare_graph(void *self)
+{
+ ia32_code_gen_t *cg = self;
+ ir_graph *irg = cg->irg;
/* do local optimizations */
- optimize_graph_df(cg->irg);
+ optimize_graph_df(irg);
+
+ /* we have to do cfopt+remove_critical_edges as we can't have Bad-blocks
+ * or critical edges in the backend */
+ optimize_cf(irg);
+ remove_critical_cf_edges(irg);
/* TODO: we often have dead code reachable through out-edges here. So for
* now we rebuild edges (as we need correct user count for code selection)
}
/**
- * Check for Abs or Nabs.
+ * Check for Abs or -Abs.
*/
-static int is_Abs_or_Nabs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f) {
+static int psi_is_Abs_or_Nabs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f) {
ir_node *l, *r;
pn_Cmp pnc;
return 1;
}
+/**
+ * Check whether the Psi is a plain Abs; Nabs patterns are rejected.
+ */
+static int psi_is_Abs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f) {
+ ir_node *l, *r;
+ pn_Cmp pnc;
+
+ if (cmp == NULL)
+ return 0;
+
+ /* must be <, <=, >=, > */
+ pnc = get_Proj_proj(sel);
+ if (pnc != pn_Cmp_Ge && pnc != pn_Cmp_Gt &&
+ pnc != pn_Cmp_Le && pnc != pn_Cmp_Lt)
+ return 0;
+
+ l = get_Cmp_left(cmp);
+ r = get_Cmp_right(cmp);
+
+	/* must be "x cmp 0", with x being one of the Psi operands */
+ if ((l != t && l != f) || !is_Const(r) || !is_Const_null(r))
+ return 0;
+
+ if ((!is_Minus(t) || get_Minus_op(t) != f) &&
+ (!is_Minus(f) || get_Minus_op(f) != t))
+ return 0;
+
+ if (pnc & pn_Cmp_Gt) {
+ /* x >= 0 ? -x : x is NABS */
+ if (is_Minus(t))
+ return 0;
+ } else {
+ /* x < 0 ? x : -x is NABS */
+ if (is_Minus(f))
+ return 0;
+ }
+ return 1;
+}
+
+
/**
* Allows or disallows the creation of Psi nodes for the given Phi nodes.
*
if (is_Cmp(cmp)) {
ir_node *left = get_Cmp_left(cmp);
ir_mode *cmp_mode = get_irn_mode(left);
- if (!mode_is_float(cmp_mode) && get_mode_size_bits(cmp_mode) > 32)
- return 0;
+ if (!mode_is_float(cmp_mode) && get_mode_size_bits(cmp_mode) > 32) {
+ /* 64bit Abs IS supported */
+ for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
+ ir_node *t = get_Phi_pred(phi, i);
+ ir_node *f = get_Phi_pred(phi, j);
+
+ if (! psi_is_Abs(cmp, sel, t, f))
+ return 0;
+ }
+ return 1;
+ }
} else {
cmp = NULL;
}
ir_node *t = get_Phi_pred(phi, i);
ir_node *f = get_Phi_pred(phi, j);
- if (! is_Abs_or_Nabs(cmp, sel, t, f))
+ if (! psi_is_Abs_or_Nabs(cmp, sel, t, f))
return 0;
} else if (get_mode_size_bits(mode) > 32)
return 0;
if (mode_is_float(mode)) {
/* only abs or nabs supported */
- if (! is_Abs_or_Nabs(cmp, sel, t, f))
+ if (! psi_is_Abs_or_Nabs(cmp, sel, t, f))
return 0;
} else if (get_mode_size_bits(mode) > 32) {
/* no 64bit yet */
1, /* allow Mulhs */
1, /* allow Mulus */
- 32 /* Mulh allowed up to 32 bit */
+ 32, /* Mulh allowed up to 32 bit */
};
static backend_params p = {
1, /* need dword lowering */
1, /* support inline assembly */
0, /* no immediate floating point mode. */
- NULL, /* no additional opcodes */
NULL, /* will be set later */
ia32_create_intrinsic_fkt,
&intrinsic_env, /* context for ia32_create_intrinsic_fkt */