return arch_irn_class_branch;
else if (is_ia32_Cnst(irn))
return arch_irn_class_const;
+ else if (is_ia32_Ld(irn))
+ return arch_irn_class_load;
+ else if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
+ return arch_irn_class_store;
else if (is_ia32_irn(irn))
return arch_irn_class_normal;
else
return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
}
+/**
+ * Get the estimated cycle count for @p irn.
+ *
+ * @param self The this pointer.
+ * @param irn The node.
+ *
+ * @return The estimated cycle count for this operation
+ */
+static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
+{
+ int cost;
+ switch (get_ia32_irn_opcode(irn)) {
+ /* division is the most expensive ALU operation */
+ case iro_ia32_xDiv:
+ case iro_ia32_DivMod:
+ cost = 8;
+ break;
+
+ /* memory reads (Push/Pop touch the stack and count as loads, too) */
+ case iro_ia32_xLoad:
+ case iro_ia32_l_Load:
+ case iro_ia32_Load:
+ case iro_ia32_Push:
+ case iro_ia32_Pop:
+ cost = 10;
+ break;
+
+ /* memory writes; NOTE(review): weighted far above loads (50 vs. 10) —
+ * presumably to strongly discourage spill code, confirm intent */
+ case iro_ia32_xStore:
+ case iro_ia32_l_Store:
+ case iro_ia32_Store:
+ case iro_ia32_Store8Bit:
+ cost = 50;
+ break;
+
+ /* multiplications of all flavours */
+ case iro_ia32_MulS:
+ case iro_ia32_Mul:
+ case iro_ia32_Mulh:
+ case iro_ia32_xMul:
+ case iro_ia32_l_MulS:
+ case iro_ia32_l_Mul:
+ cost = 2;
+ break;
+
+ /* every other operation is assumed to take a single cycle */
+ default:
+ cost = 1;
+ }
+
+ return cost;
+}
+
/**
* Returns the inverse operation if @p irn, recalculating the argument at position @p i.
*
return NULL;
}
+ set_ia32_res_mode(inverse->nodes[0], mode);
inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);
return inverse;
ia32_get_flags,
ia32_get_frame_entity,
ia32_set_stack_bias,
- ia32_get_inverse
+ ia32_get_inverse,
+ ia32_get_op_estimated_cost
};
ia32_irn_ops_t ia32_irn_ops = {
reg = arch_get_irn_register(env->cg->arch_env, irn);
arch_set_irn_register(env->cg->arch_env, new_op, reg);
- SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, new_op));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));
exchange(irn, proj);
}
sched_remove(irn);
}
- SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, new_op));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));
exchange(irn, proj);
}
irg_block_walk_graph(cg->irg, NULL, ia32_after_ra_walker, self);
/* if we do x87 code generation, rewrite all the virtual instructions and registers */
- if (cg->used_fp == fp_x87) {
+ if (cg->used_fp == fp_x87 || cg->force_sim) {
x87_simulate_graph(cg->arch_env, cg->irg, cg->blk_sched);
}
}
* Return the number of register classes for this architecture.
* We report always these:
* - the general purpose registers
- * - the floating point register set (depending on the unit used for FP)
- * - MMX/SSE registers (currently not supported)
+ * - the SSE floating point register set
+ * - the virtual floating point registers
*/
static int ia32_get_n_reg_class(const void *self) {
- return 2;
+ /* gp, xmm and vfp — must stay in sync with ia32_get_reg_class() */
+ return 3;
}
/**
*/
static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
- const ia32_isa_t *isa = self;
- assert(i >= 0 && i < 2 && "Invalid ia32 register class requested.");
+ /* the class set is now fixed (gp, xmm, vfp), so the isa pointer that was
+ * only consulted via USE_SSE2() is no longer needed — removing it avoids
+ * an unused-variable warning in the patched file */
+ assert(i >= 0 && i < 3 && "Invalid ia32 register class requested.");
if (i == 0)
return &ia32_reg_classes[CLASS_ia32_gp];
- return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
+ else if (i == 1)
+ return &ia32_reg_classes[CLASS_ia32_xmm];
+ else
+ return &ia32_reg_classes[CLASS_ia32_vfp];
}
/**
return bytes;
}
+/* context handed to ia32_create_intrinsic_fkt (wired up in the backend
+ * params below); both fields start NULL — presumably filled in lazily at
+ * first use, TODO confirm at the use site */
+static ia32_intrinsic_env_t intrinsic_env = { NULL, NULL };
+
/**
* Returns the libFirm configuration parameter for this backend.
*/
NULL, /* will be set later */
1, /* need dword lowering */
ia32_create_intrinsic_fkt,
- NULL, /* context for ia32_create_intrinsic_fkt */
+ &intrinsic_env, /* context for ia32_create_intrinsic_fkt */
};
p.dep_param = &ad;