/** the backend uses big-endian byte ordering if set, else little endian */
unsigned byte_order_big_endian:1;
- /** callback that performs lowerings required for target architecture */
- lower_for_target_func lower_for_target;
-
/** Settings for architecture dependent optimizations. */
const ir_settings_arch_dep_t *dep_param;
*/
FIRM_API const backend_params *be_get_backend_param(void);
+/**
+ * Lowers current program for the target architecture.
+ * This must be run once before using be_main. The idea here is that the backend
+ * can perform lowerings like doubleword-lowering, ABI adjustments,
+ * implementation of boolean values and if-conversion, using target specific
+ * settings.
+ * The resulting graph is still a "normal" firm-graph on which you can and
+ * should perform further architecture-neutral optimisations before be_main.
+ */
+FIRM_API void be_lower_for_target(void);
+
/**
* Creates an ir_prog pass which performs lowerings necessary for the target
* architecture. (Calling backend_params->lower_for_target)
0, /* no inline assembly */
0, /* no support for Rotl nodes */
0, /* 0: little-endian, 1: big-endian */
- TEMPLATE_lower_for_target, /* lowering for target */
NULL, /* architecture dependent settings, will be set later */
TEMPLATE_is_mux_allowed, /* parameter for if conversion */
NULL, /* float arithmetic mode */
const arch_isa_if_t TEMPLATE_isa_if = {
TEMPLATE_init,
+ TEMPLATE_lower_for_target,
TEMPLATE_done,
NULL, /* handle intrinsics */
TEMPLATE_get_reg_class_for_mode,
0, /* no inline assembly */
1, /* support Rotl nodes */
0, /* little endian */
- amd64_lower_for_target, /* lowering callback */
NULL, /* will be set later */
amd64_is_mux_allowed, /* parameter for if conversion */
NULL, /* float arithmetic mode */
const arch_isa_if_t amd64_isa_if = {
amd64_init,
+ amd64_lower_for_target,
amd64_done,
NULL, /* handle intrinsics */
amd64_get_reg_class_for_mode,
0, /* don't support inline assembler yet */
1, /* support Rotl nodes */
1, /* big endian */
- arm_lower_for_target, /* lowering function */
&ad, /* will be set later */
arm_is_mux_allowed, /* allow_ifconv function */
NULL, /* float arithmetic mode (TODO) */
const arch_isa_if_t arm_isa_if = {
arm_init,
+ arm_lower_for_target,
arm_done,
NULL, /* handle_intrinsics */
arm_get_reg_class_for_mode,
*/
arch_env_t *(*init)(FILE *file_handle);
+ /**
+ * lowers current program for target. See the documentation for
+ * be_lower_for_target() for details.
+ */
+ void (*lower_for_target)(void);
+
/**
* Free the isa instance.
*/
}
ir_timer_t *be_timers[T_LAST+1];
+void be_lower_for_target(void)
+{
+	int i;
+
+	isa_if->lower_for_target(); /* dispatch to the selected backend's lowering */
+	/* mark every graph as lowered so later phases know target lowering ran */
+	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+		ir_graph *irg = get_irp_irg(i);
+		set_irg_phase_state(irg, phase_low);
+	}
+	set_irp_phase_state(phase_low); /* whole program is now in the low phase */
+}
+
/**
* The Firm backend main loop.
* Do architecture specific lowering for all graphs
static int do_lower_for_target(ir_prog *irp, void *context)
{
- const backend_params *be_params = be_get_backend_param();
- be_params->lower_for_target();
+ be_lower_for_target();
(void) context;
(void) irp;
return 0;
1, /* support inline assembly */
1, /* support Rotl nodes */
0, /* little endian */
- ia32_lower_for_target,
NULL, /* will be set later */
ia32_is_mux_allowed,
NULL, /* float arithmetic mode, will be set below */
const arch_isa_if_t ia32_isa_if = {
ia32_init,
+ ia32_lower_for_target,
ia32_done,
ia32_handle_intrinsics,
ia32_get_reg_class_for_mode,
0, /* no inline assembly */
0, /* no support for RotL nodes */
1, /* big endian */
- sparc_lower_for_target, /* lowering callback */
&arch_dep, /* will be set later */
sparc_is_mux_allowed, /* parameter for if conversion */
NULL, /* float arithmetic mode */
const arch_isa_if_t sparc_isa_if = {
sparc_init,
+ sparc_lower_for_target,
sparc_done,
NULL, /* handle intrinsics */
sparc_get_reg_class_for_mode,