#include "config.h"
#endif
+#ifdef WITH_LIBCORE
+#include <libcore/lc_opts.h>
+#include <libcore/lc_opts_enum.h>
+#endif /* WITH_LIBCORE */
+
#include "pseudo_irg.h"
#include "irgwalk.h"
#include "irprog.h"
DB((mod, LEVEL_1, "returning standard reqs for %+F\n", irn));
if (mode_is_float(mode)) {
- memcpy(req, &(arm_default_req_arm_fp.req), sizeof(*req));
+ memcpy(req, &(arm_default_req_arm_fpa.req), sizeof(*req));
}
else if (mode_is_int(mode) || mode_is_reference(mode)) {
memcpy(req, &(arm_default_req_arm_gp.req), sizeof(*req));
return NULL;
}
+static void arm_set_frame_entity(const void *self, ir_node *irn, entity *ent) {
+ /* TODO: set the entity assigned to the frame */
+}
+
/**
* This function is called by the generic backend to correct offsets for
* nodes accessing the stack.
/* TODO: correct offset if irn accesses the stack */
}
+static int arm_get_sp_bias(const void *self, const ir_node *irn) {
+ return 0;
+}
+
/* fill register allocator interface */
static const arch_irn_ops_if_t arm_irn_ops_if = {
arm_classify,
arm_get_flags,
arm_get_frame_entity,
- arm_set_stack_bias
+ arm_set_frame_entity,
+ arm_set_stack_bias,
+ arm_get_sp_bias,
+ NULL, /* get_inverse */
+ NULL, /* get_op_estimated_cost */
+ NULL, /* possible_memory_operand */
+ NULL, /* perform_memory_operand */
};
arm_irn_ops_t arm_irn_ops = {
/**
* Called immediately before emit phase.
*/
-static void arm_finish_irg(ir_graph *irg, arm_code_gen_t *cg) {
+static void arm_finish_irg(void *self) {
/* TODO: - fix offsets for nodes accessing stack
- ...
*/
cg->emit_decls = 0;
}
- arm_finish_irg(irg, cg);
dump_ir_block_graph_sched(irg, "-arm-finished");
arm_gen_routine(out, irg, cg);
free(self);
}
-enum convert_which { low, high };
+/**
+ * Move a double floating point value into an integer register.
+ * Place the move operation into block bl.
+ *
+ * Handle some special cases here:
+ * 1.) A constant: simply split into two
+ * 2.) A load: simply split into two
+ */
+static ir_node *convert_dbl_to_int(ir_node *bl, ir_node *arg, ir_node *mem,
+ ir_node **resH, ir_node **resL) {
+ if (is_Const(arg)) {
+ tarval *tv = get_Const_tarval(arg);
+ unsigned v;
+
+ /* get the upper 32 bits */
+ v = get_tarval_sub_bits(tv, 7);
+ v = (v << 8) | get_tarval_sub_bits(tv, 6);
+ v = (v << 8) | get_tarval_sub_bits(tv, 5);
+ v = (v << 8) | get_tarval_sub_bits(tv, 4);
+ *resH = new_Const_long(mode_Is, v);
+
+ /* get the lower 32 bits */
+ v = get_tarval_sub_bits(tv, 3);
+ v = (v << 8) | get_tarval_sub_bits(tv, 2);
+ v = (v << 8) | get_tarval_sub_bits(tv, 1);
+ v = (v << 8) | get_tarval_sub_bits(tv, 0);
+ *resL = new_Const_long(mode_Is, v);
+ }
+ else if (get_irn_op(skip_Proj(arg)) == op_Load) {
+ /* FIXME: handling of low/high depends on LE/BE here */
+ assert(0);
+ }
+ else {
+ ir_graph *irg = current_ir_graph;
+ ir_node *conv;
+
+ conv = new_rd_arm_fpaDbl2GP(NULL, irg, bl, arg, mem);
+ /* move high/low */
+ *resL = new_r_Proj(irg, bl, conv, mode_Is, pn_arm_fpaDbl2GP_low);
+ *resH = new_r_Proj(irg, bl, conv, mode_Is, pn_arm_fpaDbl2GP_high);
+ mem = new_r_Proj(irg, bl, conv, mode_M, pn_arm_fpaDbl2GP_M);
+ }
+ return mem;
+}
/**
- * Move an floating point value to a integer register.
+ * Move a single floating point value into an integer register.
* Place the move operation into block bl.
+ *
+ * Handle some special cases here:
+ * 1.) A constant: simply move
+ * 2.) A load: simply load
*/
-static ir_node *convert_to_int(ir_node *bl, ir_node *arg, enum convert_which which) {
+static ir_node *convert_sng_to_int(ir_node *bl, ir_node *arg) {
+ if (is_Const(arg)) {
+ tarval *tv = get_Const_tarval(arg);
+ unsigned v;
+
+ /* get the lower 32 bits */
+ v = get_tarval_sub_bits(tv, 3);
+ v = (v << 8) | get_tarval_sub_bits(tv, 2);
+ v = (v << 8) | get_tarval_sub_bits(tv, 1);
+ v = (v << 8) | get_tarval_sub_bits(tv, 0);
+ return new_Const_long(mode_Is, v);
+ }
+ else if (get_irn_op(skip_Proj(arg)) == op_Load) {
+ ir_node *load;
+
+ load = skip_Proj(arg);
+ }
+ assert(0);
return NULL;
}
/**
* Convert the arguments of a call to support the
* ARM calling convention of general purpose AND floating
- * point arguments
+ * point arguments.
*/
static void handle_calls(ir_node *call, void *env)
{
if (mode_is_float(mode)) {
if (get_mode_size_bits(mode) > 32) {
+ ir_node *mem = get_Call_mem(call);
+
+ /* Beware: ARM wants the high part first */
size += 2 * 4;
- new_tp[idx] = cg->int_tp;
- new_in[idx] = convert_to_int(bl, get_Call_param(call, i), low);
- ++idx;
- new_tp[idx] = cg->int_tp;
- new_in[idx] = convert_to_int(bl, get_Call_param(call, i), high);
- ++idx;
+ new_tp[idx] = cg->int_tp;
+ new_tp[idx+1] = cg->int_tp;
+ mem = convert_dbl_to_int(bl, get_Call_param(call, i), mem, &new_in[idx], &new_in[idx+1]);
+ idx += 2;
+ set_Call_mem(call, mem);
}
else {
size += 4;
new_tp[idx] = cg->int_tp;
- new_in[idx] = convert_to_int(bl, get_Call_param(call, i), low);
+ new_in[idx] = convert_sng_to_int(bl, get_Call_param(call, i));
++idx;
}
flag = 1;
}
/**
- * Handle graph transformations before the abi converter does it's work
+ * Handle graph transformations before the abi converter does its work.
*/
static void arm_before_abi(void *self) {
arm_code_gen_t *cg = self;
arm_prepare_graph,
arm_before_sched, /* before scheduling hook */
arm_before_ra, /* before register allocation hook */
- NULL, /* after register allocation */
+ NULL, /* after register allocation */
+ arm_finish_irg,
arm_emit_and_done,
};
* and map all instructions the backend did not support
* to runtime calls.
*/
-static void arm_global_init(void) {
+static void arm_handle_intrinsics(void) {
ir_type *tp, *int_tp, *uint_tp;
i_record records[8];
int n_records = 0;
0, /* use generic register names instead of SP, LR, PC */
NULL, /* current code generator */
NULL, /* output file */
+ ARM_FPU_ARCH_FPE, /* FPU architecture */
};
/**
isa->out = file_handle;
arm_create_opcodes();
- arm_global_init();
+ arm_handle_intrinsics();
arm_switch_section(NULL, NO_SECTION);
inited = 1;
* Return the register class with requested index.
*/
static const arch_register_class_t *arm_get_reg_class(const void *self, int i) {
- return i == 0 ? &arm_reg_classes[CLASS_arm_gp] : &arm_reg_classes[CLASS_arm_fp];
+ return i == 0 ? &arm_reg_classes[CLASS_arm_gp] : &arm_reg_classes[CLASS_arm_fpa];
}
/**
*/
const arch_register_class_t *arm_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
if (mode_is_float(mode))
- return &arm_reg_classes[CLASS_arm_fp];
+ return &arm_reg_classes[CLASS_arm_fpa];
else
return &arm_reg_classes[CLASS_arm_gp];
}
static void arm_abi_dont_save_regs(void *self, pset *s)
{
arm_abi_env_t *env = self;
- if(env->flags.try_omit_fp)
+ if (env->flags.try_omit_fp)
pset_insert_ptr(s, env->isa->bp);
}
return env->isa->sp;
ip = be_new_Copy(gp, irg, block, sp );
- arch_set_irn_register(env->arch_env, ip, &arm_gp_regs[REG_R12]);
- be_set_constr_single_reg(ip, BE_OUT_POS(0), &arm_gp_regs[REG_R12] );
+ arch_set_irn_register(env->arch_env, ip, &arm_gp_regs[REG_R12]);
+ be_set_constr_single_reg(ip, BE_OUT_POS(0), &arm_gp_regs[REG_R12] );
// if (r0) regs[n_regs++] = r0;
// if (r1) regs[n_regs++] = r1;
// TODO: Activate Omit fp in epilogue
if(env->flags.try_omit_fp) {
- curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
+ curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK);
+ add_irn_dep(curr_sp, *mem);
curr_lr = be_new_CopyKeep_single(&arm_reg_classes[CLASS_arm_gp], env->irg, bl, curr_lr, curr_sp, get_irn_mode(curr_lr));
be_node_set_reg_class(curr_lr, 1, &arm_reg_classes[CLASS_arm_gp]);
mode = get_type_mode(tp);
be_abi_call_res_reg(abi, 0,
- mode_is_float(mode) ? &arm_fp_regs[REG_F0] : &arm_gp_regs[REG_R0]);
+ mode_is_float(mode) ? &arm_fpa_regs[REG_F0] : &arm_gp_regs[REG_R0]);
}
}
/**
* Returns the reg_pressure scheduler with to_appear_in_schedule() over\loaded
*/
-static const list_sched_selector_t *arm_get_list_sched_selector(const void *self) {
+static const list_sched_selector_t *arm_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
memcpy(&arm_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t));
arm_sched_selector.to_appear_in_schedule = arm_to_appear_in_schedule;
return &arm_sched_selector;
return get_mode_size_bytes(mode);
}
+/**
+ * Returns the libFirm configuration parameter for this backend.
+ */
+static const backend_params *arm_get_libfirm_params(void) {
+ static arch_dep_params_t ad = {
+ 1, /* allow subs */
+ 0, /* Muls are fast enough on ARM */
+ 31, /* shift would be ok */
+ 0, /* SMUL is needed, only in Arch M */
+ 0, /* UMUL is needed, only in Arch M */
+ 32, /* SMUL & UMUL available for 32 bit */
+ };
+ static backend_params p = {
+ NULL, /* no additional opcodes */
+ NULL, /* will be set later */
+ 1, /* need dword lowering */
+ NULL, /* but yet no creator function */
+ NULL, /* context for create_intrinsic_fkt */
+ };
+
+ p.dep_param = &ad;
+ return &p;
+}
+
#ifdef WITH_LIBCORE
+
+/* fpu set architectures. */
+static const lc_opt_enum_int_items_t arm_fpu_items[] = {
+ { "softfloat", ARM_FPU_ARCH_SOFTFLOAT },
+ { "fpe", ARM_FPU_ARCH_FPE },
+ { "fpa", ARM_FPU_ARCH_FPA },
+ { "vfp1xd", ARM_FPU_ARCH_VFP_V1xD },
+ { "vfp1", ARM_FPU_ARCH_VFP_V1 },
+ { "vfp2", ARM_FPU_ARCH_VFP_V2 },
+ { NULL, 0 }
+};
+
+static lc_opt_enum_int_var_t arch_fpu_var = {
+ &arm_isa_template.fpu_arch, arm_fpu_items
+};
+
static const lc_opt_table_entry_t arm_options[] = {
+ LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &arch_fpu_var),
LC_OPT_ENT_BOOL("gen_reg_names", "use generic register names", &arm_isa_template.gen_reg_names),
{ NULL }
};
*
* Options so far:
*
+ * arm-fpunit=unit select the floating point unit
* arm-gen_reg_names use generic register names instead of SP, LR, PC
*/
static void arm_register_options(lc_opt_entry_t *ent)
arm_get_code_generator_if,
arm_get_list_sched_selector,
arm_get_reg_class_alignment,
+ arm_get_libfirm_params,
#ifdef WITH_LIBCORE
arm_register_options
#endif