DBG((mod, LEVEL_1, "returning standard reqs for %+F\n", irn));
if (mode_is_float(mode)) {
- memcpy(req, &(ppc32_default_req_ppc32_floating_point.req), sizeof(*req));
+ memcpy(req, &(ppc32_default_req_ppc32_fp.req), sizeof(*req));
}
else if (mode_is_int(mode) || mode_is_reference(mode)) {
- memcpy(req, &(ppc32_default_req_ppc32_general_purpose.req), sizeof(*req));
+ memcpy(req, &(ppc32_default_req_ppc32_gp.req), sizeof(*req));
}
else if (mode == mode_T || mode == mode_M) {
DBG((mod, LEVEL_1, "ignoring Phi node %+F\n", irn));
return get_ppc32_frame_entity(irn);
}
+/**
+ * Sets the frame entity attribute of a ppc32 node.
+ *
+ * Only nodes that are ppc32 backend nodes AND carry the FrameEntity
+ * attribute class are touched; all other nodes are silently ignored,
+ * since they have no frame-entity slot to set.
+ *
+ * @param self  the irn-ops environment (unused here)
+ * @param irn   the node whose frame entity should be set
+ * @param ent   the entity to assign
+ */
+static void ppc32_set_frame_entity(const void *self, const ir_node *irn, entity *ent) {
+ if (! is_ppc32_irn(irn) || get_ppc32_type(irn) != ppc32_ac_FrameEntity)
+  return;
+ set_ppc32_frame_entity(irn, ent);
+}
+
/**
* This function is called by the generic backend to correct offsets for
* nodes accessing the stack.
isleaf = flags.bits.irg_is_leaf;
if(flags.bits.try_omit_fp)
- return &ppc32_general_purpose_regs[REG_R1];
+ return &ppc32_gp_regs[REG_R1];
else
- return &ppc32_general_purpose_regs[REG_R31];
+ return &ppc32_gp_regs[REG_R31];
}
/**
ppc32_classify,
ppc32_get_flags,
ppc32_get_frame_entity,
- ppc32_set_stack_bias
+ ppc32_set_frame_entity,
+ ppc32_set_stack_bias,
+ NULL, /* get_inverse */
+ NULL, /* get_op_estimated_cost */
+ NULL, /* possible_memory_operand */
+ NULL, /* perform_memory_operand */
};
ppc32_irn_ops_t ppc32_irn_ops = {
irg_walk_blkwise_graph(cg->irg, NULL, ppc32_pretransform_walk, cg);
be_dump(cg->irg, "-pretransformed", dump_ir_block_graph);
+ ppc32_register_transformers();
irg_walk_blkwise_graph(cg->irg, NULL, ppc32_transform_node, cg);
be_dump(cg->irg, "-transformed", dump_ir_block_graph);
irg_walk_blkwise_graph(cg->irg, NULL, ppc32_transform_const, cg);
/**
 * Called immediately before emit phase.
*/
-static void ppc32_finish_irg(ir_graph *irg, ppc32_code_gen_t *cg) {
+static void ppc32_finish_irg(void *self) {
/* TODO: - fix offsets for nodes accessing stack
- ...
*/
const arch_register_class_t *regclass = arch_get_irn_reg_class(cgenv->arch_env, node, 1);
- if (regclass == &ppc32_reg_classes[CLASS_ppc32_general_purpose])
+ if (regclass == &ppc32_reg_classes[CLASS_ppc32_gp])
{
store = new_rd_ppc32_Stw(dbg, current_ir_graph, block,
- get_irn_n(node, 0), get_irn_n(node, 1), new_rd_NoMem(current_ir_graph), mode_T);
+ get_irn_n(node, 0), get_irn_n(node, 1), new_rd_NoMem(current_ir_graph));
}
- else if (regclass == &ppc32_reg_classes[CLASS_ppc32_floating_point])
+ else if (regclass == &ppc32_reg_classes[CLASS_ppc32_fp])
{
store = new_rd_ppc32_Stfd(dbg, current_ir_graph, block,
- get_irn_n(node, 0), get_irn_n(node, 1), new_rd_NoMem(current_ir_graph), mode_T);
+ get_irn_n(node, 0), get_irn_n(node, 1), new_rd_NoMem(current_ir_graph));
}
else assert(0 && "Spill for register class not supported yet!");
const arch_register_class_t *regclass = arch_get_irn_reg_class(cgenv->arch_env, node, -1);
- if (regclass == &ppc32_reg_classes[CLASS_ppc32_general_purpose])
+ if (regclass == &ppc32_reg_classes[CLASS_ppc32_gp])
{
- load = new_rd_ppc32_Lwz(dbg, current_ir_graph, block,
- get_irn_n(node, 0), get_irn_n(node, 1), mode_T);
+ load = new_rd_ppc32_Lwz(dbg, current_ir_graph, block, get_irn_n(node, 0), get_irn_n(node, 1));
}
- else if (regclass == &ppc32_reg_classes[CLASS_ppc32_floating_point])
+ else if (regclass == &ppc32_reg_classes[CLASS_ppc32_fp])
{
- load = new_rd_ppc32_Lfd(dbg, current_ir_graph, block,
- get_irn_n(node, 0), get_irn_n(node, 1), mode_T);
+ load = new_rd_ppc32_Lfd(dbg, current_ir_graph, block, get_irn_n(node, 0), get_irn_n(node, 1));
}
else assert(0 && "Reload for register class not supported yet!");
static void ppc32_emit_and_done(void *self) {
ppc32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
- FILE *out = cg->out;
+ FILE *out = cg->isa->out;
if (cg->emit_decls) {
- ppc32_gen_decls(cg->out);
+ ppc32_gen_decls(out);
cg->emit_decls = 0;
}
- ppc32_finish_irg(irg, cg);
dump_ir_block_graph_sched(irg, "-ppc-finished");
ppc32_gen_routine(out, irg, cg);
}
}
-static void *ppc32_cg_init(FILE *F, const be_irg_t *birg);
+static void *ppc32_cg_init(const be_irg_t *birg);
static const arch_code_generator_if_t ppc32_code_gen_if = {
ppc32_cg_init,
ppc32_before_sched, /* before scheduling hook */
ppc32_before_ra, /* before register allocation hook */
ppc32_after_ra,
+ ppc32_finish_irg,
ppc32_emit_and_done
};
/**
* Initializes the code generator.
*/
-static void *ppc32_cg_init(FILE *F, const be_irg_t *birg) {
+static void *ppc32_cg_init(const be_irg_t *birg) {
ppc32_isa_t *isa = (ppc32_isa_t *)birg->main_env->arch_env->isa;
ppc32_code_gen_t *cg = xmalloc(sizeof(*cg));
cg->impl = &ppc32_code_gen_if;
cg->irg = birg->irg;
cg->reg_set = new_set(ppc32_cmp_irn_reg_assoc, 1024);
- cg->out = F;
cg->arch_env = birg->main_env->arch_env;
+ cg->isa = isa;
cg->birg = birg;
cg->area_size = 0;
cg->area = NULL;
static ppc32_isa_t ppc32_isa_template = {
&ppc32_isa_if,
- &ppc32_general_purpose_regs[REG_R1], // stack pointer
- &ppc32_general_purpose_regs[REG_R31], // base pointer
- -1, // stack is decreasing
- 0 // num codegens... ??
+ &ppc32_gp_regs[REG_R1], // stack pointer
+ &ppc32_gp_regs[REG_R31], // base pointer
+ -1, // stack is decreasing
+ 0, // num codegens... ??
+ NULL
};
/**
 * Initializes the backend ISA and stores the (already opened) output file handle.
*/
-static void *ppc32_init(void) {
+static void *ppc32_init(FILE *file_handle) {
static int inited = 0;
ppc32_isa_t *isa;
if(inited)
return NULL;
- isa = xcalloc(1, sizeof(*isa));
+ isa = xmalloc(sizeof(*isa));
memcpy(isa, &ppc32_isa_template, sizeof(*isa));
+ isa->out = file_handle;
+
ppc32_register_init(isa);
ppc32_create_opcodes();
*/
const arch_register_class_t *ppc32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
if (mode_is_float(mode))
- return &ppc32_reg_classes[CLASS_ppc32_floating_point];
+ return &ppc32_reg_classes[CLASS_ppc32_fp];
else
- return &ppc32_reg_classes[CLASS_ppc32_general_purpose];
+ return &ppc32_reg_classes[CLASS_ppc32_gp];
}
{
if(get_mode_size_bits(mode) == 32) gpregi++, stackparamsize=4;
else gpregi += 2, stackparamsize=8; // mode == irm_D
- reg = &ppc32_floating_point_regs[fpregi++];
+ reg = &ppc32_fp_regs[fpregi++];
}
else
{
else
{
if(gpregi <= REG_R10)
- reg = &ppc32_general_purpose_regs[gpregi++];
+ reg = &ppc32_gp_regs[gpregi++];
else
reg = NULL;
stackparamsize=4;
mode = get_type_mode(tp);
be_abi_call_res_reg(abi, 0,
- mode_is_float(mode) ? &ppc32_floating_point_regs[REG_F1] : &ppc32_general_purpose_regs[REG_R3]);
+ mode_is_float(mode) ? &ppc32_fp_regs[REG_F1] : &ppc32_gp_regs[REG_R3]);
}
}
return get_mode_size_bytes(mode);
}
+/**
+ * Returns the libFirm configuration parameters for this backend.
+ *
+ * NOTE(review): the arch_dep_params_t values and comments below look
+ * copied from the ARM backend — verify each entry against the real
+ * ppc32 characteristics.
+ */
+static const backend_params *ppc32_get_libfirm_params(void) {
+ static arch_dep_params_t ad = {
+  1, /* allow subs */
+  0, /* muls assumed fast enough — TODO: copied from ARM backend, confirm for ppc32 */
+  31, /* shift amounts up to 31 are ok */
+  0, /* no SMUL replacement — comment inherited from ARM ("Arch M"), verify */
+  0, /* no UMUL replacement — comment inherited from ARM ("Arch M"), verify */
+  32, /* SMUL & UMUL available for 32 bit */
+ };
+ static backend_params p = {
+  NULL, /* no additional opcodes */
+  NULL, /* will be set later */
+  1, /* need dword lowering */
+  NULL, /* but yet no creator function */
+  NULL, /* context for create_intrinsic_fkt */
+ };
+
+ /* dep_param cannot be set in the static initializer portably, so patch it in here. */
+ p.dep_param = &ad;
+ return &p;
+}
+
#ifdef WITH_LIBCORE
static void ppc32_register_options(lc_opt_entry_t *ent)
{
ppc32_get_code_generator_if,
ppc32_get_list_sched_selector,
ppc32_get_reg_class_alignment,
+ ppc32_get_libfirm_params,
#ifdef WITH_LIBCORE
ppc32_register_options
#endif