#include "../besched_t.h"
#include "../be.h"
#include "../beabi.h"
+#include "../bemachine.h"
+#include "../bemodule.h"
+#include "../beblocksched.h"
#include "pset.h"
return 0;
}
+/** Returns the frame entity attached to a ppc32 FrameEntity node, or NULL otherwise. */
-static entity *ppc32_get_frame_entity(const void *self, const ir_node *irn) {
+static ir_entity *ppc32_get_frame_entity(const void *self, const ir_node *irn) {
if(!is_ppc32_irn(irn)) return NULL;
if(get_ppc32_type(irn)!=ppc32_ac_FrameEntity) return NULL;
return get_ppc32_frame_entity(irn);
}
+/** Stores @p ent as the frame entity of @p irn; silently ignores non-FrameEntity nodes. */
+static void ppc32_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent) {
+ if (! is_ppc32_irn(irn) || get_ppc32_type(irn) != ppc32_ac_FrameEntity)
+ return;
+ set_ppc32_frame_entity(irn, ent);
+}
+
/**
* This function is called by the generic backend to correct offsets for
* nodes accessing the stack.
set_ppc32_offset(irn, bias);
}
+/** Stack-pointer bias callback: always reports 0 for ppc32 nodes. */
+static int ppc32_get_sp_bias(const void *self, const ir_node *irn) {
+ return 0;
+}
+
typedef struct
{
const be_abi_call_t *call;
+/**
+ * Builds (once, lazily) the "between" type that lies between the argument
+ * area and the local variables: the old base pointer followed by the
+ * return address.
+ */
static ir_type *ppc32_abi_get_between_type(void *self)
{
static ir_type *between_type = NULL;
- static entity *old_bp_ent = NULL;
+ static ir_entity *old_bp_ent = NULL;
if(!between_type) {
- entity *ret_addr_ent;
+ ir_entity *ret_addr_ent;
ir_type *ret_addr_type = new_type_primitive(new_id_from_str("return_addr"), mode_P);
ir_type *old_bp_type = new_type_primitive(new_id_from_str("bp"), mode_P);
+ /* NOTE(review): between_type still appears to be NULL when new_entity is
+ called below -- the struct-type creation line seems to be missing from
+ this hunk; confirm against the full file. */
old_bp_ent = new_entity(between_type, new_id_from_str("old_bp"), old_bp_type);
- ret_addr_ent = new_entity(between_type, new_id_from_str("old_bp"), ret_addr_type);
+ /* fix copy-paste bug: the return-address entity was also named "old_bp" */
+ ret_addr_ent = new_entity(between_type, new_id_from_str("ret_addr"), ret_addr_type);
- set_entity_offset_bytes(old_bp_ent, 0);
- set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
+ set_entity_offset(old_bp_ent, 0);
+ set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
}
ppc32_classify,
ppc32_get_flags,
ppc32_get_frame_entity,
+ ppc32_set_frame_entity,
ppc32_set_stack_bias,
- NULL
+ ppc32_get_sp_bias,
+ NULL, /* get_inverse */
+ NULL, /* get_op_estimated_cost */
+ NULL, /* possible_memory_operand */
+ NULL, /* perform_memory_operand */
};
ppc32_irn_ops_t ppc32_irn_ops = {
/**
 * Called immediately before the emit phase.
*/
-static void ppc32_finish_irg(ir_graph *irg, ppc32_code_gen_t *cg) {
+static void ppc32_finish_irg(void *self) {
/* TODO: - fix offsets for nodes accessing stack
- ...
*/
*/
+/** Before-register-allocation hook: computes the block schedule for the irg. */
static void ppc32_before_ra(void *self) {
ppc32_code_gen_t *cg = self;
- cg->blk_sched = sched_create_block_schedule(cg->irg);
+ cg->blk_sched = be_create_block_schedule(cg->irg, cg->birg->exec_freq);
}
static void ppc32_transform_spill(ir_node *node, void *env)
cg->emit_decls = 0;
}
- ppc32_finish_irg(irg, cg);
dump_ir_block_graph_sched(irg, "-ppc-finished");
ppc32_gen_routine(out, irg, cg);
}
}
-int is_direct_entity(entity *ent);
+int is_direct_entity(ir_entity *ent);
/**
* Collects all SymConsts which need to be accessed "indirectly"
void ppc32_collect_symconsts_walk(ir_node *node, void *env) {
if(get_irn_op(node) == op_SymConst)
{
- entity *ent = get_SymConst_entity(node);
+ ir_entity *ent = get_SymConst_entity(node);
+ /* entities that cannot be addressed directly are gathered in the
+ (presumably file-global) symbol_pset -- see is_direct_entity() */
if(!is_direct_entity(ent))
pset_insert_ptr(symbol_pset, ent);
}
}
-static void *ppc32_cg_init(const be_irg_t *birg);
+static void *ppc32_cg_init(be_irg_t *birg);
+/** Code-generator interface: the per-irg hooks the generic backend invokes. */
static const arch_code_generator_if_t ppc32_code_gen_if = {
ppc32_cg_init,
ppc32_before_abi,
ppc32_prepare_graph,
+ NULL, /* spill */
ppc32_before_sched, /* before scheduling hook */
ppc32_before_ra, /* before register allocation hook */
ppc32_after_ra,
+ ppc32_finish_irg,
ppc32_emit_and_done
};
/**
* Initializes the code generator.
*/
-static void *ppc32_cg_init(const be_irg_t *birg) {
+static void *ppc32_cg_init(be_irg_t *birg) {
ppc32_isa_t *isa = (ppc32_isa_t *)birg->main_env->arch_env->isa;
ppc32_code_gen_t *cg = xmalloc(sizeof(*cg));
/**
* Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
*/
-static const list_sched_selector_t *ppc32_get_list_sched_selector(const void *self) {
+static const list_sched_selector_t *ppc32_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
+ /* start from the trivial selector, then override only the
+ to_appear_in_schedule hook with the ppc32-specific one */
memcpy(&ppc32_sched_selector, trivial_selector, sizeof(list_sched_selector_t));
ppc32_sched_selector.to_appear_in_schedule = ppc32_to_appear_in_schedule;
return &ppc32_sched_selector;
}
+/** No ILP scheduler is provided for ppc32. */
+static const ilp_sched_selector_t *ppc32_get_ilp_sched_selector(const void *self) {
+ return NULL;
+}
+
/**
* Returns the necessary byte alignment for storing a register of given class.
*/
return get_mode_size_bytes(mode);
}
-#ifdef WITH_LIBCORE
-static void ppc32_register_options(lc_opt_entry_t *ent)
+/** Execution-unit query -- not implemented for ppc32 yet (asserts if called). */
+static const be_execution_unit_t ***ppc32_get_allowed_execution_units(const void *self, const ir_node *irn) {
+ /* TODO */
+ assert(0);
+ return NULL;
+}
+
+/** Machine-description query -- not implemented for ppc32 yet (asserts if called). */
+static const be_machine_t *ppc32_get_machine(const void *self) {
+ /* TODO */
+ assert(0);
+ return NULL;
+}
+
+/**
+ * Returns the libFirm configuration parameter for this backend.
+ */
+static const backend_params *ppc32_get_libfirm_params(void) {
+ /* NOTE(review): these values and comments look copied from the ARM
+ backend -- verify each entry against actual ppc32 capabilities */
+ static arch_dep_params_t ad = {
+ 1, /* allow subs */
+ 0, /* muls: value inherited from the ARM backend -- TODO confirm for ppc32 */
+ 31, /* shift would be ok */
+ 0, /* SMUL high-part: ARM-specific comment removed -- TODO confirm for ppc32 */
+ 0, /* UMUL high-part: ARM-specific comment removed -- TODO confirm for ppc32 */
+ 32, /* SMUL & UMUL available for 32 bit */
+ };
+ static backend_params p = {
+ NULL, /* no additional opcodes */
+ NULL, /* will be set later */
+ 1, /* need dword lowering */
+ NULL, /* but yet no creator function */
+ NULL, /* context for create_intrinsic_fkt */
+ };
+
+ p.dep_param = &ad;
+ return &p;
+}
+
+/** Backend module constructor: registered with the be module mechanism below. */
+void be_init_arch_ppc32(void)
{
}
-#endif /* WITH_LIBCORE */
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ppc32);
+/** The ppc32 ISA interface vtable handed to the generic backend. */
const arch_isa_if_t ppc32_isa_if = {
ppc32_init,
ppc32_get_irn_handler,
ppc32_get_code_generator_if,
ppc32_get_list_sched_selector,
+ ppc32_get_ilp_sched_selector,
ppc32_get_reg_class_alignment,
-#ifdef WITH_LIBCORE
- ppc32_register_options
-#endif
+ ppc32_get_libfirm_params,
+ ppc32_get_allowed_execution_units,
+ ppc32_get_machine,
};