5 #include "pseudo_irg.h"
15 #include "../bearch.h" /* the general register allocator interface */
16 #include "../benode_t.h"
17 #include "../belower.h"
18 #include "../besched_t.h"
19 #include "bearch_TEMPLATE_t.h"
21 #include "TEMPLATE_new_nodes.h" /* TEMPLATE nodes interface */
22 #include "gen_TEMPLATE_regalloc_if.h" /* the generated interface (register type and class definitions) */
23 #include "TEMPLATE_gen_decls.h" /* interface declaration emitter */
24 #include "TEMPLATE_transform.h"
25 #include "TEMPLATE_emitter.h"
26 #include "TEMPLATE_map_regs.h"
28 #define DEBUG_MODULE "firm.be.TEMPLATE.isa"
30 /* TODO: ugly, but we need it to get access to the registers assigned to Phi nodes */
/* NOTE(review): file-scope mutable state; assigned in TEMPLATE_cg_init
 * (cur_reg_set = cg->reg_set) and read by TEMPLATE_set_irn_reg /
 * TEMPLATE_get_irn_reg to record/look up registers of non-backend (Phi)
 * nodes. Not thread-safe and tied to the most recent code generator. */
31 static set *cur_reg_set = NULL;
33 /**************************************************
36 * _ __ ___ __ _ __ _| | | ___ ___ _| |_
37 * | '__/ _ \/ _` | / _` | | |/ _ \ / __| | | _|
38 * | | | __/ (_| | | (_| | | | (_) | (__ | | |
39 * |_| \___|\__, | \__,_|_|_|\___/ \___| |_|_|
42 **************************************************/
/* Skip a Proj node to its predecessor.
 * NOTE(review): body truncated in this view — presumably returns
 * get_Proj_pred(n) for Proj nodes and n itself otherwise; confirm. */
44 static ir_node *my_skip_proj(const ir_node *n) {
51 * Return register requirements for a TEMPLATE node.
52 * If the node returns a tuple (mode_T) then the proj's
53 * will be asked for this information.
/* Compute the register requirements of node `irn` at slot `pos` into `req`.
 * pos == -1 queries the (first) OUT requirement, pos >= 0 an IN requirement.
 * NOTE(review): this view of the file is truncated — several lines (closing
 * braces, else-branches, early returns) are missing below; do not assume the
 * visible statements are consecutive in the real file. */
55 static const arch_register_req_t *TEMPLATE_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
56 const TEMPLATE_register_req_t *irn_req;
57 long node_pos = pos == -1 ? 0 : pos;
58 ir_mode *mode = get_irn_mode(irn);
59 firm_dbg_module_t *mod = firm_dbg_register(DEBUG_MODULE);
/* mode_T (tuple) and mode_M (memory) nodes carry no register requirements */
61 if (mode == mode_T || mode == mode_M) {
62 DBG((mod, LEVEL_1, "ignoring mode_T, mode_M node %+F\n", irn));
66 DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));
69 /* in case of a proj, we need to get the correct OUT slot */
70 /* of the node corresponding to the proj number */
72 node_pos = TEMPLATE_translate_proj_pos(irn);
78 irn = my_skip_proj(irn);
80 DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
83 /* get requirements for our own nodes */
84 if (is_TEMPLATE_irn(irn)) {
/* backend nodes store their requirements in generated in/out tables */
86 irn_req = get_TEMPLATE_in_req(irn, pos);
89 irn_req = get_TEMPLATE_out_req(irn, node_pos);
92 DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));
94 memcpy(req, &(irn_req->req), sizeof(*req));
/* same/different constraints additionally reference another operand */
96 if (arch_register_req_is(&(irn_req->req), should_be_same) ||
97 arch_register_req_is(&(irn_req->req), should_be_different)) {
98 assert(irn_req->pos >= 0 && "should be same/different constraint for in -> out NYI");
99 req->other = get_irn_n(irn, irn_req->pos);
102 /* get requirements for FIRM nodes */
104 /* treat Phi like Const with default requirements */
106 DB((mod, LEVEL_1, "returning standard reqs for %+F\n", irn));
108 if (mode_is_float(mode)) {
109 memcpy(req, &(TEMPLATE_default_req_TEMPLATE_floating_point.req), sizeof(*req));
111 else if (mode_is_int(mode) || mode_is_reference(mode)) {
112 memcpy(req, &(TEMPLATE_default_req_TEMPLATE_general_purpose.req), sizeof(*req));
114 else if (mode == mode_T || mode == mode_M) {
115 DBG((mod, LEVEL_1, "ignoring Phi node %+F\n", irn));
119 assert(0 && "unsupported Phi-Mode");
122 else if ((get_irn_op(irn) == op_Return) && pos > 0) {
123 /* pos == 0 is Memory -> no requirements */
124 DB((mod, LEVEL_1, "giving return (%+F) requirements\n", irn));
127 /* pos == 1 is Stackpointer */
128 memcpy(req, &(TEMPLATE_default_req_TEMPLATE_general_purpose_r6.req), sizeof(*req));
/* NOTE(review): index passed to get_Return_res looks suspicious — Return
 * result indices are usually pos minus the leading Mem/SP operands; the
 * surrounding (missing) lines may already adjust this. Verify. */
131 if (mode_is_float(get_irn_mode(get_Return_res(irn, pos)))) {
/* float results go to f0 */
133 memcpy(req, &(TEMPLATE_default_req_TEMPLATE_floating_point_f0.req), sizeof(*req));
136 /* integer result, 64bit results are returned as two 32bit values */
138 memcpy(req, &(TEMPLATE_default_req_TEMPLATE_general_purpose_r0.req), sizeof(*req));
141 memcpy(req, &(TEMPLATE_default_req_TEMPLATE_general_purpose_r1.req), sizeof(*req));
147 DB((mod, LEVEL_1, "returning NULL for %+F (node not supported)\n", irn));
/* Record that register `reg` has been assigned to `irn` (resp. the node a
 * Proj selects). Backend nodes store the register in their slot array;
 * other nodes (Phis) go into the shared cur_reg_set.
 * NOTE(review): truncated view — the declaration of `pos`, the leading
 * if-branch and several braces are missing below. */
155 static void TEMPLATE_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
158 else if (is_Proj(irn)) {
/* a Proj selects one OUT slot of its predecessor */
159 pos = TEMPLATE_translate_proj_pos(irn);
160 irn = my_skip_proj(irn);
163 if (is_TEMPLATE_irn(irn)) {
164 const arch_register_t **slots;
166 slots = get_TEMPLATE_slots(irn);
170 /* here we set the registers for the Phi nodes */
171 TEMPLATE_set_firm_reg(irn, reg, cur_reg_set);
/* Return the register currently assigned to `irn`, or NULL if none is
 * recorded. Mirrors TEMPLATE_set_irn_reg: slot array for backend nodes,
 * cur_reg_set lookup for everything else.
 * NOTE(review): truncated view — `pos` declaration and braces missing. */
175 static const arch_register_t *TEMPLATE_get_irn_reg(const void *self, const ir_node *irn) {
177 const arch_register_t *reg = NULL;
179 else if (is_Proj(irn)) {
180 pos = TEMPLATE_translate_proj_pos(irn);
181 irn = my_skip_proj(irn);
184 if (is_TEMPLATE_irn(irn)) {
185 const arch_register_t **slots;
186 slots = get_TEMPLATE_slots(irn);
/* non-backend nodes (Phis) are looked up in the shared cur_reg_set */
190 reg = TEMPLATE_get_firm_reg(irn, cur_reg_set);
/* Classify `irn` for the generic backend (branch vs. normal node).
 * NOTE(review): the condition guarding the branch case is not visible in
 * this truncated view. */
196 static arch_irn_class_t TEMPLATE_classify(const void *self, const ir_node *irn) {
197 irn = my_skip_proj(irn);
200 return arch_irn_class_branch;
202 else if (is_TEMPLATE_irn(irn)) {
203 return arch_irn_class_normal;
/* Return the architecture flags of `irn`. Backend nodes carry their own
 * flags; Unknown nodes are marked ignore for the register allocator.
 * NOTE(review): fallback return for other nodes is truncated in this view. */
209 static arch_irn_flags_t TEMPLATE_get_flags(const void *self, const ir_node *irn) {
210 irn = my_skip_proj(irn);
212 if (is_TEMPLATE_irn(irn)) {
213 return get_TEMPLATE_flags(irn);
215 else if (is_Unknown(irn)) {
216 return arch_irn_flags_ignore;
222 /* fill register allocator interface */
224 static const arch_irn_ops_if_t TEMPLATE_irn_ops_if = {
225 TEMPLATE_get_irn_reg_req,
226 TEMPLATE_set_irn_reg,
227 TEMPLATE_get_irn_reg,
/* NOTE(review): remaining callbacks (classify, get_flags, ...) are
 * truncated in this view */
/* ops instance handed out by TEMPLATE_get_irn_ops; its cg field is patched
 * in TEMPLATE_cg_init */
232 TEMPLATE_irn_ops_t TEMPLATE_irn_ops = {
233 &TEMPLATE_irn_ops_if,
239 /**************************************************
242 * ___ ___ __| | ___ __ _ ___ _ __ _| |_
243 * / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
244 * | (_| (_) | (_| | __/ (_| | __/ | | | | | |
245 * \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
248 **************************************************/
251 * Transforms the standard firm graph into
252 * a TEMPLATE firm graph
254 static void TEMPLATE_prepare_graph(void *self) {
255 TEMPLATE_code_gen_t *cg = self;
/* pseudo irgs are left untouched */
257 if (! is_pseudo_ir_graph(cg->irg)) {
/* blockwise walk: pre-hook places constants, post-hook rewrites firm
 * nodes into TEMPLATE nodes (judging by the callback names) */
258 irg_walk_blkwise_graph(cg->irg, TEMPLATE_place_consts, TEMPLATE_transform_node, cg);
265 * Fix offsets and stacksize
/* NOTE(review): body truncated/empty in this view */
267 static void TEMPLATE_finish_irg(ir_graph *irg, TEMPLATE_code_gen_t *cg) {
/* Hook called before scheduling (body not visible; presumably a no-op). */
272 static void TEMPLATE_before_sched(void *self) {
/* Hook called before register allocation (body not visible; presumably a no-op). */
275 static void TEMPLATE_before_ra(void *self) {
280 * Creates a Store for a Spill
/* Lower a be_Spill node into a target Store node: gathers frame pointer,
 * spilled value, spill entity and its frame offset. The actual Store
 * construction is still TODO, so the return value is incomplete. */
282 static ir_node *TEMPLATE_lower_spill(void *self, ir_node *spill) {
283 TEMPLATE_code_gen_t *cg = self;
284 dbg_info *dbg = get_irn_dbg_info(spill);
285 ir_node *block = get_nodes_block(spill);
286 ir_node *ptr = get_irg_frame(cg->irg);
287 ir_node *val = be_get_Spill_context(spill);
288 ir_node *mem = new_rd_NoMem(cg->irg);
289 ir_mode *mode = get_irn_mode(spill);
291 entity *ent = be_get_spill_entity(spill);
292 unsigned offs = get_entity_offset_bytes(ent);
294 DB((cg->mod, LEVEL_1, "lower_spill: got offset %d for %+F\n", offs, ent));
296 /* TODO: create Store */
302 * Create a Load for a Spill
/* Lower a be_Reload node into a target Load node: gathers frame pointer,
 * mode and the reload's memory predecessor. Load construction is TODO. */
304 static ir_node *TEMPLATE_lower_reload(void *self, ir_node *reload) {
305 TEMPLATE_code_gen_t *cg = self;
306 dbg_info *dbg = get_irn_dbg_info(reload);
307 ir_node *block = get_nodes_block(reload);
308 ir_node *ptr = get_irg_frame(cg->irg);
309 ir_mode *mode = get_irn_mode(reload);
310 ir_node *pred = get_irn_n(reload, 0);
314 /* TODO: create Load */
320 * Returns the Stackregister
/* NOTE(review): body truncated in this view */
322 static const arch_register_t *TEMPLATE_get_stack_register(void *self) {
327 * Emits the code, closes the output file and frees
328 * the code generator interface.
330 static void TEMPLATE_codegen(void *self) {
331 TEMPLATE_code_gen_t *cg = self;
332 ir_graph *irg = cg->irg;
/* emit_decls guards the global declaration output — presumably set for the
 * first code generator only and cleared in a line not visible here; verify */
335 if (cg->emit_decls) {
336 TEMPLATE_gen_decls(cg->out);
340 TEMPLATE_finish_irg(irg, cg);
341 dump_ir_block_graph_sched(irg, "-TEMPLATE-finished");
/* NOTE(review): `out` is used here while cg->out is used above — the
 * declaration of `out` is not visible; confirm both name the same FILE*. */
342 TEMPLATE_gen_routine(out, irg, cg);
346 /* de-allocate code generator */
347 del_set(cg->reg_set);
/* forward declaration so the interface struct below can reference it */
351 static void *TEMPLATE_cg_init(FILE *F, ir_graph *irg, const arch_env_t *arch_env);
/* Code generator interface: hooks invoked by the generic backend driver. */
353 static const arch_code_generator_if_t TEMPLATE_code_gen_if = {
/* NOTE(review): first initializer (presumably TEMPLATE_cg_init, matching
 * the forward declaration above) is truncated in this view */
355 TEMPLATE_prepare_graph,
356 TEMPLATE_before_sched, /* before scheduling hook */
357 TEMPLATE_before_ra, /* before register allocation hook */
358 TEMPLATE_lower_spill,
359 TEMPLATE_lower_reload,
360 TEMPLATE_get_stack_register,
361 TEMPLATE_codegen /* emit && done */
365 * Initializes the code generator.
/* Allocates and fills a TEMPLATE_code_gen_t for `irg`, publishes its
 * register set through cur_reg_set and wires the cg backpointer into the
 * shared TEMPLATE_irn_ops. Ownership: the returned cg is freed by
 * TEMPLATE_codegen (del_set of reg_set visible there). */
367 static void *TEMPLATE_cg_init(FILE *F, ir_graph *irg, const arch_env_t *arch_env) {
368 TEMPLATE_isa_t *isa = (TEMPLATE_isa_t *)arch_env->isa;
369 TEMPLATE_code_gen_t *cg = xmalloc(sizeof(*cg));
371 cg->impl = &TEMPLATE_code_gen_if;
373 cg->reg_set = new_set(TEMPLATE_cmp_irn_reg_assoc, 1024);
374 cg->mod = firm_dbg_register("firm.be.TEMPLATE.cg");
376 cg->arch_env = arch_env;
/* NOTE(review): truncated — initialisation of the remaining cg fields
 * (irg, out, emit_decls, ...) is not visible here */
380 if (isa->num_codegens > 1)
/* expose the register set to the irn get/set callbacks (see cur_reg_set) */
385 cur_reg_set = cg->reg_set;
387 TEMPLATE_irn_ops.cg = cg;
389 return (arch_code_generator_t *)cg;
394 /*****************************************************************
395 * ____ _ _ _____ _____
396 * | _ \ | | | | |_ _|/ ____| /\
397 * | |_) | __ _ ___| | _____ _ __ __| | | | | (___ / \
398 * | _ < / _` |/ __| |/ / _ \ '_ \ / _` | | | \___ \ / /\ \
399 * | |_) | (_| | (__| < __/ | | | (_| | _| |_ ____) / ____ \
400 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/ \_\
402 *****************************************************************/
405 * Initializes the backend ISA and opens the output file.
407 static void *TEMPLATE_init(void) {
/* NOTE(review): guard logic using `inited` is truncated in this view */
408 static int inited = 0;
409 TEMPLATE_isa_t *isa = xmalloc(sizeof(*isa));
411 isa->impl = &TEMPLATE_isa_if;
417 isa->num_codegens = 0;
/* register the TEMPLATE register classes/registers and node opcodes */
419 TEMPLATE_register_init(isa);
420 TEMPLATE_create_opcodes();
428 * Closes the output file and frees the ISA structure.
/* NOTE(review): body truncated in this view */
430 static void TEMPLATE_done(void *self) {
/* Number of register classes of this ISA (body truncated; presumably
 * returns N_CLASSES, matching the assert in TEMPLATE_get_reg_class — verify). */
436 static int TEMPLATE_get_n_reg_class(const void *self) {
/* Return register class `i`; index is bounds-checked against N_CLASSES. */
440 static const arch_register_class_t *TEMPLATE_get_reg_class(const void *self, int i) {
441 assert(i >= 0 && i < N_CLASSES && "Invalid TEMPLATE register class requested.");
442 return &TEMPLATE_reg_classes[i];
/* Every node is served by the single shared TEMPLATE_irn_ops instance. */
445 static const void *TEMPLATE_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
446 return &TEMPLATE_irn_ops;
449 const arch_irn_handler_t TEMPLATE_irn_handler = {
/* NOTE(review): initializer list truncated (presumably TEMPLATE_get_irn_ops) */
/* Accessor handed to the generic backend. */
453 const arch_irn_handler_t *TEMPLATE_get_irn_handler(const void *self) {
454 return &TEMPLATE_irn_handler;
/* Scheduler predicate: only TEMPLATE backend nodes appear in the schedule. */
457 int TEMPLATE_to_appear_in_schedule(void *block_env, const ir_node *irn) {
458 return is_TEMPLATE_irn(irn);
462 * Initializes the code generator interface.
/* Returns the (static, shared) code generator interface defined above. */
464 static const arch_code_generator_if_t *TEMPLATE_get_code_generator_if(void *self) {
465 return &TEMPLATE_code_gen_if;
/* writable copy of a stock selector; its to_appear_in_schedule hook is
 * overridden before it is handed out (see the getter in this file) */
468 list_sched_selector_t TEMPLATE_sched_selector;
471 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
/* NOTE(review): the comment above says "reg_pressure" but the code copies
 * trivial_selector — one of the two is stale; confirm intended selector. */
473 static const list_sched_selector_t *TEMPLATE_get_list_sched_selector(const void *self) {
474 memcpy(&TEMPLATE_sched_selector, trivial_selector, sizeof(list_sched_selector_t));
475 TEMPLATE_sched_selector.to_appear_in_schedule = TEMPLATE_to_appear_in_schedule;
476 return &TEMPLATE_sched_selector;
/* Register backend-specific commandline options with libcore
 * (body truncated in this view; only compiled WITH_LIBCORE). */
480 static void TEMPLATE_register_options(lc_opt_entry_t *ent)
483 #endif /* WITH_LIBCORE */
485 const arch_isa_if_t TEMPLATE_isa_if = {
487 TEMPLATE_register_options,
491 TEMPLATE_get_n_reg_class,
492 TEMPLATE_get_reg_class,
493 TEMPLATE_get_irn_handler,
494 TEMPLATE_get_code_generator_if,
495 TEMPLATE_get_list_sched_selector,