5 #include "pseudo_irg.h"
15 #include "../bearch.h" /* the general register allocator interface */
16 #include "../benode_t.h"
17 #include "../belower.h"
18 #include "bearch_ia32_t.h"
20 #include "ia32_new_nodes.h" /* ia32 nodes interface */
21 #include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */
22 #include "ia32_gen_decls.h" /* interface declaration emitter */
23 #include "ia32_transform.h"
24 #include "ia32_emitter.h"
25 #include "ia32_map_regs.h"
26 #include "ia32_optimize.h"
28 #define DEBUG_MODULE "ir.be.isa.ia32"
/* Register<->node association set of the code generator currently being
 * processed; assigned in ia32_cg_init and consulted by ia32_set_irn_reg /
 * ia32_get_irn_reg for non-ia32 (Firm) nodes. Global because the irn ops
 * callbacks have no cg handle of their own. */
31 static set *cur_reg_set = NULL;

/* Predicate: true iff the node's opcode is Start. */
34 #define is_Start(irn) (get_irn_opcode(irn) == iro_Start)
36 /**************************************************
39 * _ __ ___ __ _ __ _| | | ___ ___ _| |_
40 * | '__/ _ \/ _` | / _` | | |/ _ \ / __| | | _|
41 * | | | __/ (_| | | (_| | | | (_) | (__ | | |
42 * |_| \___|\__, | \__,_|_|_|\___/ \___| |_|_|
45 **************************************************/
/**
 * Skips over Proj node(s) to reach the producing node.
 * NOTE(review): body not visible in this chunk — presumably walks
 * get_Proj_pred while the node is a Proj; confirm against full source.
 */
47 static ir_node *my_skip_proj(const ir_node *n) {
/**
 * Returns non-zero iff n is a Proj of a mode_T Proj whose grandparent
 * is an ia32 Call, i.e. a Proj of a Call result tuple.
 * (Leading condition on n itself is not visible in this chunk.)
 */
53 static int is_Call_Proj(const ir_node *n) {
55 is_Proj(get_Proj_pred(n)) &&
56 get_irn_mode(get_Proj_pred(n)) == mode_T &&
57 is_ia32_Call(get_Proj_pred(get_Proj_pred(n))))
/**
 * Returns non-zero iff n is a Proj of a mode_T Proj whose grandparent
 * is the Start node, i.e. a Proj for a register function argument.
 * (Leading condition on n itself is not visible in this chunk.)
 */
65 static int is_Start_Proj(const ir_node *n) {
67 is_Proj(get_Proj_pred(n)) &&
68 get_irn_mode(get_Proj_pred(n)) == mode_T &&
69 is_Start(get_Proj_pred(get_Proj_pred(n))))
/**
 * Returns non-zero iff n is the Proj delivering the frame base pointer
 * (Proj(Start) with proj number pn_Start_P_frame_base).
 */
77 static int is_P_frame_base_Proj(const ir_node *n) {
79 is_Start(get_Proj_pred(n)) &&
80 get_Proj_proj(n) == pn_Start_P_frame_base)
/**
 * Returns non-zero iff the FIRST out edge of n leads to a be_Keep node.
 * NOTE(review): only the first edge is inspected — presumably callers
 * guarantee a Keep, if present, is the first user; verify.
 */
88 static int is_used_by_Keep(const ir_node *n) {
89 return be_is_Keep(get_edge_src_irn(get_irn_out_edge_first(n)));
93 * Return register requirements for an ia32 node.
94 * If the node returns a tuple (mode_T) then the proj's
95 * will be asked for this information.
/**
 * arch_irn_ops callback: fill *req with the register requirement of irn
 * at position pos (pos == -1 means the single result, otherwise an
 * input/output slot) and return it. Dispatches on the node kind:
 * Call results kept alive by a Keep, register parameters (Start Projs),
 * generic Projs (skipped to their predecessor), ia32 nodes, Phis,
 * Start and Return; mode_T / mode_M nodes carry no requirement.
 */
97 static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
98 const ia32_register_req_t *irn_req;
/* pos == -1 selects result slot 0 */
99 long node_pos = pos == -1 ? 0 : pos;
100 ir_mode *mode = get_irn_mode(irn);
101 firm_dbg_module_t *mod = firm_dbg_register(DEBUG_MODULE);
102 const ia32_irn_ops_t *ops = self;
/* tuple and memory nodes have no register requirement */
104 if (mode == mode_T || mode == mode_M) {
105 DBG((mod, LEVEL_1, "ignoring mode_T, mode_M node %+F\n", irn));
109 DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));
/* Call result that is kept alive: requirement comes from the
 * proj-number -> register-requirement map */
112 if (is_Call_Proj(irn) && is_used_by_Keep(irn)) {
117 irn_req = ia32_projnum_reg_req_map[get_Proj_proj(irn)];
118 memcpy(req, &(irn_req->req), sizeof(*req));
/* register function parameter: requirement stored per proj number
 * in the code generator */
123 else if (is_Start_Proj(irn)) {
124 irn_req = ops->cg->reg_param_req[get_Proj_proj(irn)];
125 assert(irn_req && "missing requirement for regparam");
126 memcpy(req, &(irn_req->req), sizeof(*req));
/* ordinary Proj: translate proj number to out slot and ask the
 * underlying node instead */
129 else if (is_Proj(irn)) {
131 node_pos = ia32_translate_proj_pos(irn);
137 irn = my_skip_proj(irn);
139 DBG((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
/* ia32 node: in/out requirements are attached to the node itself */
142 if (is_ia32_irn(irn)) {
144 irn_req = get_ia32_in_req(irn, pos);
147 irn_req = get_ia32_out_req(irn, node_pos);
150 DBG((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));
152 memcpy(req, &(irn_req->req), sizeof(*req));
/* same/different constraints refer to another operand: resolve the
 * referenced input node into req->other */
154 if (arch_register_req_is(&(irn_req->req), should_be_same) ||
155 arch_register_req_is(&(irn_req->req), should_be_different)) {
156 assert(irn_req->pos >= 0 && "should be same/different constraint for in -> out NYI");
157 req->other = get_irn_n(irn, irn_req->pos);
161 /* treat Phi like Const with default requirements */
163 DBG((mod, LEVEL_1, "returning standard reqs for %+F\n", irn));
164 if (mode_is_float(mode))
165 memcpy(req, &(ia32_default_req_ia32_floating_point.req), sizeof(*req));
166 else if (mode_is_int(mode) || mode_is_reference(mode))
167 memcpy(req, &(ia32_default_req_ia32_general_purpose.req), sizeof(*req));
168 else if (mode == mode_T || mode == mode_M) {
169 DBG((mod, LEVEL_1, "ignoring Phi node %+F\n", irn));
173 assert(0 && "unsupported Phi-Mode");
/* Start: all pinned Projs get the "none" requirement */
175 else if (is_Start(irn)) {
176 DBG((mod, LEVEL_1, "returning reqs none for ProjX -> Start (%+F )\n", irn));
178 case pn_Start_X_initial_exec:
179 case pn_Start_P_value_arg_base:
180 case pn_Start_P_globals:
181 case pn_Start_P_frame_base:
182 memcpy(req, &(ia32_default_req_none.req), sizeof(*req));
184 case pn_Start_T_args:
185 assert(0 && "ProjT(pn_Start_T_args) should not be asked");
/* Return value must live in EAX (ia32 calling convention) */
188 else if (get_irn_op(irn) == op_Return && pos > 0) {
189 DBG((mod, LEVEL_1, "returning reqs EAX for %+F\n", irn));
190 memcpy(req, &(ia32_default_req_ia32_general_purpose_eax.req), sizeof(*req));
193 DBG((mod, LEVEL_1, "returning NULL for %+F (not ia32)\n", irn));
/**
 * arch_irn_ops callback: record that register reg was assigned to irn.
 * ia32 nodes store the register in their own slot array; all other
 * (Firm) nodes are tracked in the global cur_reg_set.
 */
201 static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
/* kept Call results and frame-base Projs must keep their Proj
 * identity, so the Proj is intentionally NOT skipped here */
204 if ((is_Call_Proj(irn) && is_used_by_Keep(irn)) ||
205 is_P_frame_base_Proj(irn) ||
208 /* don't skip the proj, we want to take the else below */
210 else if (is_Proj(irn)) {
211 pos = ia32_translate_proj_pos(irn);
212 irn = my_skip_proj(irn);
/* ia32 node: write into the node's register slot at pos */
215 if (is_ia32_irn(irn)) {
216 const arch_register_t **slots;
218 slots = get_ia32_slots(irn);
/* non-ia32 node: remember the assignment in the shared set */
222 ia32_set_firm_reg(irn, reg, cur_reg_set);
/**
 * arch_irn_ops callback: return the register assigned to irn, or NULL
 * if none was recorded. Mirrors ia32_set_irn_reg: ia32 nodes read their
 * slot array, other nodes are looked up in cur_reg_set.
 */
226 static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
228 const arch_register_t *reg = NULL;
/* same special cases as in ia32_set_irn_reg: keep the Proj as-is */
230 if ((is_Call_Proj(irn) && is_used_by_Keep(irn)) ||
231 is_P_frame_base_Proj(irn) ||
234 /* don't skip the proj, we want to take the else below */
236 else if (is_Proj(irn)) {
237 pos = ia32_translate_proj_pos(irn);
238 irn = my_skip_proj(irn);
241 if (is_ia32_irn(irn)) {
242 const arch_register_t **slots;
243 slots = get_ia32_slots(irn);
/* non-ia32 node: consult the shared register set */
247 reg = ia32_get_firm_reg(irn, cur_reg_set);
/**
 * arch_irn_ops callback: classify irn (after skipping Projs) as
 * branch, call or normal node for the register allocator.
 * (The branch condition on the line before the first return is not
 * visible in this chunk.)
 */
253 static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
254 irn = my_skip_proj(irn);
256 return arch_irn_class_branch;
257 else if (is_ia32_Call(irn))
258 return arch_irn_class_call;
259 else if (is_ia32_irn(irn))
260 return arch_irn_class_normal;
/**
 * arch_irn_ops callback: return the backend flags of irn (after
 * skipping Projs). Non-ia32 nodes only produce a diagnostic print;
 * the fallback return value is not visible in this chunk.
 */
265 static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
266 irn = my_skip_proj(irn);
267 if (is_ia32_irn(irn))
268 return get_ia32_flags(irn);
270 ir_printf("don't know flags of %+F\n", irn);
275 /* fill register allocator interface */

/* vtable of node-operation callbacks handed to the generic backend;
 * remaining members (set/get reg, classify, flags) follow in the
 * original file */
277 static const arch_irn_ops_if_t ia32_irn_ops_if = {
278 ia32_get_irn_reg_req,
/* instance wrapping the vtable; its cg member is patched per code
 * generator in ia32_cg_init */
285 ia32_irn_ops_t ia32_irn_ops = {
292 /**************************************************
295 * ___ ___ __| | ___ __ _ ___ _ __ _| |_
296 * / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
297 * | (_| (_) | (_| | __/ (_| | __/ | | | | | |
298 * \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
301 **************************************************/
/**
 * Graph-walker callback: sets *(int *)env when irn is an Alloc node
 * that allocates on the stack (i.e. the irg contains an alloca).
 */
303 static void check_for_alloca(ir_node *irn, void *env) {
304 int *has_alloca = env;
306 if (get_irn_opcode(irn) == iro_Alloc) {
307 if (get_Alloc_where(irn) == stack_alloc) {
314 * Transforms the standard firm graph into
/**
 * Code generator hook: transforms the standard Firm graph into an
 * ia32-node graph. First scans for stack allocas (which force %ebp
 * based frame addressing), then runs the const-placement and node
 * transformation walk. Pseudo graphs are left untouched.
 */
317 static void ia32_prepare_graph(void *self) {
318 ia32_code_gen_t *cg = self;
320 if (! is_pseudo_ir_graph(cg->irg)) {
321 /* If there is an alloca in the irg, we use %ebp for stack addressing */
322 /* instead of %esp, as alloca destroys %esp. */
326 /* check for alloca node */
327 irg_walk_blkwise_graph(cg->irg, check_for_alloca, NULL, &(cg->has_alloca));
/* with alloca present, reserve %ebp (remove it from allocation) */
329 if (cg->has_alloca) {
330 ia32_general_purpose_regs[REG_EBP].type = arch_register_type_ignore;
/* place constants, then transform Firm nodes into ia32 nodes */
333 irg_walk_blkwise_graph(cg->irg, ia32_place_consts, ia32_transform_node, cg);
340 * Set the register for P_frame_base Proj to %esp.
/**
 * Graph-walker callback: pins the P_frame_base Proj to a fixed
 * register — %ebp when the irg contains an alloca (frame pointer
 * required), %esp otherwise.
 */
342 static void ia32_set_P_frame_base_Proj_reg(ir_node *irn, void *env) {
343 ia32_code_gen_t *cg = env;
345 if (is_P_frame_base_Proj(irn)) {
346 if (cg->has_alloca) {
347 arch_set_irn_register(cg->arch_env, irn, &ia32_general_purpose_regs[REG_EBP]);
350 arch_set_irn_register(cg->arch_env, irn, &ia32_general_purpose_regs[REG_ESP]);
358 * Dummy functions for hooks we don't need but which must be filled.
/**
 * Code generator hook run before scheduling: lowers nodes that the
 * scheduler cannot handle directly.
 */
360 static void ia32_before_sched(void *self) {
361 ia32_code_gen_t *cg = self;
363 lower_nodes_before_sched(cg->irg, cg->arch_env);
/* Code generator hook run before register allocation — empty stub
 * (see "Dummy functions for hooks we don't need" above). */
366 static void ia32_before_ra(void *self) {
371 * Creates a Store for a Spill
/**
 * Lowers a be_Spill node into an ia32 Store to the stack frame:
 * Store(frame_ptr, spilled_value) with the spill offset attached as
 * address-mode offset. Returns the new Store (return statement not
 * visible in this chunk).
 */
373 static ir_node *ia32_lower_spill(void *self, ir_node *spill) {
374 ia32_code_gen_t *cg = self;
375 unsigned offs = be_get_spill_offset(spill);
376 dbg_info *dbg = get_irn_dbg_info(spill);
377 ir_node *block = get_nodes_block(spill);
378 ir_node *ptr = get_irg_frame(cg->irg);
379 ir_node *val = be_get_Spill_context(spill);
380 ir_node *mem = new_rd_NoMem(cg->irg);
381 ir_mode *mode = get_irn_mode(spill);
384 res = new_rd_ia32_Store(dbg, cg->irg, block, ptr, val, mem, mode);
/* frame offset becomes the address-mode displacement of the Store */
385 set_ia32_am_offs(res, new_tarval_from_long(offs, mode_Iu));
391 * Create a Load for a Spill
/**
 * Lowers a be_Reload node into an ia32 Load from the stack frame.
 * The frame offset is taken from the predecessor: either a be_Spill
 * (offset queried directly) or an already-lowered ia32 Store (offset
 * copied from its address mode). Returns the new Load (return
 * statement not visible in this chunk).
 */
393 static ir_node *ia32_lower_reload(void *self, ir_node *reload) {
394 ia32_code_gen_t *cg = self;
395 dbg_info *dbg = get_irn_dbg_info(reload);
396 ir_node *block = get_nodes_block(reload);
397 ir_node *ptr = get_irg_frame(cg->irg);
398 ir_mode *mode = get_irn_mode(reload);
399 ir_node *pred = get_irn_n(reload, 0);
/* determine the frame displacement from the spilling predecessor */
403 if (be_is_Spill(pred)) {
404 tv = new_tarval_from_long(be_get_spill_offset(pred), mode_Iu);
406 else if (is_ia32_Store(pred)) {
407 tv = get_ia32_am_offs(pred);
410 assert(0 && "unsupported Reload predecessor");
413 res = new_rd_ia32_Load(dbg, cg->irg, block, ptr, pred, mode);
414 set_ia32_am_offs(res, tv);
420 * Emits the code, closes the output file and frees
421 * the code generator interface.
/**
 * Final code generator hook: emits global declarations (once), fixes
 * the stack/frame register of the P_frame_base Proj, emits the
 * routine's assembly and frees the code generator's resources.
 */
423 static void ia32_codegen(void *self) {
424 ia32_code_gen_t *cg = self;
425 ir_graph *irg = cg->irg;
/* emit file-level declarations only for the first routine */
428 if (cg->emit_decls) {
429 ia32_gen_decls(cg->out);
433 /* set the stack register */
434 if (! is_pseudo_ir_graph(irg))
435 irg_walk_blkwise_graph(irg, NULL, ia32_set_P_frame_base_Proj_reg, cg);
437 // ia32_finish_irg(irg);
438 ia32_gen_routine(out, irg, cg->arch_env);
442 /* de-allocate code generator */
443 del_set(cg->reg_set);
/* forward declaration: ia32_cg_init is referenced by the vtable below
 * but defined after it */
447 static void *ia32_cg_init(FILE *F, ir_graph *irg, const arch_env_t *arch_env);

/* code generator vtable handed to the generic backend driver;
 * further members exist in the original file between these entries */
449 static const arch_code_generator_if_t ia32_code_gen_if = {
452 ia32_before_sched, /* before scheduling hook */
453 ia32_before_ra, /* before register allocation hook */
456 ia32_codegen /* emit && done */
460 * Initializes the code generator.
/**
 * Creates and initializes an ia32 code generator for one ir_graph:
 * allocates the cg struct, sets up its register set and debug module,
 * publishes the register set via cur_reg_set and wires the cg into the
 * global ia32_irn_ops. Returns the new code generator.
 */
462 static void *ia32_cg_init(FILE *F, ir_graph *irg, const arch_env_t *arch_env) {
463 ia32_isa_t *isa = (ia32_isa_t *)arch_env->isa;
/* NOTE(review): malloc result is not checked before use */
464 ia32_code_gen_t *cg = malloc(sizeof(*cg));
466 cg->impl = &ia32_code_gen_if;
468 cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
469 cg->mod = firm_dbg_register("be.transform.ia32");
471 cg->arch_env = arch_env;
/* declarations were already emitted by an earlier code generator */
475 if (isa->num_codegens > 1)
/* make the register set visible to the irn-ops callbacks */
480 cur_reg_set = cg->reg_set;
482 ia32_irn_ops.cg = cg;
484 return (arch_code_generator_t *)cg;
489 /*****************************************************************
490 * ____ _ _ _____ _____
491 * | _ \ | | | | |_ _|/ ____| /\
492 * | |_) | __ _ ___| | _____ _ __ __| | | | | (___ / \
493 * | _ < / _` |/ __| |/ / _ \ '_ \ / _` | | | \___ \ / /\ \
494 * | |_) | (_| | (__| < __/ | | | (_| | _| |_ ____) / ____ \
495 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/ \_\
497 *****************************************************************/
500 * Initializes the backend ISA and opens the output file.
/**
 * Creates and initializes the ia32 ISA instance: allocates the isa
 * struct, sets up the register/proj-number map, and initializes the
 * ia32 register descriptions and opcodes. The `inited` flag presumably
 * guards one-time initialization (guard not visible in this chunk).
 */
502 static void *ia32_init(void) {
503 static int inited = 0;
/* NOTE(review): malloc result is not checked before use */
504 ia32_isa_t *isa = malloc(sizeof(*isa));
506 isa->impl = &ia32_isa_if;
513 isa->num_codegens = 0;
514 isa->reg_projnum_map = new_set(ia32_cmp_reg_projnum_assoc, 1024);
516 ia32_register_init(isa);
517 ia32_create_opcodes();
525 * Closes the output file and frees the ISA structure.
/* ISA destructor: closes the output file and frees the ISA structure
 * (body not visible in this chunk). */
527 static void ia32_done(void *self) {
/* Returns the number of ia32 register classes (presumably N_CLASSES;
 * body not visible in this chunk). */
533 static int ia32_get_n_reg_class(const void *self) {
/**
 * Returns the i-th ia32 register class; i must be in [0, N_CLASSES).
 */
537 static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
538 assert(i >= 0 && i < N_CLASSES && "Invalid ia32 register class requested.");
539 return &ia32_reg_classes[i];
/**
 * irn handler callback: every node is handled by the single global
 * ia32_irn_ops instance.
 */
542 static const void *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
543 return &ia32_irn_ops;
/* the node handler exposing ia32_get_irn_ops (initializer members not
 * visible in this chunk) */
546 const arch_irn_handler_t ia32_irn_handler = {
/** Returns the (single, global) ia32 node handler. */
550 const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
551 return &ia32_irn_handler;
/**
 * Maps a register to the proj number used for it on Call results,
 * via the ISA's reg -> projnum map.
 */
554 long ia32_get_call_projnum_for_reg(const void *self, const arch_register_t *reg) {
555 ia32_isa_t *isa = (ia32_isa_t *)self;
556 return ia32_get_reg_projnum(reg, isa->reg_projnum_map);
/**
 * Scheduler predicate: only ia32 nodes appear in the final schedule.
 */
559 int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
560 return is_ia32_irn(irn);
564 * Initializes the code generator interface.
/** Returns the ia32 code generator interface (vtable). */
566 static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
567 return &ia32_code_gen_if;
/* mutable copy of the generic reg-pressure selector, patched below */
570 list_sched_selector_t ia32_sched_selector;

573 * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
/**
 * Builds (on every call) and returns the ia32 list scheduler selector:
 * the generic register-pressure selector with to_appear_in_schedule
 * overridden by the ia32-specific predicate.
 */
575 static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self) {
576 memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t));
577 ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
578 return &ia32_sched_selector;
/* libcore option-registration hook — empty stub, only compiled with
 * WITH_LIBCORE (body not visible in this chunk) */
582 static void ia32_register_options(lc_opt_entry_t *ent)
585 #endif /* WITH_LIBCORE */
587 const arch_isa_if_t ia32_isa_if = {
589 ia32_register_options,
593 ia32_get_n_reg_class,
595 ia32_get_irn_handler,
596 ia32_get_code_generator_if,
597 ia32_get_list_sched_selector,
598 ia32_get_call_projnum_for_reg