5 #include "pseudo_irg.h"
14 #include "../bearch.h" /* the general register allocator interface */
15 #include "../benode_t.h"
16 #include "bearch_ia32_t.h"
18 #include "ia32_new_nodes.h" /* ia32 nodes interface */
19 #include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class defenitions) */
20 #include "ia32_gen_decls.h" /* interface declaration emitter */
21 #include "ia32_transform.h"
22 #include "ia32_emitter.h"
23 #include "ia32_map_regs.h"
25 #define DEBUG_MODULE "ir.be.isa.ia32"
/* File-global firm_node -> register map shared by ia32_set_irn_reg()/ia32_get_irn_reg();
 * (re)pointed at the current code generator's reg_set in ia32_cg_init(). */
28 static set *cur_reg_set = NULL;
32 /**************************************************
35 * _ __ ___ __ _ __ _| | | ___ ___ _| |_
36 * | '__/ _ \/ _` | / _` | | |/ _ \ / __| | | _|
37 * | | | __/ (_| | | (_| | | | (_) | (__ | | |
38 * |_| \___|\__, | \__,_|_|_|\___/ \___| |_|_|
41 **************************************************/
/* Skips over Proj nodes to the underlying (non-Proj) predecessor.
 * NOTE(review): the function body is missing from this chunk (file appears
 * truncated) — presumably it loops/steps through get_Proj_pred(); verify
 * against the original source. */
43 static ir_node *my_skip_proj(const ir_node *n) {
/* Tests whether n is a Proj of a mode_T Proj whose predecessor is an
 * ia32 Call (i.e. a result projection of a Call).
 * NOTE(review): several lines (including the return and closing brace) are
 * missing from this chunk; only the condition fragment is visible. */
49 static int is_Call_Proj(const ir_node *n) {
51 		is_Proj(get_Proj_pred(n)) &&
52 		get_irn_mode(get_Proj_pred(n)) == mode_T &&
53 		is_ia32_Call(get_Proj_pred(get_Proj_pred(n))))
/* Tests whether n is the Proj_P_frame_base of the Start node.
 * NOTE(review): return statement / closing brace missing from this chunk. */
61 static int is_P_frame_base_Proj(const ir_node *n) {
63 		get_irn_opcode(get_Proj_pred(n)) == iro_Start &&
64 		get_Proj_proj(n) == pn_Start_P_frame_base)
/* Returns non-zero if the first out-edge user of n is a be_Keep node.
 * NOTE(review): only the first out edge is checked — a node with several
 * users where the Keep is not first would be missed; presumably the callers
 * guarantee this situation does not occur. Closing brace missing in chunk. */
72 static int is_used_by_Keep(const ir_node *n) {
73 	return be_is_Keep(get_edge_src_irn(get_irn_out_edge_first(n)));
77  * Return register requirements for an ia32 node.
78  * If the node returns a tuple (mode_T) then the proj's
79  * will be asked for this information.
/* NOTE(review): this chunk is missing many interior lines of the function
 * (else-branches, returns, closing braces); comments below describe only the
 * visible fragments. */
81 static const arch_register_req_t *ia32_get_irn_reg_req(const arch_irn_ops_t *self, arch_register_req_t *req, const ir_node *irn, int pos) {
82 	const ia32_register_req_t *irn_req;
83 	long node_pos = pos == -1 ? 0 : pos;
84 	ir_mode *mode = get_irn_mode(irn);
85 	firm_dbg_module_t *mod = firm_dbg_register(DEBUG_MODULE);
/* mode_T/mode_M nodes carry no register themselves; their Projs are asked. */
87 	if (mode == mode_T || mode == mode_M) {
88 		DBG((mod, LEVEL_1, "ignoring mode_T, mode_M node %+F\n", irn));
92 	DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));
/* Call-result Projs kept alive by a Keep get their requirement from the
 * projnum -> requirement map. */
95 	if (is_Call_Proj(irn) && is_used_by_Keep(irn)) {
96 		irn_req = ia32_projnum_reg_req_map[get_Proj_proj(irn)];
97 		memcpy(req, &(irn_req->req), sizeof(*req));
/* Other Projs: translate the proj number to a result slot and step to the
 * projected node. */
100 	else if (is_Proj(irn)) {
102 		node_pos = ia32_translate_proj_pos(irn);
108 		irn = my_skip_proj(irn);
110 		DBG((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
/* ia32 nodes store their in/out requirements directly on the node. */
113 	if (is_ia32_irn(irn)) {
115 			irn_req = get_ia32_in_req(irn, pos);
118 			irn_req = get_ia32_out_req(irn, node_pos);
121 		DBG((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));
123 		memcpy(req, &(irn_req->req), sizeof(*req));
/* For same/different constraints, resolve the partner operand node.
 * Only in -> out constraints are implemented (pos >= 0). */
125 		if (arch_register_req_is(&(irn_req->req), should_be_same) ||
126 			arch_register_req_is(&(irn_req->req), should_be_different)) {
127 			assert(irn_req->pos >= 0 && "should be same/different constraint for in -> out NYI");
128 			req->other = get_irn_n(irn, irn_req->pos);
132 		/* treat Phi like Const with default requirements */
134 			DBG((mod, LEVEL_1, "returning standard reqs for %+F\n", irn));
135 			if (mode_is_float(mode))
136 				memcpy(req, &(ia32_default_req_ia32_floating_point.req), sizeof(*req));
137 			else if (mode_is_int(mode) || mode_is_reference(mode))
138 				memcpy(req, &(ia32_default_req_ia32_general_purpose.req), sizeof(*req));
139 			else if (mode == mode_T || mode == mode_M) {
140 				DBG((mod, LEVEL_1, "ignoring Phi node %+F\n", irn));
144 				assert(0 && "unsupported Phi-Mode");
/* Projs of Start: no register constraint for the special entry projections. */
146 		else if (get_irn_op(irn) == op_Start) {
147 			DBG((mod, LEVEL_1, "returning reqs none for ProjX -> Start (%+F )\n", irn));
149 				case pn_Start_X_initial_exec:
150 				case pn_Start_P_value_arg_base:
151 				case pn_Start_P_globals:
152 				case pn_Start_P_frame_base:
153 					memcpy(req, &(ia32_default_req_none.req), sizeof(*req));
155 				case pn_Start_T_args:
156 					assert(0 && "ProjT(pn_Start_T_args) should not be asked");
/* Return values must come back in EAX (ia32 calling convention). */
159 		else if (get_irn_op(irn) == op_Return && pos > 0) {
160 			DBG((mod, LEVEL_1, "returning reqs EAX for %+F\n", irn));
161 			memcpy(req, &(ia32_default_req_ia32_general_purpose_eax.req), sizeof(*req));
164 			DBG((mod, LEVEL_1, "returning NULL for %+F (not ia32)\n", irn));
/* Records the register assigned to irn: ia32 nodes keep it in their slot
 * array, everything else goes into the firm_node -> register map.
 * NOTE(review): interior lines (slot assignment, closing braces) are missing
 * from this chunk. */
172 static void ia32_set_irn_reg(const arch_irn_ops_t *self, ir_node *irn, const arch_register_t *reg) {
/* Kept Call Projs and the P_frame_base Proj are registered as themselves. */
175 	if ((is_Call_Proj(irn) && is_used_by_Keep(irn)) || is_P_frame_base_Proj(irn)) {
176 		/* don't skip the proj, we want to take the else below */
178 	else if (is_Proj(irn)) {
179 		pos = ia32_translate_proj_pos(irn);
180 		irn = my_skip_proj(irn);
183 	if (is_ia32_irn(irn)) {
184 		const arch_register_t **slots;
186 		slots      = get_ia32_slots(irn);
/* Non-ia32 nodes: store the assignment in the shared register map. */
190 		ia32_set_firm_reg(irn, reg, cur_reg_set);
/* Mirror of ia32_set_irn_reg(): looks up the register assigned to irn,
 * from the node's slot array for ia32 nodes or from the register map
 * otherwise. Returns NULL if nothing was assigned.
 * NOTE(review): interior lines (slot read, return, braces) are missing from
 * this chunk. */
194 static const arch_register_t *ia32_get_irn_reg(const arch_irn_ops_t *self, const ir_node *irn) {
196 	const arch_register_t *reg = NULL;
198 	if ((is_Call_Proj(irn) && is_used_by_Keep(irn)) || is_P_frame_base_Proj(irn)) {
199 		/* don't skip the proj, we want to take the else below */
201 	else if (is_Proj(irn)) {
202 		pos = ia32_translate_proj_pos(irn);
203 		irn = my_skip_proj(irn);
206 	if (is_ia32_irn(irn)) {
207 		const arch_register_t **slots;
208 		slots = get_ia32_slots(irn);
212 		reg = ia32_get_firm_reg(irn, cur_reg_set);
/* Classifies a (possibly Proj-wrapped) node for the register allocator:
 * branch, call or normal ia32 node.
 * NOTE(review): the branch-test condition line and the final return for
 * non-ia32 nodes are missing from this chunk. */
218 static arch_irn_class_t ia32_classify(const arch_irn_ops_t *self, const ir_node *irn) {
219 	irn = my_skip_proj(irn);
221 		return arch_irn_class_branch;
222 	else if (is_ia32_Call(irn))
223 		return arch_irn_class_call;
224 	else if (is_ia32_irn(irn))
225 		return arch_irn_class_normal;
/* Returns the arch flags stored on an ia32 node (Projs are skipped first);
 * for non-ia32 nodes it only prints a diagnostic.
 * NOTE(review): the fallback return value after the ir_printf is missing
 * from this chunk. */
230 static arch_irn_flags_t ia32_get_flags(const arch_irn_ops_t *self, const ir_node *irn) {
231 	irn = my_skip_proj(irn);
232 	if (is_ia32_irn(irn))
233 		return get_ia32_flags(irn);
235 		ir_printf("don't know flags of %+F\n", irn);
240 /* fill register allocator interface */
/* NOTE(review): the remaining initializer entries (set_irn_reg, get_irn_reg,
 * classify, get_flags) are missing from this chunk. */
242 static const arch_irn_ops_t ia32_irn_ops = {
243 	ia32_get_irn_reg_req,
252 /**************************************************
255 * ___ ___ __| | ___ __ _ ___ _ __ _| |_
256 * / __/ _ \ / _` |/ _ \/ _` |/ _ \ '_ \ | | _|
257 * | (_| (_) | (_| | __/ (_| | __/ | | | | | |
258 * \___\___/ \__,_|\___|\__, |\___|_| |_| |_|_|
261 **************************************************/
/* Graph-walker callback: sets *(int *)env when a stack Alloc node is found.
 * NOTE(review): the line actually writing to *has_alloca and the closing
 * braces are missing from this chunk. */
263 static void check_for_alloca(ir_node *irn, void *env) {
264 	int *has_alloca = env;
266 	if (get_irn_opcode(irn) == iro_Alloc) {
267 		if (get_Alloc_where(irn) == stack_alloc) {
274  * Transforms the standard firm graph into
/* NOTE(review): chunk is missing the has_alloca declaration, the if guarding
 * the EBP change, and the closing braces. The walk at line 286 passes NULL
 * as env although check_for_alloca expects an int* — presumably &has_alloca
 * is the walker's fourth argument here; verify against the original. */
277 static void ia32_prepare_graph(void *self) {
278 	ia32_code_gen_t *cg = self;
281 	if (! is_pseudo_ir_graph(cg->irg)) {
282 		/* If there is a alloca in the irg, we use %ebp for stack addressing */
283 		/* instead of %esp, as alloca destroys %esp.                         */
285 		/* check for alloca node */
286 		irg_walk_blkwise_graph(cg->irg, check_for_alloca, NULL, &has_alloca);
/* Reserve EBP (mark ignore) so the allocator never hands it out. */
289 			ia32_general_purpose_regs[REG_EBP].type = arch_register_type_ignore;
/* Transform every node into its ia32 counterpart. */
292 		irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
299  * Set the register for P_frame_base Proj to %esp.
/* Walker callback used after dead-node elimination (see hook below).
 * NOTE(review): closing braces missing from this chunk. */
301 static void ia32_set_P_frame_base_Proj_reg(ir_node *irn, void *env) {
302 	ia32_code_gen_t *cg = env;
304 	if (is_P_frame_base_Proj(irn)) {
305 		arch_set_irn_register(cg->arch_env, irn, &ia32_general_purpose_regs[REG_ESP]);
310  * This function is the hook before_sched but more important: it is
311  * called after the dead node elimination. The dead node elimination changes
312  * the memory location of the nodes, which will change the hash key of
313  * the Proj_P_frame_base(Start) and this will fuck up the firm_node -> register
314  * hash map. So we need to insert the register for this node after the dead node
/* NOTE(review): the end of the doc comment and the closing brace of the
 * function are missing from this chunk. */
317 static void ia32_some_stuff_need_to_be_done_after_deadnode_elimination(void *self) {
318 	ia32_code_gen_t *cg = self;
/* Pseudo-irgs are not code-generated, so skip re-registering for them. */
320 	if (! is_pseudo_ir_graph(cg->irg))
321 		irg_walk_blkwise_graph(cg->irg, NULL, ia32_set_P_frame_base_Proj_reg, cg);
327  * Dummy functions for hooks we don't need but which must be filled.
/* Intentionally empty before-register-allocation hook (body/brace not shown
 * in this chunk). */
329 static void ia32_before_ra(void *self) {
335  * Emits the code, closes the output file and frees
336  * the code generator interface.
/* NOTE(review): several lines are missing from this chunk, including the
 * declaration of `out` used at line 349 (line 344 uses cg->out — presumably
 * `out` is a local alias or one of the two spellings is a bug; verify),
 * the emit_decls reset, and the free(cg)/closing brace. */
338 static void ia32_codegen(void *self) {
339 	ia32_code_gen_t *cg = self;
340 	ir_graph *irg = cg->irg;
/* Emit global declarations only once (guarded by emit_decls). */
343 	if (cg->emit_decls) {
344 		ia32_gen_decls(cg->out);
348 	//	ia32_finish_irg(irg);
349 	ia32_gen_routine(out, irg, cg->arch_env);
353 	/* de-allocate code generator */
354 	del_set(cg->reg_set);
/* Forward declaration: ia32_cg_init is referenced by the interface below
 * but defined after it. */
358 static void *ia32_cg_init(FILE *F, ir_graph *irg, const arch_env_t *arch_env);
/* Code generator interface handed to the backend framework.
 * NOTE(review): the first entries (init, prepare_graph) and the closing
 * brace of the initializer are missing from this chunk. */
360 static const arch_code_generator_if_t ia32_code_gen_if = {
363 	ia32_some_stuff_need_to_be_done_after_deadnode_elimination,   /* before scheduling hook */
364 	ia32_before_ra,                                               /* before register allocation hook */
365 	ia32_codegen                                                  /* emit && done */
369  * Initializes the code generator.
/* Allocates and fills an ia32_code_gen_t for irg and publishes its register
 * map via the file-global cur_reg_set.
 * NOTE(review): chunk is missing several field initializations (irg, out,
 * emit_decls), the num_codegens increment, the body of the >1-codegens
 * branch, and the closing brace. malloc result is not checked — original
 * behavior, flagged only. */
371 static void *ia32_cg_init(FILE *F, ir_graph *irg, const arch_env_t *arch_env) {
372 	ia32_isa_t      *isa = (ia32_isa_t *)arch_env->isa;
373 	ia32_code_gen_t *cg  = malloc(sizeof(*cg));
375 	cg->impl     = &ia32_code_gen_if;
377 	cg->reg_set  = new_set(ia32_cmp_irn_reg_assoc, 1024);
378 	cg->mod      = firm_dbg_register("be.transform.ia32");
380 	cg->arch_env = arch_env;
/* Only the first code generator emits declarations (presumably); the branch
 * body is not visible here. */
384 	if (isa->num_codegens > 1)
389 	cur_reg_set = cg->reg_set;
391 	return (arch_code_generator_t *)cg;
396 /*****************************************************************
397 * ____ _ _ _____ _____
398 * | _ \ | | | | |_ _|/ ____| /\
399 * | |_) | __ _ ___| | _____ _ __ __| | | | | (___ / \
400 * | _ < / _` |/ __| |/ / _ \ '_ \ / _` | | | \___ \ / /\ \
401 * | |_) | (_| | (__| < __/ | | | (_| | _| |_ ____) / ____ \
402 * |____/ \__,_|\___|_|\_\___|_| |_|\__,_| |_____|_____/_/ \_\
404 *****************************************************************/
407  * Initializes the backend ISA and opens the output file.
/* NOTE(review): chunk is missing the inited-guard usage, several field
 * initializations, the return statement and closing brace. malloc result is
 * not checked — original behavior, flagged only. */
409 static void *ia32_init(void) {
410 	static int inited = 0;
411 	ia32_isa_t *isa   = malloc(sizeof(*isa));
413 	isa->impl = &ia32_isa_if;
420 	isa->num_codegens    = 0;
421 	isa->reg_projnum_map = new_set(ia32_cmp_reg_projnum_assoc, 1024);
/* Register ia32 register classes and create the ia32 opcodes (presumably
 * guarded by `inited` in the missing lines). */
423 	ia32_register_init(isa);
424 	ia32_create_opcodes();
432  * Closes the output file and frees the ISA structure.
/* NOTE(review): the function body is missing from this chunk. */
434 static void ia32_done(void *self) {
/* Returns the number of ia32 register classes (body — presumably
 * `return N_CLASSES;` — missing from this chunk). */
440 static int ia32_get_n_reg_class(const void *self) {
/* Returns the i-th ia32 register class; i must be in [0, N_CLASSES).
 * NOTE(review): closing brace missing from this chunk. */
444 static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
445 	assert(i >= 0 && i < N_CLASSES && "Invalid ia32 register class requested.");
446 	return &ia32_reg_classes[i];
/* Every node is handled by the same static irn-ops table.
 * NOTE(review): closing brace missing from this chunk. */
449 static const arch_irn_ops_t *ia32_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) {
450 	return &ia32_irn_ops;
/* irn handler exported to the framework.
 * NOTE(review): initializer entries (presumably ia32_get_irn_ops) and the
 * closing brace are missing from this chunk. */
453 const arch_irn_handler_t ia32_irn_handler = {
/* Accessor for the static irn handler above.
 * NOTE(review): closing brace missing from this chunk. */
457 const arch_irn_handler_t *ia32_get_irn_handler(const void *self) {
458 	return &ia32_irn_handler;
/* Maps a register to the Proj number used for Call results, via the ISA's
 * reg -> projnum set. NOTE(review): closing brace missing from this chunk. */
461 long ia32_get_call_projnum_for_reg(const void *self, const arch_register_t *reg) {
462 	ia32_isa_t *isa = (ia32_isa_t *)self;
463 	return ia32_get_reg_projnum(reg, isa->reg_projnum_map);
/* Scheduler predicate: only ia32 nodes appear in the schedule.
 * NOTE(review): closing brace missing from this chunk. */
466 int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
467 	return is_ia32_irn(irn);
471  * Initializes the code generator interface.
/* NOTE(review): closing brace missing from this chunk. */
473 static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self) {
474 	return &ia32_code_gen_if;
/* Mutable copy of the reg_pressure selector, patched lazily below. */
477 list_sched_selector_t ia32_sched_selector;
480  * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
/* Copies the generic reg_pressure selector and overrides its appearance
 * predicate with the ia32 one. Re-copies on every call, which is redundant
 * but harmless as the result is identical each time.
 * NOTE(review): closing brace missing from this chunk. */
482 static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self) {
483 	memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t));
484 	ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
485 	return &ia32_sched_selector;
/* libcore option registration hook (only compiled WITH_LIBCORE; the opening
 * #ifdef and the function body are missing from this chunk). */
489 static void ia32_register_options(lc_opt_entry_t *ent)
492 #endif /* WITH_LIBCORE */
/* The ISA interface exported by the ia32 backend.
 * NOTE(review): several initializer entries (init/done, get_reg_class, ...)
 * and the closing brace are missing from this chunk. */
494 const arch_isa_if_t ia32_isa_if = {
496 	ia32_register_options,
500 	ia32_get_n_reg_class,
502 	ia32_get_irn_handler,
503 	ia32_get_code_generator_if,
504 	ia32_get_list_sched_selector,
505 	ia32_get_call_projnum_for_reg