2 * ISA implementation for Firm IR nodes.
13 #include "iredges_t.h"
21 #include "../bearch.h"
22 #include "../besched.h"
23 #include "../beutil.h"
/* NOTE(review): this file is a sparsely sampled extraction of a libFirm
 * backend ISA ("firm" reference backend).  The leading integer on each
 * line is a stray original-file line number left by the extractor, and
 * many lines of each definition are missing.  Code is kept byte-identical
 * throughout; only comments are added. */
/* Discriminator for Imm node payloads: tarval constant vs. SymConst. */
29 enum { imm_Const, imm_SymConst } tp;
/* SymConst payload variant (fragment of the imm_attr_t data union). */
32 symconst_attr symc_attr;
/* Backing storage for the single "datab" register class below. */
36 static arch_register_t datab_regs[N_REGS];
38 static arch_register_class_t reg_classes[] = {
39 { "datab", N_REGS, NULL, datab_regs },
/* Backend-private opcode for Push nodes, created in firm_init(). */
42 static ir_op *op_push;
/* Forward declaration; the table itself is defined at the end of the file. */
45 const arch_isa_if_t firm_isa;
/* N_CLASSES: register-class count via the array-size idiom. */
48 (sizeof(reg_classes) / sizeof(reg_classes[0]))
/* Return the tarval carried by an Imm node, or NULL when the Imm wraps
 * a SymConst instead of a constant.  (Closing brace is missing from
 * this extraction.) */
52 tarval *get_Imm_tv(ir_node *n) {
53 imm_attr_t *attr = (imm_attr_t *)get_irn_generic_attr(n);
54 return attr->tp == imm_Const ? attr->data.cnst_attr.tv : NULL;
/* True iff @p irn is one of this backend's Imm nodes. */
57 int is_Imm(const ir_node *irn) {
58 return get_irn_op(irn) == op_imm;
/* Node-dumper hook for Imm nodes: prints the constant value (or
 * "immSymC"), the mode in brackets, and for SymConsts the referenced
 * name.  Only sampled lines of the switch bodies are visible here. */
61 static int dump_node_Imm(ir_node *n, FILE *F, dump_reason_t reason) {
69 case dump_node_opcode_txt:
/* Render the tarval into a temporary buffer for printing. */
73 tarval_snprintf(buf, sizeof(buf), tv);
74 fprintf(F, "%s", buf);
77 fprintf(F, "immSymC");
81 case dump_node_mode_txt:
82 mode = get_irn_mode(n);
/* Suppress internal/pseudo modes (BB/ANY/BAD/T) in the dump output. */
84 if (mode && mode != mode_BB && mode != mode_ANY && mode != mode_BAD && mode != mode_T) {
85 fprintf(F, "[%s]", get_mode_name(mode));
89 case dump_node_nodeattr_txt:
90 attr = (imm_attr_t *)get_irn_generic_attr(n);
92 if (is_Imm(n) && attr->tp == imm_SymConst) {
93 const char *name = NULL;
94 symconst_attr *sc_attr = &attr->data.symc_attr;
/* Resolve the SymConst to a printable name, by address kind. */
96 switch (sc_attr->num) {
97 case symconst_addr_name:
98 name = get_id_str(sc_attr->sym.ident_p);
101 case symconst_addr_ent:
102 name = get_entity_ld_name(sc_attr->sym.entity_p);
/* Any other SymConst kind is unsupported by this dumper. */
106 assert(!"Unsupported SymConst");
109 fprintf(F, "&%s ", name);
114 case dump_node_info_txt:
/* ISA constructor: allocates the arch_isa_t, names the registers of
 * each class ("r0".."rN", copied onto a static obstack), and registers
 * the two backend opcodes Push and Imm.  Sampled lines only; the loop
 * and brace structure is incomplete in this extraction. */
121 static void *firm_init(FILE *outfile)
123 static struct obstack obst;
124 static int inited = 0;
125 arch_isa_t *isa = xmalloc(sizeof(*isa));
128 isa->impl = &firm_isa;
136 for(k = 0; k < N_CLASSES; ++k) {
/* NOTE(review): "®_classes" is mojibake — almost certainly
 * "&reg_classes" mangled by an HTML-entity decode ("&reg" -> "®").
 * The same corruption recurs several times below; verify against the
 * upstream source before building. */
137 arch_register_class_t *cls = ®_classes[k];
141 for(i = 0; i < cls->n_regs; ++i) {
/* Cast away const: the static register table is filled in once here. */
145 arch_register_t *reg = (arch_register_t *) &cls->regs[i];
147 n = snprintf(buf, sizeof(buf), "r%d", i);
148 name = obstack_copy0(&obst, buf, n);
151 reg->reg_class = cls;
158 * Create some opcodes and types to let firm look a little
159 * bit more like real machines.
162 int push_opc = get_next_ir_opcode();
164 op_push = new_ir_op(push_opc, "Push",
165 op_pin_state_pinned, 0, oparity_binary, 0, 0, NULL);
169 int imm_opc = get_next_ir_opcode();
/* Imm gets a custom dumper and carries an imm_attr_t attribute block. */
172 memset(&ops, 0, sizeof(ops));
173 ops.dump_node = dump_node_Imm;
175 op_imm = new_ir_op(imm_opc, "Imm",
176 op_pin_state_pinned, 0, oparity_zero, 0, sizeof(imm_attr_t), &ops);
/* Destructor counterpart of firm_init(); body not visible here. */
182 static void firm_done(void *self)
/* Number of register classes this ISA exposes (body missing in this
 * extraction; presumably returns N_CLASSES). */
187 static int firm_get_n_reg_class(const void *self)
/* Bounds-checked access to the i-th register class. */
192 static const arch_register_class_t *firm_get_reg_class(const void *self, int i)
194 assert(i >= 0 && i < N_CLASSES);
/* NOTE(review): "®_classes" is mojibake for "&reg_classes". */
195 return ®_classes[i];
/* Map a mode onto a register class: any data/address mode -> the single
 * CLS_DATAB class, everything else has no register class. */
198 static const arch_register_class_t *firm_get_reg_class_for_mode(const void *self, const ir_mode *irm)
200 return mode_is_datab(irm) ? ®_classes[CLS_DATAB] : NULL;
/* Lazily create the (empty) "between" type used by the ABI for frame
 * layout; cached in a function-local static. */
203 static ir_type *firm_abi_get_between_type(void *self) {
204 static ir_type *between_type = NULL;
207 between_type = new_type_class(new_id_from_str("firm_be_between"));
/* Zero-sized: this toy ISA keeps nothing between caller and callee frame. */
208 set_type_size_bytes(between_type, 0);
/* ABI callback table handed to be_abi_call_set_flags() below. */
214 static const be_abi_callbacks_t firm_abi_callbacks = {
217 firm_abi_get_between_type,
/* Compute the calling convention for @p method_type: primitive-typed
 * parameters and results are mapped to consecutive "datab" registers
 * (register i for position i); non-primitive parameters go on the stack. */
223 static void firm_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi)
/* NOTE(review): mojibake again — should read "&reg_classes[CLS_DATAB]". */
225 const arch_register_class_t *cls = ®_classes[CLS_DATAB];
227 be_abi_call_flags_t flags = { { 0, 0, 0, 0, 0 } };
230 for(i = 0, n = get_method_n_params(method_type); i < n; ++i) {
231 ir_type *t = get_method_param_type(method_type, i);
232 if(is_Primitive_type(t))
/* NOTE(review): register index == parameter index; implicitly assumes
 * i < N_REGS — confirm the original guards this. */
233 be_abi_call_param_reg(abi, i, &cls->regs[i]);
235 be_abi_call_param_stack(abi, i, 1, 0, 0);
238 for(i = 0, n = get_method_n_ress(method_type); i < n; ++i) {
239 ir_type *t = get_method_res_type(method_type, i);
240 if(is_Primitive_type(t))
241 be_abi_call_res_reg(abi, i, &cls->regs[i]);
245 be_abi_call_set_flags(abi, flags, &firm_abi_callbacks);
/* Default register requirement: any register of the "datab" class. */
249 static const arch_register_req_t firm_std_reg_req = {
250 arch_register_req_type_normal,
/* NOTE(review): mojibake for "&reg_classes[CLS_DATAB]". */
251 ®_classes[CLS_DATAB],
/* Register-requirement query: nodes with a backend data mode get the
 * standard datab requirement; the fall-through for other modes is not
 * visible in this extraction. */
256 static const arch_register_req_t *
257 firm_get_irn_reg_req(const void *self, const ir_node *irn, int pos)
259 if(is_firm_be_mode(get_irn_mode(irn)))
260 return &firm_std_reg_req;
/* node -> assigned-register association, kept in a lazily created set. */
265 struct irn_reg_assoc {
267 const arch_register_t *reg;
/* Set comparator: entries are equal iff they reference the same node
 * (returns 0 on equality, per libfirm set convention). */
270 static int cmp_irn_reg_assoc(const void *a, const void *b, size_t len)
272 const struct irn_reg_assoc *x = a;
273 const struct irn_reg_assoc *y = b;
275 return x->irn != y->irn;
/* Find-or-create the association entry for @p irn. */
278 static struct irn_reg_assoc *get_irn_reg_assoc(const ir_node *irn)
/* NOTE(review): process-lifetime set, never freed; keyed by node pointer. */
280 static set *reg_set = NULL;
281 struct irn_reg_assoc templ;
284 reg_set = new_set(cmp_irn_reg_assoc, 1024);
289 return set_insert(reg_set, &templ, sizeof(templ), HASH_PTR(irn));
/* Record the register chosen for @p irn in the side table. */
292 static void firm_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
294 struct irn_reg_assoc *assoc = get_irn_reg_assoc(irn);
/* Look up the register previously assigned to @p irn. */
298 static const arch_register_t *firm_get_irn_reg(const void *self, const ir_node *irn)
300 struct irn_reg_assoc *assoc = get_irn_reg_assoc(irn);
/* Classify a node for the backend: branch, call, or "normal".  The case
 * labels between the sampled lines are missing from this extraction. */
304 static arch_irn_class_t firm_classify(const void *self, const ir_node *irn)
306 arch_irn_class_t res;
308 switch(get_irn_opcode(irn)) {
311 res = arch_irn_class_branch;
314 res = arch_irn_class_call;
317 res = arch_irn_class_normal;
/* Node flags: Imm nodes (and further opcodes whose case labels are
 * elided here) are cheap to recompute, hence rematerializable. */
323 static arch_irn_flags_t firm_get_flags(const void *self, const ir_node *irn)
325 arch_irn_flags_t res = 0;
327 if(get_irn_op(irn) == op_imm)
328 res |= arch_irn_flags_rematerializable;
330 switch(get_irn_opcode(irn)) {
340 res |= arch_irn_flags_rematerializable;
/* Stack-bias / frame-entity hooks — bodies are not visible in this
 * extraction (presumably no-ops / NULL for this toy ISA; verify). */
348 static void firm_set_stack_bias(const void *self, ir_node *irn, int bias)
352 static ir_entity *firm_get_frame_entity(const void *self, const ir_node *irn)
357 static void firm_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent)
/* vtable of per-node operations; optional hooks left NULL. */
361 static const arch_irn_ops_if_t firm_irn_ops_if = {
362 firm_get_irn_reg_req,
367 firm_get_frame_entity,
368 firm_set_frame_entity,
370 NULL, /* get_inverse */
371 NULL, /* get_op_estimated_cost */
372 NULL, /* possible_memory_operand */
373 NULL, /* perform_memory_operand */
376 static const arch_irn_ops_t firm_irn_ops = {
/* Node handler: every node shares the same ops table. */
380 static const void *firm_get_irn_ops(const arch_irn_handler_t *self,
383 return &firm_irn_ops;
386 const arch_irn_handler_t firm_irn_handler = {
/* Build a Push(store, arg) node; chains through memory (mode_M). */
390 static ir_node *new_Push(ir_graph *irg, ir_node *bl, ir_node *push, ir_node *arg)
395 return new_ir_node(NULL, irg, bl, op_push, mode_M, 2, ins);
399 * Creates an op_Imm node from an op_Const.
/* Wrap a Const or SymConst into a backend Imm node, copying the source
 * node's attribute payload into the Imm's imm_attr_t. */
401 static ir_node *new_Imm(ir_graph *irg, ir_node *bl, ir_node *cnst) {
406 res = new_ir_node(NULL, irg, bl, op_imm, get_irn_mode(cnst), 0, ins);
407 attr = (imm_attr_t *) &res->attr;
409 switch (get_irn_opcode(cnst)) {
411 attr->tp = imm_Const;
412 attr->data.cnst_attr = get_irn_const_attr(cnst);
415 attr->tp = imm_SymConst;
416 attr->data.symc_attr = get_irn_symconst_attr(cnst);
/* Only Const and SymConst can be turned into an Imm. */
421 assert(0 && "Cannot create Imm for this opcode");
/* Graph walker: rewrite each Call so its parameters are passed through
 * an explicit chain of Push nodes threaded along the memory edge, then
 * re-create the Call with no direct parameter inputs.  Push order is
 * chosen by the calling convention's cc_last_on_top bit. */
427 static void prepare_walker(ir_node *irn, void *data)
429 ir_opcode opc = get_irn_opcode(irn);
431 /* A replacement for this node has already been computed. */
432 if(get_irn_link(irn))
435 if(opc == iro_Call) {
436 ir_node *bl = get_nodes_block(irn);
437 ir_graph *irg = get_irn_irg(bl);
439 ir_node *store = get_Call_mem(irn);
440 ir_node *ptr = get_Call_ptr(irn);
441 ir_type *ct = get_Call_type(irn);
442 int np = get_Call_n_params(irn) > 0 ? 1 : 0;
448 int i, n = get_Call_n_params(irn);
450 unsigned cc = get_method_calling_convention(get_Call_type(irn));
/* cc_last_on_top: push first..last so the last argument ends on top. */
452 if (cc & cc_last_on_top) {
453 store = new_Push(irg, bl, store, get_Call_param(irn, 0));
455 for (i = 1; i < n; ++i)
456 store = new_Push(irg, bl, store, get_Call_param(irn, i));
/* Otherwise push last..first. */
459 store = new_Push(irg, bl, store, get_Call_param(irn, n - 1));
461 for (i = n - 2; i >= 0; --i)
462 store = new_Push(irg, bl, store, get_Call_param(irn, i));
/* Synthesize a parameterless method type carrying the original results. */
465 snprintf(buf, sizeof(buf), "push_%s", get_type_name(ct));
467 n = get_method_n_ress(ct);
468 nt = new_type_method(new_id_from_str(buf), 0, n);
469 for(i = 0; i < n; ++i)
470 set_method_res_type(nt, i, get_method_res_type(ct, i));
472 nc = new_r_Call(irg, bl, store, ptr, 0, ins, nt);
/* Self-link marks the new Call as already processed (see check above). */
474 set_irn_link(nc, nc);
/* Graph walker: replace Const/Unknown/SymConst operands with freshly
 * created Imm nodes placed in the user's block, so every use has its
 * constant materialized locally.  For Phi inputs the Imm goes into the
 * corresponding predecessor block instead. */
479 static void localize_const_walker(ir_node *irn, void *data)
483 ir_node *bl = get_nodes_block(irn);
485 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
486 ir_node *op = get_irn_n(irn, i);
487 ir_opcode opc = get_irn_opcode(op);
490 || opc == iro_Unknown
491 || (opc == iro_SymConst /*&& get_SymConst_kind(op) == symconst_addr_ent*/)) {
492 ir_graph *irg = get_irn_irg(bl);
/* Phi argument i must be materialized in predecessor block i. */
493 ir_node *imm_bl = is_Phi(irn) ? get_Block_cfgpred_block(bl, i) : bl;
495 ir_node *imm = new_Imm(irg, imm_bl, op);
496 set_irn_n(irn, i, imm);
/* Return the (single) node handler of this ISA. */
502 static const arch_irn_handler_t *firm_get_irn_handler(const void *self)
504 return &firm_irn_handler;
/* Per-irg code generator instance; impl stays the first member so the
 * framework can treat it polymorphically. */
507 typedef struct _firm_code_gen_t {
508 const arch_code_generator_if_t *impl;
/* Phase 1: localize constants, then lower Calls to Push chains. */
513 static void firm_prepare_graph(void *self)
515 firm_code_gen_t *cg = self;
517 irg_walk_graph(cg->irg, firm_clear_link, localize_const_walker, NULL);
518 irg_walk_graph(cg->irg, NULL, prepare_walker, NULL);
/* Hook run before scheduling; body not visible in this extraction. */
521 static void firm_before_sched(void *self)
/* Walker run before register allocation: every Imm is expected to have
 * exactly one user; move it into that user's block (for a Phi user,
 * into the matching predecessor block) and schedule it appropriately. */
525 static void imm_scheduler(ir_node *irn, void *env) {
528 ir_node *user, *user_block, *before, *tgt_block;
/* Diagnostic + hard assert: multi-use Imms violate the invariant. */
530 if (1 != get_irn_n_edges(irn)) {
531 printf("Out edges: %d\n", get_irn_n_edges(irn));
532 assert(1 == get_irn_n_edges(irn));
535 e = get_irn_out_edge_first(irn);
537 user_block = get_nodes_block(user);
/* Phi case: the value is consumed on control-flow edge e->pos. */
539 before = get_Block_cfgpred_block(user_block, e->pos);
543 tgt_block = user_block;
547 set_nodes_block(irn, tgt_block);
548 sched_add_before(before, irn);
/* Phase hooks around register allocation.  Only before_ra does visible
 * work: it (re)schedules all Imm nodes next to their single user. */
552 static void firm_before_ra(void *self)
554 firm_code_gen_t *cg = self;
555 irg_walk_graph(cg->irg, imm_scheduler, NULL, NULL);
558 static void firm_after_ra(void *self)
562 static void firm_codegen_done(void *self)
/* Forward declaration needed by the interface table below. */
567 static void *firm_cg_init(be_irg_t *birg);
569 static const arch_code_generator_if_t firm_code_gen_if = {
/* Allocate and wire up a code generator instance for @p birg. */
580 static void *firm_cg_init(be_irg_t *birg)
582 firm_code_gen_t *cg = xmalloc(sizeof(*cg));
583 cg->impl = &firm_code_gen_if;
589 static const arch_code_generator_if_t *firm_get_code_generator_if(void *self)
591 return &firm_code_gen_if;
/* Scheduling: plain trivial list scheduler. */
594 static const list_sched_selector_t *firm_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
595 return trivial_selector;
/* No ILP scheduler for this backend (body elided in this extraction). */
598 static const ilp_sched_selector_t *firm_get_ilp_sched_selector(const void *self) {
603 * Returns the necessary byte alignment for storing a register of given class.
/* Alignment == natural byte size of the class's mode. */
605 static int firm_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
606 ir_mode *mode = arch_register_class_mode(cls);
607 return get_mode_size_bytes(mode);
/* No execution-unit / machine model queries implemented for this toy
 * ISA; bodies not visible here. */
610 static const be_execution_unit_t ***firm_get_allowed_execution_units(const void *self, const ir_node *irn) {
616 static const be_machine_t *firm_get_machine(const void *self) {
623 * Return irp irgs in the desired order.
625 static ir_graph **firm_get_irg_list(const void *self, ir_graph ***irg_list) {
/* Static backend parameters for libFirm: muls considered fast, shifts
 * allowed up to 31 bits, no extra opcodes, no dword lowering, no
 * intrinsic creator. */
632 static const backend_params *firm_get_libfirm_params(void) {
633 static arch_dep_params_t ad = {
635 0, /* Muls are fast enough on Firm */
636 31, /* shift would be ok */
641 static backend_params p = {
642 NULL, /* no additional opcodes */
643 NULL, /* will be set later */
644 0, /* no dword lowering */
645 NULL, /* no creator function */
646 NULL, /* context for create_intrinsic_fkt */
/* The ISA interface table exported by this backend.  The initializer is
 * truncated at the end of this extraction (several entries and the
 * closing "};" are not visible). */
653 const arch_isa_if_t firm_isa = {
656 firm_get_n_reg_class,
658 firm_get_reg_class_for_mode,
660 firm_get_irn_handler,
661 firm_get_code_generator_if,
662 firm_get_list_sched_selector,
663 firm_get_ilp_sched_selector,
664 firm_get_reg_class_alignment,
665 firm_get_libfirm_params,
666 firm_get_allowed_execution_units,