3 * ISA implementation for Firm IR nodes.
10 #include <libcore/lc_opts.h>
18 #include "iredges_t.h"
26 #include "../bearch.h"
27 #include "../besched.h"
28 #include "../beutil.h"
34 enum { imm_Const, imm_SymConst } tp;
37 symconst_attr symc_attr;
41 static arch_register_t datab_regs[N_REGS];
43 static arch_register_class_t reg_classes[] = {
44 { "datab", N_REGS, NULL, datab_regs },
47 static ir_op *op_push;
50 const arch_isa_if_t firm_isa;
53 (sizeof(reg_classes) / sizeof(reg_classes[0]))
57 tarval *get_Imm_tv(ir_node *n) {
58 imm_attr_t *attr = (imm_attr_t *)get_irn_generic_attr(n);
59 return attr->tp == imm_Const ? attr->data.cnst_attr.tv : NULL;
62 int is_Imm(const ir_node *irn) {
63 return get_irn_op(irn) == op_imm;
/**
 * Custom node dumper for Imm nodes, called back by the firm dumper.
 * NOTE(review): interior lines (case bodies, breaks, the final return)
 * are missing from this extraction — do not treat this span as complete.
 */
static int dump_node_Imm(ir_node *n, FILE *F, dump_reason_t reason) {
/* opcode text: print the tarval for imm_Const, a placeholder for SymConst */
case dump_node_opcode_txt:
tarval_snprintf(buf, sizeof(buf), tv);
fprintf(F, "%s", buf);
fprintf(F, "immSymC");
/* mode text: print the mode unless it is one of firm's internal pseudo modes */
case dump_node_mode_txt:
mode = get_irn_mode(n);
if (mode && mode != mode_BB && mode != mode_ANY && mode != mode_BAD && mode != mode_T) {
fprintf(F, "[%s]", get_mode_name(mode));
/* node attribute text: print "&name" for SymConst immediates */
case dump_node_nodeattr_txt:
attr = (imm_attr_t *)get_irn_generic_attr(n);
if (is_Imm(n) && attr->tp == imm_SymConst) {
const char *name = NULL;
symconst_attr *sc_attr = &attr->data.symc_attr;
/* resolve a printable name depending on the SymConst kind */
switch (sc_attr->num) {
case symconst_addr_name:
name = get_id_str(sc_attr->sym.ident_p);
case symconst_addr_ent:
name = get_entity_ld_name(sc_attr->sym.entity_p);
assert(!"Unsupported SymConst");
fprintf(F, "&%s ", name);
case dump_node_info_txt:
/**
 * Initialize the firm "ISA": name the registers of each class and create
 * the backend-private Push and Imm opcodes.
 * NOTE(review): many interior lines are missing from this extraction.
 */
static void *firm_init(FILE *outfile)
static struct obstack obst;
/* one-time initialization guard for the static obstack and opcodes */
static int inited = 0;
arch_isa_t *isa = xmalloc(sizeof(*isa));
isa->impl = &firm_isa;
for(k = 0; k < N_CLASSES; ++k) {
/* NOTE(review): '®_classes' is mojibake (HTML entity "&reg;") for
 * '&reg_classes' — fix the encoding here. */
arch_register_class_t *cls = ®_classes[k];
for(i = 0; i < cls->n_regs; ++i) {
arch_register_t *reg = (arch_register_t *) &cls->regs[i];
/* register names are "r0".."rN", copied onto the static obstack */
n = snprintf(buf, sizeof(buf), "r%d", i);
name = obstack_copy0(&obst, buf, n);
reg->reg_class = cls;
* Create some opcodes and types to let firm look a little
* bit more like real machines.
int push_opc = get_next_ir_opcode();
op_push = new_ir_op(push_opc, "Push",
op_pin_state_pinned, 0, oparity_binary, 0, 0, NULL);
int imm_opc = get_next_ir_opcode();
/* Imm gets a custom dumper and attribute space for imm_attr_t */
memset(&ops, 0, sizeof(ops));
ops.dump_node = dump_node_Imm;
op_imm = new_ir_op(imm_opc, "Imm",
op_pin_state_pinned, 0, oparity_zero, 0, sizeof(imm_attr_t), &ops);
187 static void firm_done(void *self)
192 static int firm_get_n_reg_class(const void *self)
197 static const arch_register_class_t *firm_get_reg_class(const void *self, int i)
199 assert(i >= 0 && i < N_CLASSES);
200 return ®_classes[i];
203 static const arch_register_class_t *firm_get_reg_class_for_mode(const void *self, const ir_mode *irm)
205 return mode_is_datab(irm) ? ®_classes[CLS_DATAB] : NULL;
208 static ir_type *firm_abi_get_between_type(void *self) {
209 static ir_type *between_type = NULL;
212 between_type = new_type_class(new_id_from_str("firm_be_between"));
213 set_type_size_bytes(between_type, 0);
219 static const be_abi_callbacks_t firm_abi_callbacks = {
222 firm_abi_get_between_type,
228 static void firm_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi)
230 const arch_register_class_t *cls = ®_classes[CLS_DATAB];
232 be_abi_call_flags_t flags = { { 0, 0, 0, 0, 0 } };
235 for(i = 0, n = get_method_n_params(method_type); i < n; ++i) {
236 ir_type *t = get_method_param_type(method_type, i);
237 if(is_Primitive_type(t))
238 be_abi_call_param_reg(abi, i, &cls->regs[i]);
240 be_abi_call_param_stack(abi, i, 1, 0, 0);
243 for(i = 0, n = get_method_n_ress(method_type); i < n; ++i) {
244 ir_type *t = get_method_res_type(method_type, i);
245 if(is_Primitive_type(t))
246 be_abi_call_res_reg(abi, i, &cls->regs[i]);
250 be_abi_call_set_flags(abi, flags, &firm_abi_callbacks);
/* Default register requirement: any register of the datab class.
 * NOTE(review): '®_classes' is mojibake (HTML entity "&reg;") for
 * '&reg_classes' — fix the encoding. The initializer's trailing fields
 * are missing from this extraction. */
static const arch_register_req_t firm_std_reg_req = {
arch_register_req_type_normal,
®_classes[CLS_DATAB],
/**
 * Fill @p req with the register requirement of @p irn at operand @p pos:
 * the standard datab requirement for nodes with a backend mode.
 * NOTE(review): the else branch and return are missing from this extraction.
 */
static const arch_register_req_t *
firm_get_irn_reg_req(const void *self,
arch_register_req_t *req, const ir_node *irn, int pos)
if(is_firm_be_mode(get_irn_mode(irn)))
memcpy(req, &firm_std_reg_req, sizeof(*req));
273 struct irn_reg_assoc {
275 const arch_register_t *reg;
278 static int cmp_irn_reg_assoc(const void *a, const void *b, size_t len)
280 const struct irn_reg_assoc *x = a;
281 const struct irn_reg_assoc *y = b;
283 return x->irn != y->irn;
/**
 * Look up (or create) the node -> register association for @p irn in a
 * lazily allocated hash set keyed by the node pointer.
 * NOTE(review): the templ field assignments are missing from this extraction.
 */
static struct irn_reg_assoc *get_irn_reg_assoc(const ir_node *irn)
static set *reg_set = NULL;
struct irn_reg_assoc templ;
reg_set = new_set(cmp_irn_reg_assoc, 1024);
/* set_insert returns the existing entry or inserts templ as a new one */
return set_insert(reg_set, &templ, sizeof(templ), HASH_PTR(irn));
300 static void firm_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
302 struct irn_reg_assoc *assoc = get_irn_reg_assoc(irn);
306 static const arch_register_t *firm_get_irn_reg(const void *self, const ir_node *irn)
308 struct irn_reg_assoc *assoc = get_irn_reg_assoc(irn);
/**
 * Classify @p irn for the backend: branches, calls, and everything else
 * as normal.
 * NOTE(review): the case labels are missing from this extraction.
 */
static arch_irn_class_t firm_classify(const void *self, const ir_node *irn)
arch_irn_class_t res;
switch(get_irn_opcode(irn)) {
res = arch_irn_class_branch;
res = arch_irn_class_call;
res = arch_irn_class_normal;
/**
 * Compute backend flags for @p irn. Imm nodes (and some const-like
 * opcodes whose case labels are not visible in this extraction) are
 * rematerializable instead of being spilled.
 */
static arch_irn_flags_t firm_get_flags(const void *self, const ir_node *irn)
arch_irn_flags_t res = 0;
if(get_irn_op(irn) == op_imm)
res |= arch_irn_flags_rematerializable;
switch(get_irn_opcode(irn)) {
res |= arch_irn_flags_rematerializable;
356 static void firm_set_stack_bias(const void *self, ir_node *irn, int bias)
360 static entity *firm_get_frame_entity(const void *self, const ir_node *irn)
365 static void firm_set_frame_entity(const void *self, const ir_node *irn, entity *ent)
369 static const arch_irn_ops_if_t firm_irn_ops_if = {
370 firm_get_irn_reg_req,
375 firm_get_frame_entity,
376 firm_set_frame_entity,
378 NULL, /* get_inverse */
379 NULL, /* get_op_estimated_cost */
380 NULL, /* possible_memory_operand */
381 NULL, /* perform_memory_operand */
384 static const arch_irn_ops_t firm_irn_ops = {
388 static const void *firm_get_irn_ops(const arch_irn_handler_t *self,
391 return &firm_irn_ops;
394 const arch_irn_handler_t firm_irn_handler = {
/**
 * Build a Push(push, arg) node in block @p bl producing mode_M.
 * NOTE(review): the ins[] array setup is missing from this extraction;
 * presumably ins = { push, arg } — confirm against the full file.
 */
static ir_node *new_Push(ir_graph *irg, ir_node *bl, ir_node *push, ir_node *arg)
return new_ir_node(NULL, irg, bl, op_push, mode_M, 2, ins);
407 * Creates an op_Imm node from an op_Const.
/**
 * Create an Imm node carrying the Const or SymConst attribute of
 * @p cnst; any other opcode is rejected by the assert.
 * NOTE(review): case labels, breaks and the return are missing from
 * this extraction.
 */
static ir_node *new_Imm(ir_graph *irg, ir_node *bl, ir_node *cnst) {
res = new_ir_node(NULL, irg, bl, op_imm, get_irn_mode(cnst), 0, ins);
attr = (imm_attr_t *) &res->attr;
switch (get_irn_opcode(cnst)) {
attr->tp = imm_Const;
attr->data.cnst_attr = get_irn_const_attr(cnst);
attr->tp = imm_SymConst;
attr->data.symc_attr = get_irn_symconst_attr(cnst);
assert(0 && "Cannot create Imm for this opcode");
/**
 * Graph walker: rewrite each Call so its parameters are passed through
 * a chain of Push nodes threaded on the memory edge, then build a
 * replacement parameterless Call whose method type keeps only the
 * result types. The new Call is linked to itself so it is not
 * processed again.
 * NOTE(review): several declaration and else lines are missing from
 * this extraction.
 */
static void prepare_walker(ir_node *irn, void *data)
opcode opc = get_irn_opcode(irn);
/* A replacement for this node has already been computed. */
if(get_irn_link(irn))
if(opc == iro_Call) {
ir_node *bl = get_nodes_block(irn);
ir_graph *irg = get_irn_irg(bl);
ir_node *store = get_Call_mem(irn);
ir_node *ptr = get_Call_ptr(irn);
ir_type *ct = get_Call_type(irn);
int np = get_Call_n_params(irn) > 0 ? 1 : 0;
int i, n = get_Call_n_params(irn);
unsigned cc = get_method_calling_convention(get_Call_type(irn));
/* push order depends on the calling convention: last-on-top pushes
 * left-to-right, otherwise right-to-left */
if (cc & cc_last_on_top) {
store = new_Push(irg, bl, store, get_Call_param(irn, 0));
for (i = 1; i < n; ++i)
store = new_Push(irg, bl, store, get_Call_param(irn, i));
store = new_Push(irg, bl, store, get_Call_param(irn, n - 1));
for (i = n - 2; i >= 0; --i)
store = new_Push(irg, bl, store, get_Call_param(irn, i));
/* build a new method type "push_<name>" containing only the results */
snprintf(buf, sizeof(buf), "push_%s", get_type_name(ct));
n = get_method_n_ress(ct);
nt = new_type_method(new_id_from_str(buf), 0, n);
for(i = 0; i < n; ++i)
set_method_res_type(nt, i, get_method_res_type(ct, i));
nc = new_r_Call(irg, bl, store, ptr, 0, ins, nt);
/* mark the replacement so the walker skips it */
set_irn_link(nc, nc);
/**
 * Graph walker: replace Const/Unknown/SymConst operands by fresh Imm
 * nodes placed near the user. For Phi users the Imm goes into the
 * corresponding predecessor block of input i.
 * NOTE(review): the first condition line (the iro_Const test) is
 * missing from this extraction.
 */
static void localize_const_walker(ir_node *irn, void *data)
ir_node *bl = get_nodes_block(irn);
for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
opcode opc = get_irn_opcode(op);
|| opc == iro_Unknown
|| (opc == iro_SymConst /*&& get_SymConst_kind(op) == symconst_addr_ent*/)) {
ir_graph *irg = get_irn_irg(bl);
ir_node *imm_bl = is_Phi(irn) ? get_Block_cfgpred_block(bl, i) : bl;
ir_node *imm = new_Imm(irg, imm_bl, op);
set_irn_n(irn, i, imm);
510 static const arch_irn_handler_t *firm_get_irn_handler(const void *self)
512 return &firm_irn_handler;
515 typedef struct _firm_code_gen_t {
516 const arch_code_generator_if_t *impl;
521 static void firm_prepare_graph(void *self)
523 firm_code_gen_t *cg = self;
525 irg_walk_graph(cg->irg, firm_clear_link, localize_const_walker, NULL);
526 irg_walk_graph(cg->irg, NULL, prepare_walker, NULL);
529 static void firm_before_sched(void *self)
/**
 * Walker run before register allocation: move each Imm into its single
 * user's block (for a Phi user, into the corresponding predecessor
 * block) and schedule it immediately before that user.
 * NOTE(review): the edge/user extraction and the Phi branch are
 * partially missing from this extraction.
 */
static void imm_scheduler(ir_node *irn, void *env) {
ir_node *user, *user_block, *before, *tgt_block;
/* every Imm is created for exactly one user */
if (1 != get_irn_n_edges(irn)) {
printf("Out edges: %d\n", get_irn_n_edges(irn));
assert(1 == get_irn_n_edges(irn));
e = get_irn_out_edge_first(irn);
user_block = get_nodes_block(user);
before = get_Block_cfgpred_block(user_block, e->pos);
tgt_block = user_block;
set_nodes_block(irn, tgt_block);
sched_add_before(before, irn);
560 static void firm_before_ra(void *self)
562 firm_code_gen_t *cg = self;
563 irg_walk_graph(cg->irg, imm_scheduler, NULL, NULL);
566 static void firm_after_ra(void *self)
570 static void firm_codegen_done(void *self)
575 static void *firm_cg_init(const be_irg_t *birg);
577 static const arch_code_generator_if_t firm_code_gen_if = {
/**
 * Allocate and initialize the code generator object for @p birg.
 * NOTE(review): the remaining field assignments and the return are
 * missing from this extraction.
 */
static void *firm_cg_init(const be_irg_t *birg)
firm_code_gen_t *cg = xmalloc(sizeof(*cg));
cg->impl = &firm_code_gen_if;
597 static const arch_code_generator_if_t *firm_get_code_generator_if(void *self)
599 return &firm_code_gen_if;
602 static const list_sched_selector_t *firm_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
603 return trivial_selector;
606 static const ilp_sched_selector_t *firm_get_ilp_sched_selector(const void *self) {
611 * Returns the necessary byte alignment for storing a register of given class.
613 static int firm_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
614 ir_mode *mode = arch_register_class_mode(cls);
615 return get_mode_size_bytes(mode);
618 static const be_execution_unit_t ***firm_get_allowed_execution_units(const void *self, const ir_node *irn) {
624 static const be_machine_t *firm_get_machine(const void *self) {
631 * Returns the libFirm configuration parameter for this backend.
/**
 * Return the static libFirm configuration parameters for this backend.
 * NOTE(review): trailing initializer fields, the wiring of ad into p,
 * and the return are missing from this extraction.
 */
static const backend_params *firm_get_libfirm_params(void) {
static arch_dep_params_t ad = {
0, /* Muls are fast enough on Firm */
31, /* shift would be ok */
static backend_params p = {
NULL, /* no additional opcodes */
NULL, /* will be set later */
0, /* no dword lowering */
NULL, /* no creator function */
NULL, /* context for create_intrinsic_fkt */
655 static void firm_register_options(lc_opt_entry_t *ent)
/* The ISA interface vtable exported by this backend.
 * NOTE(review): some initializer entries (init/done, get_reg_class,
 * get_call_abi, machine hooks) are missing from this extraction. */
const arch_isa_if_t firm_isa = {
firm_get_n_reg_class,
firm_get_reg_class_for_mode,
firm_get_irn_handler,
firm_get_code_generator_if,
firm_get_list_sched_selector,
firm_get_ilp_sched_selector,
firm_get_reg_class_alignment,
firm_get_libfirm_params,
firm_get_allowed_execution_units,
firm_register_options,