2 * ISA implementation for Firm IR nodes.
13 #include "iredges_t.h"
21 #include "../bearch.h"
22 #include "../besched.h"
23 #include "../beutil.h"
/* Tag for the Imm attribute payload: plain constant or symbolic constant. */
29 enum { imm_Const, imm_SymConst } tp;
/* SymConst payload (entity/ident); which union member is valid follows 'tp'. */
32 symconst_attr symc_attr;
/* The backend's single register file: N_REGS general-purpose "datab" regs. */
36 static arch_register_t datab_regs[N_REGS];
38 static arch_register_class_t reg_classes[] = {
39 { "datab", N_REGS, NULL, datab_regs },
/* Backend-private opcode for the Push nodes created around calls. */
42 static ir_op *op_push;
/* Forward declaration; the ISA vtable itself is defined at end of file. */
45 const arch_isa_t_if firm_isa declaration follows -- see below
45 const arch_isa_if_t firm_isa;
/* NOTE(review): the next line is the continuation of a #define N_CLASSES —
 * the defining line itself is not visible in this excerpt. */
48 (sizeof(reg_classes) / sizeof(reg_classes[0]))
/* Return the tarval of an Imm node, or NULL when it wraps a SymConst. */
52 tarval *get_Imm_tv(ir_node *n) {
53 imm_attr_t *attr = (imm_attr_t *)get_irn_generic_attr(n);
54 return attr->tp == imm_Const ? attr->data.cnst_attr.tv : NULL;
/* Opcode test: is this node one of our backend's Imm nodes? */
57 int is_Imm(const ir_node *irn) {
58 return get_irn_op(irn) == op_imm;
/* Node dumper for Imm: prints the constant value / symbol name in graph dumps. */
61 static int dump_node_Imm(ir_node *n, FILE *F, dump_reason_t reason) {
69 case dump_node_opcode_txt:
/* Const payload: print the tarval itself as the opcode text. */
73 tarval_snprintf(buf, sizeof(buf), tv);
74 fprintf(F, "%s", buf);
/* SymConst payload: fixed opcode label only. */
77 fprintf(F, "immSymC");
81 case dump_node_mode_txt:
82 mode = get_irn_mode(n);
/* Suppress internal modes that carry no value information. */
84 if (mode && mode != mode_BB && mode != mode_ANY && mode != mode_BAD && mode != mode_T) {
85 fprintf(F, "[%s]", get_mode_name(mode));
89 case dump_node_nodeattr_txt:
90 attr = (imm_attr_t *)get_irn_generic_attr(n);
92 if (is_Imm(n) && attr->tp == imm_SymConst) {
93 const char *name = NULL;
94 symconst_attr *sc_attr = &attr->data.symc_attr;
/* Resolve a printable symbol name depending on the SymConst kind. */
96 switch (sc_attr->num) {
97 case symconst_addr_name:
98 name = get_id_str(sc_attr->sym.ident_p);
101 case symconst_addr_ent:
102 name = get_entity_ld_name(sc_attr->sym.entity_p);
106 assert(!"Unsupported SymConst");
109 fprintf(F, "&%s ", name);
114 case dump_node_info_txt:
/*
 * arch_isa_if init hook: allocate the ISA object, name the registers of the
 * single "datab" class, and create the backend-private Push and Imm opcodes.
 */
121 static void *firm_init(FILE *outfile)
123 static struct obstack obst;
124 static int inited = 0;
125 arch_isa_t *isa = xmalloc(sizeof(*isa));
128 isa->impl = &firm_isa;
136 for(k = 0; k < N_CLASSES; ++k) {
/* NOTE(review): "®_classes" below is mojibake — the byte sequence "&reg"
 * was turned into the HTML entity "&reg;"; it must read "&reg_classes[k]". */
137 arch_register_class_t *cls = ®_classes[k];
141 for(i = 0; i < cls->n_regs; ++i) {
/* Cast away const: the class's register array is populated here, once. */
145 arch_register_t *reg = (arch_register_t *) &cls->regs[i];
/* Registers are named r0..r<n-1>; the names live on the static obstack. */
147 n = snprintf(buf, sizeof(buf), "r%d", i);
148 name = obstack_copy0(&obst, buf, n);
151 reg->reg_class = cls;
158 * Create some opcodes and types to let firm look a little
159 * bit more like real machines.
162 int push_opc = get_next_ir_opcode();
/* Push: pinned, binary (memory + value), no extra attributes. */
164 op_push = new_ir_op(push_opc, "Push",
165 op_pin_state_pinned, 0, oparity_binary, 0, 0, NULL);
169 int imm_opc = get_next_ir_opcode();
/* Imm gets a custom dumper and room for an imm_attr_t in the node attrs. */
172 memset(&ops, 0, sizeof(ops));
173 ops.dump_node = dump_node_Imm;
175 op_imm = new_ir_op(imm_opc, "Imm",
176 op_pin_state_pinned, 0, oparity_zero, 0, sizeof(imm_attr_t), &ops);
/* ISA teardown hook: nothing visible to release in this excerpt. */
182 static void firm_done(void *self)
/* Number of register classes: just the single "datab" class (N_CLASSES). */
187 static int firm_get_n_reg_class(const void *self)
192 static const arch_register_class_t *firm_get_reg_class(const void *self, int i)
194 assert(i >= 0 && i < N_CLASSES);
195 return ®_classes[i];
198 static const arch_register_class_t *firm_get_reg_class_for_mode(const void *self, const ir_mode *irm)
200 return mode_is_datab(irm) ? ®_classes[CLS_DATAB] : NULL;
/* ABI callback: lazily build an empty "between" type — nothing (no saved
 * regs / return address) is modelled between frame and argument area.
 * NOTE(review): the lazy-init guard (if (!between_type)) is elided here. */
203 static ir_type *firm_abi_get_between_type(void *self) {
204 static ir_type *between_type = NULL;
207 between_type = new_type_class(new_id_from_str("firm_be_between"));
208 set_type_size_bytes(between_type, 0);
/* ABI callback table; only the between-type hook is visible in this excerpt. */
214 static const be_abi_callbacks_t firm_abi_callbacks = {
217 firm_abi_get_between_type,
/*
 * arch_isa_if callback: decide the calling convention for one call.
 * Primitive parameters and results are passed in "datab" registers indexed
 * by their position; non-primitive parameters go to the stack.
 * NOTE(review): "®_classes" below is mojibake for "&reg_classes"
 * ("&reg" became the HTML entity &reg;) — the encoding must be restored.
 * NOTE(review): &cls->regs[i] is indexed by parameter position without a
 * bound against cls->n_regs — overflows for calls with > N_REGS primitive
 * params; confirm against N_REGS.
 */
223 static void firm_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi)
225 const arch_register_class_t *cls = ®_classes[CLS_DATAB];
/* All ABI flags start cleared. */
227 be_abi_call_flags_t flags = { { 0, 0, 0, 0, 0 } };
/* Parameters: register for primitives, a 1-aligned stack slot otherwise. */
230 for(i = 0, n = get_method_n_params(method_type); i < n; ++i) {
231 ir_type *t = get_method_param_type(method_type, i);
232 if(is_Primitive_type(t))
233 be_abi_call_param_reg(abi, i, &cls->regs[i]);
235 be_abi_call_param_stack(abi, i, 1, 0, 0);
/* Results: primitives are returned in registers, indexed by position. */
238 for(i = 0, n = get_method_n_ress(method_type); i < n; ++i) {
239 ir_type *t = get_method_res_type(method_type, i);
240 if(is_Primitive_type(t))
241 be_abi_call_res_reg(abi, i, &cls->regs[i]);
245 be_abi_call_set_flags(abi, flags, &firm_abi_callbacks);
/* Default register requirement: any register of the "datab" class.
 * NOTE(review): "®_classes" is mojibake for "&reg_classes" — fix encoding. */
249 static const arch_register_req_t firm_std_reg_req = {
250 arch_register_req_type_normal,
251 ®_classes[CLS_DATAB],
/* arch_irn_ops callback: any node whose mode the backend handles gets the
 * standard datab requirement; behaviour for other modes is elided here. */
256 static const arch_register_req_t *
257 firm_get_irn_reg_req(const void *self,
258 arch_register_req_t *req, const ir_node *irn, int pos)
260 if(is_firm_be_mode(get_irn_mode(irn)))
261 memcpy(req, &firm_std_reg_req, sizeof(*req));
/* Association of an IR node with its assigned register, stored in a set. */
268 struct irn_reg_assoc {
270 const arch_register_t *reg;
/* set compare callback: entries are equal iff they reference the same node
 * (pointer identity); returns 0 on equality as the set API expects. */
273 static int cmp_irn_reg_assoc(const void *a, const void *b, size_t len)
275 const struct irn_reg_assoc *x = a;
276 const struct irn_reg_assoc *y = b;
278 return x->irn != y->irn;
/* Look up (or lazily create) the assoc entry for irn, hashed by node ptr. */
281 static struct irn_reg_assoc *get_irn_reg_assoc(const ir_node *irn)
283 static set *reg_set = NULL;
284 struct irn_reg_assoc templ;
287 reg_set = new_set(cmp_irn_reg_assoc, 1024);
/* set_insert returns the existing entry or inserts templ as a new one.
 * NOTE(review): reg_set is function-static and never freed — leaks at exit
 * and is shared across all graphs; confirm that is intended. */
292 return set_insert(reg_set, &templ, sizeof(templ), HASH_PTR(irn));
/* arch_irn_ops: record the register allocated to irn (assignment elided). */
295 static void firm_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
297 struct irn_reg_assoc *assoc = get_irn_reg_assoc(irn);
/* arch_irn_ops: return the register previously assigned to irn. */
301 static const arch_register_t *firm_get_irn_reg(const void *self, const ir_node *irn)
303 struct irn_reg_assoc *assoc = get_irn_reg_assoc(irn);
/* arch_irn_ops: coarse node classification used by scheduler/spiller.
 * The opcode case labels are elided in this excerpt. */
307 static arch_irn_class_t firm_classify(const void *self, const ir_node *irn)
309 arch_irn_class_t res;
311 switch(get_irn_opcode(irn)) {
314 res = arch_irn_class_branch;
317 res = arch_irn_class_call;
/* default: an ordinary data node */
320 res = arch_irn_class_normal;
/* arch_irn_ops: per-node flags. Imm nodes — and some plain Firm opcodes in
 * the elided switch cases — may be rematerialized instead of spilled. */
326 static arch_irn_flags_t firm_get_flags(const void *self, const ir_node *irn)
328 arch_irn_flags_t res = 0;
330 if(get_irn_op(irn) == op_imm)
331 res |= arch_irn_flags_rematerializable;
333 switch(get_irn_opcode(irn)) {
343 res |= arch_irn_flags_rematerializable;
/* Stack-bias / frame-entity hooks: bodies are elided here — presumably
 * no-ops, as this toy backend models no stack frame. TODO confirm. */
351 static void firm_set_stack_bias(const void *self, ir_node *irn, int bias)
355 static ir_entity *firm_get_frame_entity(const void *self, const ir_node *irn)
360 static void firm_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent)
/* Node-operations vtable; optional hooks are explicitly left NULL. */
364 static const arch_irn_ops_if_t firm_irn_ops_if = {
365 firm_get_irn_reg_req,
370 firm_get_frame_entity,
371 firm_set_frame_entity,
373 NULL, /* get_inverse */
374 NULL, /* get_op_estimated_cost */
375 NULL, /* possible_memory_operand */
376 NULL, /* perform_memory_operand */
379 static const arch_irn_ops_t firm_irn_ops = {
/* Handler callback: every node shares the single ops table above. */
383 static const void *firm_get_irn_ops(const arch_irn_handler_t *self,
386 return &firm_irn_ops;
389 const arch_irn_handler_t firm_irn_handler = {
/* Build a Push node: consumes memory + one argument, produces memory. */
393 static ir_node *new_Push(ir_graph *irg, ir_node *bl, ir_node *push, ir_node *arg)
398 return new_ir_node(NULL, irg, bl, op_push, mode_M, 2, ins);
402 * Creates an op_Imm node from an op_Const.
/* (Also handles SymConst; asserts on any other opcode.) */
404 static ir_node *new_Imm(ir_graph *irg, ir_node *bl, ir_node *cnst) {
409 res = new_ir_node(NULL, irg, bl, op_imm, get_irn_mode(cnst), 0, ins);
/* Direct access to res->attr — relies on the generic attribute layout. */
410 attr = (imm_attr_t *) &res->attr;
/* Copy the source node's constant payload and tag the union accordingly. */
412 switch (get_irn_opcode(cnst)) {
414 attr->tp = imm_Const;
415 attr->data.cnst_attr = get_irn_const_attr(cnst);
418 attr->tp = imm_SymConst;
419 attr->data.symc_attr = get_irn_symconst_attr(cnst);
424 assert(0 && "Cannot create Imm for this opcode");
/*
 * Pre-walker: rewrite each Call into a chain of Push nodes feeding a
 * parameterless Call, emulating a stack-based calling convention.
 */
430 static void prepare_walker(ir_node *irn, void *data)
432 ir_opcode opc = get_irn_opcode(irn);
434 /* A replacement for this node has already been computed. */
435 if(get_irn_link(irn))
438 if(opc == iro_Call) {
439 ir_node *bl = get_nodes_block(irn);
440 ir_graph *irg = get_irn_irg(bl);
442 ir_node *store = get_Call_mem(irn);
443 ir_node *ptr = get_Call_ptr(irn);
444 ir_type *ct = get_Call_type(irn);
/* np: 1 if the call has any parameters, else 0 (use site elided here). */
445 int np = get_Call_n_params(irn) > 0 ? 1 : 0;
451 int i, n = get_Call_n_params(irn);
453 unsigned cc = get_method_calling_convention(get_Call_type(irn));
/* Push order follows the calling convention: last-on-top pushes params
 * left-to-right, otherwise right-to-left. Each Push threads the memory. */
455 if (cc & cc_last_on_top) {
456 store = new_Push(irg, bl, store, get_Call_param(irn, 0));
458 for (i = 1; i < n; ++i)
459 store = new_Push(irg, bl, store, get_Call_param(irn, i));
462 store = new_Push(irg, bl, store, get_Call_param(irn, n - 1));
464 for (i = n - 2; i >= 0; --i)
465 store = new_Push(irg, bl, store, get_Call_param(irn, i));
/* Build a fresh parameterless method type that keeps the result types. */
468 snprintf(buf, sizeof(buf), "push_%s", get_type_name(ct));
470 n = get_method_n_ress(ct);
471 nt = new_type_method(new_id_from_str(buf), 0, n);
472 for(i = 0; i < n; ++i)
473 set_method_res_type(nt, i, get_method_res_type(ct, i));
/* The new Call receives its arguments implicitly via the Push chain. */
475 nc = new_r_Call(irg, bl, store, ptr, 0, ins, nt);
/* Mark the replacement via the link field so it is not rewritten again. */
477 set_irn_link(nc, nc);
/*
 * Walker: replace Const/Unknown/SymConst operands by backend Imm nodes
 * placed in the user's block, so each use has its constant materialized
 * locally. For Phi inputs the Imm goes into the matching predecessor block.
 */
482 static void localize_const_walker(ir_node *irn, void *data)
486 ir_node *bl = get_nodes_block(irn);
488 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
489 ir_node *op = get_irn_n(irn, i);
490 ir_opcode opc = get_irn_opcode(op);
493 || opc == iro_Unknown
494 || (opc == iro_SymConst /*&& get_SymConst_kind(op) == symconst_addr_ent*/)) {
495 ir_graph *irg = get_irn_irg(bl);
496 ir_node *imm_bl = is_Phi(irn) ? get_Block_cfgpred_block(bl, i) : bl;
498 ir_node *imm = new_Imm(irg, imm_bl, op);
499 set_irn_n(irn, i, imm);
/* arch_isa_if callback: one handler serves all nodes. */
505 static const arch_irn_handler_t *firm_get_irn_handler(const void *self)
507 return &firm_irn_handler;
/* Per-irg code generator state: impl vtable (plus an irg field used below;
 * the full struct body is elided in this excerpt). */
510 typedef struct _firm_code_gen_t {
511 const arch_code_generator_if_t *impl;
/* Codegen phase 1: localize constants, then lower Calls to Push chains.
 * firm_clear_link resets links first so prepare_walker's marks are fresh. */
516 static void firm_prepare_graph(void *self)
518 firm_code_gen_t *cg = self;
520 irg_walk_graph(cg->irg, firm_clear_link, localize_const_walker, NULL);
521 irg_walk_graph(cg->irg, NULL, prepare_walker, NULL);
/* Hook before scheduling: body elided — apparently nothing to do. */
524 static void firm_before_sched(void *self)
/*
 * Walker run before register allocation: every Imm must have exactly one
 * user. Move the Imm into its user's block (for a Phi user: the matching
 * predecessor block) and schedule it directly before its use point, so it
 * is live only where needed.
 */
528 static void imm_scheduler(ir_node *irn, void *env) {
531 ir_node *user, *user_block, *before, *tgt_block;
/* Invariant check: localize_const_walker gives each Imm a single user. */
533 if (1 != get_irn_n_edges(irn)) {
534 printf("Out edges: %d\n", get_irn_n_edges(irn));
535 assert(1 == get_irn_n_edges(irn));
538 e = get_irn_out_edge_first(irn);
540 user_block = get_nodes_block(user);
/* Phi user: target the predecessor block selected by the input position. */
542 before = get_Block_cfgpred_block(user_block, e->pos);
546 tgt_block = user_block;
550 set_nodes_block(irn, tgt_block);
551 sched_add_before(before, irn);
/* Codegen hook: run the Imm placement fix-up before register allocation. */
555 static void firm_before_ra(void *self)
557 firm_code_gen_t *cg = self;
558 irg_walk_graph(cg->irg, imm_scheduler, NULL, NULL);
/* Remaining codegen hooks: bodies elided — nothing after RA / at finish. */
561 static void firm_after_ra(void *self)
565 static void firm_codegen_done(void *self)
570 static void *firm_cg_init(be_irg_t *birg);
/* Code generator vtable (entries elided in this excerpt). */
572 static const arch_code_generator_if_t firm_code_gen_if = {
/* Allocate and initialize the per-irg code generator object. */
583 static void *firm_cg_init(be_irg_t *birg)
585 firm_code_gen_t *cg = xmalloc(sizeof(*cg));
586 cg->impl = &firm_code_gen_if;
592 static const arch_code_generator_if_t *firm_get_code_generator_if(void *self)
594 return &firm_code_gen_if;
/* Scheduling: use the trivial list scheduler; no ILP scheduler provided. */
597 static const list_sched_selector_t *firm_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
598 return trivial_selector;
601 static const ilp_sched_selector_t *firm_get_ilp_sched_selector(const void *self) {
606 * Returns the necessary byte alignment for storing a register of given class.
608 static int firm_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
/* Alignment simply equals the byte size of the class's register mode. */
609 ir_mode *mode = arch_register_class_mode(cls);
610 return get_mode_size_bytes(mode);
/* No machine description: no execution units are modelled (bodies elided). */
613 static const be_execution_unit_t ***firm_get_allowed_execution_units(const void *self, const ir_node *irn) {
619 static const be_machine_t *firm_get_machine(const void *self) {
626 * Return irp irgs in the desired order.
628 static ir_graph **firm_get_irg_list(const void *self, ir_graph ***irg_list) {
633 * Returns the libFirm configuration parameter for this backend.
635 static const backend_params *firm_get_libfirm_params(void) {
/* Architecture-dependent optimization parameters handed to libFirm. */
636 static arch_dep_params_t ad = {
638 0, /* Muls are fast enough on Firm */
639 31, /* shift would be ok */
644 static backend_params p = {
645 NULL, /* no additional opcodes */
646 NULL, /* will be set later */
647 0, /* no dword lowering */
648 NULL, /* no creator function */
649 NULL, /* context for create_intrinsic_fkt */
/* The ISA interface vtable exported to the backend framework; entries
 * pair up with the callbacks defined above (some slots elided here). */
656 const arch_isa_if_t firm_isa = {
659 firm_get_n_reg_class,
661 firm_get_reg_class_for_mode,
663 firm_get_irn_handler,
664 firm_get_code_generator_if,
665 firm_get_list_sched_selector,
666 firm_get_ilp_sched_selector,
667 firm_get_reg_class_alignment,
668 firm_get_libfirm_params,
669 firm_get_allowed_execution_units,