 * ISA implementation for Firm IR nodes.
10 #include <libcore/lc_opts.h>
18 #include "iredges_t.h"
25 #include "../bearch.h"
26 #include "../besched.h"
27 #include "../beutil.h"
/* Tag telling which member of the Imm attribute union is valid: a plain
 * constant (tarval) or a symbolic constant.
 * NOTE(review): fragment — the enclosing struct/union declaration is not
 * visible here. */
enum { imm_Const, imm_SymConst } tp;
symconst_attr symc_attr;

/* The registers of the single general-purpose "datab" class. */
static arch_register_t datab_regs[N_REGS];

/* All register classes of this ISA; there is only the "datab" class.
 * NOTE(review): fragment — closing brace of the initializer not visible. */
static arch_register_class_t reg_classes[] = {
{ "datab", N_REGS, NULL, datab_regs },

/* Opcode of the machine-like Push node created in firm_init(). */
static ir_op *op_push;

/* Forward declaration; defined at the bottom of this file. */
const arch_isa_if_t firm_isa;

/* NOTE(review): fragment — this is the body of the N_CLASSES macro; the
 * "#define N_CLASSES \" line itself is not visible here. */
(sizeof(reg_classes) / sizeof(reg_classes[0]))
56 tarval *get_Imm_tv(ir_node *n) {
57 imm_attr_t *attr = (imm_attr_t *)get_irn_generic_attr(n);
58 return attr->tp == imm_Const ? attr->data.cnst_attr.tv : NULL;
61 int is_Imm(const ir_node *irn) {
62 return get_irn_op(irn) == op_imm;
/**
 * Node-dumper callback for Imm nodes: prints opcode text, mode and the
 * symbolic-constant name, depending on @p reason.
 * NOTE(review): fragmentary view — the declarations of 'buf', 'tv',
 * 'mode' and 'attr', the switch head and several case/break lines are
 * not visible in this chunk.
 */
static int dump_node_Imm(ir_node *n, FILE *F, dump_reason_t reason) {
/* Opcode text: print the constant's value, or "immSymC" for symbols. */
case dump_node_opcode_txt:
tarval_snprintf(buf, sizeof(buf), tv);
fprintf(F, "%s", buf);
fprintf(F, "immSymC");
/* Mode text: print the mode name unless it is an internal mode. */
case dump_node_mode_txt:
mode = get_irn_mode(n);
if (mode && mode != mode_BB && mode != mode_ANY && mode != mode_BAD && mode != mode_T) {
fprintf(F, "[%s]", get_mode_name(mode));
/* Node attribute text: print "&name" for SymConst immediates. */
case dump_node_nodeattr_txt:
attr = (imm_attr_t *)get_irn_generic_attr(n);
if (is_Imm(n) && attr->tp == imm_SymConst) {
const char *name = NULL;
symconst_attr *sc_attr = &attr->data.symc_attr;
switch (sc_attr->num) {
case symconst_addr_name:
/* Symbol is given as an identifier. */
name = get_id_str(sc_attr->sym.ident_p);
case symconst_addr_ent:
/* Symbol is given as an entity: use its linker (ld) name. */
name = get_entity_ld_name(sc_attr->sym.entity_p);
assert(!"Unsupported SymConst");
fprintf(F, "&%s ", name);
case dump_node_info_txt:
/**
 * Create and initialize the ISA object: name all registers of every
 * class ("r0".."rN") and create the machine-like Push and Imm opcodes.
 * NOTE(review): fragmentary view — the 'inited' guard, loop-variable
 * declarations, register index/name assignments and the return are not
 * visible in this chunk.
 */
static void *firm_init(void)
static struct obstack obst;
static int inited = 0;
arch_isa_t *isa = xmalloc(sizeof(*isa));
isa->impl = &firm_isa;
/* Build the register objects for every register class. */
for(k = 0; k < N_CLASSES; ++k) {
/* NOTE(review): "®_classes" is mis-encoded "&reg_classes" (HTML
 * "&reg;" mojibake) — fix the encoding. */
arch_register_class_t *cls = ®_classes[k];
for(i = 0; i < cls->n_regs; ++i) {
/* Cast away const: the register table is filled in exactly once. */
arch_register_t *reg = (arch_register_t *) &cls->regs[i];
n = snprintf(buf, sizeof(buf), "r%d", i);
name = obstack_copy0(&obst, buf, n);
reg->reg_class = cls;
/*
 * Create some opcodes and types to let firm look a little
 * bit more like real machines.
 */
int push_opc = get_next_ir_opcode();
op_push = new_ir_op(push_opc, "Push",
op_pin_state_pinned, 0, oparity_binary, 0, 0, NULL);
int imm_opc = get_next_ir_opcode();
memset(&ops, 0, sizeof(ops));
ops.dump_node = dump_node_Imm;
op_imm = new_ir_op(imm_opc, "Imm",
op_pin_state_pinned, 0, oparity_zero, 0, sizeof(imm_attr_t), &ops);
/* ISA destructor; presumably frees the object allocated in firm_init().
 * NOTE(review): body not visible in this fragment — confirm. */
static void firm_done(void *self)

/* Return the number of register classes (N_CLASSES).
 * NOTE(review): body not visible in this fragment — confirm. */
static int firm_get_n_reg_class(const void *self)
196 static const arch_register_class_t *firm_get_reg_class(const void *self, int i)
198 assert(i >= 0 && i < N_CLASSES);
199 return ®_classes[i];
202 static const arch_register_class_t *firm_get_reg_class_for_mode(const void *self, const ir_mode *irm)
204 return mode_is_datab(irm) ? ®_classes[CLS_DATAB] : NULL;
207 static void firm_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi)
209 static ir_type *between_type = NULL;
210 const arch_register_class_t *cls = ®_classes[CLS_DATAB];
214 between_type = new_type_class(new_id_from_str("firm_be_between"));
215 set_type_size_bytes(between_type, 0);
219 for(i = 0, n = get_method_n_params(method_type); i < n; ++i) {
220 ir_type *t = get_method_param_type(method_type, i);
221 if(is_Primitive_type(t))
222 be_abi_call_param_reg(abi, i, &cls->regs[i]);
224 be_abi_call_param_stack(abi, i);
227 for(i = 0, n = get_method_n_ress(method_type); i < n; ++i) {
228 ir_type *t = get_method_res_type(method_type, i);
229 if(is_Primitive_type(t))
230 be_abi_call_res_reg(abi, i, &cls->regs[i]);
233 be_abi_call_set_flags(abi, BE_ABI_NONE, between_type);
237 static const arch_register_req_t firm_std_reg_req = {
238 arch_register_req_type_normal,
239 ®_classes[CLS_DATAB],
244 static const arch_register_req_t *
245 firm_get_irn_reg_req(const void *self,
246 arch_register_req_t *req, const ir_node *irn, int pos)
248 if(is_firm_be_mode(get_irn_mode(irn)))
249 memcpy(req, &firm_std_reg_req, sizeof(*req));
256 struct irn_reg_assoc {
258 const arch_register_t *reg;
261 static int cmp_irn_reg_assoc(const void *a, const void *b, size_t len)
263 const struct irn_reg_assoc *x = a;
264 const struct irn_reg_assoc *y = b;
266 return x->irn != y->irn;
269 static struct irn_reg_assoc *get_irn_reg_assoc(const ir_node *irn)
271 static set *reg_set = NULL;
272 struct irn_reg_assoc templ;
275 reg_set = new_set(cmp_irn_reg_assoc, 1024);
280 return set_insert(reg_set, &templ, sizeof(templ), HASH_PTR(irn));
283 static void firm_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
285 struct irn_reg_assoc *assoc = get_irn_reg_assoc(irn);
289 static const arch_register_t *firm_get_irn_reg(const void *self, const ir_node *irn)
291 struct irn_reg_assoc *assoc = get_irn_reg_assoc(irn);
295 static arch_irn_class_t firm_classify(const void *self, const ir_node *irn)
297 arch_irn_class_t res;
299 switch(get_irn_opcode(irn)) {
302 res = arch_irn_class_branch;
305 res = arch_irn_class_call;
308 res = arch_irn_class_normal;
/**
 * Return the architecture flags of @p irn: Imm nodes — and further
 * opcodes whose case labels are not visible in this fragment — are
 * rematerializable.
 * NOTE(review): fragmentary view — switch case labels, closing braces
 * and the return statement are missing here.
 */
static arch_irn_flags_t firm_get_flags(const void *self, const ir_node *irn)
arch_irn_flags_t res = 0;
if(get_irn_op(irn) == op_imm)
res |= arch_irn_flags_rematerializable;
switch(get_irn_opcode(irn)) {
res |= arch_irn_flags_rematerializable;
339 static void firm_set_stack_bias(const void *self, ir_node *irn, int bias)
343 static entity *firm_get_frame_entity(const void *self, const ir_node *irn)
/* Callback vtable implementing the arch_irn_ops interface for this ISA.
 * NOTE(review): fragmentary — the slots between get_irn_reg_req and
 * get_frame_entity (set/get reg, classify, flags, ...) are not visible
 * here. */
static const arch_irn_ops_if_t firm_irn_ops_if = {
firm_get_irn_reg_req,
firm_get_frame_entity,

/* Wrapper object carrying the vtable above.
 * NOTE(review): initializer entries not visible in this fragment. */
static const arch_irn_ops_t firm_irn_ops = {
362 static const void *firm_get_irn_ops(const arch_irn_handler_t *self,
365 return &firm_irn_ops;
/* The node handler exposed by this ISA; hands out firm_irn_ops for any
 * node. NOTE(review): initializer entries not visible in this fragment. */
const arch_irn_handler_t firm_irn_handler = {
372 static ir_node *new_Push(ir_graph *irg, ir_node *bl, ir_node *push, ir_node *arg)
377 return new_ir_node(NULL, irg, bl, op_push, mode_M, 2, ins);
381 * Creates an op_Imm node from an op_Const.
383 static ir_node *new_Imm(ir_graph *irg, ir_node *bl, ir_node *cnst) {
388 res = new_ir_node(NULL, irg, bl, op_imm, get_irn_mode(cnst), 0, ins);
389 attr = (imm_attr_t *) &res->attr;
391 switch (get_irn_opcode(cnst)) {
393 attr->tp = imm_Const;
394 attr->data.cnst_attr = get_irn_const_attr(cnst);
397 attr->tp = imm_SymConst;
398 attr->data.symc_attr = get_irn_symconst_attr(cnst);
403 assert(0 && "Cannot create Imm for this opcode");
/**
 * irg walker: rewrite every Call so that its parameters are passed via
 * a chain of Push nodes (ordering chosen by the calling convention),
 * then replace the call by a new parameterless call of a freshly built
 * result-only method type.
 * NOTE(review): fragmentary view — declarations (buf, nt, nc, ins),
 * closing braces and the exchange of the old call are not visible here.
 */
static void prepare_walker(ir_node *irn, void *data)
opcode opc = get_irn_opcode(irn);
/* A replacement for this node has already been computed. */
if(get_irn_link(irn))
if(opc == iro_Call) {
ir_node *bl = get_nodes_block(irn);
ir_graph *irg = get_irn_irg(bl);
ir_node *store = get_Call_mem(irn);
ir_node *ptr = get_Call_ptr(irn);
ir_type *ct = get_Call_type(irn);
/* 1 iff the call has at least one parameter. */
int np = get_Call_n_params(irn) > 0 ? 1 : 0;
int i, n = get_Call_n_params(irn);
unsigned cc = get_method_calling_convention(get_Call_type(irn));
/* Push first-to-last when the last argument must end up on top... */
if (cc & cc_last_on_top) {
store = new_Push(irg, bl, store, get_Call_param(irn, 0));
for (i = 1; i < n; ++i)
store = new_Push(irg, bl, store, get_Call_param(irn, i));
/* ...otherwise push last-to-first. */
store = new_Push(irg, bl, store, get_Call_param(irn, n - 1));
for (i = n - 2; i >= 0; --i)
store = new_Push(irg, bl, store, get_Call_param(irn, i));
/* Build a result-only method type for the rewritten call. */
snprintf(buf, sizeof(buf), "push_%s", get_type_name(ct));
n = get_method_n_ress(ct);
nt = new_type_method(new_id_from_str(buf), 0, n);
for(i = 0; i < n; ++i)
set_method_res_type(nt, i, get_method_res_type(ct, i));
nc = new_r_Call(irg, bl, store, ptr, 0, ins, nt);
/* Mark the replacement so the walker does not process it again. */
set_irn_link(nc, nc);
461 static void localize_const_walker(ir_node *irn, void *data)
465 ir_node *bl = get_nodes_block(irn);
467 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
468 ir_node *op = get_irn_n(irn, i);
469 opcode opc = get_irn_opcode(op);
472 || opc == iro_Unknown
473 || (opc == iro_SymConst /*&& get_SymConst_kind(op) == symconst_addr_ent*/)) {
474 ir_graph *irg = get_irn_irg(bl);
475 ir_node *imm_bl = is_Phi(irn) ? get_Block_cfgpred_block(bl, i) : bl;
477 ir_node *imm = new_Imm(irg, imm_bl, op);
478 set_irn_n(irn, i, imm);
484 static const arch_irn_handler_t *firm_get_irn_handler(const void *self)
486 return &firm_irn_handler;
489 typedef struct _firm_code_gen_t {
490 const arch_code_generator_if_t *impl;
495 static void clear_link(ir_node *irn, void *data)
497 set_irn_link(irn, NULL);
500 static void firm_prepare_graph(void *self)
502 firm_code_gen_t *cg = self;
504 irg_walk_graph(cg->irg, clear_link, localize_const_walker, NULL);
505 irg_walk_graph(cg->irg, NULL, prepare_walker, NULL);
/* Hook run before scheduling: nothing to do for this ISA.
 * NOTE(review): body reconstructed as empty stub — confirm. */
static void firm_before_sched(void *self)
{
}
/**
 * Walker run before register allocation: every Imm node must have
 * exactly one user; move the Imm into the block of that user (for Phi
 * users, into the matching predecessor block) and schedule it directly
 * before its use point.
 * NOTE(review): fragmentary view — the edge/user declarations, the
 * is_Phi(user) branch and several assignments are not visible here.
 */
static void imm_scheduler(ir_node *irn, void *env) {
ir_node *user, *user_block, *before, *tgt_block;
/* An Imm is expected to have exactly one out edge. */
if (1 != get_irn_n_edges(irn)) {
printf("Out edges: %d\n", get_irn_n_edges(irn));
assert(1 == get_irn_n_edges(irn));
e = get_irn_out_edge_first(irn);
user_block = get_nodes_block(user);
/* Phi user: value must be available in the predecessor block. */
before = get_Block_cfgpred_block(user_block, e->pos);
tgt_block = user_block;
set_nodes_block(irn, tgt_block);
sched_add_before(before, irn);
539 static void firm_before_ra(void *self)
541 firm_code_gen_t *cg = self;
542 irg_walk_graph(cg->irg, imm_scheduler, NULL, NULL);
/* Code generator destructor; presumably frees the cg object allocated
 * in firm_cg_init(). NOTE(review): body not visible in this fragment. */
static void firm_codegen_done(void *self)
/* Forward declaration; defined below the vtable that references it. */
static void *firm_cg_init(FILE *file_handle, const be_irg_t *birg);

/* Code generator interface vtable of this backend.
 * NOTE(review): fragmentary — the leading slots (init, prepare, before
 * sched/ra, done, ...) are not visible here. */
static const arch_code_generator_if_t firm_code_gen_if = {
NULL, /* lower spill */
NULL, /* lower reload */
562 static void *firm_cg_init(FILE *file_handle, const be_irg_t *birg)
564 firm_code_gen_t *cg = xmalloc(sizeof(*cg));
565 cg->impl = &firm_code_gen_if;
571 static const arch_code_generator_if_t *firm_get_code_generator_if(void *self)
573 return &firm_code_gen_if;
576 static const list_sched_selector_t *firm_get_list_sched_selector(const void *self) {
577 return trivial_selector;
/* Register this backend's command-line options under @p ent.
 * NOTE(review): body not visible in this fragment. */
static void firm_register_options(lc_opt_entry_t *ent)
/* The ISA interface vtable of the firm "pseudo backend".
 * NOTE(review): fragmentary — several slots (init, done, get_reg_class,
 * get_call_abi, ...) are not visible between the entries below. */
const arch_isa_if_t firm_isa = {
firm_register_options,
firm_get_n_reg_class,
firm_get_reg_class_for_mode,
firm_get_irn_handler,
firm_get_code_generator_if,
firm_get_list_sched_selector