 * @author Sebastian Hack
 *
 * Backend node support.
 *
 * This file provides Perm, Copy, Spill and Reload nodes.
 *
 * Copyright (C) 2005 Universitaet Karlsruhe
 * Released under the GPL
#include "besched_t.h"

/* Sometimes we want to pass const nodes to get_irn_generic_attr(), so cast the const away. */
#define get_irn_attr(irn) get_irn_generic_attr((ir_node *) (irn))
static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O');

typedef enum _node_kind_t {

	const arch_register_class_t *cls;

	arch_register_req_t req;
	unsigned negate_limited : 1;
	void (*old_limited)(void *ptr, bitset_t *bs);
	void *old_limited_env;

	const arch_register_t *reg;

	const arch_register_class_t *cls;
	be_reg_data_t *reg_data;

	be_node_attr_t node_attr;
	ir_node *spill_ctx;  /**< The node in whose context this spill was introduced. */
	entity *ent;         /**< The entity in the stack frame the spill writes to. */

static ir_op *op_Spill;
static ir_op *op_Reload;
static ir_op *op_Perm;
static ir_op *op_Copy;
static ir_op *op_Keep;

static int beo_base = -1;

static const ir_op_ops be_node_op_ops;
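
/* One-letter shorthands for the irop flags passed to new_ir_op() below. */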
#define N irop_flag_none
#define L irop_flag_labeled
#define C irop_flag_commutative
#define X irop_flag_cfopcode
#define I irop_flag_ip_cfopcode
#define F irop_flag_fragile
#define Y irop_flag_forking
#define H irop_flag_highlevel
#define c irop_flag_constlike
#define K irop_flag_keep
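
/**
 * Registers the ir_ops for all backend node opcodes.
 * Assumes that get_next_ir_opcode() hands out consecutive opcodes.
 */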
void be_node_init(void) {
	static int inited = 0;
	int i;

	if(inited)
		return;
	inited = 1;

	beo_base = get_next_ir_opcode();

	/* Acquire all needed opcodes. We assume that they are consecutive! */
	for(i = beo_Spill; i < beo_Last; ++i)
		get_next_ir_opcode();

	op_Spill  = new_ir_op(beo_base + beo_Spill,  "Spill",  op_pin_state_mem_pinned, N, oparity_unary,    0, sizeof(be_spill_attr_t), &be_node_op_ops);
	op_Reload = new_ir_op(beo_base + beo_Reload, "Reload", op_pin_state_mem_pinned, N, oparity_unary,    0, sizeof(be_node_attr_t),  &be_node_op_ops);
	op_Perm   = new_ir_op(beo_base + beo_Perm,   "Perm",   op_pin_state_pinned,     N, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
	op_Copy   = new_ir_op(beo_base + beo_Copy,   "Copy",   op_pin_state_pinned,     N, oparity_unary,    0, sizeof(be_node_attr_t),  &be_node_op_ops);
	op_Keep   = new_ir_op(beo_base + beo_Keep,   "Keep",   op_pin_state_pinned,     K, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
	set_op_tag(op_Spill,  &be_node_tag);
	set_op_tag(op_Reload, &be_node_tag);
	set_op_tag(op_Perm,   &be_node_tag);
	set_op_tag(op_Copy,   &be_node_tag);
	set_op_tag(op_Keep,   &be_node_tag);
}
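
/**
 * Initializes the backend node attributes of irn: allocates n_outs
 * register data slots on the graph's obstack and gives each output a
 * normal requirement of register class cls.
 */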
static void *init_node_attr(ir_node *irn, const arch_register_class_t *cls, ir_graph *irg, int n_outs)
	be_node_attr_t *a = get_irn_attr(irn);

	a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), n_outs);
	memset(a->reg_data, 0, n_outs * sizeof(a->reg_data[0]));
	for(i = 0; i < n_outs; ++i) {
		a->reg_data[i].req.req.cls  = cls;
		a->reg_data[i].req.req.type = arch_register_req_type_normal;
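
/** Returns non-zero if irn is a backend node, i.e. its op carries the be_node_tag. */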
static INLINE int is_be_node(const ir_node *irn)
	return get_op_tag(get_irn_op(irn)) == &be_node_tag;

be_opcode_t get_irn_be_opcode(const ir_node *irn)
	return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp;
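
/**
 * Creates a Spill node which stores to_spill into the stack frame.
 * The node produces only a memory value (mode_M); ctx records the
 * node in whose context the spill was introduced.
 */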
ir_node *be_new_Spill(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *to_spill, ir_node *ctx)
	res = new_ir_node(NULL, irg, bl, op_Spill, mode_M, 1, in);
	a   = init_node_attr(res, cls, irg, 0);
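
/** Creates a Reload which reads a spilled value back from memory, yielding a fresh value of the given mode. */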
ir_node *be_new_Reload(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *mem)
	res = new_ir_node(NULL, irg, bl, op_Reload, mode, 1, in);
	init_node_attr(res, cls, irg, 1);
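
/** Creates a Perm node permuting the n values in in[]; the single results are the Projs of the mode_T node. */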
ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
	ir_node *irn = new_ir_node(NULL, irg, bl, op_Perm, mode_T, n, in);
	init_node_attr(irn, cls, irg, n);
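
/** Creates a Copy of op, carrying the same mode as op. */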
ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op)
	res = new_ir_node(NULL, irg, bl, op_Copy, get_irn_mode(op), 1, in);
	init_node_attr(res, cls, irg, 1);
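
/** Creates a Keep node which artificially keeps its n operands alive. */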
ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
	irn = new_ir_node(NULL, irg, bl, op_Keep, mode_ANY, n, in);
	init_node_attr(irn, cls, irg, 0);
int be_is_Spill(const ir_node *irn)
	return get_irn_be_opcode(irn) == beo_Spill;

int be_is_Reload(const ir_node *irn)
	return get_irn_be_opcode(irn) == beo_Reload;

int be_is_Copy(const ir_node *irn)
	return get_irn_be_opcode(irn) == beo_Copy;

int be_is_Perm(const ir_node *irn)
	return get_irn_be_opcode(irn) == beo_Perm;

int be_is_Keep(const ir_node *irn)
	return get_irn_be_opcode(irn) == beo_Keep;
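
/**
 * Wrapper for limited register requirements: applies the saved
 * original limited function and optionally negates the resulting
 * register bitset.
 */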
static void be_limited(void *data, bitset_t *bs)
	be_req_t *req = data;

	req->old_limited(req->old_limited_env, bs);
	if(req->negate_limited)
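
/**
 * Sets the output register requirement of a Perm at position pos.
 * A limited requirement is wrapped: the original limited function is
 * saved and replaced by be_limited(), which can negate the set.
 */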
void be_set_Perm_out_req(ir_node *irn, int pos, const arch_register_req_t *req, unsigned negate_limited)
	be_node_attr_t *a = get_irn_attr(irn);
	be_req_t *r       = &a->reg_data[pos].req;

	assert(be_is_Perm(irn));
	assert(pos >= 0 && pos < get_irn_arity(irn));
	memcpy(&r->req, req, sizeof(req[0]));

	if(arch_register_req_is(req, limited)) {
		r->old_limited     = r->req.limited;
		r->old_limited_env = r->req.limited_env;
		r->req.limited     = be_limited;
		r->req.limited_env = r;
		r->negate_limited  = negate_limited;
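
/** Sets the stack frame entity of a Spill node. */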
void be_set_Spill_entity(ir_node *irn, entity *ent)
	be_spill_attr_t *a = get_irn_attr(irn);
	assert(be_is_Spill(irn));
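
/**
 * Walker for find_a_spill(): DFS over the operands of memory Phis,
 * returning the first Spill node that is reached.
 */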
static ir_node *find_a_spill_walker(ir_node *irn, unsigned visited_nr)
	if(get_irn_visited(irn) < visited_nr) {
		set_irn_visited(irn, visited_nr);

		if(is_Phi(irn)) {
			for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
				ir_node *res = find_a_spill_walker(get_irn_n(irn, i), visited_nr);
				if(res != NULL)
					return res;
			}
		}
		else if(get_irn_be_opcode(irn) == beo_Spill)
			return irn;
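
/** Returns the context node of a Spill (see spill_ctx). */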
ir_node *be_get_Spill_context(const ir_node *irn) {
	const be_spill_attr_t *a = get_irn_attr(irn);
	assert(be_is_Spill(irn));
	return a->spill_ctx;
}
/**
 * Finds a spill for a reload.
 * If the reload directly uses the spill, this is trivial;
 * otherwise we perform a DFS from the reload (over all PhiMs) and
 * return the first spill node we find.
 */
static INLINE ir_node *find_a_spill(ir_node *irn)
	ir_graph *irg       = get_irn_irg(irn);
	unsigned visited_nr = get_irg_visited(irg) + 1;

	assert(be_is_Reload(irn));
	set_irg_visited(irg, visited_nr);
	return find_a_spill_walker(irn, visited_nr);
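
/**
 * Returns the stack entity used by a Spill; for a Reload, the entity
 * of the spill it reads from is looked up.
 */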
entity *be_get_spill_entity(ir_node *irn)
	switch(get_irn_be_opcode(irn)) {
	case beo_Reload:
		return be_get_spill_entity(find_a_spill(irn));
	case beo_Spill:
		{
			be_spill_attr_t *a = get_irn_attr(irn);
			return a->ent;
		}
	default:
		assert(0 && "Must give spill/reload node");
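
/**
 * Spills the value irn: creates a Spill node in irn's block and
 * schedules it behind irn, skipping any Phi nodes at the block start.
 */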
ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *ctx)
	const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);

	ir_node *bl    = get_nodes_block(irn);
	ir_graph *irg  = get_irn_irg(bl);
	ir_node *spill = be_new_Spill(cls, irg, bl, irn, ctx);

	/*
	 * search the right insertion point. a spill of a phi cannot be put
	 * directly after the phi, if there are some phis behind the one which
	 * is spilled, so we put the spill behind all phis of the block.
	 */
	insert = sched_next(irn);
	while(is_Phi(insert) && !sched_is_end(insert))
		insert = sched_next(insert);

	sched_add_before(insert, spill);
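
/**
 * Creates a Reload of the value kept in spill, makes operand pos of
 * irn refer to it and schedules the Reload directly before irn.
 */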
ir_node *be_reload(const arch_env_t *arch_env,
                   const arch_register_class_t *cls,
                   ir_node *irn, int pos, ir_mode *mode, ir_node *spill)
	ir_node *bl   = get_nodes_block(irn);
	ir_graph *irg = get_irn_irg(bl);

	assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));

	reload = be_new_Reload(cls, irg, bl, mode, spill);

	set_irn_n(irn, pos, reload);
	sched_add_before(irn, reload);
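
/**
 * If *node is a Proj, redirects the pointer to the Proj's predecessor
 * and returns the Proj number.
 */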
static int redir_proj(const ir_node **node, int pos)
	const ir_node *n = *node;

	if(is_Proj(n)) {
		assert(pos == -1 && "Illegal pos for a Proj");
		*node = get_Proj_pred(n);
		return get_Proj_proj(n);
	}
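
/**
 * Writes the output register requirement of irn at out_pos to req;
 * positions beyond the node's outputs yield a none requirement.
 */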
static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos)
	const be_node_attr_t *a = get_irn_attr(irn);

	if(out_pos < a->n_outs)
		memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));
	else
		req->type = arch_register_req_type_none;
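
/**
 * Writes the input register requirement of irn at pos to req,
 * depending on the node's backend opcode.
 */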
static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos)
	const be_node_attr_t *a = get_irn_attr(irn);
	int n                   = get_irn_arity(irn);

	req->type = arch_register_req_type_none;

	switch(get_irn_be_opcode(irn)) {
			req->type = arch_register_req_type_normal;
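
/**
 * arch_irn_ops hook: fetches the register requirement of irn at pos,
 * dispatching to put_out_reg_req() or put_in_reg_req().
 */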
static const arch_register_req_t *
be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
	if(get_irn_mode(irn) == mode_T)

		out_pos = redir_proj((const ir_node **) &irn, pos);
		assert(is_be_node(irn));
		return put_out_reg_req(req, irn, out_pos);

	return is_be_node(irn) ? put_in_reg_req(req, irn, pos) : NULL;
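
/** Stores reg as the register assigned to the given (Proj) output of irn. */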
void
be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
	out_pos = redir_proj((const ir_node **) &irn, -1);
	a       = get_irn_attr(irn);

	assert(is_be_node(irn));
	assert(out_pos < a->n_outs && "position too high");
	a->reg_data[out_pos].reg = reg;

const arch_register_t *
be_node_get_irn_reg(const void *_self, const ir_node *irn)
	out_pos = redir_proj((const ir_node **) &irn, -1);
	a       = get_irn_attr(irn);

	assert(is_be_node(irn));
	assert(out_pos < a->n_outs && "position too high");

	return a->reg_data[out_pos].reg;
arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
	redir_proj((const ir_node **) &irn, -1);

	switch(get_irn_be_opcode(irn)) {
#define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b;
arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn)
static const arch_irn_ops_if_t be_node_irn_ops_if = {
	be_node_get_irn_reg_req,

static const arch_irn_ops_t be_node_irn_ops = {

const void *be_node_get_arch_ops(const arch_irn_handler_t *self, const ir_node *irn)
	redir_proj((const ir_node **) &irn, -1);
	return is_be_node(irn) ? &be_node_irn_ops : NULL;

const arch_irn_handler_t be_node_irn_handler = {
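
/**
 * Dumper hook for backend nodes: emits the opcode and mode names and,
 * for the info text, the register class, the assigned registers and,
 * for Spills, the spill context and entity.
 */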
static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
	be_node_attr_t *at = get_irn_attr(irn);

	assert(is_be_node(irn));

	switch(reason) {
	case dump_node_opcode_txt:
		fprintf(f, "%s", get_op_name(get_irn_op(irn)));
		break;
	case dump_node_mode_txt:
		fprintf(f, "%s", get_mode_name(get_irn_mode(irn)));
		break;
	case dump_node_nodeattr_txt:
		break;
	case dump_node_info_txt:
		fprintf(f, "reg class: %s\n", at->cls->name);
		for(i = 0; i < at->n_outs; ++i) {
			const arch_register_t *reg = at->reg_data[i].reg;
			fprintf(f, "reg #%d: %s\n", i, reg ? reg->name : "n/a");
		}

		if(get_irn_be_opcode(irn) == beo_Spill) {
			be_spill_attr_t *a = (be_spill_attr_t *) at;
			unsigned ofs       = get_entity_offset_bytes(a->ent);

			ir_fprintf(f, "spill context: %+F\n", a->spill_ctx);
			ir_fprintf(f, "spill entity:  %+F offset %x (%d)\n", a->ent, ofs, ofs);
static const ir_op_ops be_node_op_ops = {
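
/**
 * Computes the set of all nodes of register class cls that are live
 * just behind pos, by walking the schedule of pos' block backwards
 * from the block's live-end set down to pos.
 */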
pset *nodes_live_at(const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *pos, pset *live)
	firm_dbg_module_t *dbg = firm_dbg_register("firm.be.node");
	ir_node *bl            = get_nodes_block(pos);

	live_foreach(bl, li) {
		ir_node *irn = (ir_node *) li->irn;
		if(live_is_end(li) && arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
			pset_insert_ptr(live, irn);
	}

	sched_foreach_reverse(bl, irn) {

		/*
		 * If we encounter the node we want to insert the Perm after,
		 * exit immediately, so that this node is still live.
		 */

		DBG((dbg, LEVEL_1, "%+F\n", irn));
		for(x = pset_first(live); x; x = pset_next(live))
			DBG((dbg, LEVEL_1, "\tlive: %+F\n", x));

		if(arch_irn_has_reg_class(arch_env, irn, -1, cls))
			pset_remove_ptr(live, irn);

		for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
			ir_node *op = get_irn_n(irn, i);

			if(arch_irn_consider_in_reg_alloc(arch_env, cls, op))
				pset_insert_ptr(live, op);
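
/**
 * Inserts a Perm node for register class cls after pos, permuting all
 * values live at that point. Every operand gets a Proj that inherits
 * the operand's register; uses are rewired with copies placed via the
 * dominance frontier information (be_introduce_copies()).
 */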
ir_node *insert_Perm_after(const arch_env_t *arch_env,
                           const arch_register_class_t *cls,
                           dom_front_info_t *dom_front,
                           ir_node *pos)
	ir_node *bl   = is_Block(pos) ? pos : get_nodes_block(pos);
	ir_graph *irg = get_irn_irg(bl);
	pset *live    = pset_new_ptr_default();
	firm_dbg_module_t *dbg = firm_dbg_register("firm.be.node");
	ir_node *curr, *irn, *perm, **nodes;
	int i, n;

	DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos));

	if(!nodes_live_at(arch_env, cls, pos, live))
		assert(0 && "position not found");

	n = pset_count(live);

	nodes = malloc(n * sizeof(nodes[0]));

	DBG((dbg, LEVEL_1, "live:\n"));
	for(irn = pset_first(live), i = 0; irn; irn = pset_next(live), i++) {
		DBG((dbg, LEVEL_1, "\t%+F\n", irn));
		nodes[i] = irn;
	}

	perm = be_new_Perm(cls, irg, bl, n, nodes);
	sched_add_after(pos, perm);

	for(i = 0; i < n; ++i) {
		ir_node *perm_op           = get_irn_n(perm, i);
		const arch_register_t *reg = arch_get_irn_register(arch_env, perm_op);

		ir_mode *mode = get_irn_mode(perm_op);
		ir_node *proj = new_r_Proj(irg, bl, perm, mode, i);
		arch_set_irn_register(arch_env, proj, reg);

		sched_add_after(curr, proj);

		be_introduce_copies(dom_front, perm_op, 1, copies);