4 * @author Sebastian Hack
6 * Backend node support.
8 * This file provides Perm, Copy, Spill and Reload nodes.
10 * Copyright (C) 2005 Universitaet Karlsruhe
11 * Released under the GPL
26 #include "bitfiddle.h"
36 #include "besched_t.h"
41 /* Sometimes we want to put const nodes into get_irn_generic_attr ... */
42 #define get_irn_attr(irn) get_irn_generic_attr((ir_node *) (irn))
/* Tag value: set_op_tag() stores &be_node_tag in every backend ir_op, and
 * is_be_node() identifies backend nodes by comparing against this address. */
44 static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O');
/* NOTE(review): the type definitions below are only partially visible in this
 * excerpt; comments mark which type each fragment appears to belong to. */
46 typedef enum _node_kind_t {
57 const arch_register_class_t *cls;
/* Kinds of register requirements (be_req_t.kind). */
64 be_req_kind_old_limited,
65 be_req_kind_negate_old_limited,
66 be_req_kind_single_reg
/* be_req_t: an arch_register_req_t plus irn flags and kind-specific data. */
70 arch_register_req_t req;
72 arch_irn_flags_t flags;
/* Payload for be_req_kind_(negate_)old_limited: the wrapped limited fn. */
75 void (*old_limited)(void *ptr, bitset_t *bs);
76 void *old_limited_env;
/* Payload for be_req_kind_single_reg: the single admissible register. */
79 const arch_register_t *single_reg;
/* be_reg_data_t: per-result record -- assigned register plus requirements. */
84 const arch_register_t *reg;
/* be_node_attr_t: attribute common to all backend nodes. */
91 const arch_register_class_t *cls;
92 be_reg_data_t *reg_data;
/* be_stack_attr_t: attribute of the stack-modifying nodes (IncSP/AddSP). */
96 be_node_attr_t node_attr;
97 int offset; /**< The offset by which the stack shall be increased/decreased. */
98 be_stack_dir_t dir; /**< The direction in which the stack shall be modified (along or in the other direction). */
/* be_frame_attr_t: attribute of nodes that carry a frame entity. */
102 be_node_attr_t node_attr;
/* be_spill_attr_t: frame attribute plus the context the spill belongs to. */
108 be_frame_attr_t frame_attr;
109 ir_node *spill_ctx; /**< The node in whose context this spill was introduced. */
/* One ir_op per backend operation; all created in be_node_init(). */
112 static ir_op *op_Spill;
113 static ir_op *op_Reload;
114 static ir_op *op_Perm;
115 static ir_op *op_Copy;
116 static ir_op *op_Keep;
117 static ir_op *op_Call;
118 static ir_op *op_Return;
119 static ir_op *op_IncSP;
120 static ir_op *op_AddSP;
121 static ir_op *op_RegParams;
122 static ir_op *op_StackParam;
123 static ir_op *op_FrameAddr;
124 static ir_op *op_FrameLoad;
125 static ir_op *op_FrameStore;
/* First opcode number handed out for backend ops; -1 until be_node_init(). */
127 static int beo_base = -1;
129 static const ir_op_ops be_node_op_ops;
/* One-letter shorthands for irop flags used in the new_ir_op() calls below. */
131 #define N irop_flag_none
132 #define L irop_flag_labeled
133 #define C irop_flag_commutative
134 #define X irop_flag_cfopcode
135 #define I irop_flag_ip_cfopcode
136 #define F irop_flag_fragile
137 #define Y irop_flag_forking
138 #define H irop_flag_highlevel
139 #define c irop_flag_constlike
140 #define K irop_flag_keep
/* Registers all backend opcodes with the irop subsystem and tags each op so
 * is_be_node() can recognize backend nodes. Guarded by `inited`, so repeated
 * calls are harmless. */
142 void be_node_init(void) {
143 static int inited = 0;
150 /* Acquire all needed opcodes. */
/* NOTE(review): `beo_Last - 1` reserves one opcode fewer than beo_Last.
 * Fourteen ops are created below; whether this count is right depends on the
 * beo_* enum layout, which is not visible here -- confirm (possible
 * off-by-one). */
151 beo_base = get_next_ir_opcodes(beo_Last - 1);
/* Each op: opcode, name, pinning, flags (shorthands above), arity,
 * first-variadic-position, attribute size, shared op_ops. */
153 op_Spill = new_ir_op(beo_base + beo_Spill, "Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_spill_attr_t), &be_node_op_ops);
154 op_Reload = new_ir_op(beo_base + beo_Reload, "Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
155 op_Perm = new_ir_op(beo_base + beo_Perm, "Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
156 op_Copy = new_ir_op(beo_base + beo_Copy, "Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
157 op_Keep = new_ir_op(beo_base + beo_Keep, "Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
158 op_Call = new_ir_op(beo_base + beo_Call, "Call", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
159 op_Return = new_ir_op(beo_base + beo_Return, "Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
160 op_AddSP = new_ir_op(beo_base + beo_AddSP, "AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
161 op_IncSP = new_ir_op(beo_base + beo_IncSP, "IncSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
162 op_RegParams = new_ir_op(beo_base + beo_RegParams, "RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
163 op_StackParam = new_ir_op(beo_base + beo_StackParam, "StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
164 op_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "FrameAddr", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
165 op_FrameLoad = new_ir_op(beo_base + beo_FrameLoad, "FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
166 op_FrameStore = new_ir_op(beo_base + beo_FrameStore, "FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
/* Tag every op so is_be_node() recognizes nodes built from them. */
168 set_op_tag(op_Spill, &be_node_tag);
169 set_op_tag(op_Reload, &be_node_tag);
170 set_op_tag(op_Perm, &be_node_tag);
171 set_op_tag(op_Copy, &be_node_tag);
172 set_op_tag(op_Keep, &be_node_tag);
173 set_op_tag(op_Call, &be_node_tag);
174 set_op_tag(op_Return, &be_node_tag);
175 set_op_tag(op_AddSP, &be_node_tag);
176 set_op_tag(op_IncSP, &be_node_tag);
177 set_op_tag(op_RegParams, &be_node_tag);
178 set_op_tag(op_StackParam, &be_node_tag);
179 set_op_tag(op_FrameLoad, &be_node_tag);
180 set_op_tag(op_FrameStore, &be_node_tag);
181 set_op_tag(op_FrameAddr, &be_node_tag);
/* Initializes the common backend attribute of `irn`: records max_reg_data and
 * allocates/zeroes one be_reg_data_t per potential result on the graph's
 * obstack, defaulting each out requirement to `cls` / normal type. */
184 static void *init_node_attr(ir_node* irn, const arch_register_class_t *cls, ir_graph *irg, int max_reg_data)
186 be_node_attr_t *a = get_irn_attr(irn);
188 a->max_reg_data = max_reg_data;
192 if(max_reg_data > 0) {
/* Obstack allocation: freed with the graph, never individually. */
195 a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), max_reg_data);
196 memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0]));
197 for(i = 0; i < max_reg_data; ++i) {
198 a->reg_data[i].req.req.cls = cls;
199 a->reg_data[i].req.req.type = arch_register_req_type_normal;
/* A node is a backend node iff its op carries the be_node_tag address. */
206 static INLINE int is_be_node(const ir_node *irn)
208 return get_op_tag(get_irn_op(irn)) == &be_node_tag;
/* Maps a node's global opcode back to the beo_* range; beo_NoBeOp for
 * non-backend nodes. */
211 be_opcode_t be_get_irn_opcode(const ir_node *irn)
213 return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp;
/* If *node is a Proj, redirect *node to its predecessor and return the proj
 * number; callers pass pos == -1 when *node may be a Proj. */
216 static int redir_proj(const ir_node **node, int pos)
218 const ir_node *n = *node;
221 assert(pos == -1 && "Illegal pos for a Proj");
222 *node = get_Proj_pred(n);
223 return get_Proj_proj(n);
/* arch_irn_ops callback: records the allocated register for the result
 * denoted by `irn` (possibly a Proj of a backend node). */
230 be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
235 out_pos = redir_proj((const ir_node **) &irn, -1);
236 a = get_irn_attr(irn);
238 assert(is_be_node(irn));
239 assert(out_pos < a->max_reg_data && "position too high");
240 a->reg_data[out_pos].reg = reg;
/* Constructor: Spill of `to_spill` in context `ctx`; result is mode_M (no
 * register result, hence 0 reg-data records). Frame entity assigned later. */
244 ir_node *be_new_Spill(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *to_spill, ir_node *ctx)
251 res = new_ir_node(NULL, irg, bl, op_Spill, mode_M, 1, in);
252 a = init_node_attr(res, cls, irg, 0);
253 a->frame_attr.ent = NULL;
254 a->frame_attr.offset = 0;
/* Constructor: Reload of a value of `mode` from spill memory `mem`. */
259 ir_node *be_new_Reload(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *mem)
265 res = new_ir_node(NULL, irg, bl, op_Reload, mode, 1, in);
266 init_node_attr(res, cls, irg, 1);
/* Constructor: Perm permuting the n values in `in[]`; one result per input. */
270 ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
272 ir_node *irn = new_ir_node(NULL, irg, bl, op_Perm, mode_T, n, in);
273 init_node_attr(irn, cls, irg, n);
/* Constructor: register-to-register Copy of `op`. */
277 ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op)
283 res = new_ir_node(NULL, irg, bl, op_Copy, get_irn_mode(op), 1, in);
284 init_node_attr(res, cls, irg, 1);
/* Constructor: Keep, pinning its operands alive; produces no result. */
288 ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
292 irn = new_ir_node(NULL, irg, bl, op_Keep, mode_ANY, n, in);
293 init_node_attr(irn, cls, irg, 0);
/* Constructor: backend Call. Prepends mem, sp and ptr to the n argument
 * nodes, so real_n = n + 3; reg-data is sized for max(n_outs, real_n). */
298 ir_node *be_new_Call(ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr, int n_outs, int n, ir_node *in[])
/* NOTE(review): malloc result is not checked here, and the free of real_in
 * is not visible in this excerpt -- confirm it is released after
 * new_ir_node() copies the in-array. */
304 real_in = malloc(sizeof(real_in[0]) * (real_n));
309 memcpy(&real_in[3], in, n * sizeof(in[0]));
311 irn = new_ir_node(NULL, irg, bl, op_Call, mode_T, real_n, real_in);
312 init_node_attr(irn, NULL, irg, (n_outs > real_n ? n_outs : real_n));
/* Constructor: backend Return carrying the n result values. */
316 ir_node *be_new_Return(ir_graph *irg, ir_node *bl, int n, ir_node *in[])
318 ir_node *irn = new_ir_node(NULL, irg, bl, op_Return, mode_X, n, in);
319 init_node_attr(irn, NULL, irg, n);
/* Constructor: IncSP -- moves the stack pointer by `offset` in direction
 * `dir`; the result is constrained to the stack register and marked ignore. */
324 ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, unsigned offset, be_stack_dir_t dir)
332 irn = new_ir_node(NULL, irg, bl, op_IncSP, sp->reg_class->mode, 2, in);
333 a = init_node_attr(irn, sp->reg_class, irg, 1);
337 be_node_set_flags(irn, -1, arch_irn_flags_ignore);
339 /* Set output constraint to stack register. */
340 be_set_constr_single_reg(irn, -1, sp);
341 be_node_set_irn_reg(NULL, irn, sp);
/* Constructor: AddSP -- adds `op` to the stack pointer; same sp constraint
 * handling as IncSP. */
346 ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *op)
354 irn = new_ir_node(NULL, irg, bl, op_AddSP, sp->reg_class->mode, 2, in);
355 a = init_node_attr(irn, sp->reg_class, irg, 1);
357 be_node_set_flags(irn, -1, arch_irn_flags_ignore);
359 /* Set output constraint to stack register. */
360 be_set_constr_single_reg(irn, -1, sp);
361 be_node_set_irn_reg(NULL, irn, sp);
/* Constructor: StackParam -- loads a parameter at entity `ent` relative to
 * the frame pointer. */
366 ir_node *be_new_StackParam(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, entity *ent)
372 in[0] = frame_pointer;
373 irn = new_ir_node(NULL, irg, bl, op_StackParam, mode, 1, in);
374 a = init_node_attr(irn, cls, irg, 1);
/* Constructor: RegParams -- mode_T source of the n_outs register-passed
 * parameters. */
379 ir_node *be_new_RegParams(ir_graph *irg, ir_node *bl, int n_outs)
384 irn = new_ir_node(NULL, irg, bl, op_RegParams, mode_T, 0, in);
385 init_node_attr(irn, NULL, irg, n_outs);
/* Constructor: FrameLoad -- load from a frame entity; frame pointer operand
 * is constrained to cls_frame, the load result to cls_data. */
389 ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
390 ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, entity *ent)
398 irn = new_ir_node(NULL, irg, bl, op_FrameLoad, mode_T, 2, in);
399 a = init_node_attr(irn, NULL, irg, 3);
402 be_node_set_reg_class(irn, 1, cls_frame);
/* Negative pos encodes an output position (see get_req()). */
403 be_node_set_reg_class(irn, -(pn_Load_res + 1), cls_data);
/* Constructor: FrameStore -- store `data` to a frame entity. */
407 ir_node *be_new_FrameStore(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
408 ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_node *data, entity *ent)
417 irn = new_ir_node(NULL, irg, bl, op_FrameStore, mode_T, 3, in);
418 a = init_node_attr(irn, NULL, irg, 3);
421 be_node_set_reg_class(irn, 1, cls_frame);
422 be_node_set_reg_class(irn, 2, cls_data);
/* Constructor: FrameAddr -- address of a frame entity; input and output are
 * both constrained to cls_frame. */
426 ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, entity *ent)
433 irn = new_ir_node(NULL, irg, bl, op_FrameAddr, get_irn_mode(frame), 1, in);
434 a = init_node_attr(irn, NULL, irg, 1);
437 be_node_set_reg_class(irn, 0, cls_frame);
438 be_node_set_reg_class(irn, -1, cls_frame);
442 int be_is_Spill (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Spill ; }
443 int be_is_Reload (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Reload ; }
444 int be_is_Copy (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Copy ; }
445 int be_is_Perm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Perm ; }
446 int be_is_Keep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Keep ; }
447 int be_is_Call (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Call ; }
448 int be_is_Return (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Return ; }
449 int be_is_IncSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_IncSP ; }
450 int be_is_AddSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_AddSP ; }
451 int be_is_RegParams (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_RegParams ; }
452 int be_is_StackParam (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_StackParam ; }
453 int be_is_FrameAddr (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameAddr ; }
454 int be_is_FrameLoad (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameLoad ; }
455 int be_is_FrameStore (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameStore ; }
/* True iff the node's opcode carries a be_frame_attr_t (see the switch). */
457 int be_has_frame_entity(const ir_node *irn)
459 switch(be_get_irn_opcode(irn)) {
/* Returns the frame entity of a frame-carrying node; for a Reload, chases
 * back to its Spill to find the entity. */
471 entity *be_get_frame_entity(const ir_node *irn)
473 if(be_has_frame_entity(irn)) {
474 be_frame_attr_t *a = get_irn_attr(irn);
478 else if(be_get_irn_opcode(irn) == beo_Reload)
479 return be_get_spill_entity(irn);
/* Limited-callback dispatcher: fills `bs` with the admissible registers
 * according to the be_req_t kind (wrapped old callback, its negation, or a
 * single fixed register). */
484 static void be_limited(void *data, bitset_t *bs)
486 be_req_t *req = data;
489 case be_req_kind_negate_old_limited:
490 case be_req_kind_old_limited:
491 req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs);
492 if(req->kind == be_req_kind_negate_old_limited)
495 case be_req_kind_single_reg:
496 bitset_clear_all(bs);
497 bitset_set(bs, req->x.single_reg->index);
/* Maps a signed position to the requirement record: pos >= 0 selects the
 * in-requirement for operand pos; pos < 0 selects the out-requirement for
 * result -(pos + 1). */
502 static INLINE be_req_t *get_req(ir_node *irn, int pos)
504 int idx = pos < 0 ? -(pos + 1) : pos;
505 be_node_attr_t *a = get_irn_attr(irn);
506 be_reg_data_t *rd = &a->reg_data[idx];
507 be_req_t *r = pos < 0 ? &rd->req : &rd->in_req;
509 assert(is_be_node(irn));
510 assert(!(pos >= 0) || pos < get_irn_arity(irn));
/* NOTE(review): this bound uses `<=` while the sibling checks elsewhere in
 * this file use `<` ("position too high"); `<=` would admit an index equal
 * to max_reg_data, one past the array -- confirm intended (looks like an
 * off-by-one). */
511 assert(!(pos < 0) || -(pos + 1) <= a->max_reg_data);
/* Constrains the operand/result at `pos` to exactly one register `reg`
 * (limited requirement resolved by be_limited). */
516 void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg)
518 be_req_t *r = get_req(irn, pos);
520 r->kind = be_req_kind_single_reg;
521 r->x.single_reg = reg;
522 r->req.limited = be_limited;
/* limited_env points back at the be_req_t itself; copy_attr() re-seats it. */
523 r->req.limited_env = r;
524 r->req.type = arch_register_req_type_limited;
525 r->req.cls = reg->reg_class;
/* Installs an externally supplied limited requirement at `pos`, wrapping its
 * callback so be_limited can forward to it. */
528 void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req)
530 be_req_t *r = get_req(irn, pos);
532 assert(arch_register_req_is(req, limited));
534 r->kind = be_req_kind_old_limited;
535 r->req.limited = be_limited;
536 r->req.limited_env = r;
537 r->req.type = arch_register_req_type_limited;
538 r->req.cls = req->cls;
540 r->x.old_limited.old_limited = req->limited;
541 r->x.old_limited.old_limited_env = req->limited_env;
/* Sets the arch_irn_flags for the requirement at `pos`. */
544 void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags)
546 be_req_t *r = get_req(irn, pos);
/* Sets the register class for the requirement at `pos`. */
550 void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls)
552 be_req_t *r = get_req(irn, pos);
/* IncSP attribute accessors (offset and stack direction). */
556 void be_set_IncSP_offset(ir_node *irn, unsigned offset)
558 be_stack_attr_t *a = get_irn_attr(irn);
559 assert(be_is_IncSP(irn));
563 unsigned be_get_IncSP_offset(ir_node *irn)
565 be_stack_attr_t *a = get_irn_attr(irn);
566 assert(be_is_IncSP(irn));
570 void be_set_IncSP_direction(ir_node *irn, be_stack_dir_t dir)
572 be_stack_attr_t *a = get_irn_attr(irn);
573 assert(be_is_IncSP(irn));
577 be_stack_dir_t be_get_IncSP_direction(ir_node *irn)
579 be_stack_attr_t *a = get_irn_attr(irn);
580 assert(be_is_IncSP(irn));
/* Assigns the frame entity a Spill stores into. */
584 void be_set_Spill_entity(ir_node *irn, entity *ent)
586 be_spill_attr_t *a = get_irn_attr(irn);
587 assert(be_is_Spill(irn));
588 a->frame_attr.ent = ent;
/* DFS over the operands of `irn` (bounded by the visited counter) looking
 * for a Spill node; used to resolve a Reload's spill entity. */
591 static ir_node *find_a_spill_walker(ir_node *irn, unsigned visited_nr)
593 if(get_irn_visited(irn) < visited_nr) {
594 set_irn_visited(irn, visited_nr);
598 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
/* NOTE(review): this inner `n` shadows the loop bound `n` from the for-init
 * above -- harmless (the condition is evaluated in the outer scope) but
 * confusing; a rename would help. */
599 ir_node *n = find_a_spill_walker(get_irn_n(irn, i), visited_nr);
605 else if(be_get_irn_opcode(irn) == beo_Spill)
/* Returns the node in whose context this Spill was introduced. */
612 ir_node *be_get_Spill_context(const ir_node *irn) {
613 const be_spill_attr_t *a = get_irn_attr(irn);
614 assert(be_is_Spill(irn));
619 * Finds a spill for a reload.
620 * If the reload is directly using the spill, this is simple,
621 * else we perform DFS from the reload (over all PhiMs) and return
622 * the first spill node we find.
624 static INLINE ir_node *find_a_spill(const ir_node *irn)
626 ir_graph *irg = get_irn_irg(irn);
/* Bump the graph's visited counter so the walker sees everything as fresh. */
627 unsigned visited_nr = get_irg_visited(irg) + 1;
629 assert(be_is_Reload(irn));
630 set_irg_visited(irg, visited_nr);
631 return find_a_spill_walker((ir_node *)irn, visited_nr);
/* Returns the frame entity a spill/reload refers to; for a Reload, searches
 * for the originating Spill first. */
634 entity *be_get_spill_entity(const ir_node *irn)
/* NOTE(review): `opc` appears unused in the visible lines (the switch below
 * uses be_get_irn_opcode instead) -- candidate for removal. */
636 int opc = get_irn_opcode(irn);
638 switch(be_get_irn_opcode(irn)) {
640 return be_get_spill_entity(find_a_spill(irn));
643 be_spill_attr_t *a = get_irn_attr(irn);
644 return a->frame_attr.ent;
647 assert(0 && "Must give spill/reload node");
/* Creates a Spill of `irn` and schedules it after irn, skipping over any
 * trailing Phis (a spill may not be placed between Phis). */
653 ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *ctx)
655 const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
657 ir_node *bl = get_nodes_block(irn);
658 ir_graph *irg = get_irn_irg(bl);
659 ir_node *spill = be_new_Spill(cls, irg, bl, irn, ctx);
663 * search the right insertion point. a spill of a phi cannot be put
664 * directly after the phi, if there are some phis behind the one which
667 insert = sched_next(irn);
668 while(is_Phi(insert) && !sched_is_end(insert))
669 insert = sched_next(insert);
671 sched_add_before(insert, spill);
/* Creates a Reload from `spill`, rewires operand `pos` of `irn` to it, and
 * schedules the reload directly before irn. */
675 ir_node *be_reload(const arch_env_t *arch_env,
676 const arch_register_class_t *cls,
677 ir_node *irn, int pos, ir_mode *mode, ir_node *spill)
681 ir_node *bl = get_nodes_block(irn);
682 ir_graph *irg = get_irn_irg(bl);
/* The memory producer must be a Spill or a PhiM merging spills. */
684 assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
686 reload = be_new_Reload(cls, irg, bl, mode, spill);
688 set_irn_n(irn, pos, reload);
689 sched_add_before(irn, reload);
/* Copies the out-requirement for result `out_pos` into *req; positions past
 * max_reg_data yield a "none" requirement. */
693 static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos)
695 const be_node_attr_t *a = get_irn_attr(irn);
697 if(out_pos < a->max_reg_data)
698 memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));
700 req->type = arch_register_req_type_none;
/* Copies the in-requirement for operand `pos` into *req, same fallback. */
707 static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos)
709 const be_node_attr_t *a = get_irn_attr(irn);
/* NOTE(review): `n` duplicates the get_irn_arity() call in the condition
 * below; one of the two is redundant. */
710 int n = get_irn_arity(irn);
712 if(pos < get_irn_arity(irn) && pos < a->max_reg_data)
713 memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));
715 req->type = arch_register_req_type_none;
/* arch_irn_ops callback: fetch the register requirement for `pos`. mode_T
 * nodes answer through their Projs (redir_proj). */
722 static const arch_register_req_t *
723 be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
728 if(get_irn_mode(irn) == mode_T)
731 out_pos = redir_proj((const ir_node **) &irn, pos);
732 assert(is_be_node(irn));
733 return put_out_reg_req(req, irn, out_pos);
737 return is_be_node(irn) ? put_in_reg_req(req, irn, pos) : NULL;
/* arch_irn_ops callback: the register previously assigned via
 * be_node_set_irn_reg() for the result `irn` denotes. */
743 const arch_register_t *
744 be_node_get_irn_reg(const void *_self, const ir_node *irn)
749 out_pos = redir_proj((const ir_node **) &irn, -1);
750 a = get_irn_attr(irn);
752 assert(is_be_node(irn));
753 assert(out_pos < a->max_reg_data && "position too high");
755 return a->reg_data[out_pos].reg;
/* arch_irn_ops callback: map backend opcodes to arch_irn_class_* values. */
758 static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
760 redir_proj((const ir_node **) &irn, -1);
762 switch(be_get_irn_opcode(irn)) {
763 #define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b;
/* arch_irn_ops callback: the flags stored on the out-requirement record. */
776 static arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn)
781 out_pos = redir_proj((const ir_node **) &irn, -1);
782 a = get_irn_attr(irn);
784 assert(is_be_node(irn));
785 assert(out_pos < a->max_reg_data && "position too high");
787 return a->reg_data[out_pos].req.flags;
/* arch_irn_ops callbacks: frame entity lookup and stack bias (the latter's
 * body is not visible in this excerpt). */
790 static entity *be_node_get_frame_entity(const void *self, const ir_node *irn)
792 return be_get_frame_entity(irn);
795 static void be_node_set_stack_bias(const void *self, ir_node *irn, int bias)
/* vtables wiring the callbacks above into the arch interface. */
800 static const arch_irn_ops_if_t be_node_irn_ops_if = {
801 be_node_get_irn_reg_req,
806 be_node_get_frame_entity,
807 be_node_set_stack_bias
810 static const arch_irn_ops_t be_node_irn_ops = {
/* Handler entry point: backend nodes (and their Projs) are served by
 * be_node_irn_ops, everything else by NULL. */
814 const void *be_node_get_arch_ops(const arch_irn_handler_t *self, const ir_node *irn)
816 redir_proj((const ir_node **) &irn, -1);
817 return is_be_node(irn) ? &be_node_irn_ops : NULL;
820 const arch_irn_handler_t be_node_irn_handler = {
/* Pretty-prints one be_req_t (flags, then the formatted requirement). */
825 static void dump_node_req(FILE *f, be_req_t *req)
828 int did_something = 0;
829 const char *suffix = "";
831 if(req->flags != arch_irn_flags_none) {
832 fprintf(f, "flags: ");
/* Iterate over the individual flag bits up to the highest defined flag. */
833 for(i = arch_irn_flags_none; i <= log2_ceil(arch_irn_flags_last); ++i) {
834 if(req->flags & (1 << i)) {
835 fprintf(f, "%s%s", suffix, arch_irn_flag_str(1 << i));
843 if(req->req.cls != 0) {
846 arch_register_req_format(tmp, sizeof(tmp), &req->req);
847 fprintf(f, "%s", tmp);
/* Dumps assigned registers plus in/out requirements of all reg-data slots. */
855 static void dump_node_reqs(FILE *f, ir_node *irn)
858 be_node_attr_t *a = get_irn_attr(irn);
860 fprintf(f, "registers: \n");
861 for(i = 0; i < a->max_reg_data; ++i) {
862 be_reg_data_t *rd = &a->reg_data[i];
864 fprintf(f, "#%d: %s\n", i, rd->reg->name);
867 fprintf(f, "in requirements\n");
868 for(i = 0; i < a->max_reg_data; ++i) {
869 dump_node_req(f, &a->reg_data[i].in_req);
872 fprintf(f, "\nout requirements\n");
873 for(i = 0; i < a->max_reg_data; ++i) {
874 dump_node_req(f, &a->reg_data[i].req);
/* ir_op dump callback: emits opcode/mode/attribute text per `reason`. */
878 static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
880 be_node_attr_t *at = get_irn_attr(irn);
882 assert(is_be_node(irn));
885 case dump_node_opcode_txt:
/* NOTE(review): non-literal format string -- a '%' in the name would be
 * interpreted by fprintf; prefer fputs() or fprintf(f, "%s", ...). Same on
 * the mode line below. */
886 fprintf(f, get_op_name(get_irn_op(irn)));
888 case dump_node_mode_txt:
889 fprintf(f, get_mode_name(get_irn_mode(irn)));
891 case dump_node_nodeattr_txt:
893 case dump_node_info_txt:
894 dump_node_reqs(f, irn);
896 if(be_has_frame_entity(irn)) {
897 be_frame_attr_t *a = (be_frame_attr_t *) at;
899 unsigned ofs = get_entity_offset_bytes(a->ent);
900 ir_fprintf(f, "frame entity: %+F offset %x (%d)\n", a->ent, ofs, ofs);
904 switch(be_get_irn_opcode(irn)) {
907 be_spill_attr_t *a = (be_spill_attr_t *) at;
908 ir_fprintf(f, "spill context: %+F\n", a->spill_ctx);
914 be_stack_attr_t *a = (be_stack_attr_t *) at;
915 fprintf(f, "offset: %u\n", a->offset);
916 fprintf(f, "direction: %s\n", a->dir == be_stack_dir_along ? "along" : "against");
927 * Copies the backend specific attributes from old node to new node.
929 static void copy_attr(const ir_node *old_node, ir_node *new_node)
931 be_node_attr_t *old_attr = get_irn_attr(old_node);
932 be_node_attr_t *new_attr = get_irn_attr(new_node);
935 assert(is_be_node(old_node));
936 assert(is_be_node(new_node));
/* Shallow copy of the whole attribute, then deep-copy the reg_data array. */
938 memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
939 new_attr->reg_data = NULL;
941 if(new_attr->max_reg_data > 0) {
942 new_attr->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(get_irn_irg(new_node)), new_attr->max_reg_data);
943 memcpy(new_attr->reg_data, old_attr->reg_data, new_attr->max_reg_data * sizeof(be_reg_data_t));
/* Re-seat the self-referential limited_env pointers so the copies point at
 * their own be_req_t, not the old node's (see be_set_constr_single_reg). */
945 for(i = 0; i < old_attr->max_reg_data; ++i) {
948 r = &new_attr->reg_data[i].req;
949 r->req.limited_env = r;
951 r = &new_attr->reg_data[i].in_req;
952 r->req.limited_env = r;
/* ir_op_ops vtable installing dump_node/copy_attr for all backend ops. */
957 static const ir_op_ops be_node_op_ops = {
/* Computes the set of nodes of class `cls` live at `pos`: seeds `live` with
 * the block's live-end values, then walks the schedule backwards, removing
 * defs and adding uses, stopping once `pos` is reached. */
973 pset *nodes_live_at(const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *pos, pset *live)
975 firm_dbg_module_t *dbg = firm_dbg_register("firm.be.node");
976 const ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
/* Seed with values live at the end of the block. */
980 live_foreach(bl, li) {
981 ir_node *irn = (ir_node *) li->irn;
982 if(live_is_end(li) && arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
983 pset_insert_ptr(live, irn);
986 sched_foreach_reverse(bl, irn) {
991 * If we encounter the node we want to insert the Perm after,
992 * exit immediately, so that this node is still live
997 DBG((dbg, LEVEL_1, "%+F\n", irn));
998 for(x = pset_first(live); x; x = pset_next(live))
999 DBG((dbg, LEVEL_1, "\tlive: %+F\n", x));
/* Defs kill liveness above the definition... */
1001 if(arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
1002 pset_remove_ptr(live, irn);
/* ...uses create it. */
1004 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
1005 ir_node *op = get_irn_n(irn, i);
1007 if(arch_irn_consider_in_reg_alloc(arch_env, cls, op))
1008 pset_insert_ptr(live, op);
/* Inserts a Perm of all `cls` values live after `pos`, schedules it there,
 * creates one Proj per permuted value and rewires users via SSA
 * reconstruction. (Function continues beyond this excerpt.) */
1015 ir_node *insert_Perm_after(const arch_env_t *arch_env,
1016 const arch_register_class_t *cls,
1017 dom_front_info_t *dom_front,
1020 ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
1021 ir_graph *irg = get_irn_irg(bl);
1022 pset *live = pset_new_ptr_default();
1023 firm_dbg_module_t *dbg = firm_dbg_register("be.node");
1025 ir_node *curr, *irn, *perm, **nodes;
1028 DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos));
/* NOTE(review): the trailing semicolon makes this `if` control nothing --
 * the condition's result is discarded. Either the call was meant as a plain
 * statement or a body is missing; confirm against the full source. */
1030 if(!nodes_live_at(arch_env, cls, pos, live));
1032 n = pset_count(live);
/* NOTE(review): malloc result unchecked; free not visible in this excerpt. */
1037 nodes = malloc(n * sizeof(nodes[0]));
1039 DBG((dbg, LEVEL_1, "live:\n"));
1040 for(irn = pset_first(live), i = 0; irn; irn = pset_next(live), i++) {
1041 DBG((dbg, LEVEL_1, "\t%+F\n", irn));
1045 perm = be_new_Perm(cls, irg, bl, n, nodes);
1046 sched_add_after(pos, perm);
/* For each Perm operand: make a Proj, carry over the register, schedule it,
 * and prepare the (old value, proj) pair for SSA reconstruction. */
1050 for(i = 0; i < n; ++i) {
1052 ir_node *perm_op = get_irn_n(perm, i);
1053 const arch_register_t *reg = arch_get_irn_register(arch_env, perm_op);
1055 ir_mode *mode = get_irn_mode(perm_op);
1056 ir_node *proj = new_r_Proj(irg, bl, perm, mode, i);
1057 arch_set_irn_register(arch_env, proj, reg);
1059 sched_add_after(curr, proj);
1062 copies[0] = perm_op;
1064 be_ssa_constr(dom_front, 2, copies);