 * @author Sebastian Hack
 * Backend node support.
 * This file provides Perm, Copy, Spill and Reload nodes.
 * Copyright (C) 2005 Universitaet Karlsruhe
 * Released under the GPL
#include "besched_t.h"
/* Sometimes we need to pass const nodes to get_irn_generic_attr(), so cast the const away here. */
#define get_irn_attr(irn) get_irn_generic_attr((ir_node *) (irn))
static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O');

typedef enum _node_kind_t {

	const arch_register_class_t *cls;

	be_req_kind_old_limited,
	be_req_kind_negate_old_limited,
	be_req_kind_single_reg
	arch_register_req_t req;

	arch_irn_flags_t flags;

	void (*old_limited)(void *ptr, bitset_t *bs);
	void *old_limited_env;

	const arch_register_t *single_reg;

	const arch_register_t *reg;

	const arch_register_class_t *cls;
	be_reg_data_t *reg_data;

	be_node_attr_t node_attr;
	ir_node *spill_ctx;  /**< The node in whose context this spill was introduced. */
	entity *ent;         /**< The entity in the stack frame the spill writes to. */

	be_node_attr_t node_attr;
	unsigned offset;     /**< The offset by which the stack shall be increased/decreased. */
	be_stack_dir_t dir;  /**< The direction in which the stack shall be modified (along the growth direction, i.e. expand, or against it, i.e. shrink). */
static ir_op *op_Spill;
static ir_op *op_Reload;
static ir_op *op_Perm;
static ir_op *op_Copy;
static ir_op *op_Keep;
static ir_op *op_Call;
static ir_op *op_Return;
static ir_op *op_IncSP;
static ir_op *op_AddSP;
static ir_op *op_RegParams;
static ir_op *op_StackParam;
static ir_op *op_NoReg;

static int beo_base = -1;

static const ir_op_ops be_node_op_ops;

#define N   irop_flag_none
#define L   irop_flag_labeled
#define C   irop_flag_commutative
#define X   irop_flag_cfopcode
#define I   irop_flag_ip_cfopcode
#define F   irop_flag_fragile
#define Y   irop_flag_forking
#define H   irop_flag_highlevel
#define c   irop_flag_constlike
#define K   irop_flag_keep
void be_node_init(void) {
	static int inited = 0;

	if(inited)
		return;

	inited = 1;

	/* Acquire all needed opcodes. */
	beo_base = get_next_ir_opcodes(beo_Last - 1);

	op_Spill      = new_ir_op(beo_base + beo_Spill,      "Spill",      op_pin_state_mem_pinned, N, oparity_unary,    0, sizeof(be_spill_attr_t), &be_node_op_ops);
	op_Reload     = new_ir_op(beo_base + beo_Reload,     "Reload",     op_pin_state_mem_pinned, N, oparity_zero,     0, sizeof(be_node_attr_t),  &be_node_op_ops);
	op_Perm       = new_ir_op(beo_base + beo_Perm,       "Perm",       op_pin_state_pinned,     N, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
	op_Copy       = new_ir_op(beo_base + beo_Copy,       "Copy",       op_pin_state_floats,     N, oparity_unary,    0, sizeof(be_node_attr_t),  &be_node_op_ops);
	op_Keep       = new_ir_op(beo_base + beo_Keep,       "Keep",       op_pin_state_pinned,     K, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
	op_NoReg      = new_ir_op(beo_base + beo_NoReg,      "NoReg",      op_pin_state_floats,     N, oparity_zero,     0, sizeof(be_node_attr_t),  &be_node_op_ops);
	op_Call       = new_ir_op(beo_base + beo_Call,       "Call",       op_pin_state_pinned,     N, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
	op_Return     = new_ir_op(beo_base + beo_Return,     "Return",     op_pin_state_pinned,     X, oparity_variable, 0, sizeof(be_node_attr_t),  &be_node_op_ops);
	op_AddSP      = new_ir_op(beo_base + beo_AddSP,      "AddSP",      op_pin_state_pinned,     N, oparity_unary,    0, sizeof(be_stack_attr_t), &be_node_op_ops);
	op_IncSP      = new_ir_op(beo_base + beo_IncSP,      "IncSP",      op_pin_state_pinned,     N, oparity_binary,   0, sizeof(be_stack_attr_t), &be_node_op_ops);
	op_RegParams  = new_ir_op(beo_base + beo_RegParams,  "RegParams",  op_pin_state_pinned,     N, oparity_zero,     0, sizeof(be_node_attr_t),  &be_node_op_ops);
	op_StackParam = new_ir_op(beo_base + beo_StackParam, "StackParam", op_pin_state_pinned,     N, oparity_unary,    0, sizeof(be_stack_attr_t), &be_node_op_ops);
	set_op_tag(op_Spill,      &be_node_tag);
	set_op_tag(op_Reload,     &be_node_tag);
	set_op_tag(op_Perm,       &be_node_tag);
	set_op_tag(op_Copy,       &be_node_tag);
	set_op_tag(op_Keep,       &be_node_tag);
	set_op_tag(op_NoReg,      &be_node_tag);
	set_op_tag(op_Call,       &be_node_tag);
	set_op_tag(op_Return,     &be_node_tag);
	set_op_tag(op_AddSP,      &be_node_tag);
	set_op_tag(op_IncSP,      &be_node_tag);
	set_op_tag(op_RegParams,  &be_node_tag);
	set_op_tag(op_StackParam, &be_node_tag);
}
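
/**
 * Initializes the generic backend node attributes: records the maximum
 * number of register data entries, allocates them on the graph's obstack
 * and gives every entry a normal requirement of the given register class.
 */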
static void *init_node_attr(ir_node *irn, const arch_register_class_t *cls, ir_graph *irg, int max_reg_data)
{
	be_node_attr_t *a = get_irn_attr(irn);

	a->max_reg_data = max_reg_data;

	if(max_reg_data > 0) {
		int i;

		a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), max_reg_data);
		memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0]));
		for(i = 0; i < max_reg_data; ++i) {
			a->reg_data[i].req.req.cls  = cls;
			a->reg_data[i].req.req.type = arch_register_req_type_normal;
		}
	}

	return a;
}
static INLINE int is_be_node(const ir_node *irn)
{
	return get_op_tag(get_irn_op(irn)) == &be_node_tag;
}

be_opcode_t be_get_irn_opcode(const ir_node *irn)
{
	return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp;
}
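
/**
 * If the node is a Proj, returns its proj number and redirects the node
 * pointer to the Proj's predecessor; otherwise pos is returned unchanged.
 * This lets queries on a Proj be answered by the multi-result node below it.
 */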
static int redir_proj(const ir_node **node, int pos)
{
	const ir_node *n = *node;

	if(is_Proj(n)) {
		assert(pos == -1 && "Illegal pos for a Proj");
		*node = get_Proj_pred(n);
		return get_Proj_proj(n);
	}

	return pos;
}
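
/**
 * arch interface: records the register assigned to a result. A Proj is
 * redirected first, so the register lands in the reg_data slot of the
 * underlying node at the projected position.
 */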
void be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
{
	int out_pos;
	be_node_attr_t *a;

	out_pos = redir_proj((const ir_node **) &irn, -1);
	a       = get_irn_attr(irn);

	assert(is_be_node(irn));
	assert(out_pos < a->max_reg_data && "position too high");
	a->reg_data[out_pos].reg = reg;
}
ir_node *be_new_Spill(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *to_spill, ir_node *ctx)
{
	be_spill_attr_t *a;
	ir_node *in[1];
	ir_node *res;

	in[0] = to_spill;
	res   = new_ir_node(NULL, irg, bl, op_Spill, mode_M, 1, in);
	a     = init_node_attr(res, cls, irg, 0);
	a->ent       = NULL;
	a->spill_ctx = ctx;
	return res;
}

ir_node *be_new_Reload(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *mem)
{
	ir_node *in[1];
	ir_node *res;

	in[0] = mem;
	res   = new_ir_node(NULL, irg, bl, op_Reload, mode, 1, in);
	init_node_attr(res, cls, irg, 1);
	return res;
}

ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
{
	ir_node *irn = new_ir_node(NULL, irg, bl, op_Perm, mode_T, n, in);
	init_node_attr(irn, cls, irg, n);
	return irn;
}

ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op)
{
	ir_node *in[1];
	ir_node *res;

	in[0] = op;
	res   = new_ir_node(NULL, irg, bl, op_Copy, get_irn_mode(op), 1, in);
	init_node_attr(res, cls, irg, 1);
	return res;
}

ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
{
	ir_node *irn = new_ir_node(NULL, irg, bl, op_Keep, mode_ANY, n, in);
	init_node_attr(irn, cls, irg, 0);
	return irn;
}
ir_node *be_new_Call(ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr, int n_outs, int n, ir_node *in[])
{
	int real_n = 3 + n;
	ir_node *irn;
	ir_node **real_in;

	real_in = malloc(sizeof(real_in[0]) * (real_n));

	real_in[0] = mem;
	real_in[1] = sp;
	real_in[2] = ptr;
	memcpy(&real_in[3], in, n * sizeof(in[0]));

	irn = new_ir_node(NULL, irg, bl, op_Call, mode_T, real_n, real_in);
	init_node_attr(irn, NULL, irg, (n_outs > real_n ? n_outs : real_n));
	return irn;
}
ir_node *be_new_Return(ir_graph *irg, ir_node *bl, int n, ir_node *in[])
{
	ir_node *irn = new_ir_node(NULL, irg, bl, op_Return, mode_X, n, in);
	init_node_attr(irn, NULL, irg, n);
	return irn;
}
ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, unsigned offset, be_stack_dir_t dir)
{
	be_stack_attr_t *a;
	ir_node *irn;
	ir_node *in[2];

	in[0] = old_sp;
	in[1] = mem;
	irn   = new_ir_node(NULL, irg, bl, op_IncSP, sp->reg_class->mode, 2, in);
	a     = init_node_attr(irn, sp->reg_class, irg, 1);
	a->dir    = dir;
	a->offset = offset;

	be_node_set_flags(irn, -1, arch_irn_flags_ignore);

	/* Set output constraint to stack register. */
	be_set_constr_single_reg(irn, -1, sp);
	be_node_set_irn_reg(NULL, irn, sp);

	return irn;
}
ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *op)
{
	be_node_attr_t *a;
	ir_node *irn;
	ir_node *in[2];

	in[0] = old_sp;
	in[1] = op;
	irn   = new_ir_node(NULL, irg, bl, op_AddSP, sp->reg_class->mode, 2, in);
	a     = init_node_attr(irn, sp->reg_class, irg, 1);

	be_node_set_flags(irn, -1, arch_irn_flags_ignore);

	/* Set output constraint to stack register. */
	be_set_constr_single_reg(irn, -1, sp);
	be_node_set_irn_reg(NULL, irn, sp);

	return irn;
}
ir_node *be_new_NoReg(const arch_register_t *reg, ir_graph *irg, ir_node *bl)
{
	be_node_attr_t *a;
	ir_node *irn;
	ir_node *in[1];

	irn = new_ir_node(NULL, irg, bl, op_NoReg, reg->reg_class->mode, 0, in);
	a   = init_node_attr(irn, reg->reg_class, irg, 1);
	be_node_set_flags(irn, -1, arch_irn_flags_ignore);
	be_set_constr_single_reg(irn, -1, reg);
	be_node_set_irn_reg(NULL, irn, reg);
	return irn;
}
ir_node *be_new_StackParam(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, unsigned offset)
{
	be_stack_attr_t *a;
	ir_node *irn;
	ir_node *in[1];

	in[0] = frame_pointer;
	irn = new_ir_node(NULL, irg, bl, op_StackParam, mode, 1, in);
	a   = init_node_attr(irn, cls, irg, 1);
	a->offset = offset;
	return irn;
}
ir_node *be_new_RegParams(ir_graph *irg, ir_node *bl, int n_outs)
{
	ir_node *irn;
	ir_node *in[1];

	irn = new_ir_node(NULL, irg, bl, op_RegParams, mode_T, 0, in);
	init_node_attr(irn, NULL, irg, n_outs);
	return irn;
}
int be_is_Spill     (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Spill      ; }
int be_is_Reload    (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Reload     ; }
int be_is_Copy      (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Copy       ; }
int be_is_Perm      (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Perm       ; }
int be_is_Keep      (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Keep       ; }
int be_is_Call      (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Call       ; }
int be_is_Return    (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Return     ; }
int be_is_IncSP     (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_IncSP      ; }
int be_is_AddSP     (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_AddSP      ; }
int be_is_RegParams (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_RegParams  ; }
int be_is_StackParam(const ir_node *irn) { return be_get_irn_opcode(irn) == beo_StackParam ; }
int be_is_NoReg     (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_NoReg      ; }
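
/**
 * The limited-bitset callback installed by be_set_constr_single_reg() and
 * be_set_constr_limited(): fills the bitset with the admissible registers,
 * either by delegating to (and, for the negate kind, flipping) the old
 * limited function, or by selecting the single allowed register.
 */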
static void be_limited(void *data, bitset_t *bs)
{
	be_req_t *req = data;

	switch(req->kind) {
	case be_req_kind_negate_old_limited:
	case be_req_kind_old_limited:
		req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs);
		if(req->kind == be_req_kind_negate_old_limited)
			bitset_flip_all(bs);
		break;
	case be_req_kind_single_reg:
		bitset_clear_all(bs);
		bitset_set(bs, req->x.single_reg->index);
		break;
	}
}
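
/**
 * Fetches the requirement record of an operand or a result.
 * A position pos >= 0 addresses the in requirement of operand pos;
 * a position pos < 0 addresses the out requirement of result -(pos + 1).
 */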
static INLINE be_req_t *get_req(ir_node *irn, int pos)
{
	int idx           = pos < 0 ? -(pos + 1) : pos;
	be_node_attr_t *a = get_irn_attr(irn);
	be_reg_data_t *rd = &a->reg_data[idx];
	be_req_t *r       = pos < 0 ? &rd->req : &rd->in_req;

	assert(is_be_node(irn));
	assert(!(pos >= 0) || pos < get_irn_arity(irn));
	assert(!(pos < 0) || -(pos + 1) < a->max_reg_data);

	return r;
}
void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg)
{
	be_req_t *r = get_req(irn, pos);

	r->kind            = be_req_kind_single_reg;
	r->x.single_reg    = reg;
	r->req.limited     = be_limited;
	r->req.limited_env = r;
	r->req.type        = arch_register_req_type_limited;
	r->req.cls         = reg->reg_class;
}
void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req)
{
	be_req_t *r = get_req(irn, pos);

	assert(arch_register_req_is(req, limited));

	r->kind            = be_req_kind_old_limited;
	r->req.limited     = be_limited;
	r->req.limited_env = r;
	r->req.type        = arch_register_req_type_limited;
	r->req.cls         = req->cls;

	r->x.old_limited.old_limited     = req->limited;
	r->x.old_limited.old_limited_env = req->limited_env;
}
void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags)
{
	be_req_t *r = get_req(irn, pos);
	r->flags = flags;
}

void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls)
{
	be_req_t *r = get_req(irn, pos);
	r->req.cls = cls;
}
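
/*
 * Usage sketch (hypothetical `perm', `reg' and `cls'): pin the first
 * result of a Perm to the fixed register reg and restrict its second
 * operand to the class cls.
 *
 *	be_set_constr_single_reg(perm, -1, reg);  (result 0 is pos -1)
 *	be_node_set_reg_class(perm, 1, cls);      (operand 1)
 */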
void be_set_IncSP_offset(ir_node *irn, unsigned offset)
{
	be_stack_attr_t *a = get_irn_attr(irn);
	assert(be_is_IncSP(irn));
	a->offset = offset;
}

unsigned be_get_IncSP_offset(ir_node *irn)
{
	be_stack_attr_t *a = get_irn_attr(irn);
	assert(be_is_IncSP(irn));
	return a->offset;
}

void be_set_IncSP_direction(ir_node *irn, be_stack_dir_t dir)
{
	be_stack_attr_t *a = get_irn_attr(irn);
	assert(be_is_IncSP(irn));
	a->dir = dir;
}

be_stack_dir_t be_get_IncSP_direction(ir_node *irn)
{
	be_stack_attr_t *a = get_irn_attr(irn);
	assert(be_is_IncSP(irn));
	return a->dir;
}

void be_set_Spill_entity(ir_node *irn, entity *ent)
{
	be_spill_attr_t *a = get_irn_attr(irn);
	assert(be_is_Spill(irn));
	a->ent = ent;
}
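
/**
 * DFS helper for find_a_spill(): walks over the operands of memory Phis
 * until a Spill node is reached, using the visited counter to avoid
 * processing a node twice.
 */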
static ir_node *find_a_spill_walker(ir_node *irn, unsigned visited_nr)
{
	if(get_irn_visited(irn) < visited_nr) {
		set_irn_visited(irn, visited_nr);

		if(is_Phi(irn)) {
			int i, n;
			for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
				ir_node *res = find_a_spill_walker(get_irn_n(irn, i), visited_nr);
				if(res)
					return res;
			}
		}

		else if(be_get_irn_opcode(irn) == beo_Spill)
			return irn;
	}

	return NULL;
}
ir_node *be_get_Spill_context(const ir_node *irn) {
	const be_spill_attr_t *a = get_irn_attr(irn);
	assert(be_is_Spill(irn));
	return a->spill_ctx;
}
/**
 * Finds a spill for a reload.
 * If the reload directly uses the spill, this is trivial;
 * otherwise we perform a DFS from the reload (over all memory Phis) and return
 * the first Spill node we find.
 */
static INLINE ir_node *find_a_spill(ir_node *irn)
{
	ir_graph *irg       = get_irn_irg(irn);
	unsigned visited_nr = get_irg_visited(irg) + 1;

	assert(be_is_Reload(irn));
	set_irg_visited(irg, visited_nr);
	return find_a_spill_walker(irn, visited_nr);
}
entity *be_get_spill_entity(ir_node *irn)
{
	switch(be_get_irn_opcode(irn)) {
	case beo_Reload:
		return be_get_spill_entity(find_a_spill(irn));
	case beo_Spill:
		{
			be_spill_attr_t *a = get_irn_attr(irn);
			return a->ent;
		}
	default:
		assert(0 && "Must give spill/reload node");
	}

	return NULL;
}
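
/**
 * Spills a node: creates a Spill of irn in its block and schedules it
 * behind irn, skipping any Phi nodes at the beginning of the block.
 */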
ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *ctx)
{
	const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);

	ir_node *bl    = get_nodes_block(irn);
	ir_graph *irg  = get_irn_irg(bl);
	ir_node *spill = be_new_Spill(cls, irg, bl, irn, ctx);
	ir_node *insert;

	/*
	 * search the right insertion point. a spill of a phi cannot be put
	 * directly after the phi, if there are some phis behind the one which
	 * is spilled.
	 */
	insert = sched_next(irn);
	while(is_Phi(insert) && !sched_is_end(insert))
		insert = sched_next(insert);

	sched_add_before(insert, spill);
	return spill;
}
ir_node *be_reload(const arch_env_t *arch_env,
				   const arch_register_class_t *cls,
				   ir_node *irn, int pos, ir_mode *mode, ir_node *spill)
{
	ir_node *reload;

	ir_node *bl   = get_nodes_block(irn);
	ir_graph *irg = get_irn_irg(bl);

	assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));

	reload = be_new_Reload(cls, irg, bl, mode, spill);

	set_irn_n(irn, pos, reload);
	sched_add_before(irn, reload);
	return reload;
}
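
/*
 * Usage sketch (hypothetical caller; `user' uses the spilled value `irn'
 * as its pos-th operand):
 *
 *	ir_node *spill  = be_spill(arch_env, irn, ctx);
 *	ir_node *reload = be_reload(arch_env, cls, user, pos,
 *	                            get_irn_mode(irn), spill);
 *
 * be_spill() schedules the Spill right after irn (behind any Phis),
 * be_reload() rewires user's pos-th operand to the Reload and schedules
 * it right before user.
 */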
static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos)
{
	const be_node_attr_t *a = get_irn_attr(irn);

	if(out_pos < a->max_reg_data)
		memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));
	else
		req->type = arch_register_req_type_none;

	return req;
}

static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos)
{
	const be_node_attr_t *a = get_irn_attr(irn);
	int n                   = get_irn_arity(irn);

	if(pos < n && pos < a->max_reg_data)
		memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));
	else
		req->type = arch_register_req_type_none;

	return req;
}
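
/**
 * arch interface: writes the register requirement of an operand (pos >= 0)
 * or of a result (pos < 0, Projs redirected first) into req and returns it,
 * or NULL if there is no backend requirement.
 */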
static const arch_register_req_t *
be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
{
	int out_pos;

	if(pos < 0) {
		if(get_irn_mode(irn) == mode_T)
			return NULL;

		out_pos = redir_proj((const ir_node **) &irn, pos);
		assert(is_be_node(irn));
		return put_out_reg_req(req, irn, out_pos);
	}

	return is_be_node(irn) ? put_in_reg_req(req, irn, pos) : NULL;
}
const arch_register_t *
be_node_get_irn_reg(const void *_self, const ir_node *irn)
{
	int out_pos;
	be_node_attr_t *a;

	out_pos = redir_proj((const ir_node **) &irn, -1);
	a       = get_irn_attr(irn);

	assert(is_be_node(irn));
	assert(out_pos < a->max_reg_data && "position too high");

	return a->reg_data[out_pos].reg;
}
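
/**
 * arch interface: maps the backend opcode to its arch_irn_class_t via
 * the XXX table below; Projs are redirected to the underlying node first.
 */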
static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
{
	redir_proj((const ir_node **) &irn, -1);

	switch(be_get_irn_opcode(irn)) {
#define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b;
static arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn)
{
	be_node_attr_t *a;

	redir_proj((const ir_node **) &irn, -1);
	assert(is_be_node(irn));
	a = get_irn_attr(irn);
	return a->max_reg_data > 0 ? a->reg_data[0].req.flags : arch_irn_flags_none;
}
static const arch_irn_ops_if_t be_node_irn_ops_if = {
	be_node_get_irn_reg_req,

static const arch_irn_ops_t be_node_irn_ops = {

const void *be_node_get_arch_ops(const arch_irn_handler_t *self, const ir_node *irn)
{
	redir_proj((const ir_node **) &irn, -1);
	return is_be_node(irn) ? &be_node_irn_ops : NULL;
}

const arch_irn_handler_t be_node_irn_handler = {
static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
{
	be_node_attr_t *at = get_irn_attr(irn);
	int i;

	assert(is_be_node(irn));

	switch(reason) {
		case dump_node_opcode_txt:
			fprintf(f, "%s", get_op_name(get_irn_op(irn)));
			break;
		case dump_node_mode_txt:
			fprintf(f, "%s", get_mode_name(get_irn_mode(irn)));
			break;
		case dump_node_nodeattr_txt:
			break;
		case dump_node_info_txt:
			fprintf(f, "reg class: %s\n", at->cls ? at->cls->name : "n/a");
			for(i = 0; i < at->max_reg_data; ++i) {
				const arch_register_t *reg = at->reg_data[i].reg;
				fprintf(f, "reg #%d: %s\n", i, reg ? reg->name : "n/a");
			}

			switch(be_get_irn_opcode(irn)) {
			case beo_Spill:
				{
					be_spill_attr_t *a = (be_spill_attr_t *) at;

					ir_fprintf(f, "spill context: %+F\n", a->spill_ctx);
					if(a->ent) {
						unsigned ofs = get_entity_offset_bytes(a->ent);
						ir_fprintf(f, "spill entity: %+F offset %x (%d)\n", a->ent, ofs, ofs);
					}
					else {
						ir_fprintf(f, "spill entity: n/a\n");
					}
				}
				break;

			case beo_IncSP:
				{
					be_stack_attr_t *a = (be_stack_attr_t *) at;
					fprintf(f, "offset: %u\n", a->offset);
					fprintf(f, "direction: %s\n", a->dir == be_stack_dir_along ? "along" : "against");
				}
				break;
			}
			break;
	}

	return 0;
}
/**
 * Copies the backend-specific attributes from old node to new node.
 */
static void copy_attr(const ir_node *old_node, ir_node *new_node)
{
	be_node_attr_t *old_attr = get_irn_attr(old_node);
	be_node_attr_t *new_attr = get_irn_attr(new_node);
	int i;

	assert(is_be_node(old_node));
	assert(is_be_node(new_node));

	memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
	new_attr->reg_data = NULL;

	if(new_attr->max_reg_data > 0) {
		new_attr->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(get_irn_irg(new_node)), new_attr->max_reg_data);
		memcpy(new_attr->reg_data, old_attr->reg_data, new_attr->max_reg_data * sizeof(be_reg_data_t));
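
		/*
		 * The limited callback environments point into the be_req_t
		 * records themselves, so redirect them to the new copies.
		 */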
		for(i = 0; i < old_attr->max_reg_data; ++i) {
			be_req_t *r;

			r = &new_attr->reg_data[i].req;
			r->req.limited_env = r;

			r = &new_attr->reg_data[i].in_req;
			r->req.limited_env = r;
		}
	}
}

static const ir_op_ops be_node_op_ops = {
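
/**
 * Computes the set of values of the given register class that are live
 * directly after pos: starts with the values live at the block end and
 * walks the schedule backwards down to pos, removing defined values and
 * adding used ones.
 */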
pset *nodes_live_at(const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *pos, pset *live)
{
	firm_dbg_module_t *dbg = firm_dbg_register("firm.be.node");
	const ir_node *bl      = is_Block(pos) ? pos : get_nodes_block(pos);
	ir_node *irn;
	irn_live_t *li;

	live_foreach(bl, li) {
		ir_node *irn = (ir_node *) li->irn;
		if(live_is_end(li) && arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
			pset_insert_ptr(live, irn);
	}

	sched_foreach_reverse(bl, irn) {
		int i, n;
		ir_node *x;

		/*
		 * If we encounter the node we want to insert the Perm after,
		 * exit immediately, so that this node is still live.
		 */
		if(irn == pos)
			return live;

		DBG((dbg, LEVEL_1, "%+F\n", irn));
		for(x = pset_first(live); x; x = pset_next(live))
			DBG((dbg, LEVEL_1, "\tlive: %+F\n", x));

		if(arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
			pset_remove_ptr(live, irn);

		for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
			ir_node *op = get_irn_n(irn, i);

			if(arch_irn_consider_in_reg_alloc(arch_env, cls, op))
				pset_insert_ptr(live, op);
		}
	}

	return live;
}
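
/**
 * Inserts a Perm over all values of the given register class that are
 * live after pos, schedules it behind pos, creates one Proj per permuted
 * value and re-establishes SSA form for each (value, Proj) pair.
 */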
ir_node *insert_Perm_after(const arch_env_t *arch_env,
						   const arch_register_class_t *cls,
						   dom_front_info_t *dom_front,
						   ir_node *pos)
{
	ir_node *bl            = is_Block(pos) ? pos : get_nodes_block(pos);
	ir_graph *irg          = get_irn_irg(bl);
	pset *live             = pset_new_ptr_default();
	firm_dbg_module_t *dbg = firm_dbg_register("be.node");

	ir_node *curr, *irn, *perm, **nodes;
	int i, n;

	DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos));

	nodes_live_at(arch_env, cls, pos, live);

	n = pset_count(live);
	if(n == 0)
		return NULL;

	nodes = malloc(n * sizeof(nodes[0]));

	DBG((dbg, LEVEL_1, "live:\n"));
	for(irn = pset_first(live), i = 0; irn; irn = pset_next(live), i++) {
		DBG((dbg, LEVEL_1, "\t%+F\n", irn));
		nodes[i] = irn;
	}

	perm = be_new_Perm(cls, irg, bl, n, nodes);
	sched_add_after(pos, perm);
	free(nodes);

	curr = perm;
	for(i = 0; i < n; ++i) {
		ir_node *copies[2];
		ir_node *perm_op = get_irn_n(perm, i);
		const arch_register_t *reg = arch_get_irn_register(arch_env, perm_op);

		ir_mode *mode = get_irn_mode(perm_op);
		ir_node *proj = new_r_Proj(irg, bl, perm, mode, i);
		arch_set_irn_register(arch_env, proj, reg);

		sched_add_after(curr, proj);
		curr = proj;

		copies[0] = perm_op;
		copies[1] = proj;
		be_ssa_constr(dom_front, 2, copies);
	}

	return perm;
}