 * @author Sebastian Hack
 * Backend node support.
 * This file provides Perm, Copy, Spill and Reload nodes.
 * Copyright (C) 2005 Universitaet Karlsruhe
 * Released under the GPL
#include "besched_t.h"

/* Sometimes we want to put const nodes into get_irn_generic_attr ... */
#define get_irn_attr(irn) get_irn_generic_attr((ir_node *) (irn))

static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O');

typedef enum _node_kind_t {
    const arch_register_class_t *cls;

    be_req_kind_old_limited,
    be_req_kind_negate_old_limited,
    be_req_kind_single_reg

    arch_register_req_t req;
        void (*old_limited)(void *ptr, bitset_t *bs);
        void *old_limited_env;
        const arch_register_t *single_reg;

    const arch_register_t *reg;

    arch_irn_flags_t flags;
    const arch_register_class_t *cls;
    be_reg_data_t *reg_data;

    be_node_attr_t node_attr;
    ir_node *spill_ctx;  /**< The node in whose context this spill was introduced. */
    entity *ent;         /**< The entity in the stack frame the spill writes to. */

    be_node_attr_t node_attr;
    int offset;          /**< The offset by which the stack shall be increased/decreased. */
    be_stack_dir_t dir;  /**< The direction in which the stack pointer is moved: along or against the stack's growth direction. */
static ir_op *op_Spill;
static ir_op *op_Reload;
static ir_op *op_Perm;
static ir_op *op_Copy;
static ir_op *op_Keep;
static ir_op *op_Call;
static ir_op *op_IncSP;
static ir_op *op_AddSP;
static ir_op *op_RegParams;
static ir_op *op_StackParam;
static ir_op *op_NoReg;

static int beo_base = -1;

static const ir_op_ops be_node_op_ops;

#define N irop_flag_none
#define L irop_flag_labeled
#define C irop_flag_commutative
#define X irop_flag_cfopcode
#define I irop_flag_ip_cfopcode
#define F irop_flag_fragile
#define Y irop_flag_forking
#define H irop_flag_highlevel
#define c irop_flag_constlike
#define K irop_flag_keep
void be_node_init(void) {
    static int inited = 0;

    /* Acquire all needed opcodes. */
    beo_base = get_next_ir_opcodes(beo_Last - 1);
    op_Spill = new_ir_op(beo_base + beo_Spill, "Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_spill_attr_t), &be_node_op_ops);
    op_Reload = new_ir_op(beo_base + beo_Reload, "Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
    op_Perm = new_ir_op(beo_base + beo_Perm, "Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
    op_Copy = new_ir_op(beo_base + beo_Copy, "Copy", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
    op_Keep = new_ir_op(beo_base + beo_Keep, "Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
    op_NoReg = new_ir_op(beo_base + beo_NoReg, "NoReg", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
    op_Call = new_ir_op(beo_base + beo_Call, "Call", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
    op_AddSP = new_ir_op(beo_base + beo_AddSP, "AddSP", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
    op_IncSP = new_ir_op(beo_base + beo_IncSP, "IncSP", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
    op_RegParams = new_ir_op(beo_base + beo_RegParams, "RegParams", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
    op_StackParam = new_ir_op(beo_base + beo_StackParam, "StackParam", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
    set_op_tag(op_Spill, &be_node_tag);
    set_op_tag(op_Reload, &be_node_tag);
    set_op_tag(op_Perm, &be_node_tag);
    set_op_tag(op_Copy, &be_node_tag);
    set_op_tag(op_Keep, &be_node_tag);
    set_op_tag(op_NoReg, &be_node_tag);
    set_op_tag(op_Call, &be_node_tag);
    set_op_tag(op_AddSP, &be_node_tag);
    set_op_tag(op_IncSP, &be_node_tag);
    set_op_tag(op_RegParams, &be_node_tag);
    set_op_tag(op_StackParam, &be_node_tag);
static void *init_node_attr(ir_node *irn, const arch_register_class_t *cls, ir_graph *irg, int max_reg_data)
    be_node_attr_t *a = get_irn_attr(irn);

    a->max_reg_data = max_reg_data;
    a->flags = arch_irn_flags_none;

    if(max_reg_data > 0) {
        a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), max_reg_data);
        memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0]));
        for(i = 0; i < max_reg_data; ++i) {
            a->reg_data[i].req.req.cls = cls;
            a->reg_data[i].req.req.type = arch_register_req_type_normal;
static INLINE int is_be_node(const ir_node *irn)
    return get_op_tag(get_irn_op(irn)) == &be_node_tag;

be_opcode_t get_irn_be_opcode(const ir_node *irn)
    return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp;
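/*
 * Illustration (not part of the interface): all backend opcodes are
 * allocated as one contiguous block starting at beo_base, so the backend
 * opcode of a node is recovered by plain subtraction, e.g.
 *
 *   get_irn_opcode(spill)       == beo_base + beo_Spill
 *   get_irn_be_opcode(spill)    == beo_Spill
 *   get_irn_be_opcode(ordinary) == beo_NoBeOp
 *
 * where "spill" is a hypothetical Spill node and "ordinary" any
 * non-backend node.
 */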
static int redir_proj(const ir_node **node, int pos)
    const ir_node *n = *node;

        assert(pos == -1 && "Illegal pos for a Proj");
        *node = get_Proj_pred(n);
        return get_Proj_proj(n);
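/*
 * Sketch of the Proj redirection (illustrative; "proj" is a hypothetical
 * Proj with proj number 2 hanging off a Perm):
 *
 *   const ir_node *n = proj;
 *   int out_pos = redir_proj(&n, -1);   // n now points to the Perm,
 *                                       // out_pos == 2
 *
 * Register data of multi-result backend nodes is stored at the node
 * itself; the returned value is the index into its reg_data array.
 */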
be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
    out_pos = redir_proj((const ir_node **) &irn, -1);
    a = get_irn_attr(irn);

    assert(is_be_node(irn));
    assert(out_pos < a->max_reg_data && "position too high");
    a->reg_data[out_pos].reg = reg;
ir_node *be_new_Spill(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *to_spill, ir_node *ctx)
    res = new_ir_node(NULL, irg, bl, op_Spill, mode_M, 1, in);
    a = init_node_attr(res, cls, irg, 0);

ir_node *be_new_Reload(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *mem)
    res = new_ir_node(NULL, irg, bl, op_Reload, mode, 1, in);
    init_node_attr(res, cls, irg, 1);
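/*
 * Minimal constructor sketch (illustrative only; "val", "user", "cls",
 * "irg" and "bl" are hypothetical and set up elsewhere):
 *
 *   ir_node *spill  = be_new_Spill(cls, irg, bl, val, val);
 *   ir_node *reload = be_new_Reload(cls, irg, bl, get_irn_mode(val), spill);
 *   set_irn_n(user, 0, reload);
 *
 * The Spill produces a mode_M value and is later bound to a stack entity
 * via be_set_Spill_entity(); the Reload consumes that memory value and
 * yields a fresh register value. The helpers be_spill()/be_reload()
 * further down also insert the new nodes into the schedule.
 */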
ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
    ir_node *irn = new_ir_node(NULL, irg, bl, op_Perm, mode_T, n, in);
    init_node_attr(irn, cls, irg, n);

ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op)
    res = new_ir_node(NULL, irg, bl, op_Copy, get_irn_mode(op), 1, in);
    init_node_attr(res, cls, irg, 1);

ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
    irn = new_ir_node(NULL, irg, bl, op_Keep, mode_ANY, n, in);
    init_node_attr(irn, cls, irg, 0);
ir_node *be_new_Call(ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr, int n_outs, int n, ir_node *in[])
    real_in = malloc(sizeof(real_in[0]) * (real_n));
    memcpy(&real_in[3], in, n * sizeof(in[0]));

    irn = new_ir_node(NULL, irg, bl, op_Call, mode_T, real_n, real_in);
    init_node_attr(irn, NULL, irg, (n_outs > real_n ? n_outs : real_n));
ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, unsigned offset, be_stack_dir_t dir)
    irn = new_ir_node(NULL, irg, bl, op_IncSP, sp->reg_class->mode, 1, in);
    a = init_node_attr(irn, sp->reg_class, irg, 1);
    a->node_attr.flags |= arch_irn_flags_ignore;

    /* Set output constraint to stack register. */
    be_set_constr_single_reg(irn, -1, sp);
    be_node_set_irn_reg(NULL, irn, sp);
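/*
 * Usage sketch (illustrative; "sp_reg", "irg", "bl" and "old_sp" are
 * hypothetical, and be_stack_dir_along is assumed to grow the frame):
 *
 *   ir_node *sp1 = be_new_IncSP(sp_reg, irg, bl, old_sp, 16, be_stack_dir_along);
 *   ...
 *   ir_node *sp2 = be_new_IncSP(sp_reg, irg, bl, sp1, 16, be_stack_dir_against);
 *
 * The result is constrained to the stack register and flagged
 * arch_irn_flags_ignore, so the register allocator leaves it alone.
 */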
ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *op)
    irn = new_ir_node(NULL, irg, bl, op_AddSP, sp->reg_class->mode, 2, in);
    a = init_node_attr(irn, sp->reg_class, irg, 1);
    a->flags |= arch_irn_flags_ignore;

    /* Set output constraint to stack register. */
    be_set_constr_single_reg(irn, -1, sp);
    be_node_set_irn_reg(NULL, irn, sp);

ir_node *be_new_NoReg(const arch_register_t *reg, ir_graph *irg, ir_node *bl)
    irn = new_ir_node(NULL, irg, bl, op_NoReg, reg->reg_class->mode, 0, in);
    a = init_node_attr(irn, reg->reg_class, irg, 1);
    a->flags |= arch_irn_flags_ignore;
    be_set_constr_single_reg(irn, -1, reg);
    be_node_set_irn_reg(NULL, irn, reg);
ir_node *be_new_StackParam(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, unsigned offset)
    in[0] = frame_pointer;
    irn = new_ir_node(NULL, irg, bl, op_StackParam, mode, 1, in);
    a = init_node_attr(irn, cls, irg, 1);
int be_is_Spill     (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Spill;      }
int be_is_Reload    (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Reload;     }
int be_is_Copy      (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Copy;       }
int be_is_Perm      (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Perm;       }
int be_is_Keep      (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Keep;       }
int be_is_Call      (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Call;       }
int be_is_IncSP     (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_IncSP;      }
int be_is_AddSP     (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_AddSP;      }
int be_is_RegParams (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_RegParams;  }
int be_is_StackParam(const ir_node *irn) { return get_irn_be_opcode(irn) == beo_StackParam; }
int be_is_NoReg     (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_NoReg;      }
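/*
 * These predicates are safe on arbitrary nodes, since non-backend nodes
 * map to beo_NoBeOp. A hypothetical walker callback counting spill code
 * could look like this (sketch only; it would be handed to
 * irg_walk_graph()):
 *
 *   static void count_spill_code(ir_node *irn, void *env) {
 *       int *cnt = env;
 *       if(be_is_Spill(irn) || be_is_Reload(irn))
 *           ++*cnt;
 *   }
 */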
static void be_limited(void *data, bitset_t *bs)
    be_req_t *req = data;

        case be_req_kind_negate_old_limited:
        case be_req_kind_old_limited:
            req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs);
            if(req->kind == be_req_kind_negate_old_limited)
        case be_req_kind_single_reg:
            bitset_clear_all(bs);
            bitset_set(bs, req->x.single_reg->index);
void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg)
    int idx = pos < 0 ? -(pos + 1) : pos;
    be_node_attr_t *a = get_irn_attr(irn);
    be_reg_data_t *rd = &a->reg_data[idx];
    be_req_t *r = pos < 0 ? &rd->req : &rd->in_req;

    assert(is_be_node(irn));
    assert(!(pos >= 0) || pos < get_irn_arity(irn));
    assert(!(pos < 0) || -(pos + 1) <= a->max_reg_data);

    r->kind = be_req_kind_single_reg;
    r->x.single_reg = reg;
    r->req.limited = be_limited;
    r->req.limited_env = r;
    r->req.type = arch_register_req_type_limited;
    r->req.cls = reg->reg_class;
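/*
 * Position convention used here: pos >= 0 constrains the pos-th input,
 * a negative pos constrains output -(pos + 1). Illustrative calls
 * ("irn" and "eax" are hypothetical):
 *
 *   be_set_constr_single_reg(irn, 1, eax);    // second input must be eax
 *   be_set_constr_single_reg(irn, -1, eax);   // first output must be eax
 *
 * be_new_IncSP() above uses the second form to pin its result to the
 * stack pointer register.
 */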
void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req)
    int idx = pos < 0 ? -(pos + 1) : pos;
    be_node_attr_t *a = get_irn_attr(irn);
    be_reg_data_t *rd = &a->reg_data[idx];
    be_req_t *r = pos < 0 ? &rd->req : &rd->in_req;

    assert(is_be_node(irn));
    assert(!(pos >= 0) || pos < get_irn_arity(irn));
    assert(!(pos < 0) || -(pos + 1) <= a->max_reg_data);
    assert(arch_register_req_is(req, limited));

    r->kind = be_req_kind_old_limited;
    r->req.limited = be_limited;
    r->req.limited_env = r;
    r->req.type = arch_register_req_type_limited;
    r->req.cls = req->cls;

    r->x.old_limited.old_limited = req->limited;
    r->x.old_limited.old_limited_env = req->limited_env;
void be_set_IncSP_offset(ir_node *irn, unsigned offset)
    be_stack_attr_t *a = get_irn_attr(irn);
    assert(be_is_IncSP(irn));

unsigned be_get_IncSP_offset(ir_node *irn)
    be_stack_attr_t *a = get_irn_attr(irn);
    assert(be_is_IncSP(irn));

void be_set_IncSP_direction(ir_node *irn, be_stack_dir_t dir)
    be_stack_attr_t *a = get_irn_attr(irn);
    assert(be_is_IncSP(irn));

be_stack_dir_t be_get_IncSP_direction(ir_node *irn)
    be_stack_attr_t *a = get_irn_attr(irn);
    assert(be_is_IncSP(irn));

void be_set_Spill_entity(ir_node *irn, entity *ent)
    be_spill_attr_t *a = get_irn_attr(irn);
    assert(be_is_Spill(irn));
static ir_node *find_a_spill_walker(ir_node *irn, unsigned visited_nr)
    if(get_irn_visited(irn) < visited_nr) {
        set_irn_visited(irn, visited_nr);

            for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
                ir_node *n = find_a_spill_walker(get_irn_n(irn, i), visited_nr);

        else if(get_irn_be_opcode(irn) == beo_Spill)
ir_node *be_get_Spill_context(const ir_node *irn) {
    const be_spill_attr_t *a = get_irn_attr(irn);
    assert(be_is_Spill(irn));
 * Finds a spill for a reload.
 * If the reload directly uses the spill, this is simple; otherwise we
 * perform a DFS from the reload (over all PhiMs) and return the first
 * spill node we find.
static INLINE ir_node *find_a_spill(ir_node *irn)
    ir_graph *irg = get_irn_irg(irn);
    unsigned visited_nr = get_irg_visited(irg) + 1;

    assert(be_is_Reload(irn));
    set_irg_visited(irg, visited_nr);
    return find_a_spill_walker(irn, visited_nr);
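/*
 * Example of the situation handled here (illustrative): a value spilled
 * on both sides of a control flow join and reloaded below the join has
 * a PhiM, not a Spill, as memory operand of the Reload:
 *
 *     Spill_a   Spill_b
 *          \     /
 *           PhiM
 *            |
 *          Reload
 *
 * The walker follows the PhiM operands until it hits a Spill; returning
 * the first one found is fine because be_get_spill_entity() below only
 * needs the stack entity, which all such Spills are assumed to share.
 */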
entity *be_get_spill_entity(ir_node *irn)
    int opc = get_irn_opcode(irn);

    switch(get_irn_be_opcode(irn)) {
        return be_get_spill_entity(find_a_spill(irn));
            be_spill_attr_t *a = get_irn_attr(irn);
        assert(0 && "Must give spill/reload node");
ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *ctx)
    const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);

    ir_node *bl = get_nodes_block(irn);
    ir_graph *irg = get_irn_irg(bl);
    ir_node *spill = be_new_Spill(cls, irg, bl, irn, ctx);

     * Search for the right insertion point. A spill of a Phi cannot be put
     * directly after the Phi if there are other Phis behind the one which
    insert = sched_next(irn);
    while(is_Phi(insert) && !sched_is_end(insert))
        insert = sched_next(insert);

    sched_add_before(insert, spill);
ir_node *be_reload(const arch_env_t *arch_env,
                   const arch_register_class_t *cls,
                   ir_node *irn, int pos, ir_mode *mode, ir_node *spill)
    ir_node *bl = get_nodes_block(irn);
    ir_graph *irg = get_irn_irg(bl);

    assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));

    reload = be_new_Reload(cls, irg, bl, mode, spill);

    set_irn_n(irn, pos, reload);
    sched_add_before(irn, reload);
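/*
 * Typical spiller usage (illustrative; "arch_env", "val", "user" and
 * "pos" are hypothetical, "cls" is the register class of "val"):
 *
 *   ir_node *spill = be_spill(arch_env, val, val);
 *   be_reload(arch_env, cls, user, pos, get_irn_mode(val), spill);
 *
 * be_spill() schedules the Spill right after the definition of "val"
 * (skipping any Phis), be_reload() schedules the Reload right before
 * "user" and rewires its pos-th operand to the reloaded value.
 */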
static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos)
    const be_node_attr_t *a = get_irn_attr(irn);

    if(out_pos < a->max_reg_data)
        memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));
        req->type = arch_register_req_type_none;
static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos)
    const be_node_attr_t *a = get_irn_attr(irn);
    int n = get_irn_arity(irn);

    if(pos < n)
        memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));
        req->type = arch_register_req_type_none;
static const arch_register_req_t *
be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
        if(get_irn_mode(irn) == mode_T)

        out_pos = redir_proj((const ir_node **) &irn, pos);
        assert(is_be_node(irn));
        return put_out_reg_req(req, irn, out_pos);

        return is_be_node(irn) ? put_in_reg_req(req, irn, pos) : NULL;
const arch_register_t *
be_node_get_irn_reg(const void *_self, const ir_node *irn)
    out_pos = redir_proj((const ir_node **) &irn, -1);
    a = get_irn_attr(irn);

    assert(is_be_node(irn));
    assert(out_pos < a->max_reg_data && "position too high");

    return a->reg_data[out_pos].reg;
arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
    redir_proj((const ir_node **) &irn, -1);

    switch(get_irn_be_opcode(irn)) {
#define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b;
arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn)
    be_node_attr_t *a = get_irn_attr(irn);

static const arch_irn_ops_if_t be_node_irn_ops_if = {
    be_node_get_irn_reg_req,

static const arch_irn_ops_t be_node_irn_ops = {

const void *be_node_get_arch_ops(const arch_irn_handler_t *self, const ir_node *irn)
    redir_proj((const ir_node **) &irn, -1);
    return is_be_node(irn) ? &be_node_irn_ops : NULL;

const arch_irn_handler_t be_node_irn_handler = {
static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
    be_node_attr_t *at = get_irn_attr(irn);

    assert(is_be_node(irn));

        case dump_node_opcode_txt:
            fprintf(f, "%s", get_op_name(get_irn_op(irn)));
        case dump_node_mode_txt:
            fprintf(f, "%s", get_mode_name(get_irn_mode(irn)));
        case dump_node_nodeattr_txt:
        case dump_node_info_txt:
            fprintf(f, "reg class: %s\n", at->cls ? at->cls->name : "n/a");
            for(i = 0; i < at->max_reg_data; ++i) {
                const arch_register_t *reg = at->reg_data[i].reg;
                fprintf(f, "reg #%d: %s\n", i, reg ? reg->name : "n/a");

            switch(get_irn_be_opcode(irn)) {
                    be_spill_attr_t *a = (be_spill_attr_t *) at;

                    ir_fprintf(f, "spill context: %+F\n", a->spill_ctx);
                        unsigned ofs = get_entity_offset_bytes(a->ent);
                        ir_fprintf(f, "spill entity: %+F offset %x (%d)\n", a->ent, ofs, ofs);
                        ir_fprintf(f, "spill entity: n/a\n");

                    be_stack_attr_t *a = (be_stack_attr_t *) at;
                    fprintf(f, "offset: %u\n", a->offset);
                    fprintf(f, "direction: %s\n", a->dir == be_stack_dir_along ? "along" : "against");
void copy_attr(const ir_node *old_node, ir_node *new_node)
    be_node_attr_t *old_attr = get_irn_attr(old_node);
    be_node_attr_t *new_attr = get_irn_attr(new_node);

    assert(is_be_node(old_node));
    assert(is_be_node(new_node));

    memcpy(new_attr, old_attr, old_node->op->attr_size);
    new_attr->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(get_irn_irg(new_node)), new_attr->max_reg_data);
    memcpy(new_attr->reg_data, old_attr->reg_data, new_attr->max_reg_data * sizeof(be_reg_data_t));
static const ir_op_ops be_node_op_ops = {

pset *nodes_live_at(const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *pos, pset *live)
    firm_dbg_module_t *dbg = firm_dbg_register("firm.be.node");
    const ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);

    live_foreach(bl, li) {
        ir_node *irn = (ir_node *) li->irn;
        if(live_is_end(li) && arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
            pset_insert_ptr(live, irn);

    sched_foreach_reverse(bl, irn) {
         * If we encounter the node we want to insert the Perm after,
         * exit immediately, so that this node is still live
        DBG((dbg, LEVEL_1, "%+F\n", irn));
        for(x = pset_first(live); x; x = pset_next(live))
            DBG((dbg, LEVEL_1, "\tlive: %+F\n", x));

        if(arch_irn_has_reg_class(arch_env, irn, -1, cls))
            pset_remove_ptr(live, irn);

        for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
            ir_node *op = get_irn_n(irn, i);

            if(arch_irn_consider_in_reg_alloc(arch_env, cls, op))
                pset_insert_ptr(live, op);
ir_node *insert_Perm_after(const arch_env_t *arch_env,
                           const arch_register_class_t *cls,
                           dom_front_info_t *dom_front,
    ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
    ir_graph *irg = get_irn_irg(bl);
    pset *live = pset_new_ptr_default();
    firm_dbg_module_t *dbg = firm_dbg_register("be.node");

    ir_node *curr, *irn, *perm, **nodes;

    DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos));
    nodes_live_at(arch_env, cls, pos, live);
    n = pset_count(live);

    nodes = malloc(n * sizeof(nodes[0]));

    DBG((dbg, LEVEL_1, "live:\n"));
    for(irn = pset_first(live), i = 0; irn; irn = pset_next(live), i++) {
        DBG((dbg, LEVEL_1, "\t%+F\n", irn));

    perm = be_new_Perm(cls, irg, bl, n, nodes);
    sched_add_after(pos, perm);

    for(i = 0; i < n; ++i) {
        ir_node *perm_op = get_irn_n(perm, i);
        const arch_register_t *reg = arch_get_irn_register(arch_env, perm_op);

        ir_mode *mode = get_irn_mode(perm_op);
        ir_node *proj = new_r_Proj(irg, bl, perm, mode, i);
        arch_set_irn_register(arch_env, proj, reg);

        sched_add_after(curr, proj);

        be_ssa_constr_single(dom_front, perm_op, 1, copies);
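/*
 * Usage sketch (illustrative; "arch_env", "cls", "dom_front" and "pos"
 * are assumed to be supplied by the caller, with "pos" being the node
 * the Perm is inserted after):
 *
 *   ir_node *perm = insert_Perm_after(arch_env, cls, dom_front, pos);
 *
 * Every value of class "cls" live behind "pos" becomes an operand of
 * the Perm; a Proj carrying the operand's register is scheduled behind
 * the Perm and be_ssa_constr_single() repairs SSA form, so users
 * dominated by the Perm are rewired to the corresponding Proj.
 */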