4 * @author Sebastian Hack
6 * Backend node support.
8 * This file provides Perm, Copy, Spill and Reload nodes.
10 * Copyright (C) 2005-2006 Universitaet Karlsruhe
11 * Released under the GPL
27 #include "bitfiddle.h"
39 #include "besched_t.h"
/* Encode output slot x as a negative position: out slot x <-> -(x + 1).
 * Throughout this file, pos >= 0 addresses inputs and pos < 0 addresses outputs. */
44 #define OUT_POS(x) (-((x) + 1))
46 /* Sometimes we want to put const nodes into get_irn_generic_attr ... */
/* Casts away const so the attribute accessors below work on const ir_node too. */
47 #define get_irn_attr(irn) get_irn_generic_attr((ir_node *) (irn))
/* Tag value whose ADDRESS marks an ir_op as a backend opcode (see is_be_node()). */
49 static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O');
/* Kinds of register requirements a be node can carry (enum members; the
 * enclosing enum declaration is elided in this view). */
52 be_req_kind_old_limited,
53 be_req_kind_negate_old_limited,
54 be_req_kind_single_reg
/* be_req_t: a wrapped arch_register_req_t plus flags and kind-specific data. */
58 arch_register_req_t req;
60 arch_irn_flags_t flags;
/* For be_req_kind_(negate_)old_limited: the original limited callback + env. */
63 void (*old_limited)(void *ptr, bitset_t *bs);
64 void *old_limited_env;
/* For be_req_kind_single_reg: the one admissible register. */
67 const arch_register_t *single_reg;
/* be_reg_data_t: the register finally assigned to one out slot. */
72 const arch_register_t *reg;
77 /** The generic be nodes attribute type. */
80 be_reg_data_t *reg_data;
83 /** The be_Return nodes attribute type. */
85 be_node_attr_t node_attr;
86 int num_ret_vals; /**< number of return values */
89 /** The be_Stack attribute type. */
91 be_node_attr_t node_attr;
92 int offset; /**< The offset by which the stack shall be expanded/shrinked. */
95 /** The be_Frame attribute type. */
97 be_node_attr_t node_attr;
102 /** The be_Call attribute type. */
104 be_node_attr_t node_attr;
105 ir_entity *ent; /**< The called entity if this is a static call. */
106 ir_type *call_tp; /**< The call type, copied from the original Call node. */
/* be_MemPerm attribute: frame entities for each permuted value (in and out). */
110 be_node_attr_t node_attr;
111 ir_entity **in_entities;
112 ir_entity **out_entities;
/* Opcode objects for the backend node family (the remaining op_be_* pointers
 * are declared on lines elided from this view). */
118 ir_op *op_be_MemPerm;
121 ir_op *op_be_CopyKeep;
128 ir_op *op_be_RegParams;
129 ir_op *op_be_StackParam;
130 ir_op *op_be_FrameAddr;
131 ir_op *op_be_FrameLoad;
132 ir_op *op_be_FrameStore;
133 ir_op *op_be_Barrier;
/* First opcode number reserved for be nodes; -1 until be_node_init() has run. */
135 static int beo_base = -1;
/* Shared ops table installed on every be opcode (defined later in the file). */
137 static const ir_op_ops be_node_op_ops;
/* One-letter shorthands for irop_flag_* used in the new_ir_op() calls in be_node_init(). */
139 #define N irop_flag_none
140 #define L irop_flag_labeled
141 #define C irop_flag_commutative
142 #define X irop_flag_cfopcode
143 #define I irop_flag_ip_cfopcode
144 #define F irop_flag_fragile
145 #define Y irop_flag_forking
146 #define H irop_flag_highlevel
147 #define c irop_flag_constlike
148 #define K irop_flag_keep
149 #define M irop_flag_machine
153 * Compare two node attributes.
155 * @return zero if both attributes are identically
157 static int cmp_node_attr(be_node_attr_t *a, be_node_attr_t *b) {
158 if (a->max_reg_data == b->max_reg_data) {
/* Element-wise compare of per-slot data: assigned register plus in/out requirements.
 * NOTE(review): memcmp over whole requirement structs also compares padding
 * bytes — fine here only because init_node_attr() zeroes the attributes first. */
161 for (i = 0; i < a->max_reg_data; ++i) {
162 if (a->reg_data[i].reg != b->reg_data[i].reg ||
163 memcmp(&a->reg_data[i].in_req, &b->reg_data[i].in_req, sizeof(b->reg_data[i].in_req)) ||
164 memcmp(&a->reg_data[i].req, &b->reg_data[i].req, sizeof(a->reg_data[i].req)))
173 * Compare the attributes of two FrameAddr nodes.
175 * @return zero if both attributes are identically
177 static int FrameAddr_cmp_attr(ir_node *a, ir_node *b) {
178 be_frame_attr_t *a_attr = get_irn_attr(a);
179 be_frame_attr_t *b_attr = get_irn_attr(b);
/* Same frame entity and same offset: decide by the generic attribute compare;
 * otherwise the nodes differ (the non-equal return path is elided in this view). */
181 if (a_attr->ent == b_attr->ent && a_attr->offset == b_attr->offset)
182 return cmp_node_attr(&a_attr->node_attr, &b_attr->node_attr);
/* Create all backend opcodes, tag them as be nodes and install the shared op
 * ops. Idempotent via the static `inited` guard (guard check elided in view). */
186 void be_node_init(void) {
187 static int inited = 0;
194 /* Acquire all needed opcodes. */
/* NOTE(review): reserving `beo_Last - 1` opcodes looks like an off-by-one;
 * confirm against the beo_* enum (possibly correct if beo_Last counts a
 * sentinel such as beo_NoBeOp). */
195 beo_base = get_next_ir_opcodes(beo_Last - 1);
197 op_be_Spill = new_ir_op(beo_base + beo_Spill, "be_Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
198 op_be_Reload = new_ir_op(beo_base + beo_Reload, "be_Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
199 op_be_Perm = new_ir_op(beo_base + beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
200 op_be_MemPerm = new_ir_op(beo_base + beo_MemPerm, "be_MemPerm", op_pin_state_mem_pinned, N, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops);
201 op_be_Copy = new_ir_op(beo_base + beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
202 op_be_Keep = new_ir_op(beo_base + beo_Keep, "be_Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
203 op_be_CopyKeep = new_ir_op(beo_base + beo_CopyKeep, "be_CopyKeep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
204 op_be_Call = new_ir_op(beo_base + beo_Call, "be_Call", op_pin_state_pinned, F, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
205 op_be_Return = new_ir_op(beo_base + beo_Return, "be_Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_return_attr_t), &be_node_op_ops);
206 op_be_AddSP = new_ir_op(beo_base + beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
207 op_be_SubSP = new_ir_op(beo_base + beo_SubSP, "be_SubSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
208 op_be_SetSP = new_ir_op(beo_base + beo_SetSP, "be_SetSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
209 op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
210 op_be_RegParams = new_ir_op(beo_base + beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
211 op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
212 op_be_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "be_FrameAddr", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
213 op_be_FrameLoad = new_ir_op(beo_base + beo_FrameLoad, "be_FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
214 op_be_FrameStore = new_ir_op(beo_base + beo_FrameStore, "be_FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
215 op_be_Barrier = new_ir_op(beo_base + beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_node_attr_t), &be_node_op_ops);
/* Mark every opcode created above as a backend opcode (see is_be_node()). */
217 set_op_tag(op_be_Spill, &be_node_tag);
218 set_op_tag(op_be_Reload, &be_node_tag);
219 set_op_tag(op_be_Perm, &be_node_tag);
220 set_op_tag(op_be_MemPerm, &be_node_tag);
221 set_op_tag(op_be_Copy, &be_node_tag);
222 set_op_tag(op_be_Keep, &be_node_tag);
223 set_op_tag(op_be_CopyKeep, &be_node_tag);
224 set_op_tag(op_be_Call, &be_node_tag);
225 set_op_tag(op_be_Return, &be_node_tag);
226 set_op_tag(op_be_AddSP, &be_node_tag);
227 set_op_tag(op_be_SubSP, &be_node_tag);
228 set_op_tag(op_be_SetSP, &be_node_tag);
229 set_op_tag(op_be_IncSP, &be_node_tag);
230 set_op_tag(op_be_RegParams, &be_node_tag);
231 set_op_tag(op_be_StackParam, &be_node_tag);
232 set_op_tag(op_be_FrameLoad, &be_node_tag);
233 set_op_tag(op_be_FrameStore, &be_node_tag);
234 set_op_tag(op_be_FrameAddr, &be_node_tag);
235 set_op_tag(op_be_Barrier, &be_node_tag);
/* FrameAddr gets a dedicated attribute comparator so CSE can merge equal FrameAddr nodes. */
237 op_be_FrameAddr->ops.node_cmp_attr = FrameAddr_cmp_attr;
241 * Initializes the generic attribute of all be nodes and return ir.
243 static void *init_node_attr(ir_node* irn, int max_reg_data)
245 ir_graph *irg = get_irn_irg(irn);
246 be_node_attr_t *a = get_irn_attr(irn);
/* FIXME(review): sizeof(get_op_attr_size(...)) is the size of the RETURN TYPE
 * of get_op_attr_size (an integer), not the attribute size itself — this
 * clears only the first few bytes of the attribute. Almost certainly meant:
 *   memset(a, 0, get_op_attr_size(get_irn_op(irn)));                       */
248 memset(a, 0, sizeof(get_op_attr_size(get_irn_op(irn))));
249 a->max_reg_data = max_reg_data;
252 if(max_reg_data > 0) {
/* Allocate the per-slot register data on the graph obstack and reset each
 * slot to "no class, no requirement". */
255 a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), max_reg_data);
256 memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0]));
257 for(i = 0; i < max_reg_data; ++i) {
258 a->reg_data[i].req.req.cls = NULL;
259 a->reg_data[i].req.req.type = arch_register_req_type_none;
/* A node is a backend node iff its opcode carries the address of be_node_tag. */
266 int is_be_node(const ir_node *irn)
268 return get_op_tag(get_irn_op(irn)) == &be_node_tag;
/* Map a node's global opcode back into the beo_* range; beo_NoBeOp for foreign nodes. */
271 be_opcode_t be_get_irn_opcode(const ir_node *irn)
273 return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp;
277 * Skip Proj nodes and return their Proj numbers.
279 * If *node is a Proj or Proj(Proj) node, skip it.
281 * @param node points to the node to be skipped
283 * @return 0 if *node was no Proj node, its Proj number else.
285 static int redir_proj(const ir_node **node)
287 const ir_node *n = *node;
/* Proj(Proj(x)) case: step over the inner Proj too; its predecessor must be mode_T. */
292 *node = irn = get_Proj_pred(n);
294 assert(get_irn_mode(irn) == mode_T);
295 *node = get_Proj_pred(irn);
/* Return the proj number of the OUTERMOST Proj that was skipped. */
297 return get_Proj_proj(n);
/* Find the be attribute that owns irn's register slot: either irn's mode_T
 * predecessor (when irn is a Proj) or irn itself (non-mode_T be node).
 * *the_pos receives the out-slot index; NULL is returned for foreign nodes. */
303 static be_node_attr_t *retrieve_irn_attr(const ir_node *irn, int *the_pos)
306 be_node_attr_t *res = NULL;
307 int *pos = the_pos ? the_pos : &dummy;
311 ir_node *pred = get_Proj_pred(irn);
312 int p = get_Proj_proj(irn);
314 if(is_be_node(pred)) {
315 assert(get_irn_mode(pred) == mode_T);
317 res = get_irn_attr(pred);
318 assert(p >= 0 && p < res->max_reg_data && "illegal proj number");
/* Non-Proj be node with a single (implicit) out slot 0. */
322 else if(is_be_node(irn) && get_irn_mode(irn) != mode_T) {
323 be_node_attr_t *a = get_irn_attr(irn);
324 if(a->max_reg_data > 0) {
/* Convenience wrapper: the per-slot register data for irn, or NULL if none. */
333 static be_reg_data_t *retrieve_reg_data(const ir_node *irn)
336 be_node_attr_t *a = retrieve_irn_attr(irn, &pos);
337 return a ? &a->reg_data[pos] : NULL;
/* arch_irn_ops callback: record the register assigned to irn's out slot. */
341 be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
343 be_reg_data_t *r = retrieve_reg_data(irn);
/* Build a be_Spill(frame, to_spill) producing mode_M; constrains the frame
 * input to cls_frame and the spilled value to cls. */
350 ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
351 ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *to_spill)
359 res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in);
360 a = init_node_attr(res, 2);
364 be_node_set_reg_class(res, be_pos_Spill_frame, cls_frame);
365 be_node_set_reg_class(res, be_pos_Spill_val, cls);
/* Build a be_Reload(frame, mem) producing `mode`; the result (pos -1) is
 * constrained to cls and marked rematerializable. */
369 ir_node *be_new_Reload(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
370 ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *mem, ir_mode *mode)
377 res = new_ir_node(NULL, irg, bl, op_be_Reload, mode, 2, in);
379 init_node_attr(res, 2);
380 be_node_set_reg_class(res, -1, cls);
381 be_node_set_reg_class(res, be_pos_Reload_frame, cls_frame);
382 be_node_set_flags(res, -1, arch_irn_flags_rematerializable);
/* Input accessors for Reload and Spill nodes. */
386 ir_node *be_get_Reload_mem(const ir_node *irn)
388 assert(be_is_Reload(irn));
389 return get_irn_n(irn, be_pos_Reload_mem);
392 ir_node *be_get_Reload_frame(const ir_node *irn)
394 assert(be_is_Reload(irn));
395 return get_irn_n(irn, be_pos_Reload_frame);
398 ir_node *be_get_Spill_val(const ir_node *irn)
400 assert(be_is_Spill(irn));
401 return get_irn_n(irn, be_pos_Spill_val);
403 ir_node *be_get_Spill_frame(const ir_node *irn)
405 assert(be_is_Spill(irn));
406 return get_irn_n(irn, be_pos_Spill_frame);
/* Build a be_Perm over n values of class cls; every input and the matching
 * output are constrained to the same class. */
409 ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
412 ir_node *irn = new_ir_node(NULL, irg, bl, op_be_Perm, mode_T, n, in);
413 init_node_attr(irn, n);
414 for(i = 0; i < n; ++i) {
415 be_node_set_reg_class(irn, i, cls);
416 be_node_set_reg_class(irn, OUT_POS(i), cls);
/* Build a be_MemPerm permuting n stack slots. Input 0 is an extra operand
 * constrained to the stack-pointer class; the n permuted values follow at
 * positions 1..n (presumably real_in[0] is set on an elided line — verify). */
422 ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
425 ir_node *frame = get_irg_frame(irg);
426 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
428 const arch_register_t *sp = arch_env->isa->sp;
429 be_memperm_attr_t *attr;
/* NOTE(review): alloca for n+1 pointers — fine for small n, unbounded n would
 * risk stack overflow. */
432 real_in = alloca((n+1) * sizeof(real_in[0]));
434 memcpy(&real_in[1], in, n * sizeof(real_in[0]));
436 irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n+1, real_in);
438 init_node_attr(irn, n + 1);
439 be_node_set_reg_class(irn, 0, sp->reg_class);
440 for(i = 0; i < n; ++i) {
441 be_node_set_reg_class(irn, i + 1, cls_frame);
442 be_node_set_reg_class(irn, OUT_POS(i), cls_frame);
/* Allocate zeroed in/out entity arrays on the graph obstack; filled in later
 * via be_set_MemPerm_{in,out}_entity(). */
445 attr = get_irn_attr(irn);
447 attr->in_entities = obstack_alloc(irg->obst, n * sizeof(attr->in_entities[0]));
448 memset(attr->in_entities, 0, n * sizeof(attr->in_entities[0]));
449 attr->out_entities = obstack_alloc(irg->obst, n*sizeof(attr->out_entities[0]));
450 memset(attr->out_entities, 0, n*sizeof(attr->out_entities[0]));
/* Build a be_Copy of op within class cls; result keeps op's mode. */
456 ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op)
462 res = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), 1, in);
463 init_node_attr(res, 1);
464 be_node_set_reg_class(res, 0, cls);
465 be_node_set_reg_class(res, OUT_POS(0), cls);
469 ir_node *be_get_Copy_op(const ir_node *cpy) {
470 return get_irn_n(cpy, be_pos_Copy_op);
473 void be_set_Copy_op(ir_node *cpy, ir_node *op) {
474 set_irn_n(cpy, be_pos_Copy_op, op);
/* Build a be_Keep holding n values alive (mode_ANY, K = keep flag on the op). */
477 ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
482 irn = new_ir_node(NULL, irg, bl, op_be_Keep, mode_ANY, n, in);
483 init_node_attr(irn, n);
484 for(i = 0; i < n; ++i) {
485 be_node_set_reg_class(irn, i, cls);
/* Build a be_Call with fixed inputs (mem, sp, ptr) followed by the n real
 * arguments; reserves register slots for max(n_outs, total inputs). */
491 ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr,
492 int n_outs, int n, ir_node *in[], ir_type *call_tp)
495 int real_n = be_pos_Call_first_arg + n;
/* Assemble the full input array: the three fixed operands then the arguments. */
499 NEW_ARR_A(ir_node *, real_in, real_n);
500 real_in[be_pos_Call_mem] = mem;
501 real_in[be_pos_Call_sp] = sp;
502 real_in[be_pos_Call_ptr] = ptr;
503 memcpy(&real_in[be_pos_Call_first_arg], in, n * sizeof(in[0]));
505 irn = new_ir_node(dbg, irg, bl, op_be_Call, mode_T, real_n, real_in);
506 a = init_node_attr(irn, (n_outs > real_n ? n_outs : real_n));
508 a->call_tp = call_tp;
512 /* Gets the call entity or NULL if this is no static call. */
513 ir_entity *be_Call_get_entity(const ir_node *call) {
514 be_call_attr_t *a = get_irn_attr(call);
515 assert(be_is_Call(call));
519 /* Sets the call entity. */
520 void be_Call_set_entity(ir_node *call, ir_entity *ent) {
521 be_call_attr_t *a = get_irn_attr(call);
522 assert(be_is_Call(call));
526 /* Gets the call type. */
527 ir_type *be_Call_get_type(ir_node *call) {
528 be_call_attr_t *a = get_irn_attr(call);
529 assert(be_is_Call(call));
533 /* Sets the call type. */
534 void be_Call_set_type(ir_node *call, ir_type *call_tp) {
535 be_call_attr_t *a = get_irn_attr(call);
536 assert(be_is_Call(call));
537 a->call_tp = call_tp;
540 /* Construct a new be_Return. */
541 ir_node *be_new_Return(dbg_info *dbg, ir_graph *irg, ir_node *bl, int n_res, int n, ir_node *in[])
544 ir_node *irn = new_ir_node(dbg, irg, bl, op_be_Return, mode_X, n, in);
545 init_node_attr(irn, n);
/* n counts ALL inputs (incl. mem/sp style operands); n_res only the real
 * return values — stored separately for be_Return_get_n_rets(). */
546 a = get_irn_attr(irn);
547 a->num_ret_vals = n_res;
552 /* Returns the number of real returns values */
553 int be_Return_get_n_rets(ir_node *ret)
555 be_return_attr_t *a = get_irn_attr(ret);
556 return a->num_ret_vals;
/* Build a be_IncSP: new_sp = old_sp +/- offset. The result is pinned to the
 * concrete stack register and flagged ignore|modify_sp. */
559 ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, int offset)
566 irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, sizeof(in) / sizeof(in[0]), in);
567 a = init_node_attr(irn, 1);
570 be_node_set_flags(irn, -1, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
572 /* Set output constraint to stack register. */
573 be_node_set_reg_class(irn, 0, sp->reg_class);
574 be_set_constr_single_reg(irn, BE_OUT_POS(0), sp);
575 be_node_set_irn_reg(NULL, irn, sp);
/* Build a be_AddSP(old_sp, size): grows the stack by a dynamic amount. */
580 ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz)
584 ir_node *in[be_pos_AddSP_last];
586 in[be_pos_AddSP_old_sp] = old_sp;
587 in[be_pos_AddSP_size] = sz;
589 irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, be_pos_AddSP_last, in);
590 a = init_node_attr(irn, be_pos_AddSP_last);
592 be_node_set_flags(irn, OUT_POS(pn_be_AddSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
594 /* Set output constraint to stack register. */
595 be_set_constr_single_reg(irn, be_pos_AddSP_old_sp, sp);
596 be_node_set_reg_class(irn, be_pos_AddSP_size, arch_register_get_class(sp));
597 be_set_constr_single_reg(irn, OUT_POS(pn_be_AddSP_res), sp);
/* Pre-assign the stack register to the result slot. */
598 a->reg_data[pn_be_AddSP_res].reg = sp;
/* Build a be_SubSP(old_sp, size): mirror image of be_new_AddSP. */
603 ir_node *be_new_SubSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz)
607 ir_node *in[be_pos_SubSP_last];
609 in[be_pos_SubSP_old_sp] = old_sp;
610 in[be_pos_SubSP_size] = sz;
612 irn = new_ir_node(NULL, irg, bl, op_be_SubSP, mode_T, be_pos_SubSP_last, in);
613 a = init_node_attr(irn, be_pos_SubSP_last);
615 be_node_set_flags(irn, OUT_POS(pn_be_SubSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
617 /* Set output constraint to stack register. */
618 be_set_constr_single_reg(irn, be_pos_SubSP_old_sp, sp);
619 be_node_set_reg_class(irn, be_pos_SubSP_size, arch_register_get_class(sp));
620 be_set_constr_single_reg(irn, OUT_POS(pn_be_SubSP_res), sp);
621 a->reg_data[pn_be_SubSP_res].reg = sp;
/* Build a be_SetSP(old_sp, op, mem): overwrite the stack pointer with op. */
626 ir_node *be_new_SetSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *op, ir_node *mem)
635 irn = new_ir_node(NULL, irg, bl, op_be_SetSP, get_irn_mode(old_sp), 3, in);
636 a = init_node_attr(irn, 3);
638 be_node_set_flags(irn, OUT_POS(0), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
640 /* Set output constraint to stack register. */
641 be_set_constr_single_reg(irn, OUT_POS(0), sp);
/* NOTE(review): these reuse be_pos_AddSP_* constants inside a SetSP node —
 * copy-paste smell; confirm the SetSP input layout really matches AddSP's. */
642 be_node_set_reg_class(irn, be_pos_AddSP_size, sp->reg_class);
643 be_node_set_reg_class(irn, be_pos_AddSP_old_sp, sp->reg_class);
/* Build a be_StackParam: load an incoming argument from the stack frame via
 * the frame pointer; result constrained to cls. */
648 ir_node *be_new_StackParam(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, ir_entity *ent)
654 in[0] = frame_pointer;
655 irn = new_ir_node(NULL, irg, bl, op_be_StackParam, mode, 1, in);
656 a = init_node_attr(irn, 1);
659 be_node_set_reg_class(irn, 0, cls_frame);
660 be_node_set_reg_class(irn, OUT_POS(0), cls);
/* Build a be_RegParams: zero-input node whose n_outs projections represent
 * the register-passed arguments. */
664 ir_node *be_new_RegParams(ir_graph *irg, ir_node *bl, int n_outs)
669 irn = new_ir_node(NULL, irg, bl, op_be_RegParams, mode_T, 0, in);
670 init_node_attr(irn, n_outs);
/* Build a be_FrameLoad(mem, frame) reading entity ent from the frame. */
674 ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
675 ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_entity *ent)
683 irn = new_ir_node(NULL, irg, bl, op_be_FrameLoad, mode_T, 2, in);
684 a = init_node_attr(irn, 3);
687 be_node_set_reg_class(irn, 1, cls_frame);
688 be_node_set_reg_class(irn, OUT_POS(pn_Load_res), cls_data);
/* Build a be_FrameStore(mem, frame, data) writing entity ent into the frame. */
692 ir_node *be_new_FrameStore(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
693 ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_node *data, ir_entity *ent)
702 irn = new_ir_node(NULL, irg, bl, op_be_FrameStore, mode_T, 3, in);
703 a = init_node_attr(irn, 3);
706 be_node_set_reg_class(irn, 1, cls_frame);
707 be_node_set_reg_class(irn, 2, cls_data);
/* Build a be_FrameAddr: address of entity ent relative to the frame pointer.
 * Runs through optimize_node() so equal FrameAddrs can be CSEd
 * (cf. FrameAddr_cmp_attr registered in be_node_init). */
711 ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_entity *ent)
718 irn = new_ir_node(NULL, irg, bl, op_be_FrameAddr, get_irn_mode(frame), 1, in);
719 a = init_node_attr(irn, 1);
722 be_node_set_reg_class(irn, 0, cls_frame);
723 be_node_set_reg_class(irn, OUT_POS(0), cls_frame);
725 return optimize_node(irn);
/* Build a be_CopyKeep: copy src (input 0) while keeping n extra values alive
 * (inputs 1..n). */
728 ir_node *be_new_CopyKeep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, int n, ir_node *in_keep[], ir_mode *mode)
731 ir_node **in = (ir_node **) alloca((n + 1) * sizeof(in[0]));
734 memcpy(&in[1], in_keep, n * sizeof(in[0]));
735 irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, n + 1, in);
736 init_node_attr(irn, n + 1);
737 be_node_set_reg_class(irn, OUT_POS(0), cls);
738 be_node_set_reg_class(irn, 0, cls);
/* Convenience wrapper for the common single-keep case. */
743 ir_node *be_new_CopyKeep_single(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, ir_node *keep, ir_mode *mode)
748 return be_new_CopyKeep(cls, irg, bl, src, 1, in, mode);
751 ir_node *be_get_CopyKeep_op(const ir_node *cpy) {
752 return get_irn_n(cpy, be_pos_CopyKeep_op);
755 void be_set_CopyKeep_op(ir_node *cpy, ir_node *op) {
756 set_irn_n(cpy, be_pos_CopyKeep_op, op);
/* Build a be_Barrier over n values (scheduling barrier, mode_T). */
759 ir_node *be_new_Barrier(ir_graph *irg, ir_node *bl, int n, ir_node *in[])
763 irn = new_ir_node(NULL, irg, bl, op_be_Barrier, mode_T, n, in);
764 init_node_attr(irn, n);
/* Opcode predicates — one per be node kind; safe on any node because
 * be_get_irn_opcode() yields beo_NoBeOp for foreign nodes. */
768 int be_is_Spill (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Spill ; }
769 int be_is_Reload (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Reload ; }
770 int be_is_Copy (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Copy ; }
771 int be_is_CopyKeep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_CopyKeep ; }
772 int be_is_Perm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Perm ; }
773 int be_is_MemPerm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_MemPerm ; }
774 int be_is_Keep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Keep ; }
775 int be_is_Call (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Call ; }
776 int be_is_Return (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Return ; }
777 int be_is_IncSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_IncSP ; }
778 int be_is_SetSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SetSP ; }
779 int be_is_AddSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_AddSP ; }
780 int be_is_RegParams (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_RegParams ; }
781 int be_is_StackParam (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_StackParam ; }
782 int be_is_FrameAddr (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameAddr ; }
783 int be_is_FrameLoad (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameLoad ; }
784 int be_is_FrameStore (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameStore ; }
785 int be_is_Barrier (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Barrier ; }
/* True iff irn's attribute starts with a be_frame_attr_t (opcode whitelist;
 * the case labels are elided in this view). */
787 int be_has_frame_entity(const ir_node *irn)
789 switch(be_get_irn_opcode(irn)) {
/* Frame entity of a frame-using node, or presumably NULL otherwise. */
802 ir_entity *be_get_frame_entity(const ir_node *irn)
804 if (be_has_frame_entity(irn)) {
805 be_frame_attr_t *a = get_irn_attr(irn);
/* Frame offset of a frame-using node (fallback value elided in this view). */
811 int be_get_frame_offset(const ir_node *irn)
813 assert(is_be_node(irn));
814 if (be_has_frame_entity(irn)) {
815 be_frame_attr_t *a = get_irn_attr(irn);
/* MemPerm entity accessors. Index n refers to the n-th permuted value; the
 * extra stack-pointer operand at input 0 is excluded (see entity_arity). */
821 void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity *ent)
823 be_memperm_attr_t *attr = get_irn_attr(irn);
825 assert(be_is_MemPerm(irn));
826 assert(n < be_get_MemPerm_entity_arity(irn));
828 attr->in_entities[n] = ent;
831 ir_entity* be_get_MemPerm_in_entity(const ir_node* irn, int n)
833 be_memperm_attr_t *attr = get_irn_attr(irn);
835 assert(be_is_MemPerm(irn));
836 assert(n < be_get_MemPerm_entity_arity(irn));
838 return attr->in_entities[n];
841 void be_set_MemPerm_out_entity(const ir_node *irn, int n, ir_entity *ent)
843 be_memperm_attr_t *attr = get_irn_attr(irn);
845 assert(be_is_MemPerm(irn));
846 assert(n < be_get_MemPerm_entity_arity(irn));
848 attr->out_entities[n] = ent;
851 ir_entity* be_get_MemPerm_out_entity(const ir_node* irn, int n)
853 be_memperm_attr_t *attr = get_irn_attr(irn);
855 assert(be_is_MemPerm(irn));
856 assert(n < be_get_MemPerm_entity_arity(irn));
858 return attr->out_entities[n];
/* Number of permuted values: arity minus the stack-pointer operand (input 0). */
861 int be_get_MemPerm_entity_arity(const ir_node *irn)
863 return get_irn_arity(irn) - 1;
/* `limited` callback installed into arch_register_req_t by the setters below:
 * fills bs with the admissible registers according to req->kind. */
866 static void be_limited(void *data, bitset_t *bs)
868 be_req_t *req = data;
/* old_limited: delegate to the wrapped original callback; the negate variant
 * then flips the bitset (flip call elided in this view). */
871 case be_req_kind_negate_old_limited:
872 case be_req_kind_old_limited:
873 req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs);
874 if(req->kind == be_req_kind_negate_old_limited)
/* single_reg: exactly one admissible register. */
877 case be_req_kind_single_reg:
878 bitset_clear_all(bs);
879 bitset_set(bs, req->x.single_reg->index);
/* Resolve a (possibly negative = output) position to the matching be_req_t:
 * outputs live in rd->req, inputs in rd->in_req. */
884 static INLINE be_req_t *get_req(ir_node *irn, int pos)
886 int idx = pos < 0 ? -(pos + 1) : pos;
887 be_node_attr_t *a = get_irn_attr(irn);
888 be_reg_data_t *rd = &a->reg_data[idx];
889 be_req_t *r = pos < 0 ? &rd->req : &rd->in_req;
891 assert(is_be_node(irn));
892 assert(!(pos >= 0) || pos < get_irn_arity(irn));
/* NOTE(review): `<=` admits idx == max_reg_data, one past the valid slot
 * range — this assert is likely meant to be `<`. */
893 assert(!(pos < 0) || -(pos + 1) <= a->max_reg_data);
/* Constrain position pos to exactly one register. */
898 void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg)
900 be_req_t *r = get_req(irn, pos);
902 r->kind = be_req_kind_single_reg;
903 r->x.single_reg = reg;
904 r->req.limited = be_limited;
905 r->req.limited_env = r;
906 r->req.type = arch_register_req_type_limited;
907 r->req.cls = reg->reg_class;
/* Adopt an existing limited requirement, wrapping its callback in be_limited. */
910 void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req)
912 be_req_t *r = get_req(irn, pos);
914 assert(arch_register_req_is(req, limited));
916 r->kind = be_req_kind_old_limited;
917 r->req.limited = be_limited;
918 r->req.limited_env = r;
919 r->req.type = arch_register_req_type_limited;
920 r->req.cls = req->cls;
922 r->x.old_limited.old_limited = req->limited;
923 r->x.old_limited.old_limited_env = req->limited_env;
/* Set the arch_irn_flags on position pos (pos -1 = the single output). */
926 void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags)
928 be_req_t *r = get_req(irn, pos);
/* Set the register class; upgrades req type none -> normal when a class is given. */
932 void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls)
934 be_req_t *r = get_req(irn, pos);
939 r->req.type = arch_register_req_type_none;
940 else if (r->req.type == arch_register_req_type_none)
941 r->req.type = arch_register_req_type_normal;
/* Override the requirement type directly. */
944 void be_node_set_req_type(ir_node *irn, int pos, arch_register_req_type_t type)
946 be_req_t *r = get_req(irn, pos);
/* IncSP accessors: input 0 = previous stack pointer, input 1 = memory. */
950 ir_node *be_get_IncSP_pred(ir_node *irn) {
951 assert(be_is_IncSP(irn));
952 return get_irn_n(irn, 0);
955 void be_set_IncSP_pred(ir_node *incsp, ir_node *pred) {
956 assert(be_is_IncSP(incsp));
957 set_irn_n(incsp, 0, pred);
960 ir_node *be_get_IncSP_mem(ir_node *irn) {
961 assert(be_is_IncSP(irn));
962 return get_irn_n(irn, 1);
/* Offset by which this IncSP moves the stack pointer (stored in be_stack_attr_t). */
965 void be_set_IncSP_offset(ir_node *irn, int offset)
967 be_stack_attr_t *a = get_irn_attr(irn);
968 assert(be_is_IncSP(irn));
972 int be_get_IncSP_offset(const ir_node *irn)
974 be_stack_attr_t *a = get_irn_attr(irn);
975 assert(be_is_IncSP(irn));
/* Spill irn: derive its register class and the frame class automatically and
 * build the be_Spill in irn's own block. */
979 ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn)
981 ir_node *bl = get_nodes_block(irn);
982 ir_graph *irg = get_irn_irg(bl);
983 ir_node *frame = get_irg_frame(irg);
984 const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
985 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
988 spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
/* Build a be_Reload of `spill` and place it in the schedule: before `insert`
 * when insert is an instruction, or at the end of the block (after skipping
 * trailing control flow) when insert is a Block. */
992 ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
995 ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
996 ir_graph *irg = get_irn_irg(bl);
997 ir_node *frame = get_irg_frame(irg);
998 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
/* The spill value may also be a memory Phi joining several spills. */
1000 assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
1002 reload = be_new_Reload(cls, cls_frame, irg, bl, frame, spill, mode);
1004 if (is_Block(insert)) {
1005 insert = sched_skip(insert, 0, sched_skip_cf_predicator, (void *) arch_env);
1006 sched_add_after(insert, reload);
1010 sched_add_before(insert, reload);
1017 | _ \ ___ __ _ | _ \ ___ __ _ ___
1018 | |_) / _ \/ _` | | |_) / _ \/ _` / __|
1019 | _ < __/ (_| | | _ < __/ (_| \__ \
1020 |_| \_\___|\__, | |_| \_\___|\__, |___/
/* Copy the requirement for out slot out_pos into *req; "none" for slots
 * beyond max_reg_data. be_Copy results additionally request the same
 * register as their operand (should_be_same). */
1026 static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos)
1028 const be_node_attr_t *a = get_irn_attr(irn);
1030 if(out_pos < a->max_reg_data) {
1031 memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));
1033 if(be_is_Copy(irn)) {
1034 req->type |= arch_register_req_type_should_be_same;
1035 req->other_same = be_get_Copy_op(irn);
1039 req->type = arch_register_req_type_none;
/* Copy the requirement for input pos into *req; "none" when pos is out of range. */
1046 static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos)
1048 const be_node_attr_t *a = get_irn_attr(irn);
1050 if(pos < get_irn_arity(irn) && pos < a->max_reg_data)
1051 memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));
1053 req->type = arch_register_req_type_none;
/* arch_irn_ops callback: dispatch a requirement query to the out (mode_T /
 * Proj) or in side of the node. */
1060 static const arch_register_req_t *
1061 be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1066 if (get_irn_mode(irn) == mode_T)
1069 out_pos = redir_proj((const ir_node **)&irn);
1070 assert(is_be_node(irn));
1071 return put_out_reg_req(req, irn, out_pos);
1075 if (is_be_node(irn)) {
1077 For spills and reloads, we return "none" as requirement for frame pointer,
1078 so every input is ok. Some backends need this (e.g. STA). We use an arbitrary
1079 large number as pos, so put_in_reg_req will return "none" as requirement.
1081 if ((be_is_Spill(irn) && pos == be_pos_Spill_frame) ||
1082 (be_is_Reload(irn) && pos == be_pos_Reload_frame))
1083 return put_in_reg_req(req, irn, INT_MAX);
1085 return put_in_reg_req(req, irn, pos);
/* arch_irn_ops callback: the register assigned to irn, or NULL if none yet. */
1093 const arch_register_t *
1094 be_node_get_irn_reg(const void *_self, const ir_node *irn)
1096 be_reg_data_t *r = retrieve_reg_data(irn);
1097 return r ? r->reg : NULL;
/* arch_irn_ops callback: classify a (possibly Proj-wrapped) be node. */
1100 static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
1102 redir_proj((const ir_node **) &irn);
1104 switch(be_get_irn_opcode(irn)) {
1105 #define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b
1107 XXX(Reload, reload);
1110 XXX(Return, branch);
1111 XXX(StackParam, stackparam);
1114 return arch_irn_class_normal;
/* arch_irn_ops callback: flags stored in the node's (out-)requirement. */
1120 static arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn)
1122 be_reg_data_t *r = retrieve_reg_data(irn);
1123 return r ? r->req.flags : 0;
/* arch_irn_ops adapters around the plain be_* frame accessors above. */
1126 static ir_entity *be_node_get_frame_entity(const void *self, const ir_node *irn)
1128 return be_get_frame_entity(irn);
1131 static void be_node_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent)
1135 assert(be_has_frame_entity(irn));
1137 a = get_irn_attr(irn);
1141 static void be_node_set_frame_offset(const void *self, ir_node *irn, int offset)
1143 if(be_has_frame_entity(irn)) {
1144 be_frame_attr_t *a = get_irn_attr(irn);
/* Only IncSP changes the stack pointer by a static amount. */
1149 static int be_node_get_sp_bias(const void *self, const ir_node *irn)
1151 return be_is_IncSP(irn) ? be_get_IncSP_offset(irn) : 0;
1155 ___ ____ _ _ _ _ _ _
1156 |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
1157 | || |_) | \| | | |_| |/ _` | '_ \ / _` | |/ _ \ '__|
1158 | || _ <| |\ | | _ | (_| | | | | (_| | | __/ |
1159 |___|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
/* vtable wiring the callbacks above into the arch interface. */
1163 static const arch_irn_ops_if_t be_node_irn_ops_if = {
1164 be_node_get_irn_reg_req,
1165 be_node_set_irn_reg,
1166 be_node_get_irn_reg,
1169 be_node_get_frame_entity,
1170 be_node_set_frame_entity,
1171 be_node_set_frame_offset,
1172 be_node_get_sp_bias,
1173 NULL, /* get_inverse */
1174 NULL, /* get_op_estimated_cost */
1175 NULL, /* possible_memory_operand */
1176 NULL, /* perform_memory_operand */
1179 static const arch_irn_ops_t be_node_irn_ops = {
/* Handler entry point: be nodes (after skipping Projs) answer with our ops. */
1183 const void *be_node_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
1185 redir_proj((const ir_node **) &irn);
1186 return is_be_node(irn) ? &be_node_irn_ops : NULL;
1189 const arch_irn_handler_t be_node_irn_handler = {
1194 ____ _ _ ___ ____ _ _ _ _ _ _
1195 | _ \| |__ (_) |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
1196 | |_) | '_ \| | | || |_) | \| | | |_| |/ _` | '_ \ / _` | |/ _ \ '__|
1197 | __/| | | | | | || _ <| |\ | | _ | (_| | | | | (_| | | __/ |
1198 |_| |_| |_|_| |___|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
/* Phi handler state: embeds both the handler and the ops so container_of can
 * recover it from either pointer; `regs` (declared on an elided line) maps
 * Phi -> assigned register. */
1203 arch_irn_handler_t irn_handler;
1204 arch_irn_ops_t irn_ops;
1205 const arch_env_t *arch_env;
1209 #define get_phi_handler_from_handler(h) container_of(h, phi_handler_t, irn_handler)
1210 #define get_phi_handler_from_ops(h) container_of(h, phi_handler_t, irn_ops)
/* This handler only takes responsibility for data Phis. */
1212 static const void *phi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1214 const phi_handler_t *h = get_phi_handler_from_handler(handler);
1215 return is_Phi(irn) && mode_is_datab(get_irn_mode(irn)) ? &h->irn_ops : NULL;
1219 * Get register class of a Phi.
/* DFS over Phi operands to find a non-Phi operand whose requirement defines
 * the Phi's; the `visited` pset (created lazily) breaks Phi cycles. */
1222 static const arch_register_req_t *get_Phi_reg_req_recursive(const phi_handler_t *h, arch_register_req_t *req, const ir_node *phi, pset **visited)
1224 int n = get_irn_arity(phi);
1228 if(*visited && pset_find_ptr(*visited, phi))
/* First pass: any direct non-Phi operand settles the requirement immediately. */
1231 for(i = 0; i < n; ++i) {
1232 op = get_irn_n(phi, i);
1234 return arch_get_register_req(h->arch_env, req, op, BE_OUT_POS(0));
1238 The operands of that Phi were all Phis themselves.
1239 We have to start a DFS for a non-Phi argument now.
1242 *visited = pset_new_ptr(16);
1244 pset_insert_ptr(*visited, phi);
1246 for(i = 0; i < n; ++i) {
1247 op = get_irn_n(phi, i);
1248 if(get_Phi_reg_req_recursive(h, req, op, visited))
/* arch_irn_ops callback for Phis: take the requirement from any reachable
 * non-Phi operand, then relax it to "normal" (constraints don't propagate). */
1255 static const arch_register_req_t *phi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1257 phi_handler_t *phi_handler = get_phi_handler_from_ops(self);
1258 pset *visited = NULL;
1260 get_Phi_reg_req_recursive(phi_handler, req, irn, &visited);
1261 /* Set the requirements type to normal, since an operand of the Phi could have had constraints. */
1262 req->type = arch_register_req_type_normal;
1269 static void phi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1271 phi_handler_t *h = get_phi_handler_from_ops(self);
1272 pmap_insert(h->regs, irn, (void *) reg);
1275 static const arch_register_t *phi_get_irn_reg(const void *self, const ir_node *irn)
1277 phi_handler_t *h = get_phi_handler_from_ops(self);
1278 return pmap_get(h->regs, (void *) irn);
1281 static arch_irn_class_t phi_classify(const void *_self, const ir_node *irn)
1283 return arch_irn_class_normal;
1286 static arch_irn_flags_t phi_get_flags(const void *_self, const ir_node *irn)
1288 return arch_irn_flags_none;
/* Stub callbacks: a Phi has no frame entity and no stack-pointer effect.
 * NOTE(review): bodies are not visible here — presumably return NULL/0
 * resp. do nothing; confirm against the full file. */
1291 static ir_entity *phi_get_frame_entity(const void *_self, const ir_node *irn)
1296 static void phi_set_frame_entity(const void *_self, ir_node *irn, ir_entity *ent)
1300 static void phi_set_frame_offset(const void *_self, ir_node *irn, int bias)
1304 static int phi_get_sp_bias(const void* self, const ir_node *irn)
/** The arch_irn_ops interface table used for Phi nodes. */
1309 static const arch_irn_ops_if_t phi_irn_ops = {
1310 phi_get_irn_reg_req,
1315 phi_get_frame_entity,
1316 phi_set_frame_entity,
1317 phi_set_frame_offset,
1319 NULL, /* get_inverse */
1320 NULL, /* get_op_estimated_cost */
1321 NULL, /* possible_memory_operand */
1322 NULL, /* perform_memory_operand */
1325 static const arch_irn_handler_t phi_irn_handler = {
1329 arch_irn_handler_t *be_phi_handler_new(const arch_env_t *arch_env)
1331 phi_handler_t *h = xmalloc(sizeof(h[0]));
1332 h->irn_handler.get_irn_ops = phi_get_irn_ops;
1333 h->irn_ops.impl = &phi_irn_ops;
1334 h->arch_env = arch_env;
1335 h->regs = pmap_create();
1336 return (arch_irn_handler_t *) h;
/**
 * Destroys a Phi handler created by be_phi_handler_new().
 */
1339 void be_phi_handler_free(arch_irn_handler_t *handler)
1341 phi_handler_t *h = (void *) handler;
/* release the Phi->register map; NOTE(review): the handler struct itself
 * is presumably freed on a line not visible here — confirm */
1342 pmap_destroy(h->regs);
1346 const void *be_phi_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
1348 phi_handler_t *phi_handler = get_phi_handler_from_handler(self);
1349 return is_Phi(irn) ? &phi_handler->irn_ops : NULL;
1352 void be_phi_handler_reset(arch_irn_handler_t *handler)
1354 phi_handler_t *h = get_phi_handler_from_handler(handler);
1356 pmap_destroy(h->regs);
1357 h->regs = pmap_create();
1362 | \ | | ___ __| | ___ | _ \ _ _ _ __ ___ _ __ (_)_ __ __ _
1363 | \| |/ _ \ / _` |/ _ \ | | | | | | | '_ ` _ \| '_ \| | '_ \ / _` |
1364 | |\ | (_) | (_| | __/ | |_| | |_| | | | | | | |_) | | | | | (_| |
1365 |_| \_|\___/ \__,_|\___| |____/ \__,_|_| |_| |_| .__/|_|_| |_|\__, |
1370 * Dumps a register requirement to a file.
1372 static void dump_node_req(FILE *f, int idx, be_req_t *req)
1375 int did_something = 0;
/* prefix starts as "#<idx> " and is presumably switched after the first
 * item on lines not visible here — confirm */
1377 const char *prefix = buf;
1379 snprintf(buf, sizeof(buf), "#%d ", idx);
1380 buf[sizeof(buf) - 1] = '\0';
/* print each set flag bit by scanning up to the highest flag */
1382 if(req->flags != arch_irn_flags_none) {
1383 fprintf(f, "%sflags: ", prefix);
1385 for(i = arch_irn_flags_none; i <= log2_ceil(arch_irn_flags_last); ++i) {
1386 if(req->flags & (1 << i)) {
1387 fprintf(f, "%s%s", prefix, arch_irn_flag_str(1 << i));
/* print the formatted register requirement if a class is set */
1395 if(req->req.cls != 0) {
1398 arch_register_req_format(tmp, sizeof(tmp), &req->req);
1399 fprintf(f, "%s", tmp);
1408 * Dumps node register requirements to a file.
1410 static void dump_node_reqs(FILE *f, ir_node *irn)
1413 be_node_attr_t *a = get_irn_attr(irn);
/* assigned registers per result; NOTE(review): a NULL-reg guard likely
 * sits between these lines — confirm rd->reg is non-NULL when printed */
1415 fprintf(f, "registers: \n");
1416 for(i = 0; i < a->max_reg_data; ++i) {
1417 be_reg_data_t *rd = &a->reg_data[i];
1419 fprintf(f, "#%d: %s\n", i, rd->reg->name);
/* input requirements, one per operand slot */
1422 fprintf(f, "in requirements\n");
1423 for(i = 0; i < a->max_reg_data; ++i) {
1424 dump_node_req(f, i, &a->reg_data[i].in_req);
/* output requirements, one per result slot */
1427 fprintf(f, "\nout requirements\n");
1428 for(i = 0; i < a->max_reg_data; ++i) {
1429 dump_node_req(f, i, &a->reg_data[i].req);
1434 * ir_op-Operation: dump a be node to file
1436 static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
1438 be_node_attr_t *at = get_irn_attr(irn);
1440 assert(is_be_node(irn));
/* dispatch on what the dumper asks for */
1443 case dump_node_opcode_txt:
/* NOTE(review): non-literal format string; op names contain no '%' in
 * practice, but fputs/"%s" would be safer */
1444 fprintf(f, get_op_name(get_irn_op(irn)));
1446 case dump_node_mode_txt:
1447 fprintf(f, get_mode_name(get_irn_mode(irn)));
1449 case dump_node_nodeattr_txt:
/* full info: requirements plus opcode-specific attributes */
1451 case dump_node_info_txt:
1452 dump_node_reqs(f, irn);
1454 if(be_has_frame_entity(irn)) {
1455 be_frame_attr_t *a = (be_frame_attr_t *) at;
1457 int bits = get_type_size_bits(get_entity_type(a->ent));
1458 ir_fprintf(f, "frame entity: %+F, offset 0x%x (%d), size 0x%x (%d) bits\n",
1459 a->ent, a->offset, a->offset, bits, bits);
1464 switch(be_get_irn_opcode(irn)) {
/* IncSP: print symbolic expand/shrink markers or the literal offset */
1467 be_stack_attr_t *a = (be_stack_attr_t *) at;
1468 if (a->offset == BE_STACK_FRAME_SIZE_EXPAND)
1469 fprintf(f, "offset: FRAME_SIZE\n");
1470 else if(a->offset == BE_STACK_FRAME_SIZE_SHRINK)
1471 fprintf(f, "offset: -FRAME SIZE\n");
/* NOTE(review): offset is declared int but printed with %u — the
 * specifier should be %d; fix when the full function is in view */
1473 fprintf(f, "offset: %u\n", a->offset);
/* Call: print the callee for static calls */
1478 be_call_attr_t *a = (be_call_attr_t *) at;
1481 fprintf(f, "\ncalling: %s\n", get_entity_name(a->ent));
/* MemPerm: print the in/out entity of every permuted slot */
1487 for(i = 0; i < be_get_MemPerm_entity_arity(irn); ++i) {
1488 ir_entity *in, *out;
1489 in = be_get_MemPerm_in_entity(irn, i);
1490 out = be_get_MemPerm_out_entity(irn, i);
1492 fprintf(f, "\nin[%d]: %s\n", i, get_entity_name(in));
1495 fprintf(f, "\nout[%d]: %s\n", i, get_entity_name(out));
1511 * Copies the backend specific attributes from old node to new node.
1513 static void copy_attr(const ir_node *old_node, ir_node *new_node)
1515 be_node_attr_t *old_attr = get_irn_attr(old_node);
1516 be_node_attr_t *new_attr = get_irn_attr(new_node);
1519 assert(is_be_node(old_node));
1520 assert(is_be_node(new_node));
/* shallow-copy the whole attribute struct, then deep-copy reg_data */
1522 memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
1523 new_attr->reg_data = NULL;
1525 if(new_attr->max_reg_data > 0) {
/* the copy lives on the new node's irg obstack */
1526 new_attr->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(get_irn_irg(new_node)), new_attr->max_reg_data);
1527 memcpy(new_attr->reg_data, old_attr->reg_data, new_attr->max_reg_data * sizeof(be_reg_data_t));
/* re-target limited_env self-pointers: after memcpy they still point
 * into the OLD node's reg_data array */
1529 for(i = 0; i < old_attr->max_reg_data; ++i) {
1532 r = &new_attr->reg_data[i].req;
1533 r->req.limited_env = r;
1535 r = &new_attr->reg_data[i].in_req;
1536 r->req.limited_env = r;
1541 static const ir_op_ops be_node_op_ops = {