4 * @author Sebastian Hack
6 * Backend node support.
8 * This file provides Perm, Copy, Spill and Reload nodes.
10 * Copyright (C) 2005-2006 Universitaet Karlsruhe
11 * Released under the GPL
27 #include "bitfiddle.h"
39 #include "besched_t.h"
44 #define OUT_POS(x) (-((x) + 1))
46 /* Sometimes we want to put const nodes into get_irn_generic_attr ... */
47 #define get_irn_attr(irn) get_irn_generic_attr((ir_node *) (irn))
49 static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O');
52 be_req_kind_old_limited,
53 be_req_kind_negate_old_limited,
54 be_req_kind_single_reg
58 arch_register_req_t req;
60 arch_irn_flags_t flags;
63 void (*old_limited)(void *ptr, bitset_t *bs);
64 void *old_limited_env;
67 const arch_register_t *single_reg;
72 const arch_register_t *reg;
77 /** The generic be nodes attribute type. */
80 be_reg_data_t *reg_data;
83 /** The be_Return nodes attribute type. */
85 be_node_attr_t node_attr;
86 int num_ret_vals; /**< number of return values */
89 /** The be_Stack attribute type. */
91 be_node_attr_t node_attr;
92 int offset; /**< The offset by which the stack shall be expanded/shrinked. */
95 /** The be_Frame attribute type. */
97 be_node_attr_t node_attr;
102 /** The be_Call attribute type. */
104 be_node_attr_t node_attr;
105 ir_entity *ent; /**< The called entity if this is a static call. */
106 ir_type *call_tp; /**< The call type, copied from the original Call node. */
110 be_node_attr_t node_attr;
111 ir_entity **in_entities;
112 ir_entity **out_entities;
118 ir_op *op_be_MemPerm;
121 ir_op *op_be_CopyKeep;
128 ir_op *op_be_RegParams;
129 ir_op *op_be_StackParam;
130 ir_op *op_be_FrameAddr;
131 ir_op *op_be_FrameLoad;
132 ir_op *op_be_FrameStore;
133 ir_op *op_be_Barrier;
135 static int beo_base = -1;
137 static const ir_op_ops be_node_op_ops;
139 #define N irop_flag_none
140 #define L irop_flag_labeled
141 #define C irop_flag_commutative
142 #define X irop_flag_cfopcode
143 #define I irop_flag_ip_cfopcode
144 #define F irop_flag_fragile
145 #define Y irop_flag_forking
146 #define H irop_flag_highlevel
147 #define c irop_flag_constlike
148 #define K irop_flag_keep
149 #define M irop_flag_machine
153 * Compare two node attributes.
 * @return zero if both attributes are identical
static int cmp_node_attr(be_node_attr_t *a, be_node_attr_t *b) {
	if (a->max_reg_data == b->max_reg_data) {
		/* Same number of register-data slots: compare each slot's assigned
		 * register and its in/out requirements bytewise. */
		for (i = 0; i < a->max_reg_data; ++i) {
			if (a->reg_data[i].reg != b->reg_data[i].reg ||
				memcmp(&a->reg_data[i].in_req, &b->reg_data[i].in_req, sizeof(b->reg_data[i].in_req)) ||
				memcmp(&a->reg_data[i].req, &b->reg_data[i].req, sizeof(a->reg_data[i].req)))
173 * Compare the attributes of two FrameAddr nodes.
 * @return zero if both attributes are identical
static int FrameAddr_cmp_attr(ir_node *a, ir_node *b) {
	be_frame_attr_t *a_attr = get_irn_attr(a);
	be_frame_attr_t *b_attr = get_irn_attr(b);

	/* Frame nodes match only if entity and offset agree; then fall back to
	 * the generic backend attribute comparison. */
	if (a_attr->ent == b_attr->ent && a_attr->offset == b_attr->offset)
		return cmp_node_attr(&a_attr->node_attr, &b_attr->node_attr);
/* Registers all backend node opcodes with libFirm. Guarded by 'inited' so
 * repeated calls are harmless. */
void be_node_init(void) {
	static int inited = 0;

	/* Acquire all needed opcodes. */
	beo_base = get_next_ir_opcodes(beo_Last - 1);

	/* One ir_op per backend node kind; the attribute size determines how much
	 * room init_node_attr() later has to work with. */
	op_be_Spill      = new_ir_op(beo_base + beo_Spill,      "be_Spill",      op_pin_state_mem_pinned, N, oparity_unary,    0, sizeof(be_frame_attr_t),   &be_node_op_ops);
	op_be_Reload     = new_ir_op(beo_base + beo_Reload,     "be_Reload",     op_pin_state_mem_pinned, N, oparity_zero,     0, sizeof(be_frame_attr_t),   &be_node_op_ops);
	op_be_Perm       = new_ir_op(beo_base + beo_Perm,       "be_Perm",       op_pin_state_pinned,     N, oparity_variable, 0, sizeof(be_node_attr_t),    &be_node_op_ops);
	op_be_MemPerm    = new_ir_op(beo_base + beo_MemPerm,    "be_MemPerm",    op_pin_state_mem_pinned, N, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops);
	op_be_Copy       = new_ir_op(beo_base + beo_Copy,       "be_Copy",       op_pin_state_floats,     N, oparity_unary,    0, sizeof(be_node_attr_t),    &be_node_op_ops);
	op_be_Keep       = new_ir_op(beo_base + beo_Keep,       "be_Keep",       op_pin_state_pinned,     K, oparity_variable, 0, sizeof(be_node_attr_t),    &be_node_op_ops);
	op_be_CopyKeep   = new_ir_op(beo_base + beo_CopyKeep,   "be_CopyKeep",   op_pin_state_pinned,     K, oparity_variable, 0, sizeof(be_node_attr_t),    &be_node_op_ops);
	op_be_Call       = new_ir_op(beo_base + beo_Call,       "be_Call",       op_pin_state_pinned,     F, oparity_variable, 0, sizeof(be_call_attr_t),    &be_node_op_ops);
	op_be_Return     = new_ir_op(beo_base + beo_Return,     "be_Return",     op_pin_state_pinned,     X, oparity_variable, 0, sizeof(be_return_attr_t),  &be_node_op_ops);
	op_be_AddSP      = new_ir_op(beo_base + beo_AddSP,      "be_AddSP",      op_pin_state_pinned,     N, oparity_unary,    0, sizeof(be_node_attr_t),    &be_node_op_ops);
	op_be_SubSP      = new_ir_op(beo_base + beo_SubSP,      "be_SubSP",      op_pin_state_pinned,     N, oparity_unary,    0, sizeof(be_node_attr_t),    &be_node_op_ops);
	op_be_SetSP      = new_ir_op(beo_base + beo_SetSP,      "be_SetSP",      op_pin_state_pinned,     N, oparity_binary,   0, sizeof(be_stack_attr_t),   &be_node_op_ops);
	op_be_IncSP      = new_ir_op(beo_base + beo_IncSP,      "be_IncSP",      op_pin_state_pinned,     N, oparity_binary,   0, sizeof(be_stack_attr_t),   &be_node_op_ops);
	op_be_RegParams  = new_ir_op(beo_base + beo_RegParams,  "be_RegParams",  op_pin_state_pinned,     N, oparity_zero,     0, sizeof(be_node_attr_t),    &be_node_op_ops);
	op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned,     N, oparity_unary,    0, sizeof(be_frame_attr_t),   &be_node_op_ops);
	op_be_FrameAddr  = new_ir_op(beo_base + beo_FrameAddr,  "be_FrameAddr",  op_pin_state_pinned,     N, oparity_unary,    0, sizeof(be_frame_attr_t),   &be_node_op_ops);
	op_be_FrameLoad  = new_ir_op(beo_base + beo_FrameLoad,  "be_FrameLoad",  op_pin_state_pinned,     N, oparity_any,      0, sizeof(be_frame_attr_t),   &be_node_op_ops);
	op_be_FrameStore = new_ir_op(beo_base + beo_FrameStore, "be_FrameStore", op_pin_state_pinned,     N, oparity_any,      0, sizeof(be_frame_attr_t),   &be_node_op_ops);
	op_be_Barrier    = new_ir_op(beo_base + beo_Barrier,    "be_Barrier",    op_pin_state_pinned,     N, oparity_any,      0, sizeof(be_node_attr_t),    &be_node_op_ops);

	/* Tag every opcode so is_be_node() can recognize backend nodes. */
	set_op_tag(op_be_Spill,      &be_node_tag);
	set_op_tag(op_be_Reload,     &be_node_tag);
	set_op_tag(op_be_Perm,       &be_node_tag);
	set_op_tag(op_be_MemPerm,    &be_node_tag);
	set_op_tag(op_be_Copy,       &be_node_tag);
	set_op_tag(op_be_Keep,       &be_node_tag);
	set_op_tag(op_be_CopyKeep,   &be_node_tag);
	set_op_tag(op_be_Call,       &be_node_tag);
	set_op_tag(op_be_Return,     &be_node_tag);
	set_op_tag(op_be_AddSP,      &be_node_tag);
	set_op_tag(op_be_SubSP,      &be_node_tag);
	set_op_tag(op_be_SetSP,      &be_node_tag);
	set_op_tag(op_be_IncSP,      &be_node_tag);
	set_op_tag(op_be_RegParams,  &be_node_tag);
	set_op_tag(op_be_StackParam, &be_node_tag);
	set_op_tag(op_be_FrameLoad,  &be_node_tag);
	set_op_tag(op_be_FrameStore, &be_node_tag);
	set_op_tag(op_be_FrameAddr,  &be_node_tag);
	set_op_tag(op_be_Barrier,    &be_node_tag);

	/* FrameAddr nodes carry an entity/offset and need their own comparator
	 * for CSE. */
	op_be_FrameAddr->ops.node_cmp_attr = FrameAddr_cmp_attr;
241 * Initializes the generic attribute of all be nodes and return ir.
243 static void *init_node_attr(ir_node* irn, int max_reg_data)
245 ir_graph *irg = get_irn_irg(irn);
246 be_node_attr_t *a = get_irn_attr(irn);
248 memset(a, 0, sizeof(get_op_attr_size(get_irn_op(irn))));
249 a->max_reg_data = max_reg_data;
252 if(max_reg_data > 0) {
255 a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), max_reg_data);
256 memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0]));
257 for(i = 0; i < max_reg_data; ++i) {
258 a->reg_data[i].req.req.cls = NULL;
259 a->reg_data[i].req.req.type = arch_register_req_type_none;
/* Returns non-zero iff irn's opcode carries the backend node tag. */
int is_be_node(const ir_node *irn)
{
	return get_op_tag(get_irn_op(irn)) == &be_node_tag;
/* Maps irn's global opcode back into the be_opcode_t range, or beo_NoBeOp
 * if irn is not a backend node. */
be_opcode_t be_get_irn_opcode(const ir_node *irn)
{
	return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp;
/**
 * If *node is a Proj, redirect *node to the underlying (mode_T) node and
 * return the proj number.
 * NOTE(review): the visible lines reassign *node twice (looking through up
 * to two Proj levels) — confirm against the full source; this view is
 * missing surrounding lines.
 */
static int redir_proj(const ir_node **node, int pos)
{
	const ir_node *n = *node;

	*node = irn = get_Proj_pred(n);

	assert(get_irn_mode(irn) == mode_T);
	*node = get_Proj_pred(irn);

	return get_Proj_proj(n);
/**
 * Retrieves the backend attribute holding irn's register data.
 * Looks through a Proj to its mode_T predecessor; writes the relevant slot
 * index (the proj number, or a position for plain nodes) to *the_pos when
 * the_pos is non-NULL. Returns NULL if no register data exists.
 */
static be_node_attr_t *retrieve_irn_attr(const ir_node *irn, int *the_pos)
{
	be_node_attr_t *res = NULL;
	/* Use a local dummy slot so the code below can always write *pos. */
	int *pos = the_pos ? the_pos : &dummy;

		ir_node *pred = get_Proj_pred(irn);
		int p = get_Proj_proj(irn);

		if(is_be_node(pred)) {
			assert(get_irn_mode(pred) == mode_T);
			res = get_irn_attr(pred);
			assert(p >= 0 && p < res->max_reg_data && "illegal proj number");
	/* Non-Proj backend node producing a single (non-tuple) value. */
	else if(is_be_node(irn) && get_irn_mode(irn) != mode_T) {
		be_node_attr_t *a = get_irn_attr(irn);
		if(a->max_reg_data > 0) {
/* Returns irn's register-data slot (looking through Projs), or NULL if the
 * node carries no register data. */
static be_reg_data_t *retrieve_reg_data(const ir_node *irn)
{
	be_node_attr_t *a = retrieve_irn_attr(irn, &pos);
	return a ? &a->reg_data[pos] : NULL;
/* arch_irn_ops callback: records the register assigned to irn in its
 * register-data slot. */
be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
{
	be_reg_data_t *r = retrieve_reg_data(irn);
/* Creates a be_Spill(frame, to_spill): stores to_spill into the stack frame,
 * producing a mode_M result. */
ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
	ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *to_spill)

	res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in);
	a = init_node_attr(res, 2);

	/* Constrain the frame input and the spilled value to their classes. */
	be_node_set_reg_class(res, be_pos_Spill_frame, cls_frame);
	be_node_set_reg_class(res, be_pos_Spill_val, cls);
/* Creates a be_Reload(frame, mem): reloads a previously spilled value from
 * the stack frame. Reloads are marked rematerializable. */
ir_node *be_new_Reload(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
	ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *mem, ir_mode *mode)

	res = new_ir_node(NULL, irg, bl, op_be_Reload, mode, 2, in);

	init_node_attr(res, 2);
	be_node_set_reg_class(res, -1, cls);   /* pos -1: the node's result */
	be_node_set_reg_class(res, be_pos_Reload_frame, cls_frame);
	be_node_set_flags(res, -1, arch_irn_flags_rematerializable);
/* Input accessors for Reload and Spill nodes. */

/* Returns the memory input of a be_Reload. */
ir_node *be_get_Reload_mem(const ir_node *irn)
{
	assert(be_is_Reload(irn));
	return get_irn_n(irn, be_pos_Reload_mem);
/* Returns the frame input of a be_Reload. */
ir_node *be_get_Reload_frame(const ir_node *irn)
{
	assert(be_is_Reload(irn));
	return get_irn_n(irn, be_pos_Reload_frame);
/* Returns the value being spilled by a be_Spill. */
ir_node *be_get_Spill_val(const ir_node *irn)
{
	assert(be_is_Spill(irn));
	return get_irn_n(irn, be_pos_Spill_val);
/* Returns the frame input of a be_Spill. */
ir_node *be_get_Spill_frame(const ir_node *irn)
{
	assert(be_is_Spill(irn));
	return get_irn_n(irn, be_pos_Spill_frame);
/* Creates a be_Perm permuting n values, all within register class cls. */
ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])

	ir_node *irn = new_ir_node(NULL, irg, bl, op_be_Perm, mode_T, n, in);
	init_node_attr(irn, n);
	for(i = 0; i < n; ++i) {
		/* Input i and output i share the same register class. */
		be_node_set_reg_class(irn, i, cls);
		be_node_set_reg_class(irn, OUT_POS(i), cls);
/* Creates a be_MemPerm permuting n spill slots. Input 0 is the frame
 * pointer; inputs 1..n are the values. The in/out entity arrays (one entity
 * per permuted slot) start out zeroed. */
ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[])

	ir_node *frame = get_irg_frame(irg);
	const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);

	const arch_register_t *sp = arch_env->isa->sp;
	be_memperm_attr_t *attr;

	/* Prepend the frame pointer to the user-supplied inputs. */
	real_in = alloca((n+1) * sizeof(real_in[0]));
	memcpy(&real_in[1], in, n * sizeof(real_in[0]));

	irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n+1, real_in);

	init_node_attr(irn, n + 1);
	be_node_set_reg_class(irn, 0, sp->reg_class);
	for(i = 0; i < n; ++i) {
		be_node_set_reg_class(irn, i + 1, cls_frame);
		be_node_set_reg_class(irn, OUT_POS(i), cls_frame);

	attr = get_irn_attr(irn);

	/* Entity arrays are allocated on the graph obstack and zero-filled. */
	attr->in_entities = obstack_alloc(irg->obst, n * sizeof(attr->in_entities[0]));
	memset(attr->in_entities, 0, n * sizeof(attr->in_entities[0]));
	attr->out_entities = obstack_alloc(irg->obst, n*sizeof(attr->out_entities[0]));
	memset(attr->out_entities, 0, n*sizeof(attr->out_entities[0]));
/* Creates a be_Copy of op within register class cls (register-to-register
 * move; result has op's mode). */
ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op)

	res = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), 1, in);
	init_node_attr(res, 1);
	be_node_set_reg_class(res, 0, cls);
	be_node_set_reg_class(res, OUT_POS(0), cls);
/* Returns the operand being copied. */
ir_node *be_get_Copy_op(const ir_node *cpy) {
	return get_irn_n(cpy, be_pos_Copy_op);
/* Replaces the operand being copied. */
void be_set_Copy_op(ir_node *cpy, ir_node *op) {
	set_irn_n(cpy, be_pos_Copy_op, op);
/* Creates a be_Keep holding n values alive (keep-edge semantics, mode_ANY). */
ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])

	irn = new_ir_node(NULL, irg, bl, op_be_Keep, mode_ANY, n, in);
	init_node_attr(irn, n);
	for(i = 0; i < n; ++i) {
		be_node_set_reg_class(irn, i, cls);
/* Creates a be_Call. Inputs are memory, stack pointer, callee pointer,
 * followed by the n argument values; n_outs is the number of results. */
ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr,
	int n_outs, int n, ir_node *in[], ir_type *call_tp)

	int real_n = be_pos_Call_first_arg + n;

	NEW_ARR_A(ir_node *, real_in, real_n);
	real_in[be_pos_Call_mem] = mem;
	real_in[be_pos_Call_sp] = sp;
	real_in[be_pos_Call_ptr] = ptr;
	memcpy(&real_in[be_pos_Call_first_arg], in, n * sizeof(in[0]));

	irn = new_ir_node(dbg, irg, bl, op_be_Call, mode_T, real_n, real_in);
	/* Reserve register-data slots for max(inputs, outputs). */
	a = init_node_attr(irn, (n_outs > real_n ? n_outs : real_n));
	a->call_tp = call_tp;
/* Gets the call entity or NULL if this is no static call. */
ir_entity *be_Call_get_entity(const ir_node *call) {
	be_call_attr_t *a = get_irn_attr(call);
	assert(be_is_Call(call));
/* Sets the call entity (makes the call a static call). */
void be_Call_set_entity(ir_node *call, ir_entity *ent) {
	be_call_attr_t *a = get_irn_attr(call);
	assert(be_is_Call(call));
/* Gets the call type (copied from the original Call node). */
ir_type *be_Call_get_type(ir_node *call) {
	be_call_attr_t *a = get_irn_attr(call);
	assert(be_is_Call(call));
/* Sets the call type. */
void be_Call_set_type(ir_node *call, ir_type *call_tp) {
	be_call_attr_t *a = get_irn_attr(call);
	assert(be_is_Call(call));
	a->call_tp = call_tp;
/* Construct a new be_Return. n_res is the number of real return values
 * among the n inputs. */
ir_node *be_new_Return(dbg_info *dbg, ir_graph *irg, ir_node *bl, int n_res, int n, ir_node *in[])

	ir_node *irn = new_ir_node(dbg, irg, bl, op_be_Return, mode_X, n, in);
	init_node_attr(irn, n);
	a = get_irn_attr(irn);
	a->num_ret_vals = n_res;

/* Returns the number of real return values. */
int be_Return_get_n_rets(ir_node *ret)

	be_return_attr_t *a = get_irn_attr(ret);
	return a->num_ret_vals;
/* Creates a be_IncSP adjusting the stack pointer by a fixed offset. The
 * result is pinned to the stack register and marked ignore/modify_sp. */
ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, int offset)

	irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, sizeof(in) / sizeof(in[0]), in);
	a = init_node_attr(irn, 1);

	be_node_set_flags(irn, -1, arch_irn_flags_ignore | arch_irn_flags_modify_sp);

	/* Set output constraint to stack register. */
	be_node_set_reg_class(irn, 0, sp->reg_class);
	be_set_constr_single_reg(irn, BE_OUT_POS(0), sp);
	be_node_set_irn_reg(NULL, irn, sp);
/* Creates a be_AddSP: increases the stack pointer by a run-time size value.
 * Result is pinned to the stack register. */
ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz)

	ir_node *in[be_pos_AddSP_last];

	in[be_pos_AddSP_old_sp] = old_sp;
	in[be_pos_AddSP_size] = sz;

	irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, be_pos_AddSP_last, in);
	a = init_node_attr(irn, be_pos_AddSP_last);

	be_node_set_flags(irn, OUT_POS(pn_be_AddSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp);

	/* Set output constraint to stack register. */
	be_set_constr_single_reg(irn, be_pos_AddSP_old_sp, sp);
	be_node_set_reg_class(irn, be_pos_AddSP_size, arch_register_get_class(sp));
	be_set_constr_single_reg(irn, OUT_POS(pn_be_AddSP_res), sp);
	a->reg_data[pn_be_AddSP_res].reg = sp;
/* Creates a be_SubSP: decreases the stack pointer by a run-time size value.
 * Mirrors be_new_AddSP. */
ir_node *be_new_SubSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz)

	ir_node *in[be_pos_SubSP_last];

	in[be_pos_SubSP_old_sp] = old_sp;
	in[be_pos_SubSP_size] = sz;

	irn = new_ir_node(NULL, irg, bl, op_be_SubSP, mode_T, be_pos_SubSP_last, in);
	a = init_node_attr(irn, be_pos_SubSP_last);

	be_node_set_flags(irn, OUT_POS(pn_be_SubSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp);

	/* Set output constraint to stack register. */
	be_set_constr_single_reg(irn, be_pos_SubSP_old_sp, sp);
	be_node_set_reg_class(irn, be_pos_SubSP_size, arch_register_get_class(sp));
	be_set_constr_single_reg(irn, OUT_POS(pn_be_SubSP_res), sp);
	a->reg_data[pn_be_SubSP_res].reg = sp;
/* Creates a be_SetSP: sets the stack pointer to an arbitrary value op. */
ir_node *be_new_SetSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *op, ir_node *mem)

	irn = new_ir_node(NULL, irg, bl, op_be_SetSP, get_irn_mode(old_sp), 3, in);
	a = init_node_attr(irn, 3);

	be_node_set_flags(irn, OUT_POS(0), arch_irn_flags_ignore | arch_irn_flags_modify_sp);

	/* Set output constraint to stack register. */
	be_set_constr_single_reg(irn, OUT_POS(0), sp);
	/* NOTE(review): the be_pos_AddSP_* input positions are reused here —
	 * verify they match be_SetSP's actual input layout. */
	be_node_set_reg_class(irn, be_pos_AddSP_size, sp->reg_class);
	be_node_set_reg_class(irn, be_pos_AddSP_old_sp, sp->reg_class);
/* Creates a be_StackParam: loads a parameter entity relative to the frame
 * pointer. */
ir_node *be_new_StackParam(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, ir_entity *ent)

	in[0] = frame_pointer;
	irn = new_ir_node(NULL, irg, bl, op_be_StackParam, mode, 1, in);
	a = init_node_attr(irn, 1);

	be_node_set_reg_class(irn, 0, cls_frame);
	be_node_set_reg_class(irn, OUT_POS(0), cls);
/* Creates a be_RegParams: a zero-input node producing the n_outs register
 * parameters entering the function. */
ir_node *be_new_RegParams(ir_graph *irg, ir_node *bl, int n_outs)

	irn = new_ir_node(NULL, irg, bl, op_be_RegParams, mode_T, 0, in);
	init_node_attr(irn, n_outs);
/* Creates a be_FrameLoad: loads the value of entity ent from the stack
 * frame (inputs: memory, frame pointer). */
ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
	ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_entity *ent)

	irn = new_ir_node(NULL, irg, bl, op_be_FrameLoad, mode_T, 2, in);
	a = init_node_attr(irn, 3);

	be_node_set_reg_class(irn, 1, cls_frame);
	be_node_set_reg_class(irn, OUT_POS(pn_Load_res), cls_data);
/* Creates a be_FrameStore: stores data to entity ent in the stack frame
 * (inputs: memory, frame pointer, data). */
ir_node *be_new_FrameStore(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
	ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_node *data, ir_entity *ent)

	irn = new_ir_node(NULL, irg, bl, op_be_FrameStore, mode_T, 3, in);
	a = init_node_attr(irn, 3);

	be_node_set_reg_class(irn, 1, cls_frame);
	be_node_set_reg_class(irn, 2, cls_data);
/* Creates a be_FrameAddr: computes the address of entity ent relative to
 * the frame pointer. Passed through optimize_node() so identical addresses
 * can be CSEd (see FrameAddr_cmp_attr). */
ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_entity *ent)

	irn = new_ir_node(NULL, irg, bl, op_be_FrameAddr, get_irn_mode(frame), 1, in);
	a = init_node_attr(irn, 1);

	be_node_set_reg_class(irn, 0, cls_frame);
	be_node_set_reg_class(irn, OUT_POS(0), cls_frame);

	return optimize_node(irn);
/* Creates a be_CopyKeep: copies src (input 0) while additionally keeping
 * the n in_keep values alive. */
ir_node *be_new_CopyKeep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, int n, ir_node *in_keep[], ir_mode *mode)

	ir_node **in = (ir_node **) alloca((n + 1) * sizeof(in[0]));

	memcpy(&in[1], in_keep, n * sizeof(in[0]));
	irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, n + 1, in);
	init_node_attr(irn, n + 1);
	be_node_set_reg_class(irn, OUT_POS(0), cls);
	be_node_set_reg_class(irn, 0, cls);

/* Convenience wrapper: CopyKeep with exactly one kept value. */
ir_node *be_new_CopyKeep_single(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, ir_node *keep, ir_mode *mode)

	return be_new_CopyKeep(cls, irg, bl, src, 1, in, mode);

/* Returns the copied operand of a be_CopyKeep. */
ir_node *be_get_CopyKeep_op(const ir_node *cpy) {
	return get_irn_n(cpy, be_pos_CopyKeep_op);

/* Replaces the copied operand of a be_CopyKeep. */
void be_set_CopyKeep_op(ir_node *cpy, ir_node *op) {
	set_irn_n(cpy, be_pos_CopyKeep_op, op);
/* Creates a be_Barrier over n values (scheduling barrier; one output per
 * input). */
ir_node *be_new_Barrier(ir_graph *irg, ir_node *bl, int n, ir_node *in[])

	irn = new_ir_node(NULL, irg, bl, op_be_Barrier, mode_T, n, in);
	init_node_attr(irn, n);
/* Opcode predicates: each returns non-zero iff irn is the given backend
 * node kind (non-backend nodes map to beo_NoBeOp and never match). */
int be_is_Spill         (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Spill          ; }
int be_is_Reload        (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Reload         ; }
int be_is_Copy          (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Copy           ; }
int be_is_CopyKeep      (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_CopyKeep       ; }
int be_is_Perm          (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Perm           ; }
int be_is_MemPerm       (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_MemPerm        ; }
int be_is_Keep          (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Keep           ; }
int be_is_Call          (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Call           ; }
int be_is_Return        (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Return         ; }
int be_is_IncSP         (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_IncSP          ; }
int be_is_SetSP         (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SetSP          ; }
int be_is_AddSP         (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_AddSP          ; }
int be_is_RegParams     (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_RegParams      ; }
int be_is_StackParam    (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_StackParam     ; }
int be_is_FrameAddr     (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameAddr      ; }
int be_is_FrameLoad     (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameLoad      ; }
int be_is_FrameStore    (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameStore     ; }
int be_is_Barrier       (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Barrier        ; }
/* Returns non-zero iff irn's opcode carries a be_frame_attr_t (entity and
 * offset); the case list is not visible in this view. */
int be_has_frame_entity(const ir_node *irn)

	switch(be_get_irn_opcode(irn)) {

/* Returns irn's frame entity, if it has one. */
ir_entity *be_get_frame_entity(const ir_node *irn)

	if (be_has_frame_entity(irn)) {
		be_frame_attr_t *a = get_irn_attr(irn);

/* Returns irn's frame offset (only meaningful for frame-entity nodes). */
int be_get_frame_offset(const ir_node *irn)

	assert(is_be_node(irn));
	if (be_has_frame_entity(irn)) {
		be_frame_attr_t *a = get_irn_attr(irn);
/* Accessors for the per-slot in/out entities of a be_MemPerm. Slot index n
 * must be below the entity arity (number of permuted values). */

void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity *ent)

	be_memperm_attr_t *attr = get_irn_attr(irn);

	assert(be_is_MemPerm(irn));
	assert(n < be_get_MemPerm_entity_arity(irn));

	attr->in_entities[n] = ent;

ir_entity* be_get_MemPerm_in_entity(const ir_node* irn, int n)

	be_memperm_attr_t *attr = get_irn_attr(irn);

	assert(be_is_MemPerm(irn));
	assert(n < be_get_MemPerm_entity_arity(irn));

	return attr->in_entities[n];

void be_set_MemPerm_out_entity(const ir_node *irn, int n, ir_entity *ent)

	be_memperm_attr_t *attr = get_irn_attr(irn);

	assert(be_is_MemPerm(irn));
	assert(n < be_get_MemPerm_entity_arity(irn));

	attr->out_entities[n] = ent;

ir_entity* be_get_MemPerm_out_entity(const ir_node* irn, int n)

	be_memperm_attr_t *attr = get_irn_attr(irn);

	assert(be_is_MemPerm(irn));
	assert(n < be_get_MemPerm_entity_arity(irn));

	return attr->out_entities[n];

/* Arity minus one: input 0 is the frame pointer, not a permuted value. */
int be_get_MemPerm_entity_arity(const ir_node *irn)

	return get_irn_arity(irn) - 1;
/**
 * 'limited' callback for backend register requirements: fills bs with the
 * set of admissible registers, dispatching on the request kind.
 */
static void be_limited(void *data, bitset_t *bs)

	be_req_t *req = data;

	case be_req_kind_negate_old_limited:
	case be_req_kind_old_limited:
		/* Delegate to the wrapped (old) limited function; for the negate
		 * kind the result is inverted afterwards (code not visible here). */
		req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs);
		if(req->kind == be_req_kind_negate_old_limited)
	case be_req_kind_single_reg:
		/* Exactly one admissible register. */
		bitset_clear_all(bs);
		bitset_set(bs, req->x.single_reg->index);
875 static INLINE be_req_t *get_req(ir_node *irn, int pos)
877 int idx = pos < 0 ? -(pos + 1) : pos;
878 be_node_attr_t *a = get_irn_attr(irn);
879 be_reg_data_t *rd = &a->reg_data[idx];
880 be_req_t *r = pos < 0 ? &rd->req : &rd->in_req;
882 assert(is_be_node(irn));
883 assert(!(pos >= 0) || pos < get_irn_arity(irn));
884 assert(!(pos < 0) || -(pos + 1) <= a->max_reg_data);
/* Constrains position pos of irn to exactly one register reg (limited
 * requirement backed by be_limited). */
void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg)

	be_req_t *r = get_req(irn, pos);

	r->kind            = be_req_kind_single_reg;
	r->x.single_reg    = reg;
	r->req.limited     = be_limited;
	r->req.limited_env = r;
	r->req.type        = arch_register_req_type_limited;
	r->req.cls         = reg->reg_class;
/* Constrains position pos of irn by wrapping an existing limited
 * requirement req (its limited function is stored and re-dispatched via
 * be_limited). */
void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req)

	be_req_t *r = get_req(irn, pos);

	assert(arch_register_req_is(req, limited));

	r->kind            = be_req_kind_old_limited;
	r->req.limited     = be_limited;
	r->req.limited_env = r;
	r->req.type        = arch_register_req_type_limited;
	r->req.cls         = req->cls;

	r->x.old_limited.old_limited     = req->limited;
	r->x.old_limited.old_limited_env = req->limited_env;
/* Sets the arch flags for position pos of irn (assignment line not visible
 * in this view). */
void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags)

	be_req_t *r = get_req(irn, pos);

/* Sets the register class for position pos; NULL resets the requirement,
 * otherwise a 'none' requirement is upgraded to 'normal'. */
void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls)

	be_req_t *r = get_req(irn, pos);

		r->req.type = arch_register_req_type_none;
	else if (r->req.type == arch_register_req_type_none)
		r->req.type = arch_register_req_type_normal;

/* Overrides the requirement type for position pos. */
void be_node_set_req_type(ir_node *irn, int pos, arch_register_req_type_t type)

	be_req_t *r = get_req(irn, pos);
/* Accessors for be_IncSP: input 0 is the previous stack pointer, input 1
 * the memory dependency; the offset lives in the stack attribute. */

ir_node *be_get_IncSP_pred(ir_node *irn) {
	assert(be_is_IncSP(irn));
	return get_irn_n(irn, 0);

void be_set_IncSP_pred(ir_node *incsp, ir_node *pred) {
	assert(be_is_IncSP(incsp));
	set_irn_n(incsp, 0, pred);

ir_node *be_get_IncSP_mem(ir_node *irn) {
	assert(be_is_IncSP(irn));
	return get_irn_n(irn, 1);

void be_set_IncSP_offset(ir_node *irn, int offset)

	be_stack_attr_t *a = get_irn_attr(irn);
	assert(be_is_IncSP(irn));

int be_get_IncSP_offset(const ir_node *irn)

	be_stack_attr_t *a = get_irn_attr(irn);
	assert(be_is_IncSP(irn));
/* Convenience: spills irn into its graph's frame, deriving both register
 * classes from the architecture environment. */
ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn)

	ir_node *bl = get_nodes_block(irn);
	ir_graph *irg = get_irn_irg(bl);
	ir_node *frame = get_irg_frame(irg);
	const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
	const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);

	spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
/* Convenience: creates a Reload of 'spill' and schedules it. If 'insert' is
 * a block, the reload goes after the last non-control-flow node of that
 * block; otherwise directly before 'insert'. */
ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)

	ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
	ir_graph *irg = get_irn_irg(bl);
	ir_node *frame = get_irg_frame(irg);
	const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);

	/* The spill source is either a real Spill or a mode_M Phi of spills. */
	assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));

	reload = be_new_Reload(cls, cls_frame, irg, bl, frame, spill, mode);

	if (is_Block(insert)) {
		/* Skip control-flow nodes at the block end to find the insert point. */
		insert = sched_skip(insert, 0, sched_skip_cf_predicator, (void *) arch_env);
		sched_add_after(insert, reload);

		sched_add_before(insert, reload);
1008 | _ \ ___ __ _ | _ \ ___ __ _ ___
1009 | |_) / _ \/ _` | | |_) / _ \/ _` / __|
1010 | _ < __/ (_| | | _ < __/ (_| \__ \
1011 |_| \_\___|\__, | |_| \_\___|\__, |___/
/* Copies the out-requirement of output slot out_pos into *req. Copies get a
 * should-be-same hint on their operand; out-of-range slots yield 'none'. */
static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos)

	const be_node_attr_t *a = get_irn_attr(irn);

	if(out_pos < a->max_reg_data) {
		memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));

		if(be_is_Copy(irn)) {
			/* A Copy prefers the same register as its operand. */
			req->type |= arch_register_req_type_should_be_same;
			req->other_same = be_get_Copy_op(irn);

		req->type = arch_register_req_type_none;
/* Copies the in-requirement of input pos into *req; out-of-range positions
 * yield a 'none' requirement. */
static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos)

	const be_node_attr_t *a = get_irn_attr(irn);

	if(pos < get_irn_arity(irn) && pos < a->max_reg_data)
		memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));

		req->type = arch_register_req_type_none;
/* arch_irn_ops callback: fills *req with the requirement of irn at pos.
 * For mode_T nodes, pos selects an output (looking through Projs);
 * otherwise pos is an input position. */
static const arch_register_req_t *
be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)

	if (get_irn_mode(irn) == mode_T)

		out_pos = redir_proj((const ir_node **)&irn, pos);
		assert(is_be_node(irn));
		return put_out_reg_req(req, irn, out_pos);

	if (is_be_node(irn)) {

		For spills and reloads, we return "none" as requirement for frame pointer,
		so every input is ok. Some backends need this (e.g. STA). We use an arbitrary
		large number as pos, so put_in_reg_req will return "none" as requirement.

		if ((be_is_Spill(irn)  && pos == be_pos_Spill_frame) ||
			(be_is_Reload(irn) && pos == be_pos_Reload_frame))
			return put_in_reg_req(req, irn, INT_MAX);

		return put_in_reg_req(req, irn, pos);
/* arch_irn_ops callback: returns the register assigned to irn, or NULL if
 * irn has no register data. */
const arch_register_t *
be_node_get_irn_reg(const void *_self, const ir_node *irn)

	be_reg_data_t *r = retrieve_reg_data(irn);
	return r ? r->reg : NULL;
/* arch_irn_ops callback: classifies irn (looking through Projs); backend
 * opcodes without an explicit mapping are 'normal'. */
static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)

	redir_proj((const ir_node **) &irn, -1);

	switch(be_get_irn_opcode(irn)) {
#define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b
		XXX(Reload, reload);
		XXX(Return, branch);
		XXX(StackParam, stackparam);

	return arch_irn_class_normal;
/* arch_irn_ops callback: irn's flags from its register data, or 0. */
static arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn)

	be_reg_data_t *r = retrieve_reg_data(irn);
	return r ? r->req.flags : 0;

/* arch_irn_ops wrapper around be_get_frame_entity(). */
static ir_entity *be_node_get_frame_entity(const void *self, const ir_node *irn)

	return be_get_frame_entity(irn);

/* arch_irn_ops callback: stores ent in irn's frame attribute. */
static void be_node_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent)

	assert(be_has_frame_entity(irn));

	a = get_irn_attr(irn);

/* arch_irn_ops callback: stores the frame offset (no-op for nodes without
 * a frame entity). */
static void be_node_set_frame_offset(const void *self, ir_node *irn, int offset)

	if(be_has_frame_entity(irn)) {
		be_frame_attr_t *a = get_irn_attr(irn);

/* arch_irn_ops callback: only IncSP nodes bias the stack pointer. */
static int be_node_get_sp_bias(const void *self, const ir_node *irn)

	return be_is_IncSP(irn) ? be_get_IncSP_offset(irn) : 0;
1146 ___ ____ _ _ _ _ _ _
1147 |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
1148 | || |_) | \| | | |_| |/ _` | '_ \ / _` | |/ _ \ '__|
1149 | || _ <| |\ | | _ | (_| | | | | (_| | | __/ |
1150 |___|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1154 static const arch_irn_ops_if_t be_node_irn_ops_if = {
1155 be_node_get_irn_reg_req,
1156 be_node_set_irn_reg,
1157 be_node_get_irn_reg,
1160 be_node_get_frame_entity,
1161 be_node_set_frame_entity,
1162 be_node_set_frame_offset,
1163 be_node_get_sp_bias,
1164 NULL, /* get_inverse */
1165 NULL, /* get_op_estimated_cost */
1166 NULL, /* possible_memory_operand */
1167 NULL, /* perform_memory_operand */
1170 static const arch_irn_ops_t be_node_irn_ops = {
1174 const void *be_node_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
1176 redir_proj((const ir_node **) &irn, -1);
1177 return is_be_node(irn) ? &be_node_irn_ops : NULL;
1180 const arch_irn_handler_t be_node_irn_handler = {
1185 ____ _ _ ___ ____ _ _ _ _ _ _
1186 | _ \| |__ (_) |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
1187 | |_) | '_ \| | | || |_) | \| | | |_| |/ _` | '_ \ / _` | |/ _ \ '__|
1188 | __/| | | | | | || _ <| |\ | | _ | (_| | | | | (_| | | __/ |
1189 |_| |_| |_|_| |___|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1194 arch_irn_handler_t irn_handler;
1195 arch_irn_ops_t irn_ops;
1196 const arch_env_t *arch_env;
1200 #define get_phi_handler_from_handler(h) container_of(h, phi_handler_t, irn_handler)
1201 #define get_phi_handler_from_ops(h) container_of(h, phi_handler_t, irn_ops)
/* Handler callback: the Phi handler only feels responsible for data Phis
 * (datab mode); everything else returns NULL. */
static const void *phi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)

	const phi_handler_t *h = get_phi_handler_from_handler(handler);
	return is_Phi(irn) && mode_is_datab(get_irn_mode(irn)) ? &h->irn_ops : NULL;
1210 * Get register class of a Phi.
1213 static const arch_register_req_t *get_Phi_reg_req_recursive(const phi_handler_t *h, arch_register_req_t *req, const ir_node *phi, pset **visited)
1215 int n = get_irn_arity(phi);
1219 if(*visited && pset_find_ptr(*visited, phi))
1222 for(i = 0; i < n; ++i) {
1223 op = get_irn_n(phi, i);
1225 return arch_get_register_req(h->arch_env, req, op, BE_OUT_POS(0));
1229 The operands of that Phi were all Phis themselves.
1230 We have to start a DFS for a non-Phi argument now.
1233 *visited = pset_new_ptr(16);
1235 pset_insert_ptr(*visited, phi);
1237 for(i = 0; i < n; ++i) {
1238 op = get_irn_n(phi, i);
1239 if(get_Phi_reg_req_recursive(h, req, op, visited))
/* arch_irn_ops callback for Phis: derive the requirement from a non-Phi
 * operand, then downgrade it to 'normal' (operand constraints must not
 * leak onto the Phi itself). */
static const arch_register_req_t *phi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)

	phi_handler_t *phi_handler = get_phi_handler_from_ops(self);
	pset *visited = NULL;

	get_Phi_reg_req_recursive(phi_handler, req, irn, &visited);
	/* Set the requirements type to normal, since an operand of the Phi could have had constraints. */
	req->type = arch_register_req_type_normal;
1260 static void phi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1262 phi_handler_t *h = get_phi_handler_from_ops(self);
1263 pmap_insert(h->regs, irn, (void *) reg);
1266 static const arch_register_t *phi_get_irn_reg(const void *self, const ir_node *irn)
1268 phi_handler_t *h = get_phi_handler_from_ops(self);
1269 return pmap_get(h->regs, (void *) irn);
1272 static arch_irn_class_t phi_classify(const void *_self, const ir_node *irn)
1274 return arch_irn_class_normal;
1277 static arch_irn_flags_t phi_get_flags(const void *_self, const ir_node *irn)
1279 return arch_irn_flags_none;
1282 static ir_entity *phi_get_frame_entity(const void *_self, const ir_node *irn)
1287 static void phi_set_frame_entity(const void *_self, ir_node *irn, ir_entity *ent)
1291 static void phi_set_frame_offset(const void *_self, ir_node *irn, int bias)
1295 static int phi_get_sp_bias(const void* self, const ir_node *irn)
/* arch_irn_ops vtable installed for data Phi nodes.
   NOTE(review): some initializer entries (register set/get, classify,
   flags, sp_bias) are not visible in this chunk; entry order must match
   arch_irn_ops_if_t — verify against the header. */
static const arch_irn_ops_if_t phi_irn_ops = {
phi_get_irn_reg_req,
phi_get_frame_entity,
phi_set_frame_entity,
phi_set_frame_offset,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
NULL, /* perform_memory_operand */
/* Static handler for Phi nodes (initializer entries not visible in this chunk). */
static const arch_irn_handler_t phi_irn_handler = {
1320 arch_irn_handler_t *be_phi_handler_new(const arch_env_t *arch_env)
1322 phi_handler_t *h = xmalloc(sizeof(h[0]));
1323 h->irn_handler.get_irn_ops = phi_get_irn_ops;
1324 h->irn_ops.impl = &phi_irn_ops;
1325 h->arch_env = arch_env;
1326 h->regs = pmap_create();
1327 return (arch_irn_handler_t *) h;
1330 void be_phi_handler_free(arch_irn_handler_t *handler)
1332 phi_handler_t *h = (void *) handler;
1333 pmap_destroy(h->regs);
1337 const void *be_phi_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
1339 phi_handler_t *phi_handler = get_phi_handler_from_handler(self);
1340 return is_Phi(irn) ? &phi_handler->irn_ops : NULL;
1343 void be_phi_handler_reset(arch_irn_handler_t *handler)
1345 phi_handler_t *h = get_phi_handler_from_handler(handler);
1347 pmap_destroy(h->regs);
1348 h->regs = pmap_create();
1353 | \ | | ___ __| | ___ | _ \ _ _ _ __ ___ _ __ (_)_ __ __ _
1354 | \| |/ _ \ / _` |/ _ \ | | | | | | | '_ ` _ \| '_ \| | '_ \ / _` |
1355 | |\ | (_) | (_| | __/ | |_| | |_| | | | | | | |_) | | | | | (_| |
1356 |_| \_|\___/ \__,_|\___| |____/ \__,_|_| |_| |_| .__/|_|_| |_|\__, |
/**
 * Dumps a single register requirement to a file.
 *
 * @param f    Output stream.
 * @param idx  Operand index, printed as a "#idx " prefix.
 * @param req  The backend requirement (flags + register class) to print.
 *
 * NOTE(review): several structural lines (braces, local declarations such
 * as the buffer behind `prefix`) are not visible in this chunk; comments
 * below annotate only the code that is shown.
 */
static void dump_node_req(FILE *f, int idx, be_req_t *req)
int did_something = 0;
const char *prefix = buf;
/* Build the "#idx " prefix and force NUL-termination. */
snprintf(buf, sizeof(buf), "#%d ", idx);
buf[sizeof(buf) - 1] = '\0';
/* Print each arch_irn_flags bit that is set, by name. */
if(req->flags != arch_irn_flags_none) {
fprintf(f, "%sflags: ", prefix);
for(i = arch_irn_flags_none; i <= log2_ceil(arch_irn_flags_last); ++i) {
if(req->flags & (1 << i)) {
fprintf(f, "%s%s", prefix, arch_irn_flag_str(1 << i));
/* Print the formatted register-class requirement, if one is set. */
if(req->req.cls != 0) {
arch_register_req_format(tmp, sizeof(tmp), &req->req);
fprintf(f, "%s", tmp);
/**
 * Dumps all register data of a backend node: assigned registers,
 * then per-operand in requirements, then out requirements.
 *
 * NOTE(review): braces/declarations and a likely NULL-guard on rd->reg
 * are not visible in this chunk.
 */
static void dump_node_reqs(FILE *f, ir_node *irn)
be_node_attr_t *a = get_irn_attr(irn);
/* Registers already assigned to each out position. */
fprintf(f, "registers: \n");
for(i = 0; i < a->max_reg_data; ++i) {
be_reg_data_t *rd = &a->reg_data[i];
fprintf(f, "#%d: %s\n", i, rd->reg->name);
/* Requirements the node imposes on its operands. */
fprintf(f, "in requirements\n");
for(i = 0; i < a->max_reg_data; ++i) {
dump_node_req(f, i, &a->reg_data[i].in_req);
/* Requirements on the node's own results. */
fprintf(f, "\nout requirements\n");
for(i = 0; i < a->max_reg_data; ++i) {
dump_node_req(f, i, &a->reg_data[i].req);
/**
 * ir_op operation: dump a backend node to a file, dispatching on the
 * dump reason (opcode text, mode text, node attributes, full info).
 *
 * NOTE(review): the switch/case skeleton (case labels for the opcode
 * switch, breaks, closing braces, return) is only partially visible in
 * this chunk; comments annotate only the code that is shown.
 */
static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
be_node_attr_t *at = get_irn_attr(irn);
assert(is_be_node(irn));
case dump_node_opcode_txt:
/* NOTE(review): non-literal format string — if an op name ever contained
   '%' this would misbehave; prefer fprintf(f, "%s", ...) (CERT FIO30-C). */
fprintf(f, get_op_name(get_irn_op(irn)));
case dump_node_mode_txt:
/* NOTE(review): same non-literal format string concern as above. */
fprintf(f, get_mode_name(get_irn_mode(irn)));
case dump_node_nodeattr_txt:
case dump_node_info_txt:
dump_node_reqs(f, irn);
/* Nodes with a frame entity: print entity, offset and size. */
if(be_has_frame_entity(irn)) {
be_frame_attr_t *a = (be_frame_attr_t *) at;
int bits = get_type_size_bits(get_entity_type(a->ent));
ir_fprintf(f, "frame entity: %+F, offset 0x%x (%d), size 0x%x (%d) bits\n",
a->ent, a->offset, a->offset, bits, bits);
/* Opcode-specific extra info. */
switch(be_get_irn_opcode(irn)) {
/* Stack nodes: symbolic expand/shrink markers or a literal offset. */
be_stack_attr_t *a = (be_stack_attr_t *) at;
if (a->offset == BE_STACK_FRAME_SIZE_EXPAND)
fprintf(f, "offset: FRAME_SIZE\n");
else if(a->offset == BE_STACK_FRAME_SIZE_SHRINK)
fprintf(f, "offset: -FRAME SIZE\n");
fprintf(f, "offset: %u\n", a->offset);
/* Static calls: print the called entity's name. */
be_call_attr_t *a = (be_call_attr_t *) at;
fprintf(f, "\ncalling: %s\n", get_entity_name(a->ent));
/* MemPerm: print the in/out entity pair for every permuted slot. */
for(i = 0; i < be_get_MemPerm_entity_arity(irn); ++i) {
ir_entity *in, *out;
in = be_get_MemPerm_in_entity(irn, i);
out = be_get_MemPerm_out_entity(irn, i);
fprintf(f, "\nin[%d]: %s\n", i, get_entity_name(in));
fprintf(f, "\nout[%d]: %s\n", i, get_entity_name(out));
1502 * Copies the backend specific attributes from old node to new node.
1504 static void copy_attr(const ir_node *old_node, ir_node *new_node)
1506 be_node_attr_t *old_attr = get_irn_attr(old_node);
1507 be_node_attr_t *new_attr = get_irn_attr(new_node);
1510 assert(is_be_node(old_node));
1511 assert(is_be_node(new_node));
1513 memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
1514 new_attr->reg_data = NULL;
1516 if(new_attr->max_reg_data > 0) {
1517 new_attr->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(get_irn_irg(new_node)), new_attr->max_reg_data);
1518 memcpy(new_attr->reg_data, old_attr->reg_data, new_attr->max_reg_data * sizeof(be_reg_data_t));
1520 for(i = 0; i < old_attr->max_reg_data; ++i) {
1523 r = &new_attr->reg_data[i].req;
1524 r->req.limited_env = r;
1526 r = &new_attr->reg_data[i].in_req;
1527 r->req.limited_env = r;
1532 static const ir_op_ops be_node_op_ops = {