4 * @author Sebastian Hack
6 * Backend node support.
8 * This file provides Perm, Copy, Spill and Reload nodes.
10 * Copyright (C) 2005-2006 Universitaet Karlsruhe
11 * Released under the GPL
27 #include "bitfiddle.h"
39 #include "besched_t.h"
44 #define OUT_POS(x) (-((x) + 1))
46 /* Sometimes we want to put const nodes into get_irn_generic_attr ... */
47 #define get_irn_attr(irn) get_irn_generic_attr((ir_node *) (irn))
49 static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O');
52 be_req_kind_old_limited,
53 be_req_kind_negate_old_limited,
54 be_req_kind_single_reg
58 arch_register_req_t req;
60 arch_irn_flags_t flags;
63 void (*old_limited)(void *ptr, bitset_t *bs);
64 void *old_limited_env;
67 const arch_register_t *single_reg;
72 const arch_register_t *reg;
77 /** The generic be nodes attribute type. */
80 be_reg_data_t *reg_data;
83 /** The be_Return nodes attribute type. */
85 be_node_attr_t node_attr;
86 int num_ret_vals; /**< number of return values */
89 /** The be_Stack attribute type. */
91 be_node_attr_t node_attr;
92 int offset; /**< The offset by which the stack shall be expanded/shrinked. */
95 /** The be_Frame attribute type. */
97 be_node_attr_t node_attr;
102 /** The be_Call attribute type. */
104 be_node_attr_t node_attr;
105 ir_entity *ent; /**< The called entity if this is a static call. */
106 ir_type *call_tp; /**< The call type, copied from the original Call node. */
110 be_node_attr_t node_attr;
111 ir_entity **in_entities;
112 ir_entity **out_entities;
118 ir_op *op_be_MemPerm;
121 ir_op *op_be_CopyKeep;
128 ir_op *op_be_RegParams;
129 ir_op *op_be_StackParam;
130 ir_op *op_be_FrameAddr;
131 ir_op *op_be_FrameLoad;
132 ir_op *op_be_FrameStore;
133 ir_op *op_be_Barrier;
135 static int beo_base = -1;
137 static const ir_op_ops be_node_op_ops;
139 #define N irop_flag_none
140 #define L irop_flag_labeled
141 #define C irop_flag_commutative
142 #define X irop_flag_cfopcode
143 #define I irop_flag_ip_cfopcode
144 #define F irop_flag_fragile
145 #define Y irop_flag_forking
146 #define H irop_flag_highlevel
147 #define c irop_flag_constlike
148 #define K irop_flag_keep
149 #define M irop_flag_machine
153 * Compare two node attributes.
155 * @return zero if both attributes are identically
157 static int cmp_node_attr(be_node_attr_t *a, be_node_attr_t *b) {
158 if (a->max_reg_data == b->max_reg_data) {
161 for (i = 0; i < a->max_reg_data; ++i) {
162 if (a->reg_data[i].reg != b->reg_data[i].reg ||
163 memcmp(&a->reg_data[i].in_req, &b->reg_data[i].in_req, sizeof(b->reg_data[i].in_req)) ||
164 memcmp(&a->reg_data[i].req, &b->reg_data[i].req, sizeof(a->reg_data[i].req)))
173 * Compare the attributes of two FrameAddr nodes.
175 * @return zero if both attributes are identically
177 static int FrameAddr_cmp_attr(ir_node *a, ir_node *b) {
178 be_frame_attr_t *a_attr = get_irn_attr(a);
179 be_frame_attr_t *b_attr = get_irn_attr(b);
181 if (a_attr->ent == b_attr->ent && a_attr->offset == b_attr->offset)
182 return cmp_node_attr(&a_attr->node_attr, &b_attr->node_attr);
/**
 * Registers all backend node opcodes with the IR core and tags them so
 * is_be_node() can recognize them later.
 * NOTE(review): the usual `if (inited) return; inited = 1;` guard is not
 * visible in this chunk — confirm it is present around the body.
 */
void be_node_init(void) {
	static int inited = 0;

	/* Acquire all needed opcodes. */
	beo_base = get_next_ir_opcodes(beo_Last - 1);

	/* Create one ir_op per backend opcode; attribute size differs per kind. */
	op_be_Spill = new_ir_op(beo_base + beo_Spill, "be_Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
	op_be_Reload = new_ir_op(beo_base + beo_Reload, "be_Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
	op_be_Perm = new_ir_op(beo_base + beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
	op_be_MemPerm = new_ir_op(beo_base + beo_MemPerm, "be_MemPerm", op_pin_state_mem_pinned, N, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops);
	op_be_Copy = new_ir_op(beo_base + beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
	op_be_Keep = new_ir_op(beo_base + beo_Keep, "be_Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
	op_be_CopyKeep = new_ir_op(beo_base + beo_CopyKeep, "be_CopyKeep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
	op_be_Call = new_ir_op(beo_base + beo_Call, "be_Call", op_pin_state_pinned, F, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
	op_be_Return = new_ir_op(beo_base + beo_Return, "be_Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_return_attr_t), &be_node_op_ops);
	op_be_AddSP = new_ir_op(beo_base + beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
	op_be_SubSP = new_ir_op(beo_base + beo_SubSP, "be_SubSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
	op_be_SetSP = new_ir_op(beo_base + beo_SetSP, "be_SetSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
	op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
	op_be_RegParams = new_ir_op(beo_base + beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
	op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
	op_be_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "be_FrameAddr", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
	op_be_FrameLoad = new_ir_op(beo_base + beo_FrameLoad, "be_FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
	op_be_FrameStore = new_ir_op(beo_base + beo_FrameStore, "be_FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
	op_be_Barrier = new_ir_op(beo_base + beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_node_attr_t), &be_node_op_ops);

	/* Tag every opcode so is_be_node() can identify backend nodes. */
	set_op_tag(op_be_Spill, &be_node_tag);
	set_op_tag(op_be_Reload, &be_node_tag);
	set_op_tag(op_be_Perm, &be_node_tag);
	set_op_tag(op_be_MemPerm, &be_node_tag);
	set_op_tag(op_be_Copy, &be_node_tag);
	set_op_tag(op_be_Keep, &be_node_tag);
	set_op_tag(op_be_CopyKeep, &be_node_tag);
	set_op_tag(op_be_Call, &be_node_tag);
	set_op_tag(op_be_Return, &be_node_tag);
	set_op_tag(op_be_AddSP, &be_node_tag);
	set_op_tag(op_be_SubSP, &be_node_tag);
	set_op_tag(op_be_SetSP, &be_node_tag);
	set_op_tag(op_be_IncSP, &be_node_tag);
	set_op_tag(op_be_RegParams, &be_node_tag);
	set_op_tag(op_be_StackParam, &be_node_tag);
	set_op_tag(op_be_FrameLoad, &be_node_tag);
	set_op_tag(op_be_FrameStore, &be_node_tag);
	set_op_tag(op_be_FrameAddr, &be_node_tag);
	set_op_tag(op_be_Barrier, &be_node_tag);

	/* FrameAddr is the only opcode with an attribute comparison, enabling CSE. */
	op_be_FrameAddr->ops.node_cmp_attr = FrameAddr_cmp_attr;
241 * Initializes the generic attribute of all be nodes and return ir.
243 static void *init_node_attr(ir_node* irn, int max_reg_data)
245 ir_graph *irg = get_irn_irg(irn);
246 be_node_attr_t *a = get_irn_attr(irn);
248 memset(a, 0, sizeof(get_op_attr_size(get_irn_op(irn))));
249 a->max_reg_data = max_reg_data;
252 if(max_reg_data > 0) {
255 a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), max_reg_data);
256 memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0]));
257 for(i = 0; i < max_reg_data; ++i) {
258 a->reg_data[i].req.req.cls = NULL;
259 a->reg_data[i].req.req.type = arch_register_req_type_none;
266 int is_be_node(const ir_node *irn)
268 return get_op_tag(get_irn_op(irn)) == &be_node_tag;
271 be_opcode_t be_get_irn_opcode(const ir_node *irn)
273 return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp;
/**
 * Skip Proj nodes and return their Proj numbers.
 *
 * If *node is a Proj or Proj(Proj) node, skip it.
 *
 * @param node points to the node to be skipped
 *
 * @return 0 if *node was no Proj node, its Proj number else.
 */
static int redir_proj(const ir_node **node)
{
	const ir_node *n = *node;

	/* NOTE(review): the Proj test and the declaration of `irn` are not visible
	 * in this chunk — confirm this assignment is guarded by is_Proj(n). */
		*node = irn = get_Proj_pred(n);
			/* a second Proj level may only hang off a mode_T tuple */
			assert(get_irn_mode(irn) == mode_T);
			*node = get_Proj_pred(irn);
		return get_Proj_proj(n);
/**
 * Returns the backend attribute belonging to @p irn, resolving Projs to their
 * producing backend node; stores the selected result index in *the_pos.
 * Returns NULL if @p irn does not belong to a backend node.
 */
static be_node_attr_t *retrieve_irn_attr(const ir_node *irn, int *the_pos)
{
	be_node_attr_t *res = NULL;
	/* callers may pass NULL for the_pos; `dummy` (declared in the lines not
	 * visible here) absorbs the write in that case */
	int *pos = the_pos ? the_pos : &dummy;

		ir_node *pred = get_Proj_pred(irn);
		int p = get_Proj_proj(irn);

		/* a Proj of a backend node: use the predecessor's attribute, the
		 * Proj number selects the register-data slot */
		if(is_be_node(pred)) {
			assert(get_irn_mode(pred) == mode_T);
			res = get_irn_attr(pred);
			assert(p >= 0 && p < res->max_reg_data && "illegal proj number");
	/* a non-tuple backend node carries its own single result in slot 0 */
	else if(is_be_node(irn) && get_irn_mode(irn) != mode_T) {
		be_node_attr_t *a = get_irn_attr(irn);
		if(a->max_reg_data > 0) {
333 static be_reg_data_t *retrieve_reg_data(const ir_node *irn)
336 be_node_attr_t *a = retrieve_irn_attr(irn, &pos);
337 return a ? &a->reg_data[pos] : NULL;
341 be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
343 be_reg_data_t *r = retrieve_reg_data(irn);
350 ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
351 ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *to_spill)
359 res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in);
360 a = init_node_attr(res, 2);
364 be_node_set_reg_class(res, be_pos_Spill_frame, cls_frame);
365 be_node_set_reg_class(res, be_pos_Spill_val, cls);
369 ir_node *be_new_Reload(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
370 ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *mem, ir_mode *mode)
377 res = new_ir_node(NULL, irg, bl, op_be_Reload, mode, 2, in);
379 init_node_attr(res, 2);
380 be_node_set_reg_class(res, -1, cls);
381 be_node_set_reg_class(res, be_pos_Reload_frame, cls_frame);
382 be_node_set_flags(res, -1, arch_irn_flags_rematerializable);
386 ir_node *be_get_Reload_mem(const ir_node *irn)
388 assert(be_is_Reload(irn));
389 return get_irn_n(irn, be_pos_Reload_mem);
392 ir_node *be_get_Reload_frame(const ir_node *irn)
394 assert(be_is_Reload(irn));
395 return get_irn_n(irn, be_pos_Reload_frame);
398 ir_node *be_get_Spill_val(const ir_node *irn)
400 assert(be_is_Spill(irn));
401 return get_irn_n(irn, be_pos_Spill_val);
403 ir_node *be_get_Spill_frame(const ir_node *irn)
405 assert(be_is_Spill(irn));
406 return get_irn_n(irn, be_pos_Spill_frame);
409 ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
412 ir_node *irn = new_ir_node(NULL, irg, bl, op_be_Perm, mode_T, n, in);
413 init_node_attr(irn, n);
414 for(i = 0; i < n; ++i) {
415 be_node_set_reg_class(irn, i, cls);
416 be_node_set_reg_class(irn, OUT_POS(i), cls);
/**
 * Creates a be_MemPerm permuting @p n values through memory (spill slots).
 * Input 0 is the frame pointer; inputs 1..n are the values.
 */
ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
{
	ir_node *frame = get_irg_frame(irg);
	const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
	const arch_register_t *sp = arch_env->isa->sp;
	be_memperm_attr_t *attr;

	/* stack allocation of the real input array: frame pointer + n values.
	 * NOTE(review): the line storing the frame into real_in[0] is not visible
	 * in this chunk — confirm real_in[0] is set before node construction. */
	real_in = alloca((n+1) * sizeof(real_in[0]));
	memcpy(&real_in[1], in, n * sizeof(real_in[0]));

	irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n+1, real_in);

	init_node_attr(irn, n + 1);
	be_node_set_reg_class(irn, 0, sp->reg_class);
	for(i = 0; i < n; ++i) {
		be_node_set_reg_class(irn, i + 1, cls_frame);
		be_node_set_reg_class(irn, OUT_POS(i), cls_frame);

	attr = get_irn_attr(irn);

	/* per-value in/out spill-slot entities, filled in later by the spill phase */
	attr->in_entities = obstack_alloc(irg->obst, n * sizeof(attr->in_entities[0]));
	memset(attr->in_entities, 0, n * sizeof(attr->in_entities[0]));
	attr->out_entities = obstack_alloc(irg->obst, n*sizeof(attr->out_entities[0]));
	memset(attr->out_entities, 0, n*sizeof(attr->out_entities[0]));
456 ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op)
462 res = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), 1, in);
463 init_node_attr(res, 1);
464 be_node_set_reg_class(res, 0, cls);
465 be_node_set_reg_class(res, OUT_POS(0), cls);
469 ir_node *be_get_Copy_op(const ir_node *cpy) {
470 return get_irn_n(cpy, be_pos_Copy_op);
473 void be_set_Copy_op(ir_node *cpy, ir_node *op) {
474 set_irn_n(cpy, be_pos_Copy_op, op);
477 ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
482 irn = new_ir_node(NULL, irg, bl, op_be_Keep, mode_ANY, n, in);
483 init_node_attr(irn, n);
484 for(i = 0; i < n; ++i) {
485 be_node_set_reg_class(irn, i, cls);
491 ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr,
492 int n_outs, int n, ir_node *in[], ir_type *call_tp)
495 int real_n = be_pos_Call_first_arg + n;
499 NEW_ARR_A(ir_node *, real_in, real_n);
500 real_in[be_pos_Call_mem] = mem;
501 real_in[be_pos_Call_sp] = sp;
502 real_in[be_pos_Call_ptr] = ptr;
503 memcpy(&real_in[be_pos_Call_first_arg], in, n * sizeof(in[0]));
505 irn = new_ir_node(dbg, irg, bl, op_be_Call, mode_T, real_n, real_in);
506 a = init_node_attr(irn, (n_outs > real_n ? n_outs : real_n));
508 a->call_tp = call_tp;
512 /* Gets the call entity or NULL if this is no static call. */
513 ir_entity *be_Call_get_entity(const ir_node *call) {
514 be_call_attr_t *a = get_irn_attr(call);
515 assert(be_is_Call(call));
519 /* Sets the call entity. */
520 void be_Call_set_entity(ir_node *call, ir_entity *ent) {
521 be_call_attr_t *a = get_irn_attr(call);
522 assert(be_is_Call(call));
526 /* Gets the call type. */
527 ir_type *be_Call_get_type(ir_node *call) {
528 be_call_attr_t *a = get_irn_attr(call);
529 assert(be_is_Call(call));
533 /* Sets the call type. */
534 void be_Call_set_type(ir_node *call, ir_type *call_tp) {
535 be_call_attr_t *a = get_irn_attr(call);
536 assert(be_is_Call(call));
537 a->call_tp = call_tp;
540 /* Construct a new be_Return. */
541 ir_node *be_new_Return(dbg_info *dbg, ir_graph *irg, ir_node *bl, int n_res, int n, ir_node *in[])
544 ir_node *irn = new_ir_node(dbg, irg, bl, op_be_Return, mode_X, n, in);
545 init_node_attr(irn, n);
546 a = get_irn_attr(irn);
547 a->num_ret_vals = n_res;
552 /* Returns the number of real returns values */
553 int be_Return_get_n_rets(ir_node *ret)
555 be_return_attr_t *a = get_irn_attr(ret);
556 return a->num_ret_vals;
559 ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, int offset)
566 irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, sizeof(in) / sizeof(in[0]), in);
567 a = init_node_attr(irn, 1);
570 be_node_set_flags(irn, -1, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
572 /* Set output constraint to stack register. */
573 be_node_set_reg_class(irn, 0, sp->reg_class);
574 be_set_constr_single_reg(irn, BE_OUT_POS(0), sp);
575 be_node_set_irn_reg(NULL, irn, sp);
580 ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz)
584 ir_node *in[be_pos_AddSP_last];
586 in[be_pos_AddSP_old_sp] = old_sp;
587 in[be_pos_AddSP_size] = sz;
589 irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, be_pos_AddSP_last, in);
590 a = init_node_attr(irn, be_pos_AddSP_last);
592 be_node_set_flags(irn, OUT_POS(pn_be_AddSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
594 /* Set output constraint to stack register. */
595 be_set_constr_single_reg(irn, be_pos_AddSP_old_sp, sp);
596 be_node_set_reg_class(irn, be_pos_AddSP_size, arch_register_get_class(sp));
597 be_set_constr_single_reg(irn, OUT_POS(pn_be_AddSP_res), sp);
598 a->reg_data[pn_be_AddSP_res].reg = sp;
603 ir_node *be_new_SubSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz)
607 ir_node *in[be_pos_SubSP_last];
609 in[be_pos_SubSP_old_sp] = old_sp;
610 in[be_pos_SubSP_size] = sz;
612 irn = new_ir_node(NULL, irg, bl, op_be_SubSP, mode_T, be_pos_SubSP_last, in);
613 a = init_node_attr(irn, be_pos_SubSP_last);
615 be_node_set_flags(irn, OUT_POS(pn_be_SubSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
617 /* Set output constraint to stack register. */
618 be_set_constr_single_reg(irn, be_pos_SubSP_old_sp, sp);
619 be_node_set_reg_class(irn, be_pos_SubSP_size, arch_register_get_class(sp));
620 be_set_constr_single_reg(irn, OUT_POS(pn_be_SubSP_res), sp);
621 a->reg_data[pn_be_SubSP_res].reg = sp;
/**
 * Creates a be_SetSP: sets the stack pointer to the value of @p op.
 * NOTE(review): the input-array setup lines are not visible in this chunk.
 */
ir_node *be_new_SetSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *op, ir_node *mem)
{
	irn = new_ir_node(NULL, irg, bl, op_be_SetSP, get_irn_mode(old_sp), 3, in);
	a = init_node_attr(irn, 3);

	be_node_set_flags(irn, OUT_POS(0), arch_irn_flags_ignore | arch_irn_flags_modify_sp);

	/* Set output constraint to stack register. */
	be_set_constr_single_reg(irn, OUT_POS(0), sp);
	/* NOTE(review): these reuse the be_pos_AddSP_* position constants for a
	 * SetSP node — presumably the layouts coincide; confirm against the
	 * position enum, or introduce SetSP-specific constants. */
	be_node_set_reg_class(irn, be_pos_AddSP_size, sp->reg_class);
	be_node_set_reg_class(irn, be_pos_AddSP_old_sp, sp->reg_class);
648 ir_node *be_new_StackParam(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, ir_entity *ent)
654 in[0] = frame_pointer;
655 irn = new_ir_node(NULL, irg, bl, op_be_StackParam, mode, 1, in);
656 a = init_node_attr(irn, 1);
659 be_node_set_reg_class(irn, 0, cls_frame);
660 be_node_set_reg_class(irn, OUT_POS(0), cls);
664 ir_node *be_new_RegParams(ir_graph *irg, ir_node *bl, int n_outs)
669 irn = new_ir_node(NULL, irg, bl, op_be_RegParams, mode_T, 0, in);
670 init_node_attr(irn, n_outs);
/**
 * Creates a be_FrameLoad loading entity @p ent from the stack frame.
 * NOTE(review): the input-array setup and the attribute entity assignment
 * are not visible in this chunk — confirm in[] = {mem, frame} and a->ent = ent.
 */
ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
	ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_entity *ent)
{
	irn = new_ir_node(NULL, irg, bl, op_be_FrameLoad, mode_T, 2, in);
	a = init_node_attr(irn, 3);

	be_node_set_reg_class(irn, 1, cls_frame);
	/* result projection uses the Load proj numbering */
	be_node_set_reg_class(irn, OUT_POS(pn_Load_res), cls_data);

/**
 * Creates a be_FrameStore storing @p data to entity @p ent in the stack frame.
 * NOTE(review): input-array setup and entity assignment not visible here either.
 */
ir_node *be_new_FrameStore(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
	ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_node *data, ir_entity *ent)
{
	irn = new_ir_node(NULL, irg, bl, op_be_FrameStore, mode_T, 3, in);
	a = init_node_attr(irn, 3);

	be_node_set_reg_class(irn, 1, cls_frame);
	be_node_set_reg_class(irn, 2, cls_data);
711 ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_entity *ent)
718 irn = new_ir_node(NULL, irg, bl, op_be_FrameAddr, get_irn_mode(frame), 1, in);
719 a = init_node_attr(irn, 1);
722 be_node_set_reg_class(irn, 0, cls_frame);
723 be_node_set_reg_class(irn, OUT_POS(0), cls_frame);
725 return optimize_node(irn);
728 ir_node *be_new_CopyKeep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, int n, ir_node *in_keep[], ir_mode *mode)
731 ir_node **in = (ir_node **) alloca((n + 1) * sizeof(in[0]));
734 memcpy(&in[1], in_keep, n * sizeof(in[0]));
735 irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, n + 1, in);
736 init_node_attr(irn, n + 1);
737 be_node_set_reg_class(irn, OUT_POS(0), cls);
738 be_node_set_reg_class(irn, 0, cls);
743 ir_node *be_new_CopyKeep_single(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, ir_node *keep, ir_mode *mode)
748 return be_new_CopyKeep(cls, irg, bl, src, 1, in, mode);
751 ir_node *be_get_CopyKeep_op(const ir_node *cpy) {
752 return get_irn_n(cpy, be_pos_CopyKeep_op);
755 void be_set_CopyKeep_op(ir_node *cpy, ir_node *op) {
756 set_irn_n(cpy, be_pos_CopyKeep_op, op);
759 ir_node *be_new_Barrier(ir_graph *irg, ir_node *bl, int n, ir_node *in[])
763 irn = new_ir_node(NULL, irg, bl, op_be_Barrier, mode_T, n, in);
764 init_node_attr(irn, n);
/* Opcode predicates: each returns non-zero iff irn is the given backend node. */
int be_is_Spill (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Spill ; }
int be_is_Reload (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Reload ; }
int be_is_Copy (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Copy ; }
int be_is_CopyKeep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_CopyKeep ; }
int be_is_Perm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Perm ; }
int be_is_MemPerm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_MemPerm ; }
int be_is_Keep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Keep ; }
int be_is_Call (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Call ; }
int be_is_Return (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Return ; }
int be_is_IncSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_IncSP ; }
int be_is_SetSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SetSP ; }
int be_is_AddSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_AddSP ; }
int be_is_SubSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SubSP ; }
int be_is_RegParams (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_RegParams ; }
int be_is_StackParam (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_StackParam ; }
int be_is_FrameAddr (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameAddr ; }
int be_is_FrameLoad (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameLoad ; }
int be_is_FrameStore (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameStore ; }
int be_is_Barrier (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Barrier ; }
/**
 * Returns non-zero if @p irn carries a be_frame_attr_t (i.e. has a frame entity).
 * NOTE(review): the case labels of the switch are not visible in this chunk —
 * presumably Spill/Reload/StackParam/FrameAddr/FrameLoad/FrameStore; confirm.
 */
int be_has_frame_entity(const ir_node *irn)
{
	switch(be_get_irn_opcode(irn)) {

/** Returns the frame entity of @p irn, or NULL if it has none. */
ir_entity *be_get_frame_entity(const ir_node *irn)
{
	if (be_has_frame_entity(irn)) {
		/* safe: be_has_frame_entity guarantees the attribute is a be_frame_attr_t */
		be_frame_attr_t *a = get_irn_attr(irn);

/** Returns the frame offset of @p irn, or 0 if it has no frame entity. */
int be_get_frame_offset(const ir_node *irn)
{
	assert(is_be_node(irn));
	if (be_has_frame_entity(irn)) {
		be_frame_attr_t *a = get_irn_attr(irn);
822 void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity *ent)
824 be_memperm_attr_t *attr = get_irn_attr(irn);
826 assert(be_is_MemPerm(irn));
827 assert(n < be_get_MemPerm_entity_arity(irn));
829 attr->in_entities[n] = ent;
832 ir_entity* be_get_MemPerm_in_entity(const ir_node* irn, int n)
834 be_memperm_attr_t *attr = get_irn_attr(irn);
836 assert(be_is_MemPerm(irn));
837 assert(n < be_get_MemPerm_entity_arity(irn));
839 return attr->in_entities[n];
842 void be_set_MemPerm_out_entity(const ir_node *irn, int n, ir_entity *ent)
844 be_memperm_attr_t *attr = get_irn_attr(irn);
846 assert(be_is_MemPerm(irn));
847 assert(n < be_get_MemPerm_entity_arity(irn));
849 attr->out_entities[n] = ent;
852 ir_entity* be_get_MemPerm_out_entity(const ir_node* irn, int n)
854 be_memperm_attr_t *attr = get_irn_attr(irn);
856 assert(be_is_MemPerm(irn));
857 assert(n < be_get_MemPerm_entity_arity(irn));
859 return attr->out_entities[n];
862 int be_get_MemPerm_entity_arity(const ir_node *irn)
864 return get_irn_arity(irn) - 1;
/**
 * Limited-register callback: restricts the allowed-register bitset @p bs
 * according to the requirement record passed as @p data.
 * NOTE(review): the switch head, break statements and the bitset negation for
 * the negate case are not visible in this chunk.
 */
static void be_limited(void *data, bitset_t *bs)
{
	be_req_t *req = data;

	case be_req_kind_negate_old_limited:
	case be_req_kind_old_limited:
		/* delegate to the wrapped legacy limited function */
		req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs);
		if(req->kind == be_req_kind_negate_old_limited)
	case be_req_kind_single_reg:
		/* exactly one register is admissible */
		bitset_clear_all(bs);
		bitset_set(bs, req->x.single_reg->index);
885 static INLINE be_req_t *get_req(ir_node *irn, int pos)
887 int idx = pos < 0 ? -(pos + 1) : pos;
888 be_node_attr_t *a = get_irn_attr(irn);
889 be_reg_data_t *rd = &a->reg_data[idx];
890 be_req_t *r = pos < 0 ? &rd->req : &rd->in_req;
892 assert(is_be_node(irn));
893 assert(!(pos >= 0) || pos < get_irn_arity(irn));
894 assert(!(pos < 0) || -(pos + 1) <= a->max_reg_data);
899 void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg)
901 be_req_t *r = get_req(irn, pos);
903 r->kind = be_req_kind_single_reg;
904 r->x.single_reg = reg;
905 r->req.limited = be_limited;
906 r->req.limited_env = r;
907 r->req.type = arch_register_req_type_limited;
908 r->req.cls = reg->reg_class;
911 void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req)
913 be_req_t *r = get_req(irn, pos);
915 assert(arch_register_req_is(req, limited));
917 r->kind = be_req_kind_old_limited;
918 r->req.limited = be_limited;
919 r->req.limited_env = r;
920 r->req.type = arch_register_req_type_limited;
921 r->req.cls = req->cls;
923 r->x.old_limited.old_limited = req->limited;
924 r->x.old_limited.old_limited_env = req->limited_env;
927 void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags)
929 be_req_t *r = get_req(irn, pos);
933 void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls)
935 be_req_t *r = get_req(irn, pos);
940 r->req.type = arch_register_req_type_none;
941 else if (r->req.type == arch_register_req_type_none)
942 r->req.type = arch_register_req_type_normal;
945 void be_node_set_req_type(ir_node *irn, int pos, arch_register_req_type_t type)
947 be_req_t *r = get_req(irn, pos);
951 ir_node *be_get_IncSP_pred(ir_node *irn) {
952 assert(be_is_IncSP(irn));
953 return get_irn_n(irn, 0);
956 void be_set_IncSP_pred(ir_node *incsp, ir_node *pred) {
957 assert(be_is_IncSP(incsp));
958 set_irn_n(incsp, 0, pred);
961 ir_node *be_get_IncSP_mem(ir_node *irn) {
962 assert(be_is_IncSP(irn));
963 return get_irn_n(irn, 1);
966 void be_set_IncSP_offset(ir_node *irn, int offset)
968 be_stack_attr_t *a = get_irn_attr(irn);
969 assert(be_is_IncSP(irn));
973 int be_get_IncSP_offset(const ir_node *irn)
975 be_stack_attr_t *a = get_irn_attr(irn);
976 assert(be_is_IncSP(irn));
980 ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn)
982 ir_node *bl = get_nodes_block(irn);
983 ir_graph *irg = get_irn_irg(bl);
984 ir_node *frame = get_irg_frame(irg);
985 const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
986 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
989 spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
993 ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
996 ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
997 ir_graph *irg = get_irn_irg(bl);
998 ir_node *frame = get_irg_frame(irg);
999 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
1001 assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
1003 reload = be_new_Reload(cls, cls_frame, irg, bl, frame, spill, mode);
1005 if (is_Block(insert)) {
1006 insert = sched_skip(insert, 0, sched_skip_cf_predicator, (void *) arch_env);
1007 sched_add_after(insert, reload);
1011 sched_add_before(insert, reload);
1018 | _ \ ___ __ _ | _ \ ___ __ _ ___
1019 | |_) / _ \/ _` | | |_) / _ \/ _` / __|
1020 | _ < __/ (_| | | _ < __/ (_| \__ \
1021 |_| \_\___|\__, | |_| \_\___|\__, |___/
/**
 * Fills @p req with the requirement of output @p out_pos of @p irn.
 * NOTE(review): the else branch (out-of-range -> type none) and the final
 * `return req;` are not visible in this chunk.
 */
static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos)
{
	const be_node_attr_t *a = get_irn_attr(irn);

	if(out_pos < a->max_reg_data) {
		memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));

		/* a Copy's output should end up in the same register as its operand */
		if(be_is_Copy(irn)) {
			req->type |= arch_register_req_type_should_be_same;
			req->other_same = be_get_Copy_op(irn);
		req->type = arch_register_req_type_none;

/**
 * Fills @p req with the requirement of input @p pos of @p irn; out-of-range
 * positions yield type none (used deliberately with INT_MAX by the caller).
 */
static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos)
{
	const be_node_attr_t *a = get_irn_attr(irn);

	if(pos < get_irn_arity(irn) && pos < a->max_reg_data)
		memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));
	req->type = arch_register_req_type_none;
/**
 * arch_irn_ops callback: returns the register requirement of @p irn at @p pos.
 * For mode_T nodes the (Proj-encoded) output requirement is returned,
 * otherwise the input requirement.
 */
static const arch_register_req_t *
be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
{
	if (get_irn_mode(irn) == mode_T)

	/* redir_proj rewrites irn to the producing node and yields the out index */
	out_pos = redir_proj((const ir_node **)&irn);
	assert(is_be_node(irn));
	return put_out_reg_req(req, irn, out_pos);

	if (is_be_node(irn)) {
		/*
		For spills and reloads, we return "none" as requirement for frame pointer,
		so every input is ok. Some backends need this (e.g. STA). We use an arbitrary
		large number as pos, so put_in_reg_req will return "none" as requirement.
		*/
		if ((be_is_Spill(irn) && pos == be_pos_Spill_frame) ||
			(be_is_Reload(irn) && pos == be_pos_Reload_frame))
			return put_in_reg_req(req, irn, INT_MAX);

		return put_in_reg_req(req, irn, pos);
/** arch_irn_ops callback: returns the register assigned to @p irn, or NULL. */
const arch_register_t *
be_node_get_irn_reg(const void *_self, const ir_node *irn)
{
	be_reg_data_t *r = retrieve_reg_data(irn);
	return r ? r->reg : NULL;
}

/**
 * arch_irn_ops callback: classifies a backend node.
 * NOTE(review): several XXX(...) case lines of this switch are missing from
 * this chunk (e.g. Spill, Perm); only a subset is visible.
 */
static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
{
	redir_proj((const ir_node **) &irn);

	switch(be_get_irn_opcode(irn)) {
#define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b
	XXX(Reload, reload);
	XXX(Return, branch);
	XXX(StackParam, stackparam);
	return arch_irn_class_normal;

/** arch_irn_ops callback: returns the flags recorded for @p irn's slot. */
static arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn)
{
	be_reg_data_t *r = retrieve_reg_data(irn);
	return r ? r->req.flags : 0;
}

/** arch_irn_ops callback: forwards to be_get_frame_entity. */
static ir_entity *be_node_get_frame_entity(const void *self, const ir_node *irn)
{
	return be_get_frame_entity(irn);
}

/** arch_irn_ops callback: sets the frame entity of a frame-carrying node. */
static void be_node_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent)
{
	assert(be_has_frame_entity(irn));

	a = get_irn_attr(irn);

/** arch_irn_ops callback: sets the frame offset of a frame-carrying node. */
static void be_node_set_frame_offset(const void *self, ir_node *irn, int offset)
{
	if(be_has_frame_entity(irn)) {
		be_frame_attr_t *a = get_irn_attr(irn);

/** arch_irn_ops callback: only be_IncSP changes the stack pointer. */
static int be_node_get_sp_bias(const void *self, const ir_node *irn)
{
	return be_is_IncSP(irn) ? be_get_IncSP_offset(irn) : 0;
1156 ___ ____ _ _ _ _ _ _
1157 |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
1158 | || |_) | \| | | |_| |/ _` | '_ \ / _` | |/ _ \ '__|
1159 | || _ <| |\ | | _ | (_| | | | | (_| | | __/ |
1160 |___|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
/* Callback table wiring the backend-node implementations into the arch
 * interface.  NOTE(review): the classify/get_flags entries between
 * get_irn_reg and get_frame_entity are not visible in this chunk. */
static const arch_irn_ops_if_t be_node_irn_ops_if = {
	be_node_get_irn_reg_req,
	be_node_set_irn_reg,
	be_node_get_irn_reg,
	be_node_get_frame_entity,
	be_node_set_frame_entity,
	be_node_set_frame_offset,
	be_node_get_sp_bias,
	NULL, /* get_inverse */
	NULL, /* get_op_estimated_cost */
	NULL, /* possible_memory_operand */
	NULL, /* perform_memory_operand */

static const arch_irn_ops_t be_node_irn_ops = {

/** Handler callback: backend nodes (and their Projs) use be_node_irn_ops. */
const void *be_node_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
{
	redir_proj((const ir_node **) &irn);
	return is_be_node(irn) ? &be_node_irn_ops : NULL;

const arch_irn_handler_t be_node_irn_handler = {
1195 ____ _ _ ___ ____ _ _ _ _ _ _
1196 | _ \| |__ (_) |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
1197 | |_) | '_ \| | | || |_) | \| | | |_| |/ _` | '_ \ / _` | |/ _ \ '__|
1198 | __/| | | | | | || _ <| |\ | | _ | (_| | | | | (_| | | __/ |
1199 |_| |_| |_|_| |___|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1204 arch_irn_handler_t irn_handler;
1205 arch_irn_ops_t irn_ops;
1206 const arch_env_t *arch_env;
1210 #define get_phi_handler_from_handler(h) container_of(h, phi_handler_t, irn_handler)
1211 #define get_phi_handler_from_ops(h) container_of(h, phi_handler_t, irn_ops)
1213 static const void *phi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1215 const phi_handler_t *h = get_phi_handler_from_handler(handler);
1216 return is_Phi(irn) && mode_is_datab(get_irn_mode(irn)) ? &h->irn_ops : NULL;
1220 * Get register class of a Phi.
1223 static const arch_register_req_t *get_Phi_reg_req_recursive(const phi_handler_t *h, arch_register_req_t *req, const ir_node *phi, pset **visited)
1225 int n = get_irn_arity(phi);
/* Already on the current DFS path: bail out to avoid cycling through
 * mutually-referencing Phis. */
1229 if(*visited && pset_find_ptr(*visited, phi))
/* Fast path: if any operand is a non-Phi, its output requirement is the
 * answer — all Phi operands must agree on the register class. */
1232 for(i = 0; i < n; ++i) {
1233 op = get_irn_n(phi, i);
1235 return arch_get_register_req(h->arch_env, req, op, BE_OUT_POS(0));
1239 The operands of that Phi were all Phis themselves.
1240 We have to start a DFS for a non-Phi argument now.
/* Allocate the visited set lazily — only Phi-of-Phi chains need it. */
1243 *visited = pset_new_ptr(16);
/* Mark this Phi before recursing so the cycle check above fires. */
1245 pset_insert_ptr(*visited, phi);
1247 for(i = 0; i < n; ++i) {
1248 op = get_irn_n(phi, i);
1249 if(get_Phi_reg_req_recursive(h, req, op, visited))
/**
 * arch_irn_ops callback: compute the register requirement of a Phi by
 * walking (transitively) to a non-Phi operand and taking its output
 * requirement.
 * NOTE(review): the pset allocated inside get_Phi_reg_req_recursive is
 * not destroyed in the lines visible here — presumably freed after the
 * call; verify del_pset(visited) exists, otherwise this leaks per query.
 */
1256 static const arch_register_req_t *phi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1258 phi_handler_t *phi_handler = get_phi_handler_from_ops(self);
1259 pset *visited = NULL;
1261 get_Phi_reg_req_recursive(phi_handler, req, irn, &visited);
1262 /* Set the requirements type to normal, since an operand of the Phi could have had constraints. */
1263 req->type = arch_register_req_type_normal;
/**
 * arch_irn_ops callback: record the register assigned to a Phi.
 * Phis have no backend attribute to store the register in, so the
 * handler keeps an external Phi -> register map (h->regs).
 */
1270 static void phi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1272 phi_handler_t *h = get_phi_handler_from_ops(self);
1273 pmap_insert(h->regs, irn, (void *) reg);
/**
 * arch_irn_ops callback: look up the register previously assigned to a
 * Phi via phi_set_irn_reg. Returns NULL if none was recorded.
 */
1276 static const arch_register_t *phi_get_irn_reg(const void *self, const ir_node *irn)
1278 phi_handler_t *h = get_phi_handler_from_ops(self);
1279 return pmap_get(h->regs, (void *) irn);
/** arch_irn_ops callback: Phis are ordinary nodes for the backend. */
1282 static arch_irn_class_t phi_classify(const void *_self, const ir_node *irn)
1284 return arch_irn_class_normal;
/** arch_irn_ops callback: Phis carry no special backend flags. */
1287 static arch_irn_flags_t phi_get_flags(const void *_self, const ir_node *irn)
1289 return arch_irn_flags_none;
/* Frame/stack related arch_irn_ops callbacks: Phis have no frame entity
 * and no stack-pointer bias, so these are stubs (bodies elided here). */
1292 static ir_entity *phi_get_frame_entity(const void *_self, const ir_node *irn)
1297 static void phi_set_frame_entity(const void *_self, ir_node *irn, ir_entity *ent)
1301 static void phi_set_frame_offset(const void *_self, ir_node *irn, int bias)
1305 static int phi_get_sp_bias(const void* self, const ir_node *irn)
/** irn_ops vtable wiring the Phi callbacks above; optional hooks stay NULL. */
1310 static const arch_irn_ops_if_t phi_irn_ops = {
1311 phi_get_irn_reg_req,
1316 phi_get_frame_entity,
1317 phi_set_frame_entity,
1318 phi_set_frame_offset,
1320 NULL, /* get_inverse */
1321 NULL, /* get_op_estimated_cost */
1322 NULL, /* possible_memory_operand */
1323 NULL, /* perform_memory_operand */
/** Handler instance for Phi nodes (dispatches via phi_get_irn_ops). */
1326 static const arch_irn_handler_t phi_irn_handler = {
/**
 * Create a new Phi handler: allocates the handler, wires the callback
 * vtables and creates the (initially empty) Phi -> register map.
 * Ownership passes to the caller; release with be_phi_handler_free.
 * xmalloc presumably aborts on OOM, so the result is never NULL —
 * TODO confirm against the project's xmalloc contract.
 */
1330 arch_irn_handler_t *be_phi_handler_new(const arch_env_t *arch_env)
1332 phi_handler_t *h = xmalloc(sizeof(h[0]));
1333 h->irn_handler.get_irn_ops = phi_get_irn_ops;
1334 h->irn_ops.impl = &phi_irn_ops;
1335 h->arch_env = arch_env;
1336 h->regs = pmap_create();
1337 return (arch_irn_handler_t *) h;
/**
 * Destroy a Phi handler created by be_phi_handler_new.
 * NOTE(review): only the register map is destroyed in the visible
 * lines; verify the handler struct itself is freed (free(handler)) on
 * an elided line, otherwise this leaks the phi_handler_t.
 */
1340 void be_phi_handler_free(arch_irn_handler_t *handler)
1342 phi_handler_t *h = (void *) handler;
1343 pmap_destroy(h->regs);
/**
 * Public variant of phi_get_irn_ops: returns the Phi irn_ops for any
 * Phi node.
 * NOTE(review): unlike the static phi_get_irn_ops this does NOT check
 * mode_is_datab, so memory/control Phis are accepted here — confirm
 * this asymmetry is intentional.
 */
1347 const void *be_phi_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
1349 phi_handler_t *phi_handler = get_phi_handler_from_handler(self);
1350 return is_Phi(irn) ? &phi_handler->irn_ops : NULL;
/**
 * Forget all cached Phi register assignments by destroying and
 * recreating the map (e.g. before a new register allocation round).
 */
1353 void be_phi_handler_reset(arch_irn_handler_t *handler)
1355 phi_handler_t *h = get_phi_handler_from_handler(handler);
1357 pmap_destroy(h->regs);
1358 h->regs = pmap_create();
1363 | \ | | ___ __| | ___ | _ \ _ _ _ __ ___ _ __ (_)_ __ __ _
1364 | \| |/ _ \ / _` |/ _ \ | | | | | | | '_ ` _ \| '_ \| | '_ \ / _` |
1365 | |\ | (_) | (_| | __/ | |_| | |_| | | | | | | |_) | | | | | (_| |
1366 |_| \_|\___/ \__,_|\___| |____/ \__,_|_| |_| |_| .__/|_|_| |_|\__, |
1371 * Dumps a register requirement to a file.
1373 static void dump_node_req(FILE *f, int idx, be_req_t *req)
1376 int did_something = 0;
/* Every printed item is prefixed with "#<idx> " for readability. */
1378 const char *prefix = buf;
1380 snprintf(buf, sizeof(buf), "#%d ", idx);
/* Defensive: guarantee NUL termination even on truncation. */
1381 buf[sizeof(buf) - 1] = '\0';
1383 if(req->flags != arch_irn_flags_none) {
1384 fprintf(f, "%sflags: ", prefix);
/* Walk all flag bit positions and print the names of the set bits. */
1386 for(i = arch_irn_flags_none; i <= log2_ceil(arch_irn_flags_last); ++i) {
1387 if(req->flags & (1 << i)) {
1388 fprintf(f, "%s%s", prefix, arch_irn_flag_str(1 << i));
/* Print the formatted register requirement if a class is set. */
1396 if(req->req.cls != 0) {
1399 arch_register_req_format(tmp, sizeof(tmp), &req->req);
1400 fprintf(f, "%s", tmp);
1409 * Dumps node register requirements to a file.
1411 static void dump_node_reqs(FILE *f, ir_node *irn)
1414 be_node_attr_t *a = get_irn_attr(irn);
/* First the registers already assigned to the outputs... */
1416 fprintf(f, "registers: \n");
1417 for(i = 0; i < a->max_reg_data; ++i) {
1418 be_reg_data_t *rd = &a->reg_data[i];
1420 fprintf(f, "#%d: %s\n", i, rd->reg->name);
/* ...then the input requirements... */
1423 fprintf(f, "in requirements\n");
1424 for(i = 0; i < a->max_reg_data; ++i) {
1425 dump_node_req(f, i, &a->reg_data[i].in_req);
/* ...and finally the output requirements. */
1428 fprintf(f, "\nout requirements\n");
1429 for(i = 0; i < a->max_reg_data; ++i) {
1430 dump_node_req(f, i, &a->reg_data[i].req);
1435 * ir_op-Operation: dump a be node to file
1437 static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
1439 be_node_attr_t *at = get_irn_attr(irn);
1441 assert(is_be_node(irn));
1444 case dump_node_opcode_txt:
/* FIXME(review): non-literal format string — if an op name ever
 * contains '%' this misbehaves; should be fputs() or fprintf(f, "%s", ...)
 * (CERT FIO30-C). Same for the mode name below. */
1445 fprintf(f, get_op_name(get_irn_op(irn)));
1447 case dump_node_mode_txt:
1448 fprintf(f, get_mode_name(get_irn_mode(irn)));
1450 case dump_node_nodeattr_txt:
1452 case dump_node_info_txt:
1453 dump_node_reqs(f, irn);
/* Nodes with a frame entity additionally print entity, offset and size. */
1455 if(be_has_frame_entity(irn)) {
1456 be_frame_attr_t *a = (be_frame_attr_t *) at;
1458 int bits = get_type_size_bits(get_entity_type(a->ent));
1459 ir_fprintf(f, "frame entity: %+F, offset 0x%x (%d), size 0x%x (%d) bits\n",
1460 a->ent, a->offset, a->offset, bits, bits);
/* Opcode-specific extra info. */
1465 switch(be_get_irn_opcode(irn)) {
1468 be_stack_attr_t *a = (be_stack_attr_t *) at;
/* The two sentinel values stand for "expand/shrink by frame size". */
1469 if (a->offset == BE_STACK_FRAME_SIZE_EXPAND)
1470 fprintf(f, "offset: FRAME_SIZE\n");
1471 else if(a->offset == BE_STACK_FRAME_SIZE_SHRINK)
1472 fprintf(f, "offset: -FRAME SIZE\n");
/* FIXME(review): offset is a signed int but printed with %u — use %d. */
1474 fprintf(f, "offset: %u\n", a->offset);
1479 be_call_attr_t *a = (be_call_attr_t *) at;
/* Only static calls carry a callee entity worth printing. */
1482 fprintf(f, "\ncalling: %s\n", get_entity_name(a->ent));
/* MemPerm: list the in/out frame entities being permuted. */
1488 for(i = 0; i < be_get_MemPerm_entity_arity(irn); ++i) {
1489 ir_entity *in, *out;
1490 in = be_get_MemPerm_in_entity(irn, i);
1491 out = be_get_MemPerm_out_entity(irn, i);
1493 fprintf(f, "\nin[%d]: %s\n", i, get_entity_name(in));
1496 fprintf(f, "\nout[%d]: %s\n", i, get_entity_name(out));
1512 * Copies the backend specific attributes from old node to new node.
1514 static void copy_attr(const ir_node *old_node, ir_node *new_node)
1516 be_node_attr_t *old_attr = get_irn_attr(old_node);
1517 be_node_attr_t *new_attr = get_irn_attr(new_node);
1520 assert(is_be_node(old_node));
1521 assert(is_be_node(new_node));
/* Shallow-copy the whole attribute struct first... */
1523 memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
1524 new_attr->reg_data = NULL;
/* ...then deep-copy the per-register data onto the new node's irg
 * obstack, so the copy does not alias the old node's array. */
1526 if(new_attr->max_reg_data > 0) {
1527 new_attr->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(get_irn_irg(new_node)), new_attr->max_reg_data);
1528 memcpy(new_attr->reg_data, old_attr->reg_data, new_attr->max_reg_data * sizeof(be_reg_data_t));
/* The limited_env fields are self-pointers into the reg_data entries;
 * after the memcpy they still point into the OLD node's array, so
 * rebind them to the freshly copied be_req_t structs. */
1530 for(i = 0; i < old_attr->max_reg_data; ++i) {
1533 r = &new_attr->reg_data[i].req;
1534 r->req.limited_env = r;
1536 r = &new_attr->reg_data[i].in_req;
1537 r->req.limited_env = r;
1542 static const ir_op_ops be_node_op_ops = {