4 * @author Sebastian Hack
6 * Backend node support.
8 * This file provides Perm, Copy, Spill and Reload nodes.
10 * Copyright (C) 2005-2006 Universitaet Karlsruhe
11 * Released under the GPL
26 #include "bitfiddle.h"
38 #include "besched_t.h"
/* Encode an output position: throughout this file input operands use
 * non-negative positions and outputs are stored as -(x + 1). */
43 #define OUT_POS(x) (-((x) + 1))
45 /* Sometimes we want to put const nodes into get_irn_generic_attr ... */
/* Casts away const so the attribute of a const ir_node* can be fetched. */
46 #define get_irn_attr(irn) get_irn_generic_attr((ir_node *) (irn))
/* Magic tag attached to every backend opcode; is_be_node() tests for it. */
48 static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O');
/* Kinds of register requirements (the enclosing enum header is elided
 * from this view). */
51 be_req_kind_old_limited,
52 be_req_kind_negate_old_limited,
53 be_req_kind_single_reg
/* be_req_t members: a requirement plus flags and kind-specific payload.
 * NOTE(review): the struct/union opening lines are elided here. */
57 arch_register_req_t req;
59 arch_irn_flags_t flags;
/* Payload for be_req_kind_(negate_)old_limited: wrapped limited callback
 * and its environment (see be_set_constr_limited / be_limited). */
62 void (*old_limited)(void *ptr, bitset_t *bs);
63 void *old_limited_env;
/* Payload for be_req_kind_single_reg: the single admissible register. */
66 const arch_register_t *single_reg;
/* be_reg_data_t: the register assigned to one result slot (the in/out
 * requirement members are elided in this view). */
71 const arch_register_t *reg;
76 /** The generic be nodes attribute type. */
/* Flexible array (libFirm ARR) of per-slot register data. */
78 be_reg_data_t *reg_data;
81 /** The be_Return nodes attribute type. */
83 be_node_attr_t node_attr;
84 int num_ret_vals; /**< number of return values */
87 /** The be_Stack attribute type. */
89 be_node_attr_t node_attr;
90 int offset; /**< The offset by which the stack shall be expanded/shrinked. */
93 /** The be_Frame attribute type. */
95 be_node_attr_t node_attr;
100 /** The be_Call attribute type. */
102 be_node_attr_t node_attr;
103 ir_entity *ent; /**< The called entity if this is a static call. */
104 ir_type *call_tp; /**< The call type, copied from the original Call node. */
/* be_MemPerm attribute: frame entities of the permuted values. */
108 be_node_attr_t node_attr;
109 ir_entity **in_entities;
110 ir_entity **out_entities;
/* Opcode objects for the backend node families; created in be_node_init(). */
116 ir_op *op_be_MemPerm;
119 ir_op *op_be_CopyKeep;
126 ir_op *op_be_RegParams;
127 ir_op *op_be_StackParam;
128 ir_op *op_be_FrameAddr;
129 ir_op *op_be_FrameLoad;
130 ir_op *op_be_FrameStore;
131 ir_op *op_be_Barrier;
/* First opcode number reserved for backend opcodes; -1 until init ran. */
133 static int beo_base = -1;
135 static const ir_op_ops be_node_op_ops;
/* One-letter aliases for the irop flags used in the new_ir_op() calls. */
137 #define N irop_flag_none
138 #define L irop_flag_labeled
139 #define C irop_flag_commutative
140 #define X irop_flag_cfopcode
141 #define I irop_flag_ip_cfopcode
142 #define F irop_flag_fragile
143 #define Y irop_flag_forking
144 #define H irop_flag_highlevel
145 #define c irop_flag_constlike
146 #define K irop_flag_keep
147 #define M irop_flag_machine
151 * Compare two node attributes.
153 * @return zero if both attributes are identically
/* Attributes match iff the reg_data arrays have the same length and each
 * slot agrees in assigned register and (bytewise) in/out requirements. */
155 static int cmp_node_attr(be_node_attr_t *a, be_node_attr_t *b) {
/* Different number of register slots => attributes differ. */
158 if(ARR_LEN(a->reg_data) != ARR_LEN(b->reg_data))
161 len = ARR_LEN(a->reg_data);
162 for (i = 0; i < len; ++i) {
163 if (a->reg_data[i].reg != b->reg_data[i].reg ||
164 memcmp(&a->reg_data[i].in_req, &b->reg_data[i].in_req, sizeof(b->reg_data[i].in_req)) ||
165 memcmp(&a->reg_data[i].req, &b->reg_data[i].req, sizeof(a->reg_data[i].req)))
173 * Compare the attributes of two FrameAddr nodes.
175 * @return zero if both attributes are identically
/* node_cmp_attr callback for be_FrameAddr (installed in be_node_init):
 * equal entity and offset, then delegate to the generic comparison. */
177 static int FrameAddr_cmp_attr(ir_node *a, ir_node *b) {
178 be_frame_attr_t *a_attr = get_irn_attr(a);
179 be_frame_attr_t *b_attr = get_irn_attr(b);
181 if (a_attr->ent == b_attr->ent && a_attr->offset == b_attr->offset)
182 return cmp_node_attr(&a_attr->node_attr, &b_attr->node_attr);
/* One-time module initialization: reserve an opcode range, create all
 * backend opcodes, tag them as backend ops and hook the FrameAddr
 * attribute comparison. Idempotent via the `inited` guard. */
186 void be_node_init(void) {
187 static int inited = 0;
194 /* Acquire all needed opcodes. */
/* NOTE(review): `beo_Last - 1` reserves one opcode less than the number
 * of beo_* constants would suggest -- confirm against the beo_t enum. */
195 beo_base = get_next_ir_opcodes(beo_Last - 1);
197 op_be_Spill = new_ir_op(beo_base + beo_Spill, "be_Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
198 op_be_Reload = new_ir_op(beo_base + beo_Reload, "be_Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
199 op_be_Perm = new_ir_op(beo_base + beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
200 op_be_MemPerm = new_ir_op(beo_base + beo_MemPerm, "be_MemPerm", op_pin_state_mem_pinned, N, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops);
201 op_be_Copy = new_ir_op(beo_base + beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
202 op_be_Keep = new_ir_op(beo_base + beo_Keep, "be_Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
203 op_be_CopyKeep = new_ir_op(beo_base + beo_CopyKeep, "be_CopyKeep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
204 op_be_Call = new_ir_op(beo_base + beo_Call, "be_Call", op_pin_state_pinned, F, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
205 op_be_Return = new_ir_op(beo_base + beo_Return, "be_Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_return_attr_t), &be_node_op_ops);
206 op_be_AddSP = new_ir_op(beo_base + beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
207 op_be_SubSP = new_ir_op(beo_base + beo_SubSP, "be_SubSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
208 op_be_SetSP = new_ir_op(beo_base + beo_SetSP, "be_SetSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
209 op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
210 op_be_RegParams = new_ir_op(beo_base + beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
211 op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
212 op_be_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "be_FrameAddr", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
213 op_be_FrameLoad = new_ir_op(beo_base + beo_FrameLoad, "be_FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
214 op_be_FrameStore = new_ir_op(beo_base + beo_FrameStore, "be_FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
215 op_be_Barrier = new_ir_op(beo_base + beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_node_attr_t), &be_node_op_ops);
/* Mark every opcode as a backend opcode (checked by is_be_node()). */
217 set_op_tag(op_be_Spill, &be_node_tag);
218 set_op_tag(op_be_Reload, &be_node_tag);
219 set_op_tag(op_be_Perm, &be_node_tag);
220 set_op_tag(op_be_MemPerm, &be_node_tag);
221 set_op_tag(op_be_Copy, &be_node_tag);
222 set_op_tag(op_be_Keep, &be_node_tag);
223 set_op_tag(op_be_CopyKeep, &be_node_tag);
224 set_op_tag(op_be_Call, &be_node_tag);
225 set_op_tag(op_be_Return, &be_node_tag);
226 set_op_tag(op_be_AddSP, &be_node_tag);
227 set_op_tag(op_be_SubSP, &be_node_tag);
228 set_op_tag(op_be_SetSP, &be_node_tag);
229 set_op_tag(op_be_IncSP, &be_node_tag);
230 set_op_tag(op_be_RegParams, &be_node_tag);
231 set_op_tag(op_be_StackParam, &be_node_tag);
232 set_op_tag(op_be_FrameLoad, &be_node_tag);
233 set_op_tag(op_be_FrameStore, &be_node_tag);
234 set_op_tag(op_be_FrameAddr, &be_node_tag);
235 set_op_tag(op_be_Barrier, &be_node_tag);
/* FrameAddr gets attribute-based CSE via its own comparison callback. */
237 op_be_FrameAddr->ops.node_cmp_attr = FrameAddr_cmp_attr;
241 * Initializes the generic attribute of all be nodes and return ir.
/* Zeroes the node attribute and, for max_reg_data >= 0, allocates and
 * clears a reg_data array of that length on the graph obstack. */
243 static void *init_node_attr(ir_node* irn, int max_reg_data)
245 ir_graph *irg = get_irn_irg(irn);
246 struct obstack *obst = get_irg_obstack(irg);
247 be_node_attr_t *a = get_irn_attr(irn);
/* NOTE(review): sizeof(get_op_attr_size(...)) yields the size of the
 * function's return type, NOT the attribute size -- this likely should be
 * memset(a, 0, get_op_attr_size(get_irn_op(irn))); confirm and fix. */
250 memset(a, 0, sizeof(get_op_attr_size(get_irn_op(irn))));
252 if(max_reg_data >= 0) {
253 a->reg_data = NEW_ARR_D(be_reg_data_t, obst, max_reg_data);
254 memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0]));
/* Explicitly reset each slot's requirement to "no class / none". */
257 for(i = 0; i < max_reg_data; ++i) {
258 a->reg_data[i].req.req.cls = NULL;
259 a->reg_data[i].req.req.type = arch_register_req_type_none;
/* A node is a backend node iff its opcode carries the be_node_tag. */
265 int is_be_node(const ir_node *irn)
267 return get_op_tag(get_irn_op(irn)) == &be_node_tag;
/* Translate a global opcode into the beo_* enum; beo_NoBeOp for
 * non-backend nodes. */
270 be_opcode_t be_get_irn_opcode(const ir_node *irn)
272 return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp;
276 * Skip Proj nodes and return their Proj numbers.
278 * If *node is a Proj or Proj(Proj) node, skip it.
280 * @param node points to the node to be skipped
282 * @return 0 if *node was no Proj node, its Proj number else.
/* Redirects *node to the (grand-)predecessor behind one or two Proj
 * levels; the returned proj number is that of the outermost Proj. */
284 static int redir_proj(const ir_node **node)
286 const ir_node *n = *node;
290 *node = irn = get_Proj_pred(n);
/* Proj(Proj): the inner predecessor must be a tuple-mode node. */
293 assert(get_irn_mode(irn) == mode_T);
294 *node = get_Proj_pred(irn);
296 return get_Proj_proj(n);
/* Find the backend attribute responsible for irn's register data.
 * For a Proj the attribute of the be-node predecessor is returned and
 * *the_pos is set to the proj number; for a non-tuple be node the slot
 * is 0 (via *the_pos). Returns NULL if irn has no register data. */
302 static be_node_attr_t *retrieve_irn_attr(const ir_node *irn, int *the_pos)
305 be_node_attr_t *res = NULL;
/* Callers may pass the_pos == NULL; write into a dummy then. */
306 int *pos = the_pos ? the_pos : &dummy;
310 ir_node *pred = get_Proj_pred(irn);
311 int p = get_Proj_proj(irn);
313 if(is_be_node(pred)) {
314 assert(get_irn_mode(pred) == mode_T);
316 res = get_irn_attr(pred);
317 assert(p >= 0 && p < ARR_LEN(res->reg_data) && "illegal proj number");
319 } else if(is_be_node(irn) && get_irn_mode(irn) != mode_T) {
320 be_node_attr_t *a = get_irn_attr(irn);
321 if(ARR_LEN(a->reg_data) > 0) {
/* Convenience wrapper: directly return the addressed reg_data slot. */
330 static be_reg_data_t *retrieve_reg_data(const ir_node *irn)
333 be_node_attr_t *a = retrieve_irn_attr(irn, &pos);
334 return a ? &a->reg_data[pos] : NULL;
/* arch_irn_ops callback: record the allocated register for irn in its
 * reg_data slot (the _self handler argument is unused). */
338 be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
340 be_reg_data_t *r = retrieve_reg_data(irn);
/* Construct a be_Spill(frame, to_spill) producing mode_M; the value
 * operand is constrained to cls, the frame operand to cls_frame. */
347 ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
348 ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *to_spill)
356 res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in);
357 a = init_node_attr(res, 2);
361 be_node_set_reg_class(res, be_pos_Spill_frame, cls_frame);
362 be_node_set_reg_class(res, be_pos_Spill_val, cls);
/* Construct a be_Reload(frame, mem) producing `mode`; the result is in
 * cls, the frame operand in cls_frame, and the node is marked
 * rematerializable so the spiller may duplicate it. */
366 ir_node *be_new_Reload(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
367 ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *mem, ir_mode *mode)
374 res = new_ir_node(NULL, irg, bl, op_be_Reload, mode, 2, in);
376 init_node_attr(res, 2);
/* pos -1 == OUT_POS(0): constrain the result register class. */
377 be_node_set_reg_class(res, -1, cls);
378 be_node_set_reg_class(res, be_pos_Reload_frame, cls_frame);
379 be_node_set_flags(res, -1, arch_irn_flags_rematerializable);
/* Simple operand accessors for Spill/Reload nodes. */
383 ir_node *be_get_Reload_mem(const ir_node *irn)
385 assert(be_is_Reload(irn));
386 return get_irn_n(irn, be_pos_Reload_mem);
389 ir_node *be_get_Reload_frame(const ir_node *irn)
391 assert(be_is_Reload(irn));
392 return get_irn_n(irn, be_pos_Reload_frame);
395 ir_node *be_get_Spill_val(const ir_node *irn)
397 assert(be_is_Spill(irn));
398 return get_irn_n(irn, be_pos_Spill_val);
400 ir_node *be_get_Spill_frame(const ir_node *irn)
402 assert(be_is_Spill(irn));
403 return get_irn_n(irn, be_pos_Spill_frame);
/* Construct a be_Perm permuting n values of register class cls; every
 * input and the corresponding output are constrained to cls. */
406 ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
409 ir_node *irn = new_ir_node(NULL, irg, bl, op_be_Perm, mode_T, n, in);
410 init_node_attr(irn, n);
411 for(i = 0; i < n; ++i) {
412 be_node_set_reg_class(irn, i, cls);
413 be_node_set_reg_class(irn, OUT_POS(i), cls);
/* Construct a be_MemPerm permuting n values through memory. Input 0 is
 * the frame pointer (constrained to the stack pointer's class); the n
 * value inputs/outputs use the frame pointer's register class. The
 * in/out frame entities are allocated zeroed on the graph obstack and
 * filled in later via be_set_MemPerm_{in,out}_entity(). */
419 ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
422 ir_node *frame = get_irg_frame(irg);
423 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
425 const arch_register_t *sp = arch_env->isa->sp;
426 be_memperm_attr_t *attr;
/* Build the real in-array: frame pointer first, then the n values. */
429 real_in = alloca((n+1) * sizeof(real_in[0]));
431 memcpy(&real_in[1], in, n * sizeof(real_in[0]));
433 irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n+1, real_in);
435 init_node_attr(irn, n + 1);
436 be_node_set_reg_class(irn, 0, sp->reg_class);
437 for(i = 0; i < n; ++i) {
438 be_node_set_reg_class(irn, i + 1, cls_frame);
439 be_node_set_reg_class(irn, OUT_POS(i), cls_frame);
442 attr = get_irn_attr(irn);
444 attr->in_entities = obstack_alloc(irg->obst, n * sizeof(attr->in_entities[0]));
445 memset(attr->in_entities, 0, n * sizeof(attr->in_entities[0]));
446 attr->out_entities = obstack_alloc(irg->obst, n*sizeof(attr->out_entities[0]));
447 memset(attr->out_entities, 0, n*sizeof(attr->out_entities[0]));
/* Construct a be_Copy of op; input and output share register class cls
 * and the result mode equals the operand's mode. */
453 ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op)
459 res = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), 1, in);
460 init_node_attr(res, 1);
461 be_node_set_reg_class(res, 0, cls);
462 be_node_set_reg_class(res, OUT_POS(0), cls);
/* Accessors for the copied operand of a be_Copy. */
466 ir_node *be_get_Copy_op(const ir_node *cpy) {
467 return get_irn_n(cpy, be_pos_Copy_op);
470 void be_set_Copy_op(ir_node *cpy, ir_node *op) {
471 set_irn_n(cpy, be_pos_Copy_op, op);
/* Construct a be_Keep holding n values alive (mode_ANY result); each
 * input is constrained to cls. The opcode carries the keep flag. */
474 ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
479 irn = new_ir_node(NULL, irg, bl, op_be_Keep, mode_ANY, n, in);
480 init_node_attr(irn, n);
481 for(i = 0; i < n; ++i) {
482 be_node_set_reg_class(irn, i, cls);
/* Construct a be_Call: fixed operands are memory, stack pointer and the
 * call address, followed by the n argument nodes. The attribute array is
 * sized to cover the larger of input and output counts. */
488 ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr,
489 int n_outs, int n, ir_node *in[], ir_type *call_tp)
492 int real_n = be_pos_Call_first_arg + n;
496 NEW_ARR_A(ir_node *, real_in, real_n);
497 real_in[be_pos_Call_mem] = mem;
498 real_in[be_pos_Call_sp] = sp;
499 real_in[be_pos_Call_ptr] = ptr;
500 memcpy(&real_in[be_pos_Call_first_arg], in, n * sizeof(in[0]));
502 irn = new_ir_node(dbg, irg, bl, op_be_Call, mode_T, real_n, real_in);
503 a = init_node_attr(irn, (n_outs > real_n ? n_outs : real_n));
505 a->call_tp = call_tp;
509 /* Gets the call entity or NULL if this is no static call. */
510 ir_entity *be_Call_get_entity(const ir_node *call) {
511 be_call_attr_t *a = get_irn_attr(call);
512 assert(be_is_Call(call));
516 /* Sets the call entity. */
517 void be_Call_set_entity(ir_node *call, ir_entity *ent) {
518 be_call_attr_t *a = get_irn_attr(call);
519 assert(be_is_Call(call));
523 /* Gets the call type. */
524 ir_type *be_Call_get_type(ir_node *call) {
525 be_call_attr_t *a = get_irn_attr(call);
526 assert(be_is_Call(call));
530 /* Sets the call type. */
531 void be_Call_set_type(ir_node *call, ir_type *call_tp) {
532 be_call_attr_t *a = get_irn_attr(call);
533 assert(be_is_Call(call));
534 a->call_tp = call_tp;
537 /* Construct a new be_Return. */
/* n is the total operand count, n_res the number of real result values
 * among them (stored in the attribute for later queries). */
538 ir_node *be_new_Return(dbg_info *dbg, ir_graph *irg, ir_node *bl, int n_res, int n, ir_node *in[])
541 ir_node *irn = new_ir_node(dbg, irg, bl, op_be_Return, mode_X, n, in);
542 init_node_attr(irn, n);
543 a = get_irn_attr(irn);
544 a->num_ret_vals = n_res;
549 /* Returns the number of real returns values */
550 int be_Return_get_n_rets(ir_node *ret)
552 be_return_attr_t *a = get_irn_attr(ret);
553 return a->num_ret_vals;
/* Construct a be_IncSP adjusting the stack pointer by `offset` bytes.
 * The result is pinned to the sp register itself (ignore + modify_sp). */
556 ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, int offset)
563 irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, sizeof(in) / sizeof(in[0]), in);
564 a = init_node_attr(irn, 1);
567 be_node_set_flags(irn, -1, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
569 /* Set output constraint to stack register. */
570 be_node_set_reg_class(irn, 0, sp->reg_class);
571 be_set_constr_single_reg(irn, BE_OUT_POS(0), sp);
/* Pre-assign the sp register to the result slot. */
572 be_node_set_irn_reg(NULL, irn, sp);
/* Construct a be_AddSP: new_sp = old_sp + size. Old sp input and the
 * result are fixed to the sp register; the result is ignore/modify_sp. */
577 ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz)
581 ir_node *in[be_pos_AddSP_last];
583 in[be_pos_AddSP_old_sp] = old_sp;
584 in[be_pos_AddSP_size] = sz;
586 irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, be_pos_AddSP_last, in);
587 a = init_node_attr(irn, be_pos_AddSP_last);
589 be_node_set_flags(irn, OUT_POS(pn_be_AddSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
591 /* Set output constraint to stack register. */
592 be_set_constr_single_reg(irn, be_pos_AddSP_old_sp, sp);
593 be_node_set_reg_class(irn, be_pos_AddSP_size, arch_register_get_class(sp));
594 be_set_constr_single_reg(irn, OUT_POS(pn_be_AddSP_res), sp);
/* Record the sp register directly in the result slot. */
595 a->reg_data[pn_be_AddSP_res].reg = sp;
/* Construct a be_SubSP: new_sp = old_sp - size. Mirror image of
 * be_new_AddSP with identical constraints. */
600 ir_node *be_new_SubSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz)
604 ir_node *in[be_pos_SubSP_last];
606 in[be_pos_SubSP_old_sp] = old_sp;
607 in[be_pos_SubSP_size] = sz;
609 irn = new_ir_node(NULL, irg, bl, op_be_SubSP, mode_T, be_pos_SubSP_last, in);
610 a = init_node_attr(irn, be_pos_SubSP_last);
612 be_node_set_flags(irn, OUT_POS(pn_be_SubSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
614 /* Set output constraint to stack register. */
615 be_set_constr_single_reg(irn, be_pos_SubSP_old_sp, sp);
616 be_node_set_reg_class(irn, be_pos_SubSP_size, arch_register_get_class(sp));
617 be_set_constr_single_reg(irn, OUT_POS(pn_be_SubSP_res), sp);
618 a->reg_data[pn_be_SubSP_res].reg = sp;
/* Construct a be_SetSP replacing the stack pointer with `op`. */
623 ir_node *be_new_SetSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *op, ir_node *mem)
632 irn = new_ir_node(NULL, irg, bl, op_be_SetSP, get_irn_mode(old_sp), 3, in);
633 a = init_node_attr(irn, 3);
635 be_node_set_flags(irn, OUT_POS(0), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
637 /* Set output constraint to stack register. */
638 be_set_constr_single_reg(irn, OUT_POS(0), sp);
/* NOTE(review): these two lines reuse be_pos_AddSP_* position constants
 * inside the SetSP constructor -- verify they match SetSP's operand
 * layout or replace with SetSP-specific positions. */
639 be_node_set_reg_class(irn, be_pos_AddSP_size, sp->reg_class);
640 be_node_set_reg_class(irn, be_pos_AddSP_old_sp, sp->reg_class);
/* Construct a be_StackParam loading parameter `ent` relative to the
 * frame pointer; the frame input uses cls_frame, the result cls. */
645 ir_node *be_new_StackParam(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, ir_entity *ent)
651 in[0] = frame_pointer;
652 irn = new_ir_node(NULL, irg, bl, op_be_StackParam, mode, 1, in);
653 a = init_node_attr(irn, 1);
656 be_node_set_reg_class(irn, 0, cls_frame);
657 be_node_set_reg_class(irn, OUT_POS(0), cls);
/* Construct a be_RegParams producing n_outs register parameters as
 * projections of a tuple node with no inputs. */
661 ir_node *be_new_RegParams(ir_graph *irg, ir_node *bl, int n_outs)
666 irn = new_ir_node(NULL, irg, bl, op_be_RegParams, mode_T, 0, in);
667 init_node_attr(irn, n_outs);
/* Construct a be_FrameLoad(mem, frame) loading entity `ent` from the
 * frame; the frame input uses cls_frame, the loaded result cls_data. */
671 ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
672 ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_entity *ent)
680 irn = new_ir_node(NULL, irg, bl, op_be_FrameLoad, mode_T, 2, in);
681 a = init_node_attr(irn, 3);
684 be_node_set_reg_class(irn, 1, cls_frame);
685 be_node_set_reg_class(irn, OUT_POS(pn_Load_res), cls_data);
/* Construct a be_FrameStore(mem, frame, data) storing `data` to entity
 * `ent` on the frame. */
689 ir_node *be_new_FrameStore(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
690 ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_node *data, ir_entity *ent)
699 irn = new_ir_node(NULL, irg, bl, op_be_FrameStore, mode_T, 3, in);
700 a = init_node_attr(irn, 3);
703 be_node_set_reg_class(irn, 1, cls_frame);
704 be_node_set_reg_class(irn, 2, cls_data);
/* Construct a be_FrameAddr computing the address of frame entity `ent`.
 * Runs through optimize_node() so identical FrameAddrs can be CSE'd
 * (FrameAddr has a node_cmp_attr callback, see be_node_init). */
708 ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_entity *ent)
715 irn = new_ir_node(NULL, irg, bl, op_be_FrameAddr, get_irn_mode(frame), 1, in);
716 a = init_node_attr(irn, 1);
719 be_node_set_reg_class(irn, 0, cls_frame);
720 be_node_set_reg_class(irn, OUT_POS(0), cls_frame);
722 return optimize_node(irn);
/* Construct a be_CopyKeep: copies src (input 0) while additionally
 * keeping the n nodes in in_keep[] alive. Only the copy operand and the
 * result are constrained to cls. */
725 ir_node *be_new_CopyKeep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, int n, ir_node *in_keep[], ir_mode *mode)
728 ir_node **in = (ir_node **) alloca((n + 1) * sizeof(in[0]));
731 memcpy(&in[1], in_keep, n * sizeof(in[0]));
732 irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, n + 1, in);
733 init_node_attr(irn, n + 1);
734 be_node_set_reg_class(irn, OUT_POS(0), cls);
735 be_node_set_reg_class(irn, 0, cls);
/* Convenience wrapper for a CopyKeep with exactly one kept node. */
740 ir_node *be_new_CopyKeep_single(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, ir_node *keep, ir_mode *mode)
745 return be_new_CopyKeep(cls, irg, bl, src, 1, in, mode);
/* Accessors for the copied operand of a be_CopyKeep. */
748 ir_node *be_get_CopyKeep_op(const ir_node *cpy) {
749 return get_irn_n(cpy, be_pos_CopyKeep_op);
752 void be_set_CopyKeep_op(ir_node *cpy, ir_node *op) {
753 set_irn_n(cpy, be_pos_CopyKeep_op, op);
/* Construct a be_Barrier over n values: a tuple node that serializes
 * scheduling between its operands and their users. */
756 ir_node *be_new_Barrier(ir_graph *irg, ir_node *bl, int n, ir_node *in[])
760 irn = new_ir_node(NULL, irg, bl, op_be_Barrier, mode_T, n, in);
761 init_node_attr(irn, n);
/* Opcode predicates: one tester per backend node family. */
765 int be_is_Spill (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Spill ; }
766 int be_is_Reload (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Reload ; }
767 int be_is_Copy (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Copy ; }
768 int be_is_CopyKeep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_CopyKeep ; }
769 int be_is_Perm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Perm ; }
770 int be_is_MemPerm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_MemPerm ; }
771 int be_is_Keep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Keep ; }
772 int be_is_Call (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Call ; }
773 int be_is_Return (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Return ; }
774 int be_is_IncSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_IncSP ; }
775 int be_is_SetSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SetSP ; }
776 int be_is_AddSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_AddSP ; }
777 int be_is_SubSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SubSP ; }
778 int be_is_RegParams (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_RegParams ; }
779 int be_is_StackParam (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_StackParam ; }
780 int be_is_FrameAddr (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameAddr ; }
781 int be_is_FrameLoad (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameLoad ; }
782 int be_is_FrameStore (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameStore ; }
783 int be_is_Barrier (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Barrier ; }
/* True for node families whose attribute is a be_frame_attr_t (the
 * switch cases are elided in this view). */
785 int be_has_frame_entity(const ir_node *irn)
787 switch(be_get_irn_opcode(irn)) {
/* Return the frame entity of irn, or (presumably) NULL when the node
 * kind carries none -- the else branch is elided here. */
800 ir_entity *be_get_frame_entity(const ir_node *irn)
802 if (be_has_frame_entity(irn)) {
803 be_frame_attr_t *a = get_irn_attr(irn);
/* Return the frame offset stored in the frame attribute. */
809 int be_get_frame_offset(const ir_node *irn)
811 assert(is_be_node(irn));
812 if (be_has_frame_entity(irn)) {
813 be_frame_attr_t *a = get_irn_attr(irn);
/* Set the frame entity of MemPerm input value n (0-based, excludes the
 * frame-pointer operand). */
819 void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity *ent)
821 be_memperm_attr_t *attr = get_irn_attr(irn);
823 assert(be_is_MemPerm(irn));
824 assert(n < be_get_MemPerm_entity_arity(irn));
826 attr->in_entities[n] = ent;
/* Get the frame entity of MemPerm input value n. */
829 ir_entity* be_get_MemPerm_in_entity(const ir_node* irn, int n)
831 be_memperm_attr_t *attr = get_irn_attr(irn);
833 assert(be_is_MemPerm(irn));
834 assert(n < be_get_MemPerm_entity_arity(irn));
836 return attr->in_entities[n];
/* Set the frame entity of MemPerm output value n. */
839 void be_set_MemPerm_out_entity(const ir_node *irn, int n, ir_entity *ent)
841 be_memperm_attr_t *attr = get_irn_attr(irn);
843 assert(be_is_MemPerm(irn));
844 assert(n < be_get_MemPerm_entity_arity(irn));
846 attr->out_entities[n] = ent;
/* Get the frame entity of MemPerm output value n. */
849 ir_entity* be_get_MemPerm_out_entity(const ir_node* irn, int n)
851 be_memperm_attr_t *attr = get_irn_attr(irn);
853 assert(be_is_MemPerm(irn));
854 assert(n < be_get_MemPerm_entity_arity(irn));
856 return attr->out_entities[n];
/* Number of permuted values: arity minus the frame-pointer operand. */
859 int be_get_MemPerm_entity_arity(const ir_node *irn)
861 return get_irn_arity(irn) - 1;
/* limited-callback installed into requirements by the be_set_constr_*
 * functions. Dispatches on the be_req_t kind: for (negate_)old_limited
 * it forwards to the wrapped callback (negating the bitset for the
 * negate variant, elided here); for single_reg it restricts the bitset
 * to exactly that register's index. */
864 static void be_limited(void *data, bitset_t *bs)
866 be_req_t *req = data;
869 case be_req_kind_negate_old_limited:
870 case be_req_kind_old_limited:
871 req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs);
872 if(req->kind == be_req_kind_negate_old_limited)
875 case be_req_kind_single_reg:
876 bitset_clear_all(bs);
877 bitset_set(bs, req->x.single_reg->index);
/* Fetch the be_req_t for position pos of irn: pos >= 0 addresses the
 * input requirement of operand pos, negative pos (OUT_POS encoding)
 * addresses the output requirement of result -(pos+1). */
882 static INLINE be_req_t *get_req(ir_node *irn, int pos)
884 int idx = pos < 0 ? -(pos + 1) : pos;
885 be_node_attr_t *a = get_irn_attr(irn);
886 be_reg_data_t *rd = &a->reg_data[idx];
887 be_req_t *r = pos < 0 ? &rd->req : &rd->in_req;
889 assert(is_be_node(irn));
890 assert(!(pos >= 0) || pos < get_irn_arity(irn));
/* NOTE(review): `<=` permits idx == ARR_LEN(reg_data), i.e. one past the
 * end -- `<` looks intended; confirm. */
891 assert(!(pos < 0) || -(pos + 1) <= ARR_LEN(a->reg_data));
/* Constrain position pos of irn to exactly one register: installs the
 * be_limited callback with kind single_reg and the register's class. */
896 void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg)
898 be_req_t *r = get_req(irn, pos);
900 r->kind = be_req_kind_single_reg;
901 r->x.single_reg = reg;
902 r->req.limited = be_limited;
903 r->req.limited_env = r;
904 r->req.type = arch_register_req_type_limited;
905 r->req.cls = reg->reg_class;
/* Constrain position pos with an existing limited requirement: wraps
 * the foreign limited callback behind be_limited (kind old_limited). */
908 void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req)
910 be_req_t *r = get_req(irn, pos);
912 assert(arch_register_req_is(req, limited));
914 r->kind = be_req_kind_old_limited;
915 r->req.limited = be_limited;
916 r->req.limited_env = r;
917 r->req.type = arch_register_req_type_limited;
918 r->req.cls = req->cls;
920 r->x.old_limited.old_limited = req->limited;
921 r->x.old_limited.old_limited_env = req->limited_env;
/* Set the architecture flags stored in the requirement at pos. */
924 void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags)
926 be_req_t *r = get_req(irn, pos);
/* Set the register class for pos; a NULL class downgrades the
 * requirement to "none", otherwise a bare "none" becomes "normal". */
930 void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls)
932 be_req_t *r = get_req(irn, pos);
937 r->req.type = arch_register_req_type_none;
938 else if (r->req.type == arch_register_req_type_none)
939 r->req.type = arch_register_req_type_normal;
/* Overwrite the requirement type at pos directly. */
942 void be_node_set_req_type(ir_node *irn, int pos, arch_register_req_type_t type)
944 be_req_t *r = get_req(irn, pos);
/* Accessors for the operands and offset of a be_IncSP node.
 * Operand 0 is the previous stack pointer, operand 1 the memory. */
948 ir_node *be_get_IncSP_pred(ir_node *irn) {
949 assert(be_is_IncSP(irn));
950 return get_irn_n(irn, 0);
953 void be_set_IncSP_pred(ir_node *incsp, ir_node *pred) {
954 assert(be_is_IncSP(incsp));
955 set_irn_n(incsp, 0, pred);
958 ir_node *be_get_IncSP_mem(ir_node *irn) {
959 assert(be_is_IncSP(irn));
960 return get_irn_n(irn, 1);
/* Offset is kept in the be_stack_attr_t attribute. */
963 void be_set_IncSP_offset(ir_node *irn, int offset)
965 be_stack_attr_t *a = get_irn_attr(irn);
966 assert(be_is_IncSP(irn));
970 int be_get_IncSP_offset(const ir_node *irn)
972 be_stack_attr_t *a = get_irn_attr(irn);
973 assert(be_is_IncSP(irn));
/* High-level helper: create a Spill for irn in its own block, deriving
 * value and frame register classes from the arch environment. */
977 ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn)
979 ir_node *bl = get_nodes_block(irn);
980 ir_graph *irg = get_irn_irg(bl);
981 ir_node *frame = get_irg_frame(irg);
982 const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
983 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
986 spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
/* High-level helper: create a Reload of `spill` and insert it into the
 * schedule -- after the control-flow nodes at a block's start when
 * `insert` is a block, otherwise immediately before `insert`. */
990 ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
993 ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
994 ir_graph *irg = get_irn_irg(bl);
995 ir_node *frame = get_irg_frame(irg);
996 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
/* The memory operand must be a Spill or a memory Phi over spills. */
998 assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
1000 reload = be_new_Reload(cls, cls_frame, irg, bl, frame, spill, mode);
1002 if (is_Block(insert)) {
1003 insert = sched_skip(insert, 0, sched_skip_cf_predicator, (void *) arch_env);
1004 sched_add_after(insert, reload);
1006 sched_add_before(insert, reload);
1014 | _ \ ___ __ _ | _ \ ___ __ _ ___
1015 | |_) / _ \/ _` | | |_) / _ \/ _` / __|
1016 | _ < __/ (_| | | _ < __/ (_| \__ \
1017 |_| \_\___|\__, | |_| \_\___|\__, |___/
/* Copy the output requirement of result out_pos into *req. Copy results
 * additionally get a should_be_same hint towards their operand. Out-of-
 * range positions yield a "none" requirement. */
1023 static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos)
1025 const be_node_attr_t *a = get_irn_attr(irn);
1027 if(out_pos < ARR_LEN(a->reg_data)) {
1028 memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));
1030 if(be_is_Copy(irn)) {
1031 req->type |= arch_register_req_type_should_be_same;
1032 req->other_same = be_get_Copy_op(irn);
1035 req->type = arch_register_req_type_none;
/* Copy the input requirement of operand pos into *req; positions beyond
 * arity or reg_data length yield "none". */
1042 static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos)
1044 const be_node_attr_t *a = get_irn_attr(irn);
1046 if(pos < get_irn_arity(irn) && pos < ARR_LEN(a->reg_data)) {
1047 memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));
1049 req->type = arch_register_req_type_none;
/* arch_irn_ops callback: fill *req with the register requirement of irn
 * at pos. Tuple nodes are queried through their Projs; Spill/Reload
 * frame inputs are deliberately unconstrained (see comment below). */
1056 static const arch_register_req_t *
1057 be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1062 if (get_irn_mode(irn) == mode_T)
/* Skip the Proj to reach the be node and obtain the output index. */
1065 out_pos = redir_proj((const ir_node **)&irn);
1066 assert(is_be_node(irn));
1067 return put_out_reg_req(req, irn, out_pos);
1071 if (is_be_node(irn)) {
1073 For spills and reloads, we return "none" as requirement for frame pointer,
1074 so every input is ok. Some backends need this (e.g. STA). We use an arbitrary
1075 large number as pos, so put_in_reg_req will return "none" as requirement.
1077 if ((be_is_Spill(irn) && pos == be_pos_Spill_frame) ||
1078 (be_is_Reload(irn) && pos == be_pos_Reload_frame))
1079 return put_in_reg_req(req, irn, INT_MAX);
1081 return put_in_reg_req(req, irn, pos);
/* arch_irn_ops callback: the register recorded for irn, or NULL. */
1089 const arch_register_t *
1090 be_node_get_irn_reg(const void *_self, const ir_node *irn)
1092 be_reg_data_t *r = retrieve_reg_data(irn);
1093 return r ? r->reg : NULL;
/* arch_irn_ops callback: map backend opcodes to arch_irn_class values;
 * everything not listed in the XXX cases is "normal". */
1096 static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
1098 redir_proj((const ir_node **) &irn);
1100 switch(be_get_irn_opcode(irn)) {
1101 #define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b
1103 XXX(Reload, reload);
1106 XXX(Return, branch);
1107 XXX(StackParam, stackparam);
1110 return arch_irn_class_normal;
/* arch_irn_ops callback: flags stored in the output requirement. */
1116 static arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn)
1118 be_reg_data_t *r = retrieve_reg_data(irn);
1119 return r ? r->req.flags : 0;
/* Thin arch_irn_ops wrappers around the be_* frame helpers. */
1122 static ir_entity *be_node_get_frame_entity(const void *self, const ir_node *irn)
1124 return be_get_frame_entity(irn);
1127 static void be_node_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent)
1131 assert(be_has_frame_entity(irn));
1133 a = get_irn_attr(irn);
1137 static void be_node_set_frame_offset(const void *self, ir_node *irn, int offset)
1139 if(be_has_frame_entity(irn)) {
1140 be_frame_attr_t *a = get_irn_attr(irn);
/* Only IncSP biases the stack pointer; all other backend nodes don't. */
1145 static int be_node_get_sp_bias(const void *self, const ir_node *irn)
1147 return be_is_IncSP(irn) ? be_get_IncSP_offset(irn) : 0;
1151 ___ ____ _ _ _ _ _ _
1152 |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
1153 | || |_) | \| | | |_| |/ _` | '_ \ / _` | |/ _ \ '__|
1154 | || _ <| |\ | | _ | (_| | | | | (_| | | __/ |
1155 |___|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
/* Callback table wiring the functions above into the arch interface. */
1159 static const arch_irn_ops_if_t be_node_irn_ops_if = {
1160 be_node_get_irn_reg_req,
1161 be_node_set_irn_reg,
1162 be_node_get_irn_reg,
1165 be_node_get_frame_entity,
1166 be_node_set_frame_entity,
1167 be_node_set_frame_offset,
1168 be_node_get_sp_bias,
1169 NULL, /* get_inverse */
1170 NULL, /* get_op_estimated_cost */
1171 NULL, /* possible_memory_operand */
1172 NULL, /* perform_memory_operand */
1175 static const arch_irn_ops_t be_node_irn_ops = {
/* Handler entry point: hand out the ops table for backend nodes only
 * (Projs are looked through first), NULL for everything else. */
1179 const void *be_node_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
1181 redir_proj((const ir_node **) &irn);
1182 return is_be_node(irn) ? &be_node_irn_ops : NULL;
1185 const arch_irn_handler_t be_node_irn_handler = {
1190 ____ _ _ ___ ____ _ _ _ _ _ _
1191 | _ \| |__ (_) |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
1192 | |_) | '_ \| | | || |_) | \| | | |_| |/ _` | '_ \ / _` | |/ _ \ '__|
1193 | __/| | | | | | || _ <| |\ | | _ | (_| | | | | (_| | | __/ |
1194 |_| |_| |_|_| |___|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
/* Phi handler state: embeds the generic handler and ops structs so
 * container_of (macros below) can recover the phi_handler_t from either
 * embedded member. (The struct header is elided in this view.) */
1199 arch_irn_handler_t irn_handler;
1200 arch_irn_ops_t irn_ops;
1201 const arch_env_t *arch_env;
1205 #define get_phi_handler_from_handler(h) container_of(h, phi_handler_t, irn_handler)
1206 #define get_phi_handler_from_ops(h) container_of(h, phi_handler_t, irn_ops)
/* Handler entry point: this handler only serves data-mode Phi nodes. */
1208 static const void *phi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1210 const phi_handler_t *h = get_phi_handler_from_handler(handler);
1211 return is_Phi(irn) && mode_is_datab(get_irn_mode(irn)) ? &h->irn_ops : NULL;
1215 * Get register class of a Phi.
/**
 * Determine the register requirement of a Phi by walking to a non-Phi
 * operand and asking the backend for that operand's output requirement.
 *
 * First pass: scan the direct operands; if one is not a Phi, take its
 * requirement directly. Only if ALL operands are Phis is a visited-set
 * allocated lazily and a DFS through the Phi cycle started.
 *
 * @param h        the phi handler (provides arch_env)
 * @param req      out-parameter filled with the found requirement
 * @param phi      the (possibly nested) Phi to inspect
 * @param visited  in/out pointer to a lazily created pset guarding
 *                 against Phi cycles; NULL until the DFS is needed
 * @return req on success, presumably NULL when only Phis were reachable
 *         (return paths are elided in this excerpt — confirm)
 */
1218 static const arch_register_req_t *get_Phi_reg_req_recursive(const phi_handler_t *h, arch_register_req_t *req, const ir_node *phi, pset **visited)
1220 	int n = get_irn_arity(phi);
/* Cycle guard: this Phi was already seen during the DFS. */
1224 	if(*visited && pset_find_ptr(*visited, phi))
/* Fast path: any non-Phi operand yields the requirement immediately. */
1227 	for(i = 0; i < n; ++i) {
1228 		op = get_irn_n(phi, i);
1230 			return arch_get_register_req(h->arch_env, req, op, BE_OUT_POS(0));
1234 	The operands of that Phi were all Phis themselves.
1235 	We have to start a DFS for a non-Phi argument now.
/* Lazily create the visited set only when the slow path is taken. */
1238 	*visited = pset_new_ptr(16);
1240 	pset_insert_ptr(*visited, phi);
/* Recurse into each Phi operand until a non-Phi is found. */
1242 	for(i = 0; i < n; ++i) {
1243 		op = get_irn_n(phi, i);
1244 		if(get_Phi_reg_req_recursive(h, req, op, visited))
/**
 * arch_irn_ops callback: compute the register requirement of a Phi by
 * recursively inheriting it from a non-Phi operand
 * (see get_Phi_reg_req_recursive).
 *
 * NOTE(review): the recursion may allocate *visited; its deletion is not
 * visible in this excerpt — confirm the pset is freed before returning.
 */
1251 static const arch_register_req_t *phi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1253 	phi_handler_t *phi_handler = get_phi_handler_from_ops(self);
1254 	pset *visited  = NULL;
1256 	get_Phi_reg_req_recursive(phi_handler, req, irn, &visited);
1257 	/* Set the requirements type to normal, since an operand of the Phi could have had constraints. */
1258 	req->type = arch_register_req_type_normal;
/**
 * arch_irn_ops callback: record the register assigned to a Phi.
 * Phis have no attribute slot for a register, so the assignment is
 * kept in the handler's side map (h->regs).
 */
1265 static void phi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1267 	phi_handler_t *h = get_phi_handler_from_ops(self);
1268 	pmap_insert(h->regs, irn, (void *) reg);
/**
 * arch_irn_ops callback: look up the register assigned to a Phi in the
 * side map; NULL if none was set yet (pmap_get convention — confirm).
 */
1271 static const arch_register_t *phi_get_irn_reg(const void *self, const ir_node *irn)
1273 	phi_handler_t *h = get_phi_handler_from_ops(self);
1274 	return pmap_get(h->regs, (void *) irn);
/** Phis are ordinary data nodes as far as the backend is concerned. */
1277 static arch_irn_class_t phi_classify(const void *_self, const ir_node *irn)
1279 	return arch_irn_class_normal;
/** Phis carry no special backend flags. */
1282 static arch_irn_flags_t phi_get_flags(const void *_self, const ir_node *irn)
1284 	return arch_irn_flags_none;
/* Frame/stack callbacks: Phis touch neither frame entities nor the stack
 * pointer. Bodies are elided in this excerpt — presumably return NULL/0
 * resp. do nothing; confirm against the full file. */
1287 static ir_entity *phi_get_frame_entity(const void *_self, const ir_node *irn)
1292 static void phi_set_frame_entity(const void *_self, ir_node *irn, ir_entity *ent)
1296 static void phi_set_frame_offset(const void *_self, ir_node *irn, int bias)
1300 static int phi_get_sp_bias(const void* self, const ir_node *irn)
/** vtable wiring the Phi callbacks above into the arch_irn_ops interface
 *  (entries between the visible ones are elided in this excerpt). */
1305 static const arch_irn_ops_if_t phi_irn_ops = {
1306 	phi_get_irn_reg_req,
1311 	phi_get_frame_entity,
1312 	phi_set_frame_entity,
1313 	phi_set_frame_offset,
1315 	NULL,    /* get_inverse             */
1316 	NULL,    /* get_op_estimated_cost   */
1317 	NULL,    /* possible_memory_operand */
1318 	NULL,    /* perform_memory_operand  */
/** Standalone handler instance routing Phis to phi_irn_ops. */
1321 static const arch_irn_handler_t phi_irn_handler = {
/**
 * Allocate and initialise a Phi handler.
 *
 * The returned pointer is really a phi_handler_t; callers only see the
 * embedded irn_handler (first member, so the cast is valid). Ownership
 * transfers to the caller: release with be_phi_handler_free().
 */
1325 arch_irn_handler_t *be_phi_handler_new(const arch_env_t *arch_env)
1327 	phi_handler_t *h = xmalloc(sizeof(h[0]));
1328 	h->irn_handler.get_irn_ops = phi_get_irn_ops;
1329 	h->irn_ops.impl            = &phi_irn_ops;
1330 	h->arch_env                = arch_env;
1331 	h->regs                    = pmap_create();
1332 	return (arch_irn_handler_t *) h;
/**
 * Destroy a Phi handler created by be_phi_handler_new(): drop the
 * register side map (and presumably free the handler itself — the
 * xfree call is elided in this excerpt; confirm).
 */
1335 void be_phi_handler_free(arch_irn_handler_t *handler)
1337 	phi_handler_t *h = (void *) handler;
1338 	pmap_destroy(h->regs);
/**
 * Public variant of phi_get_irn_ops. NOTE(review): unlike the static
 * handler callback this accepts ANY Phi, without the mode_is_datab()
 * data-mode check — confirm this asymmetry is intentional.
 */
1342 const void *be_phi_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
1344 	phi_handler_t *phi_handler = get_phi_handler_from_handler(self);
1345 	return is_Phi(irn) ? &phi_handler->irn_ops : NULL;
/**
 * Forget all Phi register assignments by replacing the side map with a
 * fresh, empty one (e.g. before a new register-allocation round).
 */
1348 void be_phi_handler_reset(arch_irn_handler_t *handler)
1350 	phi_handler_t *h = get_phi_handler_from_handler(handler);
1352 	pmap_destroy(h->regs);
1353 	h->regs = pmap_create();
1358 | \ | | ___ __| | ___ | _ \ _ _ _ __ ___ _ __ (_)_ __ __ _
1359 | \| |/ _ \ / _` |/ _ \ | | | | | | | '_ ` _ \| '_ \| | '_ \ / _` |
1360 | |\ | (_) | (_| | __/ | |_| | |_| | | | | | | |_) | | | | | (_| |
1361 |_| \_|\___/ \__,_|\___| |____/ \__,_|_| |_| |_| .__/|_|_| |_|\__, |
1366 * Dumps a register requirement to a file.
/**
 * Dump one register requirement (flags + formatted constraint) to @p f.
 *
 * @param idx  position of the requirement, printed as "#idx " prefix
 * @param req  the backend requirement to print
 */
1368 static void dump_node_req(FILE *f, int idx, be_req_t *req)
1371 	int did_something = 0;
1373 	const char *prefix = buf;
1375 	snprintf(buf, sizeof(buf), "#%d ", idx);
/* Belt-and-braces: snprintf already NUL-terminates, this guards pre-C99
 * implementations. */
1376 	buf[sizeof(buf) - 1] = '\0';
1378 	if(req->flags != arch_irn_flags_none) {
1379 		fprintf(f, "%sflags: ", prefix);
/* Iterate over all flag bit positions; arch_irn_flags_last bounds the
 * highest defined flag. */
1381 		for(i = arch_irn_flags_none; i <= log2_ceil(arch_irn_flags_last); ++i) {
1382 			if(req->flags & (1 << i)) {
1383 				fprintf(f, "%s%s", prefix, arch_irn_flag_str(1 << i));
/* Only print the constraint if a register class is set at all. */
1391 	if(req->req.cls != 0) {
1394 		arch_register_req_format(tmp, sizeof(tmp), &req->req);
1395 		fprintf(f, "%s", tmp);
1404 * Dumps node register requirements to a file.
/**
 * Dump the assigned registers and the in/out register requirements of
 * all results of a backend node to @p f.
 * Assumes in_req and (out) req arrays have the same length as reg_data.
 */
1406 static void dump_node_reqs(FILE *f, ir_node *irn)
1409 	be_node_attr_t *a = get_irn_attr(irn);
1410 	int len = ARR_LEN(a->reg_data);
1412 	fprintf(f, "registers: \n");
1413 	for(i = 0; i < len; ++i) {
1414 		be_reg_data_t *rd = &a->reg_data[i];
1416 			fprintf(f, "#%d: %s\n", i, rd->reg->name);
1419 	fprintf(f, "in requirements\n");
1420 	for(i = 0; i < len; ++i) {
1421 		dump_node_req(f, i, &a->reg_data[i].in_req);
1424 	fprintf(f, "\nout requirements\n");
1425 	for(i = 0; i < len; ++i) {
1426 		dump_node_req(f, i, &a->reg_data[i].req);
1431 * ir_op-Operation: dump a be node to file
/**
 * ir_op dump callback for backend nodes: print opcode/mode/attribute
 * text depending on @p reason, plus opcode-specific details
 * (stack offset, call target, MemPerm entities).
 */
1433 static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
1435 	be_node_attr_t *at = get_irn_attr(irn);
1437 	assert(is_be_node(irn));
1440 	case dump_node_opcode_txt:
/* NOTE(review): non-literal format string — a '%' in an op name would be
 * interpreted as a conversion (CERT FIO30-C). Prefer
 * fprintf(f, "%s", get_op_name(...)) or fputs. Same on the mode line. */
1441 		fprintf(f, get_op_name(get_irn_op(irn)));
1443 	case dump_node_mode_txt:
1444 		fprintf(f, get_mode_name(get_irn_mode(irn)));
1446 	case dump_node_nodeattr_txt:
1448 	case dump_node_info_txt:
1449 		dump_node_reqs(f, irn);
1451 		if(be_has_frame_entity(irn)) {
/* Safe downcast: frame-carrying nodes embed be_node_attr_t as the first
 * member of be_frame_attr_t. */
1452 			be_frame_attr_t *a = (be_frame_attr_t *) at;
1454 				int bits = get_type_size_bits(get_entity_type(a->ent));
1455 				ir_fprintf(f, "frame entity: %+F, offset 0x%x (%d), size 0x%x (%d) bits\n",
1456 					a->ent, a->offset, a->offset, bits, bits);
1461 		switch(be_get_irn_opcode(irn)) {
1464 				be_stack_attr_t *a = (be_stack_attr_t *) at;
1465 				if (a->offset == BE_STACK_FRAME_SIZE_EXPAND)
1466 					fprintf(f, "offset: FRAME_SIZE\n");
1467 				else if(a->offset == BE_STACK_FRAME_SIZE_SHRINK)
/* NOTE(review): "-FRAME SIZE" vs "FRAME_SIZE" above — inconsistent
 * spelling; probably meant "-FRAME_SIZE". */
1468 					fprintf(f, "offset: -FRAME SIZE\n");
/* NOTE(review): a->offset is declared int (see be_stack_attr_t) but is
 * printed with %u — format/argument mismatch, should be %d. */
1470 					fprintf(f, "offset: %u\n", a->offset);
1475 				be_call_attr_t *a = (be_call_attr_t *) at;
/* Only static calls have a known entity; indirect calls leave ent NULL
 * (guard elided in this excerpt — confirm). */
1478 					fprintf(f, "\ncalling: %s\n", get_entity_name(a->ent));
/* MemPerm: print the paired in/out frame entities per permuted value. */
1484 				for(i = 0; i < be_get_MemPerm_entity_arity(irn); ++i) {
1485 					ir_entity *in, *out;
1486 					in = be_get_MemPerm_in_entity(irn, i);
1487 					out = be_get_MemPerm_out_entity(irn, i);
1489 						fprintf(f, "\nin[%d]: %s\n", i, get_entity_name(in));
1492 						fprintf(f, "\nout[%d]: %s\n", i, get_entity_name(out));
1508 * Copies the backend specific attributes from old node to new node.
/**
 * ir_op copy_attr callback: duplicate the backend attributes from
 * @p old_node to @p new_node.
 *
 * The generic attribute struct is memcpy'd, then the reg_data array is
 * deep-copied onto the target graph's obstack. Finally the limited_env
 * back-pointers inside each copied requirement are re-targeted to point
 * at their OWN containing be_req_t (they would otherwise still reference
 * the old node's requirement structs).
 */
1510 static void copy_attr(const ir_node *old_node, ir_node *new_node)
1512 	be_node_attr_t *old_attr = get_irn_attr(old_node);
1513 	be_node_attr_t *new_attr = get_irn_attr(new_node);
1514 	struct obstack *obst = get_irg_obstack(get_irn_irg(new_node));
1517 	assert(is_be_node(old_node));
1518 	assert(is_be_node(new_node));
/* Shallow copy of the whole attribute struct first ... */
1520 	memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
1521 	new_attr->reg_data = NULL;
1523 	if(old_attr->reg_data != NULL)
1524 		len = ARR_LEN(old_attr->reg_data);
/* ... then a deep copy of the per-result register data on the obstack. */
1528 		new_attr->reg_data = NEW_ARR_D(be_reg_data_t, obst, len);
1531 		memcpy(new_attr->reg_data, old_attr->reg_data, len * sizeof(be_reg_data_t));
/* Fix up self-referential limited_env pointers for out- and in-reqs. */
1533 		for(i = 0; i < len; ++i) {
1536 			r = &new_attr->reg_data[i].req;
1537 			r->req.limited_env = r;
1539 			r = &new_attr->reg_data[i].in_req;
1540 			r->req.limited_env = r;
1545 static const ir_op_ops be_node_op_ops = {