4 * @author Sebastian Hack
6 * Backend node support.
8 * This file provides Perm, Copy, Spill and Reload nodes.
10 * Copyright (C) 2005-2006 Universitaet Karlsruhe
11 * Released under the GPL
26 #include "bitfiddle.h"
38 #include "besched_t.h"
43 #define OUT_POS(x) (-((x) + 1))
45 /* Sometimes we want to put const nodes into get_irn_generic_attr ... */
46 #define get_irn_attr(irn) get_irn_generic_attr((ir_node *) (irn))
48 static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O');
51 be_req_kind_old_limited,
52 be_req_kind_negate_old_limited,
53 be_req_kind_single_reg
57 arch_register_req_t req;
59 arch_irn_flags_t flags;
62 void (*old_limited)(void *ptr, bitset_t *bs);
63 void *old_limited_env;
66 const arch_register_t *single_reg;
71 const arch_register_t *reg;
76 /** The generic be nodes attribute type. */
78 be_reg_data_t *reg_data;
81 /** The be_Return nodes attribute type. */
83 be_node_attr_t node_attr;
84 int num_ret_vals; /**< number of return values */
87 /** The be_Stack attribute type. */
89 be_node_attr_t node_attr;
90 int offset; /**< The offset by which the stack shall be expanded/shrinked. */
93 /** The be_Frame attribute type. */
95 be_node_attr_t node_attr;
100 /** The be_Call attribute type. */
102 be_node_attr_t node_attr;
103 ir_entity *ent; /**< The called entity if this is a static call. */
104 ir_type *call_tp; /**< The call type, copied from the original Call node. */
108 be_node_attr_t node_attr;
109 ir_entity **in_entities;
110 ir_entity **out_entities;
116 ir_op *op_be_MemPerm;
119 ir_op *op_be_CopyKeep;
126 ir_op *op_be_RegParams;
127 ir_op *op_be_StackParam;
128 ir_op *op_be_FrameAddr;
129 ir_op *op_be_FrameLoad;
130 ir_op *op_be_FrameStore;
131 ir_op *op_be_Barrier;
133 static int beo_base = -1;
135 static const ir_op_ops be_node_op_ops;
137 #define N irop_flag_none
138 #define L irop_flag_labeled
139 #define C irop_flag_commutative
140 #define X irop_flag_cfopcode
141 #define I irop_flag_ip_cfopcode
142 #define F irop_flag_fragile
143 #define Y irop_flag_forking
144 #define H irop_flag_highlevel
145 #define c irop_flag_constlike
146 #define K irop_flag_keep
147 #define M irop_flag_machine
151 * Compare two node attributes.
153 * @return zero if both attributes are identically
/* Compares the per-value register data arrays (assigned register plus
 * in/out requirements) element-wise via memcmp.
 * NOTE(review): loop variable declarations, returns and closing braces are
 * not visible in this sampled view — comments only, no code changed. */
155 static int cmp_node_attr(be_node_attr_t *a, be_node_attr_t *b) {
/* Different arity of reg_data means the attributes cannot match. */
158 if(ARR_LEN(a->reg_data) != ARR_LEN(b->reg_data))
161 len = ARR_LEN(a->reg_data);
162 for (i = 0; i < len; ++i) {
163 if (a->reg_data[i].reg != b->reg_data[i].reg ||
164 memcmp(&a->reg_data[i].in_req, &b->reg_data[i].in_req, sizeof(b->reg_data[i].in_req)) ||
165 memcmp(&a->reg_data[i].req, &b->reg_data[i].req, sizeof(a->reg_data[i].req)))
173 * Compare the attributes of two FrameAddr nodes.
175 * @return zero if both attributes are identically
/* Two FrameAddr nodes are equal iff entity and offset match AND the
 * generic node attributes compare equal. */
177 static int FrameAddr_cmp_attr(ir_node *a, ir_node *b) {
178 be_frame_attr_t *a_attr = get_irn_attr(a);
179 be_frame_attr_t *b_attr = get_irn_attr(b);
181 if (a_attr->ent == b_attr->ent && a_attr->offset == b_attr->offset)
182 return cmp_node_attr(&a_attr->node_attr, &b_attr->node_attr);
/* One-time registration of all backend (be) opcodes: allocates a contiguous
 * opcode range, creates one ir_op per be node kind with its pinning state,
 * flags, arity and attribute size, and tags each op so is_be_node() can
 * recognize backend nodes.
 * NOTE(review): the 'inited' guard check/set lines are not visible in this
 * sampled view — comments only, no code changed. */
186 void be_node_init(void) {
187 static int inited = 0;
194 /* Acquire all needed opcodes. */
195 beo_base = get_next_ir_opcodes(beo_Last - 1);
197 op_be_Spill = new_ir_op(beo_base + beo_Spill, "be_Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
198 op_be_Reload = new_ir_op(beo_base + beo_Reload, "be_Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
199 op_be_Perm = new_ir_op(beo_base + beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
200 op_be_MemPerm = new_ir_op(beo_base + beo_MemPerm, "be_MemPerm", op_pin_state_mem_pinned, N, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops);
201 op_be_Copy = new_ir_op(beo_base + beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
202 op_be_Keep = new_ir_op(beo_base + beo_Keep, "be_Keep", op_pin_state_pinned, K, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops);
203 op_be_CopyKeep = new_ir_op(beo_base + beo_CopyKeep, "be_CopyKeep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
204 op_be_Call = new_ir_op(beo_base + beo_Call, "be_Call", op_pin_state_pinned, F, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
205 op_be_Return = new_ir_op(beo_base + beo_Return, "be_Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_return_attr_t), &be_node_op_ops);
206 op_be_AddSP = new_ir_op(beo_base + beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
207 op_be_SubSP = new_ir_op(beo_base + beo_SubSP, "be_SubSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
208 op_be_SetSP = new_ir_op(beo_base + beo_SetSP, "be_SetSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
209 op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
210 op_be_RegParams = new_ir_op(beo_base + beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
211 op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
212 op_be_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "be_FrameAddr", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
213 op_be_FrameLoad = new_ir_op(beo_base + beo_FrameLoad, "be_FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
214 op_be_FrameStore = new_ir_op(beo_base + beo_FrameStore, "be_FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
215 op_be_Barrier = new_ir_op(beo_base + beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops);
/* Tag every backend op so is_be_node() can identify them later. */
217 set_op_tag(op_be_Spill, &be_node_tag);
218 set_op_tag(op_be_Reload, &be_node_tag);
219 set_op_tag(op_be_Perm, &be_node_tag);
220 set_op_tag(op_be_MemPerm, &be_node_tag);
221 set_op_tag(op_be_Copy, &be_node_tag);
222 set_op_tag(op_be_Keep, &be_node_tag);
223 set_op_tag(op_be_CopyKeep, &be_node_tag);
224 set_op_tag(op_be_Call, &be_node_tag);
225 set_op_tag(op_be_Return, &be_node_tag);
226 set_op_tag(op_be_AddSP, &be_node_tag);
227 set_op_tag(op_be_SubSP, &be_node_tag);
228 set_op_tag(op_be_SetSP, &be_node_tag);
229 set_op_tag(op_be_IncSP, &be_node_tag);
230 set_op_tag(op_be_RegParams, &be_node_tag);
231 set_op_tag(op_be_StackParam, &be_node_tag);
232 set_op_tag(op_be_FrameLoad, &be_node_tag);
233 set_op_tag(op_be_FrameStore, &be_node_tag);
234 set_op_tag(op_be_FrameAddr, &be_node_tag);
235 set_op_tag(op_be_Barrier, &be_node_tag);
/* FrameAddr gets a custom attribute comparison for CSE. */
237 op_be_FrameAddr->ops.node_cmp_attr = FrameAddr_cmp_attr;
241 * Initializes the generic attribute of all be nodes and return ir.
/* Zeroes the node's whole attribute record, then allocates the reg_data
 * array: a fixed-size obstack array when max_reg_data >= 0, otherwise a
 * growable (flexible) array for nodes with dynamic arity (Keep, Barrier,
 * RegParams). Returns the attribute pointer.
 * NOTE(review): braces/return lines are not visible in this sampled view. */
243 static void *init_node_attr(ir_node *node, int max_reg_data)
245 ir_graph *irg = get_irn_irg(node);
246 struct obstack *obst = get_irg_obstack(irg);
247 be_node_attr_t *a = get_irn_attr(node);
/* BUGFIX: the size argument must be the value returned by
 * get_op_attr_size(), not sizeof() of that call expression — sizeof of a
 * call yields the size of its return type (a few bytes), which would leave
 * most of the attribute uninitialized. */
249 memset(a, 0, get_op_attr_size(get_irn_op(node)));
251 if(max_reg_data >= 0) {
252 a->reg_data = NEW_ARR_D(be_reg_data_t, obst, max_reg_data);
253 memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0]));
255 a->reg_data = NEW_ARR_F(be_reg_data_t, 0);
/* Appends one zero-initialized register-requirement slot to the node's
 * growable reg_data array (used by dynamic-arity nodes like Keep/Barrier).
 * NOTE(review): braces are not visible in this sampled view. */
261 static void add_register_req(ir_node *node)
263 be_node_attr_t *a = get_irn_attr(node);
264 be_reg_data_t regreq;
/* BUGFIX: restore '&regreq' — the '&reg' prefix had been mangled into the
 * HTML entity character '®' by a broken encoding pass. */
265 memset(&regreq, 0, sizeof(regreq));
266 ARR_APP1(be_reg_data_t, a->reg_data, regreq);
/* A node is a backend node iff its op carries the be_node_tag set in
 * be_node_init(). */
269 int is_be_node(const ir_node *irn)
271 return get_op_tag(get_irn_op(irn)) == &be_node_tag;
/* Maps a backend node to its be_opcode_t by subtracting the allocated
 * opcode base; non-backend nodes yield beo_NoBeOp. */
274 be_opcode_t be_get_irn_opcode(const ir_node *irn)
276 return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp;
280 * Skip Proj nodes and return their Proj numbers.
282 * If *node is a Proj or Proj(Proj) node, skip it.
284 * @param node points to the node to be skipped
286 * @return 0 if *node was no Proj node, its Proj number else.
288 static int redir_proj(const ir_node **node)
290 const ir_node *n = *node;
/* First Proj level: redirect to the predecessor. */
295 *node = irn = get_Proj_pred(n);
/* Second level (Proj of Proj): the inner predecessor must be mode_T. */
297 assert(get_irn_mode(irn) == mode_T);
298 *node = get_Proj_pred(irn);
/* The reported proj number is always that of the outermost Proj. */
300 return get_Proj_proj(n);
/* Finds the backend attribute for a value: either irn is a Proj of a
 * mode_T backend node (the proj number selects the reg_data slot, stored
 * through *the_pos), or irn itself is a non-mode_T backend node (slot 0).
 * Returns NULL when irn denotes no backend-produced value.
 * NOTE(review): several lines (dummy decl, Proj test, returns, braces) are
 * missing from this sampled view — comments only, no code changed. */
306 static be_node_attr_t *retrieve_irn_attr(const ir_node *irn, int *the_pos)
309 be_node_attr_t *res = NULL;
310 int *pos = the_pos ? the_pos : &dummy;
314 ir_node *pred = get_Proj_pred(irn);
315 int p = get_Proj_proj(irn);
317 if(is_be_node(pred)) {
318 assert(get_irn_mode(pred) == mode_T);
320 res = get_irn_attr(pred);
321 assert(p >= 0 && p < ARR_LEN(res->reg_data) && "illegal proj number");
323 } else if(is_be_node(irn) && get_irn_mode(irn) != mode_T) {
324 be_node_attr_t *a = get_irn_attr(irn);
325 if(ARR_LEN(a->reg_data) > 0) {
/* Convenience wrapper: resolve the reg_data slot for a value, or NULL. */
334 static be_reg_data_t *retrieve_reg_data(const ir_node *irn)
337 be_node_attr_t *a = retrieve_irn_attr(irn, &pos);
338 return a ? &a->reg_data[pos] : NULL;
/* arch_irn_ops callback: record the register allocated for irn's value. */
342 be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
344 be_reg_data_t *r = retrieve_reg_data(irn);
/* Creates a be_Spill node (mode_M) storing to_spill relative to frame.
 * cls constrains the spilled value, cls_frame the frame pointer input. */
351 ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
352 ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *to_spill)
360 res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in);
361 a = init_node_attr(res, 2);
365 be_node_set_reg_class(res, be_pos_Spill_frame, cls_frame);
366 be_node_set_reg_class(res, be_pos_Spill_val, cls);
/* Creates a be_Reload node producing a value of 'mode' in class cls,
 * loaded from frame; marked rematerializable for the spiller. */
370 ir_node *be_new_Reload(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
371 ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *mem, ir_mode *mode)
378 res = new_ir_node(NULL, irg, bl, op_be_Reload, mode, 2, in);
380 init_node_attr(res, 2);
/* pos -1 constrains the output (OUT_POS(0)). */
381 be_node_set_reg_class(res, -1, cls);
382 be_node_set_reg_class(res, be_pos_Reload_frame, cls_frame);
383 be_node_set_flags(res, -1, arch_irn_flags_rematerializable);
/* Accessor: memory dependency input of a Reload. */
387 ir_node *be_get_Reload_mem(const ir_node *irn)
389 assert(be_is_Reload(irn));
390 return get_irn_n(irn, be_pos_Reload_mem);
/* Accessor: frame pointer input of a Reload. */
393 ir_node *be_get_Reload_frame(const ir_node *irn)
395 assert(be_is_Reload(irn));
396 return get_irn_n(irn, be_pos_Reload_frame);
/* Accessor: the value being spilled. */
399 ir_node *be_get_Spill_val(const ir_node *irn)
401 assert(be_is_Spill(irn));
402 return get_irn_n(irn, be_pos_Spill_val);
/* Accessor: frame pointer input of a Spill. */
404 ir_node *be_get_Spill_frame(const ir_node *irn)
406 assert(be_is_Spill(irn));
407 return get_irn_n(irn, be_pos_Spill_frame);
/* Creates a be_Perm (register permutation): n inputs and n outputs, each
 * constrained to the same register class cls. */
410 ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
413 ir_node *irn = new_ir_node(NULL, irg, bl, op_be_Perm, mode_T, n, in);
414 init_node_attr(irn, n);
415 for(i = 0; i < n; ++i) {
416 be_node_set_reg_class(irn, i, cls);
417 be_node_set_reg_class(irn, OUT_POS(i), cls);
/* Creates a be_MemPerm (permutation of stack slots): input 0 is the frame
 * pointer, followed by the n values; in/out entity arrays are allocated
 * zeroed on the graph's obstack. */
423 ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
426 ir_node *frame = get_irg_frame(irg);
427 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
429 const arch_register_t *sp = arch_env->isa->sp;
430 be_memperm_attr_t *attr;
/* Build the real input array: slot 0 is reserved for the frame pointer. */
433 real_in = alloca((n+1) * sizeof(real_in[0]));
435 memcpy(&real_in[1], in, n * sizeof(real_in[0]));
437 irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n+1, real_in);
439 init_node_attr(irn, n + 1);
440 be_node_set_reg_class(irn, 0, sp->reg_class);
441 for(i = 0; i < n; ++i) {
442 be_node_set_reg_class(irn, i + 1, cls_frame);
443 be_node_set_reg_class(irn, OUT_POS(i), cls_frame);
446 attr = get_irn_attr(irn);
448 attr->in_entities = obstack_alloc(irg->obst, n * sizeof(attr->in_entities[0]));
449 memset(attr->in_entities, 0, n * sizeof(attr->in_entities[0]));
450 attr->out_entities = obstack_alloc(irg->obst, n*sizeof(attr->out_entities[0]));
451 memset(attr->out_entities, 0, n*sizeof(attr->out_entities[0]));
/* Creates a be_Copy: one input, one output, both constrained to cls; the
 * result mode is taken from the operand. */
457 ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op)
463 res = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), 1, in);
464 init_node_attr(res, 1);
465 be_node_set_reg_class(res, 0, cls);
466 be_node_set_reg_class(res, OUT_POS(0), cls);
/* Accessor: the copied operand. */
470 ir_node *be_get_Copy_op(const ir_node *cpy) {
471 return get_irn_n(cpy, be_pos_Copy_op);
/* Mutator: replace the copied operand. */
474 void be_set_Copy_op(ir_node *cpy, ir_node *op) {
475 set_irn_n(cpy, be_pos_Copy_op, op);
/* Creates a be_Keep holding n values alive; built with dynamic arity
 * (arity -1) and inputs appended one by one, each getting its own
 * register requirement slot. */
478 ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
483 res = new_ir_node(NULL, irg, bl, op_be_Keep, mode_ANY, -1, NULL);
484 init_node_attr(res, -1);
486 for(i = 0; i < n; ++i) {
487 add_irn_n(res, in[i]);
488 add_register_req(res);
489 be_node_set_reg_class(res, i, cls);
/* Appends one more value to an existing Keep, constrained to cls. */
496 void be_Keep_add_node(ir_node *keep, const arch_register_class_t *cls, ir_node *node)
500 assert(be_is_Keep(keep));
501 n = add_irn_n(keep, node);
502 add_register_req(keep);
503 be_node_set_reg_class(keep, n, cls);
/* Creates a be_Call: inputs are mem, sp, ptr followed by the n argument
 * values; the attribute array is sized to cover whichever is larger,
 * the input count or the n_outs result count. */
506 ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr,
507 int n_outs, int n, ir_node *in[], ir_type *call_tp)
510 int real_n = be_pos_Call_first_arg + n;
514 NEW_ARR_A(ir_node *, real_in, real_n);
515 real_in[be_pos_Call_mem] = mem;
516 real_in[be_pos_Call_sp] = sp;
517 real_in[be_pos_Call_ptr] = ptr;
518 memcpy(&real_in[be_pos_Call_first_arg], in, n * sizeof(in[0]));
520 irn = new_ir_node(dbg, irg, bl, op_be_Call, mode_T, real_n, real_in);
521 a = init_node_attr(irn, (n_outs > real_n ? n_outs : real_n));
523 a->call_tp = call_tp;
527 /* Gets the call entity or NULL if this is no static call. */
528 ir_entity *be_Call_get_entity(const ir_node *call) {
529 be_call_attr_t *a = get_irn_attr(call);
530 assert(be_is_Call(call));
534 /* Sets the call entity. */
535 void be_Call_set_entity(ir_node *call, ir_entity *ent) {
536 be_call_attr_t *a = get_irn_attr(call);
537 assert(be_is_Call(call));
541 /* Gets the call type. */
542 ir_type *be_Call_get_type(ir_node *call) {
543 be_call_attr_t *a = get_irn_attr(call);
544 assert(be_is_Call(call));
548 /* Sets the call type. */
549 void be_Call_set_type(ir_node *call, ir_type *call_tp) {
550 be_call_attr_t *a = get_irn_attr(call);
551 assert(be_is_Call(call));
552 a->call_tp = call_tp;
555 /* Construct a new be_Return. */
/* n_res counts real return values; n is the full arity (including mem
 * and stack inputs handed over in in[]). */
556 ir_node *be_new_Return(dbg_info *dbg, ir_graph *irg, ir_node *bl, int n_res, int n, ir_node *in[])
559 ir_node *irn = new_ir_node(dbg, irg, bl, op_be_Return, mode_X, n, in);
560 init_node_attr(irn, n);
561 a = get_irn_attr(irn);
562 a->num_ret_vals = n_res;
567 /* Returns the number of real returns values */
568 int be_Return_get_n_rets(ir_node *ret)
570 be_return_attr_t *a = get_irn_attr(ret);
571 return a->num_ret_vals;
/* Creates a be_IncSP: adjusts the stack pointer by a constant offset.
 * The result is pinned to the sp register and flagged ignore/modify_sp
 * so the register allocator leaves it alone. */
574 ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, int offset)
581 irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, sizeof(in) / sizeof(in[0]), in);
582 a = init_node_attr(irn, 1);
585 be_node_set_flags(irn, -1, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
587 /* Set output constraint to stack register. */
588 be_node_set_reg_class(irn, 0, sp->reg_class);
589 be_set_constr_single_reg(irn, BE_OUT_POS(0), sp);
590 be_node_set_irn_reg(NULL, irn, sp);
/* Creates a be_AddSP: grows the stack by a run-time size value. */
595 ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz)
599 ir_node *in[be_pos_AddSP_last];
601 in[be_pos_AddSP_old_sp] = old_sp;
602 in[be_pos_AddSP_size] = sz;
604 irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, be_pos_AddSP_last, in);
605 a = init_node_attr(irn, be_pos_AddSP_last);
607 be_node_set_flags(irn, OUT_POS(pn_be_AddSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
609 /* Set output constraint to stack register. */
610 be_set_constr_single_reg(irn, be_pos_AddSP_old_sp, sp);
611 be_node_set_reg_class(irn, be_pos_AddSP_size, arch_register_get_class(sp));
612 be_set_constr_single_reg(irn, OUT_POS(pn_be_AddSP_res), sp);
613 a->reg_data[pn_be_AddSP_res].reg = sp;
/* Creates a be_SubSP: shrinks the stack by a run-time size value;
 * mirror image of be_new_AddSP. */
618 ir_node *be_new_SubSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz)
622 ir_node *in[be_pos_SubSP_last];
624 in[be_pos_SubSP_old_sp] = old_sp;
625 in[be_pos_SubSP_size] = sz;
627 irn = new_ir_node(NULL, irg, bl, op_be_SubSP, mode_T, be_pos_SubSP_last, in);
628 a = init_node_attr(irn, be_pos_SubSP_last);
630 be_node_set_flags(irn, OUT_POS(pn_be_SubSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
632 /* Set output constraint to stack register. */
633 be_set_constr_single_reg(irn, be_pos_SubSP_old_sp, sp);
634 be_node_set_reg_class(irn, be_pos_SubSP_size, arch_register_get_class(sp));
635 be_set_constr_single_reg(irn, OUT_POS(pn_be_SubSP_res), sp);
636 a->reg_data[pn_be_SubSP_res].reg = sp;
/* Creates a be_SetSP: replaces the stack pointer with an arbitrary value. */
641 ir_node *be_new_SetSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *op, ir_node *mem)
650 irn = new_ir_node(NULL, irg, bl, op_be_SetSP, get_irn_mode(old_sp), 3, in);
651 a = init_node_attr(irn, 3);
653 be_node_set_flags(irn, OUT_POS(0), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
655 /* Set output constraint to stack register. */
656 be_set_constr_single_reg(irn, OUT_POS(0), sp);
/* NOTE(review): these use the be_pos_AddSP_* position constants inside a
 * SetSP constructor — looks copy-pasted from be_new_AddSP; confirm the
 * SetSP input positions really coincide with AddSP's. */
657 be_node_set_reg_class(irn, be_pos_AddSP_size, sp->reg_class);
658 be_node_set_reg_class(irn, be_pos_AddSP_old_sp, sp->reg_class);
/* Creates a be_StackParam: materializes a parameter located on the stack
 * frame (entity ent) as a value of class cls; input is the frame pointer. */
663 ir_node *be_new_StackParam(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, ir_entity *ent)
669 in[0] = frame_pointer;
670 irn = new_ir_node(NULL, irg, bl, op_be_StackParam, mode, 1, in);
671 a = init_node_attr(irn, 1);
674 be_node_set_reg_class(irn, 0, cls_frame);
675 be_node_set_reg_class(irn, OUT_POS(0), cls);
/* Creates a be_RegParams: a zero-input mode_T node whose Projs deliver
 * the register-passed parameters; one requirement slot per output. */
679 ir_node *be_new_RegParams(ir_graph *irg, ir_node *bl, int n_outs)
684 res = new_ir_node(NULL, irg, bl, op_be_RegParams, mode_T, 0, NULL);
685 init_node_attr(res, -1);
686 for(i = 0; i < n_outs; ++i)
687 add_register_req(res);
/* Appends one more output to a RegParams node: creates the Proj, adds a
 * requirement slot, pins it to 'reg' and records the register on the Proj. */
692 ir_node *be_RegParams_append_out_reg(ir_node *regparams,
693 const arch_env_t *arch_env,
694 const arch_register_t *reg)
696 ir_graph *irg = get_irn_irg(regparams);
697 ir_node *block = get_nodes_block(regparams);
698 be_node_attr_t *attr = get_irn_attr(regparams);
699 const arch_register_class_t *cls = arch_register_get_class(reg);
700 ir_mode *mode = arch_register_class_mode(cls);
701 int n = ARR_LEN(attr->reg_data);
704 assert(be_is_RegParams(regparams));
705 proj = new_r_Proj(irg, block, regparams, mode, n);
706 add_register_req(regparams);
707 be_set_constr_single_reg(regparams, n, reg);
708 arch_set_irn_register(arch_env, proj, reg);
710 /* TODO decide, whether we need to set ignore/modify sp flags here? */
/* Creates a be_FrameLoad: loads a value of class cls_data from frame
 * entity ent; inputs are mem and the frame pointer. */
715 ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
716 ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_entity *ent)
724 irn = new_ir_node(NULL, irg, bl, op_be_FrameLoad, mode_T, 2, in);
725 a = init_node_attr(irn, 3);
728 be_node_set_reg_class(irn, 1, cls_frame);
729 be_node_set_reg_class(irn, OUT_POS(pn_Load_res), cls_data);
/* Creates a be_FrameStore: stores 'data' to frame entity ent; inputs are
 * mem, frame pointer and the data value. */
733 ir_node *be_new_FrameStore(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
734 ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_node *data, ir_entity *ent)
743 irn = new_ir_node(NULL, irg, bl, op_be_FrameStore, mode_T, 3, in);
744 a = init_node_attr(irn, 3);
747 be_node_set_reg_class(irn, 1, cls_frame);
748 be_node_set_reg_class(irn, 2, cls_data);
/* Creates a be_FrameAddr: address of frame entity ent; subject to CSE via
 * optimize_node() and the FrameAddr_cmp_attr comparison. */
752 ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_entity *ent)
759 irn = new_ir_node(NULL, irg, bl, op_be_FrameAddr, get_irn_mode(frame), 1, in);
760 a = init_node_attr(irn, 1);
763 be_node_set_reg_class(irn, 0, cls_frame);
764 be_node_set_reg_class(irn, OUT_POS(0), cls_frame);
766 return optimize_node(irn);
/* Creates a be_CopyKeep: copies src (input 0) while also keeping the n
 * extra values in in_keep[] alive. */
769 ir_node *be_new_CopyKeep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, int n, ir_node *in_keep[], ir_mode *mode)
772 ir_node **in = (ir_node **) alloca((n + 1) * sizeof(in[0]));
775 memcpy(&in[1], in_keep, n * sizeof(in[0]));
776 irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, n + 1, in);
777 init_node_attr(irn, n + 1);
778 be_node_set_reg_class(irn, OUT_POS(0), cls);
779 be_node_set_reg_class(irn, 0, cls);
/* Convenience wrapper: CopyKeep with exactly one kept node. */
784 ir_node *be_new_CopyKeep_single(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, ir_node *keep, ir_mode *mode)
789 return be_new_CopyKeep(cls, irg, bl, src, 1, in, mode);
/* Accessor: the copied operand of a CopyKeep. */
792 ir_node *be_get_CopyKeep_op(const ir_node *cpy) {
793 return get_irn_n(cpy, be_pos_CopyKeep_op);
/* Mutator: replace the copied operand of a CopyKeep. */
796 void be_set_CopyKeep_op(ir_node *cpy, ir_node *op) {
797 set_irn_n(cpy, be_pos_CopyKeep_op, op);
/* Creates a be_Barrier: dynamic-arity mode_T node that routes n values
 * through itself (one requirement slot per appended input). */
800 ir_node *be_new_Barrier(ir_graph *irg, ir_node *bl, int n, ir_node *in[])
805 res = new_ir_node(NULL, irg, bl, op_be_Barrier, mode_T, -1, NULL);
806 init_node_attr(res, -1);
807 for(i = 0; i < n; ++i) {
808 add_irn_n(res, in[i]);
809 add_register_req(res);
/* Appends one more value to a Barrier and returns the matching Proj
 * (same mode as the appended node). */
815 ir_node *be_Barrier_append_node(ir_node *barrier, ir_node *node)
817 ir_graph *irg = get_irn_irg(barrier);
818 ir_node *block = get_nodes_block(barrier);
819 ir_mode *mode = get_irn_mode(node);
820 int n = add_irn_n(barrier, node);
821 ir_node *proj = new_r_Proj(irg, block, barrier, mode, n);
822 add_register_req(barrier);
/* Per-opcode predicates: each tests the backend opcode of irn. */
827 int be_is_Spill (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Spill ; }
828 int be_is_Reload (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Reload ; }
829 int be_is_Copy (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Copy ; }
830 int be_is_CopyKeep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_CopyKeep ; }
831 int be_is_Perm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Perm ; }
832 int be_is_MemPerm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_MemPerm ; }
833 int be_is_Keep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Keep ; }
834 int be_is_Call (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Call ; }
835 int be_is_Return (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Return ; }
836 int be_is_IncSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_IncSP ; }
837 int be_is_SetSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SetSP ; }
838 int be_is_AddSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_AddSP ; }
839 int be_is_SubSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SubSP ; }
840 int be_is_RegParams (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_RegParams ; }
841 int be_is_StackParam (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_StackParam ; }
842 int be_is_FrameAddr (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameAddr ; }
843 int be_is_FrameLoad (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameLoad ; }
844 int be_is_FrameStore (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameStore ; }
845 int be_is_Barrier (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Barrier ; }
/* True for opcodes whose attribute starts with be_frame_attr_t (entity +
 * offset); the switch's case labels are not visible in this sampled view. */
847 int be_has_frame_entity(const ir_node *irn)
849 switch(be_get_irn_opcode(irn)) {
/* Frame entity of a frame-based node, NULL otherwise (tail not visible). */
862 ir_entity *be_get_frame_entity(const ir_node *irn)
864 if (be_has_frame_entity(irn)) {
865 be_frame_attr_t *a = get_irn_attr(irn);
/* Frame offset of a frame-based node (fallback return not visible). */
871 int be_get_frame_offset(const ir_node *irn)
873 assert(is_be_node(irn));
874 if (be_has_frame_entity(irn)) {
875 be_frame_attr_t *a = get_irn_attr(irn);
/* Sets the source stack entity of MemPerm slot n. */
881 void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity *ent)
883 be_memperm_attr_t *attr = get_irn_attr(irn);
885 assert(be_is_MemPerm(irn));
886 assert(n < be_get_MemPerm_entity_arity(irn));
888 attr->in_entities[n] = ent;
/* Gets the source stack entity of MemPerm slot n. */
891 ir_entity* be_get_MemPerm_in_entity(const ir_node* irn, int n)
893 be_memperm_attr_t *attr = get_irn_attr(irn);
895 assert(be_is_MemPerm(irn));
896 assert(n < be_get_MemPerm_entity_arity(irn));
898 return attr->in_entities[n];
/* Sets the destination stack entity of MemPerm slot n. */
901 void be_set_MemPerm_out_entity(const ir_node *irn, int n, ir_entity *ent)
903 be_memperm_attr_t *attr = get_irn_attr(irn);
905 assert(be_is_MemPerm(irn));
906 assert(n < be_get_MemPerm_entity_arity(irn));
908 attr->out_entities[n] = ent;
/* Gets the destination stack entity of MemPerm slot n. */
911 ir_entity* be_get_MemPerm_out_entity(const ir_node* irn, int n)
913 be_memperm_attr_t *attr = get_irn_attr(irn);
915 assert(be_is_MemPerm(irn));
916 assert(n < be_get_MemPerm_entity_arity(irn));
918 return attr->out_entities[n];
/* Number of permuted slots: total arity minus the frame pointer input. */
921 int be_get_MemPerm_entity_arity(const ir_node *irn)
923 return get_irn_arity(irn) - 1;
/* limited-callback dispatcher: fills bitset bs according to the request
 * kind stored in the be_req_t passed as 'data'. For old_limited kinds it
 * forwards to the saved callback (negating the set for the negate kind);
 * for single_reg it leaves exactly that register's bit set. */
926 static void be_limited(void *data, bitset_t *bs)
928 be_req_t *req = data;
931 case be_req_kind_negate_old_limited:
932 case be_req_kind_old_limited:
933 req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs);
934 if(req->kind == be_req_kind_negate_old_limited)
937 case be_req_kind_single_reg:
938 bitset_clear_all(bs);
939 bitset_set(bs, req->x.single_reg->index);
/* Resolves the be_req_t for position pos: pos >= 0 selects the input
 * requirement of input pos; pos < 0 encodes output -(pos+1) (OUT_POS). */
944 static INLINE be_req_t *get_req(ir_node *irn, int pos)
946 int idx = pos < 0 ? -(pos + 1) : pos;
947 be_node_attr_t *a = get_irn_attr(irn);
948 be_reg_data_t *rd = &a->reg_data[idx];
949 be_req_t *r = pos < 0 ? &rd->req : &rd->in_req;
951 assert(is_be_node(irn));
952 assert(!(pos >= 0) || pos < get_irn_arity(irn));
/* NOTE(review): '<=' permits idx == ARR_LEN, one past the last slot —
 * looks like it should be strict '<'; confirm before tightening. */
953 assert(!(pos < 0) || -(pos + 1) <= ARR_LEN(a->reg_data));
/* Constrains position pos of irn to exactly one register: installs the
 * be_limited callback with kind single_reg and a limited-type request in
 * the register's class. */
958 void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg)
960 be_req_t *r = get_req(irn, pos);
962 r->kind = be_req_kind_single_reg;
963 r->x.single_reg = reg;
964 r->req.limited = be_limited;
965 r->req.limited_env = r;
966 r->req.type = arch_register_req_type_limited;
967 r->req.cls = reg->reg_class;
/* Adopts an existing limited requirement: wraps the caller's limited
 * callback/env so be_limited can forward to it later. */
970 void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req)
972 be_req_t *r = get_req(irn, pos);
974 assert(arch_register_req_is(req, limited));
976 r->kind = be_req_kind_old_limited;
977 r->req.limited = be_limited;
978 r->req.limited_env = r;
979 r->req.type = arch_register_req_type_limited;
980 r->req.cls = req->cls;
982 r->x.old_limited.old_limited = req->limited;
983 r->x.old_limited.old_limited_env = req->limited_env;
/* Sets the arch flags stored with the requirement at pos. */
986 void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags)
988 be_req_t *r = get_req(irn, pos);
/* Sets the register class at pos; a NULL class resets the request type to
 * none, a fresh class upgrades none to normal. */
992 void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls)
994 be_req_t *r = get_req(irn, pos);
999 r->req.type = arch_register_req_type_none;
1000 } else if (r->req.type == arch_register_req_type_none) {
1001 r->req.type = arch_register_req_type_normal;
/* Overrides the requirement type at pos directly. */
1005 void be_node_set_req_type(ir_node *irn, int pos, arch_register_req_type_t type)
1007 be_req_t *r = get_req(irn, pos);
/* IncSP accessors: input 0 is the previous stack pointer value. */
1011 ir_node *be_get_IncSP_pred(ir_node *irn) {
1012 assert(be_is_IncSP(irn));
1013 return get_irn_n(irn, 0);
1016 void be_set_IncSP_pred(ir_node *incsp, ir_node *pred) {
1017 assert(be_is_IncSP(incsp));
1018 set_irn_n(incsp, 0, pred);
/* Input 1 of an IncSP is its memory dependency. */
1021 ir_node *be_get_IncSP_mem(ir_node *irn) {
1022 assert(be_is_IncSP(irn));
1023 return get_irn_n(irn, 1);
/* Offset stored in the IncSP's stack attribute. */
1026 void be_set_IncSP_offset(ir_node *irn, int offset)
1028 be_stack_attr_t *a = get_irn_attr(irn);
1029 assert(be_is_IncSP(irn));
1033 int be_get_IncSP_offset(const ir_node *irn)
1035 be_stack_attr_t *a = get_irn_attr(irn);
1036 assert(be_is_IncSP(irn));
/* High-level helper: spills irn into its own register class, using the
 * graph's frame as base. Derives both classes from the arch env. */
1040 ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn)
1042 ir_node *bl = get_nodes_block(irn);
1043 ir_graph *irg = get_irn_irg(bl);
1044 ir_node *frame = get_irg_frame(irg);
1045 const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
1046 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
1049 spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
/* High-level helper: builds a Reload of 'spill' and schedules it — after
 * the last non-CF node when 'insert' is a block, else directly before
 * 'insert'. */
1053 ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
1056 ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
1057 ir_graph *irg = get_irn_irg(bl);
1058 ir_node *frame = get_irg_frame(irg);
1059 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
/* The spill source is either a real Spill or a mode_M Phi over spills. */
1061 assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
1063 reload = be_new_Reload(cls, cls_frame, irg, bl, frame, spill, mode);
1065 if (is_Block(insert)) {
1066 insert = sched_skip(insert, 0, sched_skip_cf_predicator, (void *) arch_env);
1067 sched_add_after(insert, reload);
1069 sched_add_before(insert, reload);
1077 | _ \ ___ __ _ | _ \ ___ __ _ ___
1078 | |_) / _ \/ _` | | |_) / _ \/ _` / __|
1079 | _ < __/ (_| | | _ < __/ (_| \__ \
1080 |_| \_\___|\__, | |_| \_\___|\__, |___/
/* Copies the output requirement for out_pos into *req; Copy nodes get an
 * extra should_be_same constraint toward their operand. Positions beyond
 * the reg_data array yield a 'none' requirement. */
1086 static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos)
1088 const be_node_attr_t *a = get_irn_attr(irn);
1090 if(out_pos < ARR_LEN(a->reg_data)) {
1091 memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));
1093 if(be_is_Copy(irn)) {
1094 req->type |= arch_register_req_type_should_be_same;
1095 req->other_same = be_get_Copy_op(irn);
1098 req->type = arch_register_req_type_none;
/* Copies the input requirement for pos into *req; out-of-range positions
 * yield a 'none' requirement (exploited below via INT_MAX). */
1105 static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos)
1107 const be_node_attr_t *a = get_irn_attr(irn);
1109 if(pos < get_irn_arity(irn) && pos < ARR_LEN(a->reg_data)) {
1110 memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));
1112 req->type = arch_register_req_type_none;
/* arch_irn_ops callback: resolves the register requirement of irn at pos,
 * skipping Projs for mode_T producers. */
1119 static const arch_register_req_t *
1120 be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1125 if (get_irn_mode(irn) == mode_T)
1128 out_pos = redir_proj((const ir_node **)&irn);
1129 assert(is_be_node(irn));
1130 return put_out_reg_req(req, irn, out_pos);
1134 if (is_be_node(irn)) {
1136 For spills and reloads, we return "none" as requirement for frame pointer,
1137 so every input is ok. Some backends need this (e.g. STA). We use an arbitrary
1138 large number as pos, so put_in_reg_req will return "none" as requirement.
1140 if ((be_is_Spill(irn) && pos == be_pos_Spill_frame) ||
1141 (be_is_Reload(irn) && pos == be_pos_Reload_frame))
1142 return put_in_reg_req(req, irn, INT_MAX);
1144 return put_in_reg_req(req, irn, pos);
/* arch_irn_ops callback: register currently assigned to irn's value. */
1152 const arch_register_t *
1153 be_node_get_irn_reg(const void *_self, const ir_node *irn)
1155 be_reg_data_t *r = retrieve_reg_data(irn);
1156 return r ? r->reg : NULL;
/* arch_irn_ops callback: classify the (Proj-skipped) node; unlisted
 * opcodes fall through to 'normal'. */
1159 static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
1161 redir_proj((const ir_node **) &irn);
1163 switch(be_get_irn_opcode(irn)) {
1164 #define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b
1166 XXX(Reload, reload);
1169 XXX(Return, branch);
1170 XXX(StackParam, stackparam);
1173 return arch_irn_class_normal;
/* arch_irn_ops callback: flags stored with the value's requirement. */
1179 static arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn)
1181 be_reg_data_t *r = retrieve_reg_data(irn);
1182 return r ? r->req.flags : 0;
/* arch_irn_ops wrappers around the frame-entity accessors above. */
1185 static ir_entity *be_node_get_frame_entity(const void *self, const ir_node *irn)
1187 return be_get_frame_entity(irn);
1190 static void be_node_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent)
1194 assert(be_has_frame_entity(irn));
1196 a = get_irn_attr(irn);
1200 static void be_node_set_frame_offset(const void *self, ir_node *irn, int offset)
1202 if(be_has_frame_entity(irn)) {
1203 be_frame_attr_t *a = get_irn_attr(irn);
/* Only IncSP changes the stack pointer by a known static amount. */
1208 static int be_node_get_sp_bias(const void *self, const ir_node *irn)
1210 return be_is_IncSP(irn) ? be_get_IncSP_offset(irn) : 0;
1214 ___ ____ _ _ _ _ _ _
1215 |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
1216 | || |_) | \| | | |_| |/ _` | '_ \ / _` | |/ _ \ '__|
1217 | || _ <| |\ | | _ | (_| | | | | (_| | | __/ |
1218 |___|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
1222 static const arch_irn_ops_if_t be_node_irn_ops_if = {
1223 be_node_get_irn_reg_req,
1224 be_node_set_irn_reg,
1225 be_node_get_irn_reg,
1228 be_node_get_frame_entity,
1229 be_node_set_frame_entity,
1230 be_node_set_frame_offset,
1231 be_node_get_sp_bias,
1232 NULL, /* get_inverse */
1233 NULL, /* get_op_estimated_cost */
1234 NULL, /* possible_memory_operand */
1235 NULL, /* perform_memory_operand */
1238 static const arch_irn_ops_t be_node_irn_ops = {
/*
 * arch_irn_handler callback: hand out the backend-node ops for be_* nodes
 * (Projs are redirected to their predecessor first); NULL for anything else.
 */
1242 const void *be_node_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
1244 redir_proj((const ir_node **) &irn);
1245 return is_be_node(irn) ? &be_node_irn_ops : NULL;
/* The global irn handler for backend nodes (initializer elided here). */
1248 const arch_irn_handler_t be_node_irn_handler = {
1253 ____ _ _ ___ ____ _ _ _ _ _ _
1254 | _ \| |__ (_) |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
1255 | |_) | '_ \| | | || |_) | \| | | |_| |/ _` | '_ \ / _` | |/ _ \ '__|
1256 | __/| | | | | | || _ <| |\ | | _ | (_| | | | | (_| | | __/ |
1257 |_| |_| |_|_| |___|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
/*
 * Per-instance state of the Phi handler. The handler and the ops structs
 * are embedded so the container_of() macros below can recover the
 * phi_handler_t from a pointer to either member. (The struct header and a
 * `pmap *regs` field referenced below are elided in this excerpt.)
 */
1262 arch_irn_handler_t irn_handler;
1263 arch_irn_ops_t irn_ops;
1264 const arch_env_t *arch_env;
/* Recover the enclosing phi_handler_t from an embedded member pointer. */
1268 #define get_phi_handler_from_handler(h) container_of(h, phi_handler_t, irn_handler)
1269 #define get_phi_handler_from_ops(h) container_of(h, phi_handler_t, irn_ops)
/*
 * arch_irn_handler callback: Phi nodes holding data values (datab modes)
 * get the Phi ops; everything else is not handled here.
 */
1271 static const void *phi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1273 const phi_handler_t *h = get_phi_handler_from_handler(handler);
1274 return is_Phi(irn) && mode_is_datab(get_irn_mode(irn)) ? &h->irn_ops : NULL;
1278 * Get register class of a Phi.
/*
 * Derive a Phi's register requirement from its operands: the first non-Phi
 * operand found supplies its OUT(0) requirement. If all operands are Phis
 * themselves, a DFS over the Phi graph is started, with *visited (created
 * lazily) breaking cycles. (Several lines of this function are elided in
 * this excerpt.)
 */
1281 static const arch_register_req_t *get_Phi_reg_req_recursive(const phi_handler_t *h, arch_register_req_t *req, const ir_node *phi, pset **visited)
1283 int n = get_irn_arity(phi);
/* Already seen on this DFS: avoid infinite recursion on Phi cycles. */
1287 if(*visited && pset_find_ptr(*visited, phi))
/* First pass: look for a direct non-Phi operand. */
1290 for(i = 0; i < n; ++i) {
1291 op = get_irn_n(phi, i);
1293 return arch_get_register_req(h->arch_env, req, op, BE_OUT_POS(0));
1297 The operands of that Phi were all Phis themselves.
1298 We have to start a DFS for a non-Phi argument now.
/* Create the visited set lazily — most Phis never reach this point. */
1301 *visited = pset_new_ptr(16);
1303 pset_insert_ptr(*visited, phi);
/* Second pass: recurse through the all-Phi operands. */
1305 for(i = 0; i < n; ++i) {
1306 op = get_irn_n(phi, i);
1307 if(get_Phi_reg_req_recursive(h, req, op, visited))
/*
 * arch_irn_ops callback: compute a Phi's register requirement from its
 * operands, then downgrade the requirement type to "normal" so operand
 * constraints do not leak onto the Phi itself.
 * NOTE(review): destruction of the lazily created `visited` pset is not
 * visible in this excerpt — confirm it is freed in the full file.
 */
1314 static const arch_register_req_t *phi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1316 phi_handler_t *phi_handler = get_phi_handler_from_ops(self);
1317 pset *visited = NULL;
1319 get_Phi_reg_req_recursive(phi_handler, req, irn, &visited);
1320 /* Set the requirements type to normal, since an operand of the Phi could have had constraints. */
1321 req->type = arch_register_req_type_normal;
/*
 * arch_irn_ops callback: Phi nodes have no attribute slot for a register,
 * so assignments are stored in the handler's pmap instead.
 */
1328 static void phi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1330 phi_handler_t *h = get_phi_handler_from_ops(self);
1331 pmap_insert(h->regs, irn, (void *) reg);
/* arch_irn_ops callback: look up the Phi's register in the handler's pmap
 * (NULL when none was assigned yet). */
1334 static const arch_register_t *phi_get_irn_reg(const void *self, const ir_node *irn)
1336 phi_handler_t *h = get_phi_handler_from_ops(self);
1337 return pmap_get(h->regs, (void *) irn);
/* arch_irn_ops callback: Phis are always ordinary nodes. */
1340 static arch_irn_class_t phi_classify(const void *_self, const ir_node *irn)
1342 return arch_irn_class_normal;
/* arch_irn_ops callback: Phis carry no special flags. */
1345 static arch_irn_flags_t phi_get_flags(const void *_self, const ir_node *irn)
1347 return arch_irn_flags_none;
/*
 * The following four callbacks are stubs (bodies elided in this excerpt):
 * presumably Phi nodes have no frame entity and do not bias the stack
 * pointer — confirm the elided bodies in the full file.
 */
1350 static ir_entity *phi_get_frame_entity(const void *_self, const ir_node *irn)
1355 static void phi_set_frame_entity(const void *_self, ir_node *irn, ir_entity *ent)
1359 static void phi_set_frame_offset(const void *_self, ir_node *irn, int bias)
1363 static int phi_get_sp_bias(const void* self, const ir_node *irn)
/*
 * Callback table implementing the arch_irn_ops interface for Phi nodes;
 * optional operations are left NULL. (Some initializer lines are elided
 * in this excerpt.)
 */
1368 static const arch_irn_ops_if_t phi_irn_ops = {
1369 phi_get_irn_reg_req,
1374 phi_get_frame_entity,
1375 phi_set_frame_entity,
1376 phi_set_frame_offset,
1378 NULL, /* get_inverse */
1379 NULL, /* get_op_estimated_cost */
1380 NULL, /* possible_memory_operand */
1381 NULL, /* perform_memory_operand */
/* Static handler instance for Phi nodes (initializer elided here). */
1384 static const arch_irn_handler_t phi_irn_handler = {
/*
 * Allocate and initialize a Phi handler for the given architecture
 * environment. Ownership of the returned handler (and of its register
 * pmap) passes to the caller; release it with be_phi_handler_free().
 */
1388 arch_irn_handler_t *be_phi_handler_new(const arch_env_t *arch_env)
1390 phi_handler_t *h = xmalloc(sizeof(h[0]));
1391 h->irn_handler.get_irn_ops = phi_get_irn_ops;
1392 h->irn_ops.impl = &phi_irn_ops;
1393 h->arch_env = arch_env;
1394 h->regs = pmap_create();
1395 return (arch_irn_handler_t *) h;
/*
 * Destroy a Phi handler created by be_phi_handler_new().
 * NOTE(review): only pmap_destroy() is visible in this excerpt; the
 * free(handler) call is presumably on an elided line — confirm, otherwise
 * the handler struct itself leaks.
 */
1398 void be_phi_handler_free(arch_irn_handler_t *handler)
1400 phi_handler_t *h = (void *) handler;
1401 pmap_destroy(h->regs);
/*
 * Public variant of phi_get_irn_ops(): hands out the Phi ops for any Phi
 * node. NOTE(review): unlike phi_get_irn_ops() this does not check
 * mode_is_datab() — confirm whether that asymmetry is intentional.
 */
1405 const void *be_phi_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
1407 phi_handler_t *phi_handler = get_phi_handler_from_handler(self);
1408 return is_Phi(irn) ? &phi_handler->irn_ops : NULL;
/*
 * Drop all cached Phi register assignments by replacing the handler's
 * pmap with a fresh, empty one.
 */
1411 void be_phi_handler_reset(arch_irn_handler_t *handler)
1413 phi_handler_t *h = get_phi_handler_from_handler(handler);
1415 pmap_destroy(h->regs);
1416 h->regs = pmap_create();
1421 | \ | | ___ __| | ___ | _ \ _ _ _ __ ___ _ __ (_)_ __ __ _
1422 | \| |/ _ \ / _` |/ _ \ | | | | | | | '_ ` _ \| '_ \| | '_ \ / _` |
1423 | |\ | (_) | (_| | __/ | |_| | |_| | | | | | | |_) | | | | | (_| |
1424 |_| \_|\___/ \__,_|\___| |____/ \__,_|_| |_| |_| .__/|_|_| |_|\__, |
1429 * Dumps a register requirement to a file.
1431 static void dump_node_req(FILE *f, int idx, be_req_t *req)
1434 int did_something = 0;
/* Every printed item is prefixed with "#<idx> ". */
1436 const char *prefix = buf;
1438 snprintf(buf, sizeof(buf), "#%d ", idx);
1439 buf[sizeof(buf) - 1] = '\0';
/* Print each set flag bit by name. */
1441 if(req->flags != arch_irn_flags_none) {
1442 fprintf(f, "%sflags: ", prefix);
1444 for(i = arch_irn_flags_none; i <= log2_ceil(arch_irn_flags_last); ++i) {
1445 if(req->flags & (1 << i)) {
1446 fprintf(f, "%s%s", prefix, arch_irn_flag_str(1 << i));
/* Print the register-class requirement, if one is set. */
1454 if(req->req.cls != 0) {
1457 arch_register_req_format(tmp, sizeof(tmp), &req->req);
1458 fprintf(f, "%s", tmp);
1467 * Dumps node register requirements to a file.
1469 static void dump_node_reqs(FILE *f, ir_node *irn)
1472 be_node_attr_t *a = get_irn_attr(irn);
1473 int len = ARR_LEN(a->reg_data);
/* Assigned registers, one line per result slot. */
1475 fprintf(f, "registers: \n");
1476 for(i = 0; i < len; ++i) {
1477 be_reg_data_t *rd = &a->reg_data[i];
1479 fprintf(f, "#%d: %s\n", i, rd->reg->name);
/* IN requirements (reg_data[i].in_req), then OUT requirements (.req). */
1482 fprintf(f, "in requirements\n");
1483 for(i = 0; i < len; ++i) {
1484 dump_node_req(f, i, &a->reg_data[i].in_req);
1487 fprintf(f, "\nout requirements\n");
1488 for(i = 0; i < len; ++i) {
1489 dump_node_req(f, i, &a->reg_data[i].req);
1494 * ir_op-Operation: dump a be node to file
1496 static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
1498 be_node_attr_t *at = get_irn_attr(irn);
1500 assert(is_be_node(irn));
1503 case dump_node_opcode_txt:
/* NOTE(review): the op/mode name is passed as the printf FORMAT string;
 * a name containing '%' would misformat. Safer: fprintf(f, "%s", name). */
1504 fprintf(f, get_op_name(get_irn_op(irn)));
1506 case dump_node_mode_txt:
1507 fprintf(f, get_mode_name(get_irn_mode(irn)));
1509 case dump_node_nodeattr_txt:
1511 case dump_node_info_txt:
1512 dump_node_reqs(f, irn);
/* Nodes with a frame entity additionally print entity, offset and size. */
1514 if(be_has_frame_entity(irn)) {
1515 be_frame_attr_t *a = (be_frame_attr_t *) at;
1517 int bits = get_type_size_bits(get_entity_type(a->ent));
1518 ir_fprintf(f, "frame entity: %+F, offset 0x%x (%d), size 0x%x (%d) bits\n",
1519 a->ent, a->offset, a->offset, bits, bits);
/* Opcode-specific extra information. */
1524 switch(be_get_irn_opcode(irn)) {
/* Stack node: symbolic expand/shrink markers, else the literal offset. */
1527 be_stack_attr_t *a = (be_stack_attr_t *) at;
1528 if (a->offset == BE_STACK_FRAME_SIZE_EXPAND)
1529 fprintf(f, "offset: FRAME_SIZE\n");
1530 else if(a->offset == BE_STACK_FRAME_SIZE_SHRINK)
1531 fprintf(f, "offset: -FRAME SIZE\n");
/* NOTE(review): `offset` is declared int but printed with %u — confirm. */
1533 fprintf(f, "offset: %u\n", a->offset);
/* Call node: print the callee when it is a static call. */
1538 be_call_attr_t *a = (be_call_attr_t *) at;
1541 fprintf(f, "\ncalling: %s\n", get_entity_name(a->ent));
/* MemPerm node: print the in/out entity of every permuted slot. */
1547 for(i = 0; i < be_get_MemPerm_entity_arity(irn); ++i) {
1548 ir_entity *in, *out;
1549 in = be_get_MemPerm_in_entity(irn, i);
1550 out = be_get_MemPerm_out_entity(irn, i);
1552 fprintf(f, "\nin[%d]: %s\n", i, get_entity_name(in));
1555 fprintf(f, "\nout[%d]: %s\n", i, get_entity_name(out));
1571 * Copies the backend specific attributes from old node to new node.
1573 static void copy_attr(const ir_node *old_node, ir_node *new_node)
1575 be_node_attr_t *old_attr = get_irn_attr(old_node);
1576 be_node_attr_t *new_attr = get_irn_attr(new_node);
1577 struct obstack *obst = get_irg_obstack(get_irn_irg(new_node));
1580 assert(is_be_node(old_node));
1581 assert(is_be_node(new_node));
/* Shallow-copy the whole attribute struct, then rebuild reg_data below so
 * the old and new node never share the same array. */
1583 memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
1584 new_attr->reg_data = NULL;
1586 if(old_attr->reg_data != NULL)
1587 len = ARR_LEN(old_attr->reg_data);
/* Keep/RegParams/Barrier get a flexible (NEW_ARR_F) array, others live on
 * the irg obstack (NEW_ARR_D) — presumably because the former may be
 * resized later; confirm against the array API. */
1591 if(be_is_Keep(old_node) || be_is_RegParams(old_node) || be_is_Barrier(old_node)) {
1592 new_attr->reg_data = NEW_ARR_F(be_reg_data_t, len);
1594 new_attr->reg_data = NEW_ARR_D(be_reg_data_t, obst, len);
1598 memcpy(new_attr->reg_data, old_attr->reg_data, len * sizeof(be_reg_data_t));
/* The copied requirements' limited_env pointers still reference the OLD
 * node's be_req_t structs; re-aim each one at its own new copy. */
1600 for(i = 0; i < len; ++i) {
1603 r = &new_attr->reg_data[i].req;
1604 r->req.limited_env = r;
1606 r = &new_attr->reg_data[i].in_req;
1607 r->req.limited_env = r;
1612 static const ir_op_ops be_node_op_ops = {