4 * @author Sebastian Hack
6 * Backend node support.
8 * This file provides Perm, Copy, Spill and Reload nodes.
10 * Copyright (C) 2005-2006 Universitaet Karlsruhe
11 * Released under the GPL
26 #include "bitfiddle.h"
38 #include "besched_t.h"
/* Output operand positions are encoded as negative ints: OUT_POS(0) == -1. */
43 #define OUT_POS(x) (-((x) + 1))
45 /* Sometimes we want to put const nodes into get_irn_generic_attr ... */
46 #define get_irn_attr(irn) get_irn_generic_attr((ir_node *) (irn))
/* Tag stored in every backend ir_op; is_be_node() tests for its address. */
48 static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O');
/* Kinds of register requirements (enum fragment). */
51 be_req_kind_old_limited,
52 be_req_kind_negate_old_limited,
53 be_req_kind_single_reg
/* be_req_t fragment: a wrapped arch requirement plus kind-specific payload. */
57 arch_register_req_t req;
59 arch_irn_flags_t flags;
/* payload for be_req_kind_(negate_)old_limited: the wrapped limited callback */
62 void (*old_limited)(void *ptr, bitset_t *bs);
63 void *old_limited_env;
/* payload for be_req_kind_single_reg */
66 const arch_register_t *single_reg;
/* be_reg_data_t fragment: the register finally assigned to this value. */
71 const arch_register_t *reg;
76 /** The generic be nodes attribute type. */
78 be_reg_data_t *reg_data;
81 /** The be_Return nodes attribute type. */
83 be_node_attr_t node_attr;
84 int num_ret_vals; /**< number of return values */
87 /** The be_Stack attribute type. */
89 be_node_attr_t node_attr;
90 int offset; /**< The offset by which the stack shall be expanded/shrinked. */
93 /** The be_Frame attribute type. */
95 be_node_attr_t node_attr;
100 /** The be_Call attribute type. */
102 be_node_attr_t node_attr;
103 ir_entity *ent; /**< The called entity if this is a static call. */
104 ir_type *call_tp; /**< The call type, copied from the original Call node. */
/* The be_MemPerm attribute type: spill-slot entities for ins and outs. */
108 be_node_attr_t node_attr;
109 ir_entity **in_entities;
110 ir_entity **out_entities;
/* Opcode objects of the backend node family (visible subset). */
116 ir_op *op_be_MemPerm;
119 ir_op *op_be_CopyKeep;
126 ir_op *op_be_RegParams;
127 ir_op *op_be_StackParam;
128 ir_op *op_be_FrameAddr;
129 ir_op *op_be_FrameLoad;
130 ir_op *op_be_FrameStore;
131 ir_op *op_be_Barrier;
/* First opcode number handed out for be nodes; -1 until be_node_init() ran. */
133 static int beo_base = -1;
135 static const ir_op_ops be_node_op_ops;
/* One-letter aliases for irop flags used in the new_ir_op() calls below. */
137 #define N irop_flag_none
138 #define L irop_flag_labeled
139 #define C irop_flag_commutative
140 #define X irop_flag_cfopcode
141 #define I irop_flag_ip_cfopcode
142 #define F irop_flag_fragile
143 #define Y irop_flag_forking
144 #define H irop_flag_highlevel
145 #define c irop_flag_constlike
146 #define K irop_flag_keep
147 #define M irop_flag_machine
151 * Compare two node attributes.
153 * @return zero if both attributes are identically
155 static int cmp_node_attr(be_node_attr_t *a, be_node_attr_t *b) {
/* differing reg_data lengths can never be equal */
158 if(ARR_LEN(a->reg_data) != ARR_LEN(b->reg_data))
161 len = ARR_LEN(a->reg_data);
162 for (i = 0; i < len; ++i) {
/* entries differ if the assigned register differs or either requirement
 * record differs (requirements are compared bytewise via memcmp) */
163 if (a->reg_data[i].reg != b->reg_data[i].reg ||
164 memcmp(&a->reg_data[i].in_req, &b->reg_data[i].in_req, sizeof(b->reg_data[i].in_req)) ||
165 memcmp(&a->reg_data[i].req, &b->reg_data[i].req, sizeof(a->reg_data[i].req)))
173 * Compare the attributes of two FrameAddr nodes.
175 * @return zero if both attributes are identically
177 static int FrameAddr_cmp_attr(ir_node *a, ir_node *b) {
178 be_frame_attr_t *a_attr = get_irn_attr(a);
179 be_frame_attr_t *b_attr = get_irn_attr(b);
/* equal entity and offset: fall through to the generic attribute compare.
 * NOTE(review): the mismatch path is not visible in this extraction --
 * presumably it returns nonzero; confirm against the full source. */
181 if (a_attr->ent == b_attr->ent && a_attr->offset == b_attr->offset)
182 return cmp_node_attr(&a_attr->node_attr, &b_attr->node_attr);
/*
 * One-time registration of the backend node opcodes.
 * Allocates a contiguous opcode range, creates one ir_op per backend node
 * kind and tags each op with be_node_tag so is_be_node() recognizes them.
 */
186 void be_node_init(void) {
187 static int inited = 0;     /* guards against double registration */
194 /* Acquire all needed opcodes. */
/* NOTE(review): "beo_Last - 1" assumes the beo_ enum numbers the real
 * opcodes starting at 1 -- confirm against the beo_t declaration. */
195 beo_base = get_next_ir_opcodes(beo_Last - 1);
/* Each op: pin state, irop flags (see one-letter macros above), arity,
 * and the size of its attribute record. */
197 op_be_Spill = new_ir_op(beo_base + beo_Spill, "be_Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
198 op_be_Reload = new_ir_op(beo_base + beo_Reload, "be_Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
199 op_be_Perm = new_ir_op(beo_base + beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
200 op_be_MemPerm = new_ir_op(beo_base + beo_MemPerm, "be_MemPerm", op_pin_state_mem_pinned, N, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops);
201 op_be_Copy = new_ir_op(beo_base + beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
202 op_be_Keep = new_ir_op(beo_base + beo_Keep, "be_Keep", op_pin_state_pinned, K, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops);
203 op_be_CopyKeep = new_ir_op(beo_base + beo_CopyKeep, "be_CopyKeep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
204 op_be_Call = new_ir_op(beo_base + beo_Call, "be_Call", op_pin_state_pinned, F, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
205 op_be_Return = new_ir_op(beo_base + beo_Return, "be_Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_return_attr_t), &be_node_op_ops);
206 op_be_AddSP = new_ir_op(beo_base + beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
207 op_be_SubSP = new_ir_op(beo_base + beo_SubSP, "be_SubSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
208 op_be_SetSP = new_ir_op(beo_base + beo_SetSP, "be_SetSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
209 op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
210 op_be_RegParams = new_ir_op(beo_base + beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
211 op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
212 op_be_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "be_FrameAddr", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
213 op_be_FrameLoad = new_ir_op(beo_base + beo_FrameLoad, "be_FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
214 op_be_FrameStore = new_ir_op(beo_base + beo_FrameStore, "be_FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
215 op_be_Barrier = new_ir_op(beo_base + beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops);
/* Mark every op as a backend op. */
217 set_op_tag(op_be_Spill, &be_node_tag);
218 set_op_tag(op_be_Reload, &be_node_tag);
219 set_op_tag(op_be_Perm, &be_node_tag);
220 set_op_tag(op_be_MemPerm, &be_node_tag);
221 set_op_tag(op_be_Copy, &be_node_tag);
222 set_op_tag(op_be_Keep, &be_node_tag);
223 set_op_tag(op_be_CopyKeep, &be_node_tag);
224 set_op_tag(op_be_Call, &be_node_tag);
225 set_op_tag(op_be_Return, &be_node_tag);
226 set_op_tag(op_be_AddSP, &be_node_tag);
227 set_op_tag(op_be_SubSP, &be_node_tag);
228 set_op_tag(op_be_SetSP, &be_node_tag);
229 set_op_tag(op_be_IncSP, &be_node_tag);
230 set_op_tag(op_be_RegParams, &be_node_tag);
231 set_op_tag(op_be_StackParam, &be_node_tag);
232 set_op_tag(op_be_FrameLoad, &be_node_tag);
233 set_op_tag(op_be_FrameStore, &be_node_tag);
234 set_op_tag(op_be_FrameAddr, &be_node_tag);
235 set_op_tag(op_be_Barrier, &be_node_tag);
/* FrameAddr nodes are CSE'd on entity + offset via this compare callback. */
237 op_be_FrameAddr->ops.node_cmp_attr = FrameAddr_cmp_attr;
241 * Initializes the generic attribute of all be nodes and return ir.
243 static void *init_node_attr(ir_node *node, int max_reg_data)
245 ir_graph *irg = get_irn_irg(node);
246 struct obstack *obst = get_irg_obstack(irg);
247 be_node_attr_t *a = get_irn_attr(node);
249 memset(a, 0, sizeof(get_op_attr_size(get_irn_op(node))));
251 if(max_reg_data >= 0) {
252 a->reg_data = NEW_ARR_D(be_reg_data_t, obst, max_reg_data);
253 memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0]));
255 a->reg_data = NEW_ARR_F(be_reg_data_t, 0);
261 static void add_register_req(ir_node *node)
263 be_node_attr_t *a = get_irn_attr(node);
264 be_reg_data_t regreq;
265 memset(®req, 0, sizeof(regreq));
266 ARR_APP1(be_reg_data_t, a->reg_data, regreq);
/* A node is a backend node iff its op carries the be_node_tag address. */
269 int is_be_node(const ir_node *irn)
271 return get_op_tag(get_irn_op(irn)) == &be_node_tag;
/* Map a node's global opcode back into the beo_ enum; foreign nodes
 * yield beo_NoBeOp. */
274 be_opcode_t be_get_irn_opcode(const ir_node *irn)
276 return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp;
280 * Skip Proj nodes and return their Proj numbers.
282 * If *node is a Proj or Proj(Proj) node, skip it.
284 * @param node points to the node to be skipped
286 * @return 0 if *node was no Proj node, its Proj number else.
288 static int redir_proj(const ir_node **node)
290 const ir_node *n = *node;
/* one Proj level: step to the predecessor */
295 *node = irn = get_Proj_pred(n);
/* Proj(Proj): the inner predecessor must be the mode_T producer */
297 assert(get_irn_mode(irn) == mode_T);
298 *node = get_Proj_pred(irn);
/* callers want the outermost Proj number */
300 return get_Proj_proj(n);
/*
 * Fetch the be attribute describing a value: for a Proj, the attribute of
 * its backend predecessor (with *the_pos set to the Proj number); for a
 * non-mode_T backend node, its own attribute with position 0.
 * Returns NULL if the node is not backed by a be attribute.
 */
306 static be_node_attr_t *retrieve_irn_attr(const ir_node *irn, int *the_pos)
309 be_node_attr_t *res = NULL;
310 int *pos = the_pos ? the_pos : &dummy;     /* tolerate the_pos == NULL */
314 ir_node *pred = get_Proj_pred(irn);
315 int p = get_Proj_proj(irn);
317 if(is_be_node(pred)) {
318 assert(get_irn_mode(pred) == mode_T);
320 res = get_irn_attr(pred);
321 assert(p >= 0 && p < ARR_LEN(res->reg_data) && "illegal proj number");
/* non-Proj be nodes with a single (non-T) result use reg_data[0] */
323 } else if(is_be_node(irn) && get_irn_mode(irn) != mode_T) {
324 be_node_attr_t *a = get_irn_attr(irn);
325 if(ARR_LEN(a->reg_data) > 0) {
/* Convenience wrapper: the reg_data record describing irn's value, or NULL. */
334 static be_reg_data_t *retrieve_reg_data(const ir_node *irn)
337 be_node_attr_t *a = retrieve_irn_attr(irn, &pos);
338 return a ? &a->reg_data[pos] : NULL;
/* arch_irn_ops callback: record the register allocated for irn's value. */
342 be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
344 be_reg_data_t *r = retrieve_reg_data(irn);
/*
 * Spill: store a value into its stack frame slot. Inputs are the frame
 * pointer and the value; the result is the memory (mode_M).
 */
351 ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
352 ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *to_spill)
360 res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in);
361 a = init_node_attr(res, 2);
365 be_node_set_reg_class(res, be_pos_Spill_frame, cls_frame);
366 be_node_set_reg_class(res, be_pos_Spill_val, cls);
/*
 * Reload: load a spilled value back from its frame slot. Inputs are the
 * frame pointer and the spill memory; the result carries the value's mode.
 */
370 ir_node *be_new_Reload(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
371 ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *mem, ir_mode *mode)
378 res = new_ir_node(NULL, irg, bl, op_be_Reload, mode, 2, in);
380 init_node_attr(res, 2);
381 be_node_set_reg_class(res, -1, cls);   /* -1 == OUT_POS(0): the result */
382 be_node_set_reg_class(res, be_pos_Reload_frame, cls_frame);
/* a Reload can always be recomputed instead of being spilled again */
383 be_node_set_flags(res, -1, arch_irn_flags_rematerializable);
/* Input accessors for Reload/Spill, guarded by opcode asserts. */
387 ir_node *be_get_Reload_mem(const ir_node *irn)
389 assert(be_is_Reload(irn));
390 return get_irn_n(irn, be_pos_Reload_mem);
393 ir_node *be_get_Reload_frame(const ir_node *irn)
395 assert(be_is_Reload(irn));
396 return get_irn_n(irn, be_pos_Reload_frame);
399 ir_node *be_get_Spill_val(const ir_node *irn)
401 assert(be_is_Spill(irn));
402 return get_irn_n(irn, be_pos_Spill_val);
404 ir_node *be_get_Spill_frame(const ir_node *irn)
406 assert(be_is_Spill(irn));
407 return get_irn_n(irn, be_pos_Spill_frame);
/*
 * Perm: permutes n values of one register class; produces a mode_T node
 * with one constrained in and out per operand.
 */
410 ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
413 ir_node *irn = new_ir_node(NULL, irg, bl, op_be_Perm, mode_T, n, in);
414 init_node_attr(irn, n);
415 for(i = 0; i < n; ++i) {
416 be_node_set_reg_class(irn, i, cls);
417 be_node_set_reg_class(irn, OUT_POS(i), cls);
/*
 * MemPerm: permutes n spill slots in memory. Input 0 is the frame/stack
 * pointer, inputs 1..n are the values; per-slot entities are stored in the
 * attribute and filled in later via be_set_MemPerm_{in,out}_entity.
 */
423 ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
426 ir_node *frame = get_irg_frame(irg);
427 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
429 const arch_register_t *sp = arch_env->isa->sp;
430 be_memperm_attr_t *attr;
/* stack-allocated in-array: frame pointer first, then the n operands.
 * NOTE(review): alloca is unchecked -- n is assumed small/bounded here. */
433 real_in = alloca((n+1) * sizeof(real_in[0]));
435 memcpy(&real_in[1], in, n * sizeof(real_in[0]));
437 irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n+1, real_in);
439 init_node_attr(irn, n + 1);
440 be_node_set_reg_class(irn, 0, sp->reg_class);   /* input 0: stack pointer */
441 for(i = 0; i < n; ++i) {
442 be_node_set_reg_class(irn, i + 1, cls_frame);
443 be_node_set_reg_class(irn, OUT_POS(i), cls_frame);
/* entity arrays live on the graph's obstack, zeroed until assigned */
446 attr = get_irn_attr(irn);
448 attr->in_entities = obstack_alloc(irg->obst, n * sizeof(attr->in_entities[0]));
449 memset(attr->in_entities, 0, n * sizeof(attr->in_entities[0]));
450 attr->out_entities = obstack_alloc(irg->obst, n*sizeof(attr->out_entities[0]));
451 memset(attr->out_entities, 0, n*sizeof(attr->out_entities[0]));
/* Copy: a register-to-register move of a single value within class cls. */
457 ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op)
463 res = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), 1, in);
464 init_node_attr(res, 1);
465 be_node_set_reg_class(res, 0, cls);
466 be_node_set_reg_class(res, OUT_POS(0), cls);
/* Accessors for the copied operand. */
470 ir_node *be_get_Copy_op(const ir_node *cpy) {
471 return get_irn_n(cpy, be_pos_Copy_op);
474 void be_set_Copy_op(ir_node *cpy, ir_node *op) {
475 set_irn_n(cpy, be_pos_Copy_op, op);
/*
 * Keep: artificial use keeping n values alive. Built with dynamic arity
 * (created empty, operands appended one by one).
 */
478 ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
483 res = new_ir_node(NULL, irg, bl, op_be_Keep, mode_ANY, -1, NULL);
484 init_node_attr(res, -1);   /* -1: growable reg_data array */
486 for(i = 0; i < n; ++i) {
487 add_irn_n(res, in[i]);
488 add_register_req(res);
489 be_node_set_reg_class(res, i, cls);
/* Append one more kept value to an existing Keep. */
496 void be_Keep_add_node(ir_node *keep, const arch_register_class_t *cls, ir_node *node)
500 assert(be_is_Keep(keep));
501 n = add_irn_n(keep, node);
502 add_register_req(keep);
503 be_node_set_reg_class(keep, n, cls);
/*
 * Call: backend call node. Fixed inputs are memory, stack pointer and the
 * called address, followed by the n real arguments; n_outs results are
 * reserved in the register data.
 */
506 ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr,
507 int n_outs, int n, ir_node *in[], ir_type *call_tp)
510 int real_n = be_pos_Call_first_arg + n;
/* stack-allocated array holding fixed inputs plus arguments */
514 NEW_ARR_A(ir_node *, real_in, real_n);
515 real_in[be_pos_Call_mem] = mem;
516 real_in[be_pos_Call_sp] = sp;
517 real_in[be_pos_Call_ptr] = ptr;
518 memcpy(&real_in[be_pos_Call_first_arg], in, n * sizeof(in[0]));
520 irn = new_ir_node(dbg, irg, bl, op_be_Call, mode_T, real_n, real_in);
/* reserve reg_data entries for whichever side (ins or outs) is larger */
521 a = init_node_attr(irn, (n_outs > real_n ? n_outs : real_n));
523 a->call_tp = call_tp;
527 /* Gets the call entity or NULL if this is no static call. */
528 ir_entity *be_Call_get_entity(const ir_node *call) {
529 be_call_attr_t *a = get_irn_attr(call);
530 assert(be_is_Call(call));
534 /* Sets the call entity. */
535 void be_Call_set_entity(ir_node *call, ir_entity *ent) {
536 be_call_attr_t *a = get_irn_attr(call);
537 assert(be_is_Call(call));
541 /* Gets the call type. */
542 ir_type *be_Call_get_type(ir_node *call) {
543 be_call_attr_t *a = get_irn_attr(call);
544 assert(be_is_Call(call));
548 /* Sets the call type. */
549 void be_Call_set_type(ir_node *call, ir_type *call_tp) {
550 be_call_attr_t *a = get_irn_attr(call);
551 assert(be_is_Call(call));
552 a->call_tp = call_tp;
555 /* Construct a new be_Return. */
/* n_res counts the real return values among the n inputs. */
556 ir_node *be_new_Return(dbg_info *dbg, ir_graph *irg, ir_node *bl, int n_res, int n, ir_node *in[])
559 ir_node *irn = new_ir_node(dbg, irg, bl, op_be_Return, mode_X, n, in);
560 init_node_attr(irn, n);
561 a = get_irn_attr(irn);
562 a->num_ret_vals = n_res;
567 /* Returns the number of real returns values */
568 int be_Return_get_n_rets(ir_node *ret)
570 be_return_attr_t *a = get_irn_attr(ret);
571 return a->num_ret_vals;
/*
 * IncSP: adjust the stack pointer by a constant offset. The result is
 * pinned to the stack register and pre-colored with it.
 */
574 ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, int offset)
581 irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, sizeof(in) / sizeof(in[0]), in);
582 a = init_node_attr(irn, 1);
/* must not be spilled/copied and modifies the stack pointer */
585 be_node_set_flags(irn, -1, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
587 /* Set output constraint to stack register. */
588 be_node_set_reg_class(irn, 0, sp->reg_class);
589 be_set_constr_single_reg(irn, BE_OUT_POS(0), sp);
590 be_node_set_irn_reg(NULL, irn, sp);
/* AddSP: add a run-time size to the stack pointer (alloca-like). */
595 ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz)
599 ir_node *in[be_pos_AddSP_last];
601 in[be_pos_AddSP_old_sp] = old_sp;
602 in[be_pos_AddSP_size] = sz;
604 irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, be_pos_AddSP_last, in);
605 a = init_node_attr(irn, be_pos_AddSP_last);
607 be_node_set_flags(irn, OUT_POS(pn_be_AddSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
609 /* Set output constraint to stack register. */
610 be_set_constr_single_reg(irn, be_pos_AddSP_old_sp, sp);
611 be_node_set_reg_class(irn, be_pos_AddSP_size, arch_register_get_class(sp));
612 be_set_constr_single_reg(irn, OUT_POS(pn_be_AddSP_res), sp);
613 a->reg_data[pn_be_AddSP_res].reg = sp;   /* pre-color the result */
/* SubSP: subtract a run-time size from the stack pointer. */
618 ir_node *be_new_SubSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz)
622 ir_node *in[be_pos_SubSP_last];
624 in[be_pos_SubSP_old_sp] = old_sp;
625 in[be_pos_SubSP_size] = sz;
627 irn = new_ir_node(NULL, irg, bl, op_be_SubSP, mode_T, be_pos_SubSP_last, in);
628 a = init_node_attr(irn, be_pos_SubSP_last);
630 be_node_set_flags(irn, OUT_POS(pn_be_SubSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
632 /* Set output constraint to stack register. */
633 be_set_constr_single_reg(irn, be_pos_SubSP_old_sp, sp);
634 be_node_set_reg_class(irn, be_pos_SubSP_size, arch_register_get_class(sp));
635 be_set_constr_single_reg(irn, OUT_POS(pn_be_SubSP_res), sp);
636 a->reg_data[pn_be_SubSP_res].reg = sp;   /* pre-color the result */
/* SetSP: overwrite the stack pointer with an arbitrary value. */
641 ir_node *be_new_SetSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *op, ir_node *mem)
650 irn = new_ir_node(NULL, irg, bl, op_be_SetSP, get_irn_mode(old_sp), 3, in);
651 a = init_node_attr(irn, 3);
653 be_node_set_flags(irn, OUT_POS(0), arch_irn_flags_ignore | arch_irn_flags_modify_sp);
655 /* Set output constraint to stack register. */
656 be_set_constr_single_reg(irn, OUT_POS(0), sp);
/* NOTE(review): these reuse the be_pos_AddSP_* constants for a SetSP node --
 * presumably the position values coincide; verify against the header. */
657 be_node_set_reg_class(irn, be_pos_AddSP_size, sp->reg_class);
658 be_node_set_reg_class(irn, be_pos_AddSP_old_sp, sp->reg_class);
/*
 * StackParam: load an incoming argument from its stack slot (entity ent)
 * relative to the frame pointer.
 */
663 ir_node *be_new_StackParam(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, ir_entity *ent)
669 in[0] = frame_pointer;
670 irn = new_ir_node(NULL, irg, bl, op_be_StackParam, mode, 1, in);
671 a = init_node_attr(irn, 1);
674 be_node_set_reg_class(irn, 0, cls_frame);
675 be_node_set_reg_class(irn, OUT_POS(0), cls);
/*
 * RegParams: mode_T source of the incoming register arguments; one
 * requirement entry is reserved per expected result.
 */
679 ir_node *be_new_RegParams(ir_graph *irg, ir_node *bl, int n_outs)
684 res = new_ir_node(NULL, irg, bl, op_be_RegParams, mode_T, 0, NULL);
685 init_node_attr(res, -1);   /* growable reg_data array */
686 for(i = 0; i < n_outs; ++i)
687 add_register_req(res);
/*
 * Appends one output to a RegParams node, constrains it to the given
 * register and returns the corresponding Proj.
 */
692 ir_node *be_RegParams_append_out_reg(ir_node *regparams,
693 const arch_env_t *arch_env,
694 const arch_register_t *reg)
696 ir_graph *irg = get_irn_irg(regparams);
697 ir_node *block = get_nodes_block(regparams);
698 be_node_attr_t *attr = get_irn_attr(regparams);
699 const arch_register_class_t *cls = arch_register_get_class(reg);
700 ir_mode *mode = arch_register_class_mode(cls);
701 int n = ARR_LEN(attr->reg_data);   /* index of the new output */
703 assert(be_is_RegParams(regparams));
/* NOTE(review): declaration after a statement -- needs C99/GNU mode if the
 * project otherwise builds as C89; confirm the build flags. */
704 ir_node *proj = new_r_Proj(irg, block, regparams, mode, n);
705 add_register_req(regparams);
706 be_set_constr_single_reg(regparams, n, reg);
707 arch_set_irn_register(arch_env, proj, reg);
709 /* TODO decide, whether we need to set ignore/modifysp flags here? */
/*
 * FrameLoad: load from a frame entity; mode_T result (memory + value).
 */
714 ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
715 ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_entity *ent)
723 irn = new_ir_node(NULL, irg, bl, op_be_FrameLoad, mode_T, 2, in);
724 a = init_node_attr(irn, 3);
727 be_node_set_reg_class(irn, 1, cls_frame);   /* input 1: frame pointer */
728 be_node_set_reg_class(irn, OUT_POS(pn_Load_res), cls_data);
/*
 * FrameStore: store a value to a frame entity; mode_T result.
 */
732 ir_node *be_new_FrameStore(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
733 ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_node *data, ir_entity *ent)
742 irn = new_ir_node(NULL, irg, bl, op_be_FrameStore, mode_T, 3, in);
743 a = init_node_attr(irn, 3);
746 be_node_set_reg_class(irn, 1, cls_frame);   /* input 1: frame pointer */
747 be_node_set_reg_class(irn, 2, cls_data);    /* input 2: stored value */
/*
 * FrameAddr: address of a frame entity (frame pointer plus offset).
 * Passed through optimize_node so equal addresses are CSE'd (see the
 * FrameAddr_cmp_attr callback installed in be_node_init).
 */
751 ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_entity *ent)
758 irn = new_ir_node(NULL, irg, bl, op_be_FrameAddr, get_irn_mode(frame), 1, in);
759 a = init_node_attr(irn, 1);
762 be_node_set_reg_class(irn, 0, cls_frame);
763 be_node_set_reg_class(irn, OUT_POS(0), cls_frame);
765 return optimize_node(irn);
/*
 * CopyKeep: a Copy of src that additionally keeps n other values alive
 * (input 0 is the copied value, inputs 1..n the kept ones).
 */
768 ir_node *be_new_CopyKeep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, int n, ir_node *in_keep[], ir_mode *mode)
771 ir_node **in = (ir_node **) alloca((n + 1) * sizeof(in[0]));
774 memcpy(&in[1], in_keep, n * sizeof(in[0]));
775 irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, n + 1, in);
776 init_node_attr(irn, n + 1);
777 be_node_set_reg_class(irn, OUT_POS(0), cls);
778 be_node_set_reg_class(irn, 0, cls);
/* Convenience: CopyKeep with exactly one kept node. */
783 ir_node *be_new_CopyKeep_single(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, ir_node *keep, ir_mode *mode)
788 return be_new_CopyKeep(cls, irg, bl, src, 1, in, mode);
/* Accessors for the copied operand of a CopyKeep. */
791 ir_node *be_get_CopyKeep_op(const ir_node *cpy) {
792 return get_irn_n(cpy, be_pos_CopyKeep_op);
795 void be_set_CopyKeep_op(ir_node *cpy, ir_node *op) {
796 set_irn_n(cpy, be_pos_CopyKeep_op, op);
/*
 * Barrier: mode_T node separating scheduling regions; built with dynamic
 * arity, operands appended one by one.
 */
799 ir_node *be_new_Barrier(ir_graph *irg, ir_node *bl, int n, ir_node *in[])
804 res = new_ir_node(NULL, irg, bl, op_be_Barrier, mode_T, -1, NULL);
805 init_node_attr(res, -1);   /* growable reg_data array */
806 for(i = 0; i < n; ++i) {
807 add_irn_n(res, in[i]);
808 add_register_req(res);
/* Route one more value through an existing Barrier; returns its Proj. */
814 ir_node *be_Barrier_append_node(ir_node *barrier, ir_node *node)
816 ir_graph *irg = get_irn_irg(barrier);
817 ir_node *block = get_nodes_block(barrier);
818 ir_mode *mode = get_irn_mode(node);
819 int n = add_irn_n(barrier, node);
820 ir_node *proj = new_r_Proj(irg, block, barrier, mode, n);
821 add_register_req(barrier);
/* Opcode predicates: one per backend node kind. */
826 int be_is_Spill (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Spill ; }
827 int be_is_Reload (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Reload ; }
828 int be_is_Copy (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Copy ; }
829 int be_is_CopyKeep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_CopyKeep ; }
830 int be_is_Perm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Perm ; }
831 int be_is_MemPerm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_MemPerm ; }
832 int be_is_Keep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Keep ; }
833 int be_is_Call (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Call ; }
834 int be_is_Return (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Return ; }
835 int be_is_IncSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_IncSP ; }
836 int be_is_SetSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SetSP ; }
837 int be_is_AddSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_AddSP ; }
838 int be_is_SubSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SubSP ; }
839 int be_is_RegParams (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_RegParams ; }
840 int be_is_StackParam (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_StackParam ; }
841 int be_is_FrameAddr (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameAddr ; }
842 int be_is_FrameLoad (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameLoad ; }
843 int be_is_FrameStore (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameStore ; }
844 int be_is_Barrier (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Barrier ; }
/* Whether irn's attribute is a be_frame_attr_t (has an entity + offset);
 * the matching case labels are not visible in this extraction. */
846 int be_has_frame_entity(const ir_node *irn)
848 switch(be_get_irn_opcode(irn)) {
/* Frame entity of irn, or (presumably) NULL for non-frame nodes. */
861 ir_entity *be_get_frame_entity(const ir_node *irn)
863 if (be_has_frame_entity(irn)) {
864 be_frame_attr_t *a = get_irn_attr(irn);
/* Frame offset of irn; only meaningful for frame-entity nodes. */
870 int be_get_frame_offset(const ir_node *irn)
872 assert(is_be_node(irn));
873 if (be_has_frame_entity(irn)) {
874 be_frame_attr_t *a = get_irn_attr(irn);
/* MemPerm slot-entity accessors, bounds-checked against the entity arity. */
880 void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity *ent)
882 be_memperm_attr_t *attr = get_irn_attr(irn);
884 assert(be_is_MemPerm(irn));
885 assert(n < be_get_MemPerm_entity_arity(irn));
887 attr->in_entities[n] = ent;
890 ir_entity* be_get_MemPerm_in_entity(const ir_node* irn, int n)
892 be_memperm_attr_t *attr = get_irn_attr(irn);
894 assert(be_is_MemPerm(irn));
895 assert(n < be_get_MemPerm_entity_arity(irn));
897 return attr->in_entities[n];
900 void be_set_MemPerm_out_entity(const ir_node *irn, int n, ir_entity *ent)
902 be_memperm_attr_t *attr = get_irn_attr(irn);
904 assert(be_is_MemPerm(irn));
905 assert(n < be_get_MemPerm_entity_arity(irn));
907 attr->out_entities[n] = ent;
910 ir_entity* be_get_MemPerm_out_entity(const ir_node* irn, int n)
912 be_memperm_attr_t *attr = get_irn_attr(irn);
914 assert(be_is_MemPerm(irn));
915 assert(n < be_get_MemPerm_entity_arity(irn));
917 return attr->out_entities[n];
/* Number of permuted slots: input 0 is the stack pointer, hence the -1. */
920 int be_get_MemPerm_entity_arity(const ir_node *irn)
922 return get_irn_arity(irn) - 1;
/*
 * bitset callback implementing the "limited" register requirement:
 * fills bs with the admissible registers, dispatching on the requirement
 * kind stored in the be_req_t passed as data.
 */
925 static void be_limited(void *data, bitset_t *bs)
927 be_req_t *req = data;
930 case be_req_kind_negate_old_limited:
931 case be_req_kind_old_limited:
/* delegate to the wrapped (old) limited function ... */
932 req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs);
/* ... then flip the set when negation was requested (the flip itself is
 * not visible in this extraction) */
933 if(req->kind == be_req_kind_negate_old_limited)
936 case be_req_kind_single_reg:
/* exactly one admissible register */
937 bitset_clear_all(bs);
938 bitset_set(bs, req->x.single_reg->index);
943 static INLINE be_req_t *get_req(ir_node *irn, int pos)
945 int idx = pos < 0 ? -(pos + 1) : pos;
946 be_node_attr_t *a = get_irn_attr(irn);
947 be_reg_data_t *rd = &a->reg_data[idx];
948 be_req_t *r = pos < 0 ? &rd->req : &rd->in_req;
950 assert(is_be_node(irn));
951 assert(!(pos >= 0) || pos < get_irn_arity(irn));
952 assert(!(pos < 0) || -(pos + 1) <= ARR_LEN(a->reg_data));
/*
 * Constrain the operand/result at pos to exactly one register.
 * The be_req_t itself serves as the environment for the be_limited
 * callback installed here.
 */
957 void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg)
959 be_req_t *r = get_req(irn, pos);
961 r->kind = be_req_kind_single_reg;
962 r->x.single_reg = reg;
963 r->req.limited = be_limited;
964 r->req.limited_env = r;
965 r->req.type = arch_register_req_type_limited;
966 r->req.cls = reg->reg_class;
/*
 * Adopt an existing limited requirement: the original limited callback
 * and environment are wrapped and re-dispatched through be_limited.
 */
969 void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req)
971 be_req_t *r = get_req(irn, pos);
973 assert(arch_register_req_is(req, limited));
975 r->kind = be_req_kind_old_limited;
976 r->req.limited = be_limited;
977 r->req.limited_env = r;
978 r->req.type = arch_register_req_type_limited;
979 r->req.cls = req->cls;
981 r->x.old_limited.old_limited = req->limited;
982 r->x.old_limited.old_limited_env = req->limited_env;
/* Set the arch flags stored in the requirement record at pos. */
985 void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags)
987 be_req_t *r = get_req(irn, pos);
/* Set the register class at pos; a NULL class (not visible here) resets
 * the requirement to "none", otherwise "none" is upgraded to "normal". */
991 void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls)
993 be_req_t *r = get_req(irn, pos);
998 r->req.type = arch_register_req_type_none;
999 } else if (r->req.type == arch_register_req_type_none) {
1000 r->req.type = arch_register_req_type_normal;
/* Overwrite the requirement type at pos directly. */
1004 void be_node_set_req_type(ir_node *irn, int pos, arch_register_req_type_t type)
1006 be_req_t *r = get_req(irn, pos);
/* IncSP input accessors: input 0 is the previous stack pointer. */
1010 ir_node *be_get_IncSP_pred(ir_node *irn) {
1011 assert(be_is_IncSP(irn));
1012 return get_irn_n(irn, 0);
1015 void be_set_IncSP_pred(ir_node *incsp, ir_node *pred) {
1016 assert(be_is_IncSP(incsp));
1017 set_irn_n(incsp, 0, pred);
/* Input 1 of an IncSP is its memory dependency. */
1020 ir_node *be_get_IncSP_mem(ir_node *irn) {
1021 assert(be_is_IncSP(irn));
1022 return get_irn_n(irn, 1);
/* Offset accessors for the be_stack_attr_t of an IncSP. */
1025 void be_set_IncSP_offset(ir_node *irn, int offset)
1027 be_stack_attr_t *a = get_irn_attr(irn);
1028 assert(be_is_IncSP(irn));
1032 int be_get_IncSP_offset(const ir_node *irn)
1034 be_stack_attr_t *a = get_irn_attr(irn);
1035 assert(be_is_IncSP(irn));
/*
 * Convenience: spill irn into the graph's frame, deriving the register
 * classes of the value and of the frame pointer from the arch env.
 */
1039 ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn)
1041 ir_node *bl = get_nodes_block(irn);
1042 ir_graph *irg = get_irn_irg(bl);
1043 ir_node *frame = get_irg_frame(irg);
1044 const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
1045 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
1048 spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
/*
 * Convenience: build a Reload of the given spill and insert it into the
 * schedule -- at the end of a block (skipping control flow) when insert
 * is a block, otherwise directly before the insert node.
 */
1052 ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
1055 ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
1056 ir_graph *irg = get_irn_irg(bl);
1057 ir_node *frame = get_irg_frame(irg);
1058 const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
/* the spill value may also be a memory Phi merging several spills */
1060 assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
1062 reload = be_new_Reload(cls, cls_frame, irg, bl, frame, spill, mode);
1064 if (is_Block(insert)) {
1065 insert = sched_skip(insert, 0, sched_skip_cf_predicator, (void *) arch_env);
1066 sched_add_after(insert, reload);
1068 sched_add_before(insert, reload);
1076 | _ \ ___ __ _ | _ \ ___ __ _ ___
1077 | |_) / _ \/ _` | | |_) / _ \/ _` / __|
1078 | _ < __/ (_| | | _ < __/ (_| \__ \
1079 |_| \_\___|\__, | |_| \_\___|\__, |___/
/*
 * Copy the stored output requirement for out_pos into *req; positions
 * beyond the reg_data array yield "none". Copy results additionally get a
 * should-be-same hint toward their operand.
 */
1085 static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos)
1087 const be_node_attr_t *a = get_irn_attr(irn);
1089 if(out_pos < ARR_LEN(a->reg_data)) {
1090 memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));
/* a Copy prefers to land in the same register as its operand */
1092 if(be_is_Copy(irn)) {
1093 req->type |= arch_register_req_type_should_be_same;
1094 req->other_same = be_get_Copy_op(irn);
1097 req->type = arch_register_req_type_none;
/*
 * Copy the stored input requirement for pos into *req; out-of-range
 * positions yield "none".
 */
1104 static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos)
1106 const be_node_attr_t *a = get_irn_attr(irn);
1108 if(pos < get_irn_arity(irn) && pos < ARR_LEN(a->reg_data)) {
1109 memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));
1111 req->type = arch_register_req_type_none;
/*
 * arch_irn_ops callback: the register requirement of irn at pos.
 * Projs are redirected to the output requirement of their predecessor.
 */
1118 static const arch_register_req_t *
1119 be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1124 if (get_irn_mode(irn) == mode_T)
1127 out_pos = redir_proj((const ir_node **)&irn);
1128 assert(is_be_node(irn));
1129 return put_out_reg_req(req, irn, out_pos);
1133 if (is_be_node(irn)) {
1135 For spills and reloads, we return "none" as requirement for frame pointer,
1136 so every input is ok. Some backends need this (e.g. STA). We use an arbitrary
1137 large number as pos, so put_in_reg_req will return "none" as requirement.
1139 if ((be_is_Spill(irn) && pos == be_pos_Spill_frame) ||
1140 (be_is_Reload(irn) && pos == be_pos_Reload_frame))
1141 return put_in_reg_req(req, irn, INT_MAX);
1143 return put_in_reg_req(req, irn, pos);
/* arch_irn_ops callback: the register assigned to irn's value, or NULL. */
1151 const arch_register_t *
1152 be_node_get_irn_reg(const void *_self, const ir_node *irn)
1154 be_reg_data_t *r = retrieve_reg_data(irn);
1155 return r ? r->reg : NULL;
/* arch_irn_ops callback: classify a (possibly Proj-wrapped) be node. */
1158 static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
1160 redir_proj((const ir_node **) &irn);
1162 switch(be_get_irn_opcode(irn)) {
1163 #define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b
1165 XXX(Reload, reload);
1168 XXX(Return, branch);
1169 XXX(StackParam, stackparam);
/* everything else (and foreign nodes) is plain */
1172 return arch_irn_class_normal;
/* arch_irn_ops callback: the flags stored in irn's requirement record. */
1178 static arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn)
1180 be_reg_data_t *r = retrieve_reg_data(irn);
1181 return r ? r->req.flags : 0;
/* arch_irn_ops wrappers around the frame-entity accessors above. */
1184 static ir_entity *be_node_get_frame_entity(const void *self, const ir_node *irn)
1186 return be_get_frame_entity(irn);
1189 static void be_node_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent)
1193 assert(be_has_frame_entity(irn));
1195 a = get_irn_attr(irn);
1199 static void be_node_set_frame_offset(const void *self, ir_node *irn, int offset)
1201 if(be_has_frame_entity(irn)) {
1202 be_frame_attr_t *a = get_irn_attr(irn);
/* Only IncSP nodes bias the stack pointer, by their constant offset. */
1207 static int be_node_get_sp_bias(const void *self, const ir_node *irn)
1209 return be_is_IncSP(irn) ? be_get_IncSP_offset(irn) : 0;
1213 ___ ____ _ _ _ _ _ _
1214 |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
1215 | || |_) | \| | | |_| |/ _` | '_ \ / _` | |/ _ \ '__|
1216 | || _ <| |\ | | _ | (_| | | | | (_| | | __/ |
1217 |___|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
/* Virtual function table wiring the callbacks above into the arch
 * interface; unsupported operations are left NULL. */
1221 static const arch_irn_ops_if_t be_node_irn_ops_if = {
1222 be_node_get_irn_reg_req,
1223 be_node_set_irn_reg,
1224 be_node_get_irn_reg,
1227 be_node_get_frame_entity,
1228 be_node_set_frame_entity,
1229 be_node_set_frame_offset,
1230 be_node_get_sp_bias,
1231 NULL, /* get_inverse */
1232 NULL, /* get_op_estimated_cost */
1233 NULL, /* possible_memory_operand */
1234 NULL, /* perform_memory_operand */
1237 static const arch_irn_ops_t be_node_irn_ops = {
/* Handler entry point: hand out the be ops for (Proj-wrapped) be nodes. */
1241 const void *be_node_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
1243 redir_proj((const ir_node **) &irn);
1244 return is_be_node(irn) ? &be_node_irn_ops : NULL;
1247 const arch_irn_handler_t be_node_irn_handler = {
1252 ____ _ _ ___ ____ _ _ _ _ _ _
1253 | _ \| |__ (_) |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
1254 | |_) | '_ \| | | || |_) | \| | | |_| |/ _` | '_ \ / _` | |/ _ \ '__|
1255 | __/| | | | | | || _ <| |\ | | _ | (_| | | | | (_| | | __/ |
1256 |_| |_| |_|_| |___|_| \_\_| \_| |_| |_|\__,_|_| |_|\__,_|_|\___|_|
/* phi_handler_t fields (struct header is outside this view):
 * the handler/ops pair let container_of recover the phi_handler_t
 * from either embedded member; arch_env is needed to query register
 * requirements of Phi operands. A `pmap *regs` field is used later
 * (see phi_set_irn_reg) but is not visible in this extraction. */
1261 arch_irn_handler_t irn_handler;
1262 arch_irn_ops_t irn_ops;
1263 const arch_env_t *arch_env;
/* Recover the enclosing phi_handler_t from a pointer to either of its
 * embedded members (classic container_of idiom). */
1267 #define get_phi_handler_from_handler(h) container_of(h, phi_handler_t, irn_handler)
1268 #define get_phi_handler_from_ops(h) container_of(h, phi_handler_t, irn_ops)
/* irn-handler entry point for Phis: only data-mode Phis (mode_is_datab)
 * are handled; control/other Phis fall through to other handlers. */
1270 static const void *phi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
1272 const phi_handler_t *h = get_phi_handler_from_handler(handler);
1273 return is_Phi(irn) && mode_is_datab(get_irn_mode(irn)) ? &h->irn_ops : NULL;
1277 * Get register class of a Phi.
/* Determine the register requirement of a Phi by looking at its operands:
 * if a non-Phi operand is found, take that operand's output requirement
 * (BE_OUT_POS(0)); if all operands are Phis themselves, start a DFS over
 * the Phi graph. The lazily-created *visited pset guards against cycles
 * of mutually-referencing Phis. Returns NULL when @p phi was already
 * visited (cycle) — the non-NULL/NULL return drives the recursion below. */
1280 static const arch_register_req_t *get_Phi_reg_req_recursive(const phi_handler_t *h, arch_register_req_t *req, const ir_node *phi, pset **visited)
1282 int n = get_irn_arity(phi);
/* Cycle check: bail out if this Phi was seen before in the current DFS. */
1286 if(*visited && pset_find_ptr(*visited, phi))
/* First pass: look for a direct non-Phi operand and use its requirement. */
1289 for(i = 0; i < n; ++i) {
1290 op = get_irn_n(phi, i);
1292 return arch_get_register_req(h->arch_env, req, op, BE_OUT_POS(0));
1296 The operands of that Phi were all Phis themselves.
1297 We have to start a DFS for a non-Phi argument now.
/* Lazily create the visited set on the first recursive descent. */
1300 *visited = pset_new_ptr(16);
1302 pset_insert_ptr(*visited, phi);
/* Second pass: recurse into each Phi operand until a non-Phi is found. */
1304 for(i = 0; i < n; ++i) {
1305 op = get_irn_n(phi, i);
1306 if(get_Phi_reg_req_recursive(h, req, op, visited))
/* arch_irn_ops callback for Phis: derive the requirement from a (possibly
 * transitive) non-Phi operand, then downgrade it to a plain "normal"
 * requirement — operand-specific constraints must not leak onto the Phi.
 * NOTE(review): destruction of the lazily-created `visited` pset is not
 * visible in this extraction — confirm it is freed in the full source. */
1313 static const arch_register_req_t *phi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
1315 phi_handler_t *phi_handler = get_phi_handler_from_ops(self);
1316 pset *visited = NULL;
1318 get_Phi_reg_req_recursive(phi_handler, req, irn, &visited);
1319 /* Set the requirements type to normal, since an operand of the Phi could have had constraints. */
1320 req->type = arch_register_req_type_normal;
/* arch_irn_ops callback: Phis have no attribute space of their own, so the
 * assigned register is kept in the handler's side map (h->regs). */
1327 static void phi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg)
1329 phi_handler_t *h = get_phi_handler_from_ops(self);
1330 pmap_insert(h->regs, irn, (void *) reg);
/* arch_irn_ops callback: look up the Phi's register in the side map;
 * pmap_get yields NULL when no register has been assigned yet. */
1333 static const arch_register_t *phi_get_irn_reg(const void *self, const ir_node *irn)
1335 phi_handler_t *h = get_phi_handler_from_ops(self);
1336 return pmap_get(h->regs, (void *) irn);
/* arch_irn_ops callback: every data Phi is an ordinary ("normal") node. */
1339 static arch_irn_class_t phi_classify(const void *_self, const ir_node *irn)
1341 return arch_irn_class_normal;
/* arch_irn_ops callback: Phis carry no special backend flags. */
1344 static arch_irn_flags_t phi_get_flags(const void *_self, const ir_node *irn)
1346 return arch_irn_flags_none;
/* The following four callbacks are stubs: Phis have no frame entity, no
 * frame offset, and never bias the stack pointer. Bodies (returning
 * NULL/0 or doing nothing) are not visible in this extraction. */
1349 static ir_entity *phi_get_frame_entity(const void *_self, const ir_node *irn)
1354 static void phi_set_frame_entity(const void *_self, ir_node *irn, ir_entity *ent)
1358 static void phi_set_frame_offset(const void *_self, ir_node *irn, int bias)
1362 static int phi_get_sp_bias(const void* self, const ir_node *irn)
/* Virtual-function table for the Phi handler, mirroring the layout of
 * be_node_irn_ops_if above; optional callbacks are NULL. */
1367 static const arch_irn_ops_if_t phi_irn_ops = {
1368 phi_get_irn_reg_req,
1373 phi_get_frame_entity,
1374 phi_set_frame_entity,
1375 phi_set_frame_offset,
1377 NULL, /* get_inverse */
1378 NULL, /* get_op_estimated_cost */
1379 NULL, /* possible_memory_operand */
1380 NULL, /* perform_memory_operand */
1383 static const arch_irn_handler_t phi_irn_handler = {
/* Allocate and initialize a Phi handler for the given arch environment.
 * Ownership transfers to the caller, who must release it via
 * be_phi_handler_free(). The register side map starts out empty. */
1387 arch_irn_handler_t *be_phi_handler_new(const arch_env_t *arch_env)
1389 phi_handler_t *h = xmalloc(sizeof(h[0]));
1390 h->irn_handler.get_irn_ops = phi_get_irn_ops;
1391 h->irn_ops.impl = &phi_irn_ops;
1392 h->arch_env = arch_env;
1393 h->regs = pmap_create();
/* Return the embedded handler; container_of recovers the full struct. */
1394 return (arch_irn_handler_t *) h;
/* Destroy a Phi handler created by be_phi_handler_new().
 * NOTE(review): only pmap_destroy is visible here; the free(handler) of
 * the xmalloc'd struct is not in this extraction — confirm it exists. */
1397 void be_phi_handler_free(arch_irn_handler_t *handler)
1399 phi_handler_t *h = (void *) handler;
1400 pmap_destroy(h->regs);
/* Public variant of phi_get_irn_ops. Note it checks only is_Phi(),
 * without the mode_is_datab() filter used by the static handler entry. */
1404 const void *be_phi_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
1406 phi_handler_t *phi_handler = get_phi_handler_from_handler(self);
1407 return is_Phi(irn) ? &phi_handler->irn_ops : NULL;
/* Drop all Phi register assignments by replacing the side map with a
 * fresh, empty one. Called between register-allocation rounds. */
1410 void be_phi_handler_reset(arch_irn_handler_t *handler)
1412 phi_handler_t *h = get_phi_handler_from_handler(handler);
1414 pmap_destroy(h->regs);
1415 h->regs = pmap_create();
1420 | \ | | ___ __| | ___ | _ \ _ _ _ __ ___ _ __ (_)_ __ __ _
1421 | \| |/ _ \ / _` |/ _ \ | | | | | | | '_ ` _ \| '_ \| | '_ \ / _` |
1422 | |\ | (_) | (_| | __/ | |_| | |_| | | | | | | |_) | | | | | (_| |
1423 |_| \_|\___/ \__,_|\___| |____/ \__,_|_| |_| |_| .__/|_|_| |_|\__, |
1428 * Dumps a register requirement to a file.
/* @param f    output stream
 * @param idx  requirement index, printed as "#idx " prefix
 * @param req  the backend requirement (flags + register requirement)
 * Prints the node's flag names and, if a register class is set, the
 * formatted register requirement. */
1430 static void dump_node_req(FILE *f, int idx, be_req_t *req)
1433 int did_something = 0;
1435 const char *prefix = buf;
1437 snprintf(buf, sizeof(buf), "#%d ", idx);
/* Defensive: snprintf already NUL-terminates, but make it explicit. */
1438 buf[sizeof(buf) - 1] = '\0';
1440 if(req->flags != arch_irn_flags_none) {
1441 fprintf(f, "%sflags: ", prefix);
/* Walk each flag bit up to the highest defined flag and print its name. */
1443 for(i = arch_irn_flags_none; i <= log2_ceil(arch_irn_flags_last); ++i) {
1444 if(req->flags & (1 << i)) {
1445 fprintf(f, "%s%s", prefix, arch_irn_flag_str(1 << i));
/* A register class of 0 means "no requirement" — nothing to print. */
1453 if(req->req.cls != 0) {
1456 arch_register_req_format(tmp, sizeof(tmp), &req->req);
1457 fprintf(f, "%s", tmp);
1466 * Dumps node register requirements to a file.
/* Prints, for each register-data slot of @p irn: the assigned register
 * (if any), then the in-requirements, then the out-requirements. */
1468 static void dump_node_reqs(FILE *f, ir_node *irn)
1471 be_node_attr_t *a = get_irn_attr(irn);
1472 int len = ARR_LEN(a->reg_data);
1474 fprintf(f, "registers: \n");
1475 for(i = 0; i < len; ++i) {
1476 be_reg_data_t *rd = &a->reg_data[i];
/* NOTE(review): rd->reg is dereferenced without a NULL check here,
 * unlike be_node_get_irn_reg which tolerates missing registers —
 * confirm a guard exists in the non-visible lines. */
1478 fprintf(f, "#%d: %s\n", i, rd->reg->name);
1481 fprintf(f, "in requirements\n");
1482 for(i = 0; i < len; ++i) {
1483 dump_node_req(f, i, &a->reg_data[i].in_req);
1486 fprintf(f, "\nout requirements\n");
1487 for(i = 0; i < len; ++i) {
/* `req` (as opposed to `in_req`) holds the out-requirement. */
1488 dump_node_req(f, i, &a->reg_data[i].req);
1493 * ir_op-Operation: dump a be node to file
/* ir_op dump callback: emits opcode/mode text or, for the "info" reason,
 * the full register requirements plus opcode-specific attributes
 * (IncSP offset, call entity, MemPerm entities). */
1495 static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
1497 be_node_attr_t *at = get_irn_attr(irn);
1499 assert(is_be_node(irn));
1502 case dump_node_opcode_txt:
/* NOTE(review): non-literal format string passed to fprintf — if an op
 * name ever contained '%' this would be undefined behavior; prefer
 * fputs(get_op_name(...), f) or fprintf(f, "%s", ...). */
1503 fprintf(f, get_op_name(get_irn_op(irn)));
1505 case dump_node_mode_txt:
/* NOTE(review): same non-literal format-string issue as above. */
1506 fprintf(f, get_mode_name(get_irn_mode(irn)));
1508 case dump_node_nodeattr_txt:
1510 case dump_node_info_txt:
1511 dump_node_reqs(f, irn);
1513 if(be_has_frame_entity(irn)) {
1514 be_frame_attr_t *a = (be_frame_attr_t *) at;
1516 int bits = get_type_size_bits(get_entity_type(a->ent));
1517 ir_fprintf(f, "frame entity: %+F, offset 0x%x (%d), size 0x%x (%d) bits\n",
1518 a->ent, a->offset, a->offset, bits, bits);
/* Opcode-specific extra information. */
1523 switch(be_get_irn_opcode(irn)) {
1526 be_stack_attr_t *a = (be_stack_attr_t *) at;
/* The two sentinel offsets stand for "expand by frame size" and
 * "shrink by frame size"; everything else is a literal byte offset. */
1527 if (a->offset == BE_STACK_FRAME_SIZE_EXPAND)
1528 fprintf(f, "offset: FRAME_SIZE\n");
1529 else if(a->offset == BE_STACK_FRAME_SIZE_SHRINK)
1530 fprintf(f, "offset: -FRAME SIZE\n");
/* NOTE(review): %u with a signed int offset — mismatched format
 * specifier; should be %d. */
1532 fprintf(f, "offset: %u\n", a->offset);
1537 be_call_attr_t *a = (be_call_attr_t *) at;
/* Only static calls have a known callee entity. */
1540 fprintf(f, "\ncalling: %s\n", get_entity_name(a->ent));
/* MemPerm: list the in/out frame entities being permuted. */
1546 for(i = 0; i < be_get_MemPerm_entity_arity(irn); ++i) {
1547 ir_entity *in, *out;
1548 in = be_get_MemPerm_in_entity(irn, i);
1549 out = be_get_MemPerm_out_entity(irn, i);
1551 fprintf(f, "\nin[%d]: %s\n", i, get_entity_name(in));
1554 fprintf(f, "\nout[%d]: %s\n", i, get_entity_name(out));
1570 * Copies the backend specific attributes from old node to new node.
/* ir_op copy_attr callback. After the raw memcpy of the attribute struct,
 * the reg_data flexible array must be duplicated separately, and the
 * self-referential limited_env back-pointers inside each requirement must
 * be re-targeted at the NEW array elements (they would otherwise still
 * point into the old node's data). */
1572 static void copy_attr(const ir_node *old_node, ir_node *new_node)
1574 be_node_attr_t *old_attr = get_irn_attr(old_node);
1575 be_node_attr_t *new_attr = get_irn_attr(new_node);
1576 struct obstack *obst = get_irg_obstack(get_irn_irg(new_node));
1579 assert(is_be_node(old_node));
1580 assert(is_be_node(new_node));
/* Shallow-copy the whole attribute struct first. */
1582 memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
1583 new_attr->reg_data = NULL;
1585 if(old_attr->reg_data != NULL)
1586 len = ARR_LEN(old_attr->reg_data);
/* Keep/RegParams/Barrier use a growable (F) array since their arity can
 * change later; all other nodes get a fixed-size array on the irg obstack. */
1590 if(be_is_Keep(old_node) || be_is_RegParams(old_node) || be_is_Barrier(old_node)) {
1591 new_attr->reg_data = NEW_ARR_F(be_reg_data_t, len);
1593 new_attr->reg_data = NEW_ARR_D(be_reg_data_t, obst, len);
1597 memcpy(new_attr->reg_data, old_attr->reg_data, len * sizeof(be_reg_data_t));
/* Fix up the self-referential limited_env pointers in both the out (req)
 * and in (in_req) requirements of every copied slot. */
1599 for(i = 0; i < len; ++i) {
1602 r = &new_attr->reg_data[i].req;
1603 r->req.limited_env = r;
1605 r = &new_attr->reg_data[i].in_req;
1606 r->req.limited_env = r;
1611 static const ir_op_ops be_node_op_ops = {