2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Backend node support for generic backend nodes.
23 * @author Sebastian Hack
27 * Backend node support for generic backend nodes.
28 * This file provides Perm, Copy, Spill and Reload nodes.
40 #include "bitfiddle.h"
41 #include "raw_bitset.h"
/* NOTE(review): this listing is truncated — original line numbers are embedded
 * at the start of each line and the surrounding struct/typedef keywords are in
 * elided lines.  Comments below describe only what is visible. */
/* Per-input register data: the requirement of one input operand. */
62 const arch_register_req_t *in_req;

65 /** The generic be nodes attribute type. */
/* Dynamically sized (ARR_*) array of per-input register requirements. */
67 be_reg_data_t *reg_data;

70 /** The be_Return nodes attribute type. */
72 be_node_attr_t node_attr; /**< base attributes of every be node. */
73 int num_ret_vals; /**< number of return values */
74 unsigned pop; /**< number of bytes that should be popped */
75 int emit_pop; /**< if set, emit pop bytes, even if pop = 0 */

78 /** The be_IncSP attribute type. */
80 be_node_attr_t node_attr; /**< base attributes of every be node. */
81 int offset; /**< The offset by which the stack shall be
83 int align; /**< whether stack should be aligned after the

87 /** The be_Frame attribute type. */
89 be_node_attr_t node_attr; /**< base attributes of every be node. */

94 /** The be_Call attribute type. */
96 be_node_attr_t node_attr; /**< base attributes of every be node. */
97 ir_entity *ent; /**< called entity if this is a static call. */
99 ir_type *call_tp; /**< call type, copied from the original Call */

/* be_MemPerm attribute: entity arrays used by the MemPerm accessors below. */
103 be_node_attr_t node_attr; /**< base attributes of every be node. */
104 ir_entity **in_entities;
105 ir_entity **out_entities;

/* Opcode objects for the generic backend nodes (the remaining op_be_*
 * globals are in elided lines). */
111 ir_op *op_be_MemPerm;
114 ir_op *op_be_CopyKeep;
121 ir_op *op_be_FrameAddr;
122 ir_op *op_be_Barrier;

/* Forward declaration; the ops table itself is defined near the file end. */
124 static const ir_op_ops be_node_op_ops;
127 * Compare two be node attributes.
129 * @return zero if both attributes are identical
/* Generic attribute compare used as node_cmp_attr for most be opcodes:
 * compares input-requirement arrays element-wise (the early-out returns
 * are in elided lines). */
131 static int node_cmp_attr(ir_node *a, ir_node *b)
133 const be_node_attr_t *a_attr = get_irn_generic_attr_const(a);
134 const be_node_attr_t *b_attr = get_irn_generic_attr_const(b);
135 int i, len = ARR_LEN(a_attr->reg_data);
137 if (len != ARR_LEN(b_attr->reg_data))
140 if (!be_nodes_equal(a, b))
143 for (i = len - 1; i >= 0; --i) {
144 if (!reg_reqs_equal(a_attr->reg_data[i].in_req,
145 b_attr->reg_data[i].in_req))

153 * Compare the attributes of two be_FrameAddr nodes.
155 * @return zero if both nodes have identical attributes
/* Frame-attribute compare: entity and offset, then fall back to the
 * generic compare. */
157 static int FrameAddr_cmp_attr(ir_node *a, ir_node *b)
159 const be_frame_attr_t *a_attr = get_irn_generic_attr_const(a);
160 const be_frame_attr_t *b_attr = get_irn_generic_attr_const(b);
162 if (a_attr->ent != b_attr->ent || a_attr->offset != b_attr->offset)
165 return node_cmp_attr(a, b);

169 * Compare the attributes of two be_Return nodes.
171 * @return zero if both nodes have identical attributes
173 static int Return_cmp_attr(ir_node *a, ir_node *b)
175 const be_return_attr_t *a_attr = get_irn_generic_attr_const(a);
176 const be_return_attr_t *b_attr = get_irn_generic_attr_const(b);
177 /* compares return count, pop amount and the emit_pop flag */
178 if (a_attr->num_ret_vals != b_attr->num_ret_vals)
180 if (a_attr->pop != b_attr->pop)
182 if (a_attr->emit_pop != b_attr->emit_pop)
185 return node_cmp_attr(a, b);

189 * Compare the attributes of two be_IncSP nodes.
191 * @return zero if both nodes have identical attributes
193 static int IncSP_cmp_attr(ir_node *a, ir_node *b)
195 const be_incsp_attr_t *a_attr = get_irn_generic_attr_const(a);
196 const be_incsp_attr_t *b_attr = get_irn_generic_attr_const(b);
198 if (a_attr->offset != b_attr->offset)
201 return node_cmp_attr(a, b);

205 * Compare the attributes of two be_Call nodes.
207 * @return zero if both nodes have identical attributes
209 static int Call_cmp_attr(ir_node *a, ir_node *b)
211 const be_call_attr_t *a_attr = get_irn_generic_attr_const(a);
212 const be_call_attr_t *b_attr = get_irn_generic_attr_const(b);
214 if (a_attr->ent != b_attr->ent ||
215 a_attr->call_tp != b_attr->call_tp)
218 return node_cmp_attr(a, b);

/* Allocate a zeroed register requirement on the backend obstack of the
 * node's graph (the return statement is in an elided line). */
221 static arch_register_req_t *allocate_reg_req(const ir_node *node)
223 ir_graph *irg = get_irn_irg(node);
224 struct obstack *obst = be_get_be_obst(irg);
226 arch_register_req_t *req = obstack_alloc(obst, sizeof(*req));
227 memset(req, 0, sizeof(*req));
/* Set the register requirement of input `pos`.  NOTE(review): the store
 * through `rd` is in an elided line; only address-taking and the bounds
 * assert are visible here. */
231 void be_set_constr_in(ir_node *node, int pos, const arch_register_req_t *req)
233 const be_node_attr_t *attr = get_irn_generic_attr_const(node);
234 be_reg_data_t *rd = &attr->reg_data[pos];
235 assert(pos < ARR_LEN(attr->reg_data));

/* Set the register requirement of output `pos` via the node's backend info. */
239 void be_set_constr_out(ir_node *node, int pos, const arch_register_req_t *req)
241 backend_info_t *info = be_get_info(node);
242 info->out_infos[pos].req = req;
246 * Initializes the generic attribute of all be nodes and return it.
248 static void *init_node_attr(ir_node *node, int n_inputs, int n_outputs)
250 ir_graph *irg = get_irn_irg(node);
251 struct obstack *obst = be_get_be_obst(irg);
252 be_node_attr_t *a = get_irn_generic_attr(node);
253 backend_info_t *info = be_get_info(node);
255 memset(a, 0, sizeof(get_op_attr_size(get_irn_op(node))));
259 a->reg_data = NEW_ARR_D(be_reg_data_t, obst, n_inputs);
260 for (i = 0; i < n_inputs; ++i) {
261 a->reg_data[i].in_req = arch_no_register_req;
264 a->reg_data = NEW_ARR_F(be_reg_data_t, 0);
267 if (n_outputs >= 0) {
269 info->out_infos = NEW_ARR_D(reg_out_info_t, obst, n_outputs);
270 memset(info->out_infos, 0, n_outputs * sizeof(info->out_infos[0]));
271 for (i = 0; i < n_outputs; ++i) {
272 info->out_infos[i].req = arch_no_register_req;
275 info->out_infos = NEW_ARR_F(reg_out_info_t, 0);
/* Append one output info slot (initialised to "no requirement") to a node
 * with a dynamically sized out_infos array. */
281 static void add_register_req_out(ir_node *node)
283 backend_info_t *info = be_get_info(node);
284 reg_out_info_t out_info;
285 memset(&out_info, 0, sizeof(out_info));
286 out_info.req = arch_no_register_req;
287 ARR_APP1(reg_out_info_t, info->out_infos, out_info);
290 static void add_register_req_in(ir_node *node)
292 be_node_attr_t *a = get_irn_generic_attr(node);
293 be_reg_data_t regreq;
294 memset(®req, 0, sizeof(regreq));
295 regreq.in_req = arch_no_register_req;
296 ARR_APP1(be_reg_data_t, a->reg_data, regreq);
/* Construct a be_Spill node (mode_M, 2 inputs: frame pointer and the value
 * to spill).  NOTE(review): the `in[]` setup and return are in elided lines. */
299 ir_node *be_new_Spill(const arch_register_class_t *cls,
300 const arch_register_class_t *cls_frame, ir_node *bl,
301 ir_node *frame, ir_node *to_spill)
306 ir_graph *irg = get_Block_irg(bl);
310 res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in);
311 a = init_node_attr(res, 2, 1);
315 be_node_set_reg_class_in(res, be_pos_Spill_frame, cls_frame);
316 be_node_set_reg_class_in(res, be_pos_Spill_val, cls);
318 * For spills and reloads, we return "none" as requirement for frame
319 * pointer, so every input is ok. Some backends need this (STA).
320 * Matze: we should investigate if this is really needed, this solution
321 * looks very hacky to me
323 be_set_constr_in(res, be_pos_Spill_frame, arch_no_register_req);
325 arch_set_out_register_req(res, 0, arch_no_register_req);

/* Construct a be_Reload node (2 inputs: frame pointer and spill memory);
 * marked rematerializable so the spiller may recompute it freely. */
330 ir_node *be_new_Reload(const arch_register_class_t *cls,
331 const arch_register_class_t *cls_frame, ir_node *block,
332 ir_node *frame, ir_node *mem, ir_mode *mode)
336 ir_graph *irg = get_Block_irg(block);
340 res = new_ir_node(NULL, irg, block, op_be_Reload, mode, 2, in);
342 init_node_attr(res, 2, 1);
343 be_node_set_reg_class_out(res, 0, cls);
345 be_node_set_reg_class_in(res, be_pos_Reload_frame, cls_frame);
346 arch_irn_set_flags(res, arch_irn_flags_rematerializable);
349 * For spills and reloads, we return "none" as requirement for frame
350 * pointer, so every input is ok. Some backends need this (e.g. STA).
351 * Matze: we should investigate if this is really needed, this solution
352 * looks very hacky to me
354 be_set_constr_in(res, be_pos_Reload_frame, arch_no_register_req);

/* Simple input accessors for Reload/Spill operands. */
359 ir_node *be_get_Reload_mem(const ir_node *irn)
361 assert(be_is_Reload(irn));
362 return get_irn_n(irn, be_pos_Reload_mem);

365 ir_node *be_get_Reload_frame(const ir_node *irn)
367 assert(be_is_Reload(irn));
368 return get_irn_n(irn, be_pos_Reload_frame);

371 ir_node *be_get_Spill_val(const ir_node *irn)
373 assert(be_is_Spill(irn));
374 return get_irn_n(irn, be_pos_Spill_val);

377 ir_node *be_get_Spill_frame(const ir_node *irn)
379 assert(be_is_Spill(irn));
380 return get_irn_n(irn, be_pos_Spill_frame);
/* Construct a be_Perm with n inputs and n outputs, all constrained to the
 * same register class. */
383 ir_node *be_new_Perm(const arch_register_class_t *cls, ir_node *block,
384 int n, ir_node *in[])
387 ir_graph *irg = get_Block_irg(block);
389 ir_node *irn = new_ir_node(NULL, irg, block, op_be_Perm, mode_T, n, in);
390 init_node_attr(irn, n, n);
391 for (i = 0; i < n; ++i) {
392 be_node_set_reg_class_in(irn, i, cls);
393 be_node_set_reg_class_out(irn, i, cls);

/* Shrink a Perm to new_size operands; map[i] gives the old index that
 * becomes new index i.  Register data and out infos are permuted along
 * with the in array. */
399 void be_Perm_reduce(ir_node *perm, int new_size, int *map)
401 int arity = get_irn_arity(perm);
402 be_reg_data_t *old_data = ALLOCAN(be_reg_data_t, arity);
403 reg_out_info_t *old_infos = ALLOCAN(reg_out_info_t, arity);
404 be_node_attr_t *attr = get_irn_generic_attr(perm);
405 backend_info_t *info = be_get_info(perm);
410 assert(be_is_Perm(perm));
411 assert(new_size <= arity);
413 new_in = alloca(new_size * sizeof(*new_in));
415 /* save the old register data */
416 memcpy(old_data, attr->reg_data, arity * sizeof(old_data[0]));
417 memcpy(old_infos, info->out_infos, arity * sizeof(old_infos[0]));
419 /* compose the new in array and set the new register data directly */
420 for (i = 0; i < new_size; ++i) {
422 new_in[i] = get_irn_n(perm, idx);
423 attr->reg_data[i] = old_data[idx];
424 info->out_infos[i] = old_infos[idx];
427 set_irn_in(perm, new_size, new_in);

/* Construct a be_MemPerm: input 0 is the frame pointer (constrained to the
 * stack pointer's class), followed by the n values to permute in memory.
 * Entity arrays for in/out slots are zero-allocated on the graph obstack. */
430 ir_node *be_new_MemPerm(ir_node *block, int n, ir_node *in[])
432 ir_graph *irg = get_Block_irg(block);
433 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
434 ir_node *frame = get_irg_frame(irg);
435 const arch_register_t *sp = arch_env->sp;
437 be_memperm_attr_t *attr;
440 real_in = ALLOCAN(ir_node*, n + 1);
442 memcpy(&real_in[1], in, n * sizeof(real_in[0]));
444 irn = new_ir_node(NULL, irg, block, op_be_MemPerm, mode_T, n+1, real_in);
446 init_node_attr(irn, n + 1, n);
447 be_node_set_reg_class_in(irn, 0, sp->reg_class);
449 attr = get_irn_generic_attr(irn);
450 attr->in_entities = OALLOCNZ(irg->obst, ir_entity*, n);
451 attr->out_entities = OALLOCNZ(irg->obst, ir_entity*, n);
/* Construct a be_Copy of `op`: 1-in/1-out, same class on both sides, with a
 * should_be_same constraint tying the output to input 0 so the allocator
 * prefers a coalesced assignment. */
456 ir_node *be_new_Copy(const arch_register_class_t *cls, ir_node *bl, ir_node *op)
460 arch_register_req_t *req;
461 ir_graph *irg = get_Block_irg(bl);
464 res = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), 1, in);
465 init_node_attr(res, 1, 1);
466 be_node_set_reg_class_in(res, 0, cls);
467 be_node_set_reg_class_out(res, 0, cls);
469 req = allocate_reg_req(res);
471 req->type = arch_register_req_type_should_be_same;
472 req->other_same = 1U << 0; /* bitmask: "same as input 0" */
474 be_set_constr_out(res, 0, req);

479 ir_node *be_get_Copy_op(const ir_node *cpy)
481 return get_irn_n(cpy, be_pos_Copy_op);

484 void be_set_Copy_op(ir_node *cpy, ir_node *op)
486 set_irn_n(cpy, be_pos_Copy_op, op);

/* Construct a be_Keep with dynamic arity: created with no inputs, then each
 * kept node is appended together with a fresh input requirement slot. */
489 ir_node *be_new_Keep(ir_node *block, int n, ir_node *in[])
493 ir_graph *irg = get_Block_irg(block);
495 res = new_ir_node(NULL, irg, block, op_be_Keep, mode_ANY, -1, NULL);
496 init_node_attr(res, -1, 1);
498 for (i = 0; i < n; ++i) {
499 add_irn_n(res, in[i]);
500 add_register_req_in(res);

/* Append one more kept value to an existing be_Keep, constrained to cls. */
507 void be_Keep_add_node(ir_node *keep, const arch_register_class_t *cls, ir_node *node)
511 assert(be_is_Keep(keep));
512 n = add_irn_n(keep, node);
513 add_register_req_in(keep);
514 be_node_set_reg_class_in(keep, n, cls);
/* Construct a be_Call.  The fixed prefix inputs are mem, stack pointer and
 * the call target, followed by the n argument nodes. */
517 ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem,
518 ir_node *sp, ir_node *ptr, int n_outs, int n, ir_node *in[],
522 int real_n = be_pos_Call_first_arg + n;
526 NEW_ARR_A(ir_node *, real_in, real_n);
527 real_in[be_pos_Call_mem] = mem;
528 real_in[be_pos_Call_sp] = sp;
529 real_in[be_pos_Call_ptr] = ptr;
530 memcpy(&real_in[be_pos_Call_first_arg], in, n * sizeof(in[0]));
532 irn = new_ir_node(dbg, irg, bl, op_be_Call, mode_T, real_n, real_in);
533 a = init_node_attr(irn, real_n, n_outs);
535 a->call_tp = call_tp;

/* be_Call attribute accessors: called entity, call type, pop amount.
 * NOTE(review): the actual return/assign statements of several accessors
 * are in elided lines. */
540 ir_entity *be_Call_get_entity(const ir_node *call)
542 const be_call_attr_t *a = get_irn_generic_attr_const(call);
543 assert(be_is_Call(call));

547 void be_Call_set_entity(ir_node *call, ir_entity *ent)
549 be_call_attr_t *a = get_irn_generic_attr(call);
550 assert(be_is_Call(call));

554 ir_type *be_Call_get_type(ir_node *call)
556 const be_call_attr_t *a = get_irn_generic_attr_const(call);
557 assert(be_is_Call(call));

561 void be_Call_set_type(ir_node *call, ir_type *call_tp)
563 be_call_attr_t *a = get_irn_generic_attr(call);
564 assert(be_is_Call(call));
565 a->call_tp = call_tp;

568 void be_Call_set_pop(ir_node *call, unsigned pop)
570 be_call_attr_t *a = get_irn_generic_attr(call);

574 unsigned be_Call_get_pop(const ir_node *call)
576 const be_call_attr_t *a = get_irn_generic_attr_const(call);

/* Construct a be_Return with dynamic arity; output 0 carries no register
 * requirement.  num_ret_vals records how many of the inputs are real
 * return values. */
580 ir_node *be_new_Return(dbg_info *dbg, ir_graph *irg, ir_node *block, int n_res,
581 unsigned pop, int n, ir_node *in[])
587 res = new_ir_node(dbg, irg, block, op_be_Return, mode_X, -1, NULL);
588 init_node_attr(res, -1, 1);
589 for (i = 0; i < n; ++i) {
590 add_irn_n(res, in[i]);
591 add_register_req_in(res);
593 be_set_constr_out(res, 0, arch_no_register_req);
595 a = get_irn_generic_attr(res);
596 a->num_ret_vals = n_res;

/* be_Return attribute accessors. */
603 int be_Return_get_n_rets(const ir_node *ret)
605 const be_return_attr_t *a = get_irn_generic_attr_const(ret);
606 return a->num_ret_vals;

609 unsigned be_Return_get_pop(const ir_node *ret)
611 const be_return_attr_t *a = get_irn_generic_attr_const(ret);

615 int be_Return_get_emit_pop(const ir_node *ret)
617 const be_return_attr_t *a = get_irn_generic_attr_const(ret);

621 void be_Return_set_emit_pop(ir_node *ret, int emit_pop)
623 be_return_attr_t *a = get_irn_generic_attr(ret);
624 a->emit_pop = emit_pop;

/* Append an extra input to a be_Return, returning its position. */
627 int be_Return_append_node(ir_node *ret, ir_node *node)
631 pos = add_irn_n(ret, node);
632 add_register_req_in(ret);
/* Construct a be_IncSP: adjusts the stack pointer by `offset`; in- and
 * output are constrained to the stack-pointer register. */
637 ir_node *be_new_IncSP(const arch_register_t *sp, ir_node *bl,
638 ir_node *old_sp, int offset, int align)
643 ir_graph *irg = get_Block_irg(bl);
646 irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode,
647 sizeof(in) / sizeof(in[0]), in);
648 a = init_node_attr(irn, 1, 1);
652 /* Set output constraint to stack register. */
653 be_node_set_reg_class_in(irn, 0, sp->reg_class);
654 be_set_constr_single_reg_out(irn, 0, sp, arch_register_req_type_produces_sp);

/* Construct a be_AddSP: increases the stack by a dynamic size value. */
659 ir_node *be_new_AddSP(const arch_register_t *sp, ir_node *bl, ir_node *old_sp,
664 ir_node *in[be_pos_AddSP_last];
665 const arch_register_class_t *cls;
668 in[be_pos_AddSP_old_sp] = old_sp;
669 in[be_pos_AddSP_size] = sz;
671 irg = get_Block_irg(bl);
672 irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, be_pos_AddSP_last, in);
673 a = init_node_attr(irn, be_pos_AddSP_last, pn_be_AddSP_last);
675 /* Set output constraint to stack register. */
676 be_set_constr_single_reg_in(irn, be_pos_AddSP_old_sp, sp, 0);
677 be_node_set_reg_class_in(irn, be_pos_AddSP_size, arch_register_get_class(sp));
678 be_set_constr_single_reg_out(irn, pn_be_AddSP_sp, sp,
679 arch_register_req_type_produces_sp);
681 cls = arch_register_get_class(sp);

/* Construct a be_SubSP: decreases the stack by a dynamic size value
 * (mirror image of be_new_AddSP). */
686 ir_node *be_new_SubSP(const arch_register_t *sp, ir_node *bl, ir_node *old_sp, ir_node *sz)
690 ir_node *in[be_pos_SubSP_last];
693 in[be_pos_SubSP_old_sp] = old_sp;
694 in[be_pos_SubSP_size] = sz;
696 irg = get_Block_irg(bl);
697 irn = new_ir_node(NULL, irg, bl, op_be_SubSP, mode_T, be_pos_SubSP_last, in);
698 a = init_node_attr(irn, be_pos_SubSP_last, pn_be_SubSP_last);
700 /* Set output constraint to stack register. */
701 be_set_constr_single_reg_in(irn, be_pos_SubSP_old_sp, sp, 0);
702 be_node_set_reg_class_in(irn, be_pos_SubSP_size, arch_register_get_class(sp));
703 be_set_constr_single_reg_out(irn, pn_be_SubSP_sp, sp, arch_register_req_type_produces_sp);

/* Construct a be_Start with n_outs dynamically-appended output slots. */
708 ir_node *be_new_Start(dbg_info *dbgi, ir_node *bl, int n_outs)
712 ir_graph *irg = get_Block_irg(bl);
714 res = new_ir_node(dbgi, irg, bl, op_be_Start, mode_T, 0, NULL);
715 init_node_attr(res, 0, -1);
716 for (i = 0; i < n_outs; ++i) {
717 add_register_req_out(res);

/* Construct a be_FrameAddr (frame-relative address of `ent`); passed
 * through optimize_node so identical FrameAddrs can be CSE'd. */
723 ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_node *bl, ir_node *frame, ir_entity *ent)
728 ir_graph *irg = get_Block_irg(bl);
731 irn = new_ir_node(NULL, irg, bl, op_be_FrameAddr, get_irn_mode(frame), 1, in);
732 a = init_node_attr(irn, 1, 1);
735 be_node_set_reg_class_in(irn, 0, cls_frame);
736 be_node_set_reg_class_out(irn, 0, cls_frame);
738 return optimize_node(irn);

741 ir_node *be_get_FrameAddr_frame(const ir_node *node)
743 assert(be_is_FrameAddr(node));
744 return get_irn_n(node, be_pos_FrameAddr_ptr);

/* NOTE(review): the return of the entity is in an elided line. */
747 ir_entity *be_get_FrameAddr_entity(const ir_node *node)
749 const be_frame_attr_t *attr = get_irn_generic_attr_const(node);
/* Construct a be_CopyKeep: copies `src` (input 0) while also keeping the n
 * nodes in in_keep[] alive as additional inputs. */
753 ir_node *be_new_CopyKeep(const arch_register_class_t *cls, ir_node *bl, ir_node *src, int n, ir_node *in_keep[], ir_mode *mode)
756 ir_node **in = ALLOCAN(ir_node*, n + 1);
757 ir_graph *irg = get_Block_irg(bl);
760 memcpy(&in[1], in_keep, n * sizeof(in[0]));
761 irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, n + 1, in);
762 init_node_attr(irn, n + 1, 1);
763 be_node_set_reg_class_in(irn, 0, cls);
764 be_node_set_reg_class_out(irn, 0, cls);

/* Convenience wrapper keeping exactly one extra node alive. */
769 ir_node *be_new_CopyKeep_single(const arch_register_class_t *cls, ir_node *bl, ir_node *src, ir_node *keep, ir_mode *mode)
771 return be_new_CopyKeep(cls, bl, src, 1, &keep, mode);

774 ir_node *be_get_CopyKeep_op(const ir_node *cpy)
776 return get_irn_n(cpy, be_pos_CopyKeep_op);

779 void be_set_CopyKeep_op(ir_node *cpy, ir_node *op)
781 set_irn_n(cpy, be_pos_CopyKeep_op, op);

/* Construct a be_Barrier with dynamic arity: every appended input gets a
 * matching input requirement and output info slot. */
784 ir_node *be_new_Barrier(ir_node *bl, int n, ir_node *in[])
788 ir_graph *irg = get_Block_irg(bl);
790 res = new_ir_node(NULL, irg, bl, op_be_Barrier, mode_T, -1, NULL);
791 init_node_attr(res, -1, -1);
792 for (i = 0; i < n; ++i) {
793 add_irn_n(res, in[i]);
794 add_register_req_in(res);
795 add_register_req_out(res);

/* Append a node to an existing Barrier and create the Proj for the new
 * pass-through output (the Proj return is in an elided line). */
801 ir_node *be_Barrier_append_node(ir_node *barrier, ir_node *node)
803 ir_mode *mode = get_irn_mode(node);
804 int n = add_irn_n(barrier, node);
806 ir_node *proj = new_r_Proj(barrier, mode, n);
807 add_register_req_in(barrier);
808 add_register_req_out(barrier);
/* True iff the opcode carries a be_frame_attr_t (the case labels are in
 * elided lines; Spill/Reload/FrameAddr are the users seen in this file). */
813 static bool be_has_frame_entity(const ir_node *irn)
815 switch (get_irn_opcode(irn)) {

/* Return the frame entity of a frame-carrying be node (NULL-return path for
 * other nodes is in an elided line). */
825 ir_entity *be_get_frame_entity(const ir_node *irn)
827 if (be_has_frame_entity(irn)) {
828 const be_frame_attr_t *a = get_irn_generic_attr_const(irn);

/* Return the frame offset of a frame-carrying be node. */
834 int be_get_frame_offset(const ir_node *irn)
836 assert(is_be_node(irn));
837 if (be_has_frame_entity(irn)) {
838 const be_frame_attr_t *a = get_irn_generic_attr_const(irn);

/* MemPerm in/out entity accessors; index is bounds-checked against the
 * entity arity (arity minus the frame-pointer input). */
844 void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity *ent)
846 const be_memperm_attr_t *attr = get_irn_generic_attr_const(irn);
848 assert(be_is_MemPerm(irn));
849 assert(n < be_get_MemPerm_entity_arity(irn));
851 attr->in_entities[n] = ent;

854 ir_entity* be_get_MemPerm_in_entity(const ir_node* irn, int n)
856 const be_memperm_attr_t *attr = get_irn_generic_attr_const(irn);
858 assert(be_is_MemPerm(irn));
859 assert(n < be_get_MemPerm_entity_arity(irn));
861 return attr->in_entities[n];

864 void be_set_MemPerm_out_entity(const ir_node *irn, int n, ir_entity *ent)
866 const be_memperm_attr_t *attr = get_irn_generic_attr_const(irn);
868 assert(be_is_MemPerm(irn));
869 assert(n < be_get_MemPerm_entity_arity(irn));
871 attr->out_entities[n] = ent;

874 ir_entity* be_get_MemPerm_out_entity(const ir_node* irn, int n)
876 const be_memperm_attr_t *attr = get_irn_generic_attr_const(irn);
878 assert(be_is_MemPerm(irn));
879 assert(n < be_get_MemPerm_entity_arity(irn));
881 return attr->out_entities[n];

/* Entity arity = node arity minus the frame-pointer input (input 0). */
884 int be_get_MemPerm_entity_arity(const ir_node *irn)
886 return get_irn_arity(irn) - 1;

/* Build a "limited" requirement pinning exactly one register: a one-bit
 * bitset over the register class, allocated on the given obstack. */
889 const arch_register_req_t *be_create_reg_req(struct obstack *obst,
890 const arch_register_t *reg, arch_register_req_type_t additional_types)
892 arch_register_req_t *req = obstack_alloc(obst, sizeof(*req));
893 const arch_register_class_t *cls = arch_register_get_class(reg);
894 unsigned *limited_bitset;
896 limited_bitset = rbitset_obstack_alloc(obst, arch_register_class_n_regs(cls));
897 rbitset_set(limited_bitset, arch_register_get_index(reg));
899 req->type = arch_register_req_type_limited | additional_types;
901 req->limited = limited_bitset;
/* Pin input `pos` to one specific register.  The common no-extra-flags case
 * reuses the register's cached single_req instead of allocating. */
906 void be_set_constr_single_reg_in(ir_node *node, int pos,
907 const arch_register_t *reg, arch_register_req_type_t additional_types)
909 const arch_register_req_t *req;
911 if (additional_types == 0) {
912 req = reg->single_req;
914 ir_graph *irg = get_irn_irg(node);
915 struct obstack *obst = be_get_be_obst(irg);
916 req = be_create_reg_req(obst, reg, additional_types);
918 be_set_constr_in(node, pos, req);

/* Pin output `pos` to one specific register, additionally propagating the
 * ignore flag for ignore registers, and pre-assigning the register. */
921 void be_set_constr_single_reg_out(ir_node *node, int pos,
922 const arch_register_t *reg, arch_register_req_type_t additional_types)
924 const arch_register_req_t *req;
926 /* if we have an ignore register, add ignore flag and just assign it */
927 if (reg->type & arch_register_type_ignore) {
928 additional_types |= arch_register_req_type_ignore;
931 if (additional_types == 0) {
932 req = reg->single_req;
934 ir_graph *irg = get_irn_irg(node);
935 struct obstack *obst = be_get_be_obst(irg);
936 req = be_create_reg_req(obst, reg, additional_types);
939 arch_irn_set_register(node, pos, reg);
940 be_set_constr_out(node, pos, req);

/* Class-only constraints reuse the class's cached class_req. */
943 void be_node_set_reg_class_in(ir_node *irn, int pos,
944 const arch_register_class_t *cls)
946 be_set_constr_in(irn, pos, cls->class_req);

949 void be_node_set_reg_class_out(ir_node *irn, int pos,
950 const arch_register_class_t *cls)
952 be_set_constr_out(irn, pos, cls->class_req);

/* IncSP accessors: predecessor SP, offset and alignment flag (some return
 * statements are in elided lines). */
955 ir_node *be_get_IncSP_pred(ir_node *irn)
957 assert(be_is_IncSP(irn));
958 return get_irn_n(irn, 0);

961 void be_set_IncSP_pred(ir_node *incsp, ir_node *pred)
963 assert(be_is_IncSP(incsp));
964 set_irn_n(incsp, 0, pred);

967 void be_set_IncSP_offset(ir_node *irn, int offset)
969 be_incsp_attr_t *a = get_irn_generic_attr(irn);
970 assert(be_is_IncSP(irn));

974 int be_get_IncSP_offset(const ir_node *irn)
976 const be_incsp_attr_t *a = get_irn_generic_attr_const(irn);
977 assert(be_is_IncSP(irn));

981 int be_get_IncSP_align(const ir_node *irn)
983 const be_incsp_attr_t *a = get_irn_generic_attr_const(irn);
984 assert(be_is_IncSP(irn));

/* Convenience: spill `irn` into the graph's frame, deriving both register
 * classes from the node and the frame pointer. */
988 ir_node *be_spill(ir_node *block, ir_node *irn)
990 ir_graph *irg = get_Block_irg(block);
991 ir_node *frame = get_irg_frame(irg);
992 const arch_register_class_t *cls = arch_get_irn_reg_class_out(irn);
993 const arch_register_class_t *cls_frame = arch_get_irn_reg_class_out(frame);
996 spill = be_new_Spill(cls, cls_frame, block, frame, irn);

/* Convenience: build a Reload from `spill` and schedule it relative to
 * `insert` (after the block head if insert is a block, else before insert). */
1000 ir_node *be_reload(const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
1003 ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
1004 ir_graph *irg = get_Block_irg(bl);
1005 ir_node *frame = get_irg_frame(irg);
1006 const arch_register_class_t *cls_frame = arch_get_irn_reg_class_out(frame);
1008 assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
1010 reload = be_new_Reload(cls, cls_frame, bl, frame, spill, mode);
1012 if (is_Block(insert)) {
1013 insert = sched_skip(insert, 0, sched_skip_cf_predicator, NULL);
1014 sched_add_after(insert, reload);
1016 sched_add_before(insert, reload);
/* arch_irn_ops callback: input requirement lookup with out-of-range
 * positions mapped to "no requirement". */
1024 static const arch_register_req_t *be_node_get_in_reg_req(
1025 const ir_node *irn, int pos)
1027 const be_node_attr_t *a = get_irn_generic_attr_const(irn);
1030 if (pos >= get_irn_arity(irn) || pos >= ARR_LEN(a->reg_data))
1031 return arch_no_register_req;
1033 return a->reg_data[pos].in_req;

/* arch_irn_ops callback: classify the spill-related be opcodes (default
 * case is in an elided line). */
1036 static arch_irn_class_t be_node_classify(const ir_node *irn)
1038 switch (get_irn_opcode(irn)) {
1039 case beo_Spill: return arch_irn_class_spill;
1040 case beo_Reload: return arch_irn_class_reload;
1041 case beo_Perm: return arch_irn_class_perm;
1042 case beo_Copy: return arch_irn_class_copy;

1047 static ir_entity *be_node_get_frame_entity(const ir_node *irn)
1049 return be_get_frame_entity(irn);

1052 void be_node_set_frame_entity(ir_node *irn, ir_entity *ent)
1056 assert(be_has_frame_entity(irn));
1058 a = get_irn_generic_attr(irn);

/* arch_irn_ops callback: silently ignores nodes without frame attributes. */
1062 static void be_node_set_frame_offset(ir_node *irn, int offset)
1066 if (!be_has_frame_entity(irn))
1069 a = get_irn_generic_attr(irn);

/* arch_irn_ops callback: IncSP biases by its offset, Call pops bytes
 * (negative bias); other opcodes are handled in elided lines. */
1073 static int be_node_get_sp_bias(const ir_node *irn)
1075 if (be_is_IncSP(irn))
1076 return be_get_IncSP_offset(irn);
1077 if (be_is_Call(irn))
1078 return -(int)be_Call_get_pop(irn);

/* vtable wiring for the generic be nodes. */
1086 static const arch_irn_ops_t be_node_irn_ops = {
1087 be_node_get_in_reg_req,
1089 be_node_get_frame_entity,
1090 be_node_set_frame_offset,
1091 be_node_get_sp_bias,
1092 NULL, /* get_inverse */
1093 NULL, /* get_op_estimated_cost */
1094 NULL, /* possible_memory_operand */
1095 NULL, /* perform_memory_operand */

/* Dummy callbacks attached to middle-end opcodes that have no backend
 * semantics; set_frame_offset must never be reached for them. */
1098 static const arch_register_req_t *dummy_reg_req(
1099 const ir_node *node, int pos)
1103 return arch_no_register_req;

1106 static arch_irn_class_t dummy_classify(const ir_node *node)

1112 static ir_entity* dummy_get_frame_entity(const ir_node *node)

1118 static void dummy_set_frame_offset(ir_node *node, int bias)
1122 panic("dummy_set_frame_offset() should not be called");

1125 static int dummy_get_sp_bias(const ir_node *node)

1131 /* for "middleend" nodes */
1132 static const arch_irn_ops_t dummy_be_irn_ops = {
1135 dummy_get_frame_entity,
1136 dummy_set_frame_offset,
1138 NULL, /* get_inverse */
1139 NULL, /* get_op_estimated_cost */
1140 NULL, /* possible_memory_operand */
1141 NULL, /* perform_memory_operand */

/* Create a Phi with backend out-info attached: one out slot, whose
 * requirement is the class requirement when cls is given (the NULL-cls
 * branch sets "no requirement"; branch structure partly elided). */
1146 ir_node *be_new_Phi(ir_node *block, int n_ins, ir_node **ins, ir_mode *mode,
1147 const arch_register_class_t *cls)
1149 struct obstack *obst = be_get_be_obst(get_irn_irg(block));
1150 backend_info_t *info;
1152 ir_node *phi = new_r_Phi(block, n_ins, ins, mode);
1153 info = be_get_info(phi);
1154 info->out_infos = NEW_ARR_D(reg_out_info_t, obst, 1);
1155 memset(info->out_infos, 0, 1 * sizeof(info->out_infos[0]));
1157 info->out_infos[0].req = arch_no_register_req;
1159 info->out_infos[0].req = cls->class_req;
1166 * Guess correct register class of a phi node by looking at its arguments
/* DFS through Phi-only operands, using `visited` (lazily created pset) to
 * break cycles; returns the requirement of the first non-Phi operand. */
1168 static const arch_register_req_t *get_Phi_reg_req_recursive(const ir_node *phi,
1171 int n = get_irn_arity(phi);
1175 if (*visited && pset_find_ptr(*visited, phi))
1178 for (i = 0; i < n; ++i) {
1179 op = get_irn_n(phi, i);
1180 /* Matze: don't we unnecessary constraint our phis with this?
1181 * we only need to take the regclass IMO*/
1183 return arch_get_register_req_out(op);
1187 * The operands of that Phi were all Phis themselves.
1188 * We have to start a DFS for a non-Phi argument now.
1191 *visited = pset_new_ptr(16);
1193 pset_insert_ptr(*visited, phi);
1195 for (i = 0; i < n; ++i) {
1196 const arch_register_req_t *req;
1197 op = get_irn_n(phi, i);
1198 req = get_Phi_reg_req_recursive(op, visited);

/* arch_irn_ops callback for Phis: lazily compute and cache the out
 * requirement; non-data modes get "no requirement", wide values get a
 * fresh requirement preserving the width, otherwise the class req. */
1206 static const arch_register_req_t *phi_get_irn_reg_req(const ir_node *node,
1209 backend_info_t *info = be_get_info(node);
1210 const arch_register_req_t *req = info->out_infos[0].req;
1216 if (!mode_is_datab(get_irn_mode(node))) {
1217 req = arch_no_register_req;
1219 pset *visited = NULL;
1221 req = get_Phi_reg_req_recursive(node, &visited);
1222 assert(req->cls != NULL);
1223 if (req->width > 1) {
1224 arch_register_req_t *new_req = allocate_reg_req(node);
1225 new_req->type = arch_register_req_type_normal;
1226 new_req->cls = req->cls;
1227 new_req->other_same = 0;
1228 new_req->other_different = 0;
1229 new_req->width = req->width;
1232 req = req->cls->class_req;
1235 if (visited != NULL)
1238 info->out_infos[0].req = req;

/* Explicitly pin a Phi's out requirement (used by register allocation). */
1242 void be_set_phi_reg_req(ir_node *node, const arch_register_req_t *req)
1244 backend_info_t *info = be_get_info(node);
1245 info->out_infos[0].req = req;
1247 assert(mode_is_datab(get_irn_mode(node)));

/* ir_op dump callback for Phis: opcode, mode, and requirement info. */
1250 void be_dump_phi_reg_reqs(FILE *F, ir_node *node, dump_reason_t reason)
1253 case dump_node_opcode_txt:
1254 fputs(get_op_name(get_irn_op(node)), F);
1256 case dump_node_mode_txt:
1257 fprintf(F, "%s", get_mode_name(get_irn_mode(node)));
1259 case dump_node_nodeattr_txt:
1261 case dump_node_info_txt:
1263 backend_info_t *info = be_get_info(node);
1264 if (info != NULL && info->out_infos[0].req != NULL) {
1265 arch_dump_reqs_and_registers(F, node);

/* vtable wiring for Phi nodes. */
1275 static const arch_irn_ops_t phi_irn_ops = {
1276 phi_get_irn_reg_req,
1278 dummy_get_frame_entity,
1279 dummy_set_frame_offset,
1281 NULL, /* get_inverse */
1282 NULL, /* get_op_estimated_cost */
1283 NULL, /* possible_memory_operand */
1284 NULL, /* perform_memory_operand */
1290 * ir_op-Operation: dump a be node to file
/* Generic textual dumper for be nodes: opcode name, mode (Copy/CopyKeep
 * only), opcode-specific node attributes, then detailed info including
 * requirements/registers, frame entity, and per-opcode extras. */
1292 static void dump_node(FILE *f, ir_node *irn, dump_reason_t reason)
1294 assert(is_be_node(irn));
1297 case dump_node_opcode_txt:
1298 fputs(get_op_name(get_irn_op(irn)), f);
1300 case dump_node_mode_txt:
1301 if (be_is_Copy(irn) || be_is_CopyKeep(irn)) {
1302 fprintf(f, "%s", get_mode_name(get_irn_mode(irn)));
1305 case dump_node_nodeattr_txt:
1306 if (be_is_Call(irn)) {
1307 const be_call_attr_t *a = get_irn_generic_attr_const(irn);
1309 fprintf(f, " [%s] ", get_entity_name(a->ent));
1311 if (be_is_IncSP(irn)) {
1312 const be_incsp_attr_t *attr = get_irn_generic_attr_const(irn);
/* BE_STACK_FRAME_SIZE_* are sentinel offsets resolved after frame layout */
1313 if (attr->offset == BE_STACK_FRAME_SIZE_EXPAND) {
1314 fprintf(f, " [Setup Stackframe] ");
1315 } else if (attr->offset == BE_STACK_FRAME_SIZE_SHRINK) {
1316 fprintf(f, " [Destroy Stackframe] ");
1318 fprintf(f, " [%d] ", attr->offset);
1322 case dump_node_info_txt:
1323 arch_dump_reqs_and_registers(f, irn);
1325 if (be_has_frame_entity(irn)) {
1326 const be_frame_attr_t *a = get_irn_generic_attr_const(irn);
1328 unsigned size = get_type_size_bytes(get_entity_type(a->ent));
1329 ir_fprintf(f, "frame entity: %+F, offset 0x%x (%d), size 0x%x (%d) bytes\n",
1330 a->ent, a->offset, a->offset, size, size);
1335 switch (get_irn_opcode(irn)) {
1337 const be_incsp_attr_t *a = get_irn_generic_attr_const(irn);
1338 fprintf(f, "align: %d\n", a->align);
1339 if (a->offset == BE_STACK_FRAME_SIZE_EXPAND)
1340 fprintf(f, "offset: FRAME_SIZE\n");
1341 else if (a->offset == BE_STACK_FRAME_SIZE_SHRINK)
1342 fprintf(f, "offset: -FRAME SIZE\n");
1344 fprintf(f, "offset: %d\n", a->offset);
1348 const be_call_attr_t *a = get_irn_generic_attr_const(irn);
1351 fprintf(f, "\ncalling: %s\n", get_entity_name(a->ent));
1356 for (i = 0; i < be_get_MemPerm_entity_arity(irn); ++i) {
1357 ir_entity *in, *out;
1358 in = be_get_MemPerm_in_entity(irn, i);
1359 out = be_get_MemPerm_out_entity(irn, i);
1361 fprintf(f, "\nin[%d]: %s\n", i, get_entity_name(in));
1364 fprintf(f, "\nout[%d]: %s\n", i, get_entity_name(out));
1378 * Copies the backend specific attributes from old node to new node.
/* ir_op copy_attr callback: deep-copies out_infos and reg_data, choosing
 * flexible arrays (ARR_F) for dynamic-arity opcodes and obstack arrays
 * (ARR_D) otherwise. */
1380 static void copy_attr(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
1382 const be_node_attr_t *old_attr = get_irn_generic_attr_const(old_node);
1383 be_node_attr_t *new_attr = get_irn_generic_attr(new_node);
1384 struct obstack *obst = be_get_be_obst(irg);
1385 backend_info_t *old_info = be_get_info(old_node);
1386 backend_info_t *new_info = be_get_info(new_node);
1388 assert(is_be_node(old_node));
1389 assert(is_be_node(new_node));
1391 memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
1393 if (old_info->out_infos != NULL) {
1394 unsigned n_outs = ARR_LEN(old_info->out_infos);
1395 /* need dynamic out infos? */
1396 if (be_is_Barrier(new_node) || be_is_Perm(new_node)) {
1397 new_info->out_infos = NEW_ARR_F(reg_out_info_t, n_outs);
1399 new_info->out_infos = NEW_ARR_D(reg_out_info_t, obst, n_outs);
1401 memcpy(new_info->out_infos, old_info->out_infos,
1402 n_outs * sizeof(new_info->out_infos[0]));
1404 new_info->out_infos = NULL;
1408 if (old_attr->reg_data != NULL) {
1409 unsigned n_ins = ARR_LEN(old_attr->reg_data);
1410 /* need dynamic in infos? */
1411 if (get_irn_op(old_node)->opar == oparity_dynamic) {
1412 new_attr->reg_data = NEW_ARR_F(be_reg_data_t, n_ins);
1414 new_attr->reg_data = NEW_ARR_D(be_reg_data_t, obst, n_ins);
1416 memcpy(new_attr->reg_data, old_attr->reg_data,
1417 n_ins * sizeof(be_reg_data_t));
1419 new_attr->reg_data = NULL;

/* ops table for all be opcodes; initializer fields are in elided lines. */
1423 static const ir_op_ops be_node_op_ops = {

/* A node is a be node iff its op's be_ops points at our vtable. */
1444 int is_be_node(const ir_node *irn)
1446 return get_op_ops(get_irn_op(irn))->be_ops == &be_node_irn_ops;
1449 void be_init_op(void)
1453 /* Acquire all needed opcodes. */
1454 op_be_Spill = new_ir_op(beo_Spill, "be_Spill", op_pin_state_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
1455 op_be_Reload = new_ir_op(beo_Reload, "be_Reload", op_pin_state_pinned, irop_flag_none, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
1456 op_be_Perm = new_ir_op(beo_Perm, "be_Perm", op_pin_state_pinned, irop_flag_none, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
1457 op_be_MemPerm = new_ir_op(beo_MemPerm, "be_MemPerm", op_pin_state_pinned, irop_flag_none, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops);
1458 op_be_Copy = new_ir_op(beo_Copy, "be_Copy", op_pin_state_floats, irop_flag_none, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
1459 op_be_Keep = new_ir_op(beo_Keep, "be_Keep", op_pin_state_floats, irop_flag_keep, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops);
1460 op_be_CopyKeep = new_ir_op(beo_CopyKeep, "be_CopyKeep", op_pin_state_floats, irop_flag_keep, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
1461 op_be_Call = new_ir_op(beo_Call, "be_Call", op_pin_state_pinned, irop_flag_fragile|irop_flag_uses_memory, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
1462 op_be_Return = new_ir_op(beo_Return, "be_Return", op_pin_state_pinned, irop_flag_cfopcode, oparity_dynamic, 0, sizeof(be_return_attr_t), &be_node_op_ops);
1463 op_be_AddSP = new_ir_op(beo_AddSP, "be_AddSP", op_pin_state_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
1464 op_be_SubSP = new_ir_op(beo_SubSP, "be_SubSP", op_pin_state_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
1465 op_be_IncSP = new_ir_op(beo_IncSP, "be_IncSP", op_pin_state_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_incsp_attr_t), &be_node_op_ops);
1466 op_be_Start = new_ir_op(beo_Start, "be_Start", op_pin_state_pinned, irop_flag_none, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
1467 op_be_FrameAddr = new_ir_op(beo_FrameAddr, "be_FrameAddr", op_pin_state_floats, irop_flag_none, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
1468 op_be_Barrier = new_ir_op(beo_Barrier, "be_Barrier", op_pin_state_pinned, irop_flag_none, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops);
1470 op_be_Spill->ops.node_cmp_attr = FrameAddr_cmp_attr;
1471 op_be_Reload->ops.node_cmp_attr = FrameAddr_cmp_attr;
1472 op_be_Perm->ops.node_cmp_attr = node_cmp_attr;
1473 op_be_MemPerm->ops.node_cmp_attr = node_cmp_attr;
1474 op_be_Copy->ops.node_cmp_attr = node_cmp_attr;
1475 op_be_Keep->ops.node_cmp_attr = node_cmp_attr;
1476 op_be_CopyKeep->ops.node_cmp_attr = node_cmp_attr;
1477 op_be_Call->ops.node_cmp_attr = Call_cmp_attr;
1478 op_be_Return->ops.node_cmp_attr = Return_cmp_attr;
1479 op_be_AddSP->ops.node_cmp_attr = node_cmp_attr;
1480 op_be_SubSP->ops.node_cmp_attr = node_cmp_attr;
1481 op_be_IncSP->ops.node_cmp_attr = IncSP_cmp_attr;
1482 op_be_Start->ops.node_cmp_attr = node_cmp_attr;
1483 op_be_FrameAddr->ops.node_cmp_attr = FrameAddr_cmp_attr;
1484 op_be_Barrier->ops.node_cmp_attr = node_cmp_attr;
1486 /* attach out dummy_ops to middle end nodes */
1487 for (opc = iro_First; opc <= iro_Last; ++opc) {
1488 ir_op *op = get_irp_opcode(opc);
1489 assert(op->ops.be_ops == NULL);
1490 op->ops.be_ops = &dummy_be_irn_ops;
1493 op_Phi->ops.be_ops = &phi_irn_ops;