2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
49 /* some constants fixing the positions of nodes predecessors
51 #define CALL_PARAM_OFFSET 2
52 #define BUILDIN_PARAM_OFFSET 1
53 #define SEL_INDEX_OFFSET 2
54 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
55 #define END_KEEPALIVE_OFFSET 0
/* Human-readable names for the pn_Cmp relation codes; indexed by the numeric
 * value of the code (used by get_pnc_string() below).
 * NOTE(review): the closing "};" of this table is outside the visible chunk. */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
65 * returns the pnc name from an pnc constant
67 const char *get_pnc_string(int pnc)
69 assert(pnc >= 0 && pnc <
70 (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
71 return pnc_name_arr[pnc];
/**
 * Calculates the negated (Complement(R)) pnc condition.
 *
 * NOTE(review): only the head of this function is visible in this chunk;
 * presumably the elided lines complement the relation bits and add pn_Cmp_Uo
 * only for floating-point modes — confirm against the full source.
 */
pn_Cmp get_negated_pnc(long pnc, ir_mode *mode)
	/* do NOT add the Uo bit for non-floating point values */
	if (! mode_is_float(mode))
88 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
89 pn_Cmp get_inversed_pnc(long pnc)
91 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
92 long lesser = pnc & pn_Cmp_Lt;
93 long greater = pnc & pn_Cmp_Gt;
95 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;
113 /* register new space for every node */
114 unsigned firm_register_additional_node_data(unsigned size)
116 assert(!forbid_new_data && "Too late to register additional node data");
121 return firm_add_node_size += size;
/* One-time initialisation of the ir node module. */
void init_irnode(void)
	/* Forbid the addition of new data to an ir node. */
	/* NOTE(review): the remainder of this function is outside the visible
	 * chunk; presumably forbid_new_data is set to 1 here — confirm. */

/* Helper type: the offset of its member yields the worst-case alignment
 * required for node attribute storage (used in new_ir_node()). */
struct struct_align {
/**
 * irnode constructor.
 * Create a new irnode in irg, with an op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic array is created.
 */
ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
                     ir_mode *mode, int arity, ir_node *const *in)
	/* round the registered custom-data size up to the worst-case attribute
	 * alignment (align is a power of two minus one here) */
	unsigned align = offsetof(struct struct_align, s) - 1;
	unsigned add_node_size = (firm_add_node_size + align) & ~align;
	/* total size: node header + op-specific attributes + registered extra data */
	size_t node_size = offsetof(ir_node, attr) + op->attr_size + add_node_size;

	/* nodes live on the graph's obstack; the custom data area is placed
	 * BEFORE the ir_node itself, so res points past it */
	p = (char*)obstack_alloc(irg->obst, node_size);
	memset(p, 0, node_size);
	res = (ir_node *)(p + add_node_size);

	res->kind = k_ir_node;
	res->node_idx = irg_register_node_idx(irg, res);

	/* in[0] is always reserved for the block predecessor */
	res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */

	/* not nice but necessary: End and Sync must always have a flexible array */
	/* NOTE(review): the if/else structure around the following allocations is
	 * partially elided in this chunk — confirm branch nesting in full source. */
	if (op == op_End || op == op_Sync)
	res->in = NEW_ARR_F(ir_node *, (arity+1));
	/* fixed-arity nodes keep their in-array on the graph obstack instead */
	res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
	memcpy(&res->in[1], in, sizeof(ir_node *) * arity);

	set_irn_dbg_info(res, db);
	res->node_nr = get_irp_new_node_nr();

	/* initialise the out-edge bookkeeping for every edge kind */
	for (i = 0; i < EDGE_KIND_LAST; ++i) {
		INIT_LIST_HEAD(&res->edge_info[i].outs_head);
		/* edges will be build immediately */
		res->edge_info[i].edges_built = 1;
		res->edge_info[i].out_count = 0;

	/* don't put this into the for loop, arity is -1 for some nodes! */
	edges_notify_edge(res, -1, res->in[0], NULL, irg);
	for (i = 1; i <= arity; ++i)
		edges_notify_edge(res, i - 1, res->in[i], NULL, irg);

	/* let observers (debuggers, statistics) know about the new node */
	hook_new_node(irg, res);
	if (get_irg_phase_state(irg) == phase_backend) {
		be_info_new_node(res);
207 /*-- getting some parameters from ir_nodes --*/
209 int (is_ir_node)(const void *thing)
211 return _is_ir_node(thing);
214 int (get_irn_arity)(const ir_node *node)
216 return _get_irn_arity(node);
/* Returns the array with ins. This array is shifted with respect to the
   array accessed by get_irn_n: The block operand is at position 0 not -1.
   (@@@ This should be changed.)
   The order of the predecessors in this array is not guaranteed, except that
   lists of operands as predecessors of Block or arguments of a Call are
   consecutive (NOTE(review): comment and body truncated in this chunk). */
ir_node **get_irn_in(const ir_node *node)
/* Replaces ALL predecessors of a node at once, keeping the reverse-edge
 * information consistent and reallocating the in-array when the arity
 * changes. in[0] of the internal array is the block, operands start at 1. */
void set_irn_in(ir_node *node, int arity, ir_node **in)
	ir_graph *irg = get_irn_irg(node);

	/* positions present in both old and new in-set: report the edge change */
	for (i = 0; i < arity; i++) {
		if (i < (int)ARR_LEN(*pOld_in)-1)
			edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
		/* position is new — there was no previous target at this index */
			edges_notify_edge(node, i, in[i], NULL, irg);
	/* old positions beyond the new arity: these edges disappear */
	for (;i < (int)ARR_LEN(*pOld_in)-1; i++) {
		edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
	}
	/* arity changed: allocate a fresh in-array, preserving the block slot */
	if (arity != (int)ARR_LEN(*pOld_in) - 1) {
		ir_node * block = (*pOld_in)[0];
		*pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
		(*pOld_in)[0] = block;
	/* loop info records backedges by position: repair it after the change */
	fix_backedges(irg->obst, node);

	/* finally copy the new operands behind the block slot */
	memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
259 ir_node *(get_irn_n)(const ir_node *node, int n)
261 return _get_irn_n(node, n);
/* Sets the n-th predecessor of a node (n == -1 addresses the block) and
 * keeps hooks and reverse-edge information consistent. */
void set_irn_n(ir_node *node, int n, ir_node *in)
	ir_graph *irg = get_irn_irg(node);
	assert(node && node->kind == k_ir_node);
	assert(n < get_irn_arity(node));
	assert(in && in->kind == k_ir_node);

	/* inform debug/statistics hooks about the change */
	hook_set_irn_n(node, n, in, node->in[n + 1]);

	/* Here, we rely on src and tgt being in the current ir graph */
	edges_notify_edge(node, n, in, node->in[n + 1], irg);

	/* in[0] is the block, so operand n lives at in[n + 1] */
	node->in[n + 1] = in;
/* Appends a new predecessor to a dynamic-arity node and returns its
 * position (NOTE(review): the return statement lies outside this chunk). */
int add_irn_n(ir_node *node, ir_node *in)
	ir_graph *irg = get_irn_irg(node);

	/* only nodes with a flexible in-array may grow */
	assert(node->op->opar == oparity_dynamic);
	/* current arity == array length minus the block slot at in[0] */
	pos = ARR_LEN(node->in) - 1;
	ARR_APP1(ir_node *, node->in, in);
	edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);

	/* inform debug/statistics hooks about the new edge */
	hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
297 void del_Sync_n(ir_node *n, int i)
299 int arity = get_Sync_n_preds(n);
300 ir_node *last_pred = get_Sync_pred(n, arity - 1);
301 set_Sync_pred(n, i, last_pred);
302 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
303 ARR_SHRINKLEN(get_irn_in(n), arity);
306 int (get_irn_deps)(const ir_node *node)
308 return _get_irn_deps(node);
311 ir_node *(get_irn_dep)(const ir_node *node, int pos)
313 return _get_irn_dep(node, pos);
316 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
318 _set_irn_dep(node, pos, dep);
/* Adds a dependency edge from node to dep; reuses a free (NULL) slot in the
 * deps array if one exists, otherwise appends. Returns the position
 * (NOTE(review): several branches and the return are elided in this view). */
int add_irn_dep(ir_node *node, ir_node *dep)
	/* DEP edges are only allowed in backend phase */
	assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
	if (node->deps == NULL) {
		/* lazily create the deps array on first use */
		node->deps = NEW_ARR_F(ir_node *, 1);
	/* scan for a free slot and check whether dep is already recorded */
	for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
		if (node->deps[i] == NULL)
		if (node->deps[i] == dep)
	if (first_zero >= 0) {
		/* reuse a previously freed slot */
		node->deps[first_zero] = dep;
	/* no free slot: append */
	ARR_APP1(ir_node *, node->deps, dep);
	/* keep the DEP-kind reverse edges consistent */
	edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
356 void add_irn_deps(ir_node *tgt, ir_node *src)
360 for (i = 0, n = get_irn_deps(src); i < n; ++i)
361 add_irn_dep(tgt, get_irn_dep(src, i));
365 ir_mode *(get_irn_mode)(const ir_node *node)
367 return _get_irn_mode(node);
370 void (set_irn_mode)(ir_node *node, ir_mode *mode)
372 _set_irn_mode(node, mode);
375 ir_op *(get_irn_op)(const ir_node *node)
377 return _get_irn_op(node);
380 /* should be private to the library: */
381 void (set_irn_op)(ir_node *node, ir_op *op)
383 _set_irn_op(node, op);
386 unsigned (get_irn_opcode)(const ir_node *node)
388 return _get_irn_opcode(node);
391 const char *get_irn_opname(const ir_node *node)
394 if (is_Phi0(node)) return "Phi0";
395 return get_id_str(node->op->name);
398 ident *get_irn_opident(const ir_node *node)
401 return node->op->name;
404 ir_visited_t (get_irn_visited)(const ir_node *node)
406 return _get_irn_visited(node);
409 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
411 _set_irn_visited(node, visited);
414 void (mark_irn_visited)(ir_node *node)
416 _mark_irn_visited(node);
419 int (irn_visited)(const ir_node *node)
421 return _irn_visited(node);
424 int (irn_visited_else_mark)(ir_node *node)
426 return _irn_visited_else_mark(node);
429 void (set_irn_link)(ir_node *node, void *link)
431 _set_irn_link(node, link);
434 void *(get_irn_link)(const ir_node *node)
436 return _get_irn_link(node);
439 op_pin_state (get_irn_pinned)(const ir_node *node)
441 return _get_irn_pinned(node);
444 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
446 return _is_irn_pinned_in_irg(node);
449 void set_irn_pinned(ir_node *node, op_pin_state state)
451 /* due to optimization an opt may be turned into a Tuple */
455 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
456 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
458 node->attr.except.pin_state = state;
461 /* Outputs a unique number for this node */
462 long get_irn_node_nr(const ir_node *node)
465 return node->node_nr;
468 void *(get_irn_generic_attr)(ir_node *node)
470 assert(is_ir_node(node));
471 return _get_irn_generic_attr(node);
474 const void *(get_irn_generic_attr_const)(const ir_node *node)
476 assert(is_ir_node(node));
477 return _get_irn_generic_attr_const(node);
480 unsigned (get_irn_idx)(const ir_node *node)
482 assert(is_ir_node(node));
483 return _get_irn_idx(node);
486 int get_irn_pred_pos(ir_node *node, ir_node *arg)
489 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
490 if (get_irn_n(node, i) == arg)
496 /** manipulate fields of individual nodes **/
498 ir_node *(get_nodes_block)(const ir_node *node)
500 return _get_nodes_block(node);
503 void set_nodes_block(ir_node *node, ir_node *block)
505 assert(node->op != op_Block);
506 set_irn_n(node, -1, block);
509 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
510 * from Start. If so returns frame type, else Null. */
511 ir_type *is_frame_pointer(const ir_node *n)
513 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
514 ir_node *start = get_Proj_pred(n);
515 if (is_Start(start)) {
516 return get_irg_frame_type(get_irn_irg(start));
522 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
523 * from Start. If so returns tls type, else Null. */
524 ir_type *is_tls_pointer(const ir_node *n)
526 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
527 ir_node *start = get_Proj_pred(n);
528 if (is_Start(start)) {
529 return get_tls_type();
535 ir_node **get_Block_cfgpred_arr(ir_node *node)
537 assert(is_Block(node));
538 return (ir_node **)&(get_irn_in(node)[1]);
541 int (get_Block_n_cfgpreds)(const ir_node *node)
543 return _get_Block_n_cfgpreds(node);
546 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
548 return _get_Block_cfgpred(node, pos);
551 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
553 assert(is_Block(node));
554 set_irn_n(node, pos, pred);
557 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
561 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
562 if (get_Block_cfgpred_block(block, i) == pred)
568 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
570 return _get_Block_cfgpred_block(node, pos);
573 int get_Block_matured(const ir_node *node)
575 assert(is_Block(node));
576 return (int)node->attr.block.is_matured;
579 void set_Block_matured(ir_node *node, int matured)
581 assert(is_Block(node));
582 node->attr.block.is_matured = matured;
585 ir_visited_t (get_Block_block_visited)(const ir_node *node)
587 return _get_Block_block_visited(node);
590 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
592 _set_Block_block_visited(node, visit);
595 void (mark_Block_block_visited)(ir_node *node)
597 _mark_Block_block_visited(node);
600 int (Block_block_visited)(const ir_node *node)
602 return _Block_block_visited(node);
605 ir_node *(set_Block_dead)(ir_node *block)
607 return _set_Block_dead(block);
610 int (is_Block_dead)(const ir_node *block)
612 return _is_Block_dead(block);
615 ir_extblk *get_Block_extbb(const ir_node *block)
618 assert(is_Block(block));
619 res = block->attr.block.extblk;
620 assert(res == NULL || is_ir_extbb(res));
624 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
626 assert(is_Block(block));
627 assert(extblk == NULL || is_ir_extbb(extblk));
628 block->attr.block.extblk = extblk;
631 /* returns the graph of a Block. */
632 ir_graph *(get_Block_irg)(const ir_node *block)
634 return _get_Block_irg(block);
/* Returns the label entity of a block, creating it on demand. The created
 * entity is a local, compiler-generated code entity in the global type. */
ir_entity *create_Block_entity(ir_node *block)
	assert(is_Block(block));

	entity = block->attr.block.entity;
	if (entity == NULL) {
		/* lazily create the entity with a fresh unique name and label */
		glob = get_glob_type();
		entity = new_entity(glob, id_unique("block_%u"), get_code_type());
		set_entity_visibility(entity, ir_visibility_local);
		set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
		nr = get_irp_next_label_nr();
		set_entity_label(entity, nr);
		set_entity_compiler_generated(entity, 1);

	/* cache the entity on the block for subsequent lookups */
	block->attr.block.entity = entity;
660 ir_entity *get_Block_entity(const ir_node *block)
662 assert(is_Block(block));
663 return block->attr.block.entity;
666 void set_Block_entity(ir_node *block, ir_entity *entity)
668 assert(is_Block(block));
669 assert(get_entity_type(entity) == get_code_type());
670 block->attr.block.entity = entity;
673 int has_Block_entity(const ir_node *block)
675 return block->attr.block.entity != NULL;
678 ir_node *(get_Block_phis)(const ir_node *block)
680 return _get_Block_phis(block);
683 void (set_Block_phis)(ir_node *block, ir_node *phi)
685 _set_Block_phis(block, phi);
688 void (add_Block_phi)(ir_node *block, ir_node *phi)
690 _add_Block_phi(block, phi);
693 /* Get the Block mark (single bit). */
694 unsigned (get_Block_mark)(const ir_node *block)
696 return _get_Block_mark(block);
699 /* Set the Block mark (single bit). */
700 void (set_Block_mark)(ir_node *block, unsigned mark)
702 _set_Block_mark(block, mark);
705 int get_End_n_keepalives(const ir_node *end)
708 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
711 ir_node *get_End_keepalive(const ir_node *end, int pos)
714 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
717 void add_End_keepalive(ir_node *end, ir_node *ka)
723 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
726 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
/* Set new keep-alives */
/* Replaces the whole keep-alive set of an End node: first retracts every
 * existing keep-alive edge, then resizes the in-array and installs the new
 * set, re-announcing each edge. */
void set_End_keepalives(ir_node *end, int n, ir_node *in[])
	ir_graph *irg = get_irn_irg(end);

	/* notify that edges are deleted */
	for (e = END_KEEPALIVE_OFFSET; e < ARR_LEN(end->in) - 1; ++e) {
		edges_notify_edge(end, e, NULL, end->in[e + 1], irg);

	/* in-array holds block slot + keep-alives */
	ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);

	/* install the new keep-alives and announce the new edges */
	for (i = 0; i < n; ++i) {
		end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
		edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
/* Set new keep-alives from old keep-alives, skipping irn */
/* Removes a single keep-alive irn from an End node: finds it (scanning
 * backwards), swaps in the last keep-alive and shrinks the array by one.
 * NOTE(review): the search/early-exit lines between the loop head and the
 * edge removal are elided in this chunk. */
void remove_End_keepalive(ir_node *end, ir_node *irn)
	int n = get_End_n_keepalives(end);
	for (i = n -1; i >= 0; --i) {
		ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
	irg = get_irn_irg(end);

	/* remove the edge */
	edges_notify_edge(end, idx, NULL, irn, irg);

	/* exchange with the last one */
	ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
	edges_notify_edge(end, n - 1, NULL, old, irg);
	end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
	edges_notify_edge(end, idx, old, NULL, irg);

	/* now n - 1 keeps, 1 block input */
	ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
/* remove Bads, NoMems and doublets from the keep-alive set */
/* Uses a pset to detect duplicates; removal is swap-with-last, so the set
 * is scanned from the highest index downwards. */
void remove_End_Bads_and_doublets(ir_node *end)
	int idx, n = get_End_n_keepalives(end);

	irg = get_irn_irg(end);
	pset_new_init(&keeps);

	for (idx = n - 1; idx >= 0; --idx) {
		ir_node *ka = get_End_keepalive(end, idx);

		/* drop Bad/NoMem keep-alives and any keep-alive seen before */
		if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
			/* remove the edge */
			edges_notify_edge(end, idx, NULL, ka, irg);

			/* exchange with the last one */
			ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
			edges_notify_edge(end, n - 1, NULL, old, irg);
			end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
			edges_notify_edge(end, idx, old, NULL, irg);

		/* first occurrence: remember it for duplicate detection */
		pset_new_insert(&keeps, ka);

	/* n keeps, 1 block input */
	ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);

	pset_new_destroy(&keeps);
821 void free_End(ir_node *end)
826 end->in = NULL; /* @@@ make sure we get an error if we use the
827 in array afterwards ... */
830 size_t get_Return_n_ress(const ir_node *node)
832 assert(is_Return(node));
833 return (size_t)(get_irn_arity(node) - RETURN_RESULT_OFFSET);
836 ir_node **get_Return_res_arr(ir_node *node)
838 assert(is_Return(node));
839 if (get_Return_n_ress(node) > 0)
840 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
845 ir_node *get_Return_res(const ir_node *node, int pos)
847 assert(is_Return(node));
849 assert(get_Return_n_ress(node) > (size_t)pos);
850 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
853 void set_Return_res(ir_node *node, int pos, ir_node *res)
855 assert(is_Return(node));
856 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
859 int (is_Const_null)(const ir_node *node)
861 return _is_Const_null(node);
864 int (is_Const_one)(const ir_node *node)
866 return _is_Const_one(node);
869 int (is_Const_all_one)(const ir_node *node)
871 return _is_Const_all_one(node);
876 symconst_kind get_SymConst_kind(const ir_node *node)
878 assert(is_SymConst(node));
879 return node->attr.symc.kind;
882 void set_SymConst_kind(ir_node *node, symconst_kind kind)
884 assert(is_SymConst(node));
885 node->attr.symc.kind = kind;
888 ir_type *get_SymConst_type(const ir_node *node)
890 /* the cast here is annoying, but we have to compensate for
892 ir_node *irn = (ir_node *)node;
893 assert(is_SymConst(node) &&
894 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
895 return irn->attr.symc.sym.type_p;
898 void set_SymConst_type(ir_node *node, ir_type *tp)
900 assert(is_SymConst(node) &&
901 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
902 node->attr.symc.sym.type_p = tp;
906 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
907 ir_entity *get_SymConst_entity(const ir_node *node)
909 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
910 return node->attr.symc.sym.entity_p;
913 void set_SymConst_entity(ir_node *node, ir_entity *ent)
915 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
916 node->attr.symc.sym.entity_p = ent;
919 ir_enum_const *get_SymConst_enum(const ir_node *node)
921 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
922 return node->attr.symc.sym.enum_p;
925 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
927 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
928 node->attr.symc.sym.enum_p = ec;
931 union symconst_symbol
932 get_SymConst_symbol(const ir_node *node)
934 assert(is_SymConst(node));
935 return node->attr.symc.sym;
938 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
940 assert(is_SymConst(node));
941 node->attr.symc.sym = sym;
944 int get_Sel_n_indexs(const ir_node *node)
946 assert(is_Sel(node));
947 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
950 ir_node **get_Sel_index_arr(ir_node *node)
952 assert(is_Sel(node));
953 if (get_Sel_n_indexs(node) > 0)
954 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
959 ir_node *get_Sel_index(const ir_node *node, int pos)
961 assert(is_Sel(node));
962 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
965 void set_Sel_index(ir_node *node, int pos, ir_node *index)
967 assert(is_Sel(node));
968 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
971 ir_node **get_Call_param_arr(ir_node *node)
973 assert(is_Call(node));
974 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
977 size_t get_Call_n_params(const ir_node *node)
979 assert(is_Call(node));
980 return (size_t) (get_irn_arity(node) - CALL_PARAM_OFFSET);
983 ir_node *get_Call_param(const ir_node *node, int pos)
985 assert(is_Call(node));
986 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
989 void set_Call_param(ir_node *node, int pos, ir_node *param)
991 assert(is_Call(node));
992 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
995 ir_node **get_Builtin_param_arr(ir_node *node)
997 assert(is_Builtin(node));
998 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1001 int get_Builtin_n_params(const ir_node *node)
1003 assert(is_Builtin(node));
1004 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1007 ir_node *get_Builtin_param(const ir_node *node, int pos)
1009 assert(is_Builtin(node));
1010 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1013 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
1015 assert(is_Builtin(node));
1016 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1019 /* Returns a human readable string for the ir_builtin_kind. */
1020 const char *get_builtin_kind_name(ir_builtin_kind kind)
1022 #define X(a) case a: return #a
1025 X(ir_bk_debugbreak);
1026 X(ir_bk_return_address);
1027 X(ir_bk_frame_address);
1037 X(ir_bk_inner_trampoline);
1044 int Call_has_callees(const ir_node *node)
1046 assert(is_Call(node));
1047 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1048 (node->attr.call.callee_arr != NULL));
1051 int get_Call_n_callees(const ir_node *node)
1053 assert(is_Call(node) && node->attr.call.callee_arr);
1054 return ARR_LEN(node->attr.call.callee_arr);
1057 ir_entity *get_Call_callee(const ir_node *node, int pos)
1059 assert(pos >= 0 && pos < get_Call_n_callees(node));
1060 return node->attr.call.callee_arr[pos];
1063 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr)
1065 ir_graph *irg = get_irn_irg(node);
1067 assert(is_Call(node));
1068 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1069 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, irg->obst, n);
1071 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1074 void remove_Call_callee_arr(ir_node *node)
1076 assert(is_Call(node));
1077 node->attr.call.callee_arr = NULL;
1081 * Returns non-zero if a Call is surely a self-recursive Call.
1082 * Beware: if this functions returns 0, the call might be self-recursive!
1084 int is_self_recursive_Call(const ir_node *call)
1086 const ir_node *callee = get_Call_ptr(call);
1088 if (is_SymConst_addr_ent(callee)) {
1089 const ir_entity *ent = get_SymConst_entity(callee);
1090 const ir_graph *irg = get_entity_irg(ent);
1091 if (irg == get_irn_irg(call))
1097 /* Checks for upcast.
1099 * Returns true if the Cast node casts a class type to a super type.
1101 int is_Cast_upcast(ir_node *node)
1103 ir_type *totype = get_Cast_type(node);
1104 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1106 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1109 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1110 totype = get_pointer_points_to_type(totype);
1111 fromtype = get_pointer_points_to_type(fromtype);
1116 if (!is_Class_type(totype)) return 0;
1117 return is_SubClass_of(fromtype, totype);
1120 /* Checks for downcast.
1122 * Returns true if the Cast node casts a class type to a sub type.
1124 int is_Cast_downcast(ir_node *node)
1126 ir_type *totype = get_Cast_type(node);
1127 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1129 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1132 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1133 totype = get_pointer_points_to_type(totype);
1134 fromtype = get_pointer_points_to_type(fromtype);
1139 if (!is_Class_type(totype)) return 0;
1140 return is_SubClass_of(totype, fromtype);
1143 int (is_unop)(const ir_node *node)
1145 return _is_unop(node);
1148 ir_node *get_unop_op(const ir_node *node)
1150 if (node->op->opar == oparity_unary)
1151 return get_irn_n(node, node->op->op_index);
1153 assert(node->op->opar == oparity_unary);
1157 void set_unop_op(ir_node *node, ir_node *op)
1159 if (node->op->opar == oparity_unary)
1160 set_irn_n(node, node->op->op_index, op);
1162 assert(node->op->opar == oparity_unary);
1165 int (is_binop)(const ir_node *node)
1167 return _is_binop(node);
1170 ir_node *get_binop_left(const ir_node *node)
1172 assert(node->op->opar == oparity_binary);
1173 return get_irn_n(node, node->op->op_index);
1176 void set_binop_left(ir_node *node, ir_node *left)
1178 assert(node->op->opar == oparity_binary);
1179 set_irn_n(node, node->op->op_index, left);
1182 ir_node *get_binop_right(const ir_node *node)
1184 assert(node->op->opar == oparity_binary);
1185 return get_irn_n(node, node->op->op_index + 1);
1188 void set_binop_right(ir_node *node, ir_node *right)
1190 assert(node->op->opar == oparity_binary);
1191 set_irn_n(node, node->op->op_index + 1, right);
1194 int is_Phi0(const ir_node *n)
1198 return ((get_irn_op(n) == op_Phi) &&
1199 (get_irn_arity(n) == 0) &&
1200 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1203 ir_node **get_Phi_preds_arr(ir_node *node)
1205 assert(is_Phi(node));
1206 return (ir_node **)&(get_irn_in(node)[1]);
1209 int get_Phi_n_preds(const ir_node *node)
1211 assert(is_Phi(node) || is_Phi0(node));
1212 return (get_irn_arity(node));
1215 ir_node *get_Phi_pred(const ir_node *node, int pos)
1217 assert(is_Phi(node) || is_Phi0(node));
1218 return get_irn_n(node, pos);
1221 void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1223 assert(is_Phi(node) || is_Phi0(node));
1224 set_irn_n(node, pos, pred);
1227 ir_node *(get_Phi_next)(const ir_node *phi)
1229 return _get_Phi_next(phi);
1232 void (set_Phi_next)(ir_node *phi, ir_node *next)
1234 _set_Phi_next(phi, next);
1237 int is_memop(const ir_node *node)
1239 unsigned code = get_irn_opcode(node);
1240 return (code == iro_Load || code == iro_Store);
1243 ir_node *get_memop_mem(const ir_node *node)
1245 assert(is_memop(node));
1246 return get_irn_n(node, 0);
1249 void set_memop_mem(ir_node *node, ir_node *mem)
1251 assert(is_memop(node));
1252 set_irn_n(node, 0, mem);
1255 ir_node *get_memop_ptr(const ir_node *node)
1257 assert(is_memop(node));
1258 return get_irn_n(node, 1);
1261 void set_memop_ptr(ir_node *node, ir_node *ptr)
1263 assert(is_memop(node));
1264 set_irn_n(node, 1, ptr);
1267 ir_volatility get_Load_volatility(const ir_node *node)
1269 assert(is_Load(node));
1270 return (ir_volatility)node->attr.load.volatility;
1273 void set_Load_volatility(ir_node *node, ir_volatility volatility)
1275 assert(is_Load(node));
1276 node->attr.load.volatility = volatility;
1279 ir_align get_Load_align(const ir_node *node)
1281 assert(is_Load(node));
1282 return (ir_align)node->attr.load.aligned;
1285 void set_Load_align(ir_node *node, ir_align align)
1287 assert(is_Load(node));
1288 node->attr.load.aligned = align;
1292 ir_volatility get_Store_volatility(const ir_node *node)
1294 assert(is_Store(node));
1295 return (ir_volatility)node->attr.store.volatility;
1298 void set_Store_volatility(ir_node *node, ir_volatility volatility)
1300 assert(is_Store(node));
1301 node->attr.store.volatility = volatility;
1304 ir_align get_Store_align(const ir_node *node)
1306 assert(is_Store(node));
1307 return (ir_align)node->attr.store.aligned;
1310 void set_Store_align(ir_node *node, ir_align align)
1312 assert(is_Store(node));
1313 node->attr.store.aligned = align;
1317 ir_node **get_Sync_preds_arr(ir_node *node)
1319 assert(is_Sync(node));
1320 return (ir_node **)&(get_irn_in(node)[1]);
1323 int get_Sync_n_preds(const ir_node *node)
1325 assert(is_Sync(node));
1326 return (get_irn_arity(node));
1330 void set_Sync_n_preds(ir_node *node, int n_preds)
1332 assert(is_Sync(node));
1336 ir_node *get_Sync_pred(const ir_node *node, int pos)
1338 assert(is_Sync(node));
1339 return get_irn_n(node, pos);
1342 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
1344 assert(is_Sync(node));
1345 set_irn_n(node, pos, pred);
1348 /* Add a new Sync predecessor */
1349 void add_Sync_pred(ir_node *node, ir_node *pred)
1351 assert(is_Sync(node));
1352 add_irn_n(node, pred);
1355 int (is_arg_Proj)(const ir_node *node)
1357 return _is_arg_Proj(node);
1360 pn_Cmp (get_Proj_pn_cmp)(const ir_node *node)
1362 return _get_Proj_pn_cmp(node);
1365 ir_node **get_Tuple_preds_arr(ir_node *node)
1367 assert(is_Tuple(node));
1368 return (ir_node **)&(get_irn_in(node)[1]);
1371 int get_Tuple_n_preds(const ir_node *node)
1373 assert(is_Tuple(node));
1374 return get_irn_arity(node);
1377 ir_node *get_Tuple_pred(const ir_node *node, int pos)
1379 assert(is_Tuple(node));
1380 return get_irn_n(node, pos);
1383 void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
1385 assert(is_Tuple(node));
1386 set_irn_n(node, pos, pred);
1389 int get_ASM_n_input_constraints(const ir_node *node)
1391 assert(is_ASM(node));
1392 return ARR_LEN(node->attr.assem.input_constraints);
1395 int get_ASM_n_output_constraints(const ir_node *node)
1397 assert(is_ASM(node));
1398 return ARR_LEN(node->attr.assem.output_constraints);
1401 int get_ASM_n_clobbers(const ir_node *node)
1403 assert(is_ASM(node));
1404 return ARR_LEN(node->attr.assem.clobbers);
1407 /* returns the graph of a node */
1408 ir_graph *(get_irn_irg)(const ir_node *node)
1410 return _get_irn_irg(node);
1414 /*----------------------------------------------------------------*/
1415 /* Auxiliary routines */
1416 /*----------------------------------------------------------------*/
1418 ir_node *skip_Proj(ir_node *node)
1420 /* don't assert node !!! */
1425 node = get_Proj_pred(node);
1431 skip_Proj_const(const ir_node *node)
1433 /* don't assert node !!! */
1438 node = get_Proj_pred(node);
1443 ir_node *skip_Tuple(ir_node *node)
1448 if (is_Proj(node)) {
1449 pred = get_Proj_pred(node);
1451 if (is_Proj(pred)) { /* nested Tuple ? */
1452 pred = skip_Tuple(pred);
1454 if (is_Tuple(pred)) {
1455 node = get_Tuple_pred(pred, get_Proj_proj(node));
1458 } else if (is_Tuple(pred)) {
1459 node = get_Tuple_pred(pred, get_Proj_proj(node));
1466 /* returns operand of node if node is a Cast */
1467 ir_node *skip_Cast(ir_node *node)
1470 return get_Cast_op(node);
1474 /* returns operand of node if node is a Cast */
1475 const ir_node *skip_Cast_const(const ir_node *node)
1478 return get_Cast_op(node);
1482 /* returns operand of node if node is a Pin */
1483 ir_node *skip_Pin(ir_node *node)
1486 return get_Pin_op(node);
1490 /* returns operand of node if node is a Confirm */
1491 ir_node *skip_Confirm(ir_node *node)
1493 if (is_Confirm(node))
1494 return get_Confirm_value(node);
1498 /* skip all high-level ops */
1499 ir_node *skip_HighLevel_ops(ir_node *node)
1501 while (is_op_highlevel(get_irn_op(node))) {
1502 node = get_irn_n(node, 0);
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
 * than any other approach, as Id chains are resolved and all point to the real node, or
 * all id's are self loops.
 *
 * Note: This function takes 10% of mostly ANY the compiler run, so it's
 * a little bit "hand optimized".
 */
ir_node *skip_Id(ir_node *node)
	/* don't assert node !!! */
	/* fast path: NULL or not an Id at all */
	if (!node || (node->op != op_Id)) return node;

	/* Don't use get_Id_pred(): We get into an endless loop for
	   self-referencing Ids. */
	pred = node->in[0+1];

	/* single Id: its predecessor is the real node */
	if (pred->op != op_Id) return pred;

	if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
		ir_node *rem_pred, *res;

		if (pred->op != op_Id) return pred; /* shortcut */

		assert(get_irn_arity (node) > 0);

		/* temporarily break the chain so a cyclic chain terminates */
		node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
		res = skip_Id(rem_pred);
		if (is_Id(res)) /* self-loop */ return node;

		node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Out-of-line version of _is_strictConv(); the parenthesized name
 * suppresses any macro expansion of is_strictConv. */
1547 int (is_strictConv)(const ir_node *node)
1549 return _is_strictConv(node);
1552 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
1553 int (is_SymConst_addr_ent)(const ir_node *node)
1555 return _is_SymConst_addr_ent(node);
1558 /* Returns true if the operation manipulates control flow. */
1559 int is_cfop(const ir_node *node)
1561 return is_op_cfopcode(get_irn_op(node));
1564 /* Returns true if the operation can change the control flow because
/* (rest of the original comment elided in this excerpt — presumably
 * "of an exception": fragile ops are those that may raise) */
1566 int is_fragile_op(const ir_node *node)
1568 return is_op_fragile(get_irn_op(node));
1571 /* Returns the memory operand of fragile operations. */
1572 ir_node *get_fragile_op_mem(ir_node *node)
1574 assert(node && is_fragile_op(node));
1576 switch (get_irn_opcode(node)) {
/* NOTE(review): the case labels for the fragile opcodes are elided in
 * this excerpt; the visible return handles the generic-projection ops. */
1585 return get_irn_n(node, pn_Generic_M);
/* unhandled opcode despite passing is_fragile_op(): hard error */
1590 panic("should not be reached");
1594 /* Returns true if the operation is a forking control flow operation. */
1595 int (is_irn_forking)(const ir_node *node)
1597 return _is_irn_forking(node);
/* Copy the opcode-specific attributes from old_node to new_node within irg. */
1600 void (copy_node_attr)(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
1602 _copy_node_attr(irg, old_node, new_node);
1605 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
/* (comment continuation elided in this excerpt) */
1607 ir_type *(get_irn_type_attr)(ir_node *node)
1609 return _get_irn_type_attr(node);
1612 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
1613 ir_entity *(get_irn_entity_attr)(ir_node *node)
1615 return _get_irn_entity_attr(node);
1618 /* Returns non-zero for constant-like nodes. */
1619 int (is_irn_constlike)(const ir_node *node)
1621 return _is_irn_constlike(node);
1625 * Returns non-zero for nodes that are allowed to have keep-alives and
1626 * are neither Block nor PhiM.
1628 int (is_irn_keep)(const ir_node *node)
1630 return _is_irn_keep(node);
1634 * Returns non-zero for nodes that are always placed in the start block.
1636 int (is_irn_start_block_placed)(const ir_node *node)
1638 return _is_irn_start_block_placed(node);
1641 /* Returns non-zero for nodes that are machine operations. */
1642 int (is_irn_machine_op)(const ir_node *node)
1644 return _is_irn_machine_op(node);
1647 /* Returns non-zero for nodes that are machine operands. */
1648 int (is_irn_machine_operand)(const ir_node *node)
1650 return _is_irn_machine_operand(node);
1653 /* Returns non-zero for nodes that have the n'th user machine flag set. */
1654 int (is_irn_machine_user)(const ir_node *node, unsigned n)
1656 return _is_irn_machine_user(node, n);
1659 /* Returns non-zero for nodes that are CSE neutral to its users. */
1660 int (is_irn_cse_neutral)(const ir_node *node)
1662 return _is_irn_cse_neutral(node);
1665 /* Gets the string representation of the jump prediction .*/
1666 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
/* stringify each enum constant via a one-shot helper macro; the
 * switch header and default path are elided in this excerpt */
1668 #define X(a) case a: return #a
1670 X(COND_JMP_PRED_NONE);
1671 X(COND_JMP_PRED_TRUE);
1672 X(COND_JMP_PRED_FALSE);
1678 /** Return the attribute type of a SymConst node if exists */
1679 static ir_type *get_SymConst_attr_type(const ir_node *self)
1681 symconst_kind kind = get_SymConst_kind(self);
1682 if (SYMCONST_HAS_TYPE(kind))
1683 return get_SymConst_type(self);
1687 /** Return the attribute entity of a SymConst node if exists */
1688 static ir_entity *get_SymConst_attr_entity(const ir_node *self)
1690 symconst_kind kind = get_SymConst_kind(self);
1691 if (SYMCONST_HAS_ENT(kind))
1692 return get_SymConst_entity(self);
1696 /** the get_type_attr operation must be always implemented */
/* fallback for opcodes without a type attribute */
1697 static ir_type *get_Null_type(const ir_node *n)
1700 return firm_unknown_type;
1703 /* Sets the get_type operation for an ir_op_ops. */
/* Install the opcode-specific get_type_attr callback; opcodes without a
 * real type attribute fall back to get_Null_type so the slot is never
 * NULL.  (The switch header is elided in this excerpt.) */
1704 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops)
1707 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
1708 case iro_Call: ops->get_type_attr = get_Call_type; break;
1709 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
1710 case iro_Free: ops->get_type_attr = get_Free_type; break;
1711 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
1713 /* not allowed to be NULL */
1714 if (! ops->get_type_attr)
1715 ops->get_type_attr = get_Null_type;
1721 /** the get_entity_attr operation must be always implemented */
/* fallback for opcodes without an entity attribute
 * (body elided in this excerpt) */
1722 static ir_entity *get_Null_ent(const ir_node *n)
1728 /* Sets the get_type operation for an ir_op_ops. */
/* Install the opcode-specific get_entity_attr callback, with
 * get_Null_ent as the mandatory fallback.  (Switch header elided.) */
1729 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops)
1732 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
1733 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
1735 /* not allowed to be NULL */
1736 if (! ops->get_entity_attr)
1737 ops->get_entity_attr = get_Null_ent;
1743 /* Sets the debug information of a node. */
1744 void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
1746 _set_irn_dbg_info(n, db);
1750 * Returns the debug information of an node.
1752 * @param n The node.
1754 dbg_info *(get_irn_dbg_info)(const ir_node *n)
1756 return _get_irn_dbg_info(n);
1759 /* checks whether a node represents a global address */
/* a "global" is a SymConst of kind symconst_addr_ent */
1760 int is_Global(const ir_node *node)
1762 return is_SymConst_addr_ent(node);
1765 /* returns the entity of a global address */
/* only valid if is_Global(node) holds */
1766 ir_entity *get_Global_entity(const ir_node *node)
1768 return get_SymConst_entity(node);
1772 * Calculate a hash value of a node.
1774 unsigned firm_default_hash(const ir_node *node)
1779 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
/* seed the hash with the arity */
1780 h = irn_arity = get_irn_arity(node);
1782 /* consider all in nodes... except the block if not a control flow. */
/* index -1 addresses the block predecessor; it is included only for
 * control-flow ops, matching the comment above */
1783 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
1784 ir_node *pred = get_irn_n(node, i);
1785 if (is_irn_cse_neutral(pred))
/* NOTE(review): a 'continue;' skipping CSE-neutral predecessors appears
 * to be elided in this excerpt — confirm against the full source. */
1788 h = 9*h + HASH_PTR(pred);
/* finally mix in mode and opcode so nodes differing only there collide less */
1792 h = 9*h + HASH_PTR(get_irn_mode(node));
1794 h = 9*h + HASH_PTR(get_irn_op(node));
1797 } /* firm_default_hash */
1799 /* include generated code */
1800 #include "gen_irnode.c.inl"