3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46 * returns the pnc name from a pnc constant
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
88 * Indicates, whether additional data can be registered to ir nodes.
89 * If set to 1, this is not possible anymore.
91 static int forbid_new_data = 0;
94 * The amount of additional space for custom data to be allocated upon
95 * creating a new node.
97 static size_t additional_node_data_size = 0;
100 size_t register_additional_node_data(size_t size)
102 assert(!forbid_new_data && "Too late to register additional node data");
107 return additional_node_data_size += size;
114 /* Forbid the addition of new data to an ir node. */
119 * irnode constructor.
120 * Create a new irnode in irg, with an op, mode, arity and
121 * some incoming irnodes.
122 * If arity is negative, a node with a dynamic array is created.
125 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
126 int arity, ir_node **in)
129 size_t node_size = offsetof(ir_node, attr) + op->attr_size + additional_node_data_size;
132 assert(irg && op && mode);
133 p = obstack_alloc (irg->obst, node_size);
134 memset(p, 0, node_size);
135 res = (ir_node *) (p + additional_node_data_size);
137 res->kind = k_ir_node;
143 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
145 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
146 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
149 set_irn_dbg_info(res, db);
153 res->node_nr = get_irp_new_node_nr();
161 /* Copies all attributes stored in the old node to the new node.
162 Assumes both have the same opcode and sufficient size. */
164 copy_attrs (const ir_node *old_node, ir_node *new_node) {
165 assert(get_irn_op(old_node) == get_irn_op(new_node));
166 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
167 if (get_irn_op(new_node) == op_Call) remove_Call_callee_arr(new_node);
170 /*-- getting some parameters from ir_nodes --*/
173 (is_ir_node)(const void *thing) {
174 return __is_ir_node(thing);
178 (get_irn_intra_arity)(const ir_node *node) {
179 return __get_irn_intra_arity(node);
183 (get_irn_inter_arity)(const ir_node *node) {
184 return __get_irn_inter_arity(node);
187 int (*__get_irn_arity)(const ir_node *node) = __get_irn_intra_arity;
190 (get_irn_arity)(const ir_node *node) {
191 return __get_irn_arity(node);
194 /* Returns the array with ins. This array is shifted with respect to the
195 array accessed by get_irn_n: The block operand is at position 0 not -1.
196 (@@@ This should be changed.)
197 The order of the predecessors in this array is not guaranteed, except that
198 lists of operands as predecessors of Block or arguments of a Call are
201 get_irn_in (const ir_node *node) {
203 if (get_interprocedural_view()) { /* handle Filter and Block specially */
204 if (get_irn_opcode(node) == iro_Filter) {
205 assert(node->attr.filter.in_cg);
206 return node->attr.filter.in_cg;
207 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
208 return node->attr.block.in_cg;
210 /* else fall through */
216 set_irn_in (ir_node *node, int arity, ir_node **in) {
219 if (get_interprocedural_view()) { /* handle Filter and Block specially */
220 if (get_irn_opcode(node) == iro_Filter) {
221 assert(node->attr.filter.in_cg);
222 arr = &node->attr.filter.in_cg;
223 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
224 arr = &node->attr.block.in_cg;
231 if (arity != ARR_LEN(*arr) - 1) {
232 ir_node * block = (*arr)[0];
233 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
236 fix_backedges(current_ir_graph->obst, node);
237 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
241 (get_irn_intra_n)(ir_node *node, int n) {
242 return __get_irn_intra_n (node, n);
246 (get_irn_inter_n)(ir_node *node, int n) {
247 return __get_irn_inter_n (node, n);
250 ir_node *(*__get_irn_n)(ir_node *node, int n) = __get_irn_intra_n;
253 (get_irn_n)(ir_node *node, int n) {
254 return __get_irn_n(node, n);
258 set_irn_n (ir_node *node, int n, ir_node *in) {
259 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
260 assert(in && in->kind == k_ir_node);
261 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
262 /* Change block pred in both views! */
263 node->in[n + 1] = in;
264 assert(node->attr.filter.in_cg);
265 node->attr.filter.in_cg[n + 1] = in;
268 if (get_interprocedural_view()) { /* handle Filter and Block specially */
269 if (get_irn_opcode(node) == iro_Filter) {
270 assert(node->attr.filter.in_cg);
271 node->attr.filter.in_cg[n + 1] = in;
273 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
274 node->attr.block.in_cg[n + 1] = in;
277 /* else fall through */
279 node->in[n + 1] = in;
283 (get_irn_mode)(const ir_node *node) {
284 return __get_irn_mode(node);
288 (set_irn_mode)(ir_node *node, ir_mode *mode)
290 __set_irn_mode(node, mode);
294 get_irn_modecode (const ir_node *node)
297 return node->mode->code;
300 /** Gets the string representation of the mode .*/
302 get_irn_modename (const ir_node *node)
305 return get_mode_name(node->mode);
309 get_irn_modeident (const ir_node *node)
312 return get_mode_ident(node->mode);
316 (get_irn_op)(const ir_node *node)
318 return __get_irn_op(node);
321 /* should be private to the library: */
323 set_irn_op (ir_node *node, ir_op *op)
330 (get_irn_opcode)(const ir_node *node)
332 return __get_irn_opcode(node);
336 get_irn_opname (const ir_node *node)
339 if ((get_irn_op((ir_node *)node) == op_Phi) &&
340 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
341 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
342 return get_id_str(node->op->name);
346 get_irn_opident (const ir_node *node)
349 return node->op->name;
353 (get_irn_visited)(const ir_node *node)
355 return __get_irn_visited(node);
359 (set_irn_visited)(ir_node *node, unsigned long visited)
361 __set_irn_visited(node, visited);
365 (mark_irn_visited)(ir_node *node) {
366 __mark_irn_visited(node);
370 (irn_not_visited)(const ir_node *node) {
371 return __irn_not_visited(node);
375 (irn_visited)(const ir_node *node) {
376 return __irn_visited(node);
380 (set_irn_link)(ir_node *node, void *link) {
381 __set_irn_link(node, link);
385 (get_irn_link)(const ir_node *node) {
386 return __get_irn_link(node);
390 (get_irn_pinned)(const ir_node *node) {
391 return __get_irn_pinned(node);
394 void set_irn_pinned(ir_node *node, op_pin_state state) {
395 /* due to optimization an opt may be turned into a Tuple */
396 if (get_irn_op(node) == op_Tuple)
399 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
400 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
402 node->attr.except.pin_state = state;
405 #ifdef DO_HEAPANALYSIS
406 /* Access the abstract interpretation information of a node.
407 Returns NULL if no such information is available. */
408 struct abstval *get_irn_abst_value(ir_node *n) {
411 /* Set the abstract interpretation information of a node. */
412 void set_irn_abst_value(ir_node *n, struct abstval *os) {
415 struct section *firm_get_irn_section(ir_node *n) {
418 void firm_set_irn_section(ir_node *n, struct section *s) {
422 /* Dummies needed for firmjni. */
423 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
424 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
425 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
426 void firm_set_irn_section(ir_node *n, struct section *s) {}
427 #endif /* DO_HEAPANALYSIS */
430 /* Outputs a unique number for this node */
432 get_irn_node_nr(const ir_node *node) {
435 return node->node_nr;
442 get_irn_const_attr (ir_node *node)
444 assert (node->op == op_Const);
445 return node->attr.con;
449 get_irn_proj_attr (ir_node *node)
451 assert (node->op == op_Proj);
452 return node->attr.proj;
456 get_irn_alloc_attr (ir_node *node)
458 assert (node->op == op_Alloc);
463 get_irn_free_attr (ir_node *node)
465 assert (node->op == op_Free);
466 return node->attr.f = skip_tid(node->attr.f);
470 get_irn_symconst_attr (ir_node *node)
472 assert (node->op == op_SymConst);
477 get_irn_call_attr (ir_node *node)
479 assert (node->op == op_Call);
480 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
484 get_irn_sel_attr (ir_node *node)
486 assert (node->op == op_Sel);
491 get_irn_phi_attr (ir_node *node)
493 assert (node->op == op_Phi);
494 return node->attr.phi0_pos;
498 get_irn_block_attr (ir_node *node)
500 assert (node->op == op_Block);
501 return node->attr.block;
505 get_irn_load_attr (ir_node *node)
507 assert (node->op == op_Load);
508 return node->attr.load;
512 get_irn_store_attr (ir_node *node)
514 assert (node->op == op_Store);
515 return node->attr.store;
519 get_irn_except_attr (ir_node *node)
521 assert (node->op == op_Div || node->op == op_Quot ||
522 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
523 return node->attr.except;
526 /** manipulate fields of individual nodes **/
528 /* this works for all except Block */
530 get_nodes_block (ir_node *node) {
531 assert (!(node->op == op_Block));
532 return get_irn_n(node, -1);
536 set_nodes_block (ir_node *node, ir_node *block) {
537 assert (!(node->op == op_Block));
538 set_irn_n(node, -1, block);
541 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
542 * from Start. If so returns frame type, else Null. */
543 type *is_frame_pointer(ir_node *n) {
544 if ((get_irn_op(n) == op_Proj) &&
545 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
546 ir_node *start = get_Proj_pred(n);
547 if (get_irn_op(start) == op_Start) {
548 return get_irg_frame_type(get_irn_irg(start));
554 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
555 * from Start. If so returns global type, else Null. */
556 type *is_globals_pointer(ir_node *n) {
557 if ((get_irn_op(n) == op_Proj) &&
558 (get_Proj_proj(n) == pn_Start_P_globals)) {
559 ir_node *start = get_Proj_pred(n);
560 if (get_irn_op(start) == op_Start) {
561 return get_glob_type();
567 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
568 * from Start. If so returns 1, else 0. */
569 int is_value_arg_pointer(ir_node *n) {
570 if ((get_irn_op(n) == op_Proj) &&
571 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
572 (get_irn_op(get_Proj_pred(n)) == op_Start))
577 /* Returns an array with the predecessors of the Block. Depending on
578 the implementation of the graph data structure this can be a copy of
579 the internal representation of predecessors as well as the internal
580 array itself. Therefore writing to this array might obstruct the ir. */
582 get_Block_cfgpred_arr (ir_node *node)
584 assert ((node->op == op_Block));
585 return (ir_node **)&(get_irn_in(node)[1]);
590 get_Block_n_cfgpreds (ir_node *node) {
591 assert ((node->op == op_Block));
592 return get_irn_arity(node);
596 get_Block_cfgpred (ir_node *node, int pos) {
597 assert(-1 <= pos && pos < get_irn_arity(node));
598 assert(node->op == op_Block);
599 return get_irn_n(node, pos);
603 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
604 assert (node->op == op_Block);
605 set_irn_n(node, pos, pred);
609 get_Block_matured (ir_node *node) {
610 assert (node->op == op_Block);
611 return node->attr.block.matured;
615 set_Block_matured (ir_node *node, bool matured) {
616 assert (node->op == op_Block);
617 node->attr.block.matured = matured;
620 get_Block_block_visited (ir_node *node) {
621 assert (node->op == op_Block);
622 return node->attr.block.block_visited;
626 set_Block_block_visited (ir_node *node, unsigned long visit) {
627 assert (node->op == op_Block);
628 node->attr.block.block_visited = visit;
631 /* For this current_ir_graph must be set. */
633 mark_Block_block_visited (ir_node *node) {
634 assert (node->op == op_Block);
635 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
639 Block_not_block_visited(ir_node *node) {
640 assert (node->op == op_Block);
641 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
645 get_Block_graph_arr (ir_node *node, int pos) {
646 assert (node->op == op_Block);
647 return node->attr.block.graph_arr[pos+1];
651 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
652 assert (node->op == op_Block);
653 node->attr.block.graph_arr[pos+1] = value;
656 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
657 assert(node->op == op_Block);
658 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
659 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
660 node->attr.block.in_cg[0] = NULL;
661 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
663 /* Fix backedge array. fix_backedges operates depending on
664 interprocedural_view. */
665 int ipv = get_interprocedural_view();
666 set_interprocedural_view(true);
667 fix_backedges(current_ir_graph->obst, node);
668 set_interprocedural_view(ipv);
671 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
674 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
675 assert(node->op == op_Block &&
676 node->attr.block.in_cg &&
677 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
678 node->attr.block.in_cg[pos + 1] = pred;
681 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
682 assert(node->op == op_Block);
683 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
686 int get_Block_cg_n_cfgpreds(ir_node * node) {
687 assert(node->op == op_Block);
688 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
691 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
692 assert(node->op == op_Block && node->attr.block.in_cg);
693 return node->attr.block.in_cg[pos + 1];
696 void remove_Block_cg_cfgpred_arr(ir_node * node) {
697 assert(node->op == op_Block);
698 node->attr.block.in_cg = NULL;
702 set_Start_irg(ir_node *node, ir_graph *irg) {
703 assert(node->op == op_Start);
704 assert(is_ir_graph(irg));
705 assert(0 && " Why set irg? -- use set_irn_irg");
709 get_End_n_keepalives(ir_node *end) {
710 assert (end->op == op_End);
711 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
715 get_End_keepalive(ir_node *end, int pos) {
716 assert (end->op == op_End);
717 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
721 add_End_keepalive (ir_node *end, ir_node *ka) {
722 assert (end->op == op_End);
723 ARR_APP1 (ir_node *, end->in, ka);
727 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
728 assert (end->op == op_End);
729 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
733 free_End (ir_node *end) {
734 assert (end->op == op_End);
736 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
737 end->in = NULL; /* @@@ make sure we get an error if we use the
738 in array afterwards ... */
743 > Implementing the case construct (which is where the constant Proj node is
744 > important) involves far more than simply determining the constant values.
745 > We could argue that this is more properly a function of the translator from
746 > Firm to the target machine. That could be done if there was some way of
747 > projecting "default" out of the Cond node.
748 I know it's complicated.
749 Basically there are two problems:
750 - determining the gaps between the projs
751 - determining the biggest case constant to know the proj number for
753 I see several solutions:
754 1. Introduce a ProjDefault node. Solves both problems.
755 This means to extend all optimizations executed during construction.
756 2. Give the Cond node for switch two flavors:
757 a) there are no gaps in the projs (existing flavor)
758 b) gaps may exist, default proj is still the Proj with the largest
759 projection number. This covers also the gaps.
760 3. Fix the semantic of the Cond to that of 2b)
762 Solution 2 seems to be the best:
763 Computing the gaps in the Firm representation is not too hard, i.e.,
764 libFIRM can implement a routine that transforms between the two
765 flavours. This is also possible for 1) but 2) does not require to
766 change any existing optimization.
767 Further it should be far simpler to determine the biggest constant than
769 I don't want to choose 3) as 2a) seems to have advantages for
770 dataflow analysis and 3) does not allow to convert the representation to
774 get_Cond_selector (ir_node *node) {
775 assert (node->op == op_Cond);
776 return get_irn_n(node, 0);
780 set_Cond_selector (ir_node *node, ir_node *selector) {
781 assert (node->op == op_Cond);
782 set_irn_n(node, 0, selector);
786 get_Cond_kind (ir_node *node) {
787 assert (node->op == op_Cond);
788 return node->attr.c.kind;
792 set_Cond_kind (ir_node *node, cond_kind kind) {
793 assert (node->op == op_Cond);
794 node->attr.c.kind = kind;
798 get_Cond_defaultProj (ir_node *node) {
799 assert (node->op == op_Cond);
800 return node->attr.c.default_proj;
804 get_Return_mem (ir_node *node) {
805 assert (node->op == op_Return);
806 return get_irn_n(node, 0);
810 set_Return_mem (ir_node *node, ir_node *mem) {
811 assert (node->op == op_Return);
812 set_irn_n(node, 0, mem);
816 get_Return_n_ress (ir_node *node) {
817 assert (node->op == op_Return);
818 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
822 get_Return_res_arr (ir_node *node)
824 assert ((node->op == op_Return));
825 if (get_Return_n_ress(node) > 0)
826 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
833 set_Return_n_res (ir_node *node, int results) {
834 assert (node->op == op_Return);
839 get_Return_res (ir_node *node, int pos) {
840 assert (node->op == op_Return);
841 assert (get_Return_n_ress(node) > pos);
842 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
846 set_Return_res (ir_node *node, int pos, ir_node *res){
847 assert (node->op == op_Return);
848 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
852 get_Raise_mem (ir_node *node) {
853 assert (node->op == op_Raise);
854 return get_irn_n(node, 0);
858 set_Raise_mem (ir_node *node, ir_node *mem) {
859 assert (node->op == op_Raise);
860 set_irn_n(node, 0, mem);
864 get_Raise_exo_ptr (ir_node *node) {
865 assert (node->op == op_Raise);
866 return get_irn_n(node, 1);
870 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
871 assert (node->op == op_Raise);
872 set_irn_n(node, 1, exo_ptr);
875 tarval *get_Const_tarval (ir_node *node) {
876 assert (node->op == op_Const);
877 return node->attr.con.tv;
881 set_Const_tarval (ir_node *node, tarval *con) {
882 assert (node->op == op_Const);
883 node->attr.con.tv = con;
887 /* The source language type. Must be an atomic type. Mode of type must
888 be mode of node. For tarvals from entities type must be pointer to
891 get_Const_type (ir_node *node) {
892 assert (node->op == op_Const);
893 return node->attr.con.tp;
897 set_Const_type (ir_node *node, type *tp) {
898 assert (node->op == op_Const);
899 if (tp != unknown_type) {
900 assert (is_atomic_type(tp));
901 assert (get_type_mode(tp) == get_irn_mode(node));
903 node->attr.con.tp = tp;
908 get_SymConst_kind (const ir_node *node) {
909 assert (node->op == op_SymConst);
910 return node->attr.i.num;
914 set_SymConst_kind (ir_node *node, symconst_kind num) {
915 assert (node->op == op_SymConst);
916 node->attr.i.num = num;
920 get_SymConst_type (ir_node *node) {
921 assert ( (node->op == op_SymConst)
922 && ( get_SymConst_kind(node) == symconst_type_tag
923 || get_SymConst_kind(node) == symconst_size));
924 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
928 set_SymConst_type (ir_node *node, type *tp) {
929 assert ( (node->op == op_SymConst)
930 && ( get_SymConst_kind(node) == symconst_type_tag
931 || get_SymConst_kind(node) == symconst_size));
932 node->attr.i.sym.type_p = tp;
936 get_SymConst_name (ir_node *node) {
937 assert ( (node->op == op_SymConst)
938 && (get_SymConst_kind(node) == symconst_addr_name));
939 return node->attr.i.sym.ident_p;
943 set_SymConst_name (ir_node *node, ident *name) {
944 assert ( (node->op == op_SymConst)
945 && (get_SymConst_kind(node) == symconst_addr_name));
946 node->attr.i.sym.ident_p = name;
950 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
951 entity *get_SymConst_entity (ir_node *node) {
952 assert ( (node->op == op_SymConst)
953 && (get_SymConst_kind (node) == symconst_addr_ent));
954 return node->attr.i.sym.entity_p;
957 void set_SymConst_entity (ir_node *node, entity *ent) {
958 assert ( (node->op == op_SymConst)
959 && (get_SymConst_kind(node) == symconst_addr_ent));
960 node->attr.i.sym.entity_p = ent;
963 union symconst_symbol
964 get_SymConst_symbol (ir_node *node) {
965 assert (node->op == op_SymConst);
966 return node->attr.i.sym;
970 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
971 assert (node->op == op_SymConst);
972 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
973 node->attr.i.sym = sym;
977 get_SymConst_value_type (ir_node *node) {
978 assert (node->op == op_SymConst);
979 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
980 return node->attr.i.tp;
984 set_SymConst_value_type (ir_node *node, type *tp) {
985 assert (node->op == op_SymConst);
986 node->attr.i.tp = tp;
990 get_Sel_mem (ir_node *node) {
991 assert (node->op == op_Sel);
992 return get_irn_n(node, 0);
996 set_Sel_mem (ir_node *node, ir_node *mem) {
997 assert (node->op == op_Sel);
998 set_irn_n(node, 0, mem);
1002 get_Sel_ptr (ir_node *node) {
1003 assert (node->op == op_Sel);
1004 return get_irn_n(node, 1);
1008 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1009 assert (node->op == op_Sel);
1010 set_irn_n(node, 1, ptr);
1014 get_Sel_n_indexs (ir_node *node) {
1015 assert (node->op == op_Sel);
1016 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1020 get_Sel_index_arr (ir_node *node)
1022 assert ((node->op == op_Sel));
1023 if (get_Sel_n_indexs(node) > 0)
1024 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1030 get_Sel_index (ir_node *node, int pos) {
1031 assert (node->op == op_Sel);
1032 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1036 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1037 assert (node->op == op_Sel);
1038 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1042 get_Sel_entity (ir_node *node) {
1043 assert (node->op == op_Sel);
1044 return node->attr.s.ent;
1048 set_Sel_entity (ir_node *node, entity *ent) {
1049 assert (node->op == op_Sel);
1050 node->attr.s.ent = ent;
1054 get_InstOf_ent (ir_node *node) {
1055 assert (node->op = op_InstOf);
1056 return (node->attr.io.ent);
1060 set_InstOf_ent (ir_node *node, type *ent) {
1061 assert (node->op = op_InstOf);
1062 node->attr.io.ent = ent;
1066 get_InstOf_store (ir_node *node) {
1067 assert (node->op = op_InstOf);
1068 return (get_irn_n (node, 0));
1072 set_InstOf_store (ir_node *node, ir_node *obj) {
1073 assert (node->op = op_InstOf);
1074 set_irn_n (node, 0, obj);
1078 get_InstOf_obj (ir_node *node) {
1079 assert (node->op = op_InstOf);
1080 return (get_irn_n (node, 1));
1084 set_InstOf_obj (ir_node *node, ir_node *obj) {
1085 assert (node->op = op_InstOf);
1086 set_irn_n (node, 1, obj);
1090 /* For unary and binary arithmetic operations the access to the
1091 operands can be factored out. Left is the first, right the
1092 second arithmetic value as listed in tech report 0999-33.
1093 unops are: Minus, Abs, Not, Conv, Cast
1094 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1095 Shr, Shrs, Rotate, Cmp */
1099 get_Call_mem (ir_node *node) {
1100 assert (node->op == op_Call);
1101 return get_irn_n(node, 0);
1105 set_Call_mem (ir_node *node, ir_node *mem) {
1106 assert (node->op == op_Call);
1107 set_irn_n(node, 0, mem);
1111 get_Call_ptr (ir_node *node) {
1112 assert (node->op == op_Call);
1113 return get_irn_n(node, 1);
1117 set_Call_ptr (ir_node *node, ir_node *ptr) {
1118 assert (node->op == op_Call);
1119 set_irn_n(node, 1, ptr);
1123 get_Call_param_arr (ir_node *node) {
1124 assert (node->op == op_Call);
1125 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1129 get_Call_n_params (ir_node *node) {
1130 assert (node->op == op_Call);
1131 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1135 get_Call_arity (ir_node *node) {
1136 assert (node->op == op_Call);
1137 return get_Call_n_params(node);
1141 set_Call_arity (ir_node *node, ir_node *arity) {
1142 assert (node->op == op_Call);
1147 get_Call_param (ir_node *node, int pos) {
1148 assert (node->op == op_Call);
1149 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1153 set_Call_param (ir_node *node, int pos, ir_node *param) {
1154 assert (node->op == op_Call);
1155 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1159 get_Call_type (ir_node *node) {
1160 assert (node->op == op_Call);
1161 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1165 set_Call_type (ir_node *node, type *tp) {
1166 assert (node->op == op_Call);
1167 assert ((get_unknown_type() == tp) || is_method_type(tp));
1168 node->attr.call.cld_tp = tp;
1171 int Call_has_callees(ir_node *node) {
1172 assert(node && node->op == op_Call);
1173 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1174 (node->attr.call.callee_arr != NULL));
1177 int get_Call_n_callees(ir_node * node) {
1178 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1179 return ARR_LEN(node->attr.call.callee_arr);
1182 entity * get_Call_callee(ir_node * node, int pos) {
1183 assert(pos >= 0 && pos < get_Call_n_callees(node));
1184 return node->attr.call.callee_arr[pos];
1187 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1188 assert(node->op == op_Call);
1189 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1190 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1192 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1195 void remove_Call_callee_arr(ir_node * node) {
1196 assert(node->op == op_Call);
1197 node->attr.call.callee_arr = NULL;
1200 ir_node * get_CallBegin_ptr (ir_node *node) {
1201 assert(node->op == op_CallBegin);
1202 return get_irn_n(node, 0);
1204 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1205 assert(node->op == op_CallBegin);
1206 set_irn_n(node, 0, ptr);
1208 ir_node * get_CallBegin_call (ir_node *node) {
1209 assert(node->op == op_CallBegin);
1210 return node->attr.callbegin.call;
1212 void set_CallBegin_call (ir_node *node, ir_node *call) {
1213 assert(node->op == op_CallBegin);
1214 node->attr.callbegin.call = call;
1219 ir_node * get_##OP##_left(ir_node *node) { \
1220 assert(node->op == op_##OP); \
1221 return get_irn_n(node, node->op->op_index); \
1223 void set_##OP##_left(ir_node *node, ir_node *left) { \
1224 assert(node->op == op_##OP); \
1225 set_irn_n(node, node->op->op_index, left); \
1227 ir_node *get_##OP##_right(ir_node *node) { \
1228 assert(node->op == op_##OP); \
1229 return get_irn_n(node, node->op->op_index + 1); \
1231 void set_##OP##_right(ir_node *node, ir_node *right) { \
1232 assert(node->op == op_##OP); \
1233 set_irn_n(node, node->op->op_index + 1, right); \
1237 ir_node *get_##OP##_op(ir_node *node) { \
1238 assert(node->op == op_##OP); \
1239 return get_irn_n(node, node->op->op_index); \
1241 void set_##OP##_op (ir_node *node, ir_node *op) { \
1242 assert(node->op == op_##OP); \
1243 set_irn_n(node, node->op->op_index, op); \
1253 get_Quot_mem (ir_node *node) {
1254 assert (node->op == op_Quot);
1255 return get_irn_n(node, 0);
1259 set_Quot_mem (ir_node *node, ir_node *mem) {
1260 assert (node->op == op_Quot);
1261 set_irn_n(node, 0, mem);
1267 get_DivMod_mem (ir_node *node) {
1268 assert (node->op == op_DivMod);
1269 return get_irn_n(node, 0);
1273 set_DivMod_mem (ir_node *node, ir_node *mem) {
1274 assert (node->op == op_DivMod);
1275 set_irn_n(node, 0, mem);
1281 get_Div_mem (ir_node *node) {
1282 assert (node->op == op_Div);
1283 return get_irn_n(node, 0);
1287 set_Div_mem (ir_node *node, ir_node *mem) {
1288 assert (node->op == op_Div);
1289 set_irn_n(node, 0, mem);
1295 get_Mod_mem (ir_node *node) {
1296 assert (node->op == op_Mod);
1297 return get_irn_n(node, 0);
1301 set_Mod_mem (ir_node *node, ir_node *mem) {
1302 assert (node->op == op_Mod);
1303 set_irn_n(node, 0, mem);
1320 get_Cast_type (ir_node *node) {
1321 assert (node->op == op_Cast);
1322 return node->attr.cast.totype;
1326 set_Cast_type (ir_node *node, type *to_tp) {
1327 assert (node->op == op_Cast);
1328 node->attr.cast.totype = to_tp;
1332 (is_unop)(const ir_node *node) {
1333 return __is_unop(node);
1337 get_unop_op (ir_node *node) {
1338 if (node->op->opar == oparity_unary)
1339 return get_irn_n(node, node->op->op_index);
1341 assert(node->op->opar == oparity_unary);
1346 set_unop_op (ir_node *node, ir_node *op) {
1347 if (node->op->opar == oparity_unary)
1348 set_irn_n(node, node->op->op_index, op);
1350 assert(node->op->opar == oparity_unary);
1354 (is_binop)(const ir_node *node) {
1355 return __is_binop(node);
1359 get_binop_left (ir_node *node) {
1360 if (node->op->opar == oparity_binary)
1361 return get_irn_n(node, node->op->op_index);
1363 assert(node->op->opar == oparity_binary);
1368 set_binop_left (ir_node *node, ir_node *left) {
1369 if (node->op->opar == oparity_binary)
1370 set_irn_n(node, node->op->op_index, left);
1372 assert (node->op->opar == oparity_binary);
1376 get_binop_right (ir_node *node) {
1377 if (node->op->opar == oparity_binary)
1378 return get_irn_n(node, node->op->op_index + 1);
1380 assert(node->op->opar == oparity_binary);
1385 set_binop_right (ir_node *node, ir_node *right) {
1386 if (node->op->opar == oparity_binary)
1387 set_irn_n(node, node->op->op_index + 1, right);
1389 assert (node->op->opar == oparity_binary);
1392 int is_Phi (ir_node *n) {
1398 if (op == op_Filter) return get_interprocedural_view();
1401 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1402 (get_irn_arity(n) > 0));
1407 int is_Phi0 (ir_node *n) {
1410 return ((get_irn_op(n) == op_Phi) &&
1411 (get_irn_arity(n) == 0) &&
1412 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1416 get_Phi_preds_arr (ir_node *node) {
1417 assert (node->op == op_Phi);
1418 return (ir_node **)&(get_irn_in(node)[1]);
1422 get_Phi_n_preds (ir_node *node) {
1423 assert (is_Phi(node) || is_Phi0(node));
1424 return (get_irn_arity(node));
1428 void set_Phi_n_preds (ir_node *node, int n_preds) {
1429 assert (node->op == op_Phi);
1434 get_Phi_pred (ir_node *node, int pos) {
1435 assert (is_Phi(node) || is_Phi0(node));
1436 return get_irn_n(node, pos);
1440 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1441 assert (is_Phi(node) || is_Phi0(node));
1442 set_irn_n(node, pos, pred);
1446 int is_memop(ir_node *node) {
1447 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1450 ir_node *get_memop_mem (ir_node *node) {
1451 assert(is_memop(node));
1452 return get_irn_n(node, 0);
1455 void set_memop_mem (ir_node *node, ir_node *mem) {
1456 assert(is_memop(node));
1457 set_irn_n(node, 0, mem);
1460 ir_node *get_memop_ptr (ir_node *node) {
1461 assert(is_memop(node));
1462 return get_irn_n(node, 1);
1465 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1466 assert(is_memop(node));
1467 set_irn_n(node, 1, ptr);
1471 get_Load_mem (ir_node *node) {
1472 assert (node->op == op_Load);
1473 return get_irn_n(node, 0);
1477 set_Load_mem (ir_node *node, ir_node *mem) {
1478 assert (node->op == op_Load);
1479 set_irn_n(node, 0, mem);
1483 get_Load_ptr (ir_node *node) {
1484 assert (node->op == op_Load);
1485 return get_irn_n(node, 1);
1489 set_Load_ptr (ir_node *node, ir_node *ptr) {
1490 assert (node->op == op_Load);
1491 set_irn_n(node, 1, ptr);
1495 get_Load_mode (ir_node *node) {
1496 assert (node->op == op_Load);
1497 return node->attr.load.load_mode;
1501 set_Load_mode (ir_node *node, ir_mode *mode) {
1502 assert (node->op == op_Load);
1503 node->attr.load.load_mode = mode;
1507 get_Load_volatility (ir_node *node) {
1508 assert (node->op == op_Load);
1509 return node->attr.load.volatility;
1513 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1514 assert (node->op == op_Load);
1515 node->attr.load.volatility = volatility;
1520 get_Store_mem (ir_node *node) {
1521 assert (node->op == op_Store);
1522 return get_irn_n(node, 0);
1526 set_Store_mem (ir_node *node, ir_node *mem) {
1527 assert (node->op == op_Store);
1528 set_irn_n(node, 0, mem);
1532 get_Store_ptr (ir_node *node) {
1533 assert (node->op == op_Store);
1534 return get_irn_n(node, 1);
1538 set_Store_ptr (ir_node *node, ir_node *ptr) {
1539 assert (node->op == op_Store);
1540 set_irn_n(node, 1, ptr);
1544 get_Store_value (ir_node *node) {
1545 assert (node->op == op_Store);
1546 return get_irn_n(node, 2);
1550 set_Store_value (ir_node *node, ir_node *value) {
1551 assert (node->op == op_Store);
1552 set_irn_n(node, 2, value);
1556 get_Store_volatility (ir_node *node) {
1557 assert (node->op == op_Store);
1558 return node->attr.store.volatility;
1562 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1563 assert (node->op == op_Store);
1564 node->attr.store.volatility = volatility;
1569 get_Alloc_mem (ir_node *node) {
1570 assert (node->op == op_Alloc);
1571 return get_irn_n(node, 0);
1575 set_Alloc_mem (ir_node *node, ir_node *mem) {
1576 assert (node->op == op_Alloc);
1577 set_irn_n(node, 0, mem);
1581 get_Alloc_size (ir_node *node) {
1582 assert (node->op == op_Alloc);
1583 return get_irn_n(node, 1);
1587 set_Alloc_size (ir_node *node, ir_node *size) {
1588 assert (node->op == op_Alloc);
1589 set_irn_n(node, 1, size);
1593 get_Alloc_type (ir_node *node) {
1594 assert (node->op == op_Alloc);
1595 return node->attr.a.type = skip_tid(node->attr.a.type);
1599 set_Alloc_type (ir_node *node, type *tp) {
1600 assert (node->op == op_Alloc);
1601 node->attr.a.type = tp;
1605 get_Alloc_where (ir_node *node) {
1606 assert (node->op == op_Alloc);
1607 return node->attr.a.where;
1611 set_Alloc_where (ir_node *node, where_alloc where) {
1612 assert (node->op == op_Alloc);
1613 node->attr.a.where = where;
1618 get_Free_mem (ir_node *node) {
1619 assert (node->op == op_Free);
1620 return get_irn_n(node, 0);
1624 set_Free_mem (ir_node *node, ir_node *mem) {
1625 assert (node->op == op_Free);
1626 set_irn_n(node, 0, mem);
1630 get_Free_ptr (ir_node *node) {
1631 assert (node->op == op_Free);
1632 return get_irn_n(node, 1);
1636 set_Free_ptr (ir_node *node, ir_node *ptr) {
1637 assert (node->op == op_Free);
1638 set_irn_n(node, 1, ptr);
1642 get_Free_size (ir_node *node) {
1643 assert (node->op == op_Free);
1644 return get_irn_n(node, 2);
1648 set_Free_size (ir_node *node, ir_node *size) {
1649 assert (node->op == op_Free);
1650 set_irn_n(node, 2, size);
1654 get_Free_type (ir_node *node) {
1655 assert (node->op == op_Free);
1656 return node->attr.f = skip_tid(node->attr.f);
1660 set_Free_type (ir_node *node, type *tp) {
1661 assert (node->op == op_Free);
1666 get_Sync_preds_arr (ir_node *node) {
1667 assert (node->op == op_Sync);
1668 return (ir_node **)&(get_irn_in(node)[1]);
1672 get_Sync_n_preds (ir_node *node) {
1673 assert (node->op == op_Sync);
1674 return (get_irn_arity(node));
/* Sets the number of Sync predecessors.
   NOTE(review): the body is truncated in this copy (original lines
   1681ff are missing) — restore from the upstream irnode.c. */
1679 set_Sync_n_preds (ir_node *node, int n_preds) {
1680 assert (node->op == op_Sync);
1685 get_Sync_pred (ir_node *node, int pos) {
1686 assert (node->op == op_Sync);
1687 return get_irn_n(node, pos);
1691 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1692 assert (node->op == op_Sync);
1693 set_irn_n(node, pos, pred);
1697 get_Proj_pred (ir_node *node) {
1698 assert (is_Proj(node));
1699 return get_irn_n(node, 0);
1703 set_Proj_pred (ir_node *node, ir_node *pred) {
1704 assert (is_Proj(node));
1705 set_irn_n(node, 0, pred);
1709 get_Proj_proj (ir_node *node) {
1710 assert (is_Proj(node));
1711 if (get_irn_opcode(node) == iro_Proj) {
1712 return node->attr.proj;
1714 assert(get_irn_opcode(node) == iro_Filter);
1715 return node->attr.filter.proj;
1720 set_Proj_proj (ir_node *node, long proj) {
1721 assert (node->op == op_Proj);
1722 node->attr.proj = proj;
1726 get_Tuple_preds_arr (ir_node *node) {
1727 assert (node->op == op_Tuple);
1728 return (ir_node **)&(get_irn_in(node)[1]);
1732 get_Tuple_n_preds (ir_node *node) {
1733 assert (node->op == op_Tuple);
1734 return (get_irn_arity(node));
/* Sets the number of Tuple predecessors.
   NOTE(review): the body is truncated in this copy (original lines
   1741ff are missing) — restore from the upstream irnode.c. */
1739 set_Tuple_n_preds (ir_node *node, int n_preds) {
1740 assert (node->op == op_Tuple);
1745 get_Tuple_pred (ir_node *node, int pos) {
1746 assert (node->op == op_Tuple);
1747 return get_irn_n(node, pos);
1751 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1752 assert (node->op == op_Tuple);
1753 set_irn_n(node, pos, pred);
1757 get_Id_pred (ir_node *node) {
1758 assert (node->op == op_Id);
1759 return get_irn_n(node, 0);
1763 set_Id_pred (ir_node *node, ir_node *pred) {
1764 assert (node->op == op_Id);
1765 set_irn_n(node, 0, pred);
1768 ir_node *get_Confirm_value (ir_node *node) {
1769 assert (node->op == op_Confirm);
1770 return get_irn_n(node, 0);
1772 void set_Confirm_value (ir_node *node, ir_node *value) {
1773 assert (node->op == op_Confirm);
1774 set_irn_n(node, 0, value);
1776 ir_node *get_Confirm_bound (ir_node *node) {
1777 assert (node->op == op_Confirm);
1778 return get_irn_n(node, 1);
1780 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1781 assert (node->op == op_Confirm);
1782 set_irn_n(node, 0, bound);
1784 pn_Cmp get_Confirm_cmp (ir_node *node) {
1785 assert (node->op == op_Confirm);
1786 return node->attr.confirm_cmp;
1788 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1789 assert (node->op == op_Confirm);
1790 node->attr.confirm_cmp = cmp;
/* Returns / sets the (intraprocedural) predecessor of a Filter node.
   NOTE(review): the return statement of the getter and the set_irn_n
   call of the setter are missing in this copy (original 1797, 1802) —
   restore from the upstream irnode.c (both presumably use slot 0). */
1795 get_Filter_pred (ir_node *node) {
1796 assert(node->op == op_Filter);
1800 set_Filter_pred (ir_node *node, ir_node *pred) {
1801 assert(node->op == op_Filter);
1805 get_Filter_proj(ir_node *node) {
1806 assert(node->op == op_Filter);
1807 return node->attr.filter.proj;
1810 set_Filter_proj (ir_node *node, long proj) {
1811 assert(node->op == op_Filter);
1812 node->attr.filter.proj = proj;
1815 /* Don't use get_irn_arity, get_irn_n in implementation as access
1816 shall work independent of view!!! */
/* Installs the interprocedural (call-graph) predecessor array of a
   Filter node. (Re)allocates in_cg and the backedge flags on the
   graph's obstack when the arity changes; in_cg[0] keeps the block
   predecessor, the cg predecessors follow from slot 1.
   NOTE(review): the closing brace of the if and of the function are
   missing in this copy (original 1824, 1826/1827). */
1817 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1818 assert(node->op == op_Filter);
1819 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1820 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1821 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1822 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1823 node->attr.filter.in_cg[0] = node->in[0];
1825 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1828 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1829 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1830 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1831 node->attr.filter.in_cg[pos + 1] = pred;
1833 int get_Filter_n_cg_preds(ir_node *node) {
1834 assert(node->op == op_Filter && node->attr.filter.in_cg);
1835 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1837 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1839 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1841 arity = ARR_LEN(node->attr.filter.in_cg);
1842 assert(pos < arity - 1);
1843 return node->attr.filter.in_cg[pos + 1];
1848 get_irn_irg(ir_node *node) {
1849 if (get_irn_op(node) != op_Block)
1850 node = get_nodes_block(node);
1851 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1852 node = get_nodes_block(node);
1853 assert(get_irn_op(node) == op_Block);
1854 return node->attr.block.irg;
1858 /*----------------------------------------------------------------*/
1859 /* Auxiliary routines */
1860 /*----------------------------------------------------------------*/
1863 skip_Proj (ir_node *node) {
1864 /* don't assert node !!! */
1865 if (node && is_Proj(node)) {
1866 return get_Proj_pred(node);
1873 skip_Tuple (ir_node *node) {
1876 if (!get_opt_normalize()) return node;
1878 node = skip_Id(node);
1879 if (get_irn_op(node) == op_Proj) {
1880 pred = skip_Id(get_Proj_pred(node));
1881 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1882 pred = skip_Id(skip_Tuple(pred));
1883 if (get_irn_op(pred) == op_Tuple)
1884 return get_Tuple_pred(pred, get_Proj_proj(node));
1889 /** returns operand of node if node is a Cast */
1890 ir_node *skip_Cast (ir_node *node) {
1891 if (node && get_irn_op(node) == op_Cast) {
1892 return skip_Id(get_irn_n(node, 0));
1899 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1900 than any other approach, as Id chains are resolved and all point to the real node, or
1901 all id's are self loops. */
/* NOTE(review): TWO definitions of skip_Id appear in this copy; in the
   upstream file one of them is disabled by a preprocessor conditional
   whose #if/#else lines are missing here — confirm which one is live.
   Also missing in this body: the declaration of `res` and the tail of
   the function (original 1912-1913, 1919ff). */
1903 skip_Id (ir_node *node) {
1904 /* don't assert node !!! */
1906 if (!get_opt_normalize()) return node;
1908 /* Don't use get_Id_pred: We get into an endless loop for
1909 self-referencing Ids. */
1910 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1911 ir_node *rem_pred = node->in[0+1];
1914 assert (get_irn_arity (node) > 0);
/* Temporarily turn the node into a self-loop so recursion terminates
   on Id cycles. */
1916 node->in[0+1] = node;
1917 res = skip_Id(rem_pred);
1918 if (res->op == op_Id) /* self-loop */ return node;
1920 node->in[0+1] = res;
1927 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1928 than any other approach, as Id chains are resolved and all point to the real node, or
1929 all id's are self loops. */
/* NOTE(review): second skip_Id definition — see the note on the first
   one above; one of the two is presumably #if-disabled upstream. The
   tail of this body (the final return and closing braces, original
   1958ff) is missing in this copy. */
1931 skip_Id (ir_node *node) {
1933 /* don't assert node !!! */
1935 if (!node || (node->op != op_Id)) return node;
1937 if (!get_opt_normalize()) return node;
1939 /* Don't use get_Id_pred: We get into an endless loop for
1940 self-referencing Ids. */
1941 pred = node->in[0+1];
1943 if (pred->op != op_Id) return pred;
1945 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1946 ir_node *rem_pred, *res;
1948 if (pred->op != op_Id) return pred; /* shortcut */
1951 assert (get_irn_arity (node) > 0);
/* Temporarily turn the node into a self-loop so recursion terminates
   on Id cycles. */
1953 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1954 res = skip_Id(rem_pred);
1955 if (res->op == op_Id) /* self-loop */ return node;
1957 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1966 (is_Bad)(const ir_node *node) {
1967 return __is_Bad(node);
1971 (is_no_Block)(const ir_node *node) {
1972 return __is_no_Block(node);
1976 (is_Block)(const ir_node *node) {
1977 return __is_Block(node);
1980 /* returns true if node is a Unknown node. */
1982 is_Unknown (const ir_node *node) {
1984 return (get_irn_op(node) == op_Unknown);
1988 is_Proj (const ir_node *node) {
1990 return node->op == op_Proj
1991 || (!get_interprocedural_view() && node->op == op_Filter);
1994 /* Returns true if the operation manipulates control flow. */
1996 is_cfop(const ir_node *node) {
1997 return is_cfopcode(get_irn_op(node));
2000 /* Returns true if the operation manipulates interprocedural control flow:
2001 CallBegin, EndReg, EndExcept */
2002 int is_ip_cfop(const ir_node *node) {
2003 return is_ip_cfopcode(get_irn_op(node));
2006 /* Returns true if the operation can change the control flow because
2009 is_fragile_op(const ir_node *node) {
2010 return is_op_fragile(get_irn_op(node));
2013 /* Returns the memory operand of fragile operations. */
2014 ir_node *get_fragile_op_mem(ir_node *node) {
2015 assert(node && is_fragile_op(node));
2017 switch (get_irn_opcode (node)) {
/* NOTE(review): the case labels are missing in this copy (original
   2018-2025) — presumably the fragile opcodes (Call, Load, Store,
   Alloc, the division ops, ...), which all keep their memory input at
   slot 0. The default branch and the function tail (original 2027ff)
   are also missing. Restore from the upstream irnode.c. */
2026 return get_irn_n(node, 0);
2031 assert(0 && "should not be reached");
2036 /* Returns true if the operation is a forking control flow operation. */
2038 is_forking_op(const ir_node *node) {
2039 return is_op_forking(get_irn_op(node));
2043 #ifdef DEBUG_libfirm
/* Debug helper: prints a node, its block and all predecessors to stdout. */
2044 void dump_irn (ir_node *n) {
2045 int i, arity = get_irn_arity(n);
2046 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* NOTE(review): a line is missing here (original 2047) — presumably an
   `if (!is_Block(n)) {` guard, since get_irn_n(n, -1) fetches the block
   and a Block has none. Its closing brace (original 2051) and the
   function/loop closing braces (original 2057-2059) are also missing.
   Restore from the upstream irnode.c. */
2048 ir_node *pred = get_irn_n(n, -1);
2049 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2050 get_irn_node_nr(pred), (void *)pred);
2052 printf(" preds: \n");
2053 for (i = 0; i < arity; ++i) {
2054 ir_node *pred = get_irn_n(n, i);
2055 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2056 get_irn_node_nr(pred), (void *)pred);
2060 #else /* DEBUG_libfirm */
/* Release build: node dumping is a no-op. */
2061 void dump_irn (ir_node *n) {}
2062 #endif /* DEBUG_libfirm */