3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
24 #include "irbackedge_t.h"
31 /* some constants fixing the positions of nodes predecessors
33 #define CALL_PARAM_OFFSET 2
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 /* Declarations for inlineing */
39 INLINE ir_node ** get_irn_in (const ir_node *node);
40 INLINE ir_mode *get_irn_mode (const ir_node *node);
41 INLINE ir_op *get_irn_op (const ir_node *node);
42 INLINE opcode get_irn_opcode (const ir_node *node);
43 INLINE ident *get_irn_opident (const ir_node *node);
44 INLINE type *get_SymConst_type (ir_node *node);
45 INLINE ir_node *skip_nop (ir_node *node);
46 INLINE int is_Proj (const ir_node *node);
49 static const char *pnc_name_arr [] = {
50 "False", "Eq", "Lt", "Le",
51 "Gt", "Ge", "Lg", "Leg", "Uo",
52 "Ue", "Ul", "Ule", "Ug", "Uge",
57 * returns the pnc name from a pnc constant
59 INLINE const char *get_pnc_string(int pnc) {
  /* NOTE(review): pnc is used unchecked as an index into pnc_name_arr;
     out-of-range values read past the array -- confirm callers only pass
     valid pnc constants. */
60 return pnc_name_arr[pnc];
64 * Calculates the negated pnc condition.
67 get_negated_pnc(int pnc) {
  /* Negation maps each relation to its complement: every ordered
     relation swaps with an unordered one (Lt <-> Uge, Le <-> Ug, ...)
     so the result is also correct for floating point compares, where
     two values may compare unordered (Uo). */
69 case False: return True; break;
70 case Eq: return Ne; break;
71 case Lt: return Uge; break;
72 case Le: return Ug; break;
73 case Gt: return Ule; break;
74 case Ge: return Ul; break;
75 case Lg: return Ue; break;
76 case Leg: return Uo; break;
77 case Uo: return Leg; break;
78 case Ue: return Lg; break;
79 case Ul: return Ge; break;
80 case Ule: return Gt; break;
81 case Ug: return Le; break;
82 case Uge: return Lt; break;
83 case Ne: return Eq; break;
84 case True: return False; break;
86 return 99; /* unreachable for valid pnc constants; keeps gcc's
                missing-return warning quiet */
89 const char *pns_name_arr [] = {
90 "initial_exec", "global_store",
91 "frame_base", "globals", "args"
94 const char *symconst_name_arr [] = {
95 "type_tag", "size", "linkage_ptr_info"
104 * irnode constructor.
105 * Create a new irnode in irg, with an op, mode, arity and
106 * some incoming irnodes.
107 * If arity is negative, a node with a dynamic array is created.
110 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
111 int arity, ir_node **in)
/* nodes live on the graph's obstack; size = common node header plus the
   op-specific attribute area appended at 'attr' */
114 int node_size = offsetof (ir_node, attr) + op->attr_size;
116 res = (ir_node *) obstack_alloc (irg->obst, node_size);
118 res->kind = k_ir_node;
/* dynamic (growable) in-array: slot 0 is reserved for the block */
124 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
/* fixed-size in-array on the obstack: slot 0 = block, 1..arity = in[] */
126 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
127 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
130 set_irn_dbg_info(res, db);
/* each node gets a unique number, used for debugging and dumping */
134 res->node_nr = get_irp_new_node_nr();
140 /* Copies all attributes stored in the old node to the new node.
141 Assumes both have the same opcode and sufficient size. */
143 copy_attrs (const ir_node *old_node, ir_node *new_node) {
144 assert(get_irn_op(old_node) == get_irn_op(new_node));
/* shallow copy: pointers inside the attribute union are shared between
   old and new node afterwards */
145 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
148 /** getting some parameters from ir_nodes **/
151 is_ir_node (const void *thing) {
152 if (get_kind(thing) == k_ir_node)
158 /* returns the number of predecessors without the block predecessor. */
160 get_irn_intra_arity (const ir_node *node) {
/* in[0] holds the block predecessor, hence the -1 */
162 return ARR_LEN(node->in) - 1;
165 /* returns the number of predecessors without the block predecessor. */
167 get_irn_inter_arity (const ir_node *node) {
/* in the interprocedural view Filter and Block use their in_cg arrays
   (call-graph edges) instead of the normal in array */
169 if (get_irn_opcode(node) == iro_Filter) {
170 assert(node->attr.filter.in_cg);
171 return ARR_LEN(node->attr.filter.in_cg) - 1;
172 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
173 return ARR_LEN(node->attr.block.in_cg) - 1;
175 return get_irn_intra_arity(node);
178 /* returns the number of predecessors without the block predecessor. */
180 get_irn_arity (const ir_node *node) {
/* dispatch on the global interprocedural_view flag */
182 if (interprocedural_view) return get_irn_inter_arity(node);
183 return get_irn_intra_arity(node);
186 /* Returns the array with ins. This array is shifted with respect to the
187 array accessed by get_irn_n: The block operand is at position 0 not -1.
188 (@@@ This should be changed.)
189 The order of the predecessors in this array is not guaranteed, except that
190 lists of operands as predecessors of Block or arguments of a Call are
193 get_irn_in (const ir_node *node) {
195 if (interprocedural_view) { /* handle Filter and Block specially */
196 if (get_irn_opcode(node) == iro_Filter) {
197 assert(node->attr.filter.in_cg);
198 return node->attr.filter.in_cg;
199 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
200 return node->attr.block.in_cg;
202 /* else fall through */
208 set_irn_in (ir_node *node, int arity, ir_node **in) {
211 if (interprocedural_view) { /* handle Filter and Block specially */
212 if (get_irn_opcode(node) == iro_Filter) {
213 assert(node->attr.filter.in_cg);
214 arr = &node->attr.filter.in_cg;
215 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
216 arr = &node->attr.block.in_cg;
223 if (arity != ARR_LEN(*arr) - 1) {
224 ir_node * block = (*arr)[0];
225 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
228 fix_backedges(current_ir_graph->obst, node);
229 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
233 get_irn_intra_n (ir_node *node, int n) {
/* NB: this accessor has a side effect -- it skips Id/nop chains and
   writes the compressed result back into the in array */
234 return (node->in[n + 1] = skip_nop(node->in[n + 1]));
238 get_irn_inter_n (ir_node *node, int n) {
239 /* handle Filter and Block specially */
240 if (get_irn_opcode(node) == iro_Filter) {
241 assert(node->attr.filter.in_cg);
242 return (node->attr.filter.in_cg[n + 1] = skip_nop(node->attr.filter.in_cg[n + 1]));
243 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
244 return (node->attr.block.in_cg[n + 1] = skip_nop(node->attr.block.in_cg[n + 1]));
/* all other node kinds look the same in both views */
247 return get_irn_intra_n (node, n);
250 /* to iterate through the predecessors without touching the array */
251 /* To iterate over the operands iterate from 0 to i < get_irn_arity(),
252 to iterate including the Block predecessor iterate from i = -1 to
254 If it is a block, the entry -1 is NULL. */
256 get_irn_n (ir_node *node, int n) {
257 assert(node); assert(-1 <= n && n < get_irn_arity(node));
258 if (interprocedural_view) return get_irn_inter_n (node, n);
259 return get_irn_intra_n (node, n);
264 set_irn_n (ir_node *node, int n, ir_node *in) {
265 assert(node && -1 <= n && n < get_irn_arity(node));
/* setting the block (n == -1) of a Filter must update both the normal
   in array and the interprocedural in_cg array so the two views stay
   consistent */
266 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
267 /* Change block pred in both views! */
268 node->in[n + 1] = in;
269 assert(node->attr.filter.in_cg);
270 node->attr.filter.in_cg[n + 1] = in;
273 if (interprocedural_view) { /* handle Filter and Block specially */
274 if (get_irn_opcode(node) == iro_Filter) {
275 assert(node->attr.filter.in_cg);
276 node->attr.filter.in_cg[n + 1] = in;
278 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
279 node->attr.block.in_cg[n + 1] = in;
282 /* else fall through */
/* default: ordinary predecessor slot (shifted by one for the block) */
284 node->in[n + 1] = in;
288 get_irn_mode (const ir_node *node)
295 set_irn_mode (ir_node *node, ir_mode *mode)
303 get_irn_modecode (const ir_node *node)
306 return node->mode->code;
309 /** Gets the string representation of the mode .*/
311 get_irn_modename (const ir_node *node)
314 return get_mode_name(node->mode);
318 get_irn_modeident (const ir_node *node)
321 return get_mode_ident(node->mode);
325 get_irn_op (const ir_node *node)
331 /* should be private to the library: */
333 set_irn_op (ir_node *node, ir_op *op)
340 get_irn_opcode (const ir_node *node)
342 assert (k_ir_node == get_kind(node));
344 return node->op->code;
348 get_irn_opname (const ir_node *node)
351 return get_id_str(node->op->name);
355 get_irn_opident (const ir_node *node)
358 return node->op->name;
362 get_irn_visited (const ir_node *node)
365 return node->visited;
369 set_irn_visited (ir_node *node, unsigned long visited)
372 node->visited = visited;
376 mark_irn_visited (ir_node *node) {
378 node->visited = current_ir_graph->visited;
382 irn_not_visited (const ir_node *node) {
384 return (node->visited < current_ir_graph->visited);
388 irn_visited (const ir_node *node) {
390 return (node->visited >= current_ir_graph->visited);
394 set_irn_link (ir_node *node, void *link) {
396 /* Link field is used for Phi construction and various optimizations
398 assert(get_irg_phase_state(current_ir_graph) != phase_building);
404 get_irn_link (const ir_node *node) {
409 /* Outputs a unique number for this node */
411 get_irn_node_nr(const ir_node *node) {
414 return node->node_nr;
421 get_irn_const_attr (ir_node *node)
423 assert (node->op == op_Const);
424 return node->attr.con;
428 get_irn_proj_attr (ir_node *node)
430 assert (node->op == op_Proj);
431 return node->attr.proj;
435 get_irn_alloc_attr (ir_node *node)
437 assert (node->op == op_Alloc);
442 get_irn_free_attr (ir_node *node)
444 assert (node->op == op_Free);
445 return node->attr.f = skip_tid(node->attr.f);
449 get_irn_symconst_attr (ir_node *node)
451 assert (node->op == op_SymConst);
456 get_irn_call_attr (ir_node *node)
458 assert (node->op == op_Call);
459 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
463 get_irn_sel_attr (ir_node *node)
465 assert (node->op == op_Sel);
470 get_irn_phi_attr (ir_node *node)
472 assert (node->op == op_Phi);
473 return node->attr.phi0_pos;
477 get_irn_block_attr (ir_node *node)
479 assert (node->op == op_Block);
480 return node->attr.block;
483 /** manipulate fields of individual nodes **/
485 /* this works for all except Block */
487 get_nodes_Block (ir_node *node) {
488 assert (!(node->op == op_Block));
489 return get_irn_n(node, -1);
493 set_nodes_Block (ir_node *node, ir_node *block) {
494 assert (!(node->op == op_Block));
495 set_irn_n(node, -1, block);
498 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
499 * from Start. If so returns frame type, else Null. */
500 type *is_frame_pointer(ir_node *n) {
501 if ((get_irn_op(n) == op_Proj) &&
502 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
503 ir_node *start = get_Proj_pred(n);
504 if (get_irn_op(start) == op_Start) {
505 return get_irg_frame_type(get_irn_irg(start));
511 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
512 * from Start. If so returns global type, else Null. */
513 type *is_globals_pointer(ir_node *n) {
514 if ((get_irn_op(n) == op_Proj) &&
515 (get_Proj_proj(n) == pn_Start_P_globals)) {
516 ir_node *start = get_Proj_pred(n);
517 if (get_irn_op(start) == op_Start) {
518 return get_glob_type();
524 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
525 * from Start. If so returns 1, else 0. */
526 int is_value_arg_pointer(ir_node *n) {
527 if ((get_irn_op(n) == op_Proj) &&
528 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
529 (get_irn_op(get_Proj_pred(n)) == op_Start))
534 /* Returns an array with the predecessors of the Block. Depending on
535 the implementation of the graph data structure this can be a copy of
536 the internal representation of predecessors as well as the internal
537 array itself. Therefore writing to this array might obstruct the ir. */
539 get_Block_cfgpred_arr (ir_node *node)
541 assert ((node->op == op_Block));
542 return (ir_node **)&(get_irn_in(node)[1]);
547 get_Block_n_cfgpreds (ir_node *node) {
548 assert ((node->op == op_Block));
549 return (get_irn_arity(node));
553 get_Block_cfgpred (ir_node *node, int pos) {
554 assert (node->op == op_Block);
/* NOTE(review): the dump/printf below looks like leftover debugging
   output emitted just before the range assert fires -- consider removing
   it or guarding it behind a debug flag */
556 if (-1 > pos || get_irn_arity(node) <= pos) {
557 dump_ir_block_graph(current_ir_graph);
558 printf("pos: %d, arity: %d ", pos, get_irn_arity(node));
561 assert(node); assert(-1 <= pos && pos < get_irn_arity(node));
562 return get_irn_n(node, pos);
566 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
567 assert (node->op == op_Block);
568 set_irn_n(node, pos, pred);
572 get_Block_matured (ir_node *node) {
573 assert (node->op == op_Block);
574 return node->attr.block.matured;
578 set_Block_matured (ir_node *node, bool matured) {
579 assert (node->op == op_Block);
580 node->attr.block.matured = matured;
583 get_Block_block_visited (ir_node *node) {
584 assert (node->op == op_Block);
585 return node->attr.block.block_visited;
589 set_Block_block_visited (ir_node *node, unsigned long visit) {
590 assert (node->op == op_Block);
591 node->attr.block.block_visited = visit;
594 /* For this current_ir_graph must be set. */
596 mark_Block_block_visited (ir_node *node) {
597 assert (node->op == op_Block);
598 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
602 Block_not_block_visited(ir_node *node) {
603 assert (node->op == op_Block);
604 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
608 get_Block_graph_arr (ir_node *node, int pos) {
609 assert (node->op == op_Block);
610 return node->attr.block.graph_arr[pos+1];
614 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
615 assert (node->op == op_Block);
616 node->attr.block.graph_arr[pos+1] = value;
619 /* handler handling for Blocks * /
621 set_Block_handler (ir_node *block, ir_node *handler) {
622 assert ((block->op == op_Block));
623 assert ((handler->op == op_Block));
624 block->attr.block.handler_entry = handler;
628 get_Block_handler (ir_node *block) {
629 assert ((block->op == op_Block));
630 return (block->attr.block.handler_entry);
633 / * handler handling for Nodes * /
635 set_Node_handler (ir_node *node, ir_node *handler) {
636 set_Block_handler (get_nodes_Block (node), handler);
640 get_Node_handler (ir_node *node) {
641 return (get_Block_handler (get_nodes_Block (node)));
644 / * exc_t handling for Blocks * /
645 void set_Block_exc (ir_node *block, exc_t exc) {
646 assert ((block->op == op_Block));
647 block->attr.block.exc = exc;
650 exc_t get_Block_exc (ir_node *block) {
651 assert ((block->op == op_Block));
652 return (block->attr.block.exc);
655 / * exc_t handling for Nodes * /
656 void set_Node_exc (ir_node *node, exc_t exc) {
657 set_Block_exc (get_nodes_Block (node), exc);
660 exc_t get_Node_exc (ir_node *node) {
661 return (get_Block_exc (get_nodes_Block (node)));
665 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
666 assert(node->op == op_Block);
/* (re)allocate the interprocedural predecessor array only when the
   arity changed; slot 0 is the (unused) block slot and stays NULL */
667 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
668 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
669 node->attr.block.in_cg[0] = NULL;
670 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
672 /* Fix backedge array. fix_backedges operates depending on
673 interprocedural_view. */
/* temporarily force the interprocedural view so fix_backedges operates
   on in_cg, then restore the caller's setting */
674 bool ipv = interprocedural_view;
675 interprocedural_view = true;
676 fix_backedges(current_ir_graph->obst, node);
677 interprocedural_view = ipv;
680 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
683 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
684 assert(node->op == op_Block &&
685 node->attr.block.in_cg &&
686 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
687 node->attr.block.in_cg[pos + 1] = pred;
690 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
691 assert(node->op == op_Block);
692 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
695 int get_Block_cg_n_cfgpreds(ir_node * node) {
696 assert(node->op == op_Block);
697 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
700 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
701 assert(node->op == op_Block && node->attr.block.in_cg);
702 return node->attr.block.in_cg[pos + 1];
705 void remove_Block_cg_cfgpred_arr(ir_node * node) {
706 assert(node->op == op_Block);
707 node->attr.block.in_cg = NULL;
710 /* Start references the irg it is in. */
712 get_Start_irg(ir_node *node) {
713 return get_irn_irg(node);
717 set_Start_irg(ir_node *node, ir_graph *irg) {
718 assert(node->op == op_Start);
719 assert(is_ir_graph(irg));
720 assert(0 && " Why set irg? ");
721 //node->attr.start.irg = irg;
725 get_End_n_keepalives(ir_node *end) {
726 assert (end->op == op_End);
727 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
731 get_End_keepalive(ir_node *end, int pos) {
732 assert (end->op == op_End);
733 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
737 add_End_keepalive (ir_node *end, ir_node *ka) {
738 assert (end->op == op_End);
739 ARR_APP1 (ir_node *, end->in, ka);
743 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
744 assert (end->op == op_End);
745 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
749 free_End (ir_node *end) {
750 assert (end->op == op_End);
752 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
753 end->in = NULL; /* @@@ make sure we get an error if we use the
754 in array afterwards ... */
757 ir_graph *get_EndReg_irg (ir_node *end) {
758 return get_irn_irg(end);
761 ir_graph *get_EndExcept_irg (ir_node *end) {
762 return get_irn_irg(end);
766 > Implementing the case construct (which is where the constant Proj node is
767 > important) involves far more than simply determining the constant values.
768 > We could argue that this is more properly a function of the translator from
769 > Firm to the target machine. That could be done if there was some way of
770 > projecting "default" out of the Cond node.
771 I know it's complicated.
772 Basically there are two problems:
773 - determining the gaps between the projs
774 - determining the biggest case constant to know the proj number for
776 I see several solutions:
777 1. Introduce a ProjDefault node. Solves both problems.
778 This means to extend all optimizations executed during construction.
779 2. Give the Cond node for switch two flavors:
780 a) there are no gaps in the projs (existing flavor)
781 b) gaps may exist, default proj is still the Proj with the largest
782 projection number. This covers also the gaps.
783 3. Fix the semantic of the Cond to that of 2b)
785 Solution 2 seems to be the best:
786 Computing the gaps in the Firm representation is not too hard, i.e.,
787 libFIRM can implement a routine that transforms between the two
788 flavours. This is also possible for 1) but 2) does not require to
789 change any existing optimization.
790 Further it should be far simpler to determine the biggest constant than
792 I don't want to choose 3) as 2a) seems to have advantages for
793 dataflow analysis and 3) does not allow to convert the representation to
797 get_Cond_selector (ir_node *node) {
798 assert (node->op == op_Cond);
799 return get_irn_n(node, 0);
803 set_Cond_selector (ir_node *node, ir_node *selector) {
804 assert (node->op == op_Cond);
805 set_irn_n(node, 0, selector);
809 get_Cond_kind (ir_node *node) {
810 assert (node->op == op_Cond);
811 return node->attr.c.kind;
815 set_Cond_kind (ir_node *node, cond_kind kind) {
816 assert (node->op == op_Cond);
817 node->attr.c.kind = kind;
821 get_Return_mem (ir_node *node) {
822 assert (node->op == op_Return);
823 return get_irn_n(node, 0);
827 set_Return_mem (ir_node *node, ir_node *mem) {
828 assert (node->op == op_Return);
829 set_irn_n(node, 0, mem);
833 get_Return_n_ress (ir_node *node) {
834 assert (node->op == op_Return);
835 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
839 get_Return_res_arr (ir_node *node)
841 assert ((node->op == op_Return));
842 if (get_Return_n_ress(node) > 0)
843 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
850 set_Return_n_res (ir_node *node, int results) {
851 assert (node->op == op_Return);
856 get_Return_res (ir_node *node, int pos) {
857 assert (node->op == op_Return);
858 assert (get_Return_n_ress(node) > pos);
859 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
863 set_Return_res (ir_node *node, int pos, ir_node *res){
864 assert (node->op == op_Return);
865 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
869 get_Raise_mem (ir_node *node) {
870 assert (node->op == op_Raise);
871 return get_irn_n(node, 0);
875 set_Raise_mem (ir_node *node, ir_node *mem) {
876 assert (node->op == op_Raise);
877 set_irn_n(node, 0, mem);
881 get_Raise_exo_ptr (ir_node *node) {
882 assert (node->op == op_Raise);
883 return get_irn_n(node, 1);
887 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
888 assert (node->op == op_Raise);
889 set_irn_n(node, 1, exo_ptr);
892 INLINE tarval *get_Const_tarval (ir_node *node) {
893 assert (node->op == op_Const);
894 return node->attr.con.tv;
898 set_Const_tarval (ir_node *node, tarval *con) {
899 assert (node->op == op_Const);
900 node->attr.con.tv = con;
904 /* The source language type. Must be an atomic type. Mode of type must
905 be mode of node. For tarvals from entities type must be pointer to
908 get_Const_type (ir_node *node) {
909 assert (node->op == op_Const);
910 return node->attr.con.tp;
914 set_Const_type (ir_node *node, type *tp) {
915 assert (node->op == op_Const);
916 if (tp != unknown_type) {
917 assert (is_atomic_type(tp));
918 assert (get_type_mode(tp) == get_irn_mode(node));
919 assert (!tarval_is_entity(get_Const_tarval(node)) ||
920 (is_pointer_type(tp) &&
921 (get_pointer_points_to_type(tp) ==
922 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
925 node->attr.con.tp = tp;
930 get_SymConst_kind (const ir_node *node) {
931 assert (node->op == op_SymConst);
932 return node->attr.i.num;
936 set_SymConst_kind (ir_node *node, symconst_kind num) {
937 assert (node->op == op_SymConst);
938 node->attr.i.num = num;
942 get_SymConst_type (ir_node *node) {
943 assert ( (node->op == op_SymConst)
944 && ( get_SymConst_kind(node) == type_tag
945 || get_SymConst_kind(node) == size));
946 return node->attr.i.tori.typ = skip_tid(node->attr.i.tori.typ);
950 set_SymConst_type (ir_node *node, type *tp) {
951 assert ( (node->op == op_SymConst)
952 && ( get_SymConst_kind(node) == type_tag
953 || get_SymConst_kind(node) == size));
954 node->attr.i.tori.typ = tp;
958 get_SymConst_ptrinfo (ir_node *node) {
959 assert ( (node->op == op_SymConst)
960 && (get_SymConst_kind(node) == linkage_ptr_info));
961 return node->attr.i.tori.ptrinfo;
965 set_SymConst_ptrinfo (ir_node *node, ident *ptrinfo) {
966 assert ( (node->op == op_SymConst)
967 && (get_SymConst_kind(node) == linkage_ptr_info));
968 node->attr.i.tori.ptrinfo = ptrinfo;
972 get_SymConst_type_or_id (ir_node *node) {
973 assert (node->op == op_SymConst);
974 return &(node->attr.i.tori);
978 set_SymConst_type_or_id (ir_node *node, type_or_id_p tori) {
979 assert (node->op == op_SymConst);
980 memcpy (&(node->attr.i.tori), tori, sizeof(type_or_id));
984 get_Sel_mem (ir_node *node) {
985 assert (node->op == op_Sel);
986 return get_irn_n(node, 0);
990 set_Sel_mem (ir_node *node, ir_node *mem) {
991 assert (node->op == op_Sel);
992 set_irn_n(node, 0, mem);
996 get_Sel_ptr (ir_node *node) {
997 assert (node->op == op_Sel);
998 return get_irn_n(node, 1);
1002 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1003 assert (node->op == op_Sel);
1004 set_irn_n(node, 1, ptr);
1008 get_Sel_n_indexs (ir_node *node) {
1009 assert (node->op == op_Sel);
1010 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1014 get_Sel_index_arr (ir_node *node)
1016 assert ((node->op == op_Sel));
1017 if (get_Sel_n_indexs(node) > 0)
1018 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1024 get_Sel_index (ir_node *node, int pos) {
1025 assert (node->op == op_Sel);
1026 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1030 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1031 assert (node->op == op_Sel);
1032 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1036 get_Sel_entity (ir_node *node) {
1037 assert (node->op == op_Sel);
1038 return node->attr.s.ent;
1042 set_Sel_entity (ir_node *node, entity *ent) {
1043 assert (node->op == op_Sel);
1044 node->attr.s.ent = ent;
1048 get_InstOf_ent (ir_node *node) {
1049 assert (node->op = op_InstOf);
1050 return (node->attr.io.ent);
1054 set_InstOf_ent (ir_node *node, type *ent) {
1055 assert (node->op = op_InstOf);
1056 node->attr.io.ent = ent;
1060 get_InstOf_store (ir_node *node) {
1061 assert (node->op = op_InstOf);
1062 return (get_irn_n (node, 0));
1066 set_InstOf_store (ir_node *node, ir_node *obj) {
1067 assert (node->op = op_InstOf);
1068 set_irn_n (node, 0, obj);
1072 get_InstOf_obj (ir_node *node) {
1073 assert (node->op = op_InstOf);
1074 return (get_irn_n (node, 1));
1078 set_InstOf_obj (ir_node *node, ir_node *obj) {
1079 assert (node->op = op_InstOf);
1080 set_irn_n (node, 1, obj);
1084 /* For unary and binary arithmetic operations the access to the
1085 operands can be factored out. Left is the first, right the
1086 second arithmetic value as listed in tech report 0999-33.
1087 unops are: Minus, Abs, Not, Conv, Cast
1088 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1089 Shr, Shrs, Rotate, Cmp */
1093 get_Call_mem (ir_node *node) {
1094 assert (node->op == op_Call);
1095 return get_irn_n(node, 0);
1099 set_Call_mem (ir_node *node, ir_node *mem) {
1100 assert (node->op == op_Call);
1101 set_irn_n(node, 0, mem);
1105 get_Call_ptr (ir_node *node) {
1106 assert (node->op == op_Call);
1107 return get_irn_n(node, 1);
1111 set_Call_ptr (ir_node *node, ir_node *ptr) {
1112 assert (node->op == op_Call);
1113 set_irn_n(node, 1, ptr);
1117 get_Call_param_arr (ir_node *node) {
1118 assert (node->op == op_Call);
1119 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1123 get_Call_n_params (ir_node *node) {
1124 assert (node->op == op_Call);
1125 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1129 get_Call_arity (ir_node *node) {
1130 assert (node->op == op_Call);
1131 return get_Call_n_params(node);
1135 set_Call_arity (ir_node *node, ir_node *arity) {
1136 assert (node->op == op_Call);
1141 get_Call_param (ir_node *node, int pos) {
1142 assert (node->op == op_Call);
1143 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1147 set_Call_param (ir_node *node, int pos, ir_node *param) {
1148 assert (node->op == op_Call);
1149 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1153 get_Call_type (ir_node *node) {
1154 assert (node->op == op_Call);
1155 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1159 set_Call_type (ir_node *node, type *tp) {
1160 assert (node->op == op_Call);
1161 assert (is_method_type(tp));
1162 node->attr.call.cld_tp = tp;
1165 int Call_has_callees(ir_node *node) {
/* NOTE(review): unlike the other Call accessors this one does not assert
   node->op == op_Call -- confirm whether that is intentional */
1166 return (node->attr.call.callee_arr != NULL);
1169 int get_Call_n_callees(ir_node * node) {
1170 assert(node->op == op_Call && node->attr.call.callee_arr);
1171 return ARR_LEN(node->attr.call.callee_arr);
1174 entity * get_Call_callee(ir_node * node, int pos) {
/* NOTE(review): pos is not range-checked against ARR_LEN here */
1175 assert(node->op == op_Call && node->attr.call.callee_arr);
1176 return node->attr.call.callee_arr[pos];
/* store the set of possible callees (from callgraph analysis);
   reallocates the array only when the count changes */
1179 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1180 assert(node->op == op_Call);
1181 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1182 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1184 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1187 void remove_Call_callee_arr(ir_node * node) {
1188 assert(node->op == op_Call);
/* the array memory stays on the obstack; only the reference is dropped */
1189 node->attr.call.callee_arr = NULL;
1192 ir_node * get_CallBegin_ptr (ir_node *node) {
1193 assert(node->op == op_CallBegin);
1194 return get_irn_n(node, 0);
1196 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1197 assert(node->op == op_CallBegin);
1198 set_irn_n(node, 0, ptr);
1200 ir_graph * get_CallBegin_irg (ir_node *node) {
1201 return get_irn_irg(node);
1203 ir_node * get_CallBegin_call (ir_node *node) {
1204 assert(node->op == op_CallBegin);
1205 return node->attr.callbegin.call;
1207 void set_CallBegin_call (ir_node *node, ir_node *call) {
1208 assert(node->op == op_CallBegin);
1209 node->attr.callbegin.call = call;
1213 ir_node * get_##OP##_left(ir_node *node) { \
1214 assert(node->op == op_##OP); \
1215 return get_irn_n(node, node->op->op_index); \
1217 void set_##OP##_left(ir_node *node, ir_node *left) { \
1218 assert(node->op == op_##OP); \
1219 set_irn_n(node, node->op->op_index, left); \
1221 ir_node *get_##OP##_right(ir_node *node) { \
1222 assert(node->op == op_##OP); \
1223 return get_irn_n(node, node->op->op_index + 1); \
1225 void set_##OP##_right(ir_node *node, ir_node *right) { \
1226 assert(node->op == op_##OP); \
1227 set_irn_n(node, node->op->op_index + 1, right); \
1231 ir_node *get_##OP##_op(ir_node *node) { \
1232 assert(node->op == op_##OP); \
1233 return get_irn_n(node, node->op->op_index); \
1235 void set_##OP##_op (ir_node *node, ir_node *op) { \
1236 assert(node->op == op_##OP); \
1237 set_irn_n(node, node->op->op_index, op); \
1247 get_Quot_mem (ir_node *node) {
1248 assert (node->op == op_Quot);
1249 return get_irn_n(node, 0);
1253 set_Quot_mem (ir_node *node, ir_node *mem) {
1254 assert (node->op == op_Quot);
1255 set_irn_n(node, 0, mem);
1261 get_DivMod_mem (ir_node *node) {
1262 assert (node->op == op_DivMod);
1263 return get_irn_n(node, 0);
1267 set_DivMod_mem (ir_node *node, ir_node *mem) {
1268 assert (node->op == op_DivMod);
1269 set_irn_n(node, 0, mem);
1275 get_Div_mem (ir_node *node) {
1276 assert (node->op == op_Div);
1277 return get_irn_n(node, 0);
1281 set_Div_mem (ir_node *node, ir_node *mem) {
1282 assert (node->op == op_Div);
1283 set_irn_n(node, 0, mem);
1289 get_Mod_mem (ir_node *node) {
1290 assert (node->op == op_Mod);
1291 return get_irn_n(node, 0);
1295 set_Mod_mem (ir_node *node, ir_node *mem) {
1296 assert (node->op == op_Mod);
1297 set_irn_n(node, 0, mem);
1314 get_Cast_type (ir_node *node) {
1315 assert (node->op == op_Cast);
1316 return node->attr.cast.totype;
1320 set_Cast_type (ir_node *node, type *to_tp) {
1321 assert (node->op == op_Cast);
1322 node->attr.cast.totype = to_tp;
1326 is_unop (ir_node *node) {
1327 return (node->op->opar == oparity_unary);
1331 get_unop_op (ir_node *node) {
1332 if (node->op->opar == oparity_unary)
1333 return get_irn_n(node, node->op->op_index);
1335 assert(node->op->opar == oparity_unary);
1340 set_unop_op (ir_node *node, ir_node *op) {
1341 if (node->op->opar == oparity_unary)
1342 set_irn_n(node, node->op->op_index, op);
1344 assert(node->op->opar == oparity_unary);
1348 is_binop (ir_node *node) {
1349 return (node->op->opar == oparity_binary);
1353 get_binop_left (ir_node *node) {
1354 if (node->op->opar == oparity_binary)
1355 return get_irn_n(node, node->op->op_index);
1357 assert(node->op->opar == oparity_binary);
1362 set_binop_left (ir_node *node, ir_node *left) {
1363 if (node->op->opar == oparity_binary)
1364 set_irn_n(node, node->op->op_index, left);
1366 assert (node->op->opar == oparity_binary);
1370 get_binop_right (ir_node *node) {
1371 if (node->op->opar == oparity_binary)
1372 return get_irn_n(node, node->op->op_index + 1);
1374 assert(node->op->opar == oparity_binary);
1379 set_binop_right (ir_node *node, ir_node *right) {
1380 if (node->op->opar == oparity_binary)
1381 set_irn_n(node, node->op->op_index + 1, right);
1383 assert (node->op->opar == oparity_binary);
1386 INLINE int is_Phi (ir_node *n) {
1391 return (op == op_Phi) || (op == op_Filter && interprocedural_view);
1395 get_Phi_preds_arr (ir_node *node) {
1396 assert (node->op == op_Phi);
1397 return (ir_node **)&(get_irn_in(node)[1]);
1401 get_Phi_n_preds (ir_node *node) {
1402 assert (is_Phi(node));
1403 return (get_irn_arity(node));
1407 INLINE void set_Phi_n_preds (ir_node *node, int n_preds) {
1408 assert (node->op == op_Phi);
1413 get_Phi_pred (ir_node *node, int pos) {
1414 assert (is_Phi(node));
1415 return get_irn_n(node, pos);
1419 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1420 assert (is_Phi(node));
1421 set_irn_n(node, pos, pred);
1425 get_Load_mem (ir_node *node) {
1426 assert (node->op == op_Load);
1427 return get_irn_n(node, 0);
1431 set_Load_mem (ir_node *node, ir_node *mem) {
1432 assert (node->op == op_Load);
1433 set_irn_n(node, 0, mem);
1437 get_Load_ptr (ir_node *node) {
1438 assert (node->op == op_Load);
1439 return get_irn_n(node, 1);
1443 set_Load_ptr (ir_node *node, ir_node *ptr) {
1444 assert (node->op == op_Load);
1445 set_irn_n(node, 1, ptr);
1450 get_Store_mem (ir_node *node) {
1451 assert (node->op == op_Store);
1452 return get_irn_n(node, 0);
1456 set_Store_mem (ir_node *node, ir_node *mem) {
1457 assert (node->op == op_Store);
1458 set_irn_n(node, 0, mem);
1462 get_Store_ptr (ir_node *node) {
1463 assert (node->op == op_Store);
1464 return get_irn_n(node, 1);
1468 set_Store_ptr (ir_node *node, ir_node *ptr) {
1469 assert (node->op == op_Store);
1470 set_irn_n(node, 1, ptr);
1474 get_Store_value (ir_node *node) {
1475 assert (node->op == op_Store);
1476 return get_irn_n(node, 2);
1480 set_Store_value (ir_node *node, ir_node *value) {
1481 assert (node->op == op_Store);
1482 set_irn_n(node, 2, value);
1486 get_Alloc_mem (ir_node *node) {
1487 assert (node->op == op_Alloc);
1488 return get_irn_n(node, 0);
1492 set_Alloc_mem (ir_node *node, ir_node *mem) {
1493 assert (node->op == op_Alloc);
1494 set_irn_n(node, 0, mem);
1498 get_Alloc_size (ir_node *node) {
1499 assert (node->op == op_Alloc);
1500 return get_irn_n(node, 1);
1504 set_Alloc_size (ir_node *node, ir_node *size) {
1505 assert (node->op == op_Alloc);
1506 set_irn_n(node, 1, size);
1510 get_Alloc_type (ir_node *node) {
1511 assert (node->op == op_Alloc);
1512 return node->attr.a.type = skip_tid(node->attr.a.type);
1516 set_Alloc_type (ir_node *node, type *tp) {
1517 assert (node->op == op_Alloc);
1518 node->attr.a.type = tp;
1522 get_Alloc_where (ir_node *node) {
1523 assert (node->op == op_Alloc);
1524 return node->attr.a.where;
1528 set_Alloc_where (ir_node *node, where_alloc where) {
1529 assert (node->op == op_Alloc);
1530 node->attr.a.where = where;
1535 get_Free_mem (ir_node *node) {
1536 assert (node->op == op_Free);
1537 return get_irn_n(node, 0);
1541 set_Free_mem (ir_node *node, ir_node *mem) {
1542 assert (node->op == op_Free);
1543 set_irn_n(node, 0, mem);
1547 get_Free_ptr (ir_node *node) {
1548 assert (node->op == op_Free);
1549 return get_irn_n(node, 1);
1553 set_Free_ptr (ir_node *node, ir_node *ptr) {
1554 assert (node->op == op_Free);
1555 set_irn_n(node, 1, ptr);
1559 get_Free_size (ir_node *node) {
1560 assert (node->op == op_Free);
1561 return get_irn_n(node, 2);
1565 set_Free_size (ir_node *node, ir_node *size) {
1566 assert (node->op == op_Free);
1567 set_irn_n(node, 2, size);
1571 get_Free_type (ir_node *node) {
1572 assert (node->op == op_Free);
1573 return node->attr.f = skip_tid(node->attr.f);
1577 set_Free_type (ir_node *node, type *tp) {
1578 assert (node->op == op_Free);
1583 get_Sync_preds_arr (ir_node *node) {
1584 assert (node->op == op_Sync);
1585 return (ir_node **)&(get_irn_in(node)[1]);
1589 get_Sync_n_preds (ir_node *node) {
1590 assert (node->op == op_Sync);
1591 return (get_irn_arity(node));
/* NOTE(review): the body of this setter is elided in this excerpt; only
   the Sync-opcode guard is visible.  Code kept byte-identical. */
1596 set_Sync_n_preds (ir_node *node, int n_preds) {
1597 assert (node->op == op_Sync);
1602 get_Sync_pred (ir_node *node, int pos) {
1603 assert (node->op == op_Sync);
1604 return get_irn_n(node, pos);
1608 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1609 assert (node->op == op_Sync);
1610 set_irn_n(node, pos, pred);
1614 get_Proj_pred (ir_node *node) {
1615 assert (is_Proj(node));
1616 return get_irn_n(node, 0);
1620 set_Proj_pred (ir_node *node, ir_node *pred) {
1621 assert (is_Proj(node));
1622 set_irn_n(node, 0, pred);
1626 get_Proj_proj (ir_node *node) {
1627 assert (is_Proj(node));
1628 if (get_irn_opcode(node) == iro_Proj) {
1629 return node->attr.proj;
1631 assert(get_irn_opcode(node) == iro_Filter);
1632 return node->attr.filter.proj;
1637 set_Proj_proj (ir_node *node, long proj) {
1638 assert (node->op == op_Proj);
1639 node->attr.proj = proj;
1643 get_Tuple_preds_arr (ir_node *node) {
1644 assert (node->op == op_Tuple);
1645 return (ir_node **)&(get_irn_in(node)[1]);
1649 get_Tuple_n_preds (ir_node *node) {
1650 assert (node->op == op_Tuple);
1651 return (get_irn_arity(node));
/* NOTE(review): the body of this setter is elided in this excerpt; only
   the Tuple-opcode guard is visible.  Code kept byte-identical. */
1656 set_Tuple_n_preds (ir_node *node, int n_preds) {
1657 assert (node->op == op_Tuple);
1662 get_Tuple_pred (ir_node *node, int pos) {
1663 assert (node->op == op_Tuple);
1664 return get_irn_n(node, pos);
1668 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1669 assert (node->op == op_Tuple);
1670 set_irn_n(node, pos, pred);
1674 get_Id_pred (ir_node *node) {
1675 assert (node->op == op_Id);
1676 return get_irn_n(node, 0);
1680 set_Id_pred (ir_node *node, ir_node *pred) {
1681 assert (node->op == op_Id);
1682 set_irn_n(node, 0, pred);
1685 INLINE ir_node *get_Confirm_value (ir_node *node) {
1686 assert (node->op == op_Confirm);
1687 return get_irn_n(node, 0);
1689 INLINE void set_Confirm_value (ir_node *node, ir_node *value) {
1690 assert (node->op == op_Confirm);
1691 set_irn_n(node, 0, value);
1693 INLINE ir_node *get_Confirm_bound (ir_node *node) {
1694 assert (node->op == op_Confirm);
1695 return get_irn_n(node, 1);
1697 INLINE void set_Confirm_bound (ir_node *node, ir_node *bound) {
1698 assert (node->op == op_Confirm);
1699 set_irn_n(node, 0, bound);
1701 INLINE pn_Cmp get_Confirm_cmp (ir_node *node) {
1702 assert (node->op == op_Confirm);
1703 return node->attr.confirm_cmp;
1705 INLINE void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1706 assert (node->op == op_Confirm);
1707 node->attr.confirm_cmp = cmp;
1712 get_Filter_pred (ir_node *node) {
1713 assert(node->op == op_Filter);
1717 set_Filter_pred (ir_node *node, ir_node *pred) {
1718 assert(node->op == op_Filter);
1722 get_Filter_proj(ir_node *node) {
1723 assert(node->op == op_Filter);
1724 return node->attr.filter.proj;
1727 set_Filter_proj (ir_node *node, long proj) {
1728 assert(node->op == op_Filter);
1729 node->attr.filter.proj = proj;
1732 /* Don't use get_irn_arity, get_irn_n in implementation as access
1733 shall work independent of view!!! */
1734 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1735 assert(node->op == op_Filter);
1736 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1737 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1738 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1739 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1740 node->attr.filter.in_cg[0] = node->in[0];
1742 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1745 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1746 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1747 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1748 node->attr.filter.in_cg[pos + 1] = pred;
1750 int get_Filter_n_cg_preds(ir_node *node) {
1751 assert(node->op == op_Filter && node->attr.filter.in_cg);
1752 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1754 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1756 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1758 arity = ARR_LEN(node->attr.filter.in_cg);
1759 assert(pos < arity - 1);
1760 return node->attr.filter.in_cg[pos + 1];
1765 get_irn_irg(ir_node *node) {
1766 if (get_irn_op(node) != op_Block)
1767 node = get_nodes_block(node);
1768 assert(get_irn_op(node) == op_Block);
1769 return node->attr.block.irg;
1773 /*----------------------------------------------------------------*/
1774 /* Auxiliary routines */
1775 /*----------------------------------------------------------------*/
1778 skip_Proj (ir_node *node) {
1779 /* don't assert node !!! */
1780 if (node && is_Proj(node)) {
1781 return get_Proj_pred(node);
1788 skip_Tuple (ir_node *node) {
1791 if (!get_opt_normalize()) return node;
1793 node = skip_nop(node);
1794 if (get_irn_op(node) == op_Proj) {
1795 pred = skip_nop(get_Proj_pred(node));
1796 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1797 pred = skip_nop(skip_Tuple(pred));
1798 if (get_irn_op(pred) == op_Tuple)
1799 return get_Tuple_pred(pred, get_Proj_proj(node));
1805 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1806 than any other approach, as Id chains are resolved and all point to the real node, or
1807 all id's are self loops. */
/* NOTE(review): several lines of this function are elided in this
   excerpt (declaration of `res`, the final return, closing brace);
   code kept byte-identical.  The function temporarily points the Id at
   itself while recursing so self-referencing chains terminate. */
1809 skip_nop (ir_node *node) {
1810 /* don't assert node !!! */
1812 if (!get_opt_normalize()) return node;
1814 /* Don't use get_Id_pred: We get into an endless loop for
1815 self-referencing Ids. */
1816 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1817 ir_node *rem_pred = node->in[0+1];
1820 assert (get_irn_arity (node) > 0);
1822 node->in[0+1] = node;
1823 res = skip_nop(rem_pred);
1824 if (res->op == op_Id) /* self-loop */ return node;
1826 node->in[0+1] = res;
/* NOTE(review): this is a second definition of skip_nop in the same
   translation unit; the preprocessor conditionals that must select one
   of the two variants are elided in this excerpt.  Code kept
   byte-identical — confirm the #if guards in the repository. */
1833 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1834 than any other approach, as Id chains are resolved and all point to the real node, or
1835 all id's are self loops. */
1836 extern int opt_normalize;
1838 skip_nop (ir_node *node) {
1840 /* don't assert node !!! */
1842 if (!get_opt_normalize()) return node;
1844 /* Don't use get_Id_pred: We get into an endless loop for
1845 self-referencing Ids. */
/* NOTE(review): `pred` must be declared on an elided line above;
   rem_pred's assignment and the function tail are also elided. */
1846 if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
1847 ir_node *rem_pred, *res;
1849 if (pred->op != op_Id) return pred; /* shortcut */
1852 assert (get_irn_arity (node) > 0);
1854 node->in[0+1] = node;
1855 res = skip_nop(rem_pred);
1856 if (res->op == op_Id) /* self-loop */ return node;
1858 node->in[0+1] = res;
1869 skip_Id (ir_node *node) {
1870 return skip_nop(node);
1874 is_Bad (ir_node *node) {
1876 if ((node) && get_irn_opcode(node) == iro_Bad)
1882 is_no_Block (ir_node *node) {
1884 return (get_irn_opcode(node) != iro_Block);
1888 is_Block (ir_node *node) {
1890 return (get_irn_opcode(node) == iro_Block);
1893 /* returns true if node is a Unknown node. */
1895 is_Unknown (ir_node *node) {
1897 return (get_irn_opcode(node) == iro_Unknown);
1901 is_Proj (const ir_node *node) {
1903 return node->op == op_Proj
1904 || (!interprocedural_view && node->op == op_Filter);
1907 /* Returns true if the operation manipulates control flow. */
1909 is_cfop(ir_node *node) {
1910 return is_cfopcode(get_irn_op(node));
1913 /* Returns true if the operation manipulates interprocedural control flow:
1914 CallBegin, EndReg, EndExcept */
1915 INLINE int is_ip_cfop(ir_node *node) {
1916 return is_ip_cfopcode(get_irn_op(node));
1919 ir_graph *get_ip_cfop_irg(ir_node *n) {
1920 return get_irn_irg(n);
1923 /* Returns true if the operation can change the control flow because
1926 is_fragile_op(ir_node *node) {
1927 return is_op_fragile(get_irn_op(node));
1930 /* Returns the memory operand of fragile operations. */
1931 ir_node *get_fragile_op_mem(ir_node *node) {
1932 assert(node && is_fragile_op(node));
1934 switch (get_irn_opcode (node)) {
1943 return get_irn_n(node, 0);
1948 assert(0 && "should not be reached");