3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
24 #include "irbackedge_t.h"
31 /* some constants fixing the positions of nodes predecessors
33 #define CALL_PARAM_OFFSET 2
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 /* Declarations for inlineing */
39 INLINE ir_node ** get_irn_in (const ir_node *node);
40 INLINE ir_mode *get_irn_mode (const ir_node *node);
41 INLINE ir_op *get_irn_op (const ir_node *node);
42 INLINE opcode get_irn_opcode (const ir_node *node);
43 INLINE ident *get_irn_opident (const ir_node *node);
44 INLINE type *get_SymConst_type (ir_node *node);
45 INLINE ir_node *skip_nop (ir_node *node);
46 INLINE int is_Proj (const ir_node *node);
49 static const char *pnc_name_arr [] = {
50 "False", "Eq", "Lt", "Le",
51 "Gt", "Ge", "Lg", "Leg", "Uo",
52 "Ue", "Ul", "Ule", "Ug", "Uge",
57 * returns the pnc name from a pnc constant
/* Returns the printable name for a pnc (comparison) constant by indexing
   pnc_name_arr.
   NOTE(review): no bounds check — assumes 0 <= pnc < array length; confirm
   all callers pass valid pnc values. */
59 INLINE const char *get_pnc_string(int pnc) {
60 return pnc_name_arr[pnc];
64 * Calculates the negated pnc condition.
/* Returns the logical negation of a pnc condition code.
   Each ordered relation maps to its complement including the unordered
   case (e.g. !(a < b) == (a >= b or unordered) == Uge), and vice versa;
   the mapping is its own inverse. */
67 get_negated_pnc(int pnc) {
69 case False: return True; break;
70 case Eq: return Ne; break;
71 case Lt: return Uge; break;
72 case Le: return Ug; break;
73 case Gt: return Ule; break;
74 case Ge: return Ul; break;
75 case Lg: return Ue; break;
76 case Leg: return Uo; break;
77 case Uo: return Leg; break;
78 case Ue: return Lg; break;
79 case Ul: return Ge; break;
80 case Ule: return Gt; break;
81 case Ug: return Le; break;
82 case Uge: return Lt; break;
83 case Ne: return Eq; break;
84 case True: return False; break;
/* Unreachable for valid pnc values; sentinel only silences the
   missing-return warning. */
86 return 99; /* to shut up gcc */
89 const char *pns_name_arr [] = {
90 "initial_exec", "global_store",
91 "frame_base", "globals", "args"
94 const char *symconst_name_arr [] = {
95 "type_tag", "size", "linkage_ptr_info"
104 * irnode constructor.
105 * Create a new irnode in irg, with an op, mode, arity and
106 * some incoming irnodes.
107 * If arity is negative, a node with a dynamic array is created.
110 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
111 int arity, ir_node **in)
114 int node_size = offsetof (ir_node, attr) + op->attr_size;
116 assert(irg && op && mode);
117 res = (ir_node *) obstack_alloc (irg->obst, node_size);
119 res->kind = k_ir_node;
125 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
127 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
128 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
131 set_irn_dbg_info(res, db);
135 res->node_nr = get_irp_new_node_nr();
141 /* Copies all attributes stored in the old node to the new node.
142 Assumes both have the same opcode and sufficient size. */
/* Raw memcpy of the opcode-specific attribute union; the assert enforces
   the same-opcode precondition, the op's attr_size bounds the copy. */
144 copy_attrs (const ir_node *old_node, ir_node *new_node) {
145 assert(get_irn_op(old_node) == get_irn_op(new_node));
146 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
149 /** getting some parameters from ir_nodes **/
152 is_ir_node (const void *thing) {
153 if (get_kind(thing) == k_ir_node)
159 /* returns the number of predecessors without the block predecessor. */
161 get_irn_intra_arity (const ir_node *node) {
163 return ARR_LEN(node->in) - 1;
166 /* returns the number of predecessors without the block predecessor. */
168 get_irn_inter_arity (const ir_node *node) {
170 if (get_irn_opcode(node) == iro_Filter) {
171 assert(node->attr.filter.in_cg);
172 return ARR_LEN(node->attr.filter.in_cg) - 1;
173 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
174 return ARR_LEN(node->attr.block.in_cg) - 1;
176 return get_irn_intra_arity(node);
179 /* returns the number of predecessors without the block predecessor. */
181 get_irn_arity (const ir_node *node) {
183 if (interprocedural_view) return get_irn_inter_arity(node);
184 return get_irn_intra_arity(node);
187 /* Returns the array with ins. This array is shifted with respect to the
188 array accessed by get_irn_n: The block operand is at position 0 not -1.
189 (@@@ This should be changed.)
190 The order of the predecessors in this array is not guaranteed, except that
191 lists of operands as predecessors of Block or arguments of a Call are
194 get_irn_in (const ir_node *node) {
196 if (interprocedural_view) { /* handle Filter and Block specially */
197 if (get_irn_opcode(node) == iro_Filter) {
198 assert(node->attr.filter.in_cg);
199 return node->attr.filter.in_cg;
200 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
201 return node->attr.block.in_cg;
203 /* else fall through */
209 set_irn_in (ir_node *node, int arity, ir_node **in) {
212 if (interprocedural_view) { /* handle Filter and Block specially */
213 if (get_irn_opcode(node) == iro_Filter) {
214 assert(node->attr.filter.in_cg);
215 arr = &node->attr.filter.in_cg;
216 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
217 arr = &node->attr.block.in_cg;
224 if (arity != ARR_LEN(*arr) - 1) {
225 ir_node * block = (*arr)[0];
226 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
229 fix_backedges(current_ir_graph->obst, node);
230 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
/* Returns the n-th intraprocedural predecessor (n == -1 is the block, hence
   the +1 offset into the in array). Skips nop/Id nodes on access and writes
   the skipped result back into in[] (path compression), so later lookups
   are direct. */
234 get_irn_intra_n (ir_node *node, int n) {
235 return (node->in[n + 1] = skip_nop(node->in[n + 1]));
/* Returns the n-th predecessor in the interprocedural view.
   Filter nodes and Blocks with an in_cg array use their interprocedural
   predecessor array (with the same skip_nop path compression as the
   intra case); all other nodes fall back to get_irn_intra_n. */
239 get_irn_inter_n (ir_node *node, int n) {
240 /* handle Filter and Block specially */
241 if (get_irn_opcode(node) == iro_Filter) {
242 assert(node->attr.filter.in_cg);
243 return (node->attr.filter.in_cg[n + 1] = skip_nop(node->attr.filter.in_cg[n + 1]));
244 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
245 return (node->attr.block.in_cg[n + 1] = skip_nop(node->attr.block.in_cg[n + 1]));
/* Non-Filter, non-cg-Block: identical to the intraprocedural view. */
248 return get_irn_intra_n (node, n);
251 /* to iterate through the predecessors without touching the array */
252 /* To iterate over the operands iterate from 0 to i < get_irn_arity(),
253 to iterate including the Block predecessor iterate from i = -1 to
255 If it is a block, the entry -1 is NULL. */
/* Returns the n-th predecessor of a node; n == -1 yields the block
   predecessor (see the iteration comment above). Dispatches on the global
   interprocedural_view flag to the inter- or intraprocedural accessor. */
257 get_irn_n (ir_node *node, int n) {
258 assert(node); assert(-1 <= n && n < get_irn_arity(node));
259 if (interprocedural_view) return get_irn_inter_n (node, n);
260 return get_irn_intra_n (node, n);
265 set_irn_n (ir_node *node, int n, ir_node *in) {
266 assert(node && -1 <= n && n < get_irn_arity(node));
267 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
268 /* Change block pred in both views! */
269 node->in[n + 1] = in;
270 assert(node->attr.filter.in_cg);
271 node->attr.filter.in_cg[n + 1] = in;
274 if (interprocedural_view) { /* handle Filter and Block specially */
275 if (get_irn_opcode(node) == iro_Filter) {
276 assert(node->attr.filter.in_cg);
277 node->attr.filter.in_cg[n + 1] = in;
279 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
280 node->attr.block.in_cg[n + 1] = in;
283 /* else fall through */
285 node->in[n + 1] = in;
289 get_irn_mode (const ir_node *node)
296 set_irn_mode (ir_node *node, ir_mode *mode)
304 get_irn_modecode (const ir_node *node)
307 return node->mode->code;
310 /** Gets the string representation of the mode .*/
312 get_irn_modename (const ir_node *node)
315 return get_mode_name(node->mode);
319 get_irn_modeident (const ir_node *node)
322 return get_mode_ident(node->mode);
326 get_irn_op (const ir_node *node)
332 /* should be private to the library: */
334 set_irn_op (ir_node *node, ir_op *op)
341 get_irn_opcode (const ir_node *node)
343 assert (k_ir_node == get_kind(node));
345 return node->op->code;
349 get_irn_opname (const ir_node *node)
352 return get_id_str(node->op->name);
356 get_irn_opident (const ir_node *node)
359 return node->op->name;
363 get_irn_visited (const ir_node *node)
366 return node->visited;
370 set_irn_visited (ir_node *node, unsigned long visited)
373 node->visited = visited;
377 mark_irn_visited (ir_node *node) {
379 node->visited = current_ir_graph->visited;
383 irn_not_visited (const ir_node *node) {
385 return (node->visited < current_ir_graph->visited);
389 irn_visited (const ir_node *node) {
391 return (node->visited >= current_ir_graph->visited);
395 set_irn_link (ir_node *node, void *link) {
397 /* Link field is used for Phi construction and various optimizations
399 assert(get_irg_phase_state(current_ir_graph) != phase_building);
405 get_irn_link (const ir_node *node) {
410 /* Outputs a unique number for this node */
412 get_irn_node_nr(const ir_node *node) {
415 return node->node_nr;
422 get_irn_const_attr (ir_node *node)
424 assert (node->op == op_Const);
425 return node->attr.con;
429 get_irn_proj_attr (ir_node *node)
431 assert (node->op == op_Proj);
432 return node->attr.proj;
436 get_irn_alloc_attr (ir_node *node)
438 assert (node->op == op_Alloc);
443 get_irn_free_attr (ir_node *node)
445 assert (node->op == op_Free);
446 return node->attr.f = skip_tid(node->attr.f);
450 get_irn_symconst_attr (ir_node *node)
452 assert (node->op == op_SymConst);
457 get_irn_call_attr (ir_node *node)
459 assert (node->op == op_Call);
460 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
464 get_irn_sel_attr (ir_node *node)
466 assert (node->op == op_Sel);
471 get_irn_phi_attr (ir_node *node)
473 assert (node->op == op_Phi);
474 return node->attr.phi0_pos;
478 get_irn_block_attr (ir_node *node)
480 assert (node->op == op_Block);
481 return node->attr.block;
484 /** manipulate fields of individual nodes **/
486 /* this works for all except Block */
488 get_nodes_Block (ir_node *node) {
489 assert (!(node->op == op_Block));
490 return get_irn_n(node, -1);
494 set_nodes_Block (ir_node *node, ir_node *block) {
495 assert (!(node->op == op_Block));
496 set_irn_n(node, -1, block);
499 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
500 * from Start. If so returns frame type, else Null. */
501 type *is_frame_pointer(ir_node *n) {
502 if ((get_irn_op(n) == op_Proj) &&
503 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
504 ir_node *start = get_Proj_pred(n);
505 if (get_irn_op(start) == op_Start) {
506 return get_irg_frame_type(get_irn_irg(start));
512 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
513 * from Start. If so returns global type, else Null. */
514 type *is_globals_pointer(ir_node *n) {
515 if ((get_irn_op(n) == op_Proj) &&
516 (get_Proj_proj(n) == pn_Start_P_globals)) {
517 ir_node *start = get_Proj_pred(n);
518 if (get_irn_op(start) == op_Start) {
519 return get_glob_type();
525 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
526 * from Start. If so returns 1, else 0. */
527 int is_value_arg_pointer(ir_node *n) {
528 if ((get_irn_op(n) == op_Proj) &&
529 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
530 (get_irn_op(get_Proj_pred(n)) == op_Start))
535 /* Returns an array with the predecessors of the Block. Depending on
536 the implementation of the graph data structure this can be a copy of
537 the internal representation of predecessors as well as the internal
538 array itself. Therefore writing to this array might obstruct the ir. */
540 get_Block_cfgpred_arr (ir_node *node)
542 assert ((node->op == op_Block));
543 return (ir_node **)&(get_irn_in(node)[1]);
548 get_Block_n_cfgpreds (ir_node *node) {
549 assert ((node->op == op_Block));
550 return (get_irn_arity(node));
/* Returns the control-flow predecessor of a Block at position pos. */
554 get_Block_cfgpred (ir_node *node, int pos) {
555 assert (node->op == op_Block);
/* NOTE(review): leftover debugging aid — dumps the whole graph and prints
   to stdout on an out-of-range pos before the assert below fires. Consider
   removing or guarding for release builds. */
557 if (-1 > pos || get_irn_arity(node) <= pos) {
558 dump_ir_block_graph(current_ir_graph);
559 printf("pos: %d, arity: %d ", pos, get_irn_arity(node));
562 assert(node); assert(-1 <= pos && pos < get_irn_arity(node));
563 return get_irn_n(node, pos);
567 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
568 assert (node->op == op_Block);
569 set_irn_n(node, pos, pred);
573 get_Block_matured (ir_node *node) {
574 assert (node->op == op_Block);
575 return node->attr.block.matured;
579 set_Block_matured (ir_node *node, bool matured) {
580 assert (node->op == op_Block);
581 node->attr.block.matured = matured;
584 get_Block_block_visited (ir_node *node) {
585 assert (node->op == op_Block);
586 return node->attr.block.block_visited;
590 set_Block_block_visited (ir_node *node, unsigned long visit) {
591 assert (node->op == op_Block);
592 node->attr.block.block_visited = visit;
595 /* For this current_ir_graph must be set. */
597 mark_Block_block_visited (ir_node *node) {
598 assert (node->op == op_Block);
599 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
603 Block_not_block_visited(ir_node *node) {
604 assert (node->op == op_Block);
605 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
609 get_Block_graph_arr (ir_node *node, int pos) {
610 assert (node->op == op_Block);
611 return node->attr.block.graph_arr[pos+1];
615 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
616 assert (node->op == op_Block);
617 node->attr.block.graph_arr[pos+1] = value;
620 /* handler handling for Blocks * /
622 set_Block_handler (ir_node *block, ir_node *handler) {
623 assert ((block->op == op_Block));
624 assert ((handler->op == op_Block));
625 block->attr.block.handler_entry = handler;
629 get_Block_handler (ir_node *block) {
630 assert ((block->op == op_Block));
631 return (block->attr.block.handler_entry);
634 / * handler handling for Nodes * /
636 set_Node_handler (ir_node *node, ir_node *handler) {
637 set_Block_handler (get_nodes_Block (node), handler);
641 get_Node_handler (ir_node *node) {
642 return (get_Block_handler (get_nodes_Block (node)));
645 / * exc_t handling for Blocks * /
646 void set_Block_exc (ir_node *block, exc_t exc) {
647 assert ((block->op == op_Block));
648 block->attr.block.exc = exc;
651 exc_t get_Block_exc (ir_node *block) {
652 assert ((block->op == op_Block));
653 return (block->attr.block.exc);
656 / * exc_t handling for Nodes * /
657 void set_Node_exc (ir_node *node, exc_t exc) {
658 set_Block_exc (get_nodes_Block (node), exc);
661 exc_t get_Node_exc (ir_node *node) {
662 return (get_Block_exc (get_nodes_Block (node)));
666 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
667 assert(node->op == op_Block);
668 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
669 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
670 node->attr.block.in_cg[0] = NULL;
671 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
673 /* Fix backedge array. fix_backedges operates depending on
674 interprocedural_view. */
675 bool ipv = interprocedural_view;
676 interprocedural_view = true;
677 fix_backedges(current_ir_graph->obst, node);
678 interprocedural_view = ipv;
681 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
684 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
685 assert(node->op == op_Block &&
686 node->attr.block.in_cg &&
687 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
688 node->attr.block.in_cg[pos + 1] = pred;
691 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
692 assert(node->op == op_Block);
693 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
696 int get_Block_cg_n_cfgpreds(ir_node * node) {
697 assert(node->op == op_Block);
698 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
701 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
702 assert(node->op == op_Block && node->attr.block.in_cg);
703 return node->attr.block.in_cg[pos + 1];
706 void remove_Block_cg_cfgpred_arr(ir_node * node) {
707 assert(node->op == op_Block);
708 node->attr.block.in_cg = NULL;
711 /* Start references the irg it is in. */
713 get_Start_irg(ir_node *node) {
714 return get_irn_irg(node);
718 set_Start_irg(ir_node *node, ir_graph *irg) {
719 assert(node->op == op_Start);
720 assert(is_ir_graph(irg));
721 assert(0 && " Why set irg? ");
722 //node->attr.start.irg = irg;
726 get_End_n_keepalives(ir_node *end) {
727 assert (end->op == op_End);
728 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
732 get_End_keepalive(ir_node *end, int pos) {
733 assert (end->op == op_End);
734 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
738 add_End_keepalive (ir_node *end, ir_node *ka) {
739 assert (end->op == op_End);
740 ARR_APP1 (ir_node *, end->in, ka);
744 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
745 assert (end->op == op_End);
746 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
/* Releases the End node's flexible in array and poisons the pointer so any
   later use of the in array faults instead of reading freed memory. */
750 free_End (ir_node *end) {
751 assert (end->op == op_End);
753 DEL_ARR_F(end->in); /* GL @@@ does not work! */
754 end->in = NULL; /* @@@ make sure we get an error if we use the
755 in array afterwards ... */
758 ir_graph *get_EndReg_irg (ir_node *end) {
759 return get_irn_irg(end);
762 ir_graph *get_EndExcept_irg (ir_node *end) {
763 return get_irn_irg(end);
767 > Implementing the case construct (which is where the constant Proj node is
768 > important) involves far more than simply determining the constant values.
769 > We could argue that this is more properly a function of the translator from
770 > Firm to the target machine. That could be done if there was some way of
771 > projecting "default" out of the Cond node.
772 I know it's complicated.
773 Basically there are two problems:
774 - determining the gaps between the projs
775 - determining the biggest case constant to know the proj number for
777 I see several solutions:
778 1. Introduce a ProjDefault node. Solves both problems.
779 This means to extend all optimizations executed during construction.
780 2. Give the Cond node for switch two flavors:
781 a) there are no gaps in the projs (existing flavor)
782 b) gaps may exist, default proj is still the Proj with the largest
783 projection number. This covers also the gaps.
784 3. Fix the semantic of the Cond to that of 2b)
786 Solution 2 seems to be the best:
787 Computing the gaps in the Firm representation is not too hard, i.e.,
788 libFIRM can implement a routine that transforms between the two
789 flavours. This is also possible for 1) but 2) does not require to
790 change any existing optimization.
791 Further it should be far simpler to determine the biggest constant than
793 I don't want to choose 3) as 2a) seems to have advantages for
794 dataflow analysis and 3) does not allow to convert the representation to
798 get_Cond_selector (ir_node *node) {
799 assert (node->op == op_Cond);
800 return get_irn_n(node, 0);
804 set_Cond_selector (ir_node *node, ir_node *selector) {
805 assert (node->op == op_Cond);
806 set_irn_n(node, 0, selector);
810 get_Cond_kind (ir_node *node) {
811 assert (node->op == op_Cond);
812 return node->attr.c.kind;
816 set_Cond_kind (ir_node *node, cond_kind kind) {
817 assert (node->op == op_Cond);
818 node->attr.c.kind = kind;
822 get_Return_mem (ir_node *node) {
823 assert (node->op == op_Return);
824 return get_irn_n(node, 0);
828 set_Return_mem (ir_node *node, ir_node *mem) {
829 assert (node->op == op_Return);
830 set_irn_n(node, 0, mem);
834 get_Return_n_ress (ir_node *node) {
835 assert (node->op == op_Return);
836 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
840 get_Return_res_arr (ir_node *node)
842 assert ((node->op == op_Return));
843 if (get_Return_n_ress(node) > 0)
844 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
851 set_Return_n_res (ir_node *node, int results) {
852 assert (node->op == op_Return);
857 get_Return_res (ir_node *node, int pos) {
858 assert (node->op == op_Return);
859 assert (get_Return_n_ress(node) > pos);
860 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
864 set_Return_res (ir_node *node, int pos, ir_node *res){
865 assert (node->op == op_Return);
866 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
870 get_Raise_mem (ir_node *node) {
871 assert (node->op == op_Raise);
872 return get_irn_n(node, 0);
876 set_Raise_mem (ir_node *node, ir_node *mem) {
877 assert (node->op == op_Raise);
878 set_irn_n(node, 0, mem);
882 get_Raise_exo_ptr (ir_node *node) {
883 assert (node->op == op_Raise);
884 return get_irn_n(node, 1);
888 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
889 assert (node->op == op_Raise);
890 set_irn_n(node, 1, exo_ptr);
893 INLINE tarval *get_Const_tarval (ir_node *node) {
894 assert (node->op == op_Const);
895 return node->attr.con.tv;
899 set_Const_tarval (ir_node *node, tarval *con) {
900 assert (node->op == op_Const);
901 node->attr.con.tv = con;
905 /* The source language type. Must be an atomic type. Mode of type must
906 be mode of node. For tarvals from entities type must be pointer to
909 get_Const_type (ir_node *node) {
910 assert (node->op == op_Const);
911 return node->attr.con.tp;
915 set_Const_type (ir_node *node, type *tp) {
916 assert (node->op == op_Const);
917 if (tp != unknown_type) {
918 assert (is_atomic_type(tp));
919 assert (get_type_mode(tp) == get_irn_mode(node));
920 assert (!tarval_is_entity(get_Const_tarval(node)) ||
921 (is_pointer_type(tp) &&
922 (get_pointer_points_to_type(tp) ==
923 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
926 node->attr.con.tp = tp;
931 get_SymConst_kind (const ir_node *node) {
932 assert (node->op == op_SymConst);
933 return node->attr.i.num;
937 set_SymConst_kind (ir_node *node, symconst_kind num) {
938 assert (node->op == op_SymConst);
939 node->attr.i.num = num;
943 get_SymConst_type (ir_node *node) {
944 assert ( (node->op == op_SymConst)
945 && ( get_SymConst_kind(node) == type_tag
946 || get_SymConst_kind(node) == size));
947 return node->attr.i.tori.typ = skip_tid(node->attr.i.tori.typ);
951 set_SymConst_type (ir_node *node, type *tp) {
952 assert ( (node->op == op_SymConst)
953 && ( get_SymConst_kind(node) == type_tag
954 || get_SymConst_kind(node) == size));
955 node->attr.i.tori.typ = tp;
959 get_SymConst_ptrinfo (ir_node *node) {
960 assert ( (node->op == op_SymConst)
961 && (get_SymConst_kind(node) == linkage_ptr_info));
962 return node->attr.i.tori.ptrinfo;
966 set_SymConst_ptrinfo (ir_node *node, ident *ptrinfo) {
967 assert ( (node->op == op_SymConst)
968 && (get_SymConst_kind(node) == linkage_ptr_info));
969 node->attr.i.tori.ptrinfo = ptrinfo;
973 get_SymConst_type_or_id (ir_node *node) {
974 assert (node->op == op_SymConst);
975 return &(node->attr.i.tori);
979 set_SymConst_type_or_id (ir_node *node, type_or_id_p tori) {
980 assert (node->op == op_SymConst);
981 memcpy (&(node->attr.i.tori), tori, sizeof(type_or_id));
985 get_Sel_mem (ir_node *node) {
986 assert (node->op == op_Sel);
987 return get_irn_n(node, 0);
991 set_Sel_mem (ir_node *node, ir_node *mem) {
992 assert (node->op == op_Sel);
993 set_irn_n(node, 0, mem);
997 get_Sel_ptr (ir_node *node) {
998 assert (node->op == op_Sel);
999 return get_irn_n(node, 1);
1003 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1004 assert (node->op == op_Sel);
1005 set_irn_n(node, 1, ptr);
1009 get_Sel_n_indexs (ir_node *node) {
1010 assert (node->op == op_Sel);
1011 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1015 get_Sel_index_arr (ir_node *node)
1017 assert ((node->op == op_Sel));
1018 if (get_Sel_n_indexs(node) > 0)
1019 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1025 get_Sel_index (ir_node *node, int pos) {
1026 assert (node->op == op_Sel);
1027 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1031 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1032 assert (node->op == op_Sel);
1033 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1037 get_Sel_entity (ir_node *node) {
1038 assert (node->op == op_Sel);
1039 return node->attr.s.ent;
1043 set_Sel_entity (ir_node *node, entity *ent) {
1044 assert (node->op == op_Sel);
1045 node->attr.s.ent = ent;
1049 get_InstOf_ent (ir_node *node) {
/* BUG FIX: was 'node->op = op_InstOf' — assignment, not comparison. It made
   the assert a no-op and silently overwrote the node's opcode. */
1050 assert (node->op == op_InstOf);
1051 return (node->attr.io.ent);
1055 set_InstOf_ent (ir_node *node, type *ent) {
/* BUG FIX: '=' changed to '==' — the original assignment corrupted
   node->op instead of checking it. */
1056 assert (node->op == op_InstOf);
1057 node->attr.io.ent = ent;
1061 get_InstOf_store (ir_node *node) {
/* BUG FIX: '=' changed to '==' — the original assignment corrupted
   node->op instead of checking it. Returns the memory operand (pos 0). */
1062 assert (node->op == op_InstOf);
1063 return (get_irn_n (node, 0));
1067 set_InstOf_store (ir_node *node, ir_node *obj) {
/* BUG FIX: '=' changed to '==' — the original assignment corrupted
   node->op instead of checking it. Sets the memory operand (pos 0). */
1068 assert (node->op == op_InstOf);
1069 set_irn_n (node, 0, obj);
1073 get_InstOf_obj (ir_node *node) {
/* BUG FIX: '=' changed to '==' — the original assignment corrupted
   node->op instead of checking it. Returns the object operand (pos 1). */
1074 assert (node->op == op_InstOf);
1075 return (get_irn_n (node, 1));
1079 set_InstOf_obj (ir_node *node, ir_node *obj) {
/* BUG FIX: '=' changed to '==' — the original assignment corrupted
   node->op instead of checking it. Sets the object operand (pos 1). */
1080 assert (node->op == op_InstOf);
1081 set_irn_n (node, 1, obj);
1085 /* For unary and binary arithmetic operations the access to the
1086 operands can be factored out. Left is the first, right the
1087 second arithmetic value as listed in tech report 0999-33.
1088 unops are: Minus, Abs, Not, Conv, Cast
1089 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1090 Shr, Shrs, Rotate, Cmp */
1094 get_Call_mem (ir_node *node) {
1095 assert (node->op == op_Call);
1096 return get_irn_n(node, 0);
1100 set_Call_mem (ir_node *node, ir_node *mem) {
1101 assert (node->op == op_Call);
1102 set_irn_n(node, 0, mem);
1106 get_Call_ptr (ir_node *node) {
1107 assert (node->op == op_Call);
1108 return get_irn_n(node, 1);
1112 set_Call_ptr (ir_node *node, ir_node *ptr) {
1113 assert (node->op == op_Call);
1114 set_irn_n(node, 1, ptr);
1118 get_Call_param_arr (ir_node *node) {
1119 assert (node->op == op_Call);
1120 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1124 get_Call_n_params (ir_node *node) {
1125 assert (node->op == op_Call);
1126 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1130 get_Call_arity (ir_node *node) {
1131 assert (node->op == op_Call);
1132 return get_Call_n_params(node);
1136 set_Call_arity (ir_node *node, ir_node *arity) {
1137 assert (node->op == op_Call);
1142 get_Call_param (ir_node *node, int pos) {
1143 assert (node->op == op_Call);
1144 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1148 set_Call_param (ir_node *node, int pos, ir_node *param) {
1149 assert (node->op == op_Call);
1150 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1154 get_Call_type (ir_node *node) {
1155 assert (node->op == op_Call);
1156 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1160 set_Call_type (ir_node *node, type *tp) {
1161 assert (node->op == op_Call);
1162 assert (is_method_type(tp));
1163 node->attr.call.cld_tp = tp;
1166 int Call_has_callees(ir_node *node) {
1167 return (node->attr.call.callee_arr != NULL);
1170 int get_Call_n_callees(ir_node * node) {
1171 assert(node->op == op_Call && node->attr.call.callee_arr);
1172 return ARR_LEN(node->attr.call.callee_arr);
1175 entity * get_Call_callee(ir_node * node, int pos) {
1176 assert(node->op == op_Call && node->attr.call.callee_arr);
1177 return node->attr.call.callee_arr[pos];
1180 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1181 assert(node->op == op_Call);
1182 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1183 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1185 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1188 void remove_Call_callee_arr(ir_node * node) {
1189 assert(node->op == op_Call);
1190 node->attr.call.callee_arr = NULL;
1193 ir_node * get_CallBegin_ptr (ir_node *node) {
1194 assert(node->op == op_CallBegin);
1195 return get_irn_n(node, 0);
1197 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1198 assert(node->op == op_CallBegin);
1199 set_irn_n(node, 0, ptr);
1201 ir_graph * get_CallBegin_irg (ir_node *node) {
1202 return get_irn_irg(node);
1204 ir_node * get_CallBegin_call (ir_node *node) {
1205 assert(node->op == op_CallBegin);
1206 return node->attr.callbegin.call;
1208 void set_CallBegin_call (ir_node *node, ir_node *call) {
1209 assert(node->op == op_CallBegin);
1210 node->attr.callbegin.call = call;
1214 ir_node * get_##OP##_left(ir_node *node) { \
1215 assert(node->op == op_##OP); \
1216 return get_irn_n(node, node->op->op_index); \
1218 void set_##OP##_left(ir_node *node, ir_node *left) { \
1219 assert(node->op == op_##OP); \
1220 set_irn_n(node, node->op->op_index, left); \
1222 ir_node *get_##OP##_right(ir_node *node) { \
1223 assert(node->op == op_##OP); \
1224 return get_irn_n(node, node->op->op_index + 1); \
1226 void set_##OP##_right(ir_node *node, ir_node *right) { \
1227 assert(node->op == op_##OP); \
1228 set_irn_n(node, node->op->op_index + 1, right); \
1232 ir_node *get_##OP##_op(ir_node *node) { \
1233 assert(node->op == op_##OP); \
1234 return get_irn_n(node, node->op->op_index); \
1236 void set_##OP##_op (ir_node *node, ir_node *op) { \
1237 assert(node->op == op_##OP); \
1238 set_irn_n(node, node->op->op_index, op); \
1248 get_Quot_mem (ir_node *node) {
1249 assert (node->op == op_Quot);
1250 return get_irn_n(node, 0);
1254 set_Quot_mem (ir_node *node, ir_node *mem) {
1255 assert (node->op == op_Quot);
1256 set_irn_n(node, 0, mem);
1262 get_DivMod_mem (ir_node *node) {
1263 assert (node->op == op_DivMod);
1264 return get_irn_n(node, 0);
1268 set_DivMod_mem (ir_node *node, ir_node *mem) {
1269 assert (node->op == op_DivMod);
1270 set_irn_n(node, 0, mem);
1276 get_Div_mem (ir_node *node) {
1277 assert (node->op == op_Div);
1278 return get_irn_n(node, 0);
1282 set_Div_mem (ir_node *node, ir_node *mem) {
1283 assert (node->op == op_Div);
1284 set_irn_n(node, 0, mem);
1290 get_Mod_mem (ir_node *node) {
1291 assert (node->op == op_Mod);
1292 return get_irn_n(node, 0);
1296 set_Mod_mem (ir_node *node, ir_node *mem) {
1297 assert (node->op == op_Mod);
1298 set_irn_n(node, 0, mem);
1315 get_Cast_type (ir_node *node) {
1316 assert (node->op == op_Cast);
1317 return node->attr.cast.totype;
1321 set_Cast_type (ir_node *node, type *to_tp) {
1322 assert (node->op == op_Cast);
1323 node->attr.cast.totype = to_tp;
1327 is_unop (ir_node *node) {
1328 return (node->op->opar == oparity_unary);
1332 get_unop_op (ir_node *node) {
1333 if (node->op->opar == oparity_unary)
1334 return get_irn_n(node, node->op->op_index);
1336 assert(node->op->opar == oparity_unary);
1341 set_unop_op (ir_node *node, ir_node *op) {
1342 if (node->op->opar == oparity_unary)
1343 set_irn_n(node, node->op->op_index, op);
1345 assert(node->op->opar == oparity_unary);
1349 is_binop (ir_node *node) {
1350 return (node->op->opar == oparity_binary);
1354 get_binop_left (ir_node *node) {
1355 if (node->op->opar == oparity_binary)
1356 return get_irn_n(node, node->op->op_index);
1358 assert(node->op->opar == oparity_binary);
1363 set_binop_left (ir_node *node, ir_node *left) {
1364 if (node->op->opar == oparity_binary)
1365 set_irn_n(node, node->op->op_index, left);
1367 assert (node->op->opar == oparity_binary);
1371 get_binop_right (ir_node *node) {
1372 if (node->op->opar == oparity_binary)
1373 return get_irn_n(node, node->op->op_index + 1);
1375 assert(node->op->opar == oparity_binary);
1380 set_binop_right (ir_node *node, ir_node *right) {
1381 if (node->op->opar == oparity_binary)
1382 set_irn_n(node, node->op->op_index + 1, right);
1384 assert (node->op->opar == oparity_binary);
1387 INLINE int is_Phi (ir_node *n) {
1392 return (op == op_Phi) || (op == op_Filter && interprocedural_view);
1396 get_Phi_preds_arr (ir_node *node) {
1397 assert (node->op == op_Phi);
1398 return (ir_node **)&(get_irn_in(node)[1]);
1402 get_Phi_n_preds (ir_node *node) {
1403 assert (is_Phi(node));
1404 return (get_irn_arity(node));
1408 INLINE void set_Phi_n_preds (ir_node *node, int n_preds) {
1409 assert (node->op == op_Phi);
1414 get_Phi_pred (ir_node *node, int pos) {
1415 assert (is_Phi(node));
1416 return get_irn_n(node, pos);
1420 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1421 assert (is_Phi(node));
1422 set_irn_n(node, pos, pred);
1426 get_Load_mem (ir_node *node) {
1427 assert (node->op == op_Load);
1428 return get_irn_n(node, 0);
1432 set_Load_mem (ir_node *node, ir_node *mem) {
1433 assert (node->op == op_Load);
1434 set_irn_n(node, 0, mem);
1438 get_Load_ptr (ir_node *node) {
1439 assert (node->op == op_Load);
1440 return get_irn_n(node, 1);
1444 set_Load_ptr (ir_node *node, ir_node *ptr) {
1445 assert (node->op == op_Load);
1446 set_irn_n(node, 1, ptr);
1451 get_Store_mem (ir_node *node) {
1452 assert (node->op == op_Store);
1453 return get_irn_n(node, 0);
1457 set_Store_mem (ir_node *node, ir_node *mem) {
1458 assert (node->op == op_Store);
1459 set_irn_n(node, 0, mem);
1463 get_Store_ptr (ir_node *node) {
1464 assert (node->op == op_Store);
1465 return get_irn_n(node, 1);
1469 set_Store_ptr (ir_node *node, ir_node *ptr) {
1470 assert (node->op == op_Store);
1471 set_irn_n(node, 1, ptr);
1475 get_Store_value (ir_node *node) {
1476 assert (node->op == op_Store);
1477 return get_irn_n(node, 2);
1481 set_Store_value (ir_node *node, ir_node *value) {
1482 assert (node->op == op_Store);
1483 set_irn_n(node, 2, value);
1487 get_Alloc_mem (ir_node *node) {
1488 assert (node->op == op_Alloc);
1489 return get_irn_n(node, 0);
1493 set_Alloc_mem (ir_node *node, ir_node *mem) {
1494 assert (node->op == op_Alloc);
1495 set_irn_n(node, 0, mem);
1499 get_Alloc_size (ir_node *node) {
1500 assert (node->op == op_Alloc);
1501 return get_irn_n(node, 1);
1505 set_Alloc_size (ir_node *node, ir_node *size) {
1506 assert (node->op == op_Alloc);
1507 set_irn_n(node, 1, size);
1511 get_Alloc_type (ir_node *node) {
1512 assert (node->op == op_Alloc);
1513 return node->attr.a.type = skip_tid(node->attr.a.type);
1517 set_Alloc_type (ir_node *node, type *tp) {
1518 assert (node->op == op_Alloc);
1519 node->attr.a.type = tp;
1523 get_Alloc_where (ir_node *node) {
1524 assert (node->op == op_Alloc);
1525 return node->attr.a.where;
1529 set_Alloc_where (ir_node *node, where_alloc where) {
1530 assert (node->op == op_Alloc);
1531 node->attr.a.where = where;
1536 get_Free_mem (ir_node *node) {
1537 assert (node->op == op_Free);
1538 return get_irn_n(node, 0);
1542 set_Free_mem (ir_node *node, ir_node *mem) {
1543 assert (node->op == op_Free);
1544 set_irn_n(node, 0, mem);
1548 get_Free_ptr (ir_node *node) {
1549 assert (node->op == op_Free);
1550 return get_irn_n(node, 1);
1554 set_Free_ptr (ir_node *node, ir_node *ptr) {
1555 assert (node->op == op_Free);
1556 set_irn_n(node, 1, ptr);
1560 get_Free_size (ir_node *node) {
1561 assert (node->op == op_Free);
1562 return get_irn_n(node, 2);
1566 set_Free_size (ir_node *node, ir_node *size) {
1567 assert (node->op == op_Free);
1568 set_irn_n(node, 2, size);
1572 get_Free_type (ir_node *node) {
1573 assert (node->op == op_Free);
1574 return node->attr.f = skip_tid(node->attr.f);
/* Sets the type of the object being freed.
   NOTE(review): the assignment statement (presumably node->attr.f = tp;)
   is not visible here -- lines appear to be missing; recover from
   version control before editing. */
1578 set_Free_type (ir_node *node, type *tp) {
1579 assert (node->op == op_Free);
1584 get_Sync_preds_arr (ir_node *node) {
1585 assert (node->op == op_Sync);
1586 return (ir_node **)&(get_irn_in(node)[1]);
1590 get_Sync_n_preds (ir_node *node) {
1591 assert (node->op == op_Sync);
1592 return (get_irn_arity(node));
/* Adjust the predecessor count of a Sync.
   NOTE(review): the body statements are not visible here -- lines appear to
   have been lost in extraction; recover from version control before editing. */
1597 set_Sync_n_preds (ir_node *node, int n_preds) {
1598 assert (node->op == op_Sync);
1603 get_Sync_pred (ir_node *node, int pos) {
1604 assert (node->op == op_Sync);
1605 return get_irn_n(node, pos);
1609 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1610 assert (node->op == op_Sync);
1611 set_irn_n(node, pos, pred);
1615 get_Proj_pred (ir_node *node) {
1616 assert (is_Proj(node));
1617 return get_irn_n(node, 0);
1621 set_Proj_pred (ir_node *node, ir_node *pred) {
1622 assert (is_Proj(node));
1623 set_irn_n(node, 0, pred);
1627 get_Proj_proj (ir_node *node) {
1628 assert (is_Proj(node));
1629 if (get_irn_opcode(node) == iro_Proj) {
1630 return node->attr.proj;
1632 assert(get_irn_opcode(node) == iro_Filter);
1633 return node->attr.filter.proj;
1638 set_Proj_proj (ir_node *node, long proj) {
1639 assert (node->op == op_Proj);
1640 node->attr.proj = proj;
1644 get_Tuple_preds_arr (ir_node *node) {
1645 assert (node->op == op_Tuple);
1646 return (ir_node **)&(get_irn_in(node)[1]);
1650 get_Tuple_n_preds (ir_node *node) {
1651 assert (node->op == op_Tuple);
1652 return (get_irn_arity(node));
/* Adjust the predecessor count of a Tuple.
   NOTE(review): the body statements are not visible here -- lines appear to
   have been lost in extraction; recover from version control before editing. */
1657 set_Tuple_n_preds (ir_node *node, int n_preds) {
1658 assert (node->op == op_Tuple);
1663 get_Tuple_pred (ir_node *node, int pos) {
1664 assert (node->op == op_Tuple);
1665 return get_irn_n(node, pos);
1669 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1670 assert (node->op == op_Tuple);
1671 set_irn_n(node, pos, pred);
1675 get_Id_pred (ir_node *node) {
1676 assert (node->op == op_Id);
1677 return get_irn_n(node, 0);
1681 set_Id_pred (ir_node *node, ir_node *pred) {
1682 assert (node->op == op_Id);
1683 set_irn_n(node, 0, pred);
1686 INLINE ir_node *get_Confirm_value (ir_node *node) {
1687 assert (node->op == op_Confirm);
1688 return get_irn_n(node, 0);
1690 INLINE void set_Confirm_value (ir_node *node, ir_node *value) {
1691 assert (node->op == op_Confirm);
1692 set_irn_n(node, 0, value);
1694 INLINE ir_node *get_Confirm_bound (ir_node *node) {
1695 assert (node->op == op_Confirm);
1696 return get_irn_n(node, 1);
1698 INLINE void set_Confirm_bound (ir_node *node, ir_node *bound) {
1699 assert (node->op == op_Confirm);
1700 set_irn_n(node, 0, bound);
1702 INLINE pn_Cmp get_Confirm_cmp (ir_node *node) {
1703 assert (node->op == op_Confirm);
1704 return node->attr.confirm_cmp;
1706 INLINE void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1707 assert (node->op == op_Confirm);
1708 node->attr.confirm_cmp = cmp;
/* Access the data predecessor of a Filter node.
   NOTE(review): the return/assignment statements are not visible here --
   lines appear to be missing (presumably direct node->in[1] access, since
   per the comment below these accessors must work independent of the
   interprocedural view); recover from version control before editing. */
1713 get_Filter_pred (ir_node *node) {
1714 assert(node->op == op_Filter);
1718 set_Filter_pred (ir_node *node, ir_node *pred) {
1719 assert(node->op == op_Filter);
1723 get_Filter_proj(ir_node *node) {
1724 assert(node->op == op_Filter);
1725 return node->attr.filter.proj;
1728 set_Filter_proj (ir_node *node, long proj) {
1729 assert(node->op == op_Filter);
1730 node->attr.filter.proj = proj;
1733 /* Don't use get_irn_arity, get_irn_n in implementation as access
1734 shall work independent of view!!! */
1735 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1736 assert(node->op == op_Filter);
1737 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1738 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1739 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1740 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1741 node->attr.filter.in_cg[0] = node->in[0];
1743 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1746 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1747 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1748 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1749 node->attr.filter.in_cg[pos + 1] = pred;
1751 int get_Filter_n_cg_preds(ir_node *node) {
1752 assert(node->op == op_Filter && node->attr.filter.in_cg);
1753 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1755 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1757 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1759 arity = ARR_LEN(node->attr.filter.in_cg);
1760 assert(pos < arity - 1);
1761 return node->attr.filter.in_cg[pos + 1];
1766 get_irn_irg(ir_node *node) {
1767 if (get_irn_op(node) != op_Block)
1768 node = get_nodes_block(node);
1769 assert(get_irn_op(node) == op_Block);
1770 return node->attr.block.irg;
1774 /*----------------------------------------------------------------*/
1775 /* Auxiliary routines */
1776 /*----------------------------------------------------------------*/
1779 skip_Proj (ir_node *node) {
1780 /* don't assert node !!! */
1781 if (node && is_Proj(node)) {
1782 return get_Proj_pred(node);
1789 skip_Tuple (ir_node *node) {
1792 if (!get_opt_normalize()) return node;
1794 node = skip_nop(node);
1795 if (get_irn_op(node) == op_Proj) {
1796 pred = skip_nop(get_Proj_pred(node));
1797 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1798 pred = skip_nop(skip_Tuple(pred));
1799 if (get_irn_op(pred) == op_Tuple)
1800 return get_Tuple_pred(pred, get_Proj_proj(node));
1806 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1807 than any other approach, as Id chains are resolved and all point to the real node, or
1808 all id's are self loops. */
1810 skip_nop (ir_node *node) {
1811 /* don't assert node !!! */
1813 if (!get_opt_normalize()) return node;
1815 /* Don't use get_Id_pred: We get into an endless loop for
1816 self-referencing Ids. */
1817 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1818 ir_node *rem_pred = node->in[0+1];
1821 assert (get_irn_arity (node) > 0);
1823 node->in[0+1] = node;
1824 res = skip_nop(rem_pred);
1825 if (res->op == op_Id) /* self-loop */ return node;
1827 node->in[0+1] = res;
/* NOTE(review): this is a SECOND definition of skip_nop -- presumably an
   alternate variant that was disabled by a preprocessor guard not visible
   here.  Lines also appear to be missing inside the body (rem_pred is
   declared but never assigned, pred's declaration is absent, and the
   closing braces/returns are gone).  Do not edit without consulting
   version control. */
1834 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1835 than any other approach, as Id chains are resolved and all point to the real node, or
1836 all id's are self loops. */
1837 extern int opt_normalize;
1839 skip_nop (ir_node *node) {
1841 /* don't assert node !!! */
1843 if (!get_opt_normalize()) return node;
1845 /* Don't use get_Id_pred: We get into an endless loop for
1846 self-referencing Ids. */
1847 if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
1848 ir_node *rem_pred, *res;
1850 if (pred->op != op_Id) return pred; /* shortcut */
1853 assert (get_irn_arity (node) > 0);
1855 node->in[0+1] = node;
1856 res = skip_nop(rem_pred);
1857 if (res->op == op_Id) /* self-loop */ return node;
1859 node->in[0+1] = res;
1870 skip_Id (ir_node *node) {
1871 return skip_nop(node);
1875 is_Bad (ir_node *node) {
1877 if ((node) && get_irn_opcode(node) == iro_Bad)
1883 is_no_Block (ir_node *node) {
1885 return (get_irn_opcode(node) != iro_Block);
1889 is_Block (ir_node *node) {
1891 return (get_irn_opcode(node) == iro_Block);
1894 /* returns true if node is a Unknown node. */
1896 is_Unknown (ir_node *node) {
1898 return (get_irn_opcode(node) == iro_Unknown);
1902 is_Proj (const ir_node *node) {
1904 return node->op == op_Proj
1905 || (!interprocedural_view && node->op == op_Filter);
1908 /* Returns true if the operation manipulates control flow. */
1910 is_cfop(ir_node *node) {
1911 return is_cfopcode(get_irn_op(node));
1914 /* Returns true if the operation manipulates interprocedural control flow:
1915 CallBegin, EndReg, EndExcept */
1916 INLINE int is_ip_cfop(ir_node *node) {
1917 return is_ip_cfopcode(get_irn_op(node));
1920 ir_graph *get_ip_cfop_irg(ir_node *n) {
1921 return get_irn_irg(n);
1924 /* Returns true if the operation can change the control flow because
1927 is_fragile_op(ir_node *node) {
1928 return is_op_fragile(get_irn_op(node));
1931 /* Returns the memory operand of fragile operations. */
1932 ir_node *get_fragile_op_mem(ir_node *node) {
1933 assert(node && is_fragile_op(node));
1935 switch (get_irn_opcode (node)) {
1944 return get_irn_n(node, 0);
1949 assert(0 && "should not be reached");