3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
24 #include "irbackedge_t.h"
31 /* some constants fixing the positions of nodes predecessors
33 #define CALL_PARAM_OFFSET 2
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 /* Declarations for inlining */
39 INLINE ir_node ** get_irn_in (const ir_node *node);
40 INLINE ir_mode *get_irn_mode (const ir_node *node);
41 INLINE ir_op *get_irn_op (const ir_node *node);
42 INLINE opcode get_irn_opcode (const ir_node *node);
43 INLINE ident *get_irn_opident (const ir_node *node);
44 INLINE type *get_SymConst_type (ir_node *node);
45 INLINE ir_node *skip_nop (ir_node *node);
46 INLINE int is_Proj (const ir_node *node);
49 static const char *pnc_name_arr [] = {
50 "False", "Eq", "Lt", "Le",
51 "Gt", "Ge", "Lg", "Leg", "Uo",
52 "Ue", "Ul", "Ule", "Ug", "Uge",
57 * Returns the pnc name from a pnc constant.
59 INLINE const char *get_pnc_string(int pnc) {
60 return pnc_name_arr[pnc];
64 * Calculates the negated pnc condition.
67 get_negated_pnc(int pnc) {
69 case False: return True; break;
70 case Eq: return Ne; break;
71 case Lt: return Uge; break;
72 case Le: return Ug; break;
73 case Gt: return Ule; break;
74 case Ge: return Ul; break;
75 case Lg: return Ue; break;
76 case Leg: return Uo; break;
77 case Uo: return Leg; break;
78 case Ue: return Lg; break;
79 case Ul: return Ge; break;
80 case Ule: return Gt; break;
81 case Ug: return Le; break;
82 case Uge: return Lt; break;
83 case Ne: return Eq; break;
84 case True: return False; break;
86 return 99; /* to shut up gcc */
89 const char *pns_name_arr [] = {
90 "initial_exec", "global_store",
91 "frame_base", "globals", "args"
94 const char *symconst_name_arr [] = {
95 "type_tag", "size", "linkage_ptr_info"
104 * irnode constructor.
105 * Create a new irnode in irg, with an op, mode, arity and
106 * some incoming irnodes.
107 * If arity is negative, a node with a dynamic array is created.
110 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
111 int arity, ir_node **in)
114 int node_size = offsetof (ir_node, attr) + op->attr_size;
116 assert(irg && op && mode);
117 res = (ir_node *) obstack_alloc (irg->obst, node_size);
119 res->kind = k_ir_node;
125 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
127 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
128 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
131 set_irn_dbg_info(res, db);
135 res->node_nr = get_irp_new_node_nr();
141 /* Copies all attributes stored in the old node to the new node.
142 Assumes both have the same opcode and sufficient size. */
144 copy_attrs (const ir_node *old_node, ir_node *new_node) {
145 assert(get_irn_op(old_node) == get_irn_op(new_node));
146 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
149 /** getting some parameters from ir_nodes **/
152 is_ir_node (const void *thing) {
153 if (get_kind(thing) == k_ir_node)
159 /* returns the number of predecessors without the block predecessor. */
161 get_irn_intra_arity (const ir_node *node) {
163 return ARR_LEN(node->in) - 1;
166 /* returns the number of predecessors without the block predecessor. */
168 get_irn_inter_arity (const ir_node *node) {
170 if (get_irn_opcode(node) == iro_Filter) {
171 assert(node->attr.filter.in_cg);
172 return ARR_LEN(node->attr.filter.in_cg) - 1;
173 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
174 return ARR_LEN(node->attr.block.in_cg) - 1;
176 return get_irn_intra_arity(node);
179 /* returns the number of predecessors without the block predecessor. */
181 get_irn_arity (const ir_node *node) {
183 if (interprocedural_view) return get_irn_inter_arity(node);
184 return get_irn_intra_arity(node);
187 /* Returns the array with ins. This array is shifted with respect to the
188 array accessed by get_irn_n: The block operand is at position 0 not -1.
189 (@@@ This should be changed.)
190 The order of the predecessors in this array is not guaranteed, except that
191 lists of operands as predecessors of Block or arguments of a Call are
194 get_irn_in (const ir_node *node) {
196 if (interprocedural_view) { /* handle Filter and Block specially */
197 if (get_irn_opcode(node) == iro_Filter) {
198 assert(node->attr.filter.in_cg);
199 return node->attr.filter.in_cg;
200 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
201 return node->attr.block.in_cg;
203 /* else fall through */
209 set_irn_in (ir_node *node, int arity, ir_node **in) {
212 if (interprocedural_view) { /* handle Filter and Block specially */
213 if (get_irn_opcode(node) == iro_Filter) {
214 assert(node->attr.filter.in_cg);
215 arr = &node->attr.filter.in_cg;
216 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
217 arr = &node->attr.block.in_cg;
224 if (arity != ARR_LEN(*arr) - 1) {
225 ir_node * block = (*arr)[0];
226 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
229 fix_backedges(current_ir_graph->obst, node);
230 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
234 get_irn_intra_n (ir_node *node, int n) {
235 return (node->in[n + 1] = skip_nop(node->in[n + 1]));
239 get_irn_inter_n (ir_node *node, int n) {
240 /* handle Filter and Block specially */
241 if (get_irn_opcode(node) == iro_Filter) {
242 assert(node->attr.filter.in_cg);
243 return (node->attr.filter.in_cg[n + 1] = skip_nop(node->attr.filter.in_cg[n + 1]));
244 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
245 return (node->attr.block.in_cg[n + 1] = skip_nop(node->attr.block.in_cg[n + 1]));
248 return get_irn_intra_n (node, n);
251 /* to iterate through the predecessors without touching the array */
252 /* To iterate over the operands iterate from 0 to i < get_irn_arity(),
253 to iterate including the Block predecessor iterate from i = -1 to
255 If it is a block, the entry -1 is NULL. */
257 get_irn_n (ir_node *node, int n) {
258 assert(node); assert(-1 <= n && n < get_irn_arity(node));
259 if (interprocedural_view) return get_irn_inter_n (node, n);
260 return get_irn_intra_n (node, n);
265 set_irn_n (ir_node *node, int n, ir_node *in) {
266 assert(node && -1 <= n && n < get_irn_arity(node));
267 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
268 /* Change block pred in both views! */
269 node->in[n + 1] = in;
270 assert(node->attr.filter.in_cg);
271 node->attr.filter.in_cg[n + 1] = in;
274 if (interprocedural_view) { /* handle Filter and Block specially */
275 if (get_irn_opcode(node) == iro_Filter) {
276 assert(node->attr.filter.in_cg);
277 node->attr.filter.in_cg[n + 1] = in;
279 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
280 node->attr.block.in_cg[n + 1] = in;
283 /* else fall through */
285 node->in[n + 1] = in;
289 get_irn_mode (const ir_node *node)
296 set_irn_mode (ir_node *node, ir_mode *mode)
304 get_irn_modecode (const ir_node *node)
307 return node->mode->code;
310 /** Gets the string representation of the mode. */
312 get_irn_modename (const ir_node *node)
315 return get_mode_name(node->mode);
319 get_irn_modeident (const ir_node *node)
322 return get_mode_ident(node->mode);
326 get_irn_op (const ir_node *node)
332 /* should be private to the library: */
334 set_irn_op (ir_node *node, ir_op *op)
341 get_irn_opcode (const ir_node *node)
343 assert (k_ir_node == get_kind(node));
345 return node->op->code;
349 get_irn_opname (const ir_node *node)
352 return get_id_str(node->op->name);
356 get_irn_opident (const ir_node *node)
359 return node->op->name;
363 get_irn_visited (const ir_node *node)
366 return node->visited;
370 set_irn_visited (ir_node *node, unsigned long visited)
373 node->visited = visited;
377 mark_irn_visited (ir_node *node) {
379 node->visited = current_ir_graph->visited;
383 irn_not_visited (const ir_node *node) {
385 return (node->visited < current_ir_graph->visited);
389 irn_visited (const ir_node *node) {
391 return (node->visited >= current_ir_graph->visited);
395 set_irn_link (ir_node *node, void *link) {
397 /* Link field is used for Phi construction and various optimizations
399 assert(get_irg_phase_state(current_ir_graph) != phase_building);
405 get_irn_link (const ir_node *node) {
410 /* Outputs a unique number for this node */
412 get_irn_node_nr(const ir_node *node) {
415 return node->node_nr;
422 get_irn_const_attr (ir_node *node)
424 assert (node->op == op_Const);
425 return node->attr.con;
429 get_irn_proj_attr (ir_node *node)
431 assert (node->op == op_Proj);
432 return node->attr.proj;
436 get_irn_alloc_attr (ir_node *node)
438 assert (node->op == op_Alloc);
443 get_irn_free_attr (ir_node *node)
445 assert (node->op == op_Free);
446 return node->attr.f = skip_tid(node->attr.f);
450 get_irn_symconst_attr (ir_node *node)
452 assert (node->op == op_SymConst);
457 get_irn_call_attr (ir_node *node)
459 assert (node->op == op_Call);
460 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
464 get_irn_funccall_attr (ir_node *node)
466 assert (node->op == op_FuncCall);
467 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
471 get_irn_sel_attr (ir_node *node)
473 assert (node->op == op_Sel);
478 get_irn_phi_attr (ir_node *node)
480 assert (node->op == op_Phi);
481 return node->attr.phi0_pos;
485 get_irn_block_attr (ir_node *node)
487 assert (node->op == op_Block);
488 return node->attr.block;
491 /** manipulate fields of individual nodes **/
493 /* this works for all except Block */
495 get_nodes_Block (ir_node *node) {
496 assert (!(node->op == op_Block));
497 return get_irn_n(node, -1);
501 set_nodes_Block (ir_node *node, ir_node *block) {
502 assert (!(node->op == op_Block));
503 set_irn_n(node, -1, block);
506 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
507 * from Start. If so returns frame type, else Null. */
508 type *is_frame_pointer(ir_node *n) {
509 if ((get_irn_op(n) == op_Proj) &&
510 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
511 ir_node *start = get_Proj_pred(n);
512 if (get_irn_op(start) == op_Start) {
513 return get_irg_frame_type(get_irn_irg(start));
519 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
520 * from Start. If so returns global type, else Null. */
521 type *is_globals_pointer(ir_node *n) {
522 if ((get_irn_op(n) == op_Proj) &&
523 (get_Proj_proj(n) == pn_Start_P_globals)) {
524 ir_node *start = get_Proj_pred(n);
525 if (get_irn_op(start) == op_Start) {
526 return get_glob_type();
532 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
533 * from Start. If so returns 1, else 0. */
534 int is_value_arg_pointer(ir_node *n) {
535 if ((get_irn_op(n) == op_Proj) &&
536 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
537 (get_irn_op(get_Proj_pred(n)) == op_Start))
542 /* Returns an array with the predecessors of the Block. Depending on
543 the implementation of the graph data structure this can be a copy of
544 the internal representation of predecessors as well as the internal
545 array itself. Therefore writing to this array might obstruct the ir. */
547 get_Block_cfgpred_arr (ir_node *node)
549 assert ((node->op == op_Block));
550 return (ir_node **)&(get_irn_in(node)[1]);
555 get_Block_n_cfgpreds (ir_node *node) {
556 assert ((node->op == op_Block));
557 return (get_irn_arity(node));
561 get_Block_cfgpred (ir_node *node, int pos) {
562 assert (node->op == op_Block);
564 if (-1 > pos || get_irn_arity(node) <= pos) {
565 dump_ir_block_graph(current_ir_graph);
566 printf("pos: %d, arity: %d ", pos, get_irn_arity(node));
569 assert(node); assert(-1 <= pos && pos < get_irn_arity(node));
570 return get_irn_n(node, pos);
574 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
575 assert (node->op == op_Block);
576 set_irn_n(node, pos, pred);
580 get_Block_matured (ir_node *node) {
581 assert (node->op == op_Block);
582 return node->attr.block.matured;
586 set_Block_matured (ir_node *node, bool matured) {
587 assert (node->op == op_Block);
588 node->attr.block.matured = matured;
591 get_Block_block_visited (ir_node *node) {
592 assert (node->op == op_Block);
593 return node->attr.block.block_visited;
597 set_Block_block_visited (ir_node *node, unsigned long visit) {
598 assert (node->op == op_Block);
599 node->attr.block.block_visited = visit;
602 /* For this current_ir_graph must be set. */
604 mark_Block_block_visited (ir_node *node) {
605 assert (node->op == op_Block);
606 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
610 Block_not_block_visited(ir_node *node) {
611 assert (node->op == op_Block);
612 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
616 get_Block_graph_arr (ir_node *node, int pos) {
617 assert (node->op == op_Block);
618 return node->attr.block.graph_arr[pos+1];
622 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
623 assert (node->op == op_Block);
624 node->attr.block.graph_arr[pos+1] = value;
627 /* handler handling for Blocks */
629 set_Block_handler (ir_node *block, ir_node *handler) {
630 assert ((block->op == op_Block));
631 assert ((handler->op == op_Block));
632 block->attr.block.handler_entry = handler;
636 get_Block_handler (ir_node *block) {
637 assert ((block->op == op_Block));
638 return (block->attr.block.handler_entry);
641 /* handler handling for Nodes */
643 set_Node_handler (ir_node *node, ir_node *handler) {
644 set_Block_handler (get_nodes_Block (node), handler);
648 get_Node_handler (ir_node *node) {
649 return (get_Block_handler (get_nodes_Block (node)));
652 /* exc_t handling for Blocks */
653 void set_Block_exc (ir_node *block, exc_t exc) {
654 assert ((block->op == op_Block));
655 block->attr.block.exc = exc;
658 exc_t get_Block_exc (ir_node *block) {
659 assert ((block->op == op_Block));
660 return (block->attr.block.exc);
663 /* exc_t handling for Nodes */
664 void set_Node_exc (ir_node *node, exc_t exc) {
665 set_Block_exc (get_nodes_Block (node), exc);
668 exc_t get_Node_exc (ir_node *node) {
669 return (get_Block_exc (get_nodes_Block (node)));
673 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
674 assert(node->op == op_Block);
675 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
676 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
677 node->attr.block.in_cg[0] = NULL;
678 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
680 /* Fix backedge array. fix_backedges operates depending on
681 interprocedural_view. */
682 bool ipv = interprocedural_view;
683 interprocedural_view = true;
684 fix_backedges(current_ir_graph->obst, node);
685 interprocedural_view = ipv;
688 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
691 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
692 assert(node->op == op_Block &&
693 node->attr.block.in_cg &&
694 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
695 node->attr.block.in_cg[pos + 1] = pred;
698 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
699 assert(node->op == op_Block);
700 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
703 int get_Block_cg_n_cfgpreds(ir_node * node) {
704 assert(node->op == op_Block);
705 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
708 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
709 assert(node->op == op_Block && node->attr.block.in_cg);
710 return node->attr.block.in_cg[pos + 1];
713 void remove_Block_cg_cfgpred_arr(ir_node * node) {
714 assert(node->op == op_Block);
715 node->attr.block.in_cg = NULL;
718 /* Start references the irg it is in. */
720 get_Start_irg(ir_node *node) {
721 return get_irn_irg(node);
725 set_Start_irg(ir_node *node, ir_graph *irg) {
726 assert(node->op == op_Start);
727 assert(is_ir_graph(irg));
728 assert(0 && " Why set irg? ");
729 //node->attr.start.irg = irg;
733 get_End_n_keepalives(ir_node *end) {
734 assert (end->op == op_End);
735 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
739 get_End_keepalive(ir_node *end, int pos) {
740 assert (end->op == op_End);
741 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
745 add_End_keepalive (ir_node *end, ir_node *ka) {
746 assert (end->op == op_End);
747 ARR_APP1 (ir_node *, end->in, ka);
751 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
752 assert (end->op == op_End);
753 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
757 free_End (ir_node *end) {
758 assert (end->op == op_End);
760 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
761 end->in = NULL; /* @@@ make sure we get an error if we use the
762 in array afterwards ... */
765 ir_graph *get_EndReg_irg (ir_node *end) {
766 return get_irn_irg(end);
769 ir_graph *get_EndExcept_irg (ir_node *end) {
770 return get_irn_irg(end);
774 > Implementing the case construct (which is where the constant Proj node is
775 > important) involves far more than simply determining the constant values.
776 > We could argue that this is more properly a function of the translator from
777 > Firm to the target machine. That could be done if there was some way of
778 > projecting "default" out of the Cond node.
779 I know it's complicated.
780 Basically there are two problems:
781 - determining the gaps between the projs
782 - determining the biggest case constant to know the proj number for
784 I see several solutions:
785 1. Introduce a ProjDefault node. Solves both problems.
786 This means to extend all optimizations executed during construction.
787 2. Give the Cond node for switch two flavors:
788 a) there are no gaps in the projs (existing flavor)
789 b) gaps may exist, default proj is still the Proj with the largest
790 projection number. This covers also the gaps.
791 3. Fix the semantic of the Cond to that of 2b)
793 Solution 2 seems to be the best:
794 Computing the gaps in the Firm representation is not too hard, i.e.,
795 libFIRM can implement a routine that transforms between the two
796 flavors. This is also possible for 1) but 2) does not require
797 changing any existing optimization.
798 Further it should be far simpler to determine the biggest constant than
800 I don't want to choose 3) as 2a) seems to have advantages for
801 dataflow analysis and 3) does not allow to convert the representation to
805 get_Cond_selector (ir_node *node) {
806 assert (node->op == op_Cond);
807 return get_irn_n(node, 0);
811 set_Cond_selector (ir_node *node, ir_node *selector) {
812 assert (node->op == op_Cond);
813 set_irn_n(node, 0, selector);
817 get_Cond_kind (ir_node *node) {
818 assert (node->op == op_Cond);
819 return node->attr.c.kind;
823 set_Cond_kind (ir_node *node, cond_kind kind) {
824 assert (node->op == op_Cond);
825 node->attr.c.kind = kind;
829 get_Return_mem (ir_node *node) {
830 assert (node->op == op_Return);
831 return get_irn_n(node, 0);
835 set_Return_mem (ir_node *node, ir_node *mem) {
836 assert (node->op == op_Return);
837 set_irn_n(node, 0, mem);
841 get_Return_n_ress (ir_node *node) {
842 assert (node->op == op_Return);
843 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
847 get_Return_res_arr (ir_node *node)
849 assert ((node->op == op_Return));
850 if (get_Return_n_ress(node) > 0)
851 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
858 set_Return_n_res (ir_node *node, int results) {
859 assert (node->op == op_Return);
864 get_Return_res (ir_node *node, int pos) {
865 assert (node->op == op_Return);
866 assert (get_Return_n_ress(node) > pos);
867 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
871 set_Return_res (ir_node *node, int pos, ir_node *res){
872 assert (node->op == op_Return);
873 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
877 get_Raise_mem (ir_node *node) {
878 assert (node->op == op_Raise);
879 return get_irn_n(node, 0);
883 set_Raise_mem (ir_node *node, ir_node *mem) {
884 assert (node->op == op_Raise);
885 set_irn_n(node, 0, mem);
889 get_Raise_exo_ptr (ir_node *node) {
890 assert (node->op == op_Raise);
891 return get_irn_n(node, 1);
895 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
896 assert (node->op == op_Raise);
897 set_irn_n(node, 1, exo_ptr);
900 INLINE tarval *get_Const_tarval (ir_node *node) {
901 assert (node->op == op_Const);
902 return node->attr.con.tv;
906 set_Const_tarval (ir_node *node, tarval *con) {
907 assert (node->op == op_Const);
908 node->attr.con.tv = con;
912 /* The source language type. Must be an atomic type. Mode of type must
913 be mode of node. For tarvals from entities type must be pointer to
916 get_Const_type (ir_node *node) {
917 assert (node->op == op_Const);
918 return node->attr.con.tp;
922 set_Const_type (ir_node *node, type *tp) {
923 assert (node->op == op_Const);
924 if (tp != unknown_type) {
925 assert (is_atomic_type(tp));
926 assert (get_type_mode(tp) == get_irn_mode(node));
927 assert (!tarval_is_entity(get_Const_tarval(node)) ||
928 (is_pointer_type(tp) &&
929 (get_pointer_points_to_type(tp) ==
930 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
933 node->attr.con.tp = tp;
938 get_SymConst_kind (const ir_node *node) {
939 assert (node->op == op_SymConst);
940 return node->attr.i.num;
944 set_SymConst_kind (ir_node *node, symconst_kind num) {
945 assert (node->op == op_SymConst);
946 node->attr.i.num = num;
950 get_SymConst_type (ir_node *node) {
951 assert ( (node->op == op_SymConst)
952 && ( get_SymConst_kind(node) == type_tag
953 || get_SymConst_kind(node) == size));
954 return node->attr.i.tori.typ = skip_tid(node->attr.i.tori.typ);
958 set_SymConst_type (ir_node *node, type *tp) {
959 assert ( (node->op == op_SymConst)
960 && ( get_SymConst_kind(node) == type_tag
961 || get_SymConst_kind(node) == size));
962 node->attr.i.tori.typ = tp;
966 get_SymConst_ptrinfo (ir_node *node) {
967 assert ( (node->op == op_SymConst)
968 && (get_SymConst_kind(node) == linkage_ptr_info));
969 return node->attr.i.tori.ptrinfo;
973 set_SymConst_ptrinfo (ir_node *node, ident *ptrinfo) {
974 assert ( (node->op == op_SymConst)
975 && (get_SymConst_kind(node) == linkage_ptr_info));
976 node->attr.i.tori.ptrinfo = ptrinfo;
980 get_SymConst_type_or_id (ir_node *node) {
981 assert (node->op == op_SymConst);
982 return &(node->attr.i.tori);
986 set_SymConst_type_or_id (ir_node *node, type_or_id_p tori) {
987 assert (node->op == op_SymConst);
988 memcpy (&(node->attr.i.tori), tori, sizeof(type_or_id));
992 get_Sel_mem (ir_node *node) {
993 assert (node->op == op_Sel);
994 return get_irn_n(node, 0);
998 set_Sel_mem (ir_node *node, ir_node *mem) {
999 assert (node->op == op_Sel);
1000 set_irn_n(node, 0, mem);
1004 get_Sel_ptr (ir_node *node) {
1005 assert (node->op == op_Sel);
1006 return get_irn_n(node, 1);
1010 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1011 assert (node->op == op_Sel);
1012 set_irn_n(node, 1, ptr);
1016 get_Sel_n_indexs (ir_node *node) {
1017 assert (node->op == op_Sel);
1018 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1022 get_Sel_index_arr (ir_node *node)
1024 assert ((node->op == op_Sel));
1025 if (get_Sel_n_indexs(node) > 0)
1026 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1032 get_Sel_index (ir_node *node, int pos) {
1033 assert (node->op == op_Sel);
1034 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1038 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1039 assert (node->op == op_Sel);
1040 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1044 get_Sel_entity (ir_node *node) {
1045 assert (node->op == op_Sel);
1046 return node->attr.s.ent;
1050 set_Sel_entity (ir_node *node, entity *ent) {
1051 assert (node->op == op_Sel);
1052 node->attr.s.ent = ent;
1056 get_InstOf_ent (ir_node *node) {
1057 assert (node->op = op_InstOf);
1058 return (node->attr.io.ent);
1062 set_InstOf_ent (ir_node *node, type *ent) {
1063 assert (node->op = op_InstOf);
1064 node->attr.io.ent = ent;
1068 get_InstOf_store (ir_node *node) {
1069 assert (node->op = op_InstOf);
1070 return (get_irn_n (node, 0));
1074 set_InstOf_store (ir_node *node, ir_node *obj) {
1075 assert (node->op = op_InstOf);
1076 set_irn_n (node, 0, obj);
1080 get_InstOf_obj (ir_node *node) {
1081 assert (node->op = op_InstOf);
1082 return (get_irn_n (node, 1));
1086 set_InstOf_obj (ir_node *node, ir_node *obj) {
1087 assert (node->op = op_InstOf);
1088 set_irn_n (node, 1, obj);
1092 /* For unary and binary arithmetic operations the access to the
1093 operands can be factored out. Left is the first, right the
1094 second arithmetic value as listed in tech report 0999-33.
1095 unops are: Minus, Abs, Not, Conv, Cast
1096 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1097 Shr, Shrs, Rotate, Cmp */
1101 get_Call_mem (ir_node *node) {
1102 assert (node->op == op_Call);
1103 return get_irn_n(node, 0);
1107 set_Call_mem (ir_node *node, ir_node *mem) {
1108 assert (node->op == op_Call);
1109 set_irn_n(node, 0, mem);
1113 get_Call_ptr (ir_node *node) {
1114 assert (node->op == op_Call);
1115 return get_irn_n(node, 1);
1119 set_Call_ptr (ir_node *node, ir_node *ptr) {
1120 assert (node->op == op_Call);
1121 set_irn_n(node, 1, ptr);
1125 get_Call_param_arr (ir_node *node) {
1126 assert (node->op == op_Call);
1127 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1131 get_Call_n_params (ir_node *node) {
1132 assert (node->op == op_Call);
1133 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1137 get_Call_arity (ir_node *node) {
1138 assert (node->op == op_Call);
1139 return get_Call_n_params(node);
1143 set_Call_arity (ir_node *node, ir_node *arity) {
1144 assert (node->op == op_Call);
1149 get_Call_param (ir_node *node, int pos) {
1150 assert (node->op == op_Call);
1151 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1155 set_Call_param (ir_node *node, int pos, ir_node *param) {
1156 assert (node->op == op_Call);
1157 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1161 get_Call_type (ir_node *node) {
1162 assert (node->op == op_Call);
1163 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1167 set_Call_type (ir_node *node, type *tp) {
1168 assert (node->op == op_Call);
1169 assert (is_method_type(tp));
1170 node->attr.call.cld_tp = tp;
1173 int Call_has_callees(ir_node *node) {
1174 return (node->attr.call.callee_arr != NULL);
1177 int get_Call_n_callees(ir_node * node) {
1178 assert(node->op == op_Call && node->attr.call.callee_arr);
1179 return ARR_LEN(node->attr.call.callee_arr);
1182 entity * get_Call_callee(ir_node * node, int pos) {
1183 assert(node->op == op_Call && node->attr.call.callee_arr);
1184 return node->attr.call.callee_arr[pos];
1187 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1188 assert(node->op == op_Call);
1189 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1190 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1192 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1195 void remove_Call_callee_arr(ir_node * node) {
1196 assert(node->op == op_Call);
1197 node->attr.call.callee_arr = NULL;
1200 ir_node * get_CallBegin_ptr (ir_node *node) {
1201 assert(node->op == op_CallBegin);
1202 return get_irn_n(node, 0);
1204 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1205 assert(node->op == op_CallBegin);
1206 set_irn_n(node, 0, ptr);
1208 ir_graph * get_CallBegin_irg (ir_node *node) {
1209 return get_irn_irg(node);
1211 ir_node * get_CallBegin_call (ir_node *node) {
1212 assert(node->op == op_CallBegin);
1213 return node->attr.callbegin.call;
1215 void set_CallBegin_call (ir_node *node, ir_node *call) {
1216 assert(node->op == op_CallBegin);
1217 node->attr.callbegin.call = call;
1221 get_FuncCall_ptr (ir_node *node) {
1222 assert (node->op == op_FuncCall);
1223 return get_irn_n(node, 0);
1227 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1228 assert (node->op == op_FuncCall);
1229 set_irn_n(node, 0, ptr);
1233 get_FuncCall_param_arr (ir_node *node) {
1234 assert (node->op == op_FuncCall);
1235 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET];
1239 get_FuncCall_n_params (ir_node *node) {
1240 assert (node->op == op_FuncCall);
1241 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1245 get_FuncCall_arity (ir_node *node) {
1246 assert (node->op == op_FuncCall);
1247 return get_FuncCall_n_params(node);
1251 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1252 assert (node->op == op_FuncCall);
1257 get_FuncCall_param (ir_node *node, int pos) {
1258 assert (node->op == op_FuncCall);
1259 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1263 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1264 assert (node->op == op_FuncCall);
1265 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1269 get_FuncCall_type (ir_node *node) {
1270 assert (node->op == op_FuncCall);
1271 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1275 set_FuncCall_type (ir_node *node, type *tp) {
1276 assert (node->op == op_FuncCall);
1277 assert (is_method_type(tp));
1278 node->attr.call.cld_tp = tp;
1281 int FuncCall_has_callees(ir_node *node) {
1282 return (node->attr.call.callee_arr != NULL);
1285 int get_FuncCall_n_callees(ir_node * node) {
1286 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1287 return ARR_LEN(node->attr.call.callee_arr);
1290 entity * get_FuncCall_callee(ir_node * node, int pos) {
1291 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1292 return node->attr.call.callee_arr[pos];
1295 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1296 assert(node->op == op_FuncCall);
1297 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1298 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1300 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1303 void remove_FuncCall_callee_arr(ir_node * node) {
1304 assert(node->op == op_FuncCall);
1305 node->attr.call.callee_arr = NULL;
1310 ir_node * get_##OP##_left(ir_node *node) { \
1311 assert(node->op == op_##OP); \
1312 return get_irn_n(node, node->op->op_index); \
1314 void set_##OP##_left(ir_node *node, ir_node *left) { \
1315 assert(node->op == op_##OP); \
1316 set_irn_n(node, node->op->op_index, left); \
1318 ir_node *get_##OP##_right(ir_node *node) { \
1319 assert(node->op == op_##OP); \
1320 return get_irn_n(node, node->op->op_index + 1); \
1322 void set_##OP##_right(ir_node *node, ir_node *right) { \
1323 assert(node->op == op_##OP); \
1324 set_irn_n(node, node->op->op_index + 1, right); \
1328 ir_node *get_##OP##_op(ir_node *node) { \
1329 assert(node->op == op_##OP); \
1330 return get_irn_n(node, node->op->op_index); \
1332 void set_##OP##_op (ir_node *node, ir_node *op) { \
1333 assert(node->op == op_##OP); \
1334 set_irn_n(node, node->op->op_index, op); \
1344 get_Quot_mem (ir_node *node) {
1345 assert (node->op == op_Quot);
1346 return get_irn_n(node, 0);
1350 set_Quot_mem (ir_node *node, ir_node *mem) {
1351 assert (node->op == op_Quot);
1352 set_irn_n(node, 0, mem);
1358 get_DivMod_mem (ir_node *node) {
1359 assert (node->op == op_DivMod);
1360 return get_irn_n(node, 0);
1364 set_DivMod_mem (ir_node *node, ir_node *mem) {
1365 assert (node->op == op_DivMod);
1366 set_irn_n(node, 0, mem);
1372 get_Div_mem (ir_node *node) {
1373 assert (node->op == op_Div);
1374 return get_irn_n(node, 0);
1378 set_Div_mem (ir_node *node, ir_node *mem) {
1379 assert (node->op == op_Div);
1380 set_irn_n(node, 0, mem);
1386 get_Mod_mem (ir_node *node) {
1387 assert (node->op == op_Mod);
1388 return get_irn_n(node, 0);
1392 set_Mod_mem (ir_node *node, ir_node *mem) {
1393 assert (node->op == op_Mod);
1394 set_irn_n(node, 0, mem);
1411 get_Cast_type (ir_node *node) {
1412 assert (node->op == op_Cast);
1413 return node->attr.cast.totype;
1417 set_Cast_type (ir_node *node, type *to_tp) {
1418 assert (node->op == op_Cast);
1419 node->attr.cast.totype = to_tp;
1423 is_unop (ir_node *node) {
1424 return (node->op->opar == oparity_unary);
1428 get_unop_op (ir_node *node) {
1429 if (node->op->opar == oparity_unary)
1430 return get_irn_n(node, node->op->op_index);
1432 assert(node->op->opar == oparity_unary);
1437 set_unop_op (ir_node *node, ir_node *op) {
1438 if (node->op->opar == oparity_unary)
1439 set_irn_n(node, node->op->op_index, op);
1441 assert(node->op->opar == oparity_unary);
1445 is_binop (ir_node *node) {
1446 return (node->op->opar == oparity_binary);
1450 get_binop_left (ir_node *node) {
1451 if (node->op->opar == oparity_binary)
1452 return get_irn_n(node, node->op->op_index);
1454 assert(node->op->opar == oparity_binary);
1459 set_binop_left (ir_node *node, ir_node *left) {
1460 if (node->op->opar == oparity_binary)
1461 set_irn_n(node, node->op->op_index, left);
1463 assert (node->op->opar == oparity_binary);
1467 get_binop_right (ir_node *node) {
1468 if (node->op->opar == oparity_binary)
1469 return get_irn_n(node, node->op->op_index + 1);
1471 assert(node->op->opar == oparity_binary);
1476 set_binop_right (ir_node *node, ir_node *right) {
1477 if (node->op->opar == oparity_binary)
1478 set_irn_n(node, node->op->op_index + 1, right);
1480 assert (node->op->opar == oparity_binary);
1483 INLINE int is_Phi (ir_node *n) {
1488 return (op == op_Phi) || (op == op_Filter && interprocedural_view);
1492 get_Phi_preds_arr (ir_node *node) {
1493 assert (node->op == op_Phi);
1494 return (ir_node **)&(get_irn_in(node)[1]);
1498 get_Phi_n_preds (ir_node *node) {
1499 assert (is_Phi(node));
1500 return (get_irn_arity(node));
1504 INLINE void set_Phi_n_preds (ir_node *node, int n_preds) {
1505 assert (node->op == op_Phi);
1510 get_Phi_pred (ir_node *node, int pos) {
1511 assert (is_Phi(node));
1512 return get_irn_n(node, pos);
1516 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1517 assert (is_Phi(node));
1518 set_irn_n(node, pos, pred);
1522 get_Load_mem (ir_node *node) {
1523 assert (node->op == op_Load);
1524 return get_irn_n(node, 0);
1528 set_Load_mem (ir_node *node, ir_node *mem) {
1529 assert (node->op == op_Load);
1530 set_irn_n(node, 0, mem);
1534 get_Load_ptr (ir_node *node) {
1535 assert (node->op == op_Load);
1536 return get_irn_n(node, 1);
1540 set_Load_ptr (ir_node *node, ir_node *ptr) {
1541 assert (node->op == op_Load);
1542 set_irn_n(node, 1, ptr);
1547 get_Store_mem (ir_node *node) {
1548 assert (node->op == op_Store);
1549 return get_irn_n(node, 0);
1553 set_Store_mem (ir_node *node, ir_node *mem) {
1554 assert (node->op == op_Store);
1555 set_irn_n(node, 0, mem);
1559 get_Store_ptr (ir_node *node) {
1560 assert (node->op == op_Store);
1561 return get_irn_n(node, 1);
1565 set_Store_ptr (ir_node *node, ir_node *ptr) {
1566 assert (node->op == op_Store);
1567 set_irn_n(node, 1, ptr);
1571 get_Store_value (ir_node *node) {
1572 assert (node->op == op_Store);
1573 return get_irn_n(node, 2);
1577 set_Store_value (ir_node *node, ir_node *value) {
1578 assert (node->op == op_Store);
1579 set_irn_n(node, 2, value);
1583 get_Alloc_mem (ir_node *node) {
1584 assert (node->op == op_Alloc);
1585 return get_irn_n(node, 0);
1589 set_Alloc_mem (ir_node *node, ir_node *mem) {
1590 assert (node->op == op_Alloc);
1591 set_irn_n(node, 0, mem);
1595 get_Alloc_size (ir_node *node) {
1596 assert (node->op == op_Alloc);
1597 return get_irn_n(node, 1);
1601 set_Alloc_size (ir_node *node, ir_node *size) {
1602 assert (node->op == op_Alloc);
1603 set_irn_n(node, 1, size);
1607 get_Alloc_type (ir_node *node) {
1608 assert (node->op == op_Alloc);
1609 return node->attr.a.type = skip_tid(node->attr.a.type);
1613 set_Alloc_type (ir_node *node, type *tp) {
1614 assert (node->op == op_Alloc);
1615 node->attr.a.type = tp;
1619 get_Alloc_where (ir_node *node) {
1620 assert (node->op == op_Alloc);
1621 return node->attr.a.where;
1625 set_Alloc_where (ir_node *node, where_alloc where) {
1626 assert (node->op == op_Alloc);
1627 node->attr.a.where = where;
1632 get_Free_mem (ir_node *node) {
1633 assert (node->op == op_Free);
1634 return get_irn_n(node, 0);
1638 set_Free_mem (ir_node *node, ir_node *mem) {
1639 assert (node->op == op_Free);
1640 set_irn_n(node, 0, mem);
1644 get_Free_ptr (ir_node *node) {
1645 assert (node->op == op_Free);
1646 return get_irn_n(node, 1);
1650 set_Free_ptr (ir_node *node, ir_node *ptr) {
1651 assert (node->op == op_Free);
1652 set_irn_n(node, 1, ptr);
1656 get_Free_size (ir_node *node) {
1657 assert (node->op == op_Free);
1658 return get_irn_n(node, 2);
1662 set_Free_size (ir_node *node, ir_node *size) {
1663 assert (node->op == op_Free);
1664 set_irn_n(node, 2, size);
1668 get_Free_type (ir_node *node) {
1669 assert (node->op == op_Free);
1670 return node->attr.f = skip_tid(node->attr.f);
1674 set_Free_type (ir_node *node, type *tp) {
1675 assert (node->op == op_Free);
1680 get_Sync_preds_arr (ir_node *node) {
1681 assert (node->op == op_Sync);
1682 return (ir_node **)&(get_irn_in(node)[1]);
1686 get_Sync_n_preds (ir_node *node) {
1687 assert (node->op == op_Sync);
1688 return (get_irn_arity(node));
1693 set_Sync_n_preds (ir_node *node, int n_preds) {
1694 assert (node->op == op_Sync);
1699 get_Sync_pred (ir_node *node, int pos) {
1700 assert (node->op == op_Sync);
1701 return get_irn_n(node, pos);
1705 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1706 assert (node->op == op_Sync);
1707 set_irn_n(node, pos, pred);
1711 get_Proj_pred (ir_node *node) {
1712 assert (is_Proj(node));
1713 return get_irn_n(node, 0);
1717 set_Proj_pred (ir_node *node, ir_node *pred) {
1718 assert (is_Proj(node));
1719 set_irn_n(node, 0, pred);
1723 get_Proj_proj (ir_node *node) {
1724 assert (is_Proj(node));
1725 if (get_irn_opcode(node) == iro_Proj) {
1726 return node->attr.proj;
1728 assert(get_irn_opcode(node) == iro_Filter);
1729 return node->attr.filter.proj;
1734 set_Proj_proj (ir_node *node, long proj) {
1735 assert (node->op == op_Proj);
1736 node->attr.proj = proj;
1740 get_Tuple_preds_arr (ir_node *node) {
1741 assert (node->op == op_Tuple);
1742 return (ir_node **)&(get_irn_in(node)[1]);
1746 get_Tuple_n_preds (ir_node *node) {
1747 assert (node->op == op_Tuple);
1748 return (get_irn_arity(node));
1753 set_Tuple_n_preds (ir_node *node, int n_preds) {
1754 assert (node->op == op_Tuple);
1759 get_Tuple_pred (ir_node *node, int pos) {
1760 assert (node->op == op_Tuple);
1761 return get_irn_n(node, pos);
1765 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1766 assert (node->op == op_Tuple);
1767 set_irn_n(node, pos, pred);
1771 get_Id_pred (ir_node *node) {
1772 assert (node->op == op_Id);
1773 return get_irn_n(node, 0);
1777 set_Id_pred (ir_node *node, ir_node *pred) {
1778 assert (node->op == op_Id);
1779 set_irn_n(node, 0, pred);
1782 INLINE ir_node *get_Confirm_value (ir_node *node) {
1783 assert (node->op == op_Confirm);
1784 return get_irn_n(node, 0);
1786 INLINE void set_Confirm_value (ir_node *node, ir_node *value) {
1787 assert (node->op == op_Confirm);
1788 set_irn_n(node, 0, value);
1790 INLINE ir_node *get_Confirm_bound (ir_node *node) {
1791 assert (node->op == op_Confirm);
1792 return get_irn_n(node, 1);
1794 INLINE void set_Confirm_bound (ir_node *node, ir_node *bound) {
1795 assert (node->op == op_Confirm);
1796 set_irn_n(node, 0, bound);
1798 INLINE pn_Cmp get_Confirm_cmp (ir_node *node) {
1799 assert (node->op == op_Confirm);
1800 return node->attr.confirm_cmp;
1802 INLINE void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1803 assert (node->op == op_Confirm);
1804 node->attr.confirm_cmp = cmp;
1809 get_Filter_pred (ir_node *node) {
1810 assert(node->op == op_Filter);
1814 set_Filter_pred (ir_node *node, ir_node *pred) {
1815 assert(node->op == op_Filter);
1819 get_Filter_proj(ir_node *node) {
1820 assert(node->op == op_Filter);
1821 return node->attr.filter.proj;
1824 set_Filter_proj (ir_node *node, long proj) {
1825 assert(node->op == op_Filter);
1826 node->attr.filter.proj = proj;
1829 /* Don't use get_irn_arity, get_irn_n in implementation as access
1830 shall work independent of view!!! */
1831 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1832 assert(node->op == op_Filter);
1833 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1834 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1835 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1836 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1837 node->attr.filter.in_cg[0] = node->in[0];
1839 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1842 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1843 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1844 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1845 node->attr.filter.in_cg[pos + 1] = pred;
1847 int get_Filter_n_cg_preds(ir_node *node) {
1848 assert(node->op == op_Filter && node->attr.filter.in_cg);
1849 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1851 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1853 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1855 arity = ARR_LEN(node->attr.filter.in_cg);
1856 assert(pos < arity - 1);
1857 return node->attr.filter.in_cg[pos + 1];
1862 get_irn_irg(ir_node *node) {
1863 if (get_irn_op(node) != op_Block)
1864 node = get_nodes_block(node);
1865 assert(get_irn_op(node) == op_Block);
1866 return node->attr.block.irg;
1870 /*----------------------------------------------------------------*/
1871 /* Auxiliary routines */
1872 /*----------------------------------------------------------------*/
1875 skip_Proj (ir_node *node) {
1876 /* don't assert node !!! */
1877 if (node && is_Proj(node)) {
1878 return get_Proj_pred(node);
1885 skip_Tuple (ir_node *node) {
1888 if (!get_opt_normalize()) return node;
1890 node = skip_nop(node);
1891 if (get_irn_op(node) == op_Proj) {
1892 pred = skip_nop(get_Proj_pred(node));
1893 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1894 pred = skip_nop(skip_Tuple(pred));
1895 if (get_irn_op(pred) == op_Tuple)
1896 return get_Tuple_pred(pred, get_Proj_proj(node));
1902 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1903 than any other approach, as Id chains are resolved and all point to the real node, or
1904 all id's are self loops. */
1906 skip_nop (ir_node *node) {
1907 /* don't assert node !!! */
1909 if (!get_opt_normalize()) return node;
1911 /* Don't use get_Id_pred: We get into an endless loop for
1912 self-referencing Ids. */
1913 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1914 ir_node *rem_pred = node->in[0+1];
1917 assert (get_irn_arity (node) > 0);
1919 node->in[0+1] = node;
1920 res = skip_nop(rem_pred);
1921 if (res->op == op_Id) /* self-loop */ return node;
1923 node->in[0+1] = res;
1930 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1931 than any other approach, as Id chains are resolved and all point to the real node, or
1932 all id's are self loops. */
1933 extern int opt_normalize;
1935 skip_nop (ir_node *node) {
1937 /* don't assert node !!! */
1939 if (!get_opt_normalize()) return node;
1941 /* Don't use get_Id_pred: We get into an endless loop for
1942 self-referencing Ids. */
1943 if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
1944 ir_node *rem_pred, *res;
1946 if (pred->op != op_Id) return pred; /* shortcut */
1949 assert (get_irn_arity (node) > 0);
1951 node->in[0+1] = node;
1952 res = skip_nop(rem_pred);
1953 if (res->op == op_Id) /* self-loop */ return node;
1955 node->in[0+1] = res;
1966 skip_Id (ir_node *node) {
1967 return skip_nop(node);
1971 is_Bad (ir_node *node) {
1973 if ((node) && get_irn_opcode(node) == iro_Bad)
1979 is_no_Block (ir_node *node) {
1981 return (get_irn_opcode(node) != iro_Block);
1985 is_Block (ir_node *node) {
1987 return (get_irn_opcode(node) == iro_Block);
1990 /* returns true if node is a Unknown node. */
1992 is_Unknown (ir_node *node) {
1994 return (get_irn_opcode(node) == iro_Unknown);
1998 is_Proj (const ir_node *node) {
2000 return node->op == op_Proj
2001 || (!interprocedural_view && node->op == op_Filter);
2004 /* Returns true if the operation manipulates control flow. */
2006 is_cfop(ir_node *node) {
2007 return is_cfopcode(get_irn_op(node));
2010 /* Returns true if the operation manipulates interprocedural control flow:
2011 CallBegin, EndReg, EndExcept */
2012 INLINE int is_ip_cfop(ir_node *node) {
2013 return is_ip_cfopcode(get_irn_op(node));
2016 ir_graph *get_ip_cfop_irg(ir_node *n) {
2017 return get_irn_irg(n);
2020 /* Returns true if the operation can change the control flow because
2023 is_fragile_op(ir_node *node) {
2024 return is_op_fragile(get_irn_op(node));
2027 /* Returns the memory operand of fragile operations. */
2028 ir_node *get_fragile_op_mem(ir_node *node) {
2029 assert(node && is_fragile_op(node));
2031 switch (get_irn_opcode (node)) {
2040 return get_irn_n(node, 0);
2045 assert(0 && "should not be reached");