/*
 * File name:   ir/ir/irnode.c
 * Purpose:     Representation of an intermediate operation.
 * Author:      Martin Trapp, Christian Schaefer
 * Modified by: Goetz Lindenmaier
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
 */
20 #include "irgraph_t.h"
24 #include "irbackedge_t.h"
/* some constants fixing the positions of nodes' predecessors in the in array */
34 #define CALL_PARAM_OFFSET 2
35 #define SEL_INDEX_OFFSET 2
36 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
37 #define END_KEEPALIVE_OFFSET 0
/* Declarations for inlining */
40 INLINE ir_node ** get_irn_in (const ir_node *node);
41 INLINE ir_mode *get_irn_mode (const ir_node *node);
42 INLINE ir_op *get_irn_op (const ir_node *node);
43 INLINE opcode get_irn_opcode (const ir_node *node);
44 INLINE ident *get_irn_opident (const ir_node *node);
45 INLINE type *get_SymConst_type (ir_node *node);
46 INLINE ir_node *skip_nop (ir_node *node);
47 INLINE int is_Proj (const ir_node *node);
50 static const char *pnc_name_arr [] = {
51 "False", "Eq", "Lt", "Le",
52 "Gt", "Ge", "Lg", "Leg", "Uo",
53 "Ue", "Ul", "Ule", "Ug", "Uge",
/** Returns the pnc name from a pnc constant. */
60 INLINE const char *get_pnc_string(int pnc) {
61 return pnc_name_arr[pnc];
/** Calculates the negated pnc condition, e.g. Lt -> Uge. */
68 get_negated_pnc(int pnc) {
70 case False: return True; break;
71 case Eq: return Ne; break;
72 case Lt: return Uge; break;
73 case Le: return Ug; break;
74 case Gt: return Ule; break;
75 case Ge: return Ul; break;
76 case Lg: return Ue; break;
77 case Leg: return Uo; break;
78 case Uo: return Leg; break;
79 case Ue: return Lg; break;
80 case Ul: return Ge; break;
81 case Ule: return Gt; break;
82 case Ug: return Le; break;
83 case Uge: return Lt; break;
84 case Ne: return Eq; break;
85 case True: return False; break;
87 return 99; /* to shut up gcc */
90 const char *pns_name_arr [] = {
91 "initial_exec", "global_store",
92 "frame_base", "globals", "args"
95 const char *symconst_name_arr [] = {
96 "type_tag", "size", "linkage_ptr_info"
105 * irnode constructor.
106 * Create a new irnode in irg, with an op, mode, arity and
107 * some incoming irnodes.
108 * If arity is negative, a node with a dynamic array is created.
111 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
112 int arity, ir_node **in)
115 int node_size = offsetof (ir_node, attr) + op->attr_size;
117 assert(irg && op && mode);
118 res = (ir_node *) obstack_alloc (irg->obst, node_size);
120 res->kind = k_ir_node;
126 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
128 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
129 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
132 set_irn_dbg_info(res, db);
136 res->node_nr = get_irp_new_node_nr();
144 /* Copies all attributes stored in the old node to the new node.
145 Assumes both have the same opcode and sufficient size. */
147 copy_attrs (const ir_node *old_node, ir_node *new_node) {
148 assert(get_irn_op(old_node) == get_irn_op(new_node));
149 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
152 /** getting some parameters from ir_nodes **/
155 is_ir_node (const void *thing) {
156 if (get_kind(thing) == k_ir_node)
162 /* returns the number of predecessors without the block predecessor. */
164 get_irn_intra_arity (const ir_node *node) {
166 return ARR_LEN(node->in) - 1;
169 /* returns the number of predecessors without the block predecessor. */
171 get_irn_inter_arity (const ir_node *node) {
173 if (get_irn_opcode(node) == iro_Filter) {
174 assert(node->attr.filter.in_cg);
175 return ARR_LEN(node->attr.filter.in_cg) - 1;
176 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
177 return ARR_LEN(node->attr.block.in_cg) - 1;
179 return get_irn_intra_arity(node);
182 /* returns the number of predecessors without the block predecessor. */
184 get_irn_arity (const ir_node *node) {
186 if (interprocedural_view) return get_irn_inter_arity(node);
187 return get_irn_intra_arity(node);
190 /* Returns the array with ins. This array is shifted with respect to the
191 array accessed by get_irn_n: The block operand is at position 0 not -1.
192 (@@@ This should be changed.)
193 The order of the predecessors in this array is not guaranteed, except that
194 lists of operands as predecessors of Block or arguments of a Call are
197 get_irn_in (const ir_node *node) {
199 if (interprocedural_view) { /* handle Filter and Block specially */
200 if (get_irn_opcode(node) == iro_Filter) {
201 assert(node->attr.filter.in_cg);
202 return node->attr.filter.in_cg;
203 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
204 return node->attr.block.in_cg;
206 /* else fall through */
212 set_irn_in (ir_node *node, int arity, ir_node **in) {
215 if (interprocedural_view) { /* handle Filter and Block specially */
216 if (get_irn_opcode(node) == iro_Filter) {
217 assert(node->attr.filter.in_cg);
218 arr = &node->attr.filter.in_cg;
219 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
220 arr = &node->attr.block.in_cg;
227 if (arity != ARR_LEN(*arr) - 1) {
228 ir_node * block = (*arr)[0];
229 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
232 fix_backedges(current_ir_graph->obst, node);
233 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
237 get_irn_intra_n (ir_node *node, int n) {
238 return (node->in[n + 1] = skip_nop(node->in[n + 1]));
242 get_irn_inter_n (ir_node *node, int n) {
243 /* handle Filter and Block specially */
244 if (get_irn_opcode(node) == iro_Filter) {
245 assert(node->attr.filter.in_cg);
246 return (node->attr.filter.in_cg[n + 1] = skip_nop(node->attr.filter.in_cg[n + 1]));
247 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
248 return (node->attr.block.in_cg[n + 1] = skip_nop(node->attr.block.in_cg[n + 1]));
251 return get_irn_intra_n (node, n);
254 /* to iterate through the predecessors without touching the array */
255 /* To iterate over the operands iterate from 0 to i < get_irn_arity(),
256 to iterate including the Block predecessor iterate from i = -1 to
258 If it is a block, the entry -1 is NULL. */
260 get_irn_n (ir_node *node, int n) {
261 assert(node); assert(-1 <= n && n < get_irn_arity(node));
262 if (interprocedural_view) return get_irn_inter_n (node, n);
263 return get_irn_intra_n (node, n);
268 set_irn_n (ir_node *node, int n, ir_node *in) {
269 assert(node && -1 <= n && n < get_irn_arity(node));
270 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
271 /* Change block pred in both views! */
272 node->in[n + 1] = in;
273 assert(node->attr.filter.in_cg);
274 node->attr.filter.in_cg[n + 1] = in;
277 if (interprocedural_view) { /* handle Filter and Block specially */
278 if (get_irn_opcode(node) == iro_Filter) {
279 assert(node->attr.filter.in_cg);
280 node->attr.filter.in_cg[n + 1] = in;
282 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
283 node->attr.block.in_cg[n + 1] = in;
286 /* else fall through */
288 node->in[n + 1] = in;
292 get_irn_mode (const ir_node *node)
299 set_irn_mode (ir_node *node, ir_mode *mode)
307 get_irn_modecode (const ir_node *node)
310 return node->mode->code;
313 /** Gets the string representation of the mode .*/
315 get_irn_modename (const ir_node *node)
318 return get_mode_name(node->mode);
322 get_irn_modeident (const ir_node *node)
325 return get_mode_ident(node->mode);
329 get_irn_op (const ir_node *node)
335 /* should be private to the library: */
337 set_irn_op (ir_node *node, ir_op *op)
344 get_irn_opcode (const ir_node *node)
346 assert (k_ir_node == get_kind(node));
348 return node->op->code;
352 get_irn_opname (const ir_node *node)
355 return get_id_str(node->op->name);
359 get_irn_opident (const ir_node *node)
362 return node->op->name;
366 get_irn_visited (const ir_node *node)
369 return node->visited;
373 set_irn_visited (ir_node *node, unsigned long visited)
376 node->visited = visited;
380 mark_irn_visited (ir_node *node) {
382 node->visited = current_ir_graph->visited;
386 irn_not_visited (const ir_node *node) {
388 return (node->visited < current_ir_graph->visited);
392 irn_visited (const ir_node *node) {
394 return (node->visited >= current_ir_graph->visited);
398 set_irn_link (ir_node *node, void *link) {
400 /* Link field is used for Phi construction and various optimizations
402 assert(get_irg_phase_state(current_ir_graph) != phase_building);
408 get_irn_link (const ir_node *node) {
413 /* Outputs a unique number for this node */
415 get_irn_node_nr(const ir_node *node) {
418 return node->node_nr;
425 get_irn_const_attr (ir_node *node)
427 assert (node->op == op_Const);
428 return node->attr.con;
432 get_irn_proj_attr (ir_node *node)
434 assert (node->op == op_Proj);
435 return node->attr.proj;
439 get_irn_alloc_attr (ir_node *node)
441 assert (node->op == op_Alloc);
446 get_irn_free_attr (ir_node *node)
448 assert (node->op == op_Free);
449 return node->attr.f = skip_tid(node->attr.f);
453 get_irn_symconst_attr (ir_node *node)
455 assert (node->op == op_SymConst);
460 get_irn_call_attr (ir_node *node)
462 assert (node->op == op_Call);
463 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
467 get_irn_funccall_attr (ir_node *node)
469 assert (node->op == op_FuncCall);
470 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
474 get_irn_sel_attr (ir_node *node)
476 assert (node->op == op_Sel);
481 get_irn_phi_attr (ir_node *node)
483 assert (node->op == op_Phi);
484 return node->attr.phi0_pos;
488 get_irn_block_attr (ir_node *node)
490 assert (node->op == op_Block);
491 return node->attr.block;
494 /** manipulate fields of individual nodes **/
496 /* this works for all except Block */
498 get_nodes_Block (ir_node *node) {
499 assert (!(node->op == op_Block));
500 return get_irn_n(node, -1);
504 set_nodes_Block (ir_node *node, ir_node *block) {
505 assert (!(node->op == op_Block));
506 set_irn_n(node, -1, block);
509 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
510 * from Start. If so returns frame type, else Null. */
511 type *is_frame_pointer(ir_node *n) {
512 if ((get_irn_op(n) == op_Proj) &&
513 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
514 ir_node *start = get_Proj_pred(n);
515 if (get_irn_op(start) == op_Start) {
516 return get_irg_frame_type(get_irn_irg(start));
522 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
523 * from Start. If so returns global type, else Null. */
524 type *is_globals_pointer(ir_node *n) {
525 if ((get_irn_op(n) == op_Proj) &&
526 (get_Proj_proj(n) == pn_Start_P_globals)) {
527 ir_node *start = get_Proj_pred(n);
528 if (get_irn_op(start) == op_Start) {
529 return get_glob_type();
535 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
536 * from Start. If so returns 1, else 0. */
537 int is_value_arg_pointer(ir_node *n) {
538 if ((get_irn_op(n) == op_Proj) &&
539 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
540 (get_irn_op(get_Proj_pred(n)) == op_Start))
545 /* Returns an array with the predecessors of the Block. Depending on
546 the implementation of the graph data structure this can be a copy of
547 the internal representation of predecessors as well as the internal
548 array itself. Therefore writing to this array might obstruct the ir. */
550 get_Block_cfgpred_arr (ir_node *node)
552 assert ((node->op == op_Block));
553 return (ir_node **)&(get_irn_in(node)[1]);
558 get_Block_n_cfgpreds (ir_node *node) {
559 assert ((node->op == op_Block));
560 return (get_irn_arity(node));
564 get_Block_cfgpred (ir_node *node, int pos) {
565 assert (node->op == op_Block);
567 if (-1 > pos || get_irn_arity(node) <= pos) {
568 dump_ir_block_graph(current_ir_graph);
569 printf("pos: %d, arity: %d ", pos, get_irn_arity(node));
572 assert(node); assert(-1 <= pos && pos < get_irn_arity(node));
573 return get_irn_n(node, pos);
577 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
578 assert (node->op == op_Block);
579 set_irn_n(node, pos, pred);
583 get_Block_matured (ir_node *node) {
584 assert (node->op == op_Block);
585 return node->attr.block.matured;
589 set_Block_matured (ir_node *node, bool matured) {
590 assert (node->op == op_Block);
591 node->attr.block.matured = matured;
594 get_Block_block_visited (ir_node *node) {
595 assert (node->op == op_Block);
596 return node->attr.block.block_visited;
600 set_Block_block_visited (ir_node *node, unsigned long visit) {
601 assert (node->op == op_Block);
602 node->attr.block.block_visited = visit;
605 /* For this current_ir_graph must be set. */
607 mark_Block_block_visited (ir_node *node) {
608 assert (node->op == op_Block);
609 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
613 Block_not_block_visited(ir_node *node) {
614 assert (node->op == op_Block);
615 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
619 get_Block_graph_arr (ir_node *node, int pos) {
620 assert (node->op == op_Block);
621 return node->attr.block.graph_arr[pos+1];
625 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
626 assert (node->op == op_Block);
627 node->attr.block.graph_arr[pos+1] = value;
/* handler handling for Blocks */
632 set_Block_handler (ir_node *block, ir_node *handler) {
633 assert ((block->op == op_Block));
634 assert ((handler->op == op_Block));
635 block->attr.block.handler_entry = handler;
639 get_Block_handler (ir_node *block) {
640 assert ((block->op == op_Block));
641 return (block->attr.block.handler_entry);
/* handler handling for Nodes */
646 set_Node_handler (ir_node *node, ir_node *handler) {
647 set_Block_handler (get_nodes_Block (node), handler);
651 get_Node_handler (ir_node *node) {
652 return (get_Block_handler (get_nodes_Block (node)));
/* exc_t handling for Blocks */
656 void set_Block_exc (ir_node *block, exc_t exc) {
657 assert ((block->op == op_Block));
658 block->attr.block.exc = exc;
661 exc_t get_Block_exc (ir_node *block) {
662 assert ((block->op == op_Block));
663 return (block->attr.block.exc);
/* exc_t handling for Nodes */
667 void set_Node_exc (ir_node *node, exc_t exc) {
668 set_Block_exc (get_nodes_Block (node), exc);
671 exc_t get_Node_exc (ir_node *node) {
672 return (get_Block_exc (get_nodes_Block (node)));
676 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
677 assert(node->op == op_Block);
678 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
679 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
680 node->attr.block.in_cg[0] = NULL;
681 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
683 /* Fix backedge array. fix_backedges operates depending on
684 interprocedural_view. */
685 bool ipv = interprocedural_view;
686 interprocedural_view = true;
687 fix_backedges(current_ir_graph->obst, node);
688 interprocedural_view = ipv;
691 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
694 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
695 assert(node->op == op_Block &&
696 node->attr.block.in_cg &&
697 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
698 node->attr.block.in_cg[pos + 1] = pred;
701 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
702 assert(node->op == op_Block);
703 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
706 int get_Block_cg_n_cfgpreds(ir_node * node) {
707 assert(node->op == op_Block);
708 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
711 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
712 assert(node->op == op_Block && node->attr.block.in_cg);
713 return node->attr.block.in_cg[pos + 1];
716 void remove_Block_cg_cfgpred_arr(ir_node * node) {
717 assert(node->op == op_Block);
718 node->attr.block.in_cg = NULL;
721 /* Start references the irg it is in. */
723 get_Start_irg(ir_node *node) {
724 return get_irn_irg(node);
728 set_Start_irg(ir_node *node, ir_graph *irg) {
729 assert(node->op == op_Start);
730 assert(is_ir_graph(irg));
731 assert(0 && " Why set irg? ");
732 //node->attr.start.irg = irg;
736 get_End_n_keepalives(ir_node *end) {
737 assert (end->op == op_End);
738 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
742 get_End_keepalive(ir_node *end, int pos) {
743 assert (end->op == op_End);
744 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
748 add_End_keepalive (ir_node *end, ir_node *ka) {
749 assert (end->op == op_End);
750 ARR_APP1 (ir_node *, end->in, ka);
754 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
755 assert (end->op == op_End);
756 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
760 free_End (ir_node *end) {
761 assert (end->op == op_End);
763 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
764 end->in = NULL; /* @@@ make sure we get an error if we use the
765 in array afterwards ... */
768 ir_graph *get_EndReg_irg (ir_node *end) {
769 return get_irn_irg(end);
772 ir_graph *get_EndExcept_irg (ir_node *end) {
773 return get_irn_irg(end);
777 > Implementing the case construct (which is where the constant Proj node is
778 > important) involves far more than simply determining the constant values.
779 > We could argue that this is more properly a function of the translator from
780 > Firm to the target machine. That could be done if there was some way of
781 > projecting "default" out of the Cond node.
782 I know it's complicated.
783 Basically there are two proglems:
784 - determining the gaps between the projs
785 - determining the biggest case constant to know the proj number for
787 I see several solutions:
788 1. Introduce a ProjDefault node. Solves both problems.
789 This means to extend all optimizations executed during construction.
790 2. Give the Cond node for switch two flavors:
791 a) there are no gaps in the projs (existing flavor)
792 b) gaps may exist, default proj is still the Proj with the largest
793 projection number. This covers also the gaps.
794 3. Fix the semantic of the Cond to that of 2b)
796 Solution 2 seems to be the best:
797 Computing the gaps in the Firm representation is not too hard, i.e.,
798 libFIRM can implement a routine that transforms between the two
799 flavours. This is also possible for 1) but 2) does not require to
800 change any existing optimization.
801 Further it should be far simpler to determine the biggest constant than
803 I don't want to choose 3) as 2a) seems to have advantages for
804 dataflow analysis and 3) does not allow to convert the representation to
808 get_Cond_selector (ir_node *node) {
809 assert (node->op == op_Cond);
810 return get_irn_n(node, 0);
814 set_Cond_selector (ir_node *node, ir_node *selector) {
815 assert (node->op == op_Cond);
816 set_irn_n(node, 0, selector);
820 get_Cond_kind (ir_node *node) {
821 assert (node->op == op_Cond);
822 return node->attr.c.kind;
826 set_Cond_kind (ir_node *node, cond_kind kind) {
827 assert (node->op == op_Cond);
828 node->attr.c.kind = kind;
832 get_Return_mem (ir_node *node) {
833 assert (node->op == op_Return);
834 return get_irn_n(node, 0);
838 set_Return_mem (ir_node *node, ir_node *mem) {
839 assert (node->op == op_Return);
840 set_irn_n(node, 0, mem);
844 get_Return_n_ress (ir_node *node) {
845 assert (node->op == op_Return);
846 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
850 get_Return_res_arr (ir_node *node)
852 assert ((node->op == op_Return));
853 if (get_Return_n_ress(node) > 0)
854 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
861 set_Return_n_res (ir_node *node, int results) {
862 assert (node->op == op_Return);
867 get_Return_res (ir_node *node, int pos) {
868 assert (node->op == op_Return);
869 assert (get_Return_n_ress(node) > pos);
870 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
874 set_Return_res (ir_node *node, int pos, ir_node *res){
875 assert (node->op == op_Return);
876 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
880 get_Raise_mem (ir_node *node) {
881 assert (node->op == op_Raise);
882 return get_irn_n(node, 0);
886 set_Raise_mem (ir_node *node, ir_node *mem) {
887 assert (node->op == op_Raise);
888 set_irn_n(node, 0, mem);
892 get_Raise_exo_ptr (ir_node *node) {
893 assert (node->op == op_Raise);
894 return get_irn_n(node, 1);
898 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
899 assert (node->op == op_Raise);
900 set_irn_n(node, 1, exo_ptr);
903 INLINE tarval *get_Const_tarval (ir_node *node) {
904 assert (node->op == op_Const);
905 return node->attr.con.tv;
909 set_Const_tarval (ir_node *node, tarval *con) {
910 assert (node->op == op_Const);
911 node->attr.con.tv = con;
915 /* The source language type. Must be an atomic type. Mode of type must
916 be mode of node. For tarvals from entities type must be pointer to
919 get_Const_type (ir_node *node) {
920 assert (node->op == op_Const);
921 return node->attr.con.tp;
925 set_Const_type (ir_node *node, type *tp) {
926 assert (node->op == op_Const);
927 if (tp != unknown_type) {
928 assert (is_atomic_type(tp));
929 assert (get_type_mode(tp) == get_irn_mode(node));
930 assert (!tarval_is_entity(get_Const_tarval(node)) ||
931 (is_pointer_type(tp) &&
932 (get_pointer_points_to_type(tp) ==
933 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
936 node->attr.con.tp = tp;
941 get_SymConst_kind (const ir_node *node) {
942 assert (node->op == op_SymConst);
943 return node->attr.i.num;
947 set_SymConst_kind (ir_node *node, symconst_kind num) {
948 assert (node->op == op_SymConst);
949 node->attr.i.num = num;
953 get_SymConst_type (ir_node *node) {
954 assert ( (node->op == op_SymConst)
955 && ( get_SymConst_kind(node) == type_tag
956 || get_SymConst_kind(node) == size));
957 return node->attr.i.tori.typ = skip_tid(node->attr.i.tori.typ);
961 set_SymConst_type (ir_node *node, type *tp) {
962 assert ( (node->op == op_SymConst)
963 && ( get_SymConst_kind(node) == type_tag
964 || get_SymConst_kind(node) == size));
965 node->attr.i.tori.typ = tp;
969 get_SymConst_ptrinfo (ir_node *node) {
970 assert ( (node->op == op_SymConst)
971 && (get_SymConst_kind(node) == linkage_ptr_info));
972 return node->attr.i.tori.ptrinfo;
976 set_SymConst_ptrinfo (ir_node *node, ident *ptrinfo) {
977 assert ( (node->op == op_SymConst)
978 && (get_SymConst_kind(node) == linkage_ptr_info));
979 node->attr.i.tori.ptrinfo = ptrinfo;
983 get_SymConst_type_or_id (ir_node *node) {
984 assert (node->op == op_SymConst);
985 return &(node->attr.i.tori);
989 set_SymConst_type_or_id (ir_node *node, type_or_id_p tori) {
990 assert (node->op == op_SymConst);
991 memcpy (&(node->attr.i.tori), tori, sizeof(type_or_id));
995 get_Sel_mem (ir_node *node) {
996 assert (node->op == op_Sel);
997 return get_irn_n(node, 0);
1001 set_Sel_mem (ir_node *node, ir_node *mem) {
1002 assert (node->op == op_Sel);
1003 set_irn_n(node, 0, mem);
1007 get_Sel_ptr (ir_node *node) {
1008 assert (node->op == op_Sel);
1009 return get_irn_n(node, 1);
1013 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1014 assert (node->op == op_Sel);
1015 set_irn_n(node, 1, ptr);
1019 get_Sel_n_indexs (ir_node *node) {
1020 assert (node->op == op_Sel);
1021 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1025 get_Sel_index_arr (ir_node *node)
1027 assert ((node->op == op_Sel));
1028 if (get_Sel_n_indexs(node) > 0)
1029 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1035 get_Sel_index (ir_node *node, int pos) {
1036 assert (node->op == op_Sel);
1037 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1041 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1042 assert (node->op == op_Sel);
1043 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1047 get_Sel_entity (ir_node *node) {
1048 assert (node->op == op_Sel);
1049 return node->attr.s.ent;
1053 set_Sel_entity (ir_node *node, entity *ent) {
1054 assert (node->op == op_Sel);
1055 node->attr.s.ent = ent;
1059 get_InstOf_ent (ir_node *node) {
1060 assert (node->op = op_InstOf);
1061 return (node->attr.io.ent);
1065 set_InstOf_ent (ir_node *node, type *ent) {
1066 assert (node->op = op_InstOf);
1067 node->attr.io.ent = ent;
1071 get_InstOf_store (ir_node *node) {
1072 assert (node->op = op_InstOf);
1073 return (get_irn_n (node, 0));
1077 set_InstOf_store (ir_node *node, ir_node *obj) {
1078 assert (node->op = op_InstOf);
1079 set_irn_n (node, 0, obj);
1083 get_InstOf_obj (ir_node *node) {
1084 assert (node->op = op_InstOf);
1085 return (get_irn_n (node, 1));
1089 set_InstOf_obj (ir_node *node, ir_node *obj) {
1090 assert (node->op = op_InstOf);
1091 set_irn_n (node, 1, obj);
1095 /* For unary and binary arithmetic operations the access to the
1096 operands can be factored out. Left is the first, right the
1097 second arithmetic value as listed in tech report 0999-33.
1098 unops are: Minus, Abs, Not, Conv, Cast
1099 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1100 Shr, Shrs, Rotate, Cmp */
1104 get_Call_mem (ir_node *node) {
1105 assert (node->op == op_Call);
1106 return get_irn_n(node, 0);
1110 set_Call_mem (ir_node *node, ir_node *mem) {
1111 assert (node->op == op_Call);
1112 set_irn_n(node, 0, mem);
1116 get_Call_ptr (ir_node *node) {
1117 assert (node->op == op_Call);
1118 return get_irn_n(node, 1);
1122 set_Call_ptr (ir_node *node, ir_node *ptr) {
1123 assert (node->op == op_Call);
1124 set_irn_n(node, 1, ptr);
1128 get_Call_param_arr (ir_node *node) {
1129 assert (node->op == op_Call);
1130 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1134 get_Call_n_params (ir_node *node) {
1135 assert (node->op == op_Call);
1136 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1140 get_Call_arity (ir_node *node) {
1141 assert (node->op == op_Call);
1142 return get_Call_n_params(node);
1146 set_Call_arity (ir_node *node, ir_node *arity) {
1147 assert (node->op == op_Call);
1152 get_Call_param (ir_node *node, int pos) {
1153 assert (node->op == op_Call);
1154 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1158 set_Call_param (ir_node *node, int pos, ir_node *param) {
1159 assert (node->op == op_Call);
1160 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1164 get_Call_type (ir_node *node) {
1165 assert (node->op == op_Call);
1166 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1170 set_Call_type (ir_node *node, type *tp) {
1171 assert (node->op == op_Call);
1172 assert (is_method_type(tp));
1173 node->attr.call.cld_tp = tp;
1176 int Call_has_callees(ir_node *node) {
1177 return (node->attr.call.callee_arr != NULL);
1180 int get_Call_n_callees(ir_node * node) {
1181 assert(node->op == op_Call && node->attr.call.callee_arr);
1182 return ARR_LEN(node->attr.call.callee_arr);
1185 entity * get_Call_callee(ir_node * node, int pos) {
1186 assert(node->op == op_Call && node->attr.call.callee_arr);
1187 return node->attr.call.callee_arr[pos];
1190 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1191 assert(node->op == op_Call);
1192 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1193 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1195 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1198 void remove_Call_callee_arr(ir_node * node) {
1199 assert(node->op == op_Call);
1200 node->attr.call.callee_arr = NULL;
1203 ir_node * get_CallBegin_ptr (ir_node *node) {
1204 assert(node->op == op_CallBegin);
1205 return get_irn_n(node, 0);
1207 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1208 assert(node->op == op_CallBegin);
1209 set_irn_n(node, 0, ptr);
1211 ir_graph * get_CallBegin_irg (ir_node *node) {
1212 return get_irn_irg(node);
1214 ir_node * get_CallBegin_call (ir_node *node) {
1215 assert(node->op == op_CallBegin);
1216 return node->attr.callbegin.call;
1218 void set_CallBegin_call (ir_node *node, ir_node *call) {
1219 assert(node->op == op_CallBegin);
1220 node->attr.callbegin.call = call;
1224 get_FuncCall_ptr (ir_node *node) {
1225 assert (node->op == op_FuncCall);
1226 return get_irn_n(node, 0);
1230 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1231 assert (node->op == op_FuncCall);
1232 set_irn_n(node, 0, ptr);
1236 get_FuncCall_param_arr (ir_node *node) {
1237 assert (node->op == op_FuncCall);
1238 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET];
1242 get_FuncCall_n_params (ir_node *node) {
1243 assert (node->op == op_FuncCall);
1244 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1248 get_FuncCall_arity (ir_node *node) {
1249 assert (node->op == op_FuncCall);
1250 return get_FuncCall_n_params(node);
1254 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1255 assert (node->op == op_FuncCall);
1260 get_FuncCall_param (ir_node *node, int pos) {
1261 assert (node->op == op_FuncCall);
1262 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1266 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1267 assert (node->op == op_FuncCall);
1268 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1272 get_FuncCall_type (ir_node *node) {
1273 assert (node->op == op_FuncCall);
1274 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1278 set_FuncCall_type (ir_node *node, type *tp) {
1279 assert (node->op == op_FuncCall);
1280 assert (is_method_type(tp));
1281 node->attr.call.cld_tp = tp;
1284 int FuncCall_has_callees(ir_node *node) {
1285 return (node->attr.call.callee_arr != NULL);
1288 int get_FuncCall_n_callees(ir_node * node) {
1289 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1290 return ARR_LEN(node->attr.call.callee_arr);
1293 entity * get_FuncCall_callee(ir_node * node, int pos) {
1294 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1295 return node->attr.call.callee_arr[pos];
1298 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1299 assert(node->op == op_FuncCall);
1300 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1301 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1303 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1306 void remove_FuncCall_callee_arr(ir_node * node) {
1307 assert(node->op == op_FuncCall);
1308 node->attr.call.callee_arr = NULL;
1313 ir_node * get_##OP##_left(ir_node *node) { \
1314 assert(node->op == op_##OP); \
1315 return get_irn_n(node, node->op->op_index); \
1317 void set_##OP##_left(ir_node *node, ir_node *left) { \
1318 assert(node->op == op_##OP); \
1319 set_irn_n(node, node->op->op_index, left); \
1321 ir_node *get_##OP##_right(ir_node *node) { \
1322 assert(node->op == op_##OP); \
1323 return get_irn_n(node, node->op->op_index + 1); \
1325 void set_##OP##_right(ir_node *node, ir_node *right) { \
1326 assert(node->op == op_##OP); \
1327 set_irn_n(node, node->op->op_index + 1, right); \
1331 ir_node *get_##OP##_op(ir_node *node) { \
1332 assert(node->op == op_##OP); \
1333 return get_irn_n(node, node->op->op_index); \
1335 void set_##OP##_op (ir_node *node, ir_node *op) { \
1336 assert(node->op == op_##OP); \
1337 set_irn_n(node, node->op->op_index, op); \
/* Accessors for the memory operand (predecessor 0) of the dividing
   arithmetic nodes Quot, DivMod, Div and Mod.  All four families are
   identical in structure: get returns pred 0, set overwrites pred 0. */
1347 get_Quot_mem (ir_node *node) {
1348 assert (node->op == op_Quot);
1349 return get_irn_n(node, 0);
1353 set_Quot_mem (ir_node *node, ir_node *mem) {
1354 assert (node->op == op_Quot);
1355 set_irn_n(node, 0, mem);
1361 get_DivMod_mem (ir_node *node) {
1362 assert (node->op == op_DivMod);
1363 return get_irn_n(node, 0);
1367 set_DivMod_mem (ir_node *node, ir_node *mem) {
1368 assert (node->op == op_DivMod);
1369 set_irn_n(node, 0, mem);
1375 get_Div_mem (ir_node *node) {
1376 assert (node->op == op_Div);
1377 return get_irn_n(node, 0);
1381 set_Div_mem (ir_node *node, ir_node *mem) {
1382 assert (node->op == op_Div);
1383 set_irn_n(node, 0, mem);
1389 get_Mod_mem (ir_node *node) {
1390 assert (node->op == op_Mod);
1391 return get_irn_n(node, 0);
1395 set_Mod_mem (ir_node *node, ir_node *mem) {
1396 assert (node->op == op_Mod);
1397 set_irn_n(node, 0, mem);
/* Cast nodes store their target type in the cast attribute; these
   accessors read/write attr.cast.totype directly (no predecessor). */
1414 get_Cast_type (ir_node *node) {
1415 assert (node->op == op_Cast);
1416 return node->attr.cast.totype;
1420 set_Cast_type (ir_node *node, type *to_tp) {
1421 assert (node->op == op_Cast);
1422 node->attr.cast.totype = to_tp;
/* Generic accessors that work on any unary/binary operation.  The
   operation descriptor (node->op) supplies op_index, the position of
   the first real operand; for binary ops the right operand follows at
   op_index + 1.  The get/set routines guard with an if and repeat the
   condition in an assert so release builds silently do nothing on a
   wrong node kind while debug builds abort. */
1426 is_unop (ir_node *node) {
1427 return (node->op->opar == oparity_unary);
1431 get_unop_op (ir_node *node) {
1432 if (node->op->opar == oparity_unary)
1433 return get_irn_n(node, node->op->op_index);
1435 assert(node->op->opar == oparity_unary);
1440 set_unop_op (ir_node *node, ir_node *op) {
1441 if (node->op->opar == oparity_unary)
1442 set_irn_n(node, node->op->op_index, op);
1444 assert(node->op->opar == oparity_unary);
1448 is_binop (ir_node *node) {
1449 return (node->op->opar == oparity_binary);
1453 get_binop_left (ir_node *node) {
1454 if (node->op->opar == oparity_binary)
1455 return get_irn_n(node, node->op->op_index);
1457 assert(node->op->opar == oparity_binary);
1462 set_binop_left (ir_node *node, ir_node *left) {
1463 if (node->op->opar == oparity_binary)
1464 set_irn_n(node, node->op->op_index, left);
1466 assert (node->op->opar == oparity_binary);
1470 get_binop_right (ir_node *node) {
1471 if (node->op->opar == oparity_binary)
1472 return get_irn_n(node, node->op->op_index + 1);
1474 assert(node->op->opar == oparity_binary);
1479 set_binop_right (ir_node *node, ir_node *right) {
1480 if (node->op->opar == oparity_binary)
1481 set_irn_n(node, node->op->op_index + 1, right);
1483 assert (node->op->opar == oparity_binary);
/* A node counts as Phi if its op is op_Phi, or if it is a Filter while
   the interprocedural view is active (Filter then plays the Phi role;
   compare is_Proj below, which claims Filter in the !interprocedural
   case). */
1486 INLINE int is_Phi (ir_node *n) {
1491 return (op == op_Phi) || (op == op_Filter && interprocedural_view);
/* Raw access to the predecessor array: in[0] is the block, so the
   data predecessors start at &in[1]. */
1495 get_Phi_preds_arr (ir_node *node) {
1496 assert (node->op == op_Phi);
1497 return (ir_node **)&(get_irn_in(node)[1]);
1501 get_Phi_n_preds (ir_node *node) {
1502 assert (is_Phi(node));
1503 return (get_irn_arity(node));
/* NOTE(review): only the assert of set_Phi_n_preds is visible in this
   excerpt; the rest of its body lies outside the shown lines. */
1507 INLINE void set_Phi_n_preds (ir_node *node, int n_preds) {
1508 assert (node->op == op_Phi);
1513 get_Phi_pred (ir_node *node, int pos) {
1514 assert (is_Phi(node));
1515 return get_irn_n(node, pos);
1519 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1520 assert (is_Phi(node));
1521 set_irn_n(node, pos, pred);
/* Load node: pred 0 = memory, pred 1 = address. */
1525 get_Load_mem (ir_node *node) {
1526 assert (node->op == op_Load);
1527 return get_irn_n(node, 0);
1531 set_Load_mem (ir_node *node, ir_node *mem) {
1532 assert (node->op == op_Load);
1533 set_irn_n(node, 0, mem);
1537 get_Load_ptr (ir_node *node) {
1538 assert (node->op == op_Load);
1539 return get_irn_n(node, 1);
1543 set_Load_ptr (ir_node *node, ir_node *ptr) {
1544 assert (node->op == op_Load);
1545 set_irn_n(node, 1, ptr);
/* Store node: pred 0 = memory, pred 1 = address, pred 2 = value. */
1550 get_Store_mem (ir_node *node) {
1551 assert (node->op == op_Store);
1552 return get_irn_n(node, 0);
1556 set_Store_mem (ir_node *node, ir_node *mem) {
1557 assert (node->op == op_Store);
1558 set_irn_n(node, 0, mem);
1562 get_Store_ptr (ir_node *node) {
1563 assert (node->op == op_Store);
1564 return get_irn_n(node, 1);
1568 set_Store_ptr (ir_node *node, ir_node *ptr) {
1569 assert (node->op == op_Store);
1570 set_irn_n(node, 1, ptr);
1574 get_Store_value (ir_node *node) {
1575 assert (node->op == op_Store);
1576 return get_irn_n(node, 2);
1580 set_Store_value (ir_node *node, ir_node *value) {
1581 assert (node->op == op_Store);
1582 set_irn_n(node, 2, value);
/* Alloc node: pred 0 = memory, pred 1 = size; allocated type and the
   stack/heap discriminator live in attr.a. */
1586 get_Alloc_mem (ir_node *node) {
1587 assert (node->op == op_Alloc);
1588 return get_irn_n(node, 0);
1592 set_Alloc_mem (ir_node *node, ir_node *mem) {
1593 assert (node->op == op_Alloc);
1594 set_irn_n(node, 0, mem);
1598 get_Alloc_size (ir_node *node) {
1599 assert (node->op == op_Alloc);
1600 return get_irn_n(node, 1);
1604 set_Alloc_size (ir_node *node, ir_node *size) {
1605 assert (node->op == op_Alloc);
1606 set_irn_n(node, 1, size);
/* Normalizes the stored type via skip_tid and caches the normalized
   pointer back into the attribute before returning it. */
1610 get_Alloc_type (ir_node *node) {
1611 assert (node->op == op_Alloc);
1612 return node->attr.a.type = skip_tid(node->attr.a.type);
1616 set_Alloc_type (ir_node *node, type *tp) {
1617 assert (node->op == op_Alloc);
1618 node->attr.a.type = tp;
1622 get_Alloc_where (ir_node *node) {
1623 assert (node->op == op_Alloc);
1624 return node->attr.a.where;
1628 set_Alloc_where (ir_node *node, where_alloc where) {
1629 assert (node->op == op_Alloc);
1630 node->attr.a.where = where;
/* Free node: pred 0 = memory, pred 1 = pointer, pred 2 = size; the
   freed type is kept directly in attr.f. */
1635 get_Free_mem (ir_node *node) {
1636 assert (node->op == op_Free);
1637 return get_irn_n(node, 0);
1641 set_Free_mem (ir_node *node, ir_node *mem) {
1642 assert (node->op == op_Free);
1643 set_irn_n(node, 0, mem);
1647 get_Free_ptr (ir_node *node) {
1648 assert (node->op == op_Free);
1649 return get_irn_n(node, 1);
1653 set_Free_ptr (ir_node *node, ir_node *ptr) {
1654 assert (node->op == op_Free);
1655 set_irn_n(node, 1, ptr);
1659 get_Free_size (ir_node *node) {
1660 assert (node->op == op_Free);
1661 return get_irn_n(node, 2);
1665 set_Free_size (ir_node *node, ir_node *size) {
1666 assert (node->op == op_Free);
1667 set_irn_n(node, 2, size);
/* Like get_Alloc_type: normalize with skip_tid and cache the result. */
1671 get_Free_type (ir_node *node) {
1672 assert (node->op == op_Free);
1673 return node->attr.f = skip_tid(node->attr.f);
1677 set_Free_type (ir_node *node, type *tp) {
1678 assert (node->op == op_Free);
/* Sync node: variable number of memory predecessors.  The raw array
   accessor skips in[0] (the block) and returns &in[1]. */
1683 get_Sync_preds_arr (ir_node *node) {
1684 assert (node->op == op_Sync);
1685 return (ir_node **)&(get_irn_in(node)[1]);
1689 get_Sync_n_preds (ir_node *node) {
1690 assert (node->op == op_Sync);
1691 return (get_irn_arity(node));
/* NOTE(review): only the assert of set_Sync_n_preds is visible in this
   excerpt; the rest of its body lies outside the shown lines. */
1696 set_Sync_n_preds (ir_node *node, int n_preds) {
1697 assert (node->op == op_Sync);
1702 get_Sync_pred (ir_node *node, int pos) {
1703 assert (node->op == op_Sync);
1704 return get_irn_n(node, pos);
1708 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1709 assert (node->op == op_Sync);
1710 set_irn_n(node, pos, pred);
/* Proj node: single predecessor (the projected node) at slot 0.
   is_Proj also accepts Filter outside the interprocedural view, so
   get_Proj_proj must dispatch on the concrete opcode below. */
1714 get_Proj_pred (ir_node *node) {
1715 assert (is_Proj(node));
1716 return get_irn_n(node, 0);
1720 set_Proj_pred (ir_node *node, ir_node *pred) {
1721 assert (is_Proj(node));
1722 set_irn_n(node, 0, pred);
1726 get_Proj_proj (ir_node *node) {
1727 assert (is_Proj(node));
1728 if (get_irn_opcode(node) == iro_Proj) {
1729 return node->attr.proj;
1731 assert(get_irn_opcode(node) == iro_Filter);
1732 return node->attr.filter.proj;
/* Asymmetric to the getter: the setter insists on a real Proj node;
   for Filter nodes use set_Filter_proj instead. */
1737 set_Proj_proj (ir_node *node, long proj) {
1738 assert (node->op == op_Proj);
1739 node->attr.proj = proj;
/* Tuple node: variable predecessors, same layout conventions as Sync. */
1743 get_Tuple_preds_arr (ir_node *node) {
1744 assert (node->op == op_Tuple);
1745 return (ir_node **)&(get_irn_in(node)[1]);
1749 get_Tuple_n_preds (ir_node *node) {
1750 assert (node->op == op_Tuple);
1751 return (get_irn_arity(node));
/* NOTE(review): body of set_Tuple_n_preds continues outside the shown
   lines; only the assert is visible here. */
1756 set_Tuple_n_preds (ir_node *node, int n_preds) {
1757 assert (node->op == op_Tuple);
1762 get_Tuple_pred (ir_node *node, int pos) {
1763 assert (node->op == op_Tuple);
1764 return get_irn_n(node, pos);
1768 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1769 assert (node->op == op_Tuple);
1770 set_irn_n(node, pos, pred);
/* Id node: a no-op forwarding node with exactly one predecessor. */
1774 get_Id_pred (ir_node *node) {
1775 assert (node->op == op_Id);
1776 return get_irn_n(node, 0);
1780 set_Id_pred (ir_node *node, ir_node *pred) {
1781 assert (node->op == op_Id);
1782 set_irn_n(node, 0, pred);
/* Confirm node operand layout: pred 0 = the confirmed value,
   pred 1 = the bound it is compared against. */
1785 INLINE ir_node *get_Confirm_value (ir_node *node) {
1786 assert (node->op == op_Confirm);
1787 return get_irn_n(node, 0);
1789 INLINE void set_Confirm_value (ir_node *node, ir_node *value) {
1790 assert (node->op == op_Confirm);
1791 set_irn_n(node, 0, value);
1793 INLINE ir_node *get_Confirm_bound (ir_node *node) {
1794 assert (node->op == op_Confirm);
1795 return get_irn_n(node, 1);
/* Sets the bound operand of a Confirm node.
   The bound lives in predecessor slot 1 (slot 0 holds the confirmed
   value, see get_Confirm_bound above).  BUGFIX: this previously wrote
   slot 0, clobbering the value operand and leaving the bound unset. */
1797 INLINE void set_Confirm_bound (ir_node *node, ir_node *bound) {
1798 assert (node->op == op_Confirm);
1799 set_irn_n(node, 1, bound);
/* The comparison relation (pn_Cmp) asserted by a Confirm node is kept
   in attr.confirm_cmp; these accessors read/write it directly. */
1801 INLINE pn_Cmp get_Confirm_cmp (ir_node *node) {
1802 assert (node->op == op_Confirm);
1803 return node->attr.confirm_cmp;
1805 INLINE void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1806 assert (node->op == op_Confirm);
1807 node->attr.confirm_cmp = cmp;
/* Filter node accessors.  NOTE(review): the bodies of get_Filter_pred
   and set_Filter_pred continue outside the shown lines; only their
   asserts are visible here. */
1812 get_Filter_pred (ir_node *node) {
1813 assert(node->op == op_Filter);
1817 set_Filter_pred (ir_node *node, ir_node *pred) {
1818 assert(node->op == op_Filter);
1822 get_Filter_proj(ir_node *node) {
1823 assert(node->op == op_Filter);
1824 return node->attr.filter.proj;
1827 set_Filter_proj (ir_node *node, long proj) {
1828 assert(node->op == op_Filter);
1829 node->attr.filter.proj = proj;
1832 /* Don't use get_irn_arity, get_irn_n in implementation as access
1833 shall work independent of view!!! */
/* Installs the interprocedural predecessor array in_cg.  Slot 0 of
   in_cg mirrors in[0] (the block); the callers' arrays of size
   arity follow at +1.  (Re)allocates in_cg and the backedge flags on
   the graph obstack only when the arity actually changed; old obstack
   storage is not reclaimed. */
1834 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1835 assert(node->op == op_Filter);
1836 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1837 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1838 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1839 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1840 node->attr.filter.in_cg[0] = node->in[0];
1842 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Replaces a single interprocedural predecessor; pos is 0-based on the
   real predecessors, hence the +1 skip over the block entry. */
1845 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1846 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1847 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1848 node->attr.filter.in_cg[pos + 1] = pred;
1850 int get_Filter_n_cg_preds(ir_node *node) {
1851 assert(node->op == op_Filter && node->attr.filter.in_cg);
1852 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1854 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1856 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1858 arity = ARR_LEN(node->attr.filter.in_cg);
1859 assert(pos < arity - 1);
1860 return node->attr.filter.in_cg[pos + 1];
/* Returns the graph a node belongs to: walks up to the node's Block
   (blocks carry the irg in attr.block.irg) and reads it from there. */
1865 get_irn_irg(ir_node *node) {
1866 if (get_irn_op(node) != op_Block)
1867 node = get_nodes_block(node);
1868 assert(get_irn_op(node) == op_Block);
1869 return node->attr.block.irg;
1873 /*----------------------------------------------------------------*/
1874 /* Auxiliary routines */
1875 /*----------------------------------------------------------------*/
1878 skip_Proj (ir_node *node) {
1879 /* don't assert node !!! */
1880 if (node && is_Proj(node)) {
1881 return get_Proj_pred(node);
1888 skip_Tuple (ir_node *node) {
1891 if (!get_opt_normalize()) return node;
1893 node = skip_nop(node);
1894 if (get_irn_op(node) == op_Proj) {
1895 pred = skip_nop(get_Proj_pred(node));
1896 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1897 pred = skip_nop(skip_Tuple(pred));
1898 if (get_irn_op(pred) == op_Tuple)
1899 return get_Tuple_pred(pred, get_Proj_proj(node));
1905 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1906 than any other approach, as Id chains are resolved and all point to the real node, or
1907 all id's are self loops. */
1909 skip_nop (ir_node *node) {
1910 /* don't assert node !!! */
1912 if (!get_opt_normalize()) return node;
1914 /* Don't use get_Id_pred: We get into an endless loop for
1915 self-referencing Ids. */
1916 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1917 ir_node *rem_pred = node->in[0+1];
1920 assert (get_irn_arity (node) > 0);
1922 node->in[0+1] = node;
1923 res = skip_nop(rem_pred);
1924 if (res->op == op_Id) /* self-loop */ return node;
1926 node->in[0+1] = res;
1933 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1934 than any other approach, as Id chains are resolved and all point to the real node, or
1935 all id's are self loops. */
1936 extern int opt_normalize;
1938 skip_nop (ir_node *node) {
1940 /* don't assert node !!! */
1942 if (!get_opt_normalize()) return node;
1944 /* Don't use get_Id_pred: We get into an endless loop for
1945 self-referencing Ids. */
1946 if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
1947 ir_node *rem_pred, *res;
1949 if (pred->op != op_Id) return pred; /* shortcut */
1952 assert (get_irn_arity (node) > 0);
1954 node->in[0+1] = node;
1955 res = skip_nop(rem_pred);
1956 if (res->op == op_Id) /* self-loop */ return node;
1958 node->in[0+1] = res;
1969 skip_Id (ir_node *node) {
1970 return skip_nop(node);
/* Opcode predicates.  is_Bad tolerates NULL; the others dereference
   the node unconditionally. */
1974 is_Bad (ir_node *node) {
1976 if ((node) && get_irn_opcode(node) == iro_Bad)
1982 is_no_Block (ir_node *node) {
1984 return (get_irn_opcode(node) != iro_Block);
1988 is_Block (ir_node *node) {
1990 return (get_irn_opcode(node) == iro_Block);
1993 /* returns true if node is a Unknown node. */
1995 is_Unknown (ir_node *node) {
1997 return (get_irn_opcode(node) == iro_Unknown);
/* A Filter node counts as Proj outside the interprocedural view
   (dual of is_Phi, which claims Filter when the view is active). */
2001 is_Proj (const ir_node *node) {
2003 return node->op == op_Proj
2004 || (!interprocedural_view && node->op == op_Filter);
2007 /* Returns true if the operation manipulates control flow. */
2009 is_cfop(ir_node *node) {
2010 return is_cfopcode(get_irn_op(node));
2013 /* Returns true if the operation manipulates interprocedural control flow:
2014 CallBegin, EndReg, EndExcept */
2015 INLINE int is_ip_cfop(ir_node *node) {
2016 return is_ip_cfopcode(get_irn_op(node));
2019 ir_graph *get_ip_cfop_irg(ir_node *n) {
2020 return get_irn_irg(n);
2023 /* Returns true if the operation can change the control flow because
2026 is_fragile_op(ir_node *node) {
2027 return is_op_fragile(get_irn_op(node));
2030 /* Returns the memory operand of fragile operations. */
/* Dispatches on the opcode; the visible case arm(s) return pred 0 as
   the memory operand, and control reaching past the switch is a bug
   (assert(0)).  NOTE(review): the individual case labels fall outside
   the visible lines of this excerpt. */
2031 ir_node *get_fragile_op_mem(ir_node *node) {
2032 assert(node && is_fragile_op(node));
2034 switch (get_irn_opcode (node)) {
2043 return get_irn_n(node, 0);
2048 assert(0 && "should not be reached");