3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
24 #include "irbackedge_t.h"
31 /* some constants fixing the positions of nodes predecessors
33 #define CALL_PARAM_OFFSET 2
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
/* Declarations for inlining */
39 INLINE ir_node ** get_irn_in (const ir_node *node);
40 INLINE ir_mode *get_irn_mode (const ir_node *node);
41 INLINE ir_op *get_irn_op (const ir_node *node);
42 INLINE opcode get_irn_opcode (const ir_node *node);
43 INLINE ident *get_irn_opident (const ir_node *node);
44 INLINE type *get_SymConst_type (ir_node *node);
45 INLINE ir_node *skip_nop (ir_node *node);
46 INLINE int is_Proj (const ir_node *node);
49 static const char *pnc_name_arr [] = {
50 "False", "Eq", "Lt", "Le",
51 "Gt", "Ge", "Lg", "Leg", "Uo",
52 "Ue", "Ul", "Ule", "Ug", "Uge",
 * returns the pnc name from a pnc constant
59 INLINE const char *get_pnc_string(int pnc) {
60 return pnc_name_arr[pnc];
64 * Calculates the negated pnc condition.
67 get_negated_pnc(int pnc) {
69 case False: return True; break;
70 case Eq: return Ne; break;
71 case Lt: return Uge; break;
72 case Le: return Ug; break;
73 case Gt: return Ule; break;
74 case Ge: return Ul; break;
75 case Lg: return Ue; break;
76 case Leg: return Uo; break;
77 case Uo: return Leg; break;
78 case Ue: return Lg; break;
79 case Ul: return Ge; break;
80 case Ule: return Gt; break;
81 case Ug: return Le; break;
82 case Uge: return Lt; break;
83 case Ne: return Eq; break;
84 case True: return False; break;
86 return 99; /* to shut up gcc */
89 const char *pns_name_arr [] = {
90 "initial_exec", "global_store",
91 "frame_base", "globals", "args"
94 const char *symconst_name_arr [] = {
95 "type_tag", "size", "linkage_ptr_info"
104 * irnode constructor.
105 * Create a new irnode in irg, with an op, mode, arity and
106 * some incoming irnodes.
107 * If arity is negative, a node with a dynamic array is created.
110 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
111 int arity, ir_node **in)
114 int node_size = offsetof (ir_node, attr) + op->attr_size;
116 assert(irg && op && mode);
117 res = (ir_node *) obstack_alloc (irg->obst, node_size);
119 res->kind = k_ir_node;
125 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
127 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
128 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
131 set_irn_dbg_info(res, db);
135 res->node_nr = get_irp_new_node_nr();
141 /* Copies all attributes stored in the old node to the new node.
142 Assumes both have the same opcode and sufficient size. */
144 copy_attrs (const ir_node *old_node, ir_node *new_node) {
145 assert(get_irn_op(old_node) == get_irn_op(new_node));
146 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
149 /** getting some parameters from ir_nodes **/
152 is_ir_node (const void *thing) {
153 if (get_kind(thing) == k_ir_node)
159 /* returns the number of predecessors without the block predecessor. */
161 get_irn_intra_arity (const ir_node *node) {
163 return ARR_LEN(node->in) - 1;
166 /* returns the number of predecessors without the block predecessor. */
168 get_irn_inter_arity (const ir_node *node) {
170 if (get_irn_opcode(node) == iro_Filter) {
171 assert(node->attr.filter.in_cg);
172 return ARR_LEN(node->attr.filter.in_cg) - 1;
173 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
174 return ARR_LEN(node->attr.block.in_cg) - 1;
176 return get_irn_intra_arity(node);
179 /* returns the number of predecessors without the block predecessor. */
181 get_irn_arity (const ir_node *node) {
183 if (interprocedural_view) return get_irn_inter_arity(node);
184 return get_irn_intra_arity(node);
187 /* Returns the array with ins. This array is shifted with respect to the
188 array accessed by get_irn_n: The block operand is at position 0 not -1.
189 (@@@ This should be changed.)
190 The order of the predecessors in this array is not guaranteed, except that
191 lists of operands as predecessors of Block or arguments of a Call are
194 get_irn_in (const ir_node *node) {
196 if (interprocedural_view) { /* handle Filter and Block specially */
197 if (get_irn_opcode(node) == iro_Filter) {
198 assert(node->attr.filter.in_cg);
199 return node->attr.filter.in_cg;
200 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
201 return node->attr.block.in_cg;
203 /* else fall through */
209 set_irn_in (ir_node *node, int arity, ir_node **in) {
212 if (interprocedural_view) { /* handle Filter and Block specially */
213 if (get_irn_opcode(node) == iro_Filter) {
214 assert(node->attr.filter.in_cg);
215 arr = &node->attr.filter.in_cg;
216 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
217 arr = &node->attr.block.in_cg;
224 if (arity != ARR_LEN(*arr) - 1) {
225 ir_node * block = (*arr)[0];
226 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
229 fix_backedges(current_ir_graph->obst, node);
230 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
234 get_irn_intra_n (ir_node *node, int n) {
235 return (node->in[n + 1] = skip_nop(node->in[n + 1]));
239 get_irn_inter_n (ir_node *node, int n) {
240 /* handle Filter and Block specially */
241 if (get_irn_opcode(node) == iro_Filter) {
242 assert(node->attr.filter.in_cg);
243 return (node->attr.filter.in_cg[n + 1] = skip_nop(node->attr.filter.in_cg[n + 1]));
244 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
245 return (node->attr.block.in_cg[n + 1] = skip_nop(node->attr.block.in_cg[n + 1]));
248 return get_irn_intra_n (node, n);
251 /* to iterate through the predecessors without touching the array */
252 /* To iterate over the operands iterate from 0 to i < get_irn_arity(),
253 to iterate including the Block predecessor iterate from i = -1 to
255 If it is a block, the entry -1 is NULL. */
257 get_irn_n (ir_node *node, int n) {
258 assert(node); assert(-1 <= n && n < get_irn_arity(node));
259 if (interprocedural_view) return get_irn_inter_n (node, n);
260 return get_irn_intra_n (node, n);
265 set_irn_n (ir_node *node, int n, ir_node *in) {
266 assert(node && -1 <= n && n < get_irn_arity(node));
267 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
268 /* Change block pred in both views! */
269 node->in[n + 1] = in;
270 assert(node->attr.filter.in_cg);
271 node->attr.filter.in_cg[n + 1] = in;
274 if (interprocedural_view) { /* handle Filter and Block specially */
275 if (get_irn_opcode(node) == iro_Filter) {
276 assert(node->attr.filter.in_cg);
277 node->attr.filter.in_cg[n + 1] = in;
279 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
280 node->attr.block.in_cg[n + 1] = in;
283 /* else fall through */
285 node->in[n + 1] = in;
289 get_irn_mode (const ir_node *node)
296 set_irn_mode (ir_node *node, ir_mode *mode)
304 get_irn_modecode (const ir_node *node)
307 return node->mode->code;
310 /** Gets the string representation of the mode .*/
312 get_irn_modename (const ir_node *node)
315 return get_mode_name(node->mode);
319 get_irn_modeident (const ir_node *node)
322 return get_mode_ident(node->mode);
326 get_irn_op (const ir_node *node)
332 /* should be private to the library: */
334 set_irn_op (ir_node *node, ir_op *op)
341 get_irn_opcode (const ir_node *node)
343 assert (k_ir_node == get_kind(node));
345 return node->op->code;
349 get_irn_opname (const ir_node *node)
352 return get_id_str(node->op->name);
356 get_irn_opident (const ir_node *node)
359 return node->op->name;
363 get_irn_visited (const ir_node *node)
366 return node->visited;
370 set_irn_visited (ir_node *node, unsigned long visited)
373 node->visited = visited;
377 mark_irn_visited (ir_node *node) {
379 node->visited = current_ir_graph->visited;
383 irn_not_visited (const ir_node *node) {
385 return (node->visited < current_ir_graph->visited);
389 irn_visited (const ir_node *node) {
391 return (node->visited >= current_ir_graph->visited);
395 set_irn_link (ir_node *node, void *link) {
397 /* Link field is used for Phi construction and various optimizations
399 assert(get_irg_phase_state(current_ir_graph) != phase_building);
405 get_irn_link (const ir_node *node) {
410 /* Outputs a unique number for this node */
412 get_irn_node_nr(const ir_node *node) {
415 return node->node_nr;
422 get_irn_const_attr (ir_node *node)
424 assert (node->op == op_Const);
425 return node->attr.con;
429 get_irn_proj_attr (ir_node *node)
431 assert (node->op == op_Proj);
432 return node->attr.proj;
436 get_irn_alloc_attr (ir_node *node)
438 assert (node->op == op_Alloc);
443 get_irn_free_attr (ir_node *node)
445 assert (node->op == op_Free);
446 return node->attr.f = skip_tid(node->attr.f);
450 get_irn_symconst_attr (ir_node *node)
452 assert (node->op == op_SymConst);
457 get_irn_call_attr (ir_node *node)
459 assert (node->op == op_Call);
460 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
464 get_irn_sel_attr (ir_node *node)
466 assert (node->op == op_Sel);
471 get_irn_phi_attr (ir_node *node)
473 assert (node->op == op_Phi);
474 return node->attr.phi0_pos;
478 get_irn_block_attr (ir_node *node)
480 assert (node->op == op_Block);
481 return node->attr.block;
484 /** manipulate fields of individual nodes **/
486 /* this works for all except Block */
488 get_nodes_Block (ir_node *node) {
489 assert (!(node->op == op_Block));
490 return get_irn_n(node, -1);
494 set_nodes_Block (ir_node *node, ir_node *block) {
495 assert (!(node->op == op_Block));
496 set_irn_n(node, -1, block);
499 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
500 * from Start. If so returns frame type, else Null. */
501 type *is_frame_pointer(ir_node *n) {
502 if ((get_irn_op(n) == op_Proj) &&
503 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
504 ir_node *start = get_Proj_pred(n);
505 if (get_irn_op(start) == op_Start) {
506 return get_irg_frame_type(get_irn_irg(start));
512 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
513 * from Start. If so returns global type, else Null. */
514 type *is_globals_pointer(ir_node *n) {
515 if ((get_irn_op(n) == op_Proj) &&
516 (get_Proj_proj(n) == pn_Start_P_globals)) {
517 ir_node *start = get_Proj_pred(n);
518 if (get_irn_op(start) == op_Start) {
519 return get_glob_type();
525 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
526 * from Start. If so returns 1, else 0. */
527 int is_value_arg_pointer(ir_node *n) {
528 if ((get_irn_op(n) == op_Proj) &&
529 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
530 (get_irn_op(get_Proj_pred(n)) == op_Start))
535 /* Returns an array with the predecessors of the Block. Depending on
536 the implementation of the graph data structure this can be a copy of
537 the internal representation of predecessors as well as the internal
538 array itself. Therefore writing to this array might obstruct the ir. */
540 get_Block_cfgpred_arr (ir_node *node)
542 assert ((node->op == op_Block));
543 return (ir_node **)&(get_irn_in(node)[1]);
548 get_Block_n_cfgpreds (ir_node *node) {
549 assert ((node->op == op_Block));
550 return (get_irn_arity(node));
554 get_Block_cfgpred (ir_node *node, int pos) {
555 assert (node->op == op_Block);
557 if (-1 > pos || get_irn_arity(node) <= pos) {
558 dump_ir_block_graph(current_ir_graph);
559 printf("pos: %d, arity: %d ", pos, get_irn_arity(node));
562 assert(node); assert(-1 <= pos && pos < get_irn_arity(node));
563 return get_irn_n(node, pos);
567 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
568 assert (node->op == op_Block);
569 set_irn_n(node, pos, pred);
573 get_Block_matured (ir_node *node) {
574 assert (node->op == op_Block);
575 return node->attr.block.matured;
579 set_Block_matured (ir_node *node, bool matured) {
580 assert (node->op == op_Block);
581 node->attr.block.matured = matured;
584 get_Block_block_visited (ir_node *node) {
585 assert (node->op == op_Block);
586 return node->attr.block.block_visited;
590 set_Block_block_visited (ir_node *node, unsigned long visit) {
591 assert (node->op == op_Block);
592 node->attr.block.block_visited = visit;
595 /* For this current_ir_graph must be set. */
597 mark_Block_block_visited (ir_node *node) {
598 assert (node->op == op_Block);
599 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
603 Block_not_block_visited(ir_node *node) {
604 assert (node->op == op_Block);
605 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
609 get_Block_graph_arr (ir_node *node, int pos) {
610 assert (node->op == op_Block);
611 return node->attr.block.graph_arr[pos+1];
615 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
616 assert (node->op == op_Block);
617 node->attr.block.graph_arr[pos+1] = value;
620 /* handler handling for Blocks * /
622 set_Block_handler (ir_node *block, ir_node *handler) {
623 assert ((block->op == op_Block));
624 assert ((handler->op == op_Block));
625 block->attr.block.handler_entry = handler;
629 get_Block_handler (ir_node *block) {
630 assert ((block->op == op_Block));
631 return (block->attr.block.handler_entry);
634 / * handler handling for Nodes * /
636 set_Node_handler (ir_node *node, ir_node *handler) {
637 set_Block_handler (get_nodes_Block (node), handler);
641 get_Node_handler (ir_node *node) {
642 return (get_Block_handler (get_nodes_Block (node)));
645 / * exc_t handling for Blocks * /
646 void set_Block_exc (ir_node *block, exc_t exc) {
647 assert ((block->op == op_Block));
648 block->attr.block.exc = exc;
651 exc_t get_Block_exc (ir_node *block) {
652 assert ((block->op == op_Block));
653 return (block->attr.block.exc);
656 / * exc_t handling for Nodes * /
657 void set_Node_exc (ir_node *node, exc_t exc) {
658 set_Block_exc (get_nodes_Block (node), exc);
661 exc_t get_Node_exc (ir_node *node) {
662 return (get_Block_exc (get_nodes_Block (node)));
666 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
667 assert(node->op == op_Block);
668 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
669 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
670 node->attr.block.in_cg[0] = NULL;
671 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
673 /* Fix backedge array. fix_backedges operates depending on
674 interprocedural_view. */
675 bool ipv = interprocedural_view;
676 interprocedural_view = true;
677 fix_backedges(current_ir_graph->obst, node);
678 interprocedural_view = ipv;
681 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
684 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
685 assert(node->op == op_Block &&
686 node->attr.block.in_cg &&
687 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
688 node->attr.block.in_cg[pos + 1] = pred;
691 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
692 assert(node->op == op_Block);
693 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
696 int get_Block_cg_n_cfgpreds(ir_node * node) {
697 assert(node->op == op_Block);
698 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
701 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
702 assert(node->op == op_Block && node->attr.block.in_cg);
703 return node->attr.block.in_cg[pos + 1];
706 void remove_Block_cg_cfgpred_arr(ir_node * node) {
707 assert(node->op == op_Block);
708 node->attr.block.in_cg = NULL;
711 /* Start references the irg it is in. */
713 get_Start_irg(ir_node *node) {
714 return get_irn_irg(node);
718 set_Start_irg(ir_node *node, ir_graph *irg) {
719 assert(node->op == op_Start);
720 assert(is_ir_graph(irg));
721 assert(0 && " Why set irg? ");
722 //node->attr.start.irg = irg;
726 get_End_n_keepalives(ir_node *end) {
727 assert (end->op == op_End);
728 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
732 get_End_keepalive(ir_node *end, int pos) {
733 assert (end->op == op_End);
734 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
738 add_End_keepalive (ir_node *end, ir_node *ka) {
739 assert (end->op == op_End);
740 ARR_APP1 (ir_node *, end->in, ka);
744 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
745 assert (end->op == op_End);
746 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
750 free_End (ir_node *end) {
751 assert (end->op == op_End);
753 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
754 end->in = NULL; /* @@@ make sure we get an error if we use the
755 in array afterwards ... */
758 ir_graph *get_EndReg_irg (ir_node *end) {
759 return get_irn_irg(end);
762 ir_graph *get_EndExcept_irg (ir_node *end) {
763 return get_irn_irg(end);
767 > Implementing the case construct (which is where the constant Proj node is
768 > important) involves far more than simply determining the constant values.
769 > We could argue that this is more properly a function of the translator from
770 > Firm to the target machine. That could be done if there was some way of
771 > projecting "default" out of the Cond node.
772 I know it's complicated.
Basically there are two problems:
774 - determining the gaps between the projs
775 - determining the biggest case constant to know the proj number for
777 I see several solutions:
778 1. Introduce a ProjDefault node. Solves both problems.
779 This means to extend all optimizations executed during construction.
780 2. Give the Cond node for switch two flavors:
781 a) there are no gaps in the projs (existing flavor)
782 b) gaps may exist, default proj is still the Proj with the largest
783 projection number. This covers also the gaps.
784 3. Fix the semantic of the Cond to that of 2b)
786 Solution 2 seems to be the best:
787 Computing the gaps in the Firm representation is not too hard, i.e.,
788 libFIRM can implement a routine that transforms between the two
789 flavours. This is also possible for 1) but 2) does not require to
790 change any existing optimization.
791 Further it should be far simpler to determine the biggest constant than
793 I don't want to choose 3) as 2a) seems to have advantages for
794 dataflow analysis and 3) does not allow to convert the representation to
798 get_Cond_selector (ir_node *node) {
799 assert (node->op == op_Cond);
800 return get_irn_n(node, 0);
804 set_Cond_selector (ir_node *node, ir_node *selector) {
805 assert (node->op == op_Cond);
806 set_irn_n(node, 0, selector);
810 get_Cond_kind (ir_node *node) {
811 assert (node->op == op_Cond);
812 return node->attr.c.kind;
816 set_Cond_kind (ir_node *node, cond_kind kind) {
817 assert (node->op == op_Cond);
818 node->attr.c.kind = kind;
822 get_Return_mem (ir_node *node) {
823 assert (node->op == op_Return);
824 return get_irn_n(node, 0);
828 set_Return_mem (ir_node *node, ir_node *mem) {
829 assert (node->op == op_Return);
830 set_irn_n(node, 0, mem);
834 get_Return_n_ress (ir_node *node) {
835 assert (node->op == op_Return);
836 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
840 get_Return_res_arr (ir_node *node)
842 assert ((node->op == op_Return));
843 if (get_Return_n_ress(node) > 0)
844 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
851 set_Return_n_res (ir_node *node, int results) {
852 assert (node->op == op_Return);
857 get_Return_res (ir_node *node, int pos) {
858 assert (node->op == op_Return);
859 assert (get_Return_n_ress(node) > pos);
860 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
864 set_Return_res (ir_node *node, int pos, ir_node *res){
865 assert (node->op == op_Return);
866 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
870 get_Raise_mem (ir_node *node) {
871 assert (node->op == op_Raise);
872 return get_irn_n(node, 0);
876 set_Raise_mem (ir_node *node, ir_node *mem) {
877 assert (node->op == op_Raise);
878 set_irn_n(node, 0, mem);
882 get_Raise_exo_ptr (ir_node *node) {
883 assert (node->op == op_Raise);
884 return get_irn_n(node, 1);
888 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
889 assert (node->op == op_Raise);
890 set_irn_n(node, 1, exo_ptr);
893 INLINE tarval *get_Const_tarval (ir_node *node) {
894 assert (node->op == op_Const);
895 return node->attr.con.tv;
899 set_Const_tarval (ir_node *node, tarval *con) {
900 assert (node->op == op_Const);
901 node->attr.con.tv = con;
905 /* The source language type. Must be an atomic type. Mode of type must
906 be mode of node. For tarvals from entities type must be pointer to
909 get_Const_type (ir_node *node) {
910 assert (node->op == op_Const);
911 return node->attr.con.tp;
915 set_Const_type (ir_node *node, type *tp) {
916 assert (node->op == op_Const);
917 if (tp != unknown_type) {
918 assert (is_atomic_type(tp));
919 assert (get_type_mode(tp) == get_irn_mode(node));
920 assert (!tarval_is_entity(get_Const_tarval(node)) ||
921 (is_pointer_type(tp) &&
922 (get_pointer_points_to_type(tp) ==
923 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
926 node->attr.con.tp = tp;
931 get_SymConst_kind (const ir_node *node) {
932 assert (node->op == op_SymConst);
933 return node->attr.i.num;
937 set_SymConst_kind (ir_node *node, symconst_kind num) {
938 assert (node->op == op_SymConst);
939 node->attr.i.num = num;
943 get_SymConst_type (ir_node *node) {
944 assert ( (node->op == op_SymConst)
945 && ( get_SymConst_kind(node) == type_tag
946 || get_SymConst_kind(node) == size));
947 return node->attr.i.tori.typ = skip_tid(node->attr.i.tori.typ);
951 set_SymConst_type (ir_node *node, type *tp) {
952 assert ( (node->op == op_SymConst)
953 && ( get_SymConst_kind(node) == type_tag
954 || get_SymConst_kind(node) == size));
955 node->attr.i.tori.typ = tp;
959 get_SymConst_ptrinfo (ir_node *node) {
960 assert ( (node->op == op_SymConst)
961 && (get_SymConst_kind(node) == linkage_ptr_info));
962 return node->attr.i.tori.ptrinfo;
966 set_SymConst_ptrinfo (ir_node *node, ident *ptrinfo) {
967 assert ( (node->op == op_SymConst)
968 && (get_SymConst_kind(node) == linkage_ptr_info));
969 node->attr.i.tori.ptrinfo = ptrinfo;
973 get_SymConst_type_or_id (ir_node *node) {
974 assert (node->op == op_SymConst);
975 return &(node->attr.i.tori);
979 set_SymConst_type_or_id (ir_node *node, type_or_id_p tori) {
980 assert (node->op == op_SymConst);
981 memcpy (&(node->attr.i.tori), tori, sizeof(type_or_id));
985 get_Sel_mem (ir_node *node) {
986 assert (node->op == op_Sel);
987 return get_irn_n(node, 0);
991 set_Sel_mem (ir_node *node, ir_node *mem) {
992 assert (node->op == op_Sel);
993 set_irn_n(node, 0, mem);
997 get_Sel_ptr (ir_node *node) {
998 assert (node->op == op_Sel);
999 return get_irn_n(node, 1);
1003 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1004 assert (node->op == op_Sel);
1005 set_irn_n(node, 1, ptr);
1009 get_Sel_n_indexs (ir_node *node) {
1010 assert (node->op == op_Sel);
1011 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1015 get_Sel_index_arr (ir_node *node)
1017 assert ((node->op == op_Sel));
1018 if (get_Sel_n_indexs(node) > 0)
1019 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1025 get_Sel_index (ir_node *node, int pos) {
1026 assert (node->op == op_Sel);
1027 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1031 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1032 assert (node->op == op_Sel);
1033 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1037 get_Sel_entity (ir_node *node) {
1038 assert (node->op == op_Sel);
1039 return node->attr.s.ent;
1043 set_Sel_entity (ir_node *node, entity *ent) {
1044 assert (node->op == op_Sel);
1045 node->attr.s.ent = ent;
1049 get_InstOf_ent (ir_node *node) {
1050 assert (node->op = op_InstOf);
1051 return (node->attr.io.ent);
1055 set_InstOf_ent (ir_node *node, type *ent) {
1056 assert (node->op = op_InstOf);
1057 node->attr.io.ent = ent;
1061 get_InstOf_store (ir_node *node) {
1062 assert (node->op = op_InstOf);
1063 return (get_irn_n (node, 0));
1067 set_InstOf_store (ir_node *node, ir_node *obj) {
1068 assert (node->op = op_InstOf);
1069 set_irn_n (node, 0, obj);
1073 get_InstOf_obj (ir_node *node) {
1074 assert (node->op = op_InstOf);
1075 return (get_irn_n (node, 1));
1079 set_InstOf_obj (ir_node *node, ir_node *obj) {
1080 assert (node->op = op_InstOf);
1081 set_irn_n (node, 1, obj);
1085 /* For unary and binary arithmetic operations the access to the
1086 operands can be factored out. Left is the first, right the
1087 second arithmetic value as listed in tech report 0999-33.
1088 unops are: Minus, Abs, Not, Conv, Cast
1089 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1090 Shr, Shrs, Rotate, Cmp */
1094 get_Call_mem (ir_node *node) {
1095 assert (node->op == op_Call);
1096 return get_irn_n(node, 0);
1100 set_Call_mem (ir_node *node, ir_node *mem) {
1101 assert (node->op == op_Call);
1102 set_irn_n(node, 0, mem);
1106 get_Call_ptr (ir_node *node) {
1107 assert (node->op == op_Call);
1108 return get_irn_n(node, 1);
1112 set_Call_ptr (ir_node *node, ir_node *ptr) {
1113 assert (node->op == op_Call);
1114 set_irn_n(node, 1, ptr);
1118 get_Call_param_arr (ir_node *node) {
1119 assert (node->op == op_Call);
1120 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1124 get_Call_n_params (ir_node *node) {
1125 assert (node->op == op_Call);
1126 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1130 get_Call_arity (ir_node *node) {
1131 assert (node->op == op_Call);
1132 return get_Call_n_params(node);
1136 set_Call_arity (ir_node *node, ir_node *arity) {
1137 assert (node->op == op_Call);
1142 get_Call_param (ir_node *node, int pos) {
1143 assert (node->op == op_Call);
1144 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1148 set_Call_param (ir_node *node, int pos, ir_node *param) {
1149 assert (node->op == op_Call);
1150 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1154 get_Call_type (ir_node *node) {
1155 assert (node->op == op_Call);
1156 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1160 set_Call_type (ir_node *node, type *tp) {
1161 assert (node->op == op_Call);
1162 assert (is_method_type(tp));
1163 node->attr.call.cld_tp = tp;
1166 int Call_has_callees(ir_node *node) {
1167 return (node->attr.call.callee_arr != NULL);
1170 int get_Call_n_callees(ir_node * node) {
1171 assert(node->op == op_Call && node->attr.call.callee_arr);
1172 return ARR_LEN(node->attr.call.callee_arr);
1175 entity * get_Call_callee(ir_node * node, int pos) {
1176 assert(node->op == op_Call && node->attr.call.callee_arr);
1177 return node->attr.call.callee_arr[pos];
1180 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1181 assert(node->op == op_Call);
1182 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1183 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1185 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1188 void remove_Call_callee_arr(ir_node * node) {
1189 assert(node->op == op_Call);
1190 node->attr.call.callee_arr = NULL;
1193 ir_node * get_CallBegin_ptr (ir_node *node) {
1194 assert(node->op == op_CallBegin);
1195 return get_irn_n(node, 0);
1197 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1198 assert(node->op == op_CallBegin);
1199 set_irn_n(node, 0, ptr);
1201 ir_graph * get_CallBegin_irg (ir_node *node) {
1202 return get_irn_irg(node);
1204 ir_node * get_CallBegin_call (ir_node *node) {
1205 assert(node->op == op_CallBegin);
1206 return node->attr.callbegin.call;
1208 void set_CallBegin_call (ir_node *node, ir_node *call) {
1209 assert(node->op == op_CallBegin);
1210 node->attr.callbegin.call = call;
1214 get_FuncCall_ptr (ir_node *node) {
1215 assert (node->op == op_FuncCall);
1216 return get_irn_n(node, 0);
1220 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1221 assert (node->op == op_FuncCall);
1222 set_irn_n(node, 0, ptr);
1226 get_FuncCall_param_arr (ir_node *node) {
1227 assert (node->op == op_FuncCall);
1228 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET];
1232 get_FuncCall_n_params (ir_node *node) {
1233 assert (node->op == op_FuncCall);
1234 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1238 get_FuncCall_arity (ir_node *node) {
1239 assert (node->op == op_FuncCall);
1240 return get_FuncCall_n_params(node);
1244 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1245 assert (node->op == op_FuncCall);
1250 get_FuncCall_param (ir_node *node, int pos) {
1251 assert (node->op == op_FuncCall);
1252 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1256 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1257 assert (node->op == op_FuncCall);
1258 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1262 get_FuncCall_type (ir_node *node) {
1263 assert (node->op == op_FuncCall);
1264 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1268 set_FuncCall_type (ir_node *node, type *tp) {
1269 assert (node->op == op_FuncCall);
1270 assert (is_method_type(tp));
1271 node->attr.call.cld_tp = tp;
1274 int FuncCall_has_callees(ir_node *node) {
1275 return (node->attr.call.callee_arr != NULL);
1278 int get_FuncCall_n_callees(ir_node * node) {
1279 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1280 return ARR_LEN(node->attr.call.callee_arr);
1283 entity * get_FuncCall_callee(ir_node * node, int pos) {
1284 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1285 return node->attr.call.callee_arr[pos];
1288 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1289 assert(node->op == op_FuncCall);
1290 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1291 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1293 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1296 void remove_FuncCall_callee_arr(ir_node * node) {
1297 assert(node->op == op_FuncCall);
1298 node->attr.call.callee_arr = NULL;
1303 ir_node * get_##OP##_left(ir_node *node) { \
1304 assert(node->op == op_##OP); \
1305 return get_irn_n(node, node->op->op_index); \
1307 void set_##OP##_left(ir_node *node, ir_node *left) { \
1308 assert(node->op == op_##OP); \
1309 set_irn_n(node, node->op->op_index, left); \
1311 ir_node *get_##OP##_right(ir_node *node) { \
1312 assert(node->op == op_##OP); \
1313 return get_irn_n(node, node->op->op_index + 1); \
1315 void set_##OP##_right(ir_node *node, ir_node *right) { \
1316 assert(node->op == op_##OP); \
1317 set_irn_n(node, node->op->op_index + 1, right); \
1321 ir_node *get_##OP##_op(ir_node *node) { \
1322 assert(node->op == op_##OP); \
1323 return get_irn_n(node, node->op->op_index); \
1325 void set_##OP##_op (ir_node *node, ir_node *op) { \
1326 assert(node->op == op_##OP); \
1327 set_irn_n(node, node->op->op_index, op); \
1337 get_Quot_mem (ir_node *node) {
1338 assert (node->op == op_Quot);
1339 return get_irn_n(node, 0);
1343 set_Quot_mem (ir_node *node, ir_node *mem) {
1344 assert (node->op == op_Quot);
1345 set_irn_n(node, 0, mem);
1351 get_DivMod_mem (ir_node *node) {
1352 assert (node->op == op_DivMod);
1353 return get_irn_n(node, 0);
1357 set_DivMod_mem (ir_node *node, ir_node *mem) {
1358 assert (node->op == op_DivMod);
1359 set_irn_n(node, 0, mem);
1365 get_Div_mem (ir_node *node) {
1366 assert (node->op == op_Div);
1367 return get_irn_n(node, 0);
1371 set_Div_mem (ir_node *node, ir_node *mem) {
1372 assert (node->op == op_Div);
1373 set_irn_n(node, 0, mem);
1379 get_Mod_mem (ir_node *node) {
1380 assert (node->op == op_Mod);
1381 return get_irn_n(node, 0);
1385 set_Mod_mem (ir_node *node, ir_node *mem) {
1386 assert (node->op == op_Mod);
1387 set_irn_n(node, 0, mem);
/* Returns the destination type of a Cast node. */
get_Cast_type (ir_node *node) {
  assert (node->op == op_Cast);
  return node->attr.cast.totype;

/* Sets the destination type of a Cast node. */
set_Cast_type (ir_node *node, type *to_tp) {
  assert (node->op == op_Cast);
  node->attr.cast.totype = to_tp;
/* True iff the node's operation takes exactly one data operand. */
is_unop (ir_node *node) {
  return (node->op->opar == oparity_unary);

/* Generic access to the single data operand; op_index gives its
   position in the in-array for this operation. */
get_unop_op (ir_node *node) {
  if (node->op->opar == oparity_unary)
    return get_irn_n(node, node->op->op_index);

  /* deliberately unreachable for unary ops: the assert fires when a
     non-unary node is passed in */
  assert(node->op->opar == oparity_unary);

set_unop_op (ir_node *node, ir_node *op) {
  if (node->op->opar == oparity_unary)
    set_irn_n(node, node->op->op_index, op);

  assert(node->op->opar == oparity_unary);
/* True iff the node's operation takes exactly two data operands. */
is_binop (ir_node *node) {
  return (node->op->opar == oparity_binary);

/* Generic access to the left data operand; op_index is the position of
   the first data operand for this operation. */
get_binop_left (ir_node *node) {
  if (node->op->opar == oparity_binary)
    return get_irn_n(node, node->op->op_index);

  assert(node->op->opar == oparity_binary);

set_binop_left (ir_node *node, ir_node *left) {
  if (node->op->opar == oparity_binary)
    set_irn_n(node, node->op->op_index, left);

  assert (node->op->opar == oparity_binary);

/* The right operand lives at op_index + 1. */
get_binop_right (ir_node *node) {
  if (node->op->opar == oparity_binary)
    return get_irn_n(node, node->op->op_index + 1);

  assert(node->op->opar == oparity_binary);

set_binop_right (ir_node *node, ir_node *right) {
  if (node->op->opar == oparity_binary)
    set_irn_n(node, node->op->op_index + 1, right);

  assert (node->op->opar == oparity_binary);
/* A node counts as Phi if it is op_Phi, or op_Filter while the
   interprocedural view is active (Filter then plays the Phi role). */
INLINE int is_Phi (ir_node *n) {
  return (op == op_Phi) || (op == op_Filter && interprocedural_view);

/* Raw access to the predecessor array; entry [0] of the in-array is
   skipped (presumably the block input — confirm against ir_node layout). */
get_Phi_preds_arr (ir_node *node) {
  assert (node->op == op_Phi);
  return (ir_node **)&(get_irn_in(node)[1]);

get_Phi_n_preds (ir_node *node) {
  assert (is_Phi(node));
  return (get_irn_arity(node));

INLINE void set_Phi_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Phi);

get_Phi_pred (ir_node *node, int pos) {
  assert (is_Phi(node));
  return get_irn_n(node, pos);

set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
  assert (is_Phi(node));
  set_irn_n(node, pos, pred);
/* Load: input 0 is memory, input 1 is the address. */
get_Load_mem (ir_node *node) {
  assert (node->op == op_Load);
  return get_irn_n(node, 0);

set_Load_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Load);
  set_irn_n(node, 0, mem);

get_Load_ptr (ir_node *node) {
  assert (node->op == op_Load);
  return get_irn_n(node, 1);

set_Load_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Load);
  set_irn_n(node, 1, ptr);
/* Store: input 0 is memory, input 1 the address, input 2 the value. */
get_Store_mem (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 0);

set_Store_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Store);
  set_irn_n(node, 0, mem);

get_Store_ptr (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 1);

set_Store_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Store);
  set_irn_n(node, 1, ptr);

get_Store_value (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 2);

set_Store_value (ir_node *node, ir_node *value) {
  assert (node->op == op_Store);
  set_irn_n(node, 2, value);
/* Alloc: input 0 is memory, input 1 the size; the allocated type and
   the where-flag (stack/heap) live in the node attributes. */
get_Alloc_mem (ir_node *node) {
  assert (node->op == op_Alloc);
  return get_irn_n(node, 0);

set_Alloc_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Alloc);
  set_irn_n(node, 0, mem);

get_Alloc_size (ir_node *node) {
  assert (node->op == op_Alloc);
  return get_irn_n(node, 1);

set_Alloc_size (ir_node *node, ir_node *size) {
  assert (node->op == op_Alloc);
  set_irn_n(node, 1, size);

/* Normalizes the stored type with skip_tid and caches the result back
   into the attribute before returning it. */
get_Alloc_type (ir_node *node) {
  assert (node->op == op_Alloc);
  return node->attr.a.type = skip_tid(node->attr.a.type);

set_Alloc_type (ir_node *node, type *tp) {
  assert (node->op == op_Alloc);
  node->attr.a.type = tp;

get_Alloc_where (ir_node *node) {
  assert (node->op == op_Alloc);
  return node->attr.a.where;

set_Alloc_where (ir_node *node, where_alloc where) {
  assert (node->op == op_Alloc);
  node->attr.a.where = where;
/* Free: input 0 is memory, input 1 the pointer, input 2 the size;
   the freed type lives in the node attribute. */
get_Free_mem (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 0);

set_Free_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Free);
  set_irn_n(node, 0, mem);

get_Free_ptr (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 1);

set_Free_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Free);
  set_irn_n(node, 1, ptr);

get_Free_size (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 2);

set_Free_size (ir_node *node, ir_node *size) {
  assert (node->op == op_Free);
  set_irn_n(node, 2, size);

/* As for Alloc: normalize via skip_tid and cache the result. */
get_Free_type (ir_node *node) {
  assert (node->op == op_Free);
  return node->attr.f = skip_tid(node->attr.f);

/* Setter for the Free node's type attribute. */
set_Free_type (ir_node *node, type *tp) {
  assert (node->op == op_Free);
/* Sync: raw access to the predecessor array, skipping in-array slot 0. */
get_Sync_preds_arr (ir_node *node) {
  assert (node->op == op_Sync);
  return (ir_node **)&(get_irn_in(node)[1]);

get_Sync_n_preds (ir_node *node) {
  assert (node->op == op_Sync);
  return (get_irn_arity(node));

set_Sync_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Sync);

get_Sync_pred (ir_node *node, int pos) {
  assert (node->op == op_Sync);
  return get_irn_n(node, pos);

set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
  assert (node->op == op_Sync);
  set_irn_n(node, pos, pred);
/* Proj: input 0 is the projected predecessor.  is_Proj() also accepts
   Filter nodes in the intraprocedural view. */
get_Proj_pred (ir_node *node) {
  assert (is_Proj(node));
  return get_irn_n(node, 0);

set_Proj_pred (ir_node *node, ir_node *pred) {
  assert (is_Proj(node));
  set_irn_n(node, 0, pred);

/* Returns the projection number; handles both real Proj nodes and
   Filter nodes acting as Proj. */
get_Proj_proj (ir_node *node) {
  assert (is_Proj(node));
  if (get_irn_opcode(node) == iro_Proj) {
    return node->attr.proj;
  assert(get_irn_opcode(node) == iro_Filter);
  return node->attr.filter.proj;

/* NOTE(review): asymmetric to the getter — this setter asserts op_Proj
   only and does not handle Filter nodes. */
set_Proj_proj (ir_node *node, long proj) {
  assert (node->op == op_Proj);
  node->attr.proj = proj;
/* Tuple: raw access to the predecessor array, skipping in-array slot 0. */
get_Tuple_preds_arr (ir_node *node) {
  assert (node->op == op_Tuple);
  return (ir_node **)&(get_irn_in(node)[1]);

get_Tuple_n_preds (ir_node *node) {
  assert (node->op == op_Tuple);
  return (get_irn_arity(node));

set_Tuple_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Tuple);

get_Tuple_pred (ir_node *node, int pos) {
  assert (node->op == op_Tuple);
  return get_irn_n(node, pos);

set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
  assert (node->op == op_Tuple);
  set_irn_n(node, pos, pred);
/* Id: input 0 is the (single) predecessor the Id forwards to. */
get_Id_pred (ir_node *node) {
  assert (node->op == op_Id);
  return get_irn_n(node, 0);

set_Id_pred (ir_node *node, ir_node *pred) {
  assert (node->op == op_Id);
  set_irn_n(node, 0, pred);
/* Confirm: input 0 is the confirmed value, input 1 the bound it is
   compared against (see attr.confirm_cmp for the relation). */
INLINE ir_node *get_Confirm_value (ir_node *node) {
  assert (node->op == op_Confirm);
  return get_irn_n(node, 0);

INLINE void set_Confirm_value (ir_node *node, ir_node *value) {
  assert (node->op == op_Confirm);
  set_irn_n(node, 0, value);

INLINE ir_node *get_Confirm_bound (ir_node *node) {
  assert (node->op == op_Confirm);
  return get_irn_n(node, 1);
1787 INLINE void set_Confirm_bound (ir_node *node, ir_node *bound) {
1788 assert (node->op == op_Confirm);
1789 set_irn_n(node, 0, bound);
/* The comparison relation (pn_Cmp) asserted between value and bound. */
INLINE pn_Cmp get_Confirm_cmp (ir_node *node) {
  assert (node->op == op_Confirm);
  return node->attr.confirm_cmp;

INLINE void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
  assert (node->op == op_Confirm);
  node->attr.confirm_cmp = cmp;
/* Filter accessors: intraprocedural predecessor and projection number. */
get_Filter_pred (ir_node *node) {
  assert(node->op == op_Filter);

set_Filter_pred (ir_node *node, ir_node *pred) {
  assert(node->op == op_Filter);

get_Filter_proj(ir_node *node) {
  assert(node->op == op_Filter);
  return node->attr.filter.proj;

set_Filter_proj (ir_node *node, long proj) {
  assert(node->op == op_Filter);
  node->attr.filter.proj = proj;

/* Don't use get_irn_arity, get_irn_n in implementation as access
   shall work independent of view!!! */
/* Replaces the interprocedural (call-graph) predecessor array.  The
   arrays are (re)allocated on the graph's obstack only when the arity
   changes; slot 0 is copied from node->in[0], the arity call-graph
   predecessors follow at offset +1; backedge flags start zeroed. */
void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Filter);
  if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
    node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
    memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
    node->attr.filter.in_cg[0] = node->in[0];
  memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);

/* Single-slot update of the call-graph predecessor array (bounds-checked). */
void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
  assert(node->op == op_Filter && node->attr.filter.in_cg &&
     0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
  node->attr.filter.in_cg[pos + 1] = pred;

/* Number of call-graph predecessors (array length minus the slot-0 entry). */
int get_Filter_n_cg_preds(ir_node *node) {
  assert(node->op == op_Filter && node->attr.filter.in_cg);
  return (ARR_LEN(node->attr.filter.in_cg) - 1);

ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
  assert(node->op == op_Filter && node->attr.filter.in_cg &&
  arity = ARR_LEN(node->attr.filter.in_cg);
  assert(pos < arity - 1);
  return node->attr.filter.in_cg[pos + 1];
/* Returns the graph a node belongs to: the irg is stored on Block
   nodes only, so non-Block nodes are resolved via their block first. */
get_irn_irg(ir_node *node) {
  if (get_irn_op(node) != op_Block)
    node = get_nodes_block(node);
  assert(get_irn_op(node) == op_Block);
  return node->attr.block.irg;
1863 /*----------------------------------------------------------------*/
1864 /* Auxiliary routines */
1865 /*----------------------------------------------------------------*/
/* Returns the predecessor of a Proj (or Filter-as-Proj); any other
   node, including NULL, passes through unchanged. */
skip_Proj (ir_node *node) {
  /* don't assert node !!! */
  if (node && is_Proj(node)) {
    return get_Proj_pred(node);

/* Resolves Proj-of-Tuple chains to the real predecessor, skipping Id
   nodes along the way; disabled when normalization is switched off. */
skip_Tuple (ir_node *node) {

  if (!get_opt_normalize()) return node;

  node = skip_nop(node);
  if (get_irn_op(node) == op_Proj) {
    pred = skip_nop(get_Proj_pred(node));
    if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
      pred = skip_nop(skip_Tuple(pred));
    if (get_irn_op(pred) == op_Tuple)
      return get_Tuple_pred(pred, get_Proj_proj(node));
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops. */
skip_nop (ir_node *node) {
  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != node->in[0+1])) {
    /* in[0+1]: first real predecessor — presumably in[0] is the block
       slot (confirm against ir_node layout). */
    ir_node *rem_pred = node->in[0+1];

    assert (get_irn_arity (node) > 0);

    /* Temporarily self-loop the node so the recursive walk terminates
       on Id cycles, then redirect it to the resolved target. */
    node->in[0+1] = node;
    res = skip_nop(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res;
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops. */
/* NOTE(review): second definition of skip_nop with a single-Id
   shortcut — presumably selected by conditional compilation not
   visible in this excerpt; verify the #if guards. */
extern int opt_normalize;

skip_nop (ir_node *node) {

  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
    ir_node *rem_pred, *res;

    if (pred->op != op_Id) return pred; /* shortcut */

    assert (get_irn_arity (node) > 0);

    /* Self-loop the node while recursing so Id cycles terminate. */
    node->in[0+1] = node;
    res = skip_nop(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res;
/* skip_Id is a synonym for skip_nop. */
skip_Id (ir_node *node) {
  return skip_nop(node);

/* Nonzero iff node is non-NULL and a Bad node. */
is_Bad (ir_node *node) {

  if ((node) && get_irn_opcode(node) == iro_Bad)

is_no_Block (ir_node *node) {

  return (get_irn_opcode(node) != iro_Block);

is_Block (ir_node *node) {

  return (get_irn_opcode(node) == iro_Block);

/* returns true if node is a Unknown node. */
is_Unknown (ir_node *node) {

  return (get_irn_opcode(node) == iro_Unknown);

/* In the intraprocedural view Filter nodes behave like Proj nodes,
   hence they count as Proj here, too. */
is_Proj (const ir_node *node) {

  return node->op == op_Proj
    || (!interprocedural_view && node->op == op_Filter);

/* Returns true if the operation manipulates control flow. */
is_cfop(ir_node *node) {
  return is_cfopcode(get_irn_op(node));

/* Returns true if the operation manipulates interprocedural control flow:
   CallBegin, EndReg, EndExcept */
INLINE int is_ip_cfop(ir_node *node) {
  return is_ip_cfopcode(get_irn_op(node));
/* The graph an interprocedural cfop belongs to (via its block). */
ir_graph *get_ip_cfop_irg(ir_node *n) {
  return get_irn_irg(n);

/* Returns true if the operation can change the control flow because */
is_fragile_op(ir_node *node) {
  return is_op_fragile(get_irn_op(node));

/* Returns the memory operand of fragile operations. */
ir_node *get_fragile_op_mem(ir_node *node) {
  assert(node && is_fragile_op(node));

  switch (get_irn_opcode (node)) {
    /* for the fragile ops handled here the memory operand is input 0 */
    return get_irn_n(node, 0);
  assert(0 && "should not be reached");