3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors in their in arrays */
32 #define CALL_PARAM_OFFSET 2
33 #define SEL_INDEX_OFFSET 2
34 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
35 #define END_KEEPALIVE_OFFSET 0
/* Maps pnc (pn_Cmp) constants to their printable names.
   NOTE(review): this excerpt is line-sampled; trailing entries and the
   closing brace of the initializer are missing from this view. */
37 static const char *pnc_name_arr [] = {
38 "False", "Eq", "Lt", "Le",
39 "Gt", "Ge", "Lg", "Leg", "Uo",
40 "Ue", "Ul", "Ule", "Ug", "Uge",
45 * returns the pnc name from a pnc constant
/* Plain table lookup; pnc is assumed to be a valid index — TODO confirm
   callers never pass an out-of-range value. */
47 const char *get_pnc_string(int pnc) {
48 return pnc_name_arr[pnc];
52 * Calculates the negated pnc condition.
/* Each condition maps to its logical complement (e.g. Lt <-> Uge:
   "less" negates to "unordered, greater or equal"). */
55 get_negated_pnc(int pnc) {
57 case False: return True; break;
58 case Eq: return Ne; break;
59 case Lt: return Uge; break;
60 case Le: return Ug; break;
61 case Gt: return Ule; break;
62 case Ge: return Ul; break;
63 case Lg: return Ue; break;
64 case Leg: return Uo; break;
65 case Uo: return Leg; break;
66 case Ue: return Lg; break;
67 case Ul: return Ge; break;
68 case Ule: return Gt; break;
69 case Ug: return Le; break;
70 case Uge: return Lt; break;
71 case Ne: return Eq; break;
72 case True: return False; break;
/* Unreachable for valid pnc values; keeps the compiler quiet about a
   missing return. */
74 return 99; /* to shut up gcc */
/* Printable names for the Proj numbers of Start results. */
77 const char *pns_name_arr [] = {
78 "initial_exec", "global_store",
79 "frame_base", "globals", "args"
/* Printable names for the symconst_kind enumeration. */
82 const char *symconst_name_arr [] = {
83 "type_tag", "size", "linkage_ptr_info"
93 * Create a new irnode in irg, with an op, mode, arity and
94 * some incoming irnodes.
95 * If arity is negative, a node with a dynamic array is created.
/* Allocates the node on the graph's obstack; node size depends on the
   op-specific attribute size. NOTE(review): several lines of this body
   (arity branch, loop setting the block, closing brace) are missing from
   this excerpt. */
98 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
99 int arity, ir_node **in)
102 int node_size = offsetof (ir_node, attr) + op->attr_size;
104 assert(irg && op && mode);
105 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 res->kind = k_ir_node;
/* Dynamic-arity case: flexible array with one slot reserved for the block. */
113 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
/* Fixed-arity case: obstack array, slot 0 is the block, slots 1..arity
   are the predecessors copied from `in`. */
115 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
116 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
119 set_irn_dbg_info(res, db);
123 res->node_nr = get_irp_new_node_nr();
131 /* Copies all attributes stored in the old node to the new node.
132 Assumes both have the same opcode and sufficient size. */
134 copy_attrs (const ir_node *old_node, ir_node *new_node) {
135 assert(get_irn_op(old_node) == get_irn_op(new_node));
136 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
139 /*-- getting some parameters from ir_nodes --*/
/* Kind check usable on arbitrary pointers (returns non-zero for ir_nodes). */
142 is_ir_node (const void *thing) {
143 if (get_kind(thing) == k_ir_node)
/* Arity accessors: thin wrappers around the intern_* inline variants.
   intra = within one graph, inter = interprocedural view. */
150 get_irn_intra_arity (const ir_node *node) {
151 return intern_get_irn_intra_arity(node);
155 get_irn_inter_arity (const ir_node *node) {
156 return intern_get_irn_inter_arity(node);
160 get_irn_arity (const ir_node *node) {
161 return intern_get_irn_arity(node);
164 /* Returns the array with ins. This array is shifted with respect to the
165 array accessed by get_irn_n: The block operand is at position 0 not -1.
166 (@@@ This should be changed.)
167 The order of the predecessors in this array is not guaranteed, except that
168 lists of operands as predecessors of Block or arguments of a Call are
/* In the interprocedural view Filter and Block keep a separate in_cg
   array of cross-graph predecessors; return that one instead. */
171 get_irn_in (const ir_node *node) {
173 if (interprocedural_view) { /* handle Filter and Block specially */
174 if (get_irn_opcode(node) == iro_Filter) {
175 assert(node->attr.filter.in_cg);
176 return node->attr.filter.in_cg;
177 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
178 return node->attr.block.in_cg;
180 /* else fall through */
/* Replaces the whole predecessor array, preserving the block pointer at
   slot 0 and fixing up the backedge bookkeeping when the arity changes. */
186 set_irn_in (ir_node *node, int arity, ir_node **in) {
189 if (interprocedural_view) { /* handle Filter and Block specially */
190 if (get_irn_opcode(node) == iro_Filter) {
191 assert(node->attr.filter.in_cg);
192 arr = &node->attr.filter.in_cg;
193 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
194 arr = &node->attr.block.in_cg;
/* Reallocate only if the arity actually changed (slot 0 is the block). */
201 if (arity != ARR_LEN(*arr) - 1) {
202 ir_node * block = (*arr)[0];
203 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
206 fix_backedges(current_ir_graph->obst, node);
207 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
/* Predecessor accessors: n == -1 addresses the block, 0..arity-1 the
   data predecessors; again thin wrappers over the intern_* variants. */
211 get_irn_intra_n (ir_node *node, int n) {
212 return intern_get_irn_intra_n (node, n);
216 get_irn_inter_n (ir_node *node, int n) {
217 return intern_get_irn_inter_n (node, n);
221 get_irn_n (ir_node *node, int n) {
222 return intern_get_irn_n (node, n);
/* Sets predecessor n; for Filter the block predecessor must be kept in
   sync in both the intra- and inter-procedural arrays. */
226 set_irn_n (ir_node *node, int n, ir_node *in) {
227 assert(node && -1 <= n && n < get_irn_arity(node));
228 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
229 /* Change block pred in both views! */
230 node->in[n + 1] = in;
231 assert(node->attr.filter.in_cg);
232 node->attr.filter.in_cg[n + 1] = in;
235 if (interprocedural_view) { /* handle Filter and Block specially */
236 if (get_irn_opcode(node) == iro_Filter) {
237 assert(node->attr.filter.in_cg);
238 node->attr.filter.in_cg[n + 1] = in;
240 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
241 node->attr.block.in_cg[n + 1] = in;
244 /* else fall through */
/* Default: in[] is shifted by one because slot 0 holds the block. */
246 node->in[n + 1] = in;
/* --- mode / op / opcode accessors --- */
250 get_irn_mode (const ir_node *node) {
251 return intern_get_irn_mode(node);
255 set_irn_mode (ir_node *node, ir_mode *mode)
263 get_irn_modecode (const ir_node *node)
266 return node->mode->code;
269 /** Gets the string representation of the mode .*/
271 get_irn_modename (const ir_node *node)
274 return get_mode_name(node->mode);
278 get_irn_modeident (const ir_node *node)
281 return get_mode_ident(node->mode);
285 get_irn_op (const ir_node *node)
287 return intern_get_irn_op(node);
290 /* should be private to the library: */
292 set_irn_op (ir_node *node, ir_op *op)
299 get_irn_opcode (const ir_node *node)
301 return intern_get_irn_opcode(node);
305 get_irn_opname (const ir_node *node)
308 return get_id_str(node->op->name);
312 get_irn_opident (const ir_node *node)
315 return node->op->name;
/* --- visited flag handling: a node counts as visited when its counter
   reaches the graph's current visited counter --- */
319 get_irn_visited (const ir_node *node)
322 return node->visited;
326 set_irn_visited (ir_node *node, unsigned long visited)
329 node->visited = visited;
333 mark_irn_visited (ir_node *node) {
335 node->visited = current_ir_graph->visited;
339 irn_not_visited (const ir_node *node) {
341 return (node->visited < current_ir_graph->visited);
345 irn_visited (const ir_node *node) {
347 return (node->visited >= current_ir_graph->visited);
/* The link field is scratch space; it may not be used while the graph is
   still being built (Phi construction owns it then). */
351 set_irn_link (ir_node *node, void *link) {
353 /* Link field is used for Phi construction and various optimizations
355 assert(get_irg_phase_state(current_ir_graph) != phase_building);
361 get_irn_link (const ir_node *node) {
366 /* Outputs a unique number for this node */
368 get_irn_node_nr(const ir_node *node) {
371 return node->node_nr;
/* --- op-specific attribute accessors; each asserts the node's opcode.
   The skip_tid() calls lazily normalize type references in place. --- */
378 get_irn_const_attr (ir_node *node)
380 assert (node->op == op_Const);
381 return node->attr.con;
385 get_irn_proj_attr (ir_node *node)
387 assert (node->op == op_Proj);
388 return node->attr.proj;
392 get_irn_alloc_attr (ir_node *node)
394 assert (node->op == op_Alloc);
399 get_irn_free_attr (ir_node *node)
401 assert (node->op == op_Free);
402 return node->attr.f = skip_tid(node->attr.f);
406 get_irn_symconst_attr (ir_node *node)
408 assert (node->op == op_SymConst);
413 get_irn_call_attr (ir_node *node)
415 assert (node->op == op_Call);
416 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
420 get_irn_funccall_attr (ir_node *node)
422 assert (node->op == op_FuncCall);
423 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
427 get_irn_sel_attr (ir_node *node)
429 assert (node->op == op_Sel);
434 get_irn_phi_attr (ir_node *node)
436 assert (node->op == op_Phi);
437 return node->attr.phi0_pos;
441 get_irn_block_attr (ir_node *node)
443 assert (node->op == op_Block);
444 return node->attr.block;
447 /** manipulate fields of individual nodes **/
449 /* this works for all except Block */
/* The block of a node is its predecessor at index -1; Blocks themselves
   have no block, hence the assert. */
451 get_nodes_Block (ir_node *node) {
452 assert (!(node->op == op_Block));
453 return get_irn_n(node, -1);
457 set_nodes_Block (ir_node *node, ir_node *block) {
458 assert (!(node->op == op_Block));
459 set_irn_n(node, -1, block);
462 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
463 * from Start. If so returns frame type, else Null. */
464 type *is_frame_pointer(ir_node *n) {
465 if ((get_irn_op(n) == op_Proj) &&
466 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
467 ir_node *start = get_Proj_pred(n);
468 if (get_irn_op(start) == op_Start) {
469 return get_irg_frame_type(get_irn_irg(start));
475 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
476 * from Start. If so returns global type, else Null. */
477 type *is_globals_pointer(ir_node *n) {
478 if ((get_irn_op(n) == op_Proj) &&
479 (get_Proj_proj(n) == pn_Start_P_globals)) {
480 ir_node *start = get_Proj_pred(n);
481 if (get_irn_op(start) == op_Start) {
482 return get_glob_type();
488 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
489 * from Start. If so returns 1, else 0. */
490 int is_value_arg_pointer(ir_node *n) {
491 if ((get_irn_op(n) == op_Proj) &&
492 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
493 (get_irn_op(get_Proj_pred(n)) == op_Start))
498 /* Returns an array with the predecessors of the Block. Depending on
499 the implementation of the graph data structure this can be a copy of
500 the internal representation of predecessors as well as the internal
501 array itself. Therefore writing to this array might obstruct the ir. */
503 get_Block_cfgpred_arr (ir_node *node)
505 assert ((node->op == op_Block));
506 return (ir_node **)&(get_irn_in(node)[1]);
511 get_Block_n_cfgpreds (ir_node *node) {
512 assert ((node->op == op_Block));
513 return (get_irn_arity(node));
517 get_Block_cfgpred (ir_node *node, int pos) {
518 assert (node->op == op_Block);
/* Debugging aid: on an out-of-range pos the graph is dumped before the
   assert below fires. */
520 if (-1 > pos || get_irn_arity(node) <= pos) {
521 dump_ir_block_graph(current_ir_graph);
522 printf("pos: %d, arity: %d ", pos, get_irn_arity(node));
525 assert(node); assert(-1 <= pos && pos < get_irn_arity(node));
526 return get_irn_n(node, pos);
530 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
531 assert (node->op == op_Block);
532 set_irn_n(node, pos, pred);
/* matured: set once all predecessors of the block are known. */
536 get_Block_matured (ir_node *node) {
537 assert (node->op == op_Block);
538 return node->attr.block.matured;
542 set_Block_matured (ir_node *node, bool matured) {
543 assert (node->op == op_Block);
544 node->attr.block.matured = matured;
/* Blocks have their own visited counter, independent of the node one. */
547 get_Block_block_visited (ir_node *node) {
548 assert (node->op == op_Block);
549 return node->attr.block.block_visited;
553 set_Block_block_visited (ir_node *node, unsigned long visit) {
554 assert (node->op == op_Block);
555 node->attr.block.block_visited = visit;
558 /* For this current_ir_graph must be set. */
560 mark_Block_block_visited (ir_node *node) {
561 assert (node->op == op_Block);
562 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
566 Block_not_block_visited(ir_node *node) {
567 assert (node->op == op_Block);
568 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
/* graph_arr: per-block value table used during SSA construction;
   positions are shifted by one (slot 0 reserved). */
572 get_Block_graph_arr (ir_node *node, int pos) {
573 assert (node->op == op_Block);
574 return node->attr.block.graph_arr[pos+1];
578 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
579 assert (node->op == op_Block);
580 node->attr.block.graph_arr[pos+1] = value;
/* NOTE(review): the odd "* /" spellings below strongly suggest this whole
   handler/exc section was disabled by an enclosing comment in the original
   file (the inner terminators were split so they would not end it) — TODO
   confirm against the full source before re-enabling or "fixing" them. */
583 /* handler handling for Blocks * /
585 set_Block_handler (ir_node *block, ir_node *handler) {
586 assert ((block->op == op_Block));
587 assert ((handler->op == op_Block));
588 block->attr.block.handler_entry = handler;
592 get_Block_handler (ir_node *block) {
593 assert ((block->op == op_Block));
594 return (block->attr.block.handler_entry);
597 / * handler handling for Nodes * /
599 set_Node_handler (ir_node *node, ir_node *handler) {
600 set_Block_handler (get_nodes_Block (node), handler);
604 get_Node_handler (ir_node *node) {
605 return (get_Block_handler (get_nodes_Block (node)));
608 / * exc_t handling for Blocks * /
609 void set_Block_exc (ir_node *block, exc_t exc) {
610 assert ((block->op == op_Block));
611 block->attr.block.exc = exc;
614 exc_t get_Block_exc (ir_node *block) {
615 assert ((block->op == op_Block));
616 return (block->attr.block.exc);
619 / * exc_t handling for Nodes * /
620 void set_Node_exc (ir_node *node, exc_t exc) {
621 set_Block_exc (get_nodes_Block (node), exc);
624 exc_t get_Node_exc (ir_node *node) {
625 return (get_Block_exc (get_nodes_Block (node)));
/* Interprocedural (call-graph) predecessors of a Block live in the
   separate in_cg array; slot 0 mirrors the block slot of in[]. */
629 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
630 assert(node->op == op_Block);
/* (Re)allocate only when the array is absent or its arity changed. */
631 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
632 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
633 node->attr.block.in_cg[0] = NULL;
634 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
636 /* Fix backedge array. fix_backedges operates depending on
637 interprocedural_view. */
638 bool ipv = interprocedural_view;
639 interprocedural_view = true;
640 fix_backedges(current_ir_graph->obst, node);
641 interprocedural_view = ipv;
644 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
647 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
648 assert(node->op == op_Block &&
649 node->attr.block.in_cg &&
650 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
651 node->attr.block.in_cg[pos + 1] = pred;
/* Returns NULL when no interprocedural predecessors are attached. */
654 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
655 assert(node->op == op_Block);
656 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
659 int get_Block_cg_n_cfgpreds(ir_node * node) {
660 assert(node->op == op_Block);
661 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
664 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
665 assert(node->op == op_Block && node->attr.block.in_cg);
666 return node->attr.block.in_cg[pos + 1];
/* Detaches the in_cg array; the obstack memory itself is not freed. */
669 void remove_Block_cg_cfgpred_arr(ir_node * node) {
670 assert(node->op == op_Block);
671 node->attr.block.in_cg = NULL;
674 /* Start references the irg it is in. */
676 get_Start_irg(ir_node *node) {
677 return get_irn_irg(node);
/* Deliberately unusable: the irg of a node is derived, not settable. */
681 set_Start_irg(ir_node *node, ir_graph *irg) {
682 assert(node->op == op_Start);
683 assert(is_ir_graph(irg));
684 assert(0 && " Why set irg? -- use set_irn_irg");
/* End keepalives: extra predecessors that keep otherwise dead nodes
   alive; they start at END_KEEPALIVE_OFFSET in the in array. */
688 get_End_n_keepalives(ir_node *end) {
689 assert (end->op == op_End);
690 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
694 get_End_keepalive(ir_node *end, int pos) {
695 assert (end->op == op_End);
696 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
/* End nodes use a flexible (ARR_F) in array, so appending is possible. */
700 add_End_keepalive (ir_node *end, ir_node *ka) {
701 assert (end->op == op_End);
702 ARR_APP1 (ir_node *, end->in, ka);
706 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
707 assert (end->op == op_End);
708 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
712 free_End (ir_node *end) {
713 assert (end->op == op_End);
715 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
716 end->in = NULL; /* @@@ make sure we get an error if we use the
717 in array afterwards ... */
720 ir_graph *get_EndReg_irg (ir_node *end) {
721 return get_irn_irg(end);
724 ir_graph *get_EndExcept_irg (ir_node *end) {
725 return get_irn_irg(end);
729 > Implementing the case construct (which is where the constant Proj node is
730 > important) involves far more than simply determining the constant values.
731 > We could argue that this is more properly a function of the translator from
732 > Firm to the target machine. That could be done if there was some way of
733 > projecting "default" out of the Cond node.
734 I know it's complicated.
735 Basically there are two problems:
736 - determining the gaps between the projs
737 - determining the biggest case constant to know the proj number for
739 I see several solutions:
740 1. Introduce a ProjDefault node. Solves both problems.
741 This means to extend all optimizations executed during construction.
742 2. Give the Cond node for switch two flavors:
743 a) there are no gaps in the projs (existing flavor)
744 b) gaps may exist, default proj is still the Proj with the largest
745 projection number. This covers also the gaps.
746 3. Fix the semantic of the Cond to that of 2b)
748 Solution 2 seems to be the best:
749 Computing the gaps in the Firm representation is not too hard, i.e.,
750 libFIRM can implement a routine that transforms between the two
751 flavours. This is also possible for 1) but 2) does not require to
752 change any existing optimization.
753 Further it should be far simpler to determine the biggest constant than
755 I don't want to choose 3) as 2a) seems to have advantages for
756 dataflow analysis and 3) does not allow to convert the representation to
/* Cond: predecessor 0 is the selector (boolean or switch value). */
760 get_Cond_selector (ir_node *node) {
761 assert (node->op == op_Cond);
762 return get_irn_n(node, 0);
766 set_Cond_selector (ir_node *node, ir_node *selector) {
767 assert (node->op == op_Cond);
768 set_irn_n(node, 0, selector);
/* kind distinguishes the boolean Cond from the switch flavor (see the
   long discussion comment above). */
772 get_Cond_kind (ir_node *node) {
773 assert (node->op == op_Cond);
774 return node->attr.c.kind;
778 set_Cond_kind (ir_node *node, cond_kind kind) {
779 assert (node->op == op_Cond);
780 node->attr.c.kind = kind;
784 get_Cond_defaultProj (ir_node *node) {
785 assert (node->op == op_Cond);
786 return node->attr.c.default_proj;
/* Return: predecessor 0 is memory, results follow from
   RETURN_RESULT_OFFSET on. */
790 get_Return_mem (ir_node *node) {
791 assert (node->op == op_Return);
792 return get_irn_n(node, 0);
796 set_Return_mem (ir_node *node, ir_node *mem) {
797 assert (node->op == op_Return);
798 set_irn_n(node, 0, mem);
802 get_Return_n_ress (ir_node *node) {
803 assert (node->op == op_Return);
804 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
/* Returns the internal in array shifted past block+mem; NULL branch for
   the zero-result case is outside this excerpt. */
808 get_Return_res_arr (ir_node *node)
810 assert ((node->op == op_Return));
811 if (get_Return_n_ress(node) > 0)
812 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
819 set_Return_n_res (ir_node *node, int results) {
820 assert (node->op == op_Return);
825 get_Return_res (ir_node *node, int pos) {
826 assert (node->op == op_Return);
827 assert (get_Return_n_ress(node) > pos);
828 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
832 set_Return_res (ir_node *node, int pos, ir_node *res){
833 assert (node->op == op_Return);
834 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
/* Raise: predecessor 0 memory, predecessor 1 the exception object
   pointer ("exo_ptr" is the historical spelling in this API). */
838 get_Raise_mem (ir_node *node) {
839 assert (node->op == op_Raise);
840 return get_irn_n(node, 0);
844 set_Raise_mem (ir_node *node, ir_node *mem) {
845 assert (node->op == op_Raise);
846 set_irn_n(node, 0, mem);
850 get_Raise_exo_ptr (ir_node *node) {
851 assert (node->op == op_Raise);
852 return get_irn_n(node, 1);
856 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
857 assert (node->op == op_Raise);
858 set_irn_n(node, 1, exo_ptr);
/* Const: the value is a target value (tarval) in the attribute union. */
861 tarval *get_Const_tarval (ir_node *node) {
862 assert (node->op == op_Const);
863 return node->attr.con.tv;
867 set_Const_tarval (ir_node *node, tarval *con) {
868 assert (node->op == op_Const);
869 node->attr.con.tv = con;
873 /* The source language type. Must be an atomic type. Mode of type must
874 be mode of node. For tarvals from entities type must be pointer to
877 get_Const_type (ir_node *node) {
878 assert (node->op == op_Const);
879 return node->attr.con.tp;
/* Validates the invariants stated above before storing the type;
   unknown_type is accepted unchecked. */
883 set_Const_type (ir_node *node, type *tp) {
884 assert (node->op == op_Const);
885 if (tp != unknown_type) {
886 assert (is_atomic_type(tp));
887 assert (get_type_mode(tp) == get_irn_mode(node));
888 assert (!tarval_is_entity(get_Const_tarval(node)) ||
889 (is_pointer_type(tp) &&
890 (get_pointer_points_to_type(tp) ==
891 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
894 node->attr.con.tp = tp;
/* SymConst: carries either a type (type_tag/size kinds) or an ident
   (linkage_ptr_info kind) in the tori union; the kind selects which. */
899 get_SymConst_kind (const ir_node *node) {
900 assert (node->op == op_SymConst);
901 return node->attr.i.num;
905 set_SymConst_kind (ir_node *node, symconst_kind num) {
906 assert (node->op == op_SymConst);
907 node->attr.i.num = num;
911 get_SymConst_type (ir_node *node) {
912 assert ( (node->op == op_SymConst)
913 && ( get_SymConst_kind(node) == type_tag
914 || get_SymConst_kind(node) == size));
915 return node->attr.i.tori.typ = skip_tid(node->attr.i.tori.typ);
919 set_SymConst_type (ir_node *node, type *tp) {
920 assert ( (node->op == op_SymConst)
921 && ( get_SymConst_kind(node) == type_tag
922 || get_SymConst_kind(node) == size));
923 node->attr.i.tori.typ = tp;
927 get_SymConst_ptrinfo (ir_node *node) {
928 assert ( (node->op == op_SymConst)
929 && (get_SymConst_kind(node) == linkage_ptr_info));
930 return node->attr.i.tori.ptrinfo;
934 set_SymConst_ptrinfo (ir_node *node, ident *ptrinfo) {
935 assert ( (node->op == op_SymConst)
936 && (get_SymConst_kind(node) == linkage_ptr_info));
937 node->attr.i.tori.ptrinfo = ptrinfo;
941 get_SymConst_type_or_id (ir_node *node) {
942 assert (node->op == op_SymConst);
943 return &(node->attr.i.tori);
947 set_SymConst_type_or_id (ir_node *node, type_or_id_p tori) {
948 assert (node->op == op_SymConst);
949 memcpy (&(node->attr.i.tori), tori, sizeof(type_or_id));
/* Sel: pred 0 memory, pred 1 the compound pointer, indices follow from
   SEL_INDEX_OFFSET on; the selected entity sits in the attribute. */
953 get_Sel_mem (ir_node *node) {
954 assert (node->op == op_Sel);
955 return get_irn_n(node, 0);
959 set_Sel_mem (ir_node *node, ir_node *mem) {
960 assert (node->op == op_Sel);
961 set_irn_n(node, 0, mem);
965 get_Sel_ptr (ir_node *node) {
966 assert (node->op == op_Sel);
967 return get_irn_n(node, 1);
971 set_Sel_ptr (ir_node *node, ir_node *ptr) {
972 assert (node->op == op_Sel);
973 set_irn_n(node, 1, ptr);
977 get_Sel_n_indexs (ir_node *node) {
978 assert (node->op == op_Sel);
979 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
983 get_Sel_index_arr (ir_node *node)
985 assert ((node->op == op_Sel));
986 if (get_Sel_n_indexs(node) > 0)
987 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
993 get_Sel_index (ir_node *node, int pos) {
994 assert (node->op == op_Sel);
995 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
999 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1000 assert (node->op == op_Sel);
1001 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1005 get_Sel_entity (ir_node *node) {
1006 assert (node->op == op_Sel);
1007 return node->attr.s.ent;
1011 set_Sel_entity (ir_node *node, entity *ent) {
1012 assert (node->op == op_Sel);
1013 node->attr.s.ent = ent;
1017 get_InstOf_ent (ir_node *node) {
1018 assert (node->op = op_InstOf);
1019 return (node->attr.io.ent);
1023 set_InstOf_ent (ir_node *node, type *ent) {
1024 assert (node->op = op_InstOf);
1025 node->attr.io.ent = ent;
1029 get_InstOf_store (ir_node *node) {
1030 assert (node->op = op_InstOf);
1031 return (get_irn_n (node, 0));
1035 set_InstOf_store (ir_node *node, ir_node *obj) {
1036 assert (node->op = op_InstOf);
1037 set_irn_n (node, 0, obj);
1041 get_InstOf_obj (ir_node *node) {
1042 assert (node->op = op_InstOf);
1043 return (get_irn_n (node, 1));
1047 set_InstOf_obj (ir_node *node, ir_node *obj) {
1048 assert (node->op = op_InstOf);
1049 set_irn_n (node, 1, obj);
1053 /* For unary and binary arithmetic operations the access to the
1054 operands can be factored out. Left is the first, right the
1055 second arithmetic value as listed in tech report 0999-33.
1056 unops are: Minus, Abs, Not, Conv, Cast
1057 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1058 Shr, Shrs, Rotate, Cmp */
/* Call: pred 0 memory, pred 1 the callee pointer, parameters from
   CALL_PARAM_OFFSET on. */
1062 get_Call_mem (ir_node *node) {
1063 assert (node->op == op_Call);
1064 return get_irn_n(node, 0);
1068 set_Call_mem (ir_node *node, ir_node *mem) {
1069 assert (node->op == op_Call);
1070 set_irn_n(node, 0, mem);
1074 get_Call_ptr (ir_node *node) {
1075 assert (node->op == op_Call);
1076 return get_irn_n(node, 1);
1080 set_Call_ptr (ir_node *node, ir_node *ptr) {
1081 assert (node->op == op_Call);
1082 set_irn_n(node, 1, ptr);
/* The +1 skips the block slot kept at index 0 of get_irn_in()'s array. */
1086 get_Call_param_arr (ir_node *node) {
1087 assert (node->op == op_Call);
1088 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1092 get_Call_n_params (ir_node *node) {
1093 assert (node->op == op_Call);
1094 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
/* Deprecated-looking alias for get_Call_n_params. */
1098 get_Call_arity (ir_node *node) {
1099 assert (node->op == op_Call);
1100 return get_Call_n_params(node);
1104 set_Call_arity (ir_node *node, ir_node *arity) {
1105 assert (node->op == op_Call);
1110 get_Call_param (ir_node *node, int pos) {
1111 assert (node->op == op_Call);
1112 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1116 set_Call_param (ir_node *node, int pos, ir_node *param) {
1117 assert (node->op == op_Call);
1118 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
/* Called-method type; skip_tid() lazily normalizes it in place. */
1122 get_Call_type (ir_node *node) {
1123 assert (node->op == op_Call);
1124 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1128 set_Call_type (ir_node *node, type *tp) {
1129 assert (node->op == op_Call);
1130 assert (is_method_type(tp));
1131 node->attr.call.cld_tp = tp;
/* Optional callee set (result of call-graph analysis); NULL if unknown. */
1134 int Call_has_callees(ir_node *node) {
1135 return (node->attr.call.callee_arr != NULL);
1138 int get_Call_n_callees(ir_node * node) {
1139 assert(node->op == op_Call && node->attr.call.callee_arr);
1140 return ARR_LEN(node->attr.call.callee_arr);
1143 entity * get_Call_callee(ir_node * node, int pos) {
1144 assert(node->op == op_Call && node->attr.call.callee_arr);
1145 return node->attr.call.callee_arr[pos];
1148 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1149 assert(node->op == op_Call);
1150 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1151 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1153 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1156 void remove_Call_callee_arr(ir_node * node) {
1157 assert(node->op == op_Call);
1158 node->attr.call.callee_arr = NULL;
/* CallBegin: interprocedural-view companion of a Call. */
1161 ir_node * get_CallBegin_ptr (ir_node *node) {
1162 assert(node->op == op_CallBegin);
1163 return get_irn_n(node, 0);
1165 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1166 assert(node->op == op_CallBegin);
1167 set_irn_n(node, 0, ptr);
1169 ir_graph * get_CallBegin_irg (ir_node *node) {
1170 return get_irn_irg(node);
1172 ir_node * get_CallBegin_call (ir_node *node) {
1173 assert(node->op == op_CallBegin);
1174 return node->attr.callbegin.call;
1176 void set_CallBegin_call (ir_node *node, ir_node *call) {
1177 assert(node->op == op_CallBegin);
1178 node->attr.callbegin.call = call;
/* FuncCall (side-effect-free call): predecessor 0 is the callee pointer
   (no memory operand, unlike Call). */
1182 get_FuncCall_ptr (ir_node *node) {
1183 assert (node->op == op_FuncCall);
1184 return get_irn_n(node, 0);
1188 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1189 assert (node->op == op_FuncCall);
1190 set_irn_n(node, 0, ptr);
1194 get_FuncCall_param_arr (ir_node *node) {
1195 assert (node->op == op_FuncCall);
1196 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET];
/* FuncCall parameter/type accessors, parallel to the Call variants. */
1200 get_FuncCall_n_params (ir_node *node) {
1201 assert (node->op == op_FuncCall);
1202 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1206 get_FuncCall_arity (ir_node *node) {
1207 assert (node->op == op_FuncCall);
1208 return get_FuncCall_n_params(node);
1212 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1213 assert (node->op == op_FuncCall);
1218 get_FuncCall_param (ir_node *node, int pos) {
1219 assert (node->op == op_FuncCall);
1220 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1224 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1225 assert (node->op == op_FuncCall);
1226 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
/* Called-method type; skip_tid() lazily normalizes it in place. */
1230 get_FuncCall_type (ir_node *node) {
1231 assert (node->op == op_FuncCall);
1232 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1236 set_FuncCall_type (ir_node *node, type *tp) {
1237 assert (node->op == op_FuncCall);
1238 assert (is_method_type(tp));
1239 node->attr.call.cld_tp = tp;
/* Optional callee set; NULL when no call-graph analysis has run. */
1242 int FuncCall_has_callees(ir_node *node) {
1243 return (node->attr.call.callee_arr != NULL);
1246 int get_FuncCall_n_callees(ir_node * node) {
1247 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1248 return ARR_LEN(node->attr.call.callee_arr);
1251 entity * get_FuncCall_callee(ir_node * node, int pos) {
1252 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1253 return node->attr.call.callee_arr[pos];
1256 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1257 assert(node->op == op_FuncCall);
1258 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1259 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1261 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
/* Detaches the callee array; obstack memory is not freed. */
1264 void remove_FuncCall_callee_arr(ir_node * node) {
1265 assert(node->op == op_FuncCall);
1266 node->attr.call.callee_arr = NULL;
/* NOTE(review): the lines below are the continuation bodies of the
   BINOP/UNOP accessor-generating macros; the #define header lines are
   missing from this excerpt, so the trailing backslashes dangle here.
   Tokens left untouched. */
1271 ir_node * get_##OP##_left(ir_node *node) { \
1272 assert(node->op == op_##OP); \
1273 return get_irn_n(node, node->op->op_index); \
1275 void set_##OP##_left(ir_node *node, ir_node *left) { \
1276 assert(node->op == op_##OP); \
1277 set_irn_n(node, node->op->op_index, left); \
1279 ir_node *get_##OP##_right(ir_node *node) { \
1280 assert(node->op == op_##OP); \
1281 return get_irn_n(node, node->op->op_index + 1); \
1283 void set_##OP##_right(ir_node *node, ir_node *right) { \
1284 assert(node->op == op_##OP); \
1285 set_irn_n(node, node->op->op_index + 1, right); \
1289 ir_node *get_##OP##_op(ir_node *node) { \
1290 assert(node->op == op_##OP); \
1291 return get_irn_n(node, node->op->op_index); \
1293 void set_##OP##_op (ir_node *node, ir_node *op) { \
1294 assert(node->op == op_##OP); \
1295 set_irn_n(node, node->op->op_index, op); \
/* Memory-operand accessors for the memory-carrying arithmetic ops:
   predecessor 0 is always the memory value. */
1305 get_Quot_mem (ir_node *node) {
1306 assert (node->op == op_Quot);
1307 return get_irn_n(node, 0);
1311 set_Quot_mem (ir_node *node, ir_node *mem) {
1312 assert (node->op == op_Quot);
1313 set_irn_n(node, 0, mem);
1319 get_DivMod_mem (ir_node *node) {
1320 assert (node->op == op_DivMod);
1321 return get_irn_n(node, 0);
1325 set_DivMod_mem (ir_node *node, ir_node *mem) {
1326 assert (node->op == op_DivMod);
1327 set_irn_n(node, 0, mem);
1333 get_Div_mem (ir_node *node) {
1334 assert (node->op == op_Div);
1335 return get_irn_n(node, 0);
1339 set_Div_mem (ir_node *node, ir_node *mem) {
1340 assert (node->op == op_Div);
1341 set_irn_n(node, 0, mem);
1347 get_Mod_mem (ir_node *node) {
1348 assert (node->op == op_Mod);
1349 return get_irn_n(node, 0);
1353 set_Mod_mem (ir_node *node, ir_node *mem) {
1354 assert (node->op == op_Mod);
1355 set_irn_n(node, 0, mem);
/* Cast: the destination type lives in the attribute. */
1372 get_Cast_type (ir_node *node) {
1373 assert (node->op == op_Cast);
1374 return node->attr.cast.totype;
1378 set_Cast_type (ir_node *node, type *to_tp) {
1379 assert (node->op == op_Cast);
1380 node->attr.cast.totype = to_tp;
/* Generic unop/binop access driven by the op's oparity and op_index;
   the trailing asserts fire on the non-matching path. */
1384 is_unop (ir_node *node) {
1385 return (node->op->opar == oparity_unary);
1389 get_unop_op (ir_node *node) {
1390 if (node->op->opar == oparity_unary)
1391 return get_irn_n(node, node->op->op_index);
1393 assert(node->op->opar == oparity_unary);
1398 set_unop_op (ir_node *node, ir_node *op) {
1399 if (node->op->opar == oparity_unary)
1400 set_irn_n(node, node->op->op_index, op);
1402 assert(node->op->opar == oparity_unary);
1406 is_binop (ir_node *node) {
1407 return (node->op->opar == oparity_binary);
1411 get_binop_left (ir_node *node) {
1412 if (node->op->opar == oparity_binary)
1413 return get_irn_n(node, node->op->op_index);
1415 assert(node->op->opar == oparity_binary);
1420 set_binop_left (ir_node *node, ir_node *left) {
1421 if (node->op->opar == oparity_binary)
1422 set_irn_n(node, node->op->op_index, left);
1424 assert (node->op->opar == oparity_binary);
1428 get_binop_right (ir_node *node) {
1429 if (node->op->opar == oparity_binary)
1430 return get_irn_n(node, node->op->op_index + 1);
1432 assert(node->op->opar == oparity_binary);
1437 set_binop_right (ir_node *node, ir_node *right) {
1438 if (node->op->opar == oparity_binary)
1439 set_irn_n(node, node->op->op_index + 1, right);
1441 assert (node->op->opar == oparity_binary);
/* In the interprocedural view, Filter nodes act as Phis too. */
1444 int is_Phi (ir_node *n) {
1449 return (op == op_Phi) || (op == op_Filter && interprocedural_view);
1453 get_Phi_preds_arr (ir_node *node) {
1454 assert (node->op == op_Phi);
1455 return (ir_node **)&(get_irn_in(node)[1]);
1459 get_Phi_n_preds (ir_node *node) {
1460 assert (is_Phi(node));
1461 return (get_irn_arity(node));
1465 void set_Phi_n_preds (ir_node *node, int n_preds) {
1466 assert (node->op == op_Phi);
1471 get_Phi_pred (ir_node *node, int pos) {
1472 assert (is_Phi(node));
1473 return get_irn_n(node, pos);
1477 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1478 assert (is_Phi(node));
1479 set_irn_n(node, pos, pred);
/* Load: pred 0 memory, pred 1 address. */
1483 get_Load_mem (ir_node *node) {
1484 assert (node->op == op_Load);
1485 return get_irn_n(node, 0);
1489 set_Load_mem (ir_node *node, ir_node *mem) {
1490 assert (node->op == op_Load);
1491 set_irn_n(node, 0, mem);
1495 get_Load_ptr (ir_node *node) {
1496 assert (node->op == op_Load);
1497 return get_irn_n(node, 1);
1501 set_Load_ptr (ir_node *node, ir_node *ptr) {
1502 assert (node->op == op_Load);
1503 set_irn_n(node, 1, ptr);
/* Store: pred 0 memory, pred 1 address, pred 2 value. */
1508 get_Store_mem (ir_node *node) {
1509 assert (node->op == op_Store);
1510 return get_irn_n(node, 0);
1514 set_Store_mem (ir_node *node, ir_node *mem) {
1515 assert (node->op == op_Store);
1516 set_irn_n(node, 0, mem);
1520 get_Store_ptr (ir_node *node) {
1521 assert (node->op == op_Store);
1522 return get_irn_n(node, 1);
1526 set_Store_ptr (ir_node *node, ir_node *ptr) {
1527 assert (node->op == op_Store);
1528 set_irn_n(node, 1, ptr);
1532 get_Store_value (ir_node *node) {
1533 assert (node->op == op_Store);
1534 return get_irn_n(node, 2);
1538 set_Store_value (ir_node *node, ir_node *value) {
1539 assert (node->op == op_Store);
1540 set_irn_n(node, 2, value);
/* Alloc: pred 0 memory, pred 1 the size to allocate. */
1544 get_Alloc_mem (ir_node *node) {
1545 assert (node->op == op_Alloc);
1546 return get_irn_n(node, 0);
1550 set_Alloc_mem (ir_node *node, ir_node *mem) {
1551 assert (node->op == op_Alloc);
1552 set_irn_n(node, 0, mem);
1556 get_Alloc_size (ir_node *node) {
1557 assert (node->op == op_Alloc);
1558 return get_irn_n(node, 1);
1562 set_Alloc_size (ir_node *node, ir_node *size) {
1563 assert (node->op == op_Alloc);
1564 set_irn_n(node, 1, size);
1568 get_Alloc_type (ir_node *node) {
1569 assert (node->op == op_Alloc);
1570 return node->attr.a.type = skip_tid(node->attr.a.type);
1574 set_Alloc_type (ir_node *node, type *tp) {
1575 assert (node->op == op_Alloc);
1576 node->attr.a.type = tp;
1580 get_Alloc_where (ir_node *node) {
1581 assert (node->op == op_Alloc);
1582 return node->attr.a.where;
1586 set_Alloc_where (ir_node *node, where_alloc where) {
1587 assert (node->op == op_Alloc);
1588 node->attr.a.where = where;
1593 get_Free_mem (ir_node *node) {
1594 assert (node->op == op_Free);
1595 return get_irn_n(node, 0);
1599 set_Free_mem (ir_node *node, ir_node *mem) {
1600 assert (node->op == op_Free);
1601 set_irn_n(node, 0, mem);
1605 get_Free_ptr (ir_node *node) {
1606 assert (node->op == op_Free);
1607 return get_irn_n(node, 1);
1611 set_Free_ptr (ir_node *node, ir_node *ptr) {
1612 assert (node->op == op_Free);
1613 set_irn_n(node, 1, ptr);
1617 get_Free_size (ir_node *node) {
1618 assert (node->op == op_Free);
1619 return get_irn_n(node, 2);
1623 set_Free_size (ir_node *node, ir_node *size) {
1624 assert (node->op == op_Free);
1625 set_irn_n(node, 2, size);
1629 get_Free_type (ir_node *node) {
1630 assert (node->op == op_Free);
1631 return node->attr.f = skip_tid(node->attr.f);
1635 set_Free_type (ir_node *node, type *tp) {
1636 assert (node->op == op_Free);
1641 get_Sync_preds_arr (ir_node *node) {
1642 assert (node->op == op_Sync);
1643 return (ir_node **)&(get_irn_in(node)[1]);
1647 get_Sync_n_preds (ir_node *node) {
1648 assert (node->op == op_Sync);
1649 return (get_irn_arity(node));
1654 set_Sync_n_preds (ir_node *node, int n_preds) {
1655 assert (node->op == op_Sync);
1660 get_Sync_pred (ir_node *node, int pos) {
1661 assert (node->op == op_Sync);
1662 return get_irn_n(node, pos);
1666 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1667 assert (node->op == op_Sync);
1668 set_irn_n(node, pos, pred);
1672 get_Proj_pred (ir_node *node) {
1673 assert (is_Proj(node));
1674 return get_irn_n(node, 0);
1678 set_Proj_pred (ir_node *node, ir_node *pred) {
1679 assert (is_Proj(node));
1680 set_irn_n(node, 0, pred);
1684 get_Proj_proj (ir_node *node) {
1685 assert (is_Proj(node));
1686 if (get_irn_opcode(node) == iro_Proj) {
1687 return node->attr.proj;
1689 assert(get_irn_opcode(node) == iro_Filter);
1690 return node->attr.filter.proj;
1695 set_Proj_proj (ir_node *node, long proj) {
1696 assert (node->op == op_Proj);
1697 node->attr.proj = proj;
1701 get_Tuple_preds_arr (ir_node *node) {
1702 assert (node->op == op_Tuple);
1703 return (ir_node **)&(get_irn_in(node)[1]);
1707 get_Tuple_n_preds (ir_node *node) {
1708 assert (node->op == op_Tuple);
1709 return (get_irn_arity(node));
1714 set_Tuple_n_preds (ir_node *node, int n_preds) {
1715 assert (node->op == op_Tuple);
1720 get_Tuple_pred (ir_node *node, int pos) {
1721 assert (node->op == op_Tuple);
1722 return get_irn_n(node, pos);
1726 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1727 assert (node->op == op_Tuple);
1728 set_irn_n(node, pos, pred);
1732 get_Id_pred (ir_node *node) {
1733 assert (node->op == op_Id);
1734 return get_irn_n(node, 0);
1738 set_Id_pred (ir_node *node, ir_node *pred) {
1739 assert (node->op == op_Id);
1740 set_irn_n(node, 0, pred);
1743 ir_node *get_Confirm_value (ir_node *node) {
1744 assert (node->op == op_Confirm);
1745 return get_irn_n(node, 0);
1747 void set_Confirm_value (ir_node *node, ir_node *value) {
1748 assert (node->op == op_Confirm);
1749 set_irn_n(node, 0, value);
1751 ir_node *get_Confirm_bound (ir_node *node) {
1752 assert (node->op == op_Confirm);
1753 return get_irn_n(node, 1);
1755 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1756 assert (node->op == op_Confirm);
1757 set_irn_n(node, 0, bound);
1759 pn_Cmp get_Confirm_cmp (ir_node *node) {
1760 assert (node->op == op_Confirm);
1761 return node->attr.confirm_cmp;
1763 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1764 assert (node->op == op_Confirm);
1765 node->attr.confirm_cmp = cmp;
1770 get_Filter_pred (ir_node *node) {
1771 assert(node->op == op_Filter);
1775 set_Filter_pred (ir_node *node, ir_node *pred) {
1776 assert(node->op == op_Filter);
1780 get_Filter_proj(ir_node *node) {
1781 assert(node->op == op_Filter);
1782 return node->attr.filter.proj;
1785 set_Filter_proj (ir_node *node, long proj) {
1786 assert(node->op == op_Filter);
1787 node->attr.filter.proj = proj;
1790 /* Don't use get_irn_arity, get_irn_n in implementation as access
1791 shall work independent of view!!! */
1792 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1793 assert(node->op == op_Filter);
1794 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1795 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1796 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1797 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1798 node->attr.filter.in_cg[0] = node->in[0];
1800 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1803 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1804 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1805 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1806 node->attr.filter.in_cg[pos + 1] = pred;
1808 int get_Filter_n_cg_preds(ir_node *node) {
1809 assert(node->op == op_Filter && node->attr.filter.in_cg);
1810 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1812 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1814 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1816 arity = ARR_LEN(node->attr.filter.in_cg);
1817 assert(pos < arity - 1);
1818 return node->attr.filter.in_cg[pos + 1];
1823 get_irn_irg(ir_node *node) {
1824 if (get_irn_op(node) != op_Block)
1825 node = get_nodes_block(node);
1826 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1827 node = get_nodes_block(node);
1828 assert(get_irn_op(node) == op_Block);
1829 return node->attr.block.irg;
/*----------------------------------------------------------------*/
/* Auxiliary routines                                             */
/*----------------------------------------------------------------*/
1838 skip_Proj (ir_node *node) {
1839 /* don't assert node !!! */
1840 if (node && is_Proj(node)) {
1841 return get_Proj_pred(node);
1848 skip_Tuple (ir_node *node) {
1851 if (!get_opt_normalize()) return node;
1853 node = skip_nop(node);
1854 if (get_irn_op(node) == op_Proj) {
1855 pred = skip_nop(get_Proj_pred(node));
1856 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1857 pred = skip_nop(skip_Tuple(pred));
1858 if (get_irn_op(pred) == op_Tuple)
1859 return get_Tuple_pred(pred, get_Proj_proj(node));
1864 /** returns operand of node if node is a Cast */
1865 ir_node *skip_Cast (ir_node *node) {
1866 if (node && get_irn_op(node) == op_Cast) {
1867 return skip_nop(get_irn_n(node, 0));
1874 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1875 than any other approach, as Id chains are resolved and all point to the real node, or
1876 all id's are self loops. */
1878 skip_nop (ir_node *node) {
1879 /* don't assert node !!! */
1881 if (!get_opt_normalize()) return node;
1883 /* Don't use get_Id_pred: We get into an endless loop for
1884 self-referencing Ids. */
1885 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1886 ir_node *rem_pred = node->in[0+1];
1889 assert (intern_get_irn_arity (node) > 0);
1891 node->in[0+1] = node;
1892 res = skip_nop(rem_pred);
1893 if (res->op == op_Id) /* self-loop */ return node;
1895 node->in[0+1] = res;
1902 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1903 than any other approach, as Id chains are resolved and all point to the real node, or
1904 all id's are self loops. */
1906 skip_nop (ir_node *node) {
1908 /* don't assert node !!! */
1910 if (!get_opt_normalize()) return node;
1912 /* Don't use get_Id_pred: We get into an endless loop for
1913 self-referencing Ids. */
1914 if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
1915 ir_node *rem_pred, *res;
1917 if (pred->op != op_Id) return pred; /* shortcut */
1920 assert (intern_get_irn_arity (node) > 0);
1922 node->in[0+1] = node;
1923 res = skip_nop(rem_pred);
1924 if (res->op == op_Id) /* self-loop */ return node;
1926 node->in[0+1] = res;
1935 skip_Id (ir_node *node) {
1936 return skip_nop(node);
1940 is_Bad (ir_node *node) {
1942 if ((node) && get_irn_opcode(node) == iro_Bad)
1948 is_no_Block (ir_node *node) {
1950 return (get_irn_opcode(node) != iro_Block);
1954 is_Block (ir_node *node) {
1956 return (get_irn_opcode(node) == iro_Block);
1959 /* returns true if node is a Unknown node. */
1961 is_Unknown (ir_node *node) {
1963 return (get_irn_opcode(node) == iro_Unknown);
1967 is_Proj (const ir_node *node) {
1969 return node->op == op_Proj
1970 || (!interprocedural_view && node->op == op_Filter);
1973 /* Returns true if the operation manipulates control flow. */
1975 is_cfop(ir_node *node) {
1976 return is_cfopcode(get_irn_op(node));
1979 /* Returns true if the operation manipulates interprocedural control flow:
1980 CallBegin, EndReg, EndExcept */
1981 int is_ip_cfop(ir_node *node) {
1982 return is_ip_cfopcode(get_irn_op(node));
1985 ir_graph *get_ip_cfop_irg(ir_node *n) {
1986 return get_irn_irg(n);
1989 /* Returns true if the operation can change the control flow because
1992 is_fragile_op(ir_node *node) {
1993 return is_op_fragile(get_irn_op(node));
1996 /* Returns the memory operand of fragile operations. */
1997 ir_node *get_fragile_op_mem(ir_node *node) {
1998 assert(node && is_fragile_op(node));
2000 switch (get_irn_opcode (node)) {
2009 return get_irn_n(node, 0);
2014 assert(0 && "should not be reached");