3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
/* some constants fixing the positions of nodes predecessors */
32 #define CALL_PARAM_OFFSET 2
33 #define SEL_INDEX_OFFSET 2
34 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
35 #define END_KEEPALIVE_OFFSET 0
37 static const char *pnc_name_arr [] = {
38 "False", "Eq", "Lt", "Le",
39 "Gt", "Ge", "Lg", "Leg", "Uo",
40 "Ue", "Ul", "Ule", "Ug", "Uge",
45 * returns the pnc name from an pnc constant
47 const char *get_pnc_string(int pnc) {
48 return pnc_name_arr[pnc];
52 * Calculates the negated pnc condition.
55 get_negated_pnc(int pnc) {
57 case False: return True; break;
58 case Eq: return Ne; break;
59 case Lt: return Uge; break;
60 case Le: return Ug; break;
61 case Gt: return Ule; break;
62 case Ge: return Ul; break;
63 case Lg: return Ue; break;
64 case Leg: return Uo; break;
65 case Uo: return Leg; break;
66 case Ue: return Lg; break;
67 case Ul: return Ge; break;
68 case Ule: return Gt; break;
69 case Ug: return Le; break;
70 case Uge: return Lt; break;
71 case Ne: return Eq; break;
72 case True: return False; break;
74 return 99; /* to shut up gcc */
77 const char *pns_name_arr [] = {
78 "initial_exec", "global_store",
79 "frame_base", "globals", "args"
82 const char *symconst_name_arr [] = {
83 "type_tag", "size", "linkage_ptr_info"
93 * Create a new irnode in irg, with an op, mode, arity and
94 * some incoming irnodes.
95 * If arity is negative, a node with a dynamic array is created.
98 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
99 int arity, ir_node **in)
102 int node_size = offsetof (ir_node, attr) + op->attr_size;
104 assert(irg && op && mode);
105 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 res->kind = k_ir_node;
113 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
115 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
116 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
119 set_irn_dbg_info(res, db);
123 res->node_nr = get_irp_new_node_nr();
131 /* Copies all attributes stored in the old node to the new node.
132 Assumes both have the same opcode and sufficient size. */
134 copy_attrs (const ir_node *old_node, ir_node *new_node) {
135 assert(get_irn_op(old_node) == get_irn_op(new_node));
136 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
139 /*-- getting some parameters from ir_nodes --*/
142 is_ir_node (const void *thing) {
143 if (get_kind(thing) == k_ir_node)
150 get_irn_intra_arity (const ir_node *node) {
151 return intern_get_irn_intra_arity(node);
155 get_irn_inter_arity (const ir_node *node) {
156 return intern_get_irn_inter_arity(node);
160 get_irn_arity (const ir_node *node) {
161 return intern_get_irn_arity(node);
164 /* Returns the array with ins. This array is shifted with respect to the
165 array accessed by get_irn_n: The block operand is at position 0 not -1.
166 (@@@ This should be changed.)
167 The order of the predecessors in this array is not guaranteed, except that
168 lists of operands as predecessors of Block or arguments of a Call are
171 get_irn_in (const ir_node *node) {
173 if (interprocedural_view) { /* handle Filter and Block specially */
174 if (get_irn_opcode(node) == iro_Filter) {
175 assert(node->attr.filter.in_cg);
176 return node->attr.filter.in_cg;
177 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
178 return node->attr.block.in_cg;
180 /* else fall through */
186 set_irn_in (ir_node *node, int arity, ir_node **in) {
189 if (interprocedural_view) { /* handle Filter and Block specially */
190 if (get_irn_opcode(node) == iro_Filter) {
191 assert(node->attr.filter.in_cg);
192 arr = &node->attr.filter.in_cg;
193 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
194 arr = &node->attr.block.in_cg;
201 if (arity != ARR_LEN(*arr) - 1) {
202 ir_node * block = (*arr)[0];
203 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
206 fix_backedges(current_ir_graph->obst, node);
207 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
211 get_irn_intra_n (ir_node *node, int n) {
212 return intern_get_irn_intra_n (node, n);
216 get_irn_inter_n (ir_node *node, int n) {
217 return intern_get_irn_inter_n (node, n);
221 get_irn_n (ir_node *node, int n) {
222 return intern_get_irn_n (node, n);
226 set_irn_n (ir_node *node, int n, ir_node *in) {
227 assert(node && -1 <= n && n < get_irn_arity(node));
228 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
229 /* Change block pred in both views! */
230 node->in[n + 1] = in;
231 assert(node->attr.filter.in_cg);
232 node->attr.filter.in_cg[n + 1] = in;
235 if (interprocedural_view) { /* handle Filter and Block specially */
236 if (get_irn_opcode(node) == iro_Filter) {
237 assert(node->attr.filter.in_cg);
238 node->attr.filter.in_cg[n + 1] = in;
240 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
241 node->attr.block.in_cg[n + 1] = in;
244 /* else fall through */
246 node->in[n + 1] = in;
250 get_irn_mode (const ir_node *node) {
251 return intern_get_irn_mode(node);
255 set_irn_mode (ir_node *node, ir_mode *mode)
263 get_irn_modecode (const ir_node *node)
266 return node->mode->code;
269 /** Gets the string representation of the mode .*/
271 get_irn_modename (const ir_node *node)
274 return get_mode_name(node->mode);
278 get_irn_modeident (const ir_node *node)
281 return get_mode_ident(node->mode);
285 get_irn_op (const ir_node *node)
287 return intern_get_irn_op(node);
290 /* should be private to the library: */
292 set_irn_op (ir_node *node, ir_op *op)
299 get_irn_opcode (const ir_node *node)
301 return intern_get_irn_opcode(node);
305 get_irn_opname (const ir_node *node)
308 return get_id_str(node->op->name);
312 get_irn_opident (const ir_node *node)
315 return node->op->name;
319 get_irn_visited (const ir_node *node)
322 return node->visited;
326 set_irn_visited (ir_node *node, unsigned long visited)
329 node->visited = visited;
333 mark_irn_visited (ir_node *node) {
335 node->visited = current_ir_graph->visited;
339 irn_not_visited (const ir_node *node) {
341 return (node->visited < current_ir_graph->visited);
345 irn_visited (const ir_node *node) {
347 return (node->visited >= current_ir_graph->visited);
351 set_irn_link (ir_node *node, void *link) {
353 /* Link field is used for Phi construction and various optimizations
355 assert(get_irg_phase_state(current_ir_graph) != phase_building);
361 get_irn_link (const ir_node *node) {
366 /* Outputs a unique number for this node */
368 get_irn_node_nr(const ir_node *node) {
371 return node->node_nr;
378 get_irn_const_attr (ir_node *node)
380 assert (node->op == op_Const);
381 return node->attr.con;
385 get_irn_proj_attr (ir_node *node)
387 assert (node->op == op_Proj);
388 return node->attr.proj;
392 get_irn_alloc_attr (ir_node *node)
394 assert (node->op == op_Alloc);
399 get_irn_free_attr (ir_node *node)
401 assert (node->op == op_Free);
402 return node->attr.f = skip_tid(node->attr.f);
406 get_irn_symconst_attr (ir_node *node)
408 assert (node->op == op_SymConst);
413 get_irn_call_attr (ir_node *node)
415 assert (node->op == op_Call);
416 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
420 get_irn_funccall_attr (ir_node *node)
422 assert (node->op == op_FuncCall);
423 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
427 get_irn_sel_attr (ir_node *node)
429 assert (node->op == op_Sel);
434 get_irn_phi_attr (ir_node *node)
436 assert (node->op == op_Phi);
437 return node->attr.phi0_pos;
441 get_irn_block_attr (ir_node *node)
443 assert (node->op == op_Block);
444 return node->attr.block;
447 /** manipulate fields of individual nodes **/
449 /* this works for all except Block */
451 get_nodes_Block (ir_node *node) {
452 assert (!(node->op == op_Block));
453 return get_irn_n(node, -1);
457 set_nodes_Block (ir_node *node, ir_node *block) {
458 assert (!(node->op == op_Block));
459 set_irn_n(node, -1, block);
462 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
463 * from Start. If so returns frame type, else Null. */
464 type *is_frame_pointer(ir_node *n) {
465 if ((get_irn_op(n) == op_Proj) &&
466 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
467 ir_node *start = get_Proj_pred(n);
468 if (get_irn_op(start) == op_Start) {
469 return get_irg_frame_type(get_irn_irg(start));
475 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
476 * from Start. If so returns global type, else Null. */
477 type *is_globals_pointer(ir_node *n) {
478 if ((get_irn_op(n) == op_Proj) &&
479 (get_Proj_proj(n) == pn_Start_P_globals)) {
480 ir_node *start = get_Proj_pred(n);
481 if (get_irn_op(start) == op_Start) {
482 return get_glob_type();
488 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
489 * from Start. If so returns 1, else 0. */
490 int is_value_arg_pointer(ir_node *n) {
491 if ((get_irn_op(n) == op_Proj) &&
492 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
493 (get_irn_op(get_Proj_pred(n)) == op_Start))
498 /* Returns an array with the predecessors of the Block. Depending on
499 the implementation of the graph data structure this can be a copy of
500 the internal representation of predecessors as well as the internal
501 array itself. Therefore writing to this array might obstruct the ir. */
503 get_Block_cfgpred_arr (ir_node *node)
505 assert ((node->op == op_Block));
506 return (ir_node **)&(get_irn_in(node)[1]);
511 get_Block_n_cfgpreds (ir_node *node) {
512 assert ((node->op == op_Block));
513 return (get_irn_arity(node));
517 get_Block_cfgpred (ir_node *node, int pos) {
518 assert (node->op == op_Block);
520 if (-1 > pos || get_irn_arity(node) <= pos) {
521 dump_ir_block_graph(current_ir_graph);
522 printf("pos: %d, arity: %d ", pos, get_irn_arity(node));
525 assert(node); assert(-1 <= pos && pos < get_irn_arity(node));
526 return get_irn_n(node, pos);
530 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
531 assert (node->op == op_Block);
532 set_irn_n(node, pos, pred);
536 get_Block_matured (ir_node *node) {
537 assert (node->op == op_Block);
538 return node->attr.block.matured;
542 set_Block_matured (ir_node *node, bool matured) {
543 assert (node->op == op_Block);
544 node->attr.block.matured = matured;
547 get_Block_block_visited (ir_node *node) {
548 assert (node->op == op_Block);
549 return node->attr.block.block_visited;
553 set_Block_block_visited (ir_node *node, unsigned long visit) {
554 assert (node->op == op_Block);
555 node->attr.block.block_visited = visit;
558 /* For this current_ir_graph must be set. */
560 mark_Block_block_visited (ir_node *node) {
561 assert (node->op == op_Block);
562 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
566 Block_not_block_visited(ir_node *node) {
567 assert (node->op == op_Block);
568 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
572 get_Block_graph_arr (ir_node *node, int pos) {
573 assert (node->op == op_Block);
574 return node->attr.block.graph_arr[pos+1];
578 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
579 assert (node->op == op_Block);
580 node->attr.block.graph_arr[pos+1] = value;
/* handler handling for Blocks */
585 set_Block_handler (ir_node *block, ir_node *handler) {
586 assert ((block->op == op_Block));
587 assert ((handler->op == op_Block));
588 block->attr.block.handler_entry = handler;
592 get_Block_handler (ir_node *block) {
593 assert ((block->op == op_Block));
594 return (block->attr.block.handler_entry);
/* handler handling for Nodes */
599 set_Node_handler (ir_node *node, ir_node *handler) {
600 set_Block_handler (get_nodes_Block (node), handler);
604 get_Node_handler (ir_node *node) {
605 return (get_Block_handler (get_nodes_Block (node)));
/* exc_t handling for Blocks */
609 void set_Block_exc (ir_node *block, exc_t exc) {
610 assert ((block->op == op_Block));
611 block->attr.block.exc = exc;
614 exc_t get_Block_exc (ir_node *block) {
615 assert ((block->op == op_Block));
616 return (block->attr.block.exc);
/* exc_t handling for Nodes */
620 void set_Node_exc (ir_node *node, exc_t exc) {
621 set_Block_exc (get_nodes_Block (node), exc);
624 exc_t get_Node_exc (ir_node *node) {
625 return (get_Block_exc (get_nodes_Block (node)));
629 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
630 assert(node->op == op_Block);
631 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
632 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
633 node->attr.block.in_cg[0] = NULL;
634 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
636 /* Fix backedge array. fix_backedges operates depending on
637 interprocedural_view. */
638 bool ipv = interprocedural_view;
639 interprocedural_view = true;
640 fix_backedges(current_ir_graph->obst, node);
641 interprocedural_view = ipv;
644 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
647 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
648 assert(node->op == op_Block &&
649 node->attr.block.in_cg &&
650 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
651 node->attr.block.in_cg[pos + 1] = pred;
654 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
655 assert(node->op == op_Block);
656 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
659 int get_Block_cg_n_cfgpreds(ir_node * node) {
660 assert(node->op == op_Block);
661 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
664 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
665 assert(node->op == op_Block && node->attr.block.in_cg);
666 return node->attr.block.in_cg[pos + 1];
669 void remove_Block_cg_cfgpred_arr(ir_node * node) {
670 assert(node->op == op_Block);
671 node->attr.block.in_cg = NULL;
674 /* Start references the irg it is in. */
676 get_Start_irg(ir_node *node) {
677 return get_irn_irg(node);
681 set_Start_irg(ir_node *node, ir_graph *irg) {
682 assert(node->op == op_Start);
683 assert(is_ir_graph(irg));
684 assert(0 && " Why set irg? -- use set_irn_irg");
688 get_End_n_keepalives(ir_node *end) {
689 assert (end->op == op_End);
690 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
694 get_End_keepalive(ir_node *end, int pos) {
695 assert (end->op == op_End);
696 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
700 add_End_keepalive (ir_node *end, ir_node *ka) {
701 assert (end->op == op_End);
702 ARR_APP1 (ir_node *, end->in, ka);
706 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
707 assert (end->op == op_End);
708 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
712 free_End (ir_node *end) {
713 assert (end->op == op_End);
715 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
716 end->in = NULL; /* @@@ make sure we get an error if we use the
717 in array afterwards ... */
720 ir_graph *get_EndReg_irg (ir_node *end) {
721 return get_irn_irg(end);
724 ir_graph *get_EndExcept_irg (ir_node *end) {
725 return get_irn_irg(end);
729 > Implementing the case construct (which is where the constant Proj node is
730 > important) involves far more than simply determining the constant values.
731 > We could argue that this is more properly a function of the translator from
732 > Firm to the target machine. That could be done if there was some way of
733 > projecting "default" out of the Cond node.
734 I know it's complicated.
Basically there are two problems:
736 - determining the gaps between the projs
737 - determining the biggest case constant to know the proj number for
739 I see several solutions:
740 1. Introduce a ProjDefault node. Solves both problems.
741 This means to extend all optimizations executed during construction.
742 2. Give the Cond node for switch two flavors:
743 a) there are no gaps in the projs (existing flavor)
744 b) gaps may exist, default proj is still the Proj with the largest
745 projection number. This covers also the gaps.
746 3. Fix the semantic of the Cond to that of 2b)
748 Solution 2 seems to be the best:
749 Computing the gaps in the Firm representation is not too hard, i.e.,
750 libFIRM can implement a routine that transforms between the two
751 flavours. This is also possible for 1) but 2) does not require to
752 change any existing optimization.
753 Further it should be far simpler to determine the biggest constant than
755 I don't want to choose 3) as 2a) seems to have advantages for
756 dataflow analysis and 3) does not allow to convert the representation to
760 get_Cond_selector (ir_node *node) {
761 assert (node->op == op_Cond);
762 return get_irn_n(node, 0);
766 set_Cond_selector (ir_node *node, ir_node *selector) {
767 assert (node->op == op_Cond);
768 set_irn_n(node, 0, selector);
772 get_Cond_kind (ir_node *node) {
773 assert (node->op == op_Cond);
774 return node->attr.c.kind;
778 set_Cond_kind (ir_node *node, cond_kind kind) {
779 assert (node->op == op_Cond);
780 node->attr.c.kind = kind;
784 get_Return_mem (ir_node *node) {
785 assert (node->op == op_Return);
786 return get_irn_n(node, 0);
790 set_Return_mem (ir_node *node, ir_node *mem) {
791 assert (node->op == op_Return);
792 set_irn_n(node, 0, mem);
796 get_Return_n_ress (ir_node *node) {
797 assert (node->op == op_Return);
798 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
802 get_Return_res_arr (ir_node *node)
804 assert ((node->op == op_Return));
805 if (get_Return_n_ress(node) > 0)
806 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
813 set_Return_n_res (ir_node *node, int results) {
814 assert (node->op == op_Return);
819 get_Return_res (ir_node *node, int pos) {
820 assert (node->op == op_Return);
821 assert (get_Return_n_ress(node) > pos);
822 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
826 set_Return_res (ir_node *node, int pos, ir_node *res){
827 assert (node->op == op_Return);
828 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
832 get_Raise_mem (ir_node *node) {
833 assert (node->op == op_Raise);
834 return get_irn_n(node, 0);
838 set_Raise_mem (ir_node *node, ir_node *mem) {
839 assert (node->op == op_Raise);
840 set_irn_n(node, 0, mem);
844 get_Raise_exo_ptr (ir_node *node) {
845 assert (node->op == op_Raise);
846 return get_irn_n(node, 1);
850 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
851 assert (node->op == op_Raise);
852 set_irn_n(node, 1, exo_ptr);
855 tarval *get_Const_tarval (ir_node *node) {
856 assert (node->op == op_Const);
857 return node->attr.con.tv;
861 set_Const_tarval (ir_node *node, tarval *con) {
862 assert (node->op == op_Const);
863 node->attr.con.tv = con;
867 /* The source language type. Must be an atomic type. Mode of type must
868 be mode of node. For tarvals from entities type must be pointer to
871 get_Const_type (ir_node *node) {
872 assert (node->op == op_Const);
873 return node->attr.con.tp;
877 set_Const_type (ir_node *node, type *tp) {
878 assert (node->op == op_Const);
879 if (tp != unknown_type) {
880 assert (is_atomic_type(tp));
881 assert (get_type_mode(tp) == get_irn_mode(node));
882 assert (!tarval_is_entity(get_Const_tarval(node)) ||
883 (is_pointer_type(tp) &&
884 (get_pointer_points_to_type(tp) ==
885 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
888 node->attr.con.tp = tp;
893 get_SymConst_kind (const ir_node *node) {
894 assert (node->op == op_SymConst);
895 return node->attr.i.num;
899 set_SymConst_kind (ir_node *node, symconst_kind num) {
900 assert (node->op == op_SymConst);
901 node->attr.i.num = num;
905 get_SymConst_type (ir_node *node) {
906 assert ( (node->op == op_SymConst)
907 && ( get_SymConst_kind(node) == type_tag
908 || get_SymConst_kind(node) == size));
909 return node->attr.i.tori.typ = skip_tid(node->attr.i.tori.typ);
913 set_SymConst_type (ir_node *node, type *tp) {
914 assert ( (node->op == op_SymConst)
915 && ( get_SymConst_kind(node) == type_tag
916 || get_SymConst_kind(node) == size));
917 node->attr.i.tori.typ = tp;
921 get_SymConst_ptrinfo (ir_node *node) {
922 assert ( (node->op == op_SymConst)
923 && (get_SymConst_kind(node) == linkage_ptr_info));
924 return node->attr.i.tori.ptrinfo;
928 set_SymConst_ptrinfo (ir_node *node, ident *ptrinfo) {
929 assert ( (node->op == op_SymConst)
930 && (get_SymConst_kind(node) == linkage_ptr_info));
931 node->attr.i.tori.ptrinfo = ptrinfo;
935 get_SymConst_type_or_id (ir_node *node) {
936 assert (node->op == op_SymConst);
937 return &(node->attr.i.tori);
941 set_SymConst_type_or_id (ir_node *node, type_or_id_p tori) {
942 assert (node->op == op_SymConst);
943 memcpy (&(node->attr.i.tori), tori, sizeof(type_or_id));
947 get_Sel_mem (ir_node *node) {
948 assert (node->op == op_Sel);
949 return get_irn_n(node, 0);
953 set_Sel_mem (ir_node *node, ir_node *mem) {
954 assert (node->op == op_Sel);
955 set_irn_n(node, 0, mem);
959 get_Sel_ptr (ir_node *node) {
960 assert (node->op == op_Sel);
961 return get_irn_n(node, 1);
965 set_Sel_ptr (ir_node *node, ir_node *ptr) {
966 assert (node->op == op_Sel);
967 set_irn_n(node, 1, ptr);
971 get_Sel_n_indexs (ir_node *node) {
972 assert (node->op == op_Sel);
973 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
977 get_Sel_index_arr (ir_node *node)
979 assert ((node->op == op_Sel));
980 if (get_Sel_n_indexs(node) > 0)
981 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
987 get_Sel_index (ir_node *node, int pos) {
988 assert (node->op == op_Sel);
989 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
993 set_Sel_index (ir_node *node, int pos, ir_node *index) {
994 assert (node->op == op_Sel);
995 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
999 get_Sel_entity (ir_node *node) {
1000 assert (node->op == op_Sel);
1001 return node->attr.s.ent;
1005 set_Sel_entity (ir_node *node, entity *ent) {
1006 assert (node->op == op_Sel);
1007 node->attr.s.ent = ent;
1011 get_InstOf_ent (ir_node *node) {
1012 assert (node->op = op_InstOf);
1013 return (node->attr.io.ent);
1017 set_InstOf_ent (ir_node *node, type *ent) {
1018 assert (node->op = op_InstOf);
1019 node->attr.io.ent = ent;
1023 get_InstOf_store (ir_node *node) {
1024 assert (node->op = op_InstOf);
1025 return (get_irn_n (node, 0));
1029 set_InstOf_store (ir_node *node, ir_node *obj) {
1030 assert (node->op = op_InstOf);
1031 set_irn_n (node, 0, obj);
1035 get_InstOf_obj (ir_node *node) {
1036 assert (node->op = op_InstOf);
1037 return (get_irn_n (node, 1));
1041 set_InstOf_obj (ir_node *node, ir_node *obj) {
1042 assert (node->op = op_InstOf);
1043 set_irn_n (node, 1, obj);
1047 /* For unary and binary arithmetic operations the access to the
1048 operands can be factored out. Left is the first, right the
1049 second arithmetic value as listed in tech report 0999-33.
1050 unops are: Minus, Abs, Not, Conv, Cast
1051 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1052 Shr, Shrs, Rotate, Cmp */
1056 get_Call_mem (ir_node *node) {
1057 assert (node->op == op_Call);
1058 return get_irn_n(node, 0);
1062 set_Call_mem (ir_node *node, ir_node *mem) {
1063 assert (node->op == op_Call);
1064 set_irn_n(node, 0, mem);
1068 get_Call_ptr (ir_node *node) {
1069 assert (node->op == op_Call);
1070 return get_irn_n(node, 1);
1074 set_Call_ptr (ir_node *node, ir_node *ptr) {
1075 assert (node->op == op_Call);
1076 set_irn_n(node, 1, ptr);
1080 get_Call_param_arr (ir_node *node) {
1081 assert (node->op == op_Call);
1082 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1086 get_Call_n_params (ir_node *node) {
1087 assert (node->op == op_Call);
1088 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1092 get_Call_arity (ir_node *node) {
1093 assert (node->op == op_Call);
1094 return get_Call_n_params(node);
1098 set_Call_arity (ir_node *node, ir_node *arity) {
1099 assert (node->op == op_Call);
1104 get_Call_param (ir_node *node, int pos) {
1105 assert (node->op == op_Call);
1106 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1110 set_Call_param (ir_node *node, int pos, ir_node *param) {
1111 assert (node->op == op_Call);
1112 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1116 get_Call_type (ir_node *node) {
1117 assert (node->op == op_Call);
1118 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1122 set_Call_type (ir_node *node, type *tp) {
1123 assert (node->op == op_Call);
1124 assert (is_method_type(tp));
1125 node->attr.call.cld_tp = tp;
1128 int Call_has_callees(ir_node *node) {
1129 return (node->attr.call.callee_arr != NULL);
1132 int get_Call_n_callees(ir_node * node) {
1133 assert(node->op == op_Call && node->attr.call.callee_arr);
1134 return ARR_LEN(node->attr.call.callee_arr);
1137 entity * get_Call_callee(ir_node * node, int pos) {
1138 assert(node->op == op_Call && node->attr.call.callee_arr);
1139 return node->attr.call.callee_arr[pos];
1142 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1143 assert(node->op == op_Call);
1144 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1145 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1147 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1150 void remove_Call_callee_arr(ir_node * node) {
1151 assert(node->op == op_Call);
1152 node->attr.call.callee_arr = NULL;
1155 ir_node * get_CallBegin_ptr (ir_node *node) {
1156 assert(node->op == op_CallBegin);
1157 return get_irn_n(node, 0);
1159 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1160 assert(node->op == op_CallBegin);
1161 set_irn_n(node, 0, ptr);
1163 ir_graph * get_CallBegin_irg (ir_node *node) {
1164 return get_irn_irg(node);
1166 ir_node * get_CallBegin_call (ir_node *node) {
1167 assert(node->op == op_CallBegin);
1168 return node->attr.callbegin.call;
1170 void set_CallBegin_call (ir_node *node, ir_node *call) {
1171 assert(node->op == op_CallBegin);
1172 node->attr.callbegin.call = call;
1176 get_FuncCall_ptr (ir_node *node) {
1177 assert (node->op == op_FuncCall);
1178 return get_irn_n(node, 0);
1182 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1183 assert (node->op == op_FuncCall);
1184 set_irn_n(node, 0, ptr);
1188 get_FuncCall_param_arr (ir_node *node) {
1189 assert (node->op == op_FuncCall);
1190 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET];
1194 get_FuncCall_n_params (ir_node *node) {
1195 assert (node->op == op_FuncCall);
1196 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1200 get_FuncCall_arity (ir_node *node) {
1201 assert (node->op == op_FuncCall);
1202 return get_FuncCall_n_params(node);
1206 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1207 assert (node->op == op_FuncCall);
1212 get_FuncCall_param (ir_node *node, int pos) {
1213 assert (node->op == op_FuncCall);
1214 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1218 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1219 assert (node->op == op_FuncCall);
1220 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1224 get_FuncCall_type (ir_node *node) {
1225 assert (node->op == op_FuncCall);
1226 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1230 set_FuncCall_type (ir_node *node, type *tp) {
1231 assert (node->op == op_FuncCall);
1232 assert (is_method_type(tp));
1233 node->attr.call.cld_tp = tp;
1236 int FuncCall_has_callees(ir_node *node) {
1237 return (node->attr.call.callee_arr != NULL);
1240 int get_FuncCall_n_callees(ir_node * node) {
1241 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1242 return ARR_LEN(node->attr.call.callee_arr);
1245 entity * get_FuncCall_callee(ir_node * node, int pos) {
1246 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1247 return node->attr.call.callee_arr[pos];
1250 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1251 assert(node->op == op_FuncCall);
1252 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1253 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1255 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1258 void remove_FuncCall_callee_arr(ir_node * node) {
1259 assert(node->op == op_FuncCall);
1260 node->attr.call.callee_arr = NULL;
1265 ir_node * get_##OP##_left(ir_node *node) { \
1266 assert(node->op == op_##OP); \
1267 return get_irn_n(node, node->op->op_index); \
1269 void set_##OP##_left(ir_node *node, ir_node *left) { \
1270 assert(node->op == op_##OP); \
1271 set_irn_n(node, node->op->op_index, left); \
1273 ir_node *get_##OP##_right(ir_node *node) { \
1274 assert(node->op == op_##OP); \
1275 return get_irn_n(node, node->op->op_index + 1); \
1277 void set_##OP##_right(ir_node *node, ir_node *right) { \
1278 assert(node->op == op_##OP); \
1279 set_irn_n(node, node->op->op_index + 1, right); \
1283 ir_node *get_##OP##_op(ir_node *node) { \
1284 assert(node->op == op_##OP); \
1285 return get_irn_n(node, node->op->op_index); \
1287 void set_##OP##_op (ir_node *node, ir_node *op) { \
1288 assert(node->op == op_##OP); \
1289 set_irn_n(node, node->op->op_index, op); \
1299 get_Quot_mem (ir_node *node) {
1300 assert (node->op == op_Quot);
1301 return get_irn_n(node, 0);
1305 set_Quot_mem (ir_node *node, ir_node *mem) {
1306 assert (node->op == op_Quot);
1307 set_irn_n(node, 0, mem);
1313 get_DivMod_mem (ir_node *node) {
1314 assert (node->op == op_DivMod);
1315 return get_irn_n(node, 0);
1319 set_DivMod_mem (ir_node *node, ir_node *mem) {
1320 assert (node->op == op_DivMod);
1321 set_irn_n(node, 0, mem);
1327 get_Div_mem (ir_node *node) {
1328 assert (node->op == op_Div);
1329 return get_irn_n(node, 0);
1333 set_Div_mem (ir_node *node, ir_node *mem) {
1334 assert (node->op == op_Div);
1335 set_irn_n(node, 0, mem);
1341 get_Mod_mem (ir_node *node) {
1342 assert (node->op == op_Mod);
1343 return get_irn_n(node, 0);
1347 set_Mod_mem (ir_node *node, ir_node *mem) {
1348 assert (node->op == op_Mod);
1349 set_irn_n(node, 0, mem);
1366 get_Cast_type (ir_node *node) {
1367 assert (node->op == op_Cast);
1368 return node->attr.cast.totype;
1372 set_Cast_type (ir_node *node, type *to_tp) {
1373 assert (node->op == op_Cast);
1374 node->attr.cast.totype = to_tp;
1378 is_unop (ir_node *node) {
1379 return (node->op->opar == oparity_unary);
1383 get_unop_op (ir_node *node) {
1384 if (node->op->opar == oparity_unary)
1385 return get_irn_n(node, node->op->op_index);
1387 assert(node->op->opar == oparity_unary);
1392 set_unop_op (ir_node *node, ir_node *op) {
1393 if (node->op->opar == oparity_unary)
1394 set_irn_n(node, node->op->op_index, op);
1396 assert(node->op->opar == oparity_unary);
1400 is_binop (ir_node *node) {
1401 return (node->op->opar == oparity_binary);
1405 get_binop_left (ir_node *node) {
1406 if (node->op->opar == oparity_binary)
1407 return get_irn_n(node, node->op->op_index);
1409 assert(node->op->opar == oparity_binary);
1414 set_binop_left (ir_node *node, ir_node *left) {
1415 if (node->op->opar == oparity_binary)
1416 set_irn_n(node, node->op->op_index, left);
1418 assert (node->op->opar == oparity_binary);
1422 get_binop_right (ir_node *node) {
1423 if (node->op->opar == oparity_binary)
1424 return get_irn_n(node, node->op->op_index + 1);
1426 assert(node->op->opar == oparity_binary);
1431 set_binop_right (ir_node *node, ir_node *right) {
1432 if (node->op->opar == oparity_binary)
1433 set_irn_n(node, node->op->op_index + 1, right);
1435 assert (node->op->opar == oparity_binary);
/* True if node is a Phi; in the interprocedural view a Filter
   acts as a Phi as well. */
int is_Phi (ir_node *n) {
  /* NOTE(review): 'op' is presumably assigned from get_irn_op(n) on a
     line not shown in this excerpt -- confirm against the full file. */
  return (op == op_Phi) || (op == op_Filter && interprocedural_view);

/* Phi node: return the predecessor array, skipping the block
   predecessor stored at in[0]. */
get_Phi_preds_arr (ir_node *node) {
  assert (node->op == op_Phi);
  return (ir_node **)&(get_irn_in(node)[1]);

/* Phi node: number of data predecessors. */
get_Phi_n_preds (ir_node *node) {
  assert (is_Phi(node));
  return (get_irn_arity(node));

/* NOTE(review): the body of this setter falls on lines not shown
   in this excerpt. */
void set_Phi_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Phi);

/* Phi node: return the pos-th data predecessor. */
get_Phi_pred (ir_node *node, int pos) {
  assert (is_Phi(node));
  return get_irn_n(node, pos);

/* Phi node: set the pos-th data predecessor. */
set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
  assert (is_Phi(node));
  set_irn_n(node, pos, pred);
/* Load node: memory operand is predecessor 0. */
get_Load_mem (ir_node *node) {
  assert (node->op == op_Load);
  return get_irn_n(node, 0);

set_Load_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Load);
  set_irn_n(node, 0, mem);

/* Load node: address pointer is predecessor 1. */
get_Load_ptr (ir_node *node) {
  assert (node->op == op_Load);
  return get_irn_n(node, 1);

set_Load_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Load);
  set_irn_n(node, 1, ptr);
/* Store node: memory operand is predecessor 0. */
get_Store_mem (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 0);

set_Store_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Store);
  set_irn_n(node, 0, mem);

/* Store node: address pointer is predecessor 1. */
get_Store_ptr (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 1);

set_Store_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Store);
  set_irn_n(node, 1, ptr);

/* Store node: value to be stored is predecessor 2. */
get_Store_value (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 2);

set_Store_value (ir_node *node, ir_node *value) {
  assert (node->op == op_Store);
  set_irn_n(node, 2, value);
/* Alloc node: memory operand is predecessor 0. */
get_Alloc_mem (ir_node *node) {
  assert (node->op == op_Alloc);
  return get_irn_n(node, 0);

set_Alloc_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Alloc);
  set_irn_n(node, 0, mem);

/* Alloc node: size of the allocation is predecessor 1. */
get_Alloc_size (ir_node *node) {
  assert (node->op == op_Alloc);
  return get_irn_n(node, 1);

set_Alloc_size (ir_node *node, ir_node *size) {
  assert (node->op == op_Alloc);
  set_irn_n(node, 1, size);

/* Alloc node: return the allocated type.  Skips Id types and caches
   the normalized type back into the attribute. */
get_Alloc_type (ir_node *node) {
  assert (node->op == op_Alloc);
  return node->attr.a.type = skip_tid(node->attr.a.type);

set_Alloc_type (ir_node *node, type *tp) {
  assert (node->op == op_Alloc);
  node->attr.a.type = tp;

/* Alloc node: where the allocation happens (stack or heap). */
get_Alloc_where (ir_node *node) {
  assert (node->op == op_Alloc);
  return node->attr.a.where;

set_Alloc_where (ir_node *node, where_alloc where) {
  assert (node->op == op_Alloc);
  node->attr.a.where = where;
/* Free node: memory operand is predecessor 0. */
get_Free_mem (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 0);

set_Free_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Free);
  set_irn_n(node, 0, mem);

/* Free node: pointer to the freed object is predecessor 1. */
get_Free_ptr (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 1);

set_Free_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Free);
  set_irn_n(node, 1, ptr);

/* Free node: size of the freed object is predecessor 2. */
get_Free_size (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 2);

set_Free_size (ir_node *node, ir_node *size) {
  assert (node->op == op_Free);
  set_irn_n(node, 2, size);

/* Free node: return the freed type.  Skips Id types and caches the
   normalized type back into the attribute. */
get_Free_type (ir_node *node) {
  assert (node->op == op_Free);
  return node->attr.f = skip_tid(node->attr.f);

/* NOTE(review): the assignment of tp to the attribute falls on a
   line not shown in this excerpt. */
set_Free_type (ir_node *node, type *tp) {
  assert (node->op == op_Free);
/* Sync node: return the predecessor array, skipping in[0] (the
   block predecessor). */
get_Sync_preds_arr (ir_node *node) {
  assert (node->op == op_Sync);
  return (ir_node **)&(get_irn_in(node)[1]);

/* Sync node: number of predecessors. */
get_Sync_n_preds (ir_node *node) {
  assert (node->op == op_Sync);
  return (get_irn_arity(node));

/* NOTE(review): the body of this setter falls on lines not shown
   in this excerpt. */
set_Sync_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Sync);

/* Sync node: return the pos-th predecessor. */
get_Sync_pred (ir_node *node, int pos) {
  assert (node->op == op_Sync);
  return get_irn_n(node, pos);

/* Sync node: set the pos-th predecessor. */
set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
  assert (node->op == op_Sync);
  set_irn_n(node, pos, pred);
/* Proj node: the projected node is predecessor 0.  Also accepts a
   Filter where is_Proj() treats it as a Proj. */
get_Proj_pred (ir_node *node) {
  assert (is_Proj(node));
  return get_irn_n(node, 0);

set_Proj_pred (ir_node *node, ir_node *pred) {
  assert (is_Proj(node));
  set_irn_n(node, 0, pred);

/* Return the projection number.  For a real Proj it is in attr.proj;
   for a Filter (acting as Proj) it is in attr.filter.proj. */
get_Proj_proj (ir_node *node) {
  assert (is_Proj(node));
  if (get_irn_opcode(node) == iro_Proj) {
    return node->attr.proj;
  /* NOTE(review): the closing brace / else of this branch falls on a
     line not shown in this excerpt. */
  assert(get_irn_opcode(node) == iro_Filter);
  return node->attr.filter.proj;
1689 set_Proj_proj (ir_node *node, long proj) {
1690 assert (node->op == op_Proj);
1691 node->attr.proj = proj;
/* Tuple node: return the predecessor array, skipping in[0] (the
   block predecessor). */
get_Tuple_preds_arr (ir_node *node) {
  assert (node->op == op_Tuple);
  return (ir_node **)&(get_irn_in(node)[1]);

/* Tuple node: number of predecessors. */
get_Tuple_n_preds (ir_node *node) {
  assert (node->op == op_Tuple);
  return (get_irn_arity(node));

/* NOTE(review): the body of this setter falls on lines not shown
   in this excerpt. */
set_Tuple_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Tuple);

/* Tuple node: return the pos-th predecessor. */
get_Tuple_pred (ir_node *node, int pos) {
  assert (node->op == op_Tuple);
  return get_irn_n(node, pos);

/* Tuple node: set the pos-th predecessor. */
set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
  assert (node->op == op_Tuple);
  set_irn_n(node, pos, pred);
/* Id node: the aliased node is predecessor 0. */
get_Id_pred (ir_node *node) {
  assert (node->op == op_Id);
  return get_irn_n(node, 0);

set_Id_pred (ir_node *node, ir_node *pred) {
  assert (node->op == op_Id);
  set_irn_n(node, 0, pred);
/* Confirm node: the confirmed value is predecessor 0. */
ir_node *get_Confirm_value (ir_node *node) {
  assert (node->op == op_Confirm);
  return get_irn_n(node, 0);

void set_Confirm_value (ir_node *node, ir_node *value) {
  assert (node->op == op_Confirm);
  set_irn_n(node, 0, value);

/* Confirm node: the bound the value is compared against is
   predecessor 1. */
ir_node *get_Confirm_bound (ir_node *node) {
  assert (node->op == op_Confirm);
  return get_irn_n(node, 1);
1749 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1750 assert (node->op == op_Confirm);
1751 set_irn_n(node, 0, bound);
/* Confirm node: the comparison relation (pn_Cmp) asserted between
   value and bound. */
pn_Cmp get_Confirm_cmp (ir_node *node) {
  assert (node->op == op_Confirm);
  return node->attr.confirm_cmp;

void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
  assert (node->op == op_Confirm);
  node->attr.confirm_cmp = cmp;
/* Filter node: return the filtered predecessor.
   NOTE(review): the return statement falls on a line not shown in
   this excerpt. */
get_Filter_pred (ir_node *node) {
  assert(node->op == op_Filter);

/* Filter node: set the filtered predecessor.
   NOTE(review): the set_irn_n call falls on a line not shown in
   this excerpt. */
set_Filter_pred (ir_node *node, ir_node *pred) {
  assert(node->op == op_Filter);

/* Filter node: return the projection number. */
get_Filter_proj(ir_node *node) {
  assert(node->op == op_Filter);
  return node->attr.filter.proj;

/* Filter node: set the projection number. */
set_Filter_proj (ir_node *node, long proj) {
  assert(node->op == op_Filter);
  node->attr.filter.proj = proj;
/* Don't use get_irn_arity, get_irn_n in implementation as access
   shall work independent of view!!! */

/* Replace the interprocedural (call-graph) predecessor array of a
   Filter.  in_cg[0] mirrors the block predecessor; the arity data
   predecessors follow at in_cg[1..arity]. */
void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Filter);
  /* (Re)allocate only when the stored array has a different arity. */
  if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
    node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
    memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
    node->attr.filter.in_cg[0] = node->in[0];
  memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);

/* Set one interprocedural predecessor of a Filter (pos is a data
   index; +1 skips the block slot). */
void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
  assert(node->op == op_Filter && node->attr.filter.in_cg &&
     0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
  node->attr.filter.in_cg[pos + 1] = pred;

/* Number of interprocedural predecessors of a Filter. */
int get_Filter_n_cg_preds(ir_node *node) {
  assert(node->op == op_Filter && node->attr.filter.in_cg);
  return (ARR_LEN(node->attr.filter.in_cg) - 1);

/* Return one interprocedural predecessor of a Filter.
   NOTE(review): the declaration of 'arity' falls on a line not shown
   in this excerpt. */
ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
  assert(node->op == op_Filter && node->attr.filter.in_cg &&
  arity = ARR_LEN(node->attr.filter.in_cg);
  assert(pos < arity - 1);
  return node->attr.filter.in_cg[pos + 1];
/* Return the graph a node belongs to: walk to the node's Block,
   whose attribute stores the owning ir_graph. */
get_irn_irg(ir_node *node) {
  if (get_irn_op(node) != op_Block)
    node = get_nodes_block(node);
  if (is_Bad(node))  /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
    node = get_nodes_block(node);
  assert(get_irn_op(node) == op_Block);
  return node->attr.block.irg;
1827 /*----------------------------------------------------------------*/
1828 /* Auxiliary routines */
1829 /*----------------------------------------------------------------*/
/* If node is a Proj, return its projected predecessor; NULL-safe.
   NOTE(review): the fall-through return for non-Proj nodes falls on
   lines not shown in this excerpt. */
skip_Proj (ir_node *node) {
  /* don't assert node !!! */
  if (node && is_Proj(node)) {
    return get_Proj_pred(node);
/* If node is a Proj of a (possibly nested) Tuple, return the Tuple
   entry it selects; otherwise return node unchanged. */
skip_Tuple (ir_node *node) {

  if (!get_opt_normalize()) return node;  /* normalization switched off */

  node = skip_nop(node);
  if (get_irn_op(node) == op_Proj) {
    /* NOTE(review): the declaration of 'pred' falls on a line not
       shown in this excerpt. */
    pred = skip_nop(get_Proj_pred(node));
    if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
      pred = skip_nop(skip_Tuple(pred));
    if (get_irn_op(pred) == op_Tuple)
      return get_Tuple_pred(pred, get_Proj_proj(node));
/** returns operand of node if node is a Cast */
/* NOTE(review): the fall-through return for non-Cast nodes falls on
   lines not shown in this excerpt. */
ir_node *skip_Cast (ir_node *node) {
  if (node && get_irn_op(node) == op_Cast) {
    return skip_nop(get_irn_n(node, 0));
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops. */
skip_nop (ir_node *node) {
  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != node->in[0+1])) {
    /* in[0+1]: first real predecessor (in[0] is the block). */
    ir_node *rem_pred = node->in[0+1];

    assert (intern_get_irn_arity (node) > 0);

    /* Temporarily point the Id at itself to break cycles while
       recursing down the chain. */
    node->in[0+1] = node;
    /* NOTE(review): the declaration of 'res' falls on a line not
       shown in this excerpt. */
    res = skip_nop(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res;  /* path compression: point at the real node */
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops. */
/* NOTE(review): second definition of skip_nop in this excerpt --
   presumably the two versions are separated by preprocessor
   conditionals on lines not shown here; confirm against the full
   file. */
skip_nop (ir_node *node) {

  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
    ir_node *rem_pred, *res;

    if (pred->op != op_Id) return pred; /* shortcut */

    assert (intern_get_irn_arity (node) > 0);

    /* Temporarily point the Id at itself to break cycles while
       recursing down the chain. */
    node->in[0+1] = node;
    res = skip_nop(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res;  /* path compression: point at the real node */
/* Alias for skip_nop: follow Id chains to the real node. */
skip_Id (ir_node *node) {
  return skip_nop(node);
/* True if node is the Bad node.
   NOTE(review): the return statements of this predicate fall on
   lines not shown in this excerpt. */
is_Bad (ir_node *node) {
  if ((node) && get_irn_opcode(node) == iro_Bad)

/* True if node is anything but a Block. */
is_no_Block (ir_node *node) {
  return (get_irn_opcode(node) != iro_Block);

/* True if node is a Block. */
is_Block (ir_node *node) {
  return (get_irn_opcode(node) == iro_Block);

/* returns true if node is a Unknown node. */
is_Unknown (ir_node *node) {
  return (get_irn_opcode(node) == iro_Unknown);
/* True if node acts as a Proj: a real Proj always, and a Filter
   when NOT in the interprocedural view (there a Filter behaves like
   a Phi instead -- see is_Phi). */
is_Proj (const ir_node *node) {
  return node->op == op_Proj
    || (!interprocedural_view && node->op == op_Filter);
/* Returns true if the operation manipulates control flow. */
is_cfop(ir_node *node) {
  return is_cfopcode(get_irn_op(node));

/* Returns true if the operation manipulates interprocedural control flow:
   CallBegin, EndReg, EndExcept */
int is_ip_cfop(ir_node *node) {
  return is_ip_cfopcode(get_irn_op(node));

/* Return the graph an interprocedural cfop belongs to. */
ir_graph *get_ip_cfop_irg(ir_node *n) {
  return get_irn_irg(n);

/* Returns true if the operation can change the control flow because
   (comment continues on lines not shown in this excerpt). */
is_fragile_op(ir_node *node) {
  return is_op_fragile(get_irn_op(node));
1990 /* Returns the memory operand of fragile operations. */
1991 ir_node *get_fragile_op_mem(ir_node *node) {
1992 assert(node && is_fragile_op(node));
1994 switch (get_irn_opcode (node)) {
2003 return get_irn_n(node, 0);
2008 assert(0 && "should not be reached");