3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define SEL_INDEX_OFFSET 2
34 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
35 #define END_KEEPALIVE_OFFSET 0
37 static const char *pnc_name_arr [] = {
38 "False", "Eq", "Lt", "Le",
39 "Gt", "Ge", "Lg", "Leg", "Uo",
40 "Ue", "Ul", "Ule", "Ug", "Uge",
45 * returns the pnc name from an pnc constant
47 const char *get_pnc_string(int pnc) {
48 return pnc_name_arr[pnc];
52 * Calculates the negated pnc condition.
55 get_negated_pnc(int pnc) {
57 case False: return True; break;
58 case Eq: return Ne; break;
59 case Lt: return Uge; break;
60 case Le: return Ug; break;
61 case Gt: return Ule; break;
62 case Ge: return Ul; break;
63 case Lg: return Ue; break;
64 case Leg: return Uo; break;
65 case Uo: return Leg; break;
66 case Ue: return Lg; break;
67 case Ul: return Ge; break;
68 case Ule: return Gt; break;
69 case Ug: return Le; break;
70 case Uge: return Lt; break;
71 case Ne: return Eq; break;
72 case True: return False; break;
74 return 99; /* to shut up gcc */
77 const char *pns_name_arr [] = {
78 "initial_exec", "global_store",
79 "frame_base", "globals", "args"
82 const char *symconst_name_arr [] = {
83 "type_tag", "size", "linkage_ptr_info"
93 * Create a new irnode in irg, with an op, mode, arity and
94 * some incoming irnodes.
95 * If arity is negative, a node with a dynamic array is created.
/*
 * Creates a new ir node in graph irg with operation op, mode, arity and
 * the given incoming nodes.  If arity is negative, a node with a dynamic
 * (flexible) in-array is created; otherwise the in-array is allocated on
 * the graph's obstack.
 *
 * NOTE(review): this listing is sampled -- local declarations (e.g. 'res')
 * and the arity branch structure sit on lines not visible here.
 */
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
	     int arity, ir_node **in)
  /* node size = common header up to 'attr' plus the opcode-specific
     attribute area */
  int node_size = offsetof (ir_node, attr) + op->attr_size;

  assert(irg && op && mode);
  /* nodes live on the obstack of their graph */
  res = (ir_node *) obstack_alloc (irg->obst, node_size);

  res->kind = k_ir_node;
  /* dynamic arity: flexible array with slot 0 reserved for the block */
  res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
  /* fixed arity: obstack array, one extra slot for the block at index 0 */
  res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
  /* predecessors are stored shifted by one behind the block slot */
  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);

  set_irn_dbg_info(res, db);
  /* unique debug number for this node */
  res->node_nr = get_irp_new_node_nr();
/* Copies all attributes stored in the old node to the new node.
   Assumes both have the same opcode and sufficient size. */
copy_attrs (const ir_node *old_node, ir_node *new_node) {
  assert(get_irn_op(old_node) == get_irn_op(new_node));
  /* raw byte copy of the opcode-specific attribute area; size is taken
     from the (shared) opcode, so the assert above is essential */
  memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
139 /*-- getting some parameters from ir_nodes --*/
142 (is_ir_node)(const void *thing) {
143 return __is_ir_node(thing);
147 (get_irn_intra_arity)(const ir_node *node) {
148 return __get_irn_intra_arity(node);
152 (get_irn_inter_arity)(const ir_node *node) {
153 return __get_irn_inter_arity(node);
157 (get_irn_arity)(const ir_node *node) {
158 return __get_irn_arity(node);
/* Returns the array with ins. This array is shifted with respect to the
   array accessed by get_irn_n: The block operand is at position 0 not -1.
   (@@@ This should be changed.)
   The order of the predecessors in this array is not guaranteed, except that
   lists of operands as predecessors of Block or arguments of a Call are
get_irn_in (const ir_node *node) {
  if (interprocedural_view) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      /* Filter keeps an extra interprocedural in-array */
      assert(node->attr.filter.in_cg);
      return node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      /* Block: use the call-graph in-array only if it exists */
      return node->attr.block.in_cg;
    /* else fall through */
set_irn_in (ir_node *node, int arity, ir_node **in) {
  /* NOTE(review): sampled listing -- 'arr' is declared (ir_node ***) on a
     line not visible here and defaults to &node->in in the intra view. */
  if (interprocedural_view) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      /* Filter: replace the interprocedural in-array */
      assert(node->attr.filter.in_cg);
      arr = &node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      arr = &node->attr.block.in_cg;
  /* reallocate only if the arity actually changes; slot 0 (block) is
     preserved across the reallocation */
  if (arity != ARR_LEN(*arr) - 1) {
    ir_node * block = (*arr)[0];
    *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
  /* backedge info must be kept consistent with the new in-array */
  fix_backedges(current_ir_graph->obst, node);
  /* copy predecessors behind the block slot */
  memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
208 (get_irn_intra_n)(ir_node *node, int n) {
209 return __get_irn_intra_n (node, n);
213 (get_irn_inter_n)(ir_node *node, int n) {
214 return __get_irn_inter_n (node, n);
218 (get_irn_n)(ir_node *node, int n) {
219 return __get_irn_n (node, n);
set_irn_n (ir_node *node, int n, ir_node *in) {
  /* position -1 addresses the block predecessor */
  assert(node && -1 <= n && n < get_irn_arity(node));
  if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
    /* Change block pred in both views! */
    node->in[n + 1] = in;
    assert(node->attr.filter.in_cg);
    node->attr.filter.in_cg[n + 1] = in;
  if (interprocedural_view) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      /* Filter: in the inter view only the cg-array is updated */
      assert(node->attr.filter.in_cg);
      node->attr.filter.in_cg[n + 1] = in;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      node->attr.block.in_cg[n + 1] = in;
  /* else fall through */
  /* ordinary (intraprocedural) predecessor update; +1 skips block slot */
  node->in[n + 1] = in;
247 (get_irn_mode)(const ir_node *node) {
248 return __get_irn_mode(node);
252 (set_irn_mode)(ir_node *node, ir_mode *mode)
254 __set_irn_mode(node, mode);
258 get_irn_modecode (const ir_node *node)
261 return node->mode->code;
264 /** Gets the string representation of the mode .*/
266 get_irn_modename (const ir_node *node)
269 return get_mode_name(node->mode);
273 get_irn_modeident (const ir_node *node)
276 return get_mode_ident(node->mode);
280 (get_irn_op)(const ir_node *node)
282 return __get_irn_op(node);
285 /* should be private to the library: */
287 set_irn_op (ir_node *node, ir_op *op)
294 (get_irn_opcode)(const ir_node *node)
296 return __get_irn_opcode(node);
300 get_irn_opname (const ir_node *node)
303 return get_id_str(node->op->name);
307 get_irn_opident (const ir_node *node)
310 return node->op->name;
314 (get_irn_visited)(const ir_node *node)
316 return __get_irn_visited(node);
320 (set_irn_visited)(ir_node *node, unsigned long visited)
322 __set_irn_visited(node, visited);
326 (mark_irn_visited)(ir_node *node) {
327 __mark_irn_visited(node);
331 (irn_not_visited)(const ir_node *node) {
332 return __irn_not_visited(node);
336 (irn_visited)(const ir_node *node) {
337 return __irn_visited(node);
341 (set_irn_link)(ir_node *node, void *link) {
342 __set_irn_link(node, link);
346 (get_irn_link)(const ir_node *node) {
347 return __get_irn_link(node);
350 /* Outputs a unique number for this node */
352 get_irn_node_nr(const ir_node *node) {
355 return node->node_nr;
362 get_irn_const_attr (ir_node *node)
364 assert (node->op == op_Const);
365 return node->attr.con;
369 get_irn_proj_attr (ir_node *node)
371 assert (node->op == op_Proj);
372 return node->attr.proj;
376 get_irn_alloc_attr (ir_node *node)
378 assert (node->op == op_Alloc);
383 get_irn_free_attr (ir_node *node)
385 assert (node->op == op_Free);
386 return node->attr.f = skip_tid(node->attr.f);
390 get_irn_symconst_attr (ir_node *node)
392 assert (node->op == op_SymConst);
397 get_irn_call_attr (ir_node *node)
399 assert (node->op == op_Call);
400 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
404 get_irn_funccall_attr (ir_node *node)
406 assert (node->op == op_FuncCall);
407 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
411 get_irn_sel_attr (ir_node *node)
413 assert (node->op == op_Sel);
418 get_irn_phi_attr (ir_node *node)
420 assert (node->op == op_Phi);
421 return node->attr.phi0_pos;
425 get_irn_block_attr (ir_node *node)
427 assert (node->op == op_Block);
428 return node->attr.block;
431 /** manipulate fields of individual nodes **/
433 /* this works for all except Block */
435 get_nodes_Block (ir_node *node) {
436 assert (!(node->op == op_Block));
437 return get_irn_n(node, -1);
441 set_nodes_Block (ir_node *node, ir_node *block) {
442 assert (!(node->op == op_Block));
443 set_irn_n(node, -1, block);
/* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
 * from Start. If so returns frame type, else Null. */
type *is_frame_pointer(ir_node *n) {
  /* must be a Proj with the frame-base projection number ... */
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_frame_base)) {
    ir_node *start = get_Proj_pred(n);
    /* ... whose predecessor really is the Start node */
    if (get_irn_op(start) == op_Start) {
      return get_irg_frame_type(get_irn_irg(start));
459 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
460 * from Start. If so returns global type, else Null. */
461 type *is_globals_pointer(ir_node *n) {
462 if ((get_irn_op(n) == op_Proj) &&
463 (get_Proj_proj(n) == pn_Start_P_globals)) {
464 ir_node *start = get_Proj_pred(n);
465 if (get_irn_op(start) == op_Start) {
466 return get_glob_type();
/* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
 * from Start. If so returns 1, else 0. */
int is_value_arg_pointer(ir_node *n) {
  /* all three conditions checked in one expression: Proj opcode, the
     value-arg-base projection number, and a Start predecessor */
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
      (get_irn_op(get_Proj_pred(n)) == op_Start))
482 /* Returns an array with the predecessors of the Block. Depending on
483 the implementation of the graph data structure this can be a copy of
484 the internal representation of predecessors as well as the internal
485 array itself. Therefore writing to this array might obstruct the ir. */
487 get_Block_cfgpred_arr (ir_node *node)
489 assert ((node->op == op_Block));
490 return (ir_node **)&(get_irn_in(node)[1]);
495 get_Block_n_cfgpreds (ir_node *node) {
496 assert ((node->op == op_Block));
497 return (get_irn_arity(node));
get_Block_cfgpred (ir_node *node, int pos) {
  assert (node->op == op_Block);
  /* debug aid: on an out-of-range position dump the graph and report the
     offending position before the assert below fires */
  if (-1 > pos || get_irn_arity(node) <= pos) {
    dump_ir_block_graph(current_ir_graph);
    printf("pos: %d, arity: %d ", pos, get_irn_arity(node));
  assert(node); assert(-1 <= pos && pos < get_irn_arity(node));
  return get_irn_n(node, pos);
514 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
515 assert (node->op == op_Block);
516 set_irn_n(node, pos, pred);
520 get_Block_matured (ir_node *node) {
521 assert (node->op == op_Block);
522 return node->attr.block.matured;
526 set_Block_matured (ir_node *node, bool matured) {
527 assert (node->op == op_Block);
528 node->attr.block.matured = matured;
531 get_Block_block_visited (ir_node *node) {
532 assert (node->op == op_Block);
533 return node->attr.block.block_visited;
537 set_Block_block_visited (ir_node *node, unsigned long visit) {
538 assert (node->op == op_Block);
539 node->attr.block.block_visited = visit;
542 /* For this current_ir_graph must be set. */
544 mark_Block_block_visited (ir_node *node) {
545 assert (node->op == op_Block);
546 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
550 Block_not_block_visited(ir_node *node) {
551 assert (node->op == op_Block);
552 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
556 get_Block_graph_arr (ir_node *node, int pos) {
557 assert (node->op == op_Block);
558 return node->attr.block.graph_arr[pos+1];
562 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
563 assert (node->op == op_Block);
564 node->attr.block.graph_arr[pos+1] = value;
/* handler handling for Blocks */
569 set_Block_handler (ir_node *block, ir_node *handler) {
570 assert ((block->op == op_Block));
571 assert ((handler->op == op_Block));
572 block->attr.block.handler_entry = handler;
576 get_Block_handler (ir_node *block) {
577 assert ((block->op == op_Block));
578 return (block->attr.block.handler_entry);
/* handler handling for Nodes */
583 set_Node_handler (ir_node *node, ir_node *handler) {
584 set_Block_handler (get_nodes_Block (node), handler);
588 get_Node_handler (ir_node *node) {
589 return (get_Block_handler (get_nodes_Block (node)));
/* exc_t handling for Blocks */
593 void set_Block_exc (ir_node *block, exc_t exc) {
594 assert ((block->op == op_Block));
595 block->attr.block.exc = exc;
598 exc_t get_Block_exc (ir_node *block) {
599 assert ((block->op == op_Block));
600 return (block->attr.block.exc);
/* exc_t handling for Nodes */
604 void set_Node_exc (ir_node *node, exc_t exc) {
605 set_Block_exc (get_nodes_Block (node), exc);
608 exc_t get_Node_exc (ir_node *node) {
609 return (get_Block_exc (get_nodes_Block (node)));
613 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
614 assert(node->op == op_Block);
615 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
616 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
617 node->attr.block.in_cg[0] = NULL;
618 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
620 /* Fix backedge array. fix_backedges operates depending on
621 interprocedural_view. */
622 bool ipv = interprocedural_view;
623 interprocedural_view = true;
624 fix_backedges(current_ir_graph->obst, node);
625 interprocedural_view = ipv;
628 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
631 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
632 assert(node->op == op_Block &&
633 node->attr.block.in_cg &&
634 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
635 node->attr.block.in_cg[pos + 1] = pred;
638 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
639 assert(node->op == op_Block);
640 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
643 int get_Block_cg_n_cfgpreds(ir_node * node) {
644 assert(node->op == op_Block);
645 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
648 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
649 assert(node->op == op_Block && node->attr.block.in_cg);
650 return node->attr.block.in_cg[pos + 1];
653 void remove_Block_cg_cfgpred_arr(ir_node * node) {
654 assert(node->op == op_Block);
655 node->attr.block.in_cg = NULL;
658 /* Start references the irg it is in. */
660 get_Start_irg(ir_node *node) {
661 return get_irn_irg(node);
665 set_Start_irg(ir_node *node, ir_graph *irg) {
666 assert(node->op == op_Start);
667 assert(is_ir_graph(irg));
668 assert(0 && " Why set irg? -- use set_irn_irg");
672 get_End_n_keepalives(ir_node *end) {
673 assert (end->op == op_End);
674 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
678 get_End_keepalive(ir_node *end, int pos) {
679 assert (end->op == op_End);
680 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
684 add_End_keepalive (ir_node *end, ir_node *ka) {
685 assert (end->op == op_End);
686 ARR_APP1 (ir_node *, end->in, ka);
690 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
691 assert (end->op == op_End);
692 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
free_End (ir_node *end) {
  assert (end->op == op_End);
  /* release the flexible keep-alive in-array */
  DEL_ARR_F(end->in); /* GL @@@ does not work! */
  end->in = NULL;   /* @@@ make sure we get an error if we use the
		       in array afterwards ... */
704 ir_graph *get_EndReg_irg (ir_node *end) {
705 return get_irn_irg(end);
708 ir_graph *get_EndExcept_irg (ir_node *end) {
709 return get_irn_irg(end);
713 > Implementing the case construct (which is where the constant Proj node is
714 > important) involves far more than simply determining the constant values.
715 > We could argue that this is more properly a function of the translator from
716 > Firm to the target machine. That could be done if there was some way of
717 > projecting "default" out of the Cond node.
718 I know it's complicated.
719 Basically there are two proglems:
720 - determining the gaps between the projs
721 - determining the biggest case constant to know the proj number for
723 I see several solutions:
724 1. Introduce a ProjDefault node. Solves both problems.
725 This means to extend all optimizations executed during construction.
726 2. Give the Cond node for switch two flavors:
727 a) there are no gaps in the projs (existing flavor)
728 b) gaps may exist, default proj is still the Proj with the largest
729 projection number. This covers also the gaps.
730 3. Fix the semantic of the Cond to that of 2b)
732 Solution 2 seems to be the best:
733 Computing the gaps in the Firm representation is not too hard, i.e.,
734 libFIRM can implement a routine that transforms between the two
735 flavours. This is also possible for 1) but 2) does not require to
736 change any existing optimization.
737 Further it should be far simpler to determine the biggest constant than
739 I don't want to choose 3) as 2a) seems to have advantages for
740 dataflow analysis and 3) does not allow to convert the representation to
744 get_Cond_selector (ir_node *node) {
745 assert (node->op == op_Cond);
746 return get_irn_n(node, 0);
750 set_Cond_selector (ir_node *node, ir_node *selector) {
751 assert (node->op == op_Cond);
752 set_irn_n(node, 0, selector);
756 get_Cond_kind (ir_node *node) {
757 assert (node->op == op_Cond);
758 return node->attr.c.kind;
762 set_Cond_kind (ir_node *node, cond_kind kind) {
763 assert (node->op == op_Cond);
764 node->attr.c.kind = kind;
768 get_Cond_defaultProj (ir_node *node) {
769 assert (node->op == op_Cond);
770 return node->attr.c.default_proj;
774 get_Return_mem (ir_node *node) {
775 assert (node->op == op_Return);
776 return get_irn_n(node, 0);
780 set_Return_mem (ir_node *node, ir_node *mem) {
781 assert (node->op == op_Return);
782 set_irn_n(node, 0, mem);
786 get_Return_n_ress (ir_node *node) {
787 assert (node->op == op_Return);
788 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
792 get_Return_res_arr (ir_node *node)
794 assert ((node->op == op_Return));
795 if (get_Return_n_ress(node) > 0)
796 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
803 set_Return_n_res (ir_node *node, int results) {
804 assert (node->op == op_Return);
809 get_Return_res (ir_node *node, int pos) {
810 assert (node->op == op_Return);
811 assert (get_Return_n_ress(node) > pos);
812 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
816 set_Return_res (ir_node *node, int pos, ir_node *res){
817 assert (node->op == op_Return);
818 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
822 get_Raise_mem (ir_node *node) {
823 assert (node->op == op_Raise);
824 return get_irn_n(node, 0);
828 set_Raise_mem (ir_node *node, ir_node *mem) {
829 assert (node->op == op_Raise);
830 set_irn_n(node, 0, mem);
834 get_Raise_exo_ptr (ir_node *node) {
835 assert (node->op == op_Raise);
836 return get_irn_n(node, 1);
840 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
841 assert (node->op == op_Raise);
842 set_irn_n(node, 1, exo_ptr);
845 tarval *get_Const_tarval (ir_node *node) {
846 assert (node->op == op_Const);
847 return node->attr.con.tv;
851 set_Const_tarval (ir_node *node, tarval *con) {
852 assert (node->op == op_Const);
853 node->attr.con.tv = con;
857 /* The source language type. Must be an atomic type. Mode of type must
858 be mode of node. For tarvals from entities type must be pointer to
861 get_Const_type (ir_node *node) {
862 assert (node->op == op_Const);
863 return node->attr.con.tp;
867 set_Const_type (ir_node *node, type *tp) {
868 assert (node->op == op_Const);
869 if (tp != unknown_type) {
870 assert (is_atomic_type(tp));
871 assert (get_type_mode(tp) == get_irn_mode(node));
872 assert (!tarval_is_entity(get_Const_tarval(node)) ||
873 (is_pointer_type(tp) &&
874 (get_pointer_points_to_type(tp) ==
875 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
878 node->attr.con.tp = tp;
883 get_SymConst_kind (const ir_node *node) {
884 assert (node->op == op_SymConst);
885 return node->attr.i.num;
889 set_SymConst_kind (ir_node *node, symconst_kind num) {
890 assert (node->op == op_SymConst);
891 node->attr.i.num = num;
895 get_SymConst_type (ir_node *node) {
896 assert ( (node->op == op_SymConst)
897 && ( get_SymConst_kind(node) == type_tag
898 || get_SymConst_kind(node) == size));
899 return node->attr.i.tori.typ = skip_tid(node->attr.i.tori.typ);
903 set_SymConst_type (ir_node *node, type *tp) {
904 assert ( (node->op == op_SymConst)
905 && ( get_SymConst_kind(node) == type_tag
906 || get_SymConst_kind(node) == size));
907 node->attr.i.tori.typ = tp;
911 get_SymConst_ptrinfo (ir_node *node) {
912 assert ( (node->op == op_SymConst)
913 && (get_SymConst_kind(node) == linkage_ptr_info));
914 return node->attr.i.tori.ptrinfo;
918 set_SymConst_ptrinfo (ir_node *node, ident *ptrinfo) {
919 assert ( (node->op == op_SymConst)
920 && (get_SymConst_kind(node) == linkage_ptr_info));
921 node->attr.i.tori.ptrinfo = ptrinfo;
925 get_SymConst_type_or_id (ir_node *node) {
926 assert (node->op == op_SymConst);
927 return &(node->attr.i.tori);
931 set_SymConst_type_or_id (ir_node *node, type_or_id_p tori) {
932 assert (node->op == op_SymConst);
933 memcpy (&(node->attr.i.tori), tori, sizeof(type_or_id));
937 get_Sel_mem (ir_node *node) {
938 assert (node->op == op_Sel);
939 return get_irn_n(node, 0);
943 set_Sel_mem (ir_node *node, ir_node *mem) {
944 assert (node->op == op_Sel);
945 set_irn_n(node, 0, mem);
949 get_Sel_ptr (ir_node *node) {
950 assert (node->op == op_Sel);
951 return get_irn_n(node, 1);
955 set_Sel_ptr (ir_node *node, ir_node *ptr) {
956 assert (node->op == op_Sel);
957 set_irn_n(node, 1, ptr);
961 get_Sel_n_indexs (ir_node *node) {
962 assert (node->op == op_Sel);
963 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
967 get_Sel_index_arr (ir_node *node)
969 assert ((node->op == op_Sel));
970 if (get_Sel_n_indexs(node) > 0)
971 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
977 get_Sel_index (ir_node *node, int pos) {
978 assert (node->op == op_Sel);
979 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
983 set_Sel_index (ir_node *node, int pos, ir_node *index) {
984 assert (node->op == op_Sel);
985 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
989 get_Sel_entity (ir_node *node) {
990 assert (node->op == op_Sel);
991 return node->attr.s.ent;
995 set_Sel_entity (ir_node *node, entity *ent) {
996 assert (node->op == op_Sel);
997 node->attr.s.ent = ent;
1001 get_InstOf_ent (ir_node *node) {
1002 assert (node->op = op_InstOf);
1003 return (node->attr.io.ent);
1007 set_InstOf_ent (ir_node *node, type *ent) {
1008 assert (node->op = op_InstOf);
1009 node->attr.io.ent = ent;
1013 get_InstOf_store (ir_node *node) {
1014 assert (node->op = op_InstOf);
1015 return (get_irn_n (node, 0));
1019 set_InstOf_store (ir_node *node, ir_node *obj) {
1020 assert (node->op = op_InstOf);
1021 set_irn_n (node, 0, obj);
1025 get_InstOf_obj (ir_node *node) {
1026 assert (node->op = op_InstOf);
1027 return (get_irn_n (node, 1));
1031 set_InstOf_obj (ir_node *node, ir_node *obj) {
1032 assert (node->op = op_InstOf);
1033 set_irn_n (node, 1, obj);
1037 /* For unary and binary arithmetic operations the access to the
1038 operands can be factored out. Left is the first, right the
1039 second arithmetic value as listed in tech report 0999-33.
1040 unops are: Minus, Abs, Not, Conv, Cast
1041 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1042 Shr, Shrs, Rotate, Cmp */
1046 get_Call_mem (ir_node *node) {
1047 assert (node->op == op_Call);
1048 return get_irn_n(node, 0);
1052 set_Call_mem (ir_node *node, ir_node *mem) {
1053 assert (node->op == op_Call);
1054 set_irn_n(node, 0, mem);
1058 get_Call_ptr (ir_node *node) {
1059 assert (node->op == op_Call);
1060 return get_irn_n(node, 1);
1064 set_Call_ptr (ir_node *node, ir_node *ptr) {
1065 assert (node->op == op_Call);
1066 set_irn_n(node, 1, ptr);
1070 get_Call_param_arr (ir_node *node) {
1071 assert (node->op == op_Call);
1072 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1076 get_Call_n_params (ir_node *node) {
1077 assert (node->op == op_Call);
1078 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1082 get_Call_arity (ir_node *node) {
1083 assert (node->op == op_Call);
1084 return get_Call_n_params(node);
1088 set_Call_arity (ir_node *node, ir_node *arity) {
1089 assert (node->op == op_Call);
1094 get_Call_param (ir_node *node, int pos) {
1095 assert (node->op == op_Call);
1096 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1100 set_Call_param (ir_node *node, int pos, ir_node *param) {
1101 assert (node->op == op_Call);
1102 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1106 get_Call_type (ir_node *node) {
1107 assert (node->op == op_Call);
1108 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1112 set_Call_type (ir_node *node, type *tp) {
1113 assert (node->op == op_Call);
1114 assert (is_method_type(tp));
1115 node->attr.call.cld_tp = tp;
1118 int Call_has_callees(ir_node *node) {
1119 return (node->attr.call.callee_arr != NULL);
1122 int get_Call_n_callees(ir_node * node) {
1123 assert(node->op == op_Call && node->attr.call.callee_arr);
1124 return ARR_LEN(node->attr.call.callee_arr);
1127 entity * get_Call_callee(ir_node * node, int pos) {
1128 assert(node->op == op_Call && node->attr.call.callee_arr);
1129 return node->attr.call.callee_arr[pos];
1132 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1133 assert(node->op == op_Call);
1134 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1135 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1137 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1140 void remove_Call_callee_arr(ir_node * node) {
1141 assert(node->op == op_Call);
1142 node->attr.call.callee_arr = NULL;
1145 ir_node * get_CallBegin_ptr (ir_node *node) {
1146 assert(node->op == op_CallBegin);
1147 return get_irn_n(node, 0);
1149 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1150 assert(node->op == op_CallBegin);
1151 set_irn_n(node, 0, ptr);
1153 ir_graph * get_CallBegin_irg (ir_node *node) {
1154 return get_irn_irg(node);
1156 ir_node * get_CallBegin_call (ir_node *node) {
1157 assert(node->op == op_CallBegin);
1158 return node->attr.callbegin.call;
1160 void set_CallBegin_call (ir_node *node, ir_node *call) {
1161 assert(node->op == op_CallBegin);
1162 node->attr.callbegin.call = call;
1166 get_FuncCall_ptr (ir_node *node) {
1167 assert (node->op == op_FuncCall);
1168 return get_irn_n(node, 0);
1172 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1173 assert (node->op == op_FuncCall);
1174 set_irn_n(node, 0, ptr);
/* Returns the parameter array of a FuncCall node.
   NOTE(review): get_Call_param_arr indexes [CALL_PARAM_OFFSET + 1] while
   this indexes [CALL_PARAM_OFFSET]; also get_FuncCall_ptr reads operand 0
   (no mem operand) yet get_FuncCall_param still adds CALL_PARAM_OFFSET.
   One of these offsets looks off by one -- verify against the FuncCall
   operand layout before relying on this accessor. */
get_FuncCall_param_arr (ir_node *node) {
  assert (node->op == op_FuncCall);
  return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET];
1184 get_FuncCall_n_params (ir_node *node) {
1185 assert (node->op == op_FuncCall);
1186 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1190 get_FuncCall_arity (ir_node *node) {
1191 assert (node->op == op_FuncCall);
1192 return get_FuncCall_n_params(node);
1196 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1197 assert (node->op == op_FuncCall);
1202 get_FuncCall_param (ir_node *node, int pos) {
1203 assert (node->op == op_FuncCall);
1204 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1208 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1209 assert (node->op == op_FuncCall);
1210 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1214 get_FuncCall_type (ir_node *node) {
1215 assert (node->op == op_FuncCall);
1216 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1220 set_FuncCall_type (ir_node *node, type *tp) {
1221 assert (node->op == op_FuncCall);
1222 assert (is_method_type(tp));
1223 node->attr.call.cld_tp = tp;
1226 int FuncCall_has_callees(ir_node *node) {
1227 return (node->attr.call.callee_arr != NULL);
1230 int get_FuncCall_n_callees(ir_node * node) {
1231 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1232 return ARR_LEN(node->attr.call.callee_arr);
1235 entity * get_FuncCall_callee(ir_node * node, int pos) {
1236 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1237 return node->attr.call.callee_arr[pos];
1240 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1241 assert(node->op == op_FuncCall);
1242 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1243 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1245 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1248 void remove_FuncCall_callee_arr(ir_node * node) {
1249 assert(node->op == op_FuncCall);
1250 node->attr.call.callee_arr = NULL;
1255 ir_node * get_##OP##_left(ir_node *node) { \
1256 assert(node->op == op_##OP); \
1257 return get_irn_n(node, node->op->op_index); \
1259 void set_##OP##_left(ir_node *node, ir_node *left) { \
1260 assert(node->op == op_##OP); \
1261 set_irn_n(node, node->op->op_index, left); \
1263 ir_node *get_##OP##_right(ir_node *node) { \
1264 assert(node->op == op_##OP); \
1265 return get_irn_n(node, node->op->op_index + 1); \
1267 void set_##OP##_right(ir_node *node, ir_node *right) { \
1268 assert(node->op == op_##OP); \
1269 set_irn_n(node, node->op->op_index + 1, right); \
1273 ir_node *get_##OP##_op(ir_node *node) { \
1274 assert(node->op == op_##OP); \
1275 return get_irn_n(node, node->op->op_index); \
1277 void set_##OP##_op (ir_node *node, ir_node *op) { \
1278 assert(node->op == op_##OP); \
1279 set_irn_n(node, node->op->op_index, op); \
1289 get_Quot_mem (ir_node *node) {
1290 assert (node->op == op_Quot);
1291 return get_irn_n(node, 0);
1295 set_Quot_mem (ir_node *node, ir_node *mem) {
1296 assert (node->op == op_Quot);
1297 set_irn_n(node, 0, mem);
1303 get_DivMod_mem (ir_node *node) {
1304 assert (node->op == op_DivMod);
1305 return get_irn_n(node, 0);
1309 set_DivMod_mem (ir_node *node, ir_node *mem) {
1310 assert (node->op == op_DivMod);
1311 set_irn_n(node, 0, mem);
1317 get_Div_mem (ir_node *node) {
1318 assert (node->op == op_Div);
1319 return get_irn_n(node, 0);
1323 set_Div_mem (ir_node *node, ir_node *mem) {
1324 assert (node->op == op_Div);
1325 set_irn_n(node, 0, mem);
1331 get_Mod_mem (ir_node *node) {
1332 assert (node->op == op_Mod);
1333 return get_irn_n(node, 0);
1337 set_Mod_mem (ir_node *node, ir_node *mem) {
1338 assert (node->op == op_Mod);
1339 set_irn_n(node, 0, mem);
1356 get_Cast_type (ir_node *node) {
1357 assert (node->op == op_Cast);
1358 return node->attr.cast.totype;
1362 set_Cast_type (ir_node *node, type *to_tp) {
1363 assert (node->op == op_Cast);
1364 node->attr.cast.totype = to_tp;
1368 is_unop (ir_node *node) {
1369 return (node->op->opar == oparity_unary);
1373 get_unop_op (ir_node *node) {
1374 if (node->op->opar == oparity_unary)
1375 return get_irn_n(node, node->op->op_index);
1377 assert(node->op->opar == oparity_unary);
1382 set_unop_op (ir_node *node, ir_node *op) {
1383 if (node->op->opar == oparity_unary)
1384 set_irn_n(node, node->op->op_index, op);
1386 assert(node->op->opar == oparity_unary);
1390 is_binop (ir_node *node) {
1391 return (node->op->opar == oparity_binary);
1395 get_binop_left (ir_node *node) {
1396 if (node->op->opar == oparity_binary)
1397 return get_irn_n(node, node->op->op_index);
1399 assert(node->op->opar == oparity_binary);
1404 set_binop_left (ir_node *node, ir_node *left) {
1405 if (node->op->opar == oparity_binary)
1406 set_irn_n(node, node->op->op_index, left);
1408 assert (node->op->opar == oparity_binary);
1412 get_binop_right (ir_node *node) {
1413 if (node->op->opar == oparity_binary)
1414 return get_irn_n(node, node->op->op_index + 1);
1416 assert(node->op->opar == oparity_binary);
1421 set_binop_right (ir_node *node, ir_node *right) {
1422 if (node->op->opar == oparity_binary)
1423 set_irn_n(node, node->op->op_index + 1, right);
1425 assert (node->op->opar == oparity_binary);
/* Returns true if node is a Phi node; in interprocedural view a Filter
   node plays the role of a Phi and is accepted as well. */
int is_Phi (ir_node *n) {
  ir_op *op;

  assert(n);
  op = get_irn_op(n);  /* NOTE(review): declaration/assignment of 'op' elided in this view */
  return (op == op_Phi) || (op == op_Filter && interprocedural_view);
}

/* Returns the predecessor array of a Phi, skipping the block input
   at slot 0 of the raw in-array. */
ir_node **
get_Phi_preds_arr (ir_node *node) {
  assert (node->op == op_Phi);
  return (ir_node **)&(get_irn_in(node)[1]);
}

/* Returns the number of data predecessors of a Phi. */
int
get_Phi_n_preds (ir_node *node) {
  assert (is_Phi(node));
  return (get_irn_arity(node));
}

/* NOTE(review): only the assert is visible in this revision; the arity
   does not appear to be changed here -- confirm against full source. */
void set_Phi_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Phi);
}

/* Returns the pos-th data predecessor of a Phi. */
ir_node *
get_Phi_pred (ir_node *node, int pos) {
  assert (is_Phi(node));
  return get_irn_n(node, pos);
}

/* Sets the pos-th data predecessor of a Phi. */
void
set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
  assert (is_Phi(node));
  set_irn_n(node, pos, pred);
}
/* Returns the memory input (predecessor 0) of a Load. */
ir_node *
get_Load_mem (ir_node *node) {
  assert (node->op == op_Load);
  return get_irn_n(node, 0);
}

/* Sets the memory input (predecessor 0) of a Load. */
void
set_Load_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Load);
  set_irn_n(node, 0, mem);
}

/* Returns the address input (predecessor 1) of a Load. */
ir_node *
get_Load_ptr (ir_node *node) {
  assert (node->op == op_Load);
  return get_irn_n(node, 1);
}

/* Sets the address input (predecessor 1) of a Load. */
void
set_Load_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Load);
  set_irn_n(node, 1, ptr);
}
/* Returns the memory input (predecessor 0) of a Store. */
ir_node *
get_Store_mem (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 0);
}

/* Sets the memory input (predecessor 0) of a Store. */
void
set_Store_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Store);
  set_irn_n(node, 0, mem);
}

/* Returns the address input (predecessor 1) of a Store. */
ir_node *
get_Store_ptr (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 1);
}

/* Sets the address input (predecessor 1) of a Store. */
void
set_Store_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Store);
  set_irn_n(node, 1, ptr);
}

/* Returns the value input (predecessor 2) of a Store. */
ir_node *
get_Store_value (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 2);
}

/* Sets the value input (predecessor 2) of a Store. */
void
set_Store_value (ir_node *node, ir_node *value) {
  assert (node->op == op_Store);
  set_irn_n(node, 2, value);
}
/* Returns the memory input (predecessor 0) of an Alloc. */
ir_node *
get_Alloc_mem (ir_node *node) {
  assert (node->op == op_Alloc);
  return get_irn_n(node, 0);
}

/* Sets the memory input (predecessor 0) of an Alloc. */
void
set_Alloc_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Alloc);
  set_irn_n(node, 0, mem);
}

/* Returns the size input (predecessor 1) of an Alloc. */
ir_node *
get_Alloc_size (ir_node *node) {
  assert (node->op == op_Alloc);
  return get_irn_n(node, 1);
}

/* Sets the size input (predecessor 1) of an Alloc. */
void
set_Alloc_size (ir_node *node, ir_node *size) {
  assert (node->op == op_Alloc);
  set_irn_n(node, 1, size);
}

/* Returns the allocated type.  Resolves type-id indirections on access
   and caches the resolved type back into the attribute. */
type *
get_Alloc_type (ir_node *node) {
  assert (node->op == op_Alloc);
  return node->attr.a.type = skip_tid(node->attr.a.type);
}

/* Sets the allocated type. */
void
set_Alloc_type (ir_node *node, type *tp) {
  assert (node->op == op_Alloc);
  node->attr.a.type = tp;
}

/* Returns where the allocation happens (stack or heap). */
where_alloc
get_Alloc_where (ir_node *node) {
  assert (node->op == op_Alloc);
  return node->attr.a.where;
}

/* Sets where the allocation happens (stack or heap). */
void
set_Alloc_where (ir_node *node, where_alloc where) {
  assert (node->op == op_Alloc);
  node->attr.a.where = where;
}
/* Returns the memory input (predecessor 0) of a Free. */
ir_node *
get_Free_mem (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 0);
}

/* Sets the memory input (predecessor 0) of a Free. */
void
set_Free_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Free);
  set_irn_n(node, 0, mem);
}

/* Returns the pointer input (predecessor 1) of a Free. */
ir_node *
get_Free_ptr (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 1);
}

/* Sets the pointer input (predecessor 1) of a Free. */
void
set_Free_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Free);
  set_irn_n(node, 1, ptr);
}

/* Returns the size input (predecessor 2) of a Free. */
ir_node *
get_Free_size (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 2);
}

/* Sets the size input (predecessor 2) of a Free. */
void
set_Free_size (ir_node *node, ir_node *size) {
  assert (node->op == op_Free);
  set_irn_n(node, 2, size);
}

/* Returns the freed type.  Resolves type-id indirections on access
   and caches the resolved type back into the attribute. */
type *
get_Free_type (ir_node *node) {
  assert (node->op == op_Free);
  return node->attr.f = skip_tid(node->attr.f);
}

/* Sets the freed type. */
void
set_Free_type (ir_node *node, type *tp) {
  assert (node->op == op_Free);
  node->attr.f = tp;  /* NOTE(review): assignment reconstructed; elided in this view */
}
/* Returns the predecessor array of a Sync, skipping the block input
   at slot 0 of the raw in-array. */
ir_node **
get_Sync_preds_arr (ir_node *node) {
  assert (node->op == op_Sync);
  return (ir_node **)&(get_irn_in(node)[1]);
}

/* Returns the number of memory predecessors of a Sync. */
int
get_Sync_n_preds (ir_node *node) {
  assert (node->op == op_Sync);
  return (get_irn_arity(node));
}

/* NOTE(review): only the assert is visible in this revision; the arity
   does not appear to be changed here -- confirm against full source. */
void
set_Sync_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Sync);
}

/* Returns the pos-th memory predecessor of a Sync. */
ir_node *
get_Sync_pred (ir_node *node, int pos) {
  assert (node->op == op_Sync);
  return get_irn_n(node, pos);
}

/* Sets the pos-th memory predecessor of a Sync. */
void
set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
  assert (node->op == op_Sync);
  set_irn_n(node, pos, pred);
}
/* Returns the projected node (predecessor 0) of a Proj.  Also accepts
   Filter nodes, since is_Proj() treats Filter as Proj outside the
   interprocedural view. */
ir_node *
get_Proj_pred (ir_node *node) {
  assert (is_Proj(node));
  return get_irn_n(node, 0);
}

/* Sets the projected node (predecessor 0) of a Proj. */
void
set_Proj_pred (ir_node *node, ir_node *pred) {
  assert (is_Proj(node));
  set_irn_n(node, 0, pred);
}

/* Returns the projection number.  Proj and Filter keep it in different
   attribute fields, so dispatch on the opcode. */
long
get_Proj_proj (ir_node *node) {
  assert (is_Proj(node));
  if (get_irn_opcode(node) == iro_Proj) {
    return node->attr.proj;
  } else {
    assert(get_irn_opcode(node) == iro_Filter);
    return node->attr.filter.proj;
  }
}
1679 set_Proj_proj (ir_node *node, long proj) {
1680 assert (node->op == op_Proj);
1681 node->attr.proj = proj;
/* Returns the predecessor array of a Tuple, skipping the block input
   at slot 0 of the raw in-array. */
ir_node **
get_Tuple_preds_arr (ir_node *node) {
  assert (node->op == op_Tuple);
  return (ir_node **)&(get_irn_in(node)[1]);
}

/* Returns the number of predecessors of a Tuple. */
int
get_Tuple_n_preds (ir_node *node) {
  assert (node->op == op_Tuple);
  return (get_irn_arity(node));
}

/* NOTE(review): only the assert is visible in this revision; the arity
   does not appear to be changed here -- confirm against full source. */
void
set_Tuple_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Tuple);
}

/* Returns the pos-th predecessor of a Tuple. */
ir_node *
get_Tuple_pred (ir_node *node, int pos) {
  assert (node->op == op_Tuple);
  return get_irn_n(node, pos);
}

/* Sets the pos-th predecessor of a Tuple. */
void
set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
  assert (node->op == op_Tuple);
  set_irn_n(node, pos, pred);
}
/* Returns the single predecessor of an Id node. */
ir_node *
get_Id_pred (ir_node *node) {
  assert (node->op == op_Id);
  return get_irn_n(node, 0);
}

/* Sets the single predecessor of an Id node. */
void
set_Id_pred (ir_node *node, ir_node *pred) {
  assert (node->op == op_Id);
  set_irn_n(node, 0, pred);
}
/* Returns the value input (predecessor 0) of a Confirm node. */
ir_node *get_Confirm_value (ir_node *node) {
  assert (node->op == op_Confirm);
  return get_irn_n(node, 0);
}

/* Sets the value input (predecessor 0) of a Confirm node. */
void set_Confirm_value (ir_node *node, ir_node *value) {
  assert (node->op == op_Confirm);
  set_irn_n(node, 0, value);
}

/* Returns the bound input (predecessor 1) of a Confirm node. */
ir_node *get_Confirm_bound (ir_node *node) {
  assert (node->op == op_Confirm);
  return get_irn_n(node, 1);
}
1739 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1740 assert (node->op == op_Confirm);
1741 set_irn_n(node, 0, bound);
/* Returns the compare relation the Confirm asserts between value and bound. */
pn_Cmp get_Confirm_cmp (ir_node *node) {
  assert (node->op == op_Confirm);
  return node->attr.confirm_cmp;
}

/* Sets the compare relation the Confirm asserts between value and bound. */
void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
  assert (node->op == op_Confirm);
  node->attr.confirm_cmp = cmp;
}
/* Returns the (intraprocedural) predecessor of a Filter node. */
ir_node *
get_Filter_pred (ir_node *node) {
  assert(node->op == op_Filter);
  return node->in[1];  /* NOTE(review): body elided in this view; reads in[1] directly so it works in any view -- confirm */
}

/* Sets the (intraprocedural) predecessor of a Filter node. */
void
set_Filter_pred (ir_node *node, ir_node *pred) {
  assert(node->op == op_Filter);
  node->in[1] = pred;  /* NOTE(review): body elided in this view; writes in[1] directly -- confirm */
}

/* Returns the projection number of a Filter node. */
long
get_Filter_proj(ir_node *node) {
  assert(node->op == op_Filter);
  return node->attr.filter.proj;
}

/* Sets the projection number of a Filter node. */
void
set_Filter_proj (ir_node *node, long proj) {
  assert(node->op == op_Filter);
  node->attr.filter.proj = proj;
}
/* Don't use get_irn_arity, get_irn_n in implementation as access
   shall work independent of view!!! */

/* Installs the interprocedural (call-graph) predecessor array of a
   Filter node, (re)allocating in_cg and the backedge flags on the
   graph's obstack when the arity changed.  Slot 0 of in_cg keeps the
   block input; the arity data predecessors follow. */
void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Filter);
  if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
    node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
    memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
    node->attr.filter.in_cg[0] = node->in[0];  /* keep the block input in slot 0 */
  }
  memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
}

/* Sets the pos-th interprocedural predecessor of a Filter node. */
void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
  assert(node->op == op_Filter && node->attr.filter.in_cg &&
     0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
  node->attr.filter.in_cg[pos + 1] = pred;
}

/* Returns the number of interprocedural predecessors (excluding the
   block slot). */
int get_Filter_n_cg_preds(ir_node *node) {
  assert(node->op == op_Filter && node->attr.filter.in_cg);
  return (ARR_LEN(node->attr.filter.in_cg) - 1);
}

/* Returns the pos-th interprocedural predecessor of a Filter node. */
ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
  int arity;
  assert(node->op == op_Filter && node->attr.filter.in_cg &&
     0 <= pos);
  arity = ARR_LEN(node->attr.filter.in_cg);
  assert(pos < arity - 1);
  return node->attr.filter.in_cg[pos + 1];
}
/* Returns the graph a node belongs to, by walking up to its Block,
   which caches the ir_graph in its attributes. */
ir_graph *
get_irn_irg(ir_node *node) {
  if (get_irn_op(node) != op_Block)
    node = get_nodes_block(node);
  if (is_Bad(node))  /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
    node = get_nodes_block(node);
  assert(get_irn_op(node) == op_Block);
  return node->attr.block.irg;
}
1817 /*----------------------------------------------------------------*/
1818 /* Auxiliary routines */
1819 /*----------------------------------------------------------------*/
/* Returns the operand of a Proj, or the node itself if it is not a
   Proj.  Safe to call with NULL. */
ir_node *
skip_Proj (ir_node *node) {
  /* don't assert node !!! */
  if (node && is_Proj(node)) {
    return get_Proj_pred(node);
  } else {
    return node;  /* NOTE(review): else-branch reconstructed; elided in this view */
  }
}
/* If node is a Proj of a Tuple (possibly through Id chains and nested
   Tuples), returns the Tuple entry it selects; otherwise returns node.
   Does nothing when normalization is disabled. */
ir_node *
skip_Tuple (ir_node *node) {
  ir_node *pred;  /* NOTE(review): declaration reconstructed; elided in this view */

  if (!get_opt_normalize()) return node;

  node = skip_nop(node);
  if (get_irn_op(node) == op_Proj) {
    pred = skip_nop(get_Proj_pred(node));
    if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
      pred = skip_nop(skip_Tuple(pred));
    if (get_irn_op(pred) == op_Tuple)
      return get_Tuple_pred(pred, get_Proj_proj(node));
  }
  return node;  /* NOTE(review): fall-through reconstructed; elided in this view */
}
/** returns operand of node if node is a Cast */
ir_node *skip_Cast (ir_node *node) {
  if (node && get_irn_op(node) == op_Cast) {
    return skip_nop(get_irn_n(node, 0));
  } else {
    return node;  /* NOTE(review): else-branch reconstructed; elided in this view */
  }
}
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops. */
/* NOTE(review): a second definition of skip_nop follows below; in the
   full source these are presumably alternatives under #if/#else --
   confirm before building. */
ir_node *
skip_nop (ir_node *node) {
  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != node->in[0+1])) {
    /* Path-compress the Id chain: temporarily self-loop this node so
       the recursion terminates on cycles, then point it at the result. */
    ir_node *rem_pred = node->in[0+1];
    ir_node *res;  /* NOTE(review): declaration reconstructed; elided in this view */

    assert (get_irn_arity (node) > 0);

    node->in[0+1] = node;
    res = skip_nop(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res;
    return res;  /* NOTE(review): tail reconstructed; elided in this view */
  } else {
    return node;
  }
}
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops. */
/* NOTE(review): duplicate definition of skip_nop (see variant above);
   presumably the other arm of an #if/#else in the full source --
   confirm before building.  This variant adds a shortcut when the
   predecessor is already not an Id. */
ir_node *
skip_nop (ir_node *node) {
  ir_node *pred;  /* NOTE(review): declaration reconstructed; elided in this view */
  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
    ir_node *rem_pred, *res;

    if (pred->op != op_Id) return pred; /* shortcut */
    rem_pred = pred;  /* NOTE(review): assignment reconstructed; elided in this view */

    assert (get_irn_arity (node) > 0);

    node->in[0+1] = node;
    res = skip_nop(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res;
    return res;  /* NOTE(review): tail reconstructed; elided in this view */
  } else {
    return node;
  }
}
/* Convenience alias: skipping Ids is exactly what skip_nop does. */
ir_node *
skip_Id (ir_node *node) {
  return skip_nop(node);
}
/* Returns true if node is a Bad node.  Safe to call with NULL. */
int
is_Bad (ir_node *node) {
  /* don't assert node !!! */
  if ((node) && get_irn_opcode(node) == iro_Bad)
    return 1;  /* NOTE(review): branches reconstructed; elided in this view */
  else
    return 0;
}
/* Returns true if node is not a Block node. */
int
is_no_Block (ir_node *node) {
  assert(node);  /* NOTE(review): assert reconstructed; elided in this view */
  return (get_irn_opcode(node) != iro_Block);
}
/* Returns true if node is a Block node. */
int
is_Block (ir_node *node) {
  assert(node);  /* NOTE(review): assert reconstructed; elided in this view */
  return (get_irn_opcode(node) == iro_Block);
}
/* returns true if node is a Unknown node. */
int
is_Unknown (ir_node *node) {
  assert(node);  /* NOTE(review): assert reconstructed; elided in this view */
  return (get_irn_opcode(node) == iro_Unknown);
}
/* Returns true if node is a Proj node; outside the interprocedural
   view a Filter node also counts as a Proj. */
int
is_Proj (const ir_node *node) {
  assert(node);  /* NOTE(review): assert reconstructed; elided in this view */
  return node->op == op_Proj
    || (!interprocedural_view && node->op == op_Filter);
}
/* Returns true if the operation manipulates control flow. */
int
is_cfop(ir_node *node) {
  return is_cfopcode(get_irn_op(node));
}
/* Returns true if the operation manipulates interprocedural control flow:
   CallBegin, EndReg, EndExcept */
int is_ip_cfop(ir_node *node) {
  return is_ip_cfopcode(get_irn_op(node));
}
/* Returns the graph an interprocedural cfop belongs to. */
ir_graph *get_ip_cfop_irg(ir_node *n) {
  return get_irn_irg(n);
}
/* Returns true if the operation can change the control flow because
   it may raise an exception. */
int
is_fragile_op(ir_node *node) {
  return is_op_fragile(get_irn_op(node));
}
1980 /* Returns the memory operand of fragile operations. */
1981 ir_node *get_fragile_op_mem(ir_node *node) {
1982 assert(node && is_fragile_op(node));
1984 switch (get_irn_opcode (node)) {
1993 return get_irn_n(node, 0);
1998 assert(0 && "should not be reached");