3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
/* some constants fixing the positions of a node's predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define SEL_INDEX_OFFSET 2
34 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
35 #define END_KEEPALIVE_OFFSET 0
/* Declarations for inlining */
38 INLINE ir_node ** get_irn_in (const ir_node *node);
39 INLINE ir_mode *get_irn_mode (const ir_node *node);
40 INLINE ir_op *get_irn_op (const ir_node *node);
41 INLINE opcode get_irn_opcode (const ir_node *node);
42 INLINE ident *get_irn_opident (const ir_node *node);
43 INLINE type *get_SymConst_type (ir_node *node);
44 INLINE ir_node *skip_nop (ir_node *node);
45 INLINE int is_Proj (const ir_node *node);
48 static const char *pnc_name_arr [] = {
49 "False", "Eq", "Lt", "Le",
50 "Gt", "Ge", "Lg", "Leg", "Uo",
51 "Ue", "Ul", "Ule", "Ug", "Uge",
56 * returns the pnc name from an pnc constant
58 INLINE const char *get_pnc_string(int pnc) {
59 return pnc_name_arr[pnc];
63 * Calculates the negated pnc condition.
66 get_negated_pnc(int pnc) {
68 case False: return True; break;
69 case Eq: return Ne; break;
70 case Lt: return Uge; break;
71 case Le: return Ug; break;
72 case Gt: return Ule; break;
73 case Ge: return Ul; break;
74 case Lg: return Ue; break;
75 case Leg: return Uo; break;
76 case Uo: return Leg; break;
77 case Ue: return Lg; break;
78 case Ul: return Ge; break;
79 case Ule: return Gt; break;
80 case Ug: return Le; break;
81 case Uge: return Lt; break;
82 case Ne: return Eq; break;
83 case True: return False; break;
85 return 99; /* to shut up gcc */
88 const char *pns_name_arr [] = {
89 "initial_exec", "global_store",
90 "frame_base", "globals", "args"
93 const char *symconst_name_arr [] = {
94 "type_tag", "size", "linkage_ptr_info"
* irnode constructor.
* Create a new irnode in irg, with an op, mode, arity and
* some incoming irnodes.
* If arity is negative, a node with a dynamic array is created.
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
int arity, ir_node **in)
/* total size = common header + opcode-specific attribute payload */
int node_size = offsetof (ir_node, attr) + op->attr_size;
assert(irg && op && mode);
/* nodes live on the graph's obstack; freed only with the whole graph */
res = (ir_node *) obstack_alloc (irg->obst, node_size);
res->kind = k_ir_node;
/* negative arity: growable in-array, slot 0 reserved for the block pred */
res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
/* fixed arity: obstack array of arity+1 (slot 0 is the block pred) */
res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
set_irn_dbg_info(res, db);
/* NOTE(review): node_nr presumably only exists in debug builds -- confirm */
res->node_nr = get_irp_new_node_nr();
/* Copies all attributes stored in the old node to the new node.
Assumes both have the same opcode and sufficient size. */
copy_attrs (const ir_node *old_node, ir_node *new_node) {
assert(get_irn_op(old_node) == get_irn_op(new_node));
/* Raw copy of the opcode-specific attribute union; the op records its
   own attribute size, so only the valid payload bytes are copied. */
memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
150 /** getting some parameters from ir_nodes **/
153 is_ir_node (const void *thing) {
154 if (get_kind(thing) == k_ir_node)
161 get_irn_intra_arity (const ir_node *node) {
162 return intern_get_irn_intra_arity(node);
166 get_irn_inter_arity (const ir_node *node) {
167 return intern_get_irn_inter_arity(node);
171 get_irn_arity (const ir_node *node) {
172 return intern_get_irn_arity(node);
175 /* Returns the array with ins. This array is shifted with respect to the
176 array accessed by get_irn_n: The block operand is at position 0 not -1.
177 (@@@ This should be changed.)
178 The order of the predecessors in this array is not guaranteed, except that
179 lists of operands as predecessors of Block or arguments of a Call are
182 get_irn_in (const ir_node *node) {
184 if (interprocedural_view) { /* handle Filter and Block specially */
185 if (get_irn_opcode(node) == iro_Filter) {
186 assert(node->attr.filter.in_cg);
187 return node->attr.filter.in_cg;
188 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
189 return node->attr.block.in_cg;
191 /* else fall through */
197 set_irn_in (ir_node *node, int arity, ir_node **in) {
200 if (interprocedural_view) { /* handle Filter and Block specially */
201 if (get_irn_opcode(node) == iro_Filter) {
202 assert(node->attr.filter.in_cg);
203 arr = &node->attr.filter.in_cg;
204 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
205 arr = &node->attr.block.in_cg;
212 if (arity != ARR_LEN(*arr) - 1) {
213 ir_node * block = (*arr)[0];
214 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
217 fix_backedges(current_ir_graph->obst, node);
218 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
222 get_irn_intra_n (ir_node *node, int n) {
223 return intern_get_irn_intra_n (node, n);
227 get_irn_inter_n (ir_node *node, int n) {
228 return intern_get_irn_inter_n (node, n);
232 get_irn_n (ir_node *node, int n) {
233 return intern_get_irn_n (node, n);
/* Sets predecessor n of node; n == -1 addresses the block predecessor,
   which is stored at in[0] -- hence the n + 1 shifts below. */
set_irn_n (ir_node *node, int n, ir_node *in) {
assert(node && -1 <= n && n < get_irn_arity(node));
if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
/* Change block pred in both views! */
node->in[n + 1] = in;
assert(node->attr.filter.in_cg);
node->attr.filter.in_cg[n + 1] = in;
if (interprocedural_view) { /* handle Filter and Block specially */
/* In the interprocedural view Filter/Block store their preds in the
   separate in_cg array instead of the ordinary in array. */
if (get_irn_opcode(node) == iro_Filter) {
assert(node->attr.filter.in_cg);
node->attr.filter.in_cg[n + 1] = in;
} else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
node->attr.block.in_cg[n + 1] = in;
/* else fall through */
/* default: ordinary intraprocedural predecessor slot */
node->in[n + 1] = in;
261 get_irn_mode (const ir_node *node) {
262 return intern_get_irn_mode(node);
266 set_irn_mode (ir_node *node, ir_mode *mode)
274 get_irn_modecode (const ir_node *node)
277 return node->mode->code;
280 /** Gets the string representation of the mode .*/
282 get_irn_modename (const ir_node *node)
285 return get_mode_name(node->mode);
289 get_irn_modeident (const ir_node *node)
292 return get_mode_ident(node->mode);
296 get_irn_op (const ir_node *node)
298 return intern_get_irn_op(node);
301 /* should be private to the library: */
303 set_irn_op (ir_node *node, ir_op *op)
310 get_irn_opcode (const ir_node *node)
312 return intern_get_irn_opcode(node);
316 get_irn_opname (const ir_node *node)
319 return get_id_str(node->op->name);
323 get_irn_opident (const ir_node *node)
326 return node->op->name;
330 get_irn_visited (const ir_node *node)
333 return node->visited;
337 set_irn_visited (ir_node *node, unsigned long visited)
340 node->visited = visited;
344 mark_irn_visited (ir_node *node) {
346 node->visited = current_ir_graph->visited;
350 irn_not_visited (const ir_node *node) {
352 return (node->visited < current_ir_graph->visited);
356 irn_visited (const ir_node *node) {
358 return (node->visited >= current_ir_graph->visited);
362 set_irn_link (ir_node *node, void *link) {
364 /* Link field is used for Phi construction and various optimizations
366 assert(get_irg_phase_state(current_ir_graph) != phase_building);
372 get_irn_link (const ir_node *node) {
377 /* Outputs a unique number for this node */
379 get_irn_node_nr(const ir_node *node) {
382 return node->node_nr;
389 get_irn_const_attr (ir_node *node)
391 assert (node->op == op_Const);
392 return node->attr.con;
396 get_irn_proj_attr (ir_node *node)
398 assert (node->op == op_Proj);
399 return node->attr.proj;
403 get_irn_alloc_attr (ir_node *node)
405 assert (node->op == op_Alloc);
410 get_irn_free_attr (ir_node *node)
412 assert (node->op == op_Free);
413 return node->attr.f = skip_tid(node->attr.f);
417 get_irn_symconst_attr (ir_node *node)
419 assert (node->op == op_SymConst);
424 get_irn_call_attr (ir_node *node)
426 assert (node->op == op_Call);
427 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
431 get_irn_funccall_attr (ir_node *node)
433 assert (node->op == op_FuncCall);
434 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
438 get_irn_sel_attr (ir_node *node)
440 assert (node->op == op_Sel);
445 get_irn_phi_attr (ir_node *node)
447 assert (node->op == op_Phi);
448 return node->attr.phi0_pos;
452 get_irn_block_attr (ir_node *node)
454 assert (node->op == op_Block);
455 return node->attr.block;
458 /** manipulate fields of individual nodes **/
460 /* this works for all except Block */
462 get_nodes_Block (ir_node *node) {
463 assert (!(node->op == op_Block));
464 return get_irn_n(node, -1);
468 set_nodes_Block (ir_node *node, ir_node *block) {
469 assert (!(node->op == op_Block));
470 set_irn_n(node, -1, block);
473 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
474 * from Start. If so returns frame type, else Null. */
475 type *is_frame_pointer(ir_node *n) {
476 if ((get_irn_op(n) == op_Proj) &&
477 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
478 ir_node *start = get_Proj_pred(n);
479 if (get_irn_op(start) == op_Start) {
480 return get_irg_frame_type(get_irn_irg(start));
486 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
487 * from Start. If so returns global type, else Null. */
488 type *is_globals_pointer(ir_node *n) {
489 if ((get_irn_op(n) == op_Proj) &&
490 (get_Proj_proj(n) == pn_Start_P_globals)) {
491 ir_node *start = get_Proj_pred(n);
492 if (get_irn_op(start) == op_Start) {
493 return get_glob_type();
499 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
500 * from Start. If so returns 1, else 0. */
501 int is_value_arg_pointer(ir_node *n) {
502 if ((get_irn_op(n) == op_Proj) &&
503 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
504 (get_irn_op(get_Proj_pred(n)) == op_Start))
509 /* Returns an array with the predecessors of the Block. Depending on
510 the implementation of the graph data structure this can be a copy of
511 the internal representation of predecessors as well as the internal
512 array itself. Therefore writing to this array might obstruct the ir. */
514 get_Block_cfgpred_arr (ir_node *node)
516 assert ((node->op == op_Block));
517 return (ir_node **)&(get_irn_in(node)[1]);
522 get_Block_n_cfgpreds (ir_node *node) {
523 assert ((node->op == op_Block));
524 return (get_irn_arity(node));
528 get_Block_cfgpred (ir_node *node, int pos) {
529 assert (node->op == op_Block);
531 if (-1 > pos || get_irn_arity(node) <= pos) {
532 dump_ir_block_graph(current_ir_graph);
533 printf("pos: %d, arity: %d ", pos, get_irn_arity(node));
536 assert(node); assert(-1 <= pos && pos < get_irn_arity(node));
537 return get_irn_n(node, pos);
541 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
542 assert (node->op == op_Block);
543 set_irn_n(node, pos, pred);
547 get_Block_matured (ir_node *node) {
548 assert (node->op == op_Block);
549 return node->attr.block.matured;
553 set_Block_matured (ir_node *node, bool matured) {
554 assert (node->op == op_Block);
555 node->attr.block.matured = matured;
558 get_Block_block_visited (ir_node *node) {
559 assert (node->op == op_Block);
560 return node->attr.block.block_visited;
564 set_Block_block_visited (ir_node *node, unsigned long visit) {
565 assert (node->op == op_Block);
566 node->attr.block.block_visited = visit;
569 /* For this current_ir_graph must be set. */
571 mark_Block_block_visited (ir_node *node) {
572 assert (node->op == op_Block);
573 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
577 Block_not_block_visited(ir_node *node) {
578 assert (node->op == op_Block);
579 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
583 get_Block_graph_arr (ir_node *node, int pos) {
584 assert (node->op == op_Block);
585 return node->attr.block.graph_arr[pos+1];
589 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
590 assert (node->op == op_Block);
591 node->attr.block.graph_arr[pos+1] = value;
594 /* handler handling for Blocks * /
596 set_Block_handler (ir_node *block, ir_node *handler) {
597 assert ((block->op == op_Block));
598 assert ((handler->op == op_Block));
599 block->attr.block.handler_entry = handler;
603 get_Block_handler (ir_node *block) {
604 assert ((block->op == op_Block));
605 return (block->attr.block.handler_entry);
608 / * handler handling for Nodes * /
610 set_Node_handler (ir_node *node, ir_node *handler) {
611 set_Block_handler (get_nodes_Block (node), handler);
615 get_Node_handler (ir_node *node) {
616 return (get_Block_handler (get_nodes_Block (node)));
619 / * exc_t handling for Blocks * /
620 void set_Block_exc (ir_node *block, exc_t exc) {
621 assert ((block->op == op_Block));
622 block->attr.block.exc = exc;
625 exc_t get_Block_exc (ir_node *block) {
626 assert ((block->op == op_Block));
627 return (block->attr.block.exc);
630 / * exc_t handling for Nodes * /
631 void set_Node_exc (ir_node *node, exc_t exc) {
632 set_Block_exc (get_nodes_Block (node), exc);
635 exc_t get_Node_exc (ir_node *node) {
636 return (get_Block_exc (get_nodes_Block (node)));
640 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
641 assert(node->op == op_Block);
642 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
643 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
644 node->attr.block.in_cg[0] = NULL;
645 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
647 /* Fix backedge array. fix_backedges operates depending on
648 interprocedural_view. */
649 bool ipv = interprocedural_view;
650 interprocedural_view = true;
651 fix_backedges(current_ir_graph->obst, node);
652 interprocedural_view = ipv;
655 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
658 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
659 assert(node->op == op_Block &&
660 node->attr.block.in_cg &&
661 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
662 node->attr.block.in_cg[pos + 1] = pred;
665 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
666 assert(node->op == op_Block);
667 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
670 int get_Block_cg_n_cfgpreds(ir_node * node) {
671 assert(node->op == op_Block);
672 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
675 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
676 assert(node->op == op_Block && node->attr.block.in_cg);
677 return node->attr.block.in_cg[pos + 1];
680 void remove_Block_cg_cfgpred_arr(ir_node * node) {
681 assert(node->op == op_Block);
682 node->attr.block.in_cg = NULL;
685 /* Start references the irg it is in. */
687 get_Start_irg(ir_node *node) {
688 return get_irn_irg(node);
692 set_Start_irg(ir_node *node, ir_graph *irg) {
693 assert(node->op == op_Start);
694 assert(is_ir_graph(irg));
695 assert(0 && " Why set irg? -- use set_irn_irg");
699 get_End_n_keepalives(ir_node *end) {
700 assert (end->op == op_End);
701 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
705 get_End_keepalive(ir_node *end, int pos) {
706 assert (end->op == op_End);
707 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
711 add_End_keepalive (ir_node *end, ir_node *ka) {
712 assert (end->op == op_End);
713 ARR_APP1 (ir_node *, end->in, ka);
717 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
718 assert (end->op == op_End);
719 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
723 free_End (ir_node *end) {
724 assert (end->op == op_End);
726 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
727 end->in = NULL; /* @@@ make sure we get an error if we use the
728 in array afterwards ... */
731 ir_graph *get_EndReg_irg (ir_node *end) {
732 return get_irn_irg(end);
735 ir_graph *get_EndExcept_irg (ir_node *end) {
736 return get_irn_irg(end);
740 > Implementing the case construct (which is where the constant Proj node is
741 > important) involves far more than simply determining the constant values.
742 > We could argue that this is more properly a function of the translator from
743 > Firm to the target machine. That could be done if there was some way of
744 > projecting "default" out of the Cond node.
745 I know it's complicated.
Basically there are two problems:
747 - determining the gaps between the projs
748 - determining the biggest case constant to know the proj number for
750 I see several solutions:
751 1. Introduce a ProjDefault node. Solves both problems.
752 This means to extend all optimizations executed during construction.
753 2. Give the Cond node for switch two flavors:
754 a) there are no gaps in the projs (existing flavor)
755 b) gaps may exist, default proj is still the Proj with the largest
756 projection number. This covers also the gaps.
757 3. Fix the semantic of the Cond to that of 2b)
759 Solution 2 seems to be the best:
760 Computing the gaps in the Firm representation is not too hard, i.e.,
761 libFIRM can implement a routine that transforms between the two
762 flavours. This is also possible for 1) but 2) does not require to
763 change any existing optimization.
764 Further it should be far simpler to determine the biggest constant than
766 I don't want to choose 3) as 2a) seems to have advantages for
767 dataflow analysis and 3) does not allow to convert the representation to
771 get_Cond_selector (ir_node *node) {
772 assert (node->op == op_Cond);
773 return get_irn_n(node, 0);
777 set_Cond_selector (ir_node *node, ir_node *selector) {
778 assert (node->op == op_Cond);
779 set_irn_n(node, 0, selector);
783 get_Cond_kind (ir_node *node) {
784 assert (node->op == op_Cond);
785 return node->attr.c.kind;
789 set_Cond_kind (ir_node *node, cond_kind kind) {
790 assert (node->op == op_Cond);
791 node->attr.c.kind = kind;
795 get_Return_mem (ir_node *node) {
796 assert (node->op == op_Return);
797 return get_irn_n(node, 0);
801 set_Return_mem (ir_node *node, ir_node *mem) {
802 assert (node->op == op_Return);
803 set_irn_n(node, 0, mem);
807 get_Return_n_ress (ir_node *node) {
808 assert (node->op == op_Return);
809 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
813 get_Return_res_arr (ir_node *node)
815 assert ((node->op == op_Return));
816 if (get_Return_n_ress(node) > 0)
817 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
824 set_Return_n_res (ir_node *node, int results) {
825 assert (node->op == op_Return);
830 get_Return_res (ir_node *node, int pos) {
831 assert (node->op == op_Return);
832 assert (get_Return_n_ress(node) > pos);
833 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
837 set_Return_res (ir_node *node, int pos, ir_node *res){
838 assert (node->op == op_Return);
839 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
843 get_Raise_mem (ir_node *node) {
844 assert (node->op == op_Raise);
845 return get_irn_n(node, 0);
849 set_Raise_mem (ir_node *node, ir_node *mem) {
850 assert (node->op == op_Raise);
851 set_irn_n(node, 0, mem);
855 get_Raise_exo_ptr (ir_node *node) {
856 assert (node->op == op_Raise);
857 return get_irn_n(node, 1);
861 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
862 assert (node->op == op_Raise);
863 set_irn_n(node, 1, exo_ptr);
866 INLINE tarval *get_Const_tarval (ir_node *node) {
867 assert (node->op == op_Const);
868 return node->attr.con.tv;
872 set_Const_tarval (ir_node *node, tarval *con) {
873 assert (node->op == op_Const);
874 node->attr.con.tv = con;
878 /* The source language type. Must be an atomic type. Mode of type must
879 be mode of node. For tarvals from entities type must be pointer to
882 get_Const_type (ir_node *node) {
883 assert (node->op == op_Const);
884 return node->attr.con.tp;
888 set_Const_type (ir_node *node, type *tp) {
889 assert (node->op == op_Const);
890 if (tp != unknown_type) {
891 assert (is_atomic_type(tp));
892 assert (get_type_mode(tp) == get_irn_mode(node));
893 assert (!tarval_is_entity(get_Const_tarval(node)) ||
894 (is_pointer_type(tp) &&
895 (get_pointer_points_to_type(tp) ==
896 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
899 node->attr.con.tp = tp;
904 get_SymConst_kind (const ir_node *node) {
905 assert (node->op == op_SymConst);
906 return node->attr.i.num;
910 set_SymConst_kind (ir_node *node, symconst_kind num) {
911 assert (node->op == op_SymConst);
912 node->attr.i.num = num;
916 get_SymConst_type (ir_node *node) {
917 assert ( (node->op == op_SymConst)
918 && ( get_SymConst_kind(node) == type_tag
919 || get_SymConst_kind(node) == size));
920 return node->attr.i.tori.typ = skip_tid(node->attr.i.tori.typ);
924 set_SymConst_type (ir_node *node, type *tp) {
925 assert ( (node->op == op_SymConst)
926 && ( get_SymConst_kind(node) == type_tag
927 || get_SymConst_kind(node) == size));
928 node->attr.i.tori.typ = tp;
932 get_SymConst_ptrinfo (ir_node *node) {
933 assert ( (node->op == op_SymConst)
934 && (get_SymConst_kind(node) == linkage_ptr_info));
935 return node->attr.i.tori.ptrinfo;
939 set_SymConst_ptrinfo (ir_node *node, ident *ptrinfo) {
940 assert ( (node->op == op_SymConst)
941 && (get_SymConst_kind(node) == linkage_ptr_info));
942 node->attr.i.tori.ptrinfo = ptrinfo;
946 get_SymConst_type_or_id (ir_node *node) {
947 assert (node->op == op_SymConst);
948 return &(node->attr.i.tori);
952 set_SymConst_type_or_id (ir_node *node, type_or_id_p tori) {
953 assert (node->op == op_SymConst);
954 memcpy (&(node->attr.i.tori), tori, sizeof(type_or_id));
958 get_Sel_mem (ir_node *node) {
959 assert (node->op == op_Sel);
960 return get_irn_n(node, 0);
964 set_Sel_mem (ir_node *node, ir_node *mem) {
965 assert (node->op == op_Sel);
966 set_irn_n(node, 0, mem);
970 get_Sel_ptr (ir_node *node) {
971 assert (node->op == op_Sel);
972 return get_irn_n(node, 1);
976 set_Sel_ptr (ir_node *node, ir_node *ptr) {
977 assert (node->op == op_Sel);
978 set_irn_n(node, 1, ptr);
982 get_Sel_n_indexs (ir_node *node) {
983 assert (node->op == op_Sel);
984 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
988 get_Sel_index_arr (ir_node *node)
990 assert ((node->op == op_Sel));
991 if (get_Sel_n_indexs(node) > 0)
992 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
998 get_Sel_index (ir_node *node, int pos) {
999 assert (node->op == op_Sel);
1000 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1004 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1005 assert (node->op == op_Sel);
1006 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1010 get_Sel_entity (ir_node *node) {
1011 assert (node->op == op_Sel);
1012 return node->attr.s.ent;
1016 set_Sel_entity (ir_node *node, entity *ent) {
1017 assert (node->op == op_Sel);
1018 node->attr.s.ent = ent;
1022 get_InstOf_ent (ir_node *node) {
1023 assert (node->op = op_InstOf);
1024 return (node->attr.io.ent);
1028 set_InstOf_ent (ir_node *node, type *ent) {
1029 assert (node->op = op_InstOf);
1030 node->attr.io.ent = ent;
1034 get_InstOf_store (ir_node *node) {
1035 assert (node->op = op_InstOf);
1036 return (get_irn_n (node, 0));
1040 set_InstOf_store (ir_node *node, ir_node *obj) {
1041 assert (node->op = op_InstOf);
1042 set_irn_n (node, 0, obj);
1046 get_InstOf_obj (ir_node *node) {
1047 assert (node->op = op_InstOf);
1048 return (get_irn_n (node, 1));
1052 set_InstOf_obj (ir_node *node, ir_node *obj) {
1053 assert (node->op = op_InstOf);
1054 set_irn_n (node, 1, obj);
1058 /* For unary and binary arithmetic operations the access to the
1059 operands can be factored out. Left is the first, right the
1060 second arithmetic value as listed in tech report 0999-33.
1061 unops are: Minus, Abs, Not, Conv, Cast
1062 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1063 Shr, Shrs, Rotate, Cmp */
1067 get_Call_mem (ir_node *node) {
1068 assert (node->op == op_Call);
1069 return get_irn_n(node, 0);
1073 set_Call_mem (ir_node *node, ir_node *mem) {
1074 assert (node->op == op_Call);
1075 set_irn_n(node, 0, mem);
1079 get_Call_ptr (ir_node *node) {
1080 assert (node->op == op_Call);
1081 return get_irn_n(node, 1);
1085 set_Call_ptr (ir_node *node, ir_node *ptr) {
1086 assert (node->op == op_Call);
1087 set_irn_n(node, 1, ptr);
1091 get_Call_param_arr (ir_node *node) {
1092 assert (node->op == op_Call);
1093 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1097 get_Call_n_params (ir_node *node) {
1098 assert (node->op == op_Call);
1099 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1103 get_Call_arity (ir_node *node) {
1104 assert (node->op == op_Call);
1105 return get_Call_n_params(node);
1109 set_Call_arity (ir_node *node, ir_node *arity) {
1110 assert (node->op == op_Call);
1115 get_Call_param (ir_node *node, int pos) {
1116 assert (node->op == op_Call);
1117 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1121 set_Call_param (ir_node *node, int pos, ir_node *param) {
1122 assert (node->op == op_Call);
1123 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1127 get_Call_type (ir_node *node) {
1128 assert (node->op == op_Call);
1129 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1133 set_Call_type (ir_node *node, type *tp) {
1134 assert (node->op == op_Call);
1135 assert (is_method_type(tp));
1136 node->attr.call.cld_tp = tp;
1139 int Call_has_callees(ir_node *node) {
1140 return (node->attr.call.callee_arr != NULL);
1143 int get_Call_n_callees(ir_node * node) {
1144 assert(node->op == op_Call && node->attr.call.callee_arr);
1145 return ARR_LEN(node->attr.call.callee_arr);
1148 entity * get_Call_callee(ir_node * node, int pos) {
1149 assert(node->op == op_Call && node->attr.call.callee_arr);
1150 return node->attr.call.callee_arr[pos];
1153 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1154 assert(node->op == op_Call);
1155 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1156 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1158 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1161 void remove_Call_callee_arr(ir_node * node) {
1162 assert(node->op == op_Call);
1163 node->attr.call.callee_arr = NULL;
1166 ir_node * get_CallBegin_ptr (ir_node *node) {
1167 assert(node->op == op_CallBegin);
1168 return get_irn_n(node, 0);
1170 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1171 assert(node->op == op_CallBegin);
1172 set_irn_n(node, 0, ptr);
1174 ir_graph * get_CallBegin_irg (ir_node *node) {
1175 return get_irn_irg(node);
1177 ir_node * get_CallBegin_call (ir_node *node) {
1178 assert(node->op == op_CallBegin);
1179 return node->attr.callbegin.call;
1181 void set_CallBegin_call (ir_node *node, ir_node *call) {
1182 assert(node->op == op_CallBegin);
1183 node->attr.callbegin.call = call;
1187 get_FuncCall_ptr (ir_node *node) {
1188 assert (node->op == op_FuncCall);
1189 return get_irn_n(node, 0);
1193 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1194 assert (node->op == op_FuncCall);
1195 set_irn_n(node, 0, ptr);
1199 get_FuncCall_param_arr (ir_node *node) {
1200 assert (node->op == op_FuncCall);
1201 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET];
1205 get_FuncCall_n_params (ir_node *node) {
1206 assert (node->op == op_FuncCall);
1207 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1211 get_FuncCall_arity (ir_node *node) {
1212 assert (node->op == op_FuncCall);
1213 return get_FuncCall_n_params(node);
1217 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1218 assert (node->op == op_FuncCall);
1223 get_FuncCall_param (ir_node *node, int pos) {
1224 assert (node->op == op_FuncCall);
1225 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1229 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1230 assert (node->op == op_FuncCall);
1231 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1235 get_FuncCall_type (ir_node *node) {
1236 assert (node->op == op_FuncCall);
1237 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1241 set_FuncCall_type (ir_node *node, type *tp) {
1242 assert (node->op == op_FuncCall);
1243 assert (is_method_type(tp));
1244 node->attr.call.cld_tp = tp;
1247 int FuncCall_has_callees(ir_node *node) {
1248 return (node->attr.call.callee_arr != NULL);
1251 int get_FuncCall_n_callees(ir_node * node) {
1252 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1253 return ARR_LEN(node->attr.call.callee_arr);
1256 entity * get_FuncCall_callee(ir_node * node, int pos) {
1257 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1258 return node->attr.call.callee_arr[pos];
1261 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1262 assert(node->op == op_FuncCall);
1263 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1264 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1266 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1269 void remove_FuncCall_callee_arr(ir_node * node) {
1270 assert(node->op == op_FuncCall);
1271 node->attr.call.callee_arr = NULL;
1276 ir_node * get_##OP##_left(ir_node *node) { \
1277 assert(node->op == op_##OP); \
1278 return get_irn_n(node, node->op->op_index); \
1280 void set_##OP##_left(ir_node *node, ir_node *left) { \
1281 assert(node->op == op_##OP); \
1282 set_irn_n(node, node->op->op_index, left); \
1284 ir_node *get_##OP##_right(ir_node *node) { \
1285 assert(node->op == op_##OP); \
1286 return get_irn_n(node, node->op->op_index + 1); \
1288 void set_##OP##_right(ir_node *node, ir_node *right) { \
1289 assert(node->op == op_##OP); \
1290 set_irn_n(node, node->op->op_index + 1, right); \
1294 ir_node *get_##OP##_op(ir_node *node) { \
1295 assert(node->op == op_##OP); \
1296 return get_irn_n(node, node->op->op_index); \
1298 void set_##OP##_op (ir_node *node, ir_node *op) { \
1299 assert(node->op == op_##OP); \
1300 set_irn_n(node, node->op->op_index, op); \
1310 get_Quot_mem (ir_node *node) {
1311 assert (node->op == op_Quot);
1312 return get_irn_n(node, 0);
1316 set_Quot_mem (ir_node *node, ir_node *mem) {
1317 assert (node->op == op_Quot);
1318 set_irn_n(node, 0, mem);
1324 get_DivMod_mem (ir_node *node) {
1325 assert (node->op == op_DivMod);
1326 return get_irn_n(node, 0);
1330 set_DivMod_mem (ir_node *node, ir_node *mem) {
1331 assert (node->op == op_DivMod);
1332 set_irn_n(node, 0, mem);
1338 get_Div_mem (ir_node *node) {
1339 assert (node->op == op_Div);
1340 return get_irn_n(node, 0);
1344 set_Div_mem (ir_node *node, ir_node *mem) {
1345 assert (node->op == op_Div);
1346 set_irn_n(node, 0, mem);
1352 get_Mod_mem (ir_node *node) {
1353 assert (node->op == op_Mod);
1354 return get_irn_n(node, 0);
1358 set_Mod_mem (ir_node *node, ir_node *mem) {
1359 assert (node->op == op_Mod);
1360 set_irn_n(node, 0, mem);
1377 get_Cast_type (ir_node *node) {
1378 assert (node->op == op_Cast);
1379 return node->attr.cast.totype;
1383 set_Cast_type (ir_node *node, type *to_tp) {
1384 assert (node->op == op_Cast);
1385 node->attr.cast.totype = to_tp;
1389 is_unop (ir_node *node) {
1390 return (node->op->opar == oparity_unary);
1394 get_unop_op (ir_node *node) {
1395 if (node->op->opar == oparity_unary)
1396 return get_irn_n(node, node->op->op_index);
1398 assert(node->op->opar == oparity_unary);
1403 set_unop_op (ir_node *node, ir_node *op) {
1404 if (node->op->opar == oparity_unary)
1405 set_irn_n(node, node->op->op_index, op);
1407 assert(node->op->opar == oparity_unary);
/* Returns true if node's operation has binary arity. */
1411 is_binop (ir_node *node) {
1412 return (node->op->opar == oparity_binary);
/* Returns the left operand of a binary node: the first data input,
   located at op_index (inputs before it, e.g. memory, are skipped). */
1416 get_binop_left (ir_node *node) {
1417 if (node->op->opar == oparity_binary)
1418 return get_irn_n(node, node->op->op_index);
/* only reached for non-binary ops */
1420 assert(node->op->opar == oparity_binary);
/* Sets the left operand of a binary node. */
1425 set_binop_left (ir_node *node, ir_node *left) {
1426 if (node->op->opar == oparity_binary)
1427 set_irn_n(node, node->op->op_index, left);
1429 assert (node->op->opar == oparity_binary);
/* Returns the right operand: the data input following the left one. */
1433 get_binop_right (ir_node *node) {
1434 if (node->op->opar == oparity_binary)
1435 return get_irn_n(node, node->op->op_index + 1);
1437 assert(node->op->opar == oparity_binary);
/* Sets the right operand of a binary node. */
1442 set_binop_right (ir_node *node, ir_node *right) {
1443 if (node->op->opar == oparity_binary)
1444 set_irn_n(node, node->op->op_index + 1, right);
1446 assert (node->op->opar == oparity_binary);
/* Returns true for Phi nodes; in the interprocedural view Filter
   nodes act as Phis as well. */
1449 INLINE int is_Phi (ir_node *n) {
1454 return (op == op_Phi) || (op == op_Filter && interprocedural_view);
/* Returns the predecessor array of a Phi.  Entry 0 of the in-array
   is the block, so the data preds start at index 1. */
1458 get_Phi_preds_arr (ir_node *node) {
1459 assert (node->op == op_Phi);
1460 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of Phi predecessors. */
1464 get_Phi_n_preds (ir_node *node) {
1465 assert (is_Phi(node));
1466 return (get_irn_arity(node));
/* NOTE(review): body not visible in this view beyond the assert. */
1470 INLINE void set_Phi_n_preds (ir_node *node, int n_preds) {
1471 assert (node->op == op_Phi);
/* Returns the Phi predecessor at position pos. */
1476 get_Phi_pred (ir_node *node, int pos) {
1477 assert (is_Phi(node));
1478 return get_irn_n(node, pos);
/* Sets the Phi predecessor at position pos. */
1482 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1483 assert (is_Phi(node));
1484 set_irn_n(node, pos, pred);
/* Load node inputs: 0 = memory, 1 = address. */
1488 get_Load_mem (ir_node *node) {
1489 assert (node->op == op_Load);
1490 return get_irn_n(node, 0);
1494 set_Load_mem (ir_node *node, ir_node *mem) {
1495 assert (node->op == op_Load);
1496 set_irn_n(node, 0, mem);
1500 get_Load_ptr (ir_node *node) {
1501 assert (node->op == op_Load);
1502 return get_irn_n(node, 1);
1506 set_Load_ptr (ir_node *node, ir_node *ptr) {
1507 assert (node->op == op_Load);
1508 set_irn_n(node, 1, ptr);
/* Store node inputs: 0 = memory, 1 = address, 2 = value to store. */
1513 get_Store_mem (ir_node *node) {
1514 assert (node->op == op_Store);
1515 return get_irn_n(node, 0);
1519 set_Store_mem (ir_node *node, ir_node *mem) {
1520 assert (node->op == op_Store);
1521 set_irn_n(node, 0, mem);
1525 get_Store_ptr (ir_node *node) {
1526 assert (node->op == op_Store);
1527 return get_irn_n(node, 1);
1531 set_Store_ptr (ir_node *node, ir_node *ptr) {
1532 assert (node->op == op_Store);
1533 set_irn_n(node, 1, ptr);
1537 get_Store_value (ir_node *node) {
1538 assert (node->op == op_Store);
1539 return get_irn_n(node, 2);
1543 set_Store_value (ir_node *node, ir_node *value) {
1544 assert (node->op == op_Store);
1545 set_irn_n(node, 2, value);
/* Alloc node inputs: 0 = memory, 1 = size.  The allocated type and
   the allocation place (stack/heap) live in the node attributes. */
1549 get_Alloc_mem (ir_node *node) {
1550 assert (node->op == op_Alloc);
1551 return get_irn_n(node, 0);
1555 set_Alloc_mem (ir_node *node, ir_node *mem) {
1556 assert (node->op == op_Alloc);
1557 set_irn_n(node, 0, mem);
1561 get_Alloc_size (ir_node *node) {
1562 assert (node->op == op_Alloc);
1563 return get_irn_n(node, 1);
1567 set_Alloc_size (ir_node *node, ir_node *size) {
1568 assert (node->op == op_Alloc);
1569 set_irn_n(node, 1, size);
/* Returns the allocated type; skip_tid normalizes away Id types and
   the result is written back so later reads are already normalized. */
1573 get_Alloc_type (ir_node *node) {
1574 assert (node->op == op_Alloc);
1575 return node->attr.a.type = skip_tid(node->attr.a.type);
1579 set_Alloc_type (ir_node *node, type *tp) {
1580 assert (node->op == op_Alloc);
1581 node->attr.a.type = tp;
/* Returns where the allocation happens (stack_alloc / heap_alloc). */
1585 get_Alloc_where (ir_node *node) {
1586 assert (node->op == op_Alloc);
1587 return node->attr.a.where;
1591 set_Alloc_where (ir_node *node, where_alloc where) {
1592 assert (node->op == op_Alloc);
1593 node->attr.a.where = where;
/* Free node inputs: 0 = memory, 1 = pointer to free, 2 = size.
   The type of the freed entity is kept in the attributes (attr.f). */
1598 get_Free_mem (ir_node *node) {
1599 assert (node->op == op_Free);
1600 return get_irn_n(node, 0);
1604 set_Free_mem (ir_node *node, ir_node *mem) {
1605 assert (node->op == op_Free);
1606 set_irn_n(node, 0, mem);
1610 get_Free_ptr (ir_node *node) {
1611 assert (node->op == op_Free);
1612 return get_irn_n(node, 1);
1616 set_Free_ptr (ir_node *node, ir_node *ptr) {
1617 assert (node->op == op_Free);
1618 set_irn_n(node, 1, ptr);
1622 get_Free_size (ir_node *node) {
1623 assert (node->op == op_Free);
1624 return get_irn_n(node, 2);
1628 set_Free_size (ir_node *node, ir_node *size) {
1629 assert (node->op == op_Free);
1630 set_irn_n(node, 2, size);
/* Returns the freed type; normalized via skip_tid and cached back,
   same pattern as get_Alloc_type. */
1634 get_Free_type (ir_node *node) {
1635 assert (node->op == op_Free);
1636 return node->attr.f = skip_tid(node->attr.f);
/* NOTE(review): assignment body not visible in this view. */
1640 set_Free_type (ir_node *node, type *tp) {
1641 assert (node->op == op_Free);
/* Returns the predecessor array of a Sync.  Entry 0 of the in-array
   is the block, so the preds start at index 1. */
1646 get_Sync_preds_arr (ir_node *node) {
1647 assert (node->op == op_Sync);
1648 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of Sync predecessors. */
1652 get_Sync_n_preds (ir_node *node) {
1653 assert (node->op == op_Sync);
1654 return (get_irn_arity(node));
/* NOTE(review): body not visible in this view beyond the assert. */
1659 set_Sync_n_preds (ir_node *node, int n_preds) {
1660 assert (node->op == op_Sync);
/* Returns the Sync predecessor at position pos. */
1665 get_Sync_pred (ir_node *node, int pos) {
1666 assert (node->op == op_Sync);
1667 return get_irn_n(node, pos);
/* Sets the Sync predecessor at position pos. */
1671 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1672 assert (node->op == op_Sync);
1673 set_irn_n(node, pos, pred);
/* Returns the single predecessor of a Proj (or, via is_Proj, of a
   Filter when not in the interprocedural view). */
1677 get_Proj_pred (ir_node *node) {
1678 assert (is_Proj(node));
1679 return get_irn_n(node, 0);
1683 set_Proj_pred (ir_node *node, ir_node *pred) {
1684 assert (is_Proj(node));
1685 set_irn_n(node, 0, pred);
/* Returns the projection number; Proj and Filter keep it in
   different attribute fields. */
1689 get_Proj_proj (ir_node *node) {
1690 assert (is_Proj(node));
1691 if (get_irn_opcode(node) == iro_Proj) {
1692 return node->attr.proj;
1694 assert(get_irn_opcode(node) == iro_Filter);
1695 return node->attr.filter.proj;
/* NOTE(review): unlike the getter, the setter accepts only real
   Proj nodes, not Filters -- confirm this asymmetry is intended. */
1700 set_Proj_proj (ir_node *node, long proj) {
1701 assert (node->op == op_Proj);
1702 node->attr.proj = proj;
/* Returns the predecessor array of a Tuple.  Entry 0 of the
   in-array is the block, so the preds start at index 1. */
1706 get_Tuple_preds_arr (ir_node *node) {
1707 assert (node->op == op_Tuple);
1708 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of Tuple predecessors. */
1712 get_Tuple_n_preds (ir_node *node) {
1713 assert (node->op == op_Tuple);
1714 return (get_irn_arity(node));
/* NOTE(review): body not visible in this view beyond the assert. */
1719 set_Tuple_n_preds (ir_node *node, int n_preds) {
1720 assert (node->op == op_Tuple);
/* Returns the Tuple predecessor at position pos. */
1725 get_Tuple_pred (ir_node *node, int pos) {
1726 assert (node->op == op_Tuple);
1727 return get_irn_n(node, pos);
/* Sets the Tuple predecessor at position pos. */
1731 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1732 assert (node->op == op_Tuple);
1733 set_irn_n(node, pos, pred);
/* Returns the single predecessor of an Id node. */
1737 get_Id_pred (ir_node *node) {
1738 assert (node->op == op_Id);
1739 return get_irn_n(node, 0);
/* Sets the single predecessor of an Id node. */
1743 set_Id_pred (ir_node *node, ir_node *pred) {
1744 assert (node->op == op_Id);
1745 set_irn_n(node, 0, pred);
/* Confirm node inputs: 0 = confirmed value, 1 = bound it is
   compared against. */
1748 INLINE ir_node *get_Confirm_value (ir_node *node) {
1749 assert (node->op == op_Confirm);
1750 return get_irn_n(node, 0);
1752 INLINE void set_Confirm_value (ir_node *node, ir_node *value) {
1753 assert (node->op == op_Confirm);
1754 set_irn_n(node, 0, value);
1756 INLINE ir_node *get_Confirm_bound (ir_node *node) {
1757 assert (node->op == op_Confirm);
1758 return get_irn_n(node, 1);
/* Sets the bound input of a Confirm node.
   Fix: the bound is predecessor 1 (get_Confirm_bound reads index 1);
   the old code wrote index 0 and so clobbered the confirmed value
   while leaving the bound untouched. */
1760 INLINE void set_Confirm_bound (ir_node *node, ir_node *bound) {
1761 assert (node->op == op_Confirm);
1762 set_irn_n(node, 1, bound);
/* Returns the comparison relation (pn_Cmp) a Confirm asserts
   between its value and its bound. */
1764 INLINE pn_Cmp get_Confirm_cmp (ir_node *node) {
1765 assert (node->op == op_Confirm);
1766 return node->attr.confirm_cmp;
/* Sets the comparison relation of a Confirm node. */
1768 INLINE void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1769 assert (node->op == op_Confirm);
1770 node->attr.confirm_cmp = cmp;
/* NOTE(review): body not visible in this view beyond the assert. */
1775 get_Filter_pred (ir_node *node) {
1776 assert(node->op == op_Filter);
/* NOTE(review): body not visible in this view beyond the assert. */
1780 set_Filter_pred (ir_node *node, ir_node *pred) {
1781 assert(node->op == op_Filter);
/* Returns the projection number of a Filter node. */
1785 get_Filter_proj(ir_node *node) {
1786 assert(node->op == op_Filter);
1787 return node->attr.filter.proj;
/* Sets the projection number of a Filter node. */
1790 set_Filter_proj (ir_node *node, long proj) {
1791 assert(node->op == op_Filter);
1792 node->attr.filter.proj = proj;
1795 /* Don't use get_irn_arity, get_irn_n in implementation as access
1796 shall work independent of view!!! */
/* Replaces the interprocedural (call-graph) predecessor array of a
   Filter.  in_cg[0] mirrors the block pred; data preds start at 1. */
1797 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1798 assert(node->op == op_Filter);
/* (re)allocate only when the arity actually changes */
1799 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1800 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1801 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
/* new backedge flags start out cleared */
1802 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1803 node->attr.filter.in_cg[0] = node->in[0];
1805 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets a single interprocedural predecessor of a Filter.  pos is
   0-based; slot 0 of in_cg is reserved for the block. */
1808 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1809 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1810 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1811 node->attr.filter.in_cg[pos + 1] = pred;
/* Returns the number of interprocedural predecessors of a Filter
   (array length minus the reserved block slot). */
1813 int get_Filter_n_cg_preds(ir_node *node) {
1814 assert(node->op == op_Filter && node->attr.filter.in_cg);
1815 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Returns the interprocedural predecessor at 0-based position pos. */
1817 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1819 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1821 arity = ARR_LEN(node->attr.filter.in_cg);
1822 assert(pos < arity - 1);
1823 return node->attr.filter.in_cg[pos + 1];
/* Returns the ir_graph a node belongs to: walk to the node's block
   (blocks store their graph in attr.block.irg). */
1828 get_irn_irg(ir_node *node) {
1829 if (get_irn_op(node) != op_Block)
1830 node = get_nodes_block(node);
1831 assert(get_irn_op(node) == op_Block);
1832 return node->attr.block.irg;
1836 /*----------------------------------------------------------------*/
1837 /* Auxiliary routines */
1838 /*----------------------------------------------------------------*/
/* Returns the predecessor of a Proj/Filter node, the node itself
   otherwise.  NULL-safe by design. */
1841 skip_Proj (ir_node *node) {
1842 /* don't assert node !!! */
1843 if (node && is_Proj(node)) {
1844 return get_Proj_pred(node);
/* If node is a Proj of a Tuple (possibly through Id nodes and nested
   Tuples), returns the Tuple entry the Proj selects; otherwise
   returns node unchanged.  No-op when normalization is disabled. */
1851 skip_Tuple (ir_node *node) {
1854 if (!get_opt_normalize()) return node;
1856 node = skip_nop(node);
1857 if (get_irn_op(node) == op_Proj) {
/* skip Ids in front of the Proj's predecessor */
1858 pred = skip_nop(get_Proj_pred(node));
1859 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1860 pred = skip_nop(skip_Tuple(pred));
1861 if (get_irn_op(pred) == op_Tuple)
1862 return get_Tuple_pred(pred, get_Proj_proj(node));
1868 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1869 than any other approach, as Id chains are resolved and all point to the real node, or
1870 all id's are self loops. */
/* Skips Id nodes, path-compressing the Id chain as it goes.
   NOTE(review): a second definition of skip_nop follows below;
   presumably one is selected by a preprocessor conditional not
   visible here -- confirm against the full file. */
1872 skip_nop (ir_node *node) {
1873 /* don't assert node !!! */
1875 if (!get_opt_normalize()) return node;
1877 /* Don't use get_Id_pred: We get into an endless loop for
1878 self-referencing Ids. */
/* in[0+1] is the first real predecessor (in[0] is the block) */
1879 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1880 ir_node *rem_pred = node->in[0+1];
1883 assert (intern_get_irn_arity (node) > 0);
/* temporarily make node a self-loop to break Id cycles */
1885 node->in[0+1] = node;
1886 res = skip_nop(rem_pred);
1887 if (res->op == op_Id) /* self-loop */ return node;
/* path compression: point directly at the real node */
1889 node->in[0+1] = res;
1896 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1897 than any other approach, as Id chains are resolved and all point to the real node, or
1898 all id's are self loops. */
/* Alternative skip_nop with a shortcut when the predecessor is not
   itself an Id.  NOTE(review): second of two definitions; presumably
   guarded by a preprocessor conditional not visible here.  `pred`
   and `rem_pred` initialization are on lines elided from this view. */
1900 skip_nop (ir_node *node) {
1902 /* don't assert node !!! */
1904 if (!get_opt_normalize()) return node;
1906 /* Don't use get_Id_pred: We get into an endless loop for
1907 self-referencing Ids. */
1908 if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
1909 ir_node *rem_pred, *res;
/* common case: one-element Id chain, no compression needed */
1911 if (pred->op != op_Id) return pred; /* shortcut */
1914 assert (intern_get_irn_arity (node) > 0);
/* temporarily make node a self-loop to break Id cycles */
1916 node->in[0+1] = node;
1917 res = skip_nop(rem_pred);
1918 if (res->op == op_Id) /* self-loop */ return node;
/* path compression: point directly at the real node */
1920 node->in[0+1] = res;
/* Alias for skip_nop: skips Id nodes. */
1929 skip_Id (ir_node *node) {
1930 return skip_nop(node);
/* Returns true if node is a Bad node (NULL-safe). */
1934 is_Bad (ir_node *node) {
1936 if ((node) && get_irn_opcode(node) == iro_Bad)
/* Returns true if node is NOT a Block. */
1942 is_no_Block (ir_node *node) {
1944 return (get_irn_opcode(node) != iro_Block);
/* Returns true if node is a Block. */
1948 is_Block (ir_node *node) {
1950 return (get_irn_opcode(node) == iro_Block);
1953 /* returns true if node is a Unknown node. */
1955 is_Unknown (ir_node *node) {
1957 return (get_irn_opcode(node) == iro_Unknown);
/* Returns true for Proj nodes; outside the interprocedural view a
   Filter behaves like a Proj (dual of is_Phi above). */
1961 is_Proj (const ir_node *node) {
1963 return node->op == op_Proj
1964 || (!interprocedural_view && node->op == op_Filter);
1967 /* Returns true if the operation manipulates control flow. */
1969 is_cfop(ir_node *node) {
1970 return is_cfopcode(get_irn_op(node));
1973 /* Returns true if the operation manipulates interprocedural control flow:
1974 CallBegin, EndReg, EndExcept */
1975 INLINE int is_ip_cfop(ir_node *node) {
1976 return is_ip_cfopcode(get_irn_op(node));
/* Returns the graph an interprocedural cfop belongs to. */
1979 ir_graph *get_ip_cfop_irg(ir_node *n) {
1980 return get_irn_irg(n);
1983 /* Returns true if the operation can change the control flow because
/* Returns true if the operation may change control flow by raising
   an exception (op flag queried via is_op_fragile). */
1986 is_fragile_op(ir_node *node) {
1987 return is_op_fragile(get_irn_op(node));
1990 /* Returns the memory operand of fragile operations. */
/* For the fragile ops handled in the (partially elided) switch the
   memory operand is predecessor 0; unhandled opcodes hit the
   assertion below. */
1991 ir_node *get_fragile_op_mem(ir_node *node) {
1992 assert(node && is_fragile_op(node));
1994 switch (get_irn_opcode (node)) {
2003 return get_irn_n(node, 0);
2008 assert(0 && "should not be reached");