3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46 * Returns the pnc name from a pnc constant.
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
158 int (*__get_irn_arity)(const ir_node *node) = __get_irn_intra_arity;
161 (get_irn_arity)(const ir_node *node) {
162 return __get_irn_arity(node);
165 /* Returns the array with ins. This array is shifted with respect to the
166 array accessed by get_irn_n: The block operand is at position 0 not -1.
167 (@@@ This should be changed.)
168 The order of the predecessors in this array is not guaranteed, except that
169 lists of operands as predecessors of Block or arguments of a Call are
172 get_irn_in (const ir_node *node) {
174 if (get_interprocedural_view()) { /* handle Filter and Block specially */
175 if (get_irn_opcode(node) == iro_Filter) {
176 assert(node->attr.filter.in_cg);
177 return node->attr.filter.in_cg;
178 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
179 return node->attr.block.in_cg;
181 /* else fall through */
187 set_irn_in (ir_node *node, int arity, ir_node **in) {
190 if (get_interprocedural_view()) { /* handle Filter and Block specially */
191 if (get_irn_opcode(node) == iro_Filter) {
192 assert(node->attr.filter.in_cg);
193 arr = &node->attr.filter.in_cg;
194 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
195 arr = &node->attr.block.in_cg;
202 if (arity != ARR_LEN(*arr) - 1) {
203 ir_node * block = (*arr)[0];
204 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
207 fix_backedges(current_ir_graph->obst, node);
208 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
212 (get_irn_intra_n)(ir_node *node, int n) {
213 return __get_irn_intra_n (node, n);
217 (get_irn_inter_n)(ir_node *node, int n) {
218 return __get_irn_inter_n (node, n);
221 ir_node *(*__get_irn_n)(ir_node *node, int n) = __get_irn_intra_n;
224 (get_irn_n)(ir_node *node, int n) {
225 return __get_irn_n(node, n);
229 set_irn_n (ir_node *node, int n, ir_node *in) {
230 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
231 assert(in && in->kind == k_ir_node);
232 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
233 /* Change block pred in both views! */
234 node->in[n + 1] = in;
235 assert(node->attr.filter.in_cg);
236 node->attr.filter.in_cg[n + 1] = in;
239 if (get_interprocedural_view()) { /* handle Filter and Block specially */
240 if (get_irn_opcode(node) == iro_Filter) {
241 assert(node->attr.filter.in_cg);
242 node->attr.filter.in_cg[n + 1] = in;
244 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
245 node->attr.block.in_cg[n + 1] = in;
248 /* else fall through */
250 node->in[n + 1] = in;
254 (get_irn_mode)(const ir_node *node) {
255 return __get_irn_mode(node);
259 (set_irn_mode)(ir_node *node, ir_mode *mode)
261 __set_irn_mode(node, mode);
265 get_irn_modecode (const ir_node *node)
268 return node->mode->code;
271 /** Gets the string representation of the mode .*/
273 get_irn_modename (const ir_node *node)
276 return get_mode_name(node->mode);
280 get_irn_modeident (const ir_node *node)
283 return get_mode_ident(node->mode);
287 (get_irn_op)(const ir_node *node)
289 return __get_irn_op(node);
292 /* should be private to the library: */
294 set_irn_op (ir_node *node, ir_op *op)
301 (get_irn_opcode)(const ir_node *node)
303 return __get_irn_opcode(node);
307 get_irn_opname (const ir_node *node)
310 if ((get_irn_op((ir_node *)node) == op_Phi) &&
311 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
312 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
313 return get_id_str(node->op->name);
317 get_irn_opident (const ir_node *node)
320 return node->op->name;
324 (get_irn_visited)(const ir_node *node)
326 return __get_irn_visited(node);
330 (set_irn_visited)(ir_node *node, unsigned long visited)
332 __set_irn_visited(node, visited);
336 (mark_irn_visited)(ir_node *node) {
337 __mark_irn_visited(node);
341 (irn_not_visited)(const ir_node *node) {
342 return __irn_not_visited(node);
346 (irn_visited)(const ir_node *node) {
347 return __irn_visited(node);
351 (set_irn_link)(ir_node *node, void *link) {
352 __set_irn_link(node, link);
356 (get_irn_link)(const ir_node *node) {
357 return __get_irn_link(node);
361 (get_irn_pinned)(const ir_node *node) {
362 return __get_irn_pinned(node);
365 void set_irn_pinned(ir_node *node, op_pin_state state) {
366 /* due to optimization an opt may be turned into a Tuple */
367 if (get_irn_op(node) == op_Tuple)
370 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
371 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
373 node->attr.except.pin_state = state;
376 #ifdef DO_HEAPANALYSIS
377 /* Access the abstract interpretation information of a node.
378 Returns NULL if no such information is available. */
379 struct abstval *get_irn_abst_value(ir_node *n) {
382 /* Set the abstract interpretation information of a node. */
383 void set_irn_abst_value(ir_node *n, struct abstval *os) {
386 struct section *firm_get_irn_section(ir_node *n) {
389 void firm_set_irn_section(ir_node *n, struct section *s) {
392 #endif /* DO_HEAPANALYSIS */
395 /* Outputs a unique number for this node */
397 get_irn_node_nr(const ir_node *node) {
400 return node->node_nr;
407 get_irn_const_attr (ir_node *node)
409 assert (node->op == op_Const);
410 return node->attr.con;
414 get_irn_proj_attr (ir_node *node)
416 assert (node->op == op_Proj);
417 return node->attr.proj;
421 get_irn_alloc_attr (ir_node *node)
423 assert (node->op == op_Alloc);
428 get_irn_free_attr (ir_node *node)
430 assert (node->op == op_Free);
431 return node->attr.f = skip_tid(node->attr.f);
435 get_irn_symconst_attr (ir_node *node)
437 assert (node->op == op_SymConst);
442 get_irn_call_attr (ir_node *node)
444 assert (node->op == op_Call);
445 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
449 get_irn_sel_attr (ir_node *node)
451 assert (node->op == op_Sel);
456 get_irn_phi_attr (ir_node *node)
458 assert (node->op == op_Phi);
459 return node->attr.phi0_pos;
463 get_irn_block_attr (ir_node *node)
465 assert (node->op == op_Block);
466 return node->attr.block;
470 get_irn_load_attr (ir_node *node)
472 assert (node->op == op_Load);
473 return node->attr.load;
477 get_irn_store_attr (ir_node *node)
479 assert (node->op == op_Store);
480 return node->attr.store;
484 get_irn_except_attr (ir_node *node)
486 assert (node->op == op_Div || node->op == op_Quot ||
487 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
488 return node->attr.except;
491 /** manipulate fields of individual nodes **/
493 /* this works for all except Block */
495 get_nodes_block (ir_node *node) {
496 assert (!(node->op == op_Block));
497 return get_irn_n(node, -1);
501 set_nodes_block (ir_node *node, ir_node *block) {
502 assert (!(node->op == op_Block));
503 set_irn_n(node, -1, block);
506 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
507 * from Start. If so returns frame type, else Null. */
508 type *is_frame_pointer(ir_node *n) {
509 if ((get_irn_op(n) == op_Proj) &&
510 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
511 ir_node *start = get_Proj_pred(n);
512 if (get_irn_op(start) == op_Start) {
513 return get_irg_frame_type(get_irn_irg(start));
519 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
520 * from Start. If so returns global type, else Null. */
521 type *is_globals_pointer(ir_node *n) {
522 if ((get_irn_op(n) == op_Proj) &&
523 (get_Proj_proj(n) == pn_Start_P_globals)) {
524 ir_node *start = get_Proj_pred(n);
525 if (get_irn_op(start) == op_Start) {
526 return get_glob_type();
532 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
533 * from Start. If so returns 1, else 0. */
534 int is_value_arg_pointer(ir_node *n) {
535 if ((get_irn_op(n) == op_Proj) &&
536 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
537 (get_irn_op(get_Proj_pred(n)) == op_Start))
542 /* Returns an array with the predecessors of the Block. Depending on
543 the implementation of the graph data structure this can be a copy of
544 the internal representation of predecessors as well as the internal
545 array itself. Therefore writing to this array might obstruct the ir. */
547 get_Block_cfgpred_arr (ir_node *node)
549 assert ((node->op == op_Block));
550 return (ir_node **)&(get_irn_in(node)[1]);
555 get_Block_n_cfgpreds (ir_node *node) {
556 assert ((node->op == op_Block));
557 return get_irn_arity(node);
561 get_Block_cfgpred (ir_node *node, int pos) {
562 assert(-1 <= pos && pos < get_irn_arity(node));
563 assert(node->op == op_Block);
564 return get_irn_n(node, pos);
568 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
569 assert (node->op == op_Block);
570 set_irn_n(node, pos, pred);
574 get_Block_matured (ir_node *node) {
575 assert (node->op == op_Block);
576 return node->attr.block.matured;
580 set_Block_matured (ir_node *node, bool matured) {
581 assert (node->op == op_Block);
582 node->attr.block.matured = matured;
585 get_Block_block_visited (ir_node *node) {
586 assert (node->op == op_Block);
587 return node->attr.block.block_visited;
591 set_Block_block_visited (ir_node *node, unsigned long visit) {
592 assert (node->op == op_Block);
593 node->attr.block.block_visited = visit;
596 /* For this current_ir_graph must be set. */
598 mark_Block_block_visited (ir_node *node) {
599 assert (node->op == op_Block);
600 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
604 Block_not_block_visited(ir_node *node) {
605 assert (node->op == op_Block);
606 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
610 get_Block_graph_arr (ir_node *node, int pos) {
611 assert (node->op == op_Block);
612 return node->attr.block.graph_arr[pos+1];
616 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
617 assert (node->op == op_Block);
618 node->attr.block.graph_arr[pos+1] = value;
621 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
622 assert(node->op == op_Block);
623 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
624 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
625 node->attr.block.in_cg[0] = NULL;
626 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
628 /* Fix backedge array. fix_backedges operates depending on
629 interprocedural_view. */
630 int ipv = get_interprocedural_view();
631 set_interprocedural_view(true);
632 fix_backedges(current_ir_graph->obst, node);
633 set_interprocedural_view(ipv);
636 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
639 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
640 assert(node->op == op_Block &&
641 node->attr.block.in_cg &&
642 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
643 node->attr.block.in_cg[pos + 1] = pred;
646 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
647 assert(node->op == op_Block);
648 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
651 int get_Block_cg_n_cfgpreds(ir_node * node) {
652 assert(node->op == op_Block);
653 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
656 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
657 assert(node->op == op_Block && node->attr.block.in_cg);
658 return node->attr.block.in_cg[pos + 1];
661 void remove_Block_cg_cfgpred_arr(ir_node * node) {
662 assert(node->op == op_Block);
663 node->attr.block.in_cg = NULL;
667 set_Start_irg(ir_node *node, ir_graph *irg) {
668 assert(node->op == op_Start);
669 assert(is_ir_graph(irg));
670 assert(0 && " Why set irg? -- use set_irn_irg");
674 get_End_n_keepalives(ir_node *end) {
675 assert (end->op == op_End);
676 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
680 get_End_keepalive(ir_node *end, int pos) {
681 assert (end->op == op_End);
682 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
686 add_End_keepalive (ir_node *end, ir_node *ka) {
687 assert (end->op == op_End);
688 ARR_APP1 (ir_node *, end->in, ka);
692 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
693 assert (end->op == op_End);
694 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
698 free_End (ir_node *end) {
699 assert (end->op == op_End);
701 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
702 end->in = NULL; /* @@@ make sure we get an error if we use the
703 in array afterwards ... */
708 > Implementing the case construct (which is where the constant Proj node is
709 > important) involves far more than simply determining the constant values.
710 > We could argue that this is more properly a function of the translator from
711 > Firm to the target machine. That could be done if there was some way of
712 > projecting "default" out of the Cond node.
713 I know it's complicated.
714 Basically there are two problems:
715 - determining the gaps between the projs
716 - determining the biggest case constant to know the proj number for
718 I see several solutions:
719 1. Introduce a ProjDefault node. Solves both problems.
720 This means to extend all optimizations executed during construction.
721 2. Give the Cond node for switch two flavors:
722 a) there are no gaps in the projs (existing flavor)
723 b) gaps may exist, default proj is still the Proj with the largest
724 projection number. This covers also the gaps.
725 3. Fix the semantic of the Cond to that of 2b)
727 Solution 2 seems to be the best:
728 Computing the gaps in the Firm representation is not too hard, i.e.,
729 libFIRM can implement a routine that transforms between the two
730 flavours. This is also possible for 1) but 2) does not require to
731 change any existing optimization.
732 Further it should be far simpler to determine the biggest constant than
734 I don't want to choose 3) as 2a) seems to have advantages for
735 dataflow analysis and 3) does not allow to convert the representation to
739 get_Cond_selector (ir_node *node) {
740 assert (node->op == op_Cond);
741 return get_irn_n(node, 0);
745 set_Cond_selector (ir_node *node, ir_node *selector) {
746 assert (node->op == op_Cond);
747 set_irn_n(node, 0, selector);
751 get_Cond_kind (ir_node *node) {
752 assert (node->op == op_Cond);
753 return node->attr.c.kind;
757 set_Cond_kind (ir_node *node, cond_kind kind) {
758 assert (node->op == op_Cond);
759 node->attr.c.kind = kind;
763 get_Cond_defaultProj (ir_node *node) {
764 assert (node->op == op_Cond);
765 return node->attr.c.default_proj;
769 get_Return_mem (ir_node *node) {
770 assert (node->op == op_Return);
771 return get_irn_n(node, 0);
775 set_Return_mem (ir_node *node, ir_node *mem) {
776 assert (node->op == op_Return);
777 set_irn_n(node, 0, mem);
781 get_Return_n_ress (ir_node *node) {
782 assert (node->op == op_Return);
783 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
787 get_Return_res_arr (ir_node *node)
789 assert ((node->op == op_Return));
790 if (get_Return_n_ress(node) > 0)
791 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
798 set_Return_n_res (ir_node *node, int results) {
799 assert (node->op == op_Return);
804 get_Return_res (ir_node *node, int pos) {
805 assert (node->op == op_Return);
806 assert (get_Return_n_ress(node) > pos);
807 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
811 set_Return_res (ir_node *node, int pos, ir_node *res){
812 assert (node->op == op_Return);
813 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
817 get_Raise_mem (ir_node *node) {
818 assert (node->op == op_Raise);
819 return get_irn_n(node, 0);
823 set_Raise_mem (ir_node *node, ir_node *mem) {
824 assert (node->op == op_Raise);
825 set_irn_n(node, 0, mem);
829 get_Raise_exo_ptr (ir_node *node) {
830 assert (node->op == op_Raise);
831 return get_irn_n(node, 1);
835 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
836 assert (node->op == op_Raise);
837 set_irn_n(node, 1, exo_ptr);
840 tarval *get_Const_tarval (ir_node *node) {
841 assert (node->op == op_Const);
842 return node->attr.con.tv;
846 set_Const_tarval (ir_node *node, tarval *con) {
847 assert (node->op == op_Const);
848 node->attr.con.tv = con;
852 /* The source language type. Must be an atomic type. Mode of type must
853 be mode of node. For tarvals from entities type must be pointer to
856 get_Const_type (ir_node *node) {
857 assert (node->op == op_Const);
858 return node->attr.con.tp;
862 set_Const_type (ir_node *node, type *tp) {
863 assert (node->op == op_Const);
864 if (tp != unknown_type) {
865 assert (is_atomic_type(tp));
866 assert (get_type_mode(tp) == get_irn_mode(node));
868 node->attr.con.tp = tp;
873 get_SymConst_kind (const ir_node *node) {
874 assert (node->op == op_SymConst);
875 return node->attr.i.num;
879 set_SymConst_kind (ir_node *node, symconst_kind num) {
880 assert (node->op == op_SymConst);
881 node->attr.i.num = num;
885 get_SymConst_type (ir_node *node) {
886 assert ( (node->op == op_SymConst)
887 && ( get_SymConst_kind(node) == symconst_type_tag
888 || get_SymConst_kind(node) == symconst_size));
889 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
893 set_SymConst_type (ir_node *node, type *tp) {
894 assert ( (node->op == op_SymConst)
895 && ( get_SymConst_kind(node) == symconst_type_tag
896 || get_SymConst_kind(node) == symconst_size));
897 node->attr.i.sym.type_p = tp;
901 get_SymConst_name (ir_node *node) {
902 assert ( (node->op == op_SymConst)
903 && (get_SymConst_kind(node) == symconst_addr_name));
904 return node->attr.i.sym.ident_p;
908 set_SymConst_name (ir_node *node, ident *name) {
909 assert ( (node->op == op_SymConst)
910 && (get_SymConst_kind(node) == symconst_addr_name));
911 node->attr.i.sym.ident_p = name;
915 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
916 entity *get_SymConst_entity (ir_node *node) {
917 assert ( (node->op == op_SymConst)
918 && (get_SymConst_kind (node) == symconst_addr_ent));
919 return node->attr.i.sym.entity_p;
922 void set_SymConst_entity (ir_node *node, entity *ent) {
923 assert ( (node->op == op_SymConst)
924 && (get_SymConst_kind(node) == symconst_addr_ent));
925 node->attr.i.sym.entity_p = ent;
928 union symconst_symbol
929 get_SymConst_symbol (ir_node *node) {
930 assert (node->op == op_SymConst);
931 return node->attr.i.sym;
935 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
936 assert (node->op == op_SymConst);
937 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
938 node->attr.i.sym = sym;
942 get_SymConst_value_type (ir_node *node) {
943 assert (node->op == op_SymConst);
944 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
945 return node->attr.i.tp;
949 set_SymConst_value_type (ir_node *node, type *tp) {
950 assert (node->op == op_SymConst);
951 node->attr.i.tp = tp;
955 get_Sel_mem (ir_node *node) {
956 assert (node->op == op_Sel);
957 return get_irn_n(node, 0);
961 set_Sel_mem (ir_node *node, ir_node *mem) {
962 assert (node->op == op_Sel);
963 set_irn_n(node, 0, mem);
967 get_Sel_ptr (ir_node *node) {
968 assert (node->op == op_Sel);
969 return get_irn_n(node, 1);
973 set_Sel_ptr (ir_node *node, ir_node *ptr) {
974 assert (node->op == op_Sel);
975 set_irn_n(node, 1, ptr);
979 get_Sel_n_indexs (ir_node *node) {
980 assert (node->op == op_Sel);
981 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
985 get_Sel_index_arr (ir_node *node)
987 assert ((node->op == op_Sel));
988 if (get_Sel_n_indexs(node) > 0)
989 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
995 get_Sel_index (ir_node *node, int pos) {
996 assert (node->op == op_Sel);
997 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1001 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1002 assert (node->op == op_Sel);
1003 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1007 get_Sel_entity (ir_node *node) {
1008 assert (node->op == op_Sel);
1009 return node->attr.s.ent;
1013 set_Sel_entity (ir_node *node, entity *ent) {
1014 assert (node->op == op_Sel);
1015 node->attr.s.ent = ent;
1019 get_InstOf_ent (ir_node *node) {
1020 assert (node->op = op_InstOf);
1021 return (node->attr.io.ent);
1025 set_InstOf_ent (ir_node *node, type *ent) {
1026 assert (node->op = op_InstOf);
1027 node->attr.io.ent = ent;
1031 get_InstOf_store (ir_node *node) {
1032 assert (node->op = op_InstOf);
1033 return (get_irn_n (node, 0));
1037 set_InstOf_store (ir_node *node, ir_node *obj) {
1038 assert (node->op = op_InstOf);
1039 set_irn_n (node, 0, obj);
1043 get_InstOf_obj (ir_node *node) {
1044 assert (node->op = op_InstOf);
1045 return (get_irn_n (node, 1));
1049 set_InstOf_obj (ir_node *node, ir_node *obj) {
1050 assert (node->op = op_InstOf);
1051 set_irn_n (node, 1, obj);
1055 /* For unary and binary arithmetic operations the access to the
1056 operands can be factored out. Left is the first, right the
1057 second arithmetic value as listed in tech report 0999-33.
1058 unops are: Minus, Abs, Not, Conv, Cast
1059 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1060 Shr, Shrs, Rotate, Cmp */
1064 get_Call_mem (ir_node *node) {
1065 assert (node->op == op_Call);
1066 return get_irn_n(node, 0);
1070 set_Call_mem (ir_node *node, ir_node *mem) {
1071 assert (node->op == op_Call);
1072 set_irn_n(node, 0, mem);
1076 get_Call_ptr (ir_node *node) {
1077 assert (node->op == op_Call);
1078 return get_irn_n(node, 1);
1082 set_Call_ptr (ir_node *node, ir_node *ptr) {
1083 assert (node->op == op_Call);
1084 set_irn_n(node, 1, ptr);
1088 get_Call_param_arr (ir_node *node) {
1089 assert (node->op == op_Call);
1090 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1094 get_Call_n_params (ir_node *node) {
1095 assert (node->op == op_Call);
1096 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1100 get_Call_arity (ir_node *node) {
1101 assert (node->op == op_Call);
1102 return get_Call_n_params(node);
1106 set_Call_arity (ir_node *node, ir_node *arity) {
1107 assert (node->op == op_Call);
1112 get_Call_param (ir_node *node, int pos) {
1113 assert (node->op == op_Call);
1114 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1118 set_Call_param (ir_node *node, int pos, ir_node *param) {
1119 assert (node->op == op_Call);
1120 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1124 get_Call_type (ir_node *node) {
1125 assert (node->op == op_Call);
1126 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1130 set_Call_type (ir_node *node, type *tp) {
1131 assert (node->op == op_Call);
1132 assert ((get_unknown_type() == tp) || is_method_type(tp));
1133 node->attr.call.cld_tp = tp;
1136 int Call_has_callees(ir_node *node) {
1137 assert(node && node->op == op_Call);
1138 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1139 (node->attr.call.callee_arr != NULL));
1142 int get_Call_n_callees(ir_node * node) {
1143 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1144 return ARR_LEN(node->attr.call.callee_arr);
1147 entity * get_Call_callee(ir_node * node, int pos) {
1148 assert(pos >= 0 && pos < get_Call_n_callees(node));
1149 return node->attr.call.callee_arr[pos];
1152 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1153 assert(node->op == op_Call);
1154 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1155 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1157 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1160 void remove_Call_callee_arr(ir_node * node) {
1161 assert(node->op == op_Call);
1162 node->attr.call.callee_arr = NULL;
1165 ir_node * get_CallBegin_ptr (ir_node *node) {
1166 assert(node->op == op_CallBegin);
1167 return get_irn_n(node, 0);
1169 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1170 assert(node->op == op_CallBegin);
1171 set_irn_n(node, 0, ptr);
1173 ir_node * get_CallBegin_call (ir_node *node) {
1174 assert(node->op == op_CallBegin);
1175 return node->attr.callbegin.call;
1177 void set_CallBegin_call (ir_node *node, ir_node *call) {
1178 assert(node->op == op_CallBegin);
1179 node->attr.callbegin.call = call;
1184 ir_node * get_##OP##_left(ir_node *node) { \
1185 assert(node->op == op_##OP); \
1186 return get_irn_n(node, node->op->op_index); \
1188 void set_##OP##_left(ir_node *node, ir_node *left) { \
1189 assert(node->op == op_##OP); \
1190 set_irn_n(node, node->op->op_index, left); \
1192 ir_node *get_##OP##_right(ir_node *node) { \
1193 assert(node->op == op_##OP); \
1194 return get_irn_n(node, node->op->op_index + 1); \
1196 void set_##OP##_right(ir_node *node, ir_node *right) { \
1197 assert(node->op == op_##OP); \
1198 set_irn_n(node, node->op->op_index + 1, right); \
1202 ir_node *get_##OP##_op(ir_node *node) { \
1203 assert(node->op == op_##OP); \
1204 return get_irn_n(node, node->op->op_index); \
1206 void set_##OP##_op (ir_node *node, ir_node *op) { \
1207 assert(node->op == op_##OP); \
1208 set_irn_n(node, node->op->op_index, op); \
1218 get_Quot_mem (ir_node *node) {
1219 assert (node->op == op_Quot);
1220 return get_irn_n(node, 0);
1224 set_Quot_mem (ir_node *node, ir_node *mem) {
1225 assert (node->op == op_Quot);
1226 set_irn_n(node, 0, mem);
1232 get_DivMod_mem (ir_node *node) {
1233 assert (node->op == op_DivMod);
1234 return get_irn_n(node, 0);
1238 set_DivMod_mem (ir_node *node, ir_node *mem) {
1239 assert (node->op == op_DivMod);
1240 set_irn_n(node, 0, mem);
1246 get_Div_mem (ir_node *node) {
1247 assert (node->op == op_Div);
1248 return get_irn_n(node, 0);
1252 set_Div_mem (ir_node *node, ir_node *mem) {
1253 assert (node->op == op_Div);
1254 set_irn_n(node, 0, mem);
1260 get_Mod_mem (ir_node *node) {
1261 assert (node->op == op_Mod);
1262 return get_irn_n(node, 0);
1266 set_Mod_mem (ir_node *node, ir_node *mem) {
1267 assert (node->op == op_Mod);
1268 set_irn_n(node, 0, mem);
1285 get_Cast_type (ir_node *node) {
1286 assert (node->op == op_Cast);
1287 return node->attr.cast.totype;
1291 set_Cast_type (ir_node *node, type *to_tp) {
1292 assert (node->op == op_Cast);
1293 node->attr.cast.totype = to_tp;
1297 (is_unop)(const ir_node *node) {
1298 return __is_unop(node);
1302 get_unop_op (ir_node *node) {
1303 if (node->op->opar == oparity_unary)
1304 return get_irn_n(node, node->op->op_index);
1306 assert(node->op->opar == oparity_unary);
1311 set_unop_op (ir_node *node, ir_node *op) {
1312 if (node->op->opar == oparity_unary)
1313 set_irn_n(node, node->op->op_index, op);
1315 assert(node->op->opar == oparity_unary);
1319 (is_binop)(const ir_node *node) {
1320 return __is_binop(node);
1324 get_binop_left (ir_node *node) {
1325 if (node->op->opar == oparity_binary)
1326 return get_irn_n(node, node->op->op_index);
1328 assert(node->op->opar == oparity_binary);
1333 set_binop_left (ir_node *node, ir_node *left) {
1334 if (node->op->opar == oparity_binary)
1335 set_irn_n(node, node->op->op_index, left);
1337 assert (node->op->opar == oparity_binary);
1341 get_binop_right (ir_node *node) {
1342 if (node->op->opar == oparity_binary)
1343 return get_irn_n(node, node->op->op_index + 1);
1345 assert(node->op->opar == oparity_binary);
1350 set_binop_right (ir_node *node, ir_node *right) {
1351 if (node->op->opar == oparity_binary)
1352 set_irn_n(node, node->op->op_index + 1, right);
1354 assert (node->op->opar == oparity_binary);
1357 int is_Phi (ir_node *n) {
1363 if (op == op_Filter) return get_interprocedural_view();
1366 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1367 (get_irn_arity(n) > 0));
1372 int is_Phi0 (ir_node *n) {
1375 return ((get_irn_op(n) == op_Phi) &&
1376 (get_irn_arity(n) == 0) &&
1377 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1381 get_Phi_preds_arr (ir_node *node) {
1382 assert (node->op == op_Phi);
1383 return (ir_node **)&(get_irn_in(node)[1]);
1387 get_Phi_n_preds (ir_node *node) {
1388 assert (is_Phi(node) || is_Phi0(node));
1389 return (get_irn_arity(node));
1393 void set_Phi_n_preds (ir_node *node, int n_preds) {
1394 assert (node->op == op_Phi);
1399 get_Phi_pred (ir_node *node, int pos) {
1400 assert (is_Phi(node) || is_Phi0(node));
1401 return get_irn_n(node, pos);
1405 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1406 assert (is_Phi(node) || is_Phi0(node));
1407 set_irn_n(node, pos, pred);
1411 int is_memop(ir_node *node) {
1412 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1415 ir_node *get_memop_mem (ir_node *node) {
1416 assert(is_memop(node));
1417 return get_irn_n(node, 0);
1420 void set_memop_mem (ir_node *node, ir_node *mem) {
1421 assert(is_memop(node));
1422 set_irn_n(node, 0, mem);
1425 ir_node *get_memop_ptr (ir_node *node) {
1426 assert(is_memop(node));
1427 return get_irn_n(node, 1);
1430 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1431 assert(is_memop(node));
1432 set_irn_n(node, 1, ptr);
/* --- Load node accessors ---------------------------------------------- */
/* Operands: input 0 = memory, input 1 = address.  Attributes: the mode    */
/* of the loaded value and a volatility flag.                              */
1436 get_Load_mem (ir_node *node) {
1437 assert (node->op == op_Load);
1438 return get_irn_n(node, 0);
1442 set_Load_mem (ir_node *node, ir_node *mem) {
1443 assert (node->op == op_Load);
1444 set_irn_n(node, 0, mem);
1448 get_Load_ptr (ir_node *node) {
1449 assert (node->op == op_Load);
1450 return get_irn_n(node, 1);
1454 set_Load_ptr (ir_node *node, ir_node *ptr) {
1455 assert (node->op == op_Load);
1456 set_irn_n(node, 1, ptr);
/* Mode of the value produced by this Load (stored as an attribute,       */
/* since the Load node itself has mode T).                                */
1460 get_Load_mode (ir_node *node) {
1461 assert (node->op == op_Load);
1462 return node->attr.load.load_mode;
1466 set_Load_mode (ir_node *node, ir_mode *mode) {
1467 assert (node->op == op_Load);
1468 node->attr.load.load_mode = mode;
/* Volatility marker: volatile loads must not be optimized away.          */
1472 get_Load_volatility (ir_node *node) {
1473 assert (node->op == op_Load);
1474 return node->attr.load.volatility;
1478 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1479 assert (node->op == op_Load);
1480 node->attr.load.volatility = volatility;
/* --- Store node accessors --------------------------------------------- */
/* Operands: input 0 = memory, input 1 = address, input 2 = stored value.  */
/* Attribute: volatility flag.                                             */
1485 get_Store_mem (ir_node *node) {
1486 assert (node->op == op_Store);
1487 return get_irn_n(node, 0);
1491 set_Store_mem (ir_node *node, ir_node *mem) {
1492 assert (node->op == op_Store);
1493 set_irn_n(node, 0, mem);
1497 get_Store_ptr (ir_node *node) {
1498 assert (node->op == op_Store);
1499 return get_irn_n(node, 1);
1503 set_Store_ptr (ir_node *node, ir_node *ptr) {
1504 assert (node->op == op_Store);
1505 set_irn_n(node, 1, ptr);
1509 get_Store_value (ir_node *node) {
1510 assert (node->op == op_Store);
1511 return get_irn_n(node, 2);
1515 set_Store_value (ir_node *node, ir_node *value) {
1516 assert (node->op == op_Store);
1517 set_irn_n(node, 2, value);
/* Volatility marker: volatile stores must not be optimized away.         */
1521 get_Store_volatility (ir_node *node) {
1522 assert (node->op == op_Store);
1523 return node->attr.store.volatility;
1527 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1528 assert (node->op == op_Store);
1529 node->attr.store.volatility = volatility;
/* --- Alloc node accessors --------------------------------------------- */
/* Operands: input 0 = memory, input 1 = size.  Attributes: allocated      */
/* type and where (stack/heap) the allocation happens.                     */
1534 get_Alloc_mem (ir_node *node) {
1535 assert (node->op == op_Alloc);
1536 return get_irn_n(node, 0);
1540 set_Alloc_mem (ir_node *node, ir_node *mem) {
1541 assert (node->op == op_Alloc);
1542 set_irn_n(node, 0, mem);
1546 get_Alloc_size (ir_node *node) {
1547 assert (node->op == op_Alloc);
1548 return get_irn_n(node, 1);
1552 set_Alloc_size (ir_node *node, ir_node *size) {
1553 assert (node->op == op_Alloc);
1554 set_irn_n(node, 1, size);
/* Normalizes the stored type through skip_tid and caches the result      */
/* back into the attribute before returning it.                           */
1558 get_Alloc_type (ir_node *node) {
1559 assert (node->op == op_Alloc);
1560 return node->attr.a.type = skip_tid(node->attr.a.type);
1564 set_Alloc_type (ir_node *node, type *tp) {
1565 assert (node->op == op_Alloc);
1566 node->attr.a.type = tp;
1570 get_Alloc_where (ir_node *node) {
1571 assert (node->op == op_Alloc);
1572 return node->attr.a.where;
1576 set_Alloc_where (ir_node *node, where_alloc where) {
1577 assert (node->op == op_Alloc);
1578 node->attr.a.where = where;
/* --- Free node accessors ---------------------------------------------- */
/* Operands: input 0 = memory, input 1 = pointer to free, input 2 = size.  */
1583 get_Free_mem (ir_node *node) {
1584 assert (node->op == op_Free);
1585 return get_irn_n(node, 0);
1589 set_Free_mem (ir_node *node, ir_node *mem) {
1590 assert (node->op == op_Free);
1591 set_irn_n(node, 0, mem);
1595 get_Free_ptr (ir_node *node) {
1596 assert (node->op == op_Free);
1597 return get_irn_n(node, 1);
1601 set_Free_ptr (ir_node *node, ir_node *ptr) {
1602 assert (node->op == op_Free);
1603 set_irn_n(node, 1, ptr);
1607 get_Free_size (ir_node *node) {
1608 assert (node->op == op_Free);
1609 return get_irn_n(node, 2);
1613 set_Free_size (ir_node *node, ir_node *size) {
1614 assert (node->op == op_Free);
1615 set_irn_n(node, 2, size);
/* Normalizes the stored type through skip_tid and caches it back,        */
/* same pattern as get_Alloc_type.                                        */
1619 get_Free_type (ir_node *node) {
1620 assert (node->op == op_Free);
1621 return node->attr.f = skip_tid(node->attr.f);
/* NOTE(review): assignment body elided in this extraction — presumably   */
/* `node->attr.f = tp;`; verify against the repository version.           */
1625 set_Free_type (ir_node *node, type *tp) {
1626 assert (node->op == op_Free);
/* --- Sync node accessors ---------------------------------------------- */
/* A Sync joins several memory values; predecessors start at in[1]         */
/* (in[0] is the block), same layout as Phi/Tuple.                         */
1631 get_Sync_preds_arr (ir_node *node) {
1632 assert (node->op == op_Sync);
1633 return (ir_node **)&(get_irn_in(node)[1]);
1637 get_Sync_n_preds (ir_node *node) {
1638 assert (node->op == op_Sync);
1639 return (get_irn_arity(node));
/* NOTE(review): body elided in this extraction — verify against repo. */
1644 set_Sync_n_preds (ir_node *node, int n_preds) {
1645 assert (node->op == op_Sync);
1650 get_Sync_pred (ir_node *node, int pos) {
1651 assert (node->op == op_Sync);
1652 return get_irn_n(node, pos);
1656 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1657 assert (node->op == op_Sync);
1658 set_irn_n(node, pos, pred);
1662 get_Proj_pred (ir_node *node) {
1663 assert (is_Proj(node));
1664 return get_irn_n(node, 0);
1668 set_Proj_pred (ir_node *node, ir_node *pred) {
1669 assert (is_Proj(node));
1670 set_irn_n(node, 0, pred);
1674 get_Proj_proj (ir_node *node) {
1675 assert (is_Proj(node));
1676 if (get_irn_opcode(node) == iro_Proj) {
1677 return node->attr.proj;
1679 assert(get_irn_opcode(node) == iro_Filter);
1680 return node->attr.filter.proj;
1685 set_Proj_proj (ir_node *node, long proj) {
1686 assert (node->op == op_Proj);
1687 node->attr.proj = proj;
/* --- Tuple node accessors --------------------------------------------- */
/* Predecessors start at in[1]; in[0] is the block.                        */
1691 get_Tuple_preds_arr (ir_node *node) {
1692 assert (node->op == op_Tuple);
1693 return (ir_node **)&(get_irn_in(node)[1]);
1697 get_Tuple_n_preds (ir_node *node) {
1698 assert (node->op == op_Tuple);
1699 return (get_irn_arity(node));
/* NOTE(review): body elided in this extraction — verify against repo. */
1704 set_Tuple_n_preds (ir_node *node, int n_preds) {
1705 assert (node->op == op_Tuple);
1710 get_Tuple_pred (ir_node *node, int pos) {
1711 assert (node->op == op_Tuple);
1712 return get_irn_n(node, pos);
1716 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1717 assert (node->op == op_Tuple);
1718 set_irn_n(node, pos, pred);
/* --- Id node accessors ------------------------------------------------ */
/* An Id forwards its single operand (input 0); see skip_Id below for the */
/* chain-compacting traversal.                                            */
1722 get_Id_pred (ir_node *node) {
1723 assert (node->op == op_Id);
1724 return get_irn_n(node, 0);
1728 set_Id_pred (ir_node *node, ir_node *pred) {
1729 assert (node->op == op_Id);
1730 set_irn_n(node, 0, pred);
/* --- Confirm node accessors ------------------------------------------- */
/* Operand layout: input 0 = confirmed value, input 1 = bound.             */
1733 ir_node *get_Confirm_value (ir_node *node) {
1734 assert (node->op == op_Confirm);
1735 return get_irn_n(node, 0);
1737 void set_Confirm_value (ir_node *node, ir_node *value) {
1738 assert (node->op == op_Confirm);
1739 set_irn_n(node, 0, value);
/* The bound the value is compared against (input 1). */
1741 ir_node *get_Confirm_bound (ir_node *node) {
1742 assert (node->op == op_Confirm);
1743 return get_irn_n(node, 1);
1745 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1746 assert (node->op == op_Confirm);
1747 set_irn_n(node, 0, bound);
/* The comparison relation (pn_Cmp) asserted between value and bound. */
1749 pn_Cmp get_Confirm_cmp (ir_node *node) {
1750 assert (node->op == op_Confirm);
1751 return node->attr.confirm_cmp;
1753 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1754 assert (node->op == op_Confirm);
1755 node->attr.confirm_cmp = cmp;
/* --- Filter node accessors -------------------------------------------- */
/* A Filter is the interprocedural-view counterpart of Proj/Phi.  Its      */
/* interprocedural predecessors live in a separate attr.filter.in_cg       */
/* array whose slot 0 mirrors in[0] (the block).                           */
/* NOTE(review): several bodies below are incomplete — lines were elided   */
/* during extraction (the left-hand original line numbers jump).           */
1760 get_Filter_pred (ir_node *node) {
1761 assert(node->op == op_Filter);
1765 set_Filter_pred (ir_node *node, ir_node *pred) {
1766 assert(node->op == op_Filter);
/* The projection number, stored in the filter-specific attribute slot. */
1770 get_Filter_proj(ir_node *node) {
1771 assert(node->op == op_Filter);
1772 return node->attr.filter.proj;
1775 set_Filter_proj (ir_node *node, long proj) {
1776 assert(node->op == op_Filter);
1777 node->attr.filter.proj = proj;
1780 /* Don't use get_irn_arity, get_irn_n in implementation as access
1781    shall work independent of view!!! */
/* (Re)initializes the interprocedural predecessor array: allocates        */
/* in_cg/backedge on the graph's obstack if absent or of the wrong size,   */
/* copies the block slot, then copies the given predecessors.              */
1782 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1783 assert(node->op == op_Filter);
1784 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1785 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1786 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1787 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1788 node->attr.filter.in_cg[0] = node->in[0];
1790 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets one interprocedural predecessor (pos is 0-based; +1 skips the      */
/* block slot).                                                            */
1793 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1794 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1795 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1796 node->attr.filter.in_cg[pos + 1] = pred;
1798 int get_Filter_n_cg_preds(ir_node *node) {
1799 assert(node->op == op_Filter && node->attr.filter.in_cg);
1800 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1802 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1804 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1806 arity = ARR_LEN(node->attr.filter.in_cg);
1807 assert(pos < arity - 1);
1808 return node->attr.filter.in_cg[pos + 1];
1813 get_irn_irg(ir_node *node) {
1814 if (get_irn_op(node) != op_Block)
1815 node = get_nodes_block(node);
1816 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1817 node = get_nodes_block(node);
1818 assert(get_irn_op(node) == op_Block);
1819 return node->attr.block.irg;
1823 /*----------------------------------------------------------------*/
1824 /* Auxiliary routines */
1825 /*----------------------------------------------------------------*/
1828 skip_Proj (ir_node *node) {
1829 /* don't assert node !!! */
1830 if (node && is_Proj(node)) {
1831 return get_Proj_pred(node);
1838 skip_Tuple (ir_node *node) {
1841 if (!get_opt_normalize()) return node;
1843 node = skip_Id(node);
1844 if (get_irn_op(node) == op_Proj) {
1845 pred = skip_Id(get_Proj_pred(node));
1846 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1847 pred = skip_Id(skip_Tuple(pred));
1848 if (get_irn_op(pred) == op_Tuple)
1849 return get_Tuple_pred(pred, get_Proj_proj(node));
1854 /** returns operand of node if node is a Cast */
1855 ir_node *skip_Cast (ir_node *node) {
1856 if (node && get_irn_op(node) == op_Cast) {
1857 return skip_Id(get_irn_n(node, 0));
/* NOTE(review): TWO definitions of skip_Id appear in this extraction      */
/* (this one and the one below).  The #if/#else/#endif preprocessor lines  */
/* that must select between them were elided — verify against the          */
/* repository version before editing either.                               */
1864 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1865    than any other approach, as Id chains are resolved and all point to the real node, or
1866    all id's are self loops. */
1868 skip_Id (ir_node *node) {
1869 /* don't assert node !!! */
1871 if (!get_opt_normalize()) return node;
1873 /* Don't use get_Id_pred: We get into an endless loop for
1874    self-referencing Ids. */
/* in[0+1] is the Id's operand slot (in[0] is the block). */
1875 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1876 ir_node *rem_pred = node->in[0+1];
1879 assert (get_irn_arity (node) > 0);
/* Temporarily make node a self-loop so the recursion terminates on
   Id cycles, then point the whole chain at the resolved end. */
1881 node->in[0+1] = node;
1882 res = skip_Id(rem_pred);
1883 if (res->op == op_Id) /* self-loop */ return node;
1885 node->in[0+1] = res;
/* NOTE(review): second definition of skip_Id — see the note on the        */
/* definition above; the selecting preprocessor directives were elided     */
/* during extraction.                                                      */
1892 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1893    than any other approach, as Id chains are resolved and all point to the real node, or
1894    all id's are self loops. */
1896 skip_Id (ir_node *node) {
1898 /* don't assert node !!! */
/* Fast paths: not an Id, normalization off, or a single Id whose
   predecessor is already the real node. */
1900 if (!node || (node->op != op_Id)) return node;
1902 if (!get_opt_normalize()) return node;
1904 /* Don't use get_Id_pred: We get into an endless loop for
1905    self-referencing Ids. */
1906 pred = node->in[0+1];
1908 if (pred->op != op_Id) return pred;
1910 if (node != pred) {  /* not a self referencing Id. Resolve Id chain. */
1911 ir_node *rem_pred, *res;
1913 if (pred->op != op_Id) return pred; /* shortcut */
1916 assert (get_irn_arity (node) > 0);
1918 node->in[0+1] = node;  /* turn us into a self referencing Id:  shorten Id cycles. */
1919 res = skip_Id(rem_pred);
1920 if (res->op == op_Id) /* self-loop */ return node;
1922 node->in[0+1] = res;    /* Turn Id chain into Ids all referencing the chain end. */
/* --- Simple node predicates ------------------------------------------- */
/* The (name) parenthesization defeats any same-named macro so an          */
/* out-of-line (addressable) function is emitted that forwards to the      */
/* __-prefixed inline implementation.                                      */
1931 (is_Bad)(const ir_node *node) {
1932 return __is_Bad(node);
1936 (is_no_Block)(const ir_node *node) {
1937 return __is_no_Block(node);
1941 (is_Block)(const ir_node *node) {
1942 return __is_Block(node);
1945 /* returns true if node is a Unknown node. */
1947 is_Unknown (const ir_node *node) {
1949 return (get_irn_op(node) == op_Unknown);
/* A Filter counts as a Proj outside the interprocedural view. */
1953 is_Proj (const ir_node *node) {
1955 return node->op == op_Proj
1956 || (!get_interprocedural_view() && node->op == op_Filter);
1959 /* Returns true if the operation manipulates control flow. */
1961 is_cfop(const ir_node *node) {
1962 return is_cfopcode(get_irn_op(node));
1965 /* Returns true if the operation manipulates interprocedural control flow:
1966    CallBegin, EndReg, EndExcept */
1967 int is_ip_cfop(const ir_node *node) {
1968 return is_ip_cfopcode(get_irn_op(node));
1971 /* Returns true if the operation can change the control flow because
1974 is_fragile_op(const ir_node *node) {
1975 return is_op_fragile(get_irn_op(node));
1978 /* Returns the memory operand of fragile operations. */
/* NOTE(review): the case labels of this switch were elided in the        */
/* extraction; only the shared `return get_irn_n(node, 0)` and the        */
/* unreachable-default assert are visible — verify against the repo.      */
1979 ir_node *get_fragile_op_mem(ir_node *node) {
1980 assert(node && is_fragile_op(node));
1982 switch (get_irn_opcode (node)) {
1991 return get_irn_n(node, 0);
1996 assert(0 && "should not be reached");
2001 /* Returns true if the operation is a forking control flow operation. */
2003 is_forking_op(const ir_node *node) {
2004 return is_op_forking(get_irn_op(node));
2007 #ifdef DEBUG_libfirm
/* Debug helper: prints a node's op, mode, number, address, its block     */
/* (predecessor -1) and all data predecessors to stdout.                   */
2008 void dump_irn (ir_node *n) {
2009 int i, arity = get_irn_arity(n);
2010 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* get_irn_n(n, -1) yields the node's block. */
2012 ir_node *pred = get_irn_n(n, -1);
2013 printf("  block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2014 get_irn_node_nr(pred), (void *)pred);
2016 printf("  preds: \n");
2017 for (i = 0; i < arity; ++i) {
2018 ir_node *pred = get_irn_n(n, i);
2019 printf("    %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2020 get_irn_node_nr(pred), (void *)pred);
2024 #else  /* DEBUG_libfirm */
/* Release builds: keep the symbol but print nothing. */
2025 void dump_irn (ir_node *n) {}
2026 #endif /* DEBUG_libfirm */