3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
* Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
* Returns the pnc name from a pnc constant.
* NOTE(review): no range check -- pnc must be a valid index into pnc_name_arr.
const char *get_pnc_string(int pnc) {
  return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
get_negated_pnc(int pnc) {
  /* Each condition code maps to its logical negation: the ordered codes
     (Eq, Lt, ...) negate to the corresponding unordered ("U*") codes and
     vice versa, since negation must also flip the "unordered" outcome. */
  case False: return True; break;
  case Eq: return Ne; break;
  case Lt: return Uge; break;
  case Le: return Ug; break;
  case Gt: return Ule; break;
  case Ge: return Ul; break;
  case Lg: return Ue; break;
  case Leg: return Uo; break;
  case Uo: return Leg; break;
  case Ue: return Lg; break;
  case Ul: return Ge; break;
  case Ule: return Gt; break;
  case Ug: return Le; break;
  case Uge: return Lt; break;
  case Ne: return Eq; break;
  case True: return False; break;
  return 99; /* unreachable: all pnc values handled above; silences gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
             int arity, ir_node **in)
  /* Allocate the node together with its op-specific attribute area in one
     obstack chunk, then zero it. */
  int node_size = offsetof (ir_node, attr) + op->attr_size;
  assert(irg && op && mode);
  res = (ir_node *) obstack_alloc (irg->obst, node_size);
  memset((void *)res, 0, node_size);
  res->kind = k_ir_node;
  /* Dynamic-arity case (arity < 0): flexible array, slot 0 reserved for
     the block.  NOTE(review): the if/else branch structure is elided in
     this excerpt. */
  res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
  /* Fixed-arity case: obstack array; predecessors copied behind slot 0. */
  res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
  set_irn_dbg_info(res, db);
  /* Unique, monotonically increasing debug number. */
  res->node_nr = get_irp_new_node_nr();
/* Copies all attributes stored in the old node to the new node.
   Assumes both have the same opcode and sufficient size. */
copy_attrs (const ir_node *old_node, ir_node *new_node) {
  assert(get_irn_op(old_node) == get_irn_op(new_node));
  /* attr is an opcode-dependent union; the op's attr_size says how many
     bytes are live for this opcode. */
  memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
159 (get_irn_arity)(const ir_node *node) {
160 return __get_irn_arity(node);
/* Returns the array with ins. This array is shifted with respect to the
   array accessed by get_irn_n: The block operand is at position 0 not -1.
   (@@@ This should be changed.)
   The order of the predecessors in this array is not guaranteed, except that
   lists of operands as predecessors of Block or arguments of a Call are */
get_irn_in (const ir_node *node) {
  if (interprocedural_view) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      /* Filter nodes keep their interprocedural ins in a separate array. */
      assert(node->attr.filter.in_cg);
      return node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      /* Blocks use in_cg only if it has been set up. */
      return node->attr.block.in_cg;
    /* else fall through */
set_irn_in (ir_node *node, int arity, ir_node **in) {
  if (interprocedural_view) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      /* Replace the interprocedural in array rather than the ordinary one. */
      assert(node->attr.filter.in_cg);
      arr = &node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      arr = &node->attr.block.in_cg;
  /* Reallocate only when the arity actually changes; slot 0 (the block)
     is saved so it can be carried over to the new array. */
  if (arity != ARR_LEN(*arr) - 1) {
    ir_node * block = (*arr)[0];
    *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
  /* Backedge info is keyed to the in array; recompute it after resizing. */
  fix_backedges(current_ir_graph->obst, node);
  memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
210 (get_irn_intra_n)(ir_node *node, int n) {
211 return __get_irn_intra_n (node, n);
215 (get_irn_inter_n)(ir_node *node, int n) {
216 return __get_irn_inter_n (node, n);
220 (get_irn_n)(ir_node *node, int n) {
221 return __get_irn_n (node, n);
set_irn_n (ir_node *node, int n, ir_node *in) {
  /* n == -1 addresses the block predecessor. */
  assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
  assert(in && in->kind == k_ir_node);
  if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
    /* Change block pred in both views! */
    node->in[n + 1] = in;
    assert(node->attr.filter.in_cg);
    node->attr.filter.in_cg[n + 1] = in;
  if (interprocedural_view) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      node->attr.filter.in_cg[n + 1] = in;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      node->attr.block.in_cg[n + 1] = in;
  /* else fall through */
  /* Ordinary (intraprocedural) in array; slot 0 holds the block. */
  node->in[n + 1] = in;
250 (get_irn_mode)(const ir_node *node) {
251 return __get_irn_mode(node);
255 (set_irn_mode)(ir_node *node, ir_mode *mode)
257 __set_irn_mode(node, mode);
261 get_irn_modecode (const ir_node *node)
264 return node->mode->code;
267 /** Gets the string representation of the mode .*/
269 get_irn_modename (const ir_node *node)
272 return get_mode_name(node->mode);
276 get_irn_modeident (const ir_node *node)
279 return get_mode_ident(node->mode);
283 (get_irn_op)(const ir_node *node)
285 return __get_irn_op(node);
288 /* should be private to the library: */
290 set_irn_op (ir_node *node, ir_op *op)
297 (get_irn_opcode)(const ir_node *node)
299 return __get_irn_opcode(node);
303 get_irn_opname (const ir_node *node)
306 if ((get_irn_op((ir_node *)node) == op_Phi) &&
307 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
308 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
309 return get_id_str(node->op->name);
313 get_irn_opident (const ir_node *node)
316 return node->op->name;
320 (get_irn_visited)(const ir_node *node)
322 return __get_irn_visited(node);
326 (set_irn_visited)(ir_node *node, unsigned long visited)
328 __set_irn_visited(node, visited);
332 (mark_irn_visited)(ir_node *node) {
333 __mark_irn_visited(node);
337 (irn_not_visited)(const ir_node *node) {
338 return __irn_not_visited(node);
342 (irn_visited)(const ir_node *node) {
343 return __irn_visited(node);
347 (set_irn_link)(ir_node *node, void *link) {
348 __set_irn_link(node, link);
352 (get_irn_link)(const ir_node *node) {
353 return __get_irn_link(node);
357 #ifdef DO_HEAPANALYSIS
358 /* Access the abstract interpretation information of a node.
359 Returns NULL if no such information is available. */
360 struct abstval *get_irn_abst_value(ir_node *n) {
363 /* Set the abstract interpretation information of a node. */
364 void set_irn_abst_value(ir_node *n, struct abstval *os) {
367 struct section *firm_get_irn_section(ir_node *n) {
370 void firm_set_irn_section(ir_node *n, struct section *s) {
373 #endif /* DO_HEAPANALYSIS */
376 /* Outputs a unique number for this node */
378 get_irn_node_nr(const ir_node *node) {
381 return node->node_nr;
388 get_irn_const_attr (ir_node *node)
390 assert (node->op == op_Const);
391 return node->attr.con;
395 get_irn_proj_attr (ir_node *node)
397 assert (node->op == op_Proj);
398 return node->attr.proj;
402 get_irn_alloc_attr (ir_node *node)
404 assert (node->op == op_Alloc);
409 get_irn_free_attr (ir_node *node)
411 assert (node->op == op_Free);
412 return node->attr.f = skip_tid(node->attr.f);
416 get_irn_symconst_attr (ir_node *node)
418 assert (node->op == op_SymConst);
423 get_irn_call_attr (ir_node *node)
425 assert (node->op == op_Call);
426 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
430 get_irn_funccall_attr (ir_node *node)
432 assert (node->op == op_FuncCall);
433 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
437 get_irn_sel_attr (ir_node *node)
439 assert (node->op == op_Sel);
444 get_irn_phi_attr (ir_node *node)
446 assert (node->op == op_Phi);
447 return node->attr.phi0_pos;
451 get_irn_block_attr (ir_node *node)
453 assert (node->op == op_Block);
454 return node->attr.block;
458 get_irn_load_attr (ir_node *node)
460 assert (node->op == op_Load);
461 return node->attr.load;
465 get_irn_store_attr (ir_node *node)
467 assert (node->op == op_Store);
468 return node->attr.store;
472 get_irn_except_attr (ir_node *node)
474 assert (node->op == op_Div || node->op == op_Quot ||
475 node->op == op_DivMod || node->op == op_Mod);
476 return node->attr.except;
479 /** manipulate fields of individual nodes **/
481 /* this works for all except Block */
483 get_nodes_block (ir_node *node) {
484 assert (!(node->op == op_Block));
485 return get_irn_n(node, -1);
489 set_nodes_block (ir_node *node, ir_node *block) {
490 assert (!(node->op == op_Block));
491 set_irn_n(node, -1, block);
494 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
495 * from Start. If so returns frame type, else Null. */
496 type *is_frame_pointer(ir_node *n) {
497 if ((get_irn_op(n) == op_Proj) &&
498 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
499 ir_node *start = get_Proj_pred(n);
500 if (get_irn_op(start) == op_Start) {
501 return get_irg_frame_type(get_irn_irg(start));
507 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
508 * from Start. If so returns global type, else Null. */
509 type *is_globals_pointer(ir_node *n) {
510 if ((get_irn_op(n) == op_Proj) &&
511 (get_Proj_proj(n) == pn_Start_P_globals)) {
512 ir_node *start = get_Proj_pred(n);
513 if (get_irn_op(start) == op_Start) {
514 return get_glob_type();
520 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
521 * from Start. If so returns 1, else 0. */
522 int is_value_arg_pointer(ir_node *n) {
523 if ((get_irn_op(n) == op_Proj) &&
524 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
525 (get_irn_op(get_Proj_pred(n)) == op_Start))
530 /* Returns an array with the predecessors of the Block. Depending on
531 the implementation of the graph data structure this can be a copy of
532 the internal representation of predecessors as well as the internal
533 array itself. Therefore writing to this array might obstruct the ir. */
535 get_Block_cfgpred_arr (ir_node *node)
537 assert ((node->op == op_Block));
538 return (ir_node **)&(get_irn_in(node)[1]);
543 get_Block_n_cfgpreds (ir_node *node) {
544 assert ((node->op == op_Block));
545 return get_irn_arity(node);
549 get_Block_cfgpred (ir_node *node, int pos) {
551 assert (node->op == op_Block);
552 assert(-1 <= pos && pos < get_irn_arity(node));
553 return get_irn_n(node, pos);
557 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
558 assert (node->op == op_Block);
559 set_irn_n(node, pos, pred);
563 get_Block_matured (ir_node *node) {
564 assert (node->op == op_Block);
565 return node->attr.block.matured;
569 set_Block_matured (ir_node *node, bool matured) {
570 assert (node->op == op_Block);
571 node->attr.block.matured = matured;
574 get_Block_block_visited (ir_node *node) {
575 assert (node->op == op_Block);
576 return node->attr.block.block_visited;
580 set_Block_block_visited (ir_node *node, unsigned long visit) {
581 assert (node->op == op_Block);
582 node->attr.block.block_visited = visit;
585 /* For this current_ir_graph must be set. */
587 mark_Block_block_visited (ir_node *node) {
588 assert (node->op == op_Block);
589 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
593 Block_not_block_visited(ir_node *node) {
594 assert (node->op == op_Block);
595 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
599 get_Block_graph_arr (ir_node *node, int pos) {
600 assert (node->op == op_Block);
601 return node->attr.block.graph_arr[pos+1];
605 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
606 assert (node->op == op_Block);
607 node->attr.block.graph_arr[pos+1] = value;
void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Block);
  /* (Re)allocate the interprocedural predecessor array only when its
     size changed or it does not exist yet. */
  if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
    node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.block.in_cg[0] = NULL;  /* block slot is unused in the cg view */
    node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
    /* Fix backedge array. fix_backedges operates depending on
       interprocedural_view, so force it on and restore afterwards. */
    bool ipv = interprocedural_view;
    interprocedural_view = true;
    fix_backedges(current_ir_graph->obst, node);
    interprocedural_view = ipv;
  memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
628 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
629 assert(node->op == op_Block &&
630 node->attr.block.in_cg &&
631 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
632 node->attr.block.in_cg[pos + 1] = pred;
635 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
636 assert(node->op == op_Block);
637 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
640 int get_Block_cg_n_cfgpreds(ir_node * node) {
641 assert(node->op == op_Block);
642 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
645 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
646 assert(node->op == op_Block && node->attr.block.in_cg);
647 return node->attr.block.in_cg[pos + 1];
650 void remove_Block_cg_cfgpred_arr(ir_node * node) {
651 assert(node->op == op_Block);
652 node->attr.block.in_cg = NULL;
656 set_Start_irg(ir_node *node, ir_graph *irg) {
657 assert(node->op == op_Start);
658 assert(is_ir_graph(irg));
659 assert(0 && " Why set irg? -- use set_irn_irg");
663 get_End_n_keepalives(ir_node *end) {
664 assert (end->op == op_End);
665 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
669 get_End_keepalive(ir_node *end, int pos) {
670 assert (end->op == op_End);
671 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
675 add_End_keepalive (ir_node *end, ir_node *ka) {
676 assert (end->op == op_End);
677 ARR_APP1 (ir_node *, end->in, ka);
681 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
682 assert (end->op == op_End);
683 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
free_End (ir_node *end) {
  assert (end->op == op_End);
  DEL_ARR_F(end->in); /* GL @@@ doesn't work ! */
  end->in = NULL;     /* @@@ make sure we get an error if we use the
                         in array afterwards ... */
697 > Implementing the case construct (which is where the constant Proj node is
698 > important) involves far more than simply determining the constant values.
699 > We could argue that this is more properly a function of the translator from
700 > Firm to the target machine. That could be done if there was some way of
701 > projecting "default" out of the Cond node.
702 I know it's complicated.
Basically there are two problems:
704 - determining the gaps between the projs
705 - determining the biggest case constant to know the proj number for
707 I see several solutions:
708 1. Introduce a ProjDefault node. Solves both problems.
709 This means to extend all optimizations executed during construction.
710 2. Give the Cond node for switch two flavors:
711 a) there are no gaps in the projs (existing flavor)
712 b) gaps may exist, default proj is still the Proj with the largest
713 projection number. This covers also the gaps.
714 3. Fix the semantic of the Cond to that of 2b)
716 Solution 2 seems to be the best:
717 Computing the gaps in the Firm representation is not too hard, i.e.,
718 libFIRM can implement a routine that transforms between the two
719 flavours. This is also possible for 1) but 2) does not require to
720 change any existing optimization.
721 Further it should be far simpler to determine the biggest constant than
723 I don't want to choose 3) as 2a) seems to have advantages for
724 dataflow analysis and 3) does not allow to convert the representation to
728 get_Cond_selector (ir_node *node) {
729 assert (node->op == op_Cond);
730 return get_irn_n(node, 0);
734 set_Cond_selector (ir_node *node, ir_node *selector) {
735 assert (node->op == op_Cond);
736 set_irn_n(node, 0, selector);
740 get_Cond_kind (ir_node *node) {
741 assert (node->op == op_Cond);
742 return node->attr.c.kind;
746 set_Cond_kind (ir_node *node, cond_kind kind) {
747 assert (node->op == op_Cond);
748 node->attr.c.kind = kind;
752 get_Cond_defaultProj (ir_node *node) {
753 assert (node->op == op_Cond);
754 return node->attr.c.default_proj;
758 get_Return_mem (ir_node *node) {
759 assert (node->op == op_Return);
760 return get_irn_n(node, 0);
764 set_Return_mem (ir_node *node, ir_node *mem) {
765 assert (node->op == op_Return);
766 set_irn_n(node, 0, mem);
770 get_Return_n_ress (ir_node *node) {
771 assert (node->op == op_Return);
772 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
776 get_Return_res_arr (ir_node *node)
778 assert ((node->op == op_Return));
779 if (get_Return_n_ress(node) > 0)
780 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
787 set_Return_n_res (ir_node *node, int results) {
788 assert (node->op == op_Return);
793 get_Return_res (ir_node *node, int pos) {
794 assert (node->op == op_Return);
795 assert (get_Return_n_ress(node) > pos);
796 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
800 set_Return_res (ir_node *node, int pos, ir_node *res){
801 assert (node->op == op_Return);
802 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
806 get_Raise_mem (ir_node *node) {
807 assert (node->op == op_Raise);
808 return get_irn_n(node, 0);
812 set_Raise_mem (ir_node *node, ir_node *mem) {
813 assert (node->op == op_Raise);
814 set_irn_n(node, 0, mem);
818 get_Raise_exo_ptr (ir_node *node) {
819 assert (node->op == op_Raise);
820 return get_irn_n(node, 1);
824 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
825 assert (node->op == op_Raise);
826 set_irn_n(node, 1, exo_ptr);
829 tarval *get_Const_tarval (ir_node *node) {
830 assert (node->op == op_Const);
831 return node->attr.con.tv;
835 set_Const_tarval (ir_node *node, tarval *con) {
836 assert (node->op == op_Const);
837 node->attr.con.tv = con;
841 /* The source language type. Must be an atomic type. Mode of type must
842 be mode of node. For tarvals from entities type must be pointer to
845 get_Const_type (ir_node *node) {
846 assert (node->op == op_Const);
847 return node->attr.con.tp;
851 set_Const_type (ir_node *node, type *tp) {
852 assert (node->op == op_Const);
853 if (tp != unknown_type) {
854 assert (is_atomic_type(tp));
855 assert (get_type_mode(tp) == get_irn_mode(node));
858 node->attr.con.tp = tp;
863 get_SymConst_kind (const ir_node *node) {
864 assert (node->op == op_SymConst);
865 return node->attr.i.num;
869 set_SymConst_kind (ir_node *node, symconst_kind num) {
870 assert (node->op == op_SymConst);
871 node->attr.i.num = num;
875 get_SymConst_type (ir_node *node) {
876 assert ( (node->op == op_SymConst)
877 && ( get_SymConst_kind(node) == symconst_type_tag
878 || get_SymConst_kind(node) == symconst_size));
879 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
883 set_SymConst_type (ir_node *node, type *tp) {
884 assert ( (node->op == op_SymConst)
885 && ( get_SymConst_kind(node) == symconst_type_tag
886 || get_SymConst_kind(node) == symconst_size));
887 node->attr.i.sym.type_p = tp;
891 get_SymConst_name (ir_node *node) {
892 assert ( (node->op == op_SymConst)
893 && (get_SymConst_kind(node) == symconst_addr_name));
894 return node->attr.i.sym.ident_p;
898 set_SymConst_name (ir_node *node, ident *name) {
899 assert ( (node->op == op_SymConst)
900 && (get_SymConst_kind(node) == symconst_addr_name));
901 node->attr.i.sym.ident_p = name;
905 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
906 entity *get_SymConst_entity (ir_node *node) {
907 assert ( (node->op == op_SymConst)
908 && (get_SymConst_kind (node) == symconst_addr_ent));
909 return node->attr.i.sym.entity_p;
912 void set_SymConst_entity (ir_node *node, entity *ent) {
913 assert ( (node->op == op_SymConst)
914 && (get_SymConst_kind(node) == symconst_addr_ent));
915 node->attr.i.sym.entity_p = ent;
919 union symconst_symbol
920 get_SymConst_symbol (ir_node *node) {
921 assert (node->op == op_SymConst);
922 return node->attr.i.sym;
set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
  assert (node->op == op_SymConst);
  /* Plain union assignment; copies whichever member (type, ident or
     entity pointer) is active for this SymConst kind. */
  node->attr.i.sym = sym;
933 get_Sel_mem (ir_node *node) {
934 assert (node->op == op_Sel);
935 return get_irn_n(node, 0);
939 set_Sel_mem (ir_node *node, ir_node *mem) {
940 assert (node->op == op_Sel);
941 set_irn_n(node, 0, mem);
945 get_Sel_ptr (ir_node *node) {
946 assert (node->op == op_Sel);
947 return get_irn_n(node, 1);
951 set_Sel_ptr (ir_node *node, ir_node *ptr) {
952 assert (node->op == op_Sel);
953 set_irn_n(node, 1, ptr);
957 get_Sel_n_indexs (ir_node *node) {
958 assert (node->op == op_Sel);
959 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
963 get_Sel_index_arr (ir_node *node)
965 assert ((node->op == op_Sel));
966 if (get_Sel_n_indexs(node) > 0)
967 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
973 get_Sel_index (ir_node *node, int pos) {
974 assert (node->op == op_Sel);
975 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
979 set_Sel_index (ir_node *node, int pos, ir_node *index) {
980 assert (node->op == op_Sel);
981 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
985 get_Sel_entity (ir_node *node) {
986 assert (node->op == op_Sel);
987 return node->attr.s.ent;
991 set_Sel_entity (ir_node *node, entity *ent) {
992 assert (node->op == op_Sel);
993 node->attr.s.ent = ent;
997 get_InstOf_ent (ir_node *node) {
998 assert (node->op = op_InstOf);
999 return (node->attr.io.ent);
1003 set_InstOf_ent (ir_node *node, type *ent) {
1004 assert (node->op = op_InstOf);
1005 node->attr.io.ent = ent;
1009 get_InstOf_store (ir_node *node) {
1010 assert (node->op = op_InstOf);
1011 return (get_irn_n (node, 0));
1015 set_InstOf_store (ir_node *node, ir_node *obj) {
1016 assert (node->op = op_InstOf);
1017 set_irn_n (node, 0, obj);
1021 get_InstOf_obj (ir_node *node) {
1022 assert (node->op = op_InstOf);
1023 return (get_irn_n (node, 1));
1027 set_InstOf_obj (ir_node *node, ir_node *obj) {
1028 assert (node->op = op_InstOf);
1029 set_irn_n (node, 1, obj);
1033 /* For unary and binary arithmetic operations the access to the
1034 operands can be factored out. Left is the first, right the
1035 second arithmetic value as listed in tech report 0999-33.
1036 unops are: Minus, Abs, Not, Conv, Cast
1037 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1038 Shr, Shrs, Rotate, Cmp */
1042 get_Call_mem (ir_node *node) {
1043 assert (node->op == op_Call);
1044 return get_irn_n(node, 0);
1048 set_Call_mem (ir_node *node, ir_node *mem) {
1049 assert (node->op == op_Call);
1050 set_irn_n(node, 0, mem);
1054 get_Call_ptr (ir_node *node) {
1055 assert (node->op == op_Call);
1056 return get_irn_n(node, 1);
1060 set_Call_ptr (ir_node *node, ir_node *ptr) {
1061 assert (node->op == op_Call);
1062 set_irn_n(node, 1, ptr);
1066 get_Call_param_arr (ir_node *node) {
1067 assert (node->op == op_Call);
1068 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1072 get_Call_n_params (ir_node *node) {
1073 assert (node->op == op_Call);
1074 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1078 get_Call_arity (ir_node *node) {
1079 assert (node->op == op_Call);
1080 return get_Call_n_params(node);
1084 set_Call_arity (ir_node *node, ir_node *arity) {
1085 assert (node->op == op_Call);
1090 get_Call_param (ir_node *node, int pos) {
1091 assert (node->op == op_Call);
1092 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1096 set_Call_param (ir_node *node, int pos, ir_node *param) {
1097 assert (node->op == op_Call);
1098 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1102 get_Call_type (ir_node *node) {
1103 assert (node->op == op_Call);
1104 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1108 set_Call_type (ir_node *node, type *tp) {
1109 assert (node->op == op_Call);
1110 assert (is_method_type(tp));
1111 node->attr.call.cld_tp = tp;
1114 int Call_has_callees(ir_node *node) {
1115 assert(node && node->op == op_Call);
1116 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1117 (node->attr.call.callee_arr != NULL));
1120 int get_Call_n_callees(ir_node * node) {
1121 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1122 return ARR_LEN(node->attr.call.callee_arr);
1125 entity * get_Call_callee(ir_node * node, int pos) {
1126 assert(pos >= 0 && pos < get_Call_n_callees(node));
1127 return node->attr.call.callee_arr[pos];
1130 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1131 assert(node->op == op_Call);
1132 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1133 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1135 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1138 void remove_Call_callee_arr(ir_node * node) {
1139 assert(node->op == op_Call);
1140 node->attr.call.callee_arr = NULL;
1143 ir_node * get_CallBegin_ptr (ir_node *node) {
1144 assert(node->op == op_CallBegin);
1145 return get_irn_n(node, 0);
1147 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1148 assert(node->op == op_CallBegin);
1149 set_irn_n(node, 0, ptr);
1151 ir_node * get_CallBegin_call (ir_node *node) {
1152 assert(node->op == op_CallBegin);
1153 return node->attr.callbegin.call;
1155 void set_CallBegin_call (ir_node *node, ir_node *call) {
1156 assert(node->op == op_CallBegin);
1157 node->attr.callbegin.call = call;
1161 get_FuncCall_ptr (ir_node *node) {
1162 assert (node->op == op_FuncCall);
1163 return get_irn_n(node, 0);
1167 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1168 assert (node->op == op_FuncCall);
1169 set_irn_n(node, 0, ptr);
1173 get_FuncCall_param_arr (ir_node *node) {
1174 assert (node->op == op_FuncCall);
1175 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1179 get_FuncCall_n_params (ir_node *node) {
1180 assert (node->op == op_FuncCall);
1181 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1185 get_FuncCall_arity (ir_node *node) {
1186 assert (node->op == op_FuncCall);
1187 return get_FuncCall_n_params(node);
1191 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1192 assert (node->op == op_FuncCall);
1197 get_FuncCall_param (ir_node *node, int pos) {
1198 assert (node->op == op_FuncCall);
1199 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1203 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1204 assert (node->op == op_FuncCall);
1205 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1209 get_FuncCall_type (ir_node *node) {
1210 assert (node->op == op_FuncCall);
1211 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1215 set_FuncCall_type (ir_node *node, type *tp) {
1216 assert (node->op == op_FuncCall);
1217 assert (is_method_type(tp));
1218 node->attr.call.cld_tp = tp;
1221 int FuncCall_has_callees(ir_node *node) {
1222 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1223 (node->attr.call.callee_arr != NULL));
1226 int get_FuncCall_n_callees(ir_node * node) {
1227 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1228 return ARR_LEN(node->attr.call.callee_arr);
1231 entity * get_FuncCall_callee(ir_node * node, int pos) {
1232 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1233 return node->attr.call.callee_arr[pos];
1236 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1237 assert(node->op == op_FuncCall);
1238 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1239 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1241 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1244 void remove_FuncCall_callee_arr(ir_node * node) {
1245 assert(node->op == op_FuncCall);
1246 node->attr.call.callee_arr = NULL;
1251 ir_node * get_##OP##_left(ir_node *node) { \
1252 assert(node->op == op_##OP); \
1253 return get_irn_n(node, node->op->op_index); \
1255 void set_##OP##_left(ir_node *node, ir_node *left) { \
1256 assert(node->op == op_##OP); \
1257 set_irn_n(node, node->op->op_index, left); \
1259 ir_node *get_##OP##_right(ir_node *node) { \
1260 assert(node->op == op_##OP); \
1261 return get_irn_n(node, node->op->op_index + 1); \
1263 void set_##OP##_right(ir_node *node, ir_node *right) { \
1264 assert(node->op == op_##OP); \
1265 set_irn_n(node, node->op->op_index + 1, right); \
1269 ir_node *get_##OP##_op(ir_node *node) { \
1270 assert(node->op == op_##OP); \
1271 return get_irn_n(node, node->op->op_index); \
1273 void set_##OP##_op (ir_node *node, ir_node *op) { \
1274 assert(node->op == op_##OP); \
1275 set_irn_n(node, node->op->op_index, op); \
1285 get_Quot_mem (ir_node *node) {
1286 assert (node->op == op_Quot);
1287 return get_irn_n(node, 0);
1291 set_Quot_mem (ir_node *node, ir_node *mem) {
1292 assert (node->op == op_Quot);
1293 set_irn_n(node, 0, mem);
1299 get_DivMod_mem (ir_node *node) {
1300 assert (node->op == op_DivMod);
1301 return get_irn_n(node, 0);
1305 set_DivMod_mem (ir_node *node, ir_node *mem) {
1306 assert (node->op == op_DivMod);
1307 set_irn_n(node, 0, mem);
1313 get_Div_mem (ir_node *node) {
1314 assert (node->op == op_Div);
1315 return get_irn_n(node, 0);
1319 set_Div_mem (ir_node *node, ir_node *mem) {
1320 assert (node->op == op_Div);
1321 set_irn_n(node, 0, mem);
1327 get_Mod_mem (ir_node *node) {
1328 assert (node->op == op_Mod);
1329 return get_irn_n(node, 0);
1333 set_Mod_mem (ir_node *node, ir_node *mem) {
1334 assert (node->op == op_Mod);
1335 set_irn_n(node, 0, mem);
1352 get_Cast_type (ir_node *node) {
1353 assert (node->op == op_Cast);
1354 return node->attr.cast.totype;
1358 set_Cast_type (ir_node *node, type *to_tp) {
1359 assert (node->op == op_Cast);
1360 node->attr.cast.totype = to_tp;
1364 is_unop (ir_node *node) {
1365 return (node->op->opar == oparity_unary);
1369 get_unop_op (ir_node *node) {
1370 if (node->op->opar == oparity_unary)
1371 return get_irn_n(node, node->op->op_index);
1373 assert(node->op->opar == oparity_unary);
1378 set_unop_op (ir_node *node, ir_node *op) {
1379 if (node->op->opar == oparity_unary)
1380 set_irn_n(node, node->op->op_index, op);
1382 assert(node->op->opar == oparity_unary);
1386 is_binop (ir_node *node) {
1387 return (node->op->opar == oparity_binary);
1391 get_binop_left (ir_node *node) {
1392 if (node->op->opar == oparity_binary)
1393 return get_irn_n(node, node->op->op_index);
1395 assert(node->op->opar == oparity_binary);
1400 set_binop_left (ir_node *node, ir_node *left) {
1401 if (node->op->opar == oparity_binary)
1402 set_irn_n(node, node->op->op_index, left);
1404 assert (node->op->opar == oparity_binary);
1408 get_binop_right (ir_node *node) {
1409 if (node->op->opar == oparity_binary)
1410 return get_irn_n(node, node->op->op_index + 1);
1412 assert(node->op->opar == oparity_binary);
1417 set_binop_right (ir_node *node, ir_node *right) {
1418 if (node->op->opar == oparity_binary)
1419 set_irn_n(node, node->op->op_index + 1, right);
1421 assert (node->op->opar == oparity_binary);
1424 int is_Phi (ir_node *n) {
1430 if (op == op_Filter) return interprocedural_view;
1433 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1434 (get_irn_arity(n) > 0));
1439 int is_Phi0 (ir_node *n) {
1442 return ((get_irn_op(n) == op_Phi) &&
1443 (get_irn_arity(n) == 0) &&
1444 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1448 get_Phi_preds_arr (ir_node *node) {
1449 assert (node->op == op_Phi);
1450 return (ir_node **)&(get_irn_in(node)[1]);
1454 get_Phi_n_preds (ir_node *node) {
1455 assert (is_Phi(node) || is_Phi0(node));
1456 return (get_irn_arity(node));
1460 void set_Phi_n_preds (ir_node *node, int n_preds) {
1461 assert (node->op == op_Phi);
1466 get_Phi_pred (ir_node *node, int pos) {
1467 assert (is_Phi(node) || is_Phi0(node));
1468 return get_irn_n(node, pos);
1472 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1473 assert (is_Phi(node) || is_Phi0(node));
1474 set_irn_n(node, pos, pred);
1478 int is_memop(ir_node *node) {
1479 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1482 ir_node *get_memop_mem (ir_node *node) {
1483 assert(is_memop(node));
1484 return get_irn_n(node, 0);
1487 void set_memop_mem (ir_node *node, ir_node *mem) {
1488 assert(is_memop(node));
1489 set_irn_n(node, 0, mem);
1492 ir_node *get_memop_ptr (ir_node *node) {
1493 assert(is_memop(node));
1494 return get_irn_n(node, 1);
1497 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1498 assert(is_memop(node));
1499 set_irn_n(node, 1, ptr);
1503 get_Load_mem (ir_node *node) {
1504 assert (node->op == op_Load);
1505 return get_irn_n(node, 0);
1509 set_Load_mem (ir_node *node, ir_node *mem) {
1510 assert (node->op == op_Load);
1511 set_irn_n(node, 0, mem);
1515 get_Load_ptr (ir_node *node) {
1516 assert (node->op == op_Load);
1517 return get_irn_n(node, 1);
1521 set_Load_ptr (ir_node *node, ir_node *ptr) {
1522 assert (node->op == op_Load);
1523 set_irn_n(node, 1, ptr);
1527 get_Load_mode (ir_node *node) {
1528 assert (node->op == op_Load);
1529 return node->attr.load.load_mode;
1533 set_Load_mode (ir_node *node, ir_mode *mode) {
1534 assert (node->op == op_Load);
1535 node->attr.load.load_mode = mode;
1539 get_Load_volatility (ir_node *node) {
1540 assert (node->op == op_Load);
1541 return node->attr.load.volatility;
1545 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1546 assert (node->op == op_Load);
1547 node->attr.load.volatility = volatility;
1552 get_Store_mem (ir_node *node) {
1553 assert (node->op == op_Store);
1554 return get_irn_n(node, 0);
1558 set_Store_mem (ir_node *node, ir_node *mem) {
1559 assert (node->op == op_Store);
1560 set_irn_n(node, 0, mem);
1564 get_Store_ptr (ir_node *node) {
1565 assert (node->op == op_Store);
1566 return get_irn_n(node, 1);
1570 set_Store_ptr (ir_node *node, ir_node *ptr) {
1571 assert (node->op == op_Store);
1572 set_irn_n(node, 1, ptr);
1576 get_Store_value (ir_node *node) {
1577 assert (node->op == op_Store);
1578 return get_irn_n(node, 2);
1582 set_Store_value (ir_node *node, ir_node *value) {
1583 assert (node->op == op_Store);
1584 set_irn_n(node, 2, value);
1588 get_Store_volatility (ir_node *node) {
1589 assert (node->op == op_Store);
1590 return node->attr.store.volatility;
1594 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1595 assert (node->op == op_Store);
1596 node->attr.store.volatility = volatility;
1601 get_Alloc_mem (ir_node *node) {
1602 assert (node->op == op_Alloc);
1603 return get_irn_n(node, 0);
1607 set_Alloc_mem (ir_node *node, ir_node *mem) {
1608 assert (node->op == op_Alloc);
1609 set_irn_n(node, 0, mem);
1613 get_Alloc_size (ir_node *node) {
1614 assert (node->op == op_Alloc);
1615 return get_irn_n(node, 1);
1619 set_Alloc_size (ir_node *node, ir_node *size) {
1620 assert (node->op == op_Alloc);
1621 set_irn_n(node, 1, size);
1625 get_Alloc_type (ir_node *node) {
1626 assert (node->op == op_Alloc);
1627 return node->attr.a.type = skip_tid(node->attr.a.type);
1631 set_Alloc_type (ir_node *node, type *tp) {
1632 assert (node->op == op_Alloc);
1633 node->attr.a.type = tp;
1637 get_Alloc_where (ir_node *node) {
1638 assert (node->op == op_Alloc);
1639 return node->attr.a.where;
1643 set_Alloc_where (ir_node *node, where_alloc where) {
1644 assert (node->op == op_Alloc);
1645 node->attr.a.where = where;
1650 get_Free_mem (ir_node *node) {
1651 assert (node->op == op_Free);
1652 return get_irn_n(node, 0);
1656 set_Free_mem (ir_node *node, ir_node *mem) {
1657 assert (node->op == op_Free);
1658 set_irn_n(node, 0, mem);
1662 get_Free_ptr (ir_node *node) {
1663 assert (node->op == op_Free);
1664 return get_irn_n(node, 1);
1668 set_Free_ptr (ir_node *node, ir_node *ptr) {
1669 assert (node->op == op_Free);
1670 set_irn_n(node, 1, ptr);
1674 get_Free_size (ir_node *node) {
1675 assert (node->op == op_Free);
1676 return get_irn_n(node, 2);
1680 set_Free_size (ir_node *node, ir_node *size) {
1681 assert (node->op == op_Free);
1682 set_irn_n(node, 2, size);
1686 get_Free_type (ir_node *node) {
1687 assert (node->op == op_Free);
1688 return node->attr.f = skip_tid(node->attr.f);
1692 set_Free_type (ir_node *node, type *tp) {
1693 assert (node->op == op_Free);
1698 get_Sync_preds_arr (ir_node *node) {
1699 assert (node->op == op_Sync);
1700 return (ir_node **)&(get_irn_in(node)[1]);
1704 get_Sync_n_preds (ir_node *node) {
1705 assert (node->op == op_Sync);
1706 return (get_irn_arity(node));
1711 set_Sync_n_preds (ir_node *node, int n_preds) {
1712 assert (node->op == op_Sync);
1717 get_Sync_pred (ir_node *node, int pos) {
1718 assert (node->op == op_Sync);
1719 return get_irn_n(node, pos);
1723 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1724 assert (node->op == op_Sync);
1725 set_irn_n(node, pos, pred);
1729 get_Proj_pred (ir_node *node) {
1730 assert (is_Proj(node));
1731 return get_irn_n(node, 0);
1735 set_Proj_pred (ir_node *node, ir_node *pred) {
1736 assert (is_Proj(node));
1737 set_irn_n(node, 0, pred);
1741 get_Proj_proj (ir_node *node) {
1742 assert (is_Proj(node));
1743 if (get_irn_opcode(node) == iro_Proj) {
1744 return node->attr.proj;
1746 assert(get_irn_opcode(node) == iro_Filter);
1747 return node->attr.filter.proj;
1752 set_Proj_proj (ir_node *node, long proj) {
1753 assert (node->op == op_Proj);
1754 node->attr.proj = proj;
1758 get_Tuple_preds_arr (ir_node *node) {
1759 assert (node->op == op_Tuple);
1760 return (ir_node **)&(get_irn_in(node)[1]);
1764 get_Tuple_n_preds (ir_node *node) {
1765 assert (node->op == op_Tuple);
1766 return (get_irn_arity(node));
1771 set_Tuple_n_preds (ir_node *node, int n_preds) {
1772 assert (node->op == op_Tuple);
1777 get_Tuple_pred (ir_node *node, int pos) {
1778 assert (node->op == op_Tuple);
1779 return get_irn_n(node, pos);
1783 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1784 assert (node->op == op_Tuple);
1785 set_irn_n(node, pos, pred);
1789 get_Id_pred (ir_node *node) {
1790 assert (node->op == op_Id);
1791 return get_irn_n(node, 0);
1795 set_Id_pred (ir_node *node, ir_node *pred) {
1796 assert (node->op == op_Id);
1797 set_irn_n(node, 0, pred);
1800 ir_node *get_Confirm_value (ir_node *node) {
1801 assert (node->op == op_Confirm);
1802 return get_irn_n(node, 0);
1804 void set_Confirm_value (ir_node *node, ir_node *value) {
1805 assert (node->op == op_Confirm);
1806 set_irn_n(node, 0, value);
1808 ir_node *get_Confirm_bound (ir_node *node) {
1809 assert (node->op == op_Confirm);
1810 return get_irn_n(node, 1);
1812 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1813 assert (node->op == op_Confirm);
1814 set_irn_n(node, 0, bound);
1816 pn_Cmp get_Confirm_cmp (ir_node *node) {
1817 assert (node->op == op_Confirm);
1818 return node->attr.confirm_cmp;
1820 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1821 assert (node->op == op_Confirm);
1822 node->attr.confirm_cmp = cmp;
1827 get_Filter_pred (ir_node *node) {
1828 assert(node->op == op_Filter);
1832 set_Filter_pred (ir_node *node, ir_node *pred) {
1833 assert(node->op == op_Filter);
1837 get_Filter_proj(ir_node *node) {
1838 assert(node->op == op_Filter);
1839 return node->attr.filter.proj;
1842 set_Filter_proj (ir_node *node, long proj) {
1843 assert(node->op == op_Filter);
1844 node->attr.filter.proj = proj;
1847 /* Don't use get_irn_arity, get_irn_n in implementation as access
1848 shall work independent of view!!! */
/* Installs/replaces the interprocedural (call-graph) predecessor array of a
   Filter.  Slot 0 of in_cg mirrors in[0] (the block); the 'arity' call-graph
   predecessors follow from index 1.  Backedge flags are (re)allocated on the
   graph's obstack and cleared.
   NOTE(review): closing braces of the if-block and of this function are
   missing from this extraction; code kept byte-identical. */
1849 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1850 assert(node->op == op_Filter);
/* Reallocate only when no array exists yet or the arity changed. */
1851 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1852 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1853 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1854 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1855 node->attr.filter.in_cg[0] = node->in[0];
1857 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets one interprocedural predecessor; 0-based pos is stored at pos+1
   because slot 0 holds the block. */
1860 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1861 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1862 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1863 node->attr.filter.in_cg[pos + 1] = pred;
/* Number of interprocedural predecessors (array length minus block slot). */
1865 int get_Filter_n_cg_preds(ir_node *node) {
1866 assert(node->op == op_Filter && node->attr.filter.in_cg);
1867 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Fetches one interprocedural predecessor.
   NOTE(review): the declaration of 'arity' and the tail of the first assert
   are missing from this extraction; code kept byte-identical. */
1869 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1871 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1873 arity = ARR_LEN(node->attr.filter.in_cg);
1874 assert(pos < arity - 1);
1875 return node->attr.filter.in_cg[pos + 1];
1880 get_irn_irg(ir_node *node) {
1881 if (get_irn_op(node) != op_Block)
1882 node = get_nodes_block(node);
1883 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1884 node = get_nodes_block(node);
1885 assert(get_irn_op(node) == op_Block);
1886 return node->attr.block.irg;
1890 /*----------------------------------------------------------------*/
1891 /* Auxiliary routines */
1892 /*----------------------------------------------------------------*/
1895 skip_Proj (ir_node *node) {
1896 /* don't assert node !!! */
1897 if (node && is_Proj(node)) {
1898 return get_Proj_pred(node);
1905 skip_Tuple (ir_node *node) {
1908 if (!get_opt_normalize()) return node;
1910 node = skip_Id(node);
1911 if (get_irn_op(node) == op_Proj) {
1912 pred = skip_Id(get_Proj_pred(node));
1913 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1914 pred = skip_Id(skip_Tuple(pred));
1915 if (get_irn_op(pred) == op_Tuple)
1916 return get_Tuple_pred(pred, get_Proj_proj(node));
1921 /** returns operand of node if node is a Cast */
1922 ir_node *skip_Cast (ir_node *node) {
1923 if (node && get_irn_op(node) == op_Cast) {
1924 return skip_Id(get_irn_n(node, 0));
1931 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1932 than any other approach, as Id chains are resolved and all point to the real node, or
1933 all id's are self loops. */
/* NOTE(review): TWO definitions of skip_Id appear in this chunk; the original
   file almost certainly kept one behind a preprocessor guard that was lost in
   extraction -- confirm which is live.  This variant guards against Id cycles
   by temporarily turning the node into a self-loop, recursing to the chain
   end, then repointing in[0+1] at the result.  The declaration of 'res' and
   the trailing returns/braces are also missing here; code kept byte-identical. */
1935 skip_Id (ir_node *node) {
1936 /* don't assert node !!! */
1938 if (!get_opt_normalize()) return node;
1940 /* Don't use get_Id_pred: We get into an endless loop for
1941 self-referencing Ids. */
1942 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
/* Remember the predecessor before installing the self-loop cycle guard. */
1943 ir_node *rem_pred = node->in[0+1];
1946 assert (get_irn_arity (node) > 0);
1948 node->in[0+1] = node;
1949 res = skip_Id(rem_pred);
/* A chain that resolves to an Id means we ran into a cycle: keep self-loop. */
1950 if (res->op == op_Id) /* self-loop */ return node;
1952 node->in[0+1] = res;
1959 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1960 than any other approach, as Id chains are resolved and all point to the real node, or
1961 all id's are self loops. */
/* NOTE(review): second definition of skip_Id in this chunk (see above) --
   the original file most likely selected one via a lost preprocessor guard.
   This variant adds fast paths (non-Id input, one-hop chain) before the same
   self-loop-guarded recursion.  The assignment to rem_pred and trailing
   returns/braces are missing from this extraction; code kept byte-identical. */
1963 skip_Id (ir_node *node) {
1965 /* don't assert node !!! */
/* Fast path: nothing to skip for NULL or non-Id nodes. */
1967 if (!node || (node->op != op_Id)) return node;
1969 if (!get_opt_normalize()) return node;
1971 /* Don't use get_Id_pred: We get into an endless loop for
1972 self-referencing Ids. */
1973 pred = node->in[0+1];
/* Fast path: a one-hop chain already points at the real node. */
1975 if (pred->op != op_Id) return pred;
1977 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1978 ir_node *rem_pred, *res;
1980 if (pred->op != op_Id) return pred; /* shortcut */
1983 assert (get_irn_arity (node) > 0);
1985 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1986 res = skip_Id(rem_pred);
1987 if (res->op == op_Id) /* self-loop */ return node;
1989 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1998 is_Bad (ir_node *node) {
2000 if ((node) && get_irn_opcode(node) == iro_Bad)
2006 is_no_Block (ir_node *node) {
2008 return (get_irn_opcode(node) != iro_Block);
2012 is_Block (ir_node *node) {
2014 return (get_irn_opcode(node) == iro_Block);
2017 /* returns true if node is a Unknown node. */
2019 is_Unknown (ir_node *node) {
2021 return (get_irn_opcode(node) == iro_Unknown);
2025 is_Proj (const ir_node *node) {
2027 return node->op == op_Proj
2028 || (!interprocedural_view && node->op == op_Filter);
2031 /* Returns true if the operation manipulates control flow. */
2033 is_cfop(ir_node *node) {
2034 return is_cfopcode(get_irn_op(node));
2037 /* Returns true if the operation manipulates interprocedural control flow:
2038 CallBegin, EndReg, EndExcept */
2039 int is_ip_cfop(ir_node *node) {
2040 return is_ip_cfopcode(get_irn_op(node));
2043 /* Returns true if the operation can change the control flow because
2046 is_fragile_op(ir_node *node) {
2047 return is_op_fragile(get_irn_op(node));
2050 /* Returns the memory operand of fragile operations. */
/* NOTE(review): the case labels of this switch -- the fragile opcodes whose
   memory input sits at predecessor 0 -- are missing from this extraction;
   code kept byte-identical. */
2051 ir_node *get_fragile_op_mem(ir_node *node) {
2052 assert(node && is_fragile_op(node));
2054 switch (get_irn_opcode (node)) {
/* For each handled fragile opcode the memory input is predecessor 0. */
2063 return get_irn_n(node, 0);
/* Defensive default: every fragile opcode must be listed above. */
2068 assert(0 && "should not be reached");
2073 /* Returns true if the operation is a forking control flow operation. */
2075 is_forking_op(ir_node *node) {
2076 return is_op_forking(get_irn_op(node));
2079 #ifdef DEBUG_libfirm
/* Debug helper: prints a node, its block and all data predecessors to stdout.
   NOTE(review): closing braces and possibly a guard around the block print
   are missing from this extraction; code kept byte-identical. */
2080 void dump_irn (ir_node *n) {
2081 int i, arity = get_irn_arity(n);
/* Node header: "<opname><mode>: <node-nr> (<address>)". */
2082 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* Slot -1 is printed as "block:" below -- appears to fetch the node's block. */
2084 ir_node *pred = get_irn_n(n, -1);
2085 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2086 get_irn_node_nr(pred), (void *)pred);
2088 printf(" preds: \n");
2089 for (i = 0; i < arity; ++i) {
2090 ir_node *pred = get_irn_n(n, i);
2091 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2092 get_irn_node_nr(pred), (void *)pred);
2096 #else /* DEBUG_libfirm */
/* Release build: node dumping is a no-op. */
2097 void dump_irn (ir_node *n) {}
2098 #endif /* DEBUG_libfirm */