3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46  * returns the pnc name from a pnc constant
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
159 (get_irn_arity)(const ir_node *node) {
160 return __get_irn_arity(node);
163 /* Returns the array with ins. This array is shifted with respect to the
164 array accessed by get_irn_n: The block operand is at position 0 not -1.
165 (@@@ This should be changed.)
166 The order of the predecessors in this array is not guaranteed, except that
167 lists of operands as predecessors of Block or arguments of a Call are
170 get_irn_in (const ir_node *node) {
172 if (interprocedural_view) { /* handle Filter and Block specially */
173 if (get_irn_opcode(node) == iro_Filter) {
174 assert(node->attr.filter.in_cg);
175 return node->attr.filter.in_cg;
176 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
177 return node->attr.block.in_cg;
179 /* else fall through */
185 set_irn_in (ir_node *node, int arity, ir_node **in) {
188 if (interprocedural_view) { /* handle Filter and Block specially */
189 if (get_irn_opcode(node) == iro_Filter) {
190 assert(node->attr.filter.in_cg);
191 arr = &node->attr.filter.in_cg;
192 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
193 arr = &node->attr.block.in_cg;
200 if (arity != ARR_LEN(*arr) - 1) {
201 ir_node * block = (*arr)[0];
202 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
205 fix_backedges(current_ir_graph->obst, node);
206 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
210 (get_irn_intra_n)(ir_node *node, int n) {
211 return __get_irn_intra_n (node, n);
215 (get_irn_inter_n)(ir_node *node, int n) {
216 return __get_irn_inter_n (node, n);
220 (get_irn_n)(ir_node *node, int n) {
221 return __get_irn_n (node, n);
225 set_irn_n (ir_node *node, int n, ir_node *in) {
226 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
227 assert(in && in->kind == k_ir_node);
228 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
229 /* Change block pred in both views! */
230 node->in[n + 1] = in;
231 assert(node->attr.filter.in_cg);
232 node->attr.filter.in_cg[n + 1] = in;
235 if (interprocedural_view) { /* handle Filter and Block specially */
236 if (get_irn_opcode(node) == iro_Filter) {
237 assert(node->attr.filter.in_cg);
238 node->attr.filter.in_cg[n + 1] = in;
240 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
241 node->attr.block.in_cg[n + 1] = in;
244 /* else fall through */
246 node->in[n + 1] = in;
250 (get_irn_mode)(const ir_node *node) {
251 return __get_irn_mode(node);
255 (set_irn_mode)(ir_node *node, ir_mode *mode)
257 __set_irn_mode(node, mode);
261 get_irn_modecode (const ir_node *node)
264 return node->mode->code;
267 /** Gets the string representation of the mode .*/
269 get_irn_modename (const ir_node *node)
272 return get_mode_name(node->mode);
276 get_irn_modeident (const ir_node *node)
279 return get_mode_ident(node->mode);
283 (get_irn_op)(const ir_node *node)
285 return __get_irn_op(node);
288 /* should be private to the library: */
290 set_irn_op (ir_node *node, ir_op *op)
297 (get_irn_opcode)(const ir_node *node)
299 return __get_irn_opcode(node);
303 get_irn_opname (const ir_node *node)
306 if ((get_irn_op((ir_node *)node) == op_Phi) &&
307 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
308 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
309 return get_id_str(node->op->name);
313 get_irn_opident (const ir_node *node)
316 return node->op->name;
320 (get_irn_visited)(const ir_node *node)
322 return __get_irn_visited(node);
326 (set_irn_visited)(ir_node *node, unsigned long visited)
328 __set_irn_visited(node, visited);
332 (mark_irn_visited)(ir_node *node) {
333 __mark_irn_visited(node);
337 (irn_not_visited)(const ir_node *node) {
338 return __irn_not_visited(node);
342 (irn_visited)(const ir_node *node) {
343 return __irn_visited(node);
347 (set_irn_link)(ir_node *node, void *link) {
348 __set_irn_link(node, link);
352 (get_irn_link)(const ir_node *node) {
353 return __get_irn_link(node);
357 (get_irn_pinned)(const ir_node *node) {
358 return __get_irn_pinned(node);
362 #ifdef DO_HEAPANALYSIS
363 /* Access the abstract interpretation information of a node.
364 Returns NULL if no such information is available. */
365 struct abstval *get_irn_abst_value(ir_node *n) {
368 /* Set the abstract interpretation information of a node. */
369 void set_irn_abst_value(ir_node *n, struct abstval *os) {
372 struct section *firm_get_irn_section(ir_node *n) {
375 void firm_set_irn_section(ir_node *n, struct section *s) {
378 #endif /* DO_HEAPANALYSIS */
381 /* Outputs a unique number for this node */
383 get_irn_node_nr(const ir_node *node) {
386 return node->node_nr;
393 get_irn_const_attr (ir_node *node)
395 assert (node->op == op_Const);
396 return node->attr.con;
400 get_irn_proj_attr (ir_node *node)
402 assert (node->op == op_Proj);
403 return node->attr.proj;
407 get_irn_alloc_attr (ir_node *node)
409 assert (node->op == op_Alloc);
414 get_irn_free_attr (ir_node *node)
416 assert (node->op == op_Free);
417 return node->attr.f = skip_tid(node->attr.f);
421 get_irn_symconst_attr (ir_node *node)
423 assert (node->op == op_SymConst);
428 get_irn_call_attr (ir_node *node)
430 assert (node->op == op_Call);
431 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
435 get_irn_funccall_attr (ir_node *node)
437 assert (node->op == op_FuncCall);
438 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
442 get_irn_sel_attr (ir_node *node)
444 assert (node->op == op_Sel);
449 get_irn_phi_attr (ir_node *node)
451 assert (node->op == op_Phi);
452 return node->attr.phi0_pos;
456 get_irn_block_attr (ir_node *node)
458 assert (node->op == op_Block);
459 return node->attr.block;
463 get_irn_load_attr (ir_node *node)
465 assert (node->op == op_Load);
466 return node->attr.load;
470 get_irn_store_attr (ir_node *node)
472 assert (node->op == op_Store);
473 return node->attr.store;
477 get_irn_except_attr (ir_node *node)
479 assert (node->op == op_Div || node->op == op_Quot ||
480 node->op == op_DivMod || node->op == op_Mod);
481 return node->attr.except;
484 /** manipulate fields of individual nodes **/
486 /* this works for all except Block */
488 get_nodes_block (ir_node *node) {
489 assert (!(node->op == op_Block));
490 return get_irn_n(node, -1);
494 set_nodes_block (ir_node *node, ir_node *block) {
495 assert (!(node->op == op_Block));
496 set_irn_n(node, -1, block);
499 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
500 * from Start. If so returns frame type, else Null. */
501 type *is_frame_pointer(ir_node *n) {
502 if ((get_irn_op(n) == op_Proj) &&
503 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
504 ir_node *start = get_Proj_pred(n);
505 if (get_irn_op(start) == op_Start) {
506 return get_irg_frame_type(get_irn_irg(start));
512 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
513 * from Start. If so returns global type, else Null. */
514 type *is_globals_pointer(ir_node *n) {
515 if ((get_irn_op(n) == op_Proj) &&
516 (get_Proj_proj(n) == pn_Start_P_globals)) {
517 ir_node *start = get_Proj_pred(n);
518 if (get_irn_op(start) == op_Start) {
519 return get_glob_type();
525 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
526 * from Start. If so returns 1, else 0. */
527 int is_value_arg_pointer(ir_node *n) {
528 if ((get_irn_op(n) == op_Proj) &&
529 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
530 (get_irn_op(get_Proj_pred(n)) == op_Start))
535 /* Returns an array with the predecessors of the Block. Depending on
536 the implementation of the graph data structure this can be a copy of
537 the internal representation of predecessors as well as the internal
538 array itself. Therefore writing to this array might obstruct the ir. */
540 get_Block_cfgpred_arr (ir_node *node)
542 assert ((node->op == op_Block));
543 return (ir_node **)&(get_irn_in(node)[1]);
548 get_Block_n_cfgpreds (ir_node *node) {
549 assert ((node->op == op_Block));
550 return get_irn_arity(node);
554 get_Block_cfgpred (ir_node *node, int pos) {
556 assert (node->op == op_Block);
557 assert(-1 <= pos && pos < get_irn_arity(node));
558 return get_irn_n(node, pos);
562 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
563 assert (node->op == op_Block);
564 set_irn_n(node, pos, pred);
568 get_Block_matured (ir_node *node) {
569 assert (node->op == op_Block);
570 return node->attr.block.matured;
574 set_Block_matured (ir_node *node, bool matured) {
575 assert (node->op == op_Block);
576 node->attr.block.matured = matured;
579 get_Block_block_visited (ir_node *node) {
580 assert (node->op == op_Block);
581 return node->attr.block.block_visited;
585 set_Block_block_visited (ir_node *node, unsigned long visit) {
586 assert (node->op == op_Block);
587 node->attr.block.block_visited = visit;
590 /* For this current_ir_graph must be set. */
592 mark_Block_block_visited (ir_node *node) {
593 assert (node->op == op_Block);
594 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
598 Block_not_block_visited(ir_node *node) {
599 assert (node->op == op_Block);
600 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
604 get_Block_graph_arr (ir_node *node, int pos) {
605 assert (node->op == op_Block);
606 return node->attr.block.graph_arr[pos+1];
610 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
611 assert (node->op == op_Block);
612 node->attr.block.graph_arr[pos+1] = value;
615 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
616 assert(node->op == op_Block);
617 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
618 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
619 node->attr.block.in_cg[0] = NULL;
620 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
622 /* Fix backedge array. fix_backedges operates depending on
623 interprocedural_view. */
624 bool ipv = interprocedural_view;
625 interprocedural_view = true;
626 fix_backedges(current_ir_graph->obst, node);
627 interprocedural_view = ipv;
630 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
633 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
634 assert(node->op == op_Block &&
635 node->attr.block.in_cg &&
636 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
637 node->attr.block.in_cg[pos + 1] = pred;
640 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
641 assert(node->op == op_Block);
642 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
645 int get_Block_cg_n_cfgpreds(ir_node * node) {
646 assert(node->op == op_Block);
647 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
650 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
651 assert(node->op == op_Block && node->attr.block.in_cg);
652 return node->attr.block.in_cg[pos + 1];
655 void remove_Block_cg_cfgpred_arr(ir_node * node) {
656 assert(node->op == op_Block);
657 node->attr.block.in_cg = NULL;
661 set_Start_irg(ir_node *node, ir_graph *irg) {
662 assert(node->op == op_Start);
663 assert(is_ir_graph(irg));
664 assert(0 && " Why set irg? -- use set_irn_irg");
668 get_End_n_keepalives(ir_node *end) {
669 assert (end->op == op_End);
670 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
674 get_End_keepalive(ir_node *end, int pos) {
675 assert (end->op == op_End);
676 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
680 add_End_keepalive (ir_node *end, ir_node *ka) {
681 assert (end->op == op_End);
682 ARR_APP1 (ir_node *, end->in, ka);
686 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
687 assert (end->op == op_End);
688 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
692 free_End (ir_node *end) {
693 assert (end->op == op_End);
695 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
696 end->in = NULL; /* @@@ make sure we get an error if we use the
697 in array afterwards ... */
702 > Implementing the case construct (which is where the constant Proj node is
703 > important) involves far more than simply determining the constant values.
704 > We could argue that this is more properly a function of the translator from
705 > Firm to the target machine. That could be done if there was some way of
706 > projecting "default" out of the Cond node.
707 I know it's complicated.
708   Basically there are two problems:
709 - determining the gaps between the projs
710 - determining the biggest case constant to know the proj number for
712 I see several solutions:
713 1. Introduce a ProjDefault node. Solves both problems.
714 This means to extend all optimizations executed during construction.
715 2. Give the Cond node for switch two flavors:
716 a) there are no gaps in the projs (existing flavor)
717 b) gaps may exist, default proj is still the Proj with the largest
718 projection number. This covers also the gaps.
719 3. Fix the semantic of the Cond to that of 2b)
721 Solution 2 seems to be the best:
722 Computing the gaps in the Firm representation is not too hard, i.e.,
723 libFIRM can implement a routine that transforms between the two
724 flavours. This is also possible for 1) but 2) does not require to
725 change any existing optimization.
726 Further it should be far simpler to determine the biggest constant than
728 I don't want to choose 3) as 2a) seems to have advantages for
729 dataflow analysis and 3) does not allow to convert the representation to
733 get_Cond_selector (ir_node *node) {
734 assert (node->op == op_Cond);
735 return get_irn_n(node, 0);
739 set_Cond_selector (ir_node *node, ir_node *selector) {
740 assert (node->op == op_Cond);
741 set_irn_n(node, 0, selector);
745 get_Cond_kind (ir_node *node) {
746 assert (node->op == op_Cond);
747 return node->attr.c.kind;
751 set_Cond_kind (ir_node *node, cond_kind kind) {
752 assert (node->op == op_Cond);
753 node->attr.c.kind = kind;
757 get_Cond_defaultProj (ir_node *node) {
758 assert (node->op == op_Cond);
759 return node->attr.c.default_proj;
763 get_Return_mem (ir_node *node) {
764 assert (node->op == op_Return);
765 return get_irn_n(node, 0);
769 set_Return_mem (ir_node *node, ir_node *mem) {
770 assert (node->op == op_Return);
771 set_irn_n(node, 0, mem);
775 get_Return_n_ress (ir_node *node) {
776 assert (node->op == op_Return);
777 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
781 get_Return_res_arr (ir_node *node)
783 assert ((node->op == op_Return));
784 if (get_Return_n_ress(node) > 0)
785 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
792 set_Return_n_res (ir_node *node, int results) {
793 assert (node->op == op_Return);
798 get_Return_res (ir_node *node, int pos) {
799 assert (node->op == op_Return);
800 assert (get_Return_n_ress(node) > pos);
801 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
805 set_Return_res (ir_node *node, int pos, ir_node *res){
806 assert (node->op == op_Return);
807 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
811 get_Raise_mem (ir_node *node) {
812 assert (node->op == op_Raise);
813 return get_irn_n(node, 0);
817 set_Raise_mem (ir_node *node, ir_node *mem) {
818 assert (node->op == op_Raise);
819 set_irn_n(node, 0, mem);
823 get_Raise_exo_ptr (ir_node *node) {
824 assert (node->op == op_Raise);
825 return get_irn_n(node, 1);
829 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
830 assert (node->op == op_Raise);
831 set_irn_n(node, 1, exo_ptr);
834 tarval *get_Const_tarval (ir_node *node) {
835 assert (node->op == op_Const);
836 return node->attr.con.tv;
840 set_Const_tarval (ir_node *node, tarval *con) {
841 assert (node->op == op_Const);
842 node->attr.con.tv = con;
846 /* The source language type. Must be an atomic type. Mode of type must
847 be mode of node. For tarvals from entities type must be pointer to
850 get_Const_type (ir_node *node) {
851 assert (node->op == op_Const);
852 return node->attr.con.tp;
856 set_Const_type (ir_node *node, type *tp) {
857 assert (node->op == op_Const);
858 if (tp != unknown_type) {
859 assert (is_atomic_type(tp));
860 assert (get_type_mode(tp) == get_irn_mode(node));
863 node->attr.con.tp = tp;
868 get_SymConst_kind (const ir_node *node) {
869 assert (node->op == op_SymConst);
870 return node->attr.i.num;
874 set_SymConst_kind (ir_node *node, symconst_kind num) {
875 assert (node->op == op_SymConst);
876 node->attr.i.num = num;
880 get_SymConst_type (ir_node *node) {
881 assert ( (node->op == op_SymConst)
882 && ( get_SymConst_kind(node) == symconst_type_tag
883 || get_SymConst_kind(node) == symconst_size));
884 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
888 set_SymConst_type (ir_node *node, type *tp) {
889 assert ( (node->op == op_SymConst)
890 && ( get_SymConst_kind(node) == symconst_type_tag
891 || get_SymConst_kind(node) == symconst_size));
892 node->attr.i.sym.type_p = tp;
896 get_SymConst_name (ir_node *node) {
897 assert ( (node->op == op_SymConst)
898 && (get_SymConst_kind(node) == symconst_addr_name));
899 return node->attr.i.sym.ident_p;
903 set_SymConst_name (ir_node *node, ident *name) {
904 assert ( (node->op == op_SymConst)
905 && (get_SymConst_kind(node) == symconst_addr_name));
906 node->attr.i.sym.ident_p = name;
910 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
911 entity *get_SymConst_entity (ir_node *node) {
912 assert ( (node->op == op_SymConst)
913 && (get_SymConst_kind (node) == symconst_addr_ent));
914 return node->attr.i.sym.entity_p;
917 void set_SymConst_entity (ir_node *node, entity *ent) {
918 assert ( (node->op == op_SymConst)
919 && (get_SymConst_kind(node) == symconst_addr_ent));
920 node->attr.i.sym.entity_p = ent;
924 union symconst_symbol
925 get_SymConst_symbol (ir_node *node) {
926 assert (node->op == op_SymConst);
927 return node->attr.i.sym;
931 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
932 assert (node->op == op_SymConst);
933 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
934 node->attr.i.sym = sym;
938 get_Sel_mem (ir_node *node) {
939 assert (node->op == op_Sel);
940 return get_irn_n(node, 0);
944 set_Sel_mem (ir_node *node, ir_node *mem) {
945 assert (node->op == op_Sel);
946 set_irn_n(node, 0, mem);
950 get_Sel_ptr (ir_node *node) {
951 assert (node->op == op_Sel);
952 return get_irn_n(node, 1);
956 set_Sel_ptr (ir_node *node, ir_node *ptr) {
957 assert (node->op == op_Sel);
958 set_irn_n(node, 1, ptr);
962 get_Sel_n_indexs (ir_node *node) {
963 assert (node->op == op_Sel);
964 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
968 get_Sel_index_arr (ir_node *node)
970 assert ((node->op == op_Sel));
971 if (get_Sel_n_indexs(node) > 0)
972 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
978 get_Sel_index (ir_node *node, int pos) {
979 assert (node->op == op_Sel);
980 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
984 set_Sel_index (ir_node *node, int pos, ir_node *index) {
985 assert (node->op == op_Sel);
986 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
990 get_Sel_entity (ir_node *node) {
991 assert (node->op == op_Sel);
992 return node->attr.s.ent;
996 set_Sel_entity (ir_node *node, entity *ent) {
997 assert (node->op == op_Sel);
998 node->attr.s.ent = ent;
1002 get_InstOf_ent (ir_node *node) {
1003 assert (node->op = op_InstOf);
1004 return (node->attr.io.ent);
1008 set_InstOf_ent (ir_node *node, type *ent) {
1009 assert (node->op = op_InstOf);
1010 node->attr.io.ent = ent;
1014 get_InstOf_store (ir_node *node) {
1015 assert (node->op = op_InstOf);
1016 return (get_irn_n (node, 0));
1020 set_InstOf_store (ir_node *node, ir_node *obj) {
1021 assert (node->op = op_InstOf);
1022 set_irn_n (node, 0, obj);
1026 get_InstOf_obj (ir_node *node) {
1027 assert (node->op = op_InstOf);
1028 return (get_irn_n (node, 1));
1032 set_InstOf_obj (ir_node *node, ir_node *obj) {
1033 assert (node->op = op_InstOf);
1034 set_irn_n (node, 1, obj);
1038 /* For unary and binary arithmetic operations the access to the
1039 operands can be factored out. Left is the first, right the
1040 second arithmetic value as listed in tech report 0999-33.
1041 unops are: Minus, Abs, Not, Conv, Cast
1042 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1043 Shr, Shrs, Rotate, Cmp */
1047 get_Call_mem (ir_node *node) {
1048 assert (node->op == op_Call);
1049 return get_irn_n(node, 0);
1053 set_Call_mem (ir_node *node, ir_node *mem) {
1054 assert (node->op == op_Call);
1055 set_irn_n(node, 0, mem);
1059 get_Call_ptr (ir_node *node) {
1060 assert (node->op == op_Call);
1061 return get_irn_n(node, 1);
1065 set_Call_ptr (ir_node *node, ir_node *ptr) {
1066 assert (node->op == op_Call);
1067 set_irn_n(node, 1, ptr);
1071 get_Call_param_arr (ir_node *node) {
1072 assert (node->op == op_Call);
1073 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1077 get_Call_n_params (ir_node *node) {
1078 assert (node->op == op_Call);
1079 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1083 get_Call_arity (ir_node *node) {
1084 assert (node->op == op_Call);
1085 return get_Call_n_params(node);
1089 set_Call_arity (ir_node *node, ir_node *arity) {
1090 assert (node->op == op_Call);
1095 get_Call_param (ir_node *node, int pos) {
1096 assert (node->op == op_Call);
1097 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1101 set_Call_param (ir_node *node, int pos, ir_node *param) {
1102 assert (node->op == op_Call);
1103 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1107 get_Call_type (ir_node *node) {
1108 assert (node->op == op_Call);
1109 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1113 set_Call_type (ir_node *node, type *tp) {
1114 assert (node->op == op_Call);
1115 assert (is_method_type(tp));
1116 node->attr.call.cld_tp = tp;
1119 int Call_has_callees(ir_node *node) {
1120 assert(node && node->op == op_Call);
1121 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1122 (node->attr.call.callee_arr != NULL));
1125 int get_Call_n_callees(ir_node * node) {
1126 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1127 return ARR_LEN(node->attr.call.callee_arr);
1130 entity * get_Call_callee(ir_node * node, int pos) {
1131 assert(pos >= 0 && pos < get_Call_n_callees(node));
1132 return node->attr.call.callee_arr[pos];
1135 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1136 assert(node->op == op_Call);
1137 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1138 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1140 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1143 void remove_Call_callee_arr(ir_node * node) {
1144 assert(node->op == op_Call);
1145 node->attr.call.callee_arr = NULL;
1148 ir_node * get_CallBegin_ptr (ir_node *node) {
1149 assert(node->op == op_CallBegin);
1150 return get_irn_n(node, 0);
1152 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1153 assert(node->op == op_CallBegin);
1154 set_irn_n(node, 0, ptr);
1156 ir_node * get_CallBegin_call (ir_node *node) {
1157 assert(node->op == op_CallBegin);
1158 return node->attr.callbegin.call;
1160 void set_CallBegin_call (ir_node *node, ir_node *call) {
1161 assert(node->op == op_CallBegin);
1162 node->attr.callbegin.call = call;
1166 get_FuncCall_ptr (ir_node *node) {
1167 assert (node->op == op_FuncCall);
1168 return get_irn_n(node, 0);
1172 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1173 assert (node->op == op_FuncCall);
1174 set_irn_n(node, 0, ptr);
1178 get_FuncCall_param_arr (ir_node *node) {
1179 assert (node->op == op_FuncCall);
1180 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1184 get_FuncCall_n_params (ir_node *node) {
1185 assert (node->op == op_FuncCall);
1186 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1190 get_FuncCall_arity (ir_node *node) {
1191 assert (node->op == op_FuncCall);
1192 return get_FuncCall_n_params(node);
1196 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1197 assert (node->op == op_FuncCall);
1202 get_FuncCall_param (ir_node *node, int pos) {
1203 assert (node->op == op_FuncCall);
1204 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1208 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1209 assert (node->op == op_FuncCall);
1210 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1214 get_FuncCall_type (ir_node *node) {
1215 assert (node->op == op_FuncCall);
1216 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1220 set_FuncCall_type (ir_node *node, type *tp) {
1221 assert (node->op == op_FuncCall);
1222 assert (is_method_type(tp));
1223 node->attr.call.cld_tp = tp;
1226 int FuncCall_has_callees(ir_node *node) {
1227 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1228 (node->attr.call.callee_arr != NULL));
1231 int get_FuncCall_n_callees(ir_node * node) {
1232 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1233 return ARR_LEN(node->attr.call.callee_arr);
1236 entity * get_FuncCall_callee(ir_node * node, int pos) {
1237 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1238 return node->attr.call.callee_arr[pos];
1241 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1242 assert(node->op == op_FuncCall);
1243 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1244 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1246 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1249 void remove_FuncCall_callee_arr(ir_node * node) {
1250 assert(node->op == op_FuncCall);
1251 node->attr.call.callee_arr = NULL;
1256 ir_node * get_##OP##_left(ir_node *node) { \
1257 assert(node->op == op_##OP); \
1258 return get_irn_n(node, node->op->op_index); \
1260 void set_##OP##_left(ir_node *node, ir_node *left) { \
1261 assert(node->op == op_##OP); \
1262 set_irn_n(node, node->op->op_index, left); \
1264 ir_node *get_##OP##_right(ir_node *node) { \
1265 assert(node->op == op_##OP); \
1266 return get_irn_n(node, node->op->op_index + 1); \
1268 void set_##OP##_right(ir_node *node, ir_node *right) { \
1269 assert(node->op == op_##OP); \
1270 set_irn_n(node, node->op->op_index + 1, right); \
1274 ir_node *get_##OP##_op(ir_node *node) { \
1275 assert(node->op == op_##OP); \
1276 return get_irn_n(node, node->op->op_index); \
1278 void set_##OP##_op (ir_node *node, ir_node *op) { \
1279 assert(node->op == op_##OP); \
1280 set_irn_n(node, node->op->op_index, op); \
1290 get_Quot_mem (ir_node *node) {
1291 assert (node->op == op_Quot);
1292 return get_irn_n(node, 0);
1296 set_Quot_mem (ir_node *node, ir_node *mem) {
1297 assert (node->op == op_Quot);
1298 set_irn_n(node, 0, mem);
1304 get_DivMod_mem (ir_node *node) {
1305 assert (node->op == op_DivMod);
1306 return get_irn_n(node, 0);
1310 set_DivMod_mem (ir_node *node, ir_node *mem) {
1311 assert (node->op == op_DivMod);
1312 set_irn_n(node, 0, mem);
1318 get_Div_mem (ir_node *node) {
1319 assert (node->op == op_Div);
1320 return get_irn_n(node, 0);
1324 set_Div_mem (ir_node *node, ir_node *mem) {
1325 assert (node->op == op_Div);
1326 set_irn_n(node, 0, mem);
1332 get_Mod_mem (ir_node *node) {
1333 assert (node->op == op_Mod);
1334 return get_irn_n(node, 0);
1338 set_Mod_mem (ir_node *node, ir_node *mem) {
1339 assert (node->op == op_Mod);
1340 set_irn_n(node, 0, mem);
1357 get_Cast_type (ir_node *node) {
1358 assert (node->op == op_Cast);
1359 return node->attr.cast.totype;
1363 set_Cast_type (ir_node *node, type *to_tp) {
1364 assert (node->op == op_Cast);
1365 node->attr.cast.totype = to_tp;
1369 is_unop (ir_node *node) {
1370 return (node->op->opar == oparity_unary);
1374 get_unop_op (ir_node *node) {
1375 if (node->op->opar == oparity_unary)
1376 return get_irn_n(node, node->op->op_index);
1378 assert(node->op->opar == oparity_unary);
1383 set_unop_op (ir_node *node, ir_node *op) {
1384 if (node->op->opar == oparity_unary)
1385 set_irn_n(node, node->op->op_index, op);
1387 assert(node->op->opar == oparity_unary);
1391 is_binop (ir_node *node) {
1392 return (node->op->opar == oparity_binary);
1396 get_binop_left (ir_node *node) {
1397 if (node->op->opar == oparity_binary)
1398 return get_irn_n(node, node->op->op_index);
1400 assert(node->op->opar == oparity_binary);
1405 set_binop_left (ir_node *node, ir_node *left) {
1406 if (node->op->opar == oparity_binary)
1407 set_irn_n(node, node->op->op_index, left);
1409 assert (node->op->opar == oparity_binary);
1413 get_binop_right (ir_node *node) {
1414 if (node->op->opar == oparity_binary)
1415 return get_irn_n(node, node->op->op_index + 1);
1417 assert(node->op->opar == oparity_binary);
1422 set_binop_right (ir_node *node, ir_node *right) {
1423 if (node->op->opar == oparity_binary)
1424 set_irn_n(node, node->op->op_index + 1, right);
1426 assert (node->op->opar == oparity_binary);
/* Phi predicates and accessors.
   is_Phi: a Filter node counts as a Phi only in the interprocedural
   view; a real Phi counts unless it is a zero-arity placeholder during
   graph construction (that case is is_Phi0).
   NOTE(review): some body lines (e.g. the 'op' initialization in is_Phi
   and the body of set_Phi_n_preds) were dropped by the extraction. */
1429 int is_Phi (ir_node *n) {
1435 if (op == op_Filter) return interprocedural_view;
1438 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1439 (get_irn_arity(n) > 0));
1444 int is_Phi0 (ir_node *n) {
1447 return ((get_irn_op(n) == op_Phi) &&
1448 (get_irn_arity(n) == 0) &&
1449 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Returns the predecessor array, skipping slot 0 (the block). */
1453 get_Phi_preds_arr (ir_node *node) {
1454 assert (node->op == op_Phi);
1455 return (ir_node **)&(get_irn_in(node)[1]);
1459 get_Phi_n_preds (ir_node *node) {
1460 assert (is_Phi(node) || is_Phi0(node));
1461 return (get_irn_arity(node));
1465 void set_Phi_n_preds (ir_node *node, int n_preds) {
1466 assert (node->op == op_Phi);
1471 get_Phi_pred (ir_node *node, int pos) {
1472 assert (is_Phi(node) || is_Phi0(node));
1473 return get_irn_n(node, pos);
1477 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1478 assert (is_Phi(node) || is_Phi0(node));
1479 set_irn_n(node, pos, pred);
/* Generic memory-operation accessors: a "memop" is a Load or Store.
   Both share the same layout for their first operands:
   input 0 = memory, input 1 = pointer. */
1483 int is_memop(ir_node *node) {
1484 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1487 ir_node *get_memop_mem (ir_node *node) {
1488 assert(is_memop(node));
1489 return get_irn_n(node, 0);
1492 void set_memop_mem (ir_node *node, ir_node *mem) {
1493 assert(is_memop(node));
1494 set_irn_n(node, 0, mem);
1497 ir_node *get_memop_ptr (ir_node *node) {
1498 assert(is_memop(node));
1499 return get_irn_n(node, 1);
1502 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1503 assert(is_memop(node));
1504 set_irn_n(node, 1, ptr);
/* Load accessors: input 0 = memory, input 1 = pointer;
   the loaded mode and the volatility flag live in the load attribute. */
1508 get_Load_mem (ir_node *node) {
1509 assert (node->op == op_Load);
1510 return get_irn_n(node, 0);
1514 set_Load_mem (ir_node *node, ir_node *mem) {
1515 assert (node->op == op_Load);
1516 set_irn_n(node, 0, mem);
1520 get_Load_ptr (ir_node *node) {
1521 assert (node->op == op_Load);
1522 return get_irn_n(node, 1);
1526 set_Load_ptr (ir_node *node, ir_node *ptr) {
1527 assert (node->op == op_Load);
1528 set_irn_n(node, 1, ptr);
1532 get_Load_mode (ir_node *node) {
1533 assert (node->op == op_Load);
1534 return node->attr.load.load_mode;
1538 set_Load_mode (ir_node *node, ir_mode *mode) {
1539 assert (node->op == op_Load);
1540 node->attr.load.load_mode = mode;
1544 get_Load_volatility (ir_node *node) {
1545 assert (node->op == op_Load);
1546 return node->attr.load.volatility;
1550 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1551 assert (node->op == op_Load);
1552 node->attr.load.volatility = volatility;
/* Store accessors: input 0 = memory, input 1 = pointer,
   input 2 = value to store; volatility lives in the store attribute. */
1557 get_Store_mem (ir_node *node) {
1558 assert (node->op == op_Store);
1559 return get_irn_n(node, 0);
1563 set_Store_mem (ir_node *node, ir_node *mem) {
1564 assert (node->op == op_Store);
1565 set_irn_n(node, 0, mem);
1569 get_Store_ptr (ir_node *node) {
1570 assert (node->op == op_Store);
1571 return get_irn_n(node, 1);
1575 set_Store_ptr (ir_node *node, ir_node *ptr) {
1576 assert (node->op == op_Store);
1577 set_irn_n(node, 1, ptr);
1581 get_Store_value (ir_node *node) {
1582 assert (node->op == op_Store);
1583 return get_irn_n(node, 2);
1587 set_Store_value (ir_node *node, ir_node *value) {
1588 assert (node->op == op_Store);
1589 set_irn_n(node, 2, value);
1593 get_Store_volatility (ir_node *node) {
1594 assert (node->op == op_Store);
1595 return node->attr.store.volatility;
1599 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1600 assert (node->op == op_Store);
1601 node->attr.store.volatility = volatility;
/* Alloc accessors: input 0 = memory, input 1 = size;
   allocated type and heap/stack flag live in the alloc attribute. */
1606 get_Alloc_mem (ir_node *node) {
1607 assert (node->op == op_Alloc);
1608 return get_irn_n(node, 0);
1612 set_Alloc_mem (ir_node *node, ir_node *mem) {
1613 assert (node->op == op_Alloc);
1614 set_irn_n(node, 0, mem);
1618 get_Alloc_size (ir_node *node) {
1619 assert (node->op == op_Alloc);
1620 return get_irn_n(node, 1);
1624 set_Alloc_size (ir_node *node, ir_node *size) {
1625 assert (node->op == op_Alloc);
1626 set_irn_n(node, 1, size);
1630 get_Alloc_type (ir_node *node) {
1631 assert (node->op == op_Alloc);
/* Normalizes the stored type through skip_tid and caches the result
   back into the attribute before returning it. */
1632 return node->attr.a.type = skip_tid(node->attr.a.type);
1636 set_Alloc_type (ir_node *node, type *tp) {
1637 assert (node->op == op_Alloc);
1638 node->attr.a.type = tp;
1642 get_Alloc_where (ir_node *node) {
1643 assert (node->op == op_Alloc);
1644 return node->attr.a.where;
1648 set_Alloc_where (ir_node *node, where_alloc where) {
1649 assert (node->op == op_Alloc);
1650 node->attr.a.where = where;
/* Free accessors: input 0 = memory, input 1 = pointer,
   input 2 = size; the freed type is the 'f' attribute. */
1655 get_Free_mem (ir_node *node) {
1656 assert (node->op == op_Free);
1657 return get_irn_n(node, 0);
1661 set_Free_mem (ir_node *node, ir_node *mem) {
1662 assert (node->op == op_Free);
1663 set_irn_n(node, 0, mem);
1667 get_Free_ptr (ir_node *node) {
1668 assert (node->op == op_Free);
1669 return get_irn_n(node, 1);
1673 set_Free_ptr (ir_node *node, ir_node *ptr) {
1674 assert (node->op == op_Free);
1675 set_irn_n(node, 1, ptr);
1679 get_Free_size (ir_node *node) {
1680 assert (node->op == op_Free);
1681 return get_irn_n(node, 2);
1685 set_Free_size (ir_node *node, ir_node *size) {
1686 assert (node->op == op_Free);
1687 set_irn_n(node, 2, size);
1691 get_Free_type (ir_node *node) {
1692 assert (node->op == op_Free);
/* Same normalize-and-cache idiom as get_Alloc_type. */
1693 return node->attr.f = skip_tid(node->attr.f);
1697 set_Free_type (ir_node *node, type *tp) {
1698 assert (node->op == op_Free);
/* NOTE(review): the assignment 'node->attr.f = tp;' is not visible here —
   presumably on a line dropped by the extraction; verify against the
   original file. */
/* Sync accessors: all predecessors are data inputs; the preds-array
   getter skips slot 0 (the block), like the Phi variant above. */
1703 get_Sync_preds_arr (ir_node *node) {
1704 assert (node->op == op_Sync);
1705 return (ir_node **)&(get_irn_in(node)[1]);
1709 get_Sync_n_preds (ir_node *node) {
1710 assert (node->op == op_Sync);
1711 return (get_irn_arity(node));
/* NOTE(review): body of set_Sync_n_preds not visible (dropped lines). */
1716 set_Sync_n_preds (ir_node *node, int n_preds) {
1717 assert (node->op == op_Sync);
1722 get_Sync_pred (ir_node *node, int pos) {
1723 assert (node->op == op_Sync);
1724 return get_irn_n(node, pos);
1728 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1729 assert (node->op == op_Sync);
1730 set_irn_n(node, pos, pred);
/* Proj accessors: input 0 is the projected node.
   get_Proj_proj also accepts a Filter node (which acts as a Proj in
   the interprocedural view, cf. is_Proj below) and then reads the
   projection number from the filter attribute instead. */
1734 get_Proj_pred (ir_node *node) {
1735 assert (is_Proj(node));
1736 return get_irn_n(node, 0);
1740 set_Proj_pred (ir_node *node, ir_node *pred) {
1741 assert (is_Proj(node));
1742 set_irn_n(node, 0, pred);
1746 get_Proj_proj (ir_node *node) {
1747 assert (is_Proj(node));
1748 if (get_irn_opcode(node) == iro_Proj) {
1749 return node->attr.proj;
1751 assert(get_irn_opcode(node) == iro_Filter);
1752 return node->attr.filter.proj;
/* The setter only handles real Proj nodes (not Filter). */
1757 set_Proj_proj (ir_node *node, long proj) {
1758 assert (node->op == op_Proj);
1759 node->attr.proj = proj;
/* Tuple accessors: same access pattern as Sync (preds array skips
   the block slot 0). */
1763 get_Tuple_preds_arr (ir_node *node) {
1764 assert (node->op == op_Tuple);
1765 return (ir_node **)&(get_irn_in(node)[1]);
1769 get_Tuple_n_preds (ir_node *node) {
1770 assert (node->op == op_Tuple);
1771 return (get_irn_arity(node));
/* NOTE(review): body of set_Tuple_n_preds not visible (dropped lines). */
1776 set_Tuple_n_preds (ir_node *node, int n_preds) {
1777 assert (node->op == op_Tuple);
1782 get_Tuple_pred (ir_node *node, int pos) {
1783 assert (node->op == op_Tuple);
1784 return get_irn_n(node, pos);
1788 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1789 assert (node->op == op_Tuple);
1790 set_irn_n(node, pos, pred);
/* Id accessors: an Id node forwards its single predecessor (input 0). */
1794 get_Id_pred (ir_node *node) {
1795 assert (node->op == op_Id);
1796 return get_irn_n(node, 0);
1800 set_Id_pred (ir_node *node, ir_node *pred) {
1801 assert (node->op == op_Id);
1802 set_irn_n(node, 0, pred);
/* Confirm accessors: input 0 = the confirmed value, input 1 = the bound
   it is compared against. */
1805 ir_node *get_Confirm_value (ir_node *node) {
1806 assert (node->op == op_Confirm);
1807 return get_irn_n(node, 0);
1809 void set_Confirm_value (ir_node *node, ir_node *value) {
1810 assert (node->op == op_Confirm);
1811 set_irn_n(node, 0, value);
1813 ir_node *get_Confirm_bound (ir_node *node) {
1814 assert (node->op == op_Confirm);
1815 return get_irn_n(node, 1);
/* Sets the bound operand of a Confirm node.
 * BUG FIX: the bound is input 1 — get_Confirm_bound above reads
 * get_irn_n(node, 1).  The old code wrote input 0, silently
 * overwriting the *value* operand instead of the bound. */
1817 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1818 assert (node->op == op_Confirm);
1819 set_irn_n(node, 1, bound);
/* Confirm comparison-relation accessors (attr.confirm_cmp holds the
   pn_Cmp relation between value and bound). */
1821 pn_Cmp get_Confirm_cmp (ir_node *node) {
1822 assert (node->op == op_Confirm);
1823 return node->attr.confirm_cmp;
1825 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1826 assert (node->op == op_Confirm);
1827 node->attr.confirm_cmp = cmp;
/* Filter accessors.  A Filter keeps a second, interprocedural
   predecessor array (attr.filter.in_cg) whose slot 0 mirrors the
   block pointer; user-visible positions are therefore shifted by +1.
   NOTE(review): bodies of get_Filter_pred/set_Filter_pred and some
   local declarations are not visible (dropped lines). */
1832 get_Filter_pred (ir_node *node) {
1833 assert(node->op == op_Filter);
1837 set_Filter_pred (ir_node *node, ir_node *pred) {
1838 assert(node->op == op_Filter);
1842 get_Filter_proj(ir_node *node) {
1843 assert(node->op == op_Filter);
1844 return node->attr.filter.proj;
1847 set_Filter_proj (ir_node *node, long proj) {
1848 assert(node->op == op_Filter);
1849 node->attr.filter.proj = proj;
1852 /* Don't use get_irn_arity, get_irn_n in implementation as access
1853 shall work independent of view!!! */
/* (Re)allocates the interprocedural predecessor array on the graph's
   obstack when missing or of wrong length, then copies 'in'. */
1854 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1855 assert(node->op == op_Filter);
1856 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1857 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1858 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1859 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1860 node->attr.filter.in_cg[0] = node->in[0];
1862 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1865 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1866 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1867 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1868 node->attr.filter.in_cg[pos + 1] = pred;
1870 int get_Filter_n_cg_preds(ir_node *node) {
1871 assert(node->op == op_Filter && node->attr.filter.in_cg);
1872 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1874 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1876 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1878 arity = ARR_LEN(node->attr.filter.in_cg);
1879 assert(pos < arity - 1);
1880 return node->attr.filter.in_cg[pos + 1];
/* Returns the graph a node belongs to by walking to its Block,
   whose attribute stores the irg pointer.  A Bad node may appear in
   place of the block after optimization, so hop once more in that case. */
1885 get_irn_irg(ir_node *node) {
1886 if (get_irn_op(node) != op_Block)
1887 node = get_nodes_block(node);
1888 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1889 node = get_nodes_block(node);
1890 assert(get_irn_op(node) == op_Block);
1891 return node->attr.block.irg;
1895 /*----------------------------------------------------------------*/
1896 /* Auxiliary routines */
1897 /*----------------------------------------------------------------*/
/* skip_Proj: returns the projected predecessor of a Proj, otherwise
   the node itself.  Tolerates NULL input (hence "don't assert node"). */
1900 skip_Proj (ir_node *node) {
1901 /* don't assert node !!! */
1902 if (node && is_Proj(node)) {
1903 return get_Proj_pred(node);
/* skip_Tuple: resolves Proj-of-Tuple chains (recursively, including
   nested Tuples behind Id nodes) to the real predecessor; a no-op when
   normalization is disabled. */
1910 skip_Tuple (ir_node *node) {
1913 if (!get_opt_normalize()) return node;
1915 node = skip_Id(node);
1916 if (get_irn_op(node) == op_Proj) {
1917 pred = skip_Id(get_Proj_pred(node));
1918 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1919 pred = skip_Id(skip_Tuple(pred));
1920 if (get_irn_op(pred) == op_Tuple)
1921 return get_Tuple_pred(pred, get_Proj_proj(node));
1926 /** returns operand of node if node is a Cast */
1927 ir_node *skip_Cast (ir_node *node) {
1928 if (node && get_irn_op(node) == op_Cast) {
/* Follow the Cast's operand through any Id chain. */
1929 return skip_Id(get_irn_n(node, 0));
1936 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1937 than any other approach, as Id chains are resolved and all point to the real node, or
1938 all id's are self loops. */
/* Path-compressing Id skip: temporarily turns the node into a
   self-loop to break cycles during the recursive walk, then rewrites
   its predecessor to the chain end.  Accesses in[0+1] directly
   because get_Id_pred would loop on self-referencing Ids. */
1940 skip_Id (ir_node *node) {
1941 /* don't assert node !!! */
1943 if (!get_opt_normalize()) return node;
1945 /* Don't use get_Id_pred: We get into an endless loop for
1946 self-referencing Ids. */
1947 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1948 ir_node *rem_pred = node->in[0+1];
1951 assert (get_irn_arity (node) > 0);
1953 node->in[0+1] = node;
1954 res = skip_Id(rem_pred);
1955 if (res->op == op_Id) /* self-loop */ return node;
1957 node->in[0+1] = res;
1964 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1965 than any other approach, as Id chains are resolved and all point to the real node, or
1966 all id's are self loops. */
/* NOTE(review): this is a second definition of skip_Id — in the full
   file one of the two copies is presumably disabled by a preprocessor
   conditional dropped by the extraction; confirm against the original.
   This variant adds early-outs for non-Id nodes and single-hop chains
   before doing the same path compression as the version above. */
1968 skip_Id (ir_node *node) {
1970 /* don't assert node !!! */
1972 if (!node || (node->op != op_Id)) return node;
1974 if (!get_opt_normalize()) return node;
1976 /* Don't use get_Id_pred: We get into an endless loop for
1977 self-referencing Ids. */
1978 pred = node->in[0+1];
1980 if (pred->op != op_Id) return pred;
1982 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1983 ir_node *rem_pred, *res;
1985 if (pred->op != op_Id) return pred; /* shortcut */
1988 assert (get_irn_arity (node) > 0);
1990 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1991 res = skip_Id(rem_pred);
1992 if (res->op == op_Id) /* self-loop */ return node;
1994 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Simple opcode predicates.  is_Bad tolerates a NULL argument;
   is_Proj also accepts Filter nodes outside the interprocedural view
   (a Filter behaves like a Proj there). */
2003 is_Bad (ir_node *node) {
2005 if ((node) && get_irn_opcode(node) == iro_Bad)
2011 is_no_Block (ir_node *node) {
2013 return (get_irn_opcode(node) != iro_Block);
2017 is_Block (ir_node *node) {
2019 return (get_irn_opcode(node) == iro_Block);
2022 /* returns true if node is a Unknown node. */
2024 is_Unknown (ir_node *node) {
2026 return (get_irn_opcode(node) == iro_Unknown);
2030 is_Proj (const ir_node *node) {
2032 return node->op == op_Proj
2033 || (!interprocedural_view && node->op == op_Filter);
2036 /* Returns true if the operation manipulates control flow. */
2038 is_cfop(ir_node *node) {
2039 return is_cfopcode(get_irn_op(node));
2042 /* Returns true if the operation manipulates interprocedural control flow:
2043 CallBegin, EndReg, EndExcept */
2044 int is_ip_cfop(ir_node *node) {
2045 return is_ip_cfopcode(get_irn_op(node));
2048 /* Returns true if the operation can change the control flow because
2051 is_fragile_op(ir_node *node) {
2052 return is_op_fragile(get_irn_op(node));
2055 /* Returns the memory operand of fragile operations. */
/* NOTE(review): the switch's case labels were dropped by the
   extraction; only the shared 'input 0 is memory' return and the
   unreachable-default assert remain visible. */
2056 ir_node *get_fragile_op_mem(ir_node *node) {
2057 assert(node && is_fragile_op(node));
2059 switch (get_irn_opcode (node)) {
2068 return get_irn_n(node, 0);
2073 assert(0 && "should not be reached");
2084 #ifdef DEBUG_libfirm
/* Debug helper: prints a node's op/mode/number/address, its block
   (predecessor -1), and all data predecessors to stdout. */
2085 void dump_irn (ir_node *n) {
2086 int i, arity = get_irn_arity(n);
2087 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2089 ir_node *pred = get_irn_n(n, -1);
2090 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2091 get_irn_node_nr(pred), (void *)pred);
2093 printf(" preds: \n");
2094 for (i = 0; i < arity; ++i) {
2095 ir_node *pred = get_irn_n(n, i);
2096 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2097 get_irn_node_nr(pred), (void *)pred);
2101 #else /* DEBUG_libfirm */
/* Release builds: keep the symbol but do nothing. */
2102 void dump_irn (ir_node *n) {}
2103 #endif /* DEBUG_libfirm */