3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
/* some constants fixing the positions of nodes predecessors in the in array */
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
/* Textual names for the pnc (pn_Cmp) condition codes, indexed by the
   pnc constant.  The order must match the pnc enumeration. */
static const char *pnc_name_arr [] = {
  "False", "Eq", "Lt", "Le",
  "Gt", "Ge", "Lg", "Leg", "Uo",
  "Ue", "Ul", "Ule", "Ug", "Uge",
  "Ne", "True"
};

/**
 * Returns the pnc name from a pnc constant.
 */
const char *get_pnc_string(int pnc) {
  /* guard against out-of-range indices reading past the table */
  assert(0 <= pnc && pnc < (int)(sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
  return pnc_name_arr[pnc];
}
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
/* Textual names for the projections of the Start node (pns numbers). */
const char *pns_name_arr [] = {
  "initial_exec", "global_store",
  "frame_base", "globals", "args"
};

/* Textual names for the symconst_kind values. */
const char *symconst_name_arr [] = {
  "type_tag", "size", "addr_name", "addr_ent"
};
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
159 (get_irn_arity)(const ir_node *node) {
160 return __get_irn_arity(node);
163 /* Returns the array with ins. This array is shifted with respect to the
164 array accessed by get_irn_n: The block operand is at position 0 not -1.
165 (@@@ This should be changed.)
166 The order of the predecessors in this array is not guaranteed, except that
167 lists of operands as predecessors of Block or arguments of a Call are
170 get_irn_in (const ir_node *node) {
172 if (interprocedural_view) { /* handle Filter and Block specially */
173 if (get_irn_opcode(node) == iro_Filter) {
174 assert(node->attr.filter.in_cg);
175 return node->attr.filter.in_cg;
176 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
177 return node->attr.block.in_cg;
179 /* else fall through */
185 set_irn_in (ir_node *node, int arity, ir_node **in) {
188 if (interprocedural_view) { /* handle Filter and Block specially */
189 if (get_irn_opcode(node) == iro_Filter) {
190 assert(node->attr.filter.in_cg);
191 arr = &node->attr.filter.in_cg;
192 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
193 arr = &node->attr.block.in_cg;
200 if (arity != ARR_LEN(*arr) - 1) {
201 ir_node * block = (*arr)[0];
202 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
205 fix_backedges(current_ir_graph->obst, node);
206 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
210 (get_irn_intra_n)(ir_node *node, int n) {
211 return __get_irn_intra_n (node, n);
215 (get_irn_inter_n)(ir_node *node, int n) {
216 return __get_irn_inter_n (node, n);
220 (get_irn_n)(ir_node *node, int n) {
221 return __get_irn_n (node, n);
225 set_irn_n (ir_node *node, int n, ir_node *in) {
226 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
227 assert(in && in->kind == k_ir_node);
228 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
229 /* Change block pred in both views! */
230 node->in[n + 1] = in;
231 assert(node->attr.filter.in_cg);
232 node->attr.filter.in_cg[n + 1] = in;
235 if (interprocedural_view) { /* handle Filter and Block specially */
236 if (get_irn_opcode(node) == iro_Filter) {
237 assert(node->attr.filter.in_cg);
238 node->attr.filter.in_cg[n + 1] = in;
240 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
241 node->attr.block.in_cg[n + 1] = in;
244 /* else fall through */
246 node->in[n + 1] = in;
250 (get_irn_mode)(const ir_node *node) {
251 return __get_irn_mode(node);
255 (set_irn_mode)(ir_node *node, ir_mode *mode)
257 __set_irn_mode(node, mode);
261 get_irn_modecode (const ir_node *node)
264 return node->mode->code;
267 /** Gets the string representation of the mode .*/
269 get_irn_modename (const ir_node *node)
272 return get_mode_name(node->mode);
276 get_irn_modeident (const ir_node *node)
279 return get_mode_ident(node->mode);
283 (get_irn_op)(const ir_node *node)
285 return __get_irn_op(node);
288 /* should be private to the library: */
290 set_irn_op (ir_node *node, ir_op *op)
297 (get_irn_opcode)(const ir_node *node)
299 return __get_irn_opcode(node);
303 get_irn_opname (const ir_node *node)
306 if ((get_irn_op((ir_node *)node) == op_Phi) &&
307 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
308 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
309 return get_id_str(node->op->name);
313 get_irn_opident (const ir_node *node)
316 return node->op->name;
320 (get_irn_visited)(const ir_node *node)
322 return __get_irn_visited(node);
326 (set_irn_visited)(ir_node *node, unsigned long visited)
328 __set_irn_visited(node, visited);
332 (mark_irn_visited)(ir_node *node) {
333 __mark_irn_visited(node);
337 (irn_not_visited)(const ir_node *node) {
338 return __irn_not_visited(node);
342 (irn_visited)(const ir_node *node) {
343 return __irn_visited(node);
347 (set_irn_link)(ir_node *node, void *link) {
348 __set_irn_link(node, link);
352 (get_irn_link)(const ir_node *node) {
353 return __get_irn_link(node);
357 (get_irn_pinned)(const ir_node *node) {
358 /* return __get_irn_pinned(node); */
359 return (__get_op_pinned (get_irn_op (node)));
363 #ifdef DO_HEAPANALYSIS
364 /* Access the abstract interpretation information of a node.
365 Returns NULL if no such information is available. */
366 struct abstval *get_irn_abst_value(ir_node *n) {
369 /* Set the abstract interpretation information of a node. */
370 void set_irn_abst_value(ir_node *n, struct abstval *os) {
373 struct section *firm_get_irn_section(ir_node *n) {
376 void firm_set_irn_section(ir_node *n, struct section *s) {
379 #endif /* DO_HEAPANALYSIS */
382 /* Outputs a unique number for this node */
384 get_irn_node_nr(const ir_node *node) {
387 return node->node_nr;
394 get_irn_const_attr (ir_node *node)
396 assert (node->op == op_Const);
397 return node->attr.con;
401 get_irn_proj_attr (ir_node *node)
403 assert (node->op == op_Proj);
404 return node->attr.proj;
408 get_irn_alloc_attr (ir_node *node)
410 assert (node->op == op_Alloc);
415 get_irn_free_attr (ir_node *node)
417 assert (node->op == op_Free);
418 return node->attr.f = skip_tid(node->attr.f);
422 get_irn_symconst_attr (ir_node *node)
424 assert (node->op == op_SymConst);
429 get_irn_call_attr (ir_node *node)
431 assert (node->op == op_Call);
432 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
436 get_irn_funccall_attr (ir_node *node)
438 assert (node->op == op_FuncCall);
439 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
443 get_irn_sel_attr (ir_node *node)
445 assert (node->op == op_Sel);
450 get_irn_phi_attr (ir_node *node)
452 assert (node->op == op_Phi);
453 return node->attr.phi0_pos;
457 get_irn_block_attr (ir_node *node)
459 assert (node->op == op_Block);
460 return node->attr.block;
464 get_irn_load_attr (ir_node *node)
466 assert (node->op == op_Load);
467 return node->attr.load;
471 get_irn_store_attr (ir_node *node)
473 assert (node->op == op_Store);
474 return node->attr.store;
478 get_irn_except_attr (ir_node *node)
480 assert (node->op == op_Div || node->op == op_Quot ||
481 node->op == op_DivMod || node->op == op_Mod);
482 return node->attr.except;
485 /** manipulate fields of individual nodes **/
487 /* this works for all except Block */
489 get_nodes_block (ir_node *node) {
490 assert (!(node->op == op_Block));
491 return get_irn_n(node, -1);
495 set_nodes_block (ir_node *node, ir_node *block) {
496 assert (!(node->op == op_Block));
497 set_irn_n(node, -1, block);
500 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
501 * from Start. If so returns frame type, else Null. */
502 type *is_frame_pointer(ir_node *n) {
503 if ((get_irn_op(n) == op_Proj) &&
504 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
505 ir_node *start = get_Proj_pred(n);
506 if (get_irn_op(start) == op_Start) {
507 return get_irg_frame_type(get_irn_irg(start));
513 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
514 * from Start. If so returns global type, else Null. */
515 type *is_globals_pointer(ir_node *n) {
516 if ((get_irn_op(n) == op_Proj) &&
517 (get_Proj_proj(n) == pn_Start_P_globals)) {
518 ir_node *start = get_Proj_pred(n);
519 if (get_irn_op(start) == op_Start) {
520 return get_glob_type();
526 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
527 * from Start. If so returns 1, else 0. */
528 int is_value_arg_pointer(ir_node *n) {
529 if ((get_irn_op(n) == op_Proj) &&
530 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
531 (get_irn_op(get_Proj_pred(n)) == op_Start))
536 /* Returns an array with the predecessors of the Block. Depending on
537 the implementation of the graph data structure this can be a copy of
538 the internal representation of predecessors as well as the internal
539 array itself. Therefore writing to this array might obstruct the ir. */
541 get_Block_cfgpred_arr (ir_node *node)
543 assert ((node->op == op_Block));
544 return (ir_node **)&(get_irn_in(node)[1]);
549 get_Block_n_cfgpreds (ir_node *node) {
550 assert ((node->op == op_Block));
551 return get_irn_arity(node);
555 get_Block_cfgpred (ir_node *node, int pos) {
557 assert (node->op == op_Block);
558 assert(-1 <= pos && pos < get_irn_arity(node));
559 return get_irn_n(node, pos);
563 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
564 assert (node->op == op_Block);
565 set_irn_n(node, pos, pred);
569 get_Block_matured (ir_node *node) {
570 assert (node->op == op_Block);
571 return node->attr.block.matured;
575 set_Block_matured (ir_node *node, bool matured) {
576 assert (node->op == op_Block);
577 node->attr.block.matured = matured;
580 get_Block_block_visited (ir_node *node) {
581 assert (node->op == op_Block);
582 return node->attr.block.block_visited;
586 set_Block_block_visited (ir_node *node, unsigned long visit) {
587 assert (node->op == op_Block);
588 node->attr.block.block_visited = visit;
591 /* For this current_ir_graph must be set. */
593 mark_Block_block_visited (ir_node *node) {
594 assert (node->op == op_Block);
595 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
599 Block_not_block_visited(ir_node *node) {
600 assert (node->op == op_Block);
601 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
605 get_Block_graph_arr (ir_node *node, int pos) {
606 assert (node->op == op_Block);
607 return node->attr.block.graph_arr[pos+1];
611 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
612 assert (node->op == op_Block);
613 node->attr.block.graph_arr[pos+1] = value;
616 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
617 assert(node->op == op_Block);
618 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
619 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
620 node->attr.block.in_cg[0] = NULL;
621 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
623 /* Fix backedge array. fix_backedges operates depending on
624 interprocedural_view. */
625 bool ipv = interprocedural_view;
626 interprocedural_view = true;
627 fix_backedges(current_ir_graph->obst, node);
628 interprocedural_view = ipv;
631 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
634 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
635 assert(node->op == op_Block &&
636 node->attr.block.in_cg &&
637 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
638 node->attr.block.in_cg[pos + 1] = pred;
641 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
642 assert(node->op == op_Block);
643 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
646 int get_Block_cg_n_cfgpreds(ir_node * node) {
647 assert(node->op == op_Block);
648 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
651 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
652 assert(node->op == op_Block && node->attr.block.in_cg);
653 return node->attr.block.in_cg[pos + 1];
656 void remove_Block_cg_cfgpred_arr(ir_node * node) {
657 assert(node->op == op_Block);
658 node->attr.block.in_cg = NULL;
662 set_Start_irg(ir_node *node, ir_graph *irg) {
663 assert(node->op == op_Start);
664 assert(is_ir_graph(irg));
665 assert(0 && " Why set irg? -- use set_irn_irg");
669 get_End_n_keepalives(ir_node *end) {
670 assert (end->op == op_End);
671 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
675 get_End_keepalive(ir_node *end, int pos) {
676 assert (end->op == op_End);
677 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
681 add_End_keepalive (ir_node *end, ir_node *ka) {
682 assert (end->op == op_End);
683 ARR_APP1 (ir_node *, end->in, ka);
687 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
688 assert (end->op == op_End);
689 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
693 free_End (ir_node *end) {
694 assert (end->op == op_End);
696 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
697 end->in = NULL; /* @@@ make sure we get an error if we use the
698 in array afterwards ... */
703 > Implementing the case construct (which is where the constant Proj node is
704 > important) involves far more than simply determining the constant values.
705 > We could argue that this is more properly a function of the translator from
706 > Firm to the target machine. That could be done if there was some way of
707 > projecting "default" out of the Cond node.
708 I know it's complicated.
709 Basically there are two problems:
710 - determining the gaps between the projs
711 - determining the biggest case constant to know the proj number for
713 I see several solutions:
714 1. Introduce a ProjDefault node. Solves both problems.
715 This means to extend all optimizations executed during construction.
716 2. Give the Cond node for switch two flavors:
717 a) there are no gaps in the projs (existing flavor)
718 b) gaps may exist, default proj is still the Proj with the largest
719 projection number. This covers also the gaps.
720 3. Fix the semantic of the Cond to that of 2b)
722 Solution 2 seems to be the best:
723 Computing the gaps in the Firm representation is not too hard, i.e.,
724 libFIRM can implement a routine that transforms between the two
725 flavours. This is also possible for 1) but 2) does not require to
726 change any existing optimization.
727 Further it should be far simpler to determine the biggest constant than
729 I don't want to choose 3) as 2a) seems to have advantages for
730 dataflow analysis and 3) does not allow to convert the representation to
734 get_Cond_selector (ir_node *node) {
735 assert (node->op == op_Cond);
736 return get_irn_n(node, 0);
740 set_Cond_selector (ir_node *node, ir_node *selector) {
741 assert (node->op == op_Cond);
742 set_irn_n(node, 0, selector);
746 get_Cond_kind (ir_node *node) {
747 assert (node->op == op_Cond);
748 return node->attr.c.kind;
752 set_Cond_kind (ir_node *node, cond_kind kind) {
753 assert (node->op == op_Cond);
754 node->attr.c.kind = kind;
758 get_Cond_defaultProj (ir_node *node) {
759 assert (node->op == op_Cond);
760 return node->attr.c.default_proj;
764 get_Return_mem (ir_node *node) {
765 assert (node->op == op_Return);
766 return get_irn_n(node, 0);
770 set_Return_mem (ir_node *node, ir_node *mem) {
771 assert (node->op == op_Return);
772 set_irn_n(node, 0, mem);
776 get_Return_n_ress (ir_node *node) {
777 assert (node->op == op_Return);
778 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
782 get_Return_res_arr (ir_node *node)
784 assert ((node->op == op_Return));
785 if (get_Return_n_ress(node) > 0)
786 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
793 set_Return_n_res (ir_node *node, int results) {
794 assert (node->op == op_Return);
799 get_Return_res (ir_node *node, int pos) {
800 assert (node->op == op_Return);
801 assert (get_Return_n_ress(node) > pos);
802 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
806 set_Return_res (ir_node *node, int pos, ir_node *res){
807 assert (node->op == op_Return);
808 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
812 get_Raise_mem (ir_node *node) {
813 assert (node->op == op_Raise);
814 return get_irn_n(node, 0);
818 set_Raise_mem (ir_node *node, ir_node *mem) {
819 assert (node->op == op_Raise);
820 set_irn_n(node, 0, mem);
824 get_Raise_exo_ptr (ir_node *node) {
825 assert (node->op == op_Raise);
826 return get_irn_n(node, 1);
830 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
831 assert (node->op == op_Raise);
832 set_irn_n(node, 1, exo_ptr);
835 tarval *get_Const_tarval (ir_node *node) {
836 assert (node->op == op_Const);
837 return node->attr.con.tv;
841 set_Const_tarval (ir_node *node, tarval *con) {
842 assert (node->op == op_Const);
843 node->attr.con.tv = con;
847 /* The source language type. Must be an atomic type. Mode of type must
848 be mode of node. For tarvals from entities type must be pointer to
851 get_Const_type (ir_node *node) {
852 assert (node->op == op_Const);
853 return node->attr.con.tp;
857 set_Const_type (ir_node *node, type *tp) {
858 assert (node->op == op_Const);
859 if (tp != unknown_type) {
860 assert (is_atomic_type(tp));
861 assert (get_type_mode(tp) == get_irn_mode(node));
864 node->attr.con.tp = tp;
869 get_SymConst_kind (const ir_node *node) {
870 assert (node->op == op_SymConst);
871 return node->attr.i.num;
875 set_SymConst_kind (ir_node *node, symconst_kind num) {
876 assert (node->op == op_SymConst);
877 node->attr.i.num = num;
881 get_SymConst_type (ir_node *node) {
882 assert ( (node->op == op_SymConst)
883 && ( get_SymConst_kind(node) == symconst_type_tag
884 || get_SymConst_kind(node) == symconst_size));
885 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
889 set_SymConst_type (ir_node *node, type *tp) {
890 assert ( (node->op == op_SymConst)
891 && ( get_SymConst_kind(node) == symconst_type_tag
892 || get_SymConst_kind(node) == symconst_size));
893 node->attr.i.sym.type_p = tp;
897 get_SymConst_name (ir_node *node) {
898 assert ( (node->op == op_SymConst)
899 && (get_SymConst_kind(node) == symconst_addr_name));
900 return node->attr.i.sym.ident_p;
904 set_SymConst_name (ir_node *node, ident *name) {
905 assert ( (node->op == op_SymConst)
906 && (get_SymConst_kind(node) == symconst_addr_name));
907 node->attr.i.sym.ident_p = name;
911 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
912 entity *get_SymConst_entity (ir_node *node) {
913 assert ( (node->op == op_SymConst)
914 && (get_SymConst_kind (node) == symconst_addr_ent));
915 return node->attr.i.sym.entity_p;
918 void set_SymConst_entity (ir_node *node, entity *ent) {
919 assert ( (node->op == op_SymConst)
920 && (get_SymConst_kind(node) == symconst_addr_ent));
921 node->attr.i.sym.entity_p = ent;
925 union symconst_symbol
926 get_SymConst_symbol (ir_node *node) {
927 assert (node->op == op_SymConst);
928 return node->attr.i.sym;
932 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
933 assert (node->op == op_SymConst);
934 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
935 node->attr.i.sym = sym;
939 get_Sel_mem (ir_node *node) {
940 assert (node->op == op_Sel);
941 return get_irn_n(node, 0);
945 set_Sel_mem (ir_node *node, ir_node *mem) {
946 assert (node->op == op_Sel);
947 set_irn_n(node, 0, mem);
951 get_Sel_ptr (ir_node *node) {
952 assert (node->op == op_Sel);
953 return get_irn_n(node, 1);
957 set_Sel_ptr (ir_node *node, ir_node *ptr) {
958 assert (node->op == op_Sel);
959 set_irn_n(node, 1, ptr);
963 get_Sel_n_indexs (ir_node *node) {
964 assert (node->op == op_Sel);
965 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
969 get_Sel_index_arr (ir_node *node)
971 assert ((node->op == op_Sel));
972 if (get_Sel_n_indexs(node) > 0)
973 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
979 get_Sel_index (ir_node *node, int pos) {
980 assert (node->op == op_Sel);
981 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
985 set_Sel_index (ir_node *node, int pos, ir_node *index) {
986 assert (node->op == op_Sel);
987 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
991 get_Sel_entity (ir_node *node) {
992 assert (node->op == op_Sel);
993 return node->attr.s.ent;
997 set_Sel_entity (ir_node *node, entity *ent) {
998 assert (node->op == op_Sel);
999 node->attr.s.ent = ent;
1003 get_InstOf_ent (ir_node *node) {
1004 assert (node->op = op_InstOf);
1005 return (node->attr.io.ent);
1009 set_InstOf_ent (ir_node *node, type *ent) {
1010 assert (node->op = op_InstOf);
1011 node->attr.io.ent = ent;
1015 get_InstOf_store (ir_node *node) {
1016 assert (node->op = op_InstOf);
1017 return (get_irn_n (node, 0));
1021 set_InstOf_store (ir_node *node, ir_node *obj) {
1022 assert (node->op = op_InstOf);
1023 set_irn_n (node, 0, obj);
1027 get_InstOf_obj (ir_node *node) {
1028 assert (node->op = op_InstOf);
1029 return (get_irn_n (node, 1));
1033 set_InstOf_obj (ir_node *node, ir_node *obj) {
1034 assert (node->op = op_InstOf);
1035 set_irn_n (node, 1, obj);
1039 /* For unary and binary arithmetic operations the access to the
1040 operands can be factored out. Left is the first, right the
1041 second arithmetic value as listed in tech report 0999-33.
1042 unops are: Minus, Abs, Not, Conv, Cast
1043 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1044 Shr, Shrs, Rotate, Cmp */
1048 get_Call_mem (ir_node *node) {
1049 assert (node->op == op_Call);
1050 return get_irn_n(node, 0);
1054 set_Call_mem (ir_node *node, ir_node *mem) {
1055 assert (node->op == op_Call);
1056 set_irn_n(node, 0, mem);
1060 get_Call_ptr (ir_node *node) {
1061 assert (node->op == op_Call);
1062 return get_irn_n(node, 1);
1066 set_Call_ptr (ir_node *node, ir_node *ptr) {
1067 assert (node->op == op_Call);
1068 set_irn_n(node, 1, ptr);
1072 get_Call_param_arr (ir_node *node) {
1073 assert (node->op == op_Call);
1074 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1078 get_Call_n_params (ir_node *node) {
1079 assert (node->op == op_Call);
1080 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1084 get_Call_arity (ir_node *node) {
1085 assert (node->op == op_Call);
1086 return get_Call_n_params(node);
1090 set_Call_arity (ir_node *node, ir_node *arity) {
1091 assert (node->op == op_Call);
1096 get_Call_param (ir_node *node, int pos) {
1097 assert (node->op == op_Call);
1098 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1102 set_Call_param (ir_node *node, int pos, ir_node *param) {
1103 assert (node->op == op_Call);
1104 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1108 get_Call_type (ir_node *node) {
1109 assert (node->op == op_Call);
1110 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1114 set_Call_type (ir_node *node, type *tp) {
1115 assert (node->op == op_Call);
1116 assert (is_method_type(tp));
1117 node->attr.call.cld_tp = tp;
1120 int Call_has_callees(ir_node *node) {
1121 assert(node && node->op == op_Call);
1122 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1123 (node->attr.call.callee_arr != NULL));
1126 int get_Call_n_callees(ir_node * node) {
1127 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1128 return ARR_LEN(node->attr.call.callee_arr);
1131 entity * get_Call_callee(ir_node * node, int pos) {
1132 assert(pos >= 0 && pos < get_Call_n_callees(node));
1133 return node->attr.call.callee_arr[pos];
1136 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1137 assert(node->op == op_Call);
1138 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1139 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1141 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1144 void remove_Call_callee_arr(ir_node * node) {
1145 assert(node->op == op_Call);
1146 node->attr.call.callee_arr = NULL;
1149 ir_node * get_CallBegin_ptr (ir_node *node) {
1150 assert(node->op == op_CallBegin);
1151 return get_irn_n(node, 0);
1153 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1154 assert(node->op == op_CallBegin);
1155 set_irn_n(node, 0, ptr);
1157 ir_node * get_CallBegin_call (ir_node *node) {
1158 assert(node->op == op_CallBegin);
1159 return node->attr.callbegin.call;
1161 void set_CallBegin_call (ir_node *node, ir_node *call) {
1162 assert(node->op == op_CallBegin);
1163 node->attr.callbegin.call = call;
1167 get_FuncCall_ptr (ir_node *node) {
1168 assert (node->op == op_FuncCall);
1169 return get_irn_n(node, 0);
1173 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1174 assert (node->op == op_FuncCall);
1175 set_irn_n(node, 0, ptr);
1179 get_FuncCall_param_arr (ir_node *node) {
1180 assert (node->op == op_FuncCall);
1181 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1185 get_FuncCall_n_params (ir_node *node) {
1186 assert (node->op == op_FuncCall);
1187 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1191 get_FuncCall_arity (ir_node *node) {
1192 assert (node->op == op_FuncCall);
1193 return get_FuncCall_n_params(node);
1197 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1198 assert (node->op == op_FuncCall);
1203 get_FuncCall_param (ir_node *node, int pos) {
1204 assert (node->op == op_FuncCall);
1205 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1209 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1210 assert (node->op == op_FuncCall);
1211 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1215 get_FuncCall_type (ir_node *node) {
1216 assert (node->op == op_FuncCall);
1217 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1221 set_FuncCall_type (ir_node *node, type *tp) {
1222 assert (node->op == op_FuncCall);
1223 assert (is_method_type(tp));
1224 node->attr.call.cld_tp = tp;
1227 int FuncCall_has_callees(ir_node *node) {
1228 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1229 (node->attr.call.callee_arr != NULL));
1232 int get_FuncCall_n_callees(ir_node * node) {
1233 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1234 return ARR_LEN(node->attr.call.callee_arr);
1237 entity * get_FuncCall_callee(ir_node * node, int pos) {
1238 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1239 return node->attr.call.callee_arr[pos];
1242 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1243 assert(node->op == op_FuncCall);
1244 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1245 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1247 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1250 void remove_FuncCall_callee_arr(ir_node * node) {
1251 assert(node->op == op_FuncCall);
1252 node->attr.call.callee_arr = NULL;
1257 ir_node * get_##OP##_left(ir_node *node) { \
1258 assert(node->op == op_##OP); \
1259 return get_irn_n(node, node->op->op_index); \
1261 void set_##OP##_left(ir_node *node, ir_node *left) { \
1262 assert(node->op == op_##OP); \
1263 set_irn_n(node, node->op->op_index, left); \
1265 ir_node *get_##OP##_right(ir_node *node) { \
1266 assert(node->op == op_##OP); \
1267 return get_irn_n(node, node->op->op_index + 1); \
1269 void set_##OP##_right(ir_node *node, ir_node *right) { \
1270 assert(node->op == op_##OP); \
1271 set_irn_n(node, node->op->op_index + 1, right); \
1275 ir_node *get_##OP##_op(ir_node *node) { \
1276 assert(node->op == op_##OP); \
1277 return get_irn_n(node, node->op->op_index); \
1279 void set_##OP##_op (ir_node *node, ir_node *op) { \
1280 assert(node->op == op_##OP); \
1281 set_irn_n(node, node->op->op_index, op); \
1291 get_Quot_mem (ir_node *node) {
1292 assert (node->op == op_Quot);
1293 return get_irn_n(node, 0);
1297 set_Quot_mem (ir_node *node, ir_node *mem) {
1298 assert (node->op == op_Quot);
1299 set_irn_n(node, 0, mem);
1305 get_DivMod_mem (ir_node *node) {
1306 assert (node->op == op_DivMod);
1307 return get_irn_n(node, 0);
1311 set_DivMod_mem (ir_node *node, ir_node *mem) {
1312 assert (node->op == op_DivMod);
1313 set_irn_n(node, 0, mem);
1319 get_Div_mem (ir_node *node) {
1320 assert (node->op == op_Div);
1321 return get_irn_n(node, 0);
1325 set_Div_mem (ir_node *node, ir_node *mem) {
1326 assert (node->op == op_Div);
1327 set_irn_n(node, 0, mem);
1333 get_Mod_mem (ir_node *node) {
1334 assert (node->op == op_Mod);
1335 return get_irn_n(node, 0);
1339 set_Mod_mem (ir_node *node, ir_node *mem) {
1340 assert (node->op == op_Mod);
1341 set_irn_n(node, 0, mem);
1358 get_Cast_type (ir_node *node) {
1359 assert (node->op == op_Cast);
1360 return node->attr.cast.totype;
1364 set_Cast_type (ir_node *node, type *to_tp) {
1365 assert (node->op == op_Cast);
1366 node->attr.cast.totype = to_tp;
1370 is_unop (ir_node *node) {
1371 return (node->op->opar == oparity_unary);
1375 get_unop_op (ir_node *node) {
1376 if (node->op->opar == oparity_unary)
1377 return get_irn_n(node, node->op->op_index);
1379 assert(node->op->opar == oparity_unary);
1384 set_unop_op (ir_node *node, ir_node *op) {
1385 if (node->op->opar == oparity_unary)
1386 set_irn_n(node, node->op->op_index, op);
1388 assert(node->op->opar == oparity_unary);
1392 is_binop (ir_node *node) {
1393 return (node->op->opar == oparity_binary);
1397 get_binop_left (ir_node *node) {
1398 if (node->op->opar == oparity_binary)
1399 return get_irn_n(node, node->op->op_index);
1401 assert(node->op->opar == oparity_binary);
1406 set_binop_left (ir_node *node, ir_node *left) {
1407 if (node->op->opar == oparity_binary)
1408 set_irn_n(node, node->op->op_index, left);
1410 assert (node->op->opar == oparity_binary);
1414 get_binop_right (ir_node *node) {
1415 if (node->op->opar == oparity_binary)
1416 return get_irn_n(node, node->op->op_index + 1);
1418 assert(node->op->opar == oparity_binary);
1423 set_binop_right (ir_node *node, ir_node *right) {
1424 if (node->op->opar == oparity_binary)
1425 set_irn_n(node, node->op->op_index + 1, right);
1427 assert (node->op->opar == oparity_binary);
/* Returns true if n is a "real" Phi.  In the interprocedural view a
   Filter also acts as a Phi.  A zero-arity Phi during graph
   construction is a placeholder (Phi0), not a real Phi. */
1430 int is_Phi (ir_node *n) {
1436 if (op == op_Filter) return interprocedural_view;
1439 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1440 (get_irn_arity(n) > 0));
/* Returns true if n is a Phi0: a zero-arity Phi placeholder that only
   exists while the graph is still being built. */
1445 int is_Phi0 (ir_node *n) {
1448 return ((get_irn_op(n) == op_Phi) &&
1449 (get_irn_arity(n) == 0) &&
1450 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* In-array starts at index 1; index 0 is the block, so &in[1] is the
   predecessor array. */
1454 get_Phi_preds_arr (ir_node *node) {
1455 assert (node->op == op_Phi);
1456 return (ir_node **)&(get_irn_in(node)[1]);
1460 get_Phi_n_preds (ir_node *node) {
1461 assert (is_Phi(node) || is_Phi0(node));
1462 return (get_irn_arity(node));
1466 void set_Phi_n_preds (ir_node *node, int n_preds) {
1467 assert (node->op == op_Phi);
1472 get_Phi_pred (ir_node *node, int pos) {
1473 assert (is_Phi(node) || is_Phi0(node));
1474 return get_irn_n(node, pos);
1478 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1479 assert (is_Phi(node) || is_Phi0(node));
1480 set_irn_n(node, pos, pred);
/* A memop is a Load or a Store.  Both share the same layout for their
   first two inputs: 0 = memory, 1 = pointer. */
1484 int is_memop(ir_node *node) {
1485 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1488 ir_node *get_memop_mem (ir_node *node) {
1489 assert(is_memop(node));
1490 return get_irn_n(node, 0);
1493 void set_memop_mem (ir_node *node, ir_node *mem) {
1494 assert(is_memop(node));
1495 set_irn_n(node, 0, mem);
1498 ir_node *get_memop_ptr (ir_node *node) {
1499 assert(is_memop(node));
1500 return get_irn_n(node, 1);
1503 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1504 assert(is_memop(node));
1505 set_irn_n(node, 1, ptr);
/* Load: input 0 = memory, input 1 = pointer; the loaded mode and the
   volatility flag live in the node's attributes. */
1509 get_Load_mem (ir_node *node) {
1510 assert (node->op == op_Load);
1511 return get_irn_n(node, 0);
1515 set_Load_mem (ir_node *node, ir_node *mem) {
1516 assert (node->op == op_Load);
1517 set_irn_n(node, 0, mem);
1521 get_Load_ptr (ir_node *node) {
1522 assert (node->op == op_Load);
1523 return get_irn_n(node, 1);
1527 set_Load_ptr (ir_node *node, ir_node *ptr) {
1528 assert (node->op == op_Load);
1529 set_irn_n(node, 1, ptr);
1533 get_Load_mode (ir_node *node) {
1534 assert (node->op == op_Load);
1535 return node->attr.load.load_mode;
1539 set_Load_mode (ir_node *node, ir_mode *mode) {
1540 assert (node->op == op_Load);
1541 node->attr.load.load_mode = mode;
1545 get_Load_volatility (ir_node *node) {
1546 assert (node->op == op_Load);
1547 return node->attr.load.volatility;
1551 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1552 assert (node->op == op_Load);
1553 node->attr.load.volatility = volatility;
/* Store: input 0 = memory, input 1 = pointer, input 2 = value to
   store; the volatility flag lives in the node's attributes. */
1558 get_Store_mem (ir_node *node) {
1559 assert (node->op == op_Store);
1560 return get_irn_n(node, 0);
1564 set_Store_mem (ir_node *node, ir_node *mem) {
1565 assert (node->op == op_Store);
1566 set_irn_n(node, 0, mem);
1570 get_Store_ptr (ir_node *node) {
1571 assert (node->op == op_Store);
1572 return get_irn_n(node, 1);
1576 set_Store_ptr (ir_node *node, ir_node *ptr) {
1577 assert (node->op == op_Store);
1578 set_irn_n(node, 1, ptr);
1582 get_Store_value (ir_node *node) {
1583 assert (node->op == op_Store);
1584 return get_irn_n(node, 2);
1588 set_Store_value (ir_node *node, ir_node *value) {
1589 assert (node->op == op_Store);
1590 set_irn_n(node, 2, value);
1594 get_Store_volatility (ir_node *node) {
1595 assert (node->op == op_Store);
1596 return node->attr.store.volatility;
1600 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1601 assert (node->op == op_Store);
1602 node->attr.store.volatility = volatility;
/* Alloc: input 0 = memory, input 1 = size; the allocated type and the
   where_alloc flag (stack/heap) live in the attributes. */
1607 get_Alloc_mem (ir_node *node) {
1608 assert (node->op == op_Alloc);
1609 return get_irn_n(node, 0);
1613 set_Alloc_mem (ir_node *node, ir_node *mem) {
1614 assert (node->op == op_Alloc);
1615 set_irn_n(node, 0, mem);
1619 get_Alloc_size (ir_node *node) {
1620 assert (node->op == op_Alloc);
1621 return get_irn_n(node, 1);
1625 set_Alloc_size (ir_node *node, ir_node *size) {
1626 assert (node->op == op_Alloc);
1627 set_irn_n(node, 1, size);
1631 get_Alloc_type (ir_node *node) {
1632 assert (node->op == op_Alloc);
/* Normalizes the stored type through skip_tid and caches the result
   back into the attribute before returning it. */
1633 return node->attr.a.type = skip_tid(node->attr.a.type);
1637 set_Alloc_type (ir_node *node, type *tp) {
1638 assert (node->op == op_Alloc);
1639 node->attr.a.type = tp;
1643 get_Alloc_where (ir_node *node) {
1644 assert (node->op == op_Alloc);
1645 return node->attr.a.where;
1649 set_Alloc_where (ir_node *node, where_alloc where) {
1650 assert (node->op == op_Alloc);
1651 node->attr.a.where = where;
/* Free: input 0 = memory, input 1 = pointer, input 2 = size; the freed
   type lives in the attributes (attr.f). */
1656 get_Free_mem (ir_node *node) {
1657 assert (node->op == op_Free);
1658 return get_irn_n(node, 0);
1662 set_Free_mem (ir_node *node, ir_node *mem) {
1663 assert (node->op == op_Free);
1664 set_irn_n(node, 0, mem);
1668 get_Free_ptr (ir_node *node) {
1669 assert (node->op == op_Free);
1670 return get_irn_n(node, 1);
1674 set_Free_ptr (ir_node *node, ir_node *ptr) {
1675 assert (node->op == op_Free);
1676 set_irn_n(node, 1, ptr);
1680 get_Free_size (ir_node *node) {
1681 assert (node->op == op_Free);
1682 return get_irn_n(node, 2);
1686 set_Free_size (ir_node *node, ir_node *size) {
1687 assert (node->op == op_Free);
1688 set_irn_n(node, 2, size);
1692 get_Free_type (ir_node *node) {
1693 assert (node->op == op_Free);
/* Same caching idiom as get_Alloc_type: normalize via skip_tid and
   store back before returning. */
1694 return node->attr.f = skip_tid(node->attr.f);
1698 set_Free_type (ir_node *node, type *tp) {
1699 assert (node->op == op_Free);
/* Sync: all inputs are predecessors; in[0] is the block, so &in[1] is
   the predecessor array. */
1704 get_Sync_preds_arr (ir_node *node) {
1705 assert (node->op == op_Sync);
1706 return (ir_node **)&(get_irn_in(node)[1]);
1710 get_Sync_n_preds (ir_node *node) {
1711 assert (node->op == op_Sync);
1712 return (get_irn_arity(node));
1717 set_Sync_n_preds (ir_node *node, int n_preds) {
1718 assert (node->op == op_Sync);
1723 get_Sync_pred (ir_node *node, int pos) {
1724 assert (node->op == op_Sync);
1725 return get_irn_n(node, pos);
1729 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1730 assert (node->op == op_Sync);
1731 set_irn_n(node, pos, pred);
/* Proj: input 0 is the projected node.  is_Proj() also accepts Filter
   when not in the interprocedural view (see is_Proj below). */
1735 get_Proj_pred (ir_node *node) {
1736 assert (is_Proj(node));
1737 return get_irn_n(node, 0);
1741 set_Proj_pred (ir_node *node, ir_node *pred) {
1742 assert (is_Proj(node));
1743 set_irn_n(node, 0, pred);
/* Returns the projection number.  A Filter acts as a Proj in the
   interprocedural view and stores its number in attr.filter.proj. */
1747 get_Proj_proj (ir_node *node) {
1748 assert (is_Proj(node));
1749 if (get_irn_opcode(node) == iro_Proj) {
1750 return node->attr.proj;
1752 assert(get_irn_opcode(node) == iro_Filter);
1753 return node->attr.filter.proj;
1758 set_Proj_proj (ir_node *node, long proj) {
1759 assert (node->op == op_Proj);
1760 node->attr.proj = proj;
/* Tuple: all inputs are predecessors; in[0] is the block, so &in[1]
   is the predecessor array. */
1764 get_Tuple_preds_arr (ir_node *node) {
1765 assert (node->op == op_Tuple);
1766 return (ir_node **)&(get_irn_in(node)[1]);
1770 get_Tuple_n_preds (ir_node *node) {
1771 assert (node->op == op_Tuple);
1772 return (get_irn_arity(node));
1777 set_Tuple_n_preds (ir_node *node, int n_preds) {
1778 assert (node->op == op_Tuple);
1783 get_Tuple_pred (ir_node *node, int pos) {
1784 assert (node->op == op_Tuple);
1785 return get_irn_n(node, pos);
1789 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1790 assert (node->op == op_Tuple);
1791 set_irn_n(node, pos, pred);
/* Id: input 0 is the forwarded node (see skip_Id below for chain
   resolution). */
1795 get_Id_pred (ir_node *node) {
1796 assert (node->op == op_Id);
1797 return get_irn_n(node, 0);
1801 set_Id_pred (ir_node *node, ir_node *pred) {
1802 assert (node->op == op_Id);
1803 set_irn_n(node, 0, pred);
/* Confirm: input 0 = the confirmed value, input 1 = the bound it is
   compared against. */
1806 ir_node *get_Confirm_value (ir_node *node) {
1807 assert (node->op == op_Confirm);
1808 return get_irn_n(node, 0);
1810 void set_Confirm_value (ir_node *node, ir_node *value) {
1811 assert (node->op == op_Confirm);
1812 set_irn_n(node, 0, value);
1814 ir_node *get_Confirm_bound (ir_node *node) {
1815 assert (node->op == op_Confirm);
1816 return get_irn_n(node, 1);
1818 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1819 assert (node->op == op_Confirm);
1820 set_irn_n(node, 0, bound);
/* The comparison relation (pn_Cmp) between value and bound lives in
   the Confirm attributes. */
1822 pn_Cmp get_Confirm_cmp (ir_node *node) {
1823 assert (node->op == op_Confirm);
1824 return node->attr.confirm_cmp;
1826 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1827 assert (node->op == op_Confirm);
1828 node->attr.confirm_cmp = cmp;
/* Filter: Proj-like node of the interprocedural view; its projection
   number lives in attr.filter.proj. */
1833 get_Filter_pred (ir_node *node) {
1834 assert(node->op == op_Filter);
1838 set_Filter_pred (ir_node *node, ir_node *pred) {
1839 assert(node->op == op_Filter);
1843 get_Filter_proj(ir_node *node) {
1844 assert(node->op == op_Filter);
1845 return node->attr.filter.proj;
1848 set_Filter_proj (ir_node *node, long proj) {
1849 assert(node->op == op_Filter);
1850 node->attr.filter.proj = proj;
1853 /* Don't use get_irn_arity, get_irn_n in implementation as access
1854    shall work independent of view!!! */
/* Installs the interprocedural (call-graph) predecessor array of a
   Filter.  in_cg[0] mirrors in[0] (the block); the arity real
   predecessors follow at in_cg[1..arity].  The array and its backedge
   flags are (re)allocated on the graph's obstack only when the arity
   changes. */
1855 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1856   assert(node->op == op_Filter);
1857   if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1858     node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1859     node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1860     memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1861     node->attr.filter.in_cg[0] = node->in[0];
1863   memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets one interprocedural predecessor; pos is 0-based, stored at
   in_cg[pos + 1] because in_cg[0] holds the block. */
1866 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1867   assert(node->op == op_Filter && node->attr.filter.in_cg &&
1868      0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1869   node->attr.filter.in_cg[pos + 1] = pred;
1871 int get_Filter_n_cg_preds(ir_node *node) {
1872   assert(node->op == op_Filter && node->attr.filter.in_cg);
1873   return (ARR_LEN(node->attr.filter.in_cg) - 1);
1875 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1877   assert(node->op == op_Filter && node->attr.filter.in_cg &&
1879   arity = ARR_LEN(node->attr.filter.in_cg);
1880   assert(pos < arity - 1);
1881   return node->attr.filter.in_cg[pos + 1];
/* Returns the graph a node belongs to, by walking to its Block (which
   stores the irg in its attributes). */
1886 get_irn_irg(ir_node *node) {
1887   if (get_irn_op(node) != op_Block)
1888     node = get_nodes_block(node);
1889   if (is_Bad(node))  /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1890     node = get_nodes_block(node);
1891   assert(get_irn_op(node) == op_Block);
1892   return node->attr.block.irg;
1896 /*----------------------------------------------------------------*/
1897 /* Auxiliary routines */
1898 /*----------------------------------------------------------------*/
1901 skip_Proj (ir_node *node) {
1902 /* don't assert node !!! */
1903 if (node && is_Proj(node)) {
1904 return get_Proj_pred(node);
1911 skip_Tuple (ir_node *node) {
1914 if (!get_opt_normalize()) return node;
1916 node = skip_Id(node);
1917 if (get_irn_op(node) == op_Proj) {
1918 pred = skip_Id(get_Proj_pred(node));
1919 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1920 pred = skip_Id(skip_Tuple(pred));
1921 if (get_irn_op(pred) == op_Tuple)
1922 return get_Tuple_pred(pred, get_Proj_proj(node));
1927 /** returns operand of node if node is a Cast */
1928 ir_node *skip_Cast (ir_node *node) {
1929 if (node && get_irn_op(node) == op_Cast) {
1930 return skip_Id(get_irn_n(node, 0));
1937 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1938    than any other approach, as Id chains are resolved and all point to the real node, or
1939    all id's are self loops. */
/* NOTE(review): a second skip_Id definition follows below; presumably
   the two variants are selected by a preprocessor conditional not
   visible in this view -- confirm against the full file. */
1941 skip_Id (ir_node *node) {
1942   /* don't assert node !!! */
1944   if (!get_opt_normalize()) return node;
1946   /* Don't use get_Id_pred: We get into an endless loop for
1947      self-referencing Ids. */
/* in[0+1] is the first real predecessor (in[0] is the block). */
1948   if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1949     ir_node *rem_pred = node->in[0+1];
1952     assert (get_irn_arity (node) > 0);
/* Temporarily make the node a self-loop so the recursion terminates
   even on Id cycles, then point it at the resolved chain end. */
1954     node->in[0+1] = node;
1955     res = skip_Id(rem_pred);
1956     if (res->op == op_Id) /* self-loop */ return node;
1958     node->in[0+1] = res;
1965 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1966    than any other approach, as Id chains are resolved and all point to the real node, or
1967    all id's are self loops. */
/* Alternate skip_Id variant with early-out shortcuts; presumably the
   other arm of a preprocessor conditional hidden from this view. */
1969 skip_Id (ir_node *node) {
1971   /* don't assert node !!! */
1973   if (!node || (node->op != op_Id)) return node;
1975   if (!get_opt_normalize()) return node;
1977   /* Don't use get_Id_pred: We get into an endless loop for
1978      self-referencing Ids. */
1979   pred = node->in[0+1];
1981   if (pred->op != op_Id) return pred;
1983   if (node != pred) {  /* not a self referencing Id. Resolve Id chain. */
1984     ir_node *rem_pred, *res;
1986     if (pred->op != op_Id) return pred; /* shortcut */
1989     assert (get_irn_arity (node) > 0);
/* Self-loop trick, as above: guarantees termination on Id cycles. */
1991     node->in[0+1] = node;   /* turn us into a self referencing Id:  shorten Id cycles. */
1992     res = skip_Id(rem_pred);
1993     if (res->op == op_Id) /* self-loop */ return node;
1995     node->in[0+1] = res;    /* Turn Id chain into Ids all referencing the chain end. */
/* Simple opcode predicates. */
2004 is_Bad (ir_node *node) {
2006   if ((node) && get_irn_opcode(node) == iro_Bad)
2012 is_no_Block (ir_node *node) {
2014   return (get_irn_opcode(node) != iro_Block);
2018 is_Block (ir_node *node) {
2020   return (get_irn_opcode(node) == iro_Block);
2023 /* returns true if node is a Unknown node. */
2025 is_Unknown (ir_node *node) {
2027   return (get_irn_opcode(node) == iro_Unknown);
/* A Filter counts as a Proj outside the interprocedural view. */
2031 is_Proj (const ir_node *node) {
2033   return node->op == op_Proj
2034     || (!interprocedural_view && node->op == op_Filter);
2037 /* Returns true if the operation manipulates control flow. */
2039 is_cfop(ir_node *node) {
2040   return is_cfopcode(get_irn_op(node));
2043 /* Returns true if the operation manipulates interprocedural control flow:
2044    CallBegin, EndReg, EndExcept */
2045 int is_ip_cfop(ir_node *node) {
2046   return is_ip_cfopcode(get_irn_op(node));
2049 /* Returns true if the operation can change the control flow because
2052 is_fragile_op(ir_node *node) {
2053   return is_op_fragile(get_irn_op(node));
2056 /* Returns the memory operand of fragile operations. */
2057 ir_node *get_fragile_op_mem(ir_node *node) {
2058   assert(node && is_fragile_op(node));
2060   switch (get_irn_opcode (node)) {
/* For the fragile ops handled here the memory operand is input 0
   (cases elided in this view). */
2069     return get_irn_n(node, 0);
2074     assert(0 && "should not be reached");
2079 /* Returns true if the operation is a forking control flow operation. */
2081 is_forking_op(ir_node *node) {
2082   return is_op_forking(get_irn_op(node));
2085 #ifdef DEBUG_libfirm
/* Debug helper: prints a node, its block (predecessor -1) and all
   predecessors to stdout.  Compiled to a no-op stub without
   DEBUG_libfirm. */
2086 void dump_irn (ir_node *n) {
2087   int i, arity = get_irn_arity(n);
2088   printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2090   ir_node *pred = get_irn_n(n, -1);
2091   printf("  block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2092     get_irn_node_nr(pred), (void *)pred);
2094   printf("  preds: \n");
2095   for (i = 0; i < arity; ++i) {
2096     ir_node *pred = get_irn_n(n, i);
2097     printf("    %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2098       get_irn_node_nr(pred), (void *)pred);
2102 #else  /* DEBUG_libfirm */
2103 void dump_irn (ir_node *n) {}
2104 #endif /* DEBUG_libfirm */