3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46 * returns the pnc name from an pnc constant
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
/* Size of the node: common header plus the op-specific attribute area. */
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
/* Allocate on the graph's obstack and zero everything, attributes included. */
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
/* Negative arity: dynamic (growable) in array; only the block slot for now. */
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
/* Fixed arity: obstack-allocated in array; predecessors are copied behind
   the block slot at index 0 (hence the +1 shift). */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
/* NOTE(review): node numbering — presumably only active in debug builds; confirm. */
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
158 int (*__get_irn_arity)(const ir_node *node) = __get_irn_intra_arity;
161 (get_irn_arity)(const ir_node *node) {
162 return __get_irn_arity(node);
165 /* Returns the array with ins. This array is shifted with respect to the
166 array accessed by get_irn_n: The block operand is at position 0 not -1.
167 (@@@ This should be changed.)
168 The order of the predecessors in this array is not guaranteed, except that
169 lists of operands as predecessors of Block or arguments of a Call are
172 get_irn_in (const ir_node *node) {
174 if (get_interprocedural_view()) { /* handle Filter and Block specially */
175 if (get_irn_opcode(node) == iro_Filter) {
176 assert(node->attr.filter.in_cg);
177 return node->attr.filter.in_cg;
178 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
179 return node->attr.block.in_cg;
181 /* else fall through */
187 set_irn_in (ir_node *node, int arity, ir_node **in) {
/* Replace all (non-block) predecessors of node by the given array.
   In the interprocedural view, Filter and Block nodes carry a second
   in array (in_cg) which is the one that must be replaced. */
190 if (get_interprocedural_view()) { /* handle Filter and Block specially */
191 if (get_irn_opcode(node) == iro_Filter) {
192 assert(node->attr.filter.in_cg);
193 arr = &node->attr.filter.in_cg;
194 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
195 arr = &node->attr.block.in_cg;
/* Arity change: allocate a fresh in array; slot 0 keeps the block. */
202 if (arity != ARR_LEN(*arr) - 1) {
203 ir_node * block = (*arr)[0];
204 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
/* Backedge info records positions in the in array — repair it after resizing. */
207 fix_backedges(current_ir_graph->obst, node);
208 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
212 (get_irn_intra_n)(ir_node *node, int n) {
213 return __get_irn_intra_n (node, n);
217 (get_irn_inter_n)(ir_node *node, int n) {
218 return __get_irn_inter_n (node, n);
221 ir_node *(*__get_irn_n)(ir_node *node, int n) = __get_irn_intra_n;
224 (get_irn_n)(ir_node *node, int n) {
225 return __get_irn_n(node, n);
229 set_irn_n (ir_node *node, int n, ir_node *in) {
/* Set the n-th predecessor of node; n == -1 addresses the block. */
230 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
231 assert(in && in->kind == k_ir_node);
/* A Filter's block pred exists in both the intra- and interprocedural
   in arrays, so both must be updated regardless of the current view. */
232 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
233 /* Change block pred in both views! */
234 node->in[n + 1] = in;
235 assert(node->attr.filter.in_cg);
236 node->attr.filter.in_cg[n + 1] = in;
/* Interprocedural view: Filter and Block route through in_cg instead. */
239 if (get_interprocedural_view()) { /* handle Filter and Block specially */
240 if (get_irn_opcode(node) == iro_Filter) {
241 assert(node->attr.filter.in_cg);
242 node->attr.filter.in_cg[n + 1] = in;
244 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
245 node->attr.block.in_cg[n + 1] = in;
248 /* else fall through */
/* Default: ordinary in array; in[0] is the block, hence the +1 shift. */
250 node->in[n + 1] = in;
254 (get_irn_mode)(const ir_node *node) {
255 return __get_irn_mode(node);
259 (set_irn_mode)(ir_node *node, ir_mode *mode)
261 __set_irn_mode(node, mode);
265 get_irn_modecode (const ir_node *node)
268 return node->mode->code;
271 /** Gets the string representation of the mode .*/
273 get_irn_modename (const ir_node *node)
276 return get_mode_name(node->mode);
280 get_irn_modeident (const ir_node *node)
283 return get_mode_ident(node->mode);
287 (get_irn_op)(const ir_node *node)
289 return __get_irn_op(node);
292 /* should be private to the library: */
294 set_irn_op (ir_node *node, ir_op *op)
301 (get_irn_opcode)(const ir_node *node)
303 return __get_irn_opcode(node);
307 get_irn_opname (const ir_node *node)
310 if ((get_irn_op((ir_node *)node) == op_Phi) &&
311 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
312 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
313 return get_id_str(node->op->name);
317 get_irn_opident (const ir_node *node)
320 return node->op->name;
324 (get_irn_visited)(const ir_node *node)
326 return __get_irn_visited(node);
330 (set_irn_visited)(ir_node *node, unsigned long visited)
332 __set_irn_visited(node, visited);
336 (mark_irn_visited)(ir_node *node) {
337 __mark_irn_visited(node);
341 (irn_not_visited)(const ir_node *node) {
342 return __irn_not_visited(node);
346 (irn_visited)(const ir_node *node) {
347 return __irn_visited(node);
351 (set_irn_link)(ir_node *node, void *link) {
352 __set_irn_link(node, link);
356 (get_irn_link)(const ir_node *node) {
357 return __get_irn_link(node);
361 (get_irn_pinned)(const ir_node *node) {
362 return __get_irn_pinned(node);
365 void set_irn_pinned(ir_node *node, op_pin_state state) {
366 /* due to optimization an opt may be turned into a Tuple */
367 if (get_irn_op(node) == op_Tuple)
370 assert(node && get_op_pinned(get_irn_op(node)) == op_pin_state_exc_pinned);
371 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
373 node->attr.except.pin_state = state;
376 #ifdef DO_HEAPANALYSIS
377 /* Access the abstract interpretation information of a node.
378 Returns NULL if no such information is available. */
379 struct abstval *get_irn_abst_value(ir_node *n) {
382 /* Set the abstract interpretation information of a node. */
383 void set_irn_abst_value(ir_node *n, struct abstval *os) {
386 struct section *firm_get_irn_section(ir_node *n) {
389 void firm_set_irn_section(ir_node *n, struct section *s) {
392 #endif /* DO_HEAPANALYSIS */
395 /* Outputs a unique number for this node */
397 get_irn_node_nr(const ir_node *node) {
400 return node->node_nr;
407 get_irn_const_attr (ir_node *node)
409 assert (node->op == op_Const);
410 return node->attr.con;
414 get_irn_proj_attr (ir_node *node)
416 assert (node->op == op_Proj);
417 return node->attr.proj;
421 get_irn_alloc_attr (ir_node *node)
423 assert (node->op == op_Alloc);
428 get_irn_free_attr (ir_node *node)
430 assert (node->op == op_Free);
431 return node->attr.f = skip_tid(node->attr.f);
435 get_irn_symconst_attr (ir_node *node)
437 assert (node->op == op_SymConst);
442 get_irn_call_attr (ir_node *node)
444 assert (node->op == op_Call);
445 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
449 get_irn_funccall_attr (ir_node *node)
451 assert (node->op == op_FuncCall);
452 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
456 get_irn_sel_attr (ir_node *node)
458 assert (node->op == op_Sel);
463 get_irn_phi_attr (ir_node *node)
465 assert (node->op == op_Phi);
466 return node->attr.phi0_pos;
470 get_irn_block_attr (ir_node *node)
472 assert (node->op == op_Block);
473 return node->attr.block;
477 get_irn_load_attr (ir_node *node)
479 assert (node->op == op_Load);
480 return node->attr.load;
484 get_irn_store_attr (ir_node *node)
486 assert (node->op == op_Store);
487 return node->attr.store;
491 get_irn_except_attr (ir_node *node)
493 assert (node->op == op_Div || node->op == op_Quot ||
494 node->op == op_DivMod || node->op == op_Mod);
495 return node->attr.except;
498 /** manipulate fields of individual nodes **/
500 /* this works for all except Block */
502 get_nodes_block (ir_node *node) {
503 assert (!(node->op == op_Block));
504 return get_irn_n(node, -1);
508 set_nodes_block (ir_node *node, ir_node *block) {
509 assert (!(node->op == op_Block));
510 set_irn_n(node, -1, block);
513 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
514 * from Start. If so returns frame type, else Null. */
515 type *is_frame_pointer(ir_node *n) {
516 if ((get_irn_op(n) == op_Proj) &&
517 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
518 ir_node *start = get_Proj_pred(n);
519 if (get_irn_op(start) == op_Start) {
520 return get_irg_frame_type(get_irn_irg(start));
526 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
527 * from Start. If so returns global type, else Null. */
528 type *is_globals_pointer(ir_node *n) {
529 if ((get_irn_op(n) == op_Proj) &&
530 (get_Proj_proj(n) == pn_Start_P_globals)) {
531 ir_node *start = get_Proj_pred(n);
532 if (get_irn_op(start) == op_Start) {
533 return get_glob_type();
539 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
540 * from Start. If so returns 1, else 0. */
541 int is_value_arg_pointer(ir_node *n) {
542 if ((get_irn_op(n) == op_Proj) &&
543 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
544 (get_irn_op(get_Proj_pred(n)) == op_Start))
549 /* Returns an array with the predecessors of the Block. Depending on
550 the implementation of the graph data structure this can be a copy of
551 the internal representation of predecessors as well as the internal
552 array itself. Therefore writing to this array might obstruct the ir. */
554 get_Block_cfgpred_arr (ir_node *node)
556 assert ((node->op == op_Block));
557 return (ir_node **)&(get_irn_in(node)[1]);
562 get_Block_n_cfgpreds (ir_node *node) {
563 assert ((node->op == op_Block));
564 return get_irn_arity(node);
568 get_Block_cfgpred (ir_node *node, int pos) {
569 assert(-1 <= pos && pos < get_irn_arity(node));
570 assert(node->op == op_Block);
571 return get_irn_n(node, pos);
575 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
576 assert (node->op == op_Block);
577 set_irn_n(node, pos, pred);
581 get_Block_matured (ir_node *node) {
582 assert (node->op == op_Block);
583 return node->attr.block.matured;
587 set_Block_matured (ir_node *node, bool matured) {
588 assert (node->op == op_Block);
589 node->attr.block.matured = matured;
592 get_Block_block_visited (ir_node *node) {
593 assert (node->op == op_Block);
594 return node->attr.block.block_visited;
598 set_Block_block_visited (ir_node *node, unsigned long visit) {
599 assert (node->op == op_Block);
600 node->attr.block.block_visited = visit;
603 /* For this current_ir_graph must be set. */
605 mark_Block_block_visited (ir_node *node) {
606 assert (node->op == op_Block);
607 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
611 Block_not_block_visited(ir_node *node) {
612 assert (node->op == op_Block);
613 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
617 get_Block_graph_arr (ir_node *node, int pos) {
618 assert (node->op == op_Block);
619 return node->attr.block.graph_arr[pos+1];
623 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
624 assert (node->op == op_Block);
625 node->attr.block.graph_arr[pos+1] = value;
628 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
/* Set the interprocedural control-flow predecessors (in_cg) of a Block. */
629 assert(node->op == op_Block);
/* (Re)allocate in_cg plus a matching backedge array when the arity changes;
   slot 0 mirrors the block slot of the ordinary in array and stays NULL. */
630 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
631 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
632 node->attr.block.in_cg[0] = NULL;
633 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
635 /* Fix backedge array. fix_backedges operates depending on
636 interprocedural_view. */
/* Temporarily force the interprocedural view so fix_backedges repairs
   the in_cg positions, then restore the caller's view. */
637 int ipv = get_interprocedural_view();
638 set_interprocedural_view(true);
639 fix_backedges(current_ir_graph->obst, node);
640 set_interprocedural_view(ipv);
643 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
646 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
647 assert(node->op == op_Block &&
648 node->attr.block.in_cg &&
649 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
650 node->attr.block.in_cg[pos + 1] = pred;
653 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
654 assert(node->op == op_Block);
655 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
658 int get_Block_cg_n_cfgpreds(ir_node * node) {
659 assert(node->op == op_Block);
660 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
663 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
664 assert(node->op == op_Block && node->attr.block.in_cg);
665 return node->attr.block.in_cg[pos + 1];
668 void remove_Block_cg_cfgpred_arr(ir_node * node) {
669 assert(node->op == op_Block);
670 node->attr.block.in_cg = NULL;
674 set_Start_irg(ir_node *node, ir_graph *irg) {
675 assert(node->op == op_Start);
676 assert(is_ir_graph(irg));
677 assert(0 && " Why set irg? -- use set_irn_irg");
681 get_End_n_keepalives(ir_node *end) {
682 assert (end->op == op_End);
683 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
687 get_End_keepalive(ir_node *end, int pos) {
688 assert (end->op == op_End);
689 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
693 add_End_keepalive (ir_node *end, ir_node *ka) {
694 assert (end->op == op_End);
695 ARR_APP1 (ir_node *, end->in, ka);
699 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
700 assert (end->op == op_End);
701 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
705 free_End (ir_node *end) {
706 assert (end->op == op_End);
708 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
709 end->in = NULL; /* @@@ make sure we get an error if we use the
710 in array afterwards ... */
715 > Implementing the case construct (which is where the constant Proj node is
716 > important) involves far more than simply determining the constant values.
717 > We could argue that this is more properly a function of the translator from
718 > Firm to the target machine. That could be done if there was some way of
719 > projecting "default" out of the Cond node.
720 I know it's complicated.
721 Basically there are two proglems:
722 - determining the gaps between the projs
723 - determining the biggest case constant to know the proj number for
725 I see several solutions:
726 1. Introduce a ProjDefault node. Solves both problems.
727 This means to extend all optimizations executed during construction.
728 2. Give the Cond node for switch two flavors:
729 a) there are no gaps in the projs (existing flavor)
730 b) gaps may exist, default proj is still the Proj with the largest
731 projection number. This covers also the gaps.
732 3. Fix the semantic of the Cond to that of 2b)
734 Solution 2 seems to be the best:
735 Computing the gaps in the Firm representation is not too hard, i.e.,
736 libFIRM can implement a routine that transforms between the two
737 flavours. This is also possible for 1) but 2) does not require to
738 change any existing optimization.
739 Further it should be far simpler to determine the biggest constant than
741 I don't want to choose 3) as 2a) seems to have advantages for
742 dataflow analysis and 3) does not allow to convert the representation to
746 get_Cond_selector (ir_node *node) {
747 assert (node->op == op_Cond);
748 return get_irn_n(node, 0);
752 set_Cond_selector (ir_node *node, ir_node *selector) {
753 assert (node->op == op_Cond);
754 set_irn_n(node, 0, selector);
758 get_Cond_kind (ir_node *node) {
759 assert (node->op == op_Cond);
760 return node->attr.c.kind;
764 set_Cond_kind (ir_node *node, cond_kind kind) {
765 assert (node->op == op_Cond);
766 node->attr.c.kind = kind;
770 get_Cond_defaultProj (ir_node *node) {
771 assert (node->op == op_Cond);
772 return node->attr.c.default_proj;
776 get_Return_mem (ir_node *node) {
777 assert (node->op == op_Return);
778 return get_irn_n(node, 0);
782 set_Return_mem (ir_node *node, ir_node *mem) {
783 assert (node->op == op_Return);
784 set_irn_n(node, 0, mem);
788 get_Return_n_ress (ir_node *node) {
789 assert (node->op == op_Return);
790 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
794 get_Return_res_arr (ir_node *node)
796 assert ((node->op == op_Return));
797 if (get_Return_n_ress(node) > 0)
798 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
805 set_Return_n_res (ir_node *node, int results) {
806 assert (node->op == op_Return);
811 get_Return_res (ir_node *node, int pos) {
812 assert (node->op == op_Return);
813 assert (get_Return_n_ress(node) > pos);
814 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
818 set_Return_res (ir_node *node, int pos, ir_node *res){
819 assert (node->op == op_Return);
820 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
824 get_Raise_mem (ir_node *node) {
825 assert (node->op == op_Raise);
826 return get_irn_n(node, 0);
830 set_Raise_mem (ir_node *node, ir_node *mem) {
831 assert (node->op == op_Raise);
832 set_irn_n(node, 0, mem);
836 get_Raise_exo_ptr (ir_node *node) {
837 assert (node->op == op_Raise);
838 return get_irn_n(node, 1);
842 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
843 assert (node->op == op_Raise);
844 set_irn_n(node, 1, exo_ptr);
847 tarval *get_Const_tarval (ir_node *node) {
848 assert (node->op == op_Const);
849 return node->attr.con.tv;
853 set_Const_tarval (ir_node *node, tarval *con) {
854 assert (node->op == op_Const);
855 node->attr.con.tv = con;
859 /* The source language type. Must be an atomic type. Mode of type must
860 be mode of node. For tarvals from entities type must be pointer to
863 get_Const_type (ir_node *node) {
864 assert (node->op == op_Const);
865 return node->attr.con.tp;
869 set_Const_type (ir_node *node, type *tp) {
870 assert (node->op == op_Const);
871 if (tp != unknown_type) {
872 assert (is_atomic_type(tp));
873 assert (get_type_mode(tp) == get_irn_mode(node));
875 node->attr.con.tp = tp;
880 get_SymConst_kind (const ir_node *node) {
881 assert (node->op == op_SymConst);
882 return node->attr.i.num;
886 set_SymConst_kind (ir_node *node, symconst_kind num) {
887 assert (node->op == op_SymConst);
888 node->attr.i.num = num;
892 get_SymConst_type (ir_node *node) {
893 assert ( (node->op == op_SymConst)
894 && ( get_SymConst_kind(node) == symconst_type_tag
895 || get_SymConst_kind(node) == symconst_size));
896 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
900 set_SymConst_type (ir_node *node, type *tp) {
901 assert ( (node->op == op_SymConst)
902 && ( get_SymConst_kind(node) == symconst_type_tag
903 || get_SymConst_kind(node) == symconst_size));
904 node->attr.i.sym.type_p = tp;
908 get_SymConst_name (ir_node *node) {
909 assert ( (node->op == op_SymConst)
910 && (get_SymConst_kind(node) == symconst_addr_name));
911 return node->attr.i.sym.ident_p;
915 set_SymConst_name (ir_node *node, ident *name) {
916 assert ( (node->op == op_SymConst)
917 && (get_SymConst_kind(node) == symconst_addr_name));
918 node->attr.i.sym.ident_p = name;
922 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
923 entity *get_SymConst_entity (ir_node *node) {
924 assert ( (node->op == op_SymConst)
925 && (get_SymConst_kind (node) == symconst_addr_ent));
926 return node->attr.i.sym.entity_p;
929 void set_SymConst_entity (ir_node *node, entity *ent) {
930 assert ( (node->op == op_SymConst)
931 && (get_SymConst_kind(node) == symconst_addr_ent));
932 node->attr.i.sym.entity_p = ent;
935 union symconst_symbol
936 get_SymConst_symbol (ir_node *node) {
937 assert (node->op == op_SymConst);
938 return node->attr.i.sym;
942 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
943 assert (node->op == op_SymConst);
944 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
945 node->attr.i.sym = sym;
949 get_SymConst_value_type (ir_node *node) {
950 assert (node->op == op_SymConst);
951 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
952 return node->attr.i.tp;
956 set_SymConst_value_type (ir_node *node, type *tp) {
957 assert (node->op == op_SymConst);
958 node->attr.i.tp = tp;
962 get_Sel_mem (ir_node *node) {
963 assert (node->op == op_Sel);
964 return get_irn_n(node, 0);
968 set_Sel_mem (ir_node *node, ir_node *mem) {
969 assert (node->op == op_Sel);
970 set_irn_n(node, 0, mem);
974 get_Sel_ptr (ir_node *node) {
975 assert (node->op == op_Sel);
976 return get_irn_n(node, 1);
980 set_Sel_ptr (ir_node *node, ir_node *ptr) {
981 assert (node->op == op_Sel);
982 set_irn_n(node, 1, ptr);
986 get_Sel_n_indexs (ir_node *node) {
987 assert (node->op == op_Sel);
988 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
992 get_Sel_index_arr (ir_node *node)
994 assert ((node->op == op_Sel));
995 if (get_Sel_n_indexs(node) > 0)
996 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1002 get_Sel_index (ir_node *node, int pos) {
1003 assert (node->op == op_Sel);
1004 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1008 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1009 assert (node->op == op_Sel);
1010 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1014 get_Sel_entity (ir_node *node) {
1015 assert (node->op == op_Sel);
1016 return node->attr.s.ent;
1020 set_Sel_entity (ir_node *node, entity *ent) {
1021 assert (node->op == op_Sel);
1022 node->attr.s.ent = ent;
1026 get_InstOf_ent (ir_node *node) {
1027 assert (node->op = op_InstOf);
1028 return (node->attr.io.ent);
1032 set_InstOf_ent (ir_node *node, type *ent) {
1033 assert (node->op = op_InstOf);
1034 node->attr.io.ent = ent;
1038 get_InstOf_store (ir_node *node) {
1039 assert (node->op = op_InstOf);
1040 return (get_irn_n (node, 0));
1044 set_InstOf_store (ir_node *node, ir_node *obj) {
1045 assert (node->op = op_InstOf);
1046 set_irn_n (node, 0, obj);
1050 get_InstOf_obj (ir_node *node) {
1051 assert (node->op = op_InstOf);
1052 return (get_irn_n (node, 1));
1056 set_InstOf_obj (ir_node *node, ir_node *obj) {
1057 assert (node->op = op_InstOf);
1058 set_irn_n (node, 1, obj);
1062 /* For unary and binary arithmetic operations the access to the
1063 operands can be factored out. Left is the first, right the
1064 second arithmetic value as listed in tech report 0999-33.
1065 unops are: Minus, Abs, Not, Conv, Cast
1066 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1067 Shr, Shrs, Rotate, Cmp */
1071 get_Call_mem (ir_node *node) {
1072 assert (node->op == op_Call);
1073 return get_irn_n(node, 0);
1077 set_Call_mem (ir_node *node, ir_node *mem) {
1078 assert (node->op == op_Call);
1079 set_irn_n(node, 0, mem);
1083 get_Call_ptr (ir_node *node) {
1084 assert (node->op == op_Call);
1085 return get_irn_n(node, 1);
1089 set_Call_ptr (ir_node *node, ir_node *ptr) {
1090 assert (node->op == op_Call);
1091 set_irn_n(node, 1, ptr);
1095 get_Call_param_arr (ir_node *node) {
1096 assert (node->op == op_Call);
1097 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1101 get_Call_n_params (ir_node *node) {
1102 assert (node->op == op_Call);
1103 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1107 get_Call_arity (ir_node *node) {
1108 assert (node->op == op_Call);
1109 return get_Call_n_params(node);
1113 set_Call_arity (ir_node *node, ir_node *arity) {
1114 assert (node->op == op_Call);
1119 get_Call_param (ir_node *node, int pos) {
1120 assert (node->op == op_Call);
1121 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1125 set_Call_param (ir_node *node, int pos, ir_node *param) {
1126 assert (node->op == op_Call);
1127 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1131 get_Call_type (ir_node *node) {
1132 assert (node->op == op_Call);
1133 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1137 set_Call_type (ir_node *node, type *tp) {
1138 assert (node->op == op_Call);
1139 assert ((get_unknown_type() == tp) || is_method_type(tp));
1140 node->attr.call.cld_tp = tp;
1143 int Call_has_callees(ir_node *node) {
1144 assert(node && node->op == op_Call);
1145 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1146 (node->attr.call.callee_arr != NULL));
1149 int get_Call_n_callees(ir_node * node) {
1150 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1151 return ARR_LEN(node->attr.call.callee_arr);
1154 entity * get_Call_callee(ir_node * node, int pos) {
1155 assert(pos >= 0 && pos < get_Call_n_callees(node));
1156 return node->attr.call.callee_arr[pos];
1159 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1160 assert(node->op == op_Call);
1161 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1162 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1164 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1167 void remove_Call_callee_arr(ir_node * node) {
1168 assert(node->op == op_Call);
1169 node->attr.call.callee_arr = NULL;
1172 ir_node * get_CallBegin_ptr (ir_node *node) {
1173 assert(node->op == op_CallBegin);
1174 return get_irn_n(node, 0);
1176 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1177 assert(node->op == op_CallBegin);
1178 set_irn_n(node, 0, ptr);
1180 ir_node * get_CallBegin_call (ir_node *node) {
1181 assert(node->op == op_CallBegin);
1182 return node->attr.callbegin.call;
1184 void set_CallBegin_call (ir_node *node, ir_node *call) {
1185 assert(node->op == op_CallBegin);
1186 node->attr.callbegin.call = call;
1190 get_FuncCall_ptr (ir_node *node) {
1191 assert (node->op == op_FuncCall);
1192 return get_irn_n(node, 0);
1196 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1197 assert (node->op == op_FuncCall);
1198 set_irn_n(node, 0, ptr);
1202 get_FuncCall_param_arr (ir_node *node) {
1203 assert (node->op == op_FuncCall);
1204 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1208 get_FuncCall_n_params (ir_node *node) {
1209 assert (node->op == op_FuncCall);
1210 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1214 get_FuncCall_arity (ir_node *node) {
1215 assert (node->op == op_FuncCall);
1216 return get_FuncCall_n_params(node);
1220 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1221 assert (node->op == op_FuncCall);
1226 get_FuncCall_param (ir_node *node, int pos) {
1227 assert (node->op == op_FuncCall);
1228 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1232 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1233 assert (node->op == op_FuncCall);
1234 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1238 get_FuncCall_type (ir_node *node) {
1239 assert (node->op == op_FuncCall);
1240 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1244 set_FuncCall_type (ir_node *node, type *tp) {
1245 assert (node->op == op_FuncCall);
1246 assert (is_method_type(tp));
1247 node->attr.call.cld_tp = tp;
1250 int FuncCall_has_callees(ir_node *node) {
1251 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1252 (node->attr.call.callee_arr != NULL));
1255 int get_FuncCall_n_callees(ir_node * node) {
1256 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1257 return ARR_LEN(node->attr.call.callee_arr);
1260 entity * get_FuncCall_callee(ir_node * node, int pos) {
1261 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1262 return node->attr.call.callee_arr[pos];
1265 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1266 assert(node->op == op_FuncCall);
1267 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1268 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1270 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1273 void remove_FuncCall_callee_arr(ir_node * node) {
1274 assert(node->op == op_FuncCall);
1275 node->attr.call.callee_arr = NULL;
1280 ir_node * get_##OP##_left(ir_node *node) { \
1281 assert(node->op == op_##OP); \
1282 return get_irn_n(node, node->op->op_index); \
1284 void set_##OP##_left(ir_node *node, ir_node *left) { \
1285 assert(node->op == op_##OP); \
1286 set_irn_n(node, node->op->op_index, left); \
1288 ir_node *get_##OP##_right(ir_node *node) { \
1289 assert(node->op == op_##OP); \
1290 return get_irn_n(node, node->op->op_index + 1); \
1292 void set_##OP##_right(ir_node *node, ir_node *right) { \
1293 assert(node->op == op_##OP); \
1294 set_irn_n(node, node->op->op_index + 1, right); \
1298 ir_node *get_##OP##_op(ir_node *node) { \
1299 assert(node->op == op_##OP); \
1300 return get_irn_n(node, node->op->op_index); \
1302 void set_##OP##_op (ir_node *node, ir_node *op) { \
1303 assert(node->op == op_##OP); \
1304 set_irn_n(node, node->op->op_index, op); \
1314 get_Quot_mem (ir_node *node) {
1315 assert (node->op == op_Quot);
1316 return get_irn_n(node, 0);
1320 set_Quot_mem (ir_node *node, ir_node *mem) {
1321 assert (node->op == op_Quot);
1322 set_irn_n(node, 0, mem);
1328 get_DivMod_mem (ir_node *node) {
1329 assert (node->op == op_DivMod);
1330 return get_irn_n(node, 0);
1334 set_DivMod_mem (ir_node *node, ir_node *mem) {
1335 assert (node->op == op_DivMod);
1336 set_irn_n(node, 0, mem);
1342 get_Div_mem (ir_node *node) {
1343 assert (node->op == op_Div);
1344 return get_irn_n(node, 0);
1348 set_Div_mem (ir_node *node, ir_node *mem) {
1349 assert (node->op == op_Div);
1350 set_irn_n(node, 0, mem);
1356 get_Mod_mem (ir_node *node) {
1357 assert (node->op == op_Mod);
1358 return get_irn_n(node, 0);
1362 set_Mod_mem (ir_node *node, ir_node *mem) {
1363 assert (node->op == op_Mod);
1364 set_irn_n(node, 0, mem);
1381 get_Cast_type (ir_node *node) {
1382 assert (node->op == op_Cast);
1383 return node->attr.cast.totype;
1387 set_Cast_type (ir_node *node, type *to_tp) {
1388 assert (node->op == op_Cast);
1389 node->attr.cast.totype = to_tp;
1393 (is_unop)(const ir_node *node) {
1394 return __is_unop(node);
1398 get_unop_op (ir_node *node) {
1399 if (node->op->opar == oparity_unary)
1400 return get_irn_n(node, node->op->op_index);
1402 assert(node->op->opar == oparity_unary);
1407 set_unop_op (ir_node *node, ir_node *op) {
1408 if (node->op->opar == oparity_unary)
1409 set_irn_n(node, node->op->op_index, op);
1411 assert(node->op->opar == oparity_unary);
1415 (is_binop)(const ir_node *node) {
1416 return __is_binop(node);
/* Returns the left operand of a binary operation (at position op_index). */
1420 get_binop_left (ir_node *node) {
1421 if (node->op->opar == oparity_binary)
1422 return get_irn_n(node, node->op->op_index);
/* NOTE(review): only reached when opar != oparity_binary; assert fires. */
1424 assert(node->op->opar == oparity_binary);
/* Sets the left operand of a binary operation. */
1429 set_binop_left (ir_node *node, ir_node *left) {
1430 if (node->op->opar == oparity_binary)
1431 set_irn_n(node, node->op->op_index, left);
1433 assert (node->op->opar == oparity_binary);
/* Returns the right operand of a binary operation (at op_index + 1). */
1437 get_binop_right (ir_node *node) {
1438 if (node->op->opar == oparity_binary)
1439 return get_irn_n(node, node->op->op_index + 1);
1441 assert(node->op->opar == oparity_binary);
/* Sets the right operand of a binary operation. */
1446 set_binop_right (ir_node *node, ir_node *right) {
1447 if (node->op->opar == oparity_binary)
1448 set_irn_n(node, node->op->op_index + 1, right);
1450 assert (node->op->opar == oparity_binary);
/* Returns true if n is a Phi node (or, in interprocedural view, a Filter,
   which acts as a Phi there). */
1453 int is_Phi (ir_node *n) {
/* NOTE(review): the op lookup preceding this test is not visible in this
   fragment; 'op' presumably holds get_irn_op(n). */
1459 if (op == op_Filter) return get_interprocedural_view();
/* During graph construction (phase_building) a 0-ary Phi is a Phi0,
   not a real Phi — hence the arity check. */
1462 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1463 (get_irn_arity(n) > 0));
/* Returns true if n is a Phi0: a 0-ary Phi that only exists while the
   graph is still being built. */
1468 int is_Phi0 (ir_node *n) {
1471 return ((get_irn_op(n) == op_Phi) &&
1472 (get_irn_arity(n) == 0) &&
1473 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Returns the predecessor array of a Phi, skipping the block entry in[0]. */
1477 get_Phi_preds_arr (ir_node *node) {
1478 assert (node->op == op_Phi);
1479 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of data predecessors of a Phi (or Phi0). */
1483 get_Phi_n_preds (ir_node *node) {
1484 assert (is_Phi(node) || is_Phi0(node));
1485 return (get_irn_arity(node));
/* NOTE(review): body of set_Phi_n_preds is not visible in this fragment. */
1489 void set_Phi_n_preds (ir_node *node, int n_preds) {
1490 assert (node->op == op_Phi);
/* Returns the pos-th data predecessor of a Phi (or Phi0). */
1495 get_Phi_pred (ir_node *node, int pos) {
1496 assert (is_Phi(node) || is_Phi0(node));
1497 return get_irn_n(node, pos);
/* Sets the pos-th data predecessor of a Phi (or Phi0). */
1501 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1502 assert (is_Phi(node) || is_Phi0(node));
1503 set_irn_n(node, pos, pred);
1507 int is_memop(ir_node *node) {
1508 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1511 ir_node *get_memop_mem (ir_node *node) {
1512 assert(is_memop(node));
1513 return get_irn_n(node, 0);
1516 void set_memop_mem (ir_node *node, ir_node *mem) {
1517 assert(is_memop(node));
1518 set_irn_n(node, 0, mem);
1521 ir_node *get_memop_ptr (ir_node *node) {
1522 assert(is_memop(node));
1523 return get_irn_n(node, 1);
1526 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1527 assert(is_memop(node));
1528 set_irn_n(node, 1, ptr);
1532 get_Load_mem (ir_node *node) {
1533 assert (node->op == op_Load);
1534 return get_irn_n(node, 0);
1538 set_Load_mem (ir_node *node, ir_node *mem) {
1539 assert (node->op == op_Load);
1540 set_irn_n(node, 0, mem);
1544 get_Load_ptr (ir_node *node) {
1545 assert (node->op == op_Load);
1546 return get_irn_n(node, 1);
1550 set_Load_ptr (ir_node *node, ir_node *ptr) {
1551 assert (node->op == op_Load);
1552 set_irn_n(node, 1, ptr);
1556 get_Load_mode (ir_node *node) {
1557 assert (node->op == op_Load);
1558 return node->attr.load.load_mode;
1562 set_Load_mode (ir_node *node, ir_mode *mode) {
1563 assert (node->op == op_Load);
1564 node->attr.load.load_mode = mode;
1568 get_Load_volatility (ir_node *node) {
1569 assert (node->op == op_Load);
1570 return node->attr.load.volatility;
1574 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1575 assert (node->op == op_Load);
1576 node->attr.load.volatility = volatility;
1581 get_Store_mem (ir_node *node) {
1582 assert (node->op == op_Store);
1583 return get_irn_n(node, 0);
1587 set_Store_mem (ir_node *node, ir_node *mem) {
1588 assert (node->op == op_Store);
1589 set_irn_n(node, 0, mem);
1593 get_Store_ptr (ir_node *node) {
1594 assert (node->op == op_Store);
1595 return get_irn_n(node, 1);
1599 set_Store_ptr (ir_node *node, ir_node *ptr) {
1600 assert (node->op == op_Store);
1601 set_irn_n(node, 1, ptr);
1605 get_Store_value (ir_node *node) {
1606 assert (node->op == op_Store);
1607 return get_irn_n(node, 2);
1611 set_Store_value (ir_node *node, ir_node *value) {
1612 assert (node->op == op_Store);
1613 set_irn_n(node, 2, value);
1617 get_Store_volatility (ir_node *node) {
1618 assert (node->op == op_Store);
1619 return node->attr.store.volatility;
1623 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1624 assert (node->op == op_Store);
1625 node->attr.store.volatility = volatility;
1630 get_Alloc_mem (ir_node *node) {
1631 assert (node->op == op_Alloc);
1632 return get_irn_n(node, 0);
1636 set_Alloc_mem (ir_node *node, ir_node *mem) {
1637 assert (node->op == op_Alloc);
1638 set_irn_n(node, 0, mem);
1642 get_Alloc_size (ir_node *node) {
1643 assert (node->op == op_Alloc);
1644 return get_irn_n(node, 1);
1648 set_Alloc_size (ir_node *node, ir_node *size) {
1649 assert (node->op == op_Alloc);
1650 set_irn_n(node, 1, size);
1654 get_Alloc_type (ir_node *node) {
1655 assert (node->op == op_Alloc);
1656 return node->attr.a.type = skip_tid(node->attr.a.type);
1660 set_Alloc_type (ir_node *node, type *tp) {
1661 assert (node->op == op_Alloc);
1662 node->attr.a.type = tp;
1666 get_Alloc_where (ir_node *node) {
1667 assert (node->op == op_Alloc);
1668 return node->attr.a.where;
1672 set_Alloc_where (ir_node *node, where_alloc where) {
1673 assert (node->op == op_Alloc);
1674 node->attr.a.where = where;
1679 get_Free_mem (ir_node *node) {
1680 assert (node->op == op_Free);
1681 return get_irn_n(node, 0);
1685 set_Free_mem (ir_node *node, ir_node *mem) {
1686 assert (node->op == op_Free);
1687 set_irn_n(node, 0, mem);
1691 get_Free_ptr (ir_node *node) {
1692 assert (node->op == op_Free);
1693 return get_irn_n(node, 1);
1697 set_Free_ptr (ir_node *node, ir_node *ptr) {
1698 assert (node->op == op_Free);
1699 set_irn_n(node, 1, ptr);
1703 get_Free_size (ir_node *node) {
1704 assert (node->op == op_Free);
1705 return get_irn_n(node, 2);
1709 set_Free_size (ir_node *node, ir_node *size) {
1710 assert (node->op == op_Free);
1711 set_irn_n(node, 2, size);
/* Returns the type attribute of a Free node, resolving Id types via
   skip_tid() and caching the normalized type in the attribute. */
1715 get_Free_type (ir_node *node) {
1716 assert (node->op == op_Free);
1717 return node->attr.f = skip_tid(node->attr.f);
/* Sets the type attribute of a Free node.
   NOTE(review): the assignment statement is not visible in this fragment. */
1721 set_Free_type (ir_node *node, type *tp) {
1722 assert (node->op == op_Free);
/* Returns the predecessor array of a Sync, skipping the block entry in[0]. */
1727 get_Sync_preds_arr (ir_node *node) {
1728 assert (node->op == op_Sync);
1729 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of memory predecessors of a Sync. */
1733 get_Sync_n_preds (ir_node *node) {
1734 assert (node->op == op_Sync);
1735 return (get_irn_arity(node));
/* NOTE(review): body of set_Sync_n_preds is not visible in this fragment. */
1740 set_Sync_n_preds (ir_node *node, int n_preds) {
1741 assert (node->op == op_Sync);
/* Returns the pos-th predecessor of a Sync. */
1746 get_Sync_pred (ir_node *node, int pos) {
1747 assert (node->op == op_Sync);
1748 return get_irn_n(node, pos);
/* Sets the pos-th predecessor of a Sync. */
1752 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1753 assert (node->op == op_Sync);
1754 set_irn_n(node, pos, pred);
/* Returns the single predecessor of a Proj (or Filter acting as Proj). */
1758 get_Proj_pred (ir_node *node) {
1759 assert (is_Proj(node));
1760 return get_irn_n(node, 0);
/* Sets the single predecessor of a Proj (or Filter acting as Proj). */
1764 set_Proj_pred (ir_node *node, ir_node *pred) {
1765 assert (is_Proj(node));
1766 set_irn_n(node, 0, pred);
/* Returns the projection number; reads the matching attribute field for
   real Projs and for Filters acting as Projs. */
1770 get_Proj_proj (ir_node *node) {
1771 assert (is_Proj(node));
1772 if (get_irn_opcode(node) == iro_Proj) {
1773 return node->attr.proj;
1775 assert(get_irn_opcode(node) == iro_Filter);
1776 return node->attr.filter.proj;
/* Sets the projection number.
   NOTE(review): unlike get_Proj_proj, this asserts op_Proj only — it does
   not accept a Filter acting as Proj; confirm whether that is intended. */
1781 set_Proj_proj (ir_node *node, long proj) {
1782 assert (node->op == op_Proj);
1783 node->attr.proj = proj;
/* Returns the predecessor array of a Tuple, skipping the block entry in[0]. */
1787 get_Tuple_preds_arr (ir_node *node) {
1788 assert (node->op == op_Tuple);
1789 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of predecessors of a Tuple. */
1793 get_Tuple_n_preds (ir_node *node) {
1794 assert (node->op == op_Tuple);
1795 return (get_irn_arity(node));
/* NOTE(review): body of set_Tuple_n_preds is not visible in this fragment. */
1800 set_Tuple_n_preds (ir_node *node, int n_preds) {
1801 assert (node->op == op_Tuple);
/* Returns the pos-th predecessor of a Tuple. */
1806 get_Tuple_pred (ir_node *node, int pos) {
1807 assert (node->op == op_Tuple);
1808 return get_irn_n(node, pos);
/* Sets the pos-th predecessor of a Tuple. */
1812 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1813 assert (node->op == op_Tuple);
1814 set_irn_n(node, pos, pred);
1818 get_Id_pred (ir_node *node) {
1819 assert (node->op == op_Id);
1820 return get_irn_n(node, 0);
1824 set_Id_pred (ir_node *node, ir_node *pred) {
1825 assert (node->op == op_Id);
1826 set_irn_n(node, 0, pred);
1829 ir_node *get_Confirm_value (ir_node *node) {
1830 assert (node->op == op_Confirm);
1831 return get_irn_n(node, 0);
1833 void set_Confirm_value (ir_node *node, ir_node *value) {
1834 assert (node->op == op_Confirm);
1835 set_irn_n(node, 0, value);
1837 ir_node *get_Confirm_bound (ir_node *node) {
1838 assert (node->op == op_Confirm);
1839 return get_irn_n(node, 1);
1841 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1842 assert (node->op == op_Confirm);
1843 set_irn_n(node, 0, bound);
1845 pn_Cmp get_Confirm_cmp (ir_node *node) {
1846 assert (node->op == op_Confirm);
1847 return node->attr.confirm_cmp;
1849 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1850 assert (node->op == op_Confirm);
1851 node->attr.confirm_cmp = cmp;
/* Returns the predecessor of a Filter node.
   NOTE(review): the return statement is not visible in this fragment. */
1856 get_Filter_pred (ir_node *node) {
1857 assert(node->op == op_Filter);
/* Sets the predecessor of a Filter node.
   NOTE(review): the assignment statement is not visible in this fragment. */
1861 set_Filter_pred (ir_node *node, ir_node *pred) {
1862 assert(node->op == op_Filter);
/* Returns the projection number of a Filter node. */
1866 get_Filter_proj(ir_node *node) {
1867 assert(node->op == op_Filter);
1868 return node->attr.filter.proj;
/* Sets the projection number of a Filter node. */
1871 set_Filter_proj (ir_node *node, long proj) {
1872 assert(node->op == op_Filter);
1873 node->attr.filter.proj = proj;
1876 /* Don't use get_irn_arity, get_irn_n in implementation as access
1877 shall work independent of view!!! */
/* Installs the interprocedural (call-graph) predecessor array of a
   Filter: (re)allocates in_cg/backedge on the graph's obstack when the
   arity changed, keeps the block entry at in_cg[0], then copies 'in'. */
1878 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1879 assert(node->op == op_Filter);
1880 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1881 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
/* Fresh backedge flags, all cleared. */
1882 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1883 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
/* Slot 0 mirrors the regular block predecessor. */
1884 node->attr.filter.in_cg[0] = node->in[0];
1886 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets a single interprocedural predecessor (0-based pos, stored at pos+1
   because slot 0 holds the block). */
1889 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1890 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1891 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1892 node->attr.filter.in_cg[pos + 1] = pred;
/* Returns the number of interprocedural predecessors of a Filter. */
1894 int get_Filter_n_cg_preds(ir_node *node) {
1895 assert(node->op == op_Filter && node->attr.filter.in_cg);
1896 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Returns the pos-th interprocedural predecessor of a Filter. */
1898 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1900 assert(node->op == op_Filter && node->attr.filter.in_cg &&
/* NOTE(review): declaration of 'arity' is not visible in this fragment. */
1902 arity = ARR_LEN(node->attr.filter.in_cg);
1903 assert(pos < arity - 1);
1904 return node->attr.filter.in_cg[pos + 1];
1909 get_irn_irg(ir_node *node) {
1910 if (get_irn_op(node) != op_Block)
1911 node = get_nodes_block(node);
1912 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1913 node = get_nodes_block(node);
1914 assert(get_irn_op(node) == op_Block);
1915 return node->attr.block.irg;
1919 /*----------------------------------------------------------------*/
1920 /* Auxiliary routines */
1921 /*----------------------------------------------------------------*/
/* Returns the operand of a Proj; tolerates NULL and non-Proj nodes.
   NOTE(review): the non-Proj fallthrough (returning node itself) is not
   visible in this fragment. */
1924 skip_Proj (ir_node *node) {
1925 /* don't assert node !!! */
1926 if (node && is_Proj(node)) {
1927 return get_Proj_pred(node);
/* Resolves a Proj of a Tuple to the Tuple's corresponding predecessor,
   following Id chains and nested Tuples.  No-op when normalization is
   disabled. */
1934 skip_Tuple (ir_node *node) {
1937 if (!get_opt_normalize()) return node;
1939 node = skip_Id(node);
1940 if (get_irn_op(node) == op_Proj) {
/* NOTE(review): declaration of 'pred' is not visible in this fragment. */
1941 pred = skip_Id(get_Proj_pred(node));
1942 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1943 pred = skip_Id(skip_Tuple(pred));
1944 if (get_irn_op(pred) == op_Tuple)
1945 return get_Tuple_pred(pred, get_Proj_proj(node));
1950 /** returns operand of node if node is a Cast */
/* Tolerates NULL; skips Id chains on the operand.
   NOTE(review): the non-Cast fallthrough (returning node itself) is not
   visible in this fragment. */
1951 ir_node *skip_Cast (ir_node *node) {
1952 if (node && get_irn_op(node) == op_Cast) {
1953 return skip_Id(get_irn_n(node, 0));
1960 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1961 than any other approach, as Id chains are resolved and all point to the real node, or
1962 all id's are self loops. */
/* NOTE(review): a second definition of skip_Id follows below — presumably
   one of the two is disabled by preprocessor conditionals not visible in
   this fragment; confirm against the full file. */
1964 skip_Id (ir_node *node) {
1965 /* don't assert node !!! */
1967 if (!get_opt_normalize()) return node;
1969 /* Don't use get_Id_pred: We get into an endless loop for
1970 self-referencing Ids. */
/* in[0+1] is the first real predecessor (in[0] is the block). */
1971 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1972 ir_node *rem_pred = node->in[0+1];
1975 assert (get_irn_arity (node) > 0);
/* Temporarily make this Id self-referencing to break Id cycles while
   recursing down the chain. */
1977 node->in[0+1] = node;
1978 res = skip_Id(rem_pred);
1979 if (res->op == op_Id) /* self-loop */ return node;
/* Point directly at the chain end so later lookups are O(1). */
1981 node->in[0+1] = res;
1988 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1989 than any other approach, as Id chains are resolved and all point to the real node, or
1990 all id's are self loops. */
/* NOTE(review): duplicate definition of skip_Id (see the version above) —
   presumably selected by preprocessor conditionals not visible here. */
1992 skip_Id (ir_node *node) {
1994 /* don't assert node !!! */
1996 if (!node || (node->op != op_Id)) return node;
1998 if (!get_opt_normalize()) return node;
2000 /* Don't use get_Id_pred: We get into an endless loop for
2001 self-referencing Ids. */
/* in[0+1] is the first real predecessor (in[0] is the block). */
2002 pred = node->in[0+1];
2004 if (pred->op != op_Id) return pred;
2006 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2007 ir_node *rem_pred, *res;
2009 if (pred->op != op_Id) return pred; /* shortcut */
2012 assert (get_irn_arity (node) > 0);
2014 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2015 res = skip_Id(rem_pred);
2016 if (res->op == op_Id) /* self-loop */ return node;
2018 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2027 (is_Bad)(const ir_node *node) {
2028 return __is_Bad(node);
2032 (is_no_Block)(const ir_node *node) {
2033 return __is_no_Block(node);
2037 (is_Block)(const ir_node *node) {
2038 return __is_Block(node);
2041 /* returns true if node is a Unknown node. */
2043 is_Unknown (const ir_node *node) {
2045 return (get_irn_op(node) == op_Unknown);
2049 is_Proj (const ir_node *node) {
2051 return node->op == op_Proj
2052 || (!get_interprocedural_view() && node->op == op_Filter);
2055 /* Returns true if the operation manipulates control flow. */
2057 is_cfop(const ir_node *node) {
2058 return is_cfopcode(get_irn_op(node));
2061 /* Returns true if the operation manipulates interprocedural control flow:
2062 CallBegin, EndReg, EndExcept */
2063 int is_ip_cfop(const ir_node *node) {
2064 return is_ip_cfopcode(get_irn_op(node));
2067 /* Returns true if the operation can change the control flow because
2070 is_fragile_op(const ir_node *node) {
2071 return is_op_fragile(get_irn_op(node));
2074 /* Returns the memory operand of fragile operations. */
2075 ir_node *get_fragile_op_mem(ir_node *node) {
2076 assert(node && is_fragile_op(node));
/* NOTE(review): the case labels of this switch are not visible in this
   fragment; only the shared "pred 0 is memory" return and the default
   assert survive here. */
2078 switch (get_irn_opcode (node)) {
2087 return get_irn_n(node, 0);
2092 assert(0 && "should not be reached");
2097 /* Returns true if the operation is a forking control flow operation. */
2099 is_forking_op(const ir_node *node) {
2100 return is_op_forking(get_irn_op(node));
2103 #ifdef DEBUG_libfirm
/* Debug helper: prints a node (op, mode, node number, address), its
   block and all predecessors to stdout. */
2104 void dump_irn (ir_node *n) {
2105 int i, arity = get_irn_arity(n);
2106 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* Predecessor -1 is the node's block. */
2108 ir_node *pred = get_irn_n(n, -1);
2109 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2110 get_irn_node_nr(pred), (void *)pred);
2112 printf(" preds: \n");
2113 for (i = 0; i < arity; ++i) {
2114 ir_node *pred = get_irn_n(n, i);
2115 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2116 get_irn_node_nr(pred), (void *)pred);
2120 #else /* DEBUG_libfirm */
/* Release build: dump_irn is a no-op so callers need no #ifdef. */
2121 void dump_irn (ir_node *n) {}
2122 #endif /* DEBUG_libfirm */