3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46 * returns the pnc name from an pnc constant
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
158 int (*__get_irn_arity)(const ir_node *node) = __get_irn_intra_arity;
161 (get_irn_arity)(const ir_node *node) {
162 return __get_irn_arity(node);
165 /* Returns the array with ins. This array is shifted with respect to the
166 array accessed by get_irn_n: The block operand is at position 0 not -1.
167 (@@@ This should be changed.)
168 The order of the predecessors in this array is not guaranteed, except that
169 lists of operands as predecessors of Block or arguments of a Call are
172 get_irn_in (const ir_node *node) {
174 if (get_interprocedural_view()) { /* handle Filter and Block specially */
175 if (get_irn_opcode(node) == iro_Filter) {
176 assert(node->attr.filter.in_cg);
177 return node->attr.filter.in_cg;
178 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
179 return node->attr.block.in_cg;
181 /* else fall through */
187 set_irn_in (ir_node *node, int arity, ir_node **in) {
190 if (get_interprocedural_view()) { /* handle Filter and Block specially */
191 if (get_irn_opcode(node) == iro_Filter) {
192 assert(node->attr.filter.in_cg);
193 arr = &node->attr.filter.in_cg;
194 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
195 arr = &node->attr.block.in_cg;
202 if (arity != ARR_LEN(*arr) - 1) {
203 ir_node * block = (*arr)[0];
204 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
207 fix_backedges(current_ir_graph->obst, node);
208 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
212 (get_irn_intra_n)(ir_node *node, int n) {
213 return __get_irn_intra_n (node, n);
217 (get_irn_inter_n)(ir_node *node, int n) {
218 return __get_irn_inter_n (node, n);
221 ir_node *(*__get_irn_n)(ir_node *node, int n) = __get_irn_intra_n;
224 (get_irn_n)(ir_node *node, int n) {
225 return __get_irn_n(node, n);
229 set_irn_n (ir_node *node, int n, ir_node *in) {
230 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
231 assert(in && in->kind == k_ir_node);
232 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
233 /* Change block pred in both views! */
234 node->in[n + 1] = in;
235 assert(node->attr.filter.in_cg);
236 node->attr.filter.in_cg[n + 1] = in;
239 if (get_interprocedural_view()) { /* handle Filter and Block specially */
240 if (get_irn_opcode(node) == iro_Filter) {
241 assert(node->attr.filter.in_cg);
242 node->attr.filter.in_cg[n + 1] = in;
244 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
245 node->attr.block.in_cg[n + 1] = in;
248 /* else fall through */
250 node->in[n + 1] = in;
254 (get_irn_mode)(const ir_node *node) {
255 return __get_irn_mode(node);
259 (set_irn_mode)(ir_node *node, ir_mode *mode)
261 __set_irn_mode(node, mode);
265 get_irn_modecode (const ir_node *node)
268 return node->mode->code;
271 /** Gets the string representation of the mode .*/
273 get_irn_modename (const ir_node *node)
276 return get_mode_name(node->mode);
280 get_irn_modeident (const ir_node *node)
283 return get_mode_ident(node->mode);
287 (get_irn_op)(const ir_node *node)
289 return __get_irn_op(node);
292 /* should be private to the library: */
294 set_irn_op (ir_node *node, ir_op *op)
301 (get_irn_opcode)(const ir_node *node)
303 return __get_irn_opcode(node);
307 get_irn_opname (const ir_node *node)
310 if ((get_irn_op((ir_node *)node) == op_Phi) &&
311 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
312 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
313 return get_id_str(node->op->name);
317 get_irn_opident (const ir_node *node)
320 return node->op->name;
324 (get_irn_visited)(const ir_node *node)
326 return __get_irn_visited(node);
330 (set_irn_visited)(ir_node *node, unsigned long visited)
332 __set_irn_visited(node, visited);
336 (mark_irn_visited)(ir_node *node) {
337 __mark_irn_visited(node);
341 (irn_not_visited)(const ir_node *node) {
342 return __irn_not_visited(node);
346 (irn_visited)(const ir_node *node) {
347 return __irn_visited(node);
351 (set_irn_link)(ir_node *node, void *link) {
352 __set_irn_link(node, link);
356 (get_irn_link)(const ir_node *node) {
357 return __get_irn_link(node);
361 (get_irn_pinned)(const ir_node *node) {
362 return __get_irn_pinned(node);
365 void set_irn_pinned(ir_node *node, op_pin_state state) {
366 /* due to optimization an opt may be turned into a Tuple */
367 if (get_irn_op(node) == op_Tuple)
370 assert(node && get_op_pinned(get_irn_op(node)) == op_pin_state_exc_pinned);
371 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
373 node->attr.except.pin_state = state;
376 #ifdef DO_HEAPANALYSIS
377 /* Access the abstract interpretation information of a node.
378 Returns NULL if no such information is available. */
379 struct abstval *get_irn_abst_value(ir_node *n) {
382 /* Set the abstract interpretation information of a node. */
383 void set_irn_abst_value(ir_node *n, struct abstval *os) {
386 struct section *firm_get_irn_section(ir_node *n) {
389 void firm_set_irn_section(ir_node *n, struct section *s) {
392 #endif /* DO_HEAPANALYSIS */
395 /* Outputs a unique number for this node */
397 get_irn_node_nr(const ir_node *node) {
400 return node->node_nr;
407 get_irn_const_attr (ir_node *node)
409 assert (node->op == op_Const);
410 return node->attr.con;
414 get_irn_proj_attr (ir_node *node)
416 assert (node->op == op_Proj);
417 return node->attr.proj;
421 get_irn_alloc_attr (ir_node *node)
423 assert (node->op == op_Alloc);
428 get_irn_free_attr (ir_node *node)
430 assert (node->op == op_Free);
431 return node->attr.f = skip_tid(node->attr.f);
435 get_irn_symconst_attr (ir_node *node)
437 assert (node->op == op_SymConst);
442 get_irn_call_attr (ir_node *node)
444 assert (node->op == op_Call);
445 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
449 get_irn_funccall_attr (ir_node *node)
451 assert (node->op == op_FuncCall);
452 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
456 get_irn_sel_attr (ir_node *node)
458 assert (node->op == op_Sel);
463 get_irn_phi_attr (ir_node *node)
465 assert (node->op == op_Phi);
466 return node->attr.phi0_pos;
470 get_irn_block_attr (ir_node *node)
472 assert (node->op == op_Block);
473 return node->attr.block;
477 get_irn_load_attr (ir_node *node)
479 assert (node->op == op_Load);
480 return node->attr.load;
484 get_irn_store_attr (ir_node *node)
486 assert (node->op == op_Store);
487 return node->attr.store;
491 get_irn_except_attr (ir_node *node)
493 assert (node->op == op_Div || node->op == op_Quot ||
494 node->op == op_DivMod || node->op == op_Mod);
495 return node->attr.except;
498 /** manipulate fields of individual nodes **/
500 /* this works for all except Block */
502 get_nodes_block (ir_node *node) {
503 assert (!(node->op == op_Block));
504 return get_irn_n(node, -1);
508 set_nodes_block (ir_node *node, ir_node *block) {
509 assert (!(node->op == op_Block));
510 set_irn_n(node, -1, block);
513 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
514 * from Start. If so returns frame type, else Null. */
515 type *is_frame_pointer(ir_node *n) {
516 if ((get_irn_op(n) == op_Proj) &&
517 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
518 ir_node *start = get_Proj_pred(n);
519 if (get_irn_op(start) == op_Start) {
520 return get_irg_frame_type(get_irn_irg(start));
526 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
527 * from Start. If so returns global type, else Null. */
528 type *is_globals_pointer(ir_node *n) {
529 if ((get_irn_op(n) == op_Proj) &&
530 (get_Proj_proj(n) == pn_Start_P_globals)) {
531 ir_node *start = get_Proj_pred(n);
532 if (get_irn_op(start) == op_Start) {
533 return get_glob_type();
539 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
540 * from Start. If so returns 1, else 0. */
541 int is_value_arg_pointer(ir_node *n) {
542 if ((get_irn_op(n) == op_Proj) &&
543 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
544 (get_irn_op(get_Proj_pred(n)) == op_Start))
549 /* Returns an array with the predecessors of the Block. Depending on
550 the implementation of the graph data structure this can be a copy of
551 the internal representation of predecessors as well as the internal
552 array itself. Therefore writing to this array might obstruct the ir. */
554 get_Block_cfgpred_arr (ir_node *node)
556 assert ((node->op == op_Block));
557 return (ir_node **)&(get_irn_in(node)[1]);
562 get_Block_n_cfgpreds (ir_node *node) {
563 assert ((node->op == op_Block));
564 return get_irn_arity(node);
568 get_Block_cfgpred (ir_node *node, int pos) {
570 assert (node->op == op_Block);
571 assert(-1 <= pos && pos < get_irn_arity(node));
572 return get_irn_n(node, pos);
576 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
577 assert (node->op == op_Block);
578 set_irn_n(node, pos, pred);
582 get_Block_matured (ir_node *node) {
583 assert (node->op == op_Block);
584 return node->attr.block.matured;
588 set_Block_matured (ir_node *node, bool matured) {
589 assert (node->op == op_Block);
590 node->attr.block.matured = matured;
593 get_Block_block_visited (ir_node *node) {
594 assert (node->op == op_Block);
595 return node->attr.block.block_visited;
599 set_Block_block_visited (ir_node *node, unsigned long visit) {
600 assert (node->op == op_Block);
601 node->attr.block.block_visited = visit;
604 /* For this current_ir_graph must be set. */
606 mark_Block_block_visited (ir_node *node) {
607 assert (node->op == op_Block);
608 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
612 Block_not_block_visited(ir_node *node) {
613 assert (node->op == op_Block);
614 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
618 get_Block_graph_arr (ir_node *node, int pos) {
619 assert (node->op == op_Block);
620 return node->attr.block.graph_arr[pos+1];
624 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
625 assert (node->op == op_Block);
626 node->attr.block.graph_arr[pos+1] = value;
629 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
630 assert(node->op == op_Block);
631 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
632 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
633 node->attr.block.in_cg[0] = NULL;
634 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
636 /* Fix backedge array. fix_backedges operates depending on
637 interprocedural_view. */
638 int ipv = get_interprocedural_view();
639 set_interprocedural_view(true);
640 fix_backedges(current_ir_graph->obst, node);
641 set_interprocedural_view(ipv);
644 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
647 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
648 assert(node->op == op_Block &&
649 node->attr.block.in_cg &&
650 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
651 node->attr.block.in_cg[pos + 1] = pred;
654 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
655 assert(node->op == op_Block);
656 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
659 int get_Block_cg_n_cfgpreds(ir_node * node) {
660 assert(node->op == op_Block);
661 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
664 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
665 assert(node->op == op_Block && node->attr.block.in_cg);
666 return node->attr.block.in_cg[pos + 1];
669 void remove_Block_cg_cfgpred_arr(ir_node * node) {
670 assert(node->op == op_Block);
671 node->attr.block.in_cg = NULL;
675 set_Start_irg(ir_node *node, ir_graph *irg) {
676 assert(node->op == op_Start);
677 assert(is_ir_graph(irg));
678 assert(0 && " Why set irg? -- use set_irn_irg");
682 get_End_n_keepalives(ir_node *end) {
683 assert (end->op == op_End);
684 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
688 get_End_keepalive(ir_node *end, int pos) {
689 assert (end->op == op_End);
690 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
694 add_End_keepalive (ir_node *end, ir_node *ka) {
695 assert (end->op == op_End);
696 ARR_APP1 (ir_node *, end->in, ka);
700 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
701 assert (end->op == op_End);
702 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
706 free_End (ir_node *end) {
707 assert (end->op == op_End);
709 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
710 end->in = NULL; /* @@@ make sure we get an error if we use the
711 in array afterwards ... */
716 > Implementing the case construct (which is where the constant Proj node is
717 > important) involves far more than simply determining the constant values.
718 > We could argue that this is more properly a function of the translator from
719 > Firm to the target machine. That could be done if there was some way of
720 > projecting "default" out of the Cond node.
721 I know it's complicated.
722 Basically there are two proglems:
723 - determining the gaps between the projs
724 - determining the biggest case constant to know the proj number for
726 I see several solutions:
727 1. Introduce a ProjDefault node. Solves both problems.
728 This means to extend all optimizations executed during construction.
729 2. Give the Cond node for switch two flavors:
730 a) there are no gaps in the projs (existing flavor)
731 b) gaps may exist, default proj is still the Proj with the largest
732 projection number. This covers also the gaps.
733 3. Fix the semantic of the Cond to that of 2b)
735 Solution 2 seems to be the best:
736 Computing the gaps in the Firm representation is not too hard, i.e.,
737 libFIRM can implement a routine that transforms between the two
738 flavours. This is also possible for 1) but 2) does not require to
739 change any existing optimization.
740 Further it should be far simpler to determine the biggest constant than
742 I don't want to choose 3) as 2a) seems to have advantages for
743 dataflow analysis and 3) does not allow to convert the representation to
747 get_Cond_selector (ir_node *node) {
748 assert (node->op == op_Cond);
749 return get_irn_n(node, 0);
753 set_Cond_selector (ir_node *node, ir_node *selector) {
754 assert (node->op == op_Cond);
755 set_irn_n(node, 0, selector);
759 get_Cond_kind (ir_node *node) {
760 assert (node->op == op_Cond);
761 return node->attr.c.kind;
765 set_Cond_kind (ir_node *node, cond_kind kind) {
766 assert (node->op == op_Cond);
767 node->attr.c.kind = kind;
771 get_Cond_defaultProj (ir_node *node) {
772 assert (node->op == op_Cond);
773 return node->attr.c.default_proj;
777 get_Return_mem (ir_node *node) {
778 assert (node->op == op_Return);
779 return get_irn_n(node, 0);
783 set_Return_mem (ir_node *node, ir_node *mem) {
784 assert (node->op == op_Return);
785 set_irn_n(node, 0, mem);
789 get_Return_n_ress (ir_node *node) {
790 assert (node->op == op_Return);
791 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
795 get_Return_res_arr (ir_node *node)
797 assert ((node->op == op_Return));
798 if (get_Return_n_ress(node) > 0)
799 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
806 set_Return_n_res (ir_node *node, int results) {
807 assert (node->op == op_Return);
812 get_Return_res (ir_node *node, int pos) {
813 assert (node->op == op_Return);
814 assert (get_Return_n_ress(node) > pos);
815 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
819 set_Return_res (ir_node *node, int pos, ir_node *res){
820 assert (node->op == op_Return);
821 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
825 get_Raise_mem (ir_node *node) {
826 assert (node->op == op_Raise);
827 return get_irn_n(node, 0);
831 set_Raise_mem (ir_node *node, ir_node *mem) {
832 assert (node->op == op_Raise);
833 set_irn_n(node, 0, mem);
837 get_Raise_exo_ptr (ir_node *node) {
838 assert (node->op == op_Raise);
839 return get_irn_n(node, 1);
843 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
844 assert (node->op == op_Raise);
845 set_irn_n(node, 1, exo_ptr);
848 tarval *get_Const_tarval (ir_node *node) {
849 assert (node->op == op_Const);
850 return node->attr.con.tv;
854 set_Const_tarval (ir_node *node, tarval *con) {
855 assert (node->op == op_Const);
856 node->attr.con.tv = con;
860 /* The source language type. Must be an atomic type. Mode of type must
861 be mode of node. For tarvals from entities type must be pointer to
864 get_Const_type (ir_node *node) {
865 assert (node->op == op_Const);
866 return node->attr.con.tp;
870 set_Const_type (ir_node *node, type *tp) {
871 assert (node->op == op_Const);
872 if (tp != unknown_type) {
873 assert (is_atomic_type(tp));
874 assert (get_type_mode(tp) == get_irn_mode(node));
876 node->attr.con.tp = tp;
881 get_SymConst_kind (const ir_node *node) {
882 assert (node->op == op_SymConst);
883 return node->attr.i.num;
887 set_SymConst_kind (ir_node *node, symconst_kind num) {
888 assert (node->op == op_SymConst);
889 node->attr.i.num = num;
893 get_SymConst_type (ir_node *node) {
894 assert ( (node->op == op_SymConst)
895 && ( get_SymConst_kind(node) == symconst_type_tag
896 || get_SymConst_kind(node) == symconst_size));
897 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
901 set_SymConst_type (ir_node *node, type *tp) {
902 assert ( (node->op == op_SymConst)
903 && ( get_SymConst_kind(node) == symconst_type_tag
904 || get_SymConst_kind(node) == symconst_size));
905 node->attr.i.sym.type_p = tp;
909 get_SymConst_name (ir_node *node) {
910 assert ( (node->op == op_SymConst)
911 && (get_SymConst_kind(node) == symconst_addr_name));
912 return node->attr.i.sym.ident_p;
916 set_SymConst_name (ir_node *node, ident *name) {
917 assert ( (node->op == op_SymConst)
918 && (get_SymConst_kind(node) == symconst_addr_name));
919 node->attr.i.sym.ident_p = name;
923 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
924 entity *get_SymConst_entity (ir_node *node) {
925 assert ( (node->op == op_SymConst)
926 && (get_SymConst_kind (node) == symconst_addr_ent));
927 return node->attr.i.sym.entity_p;
930 void set_SymConst_entity (ir_node *node, entity *ent) {
931 assert ( (node->op == op_SymConst)
932 && (get_SymConst_kind(node) == symconst_addr_ent));
933 node->attr.i.sym.entity_p = ent;
936 union symconst_symbol
937 get_SymConst_symbol (ir_node *node) {
938 assert (node->op == op_SymConst);
939 return node->attr.i.sym;
943 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
944 assert (node->op == op_SymConst);
945 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
946 node->attr.i.sym = sym;
950 get_SymConst_value_type (ir_node *node) {
951 assert (node->op == op_SymConst);
952 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
953 return node->attr.i.tp;
957 set_SymConst_value_type (ir_node *node, type *tp) {
958 assert (node->op == op_SymConst);
959 node->attr.i.tp = tp;
963 get_Sel_mem (ir_node *node) {
964 assert (node->op == op_Sel);
965 return get_irn_n(node, 0);
969 set_Sel_mem (ir_node *node, ir_node *mem) {
970 assert (node->op == op_Sel);
971 set_irn_n(node, 0, mem);
975 get_Sel_ptr (ir_node *node) {
976 assert (node->op == op_Sel);
977 return get_irn_n(node, 1);
981 set_Sel_ptr (ir_node *node, ir_node *ptr) {
982 assert (node->op == op_Sel);
983 set_irn_n(node, 1, ptr);
987 get_Sel_n_indexs (ir_node *node) {
988 assert (node->op == op_Sel);
989 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
993 get_Sel_index_arr (ir_node *node)
995 assert ((node->op == op_Sel));
996 if (get_Sel_n_indexs(node) > 0)
997 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1003 get_Sel_index (ir_node *node, int pos) {
1004 assert (node->op == op_Sel);
1005 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1009 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1010 assert (node->op == op_Sel);
1011 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1015 get_Sel_entity (ir_node *node) {
1016 assert (node->op == op_Sel);
1017 return node->attr.s.ent;
1021 set_Sel_entity (ir_node *node, entity *ent) {
1022 assert (node->op == op_Sel);
1023 node->attr.s.ent = ent;
1027 get_InstOf_ent (ir_node *node) {
1028 assert (node->op = op_InstOf);
1029 return (node->attr.io.ent);
1033 set_InstOf_ent (ir_node *node, type *ent) {
1034 assert (node->op = op_InstOf);
1035 node->attr.io.ent = ent;
1039 get_InstOf_store (ir_node *node) {
1040 assert (node->op = op_InstOf);
1041 return (get_irn_n (node, 0));
1045 set_InstOf_store (ir_node *node, ir_node *obj) {
1046 assert (node->op = op_InstOf);
1047 set_irn_n (node, 0, obj);
1051 get_InstOf_obj (ir_node *node) {
1052 assert (node->op = op_InstOf);
1053 return (get_irn_n (node, 1));
1057 set_InstOf_obj (ir_node *node, ir_node *obj) {
1058 assert (node->op = op_InstOf);
1059 set_irn_n (node, 1, obj);
1063 /* For unary and binary arithmetic operations the access to the
1064 operands can be factored out. Left is the first, right the
1065 second arithmetic value as listed in tech report 0999-33.
1066 unops are: Minus, Abs, Not, Conv, Cast
1067 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1068 Shr, Shrs, Rotate, Cmp */
1072 get_Call_mem (ir_node *node) {
1073 assert (node->op == op_Call);
1074 return get_irn_n(node, 0);
1078 set_Call_mem (ir_node *node, ir_node *mem) {
1079 assert (node->op == op_Call);
1080 set_irn_n(node, 0, mem);
1084 get_Call_ptr (ir_node *node) {
1085 assert (node->op == op_Call);
1086 return get_irn_n(node, 1);
1090 set_Call_ptr (ir_node *node, ir_node *ptr) {
1091 assert (node->op == op_Call);
1092 set_irn_n(node, 1, ptr);
1096 get_Call_param_arr (ir_node *node) {
1097 assert (node->op == op_Call);
1098 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1102 get_Call_n_params (ir_node *node) {
1103 assert (node->op == op_Call);
1104 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1108 get_Call_arity (ir_node *node) {
1109 assert (node->op == op_Call);
1110 return get_Call_n_params(node);
1114 set_Call_arity (ir_node *node, ir_node *arity) {
1115 assert (node->op == op_Call);
1120 get_Call_param (ir_node *node, int pos) {
1121 assert (node->op == op_Call);
1122 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1126 set_Call_param (ir_node *node, int pos, ir_node *param) {
1127 assert (node->op == op_Call);
1128 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1132 get_Call_type (ir_node *node) {
1133 assert (node->op == op_Call);
1134 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1138 set_Call_type (ir_node *node, type *tp) {
1139 assert (node->op == op_Call);
1140 assert ((get_unknown_type() == tp) || is_method_type(tp));
1141 node->attr.call.cld_tp = tp;
1144 int Call_has_callees(ir_node *node) {
1145 assert(node && node->op == op_Call);
1146 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1147 (node->attr.call.callee_arr != NULL));
1150 int get_Call_n_callees(ir_node * node) {
1151 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1152 return ARR_LEN(node->attr.call.callee_arr);
1155 entity * get_Call_callee(ir_node * node, int pos) {
1156 assert(pos >= 0 && pos < get_Call_n_callees(node));
1157 return node->attr.call.callee_arr[pos];
1160 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1161 assert(node->op == op_Call);
1162 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1163 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1165 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1168 void remove_Call_callee_arr(ir_node * node) {
1169 assert(node->op == op_Call);
1170 node->attr.call.callee_arr = NULL;
1173 ir_node * get_CallBegin_ptr (ir_node *node) {
1174 assert(node->op == op_CallBegin);
1175 return get_irn_n(node, 0);
1177 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1178 assert(node->op == op_CallBegin);
1179 set_irn_n(node, 0, ptr);
1181 ir_node * get_CallBegin_call (ir_node *node) {
1182 assert(node->op == op_CallBegin);
1183 return node->attr.callbegin.call;
1185 void set_CallBegin_call (ir_node *node, ir_node *call) {
1186 assert(node->op == op_CallBegin);
1187 node->attr.callbegin.call = call;
1191 get_FuncCall_ptr (ir_node *node) {
1192 assert (node->op == op_FuncCall);
1193 return get_irn_n(node, 0);
1197 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1198 assert (node->op == op_FuncCall);
1199 set_irn_n(node, 0, ptr);
1203 get_FuncCall_param_arr (ir_node *node) {
1204 assert (node->op == op_FuncCall);
1205 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1209 get_FuncCall_n_params (ir_node *node) {
1210 assert (node->op == op_FuncCall);
1211 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1215 get_FuncCall_arity (ir_node *node) {
1216 assert (node->op == op_FuncCall);
1217 return get_FuncCall_n_params(node);
1221 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1222 assert (node->op == op_FuncCall);
1227 get_FuncCall_param (ir_node *node, int pos) {
1228 assert (node->op == op_FuncCall);
1229 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1233 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1234 assert (node->op == op_FuncCall);
1235 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1239 get_FuncCall_type (ir_node *node) {
1240 assert (node->op == op_FuncCall);
1241 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1245 set_FuncCall_type (ir_node *node, type *tp) {
1246 assert (node->op == op_FuncCall);
1247 assert (is_method_type(tp));
1248 node->attr.call.cld_tp = tp;
1251 int FuncCall_has_callees(ir_node *node) {
1252 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1253 (node->attr.call.callee_arr != NULL));
1256 int get_FuncCall_n_callees(ir_node * node) {
1257 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1258 return ARR_LEN(node->attr.call.callee_arr);
1261 entity * get_FuncCall_callee(ir_node * node, int pos) {
1262 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1263 return node->attr.call.callee_arr[pos];
1266 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1267 assert(node->op == op_FuncCall);
1268 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1269 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1271 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1274 void remove_FuncCall_callee_arr(ir_node * node) {
1275 assert(node->op == op_FuncCall);
1276 node->attr.call.callee_arr = NULL;
1281 ir_node * get_##OP##_left(ir_node *node) { \
1282 assert(node->op == op_##OP); \
1283 return get_irn_n(node, node->op->op_index); \
1285 void set_##OP##_left(ir_node *node, ir_node *left) { \
1286 assert(node->op == op_##OP); \
1287 set_irn_n(node, node->op->op_index, left); \
1289 ir_node *get_##OP##_right(ir_node *node) { \
1290 assert(node->op == op_##OP); \
1291 return get_irn_n(node, node->op->op_index + 1); \
1293 void set_##OP##_right(ir_node *node, ir_node *right) { \
1294 assert(node->op == op_##OP); \
1295 set_irn_n(node, node->op->op_index + 1, right); \
1299 ir_node *get_##OP##_op(ir_node *node) { \
1300 assert(node->op == op_##OP); \
1301 return get_irn_n(node, node->op->op_index); \
1303 void set_##OP##_op (ir_node *node, ir_node *op) { \
1304 assert(node->op == op_##OP); \
1305 set_irn_n(node, node->op->op_index, op); \
1315 get_Quot_mem (ir_node *node) {
1316 assert (node->op == op_Quot);
1317 return get_irn_n(node, 0);
1321 set_Quot_mem (ir_node *node, ir_node *mem) {
1322 assert (node->op == op_Quot);
1323 set_irn_n(node, 0, mem);
1329 get_DivMod_mem (ir_node *node) {
1330 assert (node->op == op_DivMod);
1331 return get_irn_n(node, 0);
1335 set_DivMod_mem (ir_node *node, ir_node *mem) {
1336 assert (node->op == op_DivMod);
1337 set_irn_n(node, 0, mem);
1343 get_Div_mem (ir_node *node) {
1344 assert (node->op == op_Div);
1345 return get_irn_n(node, 0);
1349 set_Div_mem (ir_node *node, ir_node *mem) {
1350 assert (node->op == op_Div);
1351 set_irn_n(node, 0, mem);
1357 get_Mod_mem (ir_node *node) {
1358 assert (node->op == op_Mod);
1359 return get_irn_n(node, 0);
1363 set_Mod_mem (ir_node *node, ir_node *mem) {
1364 assert (node->op == op_Mod);
1365 set_irn_n(node, 0, mem);
1382 get_Cast_type (ir_node *node) {
1383 assert (node->op == op_Cast);
1384 return node->attr.cast.totype;
1388 set_Cast_type (ir_node *node, type *to_tp) {
1389 assert (node->op == op_Cast);
1390 node->attr.cast.totype = to_tp;
1394 (is_unop)(const ir_node *node) {
1395 return __is_unop(node);
1399 get_unop_op (ir_node *node) {
1400 if (node->op->opar == oparity_unary)
1401 return get_irn_n(node, node->op->op_index);
1403 assert(node->op->opar == oparity_unary);
1408 set_unop_op (ir_node *node, ir_node *op) {
1409 if (node->op->opar == oparity_unary)
1410 set_irn_n(node, node->op->op_index, op);
1412 assert(node->op->opar == oparity_unary);
1416 (is_binop)(const ir_node *node) {
1417 return __is_binop(node);
1421 get_binop_left (ir_node *node) {
1422 if (node->op->opar == oparity_binary)
1423 return get_irn_n(node, node->op->op_index);
1425 assert(node->op->opar == oparity_binary);
1430 set_binop_left (ir_node *node, ir_node *left) {
1431 if (node->op->opar == oparity_binary)
1432 set_irn_n(node, node->op->op_index, left);
1434 assert (node->op->opar == oparity_binary);
1438 get_binop_right (ir_node *node) {
1439 if (node->op->opar == oparity_binary)
1440 return get_irn_n(node, node->op->op_index + 1);
1442 assert(node->op->opar == oparity_binary);
1447 set_binop_right (ir_node *node, ir_node *right) {
1448 if (node->op->opar == oparity_binary)
1449 set_irn_n(node, node->op->op_index + 1, right);
1451 assert (node->op->opar == oparity_binary);
1454 int is_Phi (ir_node *n) {
1460 if (op == op_Filter) return get_interprocedural_view();
1463 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1464 (get_irn_arity(n) > 0));
1469 int is_Phi0 (ir_node *n) {
1472 return ((get_irn_op(n) == op_Phi) &&
1473 (get_irn_arity(n) == 0) &&
1474 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1478 get_Phi_preds_arr (ir_node *node) {
1479 assert (node->op == op_Phi);
1480 return (ir_node **)&(get_irn_in(node)[1]);
1484 get_Phi_n_preds (ir_node *node) {
1485 assert (is_Phi(node) || is_Phi0(node));
1486 return (get_irn_arity(node));
1490 void set_Phi_n_preds (ir_node *node, int n_preds) {
1491 assert (node->op == op_Phi);
1496 get_Phi_pred (ir_node *node, int pos) {
1497 assert (is_Phi(node) || is_Phi0(node));
1498 return get_irn_n(node, pos);
1502 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1503 assert (is_Phi(node) || is_Phi0(node));
1504 set_irn_n(node, pos, pred);
1508 int is_memop(ir_node *node) {
1509 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1512 ir_node *get_memop_mem (ir_node *node) {
1513 assert(is_memop(node));
1514 return get_irn_n(node, 0);
1517 void set_memop_mem (ir_node *node, ir_node *mem) {
1518 assert(is_memop(node));
1519 set_irn_n(node, 0, mem);
1522 ir_node *get_memop_ptr (ir_node *node) {
1523 assert(is_memop(node));
1524 return get_irn_n(node, 1);
1527 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1528 assert(is_memop(node));
1529 set_irn_n(node, 1, ptr);
1533 get_Load_mem (ir_node *node) {
1534 assert (node->op == op_Load);
1535 return get_irn_n(node, 0);
1539 set_Load_mem (ir_node *node, ir_node *mem) {
1540 assert (node->op == op_Load);
1541 set_irn_n(node, 0, mem);
1545 get_Load_ptr (ir_node *node) {
1546 assert (node->op == op_Load);
1547 return get_irn_n(node, 1);
1551 set_Load_ptr (ir_node *node, ir_node *ptr) {
1552 assert (node->op == op_Load);
1553 set_irn_n(node, 1, ptr);
1557 get_Load_mode (ir_node *node) {
1558 assert (node->op == op_Load);
1559 return node->attr.load.load_mode;
1563 set_Load_mode (ir_node *node, ir_mode *mode) {
1564 assert (node->op == op_Load);
1565 node->attr.load.load_mode = mode;
1569 get_Load_volatility (ir_node *node) {
1570 assert (node->op == op_Load);
1571 return node->attr.load.volatility;
1575 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1576 assert (node->op == op_Load);
1577 node->attr.load.volatility = volatility;
1582 get_Store_mem (ir_node *node) {
1583 assert (node->op == op_Store);
1584 return get_irn_n(node, 0);
1588 set_Store_mem (ir_node *node, ir_node *mem) {
1589 assert (node->op == op_Store);
1590 set_irn_n(node, 0, mem);
1594 get_Store_ptr (ir_node *node) {
1595 assert (node->op == op_Store);
1596 return get_irn_n(node, 1);
1600 set_Store_ptr (ir_node *node, ir_node *ptr) {
1601 assert (node->op == op_Store);
1602 set_irn_n(node, 1, ptr);
1606 get_Store_value (ir_node *node) {
1607 assert (node->op == op_Store);
1608 return get_irn_n(node, 2);
1612 set_Store_value (ir_node *node, ir_node *value) {
1613 assert (node->op == op_Store);
1614 set_irn_n(node, 2, value);
1618 get_Store_volatility (ir_node *node) {
1619 assert (node->op == op_Store);
1620 return node->attr.store.volatility;
1624 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1625 assert (node->op == op_Store);
1626 node->attr.store.volatility = volatility;
1631 get_Alloc_mem (ir_node *node) {
1632 assert (node->op == op_Alloc);
1633 return get_irn_n(node, 0);
1637 set_Alloc_mem (ir_node *node, ir_node *mem) {
1638 assert (node->op == op_Alloc);
1639 set_irn_n(node, 0, mem);
1643 get_Alloc_size (ir_node *node) {
1644 assert (node->op == op_Alloc);
1645 return get_irn_n(node, 1);
1649 set_Alloc_size (ir_node *node, ir_node *size) {
1650 assert (node->op == op_Alloc);
1651 set_irn_n(node, 1, size);
1655 get_Alloc_type (ir_node *node) {
1656 assert (node->op == op_Alloc);
1657 return node->attr.a.type = skip_tid(node->attr.a.type);
1661 set_Alloc_type (ir_node *node, type *tp) {
1662 assert (node->op == op_Alloc);
1663 node->attr.a.type = tp;
1667 get_Alloc_where (ir_node *node) {
1668 assert (node->op == op_Alloc);
1669 return node->attr.a.where;
1673 set_Alloc_where (ir_node *node, where_alloc where) {
1674 assert (node->op == op_Alloc);
1675 node->attr.a.where = where;
1680 get_Free_mem (ir_node *node) {
1681 assert (node->op == op_Free);
1682 return get_irn_n(node, 0);
1686 set_Free_mem (ir_node *node, ir_node *mem) {
1687 assert (node->op == op_Free);
1688 set_irn_n(node, 0, mem);
1692 get_Free_ptr (ir_node *node) {
1693 assert (node->op == op_Free);
1694 return get_irn_n(node, 1);
1698 set_Free_ptr (ir_node *node, ir_node *ptr) {
1699 assert (node->op == op_Free);
1700 set_irn_n(node, 1, ptr);
1704 get_Free_size (ir_node *node) {
1705 assert (node->op == op_Free);
1706 return get_irn_n(node, 2);
1710 set_Free_size (ir_node *node, ir_node *size) {
1711 assert (node->op == op_Free);
1712 set_irn_n(node, 2, size);
1716 get_Free_type (ir_node *node) {
1717 assert (node->op == op_Free);
1718 return node->attr.f = skip_tid(node->attr.f);
1722 set_Free_type (ir_node *node, type *tp) {
1723 assert (node->op == op_Free);
1728 get_Sync_preds_arr (ir_node *node) {
1729 assert (node->op == op_Sync);
1730 return (ir_node **)&(get_irn_in(node)[1]);
1734 get_Sync_n_preds (ir_node *node) {
1735 assert (node->op == op_Sync);
1736 return (get_irn_arity(node));
1741 set_Sync_n_preds (ir_node *node, int n_preds) {
1742 assert (node->op == op_Sync);
1747 get_Sync_pred (ir_node *node, int pos) {
1748 assert (node->op == op_Sync);
1749 return get_irn_n(node, pos);
1753 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1754 assert (node->op == op_Sync);
1755 set_irn_n(node, pos, pred);
1759 get_Proj_pred (ir_node *node) {
1760 assert (is_Proj(node));
1761 return get_irn_n(node, 0);
1765 set_Proj_pred (ir_node *node, ir_node *pred) {
1766 assert (is_Proj(node));
1767 set_irn_n(node, 0, pred);
1771 get_Proj_proj (ir_node *node) {
1772 assert (is_Proj(node));
1773 if (get_irn_opcode(node) == iro_Proj) {
1774 return node->attr.proj;
1776 assert(get_irn_opcode(node) == iro_Filter);
1777 return node->attr.filter.proj;
1782 set_Proj_proj (ir_node *node, long proj) {
1783 assert (node->op == op_Proj);
1784 node->attr.proj = proj;
1788 get_Tuple_preds_arr (ir_node *node) {
1789 assert (node->op == op_Tuple);
1790 return (ir_node **)&(get_irn_in(node)[1]);
1794 get_Tuple_n_preds (ir_node *node) {
1795 assert (node->op == op_Tuple);
1796 return (get_irn_arity(node));
1801 set_Tuple_n_preds (ir_node *node, int n_preds) {
1802 assert (node->op == op_Tuple);
1807 get_Tuple_pred (ir_node *node, int pos) {
1808 assert (node->op == op_Tuple);
1809 return get_irn_n(node, pos);
1813 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1814 assert (node->op == op_Tuple);
1815 set_irn_n(node, pos, pred);
1819 get_Id_pred (ir_node *node) {
1820 assert (node->op == op_Id);
1821 return get_irn_n(node, 0);
1825 set_Id_pred (ir_node *node, ir_node *pred) {
1826 assert (node->op == op_Id);
1827 set_irn_n(node, 0, pred);
1830 ir_node *get_Confirm_value (ir_node *node) {
1831 assert (node->op == op_Confirm);
1832 return get_irn_n(node, 0);
1834 void set_Confirm_value (ir_node *node, ir_node *value) {
1835 assert (node->op == op_Confirm);
1836 set_irn_n(node, 0, value);
1838 ir_node *get_Confirm_bound (ir_node *node) {
1839 assert (node->op == op_Confirm);
1840 return get_irn_n(node, 1);
1842 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1843 assert (node->op == op_Confirm);
1844 set_irn_n(node, 0, bound);
1846 pn_Cmp get_Confirm_cmp (ir_node *node) {
1847 assert (node->op == op_Confirm);
1848 return node->attr.confirm_cmp;
1850 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1851 assert (node->op == op_Confirm);
1852 node->attr.confirm_cmp = cmp;
1857 get_Filter_pred (ir_node *node) {
1858 assert(node->op == op_Filter);
1862 set_Filter_pred (ir_node *node, ir_node *pred) {
1863 assert(node->op == op_Filter);
1867 get_Filter_proj(ir_node *node) {
1868 assert(node->op == op_Filter);
1869 return node->attr.filter.proj;
1872 set_Filter_proj (ir_node *node, long proj) {
1873 assert(node->op == op_Filter);
1874 node->attr.filter.proj = proj;
1877 /* Don't use get_irn_arity, get_irn_n in implementation as access
1878 shall work independent of view!!! */
1879 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1880 assert(node->op == op_Filter);
1881 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1882 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1883 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1884 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1885 node->attr.filter.in_cg[0] = node->in[0];
1887 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1890 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1891 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1892 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1893 node->attr.filter.in_cg[pos + 1] = pred;
1895 int get_Filter_n_cg_preds(ir_node *node) {
1896 assert(node->op == op_Filter && node->attr.filter.in_cg);
1897 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1899 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1901 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1903 arity = ARR_LEN(node->attr.filter.in_cg);
1904 assert(pos < arity - 1);
1905 return node->attr.filter.in_cg[pos + 1];
1910 get_irn_irg(ir_node *node) {
1911 if (get_irn_op(node) != op_Block)
1912 node = get_nodes_block(node);
1913 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1914 node = get_nodes_block(node);
1915 assert(get_irn_op(node) == op_Block);
1916 return node->attr.block.irg;
1920 /*----------------------------------------------------------------*/
1921 /* Auxiliary routines */
1922 /*----------------------------------------------------------------*/
1925 skip_Proj (ir_node *node) {
1926 /* don't assert node !!! */
1927 if (node && is_Proj(node)) {
1928 return get_Proj_pred(node);
1935 skip_Tuple (ir_node *node) {
1938 if (!get_opt_normalize()) return node;
1940 node = skip_Id(node);
1941 if (get_irn_op(node) == op_Proj) {
1942 pred = skip_Id(get_Proj_pred(node));
1943 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1944 pred = skip_Id(skip_Tuple(pred));
1945 if (get_irn_op(pred) == op_Tuple)
1946 return get_Tuple_pred(pred, get_Proj_proj(node));
1951 /** returns operand of node if node is a Cast */
1952 ir_node *skip_Cast (ir_node *node) {
1953 if (node && get_irn_op(node) == op_Cast) {
1954 return skip_Id(get_irn_n(node, 0));
1961 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1962 than any other approach, as Id chains are resolved and all point to the real node, or
1963 all id's are self loops. */
/* Follows a chain of Id nodes to the real node while shortening the
   chain: the node is temporarily turned into a self-loop so Id cycles
   are detected, then repointed at the chain end.
   NOTE(review): this is the first of two skip_Id definitions in this
   listing; the two variants are presumably selected by preprocessor
   guards elided here -- confirm against the full file before editing. */
1965 skip_Id (ir_node *node) {
1966 /* don't assert node !!! */
1968 if (!get_opt_normalize()) return node;
1970 /* Don't use get_Id_pred: We get into an endless loop for
1971 self-referencing Ids. */
1972 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1973 ir_node *rem_pred = node->in[0+1];
1976 assert (get_irn_arity (node) > 0);
/* turn node into a self loop so a cyclic Id chain terminates */
1978 node->in[0+1] = node;
1979 res = skip_Id(rem_pred);
1980 if (res->op == op_Id) /* self-loop */ return node;
/* repoint at the chain end so later lookups are O(1) */
1982 node->in[0+1] = res;
1989 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1990 than any other approach, as Id chains are resolved and all point to the real node, or
1991 all id's are self loops. */
/* Second variant of skip_Id (see the duplicate above): same contract --
   follow an Id chain to the real node, compacting the chain so that all
   Ids reference the chain end, with self-loops left in place.
   NOTE(review): presumably the preprocessor-selected alternative to the
   first definition; confirm the guards against the full file. */
1993 skip_Id (ir_node *node) {
1995 /* don't assert node !!! */
1997 if (!node || (node->op != op_Id)) return node;
1999 if (!get_opt_normalize()) return node;
2001 /* Don't use get_Id_pred: We get into an endless loop for
2002 self-referencing Ids. */
2003 pred = node->in[0+1];
2005 if (pred->op != op_Id) return pred;
2007 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2008 ir_node *rem_pred, *res;
2010 if (pred->op != op_Id) return pred; /* shortcut */
2013 assert (get_irn_arity (node) > 0);
2015 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2016 res = skip_Id(rem_pred);
2017 if (res->op == op_Id) /* self-loop */ return node;
2019 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2028 (is_Bad)(const ir_node *node) {
2029 return __is_Bad(node);
2033 (is_no_Block)(const ir_node *node) {
2034 return __is_no_Block(node);
2038 (is_Block)(const ir_node *node) {
2039 return __is_Block(node);
2042 /* returns true if node is a Unknown node. */
2044 is_Unknown (const ir_node *node) {
2046 return (get_irn_op(node) == op_Unknown);
2050 is_Proj (const ir_node *node) {
2052 return node->op == op_Proj
2053 || (!get_interprocedural_view() && node->op == op_Filter);
2056 /* Returns true if the operation manipulates control flow. */
2058 is_cfop(const ir_node *node) {
2059 return is_cfopcode(get_irn_op(node));
2062 /* Returns true if the operation manipulates interprocedural control flow:
2063 CallBegin, EndReg, EndExcept */
2064 int is_ip_cfop(const ir_node *node) {
2065 return is_ip_cfopcode(get_irn_op(node));
2068 /* Returns true if the operation can change the control flow because
2071 is_fragile_op(const ir_node *node) {
2072 return is_op_fragile(get_irn_op(node));
2075 /* Returns the memory operand of fragile operations. */
2076 ir_node *get_fragile_op_mem(ir_node *node) {
2077 assert(node && is_fragile_op(node));
2079 switch (get_irn_opcode (node)) {
2088 return get_irn_n(node, 0);
2093 assert(0 && "should not be reached");
2098 /* Returns true if the operation is a forking control flow operation. */
2100 is_forking_op(const ir_node *node) {
2101 return is_op_forking(get_irn_op(node));
2104 #ifdef DEBUG_libfirm
2105 void dump_irn (ir_node *n) {
2106 int i, arity = get_irn_arity(n);
2107 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2109 ir_node *pred = get_irn_n(n, -1);
2110 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2111 get_irn_node_nr(pred), (void *)pred);
2113 printf(" preds: \n");
2114 for (i = 0; i < arity; ++i) {
2115 ir_node *pred = get_irn_n(n, i);
2116 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2117 get_irn_node_nr(pred), (void *)pred);
2121 #else /* DEBUG_libfirm */
2122 void dump_irn (ir_node *n) {}
2123 #endif /* DEBUG_libfirm */