3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46 * returns the pnc name from a pnc constant
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
/* Calculates the negated pnc condition: the comparison relation that
   holds exactly when `pnc` does not.  Negation toggles the Uo
   (unordered) component of the relation, e.g. Lt <-> Uge, Eq <-> Ne,
   Leg <-> Uo. */
56 get_negated_pnc(int pnc) {
/* NOTE(review): the `switch (pnc)` header is on a line outside this
   excerpt. */
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
/* All 16 pnc values are handled by the cases above, so this return is
   unreachable; it only silences the compiler's missing-return warning. */
75 return 99; /* to shut up gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
/* Allocates and initializes a new ir_node on irg's obstack.
   If arity >= 0 the in array is allocated on the obstack with fixed
   size; if arity < 0 a dynamic (flexible) array is created so that
   predecessors can be appended later (e.g. for unfinished Blocks). */
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
/* The attribute payload is op-specific; attr is the trailing member,
   so the node size is offsetof(attr) plus the op's attribute size. */
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
/* in[0] is reserved for the block predecessor; operands are stored at
   indices 1..arity. */
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
158 int (*__get_irn_arity)(const ir_node *node) = __get_irn_intra_arity;
161 (get_irn_arity)(const ir_node *node) {
162 return __get_irn_arity(node);
165 /* Returns the array with ins. This array is shifted with respect to the
166 array accessed by get_irn_n: The block operand is at position 0 not -1.
167 (@@@ This should be changed.)
168 The order of the predecessors in this array is not guaranteed, except that
169 lists of operands as predecessors of Block or arguments of a Call are
172 get_irn_in (const ir_node *node) {
174 if (get_interprocedural_view()) { /* handle Filter and Block specially */
175 if (get_irn_opcode(node) == iro_Filter) {
176 assert(node->attr.filter.in_cg);
177 return node->attr.filter.in_cg;
178 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
179 return node->attr.block.in_cg;
181 /* else fall through */
187 set_irn_in (ir_node *node, int arity, ir_node **in) {
190 if (get_interprocedural_view()) { /* handle Filter and Block specially */
191 if (get_irn_opcode(node) == iro_Filter) {
192 assert(node->attr.filter.in_cg);
193 arr = &node->attr.filter.in_cg;
194 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
195 arr = &node->attr.block.in_cg;
202 if (arity != ARR_LEN(*arr) - 1) {
203 ir_node * block = (*arr)[0];
204 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
207 fix_backedges(current_ir_graph->obst, node);
208 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
212 (get_irn_intra_n)(ir_node *node, int n) {
213 return __get_irn_intra_n (node, n);
217 (get_irn_inter_n)(ir_node *node, int n) {
218 return __get_irn_inter_n (node, n);
221 ir_node *(*__get_irn_n)(ir_node *node, int n) = __get_irn_intra_n;
224 (get_irn_n)(ir_node *node, int n) {
225 return __get_irn_n(node, n);
/* Sets the n-th predecessor of `node` to `in`.  n == -1 addresses the
   block predecessor: the in array keeps the block at index 0, so every
   access is shifted by +1. */
229 set_irn_n (ir_node *node, int n, ir_node *in) {
230 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
231 assert(in && in->kind == k_ir_node);
/* A Filter's block predecessor must be kept consistent in both the
   intraprocedural (in) and interprocedural (in_cg) arrays. */
232 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
233 /* Change block pred in both views! */
234 node->in[n + 1] = in;
235 assert(node->attr.filter.in_cg);
236 node->attr.filter.in_cg[n + 1] = in;
/* NOTE(review): an early return presumably follows here on a line
   outside this excerpt -- confirm against the full file. */
239 if (get_interprocedural_view()) { /* handle Filter and Block specially */
240 if (get_irn_opcode(node) == iro_Filter) {
241 assert(node->attr.filter.in_cg);
242 node->attr.filter.in_cg[n + 1] = in;
244 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
245 node->attr.block.in_cg[n + 1] = in;
248 /* else fall through */
/* Default case: ordinary in-array update; +1 skips the block slot. */
250 node->in[n + 1] = in;
254 (get_irn_mode)(const ir_node *node) {
255 return __get_irn_mode(node);
259 (set_irn_mode)(ir_node *node, ir_mode *mode)
261 __set_irn_mode(node, mode);
265 get_irn_modecode (const ir_node *node)
268 return node->mode->code;
271 /** Gets the string representation of the mode .*/
273 get_irn_modename (const ir_node *node)
276 return get_mode_name(node->mode);
280 get_irn_modeident (const ir_node *node)
283 return get_mode_ident(node->mode);
287 (get_irn_op)(const ir_node *node)
289 return __get_irn_op(node);
292 /* should be private to the library: */
294 set_irn_op (ir_node *node, ir_op *op)
301 (get_irn_opcode)(const ir_node *node)
303 return __get_irn_opcode(node);
307 get_irn_opname (const ir_node *node)
310 if ((get_irn_op((ir_node *)node) == op_Phi) &&
311 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
312 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
313 return get_id_str(node->op->name);
317 get_irn_opident (const ir_node *node)
320 return node->op->name;
324 (get_irn_visited)(const ir_node *node)
326 return __get_irn_visited(node);
330 (set_irn_visited)(ir_node *node, unsigned long visited)
332 __set_irn_visited(node, visited);
336 (mark_irn_visited)(ir_node *node) {
337 __mark_irn_visited(node);
341 (irn_not_visited)(const ir_node *node) {
342 return __irn_not_visited(node);
346 (irn_visited)(const ir_node *node) {
347 return __irn_visited(node);
351 (set_irn_link)(ir_node *node, void *link) {
352 __set_irn_link(node, link);
356 (get_irn_link)(const ir_node *node) {
357 return __get_irn_link(node);
361 (get_irn_pinned)(const ir_node *node) {
362 return __get_irn_pinned(node);
365 void set_irn_pinned(ir_node *node, op_pin_state state) {
366 /* due to optimization an opt may be turned into a Tuple */
367 if (get_irn_op(node) == op_Tuple)
370 assert(node && get_op_pinned(get_irn_op(node)) == op_pin_state_exc_pinned);
371 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
373 node->attr.except.pin_state = state;
376 #ifdef DO_HEAPANALYSIS
377 /* Access the abstract interpretation information of a node.
378 Returns NULL if no such information is available. */
379 struct abstval *get_irn_abst_value(ir_node *n) {
382 /* Set the abstract interpretation information of a node. */
383 void set_irn_abst_value(ir_node *n, struct abstval *os) {
386 struct section *firm_get_irn_section(ir_node *n) {
389 void firm_set_irn_section(ir_node *n, struct section *s) {
392 #endif /* DO_HEAPANALYSIS */
395 /* Outputs a unique number for this node */
397 get_irn_node_nr(const ir_node *node) {
400 return node->node_nr;
407 get_irn_const_attr (ir_node *node)
409 assert (node->op == op_Const);
410 return node->attr.con;
414 get_irn_proj_attr (ir_node *node)
416 assert (node->op == op_Proj);
417 return node->attr.proj;
421 get_irn_alloc_attr (ir_node *node)
423 assert (node->op == op_Alloc);
428 get_irn_free_attr (ir_node *node)
430 assert (node->op == op_Free);
431 return node->attr.f = skip_tid(node->attr.f);
435 get_irn_symconst_attr (ir_node *node)
437 assert (node->op == op_SymConst);
442 get_irn_call_attr (ir_node *node)
444 assert (node->op == op_Call);
445 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
449 get_irn_funccall_attr (ir_node *node)
451 assert (node->op == op_FuncCall);
452 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
456 get_irn_sel_attr (ir_node *node)
458 assert (node->op == op_Sel);
463 get_irn_phi_attr (ir_node *node)
465 assert (node->op == op_Phi);
466 return node->attr.phi0_pos;
470 get_irn_block_attr (ir_node *node)
472 assert (node->op == op_Block);
473 return node->attr.block;
477 get_irn_load_attr (ir_node *node)
479 assert (node->op == op_Load);
480 return node->attr.load;
484 get_irn_store_attr (ir_node *node)
486 assert (node->op == op_Store);
487 return node->attr.store;
491 get_irn_except_attr (ir_node *node)
493 assert (node->op == op_Div || node->op == op_Quot ||
494 node->op == op_DivMod || node->op == op_Mod);
495 return node->attr.except;
498 /** manipulate fields of individual nodes **/
500 /* this works for all except Block */
502 get_nodes_block (ir_node *node) {
503 assert (!(node->op == op_Block));
504 return get_irn_n(node, -1);
508 set_nodes_block (ir_node *node, ir_node *block) {
509 assert (!(node->op == op_Block));
510 set_irn_n(node, -1, block);
513 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
514 * from Start. If so returns frame type, else Null. */
515 type *is_frame_pointer(ir_node *n) {
516 if ((get_irn_op(n) == op_Proj) &&
517 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
518 ir_node *start = get_Proj_pred(n);
519 if (get_irn_op(start) == op_Start) {
520 return get_irg_frame_type(get_irn_irg(start));
526 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
527 * from Start. If so returns global type, else Null. */
528 type *is_globals_pointer(ir_node *n) {
529 if ((get_irn_op(n) == op_Proj) &&
530 (get_Proj_proj(n) == pn_Start_P_globals)) {
531 ir_node *start = get_Proj_pred(n);
532 if (get_irn_op(start) == op_Start) {
533 return get_glob_type();
539 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
540 * from Start. If so returns 1, else 0. */
541 int is_value_arg_pointer(ir_node *n) {
542 if ((get_irn_op(n) == op_Proj) &&
543 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
544 (get_irn_op(get_Proj_pred(n)) == op_Start))
549 /* Returns an array with the predecessors of the Block. Depending on
550 the implementation of the graph data structure this can be a copy of
551 the internal representation of predecessors as well as the internal
552 array itself. Therefore writing to this array might obstruct the ir. */
554 get_Block_cfgpred_arr (ir_node *node)
556 assert ((node->op == op_Block));
557 return (ir_node **)&(get_irn_in(node)[1]);
562 get_Block_n_cfgpreds (ir_node *node) {
563 assert ((node->op == op_Block));
564 return get_irn_arity(node);
568 get_Block_cfgpred (ir_node *node, int pos) {
570 assert (node->op == op_Block);
571 assert(-1 <= pos && pos < get_irn_arity(node));
572 return get_irn_n(node, pos);
576 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
577 assert (node->op == op_Block);
578 set_irn_n(node, pos, pred);
582 get_Block_matured (ir_node *node) {
583 assert (node->op == op_Block);
584 return node->attr.block.matured;
588 set_Block_matured (ir_node *node, bool matured) {
589 assert (node->op == op_Block);
590 node->attr.block.matured = matured;
593 get_Block_block_visited (ir_node *node) {
594 assert (node->op == op_Block);
595 return node->attr.block.block_visited;
599 set_Block_block_visited (ir_node *node, unsigned long visit) {
600 assert (node->op == op_Block);
601 node->attr.block.block_visited = visit;
604 /* For this current_ir_graph must be set. */
606 mark_Block_block_visited (ir_node *node) {
607 assert (node->op == op_Block);
608 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
612 Block_not_block_visited(ir_node *node) {
613 assert (node->op == op_Block);
614 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
618 get_Block_graph_arr (ir_node *node, int pos) {
619 assert (node->op == op_Block);
620 return node->attr.block.graph_arr[pos+1];
624 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
625 assert (node->op == op_Block);
626 node->attr.block.graph_arr[pos+1] = value;
/* Installs the interprocedural (call-graph view) control-flow
   predecessors of a Block.  (Re)allocates the in_cg array when it does
   not yet exist or its arity changed. */
629 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
630 assert(node->op == op_Block);
631 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
/* in_cg[0] is the (unused) block slot and stays NULL; the real
   predecessors occupy indices 1..arity. */
632 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
633 node->attr.block.in_cg[0] = NULL;
634 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
636 /* Fix backedge array. fix_backedges operates depending on
637 interprocedural_view. */
/* Temporarily force the interprocedural view on so fix_backedges
   operates on in_cg, then restore the caller's setting. */
638 int ipv = get_interprocedural_view();
639 set_interprocedural_view(true);
640 fix_backedges(current_ir_graph->obst, node);
641 set_interprocedural_view(ipv);
644 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
647 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
648 assert(node->op == op_Block &&
649 node->attr.block.in_cg &&
650 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
651 node->attr.block.in_cg[pos + 1] = pred;
654 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
655 assert(node->op == op_Block);
656 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
659 int get_Block_cg_n_cfgpreds(ir_node * node) {
660 assert(node->op == op_Block);
661 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
664 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
665 assert(node->op == op_Block && node->attr.block.in_cg);
666 return node->attr.block.in_cg[pos + 1];
669 void remove_Block_cg_cfgpred_arr(ir_node * node) {
670 assert(node->op == op_Block);
671 node->attr.block.in_cg = NULL;
675 set_Start_irg(ir_node *node, ir_graph *irg) {
676 assert(node->op == op_Start);
677 assert(is_ir_graph(irg));
678 assert(0 && " Why set irg? -- use set_irn_irg");
682 get_End_n_keepalives(ir_node *end) {
683 assert (end->op == op_End);
684 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
688 get_End_keepalive(ir_node *end, int pos) {
689 assert (end->op == op_End);
690 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
694 add_End_keepalive (ir_node *end, ir_node *ka) {
695 assert (end->op == op_End);
696 ARR_APP1 (ir_node *, end->in, ka);
700 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
701 assert (end->op == op_End);
702 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
706 free_End (ir_node *end) {
707 assert (end->op == op_End);
709 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
710 end->in = NULL; /* @@@ make sure we get an error if we use the
711 in array afterwards ... */
716 > Implementing the case construct (which is where the constant Proj node is
717 > important) involves far more than simply determining the constant values.
718 > We could argue that this is more properly a function of the translator from
719 > Firm to the target machine. That could be done if there was some way of
720 > projecting "default" out of the Cond node.
721 I know it's complicated.
722 Basically there are two problems:
723 - determining the gaps between the projs
724 - determining the biggest case constant to know the proj number for
726 I see several solutions:
727 1. Introduce a ProjDefault node. Solves both problems.
728 This means to extend all optimizations executed during construction.
729 2. Give the Cond node for switch two flavors:
730 a) there are no gaps in the projs (existing flavor)
731 b) gaps may exist, default proj is still the Proj with the largest
732 projection number. This covers also the gaps.
733 3. Fix the semantic of the Cond to that of 2b)
735 Solution 2 seems to be the best:
736 Computing the gaps in the Firm representation is not too hard, i.e.,
737 libFIRM can implement a routine that transforms between the two
738 flavours. This is also possible for 1) but 2) does not require to
739 change any existing optimization.
740 Further it should be far simpler to determine the biggest constant than
742 I don't want to choose 3) as 2a) seems to have advantages for
743 dataflow analysis and 3) does not allow to convert the representation to
747 get_Cond_selector (ir_node *node) {
748 assert (node->op == op_Cond);
749 return get_irn_n(node, 0);
753 set_Cond_selector (ir_node *node, ir_node *selector) {
754 assert (node->op == op_Cond);
755 set_irn_n(node, 0, selector);
759 get_Cond_kind (ir_node *node) {
760 assert (node->op == op_Cond);
761 return node->attr.c.kind;
765 set_Cond_kind (ir_node *node, cond_kind kind) {
766 assert (node->op == op_Cond);
767 node->attr.c.kind = kind;
771 get_Cond_defaultProj (ir_node *node) {
772 assert (node->op == op_Cond);
773 return node->attr.c.default_proj;
777 get_Return_mem (ir_node *node) {
778 assert (node->op == op_Return);
779 return get_irn_n(node, 0);
783 set_Return_mem (ir_node *node, ir_node *mem) {
784 assert (node->op == op_Return);
785 set_irn_n(node, 0, mem);
789 get_Return_n_ress (ir_node *node) {
790 assert (node->op == op_Return);
791 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
795 get_Return_res_arr (ir_node *node)
797 assert ((node->op == op_Return));
798 if (get_Return_n_ress(node) > 0)
799 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
806 set_Return_n_res (ir_node *node, int results) {
807 assert (node->op == op_Return);
812 get_Return_res (ir_node *node, int pos) {
813 assert (node->op == op_Return);
814 assert (get_Return_n_ress(node) > pos);
815 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
819 set_Return_res (ir_node *node, int pos, ir_node *res){
820 assert (node->op == op_Return);
821 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
825 get_Raise_mem (ir_node *node) {
826 assert (node->op == op_Raise);
827 return get_irn_n(node, 0);
831 set_Raise_mem (ir_node *node, ir_node *mem) {
832 assert (node->op == op_Raise);
833 set_irn_n(node, 0, mem);
837 get_Raise_exo_ptr (ir_node *node) {
838 assert (node->op == op_Raise);
839 return get_irn_n(node, 1);
843 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
844 assert (node->op == op_Raise);
845 set_irn_n(node, 1, exo_ptr);
848 tarval *get_Const_tarval (ir_node *node) {
849 assert (node->op == op_Const);
850 return node->attr.con.tv;
854 set_Const_tarval (ir_node *node, tarval *con) {
855 assert (node->op == op_Const);
856 node->attr.con.tv = con;
860 /* The source language type. Must be an atomic type. Mode of type must
861 be mode of node. For tarvals from entities type must be pointer to
864 get_Const_type (ir_node *node) {
865 assert (node->op == op_Const);
866 return node->attr.con.tp;
/* Sets the source-language type of a Const.  Any concrete (non-unknown)
   type must be atomic and its mode must equal the node's mode. */
870 set_Const_type (ir_node *node, type *tp) {
871 assert (node->op == op_Const);
872 if (tp != unknown_type) {
873 assert (is_atomic_type(tp));
874 assert (get_type_mode(tp) == get_irn_mode(node));
/* NOTE(review): leftover debugging aid keyed to one hard-coded node
   number (259216) -- candidate for removal. */
877 if ((get_irn_node_nr(node) == 259216) && (tp == unknown_type))
881 node->attr.con.tp = tp;
886 get_SymConst_kind (const ir_node *node) {
887 assert (node->op == op_SymConst);
888 return node->attr.i.num;
892 set_SymConst_kind (ir_node *node, symconst_kind num) {
893 assert (node->op == op_SymConst);
894 node->attr.i.num = num;
898 get_SymConst_type (ir_node *node) {
899 assert ( (node->op == op_SymConst)
900 && ( get_SymConst_kind(node) == symconst_type_tag
901 || get_SymConst_kind(node) == symconst_size));
902 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
906 set_SymConst_type (ir_node *node, type *tp) {
907 assert ( (node->op == op_SymConst)
908 && ( get_SymConst_kind(node) == symconst_type_tag
909 || get_SymConst_kind(node) == symconst_size));
910 node->attr.i.sym.type_p = tp;
914 get_SymConst_name (ir_node *node) {
915 assert ( (node->op == op_SymConst)
916 && (get_SymConst_kind(node) == symconst_addr_name));
917 return node->attr.i.sym.ident_p;
921 set_SymConst_name (ir_node *node, ident *name) {
922 assert ( (node->op == op_SymConst)
923 && (get_SymConst_kind(node) == symconst_addr_name));
924 node->attr.i.sym.ident_p = name;
928 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
929 entity *get_SymConst_entity (ir_node *node) {
930 assert ( (node->op == op_SymConst)
931 && (get_SymConst_kind (node) == symconst_addr_ent));
932 return node->attr.i.sym.entity_p;
935 void set_SymConst_entity (ir_node *node, entity *ent) {
936 assert ( (node->op == op_SymConst)
937 && (get_SymConst_kind(node) == symconst_addr_ent));
938 node->attr.i.sym.entity_p = ent;
941 union symconst_symbol
942 get_SymConst_symbol (ir_node *node) {
943 assert (node->op == op_SymConst);
944 return node->attr.i.sym;
948 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
949 assert (node->op == op_SymConst);
950 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
951 node->attr.i.sym = sym;
955 get_SymConst_value_type (ir_node *node) {
956 assert (node->op == op_SymConst);
957 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
958 return node->attr.i.tp;
962 set_SymConst_value_type (ir_node *node, type *tp) {
963 assert (node->op == op_SymConst);
964 node->attr.i.tp = tp;
968 get_Sel_mem (ir_node *node) {
969 assert (node->op == op_Sel);
970 return get_irn_n(node, 0);
974 set_Sel_mem (ir_node *node, ir_node *mem) {
975 assert (node->op == op_Sel);
976 set_irn_n(node, 0, mem);
980 get_Sel_ptr (ir_node *node) {
981 assert (node->op == op_Sel);
982 return get_irn_n(node, 1);
986 set_Sel_ptr (ir_node *node, ir_node *ptr) {
987 assert (node->op == op_Sel);
988 set_irn_n(node, 1, ptr);
992 get_Sel_n_indexs (ir_node *node) {
993 assert (node->op == op_Sel);
994 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
998 get_Sel_index_arr (ir_node *node)
1000 assert ((node->op == op_Sel));
1001 if (get_Sel_n_indexs(node) > 0)
1002 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1008 get_Sel_index (ir_node *node, int pos) {
1009 assert (node->op == op_Sel);
1010 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1014 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1015 assert (node->op == op_Sel);
1016 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1020 get_Sel_entity (ir_node *node) {
1021 assert (node->op == op_Sel);
1022 return node->attr.s.ent;
1026 set_Sel_entity (ir_node *node, entity *ent) {
1027 assert (node->op == op_Sel);
1028 node->attr.s.ent = ent;
1032 get_InstOf_ent (ir_node *node) {
1033 assert (node->op = op_InstOf);
1034 return (node->attr.io.ent);
1038 set_InstOf_ent (ir_node *node, type *ent) {
1039 assert (node->op = op_InstOf);
1040 node->attr.io.ent = ent;
1044 get_InstOf_store (ir_node *node) {
1045 assert (node->op = op_InstOf);
1046 return (get_irn_n (node, 0));
1050 set_InstOf_store (ir_node *node, ir_node *obj) {
1051 assert (node->op = op_InstOf);
1052 set_irn_n (node, 0, obj);
1056 get_InstOf_obj (ir_node *node) {
1057 assert (node->op = op_InstOf);
1058 return (get_irn_n (node, 1));
1062 set_InstOf_obj (ir_node *node, ir_node *obj) {
1063 assert (node->op = op_InstOf);
1064 set_irn_n (node, 1, obj);
1068 /* For unary and binary arithmetic operations the access to the
1069 operands can be factored out. Left is the first, right the
1070 second arithmetic value as listed in tech report 0999-33.
1071 unops are: Minus, Abs, Not, Conv, Cast
1072 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1073 Shr, Shrs, Rotate, Cmp */
1077 get_Call_mem (ir_node *node) {
1078 assert (node->op == op_Call);
1079 return get_irn_n(node, 0);
1083 set_Call_mem (ir_node *node, ir_node *mem) {
1084 assert (node->op == op_Call);
1085 set_irn_n(node, 0, mem);
1089 get_Call_ptr (ir_node *node) {
1090 assert (node->op == op_Call);
1091 return get_irn_n(node, 1);
1095 set_Call_ptr (ir_node *node, ir_node *ptr) {
1096 assert (node->op == op_Call);
1097 set_irn_n(node, 1, ptr);
1101 get_Call_param_arr (ir_node *node) {
1102 assert (node->op == op_Call);
1103 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1107 get_Call_n_params (ir_node *node) {
1108 assert (node->op == op_Call);
1109 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1113 get_Call_arity (ir_node *node) {
1114 assert (node->op == op_Call);
1115 return get_Call_n_params(node);
1119 set_Call_arity (ir_node *node, ir_node *arity) {
1120 assert (node->op == op_Call);
1125 get_Call_param (ir_node *node, int pos) {
1126 assert (node->op == op_Call);
1127 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1131 set_Call_param (ir_node *node, int pos, ir_node *param) {
1132 assert (node->op == op_Call);
1133 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1137 get_Call_type (ir_node *node) {
1138 assert (node->op == op_Call);
1139 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1143 set_Call_type (ir_node *node, type *tp) {
1144 assert (node->op == op_Call);
1145 assert ((get_unknown_type() == tp) || is_method_type(tp));
1146 node->attr.call.cld_tp = tp;
1149 int Call_has_callees(ir_node *node) {
1150 assert(node && node->op == op_Call);
1151 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1152 (node->attr.call.callee_arr != NULL));
1155 int get_Call_n_callees(ir_node * node) {
1156 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1157 return ARR_LEN(node->attr.call.callee_arr);
1160 entity * get_Call_callee(ir_node * node, int pos) {
1161 assert(pos >= 0 && pos < get_Call_n_callees(node));
1162 return node->attr.call.callee_arr[pos];
1165 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1166 assert(node->op == op_Call);
1167 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1168 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1170 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1173 void remove_Call_callee_arr(ir_node * node) {
1174 assert(node->op == op_Call);
1175 node->attr.call.callee_arr = NULL;
1178 ir_node * get_CallBegin_ptr (ir_node *node) {
1179 assert(node->op == op_CallBegin);
1180 return get_irn_n(node, 0);
1182 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1183 assert(node->op == op_CallBegin);
1184 set_irn_n(node, 0, ptr);
1186 ir_node * get_CallBegin_call (ir_node *node) {
1187 assert(node->op == op_CallBegin);
1188 return node->attr.callbegin.call;
1190 void set_CallBegin_call (ir_node *node, ir_node *call) {
1191 assert(node->op == op_CallBegin);
1192 node->attr.callbegin.call = call;
1196 get_FuncCall_ptr (ir_node *node) {
1197 assert (node->op == op_FuncCall);
1198 return get_irn_n(node, 0);
1202 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1203 assert (node->op == op_FuncCall);
1204 set_irn_n(node, 0, ptr);
1208 get_FuncCall_param_arr (ir_node *node) {
1209 assert (node->op == op_FuncCall);
1210 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1214 get_FuncCall_n_params (ir_node *node) {
1215 assert (node->op == op_FuncCall);
1216 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1220 get_FuncCall_arity (ir_node *node) {
1221 assert (node->op == op_FuncCall);
1222 return get_FuncCall_n_params(node);
1226 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1227 assert (node->op == op_FuncCall);
1232 get_FuncCall_param (ir_node *node, int pos) {
1233 assert (node->op == op_FuncCall);
1234 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1238 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1239 assert (node->op == op_FuncCall);
1240 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1244 get_FuncCall_type (ir_node *node) {
1245 assert (node->op == op_FuncCall);
1246 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1250 set_FuncCall_type (ir_node *node, type *tp) {
1251 assert (node->op == op_FuncCall);
1252 assert (is_method_type(tp));
1253 node->attr.call.cld_tp = tp;
1256 int FuncCall_has_callees(ir_node *node) {
1257 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1258 (node->attr.call.callee_arr != NULL));
1261 int get_FuncCall_n_callees(ir_node * node) {
1262 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1263 return ARR_LEN(node->attr.call.callee_arr);
1266 entity * get_FuncCall_callee(ir_node * node, int pos) {
1267 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1268 return node->attr.call.callee_arr[pos];
1271 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1272 assert(node->op == op_FuncCall);
1273 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1274 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1276 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1279 void remove_FuncCall_callee_arr(ir_node * node) {
1280 assert(node->op == op_FuncCall);
1281 node->attr.call.callee_arr = NULL;
1286 ir_node * get_##OP##_left(ir_node *node) { \
1287 assert(node->op == op_##OP); \
1288 return get_irn_n(node, node->op->op_index); \
1290 void set_##OP##_left(ir_node *node, ir_node *left) { \
1291 assert(node->op == op_##OP); \
1292 set_irn_n(node, node->op->op_index, left); \
1294 ir_node *get_##OP##_right(ir_node *node) { \
1295 assert(node->op == op_##OP); \
1296 return get_irn_n(node, node->op->op_index + 1); \
1298 void set_##OP##_right(ir_node *node, ir_node *right) { \
1299 assert(node->op == op_##OP); \
1300 set_irn_n(node, node->op->op_index + 1, right); \
1304 ir_node *get_##OP##_op(ir_node *node) { \
1305 assert(node->op == op_##OP); \
1306 return get_irn_n(node, node->op->op_index); \
1308 void set_##OP##_op (ir_node *node, ir_node *op) { \
1309 assert(node->op == op_##OP); \
1310 set_irn_n(node, node->op->op_index, op); \
1320 get_Quot_mem (ir_node *node) {
1321 assert (node->op == op_Quot);
1322 return get_irn_n(node, 0);
1326 set_Quot_mem (ir_node *node, ir_node *mem) {
1327 assert (node->op == op_Quot);
1328 set_irn_n(node, 0, mem);
1334 get_DivMod_mem (ir_node *node) {
1335 assert (node->op == op_DivMod);
1336 return get_irn_n(node, 0);
1340 set_DivMod_mem (ir_node *node, ir_node *mem) {
1341 assert (node->op == op_DivMod);
1342 set_irn_n(node, 0, mem);
1348 get_Div_mem (ir_node *node) {
1349 assert (node->op == op_Div);
1350 return get_irn_n(node, 0);
1354 set_Div_mem (ir_node *node, ir_node *mem) {
1355 assert (node->op == op_Div);
1356 set_irn_n(node, 0, mem);
1362 get_Mod_mem (ir_node *node) {
1363 assert (node->op == op_Mod);
1364 return get_irn_n(node, 0);
1368 set_Mod_mem (ir_node *node, ir_node *mem) {
1369 assert (node->op == op_Mod);
1370 set_irn_n(node, 0, mem);
1387 get_Cast_type (ir_node *node) {
1388 assert (node->op == op_Cast);
1389 return node->attr.cast.totype;
1393 set_Cast_type (ir_node *node, type *to_tp) {
1394 assert (node->op == op_Cast);
1395 node->attr.cast.totype = to_tp;
1399 is_unop (ir_node *node) {
1400 return (node->op->opar == oparity_unary);
1404 get_unop_op (ir_node *node) {
1405 if (node->op->opar == oparity_unary)
1406 return get_irn_n(node, node->op->op_index);
1408 assert(node->op->opar == oparity_unary);
1413 set_unop_op (ir_node *node, ir_node *op) {
1414 if (node->op->opar == oparity_unary)
1415 set_irn_n(node, node->op->op_index, op);
1417 assert(node->op->opar == oparity_unary);
1421 is_binop (ir_node *node) {
1422 return (node->op->opar == oparity_binary);
1426 get_binop_left (ir_node *node) {
1427 if (node->op->opar == oparity_binary)
1428 return get_irn_n(node, node->op->op_index);
1430 assert(node->op->opar == oparity_binary);
1435 set_binop_left (ir_node *node, ir_node *left) {
1436 if (node->op->opar == oparity_binary)
1437 set_irn_n(node, node->op->op_index, left);
1439 assert (node->op->opar == oparity_binary);
1443 get_binop_right (ir_node *node) {
1444 if (node->op->opar == oparity_binary)
1445 return get_irn_n(node, node->op->op_index + 1);
1447 assert(node->op->opar == oparity_binary);
1452 set_binop_right (ir_node *node, ir_node *right) {
1453 if (node->op->opar == oparity_binary)
1454 set_irn_n(node, node->op->op_index + 1, right);
1456 assert (node->op->opar == oparity_binary);
1459 int is_Phi (ir_node *n) {
1465 if (op == op_Filter) return get_interprocedural_view();
1468 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1469 (get_irn_arity(n) > 0));
1474 int is_Phi0 (ir_node *n) {
1477 return ((get_irn_op(n) == op_Phi) &&
1478 (get_irn_arity(n) == 0) &&
1479 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1483 get_Phi_preds_arr (ir_node *node) {
1484 assert (node->op == op_Phi);
1485 return (ir_node **)&(get_irn_in(node)[1]);
1489 get_Phi_n_preds (ir_node *node) {
1490 assert (is_Phi(node) || is_Phi0(node));
1491 return (get_irn_arity(node));
1495 void set_Phi_n_preds (ir_node *node, int n_preds) {
1496 assert (node->op == op_Phi);
1501 get_Phi_pred (ir_node *node, int pos) {
1502 assert (is_Phi(node) || is_Phi0(node));
1503 return get_irn_n(node, pos);
1507 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1508 assert (is_Phi(node) || is_Phi0(node));
1509 set_irn_n(node, pos, pred);
1513 int is_memop(ir_node *node) {
1514 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1517 ir_node *get_memop_mem (ir_node *node) {
1518 assert(is_memop(node));
1519 return get_irn_n(node, 0);
1522 void set_memop_mem (ir_node *node, ir_node *mem) {
1523 assert(is_memop(node));
1524 set_irn_n(node, 0, mem);
1527 ir_node *get_memop_ptr (ir_node *node) {
1528 assert(is_memop(node));
1529 return get_irn_n(node, 1);
1532 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1533 assert(is_memop(node));
1534 set_irn_n(node, 1, ptr);
1538 get_Load_mem (ir_node *node) {
1539 assert (node->op == op_Load);
1540 return get_irn_n(node, 0);
1544 set_Load_mem (ir_node *node, ir_node *mem) {
1545 assert (node->op == op_Load);
1546 set_irn_n(node, 0, mem);
1550 get_Load_ptr (ir_node *node) {
1551 assert (node->op == op_Load);
1552 return get_irn_n(node, 1);
1556 set_Load_ptr (ir_node *node, ir_node *ptr) {
1557 assert (node->op == op_Load);
1558 set_irn_n(node, 1, ptr);
1562 get_Load_mode (ir_node *node) {
1563 assert (node->op == op_Load);
1564 return node->attr.load.load_mode;
1568 set_Load_mode (ir_node *node, ir_mode *mode) {
1569 assert (node->op == op_Load);
1570 node->attr.load.load_mode = mode;
1574 get_Load_volatility (ir_node *node) {
1575 assert (node->op == op_Load);
1576 return node->attr.load.volatility;
1580 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1581 assert (node->op == op_Load);
1582 node->attr.load.volatility = volatility;
1587 get_Store_mem (ir_node *node) {
1588 assert (node->op == op_Store);
1589 return get_irn_n(node, 0);
1593 set_Store_mem (ir_node *node, ir_node *mem) {
1594 assert (node->op == op_Store);
1595 set_irn_n(node, 0, mem);
1599 get_Store_ptr (ir_node *node) {
1600 assert (node->op == op_Store);
1601 return get_irn_n(node, 1);
1605 set_Store_ptr (ir_node *node, ir_node *ptr) {
1606 assert (node->op == op_Store);
1607 set_irn_n(node, 1, ptr);
1611 get_Store_value (ir_node *node) {
1612 assert (node->op == op_Store);
1613 return get_irn_n(node, 2);
1617 set_Store_value (ir_node *node, ir_node *value) {
1618 assert (node->op == op_Store);
1619 set_irn_n(node, 2, value);
1623 get_Store_volatility (ir_node *node) {
1624 assert (node->op == op_Store);
1625 return node->attr.store.volatility;
1629 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1630 assert (node->op == op_Store);
1631 node->attr.store.volatility = volatility;
1636 get_Alloc_mem (ir_node *node) {
1637 assert (node->op == op_Alloc);
1638 return get_irn_n(node, 0);
1642 set_Alloc_mem (ir_node *node, ir_node *mem) {
1643 assert (node->op == op_Alloc);
1644 set_irn_n(node, 0, mem);
1648 get_Alloc_size (ir_node *node) {
1649 assert (node->op == op_Alloc);
1650 return get_irn_n(node, 1);
1654 set_Alloc_size (ir_node *node, ir_node *size) {
1655 assert (node->op == op_Alloc);
1656 set_irn_n(node, 1, size);
1660 get_Alloc_type (ir_node *node) {
1661 assert (node->op == op_Alloc);
1662 return node->attr.a.type = skip_tid(node->attr.a.type);
1666 set_Alloc_type (ir_node *node, type *tp) {
1667 assert (node->op == op_Alloc);
1668 node->attr.a.type = tp;
1672 get_Alloc_where (ir_node *node) {
1673 assert (node->op == op_Alloc);
1674 return node->attr.a.where;
1678 set_Alloc_where (ir_node *node, where_alloc where) {
1679 assert (node->op == op_Alloc);
1680 node->attr.a.where = where;
1685 get_Free_mem (ir_node *node) {
1686 assert (node->op == op_Free);
1687 return get_irn_n(node, 0);
1691 set_Free_mem (ir_node *node, ir_node *mem) {
1692 assert (node->op == op_Free);
1693 set_irn_n(node, 0, mem);
1697 get_Free_ptr (ir_node *node) {
1698 assert (node->op == op_Free);
1699 return get_irn_n(node, 1);
1703 set_Free_ptr (ir_node *node, ir_node *ptr) {
1704 assert (node->op == op_Free);
1705 set_irn_n(node, 1, ptr);
1709 get_Free_size (ir_node *node) {
1710 assert (node->op == op_Free);
1711 return get_irn_n(node, 2);
1715 set_Free_size (ir_node *node, ir_node *size) {
1716 assert (node->op == op_Free);
1717 set_irn_n(node, 2, size);
1721 get_Free_type (ir_node *node) {
1722 assert (node->op == op_Free);
1723 return node->attr.f = skip_tid(node->attr.f);
1727 set_Free_type (ir_node *node, type *tp) {
1728 assert (node->op == op_Free);
1733 get_Sync_preds_arr (ir_node *node) {
1734 assert (node->op == op_Sync);
1735 return (ir_node **)&(get_irn_in(node)[1]);
1739 get_Sync_n_preds (ir_node *node) {
1740 assert (node->op == op_Sync);
1741 return (get_irn_arity(node));
1746 set_Sync_n_preds (ir_node *node, int n_preds) {
1747 assert (node->op == op_Sync);
1752 get_Sync_pred (ir_node *node, int pos) {
1753 assert (node->op == op_Sync);
1754 return get_irn_n(node, pos);
1758 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1759 assert (node->op == op_Sync);
1760 set_irn_n(node, pos, pred);
1764 get_Proj_pred (ir_node *node) {
1765 assert (is_Proj(node));
1766 return get_irn_n(node, 0);
1770 set_Proj_pred (ir_node *node, ir_node *pred) {
1771 assert (is_Proj(node));
1772 set_irn_n(node, 0, pred);
1776 get_Proj_proj (ir_node *node) {
1777 assert (is_Proj(node));
1778 if (get_irn_opcode(node) == iro_Proj) {
1779 return node->attr.proj;
1781 assert(get_irn_opcode(node) == iro_Filter);
1782 return node->attr.filter.proj;
1787 set_Proj_proj (ir_node *node, long proj) {
1788 assert (node->op == op_Proj);
1789 node->attr.proj = proj;
1793 get_Tuple_preds_arr (ir_node *node) {
1794 assert (node->op == op_Tuple);
1795 return (ir_node **)&(get_irn_in(node)[1]);
1799 get_Tuple_n_preds (ir_node *node) {
1800 assert (node->op == op_Tuple);
1801 return (get_irn_arity(node));
1806 set_Tuple_n_preds (ir_node *node, int n_preds) {
1807 assert (node->op == op_Tuple);
1812 get_Tuple_pred (ir_node *node, int pos) {
1813 assert (node->op == op_Tuple);
1814 return get_irn_n(node, pos);
1818 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1819 assert (node->op == op_Tuple);
1820 set_irn_n(node, pos, pred);
1824 get_Id_pred (ir_node *node) {
1825 assert (node->op == op_Id);
1826 return get_irn_n(node, 0);
1830 set_Id_pred (ir_node *node, ir_node *pred) {
1831 assert (node->op == op_Id);
1832 set_irn_n(node, 0, pred);
1835 ir_node *get_Confirm_value (ir_node *node) {
1836 assert (node->op == op_Confirm);
1837 return get_irn_n(node, 0);
1839 void set_Confirm_value (ir_node *node, ir_node *value) {
1840 assert (node->op == op_Confirm);
1841 set_irn_n(node, 0, value);
1843 ir_node *get_Confirm_bound (ir_node *node) {
1844 assert (node->op == op_Confirm);
1845 return get_irn_n(node, 1);
1847 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1848 assert (node->op == op_Confirm);
1849 set_irn_n(node, 0, bound);
1851 pn_Cmp get_Confirm_cmp (ir_node *node) {
1852 assert (node->op == op_Confirm);
1853 return node->attr.confirm_cmp;
1855 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1856 assert (node->op == op_Confirm);
1857 node->attr.confirm_cmp = cmp;
1862 get_Filter_pred (ir_node *node) {
1863 assert(node->op == op_Filter);
1867 set_Filter_pred (ir_node *node, ir_node *pred) {
1868 assert(node->op == op_Filter);
1872 get_Filter_proj(ir_node *node) {
1873 assert(node->op == op_Filter);
1874 return node->attr.filter.proj;
1877 set_Filter_proj (ir_node *node, long proj) {
1878 assert(node->op == op_Filter);
1879 node->attr.filter.proj = proj;
1882 /* Don't use get_irn_arity, get_irn_n in implementation as access
1883 shall work independent of view!!! */
1884 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1885 assert(node->op == op_Filter);
1886 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1887 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1888 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1889 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1890 node->attr.filter.in_cg[0] = node->in[0];
1892 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1895 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1896 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1897 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1898 node->attr.filter.in_cg[pos + 1] = pred;
1900 int get_Filter_n_cg_preds(ir_node *node) {
1901 assert(node->op == op_Filter && node->attr.filter.in_cg);
1902 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1904 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1906 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1908 arity = ARR_LEN(node->attr.filter.in_cg);
1909 assert(pos < arity - 1);
1910 return node->attr.filter.in_cg[pos + 1];
1915 get_irn_irg(ir_node *node) {
1916 if (get_irn_op(node) != op_Block)
1917 node = get_nodes_block(node);
1918 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1919 node = get_nodes_block(node);
1920 assert(get_irn_op(node) == op_Block);
1921 return node->attr.block.irg;
1925 /*----------------------------------------------------------------*/
1926 /* Auxiliary routines */
1927 /*----------------------------------------------------------------*/
1930 skip_Proj (ir_node *node) {
1931 /* don't assert node !!! */
1932 if (node && is_Proj(node)) {
1933 return get_Proj_pred(node);
1940 skip_Tuple (ir_node *node) {
1943 if (!get_opt_normalize()) return node;
1945 node = skip_Id(node);
1946 if (get_irn_op(node) == op_Proj) {
1947 pred = skip_Id(get_Proj_pred(node));
1948 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1949 pred = skip_Id(skip_Tuple(pred));
1950 if (get_irn_op(pred) == op_Tuple)
1951 return get_Tuple_pred(pred, get_Proj_proj(node));
1956 /** returns operand of node if node is a Cast */
1957 ir_node *skip_Cast (ir_node *node) {
1958 if (node && get_irn_op(node) == op_Cast) {
1959 return skip_Id(get_irn_n(node, 0));
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops. */
/* NOTE(review): this is the first of two skip_Id definitions in this
   chunk; presumably one of them is preprocessor-disabled in the full
   file. In this excerpt the return type, the declaration of `res`,
   and the trailing return paths are missing -- left untouched rather
   than guessed at; confirm against the repository version. */
skip_Id (ir_node *node) {
  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != node->in[0+1])) {
    /* remember the predecessor before short-circuiting ourselves */
    ir_node *rem_pred = node->in[0+1];

    assert (get_irn_arity (node) > 0);

    node->in[0+1] = node;          /* break potential Id cycles */
    res = skip_Id(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res;           /* point directly at the chain end */
1994 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1995 than any other approach, as Id chains are resolved and all point to the real node, or
1996 all id's are self loops. */
1998 skip_Id (ir_node *node) {
2000 /* don't assert node !!! */
2002 if (!node || (node->op != op_Id)) return node;
2004 if (!get_opt_normalize()) return node;
2006 /* Don't use get_Id_pred: We get into an endless loop for
2007 self-referencing Ids. */
2008 pred = node->in[0+1];
2010 if (pred->op != op_Id) return pred;
2012 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2013 ir_node *rem_pred, *res;
2015 if (pred->op != op_Id) return pred; /* shortcut */
2018 assert (get_irn_arity (node) > 0);
2020 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2021 res = skip_Id(rem_pred);
2022 if (res->op == op_Id) /* self-loop */ return node;
2024 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2033 is_Bad (ir_node *node) {
2035 if ((node) && get_irn_opcode(node) == iro_Bad)
2041 is_no_Block (ir_node *node) {
2043 return (get_irn_opcode(node) != iro_Block);
2047 is_Block (ir_node *node) {
2049 return (get_irn_opcode(node) == iro_Block);
2052 /* returns true if node is a Unknown node. */
2054 is_Unknown (ir_node *node) {
2056 return (get_irn_opcode(node) == iro_Unknown);
2060 is_Proj (const ir_node *node) {
2062 return node->op == op_Proj
2063 || (!get_interprocedural_view() && node->op == op_Filter);
2066 /* Returns true if the operation manipulates control flow. */
2068 is_cfop(ir_node *node) {
2069 return is_cfopcode(get_irn_op(node));
2072 /* Returns true if the operation manipulates interprocedural control flow:
2073 CallBegin, EndReg, EndExcept */
2074 int is_ip_cfop(ir_node *node) {
2075 return is_ip_cfopcode(get_irn_op(node));
2078 /* Returns true if the operation can change the control flow because
2081 is_fragile_op(ir_node *node) {
2082 return is_op_fragile(get_irn_op(node));
2085 /* Returns the memory operand of fragile operations. */
2086 ir_node *get_fragile_op_mem(ir_node *node) {
2087 assert(node && is_fragile_op(node));
2089 switch (get_irn_opcode (node)) {
2098 return get_irn_n(node, 0);
2103 assert(0 && "should not be reached");
2108 /* Returns true if the operation is a forking control flow operation. */
2110 is_forking_op(ir_node *node) {
2111 return is_op_forking(get_irn_op(node));
2114 #ifdef DEBUG_libfirm
2115 void dump_irn (ir_node *n) {
2116 int i, arity = get_irn_arity(n);
2117 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2119 ir_node *pred = get_irn_n(n, -1);
2120 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2121 get_irn_node_nr(pred), (void *)pred);
2123 printf(" preds: \n");
2124 for (i = 0; i < arity; ++i) {
2125 ir_node *pred = get_irn_n(n, i);
2126 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2127 get_irn_node_nr(pred), (void *)pred);
2131 #else /* DEBUG_libfirm */
2132 void dump_irn (ir_node *n) {}
2133 #endif /* DEBUG_libfirm */