3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
* Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
/* some constants fixing the positions of nodes' predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
* returns the pnc name from a pnc constant
const char *get_pnc_string(int pnc) {
  /* Direct table lookup; no bounds check -- pnc must be a valid index
     into pnc_name_arr. */
  return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
get_negated_pnc(int pnc) {
  /* The mapping is an involution: negating twice yields the original
     condition (each case below has a mirror case). */
  case False: return True; break;
  case Eq: return Ne; break;
  case Lt: return Uge; break;
  case Le: return Ug; break;
  case Gt: return Ule; break;
  case Ge: return Ul; break;
  case Lg: return Ue; break;
  case Leg: return Uo; break;
  case Uo: return Leg; break;
  case Ue: return Lg; break;
  case Ul: return Ge; break;
  case Ule: return Gt; break;
  case Ug: return Le; break;
  case Uge: return Lt; break;
  case Ne: return Eq; break;
  case True: return False; break;
  /* unreachable for valid pnc values */
  return 99; /* to shut up gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
             int arity, ir_node **in)
  /* Node size is the common header plus the op-specific attribute area. */
  int node_size = offsetof (ir_node, attr) + op->attr_size;
  assert(irg && op && mode);
  /* Nodes live on the graph's obstack and start out zeroed. */
  res = (ir_node *) obstack_alloc (irg->obst, node_size);
  memset((void *)res, 0, node_size);
  res->kind = k_ir_node;
  res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
  /* in[0] is reserved for the block; the arity predecessors follow at in[1]. */
  res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
  set_irn_dbg_info(res, db);
  res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
copy_attrs (const ir_node *old_node, ir_node *new_node) {
  /* Same opcode implies the same attribute layout and size, so a raw
     memcpy of the attr union is safe. */
  assert(get_irn_op(old_node) == get_irn_op(new_node));
  memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
158 int (*__get_irn_arity)(const ir_node *node) = __get_irn_intra_arity;
161 (get_irn_arity)(const ir_node *node) {
162 return __get_irn_arity(node);
165 /* Returns the array with ins. This array is shifted with respect to the
166 array accessed by get_irn_n: The block operand is at position 0 not -1.
167 (@@@ This should be changed.)
168 The order of the predecessors in this array is not guaranteed, except that
169 lists of operands as predecessors of Block or arguments of a Call are
172 get_irn_in (const ir_node *node) {
174 if (get_interprocedural_view()) { /* handle Filter and Block specially */
175 if (get_irn_opcode(node) == iro_Filter) {
176 assert(node->attr.filter.in_cg);
177 return node->attr.filter.in_cg;
178 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
179 return node->attr.block.in_cg;
181 /* else fall through */
set_irn_in (ir_node *node, int arity, ir_node **in) {
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      /* Filter: replace the call-graph in-array, not the regular one. */
      arr = &node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      arr = &node->attr.block.in_cg;
  /* On an arity change a fresh in-array is allocated; slot 0 keeps the
     block predecessor. */
  if (arity != ARR_LEN(*arr) - 1) {
    ir_node * block = (*arr)[0];
    *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
  fix_backedges(current_ir_graph->obst, node);
  memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
212 (get_irn_intra_n)(ir_node *node, int n) {
213 return __get_irn_intra_n (node, n);
217 (get_irn_inter_n)(ir_node *node, int n) {
218 return __get_irn_inter_n (node, n);
221 ir_node *(*__get_irn_n)(ir_node *node, int n) = __get_irn_intra_n;
224 (get_irn_n)(ir_node *node, int n) {
225 return __get_irn_n(node, n);
set_irn_n (ir_node *node, int n, ir_node *in) {
  /* n == -1 addresses the block predecessor (stored at in[0]). */
  assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
  assert(in && in->kind == k_ir_node);
  if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
    /* Change block pred in both views! */
    node->in[n + 1] = in;
    assert(node->attr.filter.in_cg);
    node->attr.filter.in_cg[n + 1] = in;
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      node->attr.filter.in_cg[n + 1] = in;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      node->attr.block.in_cg[n + 1] = in;
  /* else fall through */
  /* intraprocedural view: ordinary in-array, shifted by one for the block */
  node->in[n + 1] = in;
254 (get_irn_mode)(const ir_node *node) {
255 return __get_irn_mode(node);
259 (set_irn_mode)(ir_node *node, ir_mode *mode)
261 __set_irn_mode(node, mode);
265 get_irn_modecode (const ir_node *node)
268 return node->mode->code;
/** Gets the string representation of the mode. */
273 get_irn_modename (const ir_node *node)
276 return get_mode_name(node->mode);
280 get_irn_modeident (const ir_node *node)
283 return get_mode_ident(node->mode);
287 (get_irn_op)(const ir_node *node)
289 return __get_irn_op(node);
292 /* should be private to the library: */
294 set_irn_op (ir_node *node, ir_op *op)
301 (get_irn_opcode)(const ir_node *node)
303 return __get_irn_opcode(node);
307 get_irn_opname (const ir_node *node)
310 if ((get_irn_op((ir_node *)node) == op_Phi) &&
311 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
312 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
313 return get_id_str(node->op->name);
317 get_irn_opident (const ir_node *node)
320 return node->op->name;
324 (get_irn_visited)(const ir_node *node)
326 return __get_irn_visited(node);
330 (set_irn_visited)(ir_node *node, unsigned long visited)
332 __set_irn_visited(node, visited);
336 (mark_irn_visited)(ir_node *node) {
337 __mark_irn_visited(node);
341 (irn_not_visited)(const ir_node *node) {
342 return __irn_not_visited(node);
346 (irn_visited)(const ir_node *node) {
347 return __irn_visited(node);
351 (set_irn_link)(ir_node *node, void *link) {
352 __set_irn_link(node, link);
356 (get_irn_link)(const ir_node *node) {
357 return __get_irn_link(node);
361 (get_irn_pinned)(const ir_node *node) {
362 return __get_irn_pinned(node);
365 void set_irn_pinned(ir_node *node, op_pin_state state) {
366 /* due to optimization an opt may be turned into a Tuple */
367 if (get_irn_op(node) == op_Tuple)
370 assert(node && get_op_pinned(get_irn_op(node)) == op_pin_state_exc_pinned);
371 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
373 node->attr.except.pin_state = state;
376 #ifdef DO_HEAPANALYSIS
377 /* Access the abstract interpretation information of a node.
378 Returns NULL if no such information is available. */
379 struct abstval *get_irn_abst_value(ir_node *n) {
382 /* Set the abstract interpretation information of a node. */
383 void set_irn_abst_value(ir_node *n, struct abstval *os) {
386 struct section *firm_get_irn_section(ir_node *n) {
389 void firm_set_irn_section(ir_node *n, struct section *s) {
392 #endif /* DO_HEAPANALYSIS */
395 /* Outputs a unique number for this node */
397 get_irn_node_nr(const ir_node *node) {
400 return node->node_nr;
407 get_irn_const_attr (ir_node *node)
409 assert (node->op == op_Const);
410 return node->attr.con;
414 get_irn_proj_attr (ir_node *node)
416 assert (node->op == op_Proj);
417 return node->attr.proj;
421 get_irn_alloc_attr (ir_node *node)
423 assert (node->op == op_Alloc);
428 get_irn_free_attr (ir_node *node)
430 assert (node->op == op_Free);
431 return node->attr.f = skip_tid(node->attr.f);
435 get_irn_symconst_attr (ir_node *node)
437 assert (node->op == op_SymConst);
442 get_irn_call_attr (ir_node *node)
444 assert (node->op == op_Call);
445 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
449 get_irn_funccall_attr (ir_node *node)
451 assert (node->op == op_FuncCall);
452 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
456 get_irn_sel_attr (ir_node *node)
458 assert (node->op == op_Sel);
463 get_irn_phi_attr (ir_node *node)
465 assert (node->op == op_Phi);
466 return node->attr.phi0_pos;
470 get_irn_block_attr (ir_node *node)
472 assert (node->op == op_Block);
473 return node->attr.block;
477 get_irn_load_attr (ir_node *node)
479 assert (node->op == op_Load);
480 return node->attr.load;
484 get_irn_store_attr (ir_node *node)
486 assert (node->op == op_Store);
487 return node->attr.store;
491 get_irn_except_attr (ir_node *node)
493 assert (node->op == op_Div || node->op == op_Quot ||
494 node->op == op_DivMod || node->op == op_Mod);
495 return node->attr.except;
498 /** manipulate fields of individual nodes **/
500 /* this works for all except Block */
502 get_nodes_block (ir_node *node) {
503 assert (!(node->op == op_Block));
504 return get_irn_n(node, -1);
508 set_nodes_block (ir_node *node, ir_node *block) {
509 assert (!(node->op == op_Block));
510 set_irn_n(node, -1, block);
513 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
514 * from Start. If so returns frame type, else Null. */
515 type *is_frame_pointer(ir_node *n) {
516 if ((get_irn_op(n) == op_Proj) &&
517 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
518 ir_node *start = get_Proj_pred(n);
519 if (get_irn_op(start) == op_Start) {
520 return get_irg_frame_type(get_irn_irg(start));
526 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
527 * from Start. If so returns global type, else Null. */
528 type *is_globals_pointer(ir_node *n) {
529 if ((get_irn_op(n) == op_Proj) &&
530 (get_Proj_proj(n) == pn_Start_P_globals)) {
531 ir_node *start = get_Proj_pred(n);
532 if (get_irn_op(start) == op_Start) {
533 return get_glob_type();
539 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
540 * from Start. If so returns 1, else 0. */
541 int is_value_arg_pointer(ir_node *n) {
542 if ((get_irn_op(n) == op_Proj) &&
543 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
544 (get_irn_op(get_Proj_pred(n)) == op_Start))
549 /* Returns an array with the predecessors of the Block. Depending on
550 the implementation of the graph data structure this can be a copy of
551 the internal representation of predecessors as well as the internal
552 array itself. Therefore writing to this array might obstruct the ir. */
554 get_Block_cfgpred_arr (ir_node *node)
556 assert ((node->op == op_Block));
557 return (ir_node **)&(get_irn_in(node)[1]);
562 get_Block_n_cfgpreds (ir_node *node) {
563 assert ((node->op == op_Block));
564 return get_irn_arity(node);
568 get_Block_cfgpred (ir_node *node, int pos) {
570 assert (node->op == op_Block);
571 assert(-1 <= pos && pos < get_irn_arity(node));
572 return get_irn_n(node, pos);
576 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
577 assert (node->op == op_Block);
578 set_irn_n(node, pos, pred);
582 get_Block_matured (ir_node *node) {
583 assert (node->op == op_Block);
584 return node->attr.block.matured;
588 set_Block_matured (ir_node *node, bool matured) {
589 assert (node->op == op_Block);
590 node->attr.block.matured = matured;
593 get_Block_block_visited (ir_node *node) {
594 assert (node->op == op_Block);
595 return node->attr.block.block_visited;
599 set_Block_block_visited (ir_node *node, unsigned long visit) {
600 assert (node->op == op_Block);
601 node->attr.block.block_visited = visit;
604 /* For this current_ir_graph must be set. */
606 mark_Block_block_visited (ir_node *node) {
607 assert (node->op == op_Block);
608 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
612 Block_not_block_visited(ir_node *node) {
613 assert (node->op == op_Block);
614 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
618 get_Block_graph_arr (ir_node *node, int pos) {
619 assert (node->op == op_Block);
620 return node->attr.block.graph_arr[pos+1];
624 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
625 assert (node->op == op_Block);
626 node->attr.block.graph_arr[pos+1] = value;
void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Block);
  /* (Re)allocate the call-graph in-array only when absent or the arity
     changed; slot 0 mirrors the block slot of the regular in-array. */
  if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
    node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.block.in_cg[0] = NULL;
    node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
  /* Fix backedge array. fix_backedges operates depending on
     interprocedural_view. */
  int ipv = get_interprocedural_view();
  /* Temporarily force the interprocedural view so fix_backedges sees the
     call-graph predecessors, then restore the caller's setting. */
  set_interprocedural_view(true);
  fix_backedges(current_ir_graph->obst, node);
  set_interprocedural_view(ipv);
  memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
647 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
648 assert(node->op == op_Block &&
649 node->attr.block.in_cg &&
650 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
651 node->attr.block.in_cg[pos + 1] = pred;
654 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
655 assert(node->op == op_Block);
656 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
659 int get_Block_cg_n_cfgpreds(ir_node * node) {
660 assert(node->op == op_Block);
661 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
664 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
665 assert(node->op == op_Block && node->attr.block.in_cg);
666 return node->attr.block.in_cg[pos + 1];
669 void remove_Block_cg_cfgpred_arr(ir_node * node) {
670 assert(node->op == op_Block);
671 node->attr.block.in_cg = NULL;
675 set_Start_irg(ir_node *node, ir_graph *irg) {
676 assert(node->op == op_Start);
677 assert(is_ir_graph(irg));
678 assert(0 && " Why set irg? -- use set_irn_irg");
682 get_End_n_keepalives(ir_node *end) {
683 assert (end->op == op_End);
684 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
688 get_End_keepalive(ir_node *end, int pos) {
689 assert (end->op == op_End);
690 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
694 add_End_keepalive (ir_node *end, ir_node *ka) {
695 assert (end->op == op_End);
696 ARR_APP1 (ir_node *, end->in, ka);
700 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
701 assert (end->op == op_End);
702 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
free_End (ir_node *end) {
  assert (end->op == op_End);
  DEL_ARR_F(end->in); /* GL @@@ does not work! */
  end->in = NULL; /* @@@ make sure we get an error if we use the
                     in array afterwards ... */
716 > Implementing the case construct (which is where the constant Proj node is
717 > important) involves far more than simply determining the constant values.
718 > We could argue that this is more properly a function of the translator from
719 > Firm to the target machine. That could be done if there was some way of
720 > projecting "default" out of the Cond node.
721 I know it's complicated.
Basically there are two problems:
723 - determining the gaps between the projs
724 - determining the biggest case constant to know the proj number for
726 I see several solutions:
727 1. Introduce a ProjDefault node. Solves both problems.
728 This means to extend all optimizations executed during construction.
729 2. Give the Cond node for switch two flavors:
730 a) there are no gaps in the projs (existing flavor)
731 b) gaps may exist, default proj is still the Proj with the largest
732 projection number. This covers also the gaps.
733 3. Fix the semantic of the Cond to that of 2b)
735 Solution 2 seems to be the best:
736 Computing the gaps in the Firm representation is not too hard, i.e.,
737 libFIRM can implement a routine that transforms between the two
738 flavours. This is also possible for 1) but 2) does not require to
739 change any existing optimization.
740 Further it should be far simpler to determine the biggest constant than
742 I don't want to choose 3) as 2a) seems to have advantages for
743 dataflow analysis and 3) does not allow to convert the representation to
747 get_Cond_selector (ir_node *node) {
748 assert (node->op == op_Cond);
749 return get_irn_n(node, 0);
753 set_Cond_selector (ir_node *node, ir_node *selector) {
754 assert (node->op == op_Cond);
755 set_irn_n(node, 0, selector);
759 get_Cond_kind (ir_node *node) {
760 assert (node->op == op_Cond);
761 return node->attr.c.kind;
765 set_Cond_kind (ir_node *node, cond_kind kind) {
766 assert (node->op == op_Cond);
767 node->attr.c.kind = kind;
771 get_Cond_defaultProj (ir_node *node) {
772 assert (node->op == op_Cond);
773 return node->attr.c.default_proj;
777 get_Return_mem (ir_node *node) {
778 assert (node->op == op_Return);
779 return get_irn_n(node, 0);
783 set_Return_mem (ir_node *node, ir_node *mem) {
784 assert (node->op == op_Return);
785 set_irn_n(node, 0, mem);
789 get_Return_n_ress (ir_node *node) {
790 assert (node->op == op_Return);
791 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
795 get_Return_res_arr (ir_node *node)
797 assert ((node->op == op_Return));
798 if (get_Return_n_ress(node) > 0)
799 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
806 set_Return_n_res (ir_node *node, int results) {
807 assert (node->op == op_Return);
812 get_Return_res (ir_node *node, int pos) {
813 assert (node->op == op_Return);
814 assert (get_Return_n_ress(node) > pos);
815 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
819 set_Return_res (ir_node *node, int pos, ir_node *res){
820 assert (node->op == op_Return);
821 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
825 get_Raise_mem (ir_node *node) {
826 assert (node->op == op_Raise);
827 return get_irn_n(node, 0);
831 set_Raise_mem (ir_node *node, ir_node *mem) {
832 assert (node->op == op_Raise);
833 set_irn_n(node, 0, mem);
837 get_Raise_exo_ptr (ir_node *node) {
838 assert (node->op == op_Raise);
839 return get_irn_n(node, 1);
843 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
844 assert (node->op == op_Raise);
845 set_irn_n(node, 1, exo_ptr);
848 tarval *get_Const_tarval (ir_node *node) {
849 assert (node->op == op_Const);
850 return node->attr.con.tv;
854 set_Const_tarval (ir_node *node, tarval *con) {
855 assert (node->op == op_Const);
856 node->attr.con.tv = con;
860 /* The source language type. Must be an atomic type. Mode of type must
861 be mode of node. For tarvals from entities type must be pointer to
864 get_Const_type (ir_node *node) {
865 assert (node->op == op_Const);
866 return node->attr.con.tp;
870 set_Const_type (ir_node *node, type *tp) {
871 assert (node->op == op_Const);
872 if (tp != unknown_type) {
873 assert (is_atomic_type(tp));
874 assert (get_type_mode(tp) == get_irn_mode(node));
877 if ((get_irn_node_nr(node) == 259216) && (tp == unknown_type))
881 node->attr.con.tp = tp;
886 get_SymConst_kind (const ir_node *node) {
887 assert (node->op == op_SymConst);
888 return node->attr.i.num;
892 set_SymConst_kind (ir_node *node, symconst_kind num) {
893 assert (node->op == op_SymConst);
894 node->attr.i.num = num;
898 get_SymConst_type (ir_node *node) {
899 assert ( (node->op == op_SymConst)
900 && ( get_SymConst_kind(node) == symconst_type_tag
901 || get_SymConst_kind(node) == symconst_size));
902 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
906 set_SymConst_type (ir_node *node, type *tp) {
907 assert ( (node->op == op_SymConst)
908 && ( get_SymConst_kind(node) == symconst_type_tag
909 || get_SymConst_kind(node) == symconst_size));
910 node->attr.i.sym.type_p = tp;
914 get_SymConst_name (ir_node *node) {
915 assert ( (node->op == op_SymConst)
916 && (get_SymConst_kind(node) == symconst_addr_name));
917 return node->attr.i.sym.ident_p;
921 set_SymConst_name (ir_node *node, ident *name) {
922 assert ( (node->op == op_SymConst)
923 && (get_SymConst_kind(node) == symconst_addr_name));
924 node->attr.i.sym.ident_p = name;
928 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
929 entity *get_SymConst_entity (ir_node *node) {
930 assert ( (node->op == op_SymConst)
931 && (get_SymConst_kind (node) == symconst_addr_ent));
932 return node->attr.i.sym.entity_p;
935 void set_SymConst_entity (ir_node *node, entity *ent) {
936 assert ( (node->op == op_SymConst)
937 && (get_SymConst_kind(node) == symconst_addr_ent));
938 node->attr.i.sym.entity_p = ent;
941 union symconst_symbol
942 get_SymConst_symbol (ir_node *node) {
943 assert (node->op == op_SymConst);
944 return node->attr.i.sym;
948 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
949 assert (node->op == op_SymConst);
950 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
951 node->attr.i.sym = sym;
955 get_SymConst_value_type (ir_node *node) {
956 assert (node->op == op_SymConst);
957 return node->attr.i.tp = skip_tid(node->attr.i.tp);
961 set_SymConst_value_type (ir_node *node, type *tp) {
962 assert (node->op == op_SymConst);
963 node->attr.i.tp = tp;
967 get_Sel_mem (ir_node *node) {
968 assert (node->op == op_Sel);
969 return get_irn_n(node, 0);
973 set_Sel_mem (ir_node *node, ir_node *mem) {
974 assert (node->op == op_Sel);
975 set_irn_n(node, 0, mem);
979 get_Sel_ptr (ir_node *node) {
980 assert (node->op == op_Sel);
981 return get_irn_n(node, 1);
985 set_Sel_ptr (ir_node *node, ir_node *ptr) {
986 assert (node->op == op_Sel);
987 set_irn_n(node, 1, ptr);
991 get_Sel_n_indexs (ir_node *node) {
992 assert (node->op == op_Sel);
993 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
997 get_Sel_index_arr (ir_node *node)
999 assert ((node->op == op_Sel));
1000 if (get_Sel_n_indexs(node) > 0)
1001 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1007 get_Sel_index (ir_node *node, int pos) {
1008 assert (node->op == op_Sel);
1009 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1013 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1014 assert (node->op == op_Sel);
1015 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1019 get_Sel_entity (ir_node *node) {
1020 assert (node->op == op_Sel);
1021 return node->attr.s.ent;
1025 set_Sel_entity (ir_node *node, entity *ent) {
1026 assert (node->op == op_Sel);
1027 node->attr.s.ent = ent;
1031 get_InstOf_ent (ir_node *node) {
1032 assert (node->op = op_InstOf);
1033 return (node->attr.io.ent);
1037 set_InstOf_ent (ir_node *node, type *ent) {
1038 assert (node->op = op_InstOf);
1039 node->attr.io.ent = ent;
1043 get_InstOf_store (ir_node *node) {
1044 assert (node->op = op_InstOf);
1045 return (get_irn_n (node, 0));
1049 set_InstOf_store (ir_node *node, ir_node *obj) {
1050 assert (node->op = op_InstOf);
1051 set_irn_n (node, 0, obj);
1055 get_InstOf_obj (ir_node *node) {
1056 assert (node->op = op_InstOf);
1057 return (get_irn_n (node, 1));
1061 set_InstOf_obj (ir_node *node, ir_node *obj) {
1062 assert (node->op = op_InstOf);
1063 set_irn_n (node, 1, obj);
1067 /* For unary and binary arithmetic operations the access to the
1068 operands can be factored out. Left is the first, right the
1069 second arithmetic value as listed in tech report 0999-33.
1070 unops are: Minus, Abs, Not, Conv, Cast
1071 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1072 Shr, Shrs, Rotate, Cmp */
1076 get_Call_mem (ir_node *node) {
1077 assert (node->op == op_Call);
1078 return get_irn_n(node, 0);
1082 set_Call_mem (ir_node *node, ir_node *mem) {
1083 assert (node->op == op_Call);
1084 set_irn_n(node, 0, mem);
1088 get_Call_ptr (ir_node *node) {
1089 assert (node->op == op_Call);
1090 return get_irn_n(node, 1);
1094 set_Call_ptr (ir_node *node, ir_node *ptr) {
1095 assert (node->op == op_Call);
1096 set_irn_n(node, 1, ptr);
1100 get_Call_param_arr (ir_node *node) {
1101 assert (node->op == op_Call);
1102 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1106 get_Call_n_params (ir_node *node) {
1107 assert (node->op == op_Call);
1108 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1112 get_Call_arity (ir_node *node) {
1113 assert (node->op == op_Call);
1114 return get_Call_n_params(node);
1118 set_Call_arity (ir_node *node, ir_node *arity) {
1119 assert (node->op == op_Call);
1124 get_Call_param (ir_node *node, int pos) {
1125 assert (node->op == op_Call);
1126 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1130 set_Call_param (ir_node *node, int pos, ir_node *param) {
1131 assert (node->op == op_Call);
1132 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1136 get_Call_type (ir_node *node) {
1137 assert (node->op == op_Call);
1138 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1142 set_Call_type (ir_node *node, type *tp) {
1143 assert (node->op == op_Call);
1144 assert ((get_unknown_type() == tp) || is_method_type(tp));
1145 node->attr.call.cld_tp = tp;
1148 int Call_has_callees(ir_node *node) {
1149 assert(node && node->op == op_Call);
1150 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1151 (node->attr.call.callee_arr != NULL));
1154 int get_Call_n_callees(ir_node * node) {
1155 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1156 return ARR_LEN(node->attr.call.callee_arr);
1159 entity * get_Call_callee(ir_node * node, int pos) {
1160 assert(pos >= 0 && pos < get_Call_n_callees(node));
1161 return node->attr.call.callee_arr[pos];
1164 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1165 assert(node->op == op_Call);
1166 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1167 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1169 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1172 void remove_Call_callee_arr(ir_node * node) {
1173 assert(node->op == op_Call);
1174 node->attr.call.callee_arr = NULL;
1177 ir_node * get_CallBegin_ptr (ir_node *node) {
1178 assert(node->op == op_CallBegin);
1179 return get_irn_n(node, 0);
1181 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1182 assert(node->op == op_CallBegin);
1183 set_irn_n(node, 0, ptr);
1185 ir_node * get_CallBegin_call (ir_node *node) {
1186 assert(node->op == op_CallBegin);
1187 return node->attr.callbegin.call;
1189 void set_CallBegin_call (ir_node *node, ir_node *call) {
1190 assert(node->op == op_CallBegin);
1191 node->attr.callbegin.call = call;
1195 get_FuncCall_ptr (ir_node *node) {
1196 assert (node->op == op_FuncCall);
1197 return get_irn_n(node, 0);
1201 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1202 assert (node->op == op_FuncCall);
1203 set_irn_n(node, 0, ptr);
1207 get_FuncCall_param_arr (ir_node *node) {
1208 assert (node->op == op_FuncCall);
1209 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1213 get_FuncCall_n_params (ir_node *node) {
1214 assert (node->op == op_FuncCall);
1215 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1219 get_FuncCall_arity (ir_node *node) {
1220 assert (node->op == op_FuncCall);
1221 return get_FuncCall_n_params(node);
1225 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1226 assert (node->op == op_FuncCall);
1231 get_FuncCall_param (ir_node *node, int pos) {
1232 assert (node->op == op_FuncCall);
1233 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1237 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1238 assert (node->op == op_FuncCall);
1239 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1243 get_FuncCall_type (ir_node *node) {
1244 assert (node->op == op_FuncCall);
1245 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1249 set_FuncCall_type (ir_node *node, type *tp) {
1250 assert (node->op == op_FuncCall);
1251 assert (is_method_type(tp));
1252 node->attr.call.cld_tp = tp;
1255 int FuncCall_has_callees(ir_node *node) {
1256 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1257 (node->attr.call.callee_arr != NULL));
1260 int get_FuncCall_n_callees(ir_node * node) {
1261 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1262 return ARR_LEN(node->attr.call.callee_arr);
1265 entity * get_FuncCall_callee(ir_node * node, int pos) {
1266 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1267 return node->attr.call.callee_arr[pos];
1270 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1271 assert(node->op == op_FuncCall);
1272 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1273 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1275 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1278 void remove_FuncCall_callee_arr(ir_node * node) {
1279 assert(node->op == op_FuncCall);
1280 node->attr.call.callee_arr = NULL;
1285 ir_node * get_##OP##_left(ir_node *node) { \
1286 assert(node->op == op_##OP); \
1287 return get_irn_n(node, node->op->op_index); \
1289 void set_##OP##_left(ir_node *node, ir_node *left) { \
1290 assert(node->op == op_##OP); \
1291 set_irn_n(node, node->op->op_index, left); \
1293 ir_node *get_##OP##_right(ir_node *node) { \
1294 assert(node->op == op_##OP); \
1295 return get_irn_n(node, node->op->op_index + 1); \
1297 void set_##OP##_right(ir_node *node, ir_node *right) { \
1298 assert(node->op == op_##OP); \
1299 set_irn_n(node, node->op->op_index + 1, right); \
1303 ir_node *get_##OP##_op(ir_node *node) { \
1304 assert(node->op == op_##OP); \
1305 return get_irn_n(node, node->op->op_index); \
1307 void set_##OP##_op (ir_node *node, ir_node *op) { \
1308 assert(node->op == op_##OP); \
1309 set_irn_n(node, node->op->op_index, op); \
1319 get_Quot_mem (ir_node *node) {
1320 assert (node->op == op_Quot);
1321 return get_irn_n(node, 0);
1325 set_Quot_mem (ir_node *node, ir_node *mem) {
1326 assert (node->op == op_Quot);
1327 set_irn_n(node, 0, mem);
1333 get_DivMod_mem (ir_node *node) {
1334 assert (node->op == op_DivMod);
1335 return get_irn_n(node, 0);
1339 set_DivMod_mem (ir_node *node, ir_node *mem) {
1340 assert (node->op == op_DivMod);
1341 set_irn_n(node, 0, mem);
1347 get_Div_mem (ir_node *node) {
1348 assert (node->op == op_Div);
1349 return get_irn_n(node, 0);
1353 set_Div_mem (ir_node *node, ir_node *mem) {
1354 assert (node->op == op_Div);
1355 set_irn_n(node, 0, mem);
1361 get_Mod_mem (ir_node *node) {
1362 assert (node->op == op_Mod);
1363 return get_irn_n(node, 0);
1367 set_Mod_mem (ir_node *node, ir_node *mem) {
1368 assert (node->op == op_Mod);
1369 set_irn_n(node, 0, mem);
1386 get_Cast_type (ir_node *node) {
1387 assert (node->op == op_Cast);
1388 return node->attr.cast.totype;
1392 set_Cast_type (ir_node *node, type *to_tp) {
1393 assert (node->op == op_Cast);
1394 node->attr.cast.totype = to_tp;
1398 is_unop (ir_node *node) {
1399 return (node->op->opar == oparity_unary);
1403 get_unop_op (ir_node *node) {
1404 if (node->op->opar == oparity_unary)
1405 return get_irn_n(node, node->op->op_index);
1407 assert(node->op->opar == oparity_unary);
1412 set_unop_op (ir_node *node, ir_node *op) {
1413 if (node->op->opar == oparity_unary)
1414 set_irn_n(node, node->op->op_index, op);
1416 assert(node->op->opar == oparity_unary);
1420 is_binop (ir_node *node) {
1421 return (node->op->opar == oparity_binary);
1425 get_binop_left (ir_node *node) {
1426 if (node->op->opar == oparity_binary)
1427 return get_irn_n(node, node->op->op_index);
1429 assert(node->op->opar == oparity_binary);
1434 set_binop_left (ir_node *node, ir_node *left) {
1435 if (node->op->opar == oparity_binary)
1436 set_irn_n(node, node->op->op_index, left);
1438 assert (node->op->opar == oparity_binary);
1442 get_binop_right (ir_node *node) {
1443 if (node->op->opar == oparity_binary)
1444 return get_irn_n(node, node->op->op_index + 1);
1446 assert(node->op->opar == oparity_binary);
1451 set_binop_right (ir_node *node, ir_node *right) {
1452 if (node->op->opar == oparity_binary)
1453 set_irn_n(node, node->op->op_index + 1, right);
1455 assert (node->op->opar == oparity_binary);
1458 int is_Phi (ir_node *n) {
1464 if (op == op_Filter) return get_interprocedural_view();
1467 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1468 (get_irn_arity(n) > 0));
1473 int is_Phi0 (ir_node *n) {
1476 return ((get_irn_op(n) == op_Phi) &&
1477 (get_irn_arity(n) == 0) &&
1478 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1482 get_Phi_preds_arr (ir_node *node) {
1483 assert (node->op == op_Phi);
1484 return (ir_node **)&(get_irn_in(node)[1]);
1488 get_Phi_n_preds (ir_node *node) {
1489 assert (is_Phi(node) || is_Phi0(node));
1490 return (get_irn_arity(node));
1494 void set_Phi_n_preds (ir_node *node, int n_preds) {
1495 assert (node->op == op_Phi);
1500 get_Phi_pred (ir_node *node, int pos) {
1501 assert (is_Phi(node) || is_Phi0(node));
1502 return get_irn_n(node, pos);
1506 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1507 assert (is_Phi(node) || is_Phi0(node));
1508 set_irn_n(node, pos, pred);
1512 int is_memop(ir_node *node) {
1513 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1516 ir_node *get_memop_mem (ir_node *node) {
1517 assert(is_memop(node));
1518 return get_irn_n(node, 0);
1521 void set_memop_mem (ir_node *node, ir_node *mem) {
1522 assert(is_memop(node));
1523 set_irn_n(node, 0, mem);
1526 ir_node *get_memop_ptr (ir_node *node) {
1527 assert(is_memop(node));
1528 return get_irn_n(node, 1);
1531 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1532 assert(is_memop(node));
1533 set_irn_n(node, 1, ptr);
1537 get_Load_mem (ir_node *node) {
1538 assert (node->op == op_Load);
1539 return get_irn_n(node, 0);
1543 set_Load_mem (ir_node *node, ir_node *mem) {
1544 assert (node->op == op_Load);
1545 set_irn_n(node, 0, mem);
1549 get_Load_ptr (ir_node *node) {
1550 assert (node->op == op_Load);
1551 return get_irn_n(node, 1);
1555 set_Load_ptr (ir_node *node, ir_node *ptr) {
1556 assert (node->op == op_Load);
1557 set_irn_n(node, 1, ptr);
1561 get_Load_mode (ir_node *node) {
1562 assert (node->op == op_Load);
1563 return node->attr.load.load_mode;
1567 set_Load_mode (ir_node *node, ir_mode *mode) {
1568 assert (node->op == op_Load);
1569 node->attr.load.load_mode = mode;
1573 get_Load_volatility (ir_node *node) {
1574 assert (node->op == op_Load);
1575 return node->attr.load.volatility;
1579 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1580 assert (node->op == op_Load);
1581 node->attr.load.volatility = volatility;
1586 get_Store_mem (ir_node *node) {
1587 assert (node->op == op_Store);
1588 return get_irn_n(node, 0);
1592 set_Store_mem (ir_node *node, ir_node *mem) {
1593 assert (node->op == op_Store);
1594 set_irn_n(node, 0, mem);
1598 get_Store_ptr (ir_node *node) {
1599 assert (node->op == op_Store);
1600 return get_irn_n(node, 1);
1604 set_Store_ptr (ir_node *node, ir_node *ptr) {
1605 assert (node->op == op_Store);
1606 set_irn_n(node, 1, ptr);
1610 get_Store_value (ir_node *node) {
1611 assert (node->op == op_Store);
1612 return get_irn_n(node, 2);
1616 set_Store_value (ir_node *node, ir_node *value) {
1617 assert (node->op == op_Store);
1618 set_irn_n(node, 2, value);
1622 get_Store_volatility (ir_node *node) {
1623 assert (node->op == op_Store);
1624 return node->attr.store.volatility;
1628 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1629 assert (node->op == op_Store);
1630 node->attr.store.volatility = volatility;
1635 get_Alloc_mem (ir_node *node) {
1636 assert (node->op == op_Alloc);
1637 return get_irn_n(node, 0);
1641 set_Alloc_mem (ir_node *node, ir_node *mem) {
1642 assert (node->op == op_Alloc);
1643 set_irn_n(node, 0, mem);
1647 get_Alloc_size (ir_node *node) {
1648 assert (node->op == op_Alloc);
1649 return get_irn_n(node, 1);
1653 set_Alloc_size (ir_node *node, ir_node *size) {
1654 assert (node->op == op_Alloc);
1655 set_irn_n(node, 1, size);
1659 get_Alloc_type (ir_node *node) {
1660 assert (node->op == op_Alloc);
1661 return node->attr.a.type = skip_tid(node->attr.a.type);
1665 set_Alloc_type (ir_node *node, type *tp) {
1666 assert (node->op == op_Alloc);
1667 node->attr.a.type = tp;
1671 get_Alloc_where (ir_node *node) {
1672 assert (node->op == op_Alloc);
1673 return node->attr.a.where;
1677 set_Alloc_where (ir_node *node, where_alloc where) {
1678 assert (node->op == op_Alloc);
1679 node->attr.a.where = where;
1684 get_Free_mem (ir_node *node) {
1685 assert (node->op == op_Free);
1686 return get_irn_n(node, 0);
1690 set_Free_mem (ir_node *node, ir_node *mem) {
1691 assert (node->op == op_Free);
1692 set_irn_n(node, 0, mem);
1696 get_Free_ptr (ir_node *node) {
1697 assert (node->op == op_Free);
1698 return get_irn_n(node, 1);
1702 set_Free_ptr (ir_node *node, ir_node *ptr) {
1703 assert (node->op == op_Free);
1704 set_irn_n(node, 1, ptr);
1708 get_Free_size (ir_node *node) {
1709 assert (node->op == op_Free);
1710 return get_irn_n(node, 2);
1714 set_Free_size (ir_node *node, ir_node *size) {
1715 assert (node->op == op_Free);
1716 set_irn_n(node, 2, size);
1720 get_Free_type (ir_node *node) {
1721 assert (node->op == op_Free);
1722 return node->attr.f = skip_tid(node->attr.f);
1726 set_Free_type (ir_node *node, type *tp) {
1727 assert (node->op == op_Free);
1732 get_Sync_preds_arr (ir_node *node) {
1733 assert (node->op == op_Sync);
1734 return (ir_node **)&(get_irn_in(node)[1]);
1738 get_Sync_n_preds (ir_node *node) {
1739 assert (node->op == op_Sync);
1740 return (get_irn_arity(node));
1745 set_Sync_n_preds (ir_node *node, int n_preds) {
1746 assert (node->op == op_Sync);
1751 get_Sync_pred (ir_node *node, int pos) {
1752 assert (node->op == op_Sync);
1753 return get_irn_n(node, pos);
1757 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1758 assert (node->op == op_Sync);
1759 set_irn_n(node, pos, pred);
1763 get_Proj_pred (ir_node *node) {
1764 assert (is_Proj(node));
1765 return get_irn_n(node, 0);
1769 set_Proj_pred (ir_node *node, ir_node *pred) {
1770 assert (is_Proj(node));
1771 set_irn_n(node, 0, pred);
1775 get_Proj_proj (ir_node *node) {
1776 assert (is_Proj(node));
1777 if (get_irn_opcode(node) == iro_Proj) {
1778 return node->attr.proj;
1780 assert(get_irn_opcode(node) == iro_Filter);
1781 return node->attr.filter.proj;
1786 set_Proj_proj (ir_node *node, long proj) {
1787 assert (node->op == op_Proj);
1788 node->attr.proj = proj;
1792 get_Tuple_preds_arr (ir_node *node) {
1793 assert (node->op == op_Tuple);
1794 return (ir_node **)&(get_irn_in(node)[1]);
1798 get_Tuple_n_preds (ir_node *node) {
1799 assert (node->op == op_Tuple);
1800 return (get_irn_arity(node));
1805 set_Tuple_n_preds (ir_node *node, int n_preds) {
1806 assert (node->op == op_Tuple);
1811 get_Tuple_pred (ir_node *node, int pos) {
1812 assert (node->op == op_Tuple);
1813 return get_irn_n(node, pos);
1817 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1818 assert (node->op == op_Tuple);
1819 set_irn_n(node, pos, pred);
1823 get_Id_pred (ir_node *node) {
1824 assert (node->op == op_Id);
1825 return get_irn_n(node, 0);
1829 set_Id_pred (ir_node *node, ir_node *pred) {
1830 assert (node->op == op_Id);
1831 set_irn_n(node, 0, pred);
1834 ir_node *get_Confirm_value (ir_node *node) {
1835 assert (node->op == op_Confirm);
1836 return get_irn_n(node, 0);
1838 void set_Confirm_value (ir_node *node, ir_node *value) {
1839 assert (node->op == op_Confirm);
1840 set_irn_n(node, 0, value);
1842 ir_node *get_Confirm_bound (ir_node *node) {
1843 assert (node->op == op_Confirm);
1844 return get_irn_n(node, 1);
1846 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1847 assert (node->op == op_Confirm);
1848 set_irn_n(node, 0, bound);
1850 pn_Cmp get_Confirm_cmp (ir_node *node) {
1851 assert (node->op == op_Confirm);
1852 return node->attr.confirm_cmp;
1854 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1855 assert (node->op == op_Confirm);
1856 node->attr.confirm_cmp = cmp;
1861 get_Filter_pred (ir_node *node) {
1862 assert(node->op == op_Filter);
1866 set_Filter_pred (ir_node *node, ir_node *pred) {
1867 assert(node->op == op_Filter);
1871 get_Filter_proj(ir_node *node) {
1872 assert(node->op == op_Filter);
1873 return node->attr.filter.proj;
1876 set_Filter_proj (ir_node *node, long proj) {
1877 assert(node->op == op_Filter);
1878 node->attr.filter.proj = proj;
1881 /* Don't use get_irn_arity, get_irn_n in implementation as access
1882 shall work independent of view!!! */
1883 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1884 assert(node->op == op_Filter);
1885 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1886 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1887 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1888 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1889 node->attr.filter.in_cg[0] = node->in[0];
1891 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1894 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1895 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1896 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1897 node->attr.filter.in_cg[pos + 1] = pred;
1899 int get_Filter_n_cg_preds(ir_node *node) {
1900 assert(node->op == op_Filter && node->attr.filter.in_cg);
1901 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1903 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1905 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1907 arity = ARR_LEN(node->attr.filter.in_cg);
1908 assert(pos < arity - 1);
1909 return node->attr.filter.in_cg[pos + 1];
1914 get_irn_irg(ir_node *node) {
1915 if (get_irn_op(node) != op_Block)
1916 node = get_nodes_block(node);
1917 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1918 node = get_nodes_block(node);
1919 assert(get_irn_op(node) == op_Block);
1920 return node->attr.block.irg;
1924 /*----------------------------------------------------------------*/
1925 /* Auxiliary routines */
1926 /*----------------------------------------------------------------*/
1929 skip_Proj (ir_node *node) {
1930 /* don't assert node !!! */
1931 if (node && is_Proj(node)) {
1932 return get_Proj_pred(node);
1939 skip_Tuple (ir_node *node) {
1942 if (!get_opt_normalize()) return node;
1944 node = skip_Id(node);
1945 if (get_irn_op(node) == op_Proj) {
1946 pred = skip_Id(get_Proj_pred(node));
1947 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1948 pred = skip_Id(skip_Tuple(pred));
1949 if (get_irn_op(pred) == op_Tuple)
1950 return get_Tuple_pred(pred, get_Proj_proj(node));
1955 /** returns operand of node if node is a Cast */
1956 ir_node *skip_Cast (ir_node *node) {
1957 if (node && get_irn_op(node) == op_Cast) {
1958 return skip_Id(get_irn_n(node, 0));
/* NOTE(review): this older variant of skip_Id duplicates the definition
   that follows it -- two external definitions of the same function are a
   compile/link error.  Compiled out; the newer variant below adds the
   fast-path checks and is the one to keep. */
#if 0
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops. */
ir_node *skip_Id (ir_node *node) {
  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != node->in[0+1])) {
    ir_node *rem_pred = node->in[0+1];
    ir_node *res;

    assert (get_irn_arity (node) > 0);

    node->in[0+1] = node;               /* break the chain: self-reference bounds the recursion */
    res = skip_Id(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res;
    return res;
  }
  return node;
}
#endif
1993 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1994 than any other approach, as Id chains are resolved and all point to the real node, or
1995 all id's are self loops. */
1997 skip_Id (ir_node *node) {
1999 /* don't assert node !!! */
2001 if (!node || (node->op != op_Id)) return node;
2003 if (!get_opt_normalize()) return node;
2005 /* Don't use get_Id_pred: We get into an endless loop for
2006 self-referencing Ids. */
2007 pred = node->in[0+1];
2009 if (pred->op != op_Id) return pred;
2011 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2012 ir_node *rem_pred, *res;
2014 if (pred->op != op_Id) return pred; /* shortcut */
2017 assert (get_irn_arity (node) > 0);
2019 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2020 res = skip_Id(rem_pred);
2021 if (res->op == op_Id) /* self-loop */ return node;
2023 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2032 is_Bad (ir_node *node) {
2034 if ((node) && get_irn_opcode(node) == iro_Bad)
2040 is_no_Block (ir_node *node) {
2042 return (get_irn_opcode(node) != iro_Block);
2046 is_Block (ir_node *node) {
2048 return (get_irn_opcode(node) == iro_Block);
2051 /* returns true if node is a Unknown node. */
2053 is_Unknown (ir_node *node) {
2055 return (get_irn_opcode(node) == iro_Unknown);
2059 is_Proj (const ir_node *node) {
2061 return node->op == op_Proj
2062 || (!get_interprocedural_view() && node->op == op_Filter);
2065 /* Returns true if the operation manipulates control flow. */
2067 is_cfop(ir_node *node) {
2068 return is_cfopcode(get_irn_op(node));
2071 /* Returns true if the operation manipulates interprocedural control flow:
2072 CallBegin, EndReg, EndExcept */
2073 int is_ip_cfop(ir_node *node) {
2074 return is_ip_cfopcode(get_irn_op(node));
2077 /* Returns true if the operation can change the control flow because
2080 is_fragile_op(ir_node *node) {
2081 return is_op_fragile(get_irn_op(node));
2084 /* Returns the memory operand of fragile operations. */
2085 ir_node *get_fragile_op_mem(ir_node *node) {
2086 assert(node && is_fragile_op(node));
2088 switch (get_irn_opcode (node)) {
2097 return get_irn_n(node, 0);
2102 assert(0 && "should not be reached");
2107 /* Returns true if the operation is a forking control flow operation. */
2109 is_forking_op(ir_node *node) {
2110 return is_op_forking(get_irn_op(node));
2113 #ifdef DEBUG_libfirm
2114 void dump_irn (ir_node *n) {
2115 int i, arity = get_irn_arity(n);
2116 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2118 ir_node *pred = get_irn_n(n, -1);
2119 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2120 get_irn_node_nr(pred), (void *)pred);
2122 printf(" preds: \n");
2123 for (i = 0; i < arity; ++i) {
2124 ir_node *pred = get_irn_n(n, i);
2125 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2126 get_irn_node_nr(pred), (void *)pred);
2130 #else /* DEBUG_libfirm */
2131 void dump_irn (ir_node *n) {}
2132 #endif /* DEBUG_libfirm */