3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46 * returns the pnc name from an pnc constant
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
/* NOTE(review): this extraction is missing several lines of the original
   function (return type, declaration of res, the arity test between the
   two in-array branches, and the closing brace); the code lines below
   are left untouched. */
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
/* Node size is the common header plus the op-specific attribute area. */
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
/* Allocate on the graph's obstack and zero the whole node. */
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
/* Negative arity: growable (flexible) in-array; otherwise a fixed-size
   array on the obstack.  Slot 0 is reserved for the block predecessor,
   so the incoming nodes are copied starting at in[1]. */
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
158 int (*__get_irn_arity)(const ir_node *node) = __get_irn_intra_arity;
161 (get_irn_arity)(const ir_node *node) {
162 return __get_irn_arity(node);
165 /* Returns the array with ins. This array is shifted with respect to the
166 array accessed by get_irn_n: The block operand is at position 0 not -1.
167 (@@@ This should be changed.)
168 The order of the predecessors in this array is not guaranteed, except that
169 lists of operands as predecessors of Block or arguments of a Call are
172 get_irn_in (const ir_node *node) {
174 if (get_interprocedural_view()) { /* handle Filter and Block specially */
175 if (get_irn_opcode(node) == iro_Filter) {
176 assert(node->attr.filter.in_cg);
177 return node->attr.filter.in_cg;
178 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
179 return node->attr.block.in_cg;
181 /* else fall through */
187 set_irn_in (ir_node *node, int arity, ir_node **in) {
190 if (get_interprocedural_view()) { /* handle Filter and Block specially */
191 if (get_irn_opcode(node) == iro_Filter) {
192 assert(node->attr.filter.in_cg);
193 arr = &node->attr.filter.in_cg;
194 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
195 arr = &node->attr.block.in_cg;
202 if (arity != ARR_LEN(*arr) - 1) {
203 ir_node * block = (*arr)[0];
204 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
207 fix_backedges(current_ir_graph->obst, node);
208 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
212 (get_irn_intra_n)(ir_node *node, int n) {
213 return __get_irn_intra_n (node, n);
217 (get_irn_inter_n)(ir_node *node, int n) {
218 return __get_irn_inter_n (node, n);
221 ir_node *(*__get_irn_n)(ir_node *node, int n) = __get_irn_intra_n;
224 (get_irn_n)(ir_node *node, int n) {
225 return __get_irn_n(node, n);
229 set_irn_n (ir_node *node, int n, ir_node *in) {
/* Sets the n-th predecessor of node; n == -1 addresses the block
   predecessor, which is stored at in[0] — hence the n + 1 indexing
   below.  In the interprocedural view, Filter and Block nodes keep a
   second predecessor array (in_cg) that must be kept in sync. */
230 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
231 assert(in && in->kind == k_ir_node);
232 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
233 /* Change block pred in both views! */
234 node->in[n + 1] = in;
235 assert(node->attr.filter.in_cg);
236 node->attr.filter.in_cg[n + 1] = in;
/* NOTE(review): the extraction is missing lines here (presumably a
   return and closing brace ending the Filter block-pred case); code
   lines are left untouched. */
239 if (get_interprocedural_view()) { /* handle Filter and Block specially */
240 if (get_irn_opcode(node) == iro_Filter) {
241 assert(node->attr.filter.in_cg);
242 node->attr.filter.in_cg[n + 1] = in;
244 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
245 node->attr.block.in_cg[n + 1] = in;
248 /* else fall through */
250 node->in[n + 1] = in;
254 (get_irn_mode)(const ir_node *node) {
255 return __get_irn_mode(node);
259 (set_irn_mode)(ir_node *node, ir_mode *mode)
261 __set_irn_mode(node, mode);
265 get_irn_modecode (const ir_node *node)
268 return node->mode->code;
271 /** Gets the string representation of the mode .*/
273 get_irn_modename (const ir_node *node)
276 return get_mode_name(node->mode);
280 get_irn_modeident (const ir_node *node)
283 return get_mode_ident(node->mode);
287 (get_irn_op)(const ir_node *node)
289 return __get_irn_op(node);
292 /* should be private to the library: */
294 set_irn_op (ir_node *node, ir_op *op)
301 (get_irn_opcode)(const ir_node *node)
303 return __get_irn_opcode(node);
307 get_irn_opname (const ir_node *node)
310 if ((get_irn_op((ir_node *)node) == op_Phi) &&
311 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
312 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
313 return get_id_str(node->op->name);
317 get_irn_opident (const ir_node *node)
320 return node->op->name;
324 (get_irn_visited)(const ir_node *node)
326 return __get_irn_visited(node);
330 (set_irn_visited)(ir_node *node, unsigned long visited)
332 __set_irn_visited(node, visited);
336 (mark_irn_visited)(ir_node *node) {
337 __mark_irn_visited(node);
341 (irn_not_visited)(const ir_node *node) {
342 return __irn_not_visited(node);
346 (irn_visited)(const ir_node *node) {
347 return __irn_visited(node);
351 (set_irn_link)(ir_node *node, void *link) {
352 __set_irn_link(node, link);
356 (get_irn_link)(const ir_node *node) {
357 return __get_irn_link(node);
361 (get_irn_pinned)(const ir_node *node) {
362 return __get_irn_pinned(node);
365 void set_irn_pinned(ir_node *node, op_pin_state state) {
366 /* due to optimization an opt may be turned into a Tuple */
367 if (get_irn_op(node) == op_Tuple)
370 assert(node && get_op_pinned(get_irn_op(node)) == op_pin_state_exc_pinned);
371 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
373 node->attr.except.pin_state = state;
376 #ifdef DO_HEAPANALYSIS
377 /* Access the abstract interpretation information of a node.
378 Returns NULL if no such information is available. */
379 struct abstval *get_irn_abst_value(ir_node *n) {
382 /* Set the abstract interpretation information of a node. */
383 void set_irn_abst_value(ir_node *n, struct abstval *os) {
386 struct section *firm_get_irn_section(ir_node *n) {
389 void firm_set_irn_section(ir_node *n, struct section *s) {
392 #endif /* DO_HEAPANALYSIS */
395 /* Outputs a unique number for this node */
397 get_irn_node_nr(const ir_node *node) {
400 return node->node_nr;
407 get_irn_const_attr (ir_node *node)
409 assert (node->op == op_Const);
410 return node->attr.con;
414 get_irn_proj_attr (ir_node *node)
416 assert (node->op == op_Proj);
417 return node->attr.proj;
421 get_irn_alloc_attr (ir_node *node)
423 assert (node->op == op_Alloc);
428 get_irn_free_attr (ir_node *node)
430 assert (node->op == op_Free);
431 return node->attr.f = skip_tid(node->attr.f);
435 get_irn_symconst_attr (ir_node *node)
437 assert (node->op == op_SymConst);
442 get_irn_call_attr (ir_node *node)
444 assert (node->op == op_Call);
445 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
449 get_irn_funccall_attr (ir_node *node)
451 assert (node->op == op_FuncCall);
452 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
456 get_irn_sel_attr (ir_node *node)
458 assert (node->op == op_Sel);
463 get_irn_phi_attr (ir_node *node)
465 assert (node->op == op_Phi);
466 return node->attr.phi0_pos;
470 get_irn_block_attr (ir_node *node)
472 assert (node->op == op_Block);
473 return node->attr.block;
477 get_irn_load_attr (ir_node *node)
479 assert (node->op == op_Load);
480 return node->attr.load;
484 get_irn_store_attr (ir_node *node)
486 assert (node->op == op_Store);
487 return node->attr.store;
491 get_irn_except_attr (ir_node *node)
493 assert (node->op == op_Div || node->op == op_Quot ||
494 node->op == op_DivMod || node->op == op_Mod);
495 return node->attr.except;
498 /** manipulate fields of individual nodes **/
500 /* this works for all except Block */
502 get_nodes_block (ir_node *node) {
503 assert (!(node->op == op_Block));
504 return get_irn_n(node, -1);
508 set_nodes_block (ir_node *node, ir_node *block) {
509 assert (!(node->op == op_Block));
510 set_irn_n(node, -1, block);
513 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
514 * from Start. If so returns frame type, else Null. */
515 type *is_frame_pointer(ir_node *n) {
516 if ((get_irn_op(n) == op_Proj) &&
517 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
518 ir_node *start = get_Proj_pred(n);
519 if (get_irn_op(start) == op_Start) {
520 return get_irg_frame_type(get_irn_irg(start));
526 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
527 * from Start. If so returns global type, else Null. */
528 type *is_globals_pointer(ir_node *n) {
529 if ((get_irn_op(n) == op_Proj) &&
530 (get_Proj_proj(n) == pn_Start_P_globals)) {
531 ir_node *start = get_Proj_pred(n);
532 if (get_irn_op(start) == op_Start) {
533 return get_glob_type();
539 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
540 * from Start. If so returns 1, else 0. */
541 int is_value_arg_pointer(ir_node *n) {
542 if ((get_irn_op(n) == op_Proj) &&
543 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
544 (get_irn_op(get_Proj_pred(n)) == op_Start))
549 /* Returns an array with the predecessors of the Block. Depending on
550 the implementation of the graph data structure this can be a copy of
551 the internal representation of predecessors as well as the internal
552 array itself. Therefore writing to this array might obstruct the ir. */
554 get_Block_cfgpred_arr (ir_node *node)
556 assert ((node->op == op_Block));
557 return (ir_node **)&(get_irn_in(node)[1]);
562 get_Block_n_cfgpreds (ir_node *node) {
563 assert ((node->op == op_Block));
564 return get_irn_arity(node);
568 get_Block_cfgpred (ir_node *node, int pos) {
570 assert (node->op == op_Block);
571 assert(-1 <= pos && pos < get_irn_arity(node));
572 return get_irn_n(node, pos);
576 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
577 assert (node->op == op_Block);
578 set_irn_n(node, pos, pred);
582 get_Block_matured (ir_node *node) {
583 assert (node->op == op_Block);
584 return node->attr.block.matured;
588 set_Block_matured (ir_node *node, bool matured) {
589 assert (node->op == op_Block);
590 node->attr.block.matured = matured;
593 get_Block_block_visited (ir_node *node) {
594 assert (node->op == op_Block);
595 return node->attr.block.block_visited;
599 set_Block_block_visited (ir_node *node, unsigned long visit) {
600 assert (node->op == op_Block);
601 node->attr.block.block_visited = visit;
604 /* For this current_ir_graph must be set. */
606 mark_Block_block_visited (ir_node *node) {
607 assert (node->op == op_Block);
608 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
612 Block_not_block_visited(ir_node *node) {
613 assert (node->op == op_Block);
614 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
618 get_Block_graph_arr (ir_node *node, int pos) {
619 assert (node->op == op_Block);
620 return node->attr.block.graph_arr[pos+1];
624 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
625 assert (node->op == op_Block);
626 node->attr.block.graph_arr[pos+1] = value;
629 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
630 assert(node->op == op_Block);
631 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
632 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
633 node->attr.block.in_cg[0] = NULL;
634 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
636 /* Fix backedge array. fix_backedges operates depending on
637 interprocedural_view. */
638 int ipv = get_interprocedural_view();
639 set_interprocedural_view(true);
640 fix_backedges(current_ir_graph->obst, node);
641 set_interprocedural_view(ipv);
644 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
647 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
648 assert(node->op == op_Block &&
649 node->attr.block.in_cg &&
650 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
651 node->attr.block.in_cg[pos + 1] = pred;
654 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
655 assert(node->op == op_Block);
656 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
659 int get_Block_cg_n_cfgpreds(ir_node * node) {
660 assert(node->op == op_Block);
661 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
664 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
665 assert(node->op == op_Block && node->attr.block.in_cg);
666 return node->attr.block.in_cg[pos + 1];
669 void remove_Block_cg_cfgpred_arr(ir_node * node) {
670 assert(node->op == op_Block);
671 node->attr.block.in_cg = NULL;
675 set_Start_irg(ir_node *node, ir_graph *irg) {
676 assert(node->op == op_Start);
677 assert(is_ir_graph(irg));
678 assert(0 && " Why set irg? -- use set_irn_irg");
682 get_End_n_keepalives(ir_node *end) {
683 assert (end->op == op_End);
684 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
688 get_End_keepalive(ir_node *end, int pos) {
689 assert (end->op == op_End);
690 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
694 add_End_keepalive (ir_node *end, ir_node *ka) {
695 assert (end->op == op_End);
696 ARR_APP1 (ir_node *, end->in, ka);
700 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
701 assert (end->op == op_End);
702 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
706 free_End (ir_node *end) {
707 assert (end->op == op_End);
709 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
710 end->in = NULL; /* @@@ make sure we get an error if we use the
711 in array afterwards ... */
716 > Implementing the case construct (which is where the constant Proj node is
717 > important) involves far more than simply determining the constant values.
718 > We could argue that this is more properly a function of the translator from
719 > Firm to the target machine. That could be done if there was some way of
720 > projecting "default" out of the Cond node.
721 I know it's complicated.
722 Basically there are two problems:
723 - determining the gaps between the projs
724 - determining the biggest case constant to know the proj number for
726 I see several solutions:
727 1. Introduce a ProjDefault node. Solves both problems.
728 This means to extend all optimizations executed during construction.
729 2. Give the Cond node for switch two flavors:
730 a) there are no gaps in the projs (existing flavor)
731 b) gaps may exist, default proj is still the Proj with the largest
732 projection number. This covers also the gaps.
733 3. Fix the semantic of the Cond to that of 2b)
735 Solution 2 seems to be the best:
736 Computing the gaps in the Firm representation is not too hard, i.e.,
737 libFIRM can implement a routine that transforms between the two
738 flavours. This is also possible for 1) but 2) does not require to
739 change any existing optimization.
740 Further it should be far simpler to determine the biggest constant than
742 I don't want to choose 3) as 2a) seems to have advantages for
743 dataflow analysis and 3) does not allow to convert the representation to
747 get_Cond_selector (ir_node *node) {
748 assert (node->op == op_Cond);
749 return get_irn_n(node, 0);
753 set_Cond_selector (ir_node *node, ir_node *selector) {
754 assert (node->op == op_Cond);
755 set_irn_n(node, 0, selector);
759 get_Cond_kind (ir_node *node) {
760 assert (node->op == op_Cond);
761 return node->attr.c.kind;
765 set_Cond_kind (ir_node *node, cond_kind kind) {
766 assert (node->op == op_Cond);
767 node->attr.c.kind = kind;
771 get_Cond_defaultProj (ir_node *node) {
772 assert (node->op == op_Cond);
773 return node->attr.c.default_proj;
777 get_Return_mem (ir_node *node) {
778 assert (node->op == op_Return);
779 return get_irn_n(node, 0);
783 set_Return_mem (ir_node *node, ir_node *mem) {
784 assert (node->op == op_Return);
785 set_irn_n(node, 0, mem);
789 get_Return_n_ress (ir_node *node) {
790 assert (node->op == op_Return);
791 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
795 get_Return_res_arr (ir_node *node)
797 assert ((node->op == op_Return));
798 if (get_Return_n_ress(node) > 0)
799 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
806 set_Return_n_res (ir_node *node, int results) {
807 assert (node->op == op_Return);
812 get_Return_res (ir_node *node, int pos) {
813 assert (node->op == op_Return);
814 assert (get_Return_n_ress(node) > pos);
815 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
819 set_Return_res (ir_node *node, int pos, ir_node *res){
820 assert (node->op == op_Return);
821 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
825 get_Raise_mem (ir_node *node) {
826 assert (node->op == op_Raise);
827 return get_irn_n(node, 0);
831 set_Raise_mem (ir_node *node, ir_node *mem) {
832 assert (node->op == op_Raise);
833 set_irn_n(node, 0, mem);
837 get_Raise_exo_ptr (ir_node *node) {
838 assert (node->op == op_Raise);
839 return get_irn_n(node, 1);
843 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
844 assert (node->op == op_Raise);
845 set_irn_n(node, 1, exo_ptr);
848 tarval *get_Const_tarval (ir_node *node) {
849 assert (node->op == op_Const);
850 return node->attr.con.tv;
854 set_Const_tarval (ir_node *node, tarval *con) {
855 assert (node->op == op_Const);
856 node->attr.con.tv = con;
860 /* The source language type. Must be an atomic type. Mode of type must
861 be mode of node. For tarvals from entities type must be pointer to
864 get_Const_type (ir_node *node) {
865 assert (node->op == op_Const);
866 return node->attr.con.tp;
870 set_Const_type (ir_node *node, type *tp) {
871 assert (node->op == op_Const);
872 if (tp != unknown_type) {
873 assert (is_atomic_type(tp));
874 assert (get_type_mode(tp) == get_irn_mode(node));
877 node->attr.con.tp = tp;
882 get_SymConst_kind (const ir_node *node) {
883 assert (node->op == op_SymConst);
884 return node->attr.i.num;
888 set_SymConst_kind (ir_node *node, symconst_kind num) {
889 assert (node->op == op_SymConst);
890 node->attr.i.num = num;
894 get_SymConst_type (ir_node *node) {
895 assert ( (node->op == op_SymConst)
896 && ( get_SymConst_kind(node) == symconst_type_tag
897 || get_SymConst_kind(node) == symconst_size));
898 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
902 set_SymConst_type (ir_node *node, type *tp) {
903 assert ( (node->op == op_SymConst)
904 && ( get_SymConst_kind(node) == symconst_type_tag
905 || get_SymConst_kind(node) == symconst_size));
906 node->attr.i.sym.type_p = tp;
910 get_SymConst_name (ir_node *node) {
911 assert ( (node->op == op_SymConst)
912 && (get_SymConst_kind(node) == symconst_addr_name));
913 return node->attr.i.sym.ident_p;
917 set_SymConst_name (ir_node *node, ident *name) {
918 assert ( (node->op == op_SymConst)
919 && (get_SymConst_kind(node) == symconst_addr_name));
920 node->attr.i.sym.ident_p = name;
924 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
925 entity *get_SymConst_entity (ir_node *node) {
926 assert ( (node->op == op_SymConst)
927 && (get_SymConst_kind (node) == symconst_addr_ent));
928 return node->attr.i.sym.entity_p;
931 void set_SymConst_entity (ir_node *node, entity *ent) {
932 assert ( (node->op == op_SymConst)
933 && (get_SymConst_kind(node) == symconst_addr_ent));
934 node->attr.i.sym.entity_p = ent;
937 union symconst_symbol
938 get_SymConst_symbol (ir_node *node) {
939 assert (node->op == op_SymConst);
940 return node->attr.i.sym;
944 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
945 assert (node->op == op_SymConst);
946 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
947 node->attr.i.sym = sym;
951 get_SymConst_value_type (ir_node *node) {
952 assert (node->op == op_SymConst);
953 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
954 return node->attr.i.tp;
958 set_SymConst_value_type (ir_node *node, type *tp) {
959 assert (node->op == op_SymConst);
960 node->attr.i.tp = tp;
964 get_Sel_mem (ir_node *node) {
965 assert (node->op == op_Sel);
966 return get_irn_n(node, 0);
970 set_Sel_mem (ir_node *node, ir_node *mem) {
971 assert (node->op == op_Sel);
972 set_irn_n(node, 0, mem);
976 get_Sel_ptr (ir_node *node) {
977 assert (node->op == op_Sel);
978 return get_irn_n(node, 1);
982 set_Sel_ptr (ir_node *node, ir_node *ptr) {
983 assert (node->op == op_Sel);
984 set_irn_n(node, 1, ptr);
988 get_Sel_n_indexs (ir_node *node) {
989 assert (node->op == op_Sel);
990 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
994 get_Sel_index_arr (ir_node *node)
996 assert ((node->op == op_Sel));
997 if (get_Sel_n_indexs(node) > 0)
998 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1004 get_Sel_index (ir_node *node, int pos) {
1005 assert (node->op == op_Sel);
1006 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1010 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1011 assert (node->op == op_Sel);
1012 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1016 get_Sel_entity (ir_node *node) {
1017 assert (node->op == op_Sel);
1018 return node->attr.s.ent;
1022 set_Sel_entity (ir_node *node, entity *ent) {
1023 assert (node->op == op_Sel);
1024 node->attr.s.ent = ent;
1028 get_InstOf_ent (ir_node *node) {
1029 assert (node->op = op_InstOf);
1030 return (node->attr.io.ent);
1034 set_InstOf_ent (ir_node *node, type *ent) {
1035 assert (node->op = op_InstOf);
1036 node->attr.io.ent = ent;
1040 get_InstOf_store (ir_node *node) {
1041 assert (node->op = op_InstOf);
1042 return (get_irn_n (node, 0));
1046 set_InstOf_store (ir_node *node, ir_node *obj) {
1047 assert (node->op = op_InstOf);
1048 set_irn_n (node, 0, obj);
1052 get_InstOf_obj (ir_node *node) {
1053 assert (node->op = op_InstOf);
1054 return (get_irn_n (node, 1));
1058 set_InstOf_obj (ir_node *node, ir_node *obj) {
1059 assert (node->op = op_InstOf);
1060 set_irn_n (node, 1, obj);
1064 /* For unary and binary arithmetic operations the access to the
1065 operands can be factored out. Left is the first, right the
1066 second arithmetic value as listed in tech report 0999-33.
1067 unops are: Minus, Abs, Not, Conv, Cast
1068 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1069 Shr, Shrs, Rotate, Cmp */
1073 get_Call_mem (ir_node *node) {
1074 assert (node->op == op_Call);
1075 return get_irn_n(node, 0);
1079 set_Call_mem (ir_node *node, ir_node *mem) {
1080 assert (node->op == op_Call);
1081 set_irn_n(node, 0, mem);
1085 get_Call_ptr (ir_node *node) {
1086 assert (node->op == op_Call);
1087 return get_irn_n(node, 1);
1091 set_Call_ptr (ir_node *node, ir_node *ptr) {
1092 assert (node->op == op_Call);
1093 set_irn_n(node, 1, ptr);
1097 get_Call_param_arr (ir_node *node) {
1098 assert (node->op == op_Call);
1099 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1103 get_Call_n_params (ir_node *node) {
1104 assert (node->op == op_Call);
1105 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1109 get_Call_arity (ir_node *node) {
1110 assert (node->op == op_Call);
1111 return get_Call_n_params(node);
1115 set_Call_arity (ir_node *node, ir_node *arity) {
1116 assert (node->op == op_Call);
1121 get_Call_param (ir_node *node, int pos) {
1122 assert (node->op == op_Call);
1123 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1127 set_Call_param (ir_node *node, int pos, ir_node *param) {
1128 assert (node->op == op_Call);
1129 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1133 get_Call_type (ir_node *node) {
1134 assert (node->op == op_Call);
1135 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1139 set_Call_type (ir_node *node, type *tp) {
1140 assert (node->op == op_Call);
1141 assert ((get_unknown_type() == tp) || is_method_type(tp));
1142 node->attr.call.cld_tp = tp;
1145 int Call_has_callees(ir_node *node) {
1146 assert(node && node->op == op_Call);
1147 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1148 (node->attr.call.callee_arr != NULL));
1151 int get_Call_n_callees(ir_node * node) {
1152 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1153 return ARR_LEN(node->attr.call.callee_arr);
1156 entity * get_Call_callee(ir_node * node, int pos) {
1157 assert(pos >= 0 && pos < get_Call_n_callees(node));
1158 return node->attr.call.callee_arr[pos];
1161 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1162 assert(node->op == op_Call);
1163 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1164 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1166 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1169 void remove_Call_callee_arr(ir_node * node) {
1170 assert(node->op == op_Call);
1171 node->attr.call.callee_arr = NULL;
1174 ir_node * get_CallBegin_ptr (ir_node *node) {
1175 assert(node->op == op_CallBegin);
1176 return get_irn_n(node, 0);
1178 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1179 assert(node->op == op_CallBegin);
1180 set_irn_n(node, 0, ptr);
1182 ir_node * get_CallBegin_call (ir_node *node) {
1183 assert(node->op == op_CallBegin);
1184 return node->attr.callbegin.call;
1186 void set_CallBegin_call (ir_node *node, ir_node *call) {
1187 assert(node->op == op_CallBegin);
1188 node->attr.callbegin.call = call;
1192 get_FuncCall_ptr (ir_node *node) {
1193 assert (node->op == op_FuncCall);
1194 return get_irn_n(node, 0);
1198 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1199 assert (node->op == op_FuncCall);
1200 set_irn_n(node, 0, ptr);
1204 get_FuncCall_param_arr (ir_node *node) {
1205 assert (node->op == op_FuncCall);
1206 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1210 get_FuncCall_n_params (ir_node *node) {
1211 assert (node->op == op_FuncCall);
1212 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1216 get_FuncCall_arity (ir_node *node) {
1217 assert (node->op == op_FuncCall);
1218 return get_FuncCall_n_params(node);
1222 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1223 assert (node->op == op_FuncCall);
1228 get_FuncCall_param (ir_node *node, int pos) {
1229 assert (node->op == op_FuncCall);
1230 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1234 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1235 assert (node->op == op_FuncCall);
1236 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1240 get_FuncCall_type (ir_node *node) {
1241 assert (node->op == op_FuncCall);
1242 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1246 set_FuncCall_type (ir_node *node, type *tp) {
1247 assert (node->op == op_FuncCall);
1248 assert (is_method_type(tp));
1249 node->attr.call.cld_tp = tp;
1252 int FuncCall_has_callees(ir_node *node) {
1253 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1254 (node->attr.call.callee_arr != NULL));
1257 int get_FuncCall_n_callees(ir_node * node) {
1258 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1259 return ARR_LEN(node->attr.call.callee_arr);
1262 entity * get_FuncCall_callee(ir_node * node, int pos) {
1263 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1264 return node->attr.call.callee_arr[pos];
1267 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1268 assert(node->op == op_FuncCall);
1269 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1270 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1272 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1275 void remove_FuncCall_callee_arr(ir_node * node) {
1276 assert(node->op == op_FuncCall);
1277 node->attr.call.callee_arr = NULL;
1282 ir_node * get_##OP##_left(ir_node *node) { \
1283 assert(node->op == op_##OP); \
1284 return get_irn_n(node, node->op->op_index); \
1286 void set_##OP##_left(ir_node *node, ir_node *left) { \
1287 assert(node->op == op_##OP); \
1288 set_irn_n(node, node->op->op_index, left); \
1290 ir_node *get_##OP##_right(ir_node *node) { \
1291 assert(node->op == op_##OP); \
1292 return get_irn_n(node, node->op->op_index + 1); \
1294 void set_##OP##_right(ir_node *node, ir_node *right) { \
1295 assert(node->op == op_##OP); \
1296 set_irn_n(node, node->op->op_index + 1, right); \
1300 ir_node *get_##OP##_op(ir_node *node) { \
1301 assert(node->op == op_##OP); \
1302 return get_irn_n(node, node->op->op_index); \
1304 void set_##OP##_op (ir_node *node, ir_node *op) { \
1305 assert(node->op == op_##OP); \
1306 set_irn_n(node, node->op->op_index, op); \
1316 get_Quot_mem (ir_node *node) {
1317 assert (node->op == op_Quot);
1318 return get_irn_n(node, 0);
1322 set_Quot_mem (ir_node *node, ir_node *mem) {
1323 assert (node->op == op_Quot);
1324 set_irn_n(node, 0, mem);
1330 get_DivMod_mem (ir_node *node) {
1331 assert (node->op == op_DivMod);
1332 return get_irn_n(node, 0);
1336 set_DivMod_mem (ir_node *node, ir_node *mem) {
1337 assert (node->op == op_DivMod);
1338 set_irn_n(node, 0, mem);
1344 get_Div_mem (ir_node *node) {
1345 assert (node->op == op_Div);
1346 return get_irn_n(node, 0);
1350 set_Div_mem (ir_node *node, ir_node *mem) {
1351 assert (node->op == op_Div);
1352 set_irn_n(node, 0, mem);
1358 get_Mod_mem (ir_node *node) {
1359 assert (node->op == op_Mod);
1360 return get_irn_n(node, 0);
1364 set_Mod_mem (ir_node *node, ir_node *mem) {
1365 assert (node->op == op_Mod);
1366 set_irn_n(node, 0, mem);
1383 get_Cast_type (ir_node *node) {
1384 assert (node->op == op_Cast);
1385 return node->attr.cast.totype;
1389 set_Cast_type (ir_node *node, type *to_tp) {
1390 assert (node->op == op_Cast);
1391 node->attr.cast.totype = to_tp;
/* Generic accessors for unary and binary operations.  The operand offset
   is taken from the op's op_index, so these work for any unary/binary op
   regardless of leading memory predecessors.
   NOTE(review): extract is elided — the branches after the asserts
   (the failure-path returns) are not visible here. */

/* True iff the node's op has unary arity. */
1395 is_unop (ir_node *node) {
1396 return (node->op->opar == oparity_unary);

/* Returns the single operand of a unary operation. */
1400 get_unop_op (ir_node *node) {
1401 if (node->op->opar == oparity_unary)
1402 return get_irn_n(node, node->op->op_index);

1404 assert(node->op->opar == oparity_unary);

/* Sets the single operand of a unary operation. */
1409 set_unop_op (ir_node *node, ir_node *op) {
1410 if (node->op->opar == oparity_unary)
1411 set_irn_n(node, node->op->op_index, op);

1413 assert(node->op->opar == oparity_unary);

/* True iff the node's op has binary arity. */
1417 is_binop (ir_node *node) {
1418 return (node->op->opar == oparity_binary);

/* Returns the left operand (at op_index) of a binary operation. */
1422 get_binop_left (ir_node *node) {
1423 if (node->op->opar == oparity_binary)
1424 return get_irn_n(node, node->op->op_index);

1426 assert(node->op->opar == oparity_binary);

/* Sets the left operand of a binary operation. */
1431 set_binop_left (ir_node *node, ir_node *left) {
1432 if (node->op->opar == oparity_binary)
1433 set_irn_n(node, node->op->op_index, left);

1435 assert (node->op->opar == oparity_binary);

/* Returns the right operand (at op_index + 1) of a binary operation. */
1439 get_binop_right (ir_node *node) {
1440 if (node->op->opar == oparity_binary)
1441 return get_irn_n(node, node->op->op_index + 1);

1443 assert(node->op->opar == oparity_binary);

/* Sets the right operand of a binary operation. */
1448 set_binop_right (ir_node *node, ir_node *right) {
1449 if (node->op->opar == oparity_binary)
1450 set_irn_n(node, node->op->op_index + 1, right);

1452 assert (node->op->opar == oparity_binary);
/* Phi predicates and accessors.
   NOTE(review): extract is elided — parts of is_Phi's body (the op lookup
   preceding the op_Filter test) are missing from the visible text. */

/* True for a "real" Phi; during construction a zero-arity Phi0 placeholder
   is not considered a Phi.  Filter counts as Phi only in the
   interprocedural view. */
1455 int is_Phi (ir_node *n) {

1461 if (op == op_Filter) return get_interprocedural_view();

1464 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1465 (get_irn_arity(n) > 0));

/* True for a Phi0 placeholder: a zero-arity Phi while its graph is still
   in the building phase. */
1470 int is_Phi0 (ir_node *n) {

1473 return ((get_irn_op(n) == op_Phi) &&
1474 (get_irn_arity(n) == 0) &&
1475 (get_irg_phase_state(get_irn_irg(n)) == phase_building));

/* Returns the predecessor array of a Phi, skipping the block slot [0]. */
1479 get_Phi_preds_arr (ir_node *node) {
1480 assert (node->op == op_Phi);
1481 return (ir_node **)&(get_irn_in(node)[1]);

/* Returns the number of data predecessors of a Phi (or Phi0). */
1485 get_Phi_n_preds (ir_node *node) {
1486 assert (is_Phi(node) || is_Phi0(node));
1487 return (get_irn_arity(node));

/* NOTE(review): body elided in this extract. */
1491 void set_Phi_n_preds (ir_node *node, int n_preds) {
1492 assert (node->op == op_Phi);

/* Returns the data predecessor at position pos of a Phi (or Phi0). */
1497 get_Phi_pred (ir_node *node, int pos) {
1498 assert (is_Phi(node) || is_Phi0(node));
1499 return get_irn_n(node, pos);

/* Sets the data predecessor at position pos of a Phi (or Phi0). */
1503 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1504 assert (is_Phi(node) || is_Phi0(node));
1505 set_irn_n(node, pos, pred);
/* Generic accessors for memory operations (Load and Store share the
   layout: predecessor 0 = memory, predecessor 1 = pointer). */

/* True iff the node is a Load or a Store. */
1509 int is_memop(ir_node *node) {
1510 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));

/* Returns the memory input of a Load/Store. */
1513 ir_node *get_memop_mem (ir_node *node) {
1514 assert(is_memop(node));
1515 return get_irn_n(node, 0);

/* Sets the memory input of a Load/Store. */
1518 void set_memop_mem (ir_node *node, ir_node *mem) {
1519 assert(is_memop(node));
1520 set_irn_n(node, 0, mem);

/* Returns the pointer input of a Load/Store. */
1523 ir_node *get_memop_ptr (ir_node *node) {
1524 assert(is_memop(node));
1525 return get_irn_n(node, 1);

/* Sets the pointer input of a Load/Store. */
1528 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1529 assert(is_memop(node));
1530 set_irn_n(node, 1, ptr);
/* Load accessors: predecessor 0 = memory, predecessor 1 = pointer;
   mode and volatility live in the node attributes. */

/* Returns the memory input of a Load. */
1534 get_Load_mem (ir_node *node) {
1535 assert (node->op == op_Load);
1536 return get_irn_n(node, 0);

/* Sets the memory input of a Load. */
1540 set_Load_mem (ir_node *node, ir_node *mem) {
1541 assert (node->op == op_Load);
1542 set_irn_n(node, 0, mem);

/* Returns the pointer input of a Load. */
1546 get_Load_ptr (ir_node *node) {
1547 assert (node->op == op_Load);
1548 return get_irn_n(node, 1);

/* Sets the pointer input of a Load. */
1552 set_Load_ptr (ir_node *node, ir_node *ptr) {
1553 assert (node->op == op_Load);
1554 set_irn_n(node, 1, ptr);

/* Returns the mode of the value the Load reads. */
1558 get_Load_mode (ir_node *node) {
1559 assert (node->op == op_Load);
1560 return node->attr.load.load_mode;

/* Sets the mode of the value the Load reads. */
1564 set_Load_mode (ir_node *node, ir_mode *mode) {
1565 assert (node->op == op_Load);
1566 node->attr.load.load_mode = mode;

/* Returns the volatility attribute of a Load. */
1570 get_Load_volatility (ir_node *node) {
1571 assert (node->op == op_Load);
1572 return node->attr.load.volatility;

/* Sets the volatility attribute of a Load. */
1576 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1577 assert (node->op == op_Load);
1578 node->attr.load.volatility = volatility;
/* Store accessors: predecessor 0 = memory, 1 = pointer, 2 = value;
   volatility lives in the node attributes. */

/* Returns the memory input of a Store. */
1583 get_Store_mem (ir_node *node) {
1584 assert (node->op == op_Store);
1585 return get_irn_n(node, 0);

/* Sets the memory input of a Store. */
1589 set_Store_mem (ir_node *node, ir_node *mem) {
1590 assert (node->op == op_Store);
1591 set_irn_n(node, 0, mem);

/* Returns the pointer input of a Store. */
1595 get_Store_ptr (ir_node *node) {
1596 assert (node->op == op_Store);
1597 return get_irn_n(node, 1);

/* Sets the pointer input of a Store. */
1601 set_Store_ptr (ir_node *node, ir_node *ptr) {
1602 assert (node->op == op_Store);
1603 set_irn_n(node, 1, ptr);

/* Returns the value input of a Store. */
1607 get_Store_value (ir_node *node) {
1608 assert (node->op == op_Store);
1609 return get_irn_n(node, 2);

/* Sets the value input of a Store. */
1613 set_Store_value (ir_node *node, ir_node *value) {
1614 assert (node->op == op_Store);
1615 set_irn_n(node, 2, value);

/* Returns the volatility attribute of a Store. */
1619 get_Store_volatility (ir_node *node) {
1620 assert (node->op == op_Store);
1621 return node->attr.store.volatility;

/* Sets the volatility attribute of a Store. */
1625 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1626 assert (node->op == op_Store);
1627 node->attr.store.volatility = volatility;
/* Alloc accessors: predecessor 0 = memory, 1 = size; the allocated type
   and the where-attribute (stack/heap) live in attr.a. */

/* Returns the memory input of an Alloc. */
1632 get_Alloc_mem (ir_node *node) {
1633 assert (node->op == op_Alloc);
1634 return get_irn_n(node, 0);

/* Sets the memory input of an Alloc. */
1638 set_Alloc_mem (ir_node *node, ir_node *mem) {
1639 assert (node->op == op_Alloc);
1640 set_irn_n(node, 0, mem);

/* Returns the size input of an Alloc. */
1644 get_Alloc_size (ir_node *node) {
1645 assert (node->op == op_Alloc);
1646 return get_irn_n(node, 1);

/* Sets the size input of an Alloc. */
1650 set_Alloc_size (ir_node *node, ir_node *size) {
1651 assert (node->op == op_Alloc);
1652 set_irn_n(node, 1, size);

/* Returns the allocated type; the stored attribute is normalized through
   skip_tid() on the fly (assignment inside the return is intentional). */
1656 get_Alloc_type (ir_node *node) {
1657 assert (node->op == op_Alloc);
1658 return node->attr.a.type = skip_tid(node->attr.a.type);

/* Sets the allocated type of an Alloc. */
1662 set_Alloc_type (ir_node *node, type *tp) {
1663 assert (node->op == op_Alloc);
1664 node->attr.a.type = tp;

/* Returns where the Alloc allocates (e.g. stack vs. heap). */
1668 get_Alloc_where (ir_node *node) {
1669 assert (node->op == op_Alloc);
1670 return node->attr.a.where;

/* Sets where the Alloc allocates. */
1674 set_Alloc_where (ir_node *node, where_alloc where) {
1675 assert (node->op == op_Alloc);
1676 node->attr.a.where = where;
/* Free accessors: predecessor 0 = memory, 1 = pointer, 2 = size; the
   freed type lives in attr.f. */

/* Returns the memory input of a Free. */
1681 get_Free_mem (ir_node *node) {
1682 assert (node->op == op_Free);
1683 return get_irn_n(node, 0);

/* Sets the memory input of a Free. */
1687 set_Free_mem (ir_node *node, ir_node *mem) {
1688 assert (node->op == op_Free);
1689 set_irn_n(node, 0, mem);

/* Returns the pointer input of a Free. */
1693 get_Free_ptr (ir_node *node) {
1694 assert (node->op == op_Free);
1695 return get_irn_n(node, 1);

/* Sets the pointer input of a Free. */
1699 set_Free_ptr (ir_node *node, ir_node *ptr) {
1700 assert (node->op == op_Free);
1701 set_irn_n(node, 1, ptr);

/* Returns the size input of a Free. */
1705 get_Free_size (ir_node *node) {
1706 assert (node->op == op_Free);
1707 return get_irn_n(node, 2);

/* Sets the size input of a Free. */
1711 set_Free_size (ir_node *node, ir_node *size) {
1712 assert (node->op == op_Free);
1713 set_irn_n(node, 2, size);

/* Returns the freed type, normalized through skip_tid() on the fly
   (assignment inside the return is intentional). */
1717 get_Free_type (ir_node *node) {
1718 assert (node->op == op_Free);
1719 return node->attr.f = skip_tid(node->attr.f);

/* Sets the freed type of a Free.
   NOTE(review): the assignment line is elided in this extract. */
1723 set_Free_type (ir_node *node, type *tp) {
1724 assert (node->op == op_Free);
/* Sync accessors. */

/* Returns the predecessor array of a Sync, skipping the block slot [0]. */
1729 get_Sync_preds_arr (ir_node *node) {
1730 assert (node->op == op_Sync);
1731 return (ir_node **)&(get_irn_in(node)[1]);

/* Returns the number of predecessors of a Sync. */
1735 get_Sync_n_preds (ir_node *node) {
1736 assert (node->op == op_Sync);
1737 return (get_irn_arity(node));

/* NOTE(review): body elided in this extract. */
1742 set_Sync_n_preds (ir_node *node, int n_preds) {
1743 assert (node->op == op_Sync);

/* Returns the predecessor at position pos of a Sync. */
1748 get_Sync_pred (ir_node *node, int pos) {
1749 assert (node->op == op_Sync);
1750 return get_irn_n(node, pos);

/* Sets the predecessor at position pos of a Sync. */
1754 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1755 assert (node->op == op_Sync);
1756 set_irn_n(node, pos, pred);
/* Proj accessors.  is_Proj() also accepts Filter outside the
   interprocedural view, hence the opcode dispatch in get_Proj_proj. */

/* Returns the projected node (predecessor 0) of a Proj. */
1760 get_Proj_pred (ir_node *node) {
1761 assert (is_Proj(node));
1762 return get_irn_n(node, 0);

/* Sets the projected node of a Proj. */
1766 set_Proj_pred (ir_node *node, ir_node *pred) {
1767 assert (is_Proj(node));
1768 set_irn_n(node, 0, pred);

/* Returns the projection number, reading the attribute that matches the
   actual opcode (Proj or Filter). */
1772 get_Proj_proj (ir_node *node) {
1773 assert (is_Proj(node));
1774 if (get_irn_opcode(node) == iro_Proj) {
1775 return node->attr.proj;

1777 assert(get_irn_opcode(node) == iro_Filter);
1778 return node->attr.filter.proj;

/* Sets the projection number of a (real) Proj. */
1783 set_Proj_proj (ir_node *node, long proj) {
1784 assert (node->op == op_Proj);
1785 node->attr.proj = proj;
/* Tuple accessors. */

/* Returns the predecessor array of a Tuple, skipping the block slot [0]. */
1789 get_Tuple_preds_arr (ir_node *node) {
1790 assert (node->op == op_Tuple);
1791 return (ir_node **)&(get_irn_in(node)[1]);

/* Returns the number of predecessors of a Tuple. */
1795 get_Tuple_n_preds (ir_node *node) {
1796 assert (node->op == op_Tuple);
1797 return (get_irn_arity(node));

/* NOTE(review): body elided in this extract. */
1802 set_Tuple_n_preds (ir_node *node, int n_preds) {
1803 assert (node->op == op_Tuple);

/* Returns the predecessor at position pos of a Tuple. */
1808 get_Tuple_pred (ir_node *node, int pos) {
1809 assert (node->op == op_Tuple);
1810 return get_irn_n(node, pos);

/* Sets the predecessor at position pos of a Tuple. */
1814 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1815 assert (node->op == op_Tuple);
1816 set_irn_n(node, pos, pred);
/* Returns the forwarded node (predecessor 0) of an Id. */
1820 get_Id_pred (ir_node *node) {
1821 assert (node->op == op_Id);
1822 return get_irn_n(node, 0);

/* Sets the forwarded node of an Id. */
1826 set_Id_pred (ir_node *node, ir_node *pred) {
1827 assert (node->op == op_Id);
1828 set_irn_n(node, 0, pred);
1831 ir_node *get_Confirm_value (ir_node *node) {
1832 assert (node->op == op_Confirm);
1833 return get_irn_n(node, 0);
1835 void set_Confirm_value (ir_node *node, ir_node *value) {
1836 assert (node->op == op_Confirm);
1837 set_irn_n(node, 0, value);
1839 ir_node *get_Confirm_bound (ir_node *node) {
1840 assert (node->op == op_Confirm);
1841 return get_irn_n(node, 1);
1843 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1844 assert (node->op == op_Confirm);
1845 set_irn_n(node, 0, bound);
1847 pn_Cmp get_Confirm_cmp (ir_node *node) {
1848 assert (node->op == op_Confirm);
1849 return node->attr.confirm_cmp;
1851 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1852 assert (node->op == op_Confirm);
1853 node->attr.confirm_cmp = cmp;
/* Filter accessors.
   NOTE(review): the bodies of get_Filter_pred/set_Filter_pred are elided
   in this extract. */

/* Returns the predecessor of a Filter. */
1858 get_Filter_pred (ir_node *node) {
1859 assert(node->op == op_Filter);

/* Sets the predecessor of a Filter. */
1863 set_Filter_pred (ir_node *node, ir_node *pred) {
1864 assert(node->op == op_Filter);

/* Returns the projection number of a Filter. */
1868 get_Filter_proj(ir_node *node) {
1869 assert(node->op == op_Filter);
1870 return node->attr.filter.proj;

/* Sets the projection number of a Filter. */
1873 set_Filter_proj (ir_node *node, long proj) {
1874 assert(node->op == op_Filter);
1875 node->attr.filter.proj = proj;
1878 /* Don't use get_irn_arity, get_irn_n in implementation as access
1879 shall work independent of view!!! */

/* Replaces the interprocedural (call-graph) predecessor array of a Filter.
   Slot [0] of in_cg mirrors the block predecessor; the arrays are only
   reallocated when the arity changes. */
1880 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1881 assert(node->op == op_Filter);
1882 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1883 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1884 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1885 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1886 node->attr.filter.in_cg[0] = node->in[0];

1888 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);

/* Sets one interprocedural predecessor of a Filter (pos is 0-based,
   offset by 1 internally to skip the block slot). */
1891 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1892 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1893 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1894 node->attr.filter.in_cg[pos + 1] = pred;

/* Returns the number of interprocedural predecessors of a Filter. */
1896 int get_Filter_n_cg_preds(ir_node *node) {
1897 assert(node->op == op_Filter && node->attr.filter.in_cg);
1898 return (ARR_LEN(node->attr.filter.in_cg) - 1);

/* Returns the interprocedural predecessor at position pos of a Filter. */
1900 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {

1902 assert(node->op == op_Filter && node->attr.filter.in_cg &&

1904 arity = ARR_LEN(node->attr.filter.in_cg);
1905 assert(pos < arity - 1);
1906 return node->attr.filter.in_cg[pos + 1];
/* Returns the graph a node belongs to by walking to its Block and reading
   the irg stored in the block attributes. */
1911 get_irn_irg(ir_node *node) {
1912 if (get_irn_op(node) != op_Block)
1913 node = get_nodes_block(node);
1914 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1915 node = get_nodes_block(node);
1916 assert(get_irn_op(node) == op_Block);
1917 return node->attr.block.irg;
1921 /*----------------------------------------------------------------*/
1922 /* Auxiliary routines */
1923 /*----------------------------------------------------------------*/
/* Returns the operand of a Proj; any other node (including NULL) is
   presumably returned unchanged — the fall-through return is elided in
   this extract. */
1926 skip_Proj (ir_node *node) {
1927 /* don't assert node !!! */
1928 if (node && is_Proj(node)) {
1929 return get_Proj_pred(node);
/* If node is Proj(Tuple), returns the Tuple predecessor selected by the
   projection number, resolving Id chains and nested Tuples on the way.
   Does nothing when normalization is disabled. */
1936 skip_Tuple (ir_node *node) {

1939 if (!get_opt_normalize()) return node;

1941 node = skip_Id(node);
1942 if (get_irn_op(node) == op_Proj) {
1943 pred = skip_Id(get_Proj_pred(node));
1944 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1945 pred = skip_Id(skip_Tuple(pred));
1946 if (get_irn_op(pred) == op_Tuple)
1947 return get_Tuple_pred(pred, get_Proj_proj(node));
1952 /** returns operand of node if node is a Cast */
1953 ir_node *skip_Cast (ir_node *node) {
1954 if (node && get_irn_op(node) == op_Cast) {
/* For a Cast, return its operand with Id chains resolved. */
1955 return skip_Id(get_irn_n(node, 0));
1962 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1963 than any other approach, as Id chains are resolved and all point to the real node, or
1964 all id's are self loops. */
/* NOTE(review): two definitions of skip_Id appear in this extract (see the
   second one below); in the full file one of them is presumably disabled
   by conditional compilation — verify which version is live. */
1966 skip_Id (ir_node *node) {
1967 /* don't assert node !!! */

1969 if (!get_opt_normalize()) return node;

1971 /* Don't use get_Id_pred: We get into an endless loop for
1972 self-referencing Ids. */
1973 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
/* Remember the predecessor, then temporarily turn the node into a
   self-loop so the recursion terminates on cycles. */
1974 ir_node *rem_pred = node->in[0+1];

1977 assert (get_irn_arity (node) > 0);

1979 node->in[0+1] = node;
1980 res = skip_Id(rem_pred);
1981 if (res->op == op_Id) /* self-loop */ return node;

/* Path compression: point directly at the chain end. */
1983 node->in[0+1] = res;
1990 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1991 than any other approach, as Id chains are resolved and all point to the real node, or
1992 all id's are self loops. */
/* Second (duplicate) definition of skip_Id — adds fast-path returns for
   non-Id nodes and one-step chains.  NOTE(review): presumably only one of
   the two definitions is compiled in the full file. */
1994 skip_Id (ir_node *node) {

1996 /* don't assert node !!! */

1998 if (!node || (node->op != op_Id)) return node;

2000 if (!get_opt_normalize()) return node;

2002 /* Don't use get_Id_pred: We get into an endless loop for
2003 self-referencing Ids. */
2004 pred = node->in[0+1];

2006 if (pred->op != op_Id) return pred;

2008 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2009 ir_node *rem_pred, *res;

2011 if (pred->op != op_Id) return pred; /* shortcut */

2014 assert (get_irn_arity (node) > 0);

/* Temporary self-loop guards against Id cycles during the recursion. */
2016 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2017 res = skip_Id(rem_pred);
2018 if (res->op == op_Id) /* self-loop */ return node;

2020 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Simple opcode predicates.
   NOTE(review): return statements of is_Bad's branches are elided in this
   extract. */

/* True iff node is non-NULL and a Bad node. */
2029 is_Bad (ir_node *node) {

2031 if ((node) && get_irn_opcode(node) == iro_Bad)

/* True iff node is not a Block. */
2037 is_no_Block (ir_node *node) {

2039 return (get_irn_opcode(node) != iro_Block);

/* True iff node is a Block. */
2043 is_Block (ir_node *node) {

2045 return (get_irn_opcode(node) == iro_Block);

2048 /* returns true if node is a Unknown node. */

2050 is_Unknown (ir_node *node) {

2052 return (get_irn_opcode(node) == iro_Unknown);

/* True for Proj nodes; outside the interprocedural view Filter nodes
   also count as Proj. */
2056 is_Proj (const ir_node *node) {

2058 return node->op == op_Proj
2059 || (!get_interprocedural_view() && node->op == op_Filter);
2062 /* Returns true if the operation manipulates control flow. */

2064 is_cfop(ir_node *node) {
2065 return is_cfopcode(get_irn_op(node));

2068 /* Returns true if the operation manipulates interprocedural control flow:
2069 CallBegin, EndReg, EndExcept */
2070 int is_ip_cfop(ir_node *node) {
2071 return is_ip_cfopcode(get_irn_op(node));

2074 /* Returns true if the operation can change the control flow because

2077 is_fragile_op(ir_node *node) {
2078 return is_op_fragile(get_irn_op(node));

2081 /* Returns the memory operand of fragile operations. */
/* NOTE(review): most of the switch's case labels are elided in this
   extract; visible cases return predecessor 0 as the memory operand. */
2082 ir_node *get_fragile_op_mem(ir_node *node) {
2083 assert(node && is_fragile_op(node));

2085 switch (get_irn_opcode (node)) {

2094 return get_irn_n(node, 0);

2099 assert(0 && "should not be reached");

2104 /* Returns true if the operation is a forking control flow operation. */

2106 is_forking_op(ir_node *node) {
2107 return is_op_forking(get_irn_op(node));
2110 #ifdef DEBUG_libfirm
/* Debug helper: prints a node's op, mode, number, address, its block and
   all predecessors to stdout.  Compiled to a no-op without DEBUG_libfirm. */
2111 void dump_irn (ir_node *n) {
2112 int i, arity = get_irn_arity(n);
2113 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);

/* get_irn_n(n, -1) yields the node's block. */
2115 ir_node *pred = get_irn_n(n, -1);
2116 printf("  block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2117 get_irn_node_nr(pred), (void *)pred);

2119 printf("  preds: \n");
2120 for (i = 0; i < arity; ++i) {
2121 ir_node *pred = get_irn_n(n, i);
2122 printf("    %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2123 get_irn_node_nr(pred), (void *)pred);

2127 #else  /* DEBUG_libfirm */
2128 void dump_irn (ir_node *n) {}
2129 #endif /* DEBUG_libfirm */