3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
/* Printable names for the pnc (projection/comparison) condition codes,
   indexed by the pnc constant value.  NOTE(review): the tail of the
   initializer ("Ne", "True") and the closing brace are elided from
   this excerpt. */
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46 * Returns the pnc name from a pnc constant.
/* Returns the printable name of comparison condition code @p pnc.
   No bounds check is performed: the caller must pass a valid pnc
   constant (a legal index into pnc_name_arr). */
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
/* Returns the negated condition code for @p pnc: each relation maps
   to its logical complement (e.g. Lt <-> Uge, Eq <-> Ne), so that
   a comparison with the result succeeds exactly when a comparison
   with @p pnc fails.
   NOTE(review): the "switch (pnc)" header line and the closing brace
   are elided from this excerpt. */
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
/* unreachable for valid pnc values; present only to silence the
   compiler's missing-return warning */
75 return 99; /* to shut up gcc */
/* Printable names for the projections of the Start node (pns),
   indexed by projection number. */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
/* Printable names for the symconst_kind values, indexed by kind. */
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
88 * Indicates, whether additional data can be registered to ir nodes.
89 * If set to 1, this is not possible anymore.
91 static int forbid_new_data = 0;
94 * The amount of additional space for custom data to be allocated upon
95 * creating a new node.
97 static size_t additional_node_data_size = 0;
/* Reserves @p size additional bytes of custom data in every ir_node
   allocated from now on.  Returns the accumulated total size;
   presumably the previous total serves as the caller's offset into
   the extra area -- TODO(review) confirm against callers.
   Must not be called once forbid_new_data is set. */
100 size_t register_additional_node_data(size_t size)
102 assert(!forbid_new_data && "Too late to register additional node data");
107 return additional_node_data_size += size;
114 /* Forbid the addition of new data to an ir node. */
119 * irnode constructor.
120 * Create a new irnode in irg, with an op, mode, arity and
121 * some incoming irnodes.
122 * If arity is negative, a node with a dynamic array is created.
/* irnode constructor: creates a new node in @p irg with opcode @p op,
   mode @p mode and @p arity predecessors taken from @p in.
   A negative arity yields a node with a dynamically growing in-array.
   The node is allocated on the graph's obstack; the registered
   additional custom data is placed *before* the ir_node itself, so
   the returned pointer is offset past it. */
125 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
126 int arity, ir_node **in)
/* total size: node header + opcode-specific attributes + custom data */
129 size_t node_size = offsetof(ir_node, attr) + op->attr_size + additional_node_data_size;
132 assert(irg && op && mode);
133 p = obstack_alloc (irg->obst, node_size);
134 memset(p, 0, node_size);
/* the ir_node proper starts after the custom-data area */
135 res = (ir_node *) (p + additional_node_data_size);
137 res->kind = k_ir_node;
/* dynamic in-array: slot 0 is reserved for the block predecessor */
143 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
/* fixed in-array on the obstack: slot 0 is the block, then the ins */
145 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
146 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
149 set_irn_dbg_info(res, db);
153 res->node_nr = get_irp_new_node_nr();
161 /* Copies all attributes stored in the old node to the new node.
162 Assumes both have the same opcode and sufficient size. */
/* Copies all attributes stored in the old node to the new node.
   Assumes both have the same opcode and sufficient size.
   For Call nodes the callee array is cleared afterwards, because the
   raw memcpy would leave both nodes aliasing the same callee_arr. */
164 copy_attrs (const ir_node *old_node, ir_node *new_node) {
165 assert(get_irn_op(old_node) == get_irn_op(new_node));
166 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
167 if (get_irn_op(new_node) == op_Call) remove_Call_callee_arr(new_node);
170 /*-- getting some parameters from ir_nodes --*/
173 (is_ir_node)(const void *thing) {
174 return __is_ir_node(thing);
178 (get_irn_intra_arity)(const ir_node *node) {
179 return __get_irn_intra_arity(node);
183 (get_irn_inter_arity)(const ir_node *node) {
184 return __get_irn_inter_arity(node);
187 int (*__get_irn_arity)(const ir_node *node) = __get_irn_intra_arity;
190 (get_irn_arity)(const ir_node *node) {
191 return __get_irn_arity(node);
194 /* Returns the array with ins. This array is shifted with respect to the
195 array accessed by get_irn_n: The block operand is at position 0 not -1.
196 (@@@ This should be changed.)
197 The order of the predecessors in this array is not guaranteed, except that
198 lists of operands as predecessors of Block or arguments of a Call are
/* Returns the raw in-array of @p node (block operand at index 0).
   Under the interprocedural view, Filter nodes and Blocks that carry
   an interprocedural in_cg array return that array instead of the
   intraprocedural one. */
201 get_irn_in (const ir_node *node) {
203 if (get_interprocedural_view()) { /* handle Filter and Block specially */
204 if (get_irn_opcode(node) == iro_Filter) {
/* Filter nodes must always have an interprocedural in-array here */
205 assert(node->attr.filter.in_cg);
206 return node->attr.filter.in_cg;
207 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
208 return node->attr.block.in_cg;
210 /* else fall through */
/* Replaces the complete predecessor array of @p node with the @p arity
   nodes in @p in.  Under the interprocedural view, Filter nodes and
   Blocks with an in_cg array have that array replaced instead of the
   intraprocedural one. */
216 set_irn_in (ir_node *node, int arity, ir_node **in) {
219 if (get_interprocedural_view()) { /* handle Filter and Block specially */
220 if (get_irn_opcode(node) == iro_Filter) {
221 assert(node->attr.filter.in_cg);
222 arr = &node->attr.filter.in_cg;
223 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
224 arr = &node->attr.block.in_cg;
/* arity changed: allocate a fresh array on the graph's obstack;
   the block predecessor at slot 0 is saved first (the restoring
   store is elided from this excerpt -- NOTE(review)) */
231 if (arity != ARR_LEN(*arr) - 1) {
232 ir_node * block = (*arr)[0];
233 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
/* backedge bookkeeping depends on the in-array length; repair it */
236 fix_backedges(current_ir_graph->obst, node);
237 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
241 (get_irn_intra_n)(const ir_node *node, int n) {
242 return __get_irn_intra_n (node, n);
246 (get_irn_inter_n)(const ir_node *node, int n) {
247 return __get_irn_inter_n (node, n);
250 ir_node *(*__get_irn_n)(const ir_node *node, int n) = __get_irn_intra_n;
253 (get_irn_n)(const ir_node *node, int n) {
254 return __get_irn_n(node, n);
/* Sets predecessor @p n of @p node to @p in.  Position -1 addresses
   the block predecessor (stored at in[0]); operand i lives at in[i+1].
   For Filter nodes the block predecessor is changed in both the
   intra- and interprocedural view; under the interprocedural view,
   Filter and Block nodes with an in_cg array are updated there. */
258 set_irn_n (ir_node *node, int n, ir_node *in) {
259 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
260 assert(in && in->kind == k_ir_node);
261 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
262 /* Change block pred in both views! */
263 node->in[n + 1] = in;
264 assert(node->attr.filter.in_cg);
265 node->attr.filter.in_cg[n + 1] = in;
268 if (get_interprocedural_view()) { /* handle Filter and Block specially */
269 if (get_irn_opcode(node) == iro_Filter) {
270 assert(node->attr.filter.in_cg);
271 node->attr.filter.in_cg[n + 1] = in;
273 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
274 node->attr.block.in_cg[n + 1] = in;
277 /* else fall through */
/* default: ordinary intraprocedural in-array */
279 node->in[n + 1] = in;
283 (get_irn_mode)(const ir_node *node) {
284 return __get_irn_mode(node);
288 (set_irn_mode)(ir_node *node, ir_mode *mode)
290 __set_irn_mode(node, mode);
294 get_irn_modecode (const ir_node *node)
297 return node->mode->code;
300 /** Gets the string representation of the mode .*/
302 get_irn_modename (const ir_node *node)
305 return get_mode_name(node->mode);
309 get_irn_modeident (const ir_node *node)
312 return get_mode_ident(node->mode);
316 (get_irn_op)(const ir_node *node)
318 return __get_irn_op(node);
321 /* should be private to the library: */
323 set_irn_op (ir_node *node, ir_op *op)
330 (get_irn_opcode)(const ir_node *node)
332 return __get_irn_opcode(node);
336 get_irn_opname (const ir_node *node)
339 if ((get_irn_op((ir_node *)node) == op_Phi) &&
340 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
341 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
342 return get_id_str(node->op->name);
346 get_irn_opident (const ir_node *node)
349 return node->op->name;
353 (get_irn_visited)(const ir_node *node)
355 return __get_irn_visited(node);
359 (set_irn_visited)(ir_node *node, unsigned long visited)
361 __set_irn_visited(node, visited);
365 (mark_irn_visited)(ir_node *node) {
366 __mark_irn_visited(node);
370 (irn_not_visited)(const ir_node *node) {
371 return __irn_not_visited(node);
375 (irn_visited)(const ir_node *node) {
376 return __irn_visited(node);
380 (set_irn_link)(ir_node *node, void *link) {
381 __set_irn_link(node, link);
385 (get_irn_link)(const ir_node *node) {
386 return __get_irn_link(node);
390 (get_irn_pinned)(const ir_node *node) {
391 return __get_irn_pinned(node);
394 void set_irn_pinned(ir_node *node, op_pin_state state) {
395 /* due to optimization an opt may be turned into a Tuple */
396 if (get_irn_op(node) == op_Tuple)
399 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
400 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
402 node->attr.except.pin_state = state;
405 #ifdef DO_HEAPANALYSIS
406 /* Access the abstract interpretation information of a node.
407 Returns NULL if no such information is available. */
408 struct abstval *get_irn_abst_value(ir_node *n) {
411 /* Set the abstract interpretation information of a node. */
412 void set_irn_abst_value(ir_node *n, struct abstval *os) {
415 struct section *firm_get_irn_section(ir_node *n) {
418 void firm_set_irn_section(ir_node *n, struct section *s) {
422 /* Dummies needed for firmjni. */
423 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
424 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
425 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
426 void firm_set_irn_section(ir_node *n, struct section *s) {}
427 #endif /* DO_HEAPANALYSIS */
430 /* Outputs a unique number for this node */
432 get_irn_node_nr(const ir_node *node) {
435 return node->node_nr;
442 get_irn_const_attr (ir_node *node)
444 assert (node->op == op_Const);
445 return node->attr.con;
449 get_irn_proj_attr (ir_node *node)
451 assert (node->op == op_Proj);
452 return node->attr.proj;
456 get_irn_alloc_attr (ir_node *node)
458 assert (node->op == op_Alloc);
463 get_irn_free_attr (ir_node *node)
465 assert (node->op == op_Free);
466 return node->attr.f = skip_tid(node->attr.f);
470 get_irn_symconst_attr (ir_node *node)
472 assert (node->op == op_SymConst);
477 get_irn_call_attr (ir_node *node)
479 assert (node->op == op_Call);
480 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
484 get_irn_sel_attr (ir_node *node)
486 assert (node->op == op_Sel);
491 get_irn_phi_attr (ir_node *node)
493 assert (node->op == op_Phi);
494 return node->attr.phi0_pos;
498 get_irn_block_attr (ir_node *node)
500 assert (node->op == op_Block);
501 return node->attr.block;
505 get_irn_load_attr (ir_node *node)
507 assert (node->op == op_Load);
508 return node->attr.load;
512 get_irn_store_attr (ir_node *node)
514 assert (node->op == op_Store);
515 return node->attr.store;
519 get_irn_except_attr (ir_node *node)
521 assert (node->op == op_Div || node->op == op_Quot ||
522 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
523 return node->attr.except;
526 /** manipulate fields of individual nodes **/
528 /* this works for all except Block */
530 get_nodes_block (const ir_node *node) {
531 assert (!(node->op == op_Block));
532 return get_irn_n(node, -1);
536 set_nodes_block (ir_node *node, ir_node *block) {
537 assert (!(node->op == op_Block));
538 set_irn_n(node, -1, block);
541 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
542 * from Start. If so returns frame type, else Null. */
/* Tests whether @p n is the frame pointer, i.e. a
   Proj(pn_Start_P_frame_base) of a Start node.
   Returns the graph's frame type if so, else NULL (return of the
   NULL branch is elided from this excerpt). */
543 type *is_frame_pointer(ir_node *n) {
544 if ((get_irn_op(n) == op_Proj) &&
545 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
546 ir_node *start = get_Proj_pred(n);
547 if (get_irn_op(start) == op_Start) {
548 return get_irg_frame_type(get_irn_irg(start));
554 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
555 * from Start. If so returns global type, else Null. */
/* Tests whether @p n is the globals pointer, i.e. a
   Proj(pn_Start_P_globals) of a Start node.
   Returns the global type if so, else NULL (return of the NULL
   branch is elided from this excerpt). */
556 type *is_globals_pointer(ir_node *n) {
557 if ((get_irn_op(n) == op_Proj) &&
558 (get_Proj_proj(n) == pn_Start_P_globals)) {
559 ir_node *start = get_Proj_pred(n);
560 if (get_irn_op(start) == op_Start) {
561 return get_glob_type();
567 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
568 * from Start. If so returns 1, else 0. */
/* Tests whether @p n is the value-argument base pointer, i.e. a
   Proj(pn_Start_P_value_arg_base) of a Start node.
   Returns 1 if so, else 0 (the return statements are elided from
   this excerpt). */
569 int is_value_arg_pointer(ir_node *n) {
570 if ((get_irn_op(n) == op_Proj) &&
571 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
572 (get_irn_op(get_Proj_pred(n)) == op_Start))
577 /* Returns an array with the predecessors of the Block. Depending on
578 the implementation of the graph data structure this can be a copy of
579 the internal representation of predecessors as well as the internal
580 array itself. Therefore writing to this array might obstruct the ir. */
582 get_Block_cfgpred_arr (ir_node *node)
584 assert ((node->op == op_Block));
585 return (ir_node **)&(get_irn_in(node)[1]);
590 get_Block_n_cfgpreds (ir_node *node) {
591 assert ((node->op == op_Block));
592 return get_irn_arity(node);
596 get_Block_cfgpred (ir_node *node, int pos) {
597 assert(-1 <= pos && pos < get_irn_arity(node));
598 assert(node->op == op_Block);
599 return get_irn_n(node, pos);
603 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
604 assert (node->op == op_Block);
605 set_irn_n(node, pos, pred);
609 get_Block_matured (ir_node *node) {
610 assert (node->op == op_Block);
611 return node->attr.block.matured;
615 set_Block_matured (ir_node *node, bool matured) {
616 assert (node->op == op_Block);
617 node->attr.block.matured = matured;
620 get_Block_block_visited (ir_node *node) {
621 assert (node->op == op_Block);
622 return node->attr.block.block_visited;
626 set_Block_block_visited (ir_node *node, unsigned long visit) {
627 assert (node->op == op_Block);
628 node->attr.block.block_visited = visit;
631 /* For this current_ir_graph must be set. */
633 mark_Block_block_visited (ir_node *node) {
634 assert (node->op == op_Block);
635 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
639 Block_not_block_visited(ir_node *node) {
640 assert (node->op == op_Block);
641 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
645 get_Block_graph_arr (ir_node *node, int pos) {
646 assert (node->op == op_Block);
647 return node->attr.block.graph_arr[pos+1];
651 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
652 assert (node->op == op_Block);
653 node->attr.block.graph_arr[pos+1] = value;
/* Installs the interprocedural control-flow predecessor array of a
   Block: (re)allocates attr.block.in_cg (slot 0 unused, set to NULL)
   and a matching backedge array when the arity changed, then copies
   the @p arity predecessors from @p in.
   fix_backedges operates depending on the interprocedural view, so
   the view is forced on around the call and restored afterwards. */
656 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
657 assert(node->op == op_Block);
658 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
659 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
660 node->attr.block.in_cg[0] = NULL;
661 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
663 /* Fix backedge array. fix_backedges operates depending on
664 interprocedural_view. */
665 int ipv = get_interprocedural_view();
666 set_interprocedural_view(true);
667 fix_backedges(current_ir_graph->obst, node);
668 set_interprocedural_view(ipv);
671 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
674 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
675 assert(node->op == op_Block &&
676 node->attr.block.in_cg &&
677 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
678 node->attr.block.in_cg[pos + 1] = pred;
681 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
682 assert(node->op == op_Block);
683 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
686 int get_Block_cg_n_cfgpreds(ir_node * node) {
687 assert(node->op == op_Block);
688 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
691 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
692 assert(node->op == op_Block && node->attr.block.in_cg);
693 return node->attr.block.in_cg[pos + 1];
696 void remove_Block_cg_cfgpred_arr(ir_node * node) {
697 assert(node->op == op_Block);
698 node->attr.block.in_cg = NULL;
701 ir_node *(set_Block_dead)(ir_node *block) {
702 return __set_Block_dead(block);
705 int (is_Block_dead)(const ir_node *block) {
706 return __is_Block_dead(block);
710 set_Start_irg(ir_node *node, ir_graph *irg) {
711 assert(node->op == op_Start);
712 assert(is_ir_graph(irg));
713 assert(0 && " Why set irg? -- use set_irn_irg");
717 get_End_n_keepalives(ir_node *end) {
718 assert (end->op == op_End);
719 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
723 get_End_keepalive(ir_node *end, int pos) {
724 assert (end->op == op_End);
725 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
729 add_End_keepalive (ir_node *end, ir_node *ka) {
730 assert (end->op == op_End);
731 ARR_APP1 (ir_node *, end->in, ka);
735 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
736 assert (end->op == op_End);
737 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
741 free_End (ir_node *end) {
742 assert (end->op == op_End);
744 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
745 end->in = NULL; /* @@@ make sure we get an error if we use the
746 in array afterwards ... */
751 > Implementing the case construct (which is where the constant Proj node is
752 > important) involves far more than simply determining the constant values.
753 > We could argue that this is more properly a function of the translator from
754 > Firm to the target machine. That could be done if there was some way of
755 > projecting "default" out of the Cond node.
756 I know it's complicated.
757 Basically there are two problems:
758 - determining the gaps between the projs
759 - determining the biggest case constant to know the proj number for
761 I see several solutions:
762 1. Introduce a ProjDefault node. Solves both problems.
763 This means to extend all optimizations executed during construction.
764 2. Give the Cond node for switch two flavors:
765 a) there are no gaps in the projs (existing flavor)
766 b) gaps may exist, default proj is still the Proj with the largest
767 projection number. This covers also the gaps.
768 3. Fix the semantic of the Cond to that of 2b)
770 Solution 2 seems to be the best:
771 Computing the gaps in the Firm representation is not too hard, i.e.,
772 libFIRM can implement a routine that transforms between the two
773 flavours. This is also possible for 1) but 2) does not require to
774 change any existing optimization.
775 Further it should be far simpler to determine the biggest constant than
777 I don't want to choose 3) as 2a) seems to have advantages for
778 dataflow analysis and 3) does not allow to convert the representation to
782 get_Cond_selector (ir_node *node) {
783 assert (node->op == op_Cond);
784 return get_irn_n(node, 0);
788 set_Cond_selector (ir_node *node, ir_node *selector) {
789 assert (node->op == op_Cond);
790 set_irn_n(node, 0, selector);
794 get_Cond_kind (ir_node *node) {
795 assert (node->op == op_Cond);
796 return node->attr.c.kind;
800 set_Cond_kind (ir_node *node, cond_kind kind) {
801 assert (node->op == op_Cond);
802 node->attr.c.kind = kind;
806 get_Cond_defaultProj (ir_node *node) {
807 assert (node->op == op_Cond);
808 return node->attr.c.default_proj;
812 get_Return_mem (ir_node *node) {
813 assert (node->op == op_Return);
814 return get_irn_n(node, 0);
818 set_Return_mem (ir_node *node, ir_node *mem) {
819 assert (node->op == op_Return);
820 set_irn_n(node, 0, mem);
824 get_Return_n_ress (ir_node *node) {
825 assert (node->op == op_Return);
826 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
830 get_Return_res_arr (ir_node *node)
832 assert ((node->op == op_Return));
833 if (get_Return_n_ress(node) > 0)
834 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
841 set_Return_n_res (ir_node *node, int results) {
842 assert (node->op == op_Return);
847 get_Return_res (ir_node *node, int pos) {
848 assert (node->op == op_Return);
849 assert (get_Return_n_ress(node) > pos);
850 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
854 set_Return_res (ir_node *node, int pos, ir_node *res){
855 assert (node->op == op_Return);
856 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
860 get_Raise_mem (ir_node *node) {
861 assert (node->op == op_Raise);
862 return get_irn_n(node, 0);
866 set_Raise_mem (ir_node *node, ir_node *mem) {
867 assert (node->op == op_Raise);
868 set_irn_n(node, 0, mem);
872 get_Raise_exo_ptr (ir_node *node) {
873 assert (node->op == op_Raise);
874 return get_irn_n(node, 1);
878 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
879 assert (node->op == op_Raise);
880 set_irn_n(node, 1, exo_ptr);
883 tarval *get_Const_tarval (ir_node *node) {
884 assert (node->op == op_Const);
885 return node->attr.con.tv;
889 set_Const_tarval (ir_node *node, tarval *con) {
890 assert (node->op == op_Const);
891 node->attr.con.tv = con;
895 /* The source language type. Must be an atomic type. Mode of type must
896 be mode of node. For tarvals from entities type must be pointer to
899 get_Const_type (ir_node *node) {
900 assert (node->op == op_Const);
901 return node->attr.con.tp;
905 set_Const_type (ir_node *node, type *tp) {
906 assert (node->op == op_Const);
907 if (tp != unknown_type) {
908 assert (is_atomic_type(tp));
909 assert (get_type_mode(tp) == get_irn_mode(node));
911 node->attr.con.tp = tp;
916 get_SymConst_kind (const ir_node *node) {
917 assert (node->op == op_SymConst);
918 return node->attr.i.num;
922 set_SymConst_kind (ir_node *node, symconst_kind num) {
923 assert (node->op == op_SymConst);
924 node->attr.i.num = num;
928 get_SymConst_type (ir_node *node) {
929 assert ( (node->op == op_SymConst)
930 && ( get_SymConst_kind(node) == symconst_type_tag
931 || get_SymConst_kind(node) == symconst_size));
932 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
936 set_SymConst_type (ir_node *node, type *tp) {
937 assert ( (node->op == op_SymConst)
938 && ( get_SymConst_kind(node) == symconst_type_tag
939 || get_SymConst_kind(node) == symconst_size));
940 node->attr.i.sym.type_p = tp;
944 get_SymConst_name (ir_node *node) {
945 assert ( (node->op == op_SymConst)
946 && (get_SymConst_kind(node) == symconst_addr_name));
947 return node->attr.i.sym.ident_p;
951 set_SymConst_name (ir_node *node, ident *name) {
952 assert ( (node->op == op_SymConst)
953 && (get_SymConst_kind(node) == symconst_addr_name));
954 node->attr.i.sym.ident_p = name;
958 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
959 entity *get_SymConst_entity (ir_node *node) {
960 assert ( (node->op == op_SymConst)
961 && (get_SymConst_kind (node) == symconst_addr_ent));
962 return node->attr.i.sym.entity_p;
965 void set_SymConst_entity (ir_node *node, entity *ent) {
966 assert ( (node->op == op_SymConst)
967 && (get_SymConst_kind(node) == symconst_addr_ent));
968 node->attr.i.sym.entity_p = ent;
971 union symconst_symbol
972 get_SymConst_symbol (ir_node *node) {
973 assert (node->op == op_SymConst);
974 return node->attr.i.sym;
/* Sets the symbol union (type, ident or entity) of a SymConst node.
   Which union member is meaningful is determined by the node's
   symconst_kind. */
978 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
979 assert (node->op == op_SymConst);
981 node->attr.i.sym = sym;
985 get_SymConst_value_type (ir_node *node) {
986 assert (node->op == op_SymConst);
987 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
988 return node->attr.i.tp;
992 set_SymConst_value_type (ir_node *node, type *tp) {
993 assert (node->op == op_SymConst);
994 node->attr.i.tp = tp;
998 get_Sel_mem (ir_node *node) {
999 assert (node->op == op_Sel);
1000 return get_irn_n(node, 0);
1004 set_Sel_mem (ir_node *node, ir_node *mem) {
1005 assert (node->op == op_Sel);
1006 set_irn_n(node, 0, mem);
1010 get_Sel_ptr (ir_node *node) {
1011 assert (node->op == op_Sel);
1012 return get_irn_n(node, 1);
1016 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1017 assert (node->op == op_Sel);
1018 set_irn_n(node, 1, ptr);
1022 get_Sel_n_indexs (ir_node *node) {
1023 assert (node->op == op_Sel);
1024 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1028 get_Sel_index_arr (ir_node *node)
1030 assert ((node->op == op_Sel));
1031 if (get_Sel_n_indexs(node) > 0)
1032 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1038 get_Sel_index (ir_node *node, int pos) {
1039 assert (node->op == op_Sel);
1040 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1044 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1045 assert (node->op == op_Sel);
1046 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1050 get_Sel_entity (ir_node *node) {
1051 assert (node->op == op_Sel);
1052 return node->attr.s.ent;
1056 set_Sel_entity (ir_node *node, entity *ent) {
1057 assert (node->op == op_Sel);
1058 node->attr.s.ent = ent;
1062 get_InstOf_ent (ir_node *node) {
1063 assert (node->op = op_InstOf);
1064 return (node->attr.io.ent);
1068 set_InstOf_ent (ir_node *node, type *ent) {
1069 assert (node->op = op_InstOf);
1070 node->attr.io.ent = ent;
1074 get_InstOf_store (ir_node *node) {
1075 assert (node->op = op_InstOf);
1076 return (get_irn_n (node, 0));
1080 set_InstOf_store (ir_node *node, ir_node *obj) {
1081 assert (node->op = op_InstOf);
1082 set_irn_n (node, 0, obj);
1086 get_InstOf_obj (ir_node *node) {
1087 assert (node->op = op_InstOf);
1088 return (get_irn_n (node, 1));
1092 set_InstOf_obj (ir_node *node, ir_node *obj) {
1093 assert (node->op = op_InstOf);
1094 set_irn_n (node, 1, obj);
1098 /* For unary and binary arithmetic operations the access to the
1099 operands can be factored out. Left is the first, right the
1100 second arithmetic value as listed in tech report 0999-33.
1101 unops are: Minus, Abs, Not, Conv, Cast
1102 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1103 Shr, Shrs, Rotate, Cmp */
1107 get_Call_mem (ir_node *node) {
1108 assert (node->op == op_Call);
1109 return get_irn_n(node, 0);
1113 set_Call_mem (ir_node *node, ir_node *mem) {
1114 assert (node->op == op_Call);
1115 set_irn_n(node, 0, mem);
1119 get_Call_ptr (ir_node *node) {
1120 assert (node->op == op_Call);
1121 return get_irn_n(node, 1);
1125 set_Call_ptr (ir_node *node, ir_node *ptr) {
1126 assert (node->op == op_Call);
1127 set_irn_n(node, 1, ptr);
1131 get_Call_param_arr (ir_node *node) {
1132 assert (node->op == op_Call);
1133 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1137 get_Call_n_params (ir_node *node) {
1138 assert (node->op == op_Call);
1139 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1143 get_Call_arity (ir_node *node) {
1144 assert (node->op == op_Call);
1145 return get_Call_n_params(node);
1149 set_Call_arity (ir_node *node, ir_node *arity) {
1150 assert (node->op == op_Call);
1155 get_Call_param (ir_node *node, int pos) {
1156 assert (node->op == op_Call);
1157 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1161 set_Call_param (ir_node *node, int pos, ir_node *param) {
1162 assert (node->op == op_Call);
1163 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1167 get_Call_type (ir_node *node) {
1168 assert (node->op == op_Call);
1169 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1173 set_Call_type (ir_node *node, type *tp) {
1174 assert (node->op == op_Call);
1175 assert ((get_unknown_type() == tp) || is_method_type(tp));
1176 node->attr.call.cld_tp = tp;
1179 int Call_has_callees(ir_node *node) {
1180 assert(node && node->op == op_Call);
1181 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1182 (node->attr.call.callee_arr != NULL));
1185 int get_Call_n_callees(ir_node * node) {
1186 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1187 return ARR_LEN(node->attr.call.callee_arr);
1190 entity * get_Call_callee(ir_node * node, int pos) {
1191 assert(pos >= 0 && pos < get_Call_n_callees(node));
1192 return node->attr.call.callee_arr[pos];
1195 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1196 assert(node->op == op_Call);
1197 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1198 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1200 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1203 void remove_Call_callee_arr(ir_node * node) {
1204 assert(node->op == op_Call);
1205 node->attr.call.callee_arr = NULL;
1208 ir_node * get_CallBegin_ptr (ir_node *node) {
1209 assert(node->op == op_CallBegin);
1210 return get_irn_n(node, 0);
1212 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1213 assert(node->op == op_CallBegin);
1214 set_irn_n(node, 0, ptr);
1216 ir_node * get_CallBegin_call (ir_node *node) {
1217 assert(node->op == op_CallBegin);
1218 return node->attr.callbegin.call;
1220 void set_CallBegin_call (ir_node *node, ir_node *call) {
1221 assert(node->op == op_CallBegin);
1222 node->attr.callbegin.call = call;
1227 ir_node * get_##OP##_left(ir_node *node) { \
1228 assert(node->op == op_##OP); \
1229 return get_irn_n(node, node->op->op_index); \
1231 void set_##OP##_left(ir_node *node, ir_node *left) { \
1232 assert(node->op == op_##OP); \
1233 set_irn_n(node, node->op->op_index, left); \
1235 ir_node *get_##OP##_right(ir_node *node) { \
1236 assert(node->op == op_##OP); \
1237 return get_irn_n(node, node->op->op_index + 1); \
1239 void set_##OP##_right(ir_node *node, ir_node *right) { \
1240 assert(node->op == op_##OP); \
1241 set_irn_n(node, node->op->op_index + 1, right); \
1245 ir_node *get_##OP##_op(ir_node *node) { \
1246 assert(node->op == op_##OP); \
1247 return get_irn_n(node, node->op->op_index); \
1249 void set_##OP##_op (ir_node *node, ir_node *op) { \
1250 assert(node->op == op_##OP); \
1251 set_irn_n(node, node->op->op_index, op); \
1261 get_Quot_mem (ir_node *node) {
1262 assert (node->op == op_Quot);
1263 return get_irn_n(node, 0);
1267 set_Quot_mem (ir_node *node, ir_node *mem) {
1268 assert (node->op == op_Quot);
1269 set_irn_n(node, 0, mem);
1275 get_DivMod_mem (ir_node *node) {
1276 assert (node->op == op_DivMod);
1277 return get_irn_n(node, 0);
1281 set_DivMod_mem (ir_node *node, ir_node *mem) {
1282 assert (node->op == op_DivMod);
1283 set_irn_n(node, 0, mem);
1289 get_Div_mem (ir_node *node) {
1290 assert (node->op == op_Div);
1291 return get_irn_n(node, 0);
1295 set_Div_mem (ir_node *node, ir_node *mem) {
1296 assert (node->op == op_Div);
1297 set_irn_n(node, 0, mem);
1303 get_Mod_mem (ir_node *node) {
1304 assert (node->op == op_Mod);
1305 return get_irn_n(node, 0);
1309 set_Mod_mem (ir_node *node, ir_node *mem) {
1310 assert (node->op == op_Mod);
1311 set_irn_n(node, 0, mem);
1328 get_Cast_type (ir_node *node) {
1329 assert (node->op == op_Cast);
1330 return node->attr.cast.totype;
1334 set_Cast_type (ir_node *node, type *to_tp) {
1335 assert (node->op == op_Cast);
1336 node->attr.cast.totype = to_tp;
1340 (is_unop)(const ir_node *node) {
1341 return __is_unop(node);
1345 get_unop_op (ir_node *node) {
1346 if (node->op->opar == oparity_unary)
1347 return get_irn_n(node, node->op->op_index);
1349 assert(node->op->opar == oparity_unary);
1354 set_unop_op (ir_node *node, ir_node *op) {
1355 if (node->op->opar == oparity_unary)
1356 set_irn_n(node, node->op->op_index, op);
1358 assert(node->op->opar == oparity_unary);
1362 (is_binop)(const ir_node *node) {
1363 return __is_binop(node);
1367 get_binop_left (ir_node *node) {
1368 if (node->op->opar == oparity_binary)
1369 return get_irn_n(node, node->op->op_index);
1371 assert(node->op->opar == oparity_binary);
1376 set_binop_left (ir_node *node, ir_node *left) {
1377 if (node->op->opar == oparity_binary)
1378 set_irn_n(node, node->op->op_index, left);
1380 assert (node->op->opar == oparity_binary);
1384 get_binop_right (ir_node *node) {
1385 if (node->op->opar == oparity_binary)
1386 return get_irn_n(node, node->op->op_index + 1);
1388 assert(node->op->opar == oparity_binary);
1393 set_binop_right (ir_node *node, ir_node *right) {
1394 if (node->op->opar == oparity_binary)
1395 set_irn_n(node, node->op->op_index + 1, right);
1397 assert (node->op->opar == oparity_binary);
/* Tests for a "real" Phi: in interprocedural view a Filter counts as a
   Phi-like node; a zero-arity Phi during graph construction (Phi0) does
   not count. */
1400 int is_Phi (ir_node *n) {
1406 if (op == op_Filter) return get_interprocedural_view();
1409 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1410 (get_irn_arity(n) > 0));
/* Tests for a Phi0: a placeholder Phi with no predecessors that only
   exists while the graph is still being built. */
1415 int is_Phi0 (ir_node *n) {
1418 return ((get_irn_op(n) == op_Phi) &&
1419 (get_irn_arity(n) == 0) &&
1420 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Returns the predecessor array of a Phi; in[0] is the block, so the
   data predecessors start at in[1]. */
1424 get_Phi_preds_arr (ir_node *node) {
1425 assert (node->op == op_Phi);
1426 return (ir_node **)&(get_irn_in(node)[1]);
1430 get_Phi_n_preds (ir_node *node) {
1431 assert (is_Phi(node) || is_Phi0(node));
1432 return (get_irn_arity(node));
1436 void set_Phi_n_preds (ir_node *node, int n_preds) {
1437 assert (node->op == op_Phi);
1442 get_Phi_pred (ir_node *node, int pos) {
1443 assert (is_Phi(node) || is_Phi0(node));
1444 return get_irn_n(node, pos);
1448 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1449 assert (is_Phi(node) || is_Phi0(node));
1450 set_irn_n(node, pos, pred);
/* A "memop" here is a Load or a Store: both share the layout
   in[0] = memory, in[1] = pointer, which the generic accessors
   below rely on. */
1454 int is_memop(ir_node *node) {
1455 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1458 ir_node *get_memop_mem (ir_node *node) {
1459 assert(is_memop(node));
1460 return get_irn_n(node, 0);
1463 void set_memop_mem (ir_node *node, ir_node *mem) {
1464 assert(is_memop(node));
1465 set_irn_n(node, 0, mem);
1468 ir_node *get_memop_ptr (ir_node *node) {
1469 assert(is_memop(node));
1470 return get_irn_n(node, 1);
1473 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1474 assert(is_memop(node));
1475 set_irn_n(node, 1, ptr);
/* Load accessors. Predecessor layout: in[0] = memory, in[1] = address.
   Mode and volatility live in the node's load attribute. */
1479 get_Load_mem (ir_node *node) {
1480 assert (node->op == op_Load);
1481 return get_irn_n(node, 0);
1485 set_Load_mem (ir_node *node, ir_node *mem) {
1486 assert (node->op == op_Load);
1487 set_irn_n(node, 0, mem);
1491 get_Load_ptr (ir_node *node) {
1492 assert (node->op == op_Load);
1493 return get_irn_n(node, 1);
1497 set_Load_ptr (ir_node *node, ir_node *ptr) {
1498 assert (node->op == op_Load);
1499 set_irn_n(node, 1, ptr);
/* The mode of the value this Load produces. */
1503 get_Load_mode (ir_node *node) {
1504 assert (node->op == op_Load);
1505 return node->attr.load.load_mode;
1509 set_Load_mode (ir_node *node, ir_mode *mode) {
1510 assert (node->op == op_Load);
1511 node->attr.load.load_mode = mode;
/* Volatility flag: a volatile Load must not be optimized away. */
1515 get_Load_volatility (ir_node *node) {
1516 assert (node->op == op_Load);
1517 return node->attr.load.volatility;
1521 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1522 assert (node->op == op_Load);
1523 node->attr.load.volatility = volatility;
/* Store accessors. Predecessor layout: in[0] = memory, in[1] = address,
   in[2] = value to store. Volatility lives in the store attribute. */
1528 get_Store_mem (ir_node *node) {
1529 assert (node->op == op_Store);
1530 return get_irn_n(node, 0);
1534 set_Store_mem (ir_node *node, ir_node *mem) {
1535 assert (node->op == op_Store);
1536 set_irn_n(node, 0, mem);
1540 get_Store_ptr (ir_node *node) {
1541 assert (node->op == op_Store);
1542 return get_irn_n(node, 1);
1546 set_Store_ptr (ir_node *node, ir_node *ptr) {
1547 assert (node->op == op_Store);
1548 set_irn_n(node, 1, ptr);
1552 get_Store_value (ir_node *node) {
1553 assert (node->op == op_Store);
1554 return get_irn_n(node, 2);
1558 set_Store_value (ir_node *node, ir_node *value) {
1559 assert (node->op == op_Store);
1560 set_irn_n(node, 2, value);
/* Volatility flag: a volatile Store must not be optimized away. */
1564 get_Store_volatility (ir_node *node) {
1565 assert (node->op == op_Store);
1566 return node->attr.store.volatility;
1570 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1571 assert (node->op == op_Store);
1572 node->attr.store.volatility = volatility;
/* Alloc accessors. Predecessor layout: in[0] = memory, in[1] = size.
   Allocated type and heap/stack placement live in attr.a. */
1577 get_Alloc_mem (ir_node *node) {
1578 assert (node->op == op_Alloc);
1579 return get_irn_n(node, 0);
1583 set_Alloc_mem (ir_node *node, ir_node *mem) {
1584 assert (node->op == op_Alloc);
1585 set_irn_n(node, 0, mem);
1589 get_Alloc_size (ir_node *node) {
1590 assert (node->op == op_Alloc);
1591 return get_irn_n(node, 1);
1595 set_Alloc_size (ir_node *node, ir_node *size) {
1596 assert (node->op == op_Alloc);
1597 set_irn_n(node, 1, size);
/* Note: the getter also normalizes the stored type through skip_tid()
   and caches the result back into the attribute. */
1601 get_Alloc_type (ir_node *node) {
1602 assert (node->op == op_Alloc);
1603 return node->attr.a.type = skip_tid(node->attr.a.type);
1607 set_Alloc_type (ir_node *node, type *tp) {
1608 assert (node->op == op_Alloc);
1609 node->attr.a.type = tp;
/* Where the allocation happens (where_alloc: e.g. stack vs. heap). */
1613 get_Alloc_where (ir_node *node) {
1614 assert (node->op == op_Alloc);
1615 return node->attr.a.where;
1619 set_Alloc_where (ir_node *node, where_alloc where) {
1620 assert (node->op == op_Alloc);
1621 node->attr.a.where = where;
/* Free accessors. Predecessor layout: in[0] = memory, in[1] = pointer,
   in[2] = size. The freed type is stored directly in attr.f. */
1626 get_Free_mem (ir_node *node) {
1627 assert (node->op == op_Free);
1628 return get_irn_n(node, 0);
1632 set_Free_mem (ir_node *node, ir_node *mem) {
1633 assert (node->op == op_Free);
1634 set_irn_n(node, 0, mem);
1638 get_Free_ptr (ir_node *node) {
1639 assert (node->op == op_Free);
1640 return get_irn_n(node, 1);
1644 set_Free_ptr (ir_node *node, ir_node *ptr) {
1645 assert (node->op == op_Free);
1646 set_irn_n(node, 1, ptr);
1650 get_Free_size (ir_node *node) {
1651 assert (node->op == op_Free);
1652 return get_irn_n(node, 2);
1656 set_Free_size (ir_node *node, ir_node *size) {
1657 assert (node->op == op_Free);
1658 set_irn_n(node, 2, size);
/* Like get_Alloc_type: normalizes via skip_tid() and caches back. */
1662 get_Free_type (ir_node *node) {
1663 assert (node->op == op_Free);
1664 return node->attr.f = skip_tid(node->attr.f);
1668 set_Free_type (ir_node *node, type *tp) {
1669 assert (node->op == op_Free);
/* Sync accessors. A Sync merges several memory values; its data
   predecessors start at in[1] (in[0] is the block). */
1674 get_Sync_preds_arr (ir_node *node) {
1675 assert (node->op == op_Sync);
1676 return (ir_node **)&(get_irn_in(node)[1]);
1680 get_Sync_n_preds (ir_node *node) {
1681 assert (node->op == op_Sync);
1682 return (get_irn_arity(node));
1687 set_Sync_n_preds (ir_node *node, int n_preds) {
1688 assert (node->op == op_Sync);
1693 get_Sync_pred (ir_node *node, int pos) {
1694 assert (node->op == op_Sync);
1695 return get_irn_n(node, pos);
1699 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1700 assert (node->op == op_Sync);
1701 set_irn_n(node, pos, pred);
/* Proj accessors. is_Proj() also accepts Filter nodes outside the
   interprocedural view, so get_Proj_proj dispatches on the opcode. */
1705 get_Proj_pred (ir_node *node) {
1706 assert (is_Proj(node));
1707 return get_irn_n(node, 0);
1711 set_Proj_pred (ir_node *node, ir_node *pred) {
1712 assert (is_Proj(node));
1713 set_irn_n(node, 0, pred);
/* Returns the projection number; for a Filter it is kept in the
   filter attribute instead of attr.proj. */
1717 get_Proj_proj (ir_node *node) {
1718 assert (is_Proj(node));
1719 if (get_irn_opcode(node) == iro_Proj) {
1720 return node->attr.proj;
1722 assert(get_irn_opcode(node) == iro_Filter);
1723 return node->attr.filter.proj;
/* The setter only handles real Proj nodes (not Filters). */
1728 set_Proj_proj (ir_node *node, long proj) {
1729 assert (node->op == op_Proj);
1730 node->attr.proj = proj;
/* Tuple accessors; data predecessors start at in[1] (in[0] is the block). */
1734 get_Tuple_preds_arr (ir_node *node) {
1735 assert (node->op == op_Tuple);
1736 return (ir_node **)&(get_irn_in(node)[1]);
1740 get_Tuple_n_preds (ir_node *node) {
1741 assert (node->op == op_Tuple);
1742 return (get_irn_arity(node));
1747 set_Tuple_n_preds (ir_node *node, int n_preds) {
1748 assert (node->op == op_Tuple);
1753 get_Tuple_pred (ir_node *node, int pos) {
1754 assert (node->op == op_Tuple);
1755 return get_irn_n(node, pos);
1759 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1760 assert (node->op == op_Tuple);
1761 set_irn_n(node, pos, pred);
/* Id accessors: an Id node simply forwards its single predecessor. */
1765 get_Id_pred (ir_node *node) {
1766 assert (node->op == op_Id);
1767 return get_irn_n(node, 0);
1771 set_Id_pred (ir_node *node, ir_node *pred) {
1772 assert (node->op == op_Id);
1773 set_irn_n(node, 0, pred);
/* Confirm accessors. Predecessor layout: in[0] = confirmed value,
   in[1] = bound it is compared against. */
1776 ir_node *get_Confirm_value (ir_node *node) {
1777 assert (node->op == op_Confirm);
1778 return get_irn_n(node, 0);
1780 void set_Confirm_value (ir_node *node, ir_node *value) {
1781 assert (node->op == op_Confirm);
1782 set_irn_n(node, 0, value);
1784 ir_node *get_Confirm_bound (ir_node *node) {
1785 assert (node->op == op_Confirm);
1786 return get_irn_n(node, 1);
/* Sets the bound operand of a Confirm node.
   Confirm convention in this file: in[0] = value, in[1] = bound
   (see get_Confirm_value / get_Confirm_bound above).
   BUG FIX: the original wrote to slot 0, silently clobbering the value
   operand and leaving the bound unchanged; write slot 1 instead. */
1788 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1789 assert (node->op == op_Confirm);
1790 set_irn_n(node, 1, bound);
/* The comparison relation (pn_Cmp) asserted by this Confirm. */
1792 pn_Cmp get_Confirm_cmp (ir_node *node) {
1793 assert (node->op == op_Confirm);
1794 return node->attr.confirm_cmp;
1796 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1797 assert (node->op == op_Confirm);
1798 node->attr.confirm_cmp = cmp;
/* Filter accessors. A Filter is the interprocedural counterpart of a
   Proj/Phi; besides its normal predecessor it carries a separate
   call-graph predecessor array (attr.filter.in_cg). */
1803 get_Filter_pred (ir_node *node) {
1804 assert(node->op == op_Filter);
1808 set_Filter_pred (ir_node *node, ir_node *pred) {
1809 assert(node->op == op_Filter);
1813 get_Filter_proj(ir_node *node) {
1814 assert(node->op == op_Filter);
1815 return node->attr.filter.proj;
1818 set_Filter_proj (ir_node *node, long proj) {
1819 assert(node->op == op_Filter);
1820 node->attr.filter.proj = proj;
1823 /* Don't use get_irn_arity, get_irn_n in implementation as access
1824 shall work independent of view!!! */
/* Replaces the whole call-graph predecessor array. The in_cg array is
   (re)allocated on the graph's obstack only when the arity changes;
   slot 0 mirrors the block predecessor, data starts at slot 1. */
1825 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1826 assert(node->op == op_Filter);
1827 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1828 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1829 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1830 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1831 node->attr.filter.in_cg[0] = node->in[0];
1833 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets a single call-graph predecessor (pos is 0-based over the data
   slots, hence the +1 to skip the block slot). */
1836 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1837 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1838 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1839 node->attr.filter.in_cg[pos + 1] = pred;
1841 int get_Filter_n_cg_preds(ir_node *node) {
1842 assert(node->op == op_Filter && node->attr.filter.in_cg);
1843 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1845 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1847 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1849 arity = ARR_LEN(node->attr.filter.in_cg);
1850 assert(pos < arity - 1);
1851 return node->attr.filter.in_cg[pos + 1];
/* Returns the graph a node belongs to by walking to its Block, which
   caches the irg in its block attribute. */
1856 get_irn_irg(ir_node *node) {
1857 if (get_irn_op(node) != op_Block)
1858 node = get_nodes_block(node);
1859 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1860 node = get_nodes_block(node);
1861 assert(get_irn_op(node) == op_Block);
1862 return node->attr.block.irg;
1866 /*----------------------------------------------------------------*/
1867 /* Auxiliary routines */
1868 /*----------------------------------------------------------------*/
/* Returns the predecessor of a Proj, or the node itself otherwise.
   Accepts NULL (no assert on node). */
1871 skip_Proj (ir_node *node) {
1872 /* don't assert node !!! */
1873 if (node && is_Proj(node)) {
1874 return get_Proj_pred(node);
/* Resolves Proj(Tuple) chains: if node is a Proj whose (Id-skipped)
   predecessor is a Tuple, returns the Tuple entry selected by the
   Proj number; handles nested Tuples recursively. A no-op when
   normalization optimizations are disabled. */
1881 skip_Tuple (ir_node *node) {
1884 if (!get_opt_normalize()) return node;
1886 node = skip_Id(node);
1887 if (get_irn_op(node) == op_Proj) {
1888 pred = skip_Id(get_Proj_pred(node));
1889 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1890 pred = skip_Id(skip_Tuple(pred));
1891 if (get_irn_op(pred) == op_Tuple)
1892 return get_Tuple_pred(pred, get_Proj_proj(node));
1897 /** returns operand of node if node is a Cast */
/* Otherwise (including NULL) the node itself is returned; the operand
   is additionally Id-skipped. */
1898 ir_node *skip_Cast (ir_node *node) {
1899 if (node && get_irn_op(node) == op_Cast) {
1900 return skip_Id(get_irn_n(node, 0));
1907 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1908 than any other approach, as Id chains are resolved and all point to the real node, or
1909 all id's are self loops. */
/* NOTE(review): two skip_Id definitions appear in this file; presumably
   one is selected by an elided preprocessor conditional — confirm. */
1911 skip_Id (ir_node *node) {
1912 /* don't assert node !!! */
1914 if (!get_opt_normalize()) return node;
1916 /* Don't use get_Id_pred: We get into an endless loop for
1917 self-referencing Ids. */
/* in[0+1] is the first data predecessor (in[0] is the block). */
1918 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1919 ir_node *rem_pred = node->in[0+1];
1922 assert (get_irn_arity (node) > 0);
/* Temporarily make the node self-referencing so cycles terminate. */
1924 node->in[0+1] = node;
1925 res = skip_Id(rem_pred);
1926 if (res->op == op_Id) /* self-loop */ return node;
/* Path compression: point the node at the chain's real end. */
1928 node->in[0+1] = res;
1935 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1936 than any other approach, as Id chains are resolved and all point to the real node, or
1937 all id's are self loops. */
/* Alternate skip_Id with a fast path: returns immediately for non-Id
   nodes and single-link Ids before doing the compressing recursion. */
1939 skip_Id (ir_node *node) {
1941 /* don't assert node !!! */
1943 if (!node || (node->op != op_Id)) return node;
1945 if (!get_opt_normalize()) return node;
1947 /* Don't use get_Id_pred: We get into an endless loop for
1948 self-referencing Ids. */
1949 pred = node->in[0+1];
1951 if (pred->op != op_Id) return pred;
1953 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1954 ir_node *rem_pred, *res;
1956 if (pred->op != op_Id) return pred; /* shortcut */
1959 assert (get_irn_arity (node) > 0);
1961 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1962 res = skip_Id(rem_pred);
1963 if (res->op == op_Id) /* self-loop */ return node;
1965 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Out-of-line wrappers for the corresponding __is_* macros/inlines;
   the parenthesized names prevent macro expansion at the definition. */
1974 (is_Bad)(const ir_node *node) {
1975 return __is_Bad(node);
1979 (is_no_Block)(const ir_node *node) {
1980 return __is_no_Block(node);
1984 (is_Block)(const ir_node *node) {
1985 return __is_Block(node);
1988 /* returns true if node is a Unknown node. */
1990 is_Unknown (const ir_node *node) {
1992 return (get_irn_op(node) == op_Unknown);
/* Outside the interprocedural view a Filter behaves like a Proj. */
1996 is_Proj (const ir_node *node) {
1998 return node->op == op_Proj
1999 || (!get_interprocedural_view() && node->op == op_Filter);
2002 /* Returns true if the operation manipulates control flow. */
2004 is_cfop(const ir_node *node) {
2005 return is_cfopcode(get_irn_op(node));
2008 /* Returns true if the operation manipulates interprocedural control flow:
2009 CallBegin, EndReg, EndExcept */
2010 int is_ip_cfop(const ir_node *node) {
2011 return is_ip_cfopcode(get_irn_op(node));
2014 /* Returns true if the operation can change the control flow because
2017 is_fragile_op(const ir_node *node) {
2018 return is_op_fragile(get_irn_op(node));
2021 /* Returns the memory operand of fragile operations. */
/* Dispatches on the opcode; the visible case(s) return in[0], the
   conventional memory slot. */
2022 ir_node *get_fragile_op_mem(ir_node *node) {
2023 assert(node && is_fragile_op(node));
2025 switch (get_irn_opcode (node)) {
2034 return get_irn_n(node, 0);
2039 assert(0 && "should not be reached");
2044 /* Returns true if the operation is a forking control flow operation. */
2046 is_forking_op(const ir_node *node) {
2047 return is_op_forking(get_irn_op(node));
2051 #ifdef DEBUG_libfirm
/* Debug helper: prints a node's op, mode, number and address, its block
   (retrieved as predecessor -1), and all data predecessors to stdout. */
2052 void dump_irn (ir_node *n) {
2053 int i, arity = get_irn_arity(n);
2054 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2056 ir_node *pred = get_irn_n(n, -1);
2057 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2058 get_irn_node_nr(pred), (void *)pred);
2060 printf(" preds: \n");
2061 for (i = 0; i < arity; ++i) {
2062 ir_node *pred = get_irn_n(n, i);
2063 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2064 get_irn_node_nr(pred), (void *)pred);
2068 #else /* DEBUG_libfirm */
/* Release builds keep the symbol but do nothing. */
2069 void dump_irn (ir_node *n) {}
2070 #endif /* DEBUG_libfirm */