3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
/* some constants fixing the positions of nodes predecessors
   in the in array (slot 0 is the block, operands start at slot 1) */
#define CALL_PARAM_OFFSET 2
#define FUNCCALL_PARAM_OFFSET 1
#define SEL_INDEX_OFFSET 2
#define RETURN_RESULT_OFFSET 1 /* mem is not a result */
#define END_KEEPALIVE_OFFSET 0
/** String names of the pnc (compare) condition codes, indexed by pnc value. */
static const char *pnc_name_arr [] = {
  "False", "Eq", "Lt", "Le",
  "Gt", "Ge", "Lg", "Leg", "Uo",
  "Ue", "Ul", "Ule", "Ug", "Uge",
  "Ne", "True"
};

/**
 * Returns the pnc name from a pnc constant.
 * The index must be a valid pnc value; out-of-range values were
 * previously an unchecked out-of-bounds read.
 */
const char *get_pnc_string(int pnc) {
  assert(pnc >= 0 &&
         pnc < (int)(sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0])));
  return pnc_name_arr[pnc];
}
56 * Calculates the negated pnc condition.
59 get_negated_pnc(int pnc) {
61 case False: return True; break;
62 case Eq: return Ne; break;
63 case Lt: return Uge; break;
64 case Le: return Ug; break;
65 case Gt: return Ule; break;
66 case Ge: return Ul; break;
67 case Lg: return Ue; break;
68 case Leg: return Uo; break;
69 case Uo: return Leg; break;
70 case Ue: return Lg; break;
71 case Ul: return Ge; break;
72 case Ule: return Gt; break;
73 case Ug: return Le; break;
74 case Uge: return Lt; break;
75 case Ne: return Eq; break;
76 case True: return False; break;
78 return 99; /* to shut up gcc */
/* Debug names for the Proj numbers produced by Start. */
const char *pns_name_arr [] = {
  "initial_exec",
  "global_store",
  "frame_base",
  "globals",
  "args"
};

/* Debug names for the SymConst kinds. */
const char *symconst_name_arr [] = {
  "type_tag",
  "size",
  "addr_name",
  "addr_ent"
};
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;

/* Register new space for every node.
   Returns the new total amount of custom data per node.  Must be called
   before the first node is created (see forbid_new_data). */
unsigned register_additional_node_data(unsigned size) {
  assert(!forbid_new_data && "Too late to register additional node data");

  /* Also refuse in release builds (NDEBUG strips the assert); a late
     registration would silently change the node memory layout. */
  if (forbid_new_data)
    return 0;

  return firm_add_node_size += size;
}
/* Forbid the addition of new data to an ir node. */

/*
 * irnode constructor.
 * Create a new irnode in irg, with an op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic array is created.
 */
/* NOTE(review): this definition is incomplete in the visible chunk --
   the return type, the declarations of p/res and the arity<0 vs arity>=0
   branching are not shown.  Comments below are hedged accordingly. */
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
             int arity, ir_node **in)
  /* total size: node header up to attr, plus op-specific attribute size,
     plus globally registered custom data */
  size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
  assert(irg && op && mode);
  /* allocate on the graph's obstack and zero the whole node */
  p = obstack_alloc (irg->obst, node_size);
  memset(p, 0, node_size);
  /* the registered custom data lives in front of the ir_node proper */
  res = (ir_node *) (p + firm_add_node_size);
  res->kind = k_ir_node;
  /* dynamic in array -- presumably the arity < 0 branch; TODO confirm */
  res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
  /* fixed in array on the obstack; slot 0 is reserved for the block */
  res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
  set_irn_dbg_info(res, db);
  /* unique node number for debugging output */
  res->node_nr = get_irp_new_node_nr();
/*-- getting some parameters from ir_nodes --*/

/* Checks whether a pointer points to an ir node (wrapper around the
   inlined __is_ir_node). */
(is_ir_node)(const void *thing) {
  return __is_ir_node(thing);

/* Arity of the node in the intraprocedural view. */
(get_irn_intra_arity)(const ir_node *node) {
  return __get_irn_intra_arity(node);

/* Arity of the node in the interprocedural view. */
(get_irn_inter_arity)(const ir_node *node) {
  return __get_irn_inter_arity(node);

/* Dispatch pointer: selects intra- or interprocedural arity. */
int (*__get_irn_arity)(const ir_node *node) = __get_irn_intra_arity;

/* Arity of the node under the currently selected view. */
(get_irn_arity)(const ir_node *node) {
  return __get_irn_arity(node);

/* Returns the array with ins. This array is shifted with respect to the
   array accessed by get_irn_n: The block operand is at position 0 not -1.
   (@@@ This should be changed.)
   The order of the predecessors in this array is not guaranteed, except that
   lists of operands as predecessors of Block or arguments of a Call are
   consecutive.  (NOTE(review): end of this comment reconstructed; the
   closing line is not visible in this chunk.) */
get_irn_in (const ir_node *node) {
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      /* Filter nodes keep their interprocedural preds in in_cg */
      assert(node->attr.filter.in_cg);
      return node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      return node->attr.block.in_cg;
    /* else fall through */
/* Sets the complete in array of the node.  In the interprocedural view
   Filter and Block store their predecessors in a separate in_cg array. */
set_irn_in (ir_node *node, int arity, ir_node **in) {
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      arr = &node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      arr = &node->attr.block.in_cg;
  /* reallocate only when the arity actually changes; keep the block pred */
  if (arity != ARR_LEN(*arr) - 1) {
    ir_node * block = (*arr)[0];
    *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
  /* backedge info refers to positions in the in array -- renew it */
  fix_backedges(current_ir_graph->obst, node);
  memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);

/* n-th predecessor, intraprocedural view. */
(get_irn_intra_n)(const ir_node *node, int n) {
  return __get_irn_intra_n (node, n);

/* n-th predecessor, interprocedural view. */
(get_irn_inter_n)(const ir_node *node, int n) {
  return __get_irn_inter_n (node, n);

/* Dispatch pointer selecting the view used by get_irn_n. */
ir_node *(*__get_irn_n)(const ir_node *node, int n) = __get_irn_intra_n;

/* n-th predecessor under the current view; n == -1 yields the block. */
(get_irn_n)(const ir_node *node, int n) {
  return __get_irn_n(node, n);

/* Sets the n-th predecessor; n == -1 sets the block. */
set_irn_n (ir_node *node, int n, ir_node *in) {
  assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
  assert(in && in->kind == k_ir_node);
  if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
    /* Change block pred in both views! */
    node->in[n + 1] = in;
    assert(node->attr.filter.in_cg);
    node->attr.filter.in_cg[n + 1] = in;
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      node->attr.filter.in_cg[n + 1] = in;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      node->attr.block.in_cg[n + 1] = in;
  /* else fall through */
  /* ordinary in array: slot 0 is the block, hence the n + 1 shift */
  node->in[n + 1] = in;
/* Mode of the node. */
(get_irn_mode)(const ir_node *node) {
  return __get_irn_mode(node);

/* Sets the mode of the node (library internal). */
(set_irn_mode)(ir_node *node, ir_mode *mode)
  __set_irn_mode(node, mode);

/* Numeric modecode of the node's mode. */
get_irn_modecode (const ir_node *node)
  return node->mode->code;

/** Gets the string representation of the mode. */
get_irn_modename (const ir_node *node)
  return get_mode_name(node->mode);

/* Ident of the node's mode. */
get_irn_modeident (const ir_node *node)
  return get_mode_ident(node->mode);

/* Opcode object of the node. */
(get_irn_op)(const ir_node *node)
  return __get_irn_op(node);

/* should be private to the library: */
/* Sets the node's opcode.  (Body not visible in this chunk.) */
set_irn_op (ir_node *node, ir_op *op)

/* Numeric opcode of the node. */
(get_irn_opcode)(const ir_node *node)
  return __get_irn_opcode(node);

/* Name of the node's op; a zero-arity Phi during graph construction is
   reported as the special name "Phi0". */
get_irn_opname (const ir_node *node)
  if ((get_irn_op((ir_node *)node) == op_Phi) &&
      (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
      (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
  return get_id_str(node->op->name);

/* Ident of the node's op name. */
get_irn_opident (const ir_node *node)
  return node->op->name;
/* Visited counter of the node. */
(get_irn_visited)(const ir_node *node)
  return __get_irn_visited(node);

/* Sets the visited counter of the node. */
(set_irn_visited)(ir_node *node, unsigned long visited)
  __set_irn_visited(node, visited);

/* Marks the node visited with the current graph's visited counter. */
(mark_irn_visited)(ir_node *node) {
  __mark_irn_visited(node);

/* Non-zero iff the node was not yet visited in the current traversal. */
(irn_not_visited)(const ir_node *node) {
  return __irn_not_visited(node);

/* Non-zero iff the node was visited in the current traversal. */
(irn_visited)(const ir_node *node) {
  return __irn_visited(node);

/* Generic link field, free for use by analyses. */
(set_irn_link)(ir_node *node, void *link) {
  __set_irn_link(node, link);

(get_irn_link)(const ir_node *node) {
  return __get_irn_link(node);

/* Pin state: whether the node is pinned to its block or may float. */
(get_irn_pinned)(const ir_node *node) {
  return __get_irn_pinned(node);

void set_irn_pinned(ir_node *node, op_pin_state state) {
  /* due to optimization an opt may be turned into a Tuple */
  if (get_irn_op(node) == op_Tuple)
  assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
  assert(state == op_pin_state_pinned || state == op_pin_state_floats);
  node->attr.except.pin_state = state;
#ifdef DO_HEAPANALYSIS
/* Access the abstract interpretation information of a node.
   Returns NULL if no such information is available. */
struct abstval *get_irn_abst_value(ir_node *n) {
/* Set the abstract interpretation information of a node. */
void set_irn_abst_value(ir_node *n, struct abstval *os) {
struct section *firm_get_irn_section(ir_node *n) {
void firm_set_irn_section(ir_node *n, struct section *s) {
/* Dummies needed for firmjni.  (NOTE(review): the #else separating the
   two variants is not visible in this chunk.) */
struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
void set_irn_abst_value(ir_node *n, struct abstval *os) {}
struct section *firm_get_irn_section(ir_node *n) { return NULL; }
void firm_set_irn_section(ir_node *n, struct section *s) {}
#endif /* DO_HEAPANALYSIS */

/* Outputs a unique number for this node */
get_irn_node_nr(const ir_node *node) {
  return node->node_nr;
/* Attribute-union accessors: each asserts the node's opcode and returns
   the matching member of node->attr. */

get_irn_const_attr (ir_node *node)
  assert (node->op == op_Const);
  return node->attr.con;

get_irn_proj_attr (ir_node *node)
  assert (node->op == op_Proj);
  return node->attr.proj;

get_irn_alloc_attr (ir_node *node)
  assert (node->op == op_Alloc);

get_irn_free_attr (ir_node *node)
  assert (node->op == op_Free);
  /* skip_tid normalizes a type-id to the real type; result cached back */
  return node->attr.f = skip_tid(node->attr.f);

get_irn_symconst_attr (ir_node *node)
  assert (node->op == op_SymConst);

get_irn_call_attr (ir_node *node)
  assert (node->op == op_Call);
  /* same caching normalization for the called type */
  return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);

get_irn_sel_attr (ir_node *node)
  assert (node->op == op_Sel);

get_irn_phi_attr (ir_node *node)
  assert (node->op == op_Phi);
  return node->attr.phi0_pos;

get_irn_block_attr (ir_node *node)
  assert (node->op == op_Block);
  return node->attr.block;

get_irn_load_attr (ir_node *node)
  assert (node->op == op_Load);
  return node->attr.load;

get_irn_store_attr (ir_node *node)
  assert (node->op == op_Store);
  return node->attr.store;

/* except attr is shared by all ops that can raise an exception */
get_irn_except_attr (ir_node *node)
  assert (node->op == op_Div || node->op == op_Quot ||
          node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
  return node->attr.except;
/** manipulate fields of individual nodes **/

/* this works for all except Block */
get_nodes_block (const ir_node *node) {
  assert (!(node->op == op_Block));
  /* the block is the -1 predecessor */
  return get_irn_n(node, -1);

set_nodes_block (ir_node *node, ir_node *block) {
  assert (!(node->op == op_Block));
  set_irn_n(node, -1, block);

/* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
 * from Start. If so returns frame type, else Null. */
type *is_frame_pointer(ir_node *n) {
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_frame_base)) {
    ir_node *start = get_Proj_pred(n);
    if (get_irn_op(start) == op_Start) {
      return get_irg_frame_type(get_irn_irg(start));

/* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
 * from Start. If so returns global type, else Null. */
type *is_globals_pointer(ir_node *n) {
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_globals)) {
    ir_node *start = get_Proj_pred(n);
    if (get_irn_op(start) == op_Start) {
      return get_glob_type();

/* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
 * from Start. If so returns 1, else 0. */
int is_value_arg_pointer(ir_node *n) {
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
      (get_irn_op(get_Proj_pred(n)) == op_Start))
/* Returns an array with the predecessors of the Block. Depending on
   the implementation of the graph data structure this can be a copy of
   the internal representation of predecessors as well as the internal
   array itself. Therefore writing to this array might obstruct the ir. */
get_Block_cfgpred_arr (ir_node *node)
  assert ((node->op == op_Block));
  /* skip slot 0 (the block slot) of the raw in array */
  return (ir_node **)&(get_irn_in(node)[1]);

get_Block_n_cfgpreds (ir_node *node) {
  assert ((node->op == op_Block));
  return get_irn_arity(node);

get_Block_cfgpred (ir_node *node, int pos) {
  assert(-1 <= pos && pos < get_irn_arity(node));
  assert(node->op == op_Block);
  return get_irn_n(node, pos);

set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
  assert (node->op == op_Block);
  set_irn_n(node, pos, pred);

/* Matured: no further predecessors will be added (construction done). */
get_Block_matured (ir_node *node) {
  assert (node->op == op_Block);
  return node->attr.block.matured;

set_Block_matured (ir_node *node, bool matured) {
  assert (node->op == op_Block);
  node->attr.block.matured = matured;

/* Per-block visited counter, separate from the per-node one. */
get_Block_block_visited (ir_node *node) {
  assert (node->op == op_Block);
  return node->attr.block.block_visited;

set_Block_block_visited (ir_node *node, unsigned long visit) {
  assert (node->op == op_Block);
  node->attr.block.block_visited = visit;

/* For this current_ir_graph must be set. */
mark_Block_block_visited (ir_node *node) {
  assert (node->op == op_Block);
  node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);

Block_not_block_visited(ir_node *node) {
  assert (node->op == op_Block);
  return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));

/* graph_arr caches the current value per position during construction;
   position -1 (the block) lives at index 0, hence pos+1 */
get_Block_graph_arr (ir_node *node, int pos) {
  assert (node->op == op_Block);
  return node->attr.block.graph_arr[pos+1];

set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
  assert (node->op == op_Block);
  node->attr.block.graph_arr[pos+1] = value;

/* Interprocedural (call graph) predecessors of a Block. */
void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Block);
  if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
    node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.block.in_cg[0] = NULL;
    node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
    /* Fix backedge array.  fix_backedges operates depending on
       interprocedural_view. */
    int ipv = get_interprocedural_view();
    set_interprocedural_view(true);
    fix_backedges(current_ir_graph->obst, node);
    set_interprocedural_view(ipv);
  memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);

void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
  assert(node->op == op_Block &&
         node->attr.block.in_cg &&
         0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
  node->attr.block.in_cg[pos + 1] = pred;

ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
  assert(node->op == op_Block);
  return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;

int get_Block_cg_n_cfgpreds(ir_node * node) {
  assert(node->op == op_Block);
  return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;

ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
  assert(node->op == op_Block && node->attr.block.in_cg);
  return node->attr.block.in_cg[pos + 1];

void remove_Block_cg_cfgpred_arr(ir_node * node) {
  assert(node->op == op_Block);
  node->attr.block.in_cg = NULL;

/* Dead-block marking (wrappers around the inlined versions). */
ir_node *(set_Block_dead)(ir_node *block) {
  return __set_Block_dead(block);

int (is_Block_dead)(const ir_node *block) {
  return __is_Block_dead(block);
/* Deprecated: the Start node's graph must not be set this way. */
set_Start_irg(ir_node *node, ir_graph *irg) {
  assert(node->op == op_Start);
  assert(is_ir_graph(irg));
  assert(0 && " Why set irg? -- use set_irn_irg");

/* Keep-alive edges of the End node. */
get_End_n_keepalives(ir_node *end) {
  assert (end->op == op_End);
  return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);

get_End_keepalive(ir_node *end, int pos) {
  assert (end->op == op_End);
  return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);

add_End_keepalive (ir_node *end, ir_node *ka) {
  assert (end->op == op_End);
  /* End uses a dynamic in array, so appending is possible */
  ARR_APP1 (ir_node *, end->in, ka);

set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
  assert (end->op == op_End);
  set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);

free_End (ir_node *end) {
  assert (end->op == op_End);
  DEL_ARR_F(end->in); /* GL @@@ does not work! */
  end->in = NULL; /* @@@ make sure we get an error if we use the
                     in array afterwards ... */
744 > Implementing the case construct (which is where the constant Proj node is
745 > important) involves far more than simply determining the constant values.
746 > We could argue that this is more properly a function of the translator from
747 > Firm to the target machine. That could be done if there was some way of
748 > projecting "default" out of the Cond node.
749 I know it's complicated.
Basically there are two problems:
751 - determining the gaps between the projs
752 - determining the biggest case constant to know the proj number for
754 I see several solutions:
755 1. Introduce a ProjDefault node. Solves both problems.
756 This means to extend all optimizations executed during construction.
757 2. Give the Cond node for switch two flavors:
758 a) there are no gaps in the projs (existing flavor)
759 b) gaps may exist, default proj is still the Proj with the largest
760 projection number. This covers also the gaps.
761 3. Fix the semantic of the Cond to that of 2b)
763 Solution 2 seems to be the best:
764 Computing the gaps in the Firm representation is not too hard, i.e.,
765 libFIRM can implement a routine that transforms between the two
766 flavours. This is also possible for 1) but 2) does not require to
767 change any existing optimization.
768 Further it should be far simpler to determine the biggest constant than
770 I don't want to choose 3) as 2a) seems to have advantages for
771 dataflow analysis and 3) does not allow to convert the representation to
/* The selector (value being switched/branched on) is operand 0. */
get_Cond_selector (ir_node *node) {
  assert (node->op == op_Cond);
  return get_irn_n(node, 0);

set_Cond_selector (ir_node *node, ir_node *selector) {
  assert (node->op == op_Cond);
  set_irn_n(node, 0, selector);

/* Kind of the Cond: boolean branch or switch-like. */
get_Cond_kind (ir_node *node) {
  assert (node->op == op_Cond);
  return node->attr.c.kind;

set_Cond_kind (ir_node *node, cond_kind kind) {
  assert (node->op == op_Cond);
  node->attr.c.kind = kind;

/* Proj number used as the default case of a switch Cond. */
get_Cond_defaultProj (ir_node *node) {
  assert (node->op == op_Cond);
  return node->attr.c.default_proj;
/* Return: operand 0 is memory, results follow at RETURN_RESULT_OFFSET. */
get_Return_mem (ir_node *node) {
  assert (node->op == op_Return);
  return get_irn_n(node, 0);

set_Return_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Return);
  set_irn_n(node, 0, mem);

get_Return_n_ress (ir_node *node) {
  assert (node->op == op_Return);
  return (get_irn_arity(node) - RETURN_RESULT_OFFSET);

get_Return_res_arr (ir_node *node)
  assert ((node->op == op_Return));
  if (get_Return_n_ress(node) > 0)
    /* skip block slot and the mem operand */
    return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);

/* Body not visible in this chunk. */
set_Return_n_res (ir_node *node, int results) {
  assert (node->op == op_Return);

get_Return_res (ir_node *node, int pos) {
  assert (node->op == op_Return);
  assert (get_Return_n_ress(node) > pos);
  return get_irn_n(node, pos + RETURN_RESULT_OFFSET);

set_Return_res (ir_node *node, int pos, ir_node *res){
  assert (node->op == op_Return);
  set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);

/* Raise: operand 0 is memory, operand 1 the exception object pointer. */
get_Raise_mem (ir_node *node) {
  assert (node->op == op_Raise);
  return get_irn_n(node, 0);

set_Raise_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Raise);
  set_irn_n(node, 0, mem);

get_Raise_exo_ptr (ir_node *node) {
  assert (node->op == op_Raise);
  return get_irn_n(node, 1);

set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
  assert (node->op == op_Raise);
  set_irn_n(node, 1, exo_ptr);
/* The constant value of a Const node. */
tarval *get_Const_tarval (ir_node *node) {
  assert (node->op == op_Const);
  return node->attr.con.tv;

set_Const_tarval (ir_node *node, tarval *con) {
  assert (node->op == op_Const);
  node->attr.con.tv = con;

/* The source language type. Must be an atomic type. Mode of type must
   be mode of node. For tarvals from entities type must be pointer to
   entity type.  (NOTE(review): end of comment reconstructed; the closing
   line is not visible in this chunk.) */
get_Const_type (ir_node *node) {
  assert (node->op == op_Const);
  return node->attr.con.tp;

set_Const_type (ir_node *node, type *tp) {
  assert (node->op == op_Const);
  if (tp != unknown_type) {
    /* enforce the documented invariants, except for the unknown type */
    assert (is_atomic_type(tp));
    assert (get_type_mode(tp) == get_irn_mode(node));
  node->attr.con.tp = tp;
/* Kind of the SymConst (type tag, size, address by name or entity). */
get_SymConst_kind (const ir_node *node) {
  assert (node->op == op_SymConst);
  return node->attr.i.num;

set_SymConst_kind (ir_node *node, symconst_kind num) {
  assert (node->op == op_SymConst);
  node->attr.i.num = num;

/* Only valid for kinds symconst_type_tag and symconst_size. */
get_SymConst_type (ir_node *node) {
  assert (   (node->op == op_SymConst)
          && (   get_SymConst_kind(node) == symconst_type_tag
              || get_SymConst_kind(node) == symconst_size));
  /* normalize a type id to the real type and cache it back */
  return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);

set_SymConst_type (ir_node *node, type *tp) {
  assert (   (node->op == op_SymConst)
          && (   get_SymConst_kind(node) == symconst_type_tag
              || get_SymConst_kind(node) == symconst_size));
  node->attr.i.sym.type_p = tp;

/* Only valid for kind symconst_addr_name. */
get_SymConst_name (ir_node *node) {
  assert (   (node->op == op_SymConst)
          && (get_SymConst_kind(node) == symconst_addr_name));
  return node->attr.i.sym.ident_p;

set_SymConst_name (ir_node *node, ident *name) {
  assert (   (node->op == op_SymConst)
          && (get_SymConst_kind(node) == symconst_addr_name));
  node->attr.i.sym.ident_p = name;

/* Only to access SymConst of kind symconst_addr_ent.  Else assertion fails. */
entity *get_SymConst_entity (ir_node *node) {
  assert (   (node->op == op_SymConst)
          && (get_SymConst_kind (node) == symconst_addr_ent));
  return node->attr.i.sym.entity_p;

void set_SymConst_entity (ir_node *node, entity *ent) {
  assert (   (node->op == op_SymConst)
          && (get_SymConst_kind(node) == symconst_addr_ent));
  node->attr.i.sym.entity_p = ent;

/* Raw access to the symbol union, independent of the kind. */
union symconst_symbol
get_SymConst_symbol (ir_node *node) {
  assert (node->op == op_SymConst);
  return node->attr.i.sym;

set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
  assert (node->op == op_SymConst);
  node->attr.i.sym = sym;

/* Source-language type of the SymConst value; may be NULL. */
get_SymConst_value_type (ir_node *node) {
  assert (node->op == op_SymConst);
  /* normalize a type id to the real type and cache it back */
  if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
  return node->attr.i.tp;

set_SymConst_value_type (ir_node *node, type *tp) {
  assert (node->op == op_SymConst);
  node->attr.i.tp = tp;
/* Sel: operand 0 is memory, operand 1 the compound pointer, indexes
   follow at SEL_INDEX_OFFSET. */
get_Sel_mem (ir_node *node) {
  assert (node->op == op_Sel);
  return get_irn_n(node, 0);

set_Sel_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Sel);
  set_irn_n(node, 0, mem);

get_Sel_ptr (ir_node *node) {
  assert (node->op == op_Sel);
  return get_irn_n(node, 1);

set_Sel_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Sel);
  set_irn_n(node, 1, ptr);

get_Sel_n_indexs (ir_node *node) {
  assert (node->op == op_Sel);
  return (get_irn_arity(node) - SEL_INDEX_OFFSET);

get_Sel_index_arr (ir_node *node)
  assert ((node->op == op_Sel));
  if (get_Sel_n_indexs(node) > 0)
    /* skip block slot, mem and ptr */
    return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];

get_Sel_index (ir_node *node, int pos) {
  assert (node->op == op_Sel);
  return get_irn_n(node, pos + SEL_INDEX_OFFSET);

set_Sel_index (ir_node *node, int pos, ir_node *index) {
  assert (node->op == op_Sel);
  set_irn_n(node, pos + SEL_INDEX_OFFSET, index);

/* Entity (member/array element) selected by this Sel. */
get_Sel_entity (ir_node *node) {
  assert (node->op == op_Sel);
  return node->attr.s.ent;

set_Sel_entity (ir_node *node, entity *ent) {
  assert (node->op == op_Sel);
  node->attr.s.ent = ent;
1055 get_InstOf_ent (ir_node *node) {
1056 assert (node->op = op_InstOf);
1057 return (node->attr.io.ent);
1061 set_InstOf_ent (ir_node *node, type *ent) {
1062 assert (node->op = op_InstOf);
1063 node->attr.io.ent = ent;
1067 get_InstOf_store (ir_node *node) {
1068 assert (node->op = op_InstOf);
1069 return (get_irn_n (node, 0));
1073 set_InstOf_store (ir_node *node, ir_node *obj) {
1074 assert (node->op = op_InstOf);
1075 set_irn_n (node, 0, obj);
1079 get_InstOf_obj (ir_node *node) {
1080 assert (node->op = op_InstOf);
1081 return (get_irn_n (node, 1));
1085 set_InstOf_obj (ir_node *node, ir_node *obj) {
1086 assert (node->op = op_InstOf);
1087 set_irn_n (node, 1, obj);
/* For unary and binary arithmetic operations the access to the
   operands can be factored out. Left is the first, right the
   second arithmetic value as listed in tech report 0999-33.
   unops are: Minus, Abs, Not, Conv, Cast
   binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
   Shr, Shrs, Rotate, Cmp */

/* Call: operand 0 is memory, operand 1 the called address, parameters
   follow at CALL_PARAM_OFFSET. */
get_Call_mem (ir_node *node) {
  assert (node->op == op_Call);
  return get_irn_n(node, 0);

set_Call_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Call);
  set_irn_n(node, 0, mem);

get_Call_ptr (ir_node *node) {
  assert (node->op == op_Call);
  return get_irn_n(node, 1);

set_Call_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Call);
  set_irn_n(node, 1, ptr);

get_Call_param_arr (ir_node *node) {
  assert (node->op == op_Call);
  return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];

/* Number of parameters: arity minus the mem and ptr operands. */
get_Call_n_params (ir_node *node)  {
  assert (node->op == op_Call);
  return (get_irn_arity(node) - CALL_PARAM_OFFSET);

get_Call_arity (ir_node *node) {
  assert (node->op == op_Call);
  return get_Call_n_params(node);

/* NOTE(review): 'arity' typed as ir_node* looks wrong (expected int?)
   and the body is not visible in this chunk -- verify against header. */
set_Call_arity (ir_node *node, ir_node *arity) {
  assert (node->op == op_Call);

get_Call_param (ir_node *node, int pos) {
  assert (node->op == op_Call);
  return get_irn_n(node, pos + CALL_PARAM_OFFSET);

set_Call_param (ir_node *node, int pos, ir_node *param) {
  assert (node->op == op_Call);
  set_irn_n(node, pos + CALL_PARAM_OFFSET, param);

/* Type of the called method; type ids are normalized and cached back. */
get_Call_type (ir_node *node) {
  assert (node->op == op_Call);
  return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);

set_Call_type (ir_node *node, type *tp) {
  assert (node->op == op_Call);
  assert ((get_unknown_type() == tp) || is_method_type(tp));
  node->attr.call.cld_tp = tp;

/* Callee analysis results attached to the Call. */
int Call_has_callees(ir_node *node) {
  assert(node && node->op == op_Call);
  return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
          (node->attr.call.callee_arr != NULL));

int get_Call_n_callees(ir_node * node) {
  assert(node && node->op == op_Call && node->attr.call.callee_arr);
  return ARR_LEN(node->attr.call.callee_arr);

entity * get_Call_callee(ir_node * node, int pos) {
  assert(pos >= 0 && pos < get_Call_n_callees(node));
  return node->attr.call.callee_arr[pos];

void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
  assert(node->op == op_Call);
  /* reallocate only when the size changes */
  if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
    node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
  memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));

void remove_Call_callee_arr(ir_node * node) {
  assert(node->op == op_Call);
  node->attr.call.callee_arr = NULL;

/* CallBegin (interprocedural view): the address and the original Call. */
ir_node * get_CallBegin_ptr (ir_node *node) {
  assert(node->op == op_CallBegin);
  return get_irn_n(node, 0);
void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
  assert(node->op == op_CallBegin);
  set_irn_n(node, 0, ptr);
ir_node * get_CallBegin_call (ir_node *node) {
  assert(node->op == op_CallBegin);
  return node->attr.callbegin.call;
void set_CallBegin_call (ir_node *node, ir_node *call) {
  assert(node->op == op_CallBegin);
  node->attr.callbegin.call = call;
/* Body fragments of the macros that generate per-op left/right and
   op accessors (the '#define BINOP(OP)' / '#define UNOP(OP)' heads and
   the closing lines are not visible in this chunk; no comments are
   inserted below to avoid breaking the backslash continuations). */
ir_node * get_##OP##_left(ir_node *node) { \
  assert(node->op == op_##OP); \
  return get_irn_n(node, node->op->op_index); \
void set_##OP##_left(ir_node *node, ir_node *left) { \
  assert(node->op == op_##OP); \
  set_irn_n(node, node->op->op_index, left); \
ir_node *get_##OP##_right(ir_node *node) { \
  assert(node->op == op_##OP); \
  return get_irn_n(node, node->op->op_index + 1); \
void set_##OP##_right(ir_node *node, ir_node *right) { \
  assert(node->op == op_##OP); \
  set_irn_n(node, node->op->op_index + 1, right); \
ir_node *get_##OP##_op(ir_node *node) { \
  assert(node->op == op_##OP); \
  return get_irn_n(node, node->op->op_index); \
void set_##OP##_op (ir_node *node, ir_node *op) { \
  assert(node->op == op_##OP); \
  set_irn_n(node, node->op->op_index, op); \
/* Memory operand (slot 0) of the dividing operations. */
get_Quot_mem (ir_node *node) {
  assert (node->op == op_Quot);
  return get_irn_n(node, 0);

set_Quot_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Quot);
  set_irn_n(node, 0, mem);

get_DivMod_mem (ir_node *node) {
  assert (node->op == op_DivMod);
  return get_irn_n(node, 0);

set_DivMod_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_DivMod);
  set_irn_n(node, 0, mem);

get_Div_mem (ir_node *node) {
  assert (node->op == op_Div);
  return get_irn_n(node, 0);

set_Div_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Div);
  set_irn_n(node, 0, mem);

get_Mod_mem (ir_node *node) {
  assert (node->op == op_Mod);
  return get_irn_n(node, 0);

set_Mod_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Mod);
  set_irn_n(node, 0, mem);

/* Target type of a Cast. */
get_Cast_type (ir_node *node) {
  assert (node->op == op_Cast);
  return node->attr.cast.totype;

set_Cast_type (ir_node *node, type *to_tp) {
  assert (node->op == op_Cast);
  node->attr.cast.totype = to_tp;
/* Generic operand access for any unary/binary op, using the op's
   op_index to find the first data operand. */
(is_unop)(const ir_node *node) {
  return __is_unop(node);

get_unop_op (ir_node *node) {
  if (node->op->opar == oparity_unary)
    return get_irn_n(node, node->op->op_index);
  /* reached only on wrong arity: report it */
  assert(node->op->opar == oparity_unary);

set_unop_op (ir_node *node, ir_node *op) {
  if (node->op->opar == oparity_unary)
    set_irn_n(node, node->op->op_index, op);
  assert(node->op->opar == oparity_unary);

(is_binop)(const ir_node *node) {
  return __is_binop(node);

get_binop_left (ir_node *node) {
  if (node->op->opar == oparity_binary)
    return get_irn_n(node, node->op->op_index);
  assert(node->op->opar == oparity_binary);

set_binop_left (ir_node *node, ir_node *left) {
  if (node->op->opar == oparity_binary)
    set_irn_n(node, node->op->op_index, left);
  assert (node->op->opar == oparity_binary);

get_binop_right (ir_node *node) {
  if (node->op->opar == oparity_binary)
    return get_irn_n(node, node->op->op_index + 1);
  assert(node->op->opar == oparity_binary);

set_binop_right (ir_node *node, ir_node *right) {
  if (node->op->opar == oparity_binary)
    set_irn_n(node, node->op->op_index + 1, right);
  assert (node->op->opar == oparity_binary);
/* True iff n is a "real" Phi (a zero-arity Phi0 during construction is
   excluded; Filters count as Phis only in the interprocedural view).
   NOTE(review): the lines binding 'op' are not visible in this chunk. */
int is_Phi (const ir_node *n) {
  if (op == op_Filter) return get_interprocedural_view();
  return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
          (get_irn_arity(n) > 0));

/* True iff n is a Phi0: a zero-arity Phi of a graph under construction. */
int is_Phi0 (const ir_node *n) {
  return ((get_irn_op(n) == op_Phi) &&
          (get_irn_arity(n) == 0) &&
          (get_irg_phase_state(get_irn_irg(n)) == phase_building));

get_Phi_preds_arr (ir_node *node) {
  assert (node->op == op_Phi);
  /* skip the block slot of the raw in array */
  return (ir_node **)&(get_irn_in(node)[1]);

get_Phi_n_preds (ir_node *node) {
  assert (is_Phi(node) || is_Phi0(node));
  return (get_irn_arity(node));

/* Body not visible in this chunk. */
void set_Phi_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Phi);

get_Phi_pred (ir_node *node, int pos) {
  assert (is_Phi(node) || is_Phi0(node));
  return get_irn_n(node, pos);

set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
  assert (is_Phi(node) || is_Phi0(node));
  set_irn_n(node, pos, pred);
1447 int is_memop(ir_node *node) {
1448 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1451 ir_node *get_memop_mem (ir_node *node) {
1452 assert(is_memop(node));
1453 return get_irn_n(node, 0);
1456 void set_memop_mem (ir_node *node, ir_node *mem) {
1457 assert(is_memop(node));
1458 set_irn_n(node, 0, mem);
1461 ir_node *get_memop_ptr (ir_node *node) {
1462 assert(is_memop(node));
1463 return get_irn_n(node, 1);
1466 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1467 assert(is_memop(node));
1468 set_irn_n(node, 1, ptr);
1472 get_Load_mem (ir_node *node) {
1473 assert (node->op == op_Load);
1474 return get_irn_n(node, 0);
1478 set_Load_mem (ir_node *node, ir_node *mem) {
1479 assert (node->op == op_Load);
1480 set_irn_n(node, 0, mem);
1484 get_Load_ptr (ir_node *node) {
1485 assert (node->op == op_Load);
1486 return get_irn_n(node, 1);
1490 set_Load_ptr (ir_node *node, ir_node *ptr) {
1491 assert (node->op == op_Load);
1492 set_irn_n(node, 1, ptr);
1496 get_Load_mode (ir_node *node) {
1497 assert (node->op == op_Load);
1498 return node->attr.load.load_mode;
1502 set_Load_mode (ir_node *node, ir_mode *mode) {
1503 assert (node->op == op_Load);
1504 node->attr.load.load_mode = mode;
1508 get_Load_volatility (ir_node *node) {
1509 assert (node->op == op_Load);
1510 return node->attr.load.volatility;
1514 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1515 assert (node->op == op_Load);
1516 node->attr.load.volatility = volatility;
1521 get_Store_mem (ir_node *node) {
1522 assert (node->op == op_Store);
1523 return get_irn_n(node, 0);
1527 set_Store_mem (ir_node *node, ir_node *mem) {
1528 assert (node->op == op_Store);
1529 set_irn_n(node, 0, mem);
1533 get_Store_ptr (ir_node *node) {
1534 assert (node->op == op_Store);
1535 return get_irn_n(node, 1);
1539 set_Store_ptr (ir_node *node, ir_node *ptr) {
1540 assert (node->op == op_Store);
1541 set_irn_n(node, 1, ptr);
1545 get_Store_value (ir_node *node) {
1546 assert (node->op == op_Store);
1547 return get_irn_n(node, 2);
1551 set_Store_value (ir_node *node, ir_node *value) {
1552 assert (node->op == op_Store);
1553 set_irn_n(node, 2, value);
1557 get_Store_volatility (ir_node *node) {
1558 assert (node->op == op_Store);
1559 return node->attr.store.volatility;
1563 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1564 assert (node->op == op_Store);
1565 node->attr.store.volatility = volatility;
1570 get_Alloc_mem (ir_node *node) {
1571 assert (node->op == op_Alloc);
1572 return get_irn_n(node, 0);
1576 set_Alloc_mem (ir_node *node, ir_node *mem) {
1577 assert (node->op == op_Alloc);
1578 set_irn_n(node, 0, mem);
1582 get_Alloc_size (ir_node *node) {
1583 assert (node->op == op_Alloc);
1584 return get_irn_n(node, 1);
1588 set_Alloc_size (ir_node *node, ir_node *size) {
1589 assert (node->op == op_Alloc);
1590 set_irn_n(node, 1, size);
1594 get_Alloc_type (ir_node *node) {
1595 assert (node->op == op_Alloc);
1596 return node->attr.a.type = skip_tid(node->attr.a.type);
1600 set_Alloc_type (ir_node *node, type *tp) {
1601 assert (node->op == op_Alloc);
1602 node->attr.a.type = tp;
1606 get_Alloc_where (ir_node *node) {
1607 assert (node->op == op_Alloc);
1608 return node->attr.a.where;
1612 set_Alloc_where (ir_node *node, where_alloc where) {
1613 assert (node->op == op_Alloc);
1614 node->attr.a.where = where;
1619 get_Free_mem (ir_node *node) {
1620 assert (node->op == op_Free);
1621 return get_irn_n(node, 0);
1625 set_Free_mem (ir_node *node, ir_node *mem) {
1626 assert (node->op == op_Free);
1627 set_irn_n(node, 0, mem);
1631 get_Free_ptr (ir_node *node) {
1632 assert (node->op == op_Free);
1633 return get_irn_n(node, 1);
1637 set_Free_ptr (ir_node *node, ir_node *ptr) {
1638 assert (node->op == op_Free);
1639 set_irn_n(node, 1, ptr);
1643 get_Free_size (ir_node *node) {
1644 assert (node->op == op_Free);
1645 return get_irn_n(node, 2);
1649 set_Free_size (ir_node *node, ir_node *size) {
1650 assert (node->op == op_Free);
1651 set_irn_n(node, 2, size);
1655 get_Free_type (ir_node *node) {
1656 assert (node->op == op_Free);
1657 return node->attr.f = skip_tid(node->attr.f);
1661 set_Free_type (ir_node *node, type *tp) {
1662 assert (node->op == op_Free);
1667 get_Sync_preds_arr (ir_node *node) {
1668 assert (node->op == op_Sync);
1669 return (ir_node **)&(get_irn_in(node)[1]);
1673 get_Sync_n_preds (ir_node *node) {
1674 assert (node->op == op_Sync);
1675 return (get_irn_arity(node));
1680 set_Sync_n_preds (ir_node *node, int n_preds) {
1681 assert (node->op == op_Sync);
1686 get_Sync_pred (ir_node *node, int pos) {
1687 assert (node->op == op_Sync);
1688 return get_irn_n(node, pos);
1692 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1693 assert (node->op == op_Sync);
1694 set_irn_n(node, pos, pred);
1698 get_Proj_pred (ir_node *node) {
1699 assert (is_Proj(node));
1700 return get_irn_n(node, 0);
1704 set_Proj_pred (ir_node *node, ir_node *pred) {
1705 assert (is_Proj(node));
1706 set_irn_n(node, 0, pred);
1710 get_Proj_proj (ir_node *node) {
1711 assert (is_Proj(node));
1712 if (get_irn_opcode(node) == iro_Proj) {
1713 return node->attr.proj;
1715 assert(get_irn_opcode(node) == iro_Filter);
1716 return node->attr.filter.proj;
1721 set_Proj_proj (ir_node *node, long proj) {
1722 assert (node->op == op_Proj);
1723 node->attr.proj = proj;
1727 get_Tuple_preds_arr (ir_node *node) {
1728 assert (node->op == op_Tuple);
1729 return (ir_node **)&(get_irn_in(node)[1]);
1733 get_Tuple_n_preds (ir_node *node) {
1734 assert (node->op == op_Tuple);
1735 return (get_irn_arity(node));
1740 set_Tuple_n_preds (ir_node *node, int n_preds) {
1741 assert (node->op == op_Tuple);
1746 get_Tuple_pred (ir_node *node, int pos) {
1747 assert (node->op == op_Tuple);
1748 return get_irn_n(node, pos);
1752 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1753 assert (node->op == op_Tuple);
1754 set_irn_n(node, pos, pred);
1758 get_Id_pred (ir_node *node) {
1759 assert (node->op == op_Id);
1760 return get_irn_n(node, 0);
1764 set_Id_pred (ir_node *node, ir_node *pred) {
1765 assert (node->op == op_Id);
1766 set_irn_n(node, 0, pred);
1769 ir_node *get_Confirm_value (ir_node *node) {
1770 assert (node->op == op_Confirm);
1771 return get_irn_n(node, 0);
1773 void set_Confirm_value (ir_node *node, ir_node *value) {
1774 assert (node->op == op_Confirm);
1775 set_irn_n(node, 0, value);
1777 ir_node *get_Confirm_bound (ir_node *node) {
1778 assert (node->op == op_Confirm);
1779 return get_irn_n(node, 1);
1781 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1782 assert (node->op == op_Confirm);
1783 set_irn_n(node, 0, bound);
1785 pn_Cmp get_Confirm_cmp (ir_node *node) {
1786 assert (node->op == op_Confirm);
1787 return node->attr.confirm_cmp;
1789 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1790 assert (node->op == op_Confirm);
1791 node->attr.confirm_cmp = cmp;
1796 get_Filter_pred (ir_node *node) {
1797 assert(node->op == op_Filter);
1801 set_Filter_pred (ir_node *node, ir_node *pred) {
1802 assert(node->op == op_Filter);
1806 get_Filter_proj(ir_node *node) {
1807 assert(node->op == op_Filter);
1808 return node->attr.filter.proj;
1811 set_Filter_proj (ir_node *node, long proj) {
1812 assert(node->op == op_Filter);
1813 node->attr.filter.proj = proj;
1816 /* Don't use get_irn_arity, get_irn_n in implementation as access
1817 shall work independent of view!!! */
1818 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1819 assert(node->op == op_Filter);
1820 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1821 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1822 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1823 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1824 node->attr.filter.in_cg[0] = node->in[0];
1826 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1829 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1830 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1831 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1832 node->attr.filter.in_cg[pos + 1] = pred;
1834 int get_Filter_n_cg_preds(ir_node *node) {
1835 assert(node->op == op_Filter && node->attr.filter.in_cg);
1836 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1838 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1840 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1842 arity = ARR_LEN(node->attr.filter.in_cg);
1843 assert(pos < arity - 1);
1844 return node->attr.filter.in_cg[pos + 1];
1848 ir_node *get_Mux_sel (ir_node *node) {
1849 assert(node->op == op_Mux);
1852 void set_Mux_sel (ir_node *node, ir_node *sel) {
1853 assert(node->op == op_Mux);
1857 ir_node *get_Mux_false (ir_node *node) {
1858 assert(node->op == op_Mux);
1861 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1862 assert(node->op == op_Mux);
1863 node->in[2] = ir_false;
1866 ir_node *get_Mux_true (ir_node *node) {
1867 assert(node->op == op_Mux);
1870 void set_Mux_true (ir_node *node, ir_node *ir_true) {
1871 assert(node->op == op_Mux);
1872 node->in[3] = ir_true;
1877 get_irn_irg(const ir_node *node) {
1878 if (! is_Block(node))
1879 node = get_nodes_block(node);
1880 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1881 node = get_nodes_block(node);
1882 assert(get_irn_op(node) == op_Block);
1883 return node->attr.block.irg;
1887 /*----------------------------------------------------------------*/
1888 /* Auxiliary routines */
1889 /*----------------------------------------------------------------*/
1892 skip_Proj (ir_node *node) {
1893 /* don't assert node !!! */
1894 if (node && is_Proj(node)) {
1895 return get_Proj_pred(node);
1902 skip_Tuple (ir_node *node) {
1905 if (!get_opt_normalize()) return node;
1907 node = skip_Id(node);
1908 if (get_irn_op(node) == op_Proj) {
1909 pred = skip_Id(get_Proj_pred(node));
1910 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1911 pred = skip_Id(skip_Tuple(pred));
1912 if (get_irn_op(pred) == op_Tuple)
1913 return get_Tuple_pred(pred, get_Proj_proj(node));
1918 /** returns operand of node if node is a Cast */
1919 ir_node *skip_Cast (ir_node *node) {
1920 if (node && get_irn_op(node) == op_Cast) {
1921 return skip_Id(get_irn_n(node, 0));
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops.
   NOTE(review): older variant disabled — a second definition of skip_Id
   follows in this file; keeping both live is a redefinition error. The
   #if 0 guard was apparently lost. */
#if 0
ir_node *
skip_Id (ir_node *node) {
  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != node->in[0+1])) {
    ir_node *rem_pred = node->in[0+1];
    ir_node *res;

    assert (get_irn_arity (node) > 0);

    node->in[0+1] = node;
    res = skip_Id(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res;
    return res;
  } else {
    return node;
  }
}
#endif
1956 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1957 than any other approach, as Id chains are resolved and all point to the real node, or
1958 all id's are self loops. */
1960 skip_Id (ir_node *node) {
1962 /* don't assert node !!! */
1964 if (!node || (node->op != op_Id)) return node;
1966 if (!get_opt_normalize()) return node;
1968 /* Don't use get_Id_pred: We get into an endless loop for
1969 self-referencing Ids. */
1970 pred = node->in[0+1];
1972 if (pred->op != op_Id) return pred;
1974 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1975 ir_node *rem_pred, *res;
1977 if (pred->op != op_Id) return pred; /* shortcut */
1980 assert (get_irn_arity (node) > 0);
1982 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1983 res = skip_Id(rem_pred);
1984 if (res->op == op_Id) /* self-loop */ return node;
1986 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1995 (is_Bad)(const ir_node *node) {
1996 return __is_Bad(node);
2000 (is_no_Block)(const ir_node *node) {
2001 return __is_no_Block(node);
2005 (is_Block)(const ir_node *node) {
2006 return __is_Block(node);
2009 /* returns true if node is a Unknown node. */
2011 is_Unknown (const ir_node *node) {
2013 return (get_irn_op(node) == op_Unknown);
2017 is_Proj (const ir_node *node) {
2019 return node->op == op_Proj
2020 || (!get_interprocedural_view() && node->op == op_Filter);
2023 /* Returns true if the operation manipulates control flow. */
2025 is_cfop(const ir_node *node) {
2026 return is_cfopcode(get_irn_op(node));
2029 /* Returns true if the operation manipulates interprocedural control flow:
2030 CallBegin, EndReg, EndExcept */
2031 int is_ip_cfop(const ir_node *node) {
2032 return is_ip_cfopcode(get_irn_op(node));
2035 /* Returns true if the operation can change the control flow because
2038 is_fragile_op(const ir_node *node) {
2039 return is_op_fragile(get_irn_op(node));
2042 /* Returns the memory operand of fragile operations. */
2043 ir_node *get_fragile_op_mem(ir_node *node) {
2044 assert(node && is_fragile_op(node));
2046 switch (get_irn_opcode (node)) {
2055 return get_irn_n(node, 0);
2060 assert(0 && "should not be reached");
2065 /* Returns true if the operation is a forking control flow operation. */
2067 is_forking_op(const ir_node *node) {
2068 return is_op_forking(get_irn_op(node));
2072 #ifdef DEBUG_libfirm
2073 void dump_irn (ir_node *n) {
2074 int i, arity = get_irn_arity(n);
2075 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2077 ir_node *pred = get_irn_n(n, -1);
2078 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2079 get_irn_node_nr(pred), (void *)pred);
2081 printf(" preds: \n");
2082 for (i = 0; i < arity; ++i) {
2083 ir_node *pred = get_irn_n(n, i);
2084 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2085 get_irn_node_nr(pred), (void *)pred);
2089 #else /* DEBUG_libfirm */
2090 void dump_irn (ir_node *n) {}
2091 #endif /* DEBUG_libfirm */