3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
/* some constants fixing the positions of nodes predecessors
   in the in array */
35 #define CALL_PARAM_OFFSET 2
36 #define FUNCCALL_PARAM_OFFSET 1
37 #define SEL_INDEX_OFFSET 2
38 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
39 #define END_KEEPALIVE_OFFSET 0
/* Printable names of the pnc (compare relation) constants, indexed by the
   pnc value itself.  NOTE(review): the visible initializer is shorter than
   the full pnc range — confirm "Ne"/"True" follow on the next line. */
static const char *pnc_name_arr [] = {
  "False", "Eq", "Lt", "Le",
  "Gt", "Ge", "Lg", "Leg", "Uo",
  "Ue", "Ul", "Ule", "Ug", "Uge",
 * Returns the pnc name from a pnc constant.
/* Returns the name string for pnc.  No bounds check: pnc must be a valid
   index into pnc_name_arr. */
const char *get_pnc_string(int pnc) {
  return pnc_name_arr[pnc];
 * Calculates the negated pnc condition.
get_negated_pnc(int pnc) {
  /* Each ordered relation negates to its unordered complement and vice
     versa (e.g. Lt <-> Uge); the break after each return is unreachable
     and kept only for symmetry. */
  case False: return True;  break;
  case Eq:    return Ne;    break;
  case Lt:    return Uge;   break;
  case Le:    return Ug;    break;
  case Gt:    return Ule;   break;
  case Ge:    return Ul;    break;
  case Lg:    return Ue;    break;
  case Leg:   return Uo;    break;
  case Uo:    return Leg;   break;
  case Ue:    return Lg;    break;
  case Ul:    return Ge;    break;
  case Ule:   return Gt;    break;
  case Ug:    return Le;    break;
  case Uge:   return Lt;    break;
  case Ne:    return Eq;    break;
  case True:  return False; break;
  /* fallback for an invalid pnc value; 99 is not a valid pnc */
  return 99; /* to shut up gcc */
/* Printable names for the Proj numbers of the Start node. */
const char *pns_name_arr [] = {
  "initial_exec", "global_store",
  "frame_base", "globals", "args"
/* Printable names for the symconst_kind values. */
const char *symconst_name_arr [] = {
  "type_tag", "size", "addr_name", "addr_ent"
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
static int forbid_new_data = 0;
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
unsigned firm_add_node_size = 0;
/* Register size additional bytes to be allocated in front of every node;
   returns the new total amount of extra space.  Must be called before the
   first node is created (while forbid_new_data is still 0).
   NOTE(review): the guard is assert-only — in NDEBUG builds late
   registration is silently accepted; confirm intended. */
unsigned register_additional_node_data(unsigned size) {
  assert(!forbid_new_data && "Too late to register additional node data");
  return firm_add_node_size += size;
/* Forbid the addition of new data to an ir node. */
 * irnode constructor.
 * Create a new irnode in irg, with an op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic array is created.
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
	     int arity, ir_node **in)
  /* custom (registered) data lives in FRONT of the ir_node proper */
  size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
  assert(irg && op && mode);
  p = obstack_alloc (irg->obst, node_size);
  memset(p, 0, node_size);
  /* skip over the custom-data prefix; res points at the ir_node itself */
  res = (ir_node *) (p + firm_add_node_size);
  res->kind = k_ir_node;
  /* NOTE(review): the next two assignments belong to the dynamic
     (arity < 0) and fixed-arity branches respectively — the branch
     lines are not visible here; confirm against the full source. */
    res->in = NEW_ARR_F (ir_node *, 1);  /* 1: space for block */
    res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
  /* in[0] is reserved for the block; predecessors start at in[1] */
  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
  set_irn_dbg_info(res, db);
  res->node_nr = get_irp_new_node_nr();
/*-- getting some parameters from ir_nodes --*/

/* The parenthesized function names below suppress macro expansion: these
   are the out-of-line versions of the __-prefixed inline accessors. */
(is_ir_node)(const void *thing) {
  return __is_ir_node(thing);
(get_irn_intra_arity)(const ir_node *node) {
  return __get_irn_intra_arity(node);
(get_irn_inter_arity)(const ir_node *node) {
  return __get_irn_inter_arity(node);
/* Function pointer dispatching between intra- and interprocedural view;
   defaults to the intraprocedural variant. */
int (*__get_irn_arity)(const ir_node *node) = __get_irn_intra_arity;
(get_irn_arity)(const ir_node *node) {
  return __get_irn_arity(node);
/* Returns the array with ins. This array is shifted with respect to the
   array accessed by get_irn_n: The block operand is at position 0 not -1.
   (@@@ This should be changed.)
   The order of the predecessors in this array is not guaranteed, except that
   lists of operands as predecessors of Block or arguments of a Call are
get_irn_in (const ir_node *node) {
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      /* Filter keeps a separate interprocedural in array */
      assert(node->attr.filter.in_cg);
      return node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      return node->attr.block.in_cg;
    /* else fall through */
/* Replaces the complete predecessor list of node with in (arity entries),
   picking the interprocedural array for Filter/Block when that view is on. */
set_irn_in (ir_node *node, int arity, ir_node **in) {
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      arr = &node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      arr = &node->attr.block.in_cg;
  /* reallocate only if the arity actually changed; slot 0 keeps the block */
  if (arity != ARR_LEN(*arr) - 1) {
    ir_node * block = (*arr)[0];
    *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
  fix_backedges(current_ir_graph->obst, node);
  memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
(get_irn_intra_n)(const ir_node *node, int n) {
  return __get_irn_intra_n (node, n);
(get_irn_inter_n)(const ir_node *node, int n) {
  return __get_irn_inter_n (node, n);
/* view-dependent dispatch, analogous to __get_irn_arity above */
ir_node *(*__get_irn_n)(const ir_node *node, int n) = __get_irn_intra_n;
(get_irn_n)(const ir_node *node, int n) {
  return __get_irn_n(node, n);
/* Sets predecessor n of node (n == -1 sets the block). Predecessor n is
   stored at in[n + 1] because in[0] holds the block. */
set_irn_n (ir_node *node, int n, ir_node *in) {
  assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
  assert(in && in->kind == k_ir_node);
  if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
    /* Change block pred in both views! */
    node->in[n + 1] = in;
    assert(node->attr.filter.in_cg);
    node->attr.filter.in_cg[n + 1] = in;
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      node->attr.filter.in_cg[n + 1] = in;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      node->attr.block.in_cg[n + 1] = in;
    /* else fall through */
  node->in[n + 1] = in;
/* Out-of-line wrappers for the mode/op accessor macros. */
(get_irn_mode)(const ir_node *node) {
  return __get_irn_mode(node);
(set_irn_mode)(ir_node *node, ir_mode *mode)
  __set_irn_mode(node, mode);
get_irn_modecode (const ir_node *node)
  return node->mode->code;
/** Gets the string representation of the mode. */
get_irn_modename (const ir_node *node)
  return get_mode_name(node->mode);
get_irn_modeident (const ir_node *node)
  return get_mode_ident(node->mode);
(get_irn_op)(const ir_node *node)
  return __get_irn_op(node);
/* should be private to the library: */
set_irn_op (ir_node *node, ir_op *op)
(get_irn_opcode)(const ir_node *node)
  return __get_irn_opcode(node);
get_irn_opname (const ir_node *node)
  /* zero-arity Phi during construction is reported as the pseudo-op "Phi0" */
  if ((get_irn_op((ir_node *)node) == op_Phi) &&
      (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
      (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
  return get_id_str(node->op->name);
get_irn_opident (const ir_node *node)
  return node->op->name;
(get_irn_visited)(const ir_node *node)
  return __get_irn_visited(node);
(set_irn_visited)(ir_node *node, unsigned long visited)
  __set_irn_visited(node, visited);
(mark_irn_visited)(ir_node *node) {
  __mark_irn_visited(node);
(irn_not_visited)(const ir_node *node) {
  return __irn_not_visited(node);
(irn_visited)(const ir_node *node) {
  return __irn_visited(node);
(set_irn_link)(ir_node *node, void *link) {
  __set_irn_link(node, link);
(get_irn_link)(const ir_node *node) {
  return __get_irn_link(node);
(get_irn_pinned)(const ir_node *node) {
  return __get_irn_pinned(node);
/* Sets the pin state of an exception-raising node. */
void set_irn_pinned(ir_node *node, op_pin_state state) {
  /* due to optimization an opt may be turned into a Tuple */
  if (get_irn_op(node) == op_Tuple)
  /* NOTE(review): the Tuple early-exit body is not visible here —
     presumably a bare return; confirm against the full source. */
  assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
  assert(state == op_pin_state_pinned || state == op_pin_state_floats);
  node->attr.except.pin_state = state;
#ifdef DO_HEAPANALYSIS
/* Access the abstract interpretation information of a node.
   Returns NULL if no such information is available. */
struct abstval *get_irn_abst_value(ir_node *n) {
/* Set the abstract interpretation information of a node. */
void set_irn_abst_value(ir_node *n, struct abstval *os) {
struct section *firm_get_irn_section(ir_node *n) {
void firm_set_irn_section(ir_node *n, struct section *s) {
/* Dummies needed for firmjni.
   NOTE(review): these no-op stubs are presumably under an #else branch
   (not visible here) — confirm against the full source. */
struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
void set_irn_abst_value(ir_node *n, struct abstval *os) {}
struct section *firm_get_irn_section(ir_node *n) { return NULL; }
void firm_set_irn_section(ir_node *n, struct section *s) {}
#endif /* DO_HEAPANALYSIS */
/* Outputs a unique number for this node */
get_irn_node_nr(const ir_node *node) {
  return node->node_nr;
/* Opcode-checked accessors for the per-opcode attribute union members. */
get_irn_const_attr (ir_node *node)
  assert (node->op == op_Const);
  return node->attr.con;
get_irn_proj_attr (ir_node *node)
  assert (node->op == op_Proj);
  return node->attr.proj;
get_irn_alloc_attr (ir_node *node)
  assert (node->op == op_Alloc);
get_irn_free_attr (ir_node *node)
  assert (node->op == op_Free);
  /* skip_tid normalizes the stored type reference before returning it */
  return node->attr.f = skip_tid(node->attr.f);
get_irn_symconst_attr (ir_node *node)
  assert (node->op == op_SymConst);
get_irn_call_attr (ir_node *node)
  assert (node->op == op_Call);
  return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
get_irn_sel_attr (ir_node *node)
  assert (node->op == op_Sel);
get_irn_phi_attr (ir_node *node)
  assert (node->op == op_Phi);
  return node->attr.phi0_pos;
get_irn_block_attr (ir_node *node)
  assert (node->op == op_Block);
  return node->attr.block;
get_irn_load_attr (ir_node *node)
  assert (node->op == op_Load);
  return node->attr.load;
get_irn_store_attr (ir_node *node)
  assert (node->op == op_Store);
  return node->attr.store;
/* Exception attribute is shared by all ops that can raise. */
get_irn_except_attr (ir_node *node)
  assert (node->op == op_Div || node->op == op_Quot ||
          node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
  return node->attr.except;
/** manipulate fields of individual nodes **/
/* this works for all except Block: the block is predecessor -1 */
get_nodes_block (const ir_node *node) {
  assert (!(node->op == op_Block));
  return get_irn_n(node, -1);
set_nodes_block (ir_node *node, ir_node *block) {
  assert (!(node->op == op_Block));
  set_irn_n(node, -1, block);
/* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
 * from Start. If so returns frame type, else Null. */
type *is_frame_pointer(ir_node *n) {
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_frame_base)) {
    ir_node *start = get_Proj_pred(n);
    if (get_irn_op(start) == op_Start) {
      return get_irg_frame_type(get_irn_irg(start));
/* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
 * from Start. If so returns global type, else Null. */
type *is_globals_pointer(ir_node *n) {
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_globals)) {
    ir_node *start = get_Proj_pred(n);
    if (get_irn_op(start) == op_Start) {
      return get_glob_type();
/* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
 * from Start. If so returns 1, else 0. */
int is_value_arg_pointer(ir_node *n) {
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
      (get_irn_op(get_Proj_pred(n)) == op_Start))
/* Returns an array with the predecessors of the Block. Depending on
   the implementation of the graph data structure this can be a copy of
   the internal representation of predecessors as well as the internal
   array itself. Therefore writing to this array might obstruct the ir. */
get_Block_cfgpred_arr (ir_node *node)
  assert ((node->op == op_Block));
  /* skip in[0]; control-flow predecessors start at index 1 */
  return (ir_node **)&(get_irn_in(node)[1]);
get_Block_n_cfgpreds (ir_node *node) {
  assert ((node->op == op_Block));
  return get_irn_arity(node);
get_Block_cfgpred (ir_node *node, int pos) {
  assert(-1 <= pos && pos < get_irn_arity(node));
  assert(node->op == op_Block);
  return get_irn_n(node, pos);
set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
  assert (node->op == op_Block);
  set_irn_n(node, pos, pred);
/* matured == block construction finished, no more preds may be added */
get_Block_matured (ir_node *node) {
  assert (node->op == op_Block);
  return node->attr.block.matured;
set_Block_matured (ir_node *node, bool matured) {
  assert (node->op == op_Block);
  node->attr.block.matured = matured;
get_Block_block_visited (ir_node *node) {
  assert (node->op == op_Block);
  return node->attr.block.block_visited;
set_Block_block_visited (ir_node *node, unsigned long visit) {
  assert (node->op == op_Block);
  node->attr.block.block_visited = visit;
/* For this current_ir_graph must be set. */
mark_Block_block_visited (ir_node *node) {
  assert (node->op == op_Block);
  node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
Block_not_block_visited(ir_node *node) {
  assert (node->op == op_Block);
  return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
/* graph_arr maps variable positions to their current value (SSA construction);
   position pos maps to slot pos+1 */
get_Block_graph_arr (ir_node *node, int pos) {
  assert (node->op == op_Block);
  return node->attr.block.graph_arr[pos+1];
set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
  assert (node->op == op_Block);
  node->attr.block.graph_arr[pos+1] = value;
/* Installs the interprocedural (call graph) predecessor array of a Block;
   slot 0 stays NULL (no block-of-block), preds live at indices 1..arity. */
void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Block);
  if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
    node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.block.in_cg[0] = NULL;
    node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
    /* Fix backedge array. fix_backedges operates depending on
       interprocedural_view, so temporarily force it on. */
    int ipv = get_interprocedural_view();
    set_interprocedural_view(true);
    fix_backedges(current_ir_graph->obst, node);
    set_interprocedural_view(ipv);
  memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
  assert(node->op == op_Block &&
         node->attr.block.in_cg &&
         0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
  node->attr.block.in_cg[pos + 1] = pred;
ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
  assert(node->op == op_Block);
  return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
int get_Block_cg_n_cfgpreds(ir_node * node) {
  assert(node->op == op_Block);
  return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
  assert(node->op == op_Block && node->attr.block.in_cg);
  return node->attr.block.in_cg[pos + 1];
/* Drops the interprocedural predecessor array (array memory stays on the
   obstack; only the reference is cleared). */
void remove_Block_cg_cfgpred_arr(ir_node * node) {
  assert(node->op == op_Block);
  node->attr.block.in_cg = NULL;
ir_node *(set_Block_dead)(ir_node *block) {
  return __set_Block_dead(block);
int (is_Block_dead)(const ir_node *block) {
  return __is_Block_dead(block);
/* Deprecated: always asserts.  Use set_irn_irg instead. */
set_Start_irg(ir_node *node, ir_graph *irg) {
  assert(node->op == op_Start);
  assert(is_ir_graph(irg));
  assert(0 && " Why set irg? -- use set_irn_irg");
/* Keep-alive edges of the End node start at END_KEEPALIVE_OFFSET. */
get_End_n_keepalives(ir_node *end) {
  assert (end->op == op_End);
  return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
get_End_keepalive(ir_node *end, int pos) {
  assert (end->op == op_End);
  return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
add_End_keepalive (ir_node *end, ir_node *ka) {
  assert (end->op == op_End);
  /* End uses a flexible in array, so appending is possible */
  ARR_APP1 (ir_node *, end->in, ka);
set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
  assert (end->op == op_End);
  set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
free_End (ir_node *end) {
  assert (end->op == op_End);
  DEL_ARR_F(end->in); /* GL @@@ does not work! */
  end->in = NULL;   /* @@@ make sure we get an error if we use the
               in array afterwards ... */
745 > Implementing the case construct (which is where the constant Proj node is
746 > important) involves far more than simply determining the constant values.
747 > We could argue that this is more properly a function of the translator from
748 > Firm to the target machine. That could be done if there was some way of
749 > projecting "default" out of the Cond node.
750 I know it's complicated.
Basically there are two problems:
752 - determining the gaps between the projs
753 - determining the biggest case constant to know the proj number for
755 I see several solutions:
756 1. Introduce a ProjDefault node. Solves both problems.
757 This means to extend all optimizations executed during construction.
758 2. Give the Cond node for switch two flavors:
759 a) there are no gaps in the projs (existing flavor)
760 b) gaps may exist, default proj is still the Proj with the largest
761 projection number. This covers also the gaps.
762 3. Fix the semantic of the Cond to that of 2b)
764 Solution 2 seems to be the best:
765 Computing the gaps in the Firm representation is not too hard, i.e.,
766 libFIRM can implement a routine that transforms between the two
767 flavours. This is also possible for 1) but 2) does not require to
768 change any existing optimization.
769 Further it should be far simpler to determine the biggest constant than
771 I don't want to choose 3) as 2a) seems to have advantages for
772 dataflow analysis and 3) does not allow to convert the representation to
/* Cond: operand 0 is the selector (boolean or switch value). */
get_Cond_selector (ir_node *node) {
  assert (node->op == op_Cond);
  return get_irn_n(node, 0);
set_Cond_selector (ir_node *node, ir_node *selector) {
  assert (node->op == op_Cond);
  set_irn_n(node, 0, selector);
get_Cond_kind (ir_node *node) {
  assert (node->op == op_Cond);
  return node->attr.c.kind;
set_Cond_kind (ir_node *node, cond_kind kind) {
  assert (node->op == op_Cond);
  node->attr.c.kind = kind;
get_Cond_defaultProj (ir_node *node) {
  assert (node->op == op_Cond);
  return node->attr.c.default_proj;
/* Return: operand 0 is memory, results follow at RETURN_RESULT_OFFSET. */
get_Return_mem (ir_node *node) {
  assert (node->op == op_Return);
  return get_irn_n(node, 0);
set_Return_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Return);
  set_irn_n(node, 0, mem);
get_Return_n_ress (ir_node *node) {
  assert (node->op == op_Return);
  return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
get_Return_res_arr (ir_node *node)
  assert ((node->op == op_Return));
  if (get_Return_n_ress(node) > 0)
    return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
set_Return_n_res (ir_node *node, int results) {
  assert (node->op == op_Return);
get_Return_res (ir_node *node, int pos) {
  assert (node->op == op_Return);
  assert (get_Return_n_ress(node) > pos);
  return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
set_Return_res (ir_node *node, int pos, ir_node *res){
  assert (node->op == op_Return);
  set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
/* Raise: operand 0 is memory, operand 1 the exception pointer. */
get_Raise_mem (ir_node *node) {
  assert (node->op == op_Raise);
  return get_irn_n(node, 0);
set_Raise_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Raise);
  set_irn_n(node, 0, mem);
get_Raise_exo_ptr (ir_node *node) {
  assert (node->op == op_Raise);
  return get_irn_n(node, 1);
set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
  assert (node->op == op_Raise);
  set_irn_n(node, 1, exo_ptr);
tarval *get_Const_tarval (ir_node *node) {
  assert (node->op == op_Const);
  return node->attr.con.tv;
set_Const_tarval (ir_node *node, tarval *con) {
  assert (node->op == op_Const);
  node->attr.con.tv = con;
/* The source language type. Must be an atomic type. Mode of type must
   be mode of node. For tarvals from entities type must be pointer to
get_Const_type (ir_node *node) {
  assert (node->op == op_Const);
  return node->attr.con.tp;
set_Const_type (ir_node *node, type *tp) {
  assert (node->op == op_Const);
  /* firm_unknown_type is exempt from the atomic/mode consistency checks */
  if (tp != firm_unknown_type) {
    assert (is_atomic_type(tp));
    assert (get_type_mode(tp) == get_irn_mode(node));
  node->attr.con.tp = tp;
/* SymConst: the kind tag in attr.i.num selects which union member of
   attr.i.sym is valid; the accessors below assert the matching kind. */
get_SymConst_kind (const ir_node *node) {
  assert (node->op == op_SymConst);
  return node->attr.i.num;
set_SymConst_kind (ir_node *node, symconst_kind num) {
  assert (node->op == op_SymConst);
  node->attr.i.num = num;
get_SymConst_type (ir_node *node) {
  assert (   (node->op == op_SymConst)
          && (   get_SymConst_kind(node) == symconst_type_tag
              || get_SymConst_kind(node) == symconst_size));
  /* normalize the stored type reference on the way out */
  return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
set_SymConst_type (ir_node *node, type *tp) {
  assert (   (node->op == op_SymConst)
          && (   get_SymConst_kind(node) == symconst_type_tag
              || get_SymConst_kind(node) == symconst_size));
  node->attr.i.sym.type_p = tp;
get_SymConst_name (ir_node *node) {
  assert (   (node->op == op_SymConst)
          && (get_SymConst_kind(node) == symconst_addr_name));
  return node->attr.i.sym.ident_p;
set_SymConst_name (ir_node *node, ident *name) {
  assert (   (node->op == op_SymConst)
          && (get_SymConst_kind(node) == symconst_addr_name));
  node->attr.i.sym.ident_p = name;
/* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
entity *get_SymConst_entity (ir_node *node) {
  assert (   (node->op == op_SymConst)
          && (get_SymConst_kind (node) == symconst_addr_ent));
  return node->attr.i.sym.entity_p;
void set_SymConst_entity (ir_node *node, entity *ent) {
  assert (   (node->op == op_SymConst)
          && (get_SymConst_kind(node) == symconst_addr_ent));
  node->attr.i.sym.entity_p = ent;
/* Raw access to the whole symbol union, regardless of kind. */
union symconst_symbol
get_SymConst_symbol (ir_node *node) {
  assert (node->op == op_SymConst);
  return node->attr.i.sym;
set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
  assert (node->op == op_SymConst);
  node->attr.i.sym = sym;
get_SymConst_value_type (ir_node *node) {
  assert (node->op == op_SymConst);
  /* normalize lazily; tp may legitimately be NULL */
  if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
  return node->attr.i.tp;
set_SymConst_value_type (ir_node *node, type *tp) {
  assert (node->op == op_SymConst);
  node->attr.i.tp = tp;
/* Sel: operand 0 is memory, operand 1 the compound pointer; index
   operands follow at SEL_INDEX_OFFSET. */
get_Sel_mem (ir_node *node) {
  assert (node->op == op_Sel);
  return get_irn_n(node, 0);
set_Sel_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Sel);
  set_irn_n(node, 0, mem);
get_Sel_ptr (ir_node *node) {
  assert (node->op == op_Sel);
  return get_irn_n(node, 1);
set_Sel_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Sel);
  set_irn_n(node, 1, ptr);
get_Sel_n_indexs (ir_node *node) {
  assert (node->op == op_Sel);
  return (get_irn_arity(node) - SEL_INDEX_OFFSET);
get_Sel_index_arr (ir_node *node)
  assert ((node->op == op_Sel));
  if (get_Sel_n_indexs(node) > 0)
    return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
get_Sel_index (ir_node *node, int pos) {
  assert (node->op == op_Sel);
  return get_irn_n(node, pos + SEL_INDEX_OFFSET);
set_Sel_index (ir_node *node, int pos, ir_node *index) {
  assert (node->op == op_Sel);
  set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
get_Sel_entity (ir_node *node) {
  assert (node->op == op_Sel);
  return node->attr.s.ent;
set_Sel_entity (ir_node *node, entity *ent) {
  assert (node->op == op_Sel);
  node->attr.s.ent = ent;
1056 get_InstOf_ent (ir_node *node) {
1057 assert (node->op = op_InstOf);
1058 return (node->attr.io.ent);
1062 set_InstOf_ent (ir_node *node, type *ent) {
1063 assert (node->op = op_InstOf);
1064 node->attr.io.ent = ent;
1068 get_InstOf_store (ir_node *node) {
1069 assert (node->op = op_InstOf);
1070 return (get_irn_n (node, 0));
1074 set_InstOf_store (ir_node *node, ir_node *obj) {
1075 assert (node->op = op_InstOf);
1076 set_irn_n (node, 0, obj);
1080 get_InstOf_obj (ir_node *node) {
1081 assert (node->op = op_InstOf);
1082 return (get_irn_n (node, 1));
1086 set_InstOf_obj (ir_node *node, ir_node *obj) {
1087 assert (node->op = op_InstOf);
1088 set_irn_n (node, 1, obj);
/* For unary and binary arithmetic operations the access to the
   operands can be factored out. Left is the first, right the
   second arithmetic value as listed in tech report 0999-33.
   unops are: Minus, Abs, Not, Conv, Cast
   binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
   Shr, Shrs, Rotate, Cmp */

/* Call: operand 0 is memory, operand 1 the called pointer; actual
   parameters follow at CALL_PARAM_OFFSET. */
get_Call_mem (ir_node *node) {
  assert (node->op == op_Call);
  return get_irn_n(node, 0);
set_Call_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Call);
  set_irn_n(node, 0, mem);
get_Call_ptr (ir_node *node) {
  assert (node->op == op_Call);
  return get_irn_n(node, 1);
set_Call_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Call);
  set_irn_n(node, 1, ptr);
get_Call_param_arr (ir_node *node) {
  assert (node->op == op_Call);
  return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
get_Call_n_params (ir_node *node)  {
  assert (node->op == op_Call);
  return (get_irn_arity(node) - CALL_PARAM_OFFSET);
/* Alias of get_Call_n_params, kept for API compatibility. */
get_Call_arity (ir_node *node) {
  assert (node->op == op_Call);
  return get_Call_n_params(node);
set_Call_arity (ir_node *node, ir_node *arity) {
  assert (node->op == op_Call);
get_Call_param (ir_node *node, int pos) {
  assert (node->op == op_Call);
  return get_irn_n(node, pos + CALL_PARAM_OFFSET);
set_Call_param (ir_node *node, int pos, ir_node *param) {
  assert (node->op == op_Call);
  set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
get_Call_type (ir_node *node) {
  assert (node->op == op_Call);
  /* normalize the stored method type reference on the way out */
  return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
set_Call_type (ir_node *node, type *tp) {
  assert (node->op == op_Call);
  assert ((get_unknown_type() == tp) || is_Method_type(tp));
  node->attr.call.cld_tp = tp;
1173 int Call_has_callees(ir_node *node) {
1174 assert(node && node->op == op_Call);
1175 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1176 (node->attr.call.callee_arr != NULL));
int get_Call_n_callees(ir_node * node) {
  assert(node && node->op == op_Call && node->attr.call.callee_arr);
  return ARR_LEN(node->attr.call.callee_arr);
entity * get_Call_callee(ir_node * node, int pos) {
  assert(pos >= 0 && pos < get_Call_n_callees(node));
  return node->attr.call.callee_arr[pos];
/* Installs (copying) the list of n possibly called entities. */
void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
  assert(node->op == op_Call);
  /* reallocate only if the size changed */
  if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
    node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
  memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
void remove_Call_callee_arr(ir_node * node) {
  assert(node->op == op_Call);
  node->attr.call.callee_arr = NULL;
ir_node * get_CallBegin_ptr (ir_node *node) {
  assert(node->op == op_CallBegin);
  return get_irn_n(node, 0);
void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
  assert(node->op == op_CallBegin);
  set_irn_n(node, 0, ptr);
/* The Call node this CallBegin belongs to (interprocedural view). */
ir_node * get_CallBegin_call (ir_node *node) {
  assert(node->op == op_CallBegin);
  return node->attr.callbegin.call;
void set_CallBegin_call (ir_node *node, ir_node *call) {
  assert(node->op == op_CallBegin);
  node->attr.callbegin.call = call;
1221 ir_node * get_##OP##_left(ir_node *node) { \
1222 assert(node->op == op_##OP); \
1223 return get_irn_n(node, node->op->op_index); \
1225 void set_##OP##_left(ir_node *node, ir_node *left) { \
1226 assert(node->op == op_##OP); \
1227 set_irn_n(node, node->op->op_index, left); \
1229 ir_node *get_##OP##_right(ir_node *node) { \
1230 assert(node->op == op_##OP); \
1231 return get_irn_n(node, node->op->op_index + 1); \
1233 void set_##OP##_right(ir_node *node, ir_node *right) { \
1234 assert(node->op == op_##OP); \
1235 set_irn_n(node, node->op->op_index + 1, right); \
1239 ir_node *get_##OP##_op(ir_node *node) { \
1240 assert(node->op == op_##OP); \
1241 return get_irn_n(node, node->op->op_index); \
1243 void set_##OP##_op (ir_node *node, ir_node *op) { \
1244 assert(node->op == op_##OP); \
1245 set_irn_n(node, node->op->op_index, op); \
/* Quot/DivMod/Div/Mod carry memory as operand 0 (they may raise). */
get_Quot_mem (ir_node *node) {
  assert (node->op == op_Quot);
  return get_irn_n(node, 0);
set_Quot_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Quot);
  set_irn_n(node, 0, mem);
get_DivMod_mem (ir_node *node) {
  assert (node->op == op_DivMod);
  return get_irn_n(node, 0);
set_DivMod_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_DivMod);
  set_irn_n(node, 0, mem);
get_Div_mem (ir_node *node) {
  assert (node->op == op_Div);
  return get_irn_n(node, 0);
set_Div_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Div);
  set_irn_n(node, 0, mem);
get_Mod_mem (ir_node *node) {
  assert (node->op == op_Mod);
  return get_irn_n(node, 0);
set_Mod_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Mod);
  set_irn_n(node, 0, mem);
/* Cast: the target type of the (mode-preserving) type cast. */
get_Cast_type (ir_node *node) {
  assert (node->op == op_Cast);
  return node->attr.cast.totype;
set_Cast_type (ir_node *node, type *to_tp) {
  assert (node->op == op_Cast);
  node->attr.cast.totype = to_tp;
(is_unop)(const ir_node *node) {
  return __is_unop(node);
/* Generic operand access for unary ops; op_index gives the position of
   the arithmetic operand within the in array. */
get_unop_op (ir_node *node) {
  if (node->op->opar == oparity_unary)
    return get_irn_n(node, node->op->op_index);
  /* reached only for non-unary ops: report the misuse */
  assert(node->op->opar == oparity_unary);
set_unop_op (ir_node *node, ir_node *op) {
  if (node->op->opar == oparity_unary)
    set_irn_n(node, node->op->op_index, op);
  assert(node->op->opar == oparity_unary);
(is_binop)(const ir_node *node) {
  return __is_binop(node);
/* Generic operand access for binary ops: left at op_index, right at
   op_index + 1. */
get_binop_left (ir_node *node) {
  if (node->op->opar == oparity_binary)
    return get_irn_n(node, node->op->op_index);
  assert(node->op->opar == oparity_binary);
set_binop_left (ir_node *node, ir_node *left) {
  if (node->op->opar == oparity_binary)
    set_irn_n(node, node->op->op_index, left);
  assert (node->op->opar == oparity_binary);
get_binop_right (ir_node *node) {
  if (node->op->opar == oparity_binary)
    return get_irn_n(node, node->op->op_index + 1);
  assert(node->op->opar == oparity_binary);
set_binop_right (ir_node *node, ir_node *right) {
  if (node->op->opar == oparity_binary)
    set_irn_n(node, node->op->op_index + 1, right);
  assert (node->op->opar == oparity_binary);
/* A "real" Phi: during construction a zero-arity Phi is a Phi0 placeholder,
   not yet a proper Phi; a Filter counts as Phi in the interprocedural view. */
int is_Phi (const ir_node *n) {
  if (op == op_Filter) return get_interprocedural_view();
  return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
          (get_irn_arity(n) > 0));
/* Phi0: zero-arity Phi placeholder that only exists while building. */
int is_Phi0 (const ir_node *n) {
  return ((get_irn_op(n) == op_Phi) &&
          (get_irn_arity(n) == 0) &&
          (get_irg_phase_state(get_irn_irg(n)) == phase_building));
get_Phi_preds_arr (ir_node *node) {
  assert (node->op == op_Phi);
  /* skip in[0] (the block); data predecessors start at index 1 */
  return (ir_node **)&(get_irn_in(node)[1]);
get_Phi_n_preds (ir_node *node) {
  assert (is_Phi(node) || is_Phi0(node));
  return (get_irn_arity(node));
void set_Phi_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Phi);
get_Phi_pred (ir_node *node, int pos) {
  assert (is_Phi(node) || is_Phi0(node));
  return get_irn_n(node, pos);
set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
  assert (is_Phi(node) || is_Phi0(node));
  set_irn_n(node, pos, pred);
1448 int is_memop(ir_node *node) {
1449 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1452 ir_node *get_memop_mem (ir_node *node) {
1453 assert(is_memop(node));
1454 return get_irn_n(node, 0);
1457 void set_memop_mem (ir_node *node, ir_node *mem) {
1458 assert(is_memop(node));
1459 set_irn_n(node, 0, mem);
1462 ir_node *get_memop_ptr (ir_node *node) {
1463 assert(is_memop(node));
1464 return get_irn_n(node, 1);
1467 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1468 assert(is_memop(node));
1469 set_irn_n(node, 1, ptr);
1473 get_Load_mem (ir_node *node) {
1474 assert (node->op == op_Load);
1475 return get_irn_n(node, 0);
1479 set_Load_mem (ir_node *node, ir_node *mem) {
1480 assert (node->op == op_Load);
1481 set_irn_n(node, 0, mem);
1485 get_Load_ptr (ir_node *node) {
1486 assert (node->op == op_Load);
1487 return get_irn_n(node, 1);
1491 set_Load_ptr (ir_node *node, ir_node *ptr) {
1492 assert (node->op == op_Load);
1493 set_irn_n(node, 1, ptr);
1497 get_Load_mode (ir_node *node) {
1498 assert (node->op == op_Load);
1499 return node->attr.load.load_mode;
1503 set_Load_mode (ir_node *node, ir_mode *mode) {
1504 assert (node->op == op_Load);
1505 node->attr.load.load_mode = mode;
1509 get_Load_volatility (ir_node *node) {
1510 assert (node->op == op_Load);
1511 return node->attr.load.volatility;
1515 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1516 assert (node->op == op_Load);
1517 node->attr.load.volatility = volatility;
1522 get_Store_mem (ir_node *node) {
1523 assert (node->op == op_Store);
1524 return get_irn_n(node, 0);
1528 set_Store_mem (ir_node *node, ir_node *mem) {
1529 assert (node->op == op_Store);
1530 set_irn_n(node, 0, mem);
1534 get_Store_ptr (ir_node *node) {
1535 assert (node->op == op_Store);
1536 return get_irn_n(node, 1);
1540 set_Store_ptr (ir_node *node, ir_node *ptr) {
1541 assert (node->op == op_Store);
1542 set_irn_n(node, 1, ptr);
1546 get_Store_value (ir_node *node) {
1547 assert (node->op == op_Store);
1548 return get_irn_n(node, 2);
1552 set_Store_value (ir_node *node, ir_node *value) {
1553 assert (node->op == op_Store);
1554 set_irn_n(node, 2, value);
1558 get_Store_volatility (ir_node *node) {
1559 assert (node->op == op_Store);
1560 return node->attr.store.volatility;
1564 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1565 assert (node->op == op_Store);
1566 node->attr.store.volatility = volatility;
1571 get_Alloc_mem (ir_node *node) {
1572 assert (node->op == op_Alloc);
1573 return get_irn_n(node, 0);
1577 set_Alloc_mem (ir_node *node, ir_node *mem) {
1578 assert (node->op == op_Alloc);
1579 set_irn_n(node, 0, mem);
1583 get_Alloc_size (ir_node *node) {
1584 assert (node->op == op_Alloc);
1585 return get_irn_n(node, 1);
1589 set_Alloc_size (ir_node *node, ir_node *size) {
1590 assert (node->op == op_Alloc);
1591 set_irn_n(node, 1, size);
1595 get_Alloc_type (ir_node *node) {
1596 assert (node->op == op_Alloc);
1597 return node->attr.a.type = skip_tid(node->attr.a.type);
1601 set_Alloc_type (ir_node *node, type *tp) {
1602 assert (node->op == op_Alloc);
1603 node->attr.a.type = tp;
1607 get_Alloc_where (ir_node *node) {
1608 assert (node->op == op_Alloc);
1609 return node->attr.a.where;
1613 set_Alloc_where (ir_node *node, where_alloc where) {
1614 assert (node->op == op_Alloc);
1615 node->attr.a.where = where;
1620 get_Free_mem (ir_node *node) {
1621 assert (node->op == op_Free);
1622 return get_irn_n(node, 0);
1626 set_Free_mem (ir_node *node, ir_node *mem) {
1627 assert (node->op == op_Free);
1628 set_irn_n(node, 0, mem);
1632 get_Free_ptr (ir_node *node) {
1633 assert (node->op == op_Free);
1634 return get_irn_n(node, 1);
1638 set_Free_ptr (ir_node *node, ir_node *ptr) {
1639 assert (node->op == op_Free);
1640 set_irn_n(node, 1, ptr);
1644 get_Free_size (ir_node *node) {
1645 assert (node->op == op_Free);
1646 return get_irn_n(node, 2);
1650 set_Free_size (ir_node *node, ir_node *size) {
1651 assert (node->op == op_Free);
1652 set_irn_n(node, 2, size);
1656 get_Free_type (ir_node *node) {
1657 assert (node->op == op_Free);
1658 return node->attr.f = skip_tid(node->attr.f);
1662 set_Free_type (ir_node *node, type *tp) {
1663 assert (node->op == op_Free);
1668 get_Sync_preds_arr (ir_node *node) {
1669 assert (node->op == op_Sync);
1670 return (ir_node **)&(get_irn_in(node)[1]);
1674 get_Sync_n_preds (ir_node *node) {
1675 assert (node->op == op_Sync);
1676 return (get_irn_arity(node));
1681 set_Sync_n_preds (ir_node *node, int n_preds) {
1682 assert (node->op == op_Sync);
1687 get_Sync_pred (ir_node *node, int pos) {
1688 assert (node->op == op_Sync);
1689 return get_irn_n(node, pos);
1693 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1694 assert (node->op == op_Sync);
1695 set_irn_n(node, pos, pred);
1699 get_Proj_pred (ir_node *node) {
1700 assert (is_Proj(node));
1701 return get_irn_n(node, 0);
1705 set_Proj_pred (ir_node *node, ir_node *pred) {
1706 assert (is_Proj(node));
1707 set_irn_n(node, 0, pred);
1711 get_Proj_proj (ir_node *node) {
1712 assert (is_Proj(node));
1713 if (get_irn_opcode(node) == iro_Proj) {
1714 return node->attr.proj;
1716 assert(get_irn_opcode(node) == iro_Filter);
1717 return node->attr.filter.proj;
1722 set_Proj_proj (ir_node *node, long proj) {
1723 assert (node->op == op_Proj);
1724 node->attr.proj = proj;
1728 get_Tuple_preds_arr (ir_node *node) {
1729 assert (node->op == op_Tuple);
1730 return (ir_node **)&(get_irn_in(node)[1]);
1734 get_Tuple_n_preds (ir_node *node) {
1735 assert (node->op == op_Tuple);
1736 return (get_irn_arity(node));
1741 set_Tuple_n_preds (ir_node *node, int n_preds) {
1742 assert (node->op == op_Tuple);
1747 get_Tuple_pred (ir_node *node, int pos) {
1748 assert (node->op == op_Tuple);
1749 return get_irn_n(node, pos);
1753 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1754 assert (node->op == op_Tuple);
1755 set_irn_n(node, pos, pred);
1759 get_Id_pred (ir_node *node) {
1760 assert (node->op == op_Id);
1761 return get_irn_n(node, 0);
1765 set_Id_pred (ir_node *node, ir_node *pred) {
1766 assert (node->op == op_Id);
1767 set_irn_n(node, 0, pred);
1770 ir_node *get_Confirm_value (ir_node *node) {
1771 assert (node->op == op_Confirm);
1772 return get_irn_n(node, 0);
1774 void set_Confirm_value (ir_node *node, ir_node *value) {
1775 assert (node->op == op_Confirm);
1776 set_irn_n(node, 0, value);
1778 ir_node *get_Confirm_bound (ir_node *node) {
1779 assert (node->op == op_Confirm);
1780 return get_irn_n(node, 1);
1782 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1783 assert (node->op == op_Confirm);
1784 set_irn_n(node, 0, bound);
1786 pn_Cmp get_Confirm_cmp (ir_node *node) {
1787 assert (node->op == op_Confirm);
1788 return node->attr.confirm_cmp;
1790 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1791 assert (node->op == op_Confirm);
1792 node->attr.confirm_cmp = cmp;
1797 get_Filter_pred (ir_node *node) {
1798 assert(node->op == op_Filter);
1802 set_Filter_pred (ir_node *node, ir_node *pred) {
1803 assert(node->op == op_Filter);
1807 get_Filter_proj(ir_node *node) {
1808 assert(node->op == op_Filter);
1809 return node->attr.filter.proj;
1812 set_Filter_proj (ir_node *node, long proj) {
1813 assert(node->op == op_Filter);
1814 node->attr.filter.proj = proj;
1817 /* Don't use get_irn_arity, get_irn_n in implementation as access
1818 shall work independent of view!!! */
1819 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1820 assert(node->op == op_Filter);
1821 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1822 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1823 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1824 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1825 node->attr.filter.in_cg[0] = node->in[0];
1827 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1830 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1831 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1832 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1833 node->attr.filter.in_cg[pos + 1] = pred;
1835 int get_Filter_n_cg_preds(ir_node *node) {
1836 assert(node->op == op_Filter && node->attr.filter.in_cg);
1837 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1839 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1841 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1843 arity = ARR_LEN(node->attr.filter.in_cg);
1844 assert(pos < arity - 1);
1845 return node->attr.filter.in_cg[pos + 1];
1849 ir_node *get_Mux_sel (ir_node *node) {
1850 assert(node->op == op_Mux);
1853 void set_Mux_sel (ir_node *node, ir_node *sel) {
1854 assert(node->op == op_Mux);
1858 ir_node *get_Mux_false (ir_node *node) {
1859 assert(node->op == op_Mux);
1862 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1863 assert(node->op == op_Mux);
1864 node->in[2] = ir_false;
1867 ir_node *get_Mux_true (ir_node *node) {
1868 assert(node->op == op_Mux);
1871 void set_Mux_true (ir_node *node, ir_node *ir_true) {
1872 assert(node->op == op_Mux);
1873 node->in[3] = ir_true;
1878 get_irn_irg(const ir_node *node) {
1879 if (! is_Block(node))
1880 node = get_nodes_block(node);
1881 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1882 node = get_nodes_block(node);
1883 assert(get_irn_op(node) == op_Block);
1884 return node->attr.block.irg;
1888 /*----------------------------------------------------------------*/
1889 /* Auxiliary routines */
1890 /*----------------------------------------------------------------*/
1893 skip_Proj (ir_node *node) {
1894 /* don't assert node !!! */
1895 if (node && is_Proj(node)) {
1896 return get_Proj_pred(node);
1903 skip_Tuple (ir_node *node) {
1906 if (!get_opt_normalize()) return node;
1908 node = skip_Id(node);
1909 if (get_irn_op(node) == op_Proj) {
1910 pred = skip_Id(get_Proj_pred(node));
1911 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1912 pred = skip_Id(skip_Tuple(pred));
1913 if (get_irn_op(pred) == op_Tuple)
1914 return get_Tuple_pred(pred, get_Proj_proj(node));
1919 /** returns operand of node if node is a Cast */
1920 ir_node *skip_Cast (ir_node *node) {
1921 if (node && get_irn_op(node) == op_Cast) {
1922 return skip_Id(get_irn_n(node, 0));
1929 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1930 than any other approach, as Id chains are resolved and all point to the real node, or
1931 all id's are self loops. */
1933 skip_Id (ir_node *node) {
1934 /* don't assert node !!! */
1936 if (!get_opt_normalize()) return node;
1938 /* Don't use get_Id_pred: We get into an endless loop for
1939 self-referencing Ids. */
1940 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1941 ir_node *rem_pred = node->in[0+1];
1944 assert (get_irn_arity (node) > 0);
1946 node->in[0+1] = node;
1947 res = skip_Id(rem_pred);
1948 if (res->op == op_Id) /* self-loop */ return node;
1950 node->in[0+1] = res;
1957 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1958 than any other approach, as Id chains are resolved and all point to the real node, or
1959 all id's are self loops. */
1961 skip_Id (ir_node *node) {
1963 /* don't assert node !!! */
1965 if (!node || (node->op != op_Id)) return node;
1967 if (!get_opt_normalize()) return node;
1969 /* Don't use get_Id_pred: We get into an endless loop for
1970 self-referencing Ids. */
1971 pred = node->in[0+1];
1973 if (pred->op != op_Id) return pred;
1975 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1976 ir_node *rem_pred, *res;
1978 if (pred->op != op_Id) return pred; /* shortcut */
1981 assert (get_irn_arity (node) > 0);
1983 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1984 res = skip_Id(rem_pred);
1985 if (res->op == op_Id) /* self-loop */ return node;
1987 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1996 (is_Bad)(const ir_node *node) {
1997 return __is_Bad(node);
2001 (is_no_Block)(const ir_node *node) {
2002 return __is_no_Block(node);
2006 (is_Block)(const ir_node *node) {
2007 return __is_Block(node);
2010 /* returns true if node is a Unknown node. */
2012 is_Unknown (const ir_node *node) {
2014 return (get_irn_op(node) == op_Unknown);
2018 is_Proj (const ir_node *node) {
2020 return node->op == op_Proj
2021 || (!get_interprocedural_view() && node->op == op_Filter);
2024 /* Returns true if the operation manipulates control flow. */
2026 is_cfop(const ir_node *node) {
2027 return is_cfopcode(get_irn_op(node));
2030 /* Returns true if the operation manipulates interprocedural control flow:
2031 CallBegin, EndReg, EndExcept */
2032 int is_ip_cfop(const ir_node *node) {
2033 return is_ip_cfopcode(get_irn_op(node));
2036 /* Returns true if the operation can change the control flow because
2039 is_fragile_op(const ir_node *node) {
2040 return is_op_fragile(get_irn_op(node));
2043 /* Returns the memory operand of fragile operations. */
2044 ir_node *get_fragile_op_mem(ir_node *node) {
2045 assert(node && is_fragile_op(node));
2047 switch (get_irn_opcode (node)) {
2056 return get_irn_n(node, 0);
2061 assert(0 && "should not be reached");
2066 /* Returns true if the operation is a forking control flow operation. */
2068 is_forking_op(const ir_node *node) {
2069 return is_op_forking(get_irn_op(node));
2073 #ifdef DEBUG_libfirm
2074 void dump_irn (ir_node *n) {
2075 int i, arity = get_irn_arity(n);
2076 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2078 ir_node *pred = get_irn_n(n, -1);
2079 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2080 get_irn_node_nr(pred), (void *)pred);
2082 printf(" preds: \n");
2083 for (i = 0; i < arity; ++i) {
2084 ir_node *pred = get_irn_n(n, i);
2085 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2086 get_irn_node_nr(pred), (void *)pred);
2090 #else /* DEBUG_libfirm */
2091 void dump_irn (ir_node *n) {}
2092 #endif /* DEBUG_libfirm */