3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors in the in array */
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
/* String names for the 16 pnc (pn_Cmp) relation constants, indexed by the
   pnc value itself; must stay in sync with the order used by
   get_negated_pnc() below. */
static const char *pnc_name_arr [] = {
  "False", "Eq", "Lt", "Le",
  "Gt", "Ge", "Lg", "Leg", "Uo",
  "Ue", "Ul", "Ule", "Ug", "Uge",
  "Ne", "True"
};
46 * returns the pnc name from an pnc constant
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
/* String names for the projection numbers from Start, indexed by
   the pns constant. */
const char *pns_name_arr [] = {
  "initial_exec", "global_store",
  "frame_base", "globals", "args"
};
/* String names for the symconst_kind values, indexed by the kind. */
const char *symconst_name_arr [] = {
  "type_tag", "size", "addr_name", "addr_ent"
};
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
159 (get_irn_arity)(const ir_node *node) {
160 return __get_irn_arity(node);
163 /* Returns the array with ins. This array is shifted with respect to the
164 array accessed by get_irn_n: The block operand is at position 0 not -1.
165 (@@@ This should be changed.)
166 The order of the predecessors in this array is not guaranteed, except that
167 lists of operands as predecessors of Block or arguments of a Call are
170 get_irn_in (const ir_node *node) {
172 if (interprocedural_view) { /* handle Filter and Block specially */
173 if (get_irn_opcode(node) == iro_Filter) {
174 assert(node->attr.filter.in_cg);
175 return node->attr.filter.in_cg;
176 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
177 return node->attr.block.in_cg;
179 /* else fall through */
185 set_irn_in (ir_node *node, int arity, ir_node **in) {
188 if (interprocedural_view) { /* handle Filter and Block specially */
189 if (get_irn_opcode(node) == iro_Filter) {
190 assert(node->attr.filter.in_cg);
191 arr = &node->attr.filter.in_cg;
192 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
193 arr = &node->attr.block.in_cg;
200 if (arity != ARR_LEN(*arr) - 1) {
201 ir_node * block = (*arr)[0];
202 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
205 fix_backedges(current_ir_graph->obst, node);
206 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
210 (get_irn_intra_n)(ir_node *node, int n) {
211 return __get_irn_intra_n (node, n);
215 (get_irn_inter_n)(ir_node *node, int n) {
216 return __get_irn_inter_n (node, n);
220 (get_irn_n)(ir_node *node, int n) {
221 return __get_irn_n (node, n);
225 set_irn_n (ir_node *node, int n, ir_node *in) {
226 assert(node && -1 <= n && n < get_irn_arity(node));
227 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
228 /* Change block pred in both views! */
229 node->in[n + 1] = in;
230 assert(node->attr.filter.in_cg);
231 node->attr.filter.in_cg[n + 1] = in;
234 if (interprocedural_view) { /* handle Filter and Block specially */
235 if (get_irn_opcode(node) == iro_Filter) {
236 assert(node->attr.filter.in_cg);
237 node->attr.filter.in_cg[n + 1] = in;
239 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
240 node->attr.block.in_cg[n + 1] = in;
243 /* else fall through */
245 node->in[n + 1] = in;
249 (get_irn_mode)(const ir_node *node) {
250 return __get_irn_mode(node);
254 (set_irn_mode)(ir_node *node, ir_mode *mode)
256 __set_irn_mode(node, mode);
260 get_irn_modecode (const ir_node *node)
263 return node->mode->code;
266 /** Gets the string representation of the mode .*/
268 get_irn_modename (const ir_node *node)
271 return get_mode_name(node->mode);
275 get_irn_modeident (const ir_node *node)
278 return get_mode_ident(node->mode);
282 (get_irn_op)(const ir_node *node)
284 return __get_irn_op(node);
287 /* should be private to the library: */
289 set_irn_op (ir_node *node, ir_op *op)
296 (get_irn_opcode)(const ir_node *node)
298 return __get_irn_opcode(node);
302 get_irn_opname (const ir_node *node)
305 if ((get_irn_op((ir_node *)node) == op_Phi) &&
306 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
307 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
308 return get_id_str(node->op->name);
312 get_irn_opident (const ir_node *node)
315 return node->op->name;
319 (get_irn_visited)(const ir_node *node)
321 return __get_irn_visited(node);
325 (set_irn_visited)(ir_node *node, unsigned long visited)
327 __set_irn_visited(node, visited);
331 (mark_irn_visited)(ir_node *node) {
332 __mark_irn_visited(node);
336 (irn_not_visited)(const ir_node *node) {
337 return __irn_not_visited(node);
341 (irn_visited)(const ir_node *node) {
342 return __irn_visited(node);
346 (set_irn_link)(ir_node *node, void *link) {
347 __set_irn_link(node, link);
351 (get_irn_link)(const ir_node *node) {
352 return __get_irn_link(node);
356 #ifdef DO_HEAPANALYSIS
357 /* Access the abstract interpretation information of a node.
358 Returns NULL if no such information is available. */
359 struct abstval *get_irn_abst_value(ir_node *n) {
362 /* Set the abstract interpretation information of a node. */
363 void set_irn_abst_value(ir_node *n, struct abstval *os) {
366 struct section *firm_get_irn_section(ir_node *n) {
369 void firm_set_irn_section(ir_node *n, struct section *s) {
372 #endif /* DO_HEAPANALYSIS */
375 /* Outputs a unique number for this node */
377 get_irn_node_nr(const ir_node *node) {
380 return node->node_nr;
387 get_irn_const_attr (ir_node *node)
389 assert (node->op == op_Const);
390 return node->attr.con;
394 get_irn_proj_attr (ir_node *node)
396 assert (node->op == op_Proj);
397 return node->attr.proj;
401 get_irn_alloc_attr (ir_node *node)
403 assert (node->op == op_Alloc);
408 get_irn_free_attr (ir_node *node)
410 assert (node->op == op_Free);
411 return node->attr.f = skip_tid(node->attr.f);
415 get_irn_symconst_attr (ir_node *node)
417 assert (node->op == op_SymConst);
422 get_irn_call_attr (ir_node *node)
424 assert (node->op == op_Call);
425 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
429 get_irn_funccall_attr (ir_node *node)
431 assert (node->op == op_FuncCall);
432 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
436 get_irn_sel_attr (ir_node *node)
438 assert (node->op == op_Sel);
443 get_irn_phi_attr (ir_node *node)
445 assert (node->op == op_Phi);
446 return node->attr.phi0_pos;
450 get_irn_block_attr (ir_node *node)
452 assert (node->op == op_Block);
453 return node->attr.block;
456 /** manipulate fields of individual nodes **/
458 /* this works for all except Block */
460 get_nodes_block (ir_node *node) {
461 assert (!(node->op == op_Block));
462 return get_irn_n(node, -1);
466 set_nodes_block (ir_node *node, ir_node *block) {
467 assert (!(node->op == op_Block));
468 set_irn_n(node, -1, block);
471 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
472 * from Start. If so returns frame type, else Null. */
473 type *is_frame_pointer(ir_node *n) {
474 if ((get_irn_op(n) == op_Proj) &&
475 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
476 ir_node *start = get_Proj_pred(n);
477 if (get_irn_op(start) == op_Start) {
478 return get_irg_frame_type(get_irn_irg(start));
484 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
485 * from Start. If so returns global type, else Null. */
486 type *is_globals_pointer(ir_node *n) {
487 if ((get_irn_op(n) == op_Proj) &&
488 (get_Proj_proj(n) == pn_Start_P_globals)) {
489 ir_node *start = get_Proj_pred(n);
490 if (get_irn_op(start) == op_Start) {
491 return get_glob_type();
497 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
498 * from Start. If so returns 1, else 0. */
499 int is_value_arg_pointer(ir_node *n) {
500 if ((get_irn_op(n) == op_Proj) &&
501 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
502 (get_irn_op(get_Proj_pred(n)) == op_Start))
507 /* Returns an array with the predecessors of the Block. Depending on
508 the implementation of the graph data structure this can be a copy of
509 the internal representation of predecessors as well as the internal
510 array itself. Therefore writing to this array might obstruct the ir. */
512 get_Block_cfgpred_arr (ir_node *node)
514 assert ((node->op == op_Block));
515 return (ir_node **)&(get_irn_in(node)[1]);
520 get_Block_n_cfgpreds (ir_node *node) {
521 assert ((node->op == op_Block));
522 return get_irn_arity(node);
526 get_Block_cfgpred (ir_node *node, int pos) {
528 assert (node->op == op_Block);
529 assert(-1 <= pos && pos < get_irn_arity(node));
530 return get_irn_n(node, pos);
534 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
535 assert (node->op == op_Block);
536 set_irn_n(node, pos, pred);
540 get_Block_matured (ir_node *node) {
541 assert (node->op == op_Block);
542 return node->attr.block.matured;
546 set_Block_matured (ir_node *node, bool matured) {
547 assert (node->op == op_Block);
548 node->attr.block.matured = matured;
551 get_Block_block_visited (ir_node *node) {
552 assert (node->op == op_Block);
553 return node->attr.block.block_visited;
557 set_Block_block_visited (ir_node *node, unsigned long visit) {
558 assert (node->op == op_Block);
559 node->attr.block.block_visited = visit;
562 /* For this current_ir_graph must be set. */
564 mark_Block_block_visited (ir_node *node) {
565 assert (node->op == op_Block);
566 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
570 Block_not_block_visited(ir_node *node) {
571 assert (node->op == op_Block);
572 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
576 get_Block_graph_arr (ir_node *node, int pos) {
577 assert (node->op == op_Block);
578 return node->attr.block.graph_arr[pos+1];
582 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
583 assert (node->op == op_Block);
584 node->attr.block.graph_arr[pos+1] = value;
587 /* handler handling for Blocks */
589 set_Block_handler (ir_node *block, ir_node *handler) {
590 assert ((block->op == op_Block));
591 assert ((handler->op == op_Block));
592 block->attr.block.handler_entry = handler;
596 get_Block_handler (ir_node *block) {
597 assert ((block->op == op_Block));
598 return (block->attr.block.handler_entry);
601 /* handler handling for Nodes */
603 set_Node_handler (ir_node *node, ir_node *handler) {
604 set_Block_handler (get_nodes_block (node), handler);
608 get_Node_handler (ir_node *node) {
609 return (get_Block_handler (get_nodes_block (node)));
612 /* exc_t handling for Blocks */
613 void set_Block_exc (ir_node *block, exc_t exc) {
614 assert ((block->op == op_Block));
615 block->attr.block.exc = exc;
618 exc_t get_Block_exc (ir_node *block) {
619 assert ((block->op == op_Block));
620 return (block->attr.block.exc);
623 /* exc_t handling for Nodes */
624 void set_Node_exc (ir_node *node, exc_t exc) {
625 set_Block_exc (get_nodes_block (node), exc);
628 exc_t get_Node_exc (ir_node *node) {
629 return (get_Block_exc (get_nodes_block (node)));
633 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
634 assert(node->op == op_Block);
635 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
636 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
637 node->attr.block.in_cg[0] = NULL;
638 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
640 /* Fix backedge array. fix_backedges operates depending on
641 interprocedural_view. */
642 bool ipv = interprocedural_view;
643 interprocedural_view = true;
644 fix_backedges(current_ir_graph->obst, node);
645 interprocedural_view = ipv;
648 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
651 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
652 assert(node->op == op_Block &&
653 node->attr.block.in_cg &&
654 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
655 node->attr.block.in_cg[pos + 1] = pred;
658 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
659 assert(node->op == op_Block);
660 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
663 int get_Block_cg_n_cfgpreds(ir_node * node) {
664 assert(node->op == op_Block);
665 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
668 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
669 assert(node->op == op_Block && node->attr.block.in_cg);
670 return node->attr.block.in_cg[pos + 1];
673 void remove_Block_cg_cfgpred_arr(ir_node * node) {
674 assert(node->op == op_Block);
675 node->attr.block.in_cg = NULL;
679 set_Start_irg(ir_node *node, ir_graph *irg) {
680 assert(node->op == op_Start);
681 assert(is_ir_graph(irg));
682 assert(0 && " Why set irg? -- use set_irn_irg");
686 get_End_n_keepalives(ir_node *end) {
687 assert (end->op == op_End);
688 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
692 get_End_keepalive(ir_node *end, int pos) {
693 assert (end->op == op_End);
694 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
698 add_End_keepalive (ir_node *end, ir_node *ka) {
699 assert (end->op == op_End);
700 ARR_APP1 (ir_node *, end->in, ka);
704 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
705 assert (end->op == op_End);
706 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
710 free_End (ir_node *end) {
711 assert (end->op == op_End);
713 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
714 end->in = NULL; /* @@@ make sure we get an error if we use the
715 in array afterwards ... */
720 > Implementing the case construct (which is where the constant Proj node is
721 > important) involves far more than simply determining the constant values.
722 > We could argue that this is more properly a function of the translator from
723 > Firm to the target machine. That could be done if there was some way of
724 > projecting "default" out of the Cond node.
725 I know it's complicated.
726 Basically there are two problems:
727 - determining the gaps between the projs
728 - determining the biggest case constant to know the proj number for
730 I see several solutions:
731 1. Introduce a ProjDefault node. Solves both problems.
732 This means to extend all optimizations executed during construction.
733 2. Give the Cond node for switch two flavors:
734 a) there are no gaps in the projs (existing flavor)
735 b) gaps may exist, default proj is still the Proj with the largest
736 projection number. This covers also the gaps.
737 3. Fix the semantic of the Cond to that of 2b)
739 Solution 2 seems to be the best:
740 Computing the gaps in the Firm representation is not too hard, i.e.,
741 libFIRM can implement a routine that transforms between the two
742 flavours. This is also possible for 1) but 2) does not require to
743 change any existing optimization.
744 Further it should be far simpler to determine the biggest constant than
746 I don't want to choose 3) as 2a) seems to have advantages for
747 dataflow analysis and 3) does not allow to convert the representation to
751 get_Cond_selector (ir_node *node) {
752 assert (node->op == op_Cond);
753 return get_irn_n(node, 0);
757 set_Cond_selector (ir_node *node, ir_node *selector) {
758 assert (node->op == op_Cond);
759 set_irn_n(node, 0, selector);
763 get_Cond_kind (ir_node *node) {
764 assert (node->op == op_Cond);
765 return node->attr.c.kind;
769 set_Cond_kind (ir_node *node, cond_kind kind) {
770 assert (node->op == op_Cond);
771 node->attr.c.kind = kind;
775 get_Cond_defaultProj (ir_node *node) {
776 assert (node->op == op_Cond);
777 return node->attr.c.default_proj;
781 get_Return_mem (ir_node *node) {
782 assert (node->op == op_Return);
783 return get_irn_n(node, 0);
787 set_Return_mem (ir_node *node, ir_node *mem) {
788 assert (node->op == op_Return);
789 set_irn_n(node, 0, mem);
793 get_Return_n_ress (ir_node *node) {
794 assert (node->op == op_Return);
795 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
799 get_Return_res_arr (ir_node *node)
801 assert ((node->op == op_Return));
802 if (get_Return_n_ress(node) > 0)
803 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
810 set_Return_n_res (ir_node *node, int results) {
811 assert (node->op == op_Return);
816 get_Return_res (ir_node *node, int pos) {
817 assert (node->op == op_Return);
818 assert (get_Return_n_ress(node) > pos);
819 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
823 set_Return_res (ir_node *node, int pos, ir_node *res){
824 assert (node->op == op_Return);
825 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
829 get_Raise_mem (ir_node *node) {
830 assert (node->op == op_Raise);
831 return get_irn_n(node, 0);
835 set_Raise_mem (ir_node *node, ir_node *mem) {
836 assert (node->op == op_Raise);
837 set_irn_n(node, 0, mem);
841 get_Raise_exo_ptr (ir_node *node) {
842 assert (node->op == op_Raise);
843 return get_irn_n(node, 1);
847 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
848 assert (node->op == op_Raise);
849 set_irn_n(node, 1, exo_ptr);
852 tarval *get_Const_tarval (ir_node *node) {
853 assert (node->op == op_Const);
854 return node->attr.con.tv;
858 set_Const_tarval (ir_node *node, tarval *con) {
859 assert (node->op == op_Const);
860 node->attr.con.tv = con;
864 /* The source language type. Must be an atomic type. Mode of type must
865 be mode of node. For tarvals from entities type must be pointer to
868 get_Const_type (ir_node *node) {
869 assert (node->op == op_Const);
870 return node->attr.con.tp;
874 set_Const_type (ir_node *node, type *tp) {
875 assert (node->op == op_Const);
876 if (tp != unknown_type) {
877 assert (is_atomic_type(tp));
878 assert (get_type_mode(tp) == get_irn_mode(node));
881 node->attr.con.tp = tp;
886 get_SymConst_kind (const ir_node *node) {
887 assert (node->op == op_SymConst);
888 return node->attr.i.num;
892 set_SymConst_kind (ir_node *node, symconst_kind num) {
893 assert (node->op == op_SymConst);
894 node->attr.i.num = num;
898 get_SymConst_type (ir_node *node) {
899 assert ( (node->op == op_SymConst)
900 && ( get_SymConst_kind(node) == symconst_type_tag
901 || get_SymConst_kind(node) == symconst_size));
902 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
906 set_SymConst_type (ir_node *node, type *tp) {
907 assert ( (node->op == op_SymConst)
908 && ( get_SymConst_kind(node) == symconst_type_tag
909 || get_SymConst_kind(node) == symconst_size));
910 node->attr.i.sym.type_p = tp;
914 get_SymConst_name (ir_node *node) {
915 assert ( (node->op == op_SymConst)
916 && (get_SymConst_kind(node) == symconst_addr_name));
917 return node->attr.i.sym.ident_p;
921 set_SymConst_name (ir_node *node, ident *name) {
922 assert ( (node->op == op_SymConst)
923 && (get_SymConst_kind(node) == symconst_addr_name));
924 node->attr.i.sym.ident_p = name;
928 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
929 entity *get_SymConst_entity (ir_node *node) {
930 assert ( (node->op == op_SymConst)
931 && (get_SymConst_kind (node) == symconst_addr_ent));
932 return node->attr.i.sym.entity_p;
935 void set_SymConst_entity (ir_node *node, entity *ent) {
936 assert ( (node->op == op_SymConst)
937 && (get_SymConst_kind(node) == symconst_addr_ent));
938 node->attr.i.sym.entity_p = ent;
942 union symconst_symbol
943 get_SymConst_symbol (ir_node *node) {
944 assert (node->op == op_SymConst);
945 return node->attr.i.sym;
949 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
950 assert (node->op == op_SymConst);
951 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
952 node->attr.i.sym = sym;
956 get_Sel_mem (ir_node *node) {
957 assert (node->op == op_Sel);
958 return get_irn_n(node, 0);
962 set_Sel_mem (ir_node *node, ir_node *mem) {
963 assert (node->op == op_Sel);
964 set_irn_n(node, 0, mem);
968 get_Sel_ptr (ir_node *node) {
969 assert (node->op == op_Sel);
970 return get_irn_n(node, 1);
974 set_Sel_ptr (ir_node *node, ir_node *ptr) {
975 assert (node->op == op_Sel);
976 set_irn_n(node, 1, ptr);
980 get_Sel_n_indexs (ir_node *node) {
981 assert (node->op == op_Sel);
982 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
986 get_Sel_index_arr (ir_node *node)
988 assert ((node->op == op_Sel));
989 if (get_Sel_n_indexs(node) > 0)
990 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
996 get_Sel_index (ir_node *node, int pos) {
997 assert (node->op == op_Sel);
998 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1002 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1003 assert (node->op == op_Sel);
1004 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1008 get_Sel_entity (ir_node *node) {
1009 assert (node->op == op_Sel);
1010 return node->attr.s.ent;
1014 set_Sel_entity (ir_node *node, entity *ent) {
1015 assert (node->op == op_Sel);
1016 node->attr.s.ent = ent;
1020 get_InstOf_ent (ir_node *node) {
1021 assert (node->op = op_InstOf);
1022 return (node->attr.io.ent);
1026 set_InstOf_ent (ir_node *node, type *ent) {
1027 assert (node->op = op_InstOf);
1028 node->attr.io.ent = ent;
1032 get_InstOf_store (ir_node *node) {
1033 assert (node->op = op_InstOf);
1034 return (get_irn_n (node, 0));
1038 set_InstOf_store (ir_node *node, ir_node *obj) {
1039 assert (node->op = op_InstOf);
1040 set_irn_n (node, 0, obj);
1044 get_InstOf_obj (ir_node *node) {
1045 assert (node->op = op_InstOf);
1046 return (get_irn_n (node, 1));
1050 set_InstOf_obj (ir_node *node, ir_node *obj) {
1051 assert (node->op = op_InstOf);
1052 set_irn_n (node, 1, obj);
1056 /* For unary and binary arithmetic operations the access to the
1057 operands can be factored out. Left is the first, right the
1058 second arithmetic value as listed in tech report 0999-33.
1059 unops are: Minus, Abs, Not, Conv, Cast
1060 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1061 Shr, Shrs, Rotate, Cmp */
1065 get_Call_mem (ir_node *node) {
1066 assert (node->op == op_Call);
1067 return get_irn_n(node, 0);
1071 set_Call_mem (ir_node *node, ir_node *mem) {
1072 assert (node->op == op_Call);
1073 set_irn_n(node, 0, mem);
1077 get_Call_ptr (ir_node *node) {
1078 assert (node->op == op_Call);
1079 return get_irn_n(node, 1);
1083 set_Call_ptr (ir_node *node, ir_node *ptr) {
1084 assert (node->op == op_Call);
1085 set_irn_n(node, 1, ptr);
1089 get_Call_param_arr (ir_node *node) {
1090 assert (node->op == op_Call);
1091 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1095 get_Call_n_params (ir_node *node) {
1096 assert (node->op == op_Call);
1097 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1101 get_Call_arity (ir_node *node) {
1102 assert (node->op == op_Call);
1103 return get_Call_n_params(node);
1107 set_Call_arity (ir_node *node, ir_node *arity) {
1108 assert (node->op == op_Call);
1113 get_Call_param (ir_node *node, int pos) {
1114 assert (node->op == op_Call);
1115 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1119 set_Call_param (ir_node *node, int pos, ir_node *param) {
1120 assert (node->op == op_Call);
1121 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1125 get_Call_type (ir_node *node) {
1126 assert (node->op == op_Call);
1127 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1131 set_Call_type (ir_node *node, type *tp) {
1132 assert (node->op == op_Call);
1133 assert (is_method_type(tp));
1134 node->attr.call.cld_tp = tp;
1137 int Call_has_callees(ir_node *node) {
1138 return (node->attr.call.callee_arr != NULL);
1141 int get_Call_n_callees(ir_node * node) {
1142 assert(node->op == op_Call && node->attr.call.callee_arr);
1143 return ARR_LEN(node->attr.call.callee_arr);
1146 entity * get_Call_callee(ir_node * node, int pos) {
1147 assert(node->op == op_Call && node->attr.call.callee_arr);
1148 return node->attr.call.callee_arr[pos];
1151 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1152 assert(node->op == op_Call);
1153 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1154 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1156 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1159 void remove_Call_callee_arr(ir_node * node) {
1160 assert(node->op == op_Call);
1161 node->attr.call.callee_arr = NULL;
1164 ir_node * get_CallBegin_ptr (ir_node *node) {
1165 assert(node->op == op_CallBegin);
1166 return get_irn_n(node, 0);
1168 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1169 assert(node->op == op_CallBegin);
1170 set_irn_n(node, 0, ptr);
1172 ir_node * get_CallBegin_call (ir_node *node) {
1173 assert(node->op == op_CallBegin);
1174 return node->attr.callbegin.call;
1176 void set_CallBegin_call (ir_node *node, ir_node *call) {
1177 assert(node->op == op_CallBegin);
1178 node->attr.callbegin.call = call;
1182 get_FuncCall_ptr (ir_node *node) {
1183 assert (node->op == op_FuncCall);
1184 return get_irn_n(node, 0);
1188 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1189 assert (node->op == op_FuncCall);
1190 set_irn_n(node, 0, ptr);
1194 get_FuncCall_param_arr (ir_node *node) {
1195 assert (node->op == op_FuncCall);
1196 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1200 get_FuncCall_n_params (ir_node *node) {
1201 assert (node->op == op_FuncCall);
1202 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1206 get_FuncCall_arity (ir_node *node) {
1207 assert (node->op == op_FuncCall);
1208 return get_FuncCall_n_params(node);
1212 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1213 assert (node->op == op_FuncCall);
1218 get_FuncCall_param (ir_node *node, int pos) {
1219 assert (node->op == op_FuncCall);
1220 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1224 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1225 assert (node->op == op_FuncCall);
1226 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1230 get_FuncCall_type (ir_node *node) {
1231 assert (node->op == op_FuncCall);
1232 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1236 set_FuncCall_type (ir_node *node, type *tp) {
1237 assert (node->op == op_FuncCall);
1238 assert (is_method_type(tp));
1239 node->attr.call.cld_tp = tp;
1242 int FuncCall_has_callees(ir_node *node) {
1243 return (node->attr.call.callee_arr != NULL);
1246 int get_FuncCall_n_callees(ir_node * node) {
1247 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1248 return ARR_LEN(node->attr.call.callee_arr);
1251 entity * get_FuncCall_callee(ir_node * node, int pos) {
1252 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1253 return node->attr.call.callee_arr[pos];
1256 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1257 assert(node->op == op_FuncCall);
1258 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1259 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1261 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1264 void remove_FuncCall_callee_arr(ir_node * node) {
1265 assert(node->op == op_FuncCall);
1266 node->attr.call.callee_arr = NULL;
1271 ir_node * get_##OP##_left(ir_node *node) { \
1272 assert(node->op == op_##OP); \
1273 return get_irn_n(node, node->op->op_index); \
1275 void set_##OP##_left(ir_node *node, ir_node *left) { \
1276 assert(node->op == op_##OP); \
1277 set_irn_n(node, node->op->op_index, left); \
1279 ir_node *get_##OP##_right(ir_node *node) { \
1280 assert(node->op == op_##OP); \
1281 return get_irn_n(node, node->op->op_index + 1); \
1283 void set_##OP##_right(ir_node *node, ir_node *right) { \
1284 assert(node->op == op_##OP); \
1285 set_irn_n(node, node->op->op_index + 1, right); \
1289 ir_node *get_##OP##_op(ir_node *node) { \
1290 assert(node->op == op_##OP); \
1291 return get_irn_n(node, node->op->op_index); \
1293 void set_##OP##_op (ir_node *node, ir_node *op) { \
1294 assert(node->op == op_##OP); \
1295 set_irn_n(node, node->op->op_index, op); \
1305 get_Quot_mem (ir_node *node) {
1306 assert (node->op == op_Quot);
1307 return get_irn_n(node, 0);
1311 set_Quot_mem (ir_node *node, ir_node *mem) {
1312 assert (node->op == op_Quot);
1313 set_irn_n(node, 0, mem);
1319 get_DivMod_mem (ir_node *node) {
1320 assert (node->op == op_DivMod);
1321 return get_irn_n(node, 0);
1325 set_DivMod_mem (ir_node *node, ir_node *mem) {
1326 assert (node->op == op_DivMod);
1327 set_irn_n(node, 0, mem);
1333 get_Div_mem (ir_node *node) {
1334 assert (node->op == op_Div);
1335 return get_irn_n(node, 0);
1339 set_Div_mem (ir_node *node, ir_node *mem) {
1340 assert (node->op == op_Div);
1341 set_irn_n(node, 0, mem);
1347 get_Mod_mem (ir_node *node) {
1348 assert (node->op == op_Mod);
1349 return get_irn_n(node, 0);
1353 set_Mod_mem (ir_node *node, ir_node *mem) {
1354 assert (node->op == op_Mod);
1355 set_irn_n(node, 0, mem);
1372 get_Cast_type (ir_node *node) {
1373 assert (node->op == op_Cast);
1374 return node->attr.cast.totype;
1378 set_Cast_type (ir_node *node, type *to_tp) {
1379 assert (node->op == op_Cast);
1380 node->attr.cast.totype = to_tp;
1384 is_unop (ir_node *node) {
1385 return (node->op->opar == oparity_unary);
1389 get_unop_op (ir_node *node) {
1390 if (node->op->opar == oparity_unary)
1391 return get_irn_n(node, node->op->op_index);
1393 assert(node->op->opar == oparity_unary);
1398 set_unop_op (ir_node *node, ir_node *op) {
1399 if (node->op->opar == oparity_unary)
1400 set_irn_n(node, node->op->op_index, op);
1402 assert(node->op->opar == oparity_unary);
1406 is_binop (ir_node *node) {
1407 return (node->op->opar == oparity_binary);
1411 get_binop_left (ir_node *node) {
1412 if (node->op->opar == oparity_binary)
1413 return get_irn_n(node, node->op->op_index);
1415 assert(node->op->opar == oparity_binary);
1420 set_binop_left (ir_node *node, ir_node *left) {
1421 if (node->op->opar == oparity_binary)
1422 set_irn_n(node, node->op->op_index, left);
1424 assert (node->op->opar == oparity_binary);
1428 get_binop_right (ir_node *node) {
1429 if (node->op->opar == oparity_binary)
1430 return get_irn_n(node, node->op->op_index + 1);
1432 assert(node->op->opar == oparity_binary);
1437 set_binop_right (ir_node *node, ir_node *right) {
1438 if (node->op->opar == oparity_binary)
1439 set_irn_n(node, node->op->op_index + 1, right);
1441 assert (node->op->opar == oparity_binary);
1444 int is_Phi (ir_node *n) {
1450 if (op == op_Filter) return interprocedural_view;
1453 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1454 (get_irn_arity(n) > 0));
1459 int is_Phi0 (ir_node *n) {
1462 return ((get_irn_op(n) == op_Phi) &&
1463 (get_irn_arity(n) == 0) &&
1464 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1468 get_Phi_preds_arr (ir_node *node) {
1469 assert (node->op == op_Phi);
1470 return (ir_node **)&(get_irn_in(node)[1]);
1474 get_Phi_n_preds (ir_node *node) {
1475 assert (is_Phi(node) || is_Phi0(node));
1476 return (get_irn_arity(node));
1480 void set_Phi_n_preds (ir_node *node, int n_preds) {
1481 assert (node->op == op_Phi);
1486 get_Phi_pred (ir_node *node, int pos) {
1487 assert (is_Phi(node) || is_Phi0(node));
1488 return get_irn_n(node, pos);
1492 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1493 assert (is_Phi(node) || is_Phi0(node));
1494 set_irn_n(node, pos, pred);
1498 get_Load_mem (ir_node *node) {
1499 assert (node->op == op_Load);
1500 return get_irn_n(node, 0);
1504 set_Load_mem (ir_node *node, ir_node *mem) {
1505 assert (node->op == op_Load);
1506 set_irn_n(node, 0, mem);
1510 get_Load_ptr (ir_node *node) {
1511 assert (node->op == op_Load);
1512 return get_irn_n(node, 1);
1516 set_Load_ptr (ir_node *node, ir_node *ptr) {
1517 assert (node->op == op_Load);
1518 set_irn_n(node, 1, ptr);
1523 get_Store_mem (ir_node *node) {
1524 assert (node->op == op_Store);
1525 return get_irn_n(node, 0);
1529 set_Store_mem (ir_node *node, ir_node *mem) {
1530 assert (node->op == op_Store);
1531 set_irn_n(node, 0, mem);
1535 get_Store_ptr (ir_node *node) {
1536 assert (node->op == op_Store);
1537 return get_irn_n(node, 1);
1541 set_Store_ptr (ir_node *node, ir_node *ptr) {
1542 assert (node->op == op_Store);
1543 set_irn_n(node, 1, ptr);
1547 get_Store_value (ir_node *node) {
1548 assert (node->op == op_Store);
1549 return get_irn_n(node, 2);
1553 set_Store_value (ir_node *node, ir_node *value) {
1554 assert (node->op == op_Store);
1555 set_irn_n(node, 2, value);
1559 get_Alloc_mem (ir_node *node) {
1560 assert (node->op == op_Alloc);
1561 return get_irn_n(node, 0);
1565 set_Alloc_mem (ir_node *node, ir_node *mem) {
1566 assert (node->op == op_Alloc);
1567 set_irn_n(node, 0, mem);
1571 get_Alloc_size (ir_node *node) {
1572 assert (node->op == op_Alloc);
1573 return get_irn_n(node, 1);
1577 set_Alloc_size (ir_node *node, ir_node *size) {
1578 assert (node->op == op_Alloc);
1579 set_irn_n(node, 1, size);
1583 get_Alloc_type (ir_node *node) {
1584 assert (node->op == op_Alloc);
1585 return node->attr.a.type = skip_tid(node->attr.a.type);
1589 set_Alloc_type (ir_node *node, type *tp) {
1590 assert (node->op == op_Alloc);
1591 node->attr.a.type = tp;
1595 get_Alloc_where (ir_node *node) {
1596 assert (node->op == op_Alloc);
1597 return node->attr.a.where;
1601 set_Alloc_where (ir_node *node, where_alloc where) {
1602 assert (node->op == op_Alloc);
1603 node->attr.a.where = where;
1608 get_Free_mem (ir_node *node) {
1609 assert (node->op == op_Free);
1610 return get_irn_n(node, 0);
1614 set_Free_mem (ir_node *node, ir_node *mem) {
1615 assert (node->op == op_Free);
1616 set_irn_n(node, 0, mem);
1620 get_Free_ptr (ir_node *node) {
1621 assert (node->op == op_Free);
1622 return get_irn_n(node, 1);
1626 set_Free_ptr (ir_node *node, ir_node *ptr) {
1627 assert (node->op == op_Free);
1628 set_irn_n(node, 1, ptr);
1632 get_Free_size (ir_node *node) {
1633 assert (node->op == op_Free);
1634 return get_irn_n(node, 2);
1638 set_Free_size (ir_node *node, ir_node *size) {
1639 assert (node->op == op_Free);
1640 set_irn_n(node, 2, size);
1644 get_Free_type (ir_node *node) {
1645 assert (node->op == op_Free);
1646 return node->attr.f = skip_tid(node->attr.f);
1650 set_Free_type (ir_node *node, type *tp) {
1651 assert (node->op == op_Free);
1656 get_Sync_preds_arr (ir_node *node) {
1657 assert (node->op == op_Sync);
1658 return (ir_node **)&(get_irn_in(node)[1]);
1662 get_Sync_n_preds (ir_node *node) {
1663 assert (node->op == op_Sync);
1664 return (get_irn_arity(node));
1669 set_Sync_n_preds (ir_node *node, int n_preds) {
1670 assert (node->op == op_Sync);
1675 get_Sync_pred (ir_node *node, int pos) {
1676 assert (node->op == op_Sync);
1677 return get_irn_n(node, pos);
1681 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1682 assert (node->op == op_Sync);
1683 set_irn_n(node, pos, pred);
1687 get_Proj_pred (ir_node *node) {
1688 assert (is_Proj(node));
1689 return get_irn_n(node, 0);
1693 set_Proj_pred (ir_node *node, ir_node *pred) {
1694 assert (is_Proj(node));
1695 set_irn_n(node, 0, pred);
1699 get_Proj_proj (ir_node *node) {
1700 assert (is_Proj(node));
1701 if (get_irn_opcode(node) == iro_Proj) {
1702 return node->attr.proj;
1704 assert(get_irn_opcode(node) == iro_Filter);
1705 return node->attr.filter.proj;
1710 set_Proj_proj (ir_node *node, long proj) {
1711 assert (node->op == op_Proj);
1712 node->attr.proj = proj;
1716 get_Tuple_preds_arr (ir_node *node) {
1717 assert (node->op == op_Tuple);
1718 return (ir_node **)&(get_irn_in(node)[1]);
1722 get_Tuple_n_preds (ir_node *node) {
1723 assert (node->op == op_Tuple);
1724 return (get_irn_arity(node));
1729 set_Tuple_n_preds (ir_node *node, int n_preds) {
1730 assert (node->op == op_Tuple);
1735 get_Tuple_pred (ir_node *node, int pos) {
1736 assert (node->op == op_Tuple);
1737 return get_irn_n(node, pos);
1741 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1742 assert (node->op == op_Tuple);
1743 set_irn_n(node, pos, pred);
1747 get_Id_pred (ir_node *node) {
1748 assert (node->op == op_Id);
1749 return get_irn_n(node, 0);
1753 set_Id_pred (ir_node *node, ir_node *pred) {
1754 assert (node->op == op_Id);
1755 set_irn_n(node, 0, pred);
1758 ir_node *get_Confirm_value (ir_node *node) {
1759 assert (node->op == op_Confirm);
1760 return get_irn_n(node, 0);
1762 void set_Confirm_value (ir_node *node, ir_node *value) {
1763 assert (node->op == op_Confirm);
1764 set_irn_n(node, 0, value);
1766 ir_node *get_Confirm_bound (ir_node *node) {
1767 assert (node->op == op_Confirm);
1768 return get_irn_n(node, 1);
1770 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1771 assert (node->op == op_Confirm);
1772 set_irn_n(node, 0, bound);
1774 pn_Cmp get_Confirm_cmp (ir_node *node) {
1775 assert (node->op == op_Confirm);
1776 return node->attr.confirm_cmp;
1778 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1779 assert (node->op == op_Confirm);
1780 node->attr.confirm_cmp = cmp;
1785 get_Filter_pred (ir_node *node) {
1786 assert(node->op == op_Filter);
1790 set_Filter_pred (ir_node *node, ir_node *pred) {
1791 assert(node->op == op_Filter);
1795 get_Filter_proj(ir_node *node) {
1796 assert(node->op == op_Filter);
1797 return node->attr.filter.proj;
1800 set_Filter_proj (ir_node *node, long proj) {
1801 assert(node->op == op_Filter);
1802 node->attr.filter.proj = proj;
1805 /* Don't use get_irn_arity, get_irn_n in implementation as access
1806 shall work independent of view!!! */
1807 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1808 assert(node->op == op_Filter);
1809 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1810 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1811 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1812 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1813 node->attr.filter.in_cg[0] = node->in[0];
1815 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1818 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1819 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1820 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1821 node->attr.filter.in_cg[pos + 1] = pred;
1823 int get_Filter_n_cg_preds(ir_node *node) {
1824 assert(node->op == op_Filter && node->attr.filter.in_cg);
1825 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1827 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1829 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1831 arity = ARR_LEN(node->attr.filter.in_cg);
1832 assert(pos < arity - 1);
1833 return node->attr.filter.in_cg[pos + 1];
1838 get_irn_irg(ir_node *node) {
1839 if (get_irn_op(node) != op_Block)
1840 node = get_nodes_block(node);
1841 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1842 node = get_nodes_block(node);
1843 assert(get_irn_op(node) == op_Block);
1844 return node->attr.block.irg;
1848 /*----------------------------------------------------------------*/
1849 /* Auxiliary routines */
1850 /*----------------------------------------------------------------*/
1853 skip_Proj (ir_node *node) {
1854 /* don't assert node !!! */
1855 if (node && is_Proj(node)) {
1856 return get_Proj_pred(node);
1863 skip_Tuple (ir_node *node) {
1866 if (!get_opt_normalize()) return node;
1868 node = skip_Id(node);
1869 if (get_irn_op(node) == op_Proj) {
1870 pred = skip_Id(get_Proj_pred(node));
1871 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1872 pred = skip_Id(skip_Tuple(pred));
1873 if (get_irn_op(pred) == op_Tuple)
1874 return get_Tuple_pred(pred, get_Proj_proj(node));
1879 /** returns operand of node if node is a Cast */
1880 ir_node *skip_Cast (ir_node *node) {
1881 if (node && get_irn_op(node) == op_Cast) {
1882 return skip_Id(get_irn_n(node, 0));
#if 0
/* Disabled older implementation of skip_Id: a second definition follows
   below; keeping both would redefine the symbol.

   This should compact Id-cycles to self-cycles. It has the same (or less?)
   complexity than any other approach, as Id chains are resolved and all
   point to the real node, or all id's are self loops. */
ir_node *
skip_Id (ir_node *node) {
  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != node->in[0+1])) {
    ir_node *rem_pred = node->in[0+1];
    ir_node *res;

    assert (get_irn_arity (node) > 0);

    node->in[0+1] = node;              /* shorten Id cycles */
    res = skip_Id(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res;               /* point directly at the chain end */
    return res;
  } else {
    return node;
  }
}
#endif
1917 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1918 than any other approach, as Id chains are resolved and all point to the real node, or
1919 all id's are self loops. */
1921 skip_Id (ir_node *node) {
1923 /* don't assert node !!! */
1925 if (!node || (node->op != op_Id)) return node;
1927 if (!get_opt_normalize()) return node;
1929 /* Don't use get_Id_pred: We get into an endless loop for
1930 self-referencing Ids. */
1931 pred = node->in[0+1];
1933 if (pred->op != op_Id) return pred;
1935 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1936 ir_node *rem_pred, *res;
1938 if (pred->op != op_Id) return pred; /* shortcut */
1941 assert (get_irn_arity (node) > 0);
1943 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1944 res = skip_Id(rem_pred);
1945 if (res->op == op_Id) /* self-loop */ return node;
1947 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1956 is_Bad (ir_node *node) {
1958 if ((node) && get_irn_opcode(node) == iro_Bad)
1964 is_no_Block (ir_node *node) {
1966 return (get_irn_opcode(node) != iro_Block);
1970 is_Block (ir_node *node) {
1972 return (get_irn_opcode(node) == iro_Block);
1975 /* returns true if node is a Unknown node. */
1977 is_Unknown (ir_node *node) {
1979 return (get_irn_opcode(node) == iro_Unknown);
1983 is_Proj (const ir_node *node) {
1985 return node->op == op_Proj
1986 || (!interprocedural_view && node->op == op_Filter);
1989 /* Returns true if the operation manipulates control flow. */
1991 is_cfop(ir_node *node) {
1992 return is_cfopcode(get_irn_op(node));
1995 /* Returns true if the operation manipulates interprocedural control flow:
1996 CallBegin, EndReg, EndExcept */
1997 int is_ip_cfop(ir_node *node) {
1998 return is_ip_cfopcode(get_irn_op(node));
2001 /* Returns true if the operation can change the control flow because
2004 is_fragile_op(ir_node *node) {
2005 return is_op_fragile(get_irn_op(node));
2008 /* Returns the memory operand of fragile operations. */
2009 ir_node *get_fragile_op_mem(ir_node *node) {
2010 assert(node && is_fragile_op(node));
2012 switch (get_irn_opcode (node)) {
2021 return get_irn_n(node, 0);
2026 assert(0 && "should not be reached");
2031 #ifdef DEBUG_libfirm
2032 void dump_irn (ir_node *n) {
2033 int i, arity = get_irn_arity(n);
2034 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2036 ir_node *pred = get_irn_n(n, -1);
2037 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2038 get_irn_node_nr(pred), (void *)pred);
2040 printf(" preds: \n");
2041 for (i = 0; i < arity; ++i) {
2042 ir_node *pred = get_irn_n(n, i);
2043 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2044 get_irn_node_nr(pred), (void *)pred);
2048 #else /* DEBUG_libfirm */
2049 void dump_irn (ir_node *n) {}
2050 #endif /* DEBUG_libfirm */