3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of a node's predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define SEL_INDEX_OFFSET 2
34 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
35 #define END_KEEPALIVE_OFFSET 0
37 static const char *pnc_name_arr [] = {
38 "False", "Eq", "Lt", "Le",
39 "Gt", "Ge", "Lg", "Leg", "Uo",
40 "Ue", "Ul", "Ule", "Ug", "Uge",
45 * returns the pnc name from an pnc constant
47 const char *get_pnc_string(int pnc) {
48 return pnc_name_arr[pnc];
52 * Calculates the negated pnc condition.
55 get_negated_pnc(int pnc) {
57 case False: return True; break;
58 case Eq: return Ne; break;
59 case Lt: return Uge; break;
60 case Le: return Ug; break;
61 case Gt: return Ule; break;
62 case Ge: return Ul; break;
63 case Lg: return Ue; break;
64 case Leg: return Uo; break;
65 case Uo: return Leg; break;
66 case Ue: return Lg; break;
67 case Ul: return Ge; break;
68 case Ule: return Gt; break;
69 case Ug: return Le; break;
70 case Uge: return Lt; break;
71 case Ne: return Eq; break;
72 case True: return False; break;
74 return 99; /* to shut up gcc */
77 const char *pns_name_arr [] = {
78 "initial_exec", "global_store",
79 "frame_base", "globals", "args"
82 const char *symconst_name_arr [] = {
83 "type_tag", "size", "addr_name", "addr_ent"
93 * Create a new irnode in irg, with an op, mode, arity and
94 * some incoming irnodes.
95 * If arity is negative, a node with a dynamic array is created.
98 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
99 int arity, ir_node **in)
102 int node_size = offsetof (ir_node, attr) + op->attr_size;
104 assert(irg && op && mode);
105 res = (ir_node *) obstack_alloc (irg->obst, node_size);
106 memset((void *)res, 0, node_size);
108 res->kind = k_ir_node;
114 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
116 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
117 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
120 set_irn_dbg_info(res, db);
124 res->node_nr = get_irp_new_node_nr();
132 /* Copies all attributes stored in the old node to the new node.
133 Assumes both have the same opcode and sufficient size. */
135 copy_attrs (const ir_node *old_node, ir_node *new_node) {
136 assert(get_irn_op(old_node) == get_irn_op(new_node));
137 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
140 /*-- getting some parameters from ir_nodes --*/
143 (is_ir_node)(const void *thing) {
144 return __is_ir_node(thing);
148 (get_irn_intra_arity)(const ir_node *node) {
149 return __get_irn_intra_arity(node);
153 (get_irn_inter_arity)(const ir_node *node) {
154 return __get_irn_inter_arity(node);
158 (get_irn_arity)(const ir_node *node) {
159 return __get_irn_arity(node);
162 /* Returns the array with ins. This array is shifted with respect to the
163 array accessed by get_irn_n: The block operand is at position 0 not -1.
164 (@@@ This should be changed.)
165 The order of the predecessors in this array is not guaranteed, except that
166 lists of operands as predecessors of Block or arguments of a Call are
169 get_irn_in (const ir_node *node) {
171 if (interprocedural_view) { /* handle Filter and Block specially */
172 if (get_irn_opcode(node) == iro_Filter) {
173 assert(node->attr.filter.in_cg);
174 return node->attr.filter.in_cg;
175 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
176 return node->attr.block.in_cg;
178 /* else fall through */
184 set_irn_in (ir_node *node, int arity, ir_node **in) {
187 if (interprocedural_view) { /* handle Filter and Block specially */
188 if (get_irn_opcode(node) == iro_Filter) {
189 assert(node->attr.filter.in_cg);
190 arr = &node->attr.filter.in_cg;
191 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
192 arr = &node->attr.block.in_cg;
199 if (arity != ARR_LEN(*arr) - 1) {
200 ir_node * block = (*arr)[0];
201 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
204 fix_backedges(current_ir_graph->obst, node);
205 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
209 (get_irn_intra_n)(ir_node *node, int n) {
210 return __get_irn_intra_n (node, n);
214 (get_irn_inter_n)(ir_node *node, int n) {
215 return __get_irn_inter_n (node, n);
219 (get_irn_n)(ir_node *node, int n) {
220 return __get_irn_n (node, n);
224 set_irn_n (ir_node *node, int n, ir_node *in) {
225 assert(node && -1 <= n && n < get_irn_arity(node));
226 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
227 /* Change block pred in both views! */
228 node->in[n + 1] = in;
229 assert(node->attr.filter.in_cg);
230 node->attr.filter.in_cg[n + 1] = in;
233 if (interprocedural_view) { /* handle Filter and Block specially */
234 if (get_irn_opcode(node) == iro_Filter) {
235 assert(node->attr.filter.in_cg);
236 node->attr.filter.in_cg[n + 1] = in;
238 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
239 node->attr.block.in_cg[n + 1] = in;
242 /* else fall through */
244 node->in[n + 1] = in;
248 (get_irn_mode)(const ir_node *node) {
249 return __get_irn_mode(node);
253 (set_irn_mode)(ir_node *node, ir_mode *mode)
255 __set_irn_mode(node, mode);
259 get_irn_modecode (const ir_node *node)
262 return node->mode->code;
265 /** Gets the string representation of the mode .*/
267 get_irn_modename (const ir_node *node)
270 return get_mode_name(node->mode);
274 get_irn_modeident (const ir_node *node)
277 return get_mode_ident(node->mode);
281 (get_irn_op)(const ir_node *node)
283 return __get_irn_op(node);
286 /* should be private to the library: */
288 set_irn_op (ir_node *node, ir_op *op)
295 (get_irn_opcode)(const ir_node *node)
297 return __get_irn_opcode(node);
301 get_irn_opname (const ir_node *node)
304 if ((get_irn_op((ir_node *)node) == op_Phi) &&
305 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
306 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
307 return get_id_str(node->op->name);
311 get_irn_opident (const ir_node *node)
314 return node->op->name;
318 (get_irn_visited)(const ir_node *node)
320 return __get_irn_visited(node);
324 (set_irn_visited)(ir_node *node, unsigned long visited)
326 __set_irn_visited(node, visited);
330 (mark_irn_visited)(ir_node *node) {
331 __mark_irn_visited(node);
335 (irn_not_visited)(const ir_node *node) {
336 return __irn_not_visited(node);
340 (irn_visited)(const ir_node *node) {
341 return __irn_visited(node);
345 (set_irn_link)(ir_node *node, void *link) {
346 __set_irn_link(node, link);
350 (get_irn_link)(const ir_node *node) {
351 return __get_irn_link(node);
354 /* Outputs a unique number for this node */
356 get_irn_node_nr(const ir_node *node) {
359 return node->node_nr;
366 get_irn_const_attr (ir_node *node)
368 assert (node->op == op_Const);
369 return node->attr.con;
373 get_irn_proj_attr (ir_node *node)
375 assert (node->op == op_Proj);
376 return node->attr.proj;
380 get_irn_alloc_attr (ir_node *node)
382 assert (node->op == op_Alloc);
387 get_irn_free_attr (ir_node *node)
389 assert (node->op == op_Free);
390 return node->attr.f = skip_tid(node->attr.f);
394 get_irn_symconst_attr (ir_node *node)
396 assert (node->op == op_SymConst);
401 get_irn_call_attr (ir_node *node)
403 assert (node->op == op_Call);
404 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
408 get_irn_funccall_attr (ir_node *node)
410 assert (node->op == op_FuncCall);
411 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
415 get_irn_sel_attr (ir_node *node)
417 assert (node->op == op_Sel);
422 get_irn_phi_attr (ir_node *node)
424 assert (node->op == op_Phi);
425 return node->attr.phi0_pos;
429 get_irn_block_attr (ir_node *node)
431 assert (node->op == op_Block);
432 return node->attr.block;
435 /** manipulate fields of individual nodes **/
437 /* this works for all except Block */
439 get_nodes_Block (ir_node *node) {
440 assert (!(node->op == op_Block));
441 return get_irn_n(node, -1);
445 set_nodes_Block (ir_node *node, ir_node *block) {
446 assert (!(node->op == op_Block));
447 set_irn_n(node, -1, block);
450 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
451 * from Start. If so returns frame type, else Null. */
452 type *is_frame_pointer(ir_node *n) {
453 if ((get_irn_op(n) == op_Proj) &&
454 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
455 ir_node *start = get_Proj_pred(n);
456 if (get_irn_op(start) == op_Start) {
457 return get_irg_frame_type(get_irn_irg(start));
463 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
464 * from Start. If so returns global type, else Null. */
465 type *is_globals_pointer(ir_node *n) {
466 if ((get_irn_op(n) == op_Proj) &&
467 (get_Proj_proj(n) == pn_Start_P_globals)) {
468 ir_node *start = get_Proj_pred(n);
469 if (get_irn_op(start) == op_Start) {
470 return get_glob_type();
476 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
477 * from Start. If so returns 1, else 0. */
478 int is_value_arg_pointer(ir_node *n) {
479 if ((get_irn_op(n) == op_Proj) &&
480 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
481 (get_irn_op(get_Proj_pred(n)) == op_Start))
486 /* Returns an array with the predecessors of the Block. Depending on
487 the implementation of the graph data structure this can be a copy of
488 the internal representation of predecessors as well as the internal
489 array itself. Therefore writing to this array might obstruct the ir. */
491 get_Block_cfgpred_arr (ir_node *node)
493 assert ((node->op == op_Block));
494 return (ir_node **)&(get_irn_in(node)[1]);
499 get_Block_n_cfgpreds (ir_node *node) {
500 assert ((node->op == op_Block));
501 return get_irn_arity(node);
505 get_Block_cfgpred (ir_node *node, int pos) {
507 assert (node->op == op_Block);
508 assert(-1 <= pos && pos < get_irn_arity(node));
509 return get_irn_n(node, pos);
513 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
514 assert (node->op == op_Block);
515 set_irn_n(node, pos, pred);
519 get_Block_matured (ir_node *node) {
520 assert (node->op == op_Block);
521 return node->attr.block.matured;
525 set_Block_matured (ir_node *node, bool matured) {
526 assert (node->op == op_Block);
527 node->attr.block.matured = matured;
530 get_Block_block_visited (ir_node *node) {
531 assert (node->op == op_Block);
532 return node->attr.block.block_visited;
536 set_Block_block_visited (ir_node *node, unsigned long visit) {
537 assert (node->op == op_Block);
538 node->attr.block.block_visited = visit;
541 /* For this current_ir_graph must be set. */
543 mark_Block_block_visited (ir_node *node) {
544 assert (node->op == op_Block);
545 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
549 Block_not_block_visited(ir_node *node) {
550 assert (node->op == op_Block);
551 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
555 get_Block_graph_arr (ir_node *node, int pos) {
556 assert (node->op == op_Block);
557 return node->attr.block.graph_arr[pos+1];
561 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
562 assert (node->op == op_Block);
563 node->attr.block.graph_arr[pos+1] = value;
566 /* handler handling for Blocks * /
568 set_Block_handler (ir_node *block, ir_node *handler) {
569 assert ((block->op == op_Block));
570 assert ((handler->op == op_Block));
571 block->attr.block.handler_entry = handler;
575 get_Block_handler (ir_node *block) {
576 assert ((block->op == op_Block));
577 return (block->attr.block.handler_entry);
580 / * handler handling for Nodes * /
582 set_Node_handler (ir_node *node, ir_node *handler) {
583 set_Block_handler (get_nodes_Block (node), handler);
587 get_Node_handler (ir_node *node) {
588 return (get_Block_handler (get_nodes_Block (node)));
591 / * exc_t handling for Blocks * /
592 void set_Block_exc (ir_node *block, exc_t exc) {
593 assert ((block->op == op_Block));
594 block->attr.block.exc = exc;
597 exc_t get_Block_exc (ir_node *block) {
598 assert ((block->op == op_Block));
599 return (block->attr.block.exc);
602 / * exc_t handling for Nodes * /
603 void set_Node_exc (ir_node *node, exc_t exc) {
604 set_Block_exc (get_nodes_Block (node), exc);
607 exc_t get_Node_exc (ir_node *node) {
608 return (get_Block_exc (get_nodes_Block (node)));
612 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
613 assert(node->op == op_Block);
614 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
615 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
616 node->attr.block.in_cg[0] = NULL;
617 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
619 /* Fix backedge array. fix_backedges operates depending on
620 interprocedural_view. */
621 bool ipv = interprocedural_view;
622 interprocedural_view = true;
623 fix_backedges(current_ir_graph->obst, node);
624 interprocedural_view = ipv;
627 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
630 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
631 assert(node->op == op_Block &&
632 node->attr.block.in_cg &&
633 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
634 node->attr.block.in_cg[pos + 1] = pred;
637 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
638 assert(node->op == op_Block);
639 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
642 int get_Block_cg_n_cfgpreds(ir_node * node) {
643 assert(node->op == op_Block);
644 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
647 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
648 assert(node->op == op_Block && node->attr.block.in_cg);
649 return node->attr.block.in_cg[pos + 1];
652 void remove_Block_cg_cfgpred_arr(ir_node * node) {
653 assert(node->op == op_Block);
654 node->attr.block.in_cg = NULL;
657 /* Start references the irg it is in. */
659 get_Start_irg(ir_node *node) {
660 return get_irn_irg(node);
664 set_Start_irg(ir_node *node, ir_graph *irg) {
665 assert(node->op == op_Start);
666 assert(is_ir_graph(irg));
667 assert(0 && " Why set irg? -- use set_irn_irg");
671 get_End_n_keepalives(ir_node *end) {
672 assert (end->op == op_End);
673 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
677 get_End_keepalive(ir_node *end, int pos) {
678 assert (end->op == op_End);
679 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
683 add_End_keepalive (ir_node *end, ir_node *ka) {
684 assert (end->op == op_End);
685 ARR_APP1 (ir_node *, end->in, ka);
689 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
690 assert (end->op == op_End);
691 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
695 free_End (ir_node *end) {
696 assert (end->op == op_End);
698 DEL_ARR_F(end->in); /* GL @@@ does not work! */
699 end->in = NULL; /* @@@ make sure we get an error if we use the
700 in array afterwards ... */
703 ir_graph *get_EndReg_irg (ir_node *end) {
704 return get_irn_irg(end);
707 ir_graph *get_EndExcept_irg (ir_node *end) {
708 return get_irn_irg(end);
712 > Implementing the case construct (which is where the constant Proj node is
713 > important) involves far more than simply determining the constant values.
714 > We could argue that this is more properly a function of the translator from
715 > Firm to the target machine. That could be done if there was some way of
716 > projecting "default" out of the Cond node.
717 I know it's complicated.
718 Basically there are two problems:
719 - determining the gaps between the projs
720 - determining the biggest case constant to know the proj number for
722 I see several solutions:
723 1. Introduce a ProjDefault node. Solves both problems.
724 This means to extend all optimizations executed during construction.
725 2. Give the Cond node for switch two flavors:
726 a) there are no gaps in the projs (existing flavor)
727 b) gaps may exist, default proj is still the Proj with the largest
728 projection number. This covers also the gaps.
729 3. Fix the semantic of the Cond to that of 2b)
731 Solution 2 seems to be the best:
732 Computing the gaps in the Firm representation is not too hard, i.e.,
733 libFIRM can implement a routine that transforms between the two
734 flavours. This is also possible for 1) but 2) does not require to
735 change any existing optimization.
736 Further it should be far simpler to determine the biggest constant than
738 I don't want to choose 3) as 2a) seems to have advantages for
739 dataflow analysis and 3) does not allow to convert the representation to
743 get_Cond_selector (ir_node *node) {
744 assert (node->op == op_Cond);
745 return get_irn_n(node, 0);
749 set_Cond_selector (ir_node *node, ir_node *selector) {
750 assert (node->op == op_Cond);
751 set_irn_n(node, 0, selector);
755 get_Cond_kind (ir_node *node) {
756 assert (node->op == op_Cond);
757 return node->attr.c.kind;
761 set_Cond_kind (ir_node *node, cond_kind kind) {
762 assert (node->op == op_Cond);
763 node->attr.c.kind = kind;
767 get_Cond_defaultProj (ir_node *node) {
768 assert (node->op == op_Cond);
769 return node->attr.c.default_proj;
773 get_Return_mem (ir_node *node) {
774 assert (node->op == op_Return);
775 return get_irn_n(node, 0);
779 set_Return_mem (ir_node *node, ir_node *mem) {
780 assert (node->op == op_Return);
781 set_irn_n(node, 0, mem);
785 get_Return_n_ress (ir_node *node) {
786 assert (node->op == op_Return);
787 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
791 get_Return_res_arr (ir_node *node)
793 assert ((node->op == op_Return));
794 if (get_Return_n_ress(node) > 0)
795 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
802 set_Return_n_res (ir_node *node, int results) {
803 assert (node->op == op_Return);
808 get_Return_res (ir_node *node, int pos) {
809 assert (node->op == op_Return);
810 assert (get_Return_n_ress(node) > pos);
811 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
815 set_Return_res (ir_node *node, int pos, ir_node *res){
816 assert (node->op == op_Return);
817 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
821 get_Raise_mem (ir_node *node) {
822 assert (node->op == op_Raise);
823 return get_irn_n(node, 0);
827 set_Raise_mem (ir_node *node, ir_node *mem) {
828 assert (node->op == op_Raise);
829 set_irn_n(node, 0, mem);
833 get_Raise_exo_ptr (ir_node *node) {
834 assert (node->op == op_Raise);
835 return get_irn_n(node, 1);
839 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
840 assert (node->op == op_Raise);
841 set_irn_n(node, 1, exo_ptr);
844 tarval *get_Const_tarval (ir_node *node) {
845 assert (node->op == op_Const);
846 return node->attr.con.tv;
850 set_Const_tarval (ir_node *node, tarval *con) {
851 assert (node->op == op_Const);
852 node->attr.con.tv = con;
856 /* The source language type. Must be an atomic type. Mode of type must
857 be mode of node. For tarvals from entities type must be pointer to
860 get_Const_type (ir_node *node) {
861 assert (node->op == op_Const);
862 return node->attr.con.tp;
866 set_Const_type (ir_node *node, type *tp) {
867 assert (node->op == op_Const);
868 if (tp != unknown_type) {
869 assert (is_atomic_type(tp));
870 assert (get_type_mode(tp) == get_irn_mode(node));
871 assert (!tarval_is_entity(get_Const_tarval(node)) ||
872 (is_pointer_type(tp) &&
873 (get_pointer_points_to_type(tp) ==
874 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
877 node->attr.con.tp = tp;
882 get_SymConst_kind (const ir_node *node) {
883 assert (node->op == op_SymConst);
884 return node->attr.i.num;
888 set_SymConst_kind (ir_node *node, symconst_kind num) {
889 assert (node->op == op_SymConst);
890 node->attr.i.num = num;
894 get_SymConst_type (ir_node *node) {
895 assert ( (node->op == op_SymConst)
896 && ( get_SymConst_kind(node) == symconst_type_tag
897 || get_SymConst_kind(node) == symconst_size));
898 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
902 set_SymConst_type (ir_node *node, type *tp) {
903 assert ( (node->op == op_SymConst)
904 && ( get_SymConst_kind(node) == symconst_type_tag
905 || get_SymConst_kind(node) == symconst_size));
906 node->attr.i.sym.type_p = tp;
910 get_SymConst_name (ir_node *node) {
911 assert ( (node->op == op_SymConst)
912 && (get_SymConst_kind(node) == symconst_addr_name));
913 return node->attr.i.sym.ident_p;
917 set_SymConst_name (ir_node *node, ident *name) {
918 assert ( (node->op == op_SymConst)
919 && (get_SymConst_kind(node) == symconst_addr_name));
920 node->attr.i.sym.ident_p = name;
924 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
925 entity *get_SymConst_entity (ir_node *node) {
926 assert ( (node->op == op_SymConst)
927 && (get_SymConst_kind (node) == symconst_addr_ent));
928 return node->attr.i.sym.entity_p;
931 void set_SymConst_entity (ir_node *node, entity *ent) {
932 assert ( (node->op == op_SymConst)
933 && (get_SymConst_kind(node) == symconst_addr_ent));
934 node->attr.i.sym.entity_p = ent;
939 get_SymConst_type_or_id (ir_node *node) {
940 assert (node->op == op_SymConst);
941 return node->attr.i.sym;
945 set_SymConst_type_or_id (ir_node *node, symconst_symbol sym) {
946 assert (node->op == op_SymConst);
947 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
948 node->attr.i.sym = sym;
952 get_Sel_mem (ir_node *node) {
953 assert (node->op == op_Sel);
954 return get_irn_n(node, 0);
958 set_Sel_mem (ir_node *node, ir_node *mem) {
959 assert (node->op == op_Sel);
960 set_irn_n(node, 0, mem);
964 get_Sel_ptr (ir_node *node) {
965 assert (node->op == op_Sel);
966 return get_irn_n(node, 1);
970 set_Sel_ptr (ir_node *node, ir_node *ptr) {
971 assert (node->op == op_Sel);
972 set_irn_n(node, 1, ptr);
976 get_Sel_n_indexs (ir_node *node) {
977 assert (node->op == op_Sel);
978 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
982 get_Sel_index_arr (ir_node *node)
984 assert ((node->op == op_Sel));
985 if (get_Sel_n_indexs(node) > 0)
986 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
992 get_Sel_index (ir_node *node, int pos) {
993 assert (node->op == op_Sel);
994 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
998 set_Sel_index (ir_node *node, int pos, ir_node *index) {
999 assert (node->op == op_Sel);
1000 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1004 get_Sel_entity (ir_node *node) {
1005 assert (node->op == op_Sel);
1006 return node->attr.s.ent;
1010 set_Sel_entity (ir_node *node, entity *ent) {
1011 assert (node->op == op_Sel);
1012 node->attr.s.ent = ent;
1016 get_InstOf_ent (ir_node *node) {
1017 assert (node->op = op_InstOf);
1018 return (node->attr.io.ent);
1022 set_InstOf_ent (ir_node *node, type *ent) {
1023 assert (node->op = op_InstOf);
1024 node->attr.io.ent = ent;
1028 get_InstOf_store (ir_node *node) {
1029 assert (node->op = op_InstOf);
1030 return (get_irn_n (node, 0));
1034 set_InstOf_store (ir_node *node, ir_node *obj) {
1035 assert (node->op = op_InstOf);
1036 set_irn_n (node, 0, obj);
1040 get_InstOf_obj (ir_node *node) {
1041 assert (node->op = op_InstOf);
1042 return (get_irn_n (node, 1));
1046 set_InstOf_obj (ir_node *node, ir_node *obj) {
1047 assert (node->op = op_InstOf);
1048 set_irn_n (node, 1, obj);
1052 /* For unary and binary arithmetic operations the access to the
1053 operands can be factored out. Left is the first, right the
1054 second arithmetic value as listed in tech report 0999-33.
1055 unops are: Minus, Abs, Not, Conv, Cast
1056 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1057 Shr, Shrs, Rotate, Cmp */
1061 get_Call_mem (ir_node *node) {
1062 assert (node->op == op_Call);
1063 return get_irn_n(node, 0);
1067 set_Call_mem (ir_node *node, ir_node *mem) {
1068 assert (node->op == op_Call);
1069 set_irn_n(node, 0, mem);
1073 get_Call_ptr (ir_node *node) {
1074 assert (node->op == op_Call);
1075 return get_irn_n(node, 1);
1079 set_Call_ptr (ir_node *node, ir_node *ptr) {
1080 assert (node->op == op_Call);
1081 set_irn_n(node, 1, ptr);
1085 get_Call_param_arr (ir_node *node) {
1086 assert (node->op == op_Call);
1087 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1091 get_Call_n_params (ir_node *node) {
1092 assert (node->op == op_Call);
1093 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1097 get_Call_arity (ir_node *node) {
1098 assert (node->op == op_Call);
1099 return get_Call_n_params(node);
1103 set_Call_arity (ir_node *node, ir_node *arity) {
1104 assert (node->op == op_Call);
1109 get_Call_param (ir_node *node, int pos) {
1110 assert (node->op == op_Call);
1111 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1115 set_Call_param (ir_node *node, int pos, ir_node *param) {
1116 assert (node->op == op_Call);
1117 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1121 get_Call_type (ir_node *node) {
1122 assert (node->op == op_Call);
1123 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1127 set_Call_type (ir_node *node, type *tp) {
1128 assert (node->op == op_Call);
1129 assert (is_method_type(tp));
1130 node->attr.call.cld_tp = tp;
1133 int Call_has_callees(ir_node *node) {
1134 return (node->attr.call.callee_arr != NULL);
1137 int get_Call_n_callees(ir_node * node) {
1138 assert(node->op == op_Call && node->attr.call.callee_arr);
1139 return ARR_LEN(node->attr.call.callee_arr);
1142 entity * get_Call_callee(ir_node * node, int pos) {
1143 assert(node->op == op_Call && node->attr.call.callee_arr);
1144 return node->attr.call.callee_arr[pos];
1147 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1148 assert(node->op == op_Call);
1149 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1150 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1152 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1155 void remove_Call_callee_arr(ir_node * node) {
1156 assert(node->op == op_Call);
1157 node->attr.call.callee_arr = NULL;
1160 ir_node * get_CallBegin_ptr (ir_node *node) {
1161 assert(node->op == op_CallBegin);
1162 return get_irn_n(node, 0);
1164 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1165 assert(node->op == op_CallBegin);
1166 set_irn_n(node, 0, ptr);
1168 ir_graph * get_CallBegin_irg (ir_node *node) {
1169 return get_irn_irg(node);
1171 ir_node * get_CallBegin_call (ir_node *node) {
1172 assert(node->op == op_CallBegin);
1173 return node->attr.callbegin.call;
1175 void set_CallBegin_call (ir_node *node, ir_node *call) {
1176 assert(node->op == op_CallBegin);
1177 node->attr.callbegin.call = call;
1181 get_FuncCall_ptr (ir_node *node) {
1182 assert (node->op == op_FuncCall);
1183 return get_irn_n(node, 0);
1187 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1188 assert (node->op == op_FuncCall);
1189 set_irn_n(node, 0, ptr);
1193 get_FuncCall_param_arr (ir_node *node) {
1194 assert (node->op == op_FuncCall);
1195 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET];
1199 get_FuncCall_n_params (ir_node *node) {
1200 assert (node->op == op_FuncCall);
1201 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1205 get_FuncCall_arity (ir_node *node) {
1206 assert (node->op == op_FuncCall);
1207 return get_FuncCall_n_params(node);
1211 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1212 assert (node->op == op_FuncCall);
1217 get_FuncCall_param (ir_node *node, int pos) {
1218 assert (node->op == op_FuncCall);
1219 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1223 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1224 assert (node->op == op_FuncCall);
1225 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1229 get_FuncCall_type (ir_node *node) {
1230 assert (node->op == op_FuncCall);
1231 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1235 set_FuncCall_type (ir_node *node, type *tp) {
1236 assert (node->op == op_FuncCall);
1237 assert (is_method_type(tp));
1238 node->attr.call.cld_tp = tp;
1241 int FuncCall_has_callees(ir_node *node) {
1242 return (node->attr.call.callee_arr != NULL);
1245 int get_FuncCall_n_callees(ir_node * node) {
1246 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1247 return ARR_LEN(node->attr.call.callee_arr);
1250 entity * get_FuncCall_callee(ir_node * node, int pos) {
1251 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1252 return node->attr.call.callee_arr[pos];
1255 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1256 assert(node->op == op_FuncCall);
1257 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1258 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1260 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1263 void remove_FuncCall_callee_arr(ir_node * node) {
1264 assert(node->op == op_FuncCall);
1265 node->attr.call.callee_arr = NULL;
1270 ir_node * get_##OP##_left(ir_node *node) { \
1271 assert(node->op == op_##OP); \
1272 return get_irn_n(node, node->op->op_index); \
1274 void set_##OP##_left(ir_node *node, ir_node *left) { \
1275 assert(node->op == op_##OP); \
1276 set_irn_n(node, node->op->op_index, left); \
1278 ir_node *get_##OP##_right(ir_node *node) { \
1279 assert(node->op == op_##OP); \
1280 return get_irn_n(node, node->op->op_index + 1); \
1282 void set_##OP##_right(ir_node *node, ir_node *right) { \
1283 assert(node->op == op_##OP); \
1284 set_irn_n(node, node->op->op_index + 1, right); \
1288 ir_node *get_##OP##_op(ir_node *node) { \
1289 assert(node->op == op_##OP); \
1290 return get_irn_n(node, node->op->op_index); \
1292 void set_##OP##_op (ir_node *node, ir_node *op) { \
1293 assert(node->op == op_##OP); \
1294 set_irn_n(node, node->op->op_index, op); \
1304 get_Quot_mem (ir_node *node) {
1305 assert (node->op == op_Quot);
1306 return get_irn_n(node, 0);
1310 set_Quot_mem (ir_node *node, ir_node *mem) {
1311 assert (node->op == op_Quot);
1312 set_irn_n(node, 0, mem);
1318 get_DivMod_mem (ir_node *node) {
1319 assert (node->op == op_DivMod);
1320 return get_irn_n(node, 0);
1324 set_DivMod_mem (ir_node *node, ir_node *mem) {
1325 assert (node->op == op_DivMod);
1326 set_irn_n(node, 0, mem);
1332 get_Div_mem (ir_node *node) {
1333 assert (node->op == op_Div);
1334 return get_irn_n(node, 0);
1338 set_Div_mem (ir_node *node, ir_node *mem) {
1339 assert (node->op == op_Div);
1340 set_irn_n(node, 0, mem);
1346 get_Mod_mem (ir_node *node) {
1347 assert (node->op == op_Mod);
1348 return get_irn_n(node, 0);
1352 set_Mod_mem (ir_node *node, ir_node *mem) {
1353 assert (node->op == op_Mod);
1354 set_irn_n(node, 0, mem);
1371 get_Cast_type (ir_node *node) {
1372 assert (node->op == op_Cast);
1373 return node->attr.cast.totype;
1377 set_Cast_type (ir_node *node, type *to_tp) {
1378 assert (node->op == op_Cast);
1379 node->attr.cast.totype = to_tp;
/* Returns true if node's opcode takes exactly one data operand. */
1383 is_unop (ir_node *node) {
1384 return (node->op->opar == oparity_unary);
/* Returns the single data operand of a unary node.
   op_index is the per-op position of the first data operand
   (skipping e.g. a leading memory input). */
1388 get_unop_op (ir_node *node) {
1389 if (node->op->opar == oparity_unary)
1390 return get_irn_n(node, node->op->op_index);
/* deliberately fails: caller passed a non-unary node */
1392 assert(node->op->opar == oparity_unary);
/* Sets the single data operand of a unary node. */
1397 set_unop_op (ir_node *node, ir_node *op) {
1398 if (node->op->opar == oparity_unary)
1399 set_irn_n(node, node->op->op_index, op);
1401 assert(node->op->opar == oparity_unary);
/* Returns true if node's opcode takes exactly two data operands. */
1405 is_binop (ir_node *node) {
1406 return (node->op->opar == oparity_binary);
/* Returns the left operand (data operand at op_index) of a binary node. */
1410 get_binop_left (ir_node *node) {
1411 if (node->op->opar == oparity_binary)
1412 return get_irn_n(node, node->op->op_index);
1414 assert(node->op->opar == oparity_binary);
/* Sets the left operand of a binary node. */
1419 set_binop_left (ir_node *node, ir_node *left) {
1420 if (node->op->opar == oparity_binary)
1421 set_irn_n(node, node->op->op_index, left);
1423 assert (node->op->opar == oparity_binary);
/* Returns the right operand (data operand at op_index + 1) of a binary node. */
1427 get_binop_right (ir_node *node) {
1428 if (node->op->opar == oparity_binary)
1429 return get_irn_n(node, node->op->op_index + 1);
1431 assert(node->op->opar == oparity_binary);
/* Sets the right operand of a binary node. */
1436 set_binop_right (ir_node *node, ir_node *right) {
1437 if (node->op->opar == oparity_binary)
1438 set_irn_n(node, node->op->op_index + 1, right);
1440 assert (node->op->opar == oparity_binary);
/* Returns true if n is a "real" Phi: in the interprocedural view a
   Filter counts as a Phi; a plain Phi counts unless it is a Phi0
   placeholder (arity 0 during graph construction). */
1443 int is_Phi (ir_node *n) {
1449 if (op == op_Filter) return interprocedural_view;
/* a Phi with arity 0 while the graph is still being built is a Phi0,
   not a real Phi */
1452 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1453 (get_irn_arity(n) > 0));
/* Returns true if n is a Phi0: an arity-0 Phi placeholder that only
   exists while the graph is in the building phase. */
1458 int is_Phi0 (ir_node *n) {
1461 return ((get_irn_op(n) == op_Phi) &&
1462 (get_irn_arity(n) == 0) &&
1463 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Returns the predecessor array of a Phi; in[0] is the block, so the
   data predecessors start at in[1]. */
1467 get_Phi_preds_arr (ir_node *node) {
1468 assert (node->op == op_Phi);
1469 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of data predecessors of a Phi (or Phi0). */
1473 get_Phi_n_preds (ir_node *node) {
1474 assert (is_Phi(node) || is_Phi0(node));
1475 return (get_irn_arity(node));
/* Resizes the predecessor array of a Phi to n_preds entries. */
1479 void set_Phi_n_preds (ir_node *node, int n_preds) {
1480 assert (node->op == op_Phi);
/* Returns the data predecessor at position pos of a Phi (or Phi0). */
1485 get_Phi_pred (ir_node *node, int pos) {
1486 assert (is_Phi(node) || is_Phi0(node));
1487 return get_irn_n(node, pos);
/* Sets the data predecessor at position pos of a Phi (or Phi0). */
1491 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1492 assert (is_Phi(node) || is_Phi0(node));
1493 set_irn_n(node, pos, pred);
/* Load predecessors: 0 = memory, 1 = address. */
1497 get_Load_mem (ir_node *node) {
1498 assert (node->op == op_Load);
1499 return get_irn_n(node, 0);
1503 set_Load_mem (ir_node *node, ir_node *mem) {
1504 assert (node->op == op_Load);
1505 set_irn_n(node, 0, mem);
/* Returns the address input (predecessor 1) of a Load node. */
1509 get_Load_ptr (ir_node *node) {
1510 assert (node->op == op_Load);
1511 return get_irn_n(node, 1);
1515 set_Load_ptr (ir_node *node, ir_node *ptr) {
1516 assert (node->op == op_Load);
1517 set_irn_n(node, 1, ptr);
/* Store predecessors: 0 = memory, 1 = address, 2 = value. */
1522 get_Store_mem (ir_node *node) {
1523 assert (node->op == op_Store);
1524 return get_irn_n(node, 0);
1528 set_Store_mem (ir_node *node, ir_node *mem) {
1529 assert (node->op == op_Store);
1530 set_irn_n(node, 0, mem);
/* Returns the address input (predecessor 1) of a Store node. */
1534 get_Store_ptr (ir_node *node) {
1535 assert (node->op == op_Store);
1536 return get_irn_n(node, 1);
1540 set_Store_ptr (ir_node *node, ir_node *ptr) {
1541 assert (node->op == op_Store);
1542 set_irn_n(node, 1, ptr);
/* Returns the value to be stored (predecessor 2) of a Store node. */
1546 get_Store_value (ir_node *node) {
1547 assert (node->op == op_Store);
1548 return get_irn_n(node, 2);
1552 set_Store_value (ir_node *node, ir_node *value) {
1553 assert (node->op == op_Store);
1554 set_irn_n(node, 2, value);
/* Alloc predecessors: 0 = memory, 1 = size.  Allocated type and the
   stack/heap flag are stored as node attributes. */
1558 get_Alloc_mem (ir_node *node) {
1559 assert (node->op == op_Alloc);
1560 return get_irn_n(node, 0);
1564 set_Alloc_mem (ir_node *node, ir_node *mem) {
1565 assert (node->op == op_Alloc);
1566 set_irn_n(node, 0, mem);
/* Returns the size input (predecessor 1) of an Alloc node. */
1570 get_Alloc_size (ir_node *node) {
1571 assert (node->op == op_Alloc);
1572 return get_irn_n(node, 1);
1576 set_Alloc_size (ir_node *node, ir_node *size) {
1577 assert (node->op == op_Alloc);
1578 set_irn_n(node, 1, size);
/* Returns the allocated type; skip_tid normalizes away type-id
   indirections and the result is cached back into the attribute. */
1582 get_Alloc_type (ir_node *node) {
1583 assert (node->op == op_Alloc);
1584 return node->attr.a.type = skip_tid(node->attr.a.type);
1588 set_Alloc_type (ir_node *node, type *tp) {
1589 assert (node->op == op_Alloc);
1590 node->attr.a.type = tp;
/* Returns where the allocation happens (stack or heap). */
1594 get_Alloc_where (ir_node *node) {
1595 assert (node->op == op_Alloc);
1596 return node->attr.a.where;
1600 set_Alloc_where (ir_node *node, where_alloc where) {
1601 assert (node->op == op_Alloc);
1602 node->attr.a.where = where;
/* Free predecessors: 0 = memory, 1 = pointer, 2 = size. */
1607 get_Free_mem (ir_node *node) {
1608 assert (node->op == op_Free);
1609 return get_irn_n(node, 0);
1613 set_Free_mem (ir_node *node, ir_node *mem) {
1614 assert (node->op == op_Free);
1615 set_irn_n(node, 0, mem);
/* Returns the pointer input (predecessor 1) of a Free node. */
1619 get_Free_ptr (ir_node *node) {
1620 assert (node->op == op_Free);
1621 return get_irn_n(node, 1);
1625 set_Free_ptr (ir_node *node, ir_node *ptr) {
1626 assert (node->op == op_Free);
1627 set_irn_n(node, 1, ptr);
/* Returns the size input (predecessor 2) of a Free node. */
1631 get_Free_size (ir_node *node) {
1632 assert (node->op == op_Free);
1633 return get_irn_n(node, 2);
1637 set_Free_size (ir_node *node, ir_node *size) {
1638 assert (node->op == op_Free);
1639 set_irn_n(node, 2, size);
/* Returns the freed type; like get_Alloc_type, skip_tid normalizes
   and the result is cached back into the attribute. */
1643 get_Free_type (ir_node *node) {
1644 assert (node->op == op_Free);
1645 return node->attr.f = skip_tid(node->attr.f);
/* Sets the type of the freed entity. */
1649 set_Free_type (ir_node *node, type *tp) {
1650 assert (node->op == op_Free);
/* Returns the predecessor array of a Sync; in[0] is the block, so the
   data predecessors start at in[1]. */
1655 get_Sync_preds_arr (ir_node *node) {
1656 assert (node->op == op_Sync);
1657 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of memory predecessors of a Sync. */
1661 get_Sync_n_preds (ir_node *node) {
1662 assert (node->op == op_Sync);
1663 return (get_irn_arity(node));
/* Resizes the predecessor array of a Sync to n_preds entries. */
1668 set_Sync_n_preds (ir_node *node, int n_preds) {
1669 assert (node->op == op_Sync);
/* Returns the predecessor at position pos of a Sync. */
1674 get_Sync_pred (ir_node *node, int pos) {
1675 assert (node->op == op_Sync);
1676 return get_irn_n(node, pos);
1680 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1681 assert (node->op == op_Sync);
1682 set_irn_n(node, pos, pred);
/* Returns the projected node (predecessor 0) of a Proj.  Note that
   is_Proj also accepts Filter nodes outside the interprocedural view. */
1686 get_Proj_pred (ir_node *node) {
1687 assert (is_Proj(node));
1688 return get_irn_n(node, 0);
1692 set_Proj_pred (ir_node *node, ir_node *pred) {
1693 assert (is_Proj(node));
1694 set_irn_n(node, 0, pred);
/* Returns the projection number; for Filter nodes (which act as Proj
   outside the interprocedural view) it lives in a different attribute. */
1698 get_Proj_proj (ir_node *node) {
1699 assert (is_Proj(node));
1700 if (get_irn_opcode(node) == iro_Proj) {
1701 return node->attr.proj;
1703 assert(get_irn_opcode(node) == iro_Filter);
1704 return node->attr.filter.proj;
/* Sets the projection number.  NOTE(review): unlike the getter this
   asserts op_Proj only; Filter nodes presumably must go through
   set_Filter_proj instead -- confirm this asymmetry is intended. */
1709 set_Proj_proj (ir_node *node, long proj) {
1710 assert (node->op == op_Proj);
1711 node->attr.proj = proj;
/* Returns the predecessor array of a Tuple; in[0] is the block, so the
   data predecessors start at in[1]. */
1715 get_Tuple_preds_arr (ir_node *node) {
1716 assert (node->op == op_Tuple);
1717 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of predecessors of a Tuple. */
1721 get_Tuple_n_preds (ir_node *node) {
1722 assert (node->op == op_Tuple);
1723 return (get_irn_arity(node));
/* Resizes the predecessor array of a Tuple to n_preds entries. */
1728 set_Tuple_n_preds (ir_node *node, int n_preds) {
1729 assert (node->op == op_Tuple);
/* Returns the predecessor at position pos of a Tuple. */
1734 get_Tuple_pred (ir_node *node, int pos) {
1735 assert (node->op == op_Tuple);
1736 return get_irn_n(node, pos);
1740 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1741 assert (node->op == op_Tuple);
1742 set_irn_n(node, pos, pred);
/* Id nodes forward their single predecessor (predecessor 0). */
1746 get_Id_pred (ir_node *node) {
1747 assert (node->op == op_Id);
1748 return get_irn_n(node, 0);
1752 set_Id_pred (ir_node *node, ir_node *pred) {
1753 assert (node->op == op_Id);
1754 set_irn_n(node, 0, pred);
/* Confirm predecessors: 0 = the confirmed value, 1 = the bound it is
   compared against. */
1757 ir_node *get_Confirm_value (ir_node *node) {
1758 assert (node->op == op_Confirm);
1759 return get_irn_n(node, 0);
1761 void set_Confirm_value (ir_node *node, ir_node *value) {
1762 assert (node->op == op_Confirm);
1763 set_irn_n(node, 0, value);
/* Returns the bound operand (predecessor 1) of a Confirm node. */
1765 ir_node *get_Confirm_bound (ir_node *node) {
1766 assert (node->op == op_Confirm);
1767 return get_irn_n(node, 1);
1769 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1770 assert (node->op == op_Confirm);
1771 set_irn_n(node, 0, bound);
/* Returns the compare relation (pn_Cmp) the Confirm asserts between
   its value and its bound. */
1773 pn_Cmp get_Confirm_cmp (ir_node *node) {
1774 assert (node->op == op_Confirm);
1775 return node->attr.confirm_cmp;
/* Sets the compare relation of a Confirm node. */
1777 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1778 assert (node->op == op_Confirm);
1779 node->attr.confirm_cmp = cmp;
/* Returns the (intraprocedural) predecessor of a Filter node. */
1784 get_Filter_pred (ir_node *node) {
1785 assert(node->op == op_Filter);
/* Sets the (intraprocedural) predecessor of a Filter node. */
1789 set_Filter_pred (ir_node *node, ir_node *pred) {
1790 assert(node->op == op_Filter);
/* Returns the projection number of a Filter node. */
1794 get_Filter_proj(ir_node *node) {
1795 assert(node->op == op_Filter);
1796 return node->attr.filter.proj;
/* Sets the projection number of a Filter node. */
1799 set_Filter_proj (ir_node *node, long proj) {
1800 assert(node->op == op_Filter);
1801 node->attr.filter.proj = proj;
1804 /* Don't use get_irn_arity, get_irn_n in implementation as access
1805 shall work independent of view!!! */
/* Installs the interprocedural (call-graph) predecessor array of a
   Filter.  The in_cg array mirrors the normal in array layout: slot 0
   holds the block, data predecessors follow at 1..arity. */
1806 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1807 assert(node->op == op_Filter);
/* (re)allocate only when the arity actually changed */
1808 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1809 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1810 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
/* fresh backedge flags start out cleared */
1811 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
/* slot 0 stays the block predecessor, as in the normal in array */
1812 node->attr.filter.in_cg[0] = node->in[0];
1814 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets one interprocedural predecessor of a Filter (bounds-checked). */
1817 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1818 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1819 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1820 node->attr.filter.in_cg[pos + 1] = pred;
/* Returns the number of interprocedural predecessors of a Filter
   (the in_cg array length minus the block slot). */
1822 int get_Filter_n_cg_preds(ir_node *node) {
1823 assert(node->op == op_Filter && node->attr.filter.in_cg);
1824 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Returns the interprocedural predecessor at position pos of a Filter. */
1826 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1828 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1830 arity = ARR_LEN(node->attr.filter.in_cg);
1831 assert(pos < arity - 1);
1832 return node->attr.filter.in_cg[pos + 1];
/* Returns the ir_graph a node belongs to.  Only Block nodes carry the
   irg attribute, so non-Block nodes are resolved via their block. */
1837 get_irn_irg(ir_node *node) {
1838 if (get_irn_op(node) != op_Block)
1839 node = get_nodes_block(node);
1840 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1841 node = get_nodes_block(node);
1842 assert(get_irn_op(node) == op_Block);
1843 return node->attr.block.irg;
1847 /*----------------------------------------------------------------*/
1848 /* Auxiliary routines */
1849 /*----------------------------------------------------------------*/
1852 skip_Proj (ir_node *node) {
1853 /* don't assert node !!! */
1854 if (node && is_Proj(node)) {
1855 return get_Proj_pred(node);
1862 skip_Tuple (ir_node *node) {
1865 if (!get_opt_normalize()) return node;
1867 node = skip_nop(node);
1868 if (get_irn_op(node) == op_Proj) {
1869 pred = skip_nop(get_Proj_pred(node));
1870 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1871 pred = skip_nop(skip_Tuple(pred));
1872 if (get_irn_op(pred) == op_Tuple)
1873 return get_Tuple_pred(pred, get_Proj_proj(node));
1878 /** returns operand of node if node is a Cast */
1879 ir_node *skip_Cast (ir_node *node) {
1880 if (node && get_irn_op(node) == op_Cast) {
1881 return skip_nop(get_irn_n(node, 0));
1888 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1889 than any other approach, as Id chains are resolved and all point to the real node, or
1890 all id's are self loops. */
/* NOTE(review): two definitions of skip_nop appear in this view;
   presumably one of them is disabled by preprocessor conditionals not
   visible here -- confirm against the full file. */
/* Resolves a chain of Id nodes to the real node, with path compression:
   the node's in[1] slot is temporarily pointed at itself to detect
   cycles, then rewritten to the resolved target. */
1892 skip_nop (ir_node *node) {
1893 /* don't assert node !!! */
1895 if (!get_opt_normalize()) return node;
1897 /* Don't use get_Id_pred: We get into an endless loop for
1898 self-referencing Ids. */
1899 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1900 ir_node *rem_pred = node->in[0+1];
1903 assert (get_irn_arity (node) > 0);
/* mark this node as "in progress" by a self-loop */
1905 node->in[0+1] = node;
1906 res = skip_nop(rem_pred);
1907 if (res->op == op_Id) /* self-loop */ return node;
/* path compression: point directly at the resolved node */
1909 node->in[0+1] = res;
1916 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1917 than any other approach, as Id chains are resolved and all point to the real node, or
1918 all id's are self loops. */
/* Alternative skip_nop with a shortcut for one-link Id chains. */
1920 skip_nop (ir_node *node) {
1922 /* don't assert node !!! */
1924 if (!get_opt_normalize()) return node;
1926 /* Don't use get_Id_pred: We get into an endless loop for
1927 self-referencing Ids. */
1928 if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
1929 ir_node *rem_pred, *res;
/* common case: a single Id in front of a real node */
1931 if (pred->op != op_Id) return pred; /* shortcut */
1934 assert (get_irn_arity (node) > 0);
/* mark this node as "in progress" by a self-loop */
1936 node->in[0+1] = node;
1937 res = skip_nop(rem_pred);
1938 if (res->op == op_Id) /* self-loop */ return node;
/* path compression: point directly at the resolved node */
1940 node->in[0+1] = res;
/* Alias for skip_nop: resolves Id chains to the real node. */
1949 skip_Id (ir_node *node) {
1950 return skip_nop(node);
/* Returns true if node is a Bad node.  Accepts NULL (returns false). */
1954 is_Bad (ir_node *node) {
1956 if ((node) && get_irn_opcode(node) == iro_Bad)
/* Returns true if node is NOT a Block node. */
1962 is_no_Block (ir_node *node) {
1964 return (get_irn_opcode(node) != iro_Block);
/* Returns true if node is a Block node. */
1968 is_Block (ir_node *node) {
1970 return (get_irn_opcode(node) == iro_Block);
1973 /* returns true if node is a Unknown node. */
1975 is_Unknown (ir_node *node) {
1977 return (get_irn_opcode(node) == iro_Unknown);
/* Returns true if node acts as a Proj: a real Proj, or a Filter when
   not in the interprocedural view. */
1981 is_Proj (const ir_node *node) {
1983 return node->op == op_Proj
1984 || (!interprocedural_view && node->op == op_Filter);
1987 /* Returns true if the operation manipulates control flow. */
1989 is_cfop(ir_node *node) {
1990 return is_cfopcode(get_irn_op(node));
1993 /* Returns true if the operation manipulates interprocedural control flow:
1994 CallBegin, EndReg, EndExcept */
1995 int is_ip_cfop(ir_node *node) {
1996 return is_ip_cfopcode(get_irn_op(node));
/* Returns the graph an interprocedural cfop belongs to. */
1999 ir_graph *get_ip_cfop_irg(ir_node *n) {
2000 return get_irn_irg(n);
2003 /* Returns true if the operation can change the control flow because
/* ... it may raise an exception (fragile op). */
2006 is_fragile_op(ir_node *node) {
2007 return is_op_fragile(get_irn_op(node));
2010 /* Returns the memory operand of fragile operations. */
2011 ir_node *get_fragile_op_mem(ir_node *node) {
2012 assert(node && is_fragile_op(node));
/* NOTE(review): the case labels of this switch are not visible in this
   view; for the opcodes handled here the memory input is pred 0. */
2014 switch (get_irn_opcode (node)) {
2023 return get_irn_n(node, 0);
2028 assert(0 && "should not be reached");
2033 #ifdef DEBUG_libfirm
/* Debug helper: prints a node's op, mode, number, address, its block
   and all data predecessors to stdout.  Compiled to a no-op stub when
   DEBUG_libfirm is not defined. */
2034 void dump_irn (ir_node *n) {
2035 int i, arity = get_irn_arity(n);
2036 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* index -1 yields the block predecessor */
2038 ir_node *pred = get_irn_n(n, -1);
2039 printf("  block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2040 get_irn_node_nr(pred), (void *)pred);
2042 printf("  preds: \n");
2043 for (i = 0; i < arity; ++i) {
2044 ir_node *pred = get_irn_n(n, i);
2045 printf("    %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2046 get_irn_node_nr(pred), (void *)pred);
2050 #else  /* DEBUG_libfirm */
2051 void dump_irn (ir_node *n) {}
2052 #endif /* DEBUG_libfirm */