/*
 * File name:   ir/ir/irnode.c
 * Purpose:     Representation of an intermediate operation.
 * Author:      Martin Trapp, Christian Schaefer
 * Modified by: Goetz Lindenmaier
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
 */
#include "irgraph_t.h"
#include "irbackedge_t.h"
/* Some constants fixing the positions of nodes predecessors
   in the in array. */
#define CALL_PARAM_OFFSET     2
#define FUNCCALL_PARAM_OFFSET 1
#define SEL_INDEX_OFFSET      2
#define RETURN_RESULT_OFFSET  1  /* mem is not a result */
#define END_KEEPALIVE_OFFSET  0
/* Printable names of the pnc (pointer compare) conditions, indexed by
   the pnc constant.  The tail entries "Ne" and "True" were truncated in
   the mangled listing and are restored here. */
static const char *pnc_name_arr [] = {
  "False", "Eq", "Lt", "Le",
  "Gt", "Ge", "Lg", "Leg", "Uo",
  "Ue", "Ul", "Ule", "Ug", "Uge",
  "Ne", "True"
};

/**
 * Returns the pnc name from a pnc constant.
 */
const char *get_pnc_string(int pnc) {
  /* robustness: reject out-of-range pnc values instead of reading
     past the end of the table */
  assert(pnc >= 0 && pnc < (int)(sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0])));
  return pnc_name_arr[pnc];
}
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
/* Printable names of the Proj numbers of the Start node. */
const char *pns_name_arr [] = {
  "initial_exec", "global_store",
  "frame_base", "globals", "args"
};

/* Printable names of the SymConst kinds, indexed by symconst_kind. */
const char *symconst_name_arr [] = {
  "type_tag", "size", "addr_name", "addr_ent"
};
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
159 (get_irn_arity)(const ir_node *node) {
160 return __get_irn_arity(node);
163 /* Returns the array with ins. This array is shifted with respect to the
164 array accessed by get_irn_n: The block operand is at position 0 not -1.
165 (@@@ This should be changed.)
166 The order of the predecessors in this array is not guaranteed, except that
167 lists of operands as predecessors of Block or arguments of a Call are
170 get_irn_in (const ir_node *node) {
172 if (interprocedural_view) { /* handle Filter and Block specially */
173 if (get_irn_opcode(node) == iro_Filter) {
174 assert(node->attr.filter.in_cg);
175 return node->attr.filter.in_cg;
176 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
177 return node->attr.block.in_cg;
179 /* else fall through */
185 set_irn_in (ir_node *node, int arity, ir_node **in) {
188 if (interprocedural_view) { /* handle Filter and Block specially */
189 if (get_irn_opcode(node) == iro_Filter) {
190 assert(node->attr.filter.in_cg);
191 arr = &node->attr.filter.in_cg;
192 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
193 arr = &node->attr.block.in_cg;
200 if (arity != ARR_LEN(*arr) - 1) {
201 ir_node * block = (*arr)[0];
202 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
205 fix_backedges(current_ir_graph->obst, node);
206 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
210 (get_irn_intra_n)(ir_node *node, int n) {
211 return __get_irn_intra_n (node, n);
215 (get_irn_inter_n)(ir_node *node, int n) {
216 return __get_irn_inter_n (node, n);
220 (get_irn_n)(ir_node *node, int n) {
221 return __get_irn_n (node, n);
225 set_irn_n (ir_node *node, int n, ir_node *in) {
226 assert(node && -1 <= n && n < get_irn_arity(node));
227 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
228 /* Change block pred in both views! */
229 node->in[n + 1] = in;
230 assert(node->attr.filter.in_cg);
231 node->attr.filter.in_cg[n + 1] = in;
234 if (interprocedural_view) { /* handle Filter and Block specially */
235 if (get_irn_opcode(node) == iro_Filter) {
236 assert(node->attr.filter.in_cg);
237 node->attr.filter.in_cg[n + 1] = in;
239 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
240 node->attr.block.in_cg[n + 1] = in;
243 /* else fall through */
245 node->in[n + 1] = in;
249 (get_irn_mode)(const ir_node *node) {
250 return __get_irn_mode(node);
254 (set_irn_mode)(ir_node *node, ir_mode *mode)
256 __set_irn_mode(node, mode);
260 get_irn_modecode (const ir_node *node)
263 return node->mode->code;
266 /** Gets the string representation of the mode .*/
268 get_irn_modename (const ir_node *node)
271 return get_mode_name(node->mode);
275 get_irn_modeident (const ir_node *node)
278 return get_mode_ident(node->mode);
282 (get_irn_op)(const ir_node *node)
284 return __get_irn_op(node);
287 /* should be private to the library: */
289 set_irn_op (ir_node *node, ir_op *op)
296 (get_irn_opcode)(const ir_node *node)
298 return __get_irn_opcode(node);
302 get_irn_opname (const ir_node *node)
305 if ((get_irn_op((ir_node *)node) == op_Phi) &&
306 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
307 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
308 return get_id_str(node->op->name);
312 get_irn_opident (const ir_node *node)
315 return node->op->name;
319 (get_irn_visited)(const ir_node *node)
321 return __get_irn_visited(node);
325 (set_irn_visited)(ir_node *node, unsigned long visited)
327 __set_irn_visited(node, visited);
331 (mark_irn_visited)(ir_node *node) {
332 __mark_irn_visited(node);
336 (irn_not_visited)(const ir_node *node) {
337 return __irn_not_visited(node);
341 (irn_visited)(const ir_node *node) {
342 return __irn_visited(node);
346 (set_irn_link)(ir_node *node, void *link) {
347 __set_irn_link(node, link);
351 (get_irn_link)(const ir_node *node) {
352 return __get_irn_link(node);
355 /* Outputs a unique number for this node */
357 get_irn_node_nr(const ir_node *node) {
360 return node->node_nr;
367 get_irn_const_attr (ir_node *node)
369 assert (node->op == op_Const);
370 return node->attr.con;
374 get_irn_proj_attr (ir_node *node)
376 assert (node->op == op_Proj);
377 return node->attr.proj;
381 get_irn_alloc_attr (ir_node *node)
383 assert (node->op == op_Alloc);
388 get_irn_free_attr (ir_node *node)
390 assert (node->op == op_Free);
391 return node->attr.f = skip_tid(node->attr.f);
395 get_irn_symconst_attr (ir_node *node)
397 assert (node->op == op_SymConst);
402 get_irn_call_attr (ir_node *node)
404 assert (node->op == op_Call);
405 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
409 get_irn_funccall_attr (ir_node *node)
411 assert (node->op == op_FuncCall);
412 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
416 get_irn_sel_attr (ir_node *node)
418 assert (node->op == op_Sel);
423 get_irn_phi_attr (ir_node *node)
425 assert (node->op == op_Phi);
426 return node->attr.phi0_pos;
430 get_irn_block_attr (ir_node *node)
432 assert (node->op == op_Block);
433 return node->attr.block;
436 /** manipulate fields of individual nodes **/
438 /* this works for all except Block */
440 get_nodes_Block (ir_node *node) {
441 assert (!(node->op == op_Block));
442 return get_irn_n(node, -1);
446 set_nodes_Block (ir_node *node, ir_node *block) {
447 assert (!(node->op == op_Block));
448 set_irn_n(node, -1, block);
451 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
452 * from Start. If so returns frame type, else Null. */
453 type *is_frame_pointer(ir_node *n) {
454 if ((get_irn_op(n) == op_Proj) &&
455 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
456 ir_node *start = get_Proj_pred(n);
457 if (get_irn_op(start) == op_Start) {
458 return get_irg_frame_type(get_irn_irg(start));
464 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
465 * from Start. If so returns global type, else Null. */
466 type *is_globals_pointer(ir_node *n) {
467 if ((get_irn_op(n) == op_Proj) &&
468 (get_Proj_proj(n) == pn_Start_P_globals)) {
469 ir_node *start = get_Proj_pred(n);
470 if (get_irn_op(start) == op_Start) {
471 return get_glob_type();
477 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
478 * from Start. If so returns 1, else 0. */
479 int is_value_arg_pointer(ir_node *n) {
480 if ((get_irn_op(n) == op_Proj) &&
481 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
482 (get_irn_op(get_Proj_pred(n)) == op_Start))
487 /* Returns an array with the predecessors of the Block. Depending on
488 the implementation of the graph data structure this can be a copy of
489 the internal representation of predecessors as well as the internal
490 array itself. Therefore writing to this array might obstruct the ir. */
492 get_Block_cfgpred_arr (ir_node *node)
494 assert ((node->op == op_Block));
495 return (ir_node **)&(get_irn_in(node)[1]);
500 get_Block_n_cfgpreds (ir_node *node) {
501 assert ((node->op == op_Block));
502 return get_irn_arity(node);
506 get_Block_cfgpred (ir_node *node, int pos) {
508 assert (node->op == op_Block);
509 assert(-1 <= pos && pos < get_irn_arity(node));
510 return get_irn_n(node, pos);
514 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
515 assert (node->op == op_Block);
516 set_irn_n(node, pos, pred);
520 get_Block_matured (ir_node *node) {
521 assert (node->op == op_Block);
522 return node->attr.block.matured;
526 set_Block_matured (ir_node *node, bool matured) {
527 assert (node->op == op_Block);
528 node->attr.block.matured = matured;
531 get_Block_block_visited (ir_node *node) {
532 assert (node->op == op_Block);
533 return node->attr.block.block_visited;
537 set_Block_block_visited (ir_node *node, unsigned long visit) {
538 assert (node->op == op_Block);
539 node->attr.block.block_visited = visit;
542 /* For this current_ir_graph must be set. */
544 mark_Block_block_visited (ir_node *node) {
545 assert (node->op == op_Block);
546 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
550 Block_not_block_visited(ir_node *node) {
551 assert (node->op == op_Block);
552 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
556 get_Block_graph_arr (ir_node *node, int pos) {
557 assert (node->op == op_Block);
558 return node->attr.block.graph_arr[pos+1];
562 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
563 assert (node->op == op_Block);
564 node->attr.block.graph_arr[pos+1] = value;
567 /* handler handling for Blocks * /
569 set_Block_handler (ir_node *block, ir_node *handler) {
570 assert ((block->op == op_Block));
571 assert ((handler->op == op_Block));
572 block->attr.block.handler_entry = handler;
576 get_Block_handler (ir_node *block) {
577 assert ((block->op == op_Block));
578 return (block->attr.block.handler_entry);
581 / * handler handling for Nodes * /
583 set_Node_handler (ir_node *node, ir_node *handler) {
584 set_Block_handler (get_nodes_Block (node), handler);
588 get_Node_handler (ir_node *node) {
589 return (get_Block_handler (get_nodes_Block (node)));
592 / * exc_t handling for Blocks * /
593 void set_Block_exc (ir_node *block, exc_t exc) {
594 assert ((block->op == op_Block));
595 block->attr.block.exc = exc;
598 exc_t get_Block_exc (ir_node *block) {
599 assert ((block->op == op_Block));
600 return (block->attr.block.exc);
603 / * exc_t handling for Nodes * /
604 void set_Node_exc (ir_node *node, exc_t exc) {
605 set_Block_exc (get_nodes_Block (node), exc);
608 exc_t get_Node_exc (ir_node *node) {
609 return (get_Block_exc (get_nodes_Block (node)));
613 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
614 assert(node->op == op_Block);
615 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
616 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
617 node->attr.block.in_cg[0] = NULL;
618 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
620 /* Fix backedge array. fix_backedges operates depending on
621 interprocedural_view. */
622 bool ipv = interprocedural_view;
623 interprocedural_view = true;
624 fix_backedges(current_ir_graph->obst, node);
625 interprocedural_view = ipv;
628 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
631 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
632 assert(node->op == op_Block &&
633 node->attr.block.in_cg &&
634 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
635 node->attr.block.in_cg[pos + 1] = pred;
638 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
639 assert(node->op == op_Block);
640 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
643 int get_Block_cg_n_cfgpreds(ir_node * node) {
644 assert(node->op == op_Block);
645 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
648 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
649 assert(node->op == op_Block && node->attr.block.in_cg);
650 return node->attr.block.in_cg[pos + 1];
653 void remove_Block_cg_cfgpred_arr(ir_node * node) {
654 assert(node->op == op_Block);
655 node->attr.block.in_cg = NULL;
658 /* Start references the irg it is in. */
660 get_Start_irg(ir_node *node) {
661 return get_irn_irg(node);
665 set_Start_irg(ir_node *node, ir_graph *irg) {
666 assert(node->op == op_Start);
667 assert(is_ir_graph(irg));
668 assert(0 && " Why set irg? -- use set_irn_irg");
672 get_End_n_keepalives(ir_node *end) {
673 assert (end->op == op_End);
674 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
678 get_End_keepalive(ir_node *end, int pos) {
679 assert (end->op == op_End);
680 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
684 add_End_keepalive (ir_node *end, ir_node *ka) {
685 assert (end->op == op_End);
686 ARR_APP1 (ir_node *, end->in, ka);
690 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
691 assert (end->op == op_End);
692 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
696 free_End (ir_node *end) {
697 assert (end->op == op_End);
699 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
700 end->in = NULL; /* @@@ make sure we get an error if we use the
701 in array afterwards ... */
704 ir_graph *get_EndReg_irg (ir_node *end) {
705 return get_irn_irg(end);
708 ir_graph *get_EndExcept_irg (ir_node *end) {
709 return get_irn_irg(end);
713 > Implementing the case construct (which is where the constant Proj node is
714 > important) involves far more than simply determining the constant values.
715 > We could argue that this is more properly a function of the translator from
716 > Firm to the target machine. That could be done if there was some way of
717 > projecting "default" out of the Cond node.
718 I know it's complicated.
Basically there are two problems:
720 - determining the gaps between the projs
721 - determining the biggest case constant to know the proj number for
723 I see several solutions:
724 1. Introduce a ProjDefault node. Solves both problems.
725 This means to extend all optimizations executed during construction.
726 2. Give the Cond node for switch two flavors:
727 a) there are no gaps in the projs (existing flavor)
728 b) gaps may exist, default proj is still the Proj with the largest
729 projection number. This covers also the gaps.
730 3. Fix the semantic of the Cond to that of 2b)
732 Solution 2 seems to be the best:
733 Computing the gaps in the Firm representation is not too hard, i.e.,
734 libFIRM can implement a routine that transforms between the two
735 flavours. This is also possible for 1) but 2) does not require to
736 change any existing optimization.
737 Further it should be far simpler to determine the biggest constant than
739 I don't want to choose 3) as 2a) seems to have advantages for
740 dataflow analysis and 3) does not allow to convert the representation to
744 get_Cond_selector (ir_node *node) {
745 assert (node->op == op_Cond);
746 return get_irn_n(node, 0);
750 set_Cond_selector (ir_node *node, ir_node *selector) {
751 assert (node->op == op_Cond);
752 set_irn_n(node, 0, selector);
756 get_Cond_kind (ir_node *node) {
757 assert (node->op == op_Cond);
758 return node->attr.c.kind;
762 set_Cond_kind (ir_node *node, cond_kind kind) {
763 assert (node->op == op_Cond);
764 node->attr.c.kind = kind;
768 get_Cond_defaultProj (ir_node *node) {
769 assert (node->op == op_Cond);
770 return node->attr.c.default_proj;
774 get_Return_mem (ir_node *node) {
775 assert (node->op == op_Return);
776 return get_irn_n(node, 0);
780 set_Return_mem (ir_node *node, ir_node *mem) {
781 assert (node->op == op_Return);
782 set_irn_n(node, 0, mem);
786 get_Return_n_ress (ir_node *node) {
787 assert (node->op == op_Return);
788 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
792 get_Return_res_arr (ir_node *node)
794 assert ((node->op == op_Return));
795 if (get_Return_n_ress(node) > 0)
796 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
803 set_Return_n_res (ir_node *node, int results) {
804 assert (node->op == op_Return);
809 get_Return_res (ir_node *node, int pos) {
810 assert (node->op == op_Return);
811 assert (get_Return_n_ress(node) > pos);
812 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
816 set_Return_res (ir_node *node, int pos, ir_node *res){
817 assert (node->op == op_Return);
818 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
822 get_Raise_mem (ir_node *node) {
823 assert (node->op == op_Raise);
824 return get_irn_n(node, 0);
828 set_Raise_mem (ir_node *node, ir_node *mem) {
829 assert (node->op == op_Raise);
830 set_irn_n(node, 0, mem);
834 get_Raise_exo_ptr (ir_node *node) {
835 assert (node->op == op_Raise);
836 return get_irn_n(node, 1);
840 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
841 assert (node->op == op_Raise);
842 set_irn_n(node, 1, exo_ptr);
845 tarval *get_Const_tarval (ir_node *node) {
846 assert (node->op == op_Const);
847 return node->attr.con.tv;
851 set_Const_tarval (ir_node *node, tarval *con) {
852 assert (node->op == op_Const);
853 node->attr.con.tv = con;
857 /* The source language type. Must be an atomic type. Mode of type must
858 be mode of node. For tarvals from entities type must be pointer to
861 get_Const_type (ir_node *node) {
862 assert (node->op == op_Const);
863 return node->attr.con.tp;
867 set_Const_type (ir_node *node, type *tp) {
868 assert (node->op == op_Const);
869 if (tp != unknown_type) {
870 assert (is_atomic_type(tp));
871 assert (get_type_mode(tp) == get_irn_mode(node));
872 assert (!tarval_is_entity(get_Const_tarval(node)) ||
873 (is_pointer_type(tp) &&
874 (get_pointer_points_to_type(tp) ==
875 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
878 node->attr.con.tp = tp;
883 get_SymConst_kind (const ir_node *node) {
884 assert (node->op == op_SymConst);
885 return node->attr.i.num;
889 set_SymConst_kind (ir_node *node, symconst_kind num) {
890 assert (node->op == op_SymConst);
891 node->attr.i.num = num;
895 get_SymConst_type (ir_node *node) {
896 assert ( (node->op == op_SymConst)
897 && ( get_SymConst_kind(node) == symconst_type_tag
898 || get_SymConst_kind(node) == symconst_size));
899 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
903 set_SymConst_type (ir_node *node, type *tp) {
904 assert ( (node->op == op_SymConst)
905 && ( get_SymConst_kind(node) == symconst_type_tag
906 || get_SymConst_kind(node) == symconst_size));
907 node->attr.i.sym.type_p = tp;
911 get_SymConst_name (ir_node *node) {
912 assert ( (node->op == op_SymConst)
913 && (get_SymConst_kind(node) == symconst_addr_name));
914 return node->attr.i.sym.ident_p;
918 set_SymConst_name (ir_node *node, ident *name) {
919 assert ( (node->op == op_SymConst)
920 && (get_SymConst_kind(node) == symconst_addr_name));
921 node->attr.i.sym.ident_p = name;
925 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
926 entity *get_SymConst_entity (ir_node *node) {
927 assert ( (node->op == op_SymConst)
928 && (get_SymConst_kind (node) == symconst_addr_ent));
929 return node->attr.i.sym.entity_p;
932 void set_SymConst_entity (ir_node *node, entity *ent) {
933 assert ( (node->op == op_SymConst)
934 && (get_SymConst_kind(node) == symconst_addr_ent));
935 node->attr.i.sym.entity_p = ent;
939 union symconst_symbol
940 get_SymConst_symbol (ir_node *node) {
941 assert (node->op == op_SymConst);
942 return node->attr.i.sym;
946 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
947 assert (node->op == op_SymConst);
948 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
949 node->attr.i.sym = sym;
953 get_Sel_mem (ir_node *node) {
954 assert (node->op == op_Sel);
955 return get_irn_n(node, 0);
959 set_Sel_mem (ir_node *node, ir_node *mem) {
960 assert (node->op == op_Sel);
961 set_irn_n(node, 0, mem);
965 get_Sel_ptr (ir_node *node) {
966 assert (node->op == op_Sel);
967 return get_irn_n(node, 1);
971 set_Sel_ptr (ir_node *node, ir_node *ptr) {
972 assert (node->op == op_Sel);
973 set_irn_n(node, 1, ptr);
977 get_Sel_n_indexs (ir_node *node) {
978 assert (node->op == op_Sel);
979 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
983 get_Sel_index_arr (ir_node *node)
985 assert ((node->op == op_Sel));
986 if (get_Sel_n_indexs(node) > 0)
987 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
993 get_Sel_index (ir_node *node, int pos) {
994 assert (node->op == op_Sel);
995 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
999 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1000 assert (node->op == op_Sel);
1001 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1005 get_Sel_entity (ir_node *node) {
1006 assert (node->op == op_Sel);
1007 return node->attr.s.ent;
1011 set_Sel_entity (ir_node *node, entity *ent) {
1012 assert (node->op == op_Sel);
1013 node->attr.s.ent = ent;
1017 get_InstOf_ent (ir_node *node) {
1018 assert (node->op = op_InstOf);
1019 return (node->attr.io.ent);
1023 set_InstOf_ent (ir_node *node, type *ent) {
1024 assert (node->op = op_InstOf);
1025 node->attr.io.ent = ent;
1029 get_InstOf_store (ir_node *node) {
1030 assert (node->op = op_InstOf);
1031 return (get_irn_n (node, 0));
1035 set_InstOf_store (ir_node *node, ir_node *obj) {
1036 assert (node->op = op_InstOf);
1037 set_irn_n (node, 0, obj);
1041 get_InstOf_obj (ir_node *node) {
1042 assert (node->op = op_InstOf);
1043 return (get_irn_n (node, 1));
1047 set_InstOf_obj (ir_node *node, ir_node *obj) {
1048 assert (node->op = op_InstOf);
1049 set_irn_n (node, 1, obj);
1053 /* For unary and binary arithmetic operations the access to the
1054 operands can be factored out. Left is the first, right the
1055 second arithmetic value as listed in tech report 0999-33.
1056 unops are: Minus, Abs, Not, Conv, Cast
1057 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1058 Shr, Shrs, Rotate, Cmp */
1062 get_Call_mem (ir_node *node) {
1063 assert (node->op == op_Call);
1064 return get_irn_n(node, 0);
1068 set_Call_mem (ir_node *node, ir_node *mem) {
1069 assert (node->op == op_Call);
1070 set_irn_n(node, 0, mem);
1074 get_Call_ptr (ir_node *node) {
1075 assert (node->op == op_Call);
1076 return get_irn_n(node, 1);
1080 set_Call_ptr (ir_node *node, ir_node *ptr) {
1081 assert (node->op == op_Call);
1082 set_irn_n(node, 1, ptr);
1086 get_Call_param_arr (ir_node *node) {
1087 assert (node->op == op_Call);
1088 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1092 get_Call_n_params (ir_node *node) {
1093 assert (node->op == op_Call);
1094 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1098 get_Call_arity (ir_node *node) {
1099 assert (node->op == op_Call);
1100 return get_Call_n_params(node);
1104 set_Call_arity (ir_node *node, ir_node *arity) {
1105 assert (node->op == op_Call);
1110 get_Call_param (ir_node *node, int pos) {
1111 assert (node->op == op_Call);
1112 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1116 set_Call_param (ir_node *node, int pos, ir_node *param) {
1117 assert (node->op == op_Call);
1118 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1122 get_Call_type (ir_node *node) {
1123 assert (node->op == op_Call);
1124 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1128 set_Call_type (ir_node *node, type *tp) {
1129 assert (node->op == op_Call);
1130 assert (is_method_type(tp));
1131 node->attr.call.cld_tp = tp;
1134 int Call_has_callees(ir_node *node) {
1135 return (node->attr.call.callee_arr != NULL);
1138 int get_Call_n_callees(ir_node * node) {
1139 assert(node->op == op_Call && node->attr.call.callee_arr);
1140 return ARR_LEN(node->attr.call.callee_arr);
1143 entity * get_Call_callee(ir_node * node, int pos) {
1144 assert(node->op == op_Call && node->attr.call.callee_arr);
1145 return node->attr.call.callee_arr[pos];
1148 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1149 assert(node->op == op_Call);
1150 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1151 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1153 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1156 void remove_Call_callee_arr(ir_node * node) {
1157 assert(node->op == op_Call);
1158 node->attr.call.callee_arr = NULL;
1161 ir_node * get_CallBegin_ptr (ir_node *node) {
1162 assert(node->op == op_CallBegin);
1163 return get_irn_n(node, 0);
1165 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1166 assert(node->op == op_CallBegin);
1167 set_irn_n(node, 0, ptr);
1169 ir_graph * get_CallBegin_irg (ir_node *node) {
1170 return get_irn_irg(node);
1172 ir_node * get_CallBegin_call (ir_node *node) {
1173 assert(node->op == op_CallBegin);
1174 return node->attr.callbegin.call;
1176 void set_CallBegin_call (ir_node *node, ir_node *call) {
1177 assert(node->op == op_CallBegin);
1178 node->attr.callbegin.call = call;
1182 get_FuncCall_ptr (ir_node *node) {
1183 assert (node->op == op_FuncCall);
1184 return get_irn_n(node, 0);
1188 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1189 assert (node->op == op_FuncCall);
1190 set_irn_n(node, 0, ptr);
1194 get_FuncCall_param_arr (ir_node *node) {
1195 assert (node->op == op_FuncCall);
1196 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1200 get_FuncCall_n_params (ir_node *node) {
1201 assert (node->op == op_FuncCall);
1202 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1206 get_FuncCall_arity (ir_node *node) {
1207 assert (node->op == op_FuncCall);
1208 return get_FuncCall_n_params(node);
1212 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1213 assert (node->op == op_FuncCall);
1218 get_FuncCall_param (ir_node *node, int pos) {
1219 assert (node->op == op_FuncCall);
1220 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1224 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1225 assert (node->op == op_FuncCall);
1226 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1230 get_FuncCall_type (ir_node *node) {
1231 assert (node->op == op_FuncCall);
1232 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1236 set_FuncCall_type (ir_node *node, type *tp) {
1237 assert (node->op == op_FuncCall);
1238 assert (is_method_type(tp));
1239 node->attr.call.cld_tp = tp;
1242 int FuncCall_has_callees(ir_node *node) {
1243 return (node->attr.call.callee_arr != NULL);
1246 int get_FuncCall_n_callees(ir_node * node) {
1247 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1248 return ARR_LEN(node->attr.call.callee_arr);
1251 entity * get_FuncCall_callee(ir_node * node, int pos) {
1252 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1253 return node->attr.call.callee_arr[pos];
1256 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1257 assert(node->op == op_FuncCall);
1258 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1259 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1261 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1264 void remove_FuncCall_callee_arr(ir_node * node) {
1265 assert(node->op == op_FuncCall);
1266 node->attr.call.callee_arr = NULL;
1271 ir_node * get_##OP##_left(ir_node *node) { \
1272 assert(node->op == op_##OP); \
1273 return get_irn_n(node, node->op->op_index); \
1275 void set_##OP##_left(ir_node *node, ir_node *left) { \
1276 assert(node->op == op_##OP); \
1277 set_irn_n(node, node->op->op_index, left); \
1279 ir_node *get_##OP##_right(ir_node *node) { \
1280 assert(node->op == op_##OP); \
1281 return get_irn_n(node, node->op->op_index + 1); \
1283 void set_##OP##_right(ir_node *node, ir_node *right) { \
1284 assert(node->op == op_##OP); \
1285 set_irn_n(node, node->op->op_index + 1, right); \
1289 ir_node *get_##OP##_op(ir_node *node) { \
1290 assert(node->op == op_##OP); \
1291 return get_irn_n(node, node->op->op_index); \
1293 void set_##OP##_op (ir_node *node, ir_node *op) { \
1294 assert(node->op == op_##OP); \
1295 set_irn_n(node, node->op->op_index, op); \
1305 get_Quot_mem (ir_node *node) {
1306 assert (node->op == op_Quot);
1307 return get_irn_n(node, 0);
1311 set_Quot_mem (ir_node *node, ir_node *mem) {
1312 assert (node->op == op_Quot);
1313 set_irn_n(node, 0, mem);
1319 get_DivMod_mem (ir_node *node) {
1320 assert (node->op == op_DivMod);
1321 return get_irn_n(node, 0);
1325 set_DivMod_mem (ir_node *node, ir_node *mem) {
1326 assert (node->op == op_DivMod);
1327 set_irn_n(node, 0, mem);
1333 get_Div_mem (ir_node *node) {
1334 assert (node->op == op_Div);
1335 return get_irn_n(node, 0);
1339 set_Div_mem (ir_node *node, ir_node *mem) {
1340 assert (node->op == op_Div);
1341 set_irn_n(node, 0, mem);
1347 get_Mod_mem (ir_node *node) {
1348 assert (node->op == op_Mod);
1349 return get_irn_n(node, 0);
1353 set_Mod_mem (ir_node *node, ir_node *mem) {
1354 assert (node->op == op_Mod);
1355 set_irn_n(node, 0, mem);
1372 get_Cast_type (ir_node *node) {
1373 assert (node->op == op_Cast);
1374 return node->attr.cast.totype;
1378 set_Cast_type (ir_node *node, type *to_tp) {
1379 assert (node->op == op_Cast);
1380 node->attr.cast.totype = to_tp;
1384 is_unop (ir_node *node) {
1385 return (node->op->opar == oparity_unary);
1389 get_unop_op (ir_node *node) {
1390 if (node->op->opar == oparity_unary)
1391 return get_irn_n(node, node->op->op_index);
1393 assert(node->op->opar == oparity_unary);
1398 set_unop_op (ir_node *node, ir_node *op) {
1399 if (node->op->opar == oparity_unary)
1400 set_irn_n(node, node->op->op_index, op);
1402 assert(node->op->opar == oparity_unary);
1406 is_binop (ir_node *node) {
1407 return (node->op->opar == oparity_binary);
1411 get_binop_left (ir_node *node) {
1412 if (node->op->opar == oparity_binary)
1413 return get_irn_n(node, node->op->op_index);
1415 assert(node->op->opar == oparity_binary);
1420 set_binop_left (ir_node *node, ir_node *left) {
1421 if (node->op->opar == oparity_binary)
1422 set_irn_n(node, node->op->op_index, left);
1424 assert (node->op->opar == oparity_binary);
1428 get_binop_right (ir_node *node) {
1429 if (node->op->opar == oparity_binary)
1430 return get_irn_n(node, node->op->op_index + 1);
1432 assert(node->op->opar == oparity_binary);
1437 set_binop_right (ir_node *node, ir_node *right) {
1438 if (node->op->opar == oparity_binary)
1439 set_irn_n(node, node->op->op_index + 1, right);
1441 assert (node->op->opar == oparity_binary);
1444 int is_Phi (ir_node *n) {
1450 if (op == op_Filter) return interprocedural_view;
1453 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1454 (get_irn_arity(n) > 0));
1459 int is_Phi0 (ir_node *n) {
1462 return ((get_irn_op(n) == op_Phi) &&
1463 (get_irn_arity(n) == 0) &&
1464 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1468 get_Phi_preds_arr (ir_node *node) {
1469 assert (node->op == op_Phi);
1470 return (ir_node **)&(get_irn_in(node)[1]);
1474 get_Phi_n_preds (ir_node *node) {
1475 assert (is_Phi(node) || is_Phi0(node));
1476 return (get_irn_arity(node));
1480 void set_Phi_n_preds (ir_node *node, int n_preds) {
1481 assert (node->op == op_Phi);
1486 get_Phi_pred (ir_node *node, int pos) {
1487 assert (is_Phi(node) || is_Phi0(node));
1488 return get_irn_n(node, pos);
1492 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1493 assert (is_Phi(node) || is_Phi0(node));
1494 set_irn_n(node, pos, pred);
1498 get_Load_mem (ir_node *node) {
1499 assert (node->op == op_Load);
1500 return get_irn_n(node, 0);
1504 set_Load_mem (ir_node *node, ir_node *mem) {
1505 assert (node->op == op_Load);
1506 set_irn_n(node, 0, mem);
1510 get_Load_ptr (ir_node *node) {
1511 assert (node->op == op_Load);
1512 return get_irn_n(node, 1);
1516 set_Load_ptr (ir_node *node, ir_node *ptr) {
1517 assert (node->op == op_Load);
1518 set_irn_n(node, 1, ptr);
1523 get_Store_mem (ir_node *node) {
1524 assert (node->op == op_Store);
1525 return get_irn_n(node, 0);
1529 set_Store_mem (ir_node *node, ir_node *mem) {
1530 assert (node->op == op_Store);
1531 set_irn_n(node, 0, mem);
1535 get_Store_ptr (ir_node *node) {
1536 assert (node->op == op_Store);
1537 return get_irn_n(node, 1);
1541 set_Store_ptr (ir_node *node, ir_node *ptr) {
1542 assert (node->op == op_Store);
1543 set_irn_n(node, 1, ptr);
1547 get_Store_value (ir_node *node) {
1548 assert (node->op == op_Store);
1549 return get_irn_n(node, 2);
1553 set_Store_value (ir_node *node, ir_node *value) {
1554 assert (node->op == op_Store);
1555 set_irn_n(node, 2, value);
1559 get_Alloc_mem (ir_node *node) {
1560 assert (node->op == op_Alloc);
1561 return get_irn_n(node, 0);
1565 set_Alloc_mem (ir_node *node, ir_node *mem) {
1566 assert (node->op == op_Alloc);
1567 set_irn_n(node, 0, mem);
1571 get_Alloc_size (ir_node *node) {
1572 assert (node->op == op_Alloc);
1573 return get_irn_n(node, 1);
1577 set_Alloc_size (ir_node *node, ir_node *size) {
1578 assert (node->op == op_Alloc);
1579 set_irn_n(node, 1, size);
1583 get_Alloc_type (ir_node *node) {
1584 assert (node->op == op_Alloc);
1585 return node->attr.a.type = skip_tid(node->attr.a.type);
1589 set_Alloc_type (ir_node *node, type *tp) {
1590 assert (node->op == op_Alloc);
1591 node->attr.a.type = tp;
1595 get_Alloc_where (ir_node *node) {
1596 assert (node->op == op_Alloc);
1597 return node->attr.a.where;
1601 set_Alloc_where (ir_node *node, where_alloc where) {
1602 assert (node->op == op_Alloc);
1603 node->attr.a.where = where;
1608 get_Free_mem (ir_node *node) {
1609 assert (node->op == op_Free);
1610 return get_irn_n(node, 0);
1614 set_Free_mem (ir_node *node, ir_node *mem) {
1615 assert (node->op == op_Free);
1616 set_irn_n(node, 0, mem);
1620 get_Free_ptr (ir_node *node) {
1621 assert (node->op == op_Free);
1622 return get_irn_n(node, 1);
1626 set_Free_ptr (ir_node *node, ir_node *ptr) {
1627 assert (node->op == op_Free);
1628 set_irn_n(node, 1, ptr);
1632 get_Free_size (ir_node *node) {
1633 assert (node->op == op_Free);
1634 return get_irn_n(node, 2);
1638 set_Free_size (ir_node *node, ir_node *size) {
1639 assert (node->op == op_Free);
1640 set_irn_n(node, 2, size);
1644 get_Free_type (ir_node *node) {
1645 assert (node->op == op_Free);
1646 return node->attr.f = skip_tid(node->attr.f);
1650 set_Free_type (ir_node *node, type *tp) {
1651 assert (node->op == op_Free);
1656 get_Sync_preds_arr (ir_node *node) {
1657 assert (node->op == op_Sync);
1658 return (ir_node **)&(get_irn_in(node)[1]);
1662 get_Sync_n_preds (ir_node *node) {
1663 assert (node->op == op_Sync);
1664 return (get_irn_arity(node));
1669 set_Sync_n_preds (ir_node *node, int n_preds) {
1670 assert (node->op == op_Sync);
1675 get_Sync_pred (ir_node *node, int pos) {
1676 assert (node->op == op_Sync);
1677 return get_irn_n(node, pos);
1681 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1682 assert (node->op == op_Sync);
1683 set_irn_n(node, pos, pred);
1687 get_Proj_pred (ir_node *node) {
1688 assert (is_Proj(node));
1689 return get_irn_n(node, 0);
1693 set_Proj_pred (ir_node *node, ir_node *pred) {
1694 assert (is_Proj(node));
1695 set_irn_n(node, 0, pred);
1699 get_Proj_proj (ir_node *node) {
1700 assert (is_Proj(node));
1701 if (get_irn_opcode(node) == iro_Proj) {
1702 return node->attr.proj;
1704 assert(get_irn_opcode(node) == iro_Filter);
1705 return node->attr.filter.proj;
1710 set_Proj_proj (ir_node *node, long proj) {
1711 assert (node->op == op_Proj);
1712 node->attr.proj = proj;
1716 get_Tuple_preds_arr (ir_node *node) {
1717 assert (node->op == op_Tuple);
1718 return (ir_node **)&(get_irn_in(node)[1]);
1722 get_Tuple_n_preds (ir_node *node) {
1723 assert (node->op == op_Tuple);
1724 return (get_irn_arity(node));
1729 set_Tuple_n_preds (ir_node *node, int n_preds) {
1730 assert (node->op == op_Tuple);
1735 get_Tuple_pred (ir_node *node, int pos) {
1736 assert (node->op == op_Tuple);
1737 return get_irn_n(node, pos);
1741 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1742 assert (node->op == op_Tuple);
1743 set_irn_n(node, pos, pred);
1747 get_Id_pred (ir_node *node) {
1748 assert (node->op == op_Id);
1749 return get_irn_n(node, 0);
1753 set_Id_pred (ir_node *node, ir_node *pred) {
1754 assert (node->op == op_Id);
1755 set_irn_n(node, 0, pred);
1758 ir_node *get_Confirm_value (ir_node *node) {
1759 assert (node->op == op_Confirm);
1760 return get_irn_n(node, 0);
1762 void set_Confirm_value (ir_node *node, ir_node *value) {
1763 assert (node->op == op_Confirm);
1764 set_irn_n(node, 0, value);
1766 ir_node *get_Confirm_bound (ir_node *node) {
1767 assert (node->op == op_Confirm);
1768 return get_irn_n(node, 1);
1770 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1771 assert (node->op == op_Confirm);
1772 set_irn_n(node, 0, bound);
1774 pn_Cmp get_Confirm_cmp (ir_node *node) {
1775 assert (node->op == op_Confirm);
1776 return node->attr.confirm_cmp;
1778 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1779 assert (node->op == op_Confirm);
1780 node->attr.confirm_cmp = cmp;
1785 get_Filter_pred (ir_node *node) {
1786 assert(node->op == op_Filter);
1790 set_Filter_pred (ir_node *node, ir_node *pred) {
1791 assert(node->op == op_Filter);
1795 get_Filter_proj(ir_node *node) {
1796 assert(node->op == op_Filter);
1797 return node->attr.filter.proj;
1800 set_Filter_proj (ir_node *node, long proj) {
1801 assert(node->op == op_Filter);
1802 node->attr.filter.proj = proj;
1805 /* Don't use get_irn_arity, get_irn_n in implementation as access
1806 shall work independent of view!!! */
1807 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1808 assert(node->op == op_Filter);
1809 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1810 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1811 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1812 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1813 node->attr.filter.in_cg[0] = node->in[0];
1815 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1818 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1819 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1820 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1821 node->attr.filter.in_cg[pos + 1] = pred;
1823 int get_Filter_n_cg_preds(ir_node *node) {
1824 assert(node->op == op_Filter && node->attr.filter.in_cg);
1825 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1827 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1829 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1831 arity = ARR_LEN(node->attr.filter.in_cg);
1832 assert(pos < arity - 1);
1833 return node->attr.filter.in_cg[pos + 1];
1838 get_irn_irg(ir_node *node) {
1839 if (get_irn_op(node) != op_Block)
1840 node = get_nodes_block(node);
1841 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1842 node = get_nodes_block(node);
1843 assert(get_irn_op(node) == op_Block);
1844 return node->attr.block.irg;
1848 /*----------------------------------------------------------------*/
1849 /* Auxiliary routines */
1850 /*----------------------------------------------------------------*/
1853 skip_Proj (ir_node *node) {
1854 /* don't assert node !!! */
1855 if (node && is_Proj(node)) {
1856 return get_Proj_pred(node);
1863 skip_Tuple (ir_node *node) {
1866 if (!get_opt_normalize()) return node;
1868 node = skip_nop(node);
1869 if (get_irn_op(node) == op_Proj) {
1870 pred = skip_nop(get_Proj_pred(node));
1871 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1872 pred = skip_nop(skip_Tuple(pred));
1873 if (get_irn_op(pred) == op_Tuple)
1874 return get_Tuple_pred(pred, get_Proj_proj(node));
1879 /** returns operand of node if node is a Cast */
1880 ir_node *skip_Cast (ir_node *node) {
1881 if (node && get_irn_op(node) == op_Cast) {
1882 return skip_nop(get_irn_n(node, 0));
1889 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1890 than any other approach, as Id chains are resolved and all point to the real node, or
1891 all id's are self loops. */
1893 skip_nop (ir_node *node) {
1894 /* don't assert node !!! */
1896 if (!get_opt_normalize()) return node;
1898 /* Don't use get_Id_pred: We get into an endless loop for
1899 self-referencing Ids. */
1900 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1901 ir_node *rem_pred = node->in[0+1];
1904 assert (get_irn_arity (node) > 0);
1906 node->in[0+1] = node;
1907 res = skip_nop(rem_pred);
1908 if (res->op == op_Id) /* self-loop */ return node;
1910 node->in[0+1] = res;
1917 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1918 than any other approach, as Id chains are resolved and all point to the real node, or
1919 all id's are self loops. */
1921 skip_nop (ir_node *node) {
1923 /* don't assert node !!! */
1925 if (!node || (node->op != op_Id)) return node;
1927 if (!get_opt_normalize()) return node;
1929 /* Don't use get_Id_pred: We get into an endless loop for
1930 self-referencing Ids. */
1931 pred = node->in[0+1];
1933 if (pred->op != op_Id) return pred;
1935 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1936 ir_node *rem_pred, *res;
1938 if (pred->op != op_Id) return pred; /* shortcut */
1941 assert (get_irn_arity (node) > 0);
1943 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1944 res = skip_nop(rem_pred);
1945 if (res->op == op_Id) /* self-loop */ return node;
1947 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1956 skip_Id (ir_node *node) {
1957 return skip_nop(node);
1961 is_Bad (ir_node *node) {
1963 if ((node) && get_irn_opcode(node) == iro_Bad)
1969 is_no_Block (ir_node *node) {
1971 return (get_irn_opcode(node) != iro_Block);
1975 is_Block (ir_node *node) {
1977 return (get_irn_opcode(node) == iro_Block);
1980 /* returns true if node is a Unknown node. */
1982 is_Unknown (ir_node *node) {
1984 return (get_irn_opcode(node) == iro_Unknown);
1988 is_Proj (const ir_node *node) {
1990 return node->op == op_Proj
1991 || (!interprocedural_view && node->op == op_Filter);
1994 /* Returns true if the operation manipulates control flow. */
1996 is_cfop(ir_node *node) {
1997 return is_cfopcode(get_irn_op(node));
2000 /* Returns true if the operation manipulates interprocedural control flow:
2001 CallBegin, EndReg, EndExcept */
2002 int is_ip_cfop(ir_node *node) {
2003 return is_ip_cfopcode(get_irn_op(node));
2006 ir_graph *get_ip_cfop_irg(ir_node *n) {
2007 return get_irn_irg(n);
2010 /* Returns true if the operation can change the control flow because
2013 is_fragile_op(ir_node *node) {
2014 return is_op_fragile(get_irn_op(node));
2017 /* Returns the memory operand of fragile operations. */
2018 ir_node *get_fragile_op_mem(ir_node *node) {
2019 assert(node && is_fragile_op(node));
2021 switch (get_irn_opcode (node)) {
2030 return get_irn_n(node, 0);
2035 assert(0 && "should not be reached");
2040 #ifdef DEBUG_libfirm
2041 void dump_irn (ir_node *n) {
2042 int i, arity = get_irn_arity(n);
2043 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2045 ir_node *pred = get_irn_n(n, -1);
2046 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2047 get_irn_node_nr(pred), (void *)pred);
2049 printf(" preds: \n");
2050 for (i = 0; i < arity; ++i) {
2051 ir_node *pred = get_irn_n(n, i);
2052 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2053 get_irn_node_nr(pred), (void *)pred);
2057 #else /* DEBUG_libfirm */
2058 void dump_irn (ir_node *n) {}
2059 #endif /* DEBUG_libfirm */