3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors */
32 #define CALL_PARAM_OFFSET 2
33 #define SEL_INDEX_OFFSET 2
34 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
35 #define END_KEEPALIVE_OFFSET 0
37 static const char *pnc_name_arr [] = {
38 "False", "Eq", "Lt", "Le",
39 "Gt", "Ge", "Lg", "Leg", "Uo",
40 "Ue", "Ul", "Ule", "Ug", "Uge",
45 * returns the pnc name from an pnc constant
47 const char *get_pnc_string(int pnc) {
48 return pnc_name_arr[pnc];
52 * Calculates the negated pnc condition.
55 get_negated_pnc(int pnc) {
57 case False: return True; break;
58 case Eq: return Ne; break;
59 case Lt: return Uge; break;
60 case Le: return Ug; break;
61 case Gt: return Ule; break;
62 case Ge: return Ul; break;
63 case Lg: return Ue; break;
64 case Leg: return Uo; break;
65 case Uo: return Leg; break;
66 case Ue: return Lg; break;
67 case Ul: return Ge; break;
68 case Ule: return Gt; break;
69 case Ug: return Le; break;
70 case Uge: return Lt; break;
71 case Ne: return Eq; break;
72 case True: return False; break;
74 return 99; /* to shut up gcc */
77 const char *pns_name_arr [] = {
78 "initial_exec", "global_store",
79 "frame_base", "globals", "args"
82 const char *symconst_name_arr [] = {
83 "type_tag", "size", "linkage_ptr_info"
93 * Create a new irnode in irg, with an op, mode, arity and
94 * some incoming irnodes.
95 * If arity is negative, a node with a dynamic array is created.
98 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
99 int arity, ir_node **in)
102 int node_size = offsetof (ir_node, attr) + op->attr_size;
104 assert(irg && op && mode);
105 res = (ir_node *) obstack_alloc (irg->obst, node_size);
106 memset((void *)res, 0, node_size);
108 res->kind = k_ir_node;
114 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
116 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
117 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
120 set_irn_dbg_info(res, db);
124 res->node_nr = get_irp_new_node_nr();
132 /* Copies all attributes stored in the old node to the new node.
133 Assumes both have the same opcode and sufficient size. */
135 copy_attrs (const ir_node *old_node, ir_node *new_node) {
136 assert(get_irn_op(old_node) == get_irn_op(new_node));
137 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
140 /*-- getting some parameters from ir_nodes --*/
143 (is_ir_node)(const void *thing) {
144 return __is_ir_node(thing);
148 (get_irn_intra_arity)(const ir_node *node) {
149 return __get_irn_intra_arity(node);
153 (get_irn_inter_arity)(const ir_node *node) {
154 return __get_irn_inter_arity(node);
158 (get_irn_arity)(const ir_node *node) {
159 return __get_irn_arity(node);
162 /* Returns the array with ins. This array is shifted with respect to the
163 array accessed by get_irn_n: The block operand is at position 0 not -1.
164 (@@@ This should be changed.)
165 The order of the predecessors in this array is not guaranteed, except that
166 lists of operands as predecessors of Block or arguments of a Call are
169 get_irn_in (const ir_node *node) {
171 if (interprocedural_view) { /* handle Filter and Block specially */
172 if (get_irn_opcode(node) == iro_Filter) {
173 assert(node->attr.filter.in_cg);
174 return node->attr.filter.in_cg;
175 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
176 return node->attr.block.in_cg;
178 /* else fall through */
184 set_irn_in (ir_node *node, int arity, ir_node **in) {
187 if (interprocedural_view) { /* handle Filter and Block specially */
188 if (get_irn_opcode(node) == iro_Filter) {
189 assert(node->attr.filter.in_cg);
190 arr = &node->attr.filter.in_cg;
191 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
192 arr = &node->attr.block.in_cg;
199 if (arity != ARR_LEN(*arr) - 1) {
200 ir_node * block = (*arr)[0];
201 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
204 fix_backedges(current_ir_graph->obst, node);
205 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
209 (get_irn_intra_n)(ir_node *node, int n) {
210 return __get_irn_intra_n (node, n);
214 (get_irn_inter_n)(ir_node *node, int n) {
215 return __get_irn_inter_n (node, n);
219 (get_irn_n)(ir_node *node, int n) {
220 return __get_irn_n (node, n);
224 set_irn_n (ir_node *node, int n, ir_node *in) {
225 assert(node && -1 <= n && n < get_irn_arity(node));
226 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
227 /* Change block pred in both views! */
228 node->in[n + 1] = in;
229 assert(node->attr.filter.in_cg);
230 node->attr.filter.in_cg[n + 1] = in;
233 if (interprocedural_view) { /* handle Filter and Block specially */
234 if (get_irn_opcode(node) == iro_Filter) {
235 assert(node->attr.filter.in_cg);
236 node->attr.filter.in_cg[n + 1] = in;
238 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
239 node->attr.block.in_cg[n + 1] = in;
242 /* else fall through */
244 node->in[n + 1] = in;
248 (get_irn_mode)(const ir_node *node) {
249 return __get_irn_mode(node);
253 (set_irn_mode)(ir_node *node, ir_mode *mode)
255 __set_irn_mode(node, mode);
259 get_irn_modecode (const ir_node *node)
262 return node->mode->code;
265 /** Gets the string representation of the mode .*/
267 get_irn_modename (const ir_node *node)
270 return get_mode_name(node->mode);
274 get_irn_modeident (const ir_node *node)
277 return get_mode_ident(node->mode);
281 (get_irn_op)(const ir_node *node)
283 return __get_irn_op(node);
286 /* should be private to the library: */
288 set_irn_op (ir_node *node, ir_op *op)
295 (get_irn_opcode)(const ir_node *node)
297 return __get_irn_opcode(node);
301 get_irn_opname (const ir_node *node)
304 if ((get_irn_op((ir_node *)node) == op_Phi) &&
305 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
306 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
307 return get_id_str(node->op->name);
311 get_irn_opident (const ir_node *node)
314 return node->op->name;
318 (get_irn_visited)(const ir_node *node)
320 return __get_irn_visited(node);
324 (set_irn_visited)(ir_node *node, unsigned long visited)
326 __set_irn_visited(node, visited);
330 (mark_irn_visited)(ir_node *node) {
331 __mark_irn_visited(node);
335 (irn_not_visited)(const ir_node *node) {
336 return __irn_not_visited(node);
340 (irn_visited)(const ir_node *node) {
341 return __irn_visited(node);
345 (set_irn_link)(ir_node *node, void *link) {
346 __set_irn_link(node, link);
350 (get_irn_link)(const ir_node *node) {
351 return __get_irn_link(node);
354 /* Outputs a unique number for this node */
356 get_irn_node_nr(const ir_node *node) {
359 return node->node_nr;
366 get_irn_const_attr (ir_node *node)
368 assert (node->op == op_Const);
369 return node->attr.con;
373 get_irn_proj_attr (ir_node *node)
375 assert (node->op == op_Proj);
376 return node->attr.proj;
380 get_irn_alloc_attr (ir_node *node)
382 assert (node->op == op_Alloc);
387 get_irn_free_attr (ir_node *node)
389 assert (node->op == op_Free);
390 return node->attr.f = skip_tid(node->attr.f);
394 get_irn_symconst_attr (ir_node *node)
396 assert (node->op == op_SymConst);
401 get_irn_call_attr (ir_node *node)
403 assert (node->op == op_Call);
404 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
408 get_irn_funccall_attr (ir_node *node)
410 assert (node->op == op_FuncCall);
411 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
415 get_irn_sel_attr (ir_node *node)
417 assert (node->op == op_Sel);
422 get_irn_phi_attr (ir_node *node)
424 assert (node->op == op_Phi);
425 return node->attr.phi0_pos;
429 get_irn_block_attr (ir_node *node)
431 assert (node->op == op_Block);
432 return node->attr.block;
435 /** manipulate fields of individual nodes **/
437 /* this works for all except Block */
439 get_nodes_Block (ir_node *node) {
440 assert (!(node->op == op_Block));
441 return get_irn_n(node, -1);
445 set_nodes_Block (ir_node *node, ir_node *block) {
446 assert (!(node->op == op_Block));
447 set_irn_n(node, -1, block);
450 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
451 * from Start. If so returns frame type, else Null. */
452 type *is_frame_pointer(ir_node *n) {
453 if ((get_irn_op(n) == op_Proj) &&
454 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
455 ir_node *start = get_Proj_pred(n);
456 if (get_irn_op(start) == op_Start) {
457 return get_irg_frame_type(get_irn_irg(start));
463 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
464 * from Start. If so returns global type, else Null. */
465 type *is_globals_pointer(ir_node *n) {
466 if ((get_irn_op(n) == op_Proj) &&
467 (get_Proj_proj(n) == pn_Start_P_globals)) {
468 ir_node *start = get_Proj_pred(n);
469 if (get_irn_op(start) == op_Start) {
470 return get_glob_type();
476 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
477 * from Start. If so returns 1, else 0. */
478 int is_value_arg_pointer(ir_node *n) {
479 if ((get_irn_op(n) == op_Proj) &&
480 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
481 (get_irn_op(get_Proj_pred(n)) == op_Start))
486 /* Returns an array with the predecessors of the Block. Depending on
487 the implementation of the graph data structure this can be a copy of
488 the internal representation of predecessors as well as the internal
489 array itself. Therefore writing to this array might obstruct the ir. */
491 get_Block_cfgpred_arr (ir_node *node)
493 assert ((node->op == op_Block));
494 return (ir_node **)&(get_irn_in(node)[1]);
499 get_Block_n_cfgpreds (ir_node *node) {
500 assert ((node->op == op_Block));
501 return get_irn_arity(node);
505 get_Block_cfgpred (ir_node *node, int pos) {
507 assert (node->op == op_Block);
508 assert(-1 <= pos && pos < get_irn_arity(node));
509 return get_irn_n(node, pos);
513 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
514 assert (node->op == op_Block);
515 set_irn_n(node, pos, pred);
519 get_Block_matured (ir_node *node) {
520 assert (node->op == op_Block);
521 return node->attr.block.matured;
525 set_Block_matured (ir_node *node, bool matured) {
526 assert (node->op == op_Block);
527 node->attr.block.matured = matured;
530 get_Block_block_visited (ir_node *node) {
531 assert (node->op == op_Block);
532 return node->attr.block.block_visited;
536 set_Block_block_visited (ir_node *node, unsigned long visit) {
537 assert (node->op == op_Block);
538 node->attr.block.block_visited = visit;
541 /* For this current_ir_graph must be set. */
543 mark_Block_block_visited (ir_node *node) {
544 assert (node->op == op_Block);
545 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
549 Block_not_block_visited(ir_node *node) {
550 assert (node->op == op_Block);
551 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
555 get_Block_graph_arr (ir_node *node, int pos) {
556 assert (node->op == op_Block);
557 return node->attr.block.graph_arr[pos+1];
561 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
562 assert (node->op == op_Block);
563 node->attr.block.graph_arr[pos+1] = value;
566 /* handler handling for Blocks */
568 set_Block_handler (ir_node *block, ir_node *handler) {
569 assert ((block->op == op_Block));
570 assert ((handler->op == op_Block));
571 block->attr.block.handler_entry = handler;
575 get_Block_handler (ir_node *block) {
576 assert ((block->op == op_Block));
577 return (block->attr.block.handler_entry);
580 /* handler handling for Nodes */
582 set_Node_handler (ir_node *node, ir_node *handler) {
583 set_Block_handler (get_nodes_Block (node), handler);
587 get_Node_handler (ir_node *node) {
588 return (get_Block_handler (get_nodes_Block (node)));
591 /* exc_t handling for Blocks */
592 void set_Block_exc (ir_node *block, exc_t exc) {
593 assert ((block->op == op_Block));
594 block->attr.block.exc = exc;
597 exc_t get_Block_exc (ir_node *block) {
598 assert ((block->op == op_Block));
599 return (block->attr.block.exc);
602 /* exc_t handling for Nodes */
603 void set_Node_exc (ir_node *node, exc_t exc) {
604 set_Block_exc (get_nodes_Block (node), exc);
607 exc_t get_Node_exc (ir_node *node) {
608 return (get_Block_exc (get_nodes_Block (node)));
612 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
613 assert(node->op == op_Block);
614 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
615 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
616 node->attr.block.in_cg[0] = NULL;
617 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
619 /* Fix backedge array. fix_backedges operates depending on
620 interprocedural_view. */
621 bool ipv = interprocedural_view;
622 interprocedural_view = true;
623 fix_backedges(current_ir_graph->obst, node);
624 interprocedural_view = ipv;
627 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
630 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
631 assert(node->op == op_Block &&
632 node->attr.block.in_cg &&
633 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
634 node->attr.block.in_cg[pos + 1] = pred;
637 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
638 assert(node->op == op_Block);
639 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
642 int get_Block_cg_n_cfgpreds(ir_node * node) {
643 assert(node->op == op_Block);
644 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
647 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
648 assert(node->op == op_Block && node->attr.block.in_cg);
649 return node->attr.block.in_cg[pos + 1];
652 void remove_Block_cg_cfgpred_arr(ir_node * node) {
653 assert(node->op == op_Block);
654 node->attr.block.in_cg = NULL;
657 /* Start references the irg it is in. */
659 get_Start_irg(ir_node *node) {
660 return get_irn_irg(node);
664 set_Start_irg(ir_node *node, ir_graph *irg) {
665 assert(node->op == op_Start);
666 assert(is_ir_graph(irg));
667 assert(0 && " Why set irg? -- use set_irn_irg");
671 get_End_n_keepalives(ir_node *end) {
672 assert (end->op == op_End);
673 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
677 get_End_keepalive(ir_node *end, int pos) {
678 assert (end->op == op_End);
679 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
683 add_End_keepalive (ir_node *end, ir_node *ka) {
684 assert (end->op == op_End);
685 ARR_APP1 (ir_node *, end->in, ka);
689 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
690 assert (end->op == op_End);
691 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
695 free_End (ir_node *end) {
696 assert (end->op == op_End);
698 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
699 end->in = NULL; /* @@@ make sure we get an error if we use the
700 in array afterwards ... */
703 ir_graph *get_EndReg_irg (ir_node *end) {
704 return get_irn_irg(end);
707 ir_graph *get_EndExcept_irg (ir_node *end) {
708 return get_irn_irg(end);
712 > Implementing the case construct (which is where the constant Proj node is
713 > important) involves far more than simply determining the constant values.
714 > We could argue that this is more properly a function of the translator from
715 > Firm to the target machine. That could be done if there was some way of
716 > projecting "default" out of the Cond node.
717 I know it's complicated.
718 Basically there are two problems:
719 - determining the gaps between the projs
720 - determining the biggest case constant to know the proj number for
722 I see several solutions:
723 1. Introduce a ProjDefault node. Solves both problems.
724 This means to extend all optimizations executed during construction.
725 2. Give the Cond node for switch two flavors:
726 a) there are no gaps in the projs (existing flavor)
727 b) gaps may exist, default proj is still the Proj with the largest
728 projection number. This covers also the gaps.
729 3. Fix the semantic of the Cond to that of 2b)
731 Solution 2 seems to be the best:
732 Computing the gaps in the Firm representation is not too hard, i.e.,
733 libFIRM can implement a routine that transforms between the two
734 flavours. This is also possible for 1) but 2) does not require to
735 change any existing optimization.
736 Further it should be far simpler to determine the biggest constant than
738 I don't want to choose 3) as 2a) seems to have advantages for
739 dataflow analysis and 3) does not allow to convert the representation to
743 get_Cond_selector (ir_node *node) {
744 assert (node->op == op_Cond);
745 return get_irn_n(node, 0);
749 set_Cond_selector (ir_node *node, ir_node *selector) {
750 assert (node->op == op_Cond);
751 set_irn_n(node, 0, selector);
755 get_Cond_kind (ir_node *node) {
756 assert (node->op == op_Cond);
757 return node->attr.c.kind;
761 set_Cond_kind (ir_node *node, cond_kind kind) {
762 assert (node->op == op_Cond);
763 node->attr.c.kind = kind;
767 get_Cond_defaultProj (ir_node *node) {
768 assert (node->op == op_Cond);
769 return node->attr.c.default_proj;
773 get_Return_mem (ir_node *node) {
774 assert (node->op == op_Return);
775 return get_irn_n(node, 0);
779 set_Return_mem (ir_node *node, ir_node *mem) {
780 assert (node->op == op_Return);
781 set_irn_n(node, 0, mem);
785 get_Return_n_ress (ir_node *node) {
786 assert (node->op == op_Return);
787 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
791 get_Return_res_arr (ir_node *node)
793 assert ((node->op == op_Return));
794 if (get_Return_n_ress(node) > 0)
795 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
802 set_Return_n_res (ir_node *node, int results) {
803 assert (node->op == op_Return);
808 get_Return_res (ir_node *node, int pos) {
809 assert (node->op == op_Return);
810 assert (get_Return_n_ress(node) > pos);
811 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
815 set_Return_res (ir_node *node, int pos, ir_node *res){
816 assert (node->op == op_Return);
817 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
821 get_Raise_mem (ir_node *node) {
822 assert (node->op == op_Raise);
823 return get_irn_n(node, 0);
827 set_Raise_mem (ir_node *node, ir_node *mem) {
828 assert (node->op == op_Raise);
829 set_irn_n(node, 0, mem);
833 get_Raise_exo_ptr (ir_node *node) {
834 assert (node->op == op_Raise);
835 return get_irn_n(node, 1);
839 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
840 assert (node->op == op_Raise);
841 set_irn_n(node, 1, exo_ptr);
844 tarval *get_Const_tarval (ir_node *node) {
845 assert (node->op == op_Const);
846 return node->attr.con.tv;
850 set_Const_tarval (ir_node *node, tarval *con) {
851 assert (node->op == op_Const);
852 node->attr.con.tv = con;
856 /* The source language type. Must be an atomic type. Mode of type must
857 be mode of node. For tarvals from entities type must be pointer to
860 get_Const_type (ir_node *node) {
861 assert (node->op == op_Const);
862 return node->attr.con.tp;
866 set_Const_type (ir_node *node, type *tp) {
867 assert (node->op == op_Const);
868 if (tp != unknown_type) {
869 assert (is_atomic_type(tp));
870 assert (get_type_mode(tp) == get_irn_mode(node));
871 assert (!tarval_is_entity(get_Const_tarval(node)) ||
872 (is_pointer_type(tp) &&
873 (get_pointer_points_to_type(tp) ==
874 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
877 node->attr.con.tp = tp;
882 get_SymConst_kind (const ir_node *node) {
883 assert (node->op == op_SymConst);
884 return node->attr.i.num;
888 set_SymConst_kind (ir_node *node, symconst_kind num) {
889 assert (node->op == op_SymConst);
890 node->attr.i.num = num;
894 get_SymConst_type (ir_node *node) {
895 assert ( (node->op == op_SymConst)
896 && ( get_SymConst_kind(node) == type_tag
897 || get_SymConst_kind(node) == size));
898 return node->attr.i.tori.typ = skip_tid(node->attr.i.tori.typ);
902 set_SymConst_type (ir_node *node, type *tp) {
903 assert ( (node->op == op_SymConst)
904 && ( get_SymConst_kind(node) == type_tag
905 || get_SymConst_kind(node) == size));
906 node->attr.i.tori.typ = tp;
910 get_SymConst_ptrinfo (ir_node *node) {
911 assert ( (node->op == op_SymConst)
912 && (get_SymConst_kind(node) == linkage_ptr_info));
913 return node->attr.i.tori.ptrinfo;
917 set_SymConst_ptrinfo (ir_node *node, ident *ptrinfo) {
918 assert ( (node->op == op_SymConst)
919 && (get_SymConst_kind(node) == linkage_ptr_info));
920 node->attr.i.tori.ptrinfo = ptrinfo;
924 get_SymConst_type_or_id (ir_node *node) {
925 assert (node->op == op_SymConst);
926 return &(node->attr.i.tori);
930 set_SymConst_type_or_id (ir_node *node, type_or_id_p tori) {
931 assert (node->op == op_SymConst);
932 memcpy (&(node->attr.i.tori), tori, sizeof(type_or_id));
936 get_Sel_mem (ir_node *node) {
937 assert (node->op == op_Sel);
938 return get_irn_n(node, 0);
942 set_Sel_mem (ir_node *node, ir_node *mem) {
943 assert (node->op == op_Sel);
944 set_irn_n(node, 0, mem);
948 get_Sel_ptr (ir_node *node) {
949 assert (node->op == op_Sel);
950 return get_irn_n(node, 1);
954 set_Sel_ptr (ir_node *node, ir_node *ptr) {
955 assert (node->op == op_Sel);
956 set_irn_n(node, 1, ptr);
960 get_Sel_n_indexs (ir_node *node) {
961 assert (node->op == op_Sel);
962 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
966 get_Sel_index_arr (ir_node *node)
968 assert ((node->op == op_Sel));
969 if (get_Sel_n_indexs(node) > 0)
970 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
976 get_Sel_index (ir_node *node, int pos) {
977 assert (node->op == op_Sel);
978 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
982 set_Sel_index (ir_node *node, int pos, ir_node *index) {
983 assert (node->op == op_Sel);
984 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
988 get_Sel_entity (ir_node *node) {
989 assert (node->op == op_Sel);
990 return node->attr.s.ent;
994 set_Sel_entity (ir_node *node, entity *ent) {
995 assert (node->op == op_Sel);
996 node->attr.s.ent = ent;
1000 get_InstOf_ent (ir_node *node) {
1001 assert (node->op = op_InstOf);
1002 return (node->attr.io.ent);
1006 set_InstOf_ent (ir_node *node, type *ent) {
1007 assert (node->op = op_InstOf);
1008 node->attr.io.ent = ent;
1012 get_InstOf_store (ir_node *node) {
1013 assert (node->op = op_InstOf);
1014 return (get_irn_n (node, 0));
1018 set_InstOf_store (ir_node *node, ir_node *obj) {
1019 assert (node->op = op_InstOf);
1020 set_irn_n (node, 0, obj);
1024 get_InstOf_obj (ir_node *node) {
1025 assert (node->op = op_InstOf);
1026 return (get_irn_n (node, 1));
1030 set_InstOf_obj (ir_node *node, ir_node *obj) {
1031 assert (node->op = op_InstOf);
1032 set_irn_n (node, 1, obj);
1036 /* For unary and binary arithmetic operations the access to the
1037 operands can be factored out. Left is the first, right the
1038 second arithmetic value as listed in tech report 0999-33.
1039 unops are: Minus, Abs, Not, Conv, Cast
1040 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1041 Shr, Shrs, Rotate, Cmp */
1045 get_Call_mem (ir_node *node) {
1046 assert (node->op == op_Call);
1047 return get_irn_n(node, 0);
1051 set_Call_mem (ir_node *node, ir_node *mem) {
1052 assert (node->op == op_Call);
1053 set_irn_n(node, 0, mem);
1057 get_Call_ptr (ir_node *node) {
1058 assert (node->op == op_Call);
1059 return get_irn_n(node, 1);
1063 set_Call_ptr (ir_node *node, ir_node *ptr) {
1064 assert (node->op == op_Call);
1065 set_irn_n(node, 1, ptr);
1069 get_Call_param_arr (ir_node *node) {
1070 assert (node->op == op_Call);
1071 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1075 get_Call_n_params (ir_node *node) {
1076 assert (node->op == op_Call);
1077 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1081 get_Call_arity (ir_node *node) {
1082 assert (node->op == op_Call);
1083 return get_Call_n_params(node);
1087 set_Call_arity (ir_node *node, ir_node *arity) {
1088 assert (node->op == op_Call);
1093 get_Call_param (ir_node *node, int pos) {
1094 assert (node->op == op_Call);
1095 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1099 set_Call_param (ir_node *node, int pos, ir_node *param) {
1100 assert (node->op == op_Call);
1101 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1105 get_Call_type (ir_node *node) {
1106 assert (node->op == op_Call);
1107 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1111 set_Call_type (ir_node *node, type *tp) {
1112 assert (node->op == op_Call);
1113 assert (is_method_type(tp));
1114 node->attr.call.cld_tp = tp;
1117 int Call_has_callees(ir_node *node) {
1118 return (node->attr.call.callee_arr != NULL);
1121 int get_Call_n_callees(ir_node * node) {
1122 assert(node->op == op_Call && node->attr.call.callee_arr);
1123 return ARR_LEN(node->attr.call.callee_arr);
1126 entity * get_Call_callee(ir_node * node, int pos) {
1127 assert(node->op == op_Call && node->attr.call.callee_arr);
1128 return node->attr.call.callee_arr[pos];
1131 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1132 assert(node->op == op_Call);
1133 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1134 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1136 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1139 void remove_Call_callee_arr(ir_node * node) {
1140 assert(node->op == op_Call);
1141 node->attr.call.callee_arr = NULL;
1144 ir_node * get_CallBegin_ptr (ir_node *node) {
1145 assert(node->op == op_CallBegin);
1146 return get_irn_n(node, 0);
1148 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1149 assert(node->op == op_CallBegin);
1150 set_irn_n(node, 0, ptr);
1152 ir_graph * get_CallBegin_irg (ir_node *node) {
1153 return get_irn_irg(node);
1155 ir_node * get_CallBegin_call (ir_node *node) {
1156 assert(node->op == op_CallBegin);
1157 return node->attr.callbegin.call;
1159 void set_CallBegin_call (ir_node *node, ir_node *call) {
1160 assert(node->op == op_CallBegin);
1161 node->attr.callbegin.call = call;
1165 get_FuncCall_ptr (ir_node *node) {
1166 assert (node->op == op_FuncCall);
1167 return get_irn_n(node, 0);
1171 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1172 assert (node->op == op_FuncCall);
1173 set_irn_n(node, 0, ptr);
1177 get_FuncCall_param_arr (ir_node *node) {
1178 assert (node->op == op_FuncCall);
1179 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET];
1183 get_FuncCall_n_params (ir_node *node) {
1184 assert (node->op == op_FuncCall);
1185 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1189 get_FuncCall_arity (ir_node *node) {
1190 assert (node->op == op_FuncCall);
1191 return get_FuncCall_n_params(node);
1195 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1196 assert (node->op == op_FuncCall);
1201 get_FuncCall_param (ir_node *node, int pos) {
1202 assert (node->op == op_FuncCall);
1203 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1207 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1208 assert (node->op == op_FuncCall);
1209 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1213 get_FuncCall_type (ir_node *node) {
1214 assert (node->op == op_FuncCall);
1215 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1219 set_FuncCall_type (ir_node *node, type *tp) {
1220 assert (node->op == op_FuncCall);
1221 assert (is_method_type(tp));
1222 node->attr.call.cld_tp = tp;
1225 int FuncCall_has_callees(ir_node *node) {
1226 return (node->attr.call.callee_arr != NULL);
1229 int get_FuncCall_n_callees(ir_node * node) {
1230 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1231 return ARR_LEN(node->attr.call.callee_arr);
1234 entity * get_FuncCall_callee(ir_node * node, int pos) {
1235 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1236 return node->attr.call.callee_arr[pos];
1239 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1240 assert(node->op == op_FuncCall);
1241 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1242 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1244 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1247 void remove_FuncCall_callee_arr(ir_node * node) {
1248 assert(node->op == op_FuncCall);
1249 node->attr.call.callee_arr = NULL;
1254 ir_node * get_##OP##_left(ir_node *node) { \
1255 assert(node->op == op_##OP); \
1256 return get_irn_n(node, node->op->op_index); \
1258 void set_##OP##_left(ir_node *node, ir_node *left) { \
1259 assert(node->op == op_##OP); \
1260 set_irn_n(node, node->op->op_index, left); \
1262 ir_node *get_##OP##_right(ir_node *node) { \
1263 assert(node->op == op_##OP); \
1264 return get_irn_n(node, node->op->op_index + 1); \
1266 void set_##OP##_right(ir_node *node, ir_node *right) { \
1267 assert(node->op == op_##OP); \
1268 set_irn_n(node, node->op->op_index + 1, right); \
1272 ir_node *get_##OP##_op(ir_node *node) { \
1273 assert(node->op == op_##OP); \
1274 return get_irn_n(node, node->op->op_index); \
1276 void set_##OP##_op (ir_node *node, ir_node *op) { \
1277 assert(node->op == op_##OP); \
1278 set_irn_n(node, node->op->op_index, op); \
1288 get_Quot_mem (ir_node *node) {
1289 assert (node->op == op_Quot);
1290 return get_irn_n(node, 0);
1294 set_Quot_mem (ir_node *node, ir_node *mem) {
1295 assert (node->op == op_Quot);
1296 set_irn_n(node, 0, mem);
1302 get_DivMod_mem (ir_node *node) {
1303 assert (node->op == op_DivMod);
1304 return get_irn_n(node, 0);
1308 set_DivMod_mem (ir_node *node, ir_node *mem) {
1309 assert (node->op == op_DivMod);
1310 set_irn_n(node, 0, mem);
1316 get_Div_mem (ir_node *node) {
1317 assert (node->op == op_Div);
1318 return get_irn_n(node, 0);
1322 set_Div_mem (ir_node *node, ir_node *mem) {
1323 assert (node->op == op_Div);
1324 set_irn_n(node, 0, mem);
1330 get_Mod_mem (ir_node *node) {
1331 assert (node->op == op_Mod);
1332 return get_irn_n(node, 0);
1336 set_Mod_mem (ir_node *node, ir_node *mem) {
1337 assert (node->op == op_Mod);
1338 set_irn_n(node, 0, mem);
1355 get_Cast_type (ir_node *node) {
1356 assert (node->op == op_Cast);
1357 return node->attr.cast.totype;
1361 set_Cast_type (ir_node *node, type *to_tp) {
1362 assert (node->op == op_Cast);
1363 node->attr.cast.totype = to_tp;
1367 is_unop (ir_node *node) {
1368 return (node->op->opar == oparity_unary);
1372 get_unop_op (ir_node *node) {
1373 if (node->op->opar == oparity_unary)
1374 return get_irn_n(node, node->op->op_index);
1376 assert(node->op->opar == oparity_unary);
1381 set_unop_op (ir_node *node, ir_node *op) {
1382 if (node->op->opar == oparity_unary)
1383 set_irn_n(node, node->op->op_index, op);
1385 assert(node->op->opar == oparity_unary);
1389 is_binop (ir_node *node) {
1390 return (node->op->opar == oparity_binary);
1394 get_binop_left (ir_node *node) {
1395 if (node->op->opar == oparity_binary)
1396 return get_irn_n(node, node->op->op_index);
1398 assert(node->op->opar == oparity_binary);
1403 set_binop_left (ir_node *node, ir_node *left) {
1404 if (node->op->opar == oparity_binary)
1405 set_irn_n(node, node->op->op_index, left);
1407 assert (node->op->opar == oparity_binary);
1411 get_binop_right (ir_node *node) {
1412 if (node->op->opar == oparity_binary)
1413 return get_irn_n(node, node->op->op_index + 1);
1415 assert(node->op->opar == oparity_binary);
1420 set_binop_right (ir_node *node, ir_node *right) {
1421 if (node->op->opar == oparity_binary)
1422 set_irn_n(node, node->op->op_index + 1, right);
1424 assert (node->op->opar == oparity_binary);
/* Returns non-zero if n acts as a Phi node.  In the interprocedural
   view a Filter node counts as a Phi.  A Phi with arity 0 (Phi0)
   only counts while the graph is still being built.
   NOTE(review): `op` is set on a line not visible here -- presumably
   get_irn_op(n); verify against the full source. */
1427 int is_Phi (ir_node *n) {
1433 if (op == op_Filter) return interprocedural_view;
1436 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1437 (get_irn_arity(n) > 0));
/* Returns non-zero if n is a Phi0: a zero-arity Phi, which only
   exists while the graph is in the building phase. */
1442 int is_Phi0 (ir_node *n) {
1445 return ((get_irn_op(n) == op_Phi) &&
1446 (get_irn_arity(n) == 0) &&
1447 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Returns the raw predecessor array of a Phi node.  in[0] is the
   block, so data predecessors start at in[1]. */
1451 get_Phi_preds_arr (ir_node *node) {
1452 assert (node->op == op_Phi);
1453 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of data predecessors of a Phi (or Phi0). */
1457 get_Phi_n_preds (ir_node *node) {
1458 assert (is_Phi(node) || is_Phi0(node));
1459 return (get_irn_arity(node));
/* Sets the predecessor count of a Phi node (body not fully
   visible in this view). */
1463 void set_Phi_n_preds (ir_node *node, int n_preds) {
1464 assert (node->op == op_Phi);
/* Returns the data predecessor of a Phi (or Phi0) at position pos. */
1469 get_Phi_pred (ir_node *node, int pos) {
1470 assert (is_Phi(node) || is_Phi0(node));
1471 return get_irn_n(node, pos);
/* Sets the data predecessor of a Phi (or Phi0) at position pos. */
1475 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1476 assert (is_Phi(node) || is_Phi0(node));
1477 set_irn_n(node, pos, pred);
/* Load: pred 0 is memory, pred 1 is the address pointer. */
1481 get_Load_mem (ir_node *node) {
1482 assert (node->op == op_Load);
1483 return get_irn_n(node, 0);
/* Sets the memory operand (pred 0) of a Load node. */
1487 set_Load_mem (ir_node *node, ir_node *mem) {
1488 assert (node->op == op_Load);
1489 set_irn_n(node, 0, mem);
/* Returns the address pointer (pred 1) of a Load node. */
1493 get_Load_ptr (ir_node *node) {
1494 assert (node->op == op_Load);
1495 return get_irn_n(node, 1);
/* Sets the address pointer (pred 1) of a Load node. */
1499 set_Load_ptr (ir_node *node, ir_node *ptr) {
1500 assert (node->op == op_Load);
1501 set_irn_n(node, 1, ptr);
/* Store: pred 0 is memory, pred 1 is the address, pred 2 the value. */
1506 get_Store_mem (ir_node *node) {
1507 assert (node->op == op_Store);
1508 return get_irn_n(node, 0);
/* Sets the memory operand (pred 0) of a Store node. */
1512 set_Store_mem (ir_node *node, ir_node *mem) {
1513 assert (node->op == op_Store);
1514 set_irn_n(node, 0, mem);
/* Returns the address pointer (pred 1) of a Store node. */
1518 get_Store_ptr (ir_node *node) {
1519 assert (node->op == op_Store);
1520 return get_irn_n(node, 1);
/* Sets the address pointer (pred 1) of a Store node. */
1524 set_Store_ptr (ir_node *node, ir_node *ptr) {
1525 assert (node->op == op_Store);
1526 set_irn_n(node, 1, ptr);
/* Returns the stored value (pred 2) of a Store node. */
1530 get_Store_value (ir_node *node) {
1531 assert (node->op == op_Store);
1532 return get_irn_n(node, 2);
/* Sets the stored value (pred 2) of a Store node. */
1536 set_Store_value (ir_node *node, ir_node *value) {
1537 assert (node->op == op_Store);
1538 set_irn_n(node, 2, value);
/* Alloc: pred 0 is memory, pred 1 is the allocation size; the
   allocated type and the where-flag live in the node's attribute. */
1542 get_Alloc_mem (ir_node *node) {
1543 assert (node->op == op_Alloc);
1544 return get_irn_n(node, 0);
/* Sets the memory operand (pred 0) of an Alloc node. */
1548 set_Alloc_mem (ir_node *node, ir_node *mem) {
1549 assert (node->op == op_Alloc);
1550 set_irn_n(node, 0, mem);
/* Returns the size operand (pred 1) of an Alloc node. */
1554 get_Alloc_size (ir_node *node) {
1555 assert (node->op == op_Alloc);
1556 return get_irn_n(node, 1);
/* Sets the size operand (pred 1) of an Alloc node. */
1560 set_Alloc_size (ir_node *node, ir_node *size) {
1561 assert (node->op == op_Alloc);
1562 set_irn_n(node, 1, size);
/* Returns the allocated type; skip_tid normalizes the stored type
   and the result is cached back into the attribute. */
1566 get_Alloc_type (ir_node *node) {
1567 assert (node->op == op_Alloc);
1568 return node->attr.a.type = skip_tid(node->attr.a.type);
/* Sets the allocated type of an Alloc node. */
1572 set_Alloc_type (ir_node *node, type *tp) {
1573 assert (node->op == op_Alloc);
1574 node->attr.a.type = tp;
/* Returns where the allocation happens (stack or heap). */
1578 get_Alloc_where (ir_node *node) {
1579 assert (node->op == op_Alloc);
1580 return node->attr.a.where;
/* Sets where the allocation happens (stack or heap). */
1584 set_Alloc_where (ir_node *node, where_alloc where) {
1585 assert (node->op == op_Alloc);
1586 node->attr.a.where = where;
/* Free: pred 0 is memory, pred 1 the freed pointer, pred 2 the
   size; the freed type lives in the node's attribute. */
1591 get_Free_mem (ir_node *node) {
1592 assert (node->op == op_Free);
1593 return get_irn_n(node, 0);
/* Sets the memory operand (pred 0) of a Free node. */
1597 set_Free_mem (ir_node *node, ir_node *mem) {
1598 assert (node->op == op_Free);
1599 set_irn_n(node, 0, mem);
/* Returns the pointer operand (pred 1) of a Free node. */
1603 get_Free_ptr (ir_node *node) {
1604 assert (node->op == op_Free);
1605 return get_irn_n(node, 1);
/* Sets the pointer operand (pred 1) of a Free node. */
1609 set_Free_ptr (ir_node *node, ir_node *ptr) {
1610 assert (node->op == op_Free);
1611 set_irn_n(node, 1, ptr);
/* Returns the size operand (pred 2) of a Free node. */
1615 get_Free_size (ir_node *node) {
1616 assert (node->op == op_Free);
1617 return get_irn_n(node, 2);
/* Sets the size operand (pred 2) of a Free node. */
1621 set_Free_size (ir_node *node, ir_node *size) {
1622 assert (node->op == op_Free);
1623 set_irn_n(node, 2, size);
/* Returns the freed type; skip_tid normalizes it and the result
   is cached back into attr.f. */
1627 get_Free_type (ir_node *node) {
1628 assert (node->op == op_Free);
1629 return node->attr.f = skip_tid(node->attr.f);
/* Sets the freed type of a Free node (assignment on a line not
   visible in this view). */
1633 set_Free_type (ir_node *node, type *tp) {
1634 assert (node->op == op_Free);
/* Returns the raw predecessor array of a Sync node; in[0] is the
   block, so data predecessors start at in[1]. */
1639 get_Sync_preds_arr (ir_node *node) {
1640 assert (node->op == op_Sync);
1641 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of memory predecessors of a Sync node. */
1645 get_Sync_n_preds (ir_node *node) {
1646 assert (node->op == op_Sync);
1647 return (get_irn_arity(node));
/* Sets the predecessor count of a Sync node (body not fully
   visible in this view). */
1652 set_Sync_n_preds (ir_node *node, int n_preds) {
1653 assert (node->op == op_Sync);
/* Returns the predecessor of a Sync node at position pos. */
1658 get_Sync_pred (ir_node *node, int pos) {
1659 assert (node->op == op_Sync);
1660 return get_irn_n(node, pos);
/* Sets the predecessor of a Sync node at position pos. */
1664 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1665 assert (node->op == op_Sync);
1666 set_irn_n(node, pos, pred);
/* Returns the single predecessor (pred 0) of a Proj.  is_Proj also
   accepts Filter nodes outside the interprocedural view. */
1670 get_Proj_pred (ir_node *node) {
1671 assert (is_Proj(node));
1672 return get_irn_n(node, 0);
/* Sets the predecessor (pred 0) of a Proj node. */
1676 set_Proj_pred (ir_node *node, ir_node *pred) {
1677 assert (is_Proj(node));
1678 set_irn_n(node, 0, pred);
/* Returns the projection number.  Handles both real Proj nodes and
   Filter nodes (which store theirs in attr.filter.proj). */
1682 get_Proj_proj (ir_node *node) {
1683 assert (is_Proj(node));
1684 if (get_irn_opcode(node) == iro_Proj) {
1685 return node->attr.proj;
1687 assert(get_irn_opcode(node) == iro_Filter);
1688 return node->attr.filter.proj;
/* Sets the projection number.
   NOTE(review): asymmetric with get_Proj_proj -- this setter only
   accepts real Proj nodes, not Filter; use set_Filter_proj there. */
1693 set_Proj_proj (ir_node *node, long proj) {
1694 assert (node->op == op_Proj)
1695 node->attr.proj = proj;
/* Returns the raw predecessor array of a Tuple node; in[0] is the
   block, so data predecessors start at in[1]. */
1699 get_Tuple_preds_arr (ir_node *node) {
1700 assert (node->op == op_Tuple);
1701 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of predecessors of a Tuple node. */
1705 get_Tuple_n_preds (ir_node *node) {
1706 assert (node->op == op_Tuple);
1707 return (get_irn_arity(node));
/* Sets the predecessor count of a Tuple node (body not fully
   visible in this view). */
1712 set_Tuple_n_preds (ir_node *node, int n_preds) {
1713 assert (node->op == op_Tuple);
/* Returns the predecessor of a Tuple node at position pos. */
1718 get_Tuple_pred (ir_node *node, int pos) {
1719 assert (node->op == op_Tuple);
1720 return get_irn_n(node, pos);
/* Sets the predecessor of a Tuple node at position pos. */
1724 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1725 assert (node->op == op_Tuple);
1726 set_irn_n(node, pos, pred);
/* Returns the single predecessor (pred 0) of an Id node. */
1730 get_Id_pred (ir_node *node) {
1731 assert (node->op == op_Id);
1732 return get_irn_n(node, 0);
/* Sets the predecessor (pred 0) of an Id node. */
1736 set_Id_pred (ir_node *node, ir_node *pred) {
1737 assert (node->op == op_Id);
1738 set_irn_n(node, 0, pred);
/* Confirm: pred 0 is the confirmed value, pred 1 the bound it is
   compared against. */
1741 ir_node *get_Confirm_value (ir_node *node) {
1742 assert (node->op == op_Confirm);
1743 return get_irn_n(node, 0);
/* Sets the confirmed value (pred 0) of a Confirm node. */
1745 void set_Confirm_value (ir_node *node, ir_node *value) {
1746 assert (node->op == op_Confirm);
1747 set_irn_n(node, 0, value);
/* Returns the bound operand (pred 1) of a Confirm node. */
1749 ir_node *get_Confirm_bound (ir_node *node) {
1750 assert (node->op == op_Confirm);
1751 return get_irn_n(node, 1);
/* Sets the bound operand of a Confirm node.
   Bug fix: the bound is pred 1 (see get_Confirm_bound above); the
   old code wrote position 0, which clobbered the value operand
   set by set_Confirm_value. */
1753 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1754 assert (node->op == op_Confirm);
1755 set_irn_n(node, 1, bound);
/* Returns the comparison relation (pn_Cmp) a Confirm asserts
   between its value and its bound. */
1757 pn_Cmp get_Confirm_cmp (ir_node *node) {
1758 assert (node->op == op_Confirm);
1759 return node->attr.confirm_cmp;
/* Sets the comparison relation of a Confirm node. */
1761 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1762 assert (node->op == op_Confirm);
1763 node->attr.confirm_cmp = cmp;
/* Returns the predecessor of a Filter node (body on lines not
   visible in this view). */
1768 get_Filter_pred (ir_node *node) {
1769 assert(node->op == op_Filter);
/* Sets the predecessor of a Filter node (body not visible here). */
1773 set_Filter_pred (ir_node *node, ir_node *pred) {
1774 assert(node->op == op_Filter);
/* Returns the projection number of a Filter node. */
1778 get_Filter_proj(ir_node *node) {
1779 assert(node->op == op_Filter);
1780 return node->attr.filter.proj;
/* Sets the projection number of a Filter node. */
1783 set_Filter_proj (ir_node *node, long proj) {
1784 assert(node->op == op_Filter);
1785 node->attr.filter.proj = proj;
1788 /* Don't use get_irn_arity, get_irn_n in implementation as access
1789 shall work independent of view!!! */
/* Installs the interprocedural (call-graph) predecessor array of a
   Filter node.  Reallocates in_cg/backedge on the graph's obstack
   when the arity changes; slot 0 mirrors the block predecessor and
   the data preds are copied into slots 1..arity. */
1790 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1791 assert(node->op == op_Filter);
1792 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1793 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1794 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
/* Fresh backedge flags start out cleared. */
1795 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1796 node->attr.filter.in_cg[0] = node->in[0];
1798 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets a single interprocedural predecessor; pos is 0-based over
   the data preds, hence the +1 to skip the block slot. */
1801 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1802 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1803 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1804 node->attr.filter.in_cg[pos + 1] = pred;
/* Returns the number of interprocedural predecessors (array length
   minus the block slot). */
1806 int get_Filter_n_cg_preds(ir_node *node) {
1807 assert(node->op == op_Filter && node->attr.filter.in_cg);
1808 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Returns the interprocedural predecessor at position pos. */
1810 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1812 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1814 arity = ARR_LEN(node->attr.filter.in_cg);
1815 assert(pos < arity - 1);
1816 return node->attr.filter.in_cg[pos + 1];
/* Returns the graph a node belongs to by walking to its Block,
   which stores the irg in its attribute.  A Bad predecessor may
   stand in for the block after optimization, hence the second hop. */
1821 get_irn_irg(ir_node *node) {
1822 if (get_irn_op(node) != op_Block)
1823 node = get_nodes_block(node);
1824 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1825 node = get_nodes_block(node);
1826 assert(get_irn_op(node) == op_Block);
1827 return node->attr.block.irg;
1831 /*----------------------------------------------------------------*/
1832 /* Auxiliary routines */
1833 /*----------------------------------------------------------------*/
/* Returns the predecessor if node is a Proj, otherwise node itself.
   Accepts NULL (no assert on node). */
1836 skip_Proj (ir_node *node) {
1837 /* don't assert node !!! */
1838 if (node && is_Proj(node)) {
1839 return get_Proj_pred(node);
/* Resolves Proj(Tuple) chains: if node is a Proj whose (possibly
   nested) predecessor is a Tuple, returns the Tuple entry the Proj
   selects.  Id nodes are skipped along the way; no-op when
   normalization is disabled. */
1846 skip_Tuple (ir_node *node) {
1849 if (!get_opt_normalize()) return node;
1851 node = skip_nop(node);
1852 if (get_irn_op(node) == op_Proj) {
1853 pred = skip_nop(get_Proj_pred(node));
1854 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1855 pred = skip_nop(skip_Tuple(pred));
1856 if (get_irn_op(pred) == op_Tuple)
/* NOTE(review): no bounds check that the proj number is a valid
   Tuple index -- relies on well-formed graphs. */
1857 return get_Tuple_pred(pred, get_Proj_proj(node));
1862 /** returns operand of node if node is a Cast */
/* Accepts NULL; for a Cast returns its (Id-skipped) operand,
   otherwise node is returned unchanged (on a hidden line). */
1863 ir_node *skip_Cast (ir_node *node) {
1864 if (node && get_irn_op(node) == op_Cast) {
1865 return skip_nop(get_irn_n(node, 0));
1872 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1873 than any other approach, as Id chains are resolved and all point to the real node, or
1874 all id's are self loops. */
/* Follows a chain of Id nodes to the real node, path-compressing as
   it goes.  Temporarily self-loops the node (in[0+1] = node) so a
   cyclic Id chain terminates instead of recursing forever; a chain
   that turns out to be a pure cycle is left as a self-loop. */
1876 skip_nop (ir_node *node) {
1877 /* don't assert node !!! */
1879 if (!get_opt_normalize()) return node;
1881 /* Don't use get_Id_pred: We get into an endless loop for
1882 self-referencing Ids. */
1883 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
/* in[0] is the block; in[0+1] is the first (and only) data pred. */
1884 ir_node *rem_pred = node->in[0+1];
1887 assert (get_irn_arity (node) > 0);
/* Break potential cycles before recursing. */
1889 node->in[0+1] = node;
1890 res = skip_nop(rem_pred);
1891 if (res->op == op_Id) /* self-loop */ return node;
/* Path compression: point directly at the resolved node. */
1893 node->in[0+1] = res;
1900 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1901 than any other approach, as Id chains are resolved and all point to the real node, or
1902 all id's are self loops. */
/* Second skip_nop variant with a shortcut for one-link Id chains.
   NOTE(review): this duplicates the definition above -- presumably
   one of the two is disabled by preprocessor lines not visible in
   this view; confirm against the full source.
   NOTE(review): rem_pred is declared but its assignment (likely
   rem_pred = pred) is on a hidden line. */
1904 skip_nop (ir_node *node) {
1906 /* don't assert node !!! */
1908 if (!get_opt_normalize()) return node;
1910 /* Don't use get_Id_pred: We get into an endless loop for
1911 self-referencing Ids. */
1912 if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
1913 ir_node *rem_pred, *res;
/* Chain of length one: return the target directly. */
1915 if (pred->op != op_Id) return pred; /* shortcut */
1918 assert (get_irn_arity (node) > 0);
/* Break potential cycles before recursing. */
1920 node->in[0+1] = node;
1921 res = skip_nop(rem_pred);
1922 if (res->op == op_Id) /* self-loop */ return node;
/* Path compression: point directly at the resolved node. */
1924 node->in[0+1] = res;
/* Alias for skip_nop: follows Id chains to the real node. */
1933 skip_Id (ir_node *node) {
1934 return skip_nop(node);
/* Returns non-zero if node is non-NULL and a Bad node. */
1938 is_Bad (ir_node *node) {
1940 if ((node) && get_irn_opcode(node) == iro_Bad)
/* Returns non-zero if node is anything but a Block. */
1946 is_no_Block (ir_node *node) {
1948 return (get_irn_opcode(node) != iro_Block);
/* Returns non-zero if node is a Block. */
1952 is_Block (ir_node *node) {
1954 return (get_irn_opcode(node) == iro_Block);
1957 /* returns true if node is a Unknown node. */
1959 is_Unknown (ir_node *node) {
1961 return (get_irn_opcode(node) == iro_Unknown);
/* Returns non-zero if node is a Proj; outside the interprocedural
   view Filter nodes count as Projs too. */
1965 is_Proj (const ir_node *node) {
1967 return node->op == op_Proj
1968 || (!interprocedural_view && node->op == op_Filter);
1971 /* Returns true if the operation manipulates control flow. */
1973 is_cfop(ir_node *node) {
1974 return is_cfopcode(get_irn_op(node));
1977 /* Returns true if the operation manipulates interprocedural control flow:
1978 CallBegin, EndReg, EndExcept */
1979 int is_ip_cfop(ir_node *node) {
1980 return is_ip_cfopcode(get_irn_op(node));
/* Returns the graph an interprocedural cfop belongs to. */
1983 ir_graph *get_ip_cfop_irg(ir_node *n) {
1984 return get_irn_irg(n);
1987 /* Returns true if the operation can change the control flow because
1990 is_fragile_op(ir_node *node) {
1991 return is_op_fragile(get_irn_op(node));
1994 /* Returns the memory operand of fragile operations. */
/* Dispatches on the opcode; the case labels for the fragile ops are
   on lines not visible in this view -- for those shown the memory
   operand is pred 0.  Unknown opcodes abort in debug builds. */
1995 ir_node *get_fragile_op_mem(ir_node *node) {
1996 assert(node && is_fragile_op(node));
1998 switch (get_irn_opcode (node)) {
2007 return get_irn_n(node, 0);
2012 assert(0 && "should not be reached");
2017 #ifdef DEBUG_libfirm
/* Debug helper: prints a node's op, mode, number and address,
   followed by its block and all predecessors, to stdout. */
2018 void dump_irn (ir_node *n) {
2019 int i, arity = get_irn_arity(n);
2020 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* Pred -1 is the block the node lives in. */
2022 ir_node *pred = get_irn_n(n, -1);
2023 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2024 get_irn_node_nr(pred), (void *)pred);
2026 printf(" preds: \n");
2027 for (i = 0; i < arity; ++i) {
2028 ir_node *pred = get_irn_n(n, i);
2029 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2030 get_irn_node_nr(pred), (void *)pred);
2034 #else /* DEBUG_libfirm */
/* No-op stub in non-debug builds. */
2035 void dump_irn (ir_node *n) {}
2036 #endif /* DEBUG_libfirm */