3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of a node's predecessors in the in array */
32 #define CALL_PARAM_OFFSET 2
33 #define SEL_INDEX_OFFSET 2
34 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
35 #define END_KEEPALIVE_OFFSET 0
/* Printable names of the pnc (comparison relation) constants, indexed by
   the pnc value.  NOTE(review): only the first 14 entries are visible
   here; the table must extend up to "True" -- confirm in full source. */
37 static const char *pnc_name_arr [] = {
38 "False", "Eq", "Lt", "Le",
39 "Gt", "Ge", "Lg", "Leg", "Uo",
40 "Ue", "Ul", "Ule", "Ug", "Uge",
45 * returns the pnc name from a pnc constant
/* Returns the printable name of comparison relation pnc.
   No bounds check: pnc must be a valid index into pnc_name_arr. */
47 const char *get_pnc_string(int pnc) {
48 return pnc_name_arr[pnc];
52 * Calculates the negated pnc condition.
/* Returns the pnc constant denoting the logical negation of relation pnc.
   Negation swaps the ordered and unordered variants of each relation
   (e.g. Lt <-> Uge), and the table is an involution: every pair below
   appears in both directions. */
55 get_negated_pnc(int pnc) {
57 case False: return True; break;
58 case Eq: return Ne; break;
59 case Lt: return Uge; break;
60 case Le: return Ug; break;
61 case Gt: return Ule; break;
62 case Ge: return Ul; break;
63 case Lg: return Ue; break;
64 case Leg: return Uo; break;
65 case Uo: return Leg; break;
66 case Ue: return Lg; break;
67 case Ul: return Ge; break;
68 case Ule: return Gt; break;
69 case Ug: return Le; break;
70 case Uge: return Lt; break;
71 case Ne: return Eq; break;
72 case True: return False; break;
/* not reached for any valid pnc value; placates compilers that warn
   about a missing return */
74 return 99; /* to shut up gcc */
/* Printable names for the Projs of the Start node, in projection order. */
77 const char *pns_name_arr [] = {
78 "initial_exec", "global_store",
79 "frame_base", "globals", "args"
/* Printable names for the symconst_kind values (see get_SymConst_kind). */
82 const char *symconst_name_arr [] = {
83 "type_tag", "size", "linkage_ptr_info"
93 * Create a new irnode in irg, with an op, mode, arity and
94 * some incoming irnodes.
95 * If arity is negative, a node with a dynamic array is created.
/* Allocates a new ir_node on the graph's obstack.  The node gets op, mode
   and the arity predecessors from in; slot 0 of res->in is reserved for
   the block predecessor.  A negative arity requests a flexible
   (growable) in array instead of an obstack-fixed one. */
98 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
99        int arity, ir_node **in)
/* total size = header up to the attr union + the op-specific attribute size */
102 int node_size = offsetof (ir_node, attr) + op->attr_size;
104 assert(irg && op && mode);
105 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 res->kind = k_ir_node;
/* dynamic arity: flexible array that can grow (e.g. while a Block matures) */
113 res->in = NEW_ARR_F (ir_node *, 1);  /* 1: space for block */
/* fixed arity: obstack array; predecessors are copied behind the block slot */
115 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
116 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
119 set_irn_dbg_info(res, db);
123 res->node_nr = get_irp_new_node_nr();
131 /* Copies all attributes stored in the old node to the new node.
132 Assumes both have the same opcode and sufficient size. */
/* Copies the opcode-specific attribute block from old_node to new_node.
   Both nodes must have the same ir_op; the byte count to copy is taken
   from that op's registered attribute size. */
134 copy_attrs (const ir_node *old_node, ir_node *new_node) {
135 assert(get_irn_op(old_node) == get_irn_op(new_node));
136 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
139 /*-- getting some parameters from ir_nodes --*/
/* Out-of-line definitions of accessors that also exist as __-prefixed
   macro/inline versions; the parentheses around the function name
   suppress macro expansion of the name here. */
142 (is_ir_node)(const void *thing) {
143 return __is_ir_node(thing);
/* arity excluding the block predecessor; the intra/inter variants differ
   in how interprocedural (call-graph) edges are counted */
147 (get_irn_intra_arity)(const ir_node *node) {
148 return __get_irn_intra_arity(node);
152 (get_irn_inter_arity)(const ir_node *node) {
153 return __get_irn_inter_arity(node);
157 (get_irn_arity)(const ir_node *node) {
158 return __get_irn_arity(node);
161 /* Returns the array with ins. This array is shifted with respect to the
162 array accessed by get_irn_n: The block operand is at position 0 not -1.
163 (@@@ This should be changed.)
164 The order of the predecessors in this array is not guaranteed, except that
165 lists of operands as predecessors of Block or arguments of a Call are
/* Returns the raw in array (block at index 0, predecessors from index 1).
   In the interprocedural view, Filter nodes and Blocks with call-graph
   predecessors return their in_cg array instead. */
168 get_irn_in (const ir_node *node) {
170 if (interprocedural_view) { /* handle Filter and Block specially */
171 if (get_irn_opcode(node) == iro_Filter) {
/* a Filter in the interprocedural view must have its in_cg set up */
172 assert(node->attr.filter.in_cg);
173 return node->attr.filter.in_cg;
174 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
175 return node->attr.block.in_cg;
177 /* else fall through */
/* Replaces the predecessor array of node by the arity nodes in in.
   arr points at whichever array is active: the interprocedural in_cg
   array (Filter, or Block with call-graph preds) or the normal in array.
   If the arity changes, a new array is allocated on the graph's obstack,
   the block slot ([0]) is preserved, and the backedge info is rebuilt. */
183 set_irn_in (ir_node *node, int arity, ir_node **in) {
186 if (interprocedural_view) { /* handle Filter and Block specially */
187 if (get_irn_opcode(node) == iro_Filter) {
188 assert(node->attr.filter.in_cg);
189 arr = &node->attr.filter.in_cg;
190 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
191 arr = &node->attr.block.in_cg;
/* ARR_LEN includes the block slot, hence the - 1 */
198 if (arity != ARR_LEN(*arr) - 1) {
199 ir_node * block = (*arr)[0];
200 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
/* backedge array must be resized consistently with the new arity */
203 fix_backedges(current_ir_graph->obst, node);
204 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
/* Out-of-line versions of the predecessor accessors (n == -1 yields the
   block); see the __-prefixed macro/inline definitions for semantics. */
208 (get_irn_intra_n)(ir_node *node, int n) {
209 return __get_irn_intra_n (node, n);
213 (get_irn_inter_n)(ir_node *node, int n) {
214 return __get_irn_inter_n (node, n);
218 (get_irn_n)(ir_node *node, int n) {
219 return __get_irn_n (node, n);
/* Sets the n-th predecessor of node to in; n == -1 sets the block.
   The in array stores the block at index 0, so slot n+1 is written.
   Filter nodes keep the block predecessor in both the normal and the
   interprocedural (in_cg) arrays; other predecessors are written to
   whichever array the current view selects. */
223 set_irn_n (ir_node *node, int n, ir_node *in) {
224 assert(node && -1 <= n && n < get_irn_arity(node));
225 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
226 /* Change block pred in both views! */
227 node->in[n + 1] = in;
228 assert(node->attr.filter.in_cg);
229 node->attr.filter.in_cg[n + 1] = in;
232 if (interprocedural_view) { /* handle Filter and Block specially */
233 if (get_irn_opcode(node) == iro_Filter) {
234 assert(node->attr.filter.in_cg);
235 node->attr.filter.in_cg[n + 1] = in;
237 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
238 node->attr.block.in_cg[n + 1] = in;
241 /* else fall through */
243 node->in[n + 1] = in;
/* Plain accessors for the mode, op, visited counter, link pointer and
   node number of an ir_node.  The parenthesized names are out-of-line
   versions of the corresponding __-prefixed macros/inlines. */
247 (get_irn_mode)(const ir_node *node) {
248 return __get_irn_mode(node);
252 (set_irn_mode)(ir_node *node, ir_mode *mode)
254 __set_irn_mode(node, mode);
258 get_irn_modecode (const ir_node *node)
261 return node->mode->code;
264 /** Gets the string representation of the mode .*/
266 get_irn_modename (const ir_node *node)
269 return get_mode_name(node->mode);
273 get_irn_modeident (const ir_node *node)
276 return get_mode_ident(node->mode);
280 (get_irn_op)(const ir_node *node)
282 return __get_irn_op(node);
285 /* should be private to the library: */
287 set_irn_op (ir_node *node, ir_op *op)
294 (get_irn_opcode)(const ir_node *node)
296 return __get_irn_opcode(node);
300 get_irn_opname (const ir_node *node)
/* a Phi without predecessors during construction is reported as "Phi0" */
303 if ((get_irn_op((ir_node *)node) == op_Phi) &&
304 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
305 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
306 return get_id_str(node->op->name);
310 get_irn_opident (const ir_node *node)
313 return node->op->name;
317 (get_irn_visited)(const ir_node *node)
319 return __get_irn_visited(node);
323 (set_irn_visited)(ir_node *node, unsigned long visited)
325 __set_irn_visited(node, visited);
329 (mark_irn_visited)(ir_node *node) {
330 __mark_irn_visited(node);
334 (irn_not_visited)(const ir_node *node) {
335 return __irn_not_visited(node);
339 (irn_visited)(const ir_node *node) {
340 return __irn_visited(node);
344 (set_irn_link)(ir_node *node, void *link) {
345 __set_irn_link(node, link);
349 (get_irn_link)(const ir_node *node) {
350 return __get_irn_link(node);
353 /* Outputs a unique number for this node */
355 get_irn_node_nr(const ir_node *node) {
358 return node->node_nr;
/* Accessors for the opcode-specific attribute unions.  Each asserts the
   expected ir_op.  The skip_tid() calls resolve type-id forwarding and
   cache the resolved type back into the attribute. */
365 get_irn_const_attr (ir_node *node)
367 assert (node->op == op_Const);
368 return node->attr.con;
372 get_irn_proj_attr (ir_node *node)
374 assert (node->op == op_Proj);
375 return node->attr.proj;
379 get_irn_alloc_attr (ir_node *node)
381 assert (node->op == op_Alloc);
386 get_irn_free_attr (ir_node *node)
388 assert (node->op == op_Free);
389 return node->attr.f = skip_tid(node->attr.f);
393 get_irn_symconst_attr (ir_node *node)
395 assert (node->op == op_SymConst);
400 get_irn_call_attr (ir_node *node)
402 assert (node->op == op_Call);
403 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
/* FuncCall shares the call attribute layout with Call */
407 get_irn_funccall_attr (ir_node *node)
409 assert (node->op == op_FuncCall);
410 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
414 get_irn_sel_attr (ir_node *node)
416 assert (node->op == op_Sel);
421 get_irn_phi_attr (ir_node *node)
423 assert (node->op == op_Phi);
424 return node->attr.phi0_pos;
428 get_irn_block_attr (ir_node *node)
430 assert (node->op == op_Block);
431 return node->attr.block;
434 /** manipulate fields of individual nodes **/
436 /* this works for all except Block */
/* The block a node belongs to is predecessor -1; a Block itself has no
   containing block, hence the assert. */
438 get_nodes_Block (ir_node *node) {
439 assert (!(node->op == op_Block));
440 return get_irn_n(node, -1);
444 set_nodes_Block (ir_node *node, ir_node *block) {
445 assert (!(node->op == op_Block));
446 set_irn_n(node, -1, block);
449 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
450  * from Start. If so returns frame type, else Null. */
451 type *is_frame_pointer(ir_node *n) {
452 if ((get_irn_op(n) == op_Proj) &&
453 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
454 ir_node *start = get_Proj_pred(n);
455 if (get_irn_op(start) == op_Start) {
456 return get_irg_frame_type(get_irn_irg(start));
462 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
463  * from Start. If so returns global type, else Null. */
464 type *is_globals_pointer(ir_node *n) {
465 if ((get_irn_op(n) == op_Proj) &&
466 (get_Proj_proj(n) == pn_Start_P_globals)) {
467 ir_node *start = get_Proj_pred(n);
468 if (get_irn_op(start) == op_Start) {
469 return get_glob_type();
475 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
476  * from Start. If so returns 1, else 0. */
477 int is_value_arg_pointer(ir_node *n) {
478 if ((get_irn_op(n) == op_Proj) &&
479 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
480 (get_irn_op(get_Proj_pred(n)) == op_Start))
485 /* Returns an array with the predecessors of the Block. Depending on
486    the implementation of the graph data structure this can be a copy of
487    the internal representation of predecessors as well as the internal
488    array itself. Therefore writing to this array might obstruct the ir. */
490 get_Block_cfgpred_arr (ir_node *node)
/* skip slot 0 of the in array (reserved for the block pointer) */
492 assert ((node->op == op_Block));
493 return (ir_node **)&(get_irn_in(node)[1]);
498 get_Block_n_cfgpreds (ir_node *node) {
499 assert ((node->op == op_Block));
500 return (get_irn_arity(node));
504 get_Block_cfgpred (ir_node *node, int pos) {
506 assert (node->op == op_Block);
507 assert(-1 <= pos && pos < get_irn_arity(node));
508 return get_irn_n(node, pos);
512 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
513 assert (node->op == op_Block);
514 set_irn_n(node, pos, pred);
/* matured: block construction finished, no more predecessors may be added */
518 get_Block_matured (ir_node *node) {
519 assert (node->op == op_Block);
520 return node->attr.block.matured;
524 set_Block_matured (ir_node *node, bool matured) {
525 assert (node->op == op_Block);
526 node->attr.block.matured = matured;
/* block_visited is a per-block traversal counter, separate from the
   per-node visited counter */
529 get_Block_block_visited (ir_node *node) {
530 assert (node->op == op_Block);
531 return node->attr.block.block_visited;
535 set_Block_block_visited (ir_node *node, unsigned long visit) {
536 assert (node->op == op_Block);
537 node->attr.block.block_visited = visit;
540 /* For this current_ir_graph must be set. */
542 mark_Block_block_visited (ir_node *node) {
543 assert (node->op == op_Block);
544 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
548 Block_not_block_visited(ir_node *node) {
549 assert (node->op == op_Block);
550 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
/* graph_arr caches the current value nodes per variable position during
   construction; index is shifted by one (pos+1) like the in array */
554 get_Block_graph_arr (ir_node *node, int pos) {
555 assert (node->op == op_Block);
556 return node->attr.block.graph_arr[pos+1];
560 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
561 assert (node->op == op_Block);
562 node->attr.block.graph_arr[pos+1] = value;
565 /* handler handling for Blocks */
/* Sets/gets the exception-handler entry block stored in a Block's
   attributes; both the block and the handler must be Blocks. */
567 set_Block_handler (ir_node *block, ir_node *handler) {
568 assert ((block->op == op_Block));
569 assert ((handler->op == op_Block));
570 block->attr.block.handler_entry = handler;
574 get_Block_handler (ir_node *block) {
575 assert ((block->op == op_Block));
576 return (block->attr.block.handler_entry);
579 /* handler handling for Nodes */
/* Convenience wrappers: the handler of a node is the handler of its block. */
581 set_Node_handler (ir_node *node, ir_node *handler) {
582 set_Block_handler (get_nodes_Block (node), handler);
586 get_Node_handler (ir_node *node) {
587 return (get_Block_handler (get_nodes_Block (node)));
590 /* exc_t handling for Blocks */
/* Sets/gets the exception kind (exc_t) recorded on a Block. */
591 void set_Block_exc (ir_node *block, exc_t exc) {
592 assert ((block->op == op_Block));
593 block->attr.block.exc = exc;
596 exc_t get_Block_exc (ir_node *block) {
597 assert ((block->op == op_Block));
598 return (block->attr.block.exc);
601 /* exc_t handling for Nodes */
/* Convenience wrappers: the exception kind of a node is that of its block. */
602 void set_Node_exc (ir_node *node, exc_t exc) {
603 set_Block_exc (get_nodes_Block (node), exc);
606 exc_t get_Node_exc (ir_node *node) {
607 return (get_Block_exc (get_nodes_Block (node)));
/* Installs the interprocedural (call-graph) predecessor array of a Block.
   (Re)allocates in_cg (slot 0 unused, kept NULL) and a matching backedge
   array when the arity changes, then copies the arity predecessors in.
   fix_backedges() depends on interprocedural_view, so it is forced on
   around the call and restored afterwards. */
611 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
612 assert(node->op == op_Block);
613 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
614 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
615 node->attr.block.in_cg[0] = NULL;
616 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
618 /* Fix backedge array.  fix_backedges operates depending on
619    interprocedural_view. */
620 bool ipv = interprocedural_view;
621 interprocedural_view = true;
622 fix_backedges(current_ir_graph->obst, node);
623 interprocedural_view = ipv;
626 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Accessors for the interprocedural predecessor array of a Block.
   in_cg[0] is unused, so external position pos maps to in_cg[pos + 1]. */
629 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
630 assert(node->op == op_Block &&
631 node->attr.block.in_cg &&
632 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
633 node->attr.block.in_cg[pos + 1] = pred;
636 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
637 assert(node->op == op_Block);
638 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
641 int get_Block_cg_n_cfgpreds(ir_node * node) {
642 assert(node->op == op_Block);
643 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
646 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
647 assert(node->op == op_Block && node->attr.block.in_cg);
648 return node->attr.block.in_cg[pos + 1];
/* detaches the call-graph predecessor array; the obstack storage itself
   is not reclaimed here */
651 void remove_Block_cg_cfgpred_arr(ir_node * node) {
652 assert(node->op == op_Block);
653 node->attr.block.in_cg = NULL;
656 /* Start references the irg it is in. */
658 get_Start_irg(ir_node *node) {
659 return get_irn_irg(node);
/* deprecated: the irg association is managed via set_irn_irg, hence the
   unconditional assert below */
663 set_Start_irg(ir_node *node, ir_graph *irg) {
664 assert(node->op == op_Start);
665 assert(is_ir_graph(irg));
666 assert(0 && " Why set irg? -- use set_irn_irg");
/* End keep-alive edges: extra predecessors of the End node that keep
   otherwise unreachable nodes alive; they start at END_KEEPALIVE_OFFSET. */
670 get_End_n_keepalives(ir_node *end) {
671 assert (end->op == op_End);
672 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
676 get_End_keepalive(ir_node *end, int pos) {
677 assert (end->op == op_End);
678 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
/* End uses a flexible in array, so keep-alives can be appended */
682 add_End_keepalive (ir_node *end, ir_node *ka) {
683 assert (end->op == op_End);
684 ARR_APP1 (ir_node *, end->in, ka);
688 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
689 assert (end->op == op_End);
690 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
694 free_End (ir_node *end) {
695 assert (end->op == op_End);
697 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
698 end->in = NULL;   /* @@@ make sure we get an error if we use the
699            in array afterwards ... */
702 ir_graph *get_EndReg_irg (ir_node *end) {
703 return get_irn_irg(end);
706 ir_graph *get_EndExcept_irg (ir_node *end) {
707 return get_irn_irg(end);
711 > Implementing the case construct (which is where the constant Proj node is
712 > important) involves far more than simply determining the constant values.
713 > We could argue that this is more properly a function of the translator from
714 > Firm to the target machine. That could be done if there was some way of
715 > projecting "default" out of the Cond node.
716 I know it's complicated.
717 Basically there are two problems:
718 - determining the gaps between the projs
719 - determining the biggest case constant to know the proj number for
721 I see several solutions:
722 1. Introduce a ProjDefault node. Solves both problems.
723 This means to extend all optimizations executed during construction.
724 2. Give the Cond node for switch two flavors:
725 a) there are no gaps in the projs (existing flavor)
726 b) gaps may exist, default proj is still the Proj with the largest
727 projection number. This covers also the gaps.
728 3. Fix the semantic of the Cond to that of 2b)
730 Solution 2 seems to be the best:
731 Computing the gaps in the Firm representation is not too hard, i.e.,
732 libFIRM can implement a routine that transforms between the two
733 flavours. This is also possible for 1) but 2) does not require to
734 change any existing optimization.
735 Further it should be far simpler to determine the biggest constant than
737 I don't want to choose 3) as 2a) seems to have advantages for
738 dataflow analysis and 3) does not allow to convert the representation to
/* Cond accessors: the selector is the single data predecessor (index 0);
   kind and default_proj live in the Cond attribute. */
742 get_Cond_selector (ir_node *node) {
743 assert (node->op == op_Cond);
744 return get_irn_n(node, 0);
748 set_Cond_selector (ir_node *node, ir_node *selector) {
749 assert (node->op == op_Cond);
750 set_irn_n(node, 0, selector);
754 get_Cond_kind (ir_node *node) {
755 assert (node->op == op_Cond);
756 return node->attr.c.kind;
760 set_Cond_kind (ir_node *node, cond_kind kind) {
761 assert (node->op == op_Cond);
762 node->attr.c.kind = kind;
/* projection number used as the default case of a switch Cond */
766 get_Cond_defaultProj (ir_node *node) {
767 assert (node->op == op_Cond);
768 return node->attr.c.default_proj;
/* Return accessors: memory is predecessor 0, the result values start at
   RETURN_RESULT_OFFSET (1, since mem is not a result). */
772 get_Return_mem (ir_node *node) {
773 assert (node->op == op_Return);
774 return get_irn_n(node, 0);
778 set_Return_mem (ir_node *node, ir_node *mem) {
779 assert (node->op == op_Return);
780 set_irn_n(node, 0, mem);
784 get_Return_n_ress (ir_node *node) {
785 assert (node->op == op_Return);
786 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
790 get_Return_res_arr (ir_node *node)
792 assert ((node->op == op_Return));
793 if (get_Return_n_ress(node) > 0)
/* +1 skips the block slot of the raw in array */
794 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
801 set_Return_n_res (ir_node *node, int results) {
802 assert (node->op == op_Return);
807 get_Return_res (ir_node *node, int pos) {
808 assert (node->op == op_Return);
809 assert (get_Return_n_ress(node) > pos);
810 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
814 set_Return_res (ir_node *node, int pos, ir_node *res){
815 assert (node->op == op_Return);
816 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
/* Raise accessors: memory at predecessor 0, exception pointer at 1.
   NOTE(review): "exo_ptr" looks like a typo for "exc_ptr", but the names
   are part of the public interface and are kept. */
820 get_Raise_mem (ir_node *node) {
821 assert (node->op == op_Raise);
822 return get_irn_n(node, 0);
826 set_Raise_mem (ir_node *node, ir_node *mem) {
827 assert (node->op == op_Raise);
828 set_irn_n(node, 0, mem);
832 get_Raise_exo_ptr (ir_node *node) {
833 assert (node->op == op_Raise);
834 return get_irn_n(node, 1);
838 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
839 assert (node->op == op_Raise);
840 set_irn_n(node, 1, exo_ptr);
/* Const accessors: the constant value (tarval) and its source language
   type.  set_Const_type checks that the type matches the node's mode and,
   for entity tarvals, that it is a pointer to the entity's type. */
843 tarval *get_Const_tarval (ir_node *node) {
844 assert (node->op == op_Const);
845 return node->attr.con.tv;
849 set_Const_tarval (ir_node *node, tarval *con) {
850 assert (node->op == op_Const);
851 node->attr.con.tv = con;
855 /* The source language type.  Must be an atomic type.  Mode of type must
856    be mode of node. For tarvals from entities type must be pointer to
859 get_Const_type (ir_node *node) {
860 assert (node->op == op_Const);
861 return node->attr.con.tp;
865 set_Const_type (ir_node *node, type *tp) {
866 assert (node->op == op_Const);
/* unknown_type bypasses all consistency checks */
867 if (tp != unknown_type) {
868 assert (is_atomic_type(tp));
869 assert (get_type_mode(tp) == get_irn_mode(node));
870 assert (!tarval_is_entity(get_Const_tarval(node)) ||
871 (is_pointer_type(tp) &&
872 (get_pointer_points_to_type(tp) ==
873 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
876 node->attr.con.tp = tp;
/* SymConst accessors.  The attribute holds a kind tag (type_tag, size or
   linkage_ptr_info) and a union of type / ident; the type accessors are
   only valid for type_tag and size kinds, the ptrinfo accessors only for
   linkage_ptr_info. */
881 get_SymConst_kind (const ir_node *node) {
882 assert (node->op == op_SymConst);
883 return node->attr.i.num;
887 set_SymConst_kind (ir_node *node, symconst_kind num) {
888 assert (node->op == op_SymConst);
889 node->attr.i.num = num;
893 get_SymConst_type (ir_node *node) {
894 assert (   (node->op == op_SymConst)
895         && (   get_SymConst_kind(node) == type_tag
896             || get_SymConst_kind(node) == size));
/* resolve type-id forwarding and cache the result */
897 return node->attr.i.tori.typ = skip_tid(node->attr.i.tori.typ);
901 set_SymConst_type (ir_node *node, type *tp) {
902 assert (   (node->op == op_SymConst)
903         && (   get_SymConst_kind(node) == type_tag
904             || get_SymConst_kind(node) == size));
905 node->attr.i.tori.typ = tp;
909 get_SymConst_ptrinfo (ir_node *node) {
910 assert (   (node->op == op_SymConst)
911         && (get_SymConst_kind(node) == linkage_ptr_info));
912 return node->attr.i.tori.ptrinfo;
916 set_SymConst_ptrinfo (ir_node *node, ident *ptrinfo) {
917 assert (   (node->op == op_SymConst)
918         && (get_SymConst_kind(node) == linkage_ptr_info));
919 node->attr.i.tori.ptrinfo = ptrinfo;
923 get_SymConst_type_or_id (ir_node *node) {
924 assert (node->op == op_SymConst);
925 return &(node->attr.i.tori);
929 set_SymConst_type_or_id (ir_node *node, type_or_id_p tori) {
930 assert (node->op == op_SymConst);
931 memcpy (&(node->attr.i.tori), tori, sizeof(type_or_id));
/* Sel accessors: memory at predecessor 0, the compound pointer at 1, and
   array indexes from SEL_INDEX_OFFSET (2) on; the selected entity is an
   attribute. */
935 get_Sel_mem (ir_node *node) {
936 assert (node->op == op_Sel);
937 return get_irn_n(node, 0);
941 set_Sel_mem (ir_node *node, ir_node *mem) {
942 assert (node->op == op_Sel);
943 set_irn_n(node, 0, mem);
947 get_Sel_ptr (ir_node *node) {
948 assert (node->op == op_Sel);
949 return get_irn_n(node, 1);
953 set_Sel_ptr (ir_node *node, ir_node *ptr) {
954 assert (node->op == op_Sel);
955 set_irn_n(node, 1, ptr);
959 get_Sel_n_indexs (ir_node *node) {
960 assert (node->op == op_Sel);
961 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
965 get_Sel_index_arr (ir_node *node)
967 assert ((node->op == op_Sel));
968 if (get_Sel_n_indexs(node) > 0)
/* +1 skips the block slot of the raw in array */
969 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
975 get_Sel_index (ir_node *node, int pos) {
976 assert (node->op == op_Sel);
977 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
981 set_Sel_index (ir_node *node, int pos, ir_node *index) {
982 assert (node->op == op_Sel);
983 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
987 get_Sel_entity (ir_node *node) {
988 assert (node->op == op_Sel);
989 return node->attr.s.ent;
993 set_Sel_entity (ir_node *node, entity *ent) {
994 assert (node->op == op_Sel);
995 node->attr.s.ent = ent;
999 get_InstOf_ent (ir_node *node) {
1000 assert (node->op = op_InstOf);
1001 return (node->attr.io.ent);
1005 set_InstOf_ent (ir_node *node, type *ent) {
1006 assert (node->op = op_InstOf);
1007 node->attr.io.ent = ent;
1011 get_InstOf_store (ir_node *node) {
1012 assert (node->op = op_InstOf);
1013 return (get_irn_n (node, 0));
1017 set_InstOf_store (ir_node *node, ir_node *obj) {
1018 assert (node->op = op_InstOf);
1019 set_irn_n (node, 0, obj);
1023 get_InstOf_obj (ir_node *node) {
1024 assert (node->op = op_InstOf);
1025 return (get_irn_n (node, 1));
1029 set_InstOf_obj (ir_node *node, ir_node *obj) {
1030 assert (node->op = op_InstOf);
1031 set_irn_n (node, 1, obj);
1035 /* For unary and binary arithmetic operations the access to the
1036 operands can be factored out. Left is the first, right the
1037 second arithmetic value as listed in tech report 0999-33.
1038 unops are: Minus, Abs, Not, Conv, Cast
1039 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1040 Shr, Shrs, Rotate, Cmp */
/* Call accessors: memory at predecessor 0, called-function pointer at 1,
   parameters from CALL_PARAM_OFFSET (2) on; the called method type is an
   attribute. */
1044 get_Call_mem (ir_node *node) {
1045 assert (node->op == op_Call);
1046 return get_irn_n(node, 0);
1050 set_Call_mem (ir_node *node, ir_node *mem) {
1051 assert (node->op == op_Call);
1052 set_irn_n(node, 0, mem);
1056 get_Call_ptr (ir_node *node) {
1057 assert (node->op == op_Call);
1058 return get_irn_n(node, 1);
1062 set_Call_ptr (ir_node *node, ir_node *ptr) {
1063 assert (node->op == op_Call);
1064 set_irn_n(node, 1, ptr);
1068 get_Call_param_arr (ir_node *node) {
1069 assert (node->op == op_Call);
/* +1 skips the block slot of the raw in array */
1070 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1074 get_Call_n_params (ir_node *node)  {
1075 assert (node->op == op_Call);
1076 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1080 get_Call_arity (ir_node *node) {
1081 assert (node->op == op_Call);
1082 return get_Call_n_params(node);
1086 set_Call_arity (ir_node *node, ir_node *arity) {
1087 assert (node->op == op_Call);
1092 get_Call_param (ir_node *node, int pos) {
1093 assert (node->op == op_Call);
1094 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1098 set_Call_param (ir_node *node, int pos, ir_node *param) {
1099 assert (node->op == op_Call);
1100 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1104 get_Call_type (ir_node *node) {
1105 assert (node->op == op_Call);
/* resolve type-id forwarding and cache the result */
1106 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1110 set_Call_type (ir_node *node, type *tp) {
1111 assert (node->op == op_Call);
1112 assert (is_method_type(tp));
1113 node->attr.call.cld_tp = tp;
/* Callee information of a Call: an optional array of statically known
   callee entities, filled in by call-graph analysis.  callee_arr == NULL
   means "no callee information available". */
1116 int Call_has_callees(ir_node *node) {
1117 return (node->attr.call.callee_arr != NULL);
1120 int get_Call_n_callees(ir_node * node) {
1121 assert(node->op == op_Call && node->attr.call.callee_arr);
1122 return ARR_LEN(node->attr.call.callee_arr);
1125 entity * get_Call_callee(ir_node * node, int pos) {
1126 assert(node->op == op_Call && node->attr.call.callee_arr);
1127 return node->attr.call.callee_arr[pos];
/* reallocates only when the size changes, then copies the n entities */
1130 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1131 assert(node->op == op_Call);
1132 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1133 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1135 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1138 void remove_Call_callee_arr(ir_node * node) {
1139 assert(node->op == op_Call);
1140 node->attr.call.callee_arr = NULL;
/* CallBegin accessors: the function pointer is predecessor 0; the
   attribute links back to the Call node this CallBegin belongs to. */
1143 ir_node * get_CallBegin_ptr (ir_node *node) {
1144 assert(node->op == op_CallBegin);
1145 return get_irn_n(node, 0);
1147 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1148 assert(node->op == op_CallBegin);
1149 set_irn_n(node, 0, ptr);
1151 ir_graph * get_CallBegin_irg (ir_node *node) {
1152 return get_irn_irg(node);
1154 ir_node * get_CallBegin_call (ir_node *node) {
1155 assert(node->op == op_CallBegin);
1156 return node->attr.callbegin.call;
1158 void set_CallBegin_call (ir_node *node, ir_node *call) {
1159 assert(node->op == op_CallBegin);
1160 node->attr.callbegin.call = call;
/* FuncCall accessors: the function pointer is predecessor 0.
   NOTE(review): FuncCall has no memory predecessor, yet the parameter
   accessors below reuse CALL_PARAM_OFFSET (2) -- confirm the intended
   predecessor layout against the node constructors. */
1164 get_FuncCall_ptr (ir_node *node) {
1165 assert (node->op == op_FuncCall);
1166 return get_irn_n(node, 0);
1170 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1171 assert (node->op == op_FuncCall);
1172 set_irn_n(node, 0, ptr);
1176 get_FuncCall_param_arr (ir_node *node) {
1177 assert (node->op == op_FuncCall);
1178 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET];
/* Remaining FuncCall accessors; parameter positions are shifted by
   CALL_PARAM_OFFSET, the method type is stored in the shared call
   attribute (skip_tid resolves and caches type-id forwarding). */
1182 get_FuncCall_n_params (ir_node *node)  {
1183 assert (node->op == op_FuncCall);
1184 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1188 get_FuncCall_arity (ir_node *node) {
1189 assert (node->op == op_FuncCall);
1190 return get_FuncCall_n_params(node);
1194 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1195 assert (node->op == op_FuncCall);
1200 get_FuncCall_param (ir_node *node, int pos) {
1201 assert (node->op == op_FuncCall);
1202 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1206 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1207 assert (node->op == op_FuncCall);
1208 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1212 get_FuncCall_type (ir_node *node) {
1213 assert (node->op == op_FuncCall);
1214 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1218 set_FuncCall_type (ir_node *node, type *tp) {
1219 assert (node->op == op_FuncCall);
1220 assert (is_method_type(tp));
1221 node->attr.call.cld_tp = tp;
/* Callee information of a FuncCall (same scheme as for Call):
   callee_arr == NULL means "no callee information available". */
1224 int FuncCall_has_callees(ir_node *node) {
1225 return (node->attr.call.callee_arr != NULL);
1228 int get_FuncCall_n_callees(ir_node * node) {
1229 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1230 return ARR_LEN(node->attr.call.callee_arr);
1233 entity * get_FuncCall_callee(ir_node * node, int pos) {
1234 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1235 return node->attr.call.callee_arr[pos];
1238 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1239 assert(node->op == op_FuncCall);
1240 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1241 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1243 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
/* Detaches the callee array of a FuncCall; the obstack storage itself is
   not reclaimed here. */
1246 void remove_FuncCall_callee_arr(ir_node * node) {
1247 assert(node->op == op_FuncCall);
1248 node->attr.call.callee_arr = NULL;
/* Macro bodies generating the standard left/right accessors for binary
   ops and the op accessor for unary ops.  op->op_index gives the
   position of the first data operand; for binops the right operand
   follows at op_index + 1.  (The #define lines introducing these macros
   are not visible in this excerpt.) */
1253   ir_node * get_##OP##_left(ir_node *node) {      \
1254     assert(node->op == op_##OP);                  \
1255     return get_irn_n(node, node->op->op_index);   \
1257   void set_##OP##_left(ir_node *node, ir_node *left) {  \
1258     assert(node->op == op_##OP);                  \
1259     set_irn_n(node, node->op->op_index, left);    \
1261   ir_node *get_##OP##_right(ir_node *node) {      \
1262     assert(node->op == op_##OP);                  \
1263     return get_irn_n(node, node->op->op_index + 1);  \
1265   void set_##OP##_right(ir_node *node, ir_node *right) {  \
1266     assert(node->op == op_##OP);                  \
1267     set_irn_n(node, node->op->op_index + 1, right);  \
1271   ir_node *get_##OP##_op(ir_node *node) {         \
1272     assert(node->op == op_##OP);                  \
1273     return get_irn_n(node, node->op->op_index);   \
1275   void set_##OP##_op (ir_node *node, ir_node *op) {  \
1276     assert(node->op == op_##OP);                  \
1277     set_irn_n(node, node->op->op_index, op);      \
/* Memory accessors for the memory-carrying arithmetic ops
   (Quot, DivMod, Div, Mod): memory is predecessor 0. */
1287 get_Quot_mem (ir_node *node) {
1288 assert (node->op == op_Quot);
1289 return get_irn_n(node, 0);
1293 set_Quot_mem (ir_node *node, ir_node *mem) {
1294 assert (node->op == op_Quot);
1295 set_irn_n(node, 0, mem);
1301 get_DivMod_mem (ir_node *node) {
1302 assert (node->op == op_DivMod);
1303 return get_irn_n(node, 0);
1307 set_DivMod_mem (ir_node *node, ir_node *mem) {
1308 assert (node->op == op_DivMod);
1309 set_irn_n(node, 0, mem);
1315 get_Div_mem (ir_node *node) {
1316 assert (node->op == op_Div);
1317 return get_irn_n(node, 0);
1321 set_Div_mem (ir_node *node, ir_node *mem) {
1322 assert (node->op == op_Div);
1323 set_irn_n(node, 0, mem);
1329 get_Mod_mem (ir_node *node) {
1330 assert (node->op == op_Mod);
1331 return get_irn_n(node, 0);
1335 set_Mod_mem (ir_node *node, ir_node *mem) {
1336 assert (node->op == op_Mod);
1337 set_irn_n(node, 0, mem);
/* Cast accessors: the target type of the cast is stored as an attribute. */
1354 get_Cast_type (ir_node *node) {
1355 assert (node->op == op_Cast);
1356 return node->attr.cast.totype;
1360 set_Cast_type (ir_node *node, type *to_tp) {
1361 assert (node->op == op_Cast);
1362 node->attr.cast.totype = to_tp;
/* Generic unop/binop accessors, dispatching on the op's declared arity
   (oparity_unary / oparity_binary) and operand index.  The asserts in
   the else branches fire when a node of the wrong arity class is passed. */
1366 is_unop (ir_node *node) {
1367 return (node->op->opar == oparity_unary);
1371 get_unop_op (ir_node *node) {
1372 if (node->op->opar == oparity_unary)
1373 return get_irn_n(node, node->op->op_index);
1375 assert(node->op->opar == oparity_unary);
1380 set_unop_op (ir_node *node, ir_node *op) {
1381 if (node->op->opar == oparity_unary)
1382 set_irn_n(node, node->op->op_index, op);
1384 assert(node->op->opar == oparity_unary);
1388 is_binop (ir_node *node) {
1389 return (node->op->opar == oparity_binary);
1393 get_binop_left (ir_node *node) {
1394 if (node->op->opar == oparity_binary)
1395 return get_irn_n(node, node->op->op_index);
1397 assert(node->op->opar == oparity_binary);
1402 set_binop_left (ir_node *node, ir_node *left) {
1403 if (node->op->opar == oparity_binary)
1404 set_irn_n(node, node->op->op_index, left);
1406 assert (node->op->opar == oparity_binary);
1410 get_binop_right (ir_node *node) {
1411 if (node->op->opar == oparity_binary)
1412 return get_irn_n(node, node->op->op_index + 1);
1414 assert(node->op->opar == oparity_binary);
1419 set_binop_right (ir_node *node, ir_node *right) {
1420 if (node->op->opar == oparity_binary)
1421 set_irn_n(node, node->op->op_index + 1, right);
1423 assert (node->op->opar == oparity_binary);
/* Phi predicates and accessors.  A "Phi0" is a Phi with zero
   predecessors that only exists while the graph is being built;
   is_Phi excludes Phi0s, is_Phi0 matches exactly them.  In the
   interprocedural view a Filter acts as a Phi. */
1426 int is_Phi (ir_node *n) {
1432 if (op == op_Filter) return interprocedural_view;
1435 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1436 (get_irn_arity(n) > 0));
1441 int is_Phi0 (ir_node *n) {
1444 return ((get_irn_op(n) == op_Phi) &&
1445 (get_irn_arity(n) == 0) &&
1446 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1450 get_Phi_preds_arr (ir_node *node) {
1451 assert (node->op == op_Phi);
/* +1 skips the block slot of the raw in array */
1452 return (ir_node **)&(get_irn_in(node)[1]);
1456 get_Phi_n_preds (ir_node *node) {
1457 assert (is_Phi(node) || is_Phi0(node));
1458 return (get_irn_arity(node));
1462 void set_Phi_n_preds (ir_node *node, int n_preds) {
1463 assert (node->op == op_Phi);
1468 get_Phi_pred (ir_node *node, int pos) {
1469 assert (is_Phi(node) || is_Phi0(node));
1470 return get_irn_n(node, pos);
1474 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1475 assert (is_Phi(node) || is_Phi0(node));
1476 set_irn_n(node, pos, pred);
1480 get_Load_mem (ir_node *node) {
1481 assert (node->op == op_Load);
1482 return get_irn_n(node, 0);
1486 set_Load_mem (ir_node *node, ir_node *mem) {
1487 assert (node->op == op_Load);
1488 set_irn_n(node, 0, mem);
1492 get_Load_ptr (ir_node *node) {
1493 assert (node->op == op_Load);
1494 return get_irn_n(node, 1);
1498 set_Load_ptr (ir_node *node, ir_node *ptr) {
1499 assert (node->op == op_Load);
1500 set_irn_n(node, 1, ptr);
1505 get_Store_mem (ir_node *node) {
1506 assert (node->op == op_Store);
1507 return get_irn_n(node, 0);
1511 set_Store_mem (ir_node *node, ir_node *mem) {
1512 assert (node->op == op_Store);
1513 set_irn_n(node, 0, mem);
1517 get_Store_ptr (ir_node *node) {
1518 assert (node->op == op_Store);
1519 return get_irn_n(node, 1);
1523 set_Store_ptr (ir_node *node, ir_node *ptr) {
1524 assert (node->op == op_Store);
1525 set_irn_n(node, 1, ptr);
1529 get_Store_value (ir_node *node) {
1530 assert (node->op == op_Store);
1531 return get_irn_n(node, 2);
1535 set_Store_value (ir_node *node, ir_node *value) {
1536 assert (node->op == op_Store);
1537 set_irn_n(node, 2, value);
1541 get_Alloc_mem (ir_node *node) {
1542 assert (node->op == op_Alloc);
1543 return get_irn_n(node, 0);
1547 set_Alloc_mem (ir_node *node, ir_node *mem) {
1548 assert (node->op == op_Alloc);
1549 set_irn_n(node, 0, mem);
1553 get_Alloc_size (ir_node *node) {
1554 assert (node->op == op_Alloc);
1555 return get_irn_n(node, 1);
1559 set_Alloc_size (ir_node *node, ir_node *size) {
1560 assert (node->op == op_Alloc);
1561 set_irn_n(node, 1, size);
1565 get_Alloc_type (ir_node *node) {
1566 assert (node->op == op_Alloc);
1567 return node->attr.a.type = skip_tid(node->attr.a.type);
1571 set_Alloc_type (ir_node *node, type *tp) {
1572 assert (node->op == op_Alloc);
1573 node->attr.a.type = tp;
1577 get_Alloc_where (ir_node *node) {
1578 assert (node->op == op_Alloc);
1579 return node->attr.a.where;
1583 set_Alloc_where (ir_node *node, where_alloc where) {
1584 assert (node->op == op_Alloc);
1585 node->attr.a.where = where;
1590 get_Free_mem (ir_node *node) {
1591 assert (node->op == op_Free);
1592 return get_irn_n(node, 0);
1596 set_Free_mem (ir_node *node, ir_node *mem) {
1597 assert (node->op == op_Free);
1598 set_irn_n(node, 0, mem);
1602 get_Free_ptr (ir_node *node) {
1603 assert (node->op == op_Free);
1604 return get_irn_n(node, 1);
1608 set_Free_ptr (ir_node *node, ir_node *ptr) {
1609 assert (node->op == op_Free);
1610 set_irn_n(node, 1, ptr);
1614 get_Free_size (ir_node *node) {
1615 assert (node->op == op_Free);
1616 return get_irn_n(node, 2);
1620 set_Free_size (ir_node *node, ir_node *size) {
1621 assert (node->op == op_Free);
1622 set_irn_n(node, 2, size);
1626 get_Free_type (ir_node *node) {
1627 assert (node->op == op_Free);
1628 return node->attr.f = skip_tid(node->attr.f);
1632 set_Free_type (ir_node *node, type *tp) {
1633 assert (node->op == op_Free);
1638 get_Sync_preds_arr (ir_node *node) {
1639 assert (node->op == op_Sync);
1640 return (ir_node **)&(get_irn_in(node)[1]);
1644 get_Sync_n_preds (ir_node *node) {
1645 assert (node->op == op_Sync);
1646 return (get_irn_arity(node));
1651 set_Sync_n_preds (ir_node *node, int n_preds) {
1652 assert (node->op == op_Sync);
1657 get_Sync_pred (ir_node *node, int pos) {
1658 assert (node->op == op_Sync);
1659 return get_irn_n(node, pos);
1663 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1664 assert (node->op == op_Sync);
1665 set_irn_n(node, pos, pred);
1669 get_Proj_pred (ir_node *node) {
1670 assert (is_Proj(node));
1671 return get_irn_n(node, 0);
1675 set_Proj_pred (ir_node *node, ir_node *pred) {
1676 assert (is_Proj(node));
1677 set_irn_n(node, 0, pred);
1681 get_Proj_proj (ir_node *node) {
1682 assert (is_Proj(node));
1683 if (get_irn_opcode(node) == iro_Proj) {
1684 return node->attr.proj;
1686 assert(get_irn_opcode(node) == iro_Filter);
1687 return node->attr.filter.proj;
1692 set_Proj_proj (ir_node *node, long proj) {
1693 assert (node->op == op_Proj);
1694 node->attr.proj = proj;
1698 get_Tuple_preds_arr (ir_node *node) {
1699 assert (node->op == op_Tuple);
1700 return (ir_node **)&(get_irn_in(node)[1]);
1704 get_Tuple_n_preds (ir_node *node) {
1705 assert (node->op == op_Tuple);
1706 return (get_irn_arity(node));
1711 set_Tuple_n_preds (ir_node *node, int n_preds) {
1712 assert (node->op == op_Tuple);
1717 get_Tuple_pred (ir_node *node, int pos) {
1718 assert (node->op == op_Tuple);
1719 return get_irn_n(node, pos);
1723 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1724 assert (node->op == op_Tuple);
1725 set_irn_n(node, pos, pred);
1729 get_Id_pred (ir_node *node) {
1730 assert (node->op == op_Id);
1731 return get_irn_n(node, 0);
1735 set_Id_pred (ir_node *node, ir_node *pred) {
1736 assert (node->op == op_Id);
1737 set_irn_n(node, 0, pred);
1740 ir_node *get_Confirm_value (ir_node *node) {
1741 assert (node->op == op_Confirm);
1742 return get_irn_n(node, 0);
1744 void set_Confirm_value (ir_node *node, ir_node *value) {
1745 assert (node->op == op_Confirm);
1746 set_irn_n(node, 0, value);
1748 ir_node *get_Confirm_bound (ir_node *node) {
1749 assert (node->op == op_Confirm);
1750 return get_irn_n(node, 1);
1752 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1753 assert (node->op == op_Confirm);
1754 set_irn_n(node, 0, bound);
1756 pn_Cmp get_Confirm_cmp (ir_node *node) {
1757 assert (node->op == op_Confirm);
1758 return node->attr.confirm_cmp;
1760 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1761 assert (node->op == op_Confirm);
1762 node->attr.confirm_cmp = cmp;
1767 get_Filter_pred (ir_node *node) {
1768 assert(node->op == op_Filter);
1772 set_Filter_pred (ir_node *node, ir_node *pred) {
1773 assert(node->op == op_Filter);
1777 get_Filter_proj(ir_node *node) {
1778 assert(node->op == op_Filter);
1779 return node->attr.filter.proj;
1782 set_Filter_proj (ir_node *node, long proj) {
1783 assert(node->op == op_Filter);
1784 node->attr.filter.proj = proj;
1787 /* Don't use get_irn_arity, get_irn_n in implementation as access
1788 shall work independent of view!!! */
1789 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1790 assert(node->op == op_Filter);
1791 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1792 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1793 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1794 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1795 node->attr.filter.in_cg[0] = node->in[0];
1797 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1800 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1801 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1802 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1803 node->attr.filter.in_cg[pos + 1] = pred;
1805 int get_Filter_n_cg_preds(ir_node *node) {
1806 assert(node->op == op_Filter && node->attr.filter.in_cg);
1807 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1809 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1811 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1813 arity = ARR_LEN(node->attr.filter.in_cg);
1814 assert(pos < arity - 1);
1815 return node->attr.filter.in_cg[pos + 1];
1820 get_irn_irg(ir_node *node) {
1821 if (get_irn_op(node) != op_Block)
1822 node = get_nodes_block(node);
1823 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1824 node = get_nodes_block(node);
1825 assert(get_irn_op(node) == op_Block);
1826 return node->attr.block.irg;
1830 /*----------------------------------------------------------------*/
1831 /* Auxiliary routines */
1832 /*----------------------------------------------------------------*/
1835 skip_Proj (ir_node *node) {
1836 /* don't assert node !!! */
1837 if (node && is_Proj(node)) {
1838 return get_Proj_pred(node);
1845 skip_Tuple (ir_node *node) {
1848 if (!get_opt_normalize()) return node;
1850 node = skip_nop(node);
1851 if (get_irn_op(node) == op_Proj) {
1852 pred = skip_nop(get_Proj_pred(node));
1853 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1854 pred = skip_nop(skip_Tuple(pred));
1855 if (get_irn_op(pred) == op_Tuple)
1856 return get_Tuple_pred(pred, get_Proj_proj(node));
1861 /** returns operand of node if node is a Cast */
1862 ir_node *skip_Cast (ir_node *node) {
1863 if (node && get_irn_op(node) == op_Cast) {
1864 return skip_nop(get_irn_n(node, 0));
1871 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1872 than any other approach, as Id chains are resolved and all point to the real node, or
1873 all id's are self loops. */
1875 skip_nop (ir_node *node) {
1876 /* don't assert node !!! */
1878 if (!get_opt_normalize()) return node;
1880 /* Don't use get_Id_pred: We get into an endless loop for
1881 self-referencing Ids. */
1882 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1883 ir_node *rem_pred = node->in[0+1];
1886 assert (get_irn_arity (node) > 0);
1888 node->in[0+1] = node;
1889 res = skip_nop(rem_pred);
1890 if (res->op == op_Id) /* self-loop */ return node;
1892 node->in[0+1] = res;
1899 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1900 than any other approach, as Id chains are resolved and all point to the real node, or
1901 all id's are self loops. */
1903 skip_nop (ir_node *node) {
1905 /* don't assert node !!! */
1907 if (!get_opt_normalize()) return node;
1909 /* Don't use get_Id_pred: We get into an endless loop for
1910 self-referencing Ids. */
1911 if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
1912 ir_node *rem_pred, *res;
1914 if (pred->op != op_Id) return pred; /* shortcut */
1917 assert (get_irn_arity (node) > 0);
1919 node->in[0+1] = node;
1920 res = skip_nop(rem_pred);
1921 if (res->op == op_Id) /* self-loop */ return node;
1923 node->in[0+1] = res;
1932 skip_Id (ir_node *node) {
1933 return skip_nop(node);
1937 is_Bad (ir_node *node) {
1939 if ((node) && get_irn_opcode(node) == iro_Bad)
1945 is_no_Block (ir_node *node) {
1947 return (get_irn_opcode(node) != iro_Block);
1951 is_Block (ir_node *node) {
1953 return (get_irn_opcode(node) == iro_Block);
1956 /* returns true if node is a Unknown node. */
1958 is_Unknown (ir_node *node) {
1960 return (get_irn_opcode(node) == iro_Unknown);
1964 is_Proj (const ir_node *node) {
1966 return node->op == op_Proj
1967 || (!interprocedural_view && node->op == op_Filter);
1970 /* Returns true if the operation manipulates control flow. */
1972 is_cfop(ir_node *node) {
1973 return is_cfopcode(get_irn_op(node));
1976 /* Returns true if the operation manipulates interprocedural control flow:
1977 CallBegin, EndReg, EndExcept */
1978 int is_ip_cfop(ir_node *node) {
1979 return is_ip_cfopcode(get_irn_op(node));
1982 ir_graph *get_ip_cfop_irg(ir_node *n) {
1983 return get_irn_irg(n);
1986 /* Returns true if the operation can change the control flow because
1989 is_fragile_op(ir_node *node) {
1990 return is_op_fragile(get_irn_op(node));
1993 /* Returns the memory operand of fragile operations. */
1994 ir_node *get_fragile_op_mem(ir_node *node) {
1995 assert(node && is_fragile_op(node));
1997 switch (get_irn_opcode (node)) {
2006 return get_irn_n(node, 0);
2011 assert(0 && "should not be reached");
2016 #ifdef DEBUG_libfirm
2017 void dump_irn (ir_node *n) {
2019 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2021 ir_node *pred = get_irn_n(n, -1);
2022 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2023 get_irn_node_nr(pred), (void *)pred);
2025 printf(" preds: \n");
2026 for (i = 0; i < get_irn_arity(n); ++i) {
2027 ir_node *pred = get_irn_n(n, i);
2028 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2029 get_irn_node_nr(pred), (void *)pred);
2033 #else /* DEBUG_libfirm */
2034 void dump_irn (ir_node *n) {}
2035 #endif /* DEBUG_libfirm */