3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define SEL_INDEX_OFFSET 2
34 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
35 #define END_KEEPALIVE_OFFSET 0
37 static const char *pnc_name_arr [] = {
38 "False", "Eq", "Lt", "Le",
39 "Gt", "Ge", "Lg", "Leg", "Uo",
40 "Ue", "Ul", "Ule", "Ug", "Uge",
45 * returns the pnc name from a pnc constant
47 const char *get_pnc_string(int pnc) {
48 return pnc_name_arr[pnc];
52 * Calculates the negated pnc condition.
55 get_negated_pnc(int pnc) {
57 case False: return True; break;
58 case Eq: return Ne; break;
59 case Lt: return Uge; break;
60 case Le: return Ug; break;
61 case Gt: return Ule; break;
62 case Ge: return Ul; break;
63 case Lg: return Ue; break;
64 case Leg: return Uo; break;
65 case Uo: return Leg; break;
66 case Ue: return Lg; break;
67 case Ul: return Ge; break;
68 case Ule: return Gt; break;
69 case Ug: return Le; break;
70 case Uge: return Lt; break;
71 case Ne: return Eq; break;
72 case True: return False; break;
74 return 99; /* to shut up gcc */
77 const char *pns_name_arr [] = {
78 "initial_exec", "global_store",
79 "frame_base", "globals", "args"
82 const char *symconst_name_arr [] = {
83 "type_tag", "size", "linkage_ptr_info"
93 * Create a new irnode in irg, with an op, mode, arity and
94 * some incoming irnodes.
95 * If arity is negative, a node with a dynamic array is created.
98 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
99 int arity, ir_node **in)
/* Allocates the node on the graph's obstack; size = fixed header plus the
   op-specific attribute area. */
102 int node_size = offsetof (ir_node, attr) + op->attr_size;
104 assert(irg && op && mode);
105 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 res->kind = k_ir_node;
/* Negative arity: dynamically growing in-array with one slot reserved for
   the block predecessor. */
113 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
/* Fixed arity: obstack array of arity+1 slots; slot 0 is the block, the
   given predecessors are copied into slots 1..arity. */
115 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
116 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
119 set_irn_dbg_info(res, db);
/* NOTE(review): node_nr assignment presumably under a DEBUG_libfirm guard
   in the sampled-out lines -- confirm against the full file. */
123 res->node_nr = get_irp_new_node_nr();
131 /* Copies all attributes stored in the old node to the new node.
132 Assumes both have the same opcode and sufficient size. */
134 copy_attrs (const ir_node *old_node, ir_node *new_node) {
135 assert(get_irn_op(old_node) == get_irn_op(new_node));
136 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
139 /*-- getting some parameters from ir_nodes --*/
142 (is_ir_node)(const void *thing) {
143 return __is_ir_node(thing);
147 (get_irn_intra_arity)(const ir_node *node) {
148 return __get_irn_intra_arity(node);
152 (get_irn_inter_arity)(const ir_node *node) {
153 return __get_irn_inter_arity(node);
157 (get_irn_arity)(const ir_node *node) {
158 return __get_irn_arity(node);
161 /* Returns the array with ins. This array is shifted with respect to the
162 array accessed by get_irn_n: The block operand is at position 0 not -1.
163 (@@@ This should be changed.)
164 The order of the predecessors in this array is not guaranteed, except that
165 lists of operands as predecessors of Block or arguments of a Call are
168 get_irn_in (const ir_node *node) {
170 if (interprocedural_view) { /* handle Filter and Block specially */
171 if (get_irn_opcode(node) == iro_Filter) {
172 assert(node->attr.filter.in_cg);
173 return node->attr.filter.in_cg;
174 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
175 return node->attr.block.in_cg;
177 /* else fall through */
/* Replaces the predecessor array of a node. In the interprocedural view
   Filter and Block nodes keep a second in-array (in_cg) which is the one
   exchanged here; otherwise the regular in-array is used. */
183 set_irn_in (ir_node *node, int arity, ir_node **in) {
186 if (interprocedural_view) { /* handle Filter and Block specially */
187 if (get_irn_opcode(node) == iro_Filter) {
188 assert(node->attr.filter.in_cg);
189 arr = &node->attr.filter.in_cg;
190 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
191 arr = &node->attr.block.in_cg;
/* Arity changed: allocate a fresh array, carrying over the block operand
   stored at slot 0. */
198 if (arity != ARR_LEN(*arr) - 1) {
199 ir_node * block = (*arr)[0];
200 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
/* Backedge bookkeeping must be re-synchronized with the new arity. */
203 fix_backedges(current_ir_graph->obst, node);
/* Copy predecessors behind the block slot. */
204 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
208 (get_irn_intra_n)(ir_node *node, int n) {
209 return __get_irn_intra_n (node, n);
213 (get_irn_inter_n)(ir_node *node, int n) {
214 return __get_irn_inter_n (node, n);
218 (get_irn_n)(ir_node *node, int n) {
219 return __get_irn_n (node, n);
/* Sets the n-th predecessor of a node; n == -1 addresses the block.
   Filter (and interprocedural Block) nodes mirror the change into their
   second in-array (in_cg) so both views stay consistent. */
223 set_irn_n (ir_node *node, int n, ir_node *in) {
224 assert(node && -1 <= n && n < get_irn_arity(node));
225 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
226 /* Change block pred in both views! */
227 node->in[n + 1] = in;
228 assert(node->attr.filter.in_cg);
229 node->attr.filter.in_cg[n + 1] = in;
/* NOTE(review): an early return presumably follows in the sampled-out
   lines -- confirm against the full file. */
232 if (interprocedural_view) { /* handle Filter and Block specially */
233 if (get_irn_opcode(node) == iro_Filter) {
234 assert(node->attr.filter.in_cg);
235 node->attr.filter.in_cg[n + 1] = in;
237 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
238 node->attr.block.in_cg[n + 1] = in;
241 /* else fall through */
/* Regular in-array; slot 0 holds the block, hence the +1 shift. */
243 node->in[n + 1] = in;
247 (get_irn_mode)(const ir_node *node) {
248 return __get_irn_mode(node);
252 (set_irn_mode)(ir_node *node, ir_mode *mode)
254 __set_irn_mode(node, mode);
258 get_irn_modecode (const ir_node *node)
261 return node->mode->code;
264 /** Gets the string representation of the mode .*/
266 get_irn_modename (const ir_node *node)
269 return get_mode_name(node->mode);
273 get_irn_modeident (const ir_node *node)
276 return get_mode_ident(node->mode);
280 (get_irn_op)(const ir_node *node)
282 return __get_irn_op(node);
285 /* should be private to the library: */
287 set_irn_op (ir_node *node, ir_op *op)
294 (get_irn_opcode)(const ir_node *node)
296 return __get_irn_opcode(node);
300 get_irn_opname (const ir_node *node)
303 if ((get_irn_op((ir_node *)node) == op_Phi) &&
304 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
305 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
306 return get_id_str(node->op->name);
310 get_irn_opident (const ir_node *node)
313 return node->op->name;
317 (get_irn_visited)(const ir_node *node)
319 return __get_irn_visited(node);
323 (set_irn_visited)(ir_node *node, unsigned long visited)
325 __set_irn_visited(node, visited);
329 (mark_irn_visited)(ir_node *node) {
330 __mark_irn_visited(node);
334 (irn_not_visited)(const ir_node *node) {
335 return __irn_not_visited(node);
339 (irn_visited)(const ir_node *node) {
340 return __irn_visited(node);
344 (set_irn_link)(ir_node *node, void *link) {
345 __set_irn_link(node, link);
349 (get_irn_link)(const ir_node *node) {
350 return __get_irn_link(node);
353 /* Outputs a unique number for this node */
355 get_irn_node_nr(const ir_node *node) {
358 return node->node_nr;
365 get_irn_const_attr (ir_node *node)
367 assert (node->op == op_Const);
368 return node->attr.con;
372 get_irn_proj_attr (ir_node *node)
374 assert (node->op == op_Proj);
375 return node->attr.proj;
379 get_irn_alloc_attr (ir_node *node)
381 assert (node->op == op_Alloc);
386 get_irn_free_attr (ir_node *node)
388 assert (node->op == op_Free);
389 return node->attr.f = skip_tid(node->attr.f);
393 get_irn_symconst_attr (ir_node *node)
395 assert (node->op == op_SymConst);
400 get_irn_call_attr (ir_node *node)
402 assert (node->op == op_Call);
403 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
407 get_irn_funccall_attr (ir_node *node)
409 assert (node->op == op_FuncCall);
410 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
414 get_irn_sel_attr (ir_node *node)
416 assert (node->op == op_Sel);
421 get_irn_phi_attr (ir_node *node)
423 assert (node->op == op_Phi);
424 return node->attr.phi0_pos;
428 get_irn_block_attr (ir_node *node)
430 assert (node->op == op_Block);
431 return node->attr.block;
434 /** manipulate fields of individual nodes **/
436 /* this works for all except Block */
438 get_nodes_Block (ir_node *node) {
439 assert (!(node->op == op_Block));
440 return get_irn_n(node, -1);
444 set_nodes_Block (ir_node *node, ir_node *block) {
445 assert (!(node->op == op_Block));
446 set_irn_n(node, -1, block);
449 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
450 * from Start. If so returns frame type, else Null. */
451 type *is_frame_pointer(ir_node *n) {
452 if ((get_irn_op(n) == op_Proj) &&
453 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
454 ir_node *start = get_Proj_pred(n);
455 if (get_irn_op(start) == op_Start) {
456 return get_irg_frame_type(get_irn_irg(start));
462 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
463 * from Start. If so returns global type, else Null. */
464 type *is_globals_pointer(ir_node *n) {
465 if ((get_irn_op(n) == op_Proj) &&
466 (get_Proj_proj(n) == pn_Start_P_globals)) {
467 ir_node *start = get_Proj_pred(n);
468 if (get_irn_op(start) == op_Start) {
469 return get_glob_type();
475 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
476 * from Start. If so returns 1, else 0. */
477 int is_value_arg_pointer(ir_node *n) {
478 if ((get_irn_op(n) == op_Proj) &&
479 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
480 (get_irn_op(get_Proj_pred(n)) == op_Start))
485 /* Returns an array with the predecessors of the Block. Depending on
486 the implementation of the graph data structure this can be a copy of
487 the internal representation of predecessors as well as the internal
488 array itself. Therefore writing to this array might obstruct the ir. */
490 get_Block_cfgpred_arr (ir_node *node)
492 assert ((node->op == op_Block));
493 return (ir_node **)&(get_irn_in(node)[1]);
498 get_Block_n_cfgpreds (ir_node *node) {
499 assert ((node->op == op_Block));
500 return (get_irn_arity(node));
/* Returns the control-flow predecessor of a Block at position pos. */
504 get_Block_cfgpred (ir_node *node, int pos) {
505 assert (node->op == op_Block);
/* Debug aid: on an out-of-range pos, dump the graph and report the bad
   index before the assert below terminates. */
507 if (-1 > pos || get_irn_arity(node) <= pos) {
508 dump_ir_block_graph(current_ir_graph);
509 printf("pos: %d, arity: %d ", pos, get_irn_arity(node));
512 assert(node); assert(-1 <= pos && pos < get_irn_arity(node));
513 return get_irn_n(node, pos);
517 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
518 assert (node->op == op_Block);
519 set_irn_n(node, pos, pred);
523 get_Block_matured (ir_node *node) {
524 assert (node->op == op_Block);
525 return node->attr.block.matured;
529 set_Block_matured (ir_node *node, bool matured) {
530 assert (node->op == op_Block);
531 node->attr.block.matured = matured;
534 get_Block_block_visited (ir_node *node) {
535 assert (node->op == op_Block);
536 return node->attr.block.block_visited;
540 set_Block_block_visited (ir_node *node, unsigned long visit) {
541 assert (node->op == op_Block);
542 node->attr.block.block_visited = visit;
545 /* For this current_ir_graph must be set. */
547 mark_Block_block_visited (ir_node *node) {
548 assert (node->op == op_Block);
549 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
553 Block_not_block_visited(ir_node *node) {
554 assert (node->op == op_Block);
555 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
559 get_Block_graph_arr (ir_node *node, int pos) {
560 assert (node->op == op_Block);
561 return node->attr.block.graph_arr[pos+1];
565 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
566 assert (node->op == op_Block);
567 node->attr.block.graph_arr[pos+1] = value;
570 /* handler handling for Blocks */
572 set_Block_handler (ir_node *block, ir_node *handler) {
573 assert ((block->op == op_Block));
574 assert ((handler->op == op_Block));
575 block->attr.block.handler_entry = handler;
579 get_Block_handler (ir_node *block) {
580 assert ((block->op == op_Block));
581 return (block->attr.block.handler_entry);
584 /* handler handling for Nodes */
586 set_Node_handler (ir_node *node, ir_node *handler) {
587 set_Block_handler (get_nodes_Block (node), handler);
591 get_Node_handler (ir_node *node) {
592 return (get_Block_handler (get_nodes_Block (node)));
595 /* exc_t handling for Blocks */
596 void set_Block_exc (ir_node *block, exc_t exc) {
597 assert ((block->op == op_Block));
598 block->attr.block.exc = exc;
601 exc_t get_Block_exc (ir_node *block) {
602 assert ((block->op == op_Block));
603 return (block->attr.block.exc);
606 /* exc_t handling for Nodes */
607 void set_Node_exc (ir_node *node, exc_t exc) {
608 set_Block_exc (get_nodes_Block (node), exc);
611 exc_t get_Node_exc (ir_node *node) {
612 return (get_Block_exc (get_nodes_Block (node)));
/* Installs the interprocedural (call-graph) predecessor array of a Block.
   Allocates a fresh in_cg array when none exists or the arity changed. */
616 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
617 assert(node->op == op_Block);
618 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
/* Slot 0 of in_cg stays NULL (block slot); backedge flags get a parallel
   array of the same arity. */
619 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
620 node->attr.block.in_cg[0] = NULL;
621 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
623 /* Fix backedge array. fix_backedges operates depending on
624 interprocedural_view. */
/* Temporarily force the interprocedural view so fix_backedges works on
   in_cg, then restore the caller's setting. */
625 bool ipv = interprocedural_view;
626 interprocedural_view = true;
627 fix_backedges(current_ir_graph->obst, node);
628 interprocedural_view = ipv;
/* Copy the predecessors behind the reserved block slot. */
631 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
634 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
635 assert(node->op == op_Block &&
636 node->attr.block.in_cg &&
637 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
638 node->attr.block.in_cg[pos + 1] = pred;
641 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
642 assert(node->op == op_Block);
643 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
646 int get_Block_cg_n_cfgpreds(ir_node * node) {
647 assert(node->op == op_Block);
648 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
651 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
652 assert(node->op == op_Block && node->attr.block.in_cg);
653 return node->attr.block.in_cg[pos + 1];
656 void remove_Block_cg_cfgpred_arr(ir_node * node) {
657 assert(node->op == op_Block);
658 node->attr.block.in_cg = NULL;
661 /* Start references the irg it is in. */
663 get_Start_irg(ir_node *node) {
664 return get_irn_irg(node);
668 set_Start_irg(ir_node *node, ir_graph *irg) {
669 assert(node->op == op_Start);
670 assert(is_ir_graph(irg));
671 assert(0 && " Why set irg? -- use set_irn_irg");
675 get_End_n_keepalives(ir_node *end) {
676 assert (end->op == op_End);
677 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
681 get_End_keepalive(ir_node *end, int pos) {
682 assert (end->op == op_End);
683 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
687 add_End_keepalive (ir_node *end, ir_node *ka) {
688 assert (end->op == op_End);
689 ARR_APP1 (ir_node *, end->in, ka);
693 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
694 assert (end->op == op_End);
695 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
699 free_End (ir_node *end) {
700 assert (end->op == op_End);
702 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
703 end->in = NULL; /* @@@ make sure we get an error if we use the
704 in array afterwards ... */
707 ir_graph *get_EndReg_irg (ir_node *end) {
708 return get_irn_irg(end);
711 ir_graph *get_EndExcept_irg (ir_node *end) {
712 return get_irn_irg(end);
716 > Implementing the case construct (which is where the constant Proj node is
717 > important) involves far more than simply determining the constant values.
718 > We could argue that this is more properly a function of the translator from
719 > Firm to the target machine. That could be done if there was some way of
720 > projecting "default" out of the Cond node.
721 I know it's complicated.
722 Basically there are two problems:
723 - determining the gaps between the projs
724 - determining the biggest case constant to know the proj number for
726 I see several solutions:
727 1. Introduce a ProjDefault node. Solves both problems.
728 This means to extend all optimizations executed during construction.
729 2. Give the Cond node for switch two flavors:
730 a) there are no gaps in the projs (existing flavor)
731 b) gaps may exist, default proj is still the Proj with the largest
732 projection number. This covers also the gaps.
733 3. Fix the semantic of the Cond to that of 2b)
735 Solution 2 seems to be the best:
736 Computing the gaps in the Firm representation is not too hard, i.e.,
737 libFIRM can implement a routine that transforms between the two
738 flavours. This is also possible for 1) but 2) does not require to
739 change any existing optimization.
740 Further it should be far simpler to determine the biggest constant than
742 I don't want to choose 3) as 2a) seems to have advantages for
743 dataflow analysis and 3) does not allow to convert the representation to
747 get_Cond_selector (ir_node *node) {
748 assert (node->op == op_Cond);
749 return get_irn_n(node, 0);
753 set_Cond_selector (ir_node *node, ir_node *selector) {
754 assert (node->op == op_Cond);
755 set_irn_n(node, 0, selector);
759 get_Cond_kind (ir_node *node) {
760 assert (node->op == op_Cond);
761 return node->attr.c.kind;
765 set_Cond_kind (ir_node *node, cond_kind kind) {
766 assert (node->op == op_Cond);
767 node->attr.c.kind = kind;
771 get_Cond_defaultProj (ir_node *node) {
772 assert (node->op == op_Cond);
773 return node->attr.c.default_proj;
777 get_Return_mem (ir_node *node) {
778 assert (node->op == op_Return);
779 return get_irn_n(node, 0);
783 set_Return_mem (ir_node *node, ir_node *mem) {
784 assert (node->op == op_Return);
785 set_irn_n(node, 0, mem);
789 get_Return_n_ress (ir_node *node) {
790 assert (node->op == op_Return);
791 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
795 get_Return_res_arr (ir_node *node)
797 assert ((node->op == op_Return));
798 if (get_Return_n_ress(node) > 0)
799 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
806 set_Return_n_res (ir_node *node, int results) {
807 assert (node->op == op_Return);
812 get_Return_res (ir_node *node, int pos) {
813 assert (node->op == op_Return);
814 assert (get_Return_n_ress(node) > pos);
815 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
819 set_Return_res (ir_node *node, int pos, ir_node *res){
820 assert (node->op == op_Return);
821 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
825 get_Raise_mem (ir_node *node) {
826 assert (node->op == op_Raise);
827 return get_irn_n(node, 0);
831 set_Raise_mem (ir_node *node, ir_node *mem) {
832 assert (node->op == op_Raise);
833 set_irn_n(node, 0, mem);
837 get_Raise_exo_ptr (ir_node *node) {
838 assert (node->op == op_Raise);
839 return get_irn_n(node, 1);
843 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
844 assert (node->op == op_Raise);
845 set_irn_n(node, 1, exo_ptr);
848 tarval *get_Const_tarval (ir_node *node) {
849 assert (node->op == op_Const);
850 return node->attr.con.tv;
854 set_Const_tarval (ir_node *node, tarval *con) {
855 assert (node->op == op_Const);
856 node->attr.con.tv = con;
860 /* The source language type. Must be an atomic type. Mode of type must
861 be mode of node. For tarvals from entities type must be pointer to
864 get_Const_type (ir_node *node) {
865 assert (node->op == op_Const);
866 return node->attr.con.tp;
870 set_Const_type (ir_node *node, type *tp) {
871 assert (node->op == op_Const);
872 if (tp != unknown_type) {
873 assert (is_atomic_type(tp));
874 assert (get_type_mode(tp) == get_irn_mode(node));
875 assert (!tarval_is_entity(get_Const_tarval(node)) ||
876 (is_pointer_type(tp) &&
877 (get_pointer_points_to_type(tp) ==
878 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
881 node->attr.con.tp = tp;
886 get_SymConst_kind (const ir_node *node) {
887 assert (node->op == op_SymConst);
888 return node->attr.i.num;
892 set_SymConst_kind (ir_node *node, symconst_kind num) {
893 assert (node->op == op_SymConst);
894 node->attr.i.num = num;
898 get_SymConst_type (ir_node *node) {
899 assert ( (node->op == op_SymConst)
900 && ( get_SymConst_kind(node) == type_tag
901 || get_SymConst_kind(node) == size));
902 return node->attr.i.tori.typ = skip_tid(node->attr.i.tori.typ);
906 set_SymConst_type (ir_node *node, type *tp) {
907 assert ( (node->op == op_SymConst)
908 && ( get_SymConst_kind(node) == type_tag
909 || get_SymConst_kind(node) == size));
910 node->attr.i.tori.typ = tp;
914 get_SymConst_ptrinfo (ir_node *node) {
915 assert ( (node->op == op_SymConst)
916 && (get_SymConst_kind(node) == linkage_ptr_info));
917 return node->attr.i.tori.ptrinfo;
921 set_SymConst_ptrinfo (ir_node *node, ident *ptrinfo) {
922 assert ( (node->op == op_SymConst)
923 && (get_SymConst_kind(node) == linkage_ptr_info));
924 node->attr.i.tori.ptrinfo = ptrinfo;
928 get_SymConst_type_or_id (ir_node *node) {
929 assert (node->op == op_SymConst);
930 return &(node->attr.i.tori);
934 set_SymConst_type_or_id (ir_node *node, type_or_id_p tori) {
935 assert (node->op == op_SymConst);
936 memcpy (&(node->attr.i.tori), tori, sizeof(type_or_id));
940 get_Sel_mem (ir_node *node) {
941 assert (node->op == op_Sel);
942 return get_irn_n(node, 0);
946 set_Sel_mem (ir_node *node, ir_node *mem) {
947 assert (node->op == op_Sel);
948 set_irn_n(node, 0, mem);
952 get_Sel_ptr (ir_node *node) {
953 assert (node->op == op_Sel);
954 return get_irn_n(node, 1);
958 set_Sel_ptr (ir_node *node, ir_node *ptr) {
959 assert (node->op == op_Sel);
960 set_irn_n(node, 1, ptr);
964 get_Sel_n_indexs (ir_node *node) {
965 assert (node->op == op_Sel);
966 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
970 get_Sel_index_arr (ir_node *node)
972 assert ((node->op == op_Sel));
973 if (get_Sel_n_indexs(node) > 0)
974 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
980 get_Sel_index (ir_node *node, int pos) {
981 assert (node->op == op_Sel);
982 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
986 set_Sel_index (ir_node *node, int pos, ir_node *index) {
987 assert (node->op == op_Sel);
988 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
992 get_Sel_entity (ir_node *node) {
993 assert (node->op == op_Sel);
994 return node->attr.s.ent;
998 set_Sel_entity (ir_node *node, entity *ent) {
999 assert (node->op == op_Sel);
1000 node->attr.s.ent = ent;
1004 get_InstOf_ent (ir_node *node) {
1005 assert (node->op = op_InstOf);
1006 return (node->attr.io.ent);
1010 set_InstOf_ent (ir_node *node, type *ent) {
1011 assert (node->op = op_InstOf);
1012 node->attr.io.ent = ent;
1016 get_InstOf_store (ir_node *node) {
1017 assert (node->op = op_InstOf);
1018 return (get_irn_n (node, 0));
1022 set_InstOf_store (ir_node *node, ir_node *obj) {
1023 assert (node->op = op_InstOf);
1024 set_irn_n (node, 0, obj);
1028 get_InstOf_obj (ir_node *node) {
1029 assert (node->op = op_InstOf);
1030 return (get_irn_n (node, 1));
1034 set_InstOf_obj (ir_node *node, ir_node *obj) {
1035 assert (node->op = op_InstOf);
1036 set_irn_n (node, 1, obj);
1040 /* For unary and binary arithmetic operations the access to the
1041 operands can be factored out. Left is the first, right the
1042 second arithmetic value as listed in tech report 0999-33.
1043 unops are: Minus, Abs, Not, Conv, Cast
1044 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1045 Shr, Shrs, Rotate, Cmp */
1049 get_Call_mem (ir_node *node) {
1050 assert (node->op == op_Call);
1051 return get_irn_n(node, 0);
1055 set_Call_mem (ir_node *node, ir_node *mem) {
1056 assert (node->op == op_Call);
1057 set_irn_n(node, 0, mem);
1061 get_Call_ptr (ir_node *node) {
1062 assert (node->op == op_Call);
1063 return get_irn_n(node, 1);
1067 set_Call_ptr (ir_node *node, ir_node *ptr) {
1068 assert (node->op == op_Call);
1069 set_irn_n(node, 1, ptr);
1073 get_Call_param_arr (ir_node *node) {
1074 assert (node->op == op_Call);
1075 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1079 get_Call_n_params (ir_node *node) {
1080 assert (node->op == op_Call);
1081 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1085 get_Call_arity (ir_node *node) {
1086 assert (node->op == op_Call);
1087 return get_Call_n_params(node);
1091 set_Call_arity (ir_node *node, ir_node *arity) {
1092 assert (node->op == op_Call);
1097 get_Call_param (ir_node *node, int pos) {
1098 assert (node->op == op_Call);
1099 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1103 set_Call_param (ir_node *node, int pos, ir_node *param) {
1104 assert (node->op == op_Call);
1105 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1109 get_Call_type (ir_node *node) {
1110 assert (node->op == op_Call);
1111 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1115 set_Call_type (ir_node *node, type *tp) {
1116 assert (node->op == op_Call);
1117 assert (is_method_type(tp));
1118 node->attr.call.cld_tp = tp;
1121 int Call_has_callees(ir_node *node) {
1122 return (node->attr.call.callee_arr != NULL);
1125 int get_Call_n_callees(ir_node * node) {
1126 assert(node->op == op_Call && node->attr.call.callee_arr);
1127 return ARR_LEN(node->attr.call.callee_arr);
1130 entity * get_Call_callee(ir_node * node, int pos) {
1131 assert(node->op == op_Call && node->attr.call.callee_arr);
1132 return node->attr.call.callee_arr[pos];
1135 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1136 assert(node->op == op_Call);
1137 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1138 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1140 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1143 void remove_Call_callee_arr(ir_node * node) {
1144 assert(node->op == op_Call);
1145 node->attr.call.callee_arr = NULL;
1148 ir_node * get_CallBegin_ptr (ir_node *node) {
1149 assert(node->op == op_CallBegin);
1150 return get_irn_n(node, 0);
1152 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1153 assert(node->op == op_CallBegin);
1154 set_irn_n(node, 0, ptr);
1156 ir_graph * get_CallBegin_irg (ir_node *node) {
1157 return get_irn_irg(node);
1159 ir_node * get_CallBegin_call (ir_node *node) {
1160 assert(node->op == op_CallBegin);
1161 return node->attr.callbegin.call;
1163 void set_CallBegin_call (ir_node *node, ir_node *call) {
1164 assert(node->op == op_CallBegin);
1165 node->attr.callbegin.call = call;
1169 get_FuncCall_ptr (ir_node *node) {
1170 assert (node->op == op_FuncCall);
1171 return get_irn_n(node, 0);
1175 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1176 assert (node->op == op_FuncCall);
1177 set_irn_n(node, 0, ptr);
1181 get_FuncCall_param_arr (ir_node *node) {
1182 assert (node->op == op_FuncCall);
1183 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET];
1187 get_FuncCall_n_params (ir_node *node) {
1188 assert (node->op == op_FuncCall);
1189 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1193 get_FuncCall_arity (ir_node *node) {
1194 assert (node->op == op_FuncCall);
1195 return get_FuncCall_n_params(node);
1199 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1200 assert (node->op == op_FuncCall);
1205 get_FuncCall_param (ir_node *node, int pos) {
1206 assert (node->op == op_FuncCall);
1207 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1211 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1212 assert (node->op == op_FuncCall);
1213 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1217 get_FuncCall_type (ir_node *node) {
1218 assert (node->op == op_FuncCall);
1219 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1223 set_FuncCall_type (ir_node *node, type *tp) {
1224 assert (node->op == op_FuncCall);
1225 assert (is_method_type(tp));
1226 node->attr.call.cld_tp = tp;
1229 int FuncCall_has_callees(ir_node *node) {
1230 return (node->attr.call.callee_arr != NULL);
1233 int get_FuncCall_n_callees(ir_node * node) {
1234 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1235 return ARR_LEN(node->attr.call.callee_arr);
1238 entity * get_FuncCall_callee(ir_node * node, int pos) {
1239 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1240 return node->attr.call.callee_arr[pos];
1243 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1244 assert(node->op == op_FuncCall);
1245 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1246 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1248 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1251 void remove_FuncCall_callee_arr(ir_node * node) {
1252 assert(node->op == op_FuncCall);
1253 node->attr.call.callee_arr = NULL;
1258 ir_node * get_##OP##_left(ir_node *node) { \
1259 assert(node->op == op_##OP); \
1260 return get_irn_n(node, node->op->op_index); \
1262 void set_##OP##_left(ir_node *node, ir_node *left) { \
1263 assert(node->op == op_##OP); \
1264 set_irn_n(node, node->op->op_index, left); \
1266 ir_node *get_##OP##_right(ir_node *node) { \
1267 assert(node->op == op_##OP); \
1268 return get_irn_n(node, node->op->op_index + 1); \
1270 void set_##OP##_right(ir_node *node, ir_node *right) { \
1271 assert(node->op == op_##OP); \
1272 set_irn_n(node, node->op->op_index + 1, right); \
1276 ir_node *get_##OP##_op(ir_node *node) { \
1277 assert(node->op == op_##OP); \
1278 return get_irn_n(node, node->op->op_index); \
1280 void set_##OP##_op (ir_node *node, ir_node *op) { \
1281 assert(node->op == op_##OP); \
1282 set_irn_n(node, node->op->op_index, op); \
1292 get_Quot_mem (ir_node *node) {
1293 assert (node->op == op_Quot);
1294 return get_irn_n(node, 0);
1298 set_Quot_mem (ir_node *node, ir_node *mem) {
1299 assert (node->op == op_Quot);
1300 set_irn_n(node, 0, mem);
1306 get_DivMod_mem (ir_node *node) {
1307 assert (node->op == op_DivMod);
1308 return get_irn_n(node, 0);
1312 set_DivMod_mem (ir_node *node, ir_node *mem) {
1313 assert (node->op == op_DivMod);
1314 set_irn_n(node, 0, mem);
1320 get_Div_mem (ir_node *node) {
1321 assert (node->op == op_Div);
1322 return get_irn_n(node, 0);
1326 set_Div_mem (ir_node *node, ir_node *mem) {
1327 assert (node->op == op_Div);
1328 set_irn_n(node, 0, mem);
1334 get_Mod_mem (ir_node *node) {
1335 assert (node->op == op_Mod);
1336 return get_irn_n(node, 0);
1340 set_Mod_mem (ir_node *node, ir_node *mem) {
1341 assert (node->op == op_Mod);
1342 set_irn_n(node, 0, mem);
1359 get_Cast_type (ir_node *node) {
1360 assert (node->op == op_Cast);
1361 return node->attr.cast.totype;
1365 set_Cast_type (ir_node *node, type *to_tp) {
1366 assert (node->op == op_Cast);
1367 node->attr.cast.totype = to_tp;
1371 is_unop (ir_node *node) {
1372 return (node->op->opar == oparity_unary);
1376 get_unop_op (ir_node *node) {
1377 if (node->op->opar == oparity_unary)
1378 return get_irn_n(node, node->op->op_index);
1380 assert(node->op->opar == oparity_unary);
1385 set_unop_op (ir_node *node, ir_node *op) {
1386 if (node->op->opar == oparity_unary)
1387 set_irn_n(node, node->op->op_index, op);
1389 assert(node->op->opar == oparity_unary);
1393 is_binop (ir_node *node) {
1394 return (node->op->opar == oparity_binary);
1398 get_binop_left (ir_node *node) {
1399 if (node->op->opar == oparity_binary)
1400 return get_irn_n(node, node->op->op_index);
1402 assert(node->op->opar == oparity_binary);
1407 set_binop_left (ir_node *node, ir_node *left) {
1408 if (node->op->opar == oparity_binary)
1409 set_irn_n(node, node->op->op_index, left);
1411 assert (node->op->opar == oparity_binary);
1415 get_binop_right (ir_node *node) {
1416 if (node->op->opar == oparity_binary)
1417 return get_irn_n(node, node->op->op_index + 1);
1419 assert(node->op->opar == oparity_binary);
1424 set_binop_right (ir_node *node, ir_node *right) {
1425 if (node->op->opar == oparity_binary)
1426 set_irn_n(node, node->op->op_index + 1, right);
1428 assert (node->op->opar == oparity_binary);
/* Returns true if the node acts as a Phi.  A Filter counts as a Phi
   only in the interprocedural view.  After graph construction
   (phase != phase_building) every Phi counts; during construction,
   arity-0 placeholder Phis (Phi0) are excluded.
   NOTE(review): lines between the signature and the Filter check are
   missing from this extraction — presumably they fetch the node's op
   and reject non-Phi/non-Filter ops; confirm against the repository. */
1431 int is_Phi (ir_node *n) {
1437 if (op == op_Filter) return interprocedural_view;
1440 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1441 (get_irn_arity(n) > 0));
/* Returns true for a Phi0: a Phi with zero predecessors that survived
   past the construction phase. */
1446 int is_Phi0 (ir_node *n) {
1449 return ((get_irn_op(n) == op_Phi) &&
1450 (get_irn_arity(n) == 0) &&
1451 (get_irg_phase_state(get_irn_irg(n)) != phase_building));
/* Returns the predecessor array of a Phi node.  Skips in[0] (the
   block reference), so the result points at the first data pred. */
1455 get_Phi_preds_arr (ir_node *node) {
1456 assert (node->op == op_Phi);
1457 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of predecessors of a Phi node. */
1461 get_Phi_n_preds (ir_node *node) {
1462 assert (is_Phi(node));
1463 return (get_irn_arity(node));
/* Placeholder setter for the Phi predecessor count; body not visible
   in this extraction beyond the assert. */
1467 void set_Phi_n_preds (ir_node *node, int n_preds) {
1468 assert (node->op == op_Phi);
/* Returns the predecessor at position pos of a Phi node. */
1473 get_Phi_pred (ir_node *node, int pos) {
1474 assert (is_Phi(node));
1475 return get_irn_n(node, pos);
/* Sets the predecessor at position pos of a Phi node. */
1479 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1480 assert (is_Phi(node));
1481 set_irn_n(node, pos, pred);
/* Returns the memory operand (input 0) of a Load node. */
1485 get_Load_mem (ir_node *node) {
1486 assert (node->op == op_Load);
1487 return get_irn_n(node, 0);
/* Sets the memory operand (input 0) of a Load node. */
1491 set_Load_mem (ir_node *node, ir_node *mem) {
1492 assert (node->op == op_Load);
1493 set_irn_n(node, 0, mem);
/* Returns the address operand (input 1) of a Load node. */
1497 get_Load_ptr (ir_node *node) {
1498 assert (node->op == op_Load);
1499 return get_irn_n(node, 1);
/* Sets the address operand (input 1) of a Load node. */
1503 set_Load_ptr (ir_node *node, ir_node *ptr) {
1504 assert (node->op == op_Load);
1505 set_irn_n(node, 1, ptr);
/* Returns the memory operand (input 0) of a Store node. */
1510 get_Store_mem (ir_node *node) {
1511 assert (node->op == op_Store);
1512 return get_irn_n(node, 0);
/* Sets the memory operand (input 0) of a Store node. */
1516 set_Store_mem (ir_node *node, ir_node *mem) {
1517 assert (node->op == op_Store);
1518 set_irn_n(node, 0, mem);
/* Returns the address operand (input 1) of a Store node. */
1522 get_Store_ptr (ir_node *node) {
1523 assert (node->op == op_Store);
1524 return get_irn_n(node, 1);
/* Sets the address operand (input 1) of a Store node. */
1528 set_Store_ptr (ir_node *node, ir_node *ptr) {
1529 assert (node->op == op_Store);
1530 set_irn_n(node, 1, ptr);
/* Returns the stored value (input 2) of a Store node. */
1534 get_Store_value (ir_node *node) {
1535 assert (node->op == op_Store);
1536 return get_irn_n(node, 2);
/* Sets the stored value (input 2) of a Store node. */
1540 set_Store_value (ir_node *node, ir_node *value) {
1541 assert (node->op == op_Store);
1542 set_irn_n(node, 2, value);
/* Returns the memory operand (input 0) of an Alloc node. */
1546 get_Alloc_mem (ir_node *node) {
1547 assert (node->op == op_Alloc);
1548 return get_irn_n(node, 0);
/* Sets the memory operand (input 0) of an Alloc node. */
1552 set_Alloc_mem (ir_node *node, ir_node *mem) {
1553 assert (node->op == op_Alloc);
1554 set_irn_n(node, 0, mem);
/* Returns the size operand (input 1) of an Alloc node. */
1558 get_Alloc_size (ir_node *node) {
1559 assert (node->op == op_Alloc);
1560 return get_irn_n(node, 1);
/* Sets the size operand (input 1) of an Alloc node. */
1564 set_Alloc_size (ir_node *node, ir_node *size) {
1565 assert (node->op == op_Alloc);
1566 set_irn_n(node, 1, size);
/* Returns the allocated type.  Also writes back skip_tid()'s result,
   caching the resolved type in the attribute. */
1570 get_Alloc_type (ir_node *node) {
1571 assert (node->op == op_Alloc);
1572 return node->attr.a.type = skip_tid(node->attr.a.type);
/* Sets the allocated type of an Alloc node. */
1576 set_Alloc_type (ir_node *node, type *tp) {
1577 assert (node->op == op_Alloc);
1578 node->attr.a.type = tp;
/* Returns where the allocation takes place (stack or heap). */
1582 get_Alloc_where (ir_node *node) {
1583 assert (node->op == op_Alloc);
1584 return node->attr.a.where;
/* Sets where the allocation takes place (stack or heap). */
1588 set_Alloc_where (ir_node *node, where_alloc where) {
1589 assert (node->op == op_Alloc);
1590 node->attr.a.where = where;
/* Returns the memory operand (input 0) of a Free node. */
1595 get_Free_mem (ir_node *node) {
1596 assert (node->op == op_Free);
1597 return get_irn_n(node, 0);
/* Sets the memory operand (input 0) of a Free node. */
1601 set_Free_mem (ir_node *node, ir_node *mem) {
1602 assert (node->op == op_Free);
1603 set_irn_n(node, 0, mem);
/* Returns the pointer operand (input 1) of a Free node. */
1607 get_Free_ptr (ir_node *node) {
1608 assert (node->op == op_Free);
1609 return get_irn_n(node, 1);
/* Sets the pointer operand (input 1) of a Free node. */
1613 set_Free_ptr (ir_node *node, ir_node *ptr) {
1614 assert (node->op == op_Free);
1615 set_irn_n(node, 1, ptr);
/* Returns the size operand (input 2) of a Free node. */
1619 get_Free_size (ir_node *node) {
1620 assert (node->op == op_Free);
1621 return get_irn_n(node, 2);
/* Sets the size operand (input 2) of a Free node. */
1625 set_Free_size (ir_node *node, ir_node *size) {
1626 assert (node->op == op_Free);
1627 set_irn_n(node, 2, size);
/* Returns the freed type, caching skip_tid()'s result in attr.f
   (mirrors get_Alloc_type's caching idiom). */
1631 get_Free_type (ir_node *node) {
1632 assert (node->op == op_Free);
1633 return node->attr.f = skip_tid(node->attr.f);
/* Sets the freed type.  NOTE(review): the assignment statement is
   missing from this extraction — expect `node->attr.f = tp;` here. */
1637 set_Free_type (ir_node *node, type *tp) {
1638 assert (node->op == op_Free);
/* Returns the predecessor array of a Sync node, skipping in[0]
   (the block reference). */
1643 get_Sync_preds_arr (ir_node *node) {
1644 assert (node->op == op_Sync);
1645 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of predecessors of a Sync node. */
1649 get_Sync_n_preds (ir_node *node) {
1650 assert (node->op == op_Sync);
1651 return (get_irn_arity(node));
/* Placeholder setter for the Sync predecessor count; body not visible
   in this extraction beyond the assert. */
1656 set_Sync_n_preds (ir_node *node, int n_preds) {
1657 assert (node->op == op_Sync);
/* Returns the predecessor at position pos of a Sync node. */
1662 get_Sync_pred (ir_node *node, int pos) {
1663 assert (node->op == op_Sync);
1664 return get_irn_n(node, pos);
/* Sets the predecessor at position pos of a Sync node. */
1668 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1669 assert (node->op == op_Sync);
1670 set_irn_n(node, pos, pred);
/* Returns the predecessor (input 0) of a Proj — or of a Filter, which
   is_Proj() accepts outside the interprocedural view. */
1674 get_Proj_pred (ir_node *node) {
1675 assert (is_Proj(node));
1676 return get_irn_n(node, 0);
/* Sets the predecessor (input 0) of a Proj (or Filter, see is_Proj). */
1680 set_Proj_pred (ir_node *node, ir_node *pred) {
1681 assert (is_Proj(node));
1682 set_irn_n(node, 0, pred);
/* Returns the projection number.  Proj and Filter store it in
   different attribute fields, hence the opcode dispatch. */
1686 get_Proj_proj (ir_node *node) {
1687 assert (is_Proj(node));
1688 if (get_irn_opcode(node) == iro_Proj) {
1689 return node->attr.proj;
1691 assert(get_irn_opcode(node) == iro_Filter);
1692 return node->attr.filter.proj;
/* Sets the projection number.  NOTE(review): unlike the getter this
   accepts only op_Proj, not Filter — confirm the asymmetry is wanted. */
1697 set_Proj_proj (ir_node *node, long proj) {
1698 assert (node->op == op_Proj);
1699 node->attr.proj = proj;
/* Returns the predecessor array of a Tuple node, skipping in[0]
   (the block reference). */
1703 get_Tuple_preds_arr (ir_node *node) {
1704 assert (node->op == op_Tuple);
1705 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of predecessors of a Tuple node. */
1709 get_Tuple_n_preds (ir_node *node) {
1710 assert (node->op == op_Tuple);
1711 return (get_irn_arity(node));
/* Placeholder setter for the Tuple predecessor count; body not visible
   in this extraction beyond the assert. */
1716 set_Tuple_n_preds (ir_node *node, int n_preds) {
1717 assert (node->op == op_Tuple);
/* Returns the predecessor at position pos of a Tuple node. */
1722 get_Tuple_pred (ir_node *node, int pos) {
1723 assert (node->op == op_Tuple);
1724 return get_irn_n(node, pos);
/* Sets the predecessor at position pos of a Tuple node. */
1728 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1729 assert (node->op == op_Tuple);
1730 set_irn_n(node, pos, pred);
/* Returns the single predecessor (input 0) of an Id node. */
1734 get_Id_pred (ir_node *node) {
1735 assert (node->op == op_Id);
1736 return get_irn_n(node, 0);
/* Sets the single predecessor (input 0) of an Id node. */
1740 set_Id_pred (ir_node *node, ir_node *pred) {
1741 assert (node->op == op_Id);
1742 set_irn_n(node, 0, pred);
/* Returns the value operand (input 0) of a Confirm node. */
1745 ir_node *get_Confirm_value (ir_node *node) {
1746 assert (node->op == op_Confirm);
1747 return get_irn_n(node, 0);
/* Sets the value operand (input 0) of a Confirm node. */
1749 void set_Confirm_value (ir_node *node, ir_node *value) {
1750 assert (node->op == op_Confirm);
1751 set_irn_n(node, 0, value);
/* Returns the bound operand (input 1) of a Confirm node. */
1753 ir_node *get_Confirm_bound (ir_node *node) {
1754 assert (node->op == op_Confirm);
1755 return get_irn_n(node, 1);
1757 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1758 assert (node->op == op_Confirm);
1759 set_irn_n(node, 0, bound);
/* Returns the comparison relation (pn_Cmp) a Confirm node asserts
   between its value and its bound. */
1761 pn_Cmp get_Confirm_cmp (ir_node *node) {
1762 assert (node->op == op_Confirm);
1763 return node->attr.confirm_cmp;
/* Sets the comparison relation of a Confirm node. */
1765 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1766 assert (node->op == op_Confirm);
1767 node->attr.confirm_cmp = cmp;
/* Returns the predecessor of a Filter node.  NOTE(review): the return
   statement is missing from this extraction — presumably
   get_irn_n(node, 0); confirm against the repository. */
1772 get_Filter_pred (ir_node *node) {
1773 assert(node->op == op_Filter);
/* Sets the predecessor of a Filter node.  NOTE(review): the setter
   statement is missing from this extraction. */
1777 set_Filter_pred (ir_node *node, ir_node *pred) {
1778 assert(node->op == op_Filter);
/* Returns the projection number of a Filter node. */
1782 get_Filter_proj(ir_node *node) {
1783 assert(node->op == op_Filter);
1784 return node->attr.filter.proj;
/* Sets the projection number of a Filter node. */
1787 set_Filter_proj (ir_node *node, long proj) {
1788 assert(node->op == op_Filter);
1789 node->attr.filter.proj = proj;
1792 /* Don't use get_irn_arity, get_irn_n in implementation as access
1793 shall work independent of view!!! */
/* Replaces the interprocedural (call-graph) predecessor array of a
   Filter.  (Re)allocates in_cg on the graph's obstack when absent or
   of the wrong length; slot 0 mirrors the block pointer in in[0],
   slots 1..arity hold the cg preds copied from `in`. */
1794 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1795 assert(node->op == op_Filter);
1796 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1797 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1798 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1799 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1800 node->attr.filter.in_cg[0] = node->in[0];
1802 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets one interprocedural predecessor; pos is 0-based, stored at
   in_cg[pos + 1] because slot 0 is the block pointer. */
1805 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1806 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1807 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1808 node->attr.filter.in_cg[pos + 1] = pred;
/* Returns the number of interprocedural predecessors (array length
   minus the block slot). */
1810 int get_Filter_n_cg_preds(ir_node *node) {
1811 assert(node->op == op_Filter && node->attr.filter.in_cg);
1812 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Returns the interprocedural predecessor at pos.
   NOTE(review): only the upper bound is asserted; the setter also
   checks 0 <= pos — consider the same here. */
1814 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1816 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1818 arity = ARR_LEN(node->attr.filter.in_cg);
1819 assert(pos < arity - 1);
1820 return node->attr.filter.in_cg[pos + 1];
/* Returns the graph a node belongs to by walking to its Block, which
   stores the irg in its attribute.  Skips one extra level when the
   "block" is actually a Bad node left behind by optimization. */
1825 get_irn_irg(ir_node *node) {
1826 if (get_irn_op(node) != op_Block)
1827 node = get_nodes_block(node);
1828 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1829 node = get_nodes_block(node);
1830 assert(get_irn_op(node) == op_Block);
1831 return node->attr.block.irg;
1835 /*----------------------------------------------------------------*/
1836 /* Auxiliary routines */
1837 /*----------------------------------------------------------------*/
/* Returns the predecessor if node is a Proj; otherwise (including
   NULL) the node itself is expected to be returned — the fall-through
   return is outside this extraction. */
1840 skip_Proj (ir_node *node) {
1841 /* don't assert node !!! */
1842 if (node && is_Proj(node)) {
1843 return get_Proj_pred(node);
/* If node is a Proj whose (possibly nested-Tuple) predecessor is a
   Tuple, returns the Tuple entry selected by the Proj's number;
   otherwise returns node.  No-op unless normalization is enabled. */
1850 skip_Tuple (ir_node *node) {
1853 if (!get_opt_normalize()) return node;
1855 node = skip_nop(node);
1856 if (get_irn_op(node) == op_Proj) {
1857 pred = skip_nop(get_Proj_pred(node));
1858 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1859 pred = skip_nop(skip_Tuple(pred));
1860 if (get_irn_op(pred) == op_Tuple)
1861 return get_Tuple_pred(pred, get_Proj_proj(node));
1866 /** returns operand of node if node is a Cast */
/* (Id nodes in front of the operand are skipped via skip_nop; the
   non-Cast fall-through return is outside this extraction.) */
1867 ir_node *skip_Cast (ir_node *node) {
1868 if (node && get_irn_op(node) == op_Cast) {
1869 return skip_nop(get_irn_n(node, 0));
1876 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1877 than any other approach, as Id chains are resolved and all point to the real node, or
1878 all id's are self loops. */
/* First variant of skip_nop.  NOTE(review): two definitions of
   skip_nop appear in this extraction — almost certainly separated by
   preprocessor conditionals that were lost; confirm which is active.
   Resolves a chain of Id nodes to the real node, temporarily making
   the node a self-loop (in[0+1] = node) to break cycles during the
   recursion, then caching the result back into in[0+1]. */
1880 skip_nop (ir_node *node) {
1881 /* don't assert node !!! */
1883 if (!get_opt_normalize()) return node;
1885 /* Don't use get_Id_pred: We get into an endless loop for
1886 self-referencing Ids. */
1887 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1888 ir_node *rem_pred = node->in[0+1];
1891 assert (get_irn_arity (node) > 0);
1893 node->in[0+1] = node;
1894 res = skip_nop(rem_pred);
1895 if (res->op == op_Id) /* self-loop */ return node;
1897 node->in[0+1] = res;
1904 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1905 than any other approach, as Id chains are resolved and all point to the real node, or
1906 all id's are self loops. */
/* Second variant of skip_nop (see NOTE on the first variant about the
   lost preprocessor guards).  Adds a shortcut: when the direct
   predecessor is not an Id, return it without the cycle-breaking
   dance.  Otherwise same algorithm: self-loop the node, recurse,
   cache the resolved predecessor. */
1908 skip_nop (ir_node *node) {
1910 /* don't assert node !!! */
1912 if (!get_opt_normalize()) return node;
1914 /* Don't use get_Id_pred: We get into an endless loop for
1915 self-referencing Ids. */
1916 if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
1917 ir_node *rem_pred, *res;
1919 if (pred->op != op_Id) return pred; /* shortcut */
1922 assert (get_irn_arity (node) > 0);
1924 node->in[0+1] = node;
1925 res = skip_nop(rem_pred);
1926 if (res->op == op_Id) /* self-loop */ return node;
1928 node->in[0+1] = res;
/* Alias for skip_nop: resolves Id chains to the underlying node. */
1937 skip_Id (ir_node *node) {
1938 return skip_nop(node);
/* Returns true if node is non-NULL and a Bad node. */
1942 is_Bad (ir_node *node) {
1944 if ((node) && get_irn_opcode(node) == iro_Bad)
/* Returns true if node is anything but a Block. */
1950 is_no_Block (ir_node *node) {
1952 return (get_irn_opcode(node) != iro_Block);
/* Returns true if node is a Block. */
1956 is_Block (ir_node *node) {
1958 return (get_irn_opcode(node) == iro_Block);
1961 /* returns true if node is a Unknown node. */
1963 is_Unknown (ir_node *node) {
1965 return (get_irn_opcode(node) == iro_Unknown);
/* Returns true if node projects a value: a Proj always, a Filter only
   outside the interprocedural view (there it acts as a Phi). */
1969 is_Proj (const ir_node *node) {
1971 return node->op == op_Proj
1972 || (!interprocedural_view && node->op == op_Filter);
1975 /* Returns true if the operation manipulates control flow. */
1977 is_cfop(ir_node *node) {
1978 return is_cfopcode(get_irn_op(node));
1981 /* Returns true if the operation manipulates interprocedural control flow:
1982 CallBegin, EndReg, EndExcept */
1983 int is_ip_cfop(ir_node *node) {
1984 return is_ip_cfopcode(get_irn_op(node));
/* Returns the graph an interprocedural cfop belongs to; currently
   just delegates to get_irn_irg. */
1987 ir_graph *get_ip_cfop_irg(ir_node *n) {
1988 return get_irn_irg(n);
1991 /* Returns true if the operation can change the control flow because
1994 is_fragile_op(ir_node *node) {
1995 return is_op_fragile(get_irn_op(node));
1998 /* Returns the memory operand of fragile operations. */
/* NOTE(review): the switch's case labels (lines 2003-2010) are missing
   from this extraction; for all visible fragile ops the memory operand
   is input 0, matching the lone `return get_irn_n(node, 0)` here. */
1999 ir_node *get_fragile_op_mem(ir_node *node) {
2000 assert(node && is_fragile_op(node));
2002 switch (get_irn_opcode (node)) {
2011 return get_irn_n(node, 0);
2016 assert(0 && "should not be reached");
2021 #ifdef DEBUG_libfirm
/* Debug helper: prints a node's op, mode, number and address, then its
   block (input -1) and all predecessors in the same format. */
2022 void dump_node (ir_node *n) {
2024 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2026 ir_node *pred = get_irn_n(n, -1);
2027 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2028 get_irn_node_nr(pred), (void *)pred);
2030 printf(" preds: \n");
2031 for (i = 0; i < get_irn_arity(n); ++i) {
2032 ir_node *pred = get_irn_n(n, i);
2033 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2034 get_irn_node_nr(pred), (void *)pred);
2038 #else /* DEBUG_libfirm */
/* Release build: dump_node is a no-op stub. */
2039 void dump_node (ir_node *n) {}
2040 #endif /* DEBUG_libfirm */