/*
 * File name:   ir/ir/irnode.c
 * Purpose:     Representation of an intermediate operation.
 * Author:      Martin Trapp, Christian Schaefer
 * Modified by: Goetz Lindenmaier
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
 */
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
/* some constants fixing the positions of nodes predecessors
   in the in array */
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46 * returns the pnc name from an pnc constant
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
159 (get_irn_arity)(const ir_node *node) {
160 return __get_irn_arity(node);
163 /* Returns the array with ins. This array is shifted with respect to the
164 array accessed by get_irn_n: The block operand is at position 0 not -1.
165 (@@@ This should be changed.)
166 The order of the predecessors in this array is not guaranteed, except that
167 lists of operands as predecessors of Block or arguments of a Call are
170 get_irn_in (const ir_node *node) {
172 if (interprocedural_view) { /* handle Filter and Block specially */
173 if (get_irn_opcode(node) == iro_Filter) {
174 assert(node->attr.filter.in_cg);
175 return node->attr.filter.in_cg;
176 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
177 return node->attr.block.in_cg;
179 /* else fall through */
185 set_irn_in (ir_node *node, int arity, ir_node **in) {
188 if (interprocedural_view) { /* handle Filter and Block specially */
189 if (get_irn_opcode(node) == iro_Filter) {
190 assert(node->attr.filter.in_cg);
191 arr = &node->attr.filter.in_cg;
192 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
193 arr = &node->attr.block.in_cg;
200 if (arity != ARR_LEN(*arr) - 1) {
201 ir_node * block = (*arr)[0];
202 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
205 fix_backedges(current_ir_graph->obst, node);
206 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
210 (get_irn_intra_n)(ir_node *node, int n) {
211 return __get_irn_intra_n (node, n);
215 (get_irn_inter_n)(ir_node *node, int n) {
216 return __get_irn_inter_n (node, n);
220 (get_irn_n)(ir_node *node, int n) {
221 return __get_irn_n (node, n);
225 set_irn_n (ir_node *node, int n, ir_node *in) {
226 assert(node && -1 <= n && n < get_irn_arity(node));
227 assert(in && in->kind == k_ir_node);
228 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
229 /* Change block pred in both views! */
230 node->in[n + 1] = in;
231 assert(node->attr.filter.in_cg);
232 node->attr.filter.in_cg[n + 1] = in;
235 if (interprocedural_view) { /* handle Filter and Block specially */
236 if (get_irn_opcode(node) == iro_Filter) {
237 assert(node->attr.filter.in_cg);
238 node->attr.filter.in_cg[n + 1] = in;
240 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
241 node->attr.block.in_cg[n + 1] = in;
244 /* else fall through */
246 node->in[n + 1] = in;
250 (get_irn_mode)(const ir_node *node) {
251 return __get_irn_mode(node);
255 (set_irn_mode)(ir_node *node, ir_mode *mode)
257 __set_irn_mode(node, mode);
261 get_irn_modecode (const ir_node *node)
264 return node->mode->code;
267 /** Gets the string representation of the mode .*/
269 get_irn_modename (const ir_node *node)
272 return get_mode_name(node->mode);
276 get_irn_modeident (const ir_node *node)
279 return get_mode_ident(node->mode);
283 (get_irn_op)(const ir_node *node)
285 return __get_irn_op(node);
288 /* should be private to the library: */
290 set_irn_op (ir_node *node, ir_op *op)
297 (get_irn_opcode)(const ir_node *node)
299 return __get_irn_opcode(node);
303 get_irn_opname (const ir_node *node)
306 if ((get_irn_op((ir_node *)node) == op_Phi) &&
307 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
308 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
309 return get_id_str(node->op->name);
313 get_irn_opident (const ir_node *node)
316 return node->op->name;
320 (get_irn_visited)(const ir_node *node)
322 return __get_irn_visited(node);
326 (set_irn_visited)(ir_node *node, unsigned long visited)
328 __set_irn_visited(node, visited);
332 (mark_irn_visited)(ir_node *node) {
333 __mark_irn_visited(node);
337 (irn_not_visited)(const ir_node *node) {
338 return __irn_not_visited(node);
342 (irn_visited)(const ir_node *node) {
343 return __irn_visited(node);
347 (set_irn_link)(ir_node *node, void *link) {
348 __set_irn_link(node, link);
352 (get_irn_link)(const ir_node *node) {
353 return __get_irn_link(node);
357 #ifdef DO_HEAPANALYSIS
358 /* Access the abstract interpretation information of a node.
359 Returns NULL if no such information is available. */
360 struct abstval *get_irn_abst_value(ir_node *n) {
363 /* Set the abstract interpretation information of a node. */
364 void set_irn_abst_value(ir_node *n, struct abstval *os) {
367 struct section *firm_get_irn_section(ir_node *n) {
370 void firm_set_irn_section(ir_node *n, struct section *s) {
373 #endif /* DO_HEAPANALYSIS */
376 /* Outputs a unique number for this node */
378 get_irn_node_nr(const ir_node *node) {
381 return node->node_nr;
388 get_irn_const_attr (ir_node *node)
390 assert (node->op == op_Const);
391 return node->attr.con;
395 get_irn_proj_attr (ir_node *node)
397 assert (node->op == op_Proj);
398 return node->attr.proj;
402 get_irn_alloc_attr (ir_node *node)
404 assert (node->op == op_Alloc);
409 get_irn_free_attr (ir_node *node)
411 assert (node->op == op_Free);
412 return node->attr.f = skip_tid(node->attr.f);
416 get_irn_symconst_attr (ir_node *node)
418 assert (node->op == op_SymConst);
423 get_irn_call_attr (ir_node *node)
425 assert (node->op == op_Call);
426 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
430 get_irn_funccall_attr (ir_node *node)
432 assert (node->op == op_FuncCall);
433 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
437 get_irn_sel_attr (ir_node *node)
439 assert (node->op == op_Sel);
444 get_irn_phi_attr (ir_node *node)
446 assert (node->op == op_Phi);
447 return node->attr.phi0_pos;
451 get_irn_block_attr (ir_node *node)
453 assert (node->op == op_Block);
454 return node->attr.block;
457 /** manipulate fields of individual nodes **/
459 /* this works for all except Block */
461 get_nodes_block (ir_node *node) {
462 assert (!(node->op == op_Block));
463 return get_irn_n(node, -1);
467 set_nodes_block (ir_node *node, ir_node *block) {
468 assert (!(node->op == op_Block));
469 set_irn_n(node, -1, block);
472 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
473 * from Start. If so returns frame type, else Null. */
474 type *is_frame_pointer(ir_node *n) {
475 if ((get_irn_op(n) == op_Proj) &&
476 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
477 ir_node *start = get_Proj_pred(n);
478 if (get_irn_op(start) == op_Start) {
479 return get_irg_frame_type(get_irn_irg(start));
485 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
486 * from Start. If so returns global type, else Null. */
487 type *is_globals_pointer(ir_node *n) {
488 if ((get_irn_op(n) == op_Proj) &&
489 (get_Proj_proj(n) == pn_Start_P_globals)) {
490 ir_node *start = get_Proj_pred(n);
491 if (get_irn_op(start) == op_Start) {
492 return get_glob_type();
498 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
499 * from Start. If so returns 1, else 0. */
500 int is_value_arg_pointer(ir_node *n) {
501 if ((get_irn_op(n) == op_Proj) &&
502 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
503 (get_irn_op(get_Proj_pred(n)) == op_Start))
508 /* Returns an array with the predecessors of the Block. Depending on
509 the implementation of the graph data structure this can be a copy of
510 the internal representation of predecessors as well as the internal
511 array itself. Therefore writing to this array might obstruct the ir. */
513 get_Block_cfgpred_arr (ir_node *node)
515 assert ((node->op == op_Block));
516 return (ir_node **)&(get_irn_in(node)[1]);
521 get_Block_n_cfgpreds (ir_node *node) {
522 assert ((node->op == op_Block));
523 return get_irn_arity(node);
527 get_Block_cfgpred (ir_node *node, int pos) {
529 assert (node->op == op_Block);
530 assert(-1 <= pos && pos < get_irn_arity(node));
531 return get_irn_n(node, pos);
535 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
536 assert (node->op == op_Block);
537 set_irn_n(node, pos, pred);
541 get_Block_matured (ir_node *node) {
542 assert (node->op == op_Block);
543 return node->attr.block.matured;
547 set_Block_matured (ir_node *node, bool matured) {
548 assert (node->op == op_Block);
549 node->attr.block.matured = matured;
552 get_Block_block_visited (ir_node *node) {
553 assert (node->op == op_Block);
554 return node->attr.block.block_visited;
558 set_Block_block_visited (ir_node *node, unsigned long visit) {
559 assert (node->op == op_Block);
560 node->attr.block.block_visited = visit;
563 /* For this current_ir_graph must be set. */
565 mark_Block_block_visited (ir_node *node) {
566 assert (node->op == op_Block);
567 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
571 Block_not_block_visited(ir_node *node) {
572 assert (node->op == op_Block);
573 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
577 get_Block_graph_arr (ir_node *node, int pos) {
578 assert (node->op == op_Block);
579 return node->attr.block.graph_arr[pos+1];
583 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
584 assert (node->op == op_Block);
585 node->attr.block.graph_arr[pos+1] = value;
588 /* handler handling for Blocks * /
590 set_Block_handler (ir_node *block, ir_node *handler) {
591 assert ((block->op == op_Block));
592 assert ((handler->op == op_Block));
593 block->attr.block.handler_entry = handler;
597 get_Block_handler (ir_node *block) {
598 assert ((block->op == op_Block));
599 return (block->attr.block.handler_entry);
602 / * handler handling for Nodes * /
604 set_Node_handler (ir_node *node, ir_node *handler) {
605 set_Block_handler (get_nodes_block (node), handler);
609 get_Node_handler (ir_node *node) {
610 return (get_Block_handler (get_nodes_block (node)));
613 / * exc_t handling for Blocks * /
614 void set_Block_exc (ir_node *block, exc_t exc) {
615 assert ((block->op == op_Block));
616 block->attr.block.exc = exc;
619 exc_t get_Block_exc (ir_node *block) {
620 assert ((block->op == op_Block));
621 return (block->attr.block.exc);
624 / * exc_t handling for Nodes * /
625 void set_Node_exc (ir_node *node, exc_t exc) {
626 set_Block_exc (get_nodes_block (node), exc);
629 exc_t get_Node_exc (ir_node *node) {
return (get_Block_exc (get_nodes_block (node)));
}
*/
/* NOTE(review): the handler/exc accessors above are disabled code; the
   comment opened before set_Block_handler is closed here so that the
   live functions below (set_Block_cg_cfgpred_arr etc.) compile. */
634 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
635 assert(node->op == op_Block);
636 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
637 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
638 node->attr.block.in_cg[0] = NULL;
639 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
641 /* Fix backedge array. fix_backedges operates depending on
642 interprocedural_view. */
643 bool ipv = interprocedural_view;
644 interprocedural_view = true;
645 fix_backedges(current_ir_graph->obst, node);
646 interprocedural_view = ipv;
649 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
652 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
653 assert(node->op == op_Block &&
654 node->attr.block.in_cg &&
655 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
656 node->attr.block.in_cg[pos + 1] = pred;
659 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
660 assert(node->op == op_Block);
661 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
664 int get_Block_cg_n_cfgpreds(ir_node * node) {
665 assert(node->op == op_Block);
666 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
669 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
670 assert(node->op == op_Block && node->attr.block.in_cg);
671 return node->attr.block.in_cg[pos + 1];
674 void remove_Block_cg_cfgpred_arr(ir_node * node) {
675 assert(node->op == op_Block);
676 node->attr.block.in_cg = NULL;
680 set_Start_irg(ir_node *node, ir_graph *irg) {
681 assert(node->op == op_Start);
682 assert(is_ir_graph(irg));
683 assert(0 && " Why set irg? -- use set_irn_irg");
687 get_End_n_keepalives(ir_node *end) {
688 assert (end->op == op_End);
689 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
693 get_End_keepalive(ir_node *end, int pos) {
694 assert (end->op == op_End);
695 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
699 add_End_keepalive (ir_node *end, ir_node *ka) {
700 assert (end->op == op_End);
701 ARR_APP1 (ir_node *, end->in, ka);
705 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
706 assert (end->op == op_End);
707 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
711 free_End (ir_node *end) {
712 assert (end->op == op_End);
714 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
715 end->in = NULL; /* @@@ make sure we get an error if we use the
716 in array afterwards ... */
721 > Implementing the case construct (which is where the constant Proj node is
722 > important) involves far more than simply determining the constant values.
723 > We could argue that this is more properly a function of the translator from
724 > Firm to the target machine. That could be done if there was some way of
725 > projecting "default" out of the Cond node.
726 I know it's complicated.
727 Basically there are two proglems:
728 - determining the gaps between the projs
729 - determining the biggest case constant to know the proj number for
731 I see several solutions:
732 1. Introduce a ProjDefault node. Solves both problems.
733 This means to extend all optimizations executed during construction.
734 2. Give the Cond node for switch two flavors:
735 a) there are no gaps in the projs (existing flavor)
736 b) gaps may exist, default proj is still the Proj with the largest
737 projection number. This covers also the gaps.
738 3. Fix the semantic of the Cond to that of 2b)
740 Solution 2 seems to be the best:
741 Computing the gaps in the Firm representation is not too hard, i.e.,
742 libFIRM can implement a routine that transforms between the two
743 flavours. This is also possible for 1) but 2) does not require to
744 change any existing optimization.
745 Further it should be far simpler to determine the biggest constant than
747 I don't want to choose 3) as 2a) seems to have advantages for
748 dataflow analysis and 3) does not allow to convert the representation to
752 get_Cond_selector (ir_node *node) {
753 assert (node->op == op_Cond);
754 return get_irn_n(node, 0);
758 set_Cond_selector (ir_node *node, ir_node *selector) {
759 assert (node->op == op_Cond);
760 set_irn_n(node, 0, selector);
764 get_Cond_kind (ir_node *node) {
765 assert (node->op == op_Cond);
766 return node->attr.c.kind;
770 set_Cond_kind (ir_node *node, cond_kind kind) {
771 assert (node->op == op_Cond);
772 node->attr.c.kind = kind;
776 get_Cond_defaultProj (ir_node *node) {
777 assert (node->op == op_Cond);
778 return node->attr.c.default_proj;
782 get_Return_mem (ir_node *node) {
783 assert (node->op == op_Return);
784 return get_irn_n(node, 0);
788 set_Return_mem (ir_node *node, ir_node *mem) {
789 assert (node->op == op_Return);
790 set_irn_n(node, 0, mem);
794 get_Return_n_ress (ir_node *node) {
795 assert (node->op == op_Return);
796 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
800 get_Return_res_arr (ir_node *node)
802 assert ((node->op == op_Return));
803 if (get_Return_n_ress(node) > 0)
804 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
811 set_Return_n_res (ir_node *node, int results) {
812 assert (node->op == op_Return);
817 get_Return_res (ir_node *node, int pos) {
818 assert (node->op == op_Return);
819 assert (get_Return_n_ress(node) > pos);
820 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
824 set_Return_res (ir_node *node, int pos, ir_node *res){
825 assert (node->op == op_Return);
826 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
830 get_Raise_mem (ir_node *node) {
831 assert (node->op == op_Raise);
832 return get_irn_n(node, 0);
836 set_Raise_mem (ir_node *node, ir_node *mem) {
837 assert (node->op == op_Raise);
838 set_irn_n(node, 0, mem);
842 get_Raise_exo_ptr (ir_node *node) {
843 assert (node->op == op_Raise);
844 return get_irn_n(node, 1);
848 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
849 assert (node->op == op_Raise);
850 set_irn_n(node, 1, exo_ptr);
853 tarval *get_Const_tarval (ir_node *node) {
854 assert (node->op == op_Const);
855 return node->attr.con.tv;
859 set_Const_tarval (ir_node *node, tarval *con) {
860 assert (node->op == op_Const);
861 node->attr.con.tv = con;
865 /* The source language type. Must be an atomic type. Mode of type must
866 be mode of node. For tarvals from entities type must be pointer to
869 get_Const_type (ir_node *node) {
870 assert (node->op == op_Const);
871 return node->attr.con.tp;
875 set_Const_type (ir_node *node, type *tp) {
876 assert (node->op == op_Const);
877 if (tp != unknown_type) {
878 assert (is_atomic_type(tp));
879 assert (get_type_mode(tp) == get_irn_mode(node));
882 node->attr.con.tp = tp;
887 get_SymConst_kind (const ir_node *node) {
888 assert (node->op == op_SymConst);
889 return node->attr.i.num;
893 set_SymConst_kind (ir_node *node, symconst_kind num) {
894 assert (node->op == op_SymConst);
895 node->attr.i.num = num;
899 get_SymConst_type (ir_node *node) {
900 assert ( (node->op == op_SymConst)
901 && ( get_SymConst_kind(node) == symconst_type_tag
902 || get_SymConst_kind(node) == symconst_size));
903 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
907 set_SymConst_type (ir_node *node, type *tp) {
908 assert ( (node->op == op_SymConst)
909 && ( get_SymConst_kind(node) == symconst_type_tag
910 || get_SymConst_kind(node) == symconst_size));
911 node->attr.i.sym.type_p = tp;
915 get_SymConst_name (ir_node *node) {
916 assert ( (node->op == op_SymConst)
917 && (get_SymConst_kind(node) == symconst_addr_name));
918 return node->attr.i.sym.ident_p;
922 set_SymConst_name (ir_node *node, ident *name) {
923 assert ( (node->op == op_SymConst)
924 && (get_SymConst_kind(node) == symconst_addr_name));
925 node->attr.i.sym.ident_p = name;
929 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
930 entity *get_SymConst_entity (ir_node *node) {
931 assert ( (node->op == op_SymConst)
932 && (get_SymConst_kind (node) == symconst_addr_ent));
933 return node->attr.i.sym.entity_p;
936 void set_SymConst_entity (ir_node *node, entity *ent) {
937 assert ( (node->op == op_SymConst)
938 && (get_SymConst_kind(node) == symconst_addr_ent));
939 node->attr.i.sym.entity_p = ent;
943 union symconst_symbol
944 get_SymConst_symbol (ir_node *node) {
945 assert (node->op == op_SymConst);
946 return node->attr.i.sym;
950 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
951 assert (node->op == op_SymConst);
952 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
953 node->attr.i.sym = sym;
957 get_Sel_mem (ir_node *node) {
958 assert (node->op == op_Sel);
959 return get_irn_n(node, 0);
963 set_Sel_mem (ir_node *node, ir_node *mem) {
964 assert (node->op == op_Sel);
965 set_irn_n(node, 0, mem);
969 get_Sel_ptr (ir_node *node) {
970 assert (node->op == op_Sel);
971 return get_irn_n(node, 1);
975 set_Sel_ptr (ir_node *node, ir_node *ptr) {
976 assert (node->op == op_Sel);
977 set_irn_n(node, 1, ptr);
981 get_Sel_n_indexs (ir_node *node) {
982 assert (node->op == op_Sel);
983 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
987 get_Sel_index_arr (ir_node *node)
989 assert ((node->op == op_Sel));
990 if (get_Sel_n_indexs(node) > 0)
991 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
997 get_Sel_index (ir_node *node, int pos) {
998 assert (node->op == op_Sel);
999 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1003 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1004 assert (node->op == op_Sel);
1005 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1009 get_Sel_entity (ir_node *node) {
1010 assert (node->op == op_Sel);
1011 return node->attr.s.ent;
1015 set_Sel_entity (ir_node *node, entity *ent) {
1016 assert (node->op == op_Sel);
1017 node->attr.s.ent = ent;
1021 get_InstOf_ent (ir_node *node) {
1022 assert (node->op = op_InstOf);
1023 return (node->attr.io.ent);
1027 set_InstOf_ent (ir_node *node, type *ent) {
1028 assert (node->op = op_InstOf);
1029 node->attr.io.ent = ent;
1033 get_InstOf_store (ir_node *node) {
1034 assert (node->op = op_InstOf);
1035 return (get_irn_n (node, 0));
1039 set_InstOf_store (ir_node *node, ir_node *obj) {
1040 assert (node->op = op_InstOf);
1041 set_irn_n (node, 0, obj);
1045 get_InstOf_obj (ir_node *node) {
1046 assert (node->op = op_InstOf);
1047 return (get_irn_n (node, 1));
1051 set_InstOf_obj (ir_node *node, ir_node *obj) {
1052 assert (node->op = op_InstOf);
1053 set_irn_n (node, 1, obj);
1057 /* For unary and binary arithmetic operations the access to the
1058 operands can be factored out. Left is the first, right the
1059 second arithmetic value as listed in tech report 0999-33.
1060 unops are: Minus, Abs, Not, Conv, Cast
1061 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1062 Shr, Shrs, Rotate, Cmp */
1066 get_Call_mem (ir_node *node) {
1067 assert (node->op == op_Call);
1068 return get_irn_n(node, 0);
1072 set_Call_mem (ir_node *node, ir_node *mem) {
1073 assert (node->op == op_Call);
1074 set_irn_n(node, 0, mem);
1078 get_Call_ptr (ir_node *node) {
1079 assert (node->op == op_Call);
1080 return get_irn_n(node, 1);
1084 set_Call_ptr (ir_node *node, ir_node *ptr) {
1085 assert (node->op == op_Call);
1086 set_irn_n(node, 1, ptr);
1090 get_Call_param_arr (ir_node *node) {
1091 assert (node->op == op_Call);
1092 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1096 get_Call_n_params (ir_node *node) {
1097 assert (node->op == op_Call);
1098 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1102 get_Call_arity (ir_node *node) {
1103 assert (node->op == op_Call);
1104 return get_Call_n_params(node);
1108 set_Call_arity (ir_node *node, ir_node *arity) {
1109 assert (node->op == op_Call);
1114 get_Call_param (ir_node *node, int pos) {
1115 assert (node->op == op_Call);
1116 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1120 set_Call_param (ir_node *node, int pos, ir_node *param) {
1121 assert (node->op == op_Call);
1122 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1126 get_Call_type (ir_node *node) {
1127 assert (node->op == op_Call);
1128 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1132 set_Call_type (ir_node *node, type *tp) {
1133 assert (node->op == op_Call);
1134 assert (is_method_type(tp));
1135 node->attr.call.cld_tp = tp;
1138 int Call_has_callees(ir_node *node) {
1139 return (node->attr.call.callee_arr != NULL);
1142 int get_Call_n_callees(ir_node * node) {
1143 assert(node->op == op_Call && node->attr.call.callee_arr);
1144 return ARR_LEN(node->attr.call.callee_arr);
1147 entity * get_Call_callee(ir_node * node, int pos) {
1148 assert(node->op == op_Call && node->attr.call.callee_arr);
1149 return node->attr.call.callee_arr[pos];
1152 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1153 assert(node->op == op_Call);
1154 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1155 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1157 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1160 void remove_Call_callee_arr(ir_node * node) {
1161 assert(node->op == op_Call);
1162 node->attr.call.callee_arr = NULL;
1165 ir_node * get_CallBegin_ptr (ir_node *node) {
1166 assert(node->op == op_CallBegin);
1167 return get_irn_n(node, 0);
1169 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1170 assert(node->op == op_CallBegin);
1171 set_irn_n(node, 0, ptr);
1173 ir_node * get_CallBegin_call (ir_node *node) {
1174 assert(node->op == op_CallBegin);
1175 return node->attr.callbegin.call;
1177 void set_CallBegin_call (ir_node *node, ir_node *call) {
1178 assert(node->op == op_CallBegin);
1179 node->attr.callbegin.call = call;
1183 get_FuncCall_ptr (ir_node *node) {
1184 assert (node->op == op_FuncCall);
1185 return get_irn_n(node, 0);
1189 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1190 assert (node->op == op_FuncCall);
1191 set_irn_n(node, 0, ptr);
1195 get_FuncCall_param_arr (ir_node *node) {
1196 assert (node->op == op_FuncCall);
1197 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1201 get_FuncCall_n_params (ir_node *node) {
1202 assert (node->op == op_FuncCall);
1203 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1207 get_FuncCall_arity (ir_node *node) {
1208 assert (node->op == op_FuncCall);
1209 return get_FuncCall_n_params(node);
1213 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1214 assert (node->op == op_FuncCall);
1219 get_FuncCall_param (ir_node *node, int pos) {
1220 assert (node->op == op_FuncCall);
1221 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1225 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1226 assert (node->op == op_FuncCall);
1227 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1231 get_FuncCall_type (ir_node *node) {
1232 assert (node->op == op_FuncCall);
1233 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1237 set_FuncCall_type (ir_node *node, type *tp) {
1238 assert (node->op == op_FuncCall);
1239 assert (is_method_type(tp));
1240 node->attr.call.cld_tp = tp;
1243 int FuncCall_has_callees(ir_node *node) {
1244 return (node->attr.call.callee_arr != NULL);
1247 int get_FuncCall_n_callees(ir_node * node) {
1248 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1249 return ARR_LEN(node->attr.call.callee_arr);
1252 entity * get_FuncCall_callee(ir_node * node, int pos) {
1253 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1254 return node->attr.call.callee_arr[pos];
1257 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1258 assert(node->op == op_FuncCall);
1259 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1260 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1262 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1265 void remove_FuncCall_callee_arr(ir_node * node) {
1266 assert(node->op == op_FuncCall);
1267 node->attr.call.callee_arr = NULL;
1272 ir_node * get_##OP##_left(ir_node *node) { \
1273 assert(node->op == op_##OP); \
1274 return get_irn_n(node, node->op->op_index); \
1276 void set_##OP##_left(ir_node *node, ir_node *left) { \
1277 assert(node->op == op_##OP); \
1278 set_irn_n(node, node->op->op_index, left); \
1280 ir_node *get_##OP##_right(ir_node *node) { \
1281 assert(node->op == op_##OP); \
1282 return get_irn_n(node, node->op->op_index + 1); \
1284 void set_##OP##_right(ir_node *node, ir_node *right) { \
1285 assert(node->op == op_##OP); \
1286 set_irn_n(node, node->op->op_index + 1, right); \
1290 ir_node *get_##OP##_op(ir_node *node) { \
1291 assert(node->op == op_##OP); \
1292 return get_irn_n(node, node->op->op_index); \
1294 void set_##OP##_op (ir_node *node, ir_node *op) { \
1295 assert(node->op == op_##OP); \
1296 set_irn_n(node, node->op->op_index, op); \
1306 get_Quot_mem (ir_node *node) {
1307 assert (node->op == op_Quot);
1308 return get_irn_n(node, 0);
1312 set_Quot_mem (ir_node *node, ir_node *mem) {
1313 assert (node->op == op_Quot);
1314 set_irn_n(node, 0, mem);
1320 get_DivMod_mem (ir_node *node) {
1321 assert (node->op == op_DivMod);
1322 return get_irn_n(node, 0);
1326 set_DivMod_mem (ir_node *node, ir_node *mem) {
1327 assert (node->op == op_DivMod);
1328 set_irn_n(node, 0, mem);
1334 get_Div_mem (ir_node *node) {
1335 assert (node->op == op_Div);
1336 return get_irn_n(node, 0);
1340 set_Div_mem (ir_node *node, ir_node *mem) {
1341 assert (node->op == op_Div);
1342 set_irn_n(node, 0, mem);
1348 get_Mod_mem (ir_node *node) {
1349 assert (node->op == op_Mod);
1350 return get_irn_n(node, 0);
1354 set_Mod_mem (ir_node *node, ir_node *mem) {
1355 assert (node->op == op_Mod);
1356 set_irn_n(node, 0, mem);
1373 get_Cast_type (ir_node *node) {
1374 assert (node->op == op_Cast);
1375 return node->attr.cast.totype;
1379 set_Cast_type (ir_node *node, type *to_tp) {
1380 assert (node->op == op_Cast);
1381 node->attr.cast.totype = to_tp;
/* Generic operand access for unary/binary operations.  The operation
   descriptor (ir_op) records the arity class in op->opar and the index
   of the first data operand in op->op_index, so these work for any
   node whose op is registered as unary resp. binary. */
1385 is_unop (ir_node *node) {
1386 return (node->op->opar == oparity_unary);
1390 get_unop_op (ir_node *node) {
1391 if (node->op->opar == oparity_unary)
1392 return get_irn_n(node, node->op->op_index);
/* reached only for non-unary nodes: trigger a diagnostic in debug builds */
1394 assert(node->op->opar == oparity_unary);
1399 set_unop_op (ir_node *node, ir_node *op) {
1400 if (node->op->opar == oparity_unary)
1401 set_irn_n(node, node->op->op_index, op);
1403 assert(node->op->opar == oparity_unary);
1407 is_binop (ir_node *node) {
1408 return (node->op->opar == oparity_binary);
/* Binary ops: left operand at op_index, right operand at op_index + 1. */
1412 get_binop_left (ir_node *node) {
1413 if (node->op->opar == oparity_binary)
1414 return get_irn_n(node, node->op->op_index);
1416 assert(node->op->opar == oparity_binary);
1421 set_binop_left (ir_node *node, ir_node *left) {
1422 if (node->op->opar == oparity_binary)
1423 set_irn_n(node, node->op->op_index, left);
1425 assert (node->op->opar == oparity_binary);
1429 get_binop_right (ir_node *node) {
1430 if (node->op->opar == oparity_binary)
1431 return get_irn_n(node, node->op->op_index + 1);
1433 assert(node->op->opar == oparity_binary);
1438 set_binop_right (ir_node *node, ir_node *right) {
1439 if (node->op->opar == oparity_binary)
1440 set_irn_n(node, node->op->op_index + 1, right);
1442 assert (node->op->opar == oparity_binary);
/* is_Phi: true for "real" Phi nodes.  In the interprocedural view a
   Filter node plays the Phi role, hence the op_Filter case.  During
   graph construction (phase_building) a Phi with arity 0 is a
   placeholder ("Phi0"), not a real Phi — see is_Phi0 below. */
1445 int is_Phi (ir_node *n) {
1451 if (op == op_Filter) return interprocedural_view;
1454 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1455 (get_irn_arity(n) > 0));
/* is_Phi0: a zero-arity Phi placeholder, only valid while building. */
1460 int is_Phi0 (ir_node *n) {
1463 return ((get_irn_op(n) == op_Phi) &&
1464 (get_irn_arity(n) == 0) &&
1465 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Raw access to the Phi predecessor array; in[0] is the block, so the
   data predecessors start at in[1]. */
1469 get_Phi_preds_arr (ir_node *node) {
1470 assert (node->op == op_Phi);
1471 return (ir_node **)&(get_irn_in(node)[1]);
1475 get_Phi_n_preds (ir_node *node) {
1476 assert (is_Phi(node) || is_Phi0(node));
1477 return (get_irn_arity(node));
/* NOTE(review): body elided in this excerpt — presumably unimplemented
   or resizes the in array; confirm against the full source. */
1481 void set_Phi_n_preds (ir_node *node, int n_preds) {
1482 assert (node->op == op_Phi);
1487 get_Phi_pred (ir_node *node, int pos) {
1488 assert (is_Phi(node) || is_Phi0(node));
1489 return get_irn_n(node, pos);
1493 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1494 assert (is_Phi(node) || is_Phi0(node));
1495 set_irn_n(node, pos, pred);
/* Load: mem at input 0, address at input 1. */
1499 get_Load_mem (ir_node *node) {
1500 assert (node->op == op_Load);
1501 return get_irn_n(node, 0);
1505 set_Load_mem (ir_node *node, ir_node *mem) {
1506 assert (node->op == op_Load);
1507 set_irn_n(node, 0, mem);
1511 get_Load_ptr (ir_node *node) {
1512 assert (node->op == op_Load);
1513 return get_irn_n(node, 1);
1517 set_Load_ptr (ir_node *node, ir_node *ptr) {
1518 assert (node->op == op_Load);
1519 set_irn_n(node, 1, ptr);
/* Store: mem at input 0, address at input 1, stored value at input 2. */
1524 get_Store_mem (ir_node *node) {
1525 assert (node->op == op_Store);
1526 return get_irn_n(node, 0);
1530 set_Store_mem (ir_node *node, ir_node *mem) {
1531 assert (node->op == op_Store);
1532 set_irn_n(node, 0, mem);
1536 get_Store_ptr (ir_node *node) {
1537 assert (node->op == op_Store);
1538 return get_irn_n(node, 1);
1542 set_Store_ptr (ir_node *node, ir_node *ptr) {
1543 assert (node->op == op_Store);
1544 set_irn_n(node, 1, ptr);
1548 get_Store_value (ir_node *node) {
1549 assert (node->op == op_Store);
1550 return get_irn_n(node, 2);
1554 set_Store_value (ir_node *node, ir_node *value) {
1555 assert (node->op == op_Store);
1556 set_irn_n(node, 2, value);
/* Alloc: mem at input 0, size at input 1; allocated type and the
   stack/heap flag live in the node attribute (attr.a). */
1560 get_Alloc_mem (ir_node *node) {
1561 assert (node->op == op_Alloc);
1562 return get_irn_n(node, 0);
1566 set_Alloc_mem (ir_node *node, ir_node *mem) {
1567 assert (node->op == op_Alloc);
1568 set_irn_n(node, 0, mem);
1572 get_Alloc_size (ir_node *node) {
1573 assert (node->op == op_Alloc);
1574 return get_irn_n(node, 1);
1578 set_Alloc_size (ir_node *node, ir_node *size) {
1579 assert (node->op == op_Alloc);
1580 set_irn_n(node, 1, size);
1584 get_Alloc_type (ir_node *node) {
1585 assert (node->op == op_Alloc);
/* getter normalizes the stored type through skip_tid and caches the
   result back into the attribute (assignment inside the return) */
1586 return node->attr.a.type = skip_tid(node->attr.a.type);
1590 set_Alloc_type (ir_node *node, type *tp) {
1591 assert (node->op == op_Alloc);
1592 node->attr.a.type = tp;
1596 get_Alloc_where (ir_node *node) {
1597 assert (node->op == op_Alloc);
1598 return node->attr.a.where;
1602 set_Alloc_where (ir_node *node, where_alloc where) {
1603 assert (node->op == op_Alloc);
1604 node->attr.a.where = where;
/* Free: mem at input 0, pointer at input 1, size at input 2; freed
   type stored directly in attr.f. */
1609 get_Free_mem (ir_node *node) {
1610 assert (node->op == op_Free);
1611 return get_irn_n(node, 0);
1615 set_Free_mem (ir_node *node, ir_node *mem) {
1616 assert (node->op == op_Free);
1617 set_irn_n(node, 0, mem);
1621 get_Free_ptr (ir_node *node) {
1622 assert (node->op == op_Free);
1623 return get_irn_n(node, 1);
1627 set_Free_ptr (ir_node *node, ir_node *ptr) {
1628 assert (node->op == op_Free);
1629 set_irn_n(node, 1, ptr);
1633 get_Free_size (ir_node *node) {
1634 assert (node->op == op_Free);
1635 return get_irn_n(node, 2);
1639 set_Free_size (ir_node *node, ir_node *size) {
1640 assert (node->op == op_Free);
1641 set_irn_n(node, 2, size);
1645 get_Free_type (ir_node *node) {
1646 assert (node->op == op_Free);
/* same cache-through-skip_tid idiom as get_Alloc_type */
1647 return node->attr.f = skip_tid(node->attr.f);
1651 set_Free_type (ir_node *node, type *tp) {
1652 assert (node->op == op_Free);
/* Sync: variadic memory join; in[0] is the block, data preds from in[1]. */
1657 get_Sync_preds_arr (ir_node *node) {
1658 assert (node->op == op_Sync);
1659 return (ir_node **)&(get_irn_in(node)[1]);
1663 get_Sync_n_preds (ir_node *node) {
1664 assert (node->op == op_Sync);
1665 return (get_irn_arity(node));
/* NOTE(review): body elided in this excerpt — confirm in full source. */
1670 set_Sync_n_preds (ir_node *node, int n_preds) {
1671 assert (node->op == op_Sync);
1676 get_Sync_pred (ir_node *node, int pos) {
1677 assert (node->op == op_Sync);
1678 return get_irn_n(node, pos);
1682 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1683 assert (node->op == op_Sync);
1684 set_irn_n(node, pos, pred);
/* Proj: selects one result of a multi-result predecessor (input 0).
   is_Proj also accepts Filter nodes outside the interprocedural view,
   so the getters below dispatch on the concrete opcode. */
1688 get_Proj_pred (ir_node *node) {
1689 assert (is_Proj(node));
1690 return get_irn_n(node, 0);
1694 set_Proj_pred (ir_node *node, ir_node *pred) {
1695 assert (is_Proj(node));
1696 set_irn_n(node, 0, pred);
1700 get_Proj_proj (ir_node *node) {
1701 assert (is_Proj(node));
1702 if (get_irn_opcode(node) == iro_Proj) {
1703 return node->attr.proj;
1705 assert(get_irn_opcode(node) == iro_Filter);
1706 return node->attr.filter.proj;
/* NOTE(review): asymmetry — get_Proj_proj accepts Filter nodes but
   this setter asserts op_Proj only; Filter projections must be set
   via set_Filter_proj.  Confirm this is intentional. */
1711 set_Proj_proj (ir_node *node, long proj) {
1712 assert (node->op == op_Proj);
1713 node->attr.proj = proj;
/* Tuple: variadic result bundle; in[0] is the block, preds from in[1]. */
1717 get_Tuple_preds_arr (ir_node *node) {
1718 assert (node->op == op_Tuple);
1719 return (ir_node **)&(get_irn_in(node)[1]);
1723 get_Tuple_n_preds (ir_node *node) {
1724 assert (node->op == op_Tuple);
1725 return (get_irn_arity(node));
/* NOTE(review): body elided in this excerpt — confirm in full source. */
1730 set_Tuple_n_preds (ir_node *node, int n_preds) {
1731 assert (node->op == op_Tuple);
1736 get_Tuple_pred (ir_node *node, int pos) {
1737 assert (node->op == op_Tuple);
1738 return get_irn_n(node, pos);
1742 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1743 assert (node->op == op_Tuple);
1744 set_irn_n(node, pos, pred);
/* Id: transparent forwarding node, single predecessor at input 0. */
1748 get_Id_pred (ir_node *node) {
1749 assert (node->op == op_Id);
1750 return get_irn_n(node, 0);
1754 set_Id_pred (ir_node *node, ir_node *pred) {
1755 assert (node->op == op_Id);
1756 set_irn_n(node, 0, pred);
/* Confirm: asserts a relation (attr.confirm_cmp) between its value
   operand (input 0) and its bound operand (input 1). */
1759 ir_node *get_Confirm_value (ir_node *node) {
1760 assert (node->op == op_Confirm);
1761 return get_irn_n(node, 0);
1763 void set_Confirm_value (ir_node *node, ir_node *value) {
1764 assert (node->op == op_Confirm);
1765 set_irn_n(node, 0, value);
/* bound is the second data input (position 1) */
1767 ir_node *get_Confirm_bound (ir_node *node) {
1768 assert (node->op == op_Confirm);
1769 return get_irn_n(node, 1);
/* Set the bound operand of a Confirm node.
   BUG FIX: the bound lives at input position 1 (get_Confirm_bound
   reads input 1; input 0 is the value operand).  The old code wrote
   to position 0, silently overwriting the Confirm's value operand
   and leaving the bound unchanged. */
1771 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1772 assert (node->op == op_Confirm);
1773 set_irn_n(node, 1, bound);
/* The comparison relation asserted by a Confirm node (pn_Cmp value). */
1775 pn_Cmp get_Confirm_cmp (ir_node *node) {
1776 assert (node->op == op_Confirm);
1777 return node->attr.confirm_cmp;
1779 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1780 assert (node->op == op_Confirm);
1781 node->attr.confirm_cmp = cmp;
/* Filter: interprocedural counterpart of Proj/Phi.
   NOTE(review): the pred accessor bodies are elided in this excerpt —
   presumably they forward to input 0; confirm in the full source. */
1786 get_Filter_pred (ir_node *node) {
1787 assert(node->op == op_Filter);
1791 set_Filter_pred (ir_node *node, ir_node *pred) {
1792 assert(node->op == op_Filter);
/* projection number selected by this Filter (attr.filter.proj) */
1796 get_Filter_proj(ir_node *node) {
1797 assert(node->op == op_Filter);
1798 return node->attr.filter.proj;
1801 set_Filter_proj (ir_node *node, long proj) {
1802 assert(node->op == op_Filter);
1803 node->attr.filter.proj = proj;
/* Interprocedural (call-graph) predecessors of a Filter node, kept in
   the separate in_cg array so they stay valid in both views. */
1806 /* Don't use get_irn_arity, get_irn_n in implementation as access
1807 shall work independent of view!!! */
1808 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1809 assert(node->op == op_Filter);
/* (re)allocate in_cg/backedge on the graph obstack when the arity
   changes; slot 0 mirrors the block pointer from the normal in array */
1810 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1811 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1812 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1813 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1814 node->attr.filter.in_cg[0] = node->in[0];
/* copy the arity predecessors behind the block slot */
1816 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1819 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1820 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1821 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1822 node->attr.filter.in_cg[pos + 1] = pred;
/* number of call-graph predecessors (array length minus block slot) */
1824 int get_Filter_n_cg_preds(ir_node *node) {
1825 assert(node->op == op_Filter && node->attr.filter.in_cg);
1826 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1828 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1830 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1832 arity = ARR_LEN(node->attr.filter.in_cg);
1833 assert(pos < arity - 1);
1834 return node->attr.filter.in_cg[pos + 1];
/* Return the graph a node belongs to by walking to its Block, which
   stores the owning ir_graph in its block attribute.  A second hop is
   needed when optimization left a Bad where a block was expected. */
1839 get_irn_irg(ir_node *node) {
1840 if (get_irn_op(node) != op_Block)
1841 node = get_nodes_block(node);
1842 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1843 node = get_nodes_block(node);
1844 assert(get_irn_op(node) == op_Block);
1845 return node->attr.block.irg;
1849 /*----------------------------------------------------------------*/
1850 /* Auxiliary routines                                             */
1851 /*----------------------------------------------------------------*/
/* skip_Proj: step over a Proj to its predecessor; NULL and non-Proj
   nodes pass through unchanged (hence no assert). */
1854 skip_Proj (ir_node *node) {
1855 /* don't assert node !!! */
1856 if (node && is_Proj(node)) {
1857 return get_Proj_pred(node);
/* skip_Tuple: resolve Proj-of-Tuple (possibly through nested Tuples
   and Id chains) to the selected Tuple predecessor.  No-op when the
   normalization option is off. */
1864 skip_Tuple (ir_node *node) {
1867 if (!get_opt_normalize()) return node;
1869 node = skip_Id(node);
1870 if (get_irn_op(node) == op_Proj) {
1871 pred = skip_Id(get_Proj_pred(node));
1872 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1873 pred = skip_Id(skip_Tuple(pred));
1874 if (get_irn_op(pred) == op_Tuple)
1875 return get_Tuple_pred(pred, get_Proj_proj(node));
1880 /** returns operand of node if node is a Cast */
1881 ir_node *skip_Cast (ir_node *node) {
1882 if (node && get_irn_op(node) == op_Cast) {
1883 return skip_Id(get_irn_n(node, 0));
/* NOTE(review): two definitions of skip_Id follow; presumably exactly
   one is compiled (the #if/#else directives are elided from this
   excerpt) — confirm against the full source.  Both compact Id chains
   so every Id points at the chain's real end node, turning pure Id
   cycles into self-loops.  Direct node->in[0+1] access is used instead
   of get_Id_pred to avoid infinite recursion on self-referencing Ids. */
1890 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1891 than any other approach, as Id chains are resolved and all point to the real node, or
1892 all id's are self loops. */
1894 skip_Id (ir_node *node) {
1895 /* don't assert node !!! */
1897 if (!get_opt_normalize()) return node;
1899 /* Don't use get_Id_pred: We get into an endless loop for
1900 self-referencing Ids. */
1901 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1902 ir_node *rem_pred = node->in[0+1];
1905 assert (get_irn_arity (node) > 0);
/* temporarily self-loop this node so cycles terminate the recursion */
1907 node->in[0+1] = node;
1908 res = skip_Id(rem_pred);
1909 if (res->op == op_Id) /* self-loop */ return node;
/* point directly at the chain end (path compression) */
1911 node->in[0+1] = res;
/* Second variant: adds early-out fast paths before recursing. */
1918 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1919 than any other approach, as Id chains are resolved and all point to the real node, or
1920 all id's are self loops. */
1922 skip_Id (ir_node *node) {
1924 /* don't assert node !!! */
1926 if (!node || (node->op != op_Id)) return node;
1928 if (!get_opt_normalize()) return node;
1930 /* Don't use get_Id_pred: We get into an endless loop for
1931 self-referencing Ids. */
1932 pred = node->in[0+1];
1934 if (pred->op != op_Id) return pred;
1936 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1937 ir_node *rem_pred, *res;
1939 if (pred->op != op_Id) return pred; /* shortcut */
1942 assert (get_irn_arity (node) > 0);
1944 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1945 res = skip_Id(rem_pred);
1946 if (res->op == op_Id) /* self-loop */ return node;
1948 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Opcode predicates.  is_Bad tolerates NULL; the others assume a
   valid node. */
1957 is_Bad (ir_node *node) {
1959 if ((node) && get_irn_opcode(node) == iro_Bad)
1965 is_no_Block (ir_node *node) {
1967 return (get_irn_opcode(node) != iro_Block);
1971 is_Block (ir_node *node) {
1973 return (get_irn_opcode(node) == iro_Block);
1976 /* returns true if node is a Unknown node. */
1978 is_Unknown (ir_node *node) {
1980 return (get_irn_opcode(node) == iro_Unknown);
/* In the intraprocedural view a Filter acts as a Proj, so it counts
   as one when interprocedural_view is off. */
1984 is_Proj (const ir_node *node) {
1986 return node->op == op_Proj
1987 || (!interprocedural_view && node->op == op_Filter);
1990 /* Returns true if the operation manipulates control flow. */
1992 is_cfop(ir_node *node) {
1993 return is_cfopcode(get_irn_op(node));
1996 /* Returns true if the operation manipulates interprocedural control flow:
1997 CallBegin, EndReg, EndExcept */
1998 int is_ip_cfop(ir_node *node) {
1999 return is_ip_cfopcode(get_irn_op(node));
2002 /* Returns true if the operation can change the control flow because
2005 is_fragile_op(ir_node *node) {
2006 return is_op_fragile(get_irn_op(node));
2009 /* Returns the memory operand of fragile operations. */
2010 ir_node *get_fragile_op_mem(ir_node *node) {
2011 assert(node && is_fragile_op(node));
/* dispatch on opcode; the fragile ops visible here keep mem at input 0
   (case labels elided in this excerpt) */
2013 switch (get_irn_opcode (node)) {
2022 return get_irn_n(node, 0);
2027 assert(0 && "should not be reached");
/* Debug helper: print a node, its block and all predecessors to stdout.
   Compiled to an empty stub when DEBUG_libfirm is not defined. */
2032 #ifdef DEBUG_libfirm
2033 void dump_irn (ir_node *n) {
2034 int i, arity = get_irn_arity(n);
2035 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* get_irn_n(n, -1) yields the node's block */
2037 ir_node *pred = get_irn_n(n, -1);
2038 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2039 get_irn_node_nr(pred), (void *)pred);
2041 printf(" preds: \n");
2042 for (i = 0; i < arity; ++i) {
2043 ir_node *pred = get_irn_n(n, i);
2044 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2045 get_irn_node_nr(pred), (void *)pred);
2049 #else /* DEBUG_libfirm */
2050 void dump_irn (ir_node *n) {}
2051 #endif /* DEBUG_libfirm */