3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
33 /* some constants fixing the positions of a node's predecessors
35 #define CALL_PARAM_OFFSET 2
36 #define FUNCCALL_PARAM_OFFSET 1
37 #define SEL_INDEX_OFFSET 2
38 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
39 #define END_KEEPALIVE_OFFSET 0
41 static const char *pnc_name_arr [] = {
42 "False", "Eq", "Lt", "Le",
43 "Gt", "Ge", "Lg", "Leg", "Uo",
44 "Ue", "Ul", "Ule", "Ug", "Uge",
49 * returns the pnc name from a pnc constant
51 const char *get_pnc_string(int pnc) {
52 return pnc_name_arr[pnc];
56 * Calculates the negated pnc condition.
59 get_negated_pnc(int pnc) {
61 case False: return True; break;
62 case Eq: return Ne; break;
63 case Lt: return Uge; break;
64 case Le: return Ug; break;
65 case Gt: return Ule; break;
66 case Ge: return Ul; break;
67 case Lg: return Ue; break;
68 case Leg: return Uo; break;
69 case Uo: return Leg; break;
70 case Ue: return Lg; break;
71 case Ul: return Ge; break;
72 case Ule: return Gt; break;
73 case Ug: return Le; break;
74 case Uge: return Lt; break;
75 case Ne: return Eq; break;
76 case True: return False; break;
78 return 99; /* to shut up gcc */
81 const char *pns_name_arr [] = {
82 "initial_exec", "global_store",
83 "frame_base", "globals", "args"
86 const char *symconst_name_arr [] = {
87 "type_tag", "size", "addr_name", "addr_ent"
91 * Indicates, whether additional data can be registered to ir nodes.
92 * If set to 1, this is not possible anymore.
94 static int forbid_new_data = 0;
97 * The amount of additional space for custom data to be allocated upon
98 * creating a new node.
100 static size_t additional_node_data_size = 0;
103 size_t register_additional_node_data(size_t size)
105 assert(!forbid_new_data && "Too late to register additional node data");
110 return additional_node_data_size += size;
117 /* Forbid the addition of new data to an ir node. */
122 * irnode constructor.
123 * Create a new irnode in irg, with an op, mode, arity and
124 * some incoming irnodes.
125 * If arity is negative, a node with a dynamic array is created.
/* Allocates and minimally initialises a new ir_node on irg's obstack.
   NOTE(review): several lines are elided in this listing (local
   declarations of p/res, the arity < 0 vs >= 0 branch, op/mode/block
   assignment) -- confirm against the full source before editing. */
128 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
129 int arity, ir_node **in)
/* Total size = node header up to attr + the op's attribute size + any
   space registered via register_additional_node_data(). */
132 size_t node_size = offsetof(ir_node, attr) + op->attr_size + additional_node_data_size;
135 assert(irg && op && mode);
136 p = obstack_alloc (irg->obst, node_size);
137 memset(p, 0, node_size);
/* The additional client data lives in front of the node; res points
   past it, so the ir_node layout itself is unchanged. */
138 res = (ir_node *) (p + additional_node_data_size);
140 res->kind = k_ir_node;
/* Dynamic in-array (arity < 0): starts with only the block slot. */
146 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
/* Fixed in-array (arity >= 0): slot 0 is the block, then the arity preds. */
148 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
149 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
152 set_irn_dbg_info(res, db);
156 res->node_nr = get_irp_new_node_nr();
164 /* Copies all attributes stored in the old node to the new node.
165 Assumes both have the same opcode and sufficient size. */
167 copy_attrs (const ir_node *old_node, ir_node *new_node) {
/* Both nodes must have the same opcode so their attribute layouts match. */
168 assert(get_irn_op(old_node) == get_irn_op(new_node));
169 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
/* A Call's callee array is owned by the old node; the copy must not
   alias it, so drop the copied pointer. */
170 if (get_irn_op(new_node) == op_Call) remove_Call_callee_arr(new_node);
173 /*-- getting some parameters from ir_nodes --*/
176 (is_ir_node)(const void *thing) {
177 return __is_ir_node(thing);
181 (get_irn_intra_arity)(const ir_node *node) {
182 return __get_irn_intra_arity(node);
186 (get_irn_inter_arity)(const ir_node *node) {
187 return __get_irn_inter_arity(node);
190 int (*__get_irn_arity)(const ir_node *node) = __get_irn_intra_arity;
193 (get_irn_arity)(const ir_node *node) {
194 return __get_irn_arity(node);
197 /* Returns the array with ins. This array is shifted with respect to the
198 array accessed by get_irn_n: The block operand is at position 0 not -1.
199 (@@@ This should be changed.)
200 The order of the predecessors in this array is not guaranteed, except that
201 lists of operands as predecessors of Block or arguments of a Call are
204 get_irn_in (const ir_node *node) {
206 if (get_interprocedural_view()) { /* handle Filter and Block specially */
207 if (get_irn_opcode(node) == iro_Filter) {
208 assert(node->attr.filter.in_cg);
209 return node->attr.filter.in_cg;
210 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
211 return node->attr.block.in_cg;
213 /* else fall through */
/* Replaces the predecessor array of a node with the given arity/in pair.
   NOTE(review): lines are elided in this listing (declaration of arr,
   closing braces, the block-slot restore after reallocation) -- confirm
   against the full source. */
219 set_irn_in (ir_node *node, int arity, ir_node **in) {
/* In the interprocedural view Filter and Block keep a separate in_cg
   array that must be updated instead of the normal in array. */
222 if (get_interprocedural_view()) { /* handle Filter and Block specially */
223 if (get_irn_opcode(node) == iro_Filter) {
224 assert(node->attr.filter.in_cg);
225 arr = &node->attr.filter.in_cg;
226 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
227 arr = &node->attr.block.in_cg;
/* Reallocate only when the arity actually changes; slot 0 (the block)
   is preserved across the reallocation. */
234 if (arity != ARR_LEN(*arr) - 1) {
235 ir_node * block = (*arr)[0];
236 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
/* Backedge info depends on the in-array length; fix it before copying. */
239 fix_backedges(current_ir_graph->obst, node);
240 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
244 (get_irn_intra_n)(const ir_node *node, int n) {
245 return __get_irn_intra_n (node, n);
249 (get_irn_inter_n)(const ir_node *node, int n) {
250 return __get_irn_inter_n (node, n);
253 ir_node *(*__get_irn_n)(const ir_node *node, int n) = __get_irn_intra_n;
256 (get_irn_n)(const ir_node *node, int n) {
257 return __get_irn_n(node, n);
/* Sets the n-th predecessor of a node; n == -1 addresses the block.
   NOTE(review): several return statements and closing braces appear
   elided in this listing -- confirm against the full source. */
261 set_irn_n (ir_node *node, int n, ir_node *in) {
262 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
263 assert(in && in->kind == k_ir_node);
/* A Filter's block predecessor exists in both the intra- and the
   interprocedural array and must be kept consistent in both. */
264 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
265 /* Change block pred in both views! */
266 node->in[n + 1] = in;
267 assert(node->attr.filter.in_cg);
268 node->attr.filter.in_cg[n + 1] = in;
/* In the interprocedural view Filter and Block route the update to the
   in_cg array instead of the normal in array. */
271 if (get_interprocedural_view()) { /* handle Filter and Block specially */
272 if (get_irn_opcode(node) == iro_Filter) {
273 assert(node->attr.filter.in_cg);
274 node->attr.filter.in_cg[n + 1] = in;
276 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
277 node->attr.block.in_cg[n + 1] = in;
280 /* else fall through */
/* Default: ordinary predecessor slot (shifted by 1 for the block slot). */
282 node->in[n + 1] = in;
286 (get_irn_mode)(const ir_node *node) {
287 return __get_irn_mode(node);
291 (set_irn_mode)(ir_node *node, ir_mode *mode)
293 __set_irn_mode(node, mode);
297 get_irn_modecode (const ir_node *node)
300 return node->mode->code;
303 /** Gets the string representation of the mode .*/
305 get_irn_modename (const ir_node *node)
308 return get_mode_name(node->mode);
312 get_irn_modeident (const ir_node *node)
315 return get_mode_ident(node->mode);
319 (get_irn_op)(const ir_node *node)
321 return __get_irn_op(node);
324 /* should be private to the library: */
326 set_irn_op (ir_node *node, ir_op *op)
333 (get_irn_opcode)(const ir_node *node)
335 return __get_irn_opcode(node);
339 get_irn_opname (const ir_node *node)
342 if ((get_irn_op((ir_node *)node) == op_Phi) &&
343 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
344 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
345 return get_id_str(node->op->name);
349 get_irn_opident (const ir_node *node)
352 return node->op->name;
356 (get_irn_visited)(const ir_node *node)
358 return __get_irn_visited(node);
362 (set_irn_visited)(ir_node *node, unsigned long visited)
364 __set_irn_visited(node, visited);
368 (mark_irn_visited)(ir_node *node) {
369 __mark_irn_visited(node);
373 (irn_not_visited)(const ir_node *node) {
374 return __irn_not_visited(node);
378 (irn_visited)(const ir_node *node) {
379 return __irn_visited(node);
383 (set_irn_link)(ir_node *node, void *link) {
384 __set_irn_link(node, link);
388 (get_irn_link)(const ir_node *node) {
389 return __get_irn_link(node);
393 (get_irn_pinned)(const ir_node *node) {
394 return __get_irn_pinned(node);
/* Sets the pin state of a node whose op supports exception pinning.
   NOTE(review): the body of the Tuple early-return appears elided in
   this listing -- confirm against the full source. */
397 void set_irn_pinned(ir_node *node, op_pin_state state) {
398 /* due to optimization an opt may be turned into a Tuple */
399 if (get_irn_op(node) == op_Tuple)
402 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
403 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
405 node->attr.except.pin_state = state;
408 #ifdef DO_HEAPANALYSIS
409 /* Access the abstract interpretation information of a node.
410 Returns NULL if no such information is available. */
411 struct abstval *get_irn_abst_value(ir_node *n) {
414 /* Set the abstract interpretation information of a node. */
415 void set_irn_abst_value(ir_node *n, struct abstval *os) {
418 struct section *firm_get_irn_section(ir_node *n) {
421 void firm_set_irn_section(ir_node *n, struct section *s) {
425 /* Dummies needed for firmjni. */
426 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
427 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
428 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
429 void firm_set_irn_section(ir_node *n, struct section *s) {}
430 #endif /* DO_HEAPANALYSIS */
433 /* Outputs a unique number for this node */
435 get_irn_node_nr(const ir_node *node) {
438 return node->node_nr;
445 get_irn_const_attr (ir_node *node)
447 assert (node->op == op_Const);
448 return node->attr.con;
452 get_irn_proj_attr (ir_node *node)
454 assert (node->op == op_Proj);
455 return node->attr.proj;
459 get_irn_alloc_attr (ir_node *node)
461 assert (node->op == op_Alloc);
466 get_irn_free_attr (ir_node *node)
468 assert (node->op == op_Free);
469 return node->attr.f = skip_tid(node->attr.f);
473 get_irn_symconst_attr (ir_node *node)
475 assert (node->op == op_SymConst);
480 get_irn_call_attr (ir_node *node)
482 assert (node->op == op_Call);
483 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
487 get_irn_sel_attr (ir_node *node)
489 assert (node->op == op_Sel);
494 get_irn_phi_attr (ir_node *node)
496 assert (node->op == op_Phi);
497 return node->attr.phi0_pos;
501 get_irn_block_attr (ir_node *node)
503 assert (node->op == op_Block);
504 return node->attr.block;
508 get_irn_load_attr (ir_node *node)
510 assert (node->op == op_Load);
511 return node->attr.load;
515 get_irn_store_attr (ir_node *node)
517 assert (node->op == op_Store);
518 return node->attr.store;
522 get_irn_except_attr (ir_node *node)
524 assert (node->op == op_Div || node->op == op_Quot ||
525 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
526 return node->attr.except;
529 /** manipulate fields of individual nodes **/
531 /* this works for all except Block */
533 get_nodes_block (const ir_node *node) {
534 assert (!(node->op == op_Block));
535 return get_irn_n(node, -1);
539 set_nodes_block (ir_node *node, ir_node *block) {
540 assert (!(node->op == op_Block));
541 set_irn_n(node, -1, block);
544 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
545 * from Start. If so returns frame type, else Null. */
546 type *is_frame_pointer(ir_node *n) {
547 if ((get_irn_op(n) == op_Proj) &&
548 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
549 ir_node *start = get_Proj_pred(n);
550 if (get_irn_op(start) == op_Start) {
551 return get_irg_frame_type(get_irn_irg(start));
557 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
558 * from Start. If so returns global type, else Null. */
559 type *is_globals_pointer(ir_node *n) {
560 if ((get_irn_op(n) == op_Proj) &&
561 (get_Proj_proj(n) == pn_Start_P_globals)) {
562 ir_node *start = get_Proj_pred(n);
563 if (get_irn_op(start) == op_Start) {
564 return get_glob_type();
570 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
571 * from Start. If so returns 1, else 0. */
572 int is_value_arg_pointer(ir_node *n) {
573 if ((get_irn_op(n) == op_Proj) &&
574 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
575 (get_irn_op(get_Proj_pred(n)) == op_Start))
580 /* Returns an array with the predecessors of the Block. Depending on
581 the implementation of the graph data structure this can be a copy of
582 the internal representation of predecessors as well as the internal
583 array itself. Therefore writing to this array might obstruct the ir. */
585 get_Block_cfgpred_arr (ir_node *node)
587 assert ((node->op == op_Block));
588 return (ir_node **)&(get_irn_in(node)[1]);
593 get_Block_n_cfgpreds (ir_node *node) {
594 assert ((node->op == op_Block));
595 return get_irn_arity(node);
599 get_Block_cfgpred (ir_node *node, int pos) {
600 assert(-1 <= pos && pos < get_irn_arity(node));
601 assert(node->op == op_Block);
602 return get_irn_n(node, pos);
606 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
607 assert (node->op == op_Block);
608 set_irn_n(node, pos, pred);
612 get_Block_matured (ir_node *node) {
613 assert (node->op == op_Block);
614 return node->attr.block.matured;
618 set_Block_matured (ir_node *node, bool matured) {
619 assert (node->op == op_Block);
620 node->attr.block.matured = matured;
623 get_Block_block_visited (ir_node *node) {
624 assert (node->op == op_Block);
625 return node->attr.block.block_visited;
629 set_Block_block_visited (ir_node *node, unsigned long visit) {
630 assert (node->op == op_Block);
631 node->attr.block.block_visited = visit;
634 /* For this current_ir_graph must be set. */
636 mark_Block_block_visited (ir_node *node) {
637 assert (node->op == op_Block);
638 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
642 Block_not_block_visited(ir_node *node) {
643 assert (node->op == op_Block);
644 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
648 get_Block_graph_arr (ir_node *node, int pos) {
649 assert (node->op == op_Block);
650 return node->attr.block.graph_arr[pos+1];
654 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
655 assert (node->op == op_Block);
656 node->attr.block.graph_arr[pos+1] = value;
/* Installs the interprocedural (call-graph view) predecessor array of a
   Block.  Reallocates in_cg and its backedge array when the arity
   changes, then copies the given preds into slots 1..arity. */
659 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
660 assert(node->op == op_Block);
661 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
662 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
/* Slot 0 mirrors the block slot of a normal in-array; unused here. */
663 node->attr.block.in_cg[0] = NULL;
664 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
666 /* Fix backedge array. fix_backedges operates depending on
667 interprocedural_view. */
/* Temporarily force the interprocedural view so fix_backedges fixes the
   in_cg array, then restore the caller's view. */
668 int ipv = get_interprocedural_view();
669 set_interprocedural_view(true);
670 fix_backedges(current_ir_graph->obst, node);
671 set_interprocedural_view(ipv);
674 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
677 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
678 assert(node->op == op_Block &&
679 node->attr.block.in_cg &&
680 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
681 node->attr.block.in_cg[pos + 1] = pred;
684 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
685 assert(node->op == op_Block);
686 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
689 int get_Block_cg_n_cfgpreds(ir_node * node) {
690 assert(node->op == op_Block);
691 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
694 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
695 assert(node->op == op_Block && node->attr.block.in_cg);
696 return node->attr.block.in_cg[pos + 1];
699 void remove_Block_cg_cfgpred_arr(ir_node * node) {
700 assert(node->op == op_Block);
701 node->attr.block.in_cg = NULL;
704 ir_node *(set_Block_dead)(ir_node *block) {
705 return __set_Block_dead(block);
708 int (is_Block_dead)(const ir_node *block) {
709 return __is_Block_dead(block);
713 set_Start_irg(ir_node *node, ir_graph *irg) {
714 assert(node->op == op_Start);
715 assert(is_ir_graph(irg));
716 assert(0 && " Why set irg? -- use set_irn_irg");
720 get_End_n_keepalives(ir_node *end) {
721 assert (end->op == op_End);
722 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
726 get_End_keepalive(ir_node *end, int pos) {
727 assert (end->op == op_End);
728 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
732 add_End_keepalive (ir_node *end, ir_node *ka) {
733 assert (end->op == op_End);
734 ARR_APP1 (ir_node *, end->in, ka);
738 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
739 assert (end->op == op_End);
740 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
/* Frees the dynamic keep-alive in-array of an End node and poisons the
   pointer so later accesses fail loudly. */
744 free_End (ir_node *end) {
745 assert (end->op == op_End);
747 DEL_ARR_F(end->in); /* GL @@@ does not work! */
748 end->in = NULL; /* @@@ make sure we get an error if we use the
749 in array afterwards ... */
754 > Implementing the case construct (which is where the constant Proj node is
755 > important) involves far more than simply determining the constant values.
756 > We could argue that this is more properly a function of the translator from
757 > Firm to the target machine. That could be done if there was some way of
758 > projecting "default" out of the Cond node.
759 I know it's complicated.
760 Basically there are two problems:
761 - determining the gaps between the projs
762 - determining the biggest case constant to know the proj number for
764 I see several solutions:
765 1. Introduce a ProjDefault node. Solves both problems.
766 This means to extend all optimizations executed during construction.
767 2. Give the Cond node for switch two flavors:
768 a) there are no gaps in the projs (existing flavor)
769 b) gaps may exist, default proj is still the Proj with the largest
770 projection number. This covers also the gaps.
771 3. Fix the semantic of the Cond to that of 2b)
773 Solution 2 seems to be the best:
774 Computing the gaps in the Firm representation is not too hard, i.e.,
775 libFIRM can implement a routine that transforms between the two
776 flavours. This is also possible for 1) but 2) does not require to
777 change any existing optimization.
778 Further it should be far simpler to determine the biggest constant than
780 I don't want to choose 3) as 2a) seems to have advantages for
781 dataflow analysis and 3) does not allow to convert the representation to
785 get_Cond_selector (ir_node *node) {
786 assert (node->op == op_Cond);
787 return get_irn_n(node, 0);
791 set_Cond_selector (ir_node *node, ir_node *selector) {
792 assert (node->op == op_Cond);
793 set_irn_n(node, 0, selector);
797 get_Cond_kind (ir_node *node) {
798 assert (node->op == op_Cond);
799 return node->attr.c.kind;
803 set_Cond_kind (ir_node *node, cond_kind kind) {
804 assert (node->op == op_Cond);
805 node->attr.c.kind = kind;
809 get_Cond_defaultProj (ir_node *node) {
810 assert (node->op == op_Cond);
811 return node->attr.c.default_proj;
815 get_Return_mem (ir_node *node) {
816 assert (node->op == op_Return);
817 return get_irn_n(node, 0);
821 set_Return_mem (ir_node *node, ir_node *mem) {
822 assert (node->op == op_Return);
823 set_irn_n(node, 0, mem);
827 get_Return_n_ress (ir_node *node) {
828 assert (node->op == op_Return);
829 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
833 get_Return_res_arr (ir_node *node)
835 assert ((node->op == op_Return));
836 if (get_Return_n_ress(node) > 0)
837 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
844 set_Return_n_res (ir_node *node, int results) {
845 assert (node->op == op_Return);
850 get_Return_res (ir_node *node, int pos) {
851 assert (node->op == op_Return);
852 assert (get_Return_n_ress(node) > pos);
853 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
857 set_Return_res (ir_node *node, int pos, ir_node *res){
858 assert (node->op == op_Return);
859 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
863 get_Raise_mem (ir_node *node) {
864 assert (node->op == op_Raise);
865 return get_irn_n(node, 0);
869 set_Raise_mem (ir_node *node, ir_node *mem) {
870 assert (node->op == op_Raise);
871 set_irn_n(node, 0, mem);
875 get_Raise_exo_ptr (ir_node *node) {
876 assert (node->op == op_Raise);
877 return get_irn_n(node, 1);
881 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
882 assert (node->op == op_Raise);
883 set_irn_n(node, 1, exo_ptr);
886 tarval *get_Const_tarval (ir_node *node) {
887 assert (node->op == op_Const);
888 return node->attr.con.tv;
892 set_Const_tarval (ir_node *node, tarval *con) {
893 assert (node->op == op_Const);
894 node->attr.con.tv = con;
898 /* The source language type. Must be an atomic type. Mode of type must
899 be mode of node. For tarvals from entities type must be pointer to
902 get_Const_type (ir_node *node) {
903 assert (node->op == op_Const);
904 return node->attr.con.tp;
908 set_Const_type (ir_node *node, type *tp) {
909 assert (node->op == op_Const);
910 if (tp != unknown_type) {
911 assert (is_atomic_type(tp));
912 assert (get_type_mode(tp) == get_irn_mode(node));
914 node->attr.con.tp = tp;
919 get_SymConst_kind (const ir_node *node) {
920 assert (node->op == op_SymConst);
921 return node->attr.i.num;
925 set_SymConst_kind (ir_node *node, symconst_kind num) {
926 assert (node->op == op_SymConst);
927 node->attr.i.num = num;
931 get_SymConst_type (ir_node *node) {
932 assert ( (node->op == op_SymConst)
933 && ( get_SymConst_kind(node) == symconst_type_tag
934 || get_SymConst_kind(node) == symconst_size));
935 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
939 set_SymConst_type (ir_node *node, type *tp) {
940 assert ( (node->op == op_SymConst)
941 && ( get_SymConst_kind(node) == symconst_type_tag
942 || get_SymConst_kind(node) == symconst_size));
943 node->attr.i.sym.type_p = tp;
947 get_SymConst_name (ir_node *node) {
948 assert ( (node->op == op_SymConst)
949 && (get_SymConst_kind(node) == symconst_addr_name));
950 return node->attr.i.sym.ident_p;
954 set_SymConst_name (ir_node *node, ident *name) {
955 assert ( (node->op == op_SymConst)
956 && (get_SymConst_kind(node) == symconst_addr_name));
957 node->attr.i.sym.ident_p = name;
961 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
962 entity *get_SymConst_entity (ir_node *node) {
963 assert ( (node->op == op_SymConst)
964 && (get_SymConst_kind (node) == symconst_addr_ent));
965 return node->attr.i.sym.entity_p;
968 void set_SymConst_entity (ir_node *node, entity *ent) {
969 assert ( (node->op == op_SymConst)
970 && (get_SymConst_kind(node) == symconst_addr_ent));
971 node->attr.i.sym.entity_p = ent;
974 union symconst_symbol
975 get_SymConst_symbol (ir_node *node) {
976 assert (node->op == op_SymConst);
977 return node->attr.i.sym;
981 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
982 assert (node->op == op_SymConst);
983 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
984 node->attr.i.sym = sym;
988 get_SymConst_value_type (ir_node *node) {
989 assert (node->op == op_SymConst);
990 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
991 return node->attr.i.tp;
995 set_SymConst_value_type (ir_node *node, type *tp) {
996 assert (node->op == op_SymConst);
997 node->attr.i.tp = tp;
1001 get_Sel_mem (ir_node *node) {
1002 assert (node->op == op_Sel);
1003 return get_irn_n(node, 0);
1007 set_Sel_mem (ir_node *node, ir_node *mem) {
1008 assert (node->op == op_Sel);
1009 set_irn_n(node, 0, mem);
1013 get_Sel_ptr (ir_node *node) {
1014 assert (node->op == op_Sel);
1015 return get_irn_n(node, 1);
1019 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1020 assert (node->op == op_Sel);
1021 set_irn_n(node, 1, ptr);
1025 get_Sel_n_indexs (ir_node *node) {
1026 assert (node->op == op_Sel);
1027 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1031 get_Sel_index_arr (ir_node *node)
1033 assert ((node->op == op_Sel));
1034 if (get_Sel_n_indexs(node) > 0)
1035 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1041 get_Sel_index (ir_node *node, int pos) {
1042 assert (node->op == op_Sel);
1043 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1047 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1048 assert (node->op == op_Sel);
1049 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1053 get_Sel_entity (ir_node *node) {
1054 assert (node->op == op_Sel);
1055 return node->attr.s.ent;
1059 set_Sel_entity (ir_node *node, entity *ent) {
1060 assert (node->op == op_Sel);
1061 node->attr.s.ent = ent;
1065 get_InstOf_ent (ir_node *node) {
1066 assert (node->op = op_InstOf);
1067 return (node->attr.io.ent);
1071 set_InstOf_ent (ir_node *node, type *ent) {
1072 assert (node->op = op_InstOf);
1073 node->attr.io.ent = ent;
1077 get_InstOf_store (ir_node *node) {
1078 assert (node->op = op_InstOf);
1079 return (get_irn_n (node, 0));
1083 set_InstOf_store (ir_node *node, ir_node *obj) {
1084 assert (node->op = op_InstOf);
1085 set_irn_n (node, 0, obj);
1089 get_InstOf_obj (ir_node *node) {
1090 assert (node->op = op_InstOf);
1091 return (get_irn_n (node, 1));
1095 set_InstOf_obj (ir_node *node, ir_node *obj) {
1096 assert (node->op = op_InstOf);
1097 set_irn_n (node, 1, obj);
1101 /* For unary and binary arithmetic operations the access to the
1102 operands can be factored out. Left is the first, right the
1103 second arithmetic value as listed in tech report 0999-33.
1104 unops are: Minus, Abs, Not, Conv, Cast
1105 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1106 Shr, Shrs, Rotate, Cmp */
1110 get_Call_mem (ir_node *node) {
1111 assert (node->op == op_Call);
1112 return get_irn_n(node, 0);
1116 set_Call_mem (ir_node *node, ir_node *mem) {
1117 assert (node->op == op_Call);
1118 set_irn_n(node, 0, mem);
1122 get_Call_ptr (ir_node *node) {
1123 assert (node->op == op_Call);
1124 return get_irn_n(node, 1);
1128 set_Call_ptr (ir_node *node, ir_node *ptr) {
1129 assert (node->op == op_Call);
1130 set_irn_n(node, 1, ptr);
1134 get_Call_param_arr (ir_node *node) {
1135 assert (node->op == op_Call);
1136 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1140 get_Call_n_params (ir_node *node) {
1141 assert (node->op == op_Call);
1142 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1146 get_Call_arity (ir_node *node) {
1147 assert (node->op == op_Call);
1148 return get_Call_n_params(node);
1152 set_Call_arity (ir_node *node, ir_node *arity) {
1153 assert (node->op == op_Call);
1158 get_Call_param (ir_node *node, int pos) {
1159 assert (node->op == op_Call);
1160 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1164 set_Call_param (ir_node *node, int pos, ir_node *param) {
1165 assert (node->op == op_Call);
1166 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1170 get_Call_type (ir_node *node) {
1171 assert (node->op == op_Call);
1172 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1176 set_Call_type (ir_node *node, type *tp) {
1177 assert (node->op == op_Call);
1178 assert ((get_unknown_type() == tp) || is_method_type(tp));
1179 node->attr.call.cld_tp = tp;
1182 int Call_has_callees(ir_node *node) {
1183 assert(node && node->op == op_Call);
1184 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1185 (node->attr.call.callee_arr != NULL));
1188 int get_Call_n_callees(ir_node * node) {
1189 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1190 return ARR_LEN(node->attr.call.callee_arr);
1193 entity * get_Call_callee(ir_node * node, int pos) {
1194 assert(pos >= 0 && pos < get_Call_n_callees(node));
1195 return node->attr.call.callee_arr[pos];
/* Installs the statically computed callee array of a Call node.
   Reallocates the array on the current graph's obstack only when the
   length changes, then copies the n entity pointers in. */
1198 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1199 assert(node->op == op_Call);
1200 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1201 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1203 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1206 void remove_Call_callee_arr(ir_node * node) {
1207 assert(node->op == op_Call);
1208 node->attr.call.callee_arr = NULL;
1211 ir_node * get_CallBegin_ptr (ir_node *node) {
1212 assert(node->op == op_CallBegin);
1213 return get_irn_n(node, 0);
1215 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1216 assert(node->op == op_CallBegin);
1217 set_irn_n(node, 0, ptr);
1219 ir_node * get_CallBegin_call (ir_node *node) {
1220 assert(node->op == op_CallBegin);
1221 return node->attr.callbegin.call;
1223 void set_CallBegin_call (ir_node *node, ir_node *call) {
1224 assert(node->op == op_CallBegin);
1225 node->attr.callbegin.call = call;
1230 ir_node * get_##OP##_left(ir_node *node) { \
1231 assert(node->op == op_##OP); \
1232 return get_irn_n(node, node->op->op_index); \
1234 void set_##OP##_left(ir_node *node, ir_node *left) { \
1235 assert(node->op == op_##OP); \
1236 set_irn_n(node, node->op->op_index, left); \
1238 ir_node *get_##OP##_right(ir_node *node) { \
1239 assert(node->op == op_##OP); \
1240 return get_irn_n(node, node->op->op_index + 1); \
1242 void set_##OP##_right(ir_node *node, ir_node *right) { \
1243 assert(node->op == op_##OP); \
1244 set_irn_n(node, node->op->op_index + 1, right); \
1248 ir_node *get_##OP##_op(ir_node *node) { \
1249 assert(node->op == op_##OP); \
1250 return get_irn_n(node, node->op->op_index); \
1252 void set_##OP##_op (ir_node *node, ir_node *op) { \
1253 assert(node->op == op_##OP); \
1254 set_irn_n(node, node->op->op_index, op); \
1264 get_Quot_mem (ir_node *node) {
1265 assert (node->op == op_Quot);
1266 return get_irn_n(node, 0);
1270 set_Quot_mem (ir_node *node, ir_node *mem) {
1271 assert (node->op == op_Quot);
1272 set_irn_n(node, 0, mem);
1278 get_DivMod_mem (ir_node *node) {
1279 assert (node->op == op_DivMod);
1280 return get_irn_n(node, 0);
1284 set_DivMod_mem (ir_node *node, ir_node *mem) {
1285 assert (node->op == op_DivMod);
1286 set_irn_n(node, 0, mem);
1292 get_Div_mem (ir_node *node) {
1293 assert (node->op == op_Div);
1294 return get_irn_n(node, 0);
1298 set_Div_mem (ir_node *node, ir_node *mem) {
1299 assert (node->op == op_Div);
1300 set_irn_n(node, 0, mem);
1306 get_Mod_mem (ir_node *node) {
1307 assert (node->op == op_Mod);
1308 return get_irn_n(node, 0);
1312 set_Mod_mem (ir_node *node, ir_node *mem) {
1313 assert (node->op == op_Mod);
1314 set_irn_n(node, 0, mem);
1331 get_Cast_type (ir_node *node) {
1332 assert (node->op == op_Cast);
1333 return node->attr.cast.totype;
1337 set_Cast_type (ir_node *node, type *to_tp) {
1338 assert (node->op == op_Cast);
1339 node->attr.cast.totype = to_tp;
1343 (is_unop)(const ir_node *node) {
1344 return __is_unop(node);
1348 get_unop_op (ir_node *node) {
1349 if (node->op->opar == oparity_unary)
1350 return get_irn_n(node, node->op->op_index);
1352 assert(node->op->opar == oparity_unary);
1357 set_unop_op (ir_node *node, ir_node *op) {
1358 if (node->op->opar == oparity_unary)
1359 set_irn_n(node, node->op->op_index, op);
1361 assert(node->op->opar == oparity_unary);
1365 (is_binop)(const ir_node *node) {
1366 return __is_binop(node);
1370 get_binop_left (ir_node *node) {
1371 if (node->op->opar == oparity_binary)
1372 return get_irn_n(node, node->op->op_index);
1374 assert(node->op->opar == oparity_binary);
1379 set_binop_left (ir_node *node, ir_node *left) {
1380 if (node->op->opar == oparity_binary)
1381 set_irn_n(node, node->op->op_index, left);
1383 assert (node->op->opar == oparity_binary);
1387 get_binop_right (ir_node *node) {
1388 if (node->op->opar == oparity_binary)
1389 return get_irn_n(node, node->op->op_index + 1);
1391 assert(node->op->opar == oparity_binary);
1396 set_binop_right (ir_node *node, ir_node *right) {
1397 if (node->op->opar == oparity_binary)
1398 set_irn_n(node, node->op->op_index + 1, right);
1400 assert (node->op->opar == oparity_binary);
1403 int is_Phi (ir_node *n) {
1409 if (op == op_Filter) return get_interprocedural_view();
1412 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1413 (get_irn_arity(n) > 0));
1418 int is_Phi0 (ir_node *n) {
1421 return ((get_irn_op(n) == op_Phi) &&
1422 (get_irn_arity(n) == 0) &&
1423 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1427 get_Phi_preds_arr (ir_node *node) {
1428 assert (node->op == op_Phi);
1429 return (ir_node **)&(get_irn_in(node)[1]);
1433 get_Phi_n_preds (ir_node *node) {
1434 assert (is_Phi(node) || is_Phi0(node));
1435 return (get_irn_arity(node));
1439 void set_Phi_n_preds (ir_node *node, int n_preds) {
1440 assert (node->op == op_Phi);
1445 get_Phi_pred (ir_node *node, int pos) {
1446 assert (is_Phi(node) || is_Phi0(node));
1447 return get_irn_n(node, pos);
1451 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1452 assert (is_Phi(node) || is_Phi0(node));
1453 set_irn_n(node, pos, pred);
1457 int is_memop(ir_node *node) {
1458 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1461 ir_node *get_memop_mem (ir_node *node) {
1462 assert(is_memop(node));
1463 return get_irn_n(node, 0);
1466 void set_memop_mem (ir_node *node, ir_node *mem) {
1467 assert(is_memop(node));
1468 set_irn_n(node, 0, mem);
1471 ir_node *get_memop_ptr (ir_node *node) {
1472 assert(is_memop(node));
1473 return get_irn_n(node, 1);
1476 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1477 assert(is_memop(node));
1478 set_irn_n(node, 1, ptr);
1482 get_Load_mem (ir_node *node) {
1483 assert (node->op == op_Load);
1484 return get_irn_n(node, 0);
1488 set_Load_mem (ir_node *node, ir_node *mem) {
1489 assert (node->op == op_Load);
1490 set_irn_n(node, 0, mem);
1494 get_Load_ptr (ir_node *node) {
1495 assert (node->op == op_Load);
1496 return get_irn_n(node, 1);
1500 set_Load_ptr (ir_node *node, ir_node *ptr) {
1501 assert (node->op == op_Load);
1502 set_irn_n(node, 1, ptr);
1506 get_Load_mode (ir_node *node) {
1507 assert (node->op == op_Load);
1508 return node->attr.load.load_mode;
1512 set_Load_mode (ir_node *node, ir_mode *mode) {
1513 assert (node->op == op_Load);
1514 node->attr.load.load_mode = mode;
1518 get_Load_volatility (ir_node *node) {
1519 assert (node->op == op_Load);
1520 return node->attr.load.volatility;
1524 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1525 assert (node->op == op_Load);
1526 node->attr.load.volatility = volatility;
1531 get_Store_mem (ir_node *node) {
1532 assert (node->op == op_Store);
1533 return get_irn_n(node, 0);
1537 set_Store_mem (ir_node *node, ir_node *mem) {
1538 assert (node->op == op_Store);
1539 set_irn_n(node, 0, mem);
1543 get_Store_ptr (ir_node *node) {
1544 assert (node->op == op_Store);
1545 return get_irn_n(node, 1);
1549 set_Store_ptr (ir_node *node, ir_node *ptr) {
1550 assert (node->op == op_Store);
1551 set_irn_n(node, 1, ptr);
1555 get_Store_value (ir_node *node) {
1556 assert (node->op == op_Store);
1557 return get_irn_n(node, 2);
1561 set_Store_value (ir_node *node, ir_node *value) {
1562 assert (node->op == op_Store);
1563 set_irn_n(node, 2, value);
1567 get_Store_volatility (ir_node *node) {
1568 assert (node->op == op_Store);
1569 return node->attr.store.volatility;
1573 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1574 assert (node->op == op_Store);
1575 node->attr.store.volatility = volatility;
1580 get_Alloc_mem (ir_node *node) {
1581 assert (node->op == op_Alloc);
1582 return get_irn_n(node, 0);
1586 set_Alloc_mem (ir_node *node, ir_node *mem) {
1587 assert (node->op == op_Alloc);
1588 set_irn_n(node, 0, mem);
1592 get_Alloc_size (ir_node *node) {
1593 assert (node->op == op_Alloc);
1594 return get_irn_n(node, 1);
1598 set_Alloc_size (ir_node *node, ir_node *size) {
1599 assert (node->op == op_Alloc);
1600 set_irn_n(node, 1, size);
1604 get_Alloc_type (ir_node *node) {
1605 assert (node->op == op_Alloc);
1606 return node->attr.a.type = skip_tid(node->attr.a.type);
1610 set_Alloc_type (ir_node *node, type *tp) {
1611 assert (node->op == op_Alloc);
1612 node->attr.a.type = tp;
1616 get_Alloc_where (ir_node *node) {
1617 assert (node->op == op_Alloc);
1618 return node->attr.a.where;
1622 set_Alloc_where (ir_node *node, where_alloc where) {
1623 assert (node->op == op_Alloc);
1624 node->attr.a.where = where;
1629 get_Free_mem (ir_node *node) {
1630 assert (node->op == op_Free);
1631 return get_irn_n(node, 0);
1635 set_Free_mem (ir_node *node, ir_node *mem) {
1636 assert (node->op == op_Free);
1637 set_irn_n(node, 0, mem);
1641 get_Free_ptr (ir_node *node) {
1642 assert (node->op == op_Free);
1643 return get_irn_n(node, 1);
1647 set_Free_ptr (ir_node *node, ir_node *ptr) {
1648 assert (node->op == op_Free);
1649 set_irn_n(node, 1, ptr);
1653 get_Free_size (ir_node *node) {
1654 assert (node->op == op_Free);
1655 return get_irn_n(node, 2);
1659 set_Free_size (ir_node *node, ir_node *size) {
1660 assert (node->op == op_Free);
1661 set_irn_n(node, 2, size);
1665 get_Free_type (ir_node *node) {
1666 assert (node->op == op_Free);
1667 return node->attr.f = skip_tid(node->attr.f);
1671 set_Free_type (ir_node *node, type *tp) {
1672 assert (node->op == op_Free);
1677 get_Sync_preds_arr (ir_node *node) {
1678 assert (node->op == op_Sync);
1679 return (ir_node **)&(get_irn_in(node)[1]);
1683 get_Sync_n_preds (ir_node *node) {
1684 assert (node->op == op_Sync);
1685 return (get_irn_arity(node));
1690 set_Sync_n_preds (ir_node *node, int n_preds) {
1691 assert (node->op == op_Sync);
1696 get_Sync_pred (ir_node *node, int pos) {
1697 assert (node->op == op_Sync);
1698 return get_irn_n(node, pos);
1702 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1703 assert (node->op == op_Sync);
1704 set_irn_n(node, pos, pred);
1708 get_Proj_pred (ir_node *node) {
1709 assert (is_Proj(node));
1710 return get_irn_n(node, 0);
1714 set_Proj_pred (ir_node *node, ir_node *pred) {
1715 assert (is_Proj(node));
1716 set_irn_n(node, 0, pred);
1720 get_Proj_proj (ir_node *node) {
1721 assert (is_Proj(node));
1722 if (get_irn_opcode(node) == iro_Proj) {
1723 return node->attr.proj;
1725 assert(get_irn_opcode(node) == iro_Filter);
1726 return node->attr.filter.proj;
1731 set_Proj_proj (ir_node *node, long proj) {
1732 assert (node->op == op_Proj);
1733 node->attr.proj = proj;
1737 get_Tuple_preds_arr (ir_node *node) {
1738 assert (node->op == op_Tuple);
1739 return (ir_node **)&(get_irn_in(node)[1]);
1743 get_Tuple_n_preds (ir_node *node) {
1744 assert (node->op == op_Tuple);
1745 return (get_irn_arity(node));
1750 set_Tuple_n_preds (ir_node *node, int n_preds) {
1751 assert (node->op == op_Tuple);
1756 get_Tuple_pred (ir_node *node, int pos) {
1757 assert (node->op == op_Tuple);
1758 return get_irn_n(node, pos);
1762 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1763 assert (node->op == op_Tuple);
1764 set_irn_n(node, pos, pred);
1768 get_Id_pred (ir_node *node) {
1769 assert (node->op == op_Id);
1770 return get_irn_n(node, 0);
1774 set_Id_pred (ir_node *node, ir_node *pred) {
1775 assert (node->op == op_Id);
1776 set_irn_n(node, 0, pred);
1779 ir_node *get_Confirm_value (ir_node *node) {
1780 assert (node->op == op_Confirm);
1781 return get_irn_n(node, 0);
1783 void set_Confirm_value (ir_node *node, ir_node *value) {
1784 assert (node->op == op_Confirm);
1785 set_irn_n(node, 0, value);
1787 ir_node *get_Confirm_bound (ir_node *node) {
1788 assert (node->op == op_Confirm);
1789 return get_irn_n(node, 1);
1791 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1792 assert (node->op == op_Confirm);
1793 set_irn_n(node, 0, bound);
1795 pn_Cmp get_Confirm_cmp (ir_node *node) {
1796 assert (node->op == op_Confirm);
1797 return node->attr.confirm_cmp;
1799 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1800 assert (node->op == op_Confirm);
1801 node->attr.confirm_cmp = cmp;
/* Returns the single intraprocedural predecessor of a Filter node.
   NOTE(review): the extracted text is missing the bodies and closing
   braces of both accessors (presumably a read/write of in[1]) --
   restore from the upstream file before relying on them. */
1806 get_Filter_pred (ir_node *node) {
1807 assert(node->op == op_Filter);
/* Sets the single intraprocedural predecessor of a Filter node. */
1811 set_Filter_pred (ir_node *node, ir_node *pred) {
1812 assert(node->op == op_Filter);
1816 get_Filter_proj(ir_node *node) {
1817 assert(node->op == op_Filter);
1818 return node->attr.filter.proj;
1821 set_Filter_proj (ir_node *node, long proj) {
1822 assert(node->op == op_Filter);
1823 node->attr.filter.proj = proj;
1826 /* Don't use get_irn_arity, get_irn_n in implementation as access
1827 shall work independent of view!!! */
1828 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1829 assert(node->op == op_Filter);
1830 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1831 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1832 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1833 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1834 node->attr.filter.in_cg[0] = node->in[0];
1836 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1839 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1840 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1841 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1842 node->attr.filter.in_cg[pos + 1] = pred;
/* Returns the number of interprocedural predecessors of a Filter node
   (the in_cg array holds one extra leading slot for the block). */
1844 int get_Filter_n_cg_preds(ir_node *node) {
1845 assert(node->op == op_Filter && node->attr.filter.in_cg);
1846 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Returns the interprocedural predecessor at position pos.
   NOTE(review): the extraction dropped the `int arity;` declaration and
   the tail of the first assert -- restore from the upstream file. */
1848 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1850 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1852 arity = ARR_LEN(node->attr.filter.in_cg);
1853 assert(pos < arity - 1);
1854 return node->attr.filter.in_cg[pos + 1];
1859 get_irn_irg(ir_node *node) {
1860 if (get_irn_op(node) != op_Block)
1861 node = get_nodes_block(node);
1862 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1863 node = get_nodes_block(node);
1864 assert(get_irn_op(node) == op_Block);
1865 return node->attr.block.irg;
1869 /*----------------------------------------------------------------*/
1870 /* Auxiliary routines */
1871 /*----------------------------------------------------------------*/
1874 skip_Proj (ir_node *node) {
1875 /* don't assert node !!! */
1876 if (node && is_Proj(node)) {
1877 return get_Proj_pred(node);
1884 skip_Tuple (ir_node *node) {
1887 if (!get_opt_normalize()) return node;
1889 node = skip_Id(node);
1890 if (get_irn_op(node) == op_Proj) {
1891 pred = skip_Id(get_Proj_pred(node));
1892 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1893 pred = skip_Id(skip_Tuple(pred));
1894 if (get_irn_op(pred) == op_Tuple)
1895 return get_Tuple_pred(pred, get_Proj_proj(node));
1900 /** returns operand of node if node is a Cast */
1901 ir_node *skip_Cast (ir_node *node) {
1902 if (node && get_irn_op(node) == op_Cast) {
1903 return skip_Id(get_irn_n(node, 0));
1910 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1911 than any other approach, as Id chains are resolved and all point to the real node, or
1912 all id's are self loops. */
1914 skip_Id (ir_node *node) {
1915 /* don't assert node !!! */
1917 if (!get_opt_normalize()) return node;
1919 /* Don't use get_Id_pred: We get into an endless loop for
1920 self-referencing Ids. */
1921 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1922 ir_node *rem_pred = node->in[0+1];
1925 assert (get_irn_arity (node) > 0);
1927 node->in[0+1] = node;
1928 res = skip_Id(rem_pred);
1929 if (res->op == op_Id) /* self-loop */ return node;
1931 node->in[0+1] = res;
1938 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1939 than any other approach, as Id chains are resolved and all point to the real node, or
1940 all id's are self loops. */
1942 skip_Id (ir_node *node) {
1944 /* don't assert node !!! */
1946 if (!node || (node->op != op_Id)) return node;
1948 if (!get_opt_normalize()) return node;
1950 /* Don't use get_Id_pred: We get into an endless loop for
1951 self-referencing Ids. */
1952 pred = node->in[0+1];
1954 if (pred->op != op_Id) return pred;
1956 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1957 ir_node *rem_pred, *res;
1959 if (pred->op != op_Id) return pred; /* shortcut */
1962 assert (get_irn_arity (node) > 0);
1964 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1965 res = skip_Id(rem_pred);
1966 if (res->op == op_Id) /* self-loop */ return node;
1968 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1977 (is_Bad)(const ir_node *node) {
1978 return __is_Bad(node);
1982 (is_no_Block)(const ir_node *node) {
1983 return __is_no_Block(node);
1987 (is_Block)(const ir_node *node) {
1988 return __is_Block(node);
1991 /* returns true if node is a Unknown node. */
1993 is_Unknown (const ir_node *node) {
1995 return (get_irn_op(node) == op_Unknown);
1999 is_Proj (const ir_node *node) {
2001 return node->op == op_Proj
2002 || (!get_interprocedural_view() && node->op == op_Filter);
2005 /* Returns true if the operation manipulates control flow. */
2007 is_cfop(const ir_node *node) {
2008 return is_cfopcode(get_irn_op(node));
2011 /* Returns true if the operation manipulates interprocedural control flow:
2012 CallBegin, EndReg, EndExcept */
2013 int is_ip_cfop(const ir_node *node) {
2014 return is_ip_cfopcode(get_irn_op(node));
2017 /* Returns true if the operation can change the control flow because
2020 is_fragile_op(const ir_node *node) {
2021 return is_op_fragile(get_irn_op(node));
2024 /* Returns the memory operand of fragile operations. */
/* NOTE(review): the extraction dropped the switch's case labels and the
   closing braces -- for every fragile opcode the memory operand appears
   to be predecessor 0, with an assert(0) default.  Restore the full
   switch from the upstream file before relying on this. */
2025 ir_node *get_fragile_op_mem(ir_node *node) {
2026 assert(node && is_fragile_op(node));
2028 switch (get_irn_opcode (node)) {
2037 return get_irn_n(node, 0);
2042 assert(0 && "should not be reached");
2047 /* Returns true if the operation is a forking control flow operation. */
2049 is_forking_op(const ir_node *node) {
2050 return is_op_forking(get_irn_op(node));
2054 #ifdef DEBUG_libfirm
2055 void dump_irn (ir_node *n) {
2056 int i, arity = get_irn_arity(n);
2057 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2059 ir_node *pred = get_irn_n(n, -1);
2060 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2061 get_irn_node_nr(pred), (void *)pred);
2063 printf(" preds: \n");
2064 for (i = 0; i < arity; ++i) {
2065 ir_node *pred = get_irn_n(n, i);
2066 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2067 get_irn_node_nr(pred), (void *)pred);
2071 #else /* DEBUG_libfirm */
2072 void dump_irn (ir_node *n) {}
2073 #endif /* DEBUG_libfirm */