3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
   in their in arrays; used by the typed accessors below. */
32 #define CALL_PARAM_OFFSET 2 /* Call preds: mem, ptr, then the parameters */
33 #define FUNCCALL_PARAM_OFFSET 1 /* FuncCall preds: ptr, then the parameters (no mem) */
34 #define SEL_INDEX_OFFSET 2 /* Sel preds: mem, ptr, then the index operands */
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0 /* every End pred is a keep-alive edge */
/* Textual names of the pnc comparison constants, indexed by pnc value. */
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46 /* returns the pnc name for a pnc constant; pnc is not range-checked */
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 /* Calculates the negated pnc condition: each relation maps to its
   logical complement (ordered <-> unordered partner, Eq <-> Ne, ...). */
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
/* unreachable for valid pnc values */
75 return 99; /* to shut up gcc */
/* Textual names of the Proj numbers projected from Start. */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
/* Textual names of the symconst_kind values. */
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 /* Create a new irnode in irg, with an op, mode, arity and
95  * some incoming irnodes.
96  * If arity is negative, a node with a dynamic array is created. */
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
103 int node_size = offsetof (ir_node, attr) + op->attr_size; /* common header + op-specific attr */
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity); /* in[0] is reserved for the block */
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr(); /* debugging aid: unique node number */
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
/* Parenthesized names: out-of-line versions of the inlined __-helpers. */
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
/* arity dispatches on interprocedural_view via the __-helper */
159 (get_irn_arity)(const ir_node *node) {
160 return __get_irn_arity(node);
163 /* Returns the array with ins. This array is shifted with respect to the
164 array accessed by get_irn_n: The block operand is at position 0 not -1.
165 (@@@ This should be changed.)
166 The order of the predecessors in this array is not guaranteed, except that
167 lists of operands as predecessors of Block or arguments of a Call are
   ... (rest of original comment not visible in this chunk) */
170 get_irn_in (const ir_node *node) {
172 if (interprocedural_view) { /* handle Filter and Block specially */
173 if (get_irn_opcode(node) == iro_Filter) {
174 assert(node->attr.filter.in_cg);
175 return node->attr.filter.in_cg;
176 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
177 return node->attr.block.in_cg;
179 /* else fall through */
/* Replaces the whole predecessor list of node; reallocates when the
   arity changes and keeps in[0] (the block) intact. */
185 set_irn_in (ir_node *node, int arity, ir_node **in) {
188 if (interprocedural_view) { /* handle Filter and Block specially */
189 if (get_irn_opcode(node) == iro_Filter) {
190 assert(node->attr.filter.in_cg);
191 arr = &node->attr.filter.in_cg;
192 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
193 arr = &node->attr.block.in_cg;
200 if (arity != ARR_LEN(*arr) - 1) {
201 ir_node * block = (*arr)[0];
202 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
205 fix_backedges(current_ir_graph->obst, node);
206 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
210 (get_irn_intra_n)(ir_node *node, int n) {
211 return __get_irn_intra_n (node, n);
215 (get_irn_inter_n)(ir_node *node, int n) {
216 return __get_irn_inter_n (node, n);
/* n == -1 addresses the block; 0..arity-1 the real predecessors */
220 (get_irn_n)(ir_node *node, int n) {
221 return __get_irn_n (node, n);
225 set_irn_n (ir_node *node, int n, ir_node *in) {
226 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
227 assert(in && in->kind == k_ir_node);
228 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
229 /* Change block pred in both views! */
230 node->in[n + 1] = in;
231 assert(node->attr.filter.in_cg);
232 node->attr.filter.in_cg[n + 1] = in;
235 if (interprocedural_view) { /* handle Filter and Block specially */
236 if (get_irn_opcode(node) == iro_Filter) {
237 assert(node->attr.filter.in_cg);
238 node->attr.filter.in_cg[n + 1] = in;
240 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
241 node->attr.block.in_cg[n + 1] = in;
244 /* else fall through */
246 node->in[n + 1] = in;
/* Mode, op and traversal-bookkeeping accessors; the parenthesized
   names are out-of-line versions of the inlined __-helpers. */
250 (get_irn_mode)(const ir_node *node)
251 return __get_irn_mode(node);
255 (set_irn_mode)(ir_node *node, ir_mode *mode)
257 __set_irn_mode(node, mode);
261 get_irn_modecode (const ir_node *node)
264 return node->mode->code;
267 /** Gets the string representation of the mode. */
269 get_irn_modename (const ir_node *node)
272 return get_mode_name(node->mode);
276 get_irn_modeident (const ir_node *node)
279 return get_mode_ident(node->mode);
283 (get_irn_op)(const ir_node *node)
285 return __get_irn_op(node);
288 /* should be private to the library: */
290 set_irn_op (ir_node *node, ir_op *op)
297 (get_irn_opcode)(const ir_node *node)
299 return __get_irn_opcode(node);
/* Special-cases zero-arity Phis while the graph is under construction. */
303 get_irn_opname (const ir_node *node)
306 if ((get_irn_op((ir_node *)node) == op_Phi) &&
307 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
308 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
309 return get_id_str(node->op->name);
313 get_irn_opident (const ir_node *node)
316 return node->op->name;
320 (get_irn_visited)(const ir_node *node)
322 return __get_irn_visited(node);
326 (set_irn_visited)(ir_node *node, unsigned long visited)
328 __set_irn_visited(node, visited);
332 (mark_irn_visited)(ir_node *node) {
333 __mark_irn_visited(node);
337 (irn_not_visited)(const ir_node *node) {
338 return __irn_not_visited(node);
342 (irn_visited)(const ir_node *node) {
343 return __irn_visited(node);
347 (set_irn_link)(ir_node *node, void *link) {
348 __set_irn_link(node, link);
352 (get_irn_link)(const ir_node *node) {
353 return __get_irn_link(node);
/* Pin state currently comes from the op, not from the node itself. */
357 (get_irn_pinned)(const ir_node *node) {
358 /* return __get_irn_pinned(node); */
359 return (__get_op_pinned (get_irn_op (node)));
362 void set_irn_pinned(ir_node *node, op_pin_state state) {
363 /* due to optimization an op may be turned into a Tuple */
364 if (get_irn_op(node) == op_Tuple)
367 assert(node && get_op_pinned(get_irn_op(node)) == op_pin_state_exc_pinned);
368 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
370 node->attr.except.pin_state = state;
373 #ifdef DO_HEAPANALYSIS
374 /* Access the abstract interpretation information of a node.
375 Returns NULL if no such information is available. */
376 struct abstval *get_irn_abst_value(ir_node *n) {
379 /* Set the abstract interpretation information of a node. */
380 void set_irn_abst_value(ir_node *n, struct abstval *os) {
383 struct section *firm_get_irn_section(ir_node *n) {
386 void firm_set_irn_section(ir_node *n, struct section *s) {
389 #endif /* DO_HEAPANALYSIS */
392 /* Outputs a unique number for this node */
394 get_irn_node_nr(const ir_node *node) {
397 return node->node_nr;
/* Raw access to the op-specific attribute unions; each getter asserts
   the opcode it serves.  The skip_tid() calls normalize type references
   in place before returning them. */
404 get_irn_const_attr (ir_node *node)
406 assert (node->op == op_Const);
407 return node->attr.con;
411 get_irn_proj_attr (ir_node *node)
413 assert (node->op == op_Proj);
414 return node->attr.proj;
418 get_irn_alloc_attr (ir_node *node)
420 assert (node->op == op_Alloc);
425 get_irn_free_attr (ir_node *node)
427 assert (node->op == op_Free);
428 return node->attr.f = skip_tid(node->attr.f);
432 get_irn_symconst_attr (ir_node *node)
434 assert (node->op == op_SymConst);
439 get_irn_call_attr (ir_node *node)
441 assert (node->op == op_Call);
442 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
446 get_irn_funccall_attr (ir_node *node)
448 assert (node->op == op_FuncCall);
449 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
453 get_irn_sel_attr (ir_node *node)
455 assert (node->op == op_Sel);
460 get_irn_phi_attr (ir_node *node)
462 assert (node->op == op_Phi);
463 return node->attr.phi0_pos;
467 get_irn_block_attr (ir_node *node)
469 assert (node->op == op_Block);
470 return node->attr.block;
474 get_irn_load_attr (ir_node *node)
476 assert (node->op == op_Load);
477 return node->attr.load;
481 get_irn_store_attr (ir_node *node)
483 assert (node->op == op_Store);
484 return node->attr.store;
/* The four division-like ops share the exception attribute. */
488 get_irn_except_attr (ir_node *node)
490 assert (node->op == op_Div || node->op == op_Quot ||
491 node->op == op_DivMod || node->op == op_Mod);
492 return node->attr.except;
495 /** manipulate fields of individual nodes **/
497 /* this works for all except Block */
499 get_nodes_block (ir_node *node) {
500 assert (!(node->op == op_Block));
501 return get_irn_n(node, -1);
505 set_nodes_block (ir_node *node, ir_node *block) {
506 assert (!(node->op == op_Block));
507 set_irn_n(node, -1, block);
510 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
511 * from Start. If so returns frame type, else Null. */
512 type *is_frame_pointer(ir_node *n) {
513 if ((get_irn_op(n) == op_Proj) &&
514 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
515 ir_node *start = get_Proj_pred(n);
516 if (get_irn_op(start) == op_Start) {
517 return get_irg_frame_type(get_irn_irg(start));
523 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
524 * from Start. If so returns global type, else Null. */
525 type *is_globals_pointer(ir_node *n) {
526 if ((get_irn_op(n) == op_Proj) &&
527 (get_Proj_proj(n) == pn_Start_P_globals)) {
528 ir_node *start = get_Proj_pred(n);
529 if (get_irn_op(start) == op_Start) {
530 return get_glob_type();
536 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
537 * from Start. If so returns 1, else 0. */
538 int is_value_arg_pointer(ir_node *n) {
539 if ((get_irn_op(n) == op_Proj) &&
540 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
541 (get_irn_op(get_Proj_pred(n)) == op_Start))
546 /* Returns an array with the predecessors of the Block. Depending on
547 the implementation of the graph data structure this can be a copy of
548 the internal representation of predecessors as well as the internal
549 array itself. Therefore writing to this array might obstruct the ir. */
551 get_Block_cfgpred_arr (ir_node *node)
553 assert ((node->op == op_Block));
554 return (ir_node **)&(get_irn_in(node)[1]);
559 get_Block_n_cfgpreds (ir_node *node) {
560 assert ((node->op == op_Block));
561 return get_irn_arity(node);
565 get_Block_cfgpred (ir_node *node, int pos) {
567 assert (node->op == op_Block);
568 assert(-1 <= pos && pos < get_irn_arity(node));
569 return get_irn_n(node, pos);
573 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
574 assert (node->op == op_Block);
575 set_irn_n(node, pos, pred);
/* matured: block construction finished, no more preds may be added */
579 get_Block_matured (ir_node *node) {
580 assert (node->op == op_Block);
581 return node->attr.block.matured;
585 set_Block_matured (ir_node *node, bool matured) {
586 assert (node->op == op_Block);
587 node->attr.block.matured = matured;
590 get_Block_block_visited (ir_node *node) {
591 assert (node->op == op_Block);
592 return node->attr.block.block_visited;
596 set_Block_block_visited (ir_node *node, unsigned long visit) {
597 assert (node->op == op_Block);
598 node->attr.block.block_visited = visit;
601 /* For this current_ir_graph must be set. */
603 mark_Block_block_visited (ir_node *node) {
604 assert (node->op == op_Block);
605 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
609 Block_not_block_visited(ir_node *node) {
610 assert (node->op == op_Block);
611 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
615 get_Block_graph_arr (ir_node *node, int pos) {
616 assert (node->op == op_Block);
617 return node->attr.block.graph_arr[pos+1];
621 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
622 assert (node->op == op_Block);
623 node->attr.block.graph_arr[pos+1] = value;
/* Interprocedural-view predecessor list (in_cg): slot 0 is unused to
   mirror the in[] layout; backedge info must be kept in sync. */
626 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
627 assert(node->op == op_Block);
628 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
629 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
630 node->attr.block.in_cg[0] = NULL;
631 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
633 /* Fix backedge array. fix_backedges operates depending on
634 interprocedural_view. */
635 bool ipv = interprocedural_view;
636 interprocedural_view = true;
637 fix_backedges(current_ir_graph->obst, node);
638 interprocedural_view = ipv;
641 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
644 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
645 assert(node->op == op_Block &&
646 node->attr.block.in_cg &&
647 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
648 node->attr.block.in_cg[pos + 1] = pred;
651 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
652 assert(node->op == op_Block);
653 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
656 int get_Block_cg_n_cfgpreds(ir_node * node) {
657 assert(node->op == op_Block);
658 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
661 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
662 assert(node->op == op_Block && node->attr.block.in_cg);
663 return node->attr.block.in_cg[pos + 1];
666 void remove_Block_cg_cfgpred_arr(ir_node * node) {
667 assert(node->op == op_Block);
668 node->attr.block.in_cg = NULL;
/* Deprecated entry point: always aborts, use set_irn_irg instead. */
672 set_Start_irg(ir_node *node, ir_graph *irg) {
673 assert(node->op == op_Start);
674 assert(is_ir_graph(irg));
675 assert(0 && " Why set irg? -- use set_irn_irg");
/* End keep-alive edges: all preds of End are keep-alives. */
679 get_End_n_keepalives(ir_node *end) {
680 assert (end->op == op_End);
681 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
685 get_End_keepalive(ir_node *end, int pos) {
686 assert (end->op == op_End);
687 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
691 add_End_keepalive (ir_node *end, ir_node *ka) {
692 assert (end->op == op_End);
693 ARR_APP1 (ir_node *, end->in, ka);
697 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
698 assert (end->op == op_End);
699 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
703 free_End (ir_node *end) {
704 assert (end->op == op_End);
706 DEL_ARR_F(end->in); /* GL @@@ does not work! */
707 end->in = NULL; /* @@@ make sure we get an error if we use the
708 in array afterwards ... */
713 > Implementing the case construct (which is where the constant Proj node is
714 > important) involves far more than simply determining the constant values.
715 > We could argue that this is more properly a function of the translator from
716 > Firm to the target machine. That could be done if there was some way of
717 > projecting "default" out of the Cond node.
718 I know it's complicated.
719 Basically there are two problems:
720 - determining the gaps between the projs
721 - determining the biggest case constant to know the proj number for
723 I see several solutions:
724 1. Introduce a ProjDefault node. Solves both problems.
725 This means to extend all optimizations executed during construction.
726 2. Give the Cond node for switch two flavors:
727 a) there are no gaps in the projs (existing flavor)
728 b) gaps may exist, default proj is still the Proj with the largest
729 projection number. This covers also the gaps.
730 3. Fix the semantic of the Cond to that of 2b)
732 Solution 2 seems to be the best:
733 Computing the gaps in the Firm representation is not too hard, i.e.,
734 libFIRM can implement a routine that transforms between the two
735 flavours. This is also possible for 1) but 2) does not require to
736 change any existing optimization.
737 Further it should be far simpler to determine the biggest constant than
739 I don't want to choose 3) as 2a) seems to have advantages for
740 dataflow analysis and 3) does not allow to convert the representation to
/* Cond: pred 0 is the selector (boolean or switch value). */
744 get_Cond_selector (ir_node *node) {
745 assert (node->op == op_Cond);
746 return get_irn_n(node, 0);
750 set_Cond_selector (ir_node *node, ir_node *selector) {
751 assert (node->op == op_Cond);
752 set_irn_n(node, 0, selector);
756 get_Cond_kind (ir_node *node) {
757 assert (node->op == op_Cond);
758 return node->attr.c.kind;
762 set_Cond_kind (ir_node *node, cond_kind kind) {
763 assert (node->op == op_Cond);
764 node->attr.c.kind = kind;
768 get_Cond_defaultProj (ir_node *node) {
769 assert (node->op == op_Cond);
770 return node->attr.c.default_proj;
/* Return: pred 0 is mem, results follow at RETURN_RESULT_OFFSET. */
774 get_Return_mem (ir_node *node) {
775 assert (node->op == op_Return);
776 return get_irn_n(node, 0);
780 set_Return_mem (ir_node *node, ir_node *mem) {
781 assert (node->op == op_Return);
782 set_irn_n(node, 0, mem);
786 get_Return_n_ress (ir_node *node) {
787 assert (node->op == op_Return);
788 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
792 get_Return_res_arr (ir_node *node)
794 assert ((node->op == op_Return));
795 if (get_Return_n_ress(node) > 0)
796 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
803 set_Return_n_res (ir_node *node, int results) {
804 assert (node->op == op_Return);
809 get_Return_res (ir_node *node, int pos) {
810 assert (node->op == op_Return);
811 assert (get_Return_n_ress(node) > pos);
812 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
816 set_Return_res (ir_node *node, int pos, ir_node *res){
817 assert (node->op == op_Return);
818 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
/* Raise: pred 0 is mem, pred 1 the exception object pointer. */
822 get_Raise_mem (ir_node *node) {
823 assert (node->op == op_Raise);
824 return get_irn_n(node, 0);
828 set_Raise_mem (ir_node *node, ir_node *mem) {
829 assert (node->op == op_Raise);
830 set_irn_n(node, 0, mem);
834 get_Raise_exo_ptr (ir_node *node) {
835 assert (node->op == op_Raise);
836 return get_irn_n(node, 1);
840 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
841 assert (node->op == op_Raise);
842 set_irn_n(node, 1, exo_ptr);
845 tarval *get_Const_tarval (ir_node *node) {
846 assert (node->op == op_Const);
847 return node->attr.con.tv;
851 set_Const_tarval (ir_node *node, tarval *con) {
852 assert (node->op == op_Const);
853 node->attr.con.tv = con;
857 /* The source language type. Must be an atomic type. Mode of type must
858 be mode of node. For tarvals from entities type must be pointer to
   ... (rest of original comment not visible in this chunk) */
861 get_Const_type (ir_node *node) {
862 assert (node->op == op_Const);
863 return node->attr.con.tp;
867 set_Const_type (ir_node *node, type *tp) {
868 assert (node->op == op_Const);
869 if (tp != unknown_type) {
870 assert (is_atomic_type(tp));
871 assert (get_type_mode(tp) == get_irn_mode(node));
874 node->attr.con.tp = tp;
/* SymConst: attr.i.num selects which member of the symbol union is
   valid; each typed accessor asserts the matching kind. */
879 get_SymConst_kind (const ir_node *node) {
880 assert (node->op == op_SymConst);
881 return node->attr.i.num;
885 set_SymConst_kind (ir_node *node, symconst_kind num) {
886 assert (node->op == op_SymConst);
887 node->attr.i.num = num;
891 get_SymConst_type (ir_node *node) {
892 assert ( (node->op == op_SymConst)
893 && ( get_SymConst_kind(node) == symconst_type_tag
894 || get_SymConst_kind(node) == symconst_size));
895 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
899 set_SymConst_type (ir_node *node, type *tp) {
900 assert ( (node->op == op_SymConst)
901 && ( get_SymConst_kind(node) == symconst_type_tag
902 || get_SymConst_kind(node) == symconst_size));
903 node->attr.i.sym.type_p = tp;
907 get_SymConst_name (ir_node *node) {
908 assert ( (node->op == op_SymConst)
909 && (get_SymConst_kind(node) == symconst_addr_name));
910 return node->attr.i.sym.ident_p;
914 set_SymConst_name (ir_node *node, ident *name) {
915 assert ( (node->op == op_SymConst)
916 && (get_SymConst_kind(node) == symconst_addr_name));
917 node->attr.i.sym.ident_p = name;
921 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
922 entity *get_SymConst_entity (ir_node *node) {
923 assert ( (node->op == op_SymConst)
924 && (get_SymConst_kind (node) == symconst_addr_ent));
925 return node->attr.i.sym.entity_p;
928 void set_SymConst_entity (ir_node *node, entity *ent) {
929 assert ( (node->op == op_SymConst)
930 && (get_SymConst_kind(node) == symconst_addr_ent));
931 node->attr.i.sym.entity_p = ent;
/* Untyped access to the whole symbol union; caller must respect kind. */
935 union symconst_symbol
936 get_SymConst_symbol (ir_node *node) {
937 assert (node->op == op_SymConst);
938 return node->attr.i.sym;
942 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
943 assert (node->op == op_SymConst);
945 node->attr.i.sym = sym;
/* Sel: pred 0 mem, pred 1 ptr, indices from SEL_INDEX_OFFSET on. */
949 get_Sel_mem (ir_node *node) {
950 assert (node->op == op_Sel);
951 return get_irn_n(node, 0);
955 set_Sel_mem (ir_node *node, ir_node *mem) {
956 assert (node->op == op_Sel);
957 set_irn_n(node, 0, mem);
961 get_Sel_ptr (ir_node *node) {
962 assert (node->op == op_Sel);
963 return get_irn_n(node, 1);
967 set_Sel_ptr (ir_node *node, ir_node *ptr) {
968 assert (node->op == op_Sel);
969 set_irn_n(node, 1, ptr);
973 get_Sel_n_indexs (ir_node *node) {
974 assert (node->op == op_Sel);
975 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
/* The +1 compensates for the block slot at get_irn_in()[0]. */
979 get_Sel_index_arr (ir_node *node)
981 assert ((node->op == op_Sel));
982 if (get_Sel_n_indexs(node) > 0)
983 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
989 get_Sel_index (ir_node *node, int pos) {
990 assert (node->op == op_Sel);
991 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
995 set_Sel_index (ir_node *node, int pos, ir_node *index) {
996 assert (node->op == op_Sel);
997 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1001 get_Sel_entity (ir_node *node) {
1002 assert (node->op == op_Sel);
1003 return node->attr.s.ent;
1007 set_Sel_entity (ir_node *node, entity *ent) {
1008 assert (node->op == op_Sel);
1009 node->attr.s.ent = ent;
/* InstOf accessors.  NOTE: the asserts below previously used '=' (an
   assignment) instead of '==', which silently overwrote node->op with
   op_InstOf in debug builds instead of checking it.  Fixed to '=='. */
1013 get_InstOf_ent (ir_node *node) {
1014 assert (node->op == op_InstOf);
1015 return (node->attr.io.ent);
1019 set_InstOf_ent (ir_node *node, type *ent) {
1020 assert (node->op == op_InstOf);
1021 node->attr.io.ent = ent;
/* pred 0 is the store (memory) operand */
1025 get_InstOf_store (ir_node *node) {
1026 assert (node->op == op_InstOf);
1027 return (get_irn_n (node, 0));
1031 set_InstOf_store (ir_node *node, ir_node *obj) {
1032 assert (node->op == op_InstOf);
1033 set_irn_n (node, 0, obj);
/* pred 1 is the object being tested */
1037 get_InstOf_obj (ir_node *node) {
1038 assert (node->op == op_InstOf);
1039 return (get_irn_n (node, 1));
1043 set_InstOf_obj (ir_node *node, ir_node *obj) {
1044 assert (node->op == op_InstOf);
1045 set_irn_n (node, 1, obj);
1049 /* For unary and binary arithmetic operations the access to the
1050 operands can be factored out. Left is the first, right the
1051 second arithmetic value as listed in tech report 0999-33.
1052 unops are: Minus, Abs, Not, Conv, Cast
1053 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1054 Shr, Shrs, Rotate, Cmp */
/* Call: pred 0 mem, pred 1 ptr, params from CALL_PARAM_OFFSET on. */
1058 get_Call_mem (ir_node *node) {
1059 assert (node->op == op_Call);
1060 return get_irn_n(node, 0);
1064 set_Call_mem (ir_node *node, ir_node *mem) {
1065 assert (node->op == op_Call);
1066 set_irn_n(node, 0, mem);
1070 get_Call_ptr (ir_node *node) {
1071 assert (node->op == op_Call);
1072 return get_irn_n(node, 1);
1076 set_Call_ptr (ir_node *node, ir_node *ptr) {
1077 assert (node->op == op_Call);
1078 set_irn_n(node, 1, ptr);
/* +1 compensates for the block slot at get_irn_in()[0] */
1082 get_Call_param_arr (ir_node *node) {
1083 assert (node->op == op_Call);
1084 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1088 get_Call_n_params (ir_node *node) {
1089 assert (node->op == op_Call);
1090 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1094 get_Call_arity (ir_node *node) {
1095 assert (node->op == op_Call);
1096 return get_Call_n_params(node);
1100 set_Call_arity (ir_node *node, ir_node *arity) {
1101 assert (node->op == op_Call);
1106 get_Call_param (ir_node *node, int pos) {
1107 assert (node->op == op_Call);
1108 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1112 set_Call_param (ir_node *node, int pos, ir_node *param) {
1113 assert (node->op == op_Call);
1114 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
/* cld_tp: the method type of the callee; normalized via skip_tid */
1118 get_Call_type (ir_node *node) {
1119 assert (node->op == op_Call);
1120 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1124 set_Call_type (ir_node *node, type *tp) {
1125 assert (node->op == op_Call);
1126 assert (is_method_type(tp));
1127 node->attr.call.cld_tp = tp;
/* callee_arr: possible callees computed by callee analysis, if any */
1130 int Call_has_callees(ir_node *node) {
1131 assert(node && node->op == op_Call);
1132 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1133 (node->attr.call.callee_arr != NULL));
1136 int get_Call_n_callees(ir_node * node) {
1137 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1138 return ARR_LEN(node->attr.call.callee_arr);
1141 entity * get_Call_callee(ir_node * node, int pos) {
1142 assert(pos >= 0 && pos < get_Call_n_callees(node));
1143 return node->attr.call.callee_arr[pos];
1146 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1147 assert(node->op == op_Call);
1148 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1149 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1151 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1154 void remove_Call_callee_arr(ir_node * node) {
1155 assert(node->op == op_Call);
1156 node->attr.call.callee_arr = NULL;
/* CallBegin: interprocedural-view companion of a Call node. */
1159 ir_node * get_CallBegin_ptr (ir_node *node) {
1160 assert(node->op == op_CallBegin);
1161 return get_irn_n(node, 0);
1163 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1164 assert(node->op == op_CallBegin);
1165 set_irn_n(node, 0, ptr);
1167 ir_node * get_CallBegin_call (ir_node *node) {
1168 assert(node->op == op_CallBegin);
1169 return node->attr.callbegin.call;
1171 void set_CallBegin_call (ir_node *node, ir_node *call) {
1172 assert(node->op == op_CallBegin);
1173 node->attr.callbegin.call = call;
/* FuncCall: memory-less call; pred 0 is the function pointer. */
1177 get_FuncCall_ptr (ir_node *node) {
1178 assert (node->op == op_FuncCall);
1179 return get_irn_n(node, 0);
1183 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1184 assert (node->op == op_FuncCall);
1185 set_irn_n(node, 0, ptr);
/* Returns the parameter array of a FuncCall.  get_irn_in() is shifted
   by one (slot 0 holds the block), so the params start at
   FUNCCALL_PARAM_OFFSET + 1 -- previously the '+ 1' was missing, making
   this point one slot too early (at the ptr operand), inconsistent with
   get_Call_param_arr and get_FuncCall_param. */
1189 get_FuncCall_param_arr (ir_node *node) {
1190 assert (node->op == op_FuncCall);
1191 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET + 1];
/* FuncCall parameter and type accessors; layout parallels Call but
   without a mem operand (FUNCCALL_PARAM_OFFSET == 1). */
1195 get_FuncCall_n_params (ir_node *node) {
1196 assert (node->op == op_FuncCall);
1197 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1201 get_FuncCall_arity (ir_node *node) {
1202 assert (node->op == op_FuncCall);
1203 return get_FuncCall_n_params(node);
1207 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1208 assert (node->op == op_FuncCall);
1213 get_FuncCall_param (ir_node *node, int pos) {
1214 assert (node->op == op_FuncCall);
1215 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1219 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1220 assert (node->op == op_FuncCall);
1221 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1225 get_FuncCall_type (ir_node *node) {
1226 assert (node->op == op_FuncCall);
1227 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1231 set_FuncCall_type (ir_node *node, type *tp) {
1232 assert (node->op == op_FuncCall);
1233 assert (is_method_type(tp));
1234 node->attr.call.cld_tp = tp;
/* True iff callee information is available for this FuncCall.
   Added the op assert for consistency with Call_has_callees. */
1237 int FuncCall_has_callees(ir_node *node) {
  assert(node && node->op == op_FuncCall);
1238 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1239 (node->attr.call.callee_arr != NULL));
/* Number of possible callees recorded for this FuncCall. */
1242 int get_FuncCall_n_callees(ir_node * node) {
1243 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1244 return ARR_LEN(node->attr.call.callee_arr);
/* pos-th possible callee; pos is not range-checked here */
1247 entity * get_FuncCall_callee(ir_node * node, int pos) {
1248 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1249 return node->attr.call.callee_arr[pos];
/* Installs the possible-callee array of a FuncCall (copied onto the
   graph obstack).  BUGFIX: previously queried get_Call_n_callees(),
   whose assert(node->op == op_Call) fires for every FuncCall node in
   debug builds; use the FuncCall variant instead. */
1252 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1253 assert(node->op == op_FuncCall);
1254 if (node->attr.call.callee_arr == NULL || get_FuncCall_n_callees(node) != n) {
1255 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1257 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
/* Drops the callee info; the array itself lives on the obstack. */
1260 void remove_FuncCall_callee_arr(ir_node * node) {
1261 assert(node->op == op_FuncCall);
1262 node->attr.call.callee_arr = NULL;
/* BINOP(OP): generated left/right accessors; op_index gives the   */ \
/* position of the first (left) arithmetic operand for this op.    */ \
1267 ir_node * get_##OP##_left(ir_node *node) { \
1268 assert(node->op == op_##OP); \
1269 return get_irn_n(node, node->op->op_index); \
1271 void set_##OP##_left(ir_node *node, ir_node *left) { \
1272 assert(node->op == op_##OP); \
1273 set_irn_n(node, node->op->op_index, left); \
1275 ir_node *get_##OP##_right(ir_node *node) { \
1276 assert(node->op == op_##OP); \
1277 return get_irn_n(node, node->op->op_index + 1); \
1279 void set_##OP##_right(ir_node *node, ir_node *right) { \
1280 assert(node->op == op_##OP); \
1281 set_irn_n(node, node->op->op_index + 1, right); \
/* UNOP(OP): generated single-operand accessor.                    */ \
1285 ir_node *get_##OP##_op(ir_node *node) { \
1286 assert(node->op == op_##OP); \
1287 return get_irn_n(node, node->op->op_index); \
1289 void set_##OP##_op (ir_node *node, ir_node *op) { \
1290 assert(node->op == op_##OP); \
1291 set_irn_n(node, node->op->op_index, op); \
/* The four division-like ops all carry mem as pred 0. */
1301 get_Quot_mem (ir_node *node) {
1302 assert (node->op == op_Quot);
1303 return get_irn_n(node, 0);
1307 set_Quot_mem (ir_node *node, ir_node *mem) {
1308 assert (node->op == op_Quot);
1309 set_irn_n(node, 0, mem);
1315 get_DivMod_mem (ir_node *node) {
1316 assert (node->op == op_DivMod);
1317 return get_irn_n(node, 0);
1321 set_DivMod_mem (ir_node *node, ir_node *mem) {
1322 assert (node->op == op_DivMod);
1323 set_irn_n(node, 0, mem);
1329 get_Div_mem (ir_node *node) {
1330 assert (node->op == op_Div);
1331 return get_irn_n(node, 0);
1335 set_Div_mem (ir_node *node, ir_node *mem) {
1336 assert (node->op == op_Div);
1337 set_irn_n(node, 0, mem);
1343 get_Mod_mem (ir_node *node) {
1344 assert (node->op == op_Mod);
1345 return get_irn_n(node, 0);
1349 set_Mod_mem (ir_node *node, ir_node *mem) {
1350 assert (node->op == op_Mod);
1351 set_irn_n(node, 0, mem);
/* Cast: type the node is cast to (stored in the attribute only). */
1368 get_Cast_type (ir_node *node) {
1369 assert (node->op == op_Cast);
1370 return node->attr.cast.totype;
1374 set_Cast_type (ir_node *node, type *to_tp) {
1375 assert (node->op == op_Cast);
1376 node->attr.cast.totype = to_tp;
/* Generic unop/binop access driven by the op's arity and op_index. */
1380 is_unop (ir_node *node) {
1381 return (node->op->opar == oparity_unary);
1385 get_unop_op (ir_node *node) {
1386 if (node->op->opar == oparity_unary)
1387 return get_irn_n(node, node->op->op_index);
1389 assert(node->op->opar == oparity_unary);
1394 set_unop_op (ir_node *node, ir_node *op) {
1395 if (node->op->opar == oparity_unary)
1396 set_irn_n(node, node->op->op_index, op);
1398 assert(node->op->opar == oparity_unary);
1402 is_binop (ir_node *node) {
1403 return (node->op->opar == oparity_binary);
1407 get_binop_left (ir_node *node) {
1408 if (node->op->opar == oparity_binary)
1409 return get_irn_n(node, node->op->op_index);
1411 assert(node->op->opar == oparity_binary);
1416 set_binop_left (ir_node *node, ir_node *left) {
1417 if (node->op->opar == oparity_binary)
1418 set_irn_n(node, node->op->op_index, left);
1420 assert (node->op->opar == oparity_binary);
/* right operand sits directly after the left one (op_index + 1) */
1424 get_binop_right (ir_node *node) {
1425 if (node->op->opar == oparity_binary)
1426 return get_irn_n(node, node->op->op_index + 1);
1428 assert(node->op->opar == oparity_binary);
1433 set_binop_right (ir_node *node, ir_node *right) {
1434 if (node->op->opar == oparity_binary)
1435 set_irn_n(node, node->op->op_index + 1, right);
1437 assert (node->op->opar == oparity_binary);
1440 int is_Phi (ir_node *n) {
1446 if (op == op_Filter) return interprocedural_view;
1449 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1450 (get_irn_arity(n) > 0));
1455 int is_Phi0 (ir_node *n) {
1458 return ((get_irn_op(n) == op_Phi) &&
1459 (get_irn_arity(n) == 0) &&
1460 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1464 get_Phi_preds_arr (ir_node *node) {
1465 assert (node->op == op_Phi);
1466 return (ir_node **)&(get_irn_in(node)[1]);
1470 get_Phi_n_preds (ir_node *node) {
1471 assert (is_Phi(node) || is_Phi0(node));
1472 return (get_irn_arity(node));
/* Sets the number of Phi predecessors.
   NOTE(review): only the assert is visible in this listing — the
   resizing logic (if any) is missing here; confirm against the
   complete source before relying on this function. */
1476 void set_Phi_n_preds (ir_node *node, int n_preds) {
1477 assert (node->op == op_Phi);
1482 get_Phi_pred (ir_node *node, int pos) {
1483 assert (is_Phi(node) || is_Phi0(node));
1484 return get_irn_n(node, pos);
1488 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1489 assert (is_Phi(node) || is_Phi0(node));
1490 set_irn_n(node, pos, pred);
1494 int is_memop(ir_node *node) {
1495 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1498 ir_node *get_memop_mem (ir_node *node) {
1499 assert(is_memop(node));
1500 return get_irn_n(node, 0);
1503 void set_memop_mem (ir_node *node, ir_node *mem) {
1504 assert(is_memop(node));
1505 set_irn_n(node, 0, mem);
1508 ir_node *get_memop_ptr (ir_node *node) {
1509 assert(is_memop(node));
1510 return get_irn_n(node, 1);
1513 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1514 assert(is_memop(node));
1515 set_irn_n(node, 1, ptr);
1519 get_Load_mem (ir_node *node) {
1520 assert (node->op == op_Load);
1521 return get_irn_n(node, 0);
1525 set_Load_mem (ir_node *node, ir_node *mem) {
1526 assert (node->op == op_Load);
1527 set_irn_n(node, 0, mem);
1531 get_Load_ptr (ir_node *node) {
1532 assert (node->op == op_Load);
1533 return get_irn_n(node, 1);
1537 set_Load_ptr (ir_node *node, ir_node *ptr) {
1538 assert (node->op == op_Load);
1539 set_irn_n(node, 1, ptr);
1543 get_Load_mode (ir_node *node) {
1544 assert (node->op == op_Load);
1545 return node->attr.load.load_mode;
1549 set_Load_mode (ir_node *node, ir_mode *mode) {
1550 assert (node->op == op_Load);
1551 node->attr.load.load_mode = mode;
1555 get_Load_volatility (ir_node *node) {
1556 assert (node->op == op_Load);
1557 return node->attr.load.volatility;
1561 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1562 assert (node->op == op_Load);
1563 node->attr.load.volatility = volatility;
1568 get_Store_mem (ir_node *node) {
1569 assert (node->op == op_Store);
1570 return get_irn_n(node, 0);
1574 set_Store_mem (ir_node *node, ir_node *mem) {
1575 assert (node->op == op_Store);
1576 set_irn_n(node, 0, mem);
1580 get_Store_ptr (ir_node *node) {
1581 assert (node->op == op_Store);
1582 return get_irn_n(node, 1);
1586 set_Store_ptr (ir_node *node, ir_node *ptr) {
1587 assert (node->op == op_Store);
1588 set_irn_n(node, 1, ptr);
1592 get_Store_value (ir_node *node) {
1593 assert (node->op == op_Store);
1594 return get_irn_n(node, 2);
1598 set_Store_value (ir_node *node, ir_node *value) {
1599 assert (node->op == op_Store);
1600 set_irn_n(node, 2, value);
1604 get_Store_volatility (ir_node *node) {
1605 assert (node->op == op_Store);
1606 return node->attr.store.volatility;
1610 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1611 assert (node->op == op_Store);
1612 node->attr.store.volatility = volatility;
1617 get_Alloc_mem (ir_node *node) {
1618 assert (node->op == op_Alloc);
1619 return get_irn_n(node, 0);
1623 set_Alloc_mem (ir_node *node, ir_node *mem) {
1624 assert (node->op == op_Alloc);
1625 set_irn_n(node, 0, mem);
1629 get_Alloc_size (ir_node *node) {
1630 assert (node->op == op_Alloc);
1631 return get_irn_n(node, 1);
1635 set_Alloc_size (ir_node *node, ir_node *size) {
1636 assert (node->op == op_Alloc);
1637 set_irn_n(node, 1, size);
1641 get_Alloc_type (ir_node *node) {
1642 assert (node->op == op_Alloc);
1643 return node->attr.a.type = skip_tid(node->attr.a.type);
1647 set_Alloc_type (ir_node *node, type *tp) {
1648 assert (node->op == op_Alloc);
1649 node->attr.a.type = tp;
1653 get_Alloc_where (ir_node *node) {
1654 assert (node->op == op_Alloc);
1655 return node->attr.a.where;
1659 set_Alloc_where (ir_node *node, where_alloc where) {
1660 assert (node->op == op_Alloc);
1661 node->attr.a.where = where;
1666 get_Free_mem (ir_node *node) {
1667 assert (node->op == op_Free);
1668 return get_irn_n(node, 0);
1672 set_Free_mem (ir_node *node, ir_node *mem) {
1673 assert (node->op == op_Free);
1674 set_irn_n(node, 0, mem);
1678 get_Free_ptr (ir_node *node) {
1679 assert (node->op == op_Free);
1680 return get_irn_n(node, 1);
1684 set_Free_ptr (ir_node *node, ir_node *ptr) {
1685 assert (node->op == op_Free);
1686 set_irn_n(node, 1, ptr);
1690 get_Free_size (ir_node *node) {
1691 assert (node->op == op_Free);
1692 return get_irn_n(node, 2);
1696 set_Free_size (ir_node *node, ir_node *size) {
1697 assert (node->op == op_Free);
1698 set_irn_n(node, 2, size);
1702 get_Free_type (ir_node *node) {
1703 assert (node->op == op_Free);
1704 return node->attr.f = skip_tid(node->attr.f);
1708 set_Free_type (ir_node *node, type *tp) {
1709 assert (node->op == op_Free);
1714 get_Sync_preds_arr (ir_node *node) {
1715 assert (node->op == op_Sync);
1716 return (ir_node **)&(get_irn_in(node)[1]);
1720 get_Sync_n_preds (ir_node *node) {
1721 assert (node->op == op_Sync);
1722 return (get_irn_arity(node));
/* Sets the number of Sync predecessors.
   NOTE(review): only the assert is visible in this listing — any
   resizing logic is missing; confirm against the complete source. */
1727 set_Sync_n_preds (ir_node *node, int n_preds) {
1728 assert (node->op == op_Sync);
1733 get_Sync_pred (ir_node *node, int pos) {
1734 assert (node->op == op_Sync);
1735 return get_irn_n(node, pos);
1739 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1740 assert (node->op == op_Sync);
1741 set_irn_n(node, pos, pred);
1745 get_Proj_pred (ir_node *node) {
1746 assert (is_Proj(node));
1747 return get_irn_n(node, 0);
1751 set_Proj_pred (ir_node *node, ir_node *pred) {
1752 assert (is_Proj(node));
1753 set_irn_n(node, 0, pred);
1757 get_Proj_proj (ir_node *node) {
1758 assert (is_Proj(node));
1759 if (get_irn_opcode(node) == iro_Proj) {
1760 return node->attr.proj;
1762 assert(get_irn_opcode(node) == iro_Filter);
1763 return node->attr.filter.proj;
1768 set_Proj_proj (ir_node *node, long proj) {
1769 assert (node->op == op_Proj);
1770 node->attr.proj = proj;
1774 get_Tuple_preds_arr (ir_node *node) {
1775 assert (node->op == op_Tuple);
1776 return (ir_node **)&(get_irn_in(node)[1]);
1780 get_Tuple_n_preds (ir_node *node) {
1781 assert (node->op == op_Tuple);
1782 return (get_irn_arity(node));
/* Sets the number of Tuple predecessors.
   NOTE(review): only the assert is visible in this listing — any
   resizing logic is missing; confirm against the complete source. */
1787 set_Tuple_n_preds (ir_node *node, int n_preds) {
1788 assert (node->op == op_Tuple);
1793 get_Tuple_pred (ir_node *node, int pos) {
1794 assert (node->op == op_Tuple);
1795 return get_irn_n(node, pos);
1799 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1800 assert (node->op == op_Tuple);
1801 set_irn_n(node, pos, pred);
1805 get_Id_pred (ir_node *node) {
1806 assert (node->op == op_Id);
1807 return get_irn_n(node, 0);
1811 set_Id_pred (ir_node *node, ir_node *pred) {
1812 assert (node->op == op_Id);
1813 set_irn_n(node, 0, pred);
1816 ir_node *get_Confirm_value (ir_node *node) {
1817 assert (node->op == op_Confirm);
1818 return get_irn_n(node, 0);
1820 void set_Confirm_value (ir_node *node, ir_node *value) {
1821 assert (node->op == op_Confirm);
1822 set_irn_n(node, 0, value);
1824 ir_node *get_Confirm_bound (ir_node *node) {
1825 assert (node->op == op_Confirm);
1826 return get_irn_n(node, 1);
1828 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1829 assert (node->op == op_Confirm);
1830 set_irn_n(node, 0, bound);
1832 pn_Cmp get_Confirm_cmp (ir_node *node) {
1833 assert (node->op == op_Confirm);
1834 return node->attr.confirm_cmp;
1836 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1837 assert (node->op == op_Confirm);
1838 node->attr.confirm_cmp = cmp;
1843 get_Filter_pred (ir_node *node) {
1844 assert(node->op == op_Filter);
1848 set_Filter_pred (ir_node *node, ir_node *pred) {
1849 assert(node->op == op_Filter);
1853 get_Filter_proj(ir_node *node) {
1854 assert(node->op == op_Filter);
1855 return node->attr.filter.proj;
1858 set_Filter_proj (ir_node *node, long proj) {
1859 assert(node->op == op_Filter);
1860 node->attr.filter.proj = proj;
1863 /* Don't use get_irn_arity, get_irn_n in implementation as access
1864 shall work independent of view!!! */
1865 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1866 assert(node->op == op_Filter);
1867 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1868 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1869 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1870 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1871 node->attr.filter.in_cg[0] = node->in[0];
1873 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1876 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1877 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1878 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1879 node->attr.filter.in_cg[pos + 1] = pred;
1881 int get_Filter_n_cg_preds(ir_node *node) {
1882 assert(node->op == op_Filter && node->attr.filter.in_cg);
1883 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1885 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1887 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1889 arity = ARR_LEN(node->attr.filter.in_cg);
1890 assert(pos < arity - 1);
1891 return node->attr.filter.in_cg[pos + 1];
1896 get_irn_irg(ir_node *node) {
1897 if (get_irn_op(node) != op_Block)
1898 node = get_nodes_block(node);
1899 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1900 node = get_nodes_block(node);
1901 assert(get_irn_op(node) == op_Block);
1902 return node->attr.block.irg;
1906 /*----------------------------------------------------------------*/
1907 /* Auxiliary routines */
1908 /*----------------------------------------------------------------*/
1911 skip_Proj (ir_node *node) {
1912 /* don't assert node !!! */
1913 if (node && is_Proj(node)) {
1914 return get_Proj_pred(node);
1921 skip_Tuple (ir_node *node) {
1924 if (!get_opt_normalize()) return node;
1926 node = skip_Id(node);
1927 if (get_irn_op(node) == op_Proj) {
1928 pred = skip_Id(get_Proj_pred(node));
1929 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1930 pred = skip_Id(skip_Tuple(pred));
1931 if (get_irn_op(pred) == op_Tuple)
1932 return get_Tuple_pred(pred, get_Proj_proj(node));
1937 /** returns operand of node if node is a Cast */
1938 ir_node *skip_Cast (ir_node *node) {
1939 if (node && get_irn_op(node) == op_Cast) {
1940 return skip_Id(get_irn_n(node, 0));
/* NOTE(review): this is the first of two skip_Id definitions in this
   listing; in the complete source one of them is presumably disabled
   by the preprocessor — confirm before editing.  Local declarations
   (e.g. of `res`) are missing from this fragment. */
1947 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1948 than any other approach, as Id chains are resolved and all point to the real node, or
1949 all id's are self loops. */
1951 skip_Id (ir_node *node) {
1952 /* don't assert node !!! */
1954 if (!get_opt_normalize()) return node;
1956 /* Don't use get_Id_pred: We get into an endless loop for
1957 self-referencing Ids. */
1958 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
/* Remember the original target before we temporarily self-loop. */
1959 ir_node *rem_pred = node->in[0+1];
1962 assert (get_irn_arity (node) > 0);
/* Temporarily point to ourselves so recursive cycles terminate. */
1964 node->in[0+1] = node;
1965 res = skip_Id(rem_pred);
1966 if (res->op == op_Id) /* self-loop */ return node;
/* Shorten the chain: point directly at the chain end. */
1968 node->in[0+1] = res;
/* NOTE(review): second skip_Id definition in this listing (the file
   cannot compile with both active); presumably one variant is guarded
   by the preprocessor in the complete source — confirm.  The local
   declaration of `pred` and the assignment to `rem_pred` are missing
   from this fragment. */
1975 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1976 than any other approach, as Id chains are resolved and all point to the real node, or
1977 all id's are self loops. */
1979 skip_Id (ir_node *node) {
1981 /* don't assert node !!! */
1983 if (!node || (node->op != op_Id)) return node;
1985 if (!get_opt_normalize()) return node;
1987 /* Don't use get_Id_pred: We get into an endless loop for
1988 self-referencing Ids. */
1989 pred = node->in[0+1];
1991 if (pred->op != op_Id) return pred;
1993 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1994 ir_node *rem_pred, *res;
1996 if (pred->op != op_Id) return pred; /* shortcut */
1999 assert (get_irn_arity (node) > 0);
2001 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2002 res = skip_Id(rem_pred);
2003 if (res->op == op_Id) /* self-loop */ return node;
2005 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2014 is_Bad (ir_node *node) {
2016 if ((node) && get_irn_opcode(node) == iro_Bad)
2022 is_no_Block (ir_node *node) {
2024 return (get_irn_opcode(node) != iro_Block);
2028 is_Block (ir_node *node) {
2030 return (get_irn_opcode(node) == iro_Block);
2033 /* returns true if node is a Unknown node. */
2035 is_Unknown (ir_node *node) {
2037 return (get_irn_opcode(node) == iro_Unknown);
2041 is_Proj (const ir_node *node) {
2043 return node->op == op_Proj
2044 || (!interprocedural_view && node->op == op_Filter);
2047 /* Returns true if the operation manipulates control flow. */
2049 is_cfop(ir_node *node) {
2050 return is_cfopcode(get_irn_op(node));
2053 /* Returns true if the operation manipulates interprocedural control flow:
2054 CallBegin, EndReg, EndExcept */
2055 int is_ip_cfop(ir_node *node) {
2056 return is_ip_cfopcode(get_irn_op(node));
2059 /* Returns true if the operation can change the control flow because
2062 is_fragile_op(ir_node *node) {
2063 return is_op_fragile(get_irn_op(node));
2066 /* Returns the memory operand of fragile operations. */
2067 ir_node *get_fragile_op_mem(ir_node *node) {
2068 assert(node && is_fragile_op(node));
2070 switch (get_irn_opcode (node)) {
2079 return get_irn_n(node, 0);
2084 assert(0 && "should not be reached");
2089 /* Returns true if the operation is a forking control flow operation. */
2091 is_forking_op(ir_node *node) {
2092 return is_op_forking(get_irn_op(node));
2095 #ifdef DEBUG_libfirm
2096 void dump_irn (ir_node *n) {
2097 int i, arity = get_irn_arity(n);
2098 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2100 ir_node *pred = get_irn_n(n, -1);
2101 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2102 get_irn_node_nr(pred), (void *)pred);
2104 printf(" preds: \n");
2105 for (i = 0; i < arity; ++i) {
2106 ir_node *pred = get_irn_n(n, i);
2107 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2108 get_irn_node_nr(pred), (void *)pred);
2112 #else /* DEBUG_libfirm */
2113 void dump_irn (ir_node *n) {}
2114 #endif /* DEBUG_libfirm */