3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46 * returns the pnc name from a pnc constant
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
159 (get_irn_arity)(const ir_node *node) {
160 return __get_irn_arity(node);
163 /* Returns the array with ins. This array is shifted with respect to the
164 array accessed by get_irn_n: The block operand is at position 0 not -1.
165 (@@@ This should be changed.)
166 The order of the predecessors in this array is not guaranteed, except that
167 lists of operands as predecessors of Block or arguments of a Call are
170 get_irn_in (const ir_node *node) {
172 if (interprocedural_view) { /* handle Filter and Block specially */
173 if (get_irn_opcode(node) == iro_Filter) {
174 assert(node->attr.filter.in_cg);
175 return node->attr.filter.in_cg;
176 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
177 return node->attr.block.in_cg;
179 /* else fall through */
185 set_irn_in (ir_node *node, int arity, ir_node **in) {
188 if (interprocedural_view) { /* handle Filter and Block specially */
189 if (get_irn_opcode(node) == iro_Filter) {
190 assert(node->attr.filter.in_cg);
191 arr = &node->attr.filter.in_cg;
192 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
193 arr = &node->attr.block.in_cg;
200 if (arity != ARR_LEN(*arr) - 1) {
201 ir_node * block = (*arr)[0];
202 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
205 fix_backedges(current_ir_graph->obst, node);
206 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
210 (get_irn_intra_n)(ir_node *node, int n) {
211 return __get_irn_intra_n (node, n);
215 (get_irn_inter_n)(ir_node *node, int n) {
216 return __get_irn_inter_n (node, n);
220 (get_irn_n)(ir_node *node, int n) {
221 return __get_irn_n (node, n);
225 set_irn_n (ir_node *node, int n, ir_node *in) {
226 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
227 assert(in && in->kind == k_ir_node);
228 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
229 /* Change block pred in both views! */
230 node->in[n + 1] = in;
231 assert(node->attr.filter.in_cg);
232 node->attr.filter.in_cg[n + 1] = in;
235 if (interprocedural_view) { /* handle Filter and Block specially */
236 if (get_irn_opcode(node) == iro_Filter) {
237 assert(node->attr.filter.in_cg);
238 node->attr.filter.in_cg[n + 1] = in;
240 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
241 node->attr.block.in_cg[n + 1] = in;
244 /* else fall through */
246 node->in[n + 1] = in;
250 (get_irn_mode)(const ir_node *node) {
251 return __get_irn_mode(node);
255 (set_irn_mode)(ir_node *node, ir_mode *mode)
257 __set_irn_mode(node, mode);
261 get_irn_modecode (const ir_node *node)
264 return node->mode->code;
267 /** Gets the string representation of the mode .*/
269 get_irn_modename (const ir_node *node)
272 return get_mode_name(node->mode);
276 get_irn_modeident (const ir_node *node)
279 return get_mode_ident(node->mode);
283 (get_irn_op)(const ir_node *node)
285 return __get_irn_op(node);
288 /* should be private to the library: */
290 set_irn_op (ir_node *node, ir_op *op)
297 (get_irn_opcode)(const ir_node *node)
299 return __get_irn_opcode(node);
303 get_irn_opname (const ir_node *node)
306 if ((get_irn_op((ir_node *)node) == op_Phi) &&
307 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
308 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
309 return get_id_str(node->op->name);
313 get_irn_opident (const ir_node *node)
316 return node->op->name;
320 (get_irn_visited)(const ir_node *node)
322 return __get_irn_visited(node);
326 (set_irn_visited)(ir_node *node, unsigned long visited)
328 __set_irn_visited(node, visited);
332 (mark_irn_visited)(ir_node *node) {
333 __mark_irn_visited(node);
337 (irn_not_visited)(const ir_node *node) {
338 return __irn_not_visited(node);
342 (irn_visited)(const ir_node *node) {
343 return __irn_visited(node);
347 (set_irn_link)(ir_node *node, void *link) {
348 __set_irn_link(node, link);
352 (get_irn_link)(const ir_node *node) {
353 return __get_irn_link(node);
357 (get_irn_pinned)(const ir_node *node) {
358 return __get_irn_pinned(node);
361 void set_irn_pinned(ir_node *node, op_pin_state state) {
362 /* due to optimization an opt may be turned into a Tuple */
363 if (get_irn_op(node) == op_Tuple)
366 assert(node && get_op_pinned(get_irn_op(node)) == op_pin_state_exc_pinned);
367 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
369 node->attr.except.pin_state = state;
372 #ifdef DO_HEAPANALYSIS
373 /* Access the abstract interpretation information of a node.
374 Returns NULL if no such information is available. */
375 struct abstval *get_irn_abst_value(ir_node *n) {
378 /* Set the abstract interpretation information of a node. */
379 void set_irn_abst_value(ir_node *n, struct abstval *os) {
382 struct section *firm_get_irn_section(ir_node *n) {
385 void firm_set_irn_section(ir_node *n, struct section *s) {
388 #endif /* DO_HEAPANALYSIS */
391 /* Outputs a unique number for this node */
393 get_irn_node_nr(const ir_node *node) {
396 return node->node_nr;
403 get_irn_const_attr (ir_node *node)
405 assert (node->op == op_Const);
406 return node->attr.con;
410 get_irn_proj_attr (ir_node *node)
412 assert (node->op == op_Proj);
413 return node->attr.proj;
417 get_irn_alloc_attr (ir_node *node)
419 assert (node->op == op_Alloc);
424 get_irn_free_attr (ir_node *node)
426 assert (node->op == op_Free);
427 return node->attr.f = skip_tid(node->attr.f);
431 get_irn_symconst_attr (ir_node *node)
433 assert (node->op == op_SymConst);
438 get_irn_call_attr (ir_node *node)
440 assert (node->op == op_Call);
441 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
445 get_irn_funccall_attr (ir_node *node)
447 assert (node->op == op_FuncCall);
448 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
452 get_irn_sel_attr (ir_node *node)
454 assert (node->op == op_Sel);
459 get_irn_phi_attr (ir_node *node)
461 assert (node->op == op_Phi);
462 return node->attr.phi0_pos;
466 get_irn_block_attr (ir_node *node)
468 assert (node->op == op_Block);
469 return node->attr.block;
473 get_irn_load_attr (ir_node *node)
475 assert (node->op == op_Load);
476 return node->attr.load;
480 get_irn_store_attr (ir_node *node)
482 assert (node->op == op_Store);
483 return node->attr.store;
487 get_irn_except_attr (ir_node *node)
489 assert (node->op == op_Div || node->op == op_Quot ||
490 node->op == op_DivMod || node->op == op_Mod);
491 return node->attr.except;
494 /** manipulate fields of individual nodes **/
496 /* this works for all except Block */
498 get_nodes_block (ir_node *node) {
499 assert (!(node->op == op_Block));
500 return get_irn_n(node, -1);
504 set_nodes_block (ir_node *node, ir_node *block) {
505 assert (!(node->op == op_Block));
506 set_irn_n(node, -1, block);
509 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
510 * from Start. If so returns frame type, else Null. */
511 type *is_frame_pointer(ir_node *n) {
512 if ((get_irn_op(n) == op_Proj) &&
513 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
514 ir_node *start = get_Proj_pred(n);
515 if (get_irn_op(start) == op_Start) {
516 return get_irg_frame_type(get_irn_irg(start));
522 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
523 * from Start. If so returns global type, else Null. */
524 type *is_globals_pointer(ir_node *n) {
525 if ((get_irn_op(n) == op_Proj) &&
526 (get_Proj_proj(n) == pn_Start_P_globals)) {
527 ir_node *start = get_Proj_pred(n);
528 if (get_irn_op(start) == op_Start) {
529 return get_glob_type();
535 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
536 * from Start. If so returns 1, else 0. */
537 int is_value_arg_pointer(ir_node *n) {
538 if ((get_irn_op(n) == op_Proj) &&
539 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
540 (get_irn_op(get_Proj_pred(n)) == op_Start))
545 /* Returns an array with the predecessors of the Block. Depending on
546 the implementation of the graph data structure this can be a copy of
547 the internal representation of predecessors as well as the internal
548 array itself. Therefore writing to this array might obstruct the ir. */
550 get_Block_cfgpred_arr (ir_node *node)
552 assert ((node->op == op_Block));
553 return (ir_node **)&(get_irn_in(node)[1]);
558 get_Block_n_cfgpreds (ir_node *node) {
559 assert ((node->op == op_Block));
560 return get_irn_arity(node);
564 get_Block_cfgpred (ir_node *node, int pos) {
566 assert (node->op == op_Block);
567 assert(-1 <= pos && pos < get_irn_arity(node));
568 return get_irn_n(node, pos);
572 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
573 assert (node->op == op_Block);
574 set_irn_n(node, pos, pred);
578 get_Block_matured (ir_node *node) {
579 assert (node->op == op_Block);
580 return node->attr.block.matured;
584 set_Block_matured (ir_node *node, bool matured) {
585 assert (node->op == op_Block);
586 node->attr.block.matured = matured;
589 get_Block_block_visited (ir_node *node) {
590 assert (node->op == op_Block);
591 return node->attr.block.block_visited;
595 set_Block_block_visited (ir_node *node, unsigned long visit) {
596 assert (node->op == op_Block);
597 node->attr.block.block_visited = visit;
600 /* For this current_ir_graph must be set. */
602 mark_Block_block_visited (ir_node *node) {
603 assert (node->op == op_Block);
604 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
608 Block_not_block_visited(ir_node *node) {
609 assert (node->op == op_Block);
610 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
614 get_Block_graph_arr (ir_node *node, int pos) {
615 assert (node->op == op_Block);
616 return node->attr.block.graph_arr[pos+1];
620 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
621 assert (node->op == op_Block);
622 node->attr.block.graph_arr[pos+1] = value;
625 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
626 assert(node->op == op_Block);
627 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
628 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
629 node->attr.block.in_cg[0] = NULL;
630 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
632 /* Fix backedge array. fix_backedges operates depending on
633 interprocedural_view. */
634 bool ipv = interprocedural_view;
635 interprocedural_view = true;
636 fix_backedges(current_ir_graph->obst, node);
637 interprocedural_view = ipv;
640 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
643 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
644 assert(node->op == op_Block &&
645 node->attr.block.in_cg &&
646 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
647 node->attr.block.in_cg[pos + 1] = pred;
650 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
651 assert(node->op == op_Block);
652 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
655 int get_Block_cg_n_cfgpreds(ir_node * node) {
656 assert(node->op == op_Block);
657 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
660 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
661 assert(node->op == op_Block && node->attr.block.in_cg);
662 return node->attr.block.in_cg[pos + 1];
665 void remove_Block_cg_cfgpred_arr(ir_node * node) {
666 assert(node->op == op_Block);
667 node->attr.block.in_cg = NULL;
671 set_Start_irg(ir_node *node, ir_graph *irg) {
672 assert(node->op == op_Start);
673 assert(is_ir_graph(irg));
674 assert(0 && " Why set irg? -- use set_irn_irg");
678 get_End_n_keepalives(ir_node *end) {
679 assert (end->op == op_End);
680 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
684 get_End_keepalive(ir_node *end, int pos) {
685 assert (end->op == op_End);
686 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
690 add_End_keepalive (ir_node *end, ir_node *ka) {
691 assert (end->op == op_End);
692 ARR_APP1 (ir_node *, end->in, ka);
696 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
697 assert (end->op == op_End);
698 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
702 free_End (ir_node *end) {
703 assert (end->op == op_End);
705 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
706 end->in = NULL; /* @@@ make sure we get an error if we use the
707 in array afterwards ... */
712 > Implementing the case construct (which is where the constant Proj node is
713 > important) involves far more than simply determining the constant values.
714 > We could argue that this is more properly a function of the translator from
715 > Firm to the target machine. That could be done if there was some way of
716 > projecting "default" out of the Cond node.
717 I know it's complicated.
718 Basically there are two problems:
719 - determining the gaps between the projs
720 - determining the biggest case constant to know the proj number for
722 I see several solutions:
723 1. Introduce a ProjDefault node. Solves both problems.
724 This means to extend all optimizations executed during construction.
725 2. Give the Cond node for switch two flavors:
726 a) there are no gaps in the projs (existing flavor)
727 b) gaps may exist, default proj is still the Proj with the largest
728 projection number. This covers also the gaps.
729 3. Fix the semantic of the Cond to that of 2b)
731 Solution 2 seems to be the best:
732 Computing the gaps in the Firm representation is not too hard, i.e.,
733 libFIRM can implement a routine that transforms between the two
734 flavours. This is also possible for 1) but 2) does not require to
735 change any existing optimization.
736 Further it should be far simpler to determine the biggest constant than
738 I don't want to choose 3) as 2a) seems to have advantages for
739 dataflow analysis and 3) does not allow to convert the representation to
743 get_Cond_selector (ir_node *node) {
744 assert (node->op == op_Cond);
745 return get_irn_n(node, 0);
749 set_Cond_selector (ir_node *node, ir_node *selector) {
750 assert (node->op == op_Cond);
751 set_irn_n(node, 0, selector);
755 get_Cond_kind (ir_node *node) {
756 assert (node->op == op_Cond);
757 return node->attr.c.kind;
761 set_Cond_kind (ir_node *node, cond_kind kind) {
762 assert (node->op == op_Cond);
763 node->attr.c.kind = kind;
767 get_Cond_defaultProj (ir_node *node) {
768 assert (node->op == op_Cond);
769 return node->attr.c.default_proj;
773 get_Return_mem (ir_node *node) {
774 assert (node->op == op_Return);
775 return get_irn_n(node, 0);
779 set_Return_mem (ir_node *node, ir_node *mem) {
780 assert (node->op == op_Return);
781 set_irn_n(node, 0, mem);
785 get_Return_n_ress (ir_node *node) {
786 assert (node->op == op_Return);
787 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
791 get_Return_res_arr (ir_node *node)
793 assert ((node->op == op_Return));
794 if (get_Return_n_ress(node) > 0)
795 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
802 set_Return_n_res (ir_node *node, int results) {
803 assert (node->op == op_Return);
808 get_Return_res (ir_node *node, int pos) {
809 assert (node->op == op_Return);
810 assert (get_Return_n_ress(node) > pos);
811 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
815 set_Return_res (ir_node *node, int pos, ir_node *res){
816 assert (node->op == op_Return);
817 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
821 get_Raise_mem (ir_node *node) {
822 assert (node->op == op_Raise);
823 return get_irn_n(node, 0);
827 set_Raise_mem (ir_node *node, ir_node *mem) {
828 assert (node->op == op_Raise);
829 set_irn_n(node, 0, mem);
833 get_Raise_exo_ptr (ir_node *node) {
834 assert (node->op == op_Raise);
835 return get_irn_n(node, 1);
839 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
840 assert (node->op == op_Raise);
841 set_irn_n(node, 1, exo_ptr);
844 tarval *get_Const_tarval (ir_node *node) {
845 assert (node->op == op_Const);
846 return node->attr.con.tv;
850 set_Const_tarval (ir_node *node, tarval *con) {
851 assert (node->op == op_Const);
852 node->attr.con.tv = con;
856 /* The source language type. Must be an atomic type. Mode of type must
857 be mode of node. For tarvals from entities type must be pointer to
860 get_Const_type (ir_node *node) {
861 assert (node->op == op_Const);
862 return node->attr.con.tp;
866 set_Const_type (ir_node *node, type *tp) {
867 assert (node->op == op_Const);
868 if (tp != unknown_type) {
869 assert (is_atomic_type(tp));
870 assert (get_type_mode(tp) == get_irn_mode(node));
873 node->attr.con.tp = tp;
878 get_SymConst_kind (const ir_node *node) {
879 assert (node->op == op_SymConst);
880 return node->attr.i.num;
884 set_SymConst_kind (ir_node *node, symconst_kind num) {
885 assert (node->op == op_SymConst);
886 node->attr.i.num = num;
890 get_SymConst_type (ir_node *node) {
891 assert ( (node->op == op_SymConst)
892 && ( get_SymConst_kind(node) == symconst_type_tag
893 || get_SymConst_kind(node) == symconst_size));
894 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
898 set_SymConst_type (ir_node *node, type *tp) {
899 assert ( (node->op == op_SymConst)
900 && ( get_SymConst_kind(node) == symconst_type_tag
901 || get_SymConst_kind(node) == symconst_size));
902 node->attr.i.sym.type_p = tp;
906 get_SymConst_name (ir_node *node) {
907 assert ( (node->op == op_SymConst)
908 && (get_SymConst_kind(node) == symconst_addr_name));
909 return node->attr.i.sym.ident_p;
913 set_SymConst_name (ir_node *node, ident *name) {
914 assert ( (node->op == op_SymConst)
915 && (get_SymConst_kind(node) == symconst_addr_name));
916 node->attr.i.sym.ident_p = name;
920 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
921 entity *get_SymConst_entity (ir_node *node) {
922 assert ( (node->op == op_SymConst)
923 && (get_SymConst_kind (node) == symconst_addr_ent));
924 return node->attr.i.sym.entity_p;
927 void set_SymConst_entity (ir_node *node, entity *ent) {
928 assert ( (node->op == op_SymConst)
929 && (get_SymConst_kind(node) == symconst_addr_ent));
930 node->attr.i.sym.entity_p = ent;
934 union symconst_symbol
935 get_SymConst_symbol (ir_node *node) {
936 assert (node->op == op_SymConst);
937 return node->attr.i.sym;
941 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
942 assert (node->op == op_SymConst);
943 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
944 node->attr.i.sym = sym;
948 get_Sel_mem (ir_node *node) {
949 assert (node->op == op_Sel);
950 return get_irn_n(node, 0);
954 set_Sel_mem (ir_node *node, ir_node *mem) {
955 assert (node->op == op_Sel);
956 set_irn_n(node, 0, mem);
960 get_Sel_ptr (ir_node *node) {
961 assert (node->op == op_Sel);
962 return get_irn_n(node, 1);
966 set_Sel_ptr (ir_node *node, ir_node *ptr) {
967 assert (node->op == op_Sel);
968 set_irn_n(node, 1, ptr);
972 get_Sel_n_indexs (ir_node *node) {
973 assert (node->op == op_Sel);
974 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
978 get_Sel_index_arr (ir_node *node)
980 assert ((node->op == op_Sel));
981 if (get_Sel_n_indexs(node) > 0)
982 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
988 get_Sel_index (ir_node *node, int pos) {
989 assert (node->op == op_Sel);
990 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
994 set_Sel_index (ir_node *node, int pos, ir_node *index) {
995 assert (node->op == op_Sel);
996 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1000 get_Sel_entity (ir_node *node) {
1001 assert (node->op == op_Sel);
1002 return node->attr.s.ent;
1006 set_Sel_entity (ir_node *node, entity *ent) {
1007 assert (node->op == op_Sel);
1008 node->attr.s.ent = ent;
1012 get_InstOf_ent (ir_node *node) {
1013 assert (node->op = op_InstOf);
1014 return (node->attr.io.ent);
1018 set_InstOf_ent (ir_node *node, type *ent) {
1019 assert (node->op = op_InstOf);
1020 node->attr.io.ent = ent;
1024 get_InstOf_store (ir_node *node) {
1025 assert (node->op = op_InstOf);
1026 return (get_irn_n (node, 0));
1030 set_InstOf_store (ir_node *node, ir_node *obj) {
1031 assert (node->op = op_InstOf);
1032 set_irn_n (node, 0, obj);
1036 get_InstOf_obj (ir_node *node) {
1037 assert (node->op = op_InstOf);
1038 return (get_irn_n (node, 1));
1042 set_InstOf_obj (ir_node *node, ir_node *obj) {
1043 assert (node->op = op_InstOf);
1044 set_irn_n (node, 1, obj);
1048 /* For unary and binary arithmetic operations the access to the
1049 operands can be factored out. Left is the first, right the
1050 second arithmetic value as listed in tech report 0999-33.
1051 unops are: Minus, Abs, Not, Conv, Cast
1052 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1053 Shr, Shrs, Rotate, Cmp */
1057 get_Call_mem (ir_node *node) {
1058 assert (node->op == op_Call);
1059 return get_irn_n(node, 0);
1063 set_Call_mem (ir_node *node, ir_node *mem) {
1064 assert (node->op == op_Call);
1065 set_irn_n(node, 0, mem);
1069 get_Call_ptr (ir_node *node) {
1070 assert (node->op == op_Call);
1071 return get_irn_n(node, 1);
1075 set_Call_ptr (ir_node *node, ir_node *ptr) {
1076 assert (node->op == op_Call);
1077 set_irn_n(node, 1, ptr);
1081 get_Call_param_arr (ir_node *node) {
1082 assert (node->op == op_Call);
1083 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1087 get_Call_n_params (ir_node *node) {
1088 assert (node->op == op_Call);
1089 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1093 get_Call_arity (ir_node *node) {
1094 assert (node->op == op_Call);
1095 return get_Call_n_params(node);
1099 set_Call_arity (ir_node *node, ir_node *arity) {
1100 assert (node->op == op_Call);
1105 get_Call_param (ir_node *node, int pos) {
1106 assert (node->op == op_Call);
1107 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1111 set_Call_param (ir_node *node, int pos, ir_node *param) {
1112 assert (node->op == op_Call);
1113 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1117 get_Call_type (ir_node *node) {
1118 assert (node->op == op_Call);
1119 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1123 set_Call_type (ir_node *node, type *tp) {
1124 assert (node->op == op_Call);
1125 assert (is_method_type(tp));
1126 node->attr.call.cld_tp = tp;
1129 int Call_has_callees(ir_node *node) {
1130 assert(node && node->op == op_Call);
1131 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1132 (node->attr.call.callee_arr != NULL));
1135 int get_Call_n_callees(ir_node * node) {
1136 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1137 return ARR_LEN(node->attr.call.callee_arr);
1140 entity * get_Call_callee(ir_node * node, int pos) {
1141 assert(pos >= 0 && pos < get_Call_n_callees(node));
1142 return node->attr.call.callee_arr[pos];
1145 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1146 assert(node->op == op_Call);
1147 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1148 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1150 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1153 void remove_Call_callee_arr(ir_node * node) {
1154 assert(node->op == op_Call);
1155 node->attr.call.callee_arr = NULL;
1158 ir_node * get_CallBegin_ptr (ir_node *node) {
1159 assert(node->op == op_CallBegin);
1160 return get_irn_n(node, 0);
1162 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1163 assert(node->op == op_CallBegin);
1164 set_irn_n(node, 0, ptr);
1166 ir_node * get_CallBegin_call (ir_node *node) {
1167 assert(node->op == op_CallBegin);
1168 return node->attr.callbegin.call;
1170 void set_CallBegin_call (ir_node *node, ir_node *call) {
1171 assert(node->op == op_CallBegin);
1172 node->attr.callbegin.call = call;
1176 get_FuncCall_ptr (ir_node *node) {
1177 assert (node->op == op_FuncCall);
1178 return get_irn_n(node, 0);
1182 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1183 assert (node->op == op_FuncCall);
1184 set_irn_n(node, 0, ptr);
1188 get_FuncCall_param_arr (ir_node *node) {
1189 assert (node->op == op_FuncCall);
1190 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1194 get_FuncCall_n_params (ir_node *node) {
1195 assert (node->op == op_FuncCall);
1196 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1200 get_FuncCall_arity (ir_node *node) {
1201 assert (node->op == op_FuncCall);
1202 return get_FuncCall_n_params(node);
1206 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1207 assert (node->op == op_FuncCall);
1212 get_FuncCall_param (ir_node *node, int pos) {
1213 assert (node->op == op_FuncCall);
1214 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1218 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1219 assert (node->op == op_FuncCall);
1220 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1224 get_FuncCall_type (ir_node *node) {
1225 assert (node->op == op_FuncCall);
1226 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1230 set_FuncCall_type (ir_node *node, type *tp) {
1231 assert (node->op == op_FuncCall);
1232 assert (is_method_type(tp));
1233 node->attr.call.cld_tp = tp;
1236 int FuncCall_has_callees(ir_node *node) {
1237 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1238 (node->attr.call.callee_arr != NULL));
1241 int get_FuncCall_n_callees(ir_node * node) {
1242 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1243 return ARR_LEN(node->attr.call.callee_arr);
1246 entity * get_FuncCall_callee(ir_node * node, int pos) {
1247 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1248 return node->attr.call.callee_arr[pos];
1251 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1252 assert(node->op == op_FuncCall);
1253 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1254 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1256 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1259 void remove_FuncCall_callee_arr(ir_node * node) {
1260 assert(node->op == op_FuncCall);
1261 node->attr.call.callee_arr = NULL;
1266 ir_node * get_##OP##_left(ir_node *node) { \
1267 assert(node->op == op_##OP); \
1268 return get_irn_n(node, node->op->op_index); \
1270 void set_##OP##_left(ir_node *node, ir_node *left) { \
1271 assert(node->op == op_##OP); \
1272 set_irn_n(node, node->op->op_index, left); \
1274 ir_node *get_##OP##_right(ir_node *node) { \
1275 assert(node->op == op_##OP); \
1276 return get_irn_n(node, node->op->op_index + 1); \
1278 void set_##OP##_right(ir_node *node, ir_node *right) { \
1279 assert(node->op == op_##OP); \
1280 set_irn_n(node, node->op->op_index + 1, right); \
1284 ir_node *get_##OP##_op(ir_node *node) { \
1285 assert(node->op == op_##OP); \
1286 return get_irn_n(node, node->op->op_index); \
1288 void set_##OP##_op (ir_node *node, ir_node *op) { \
1289 assert(node->op == op_##OP); \
1290 set_irn_n(node, node->op->op_index, op); \
1300 get_Quot_mem (ir_node *node) {
1301 assert (node->op == op_Quot);
1302 return get_irn_n(node, 0);
1306 set_Quot_mem (ir_node *node, ir_node *mem) {
1307 assert (node->op == op_Quot);
1308 set_irn_n(node, 0, mem);
1314 get_DivMod_mem (ir_node *node) {
1315 assert (node->op == op_DivMod);
1316 return get_irn_n(node, 0);
1320 set_DivMod_mem (ir_node *node, ir_node *mem) {
1321 assert (node->op == op_DivMod);
1322 set_irn_n(node, 0, mem);
1328 get_Div_mem (ir_node *node) {
1329 assert (node->op == op_Div);
1330 return get_irn_n(node, 0);
1334 set_Div_mem (ir_node *node, ir_node *mem) {
1335 assert (node->op == op_Div);
1336 set_irn_n(node, 0, mem);
1342 get_Mod_mem (ir_node *node) {
1343 assert (node->op == op_Mod);
1344 return get_irn_n(node, 0);
1348 set_Mod_mem (ir_node *node, ir_node *mem) {
1349 assert (node->op == op_Mod);
1350 set_irn_n(node, 0, mem);
1367 get_Cast_type (ir_node *node) {
1368 assert (node->op == op_Cast);
1369 return node->attr.cast.totype;
1373 set_Cast_type (ir_node *node, type *to_tp) {
1374 assert (node->op == op_Cast);
1375 node->attr.cast.totype = to_tp;
1379 is_unop (ir_node *node) {
1380 return (node->op->opar == oparity_unary);
1384 get_unop_op (ir_node *node) {
1385 if (node->op->opar == oparity_unary)
1386 return get_irn_n(node, node->op->op_index);
1388 assert(node->op->opar == oparity_unary);
1393 set_unop_op (ir_node *node, ir_node *op) {
1394 if (node->op->opar == oparity_unary)
1395 set_irn_n(node, node->op->op_index, op);
1397 assert(node->op->opar == oparity_unary);
1401 is_binop (ir_node *node) {
1402 return (node->op->opar == oparity_binary);
1406 get_binop_left (ir_node *node) {
1407 if (node->op->opar == oparity_binary)
1408 return get_irn_n(node, node->op->op_index);
1410 assert(node->op->opar == oparity_binary);
1415 set_binop_left (ir_node *node, ir_node *left) {
1416 if (node->op->opar == oparity_binary)
1417 set_irn_n(node, node->op->op_index, left);
1419 assert (node->op->opar == oparity_binary);
1423 get_binop_right (ir_node *node) {
1424 if (node->op->opar == oparity_binary)
1425 return get_irn_n(node, node->op->op_index + 1);
1427 assert(node->op->opar == oparity_binary);
1432 set_binop_right (ir_node *node, ir_node *right) {
1433 if (node->op->opar == oparity_binary)
1434 set_irn_n(node, node->op->op_index + 1, right);
1436 assert (node->op->opar == oparity_binary);
/* Returns true if n behaves like a Phi node: in the interprocedural
   view Filter nodes also play the Phi role.  A real Phi only counts
   once construction is finished or it already has predecessors (see
   is_Phi0 for the arity-0 construction-time case).
   NOTE(review): lines of the original body are missing from this
   excerpt — `op` is used without a visible definition (presumably
   `ir_op *op = get_irn_op(n);`) and the op_Phi branch guard is not
   visible.  Confirm against the repository before editing. */
1439 int is_Phi (ir_node *n) {
1445 if (op == op_Filter) return interprocedural_view;
1448 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1449 (get_irn_arity(n) > 0));
1454 int is_Phi0 (ir_node *n) {
1457 return ((get_irn_op(n) == op_Phi) &&
1458 (get_irn_arity(n) == 0) &&
1459 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1463 get_Phi_preds_arr (ir_node *node) {
1464 assert (node->op == op_Phi);
1465 return (ir_node **)&(get_irn_in(node)[1]);
1469 get_Phi_n_preds (ir_node *node) {
1470 assert (is_Phi(node) || is_Phi0(node));
1471 return (get_irn_arity(node));
/* Resizes the predecessor array of a Phi node to n_preds entries.
   NOTE(review): the line(s) that actually perform the resize are
   missing from this excerpt — confirm against the repository. */
1475 void set_Phi_n_preds (ir_node *node, int n_preds) {
1476 assert (node->op == op_Phi);
1481 get_Phi_pred (ir_node *node, int pos) {
1482 assert (is_Phi(node) || is_Phi0(node));
1483 return get_irn_n(node, pos);
1487 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1488 assert (is_Phi(node) || is_Phi0(node));
1489 set_irn_n(node, pos, pred);
1493 int is_memop(ir_node *node) {
1494 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1497 ir_node *get_memop_mem (ir_node *node) {
1498 assert(is_memop(node));
1499 return get_irn_n(node, 0);
1502 void set_memop_mem (ir_node *node, ir_node *mem) {
1503 assert(is_memop(node));
1504 set_irn_n(node, 0, mem);
1507 ir_node *get_memop_ptr (ir_node *node) {
1508 assert(is_memop(node));
1509 return get_irn_n(node, 1);
1512 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1513 assert(is_memop(node));
1514 set_irn_n(node, 1, ptr);
1518 get_Load_mem (ir_node *node) {
1519 assert (node->op == op_Load);
1520 return get_irn_n(node, 0);
1524 set_Load_mem (ir_node *node, ir_node *mem) {
1525 assert (node->op == op_Load);
1526 set_irn_n(node, 0, mem);
1530 get_Load_ptr (ir_node *node) {
1531 assert (node->op == op_Load);
1532 return get_irn_n(node, 1);
1536 set_Load_ptr (ir_node *node, ir_node *ptr) {
1537 assert (node->op == op_Load);
1538 set_irn_n(node, 1, ptr);
1542 get_Load_mode (ir_node *node) {
1543 assert (node->op == op_Load);
1544 return node->attr.load.load_mode;
1548 set_Load_mode (ir_node *node, ir_mode *mode) {
1549 assert (node->op == op_Load);
1550 node->attr.load.load_mode = mode;
1554 get_Load_volatility (ir_node *node) {
1555 assert (node->op == op_Load);
1556 return node->attr.load.volatility;
1560 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1561 assert (node->op == op_Load);
1562 node->attr.load.volatility = volatility;
1567 get_Store_mem (ir_node *node) {
1568 assert (node->op == op_Store);
1569 return get_irn_n(node, 0);
1573 set_Store_mem (ir_node *node, ir_node *mem) {
1574 assert (node->op == op_Store);
1575 set_irn_n(node, 0, mem);
1579 get_Store_ptr (ir_node *node) {
1580 assert (node->op == op_Store);
1581 return get_irn_n(node, 1);
1585 set_Store_ptr (ir_node *node, ir_node *ptr) {
1586 assert (node->op == op_Store);
1587 set_irn_n(node, 1, ptr);
1591 get_Store_value (ir_node *node) {
1592 assert (node->op == op_Store);
1593 return get_irn_n(node, 2);
1597 set_Store_value (ir_node *node, ir_node *value) {
1598 assert (node->op == op_Store);
1599 set_irn_n(node, 2, value);
1603 get_Store_volatility (ir_node *node) {
1604 assert (node->op == op_Store);
1605 return node->attr.store.volatility;
1609 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1610 assert (node->op == op_Store);
1611 node->attr.store.volatility = volatility;
1616 get_Alloc_mem (ir_node *node) {
1617 assert (node->op == op_Alloc);
1618 return get_irn_n(node, 0);
1622 set_Alloc_mem (ir_node *node, ir_node *mem) {
1623 assert (node->op == op_Alloc);
1624 set_irn_n(node, 0, mem);
1628 get_Alloc_size (ir_node *node) {
1629 assert (node->op == op_Alloc);
1630 return get_irn_n(node, 1);
1634 set_Alloc_size (ir_node *node, ir_node *size) {
1635 assert (node->op == op_Alloc);
1636 set_irn_n(node, 1, size);
1640 get_Alloc_type (ir_node *node) {
1641 assert (node->op == op_Alloc);
1642 return node->attr.a.type = skip_tid(node->attr.a.type);
1646 set_Alloc_type (ir_node *node, type *tp) {
1647 assert (node->op == op_Alloc);
1648 node->attr.a.type = tp;
1652 get_Alloc_where (ir_node *node) {
1653 assert (node->op == op_Alloc);
1654 return node->attr.a.where;
1658 set_Alloc_where (ir_node *node, where_alloc where) {
1659 assert (node->op == op_Alloc);
1660 node->attr.a.where = where;
1665 get_Free_mem (ir_node *node) {
1666 assert (node->op == op_Free);
1667 return get_irn_n(node, 0);
1671 set_Free_mem (ir_node *node, ir_node *mem) {
1672 assert (node->op == op_Free);
1673 set_irn_n(node, 0, mem);
1677 get_Free_ptr (ir_node *node) {
1678 assert (node->op == op_Free);
1679 return get_irn_n(node, 1);
1683 set_Free_ptr (ir_node *node, ir_node *ptr) {
1684 assert (node->op == op_Free);
1685 set_irn_n(node, 1, ptr);
1689 get_Free_size (ir_node *node) {
1690 assert (node->op == op_Free);
1691 return get_irn_n(node, 2);
1695 set_Free_size (ir_node *node, ir_node *size) {
1696 assert (node->op == op_Free);
1697 set_irn_n(node, 2, size);
1701 get_Free_type (ir_node *node) {
1702 assert (node->op == op_Free);
1703 return node->attr.f = skip_tid(node->attr.f);
/* Sets the type attribute of a Free node.
   NOTE(review): the assignment to node->attr.f is not visible in this
   excerpt — confirm the body against the repository. */
1707 set_Free_type (ir_node *node, type *tp) {
1708 assert (node->op == op_Free);
1713 get_Sync_preds_arr (ir_node *node) {
1714 assert (node->op == op_Sync);
1715 return (ir_node **)&(get_irn_in(node)[1]);
1719 get_Sync_n_preds (ir_node *node) {
1720 assert (node->op == op_Sync);
1721 return (get_irn_arity(node));
/* Resizes the predecessor array of a Sync node to n_preds entries.
   NOTE(review): the line(s) performing the resize are missing from
   this excerpt — confirm against the repository. */
1726 set_Sync_n_preds (ir_node *node, int n_preds) {
1727 assert (node->op == op_Sync);
1732 get_Sync_pred (ir_node *node, int pos) {
1733 assert (node->op == op_Sync);
1734 return get_irn_n(node, pos);
1738 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1739 assert (node->op == op_Sync);
1740 set_irn_n(node, pos, pred);
1744 get_Proj_pred (ir_node *node) {
1745 assert (is_Proj(node));
1746 return get_irn_n(node, 0);
1750 set_Proj_pred (ir_node *node, ir_node *pred) {
1751 assert (is_Proj(node));
1752 set_irn_n(node, 0, pred);
1756 get_Proj_proj (ir_node *node) {
1757 assert (is_Proj(node));
1758 if (get_irn_opcode(node) == iro_Proj) {
1759 return node->attr.proj;
1761 assert(get_irn_opcode(node) == iro_Filter);
1762 return node->attr.filter.proj;
1767 set_Proj_proj (ir_node *node, long proj) {
1768 assert (node->op == op_Proj);
1769 node->attr.proj = proj;
1773 get_Tuple_preds_arr (ir_node *node) {
1774 assert (node->op == op_Tuple);
1775 return (ir_node **)&(get_irn_in(node)[1]);
1779 get_Tuple_n_preds (ir_node *node) {
1780 assert (node->op == op_Tuple);
1781 return (get_irn_arity(node));
/* Resizes the predecessor array of a Tuple node to n_preds entries.
   NOTE(review): the line(s) performing the resize are missing from
   this excerpt — confirm against the repository. */
1786 set_Tuple_n_preds (ir_node *node, int n_preds) {
1787 assert (node->op == op_Tuple);
1792 get_Tuple_pred (ir_node *node, int pos) {
1793 assert (node->op == op_Tuple);
1794 return get_irn_n(node, pos);
1798 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1799 assert (node->op == op_Tuple);
1800 set_irn_n(node, pos, pred);
1804 get_Id_pred (ir_node *node) {
1805 assert (node->op == op_Id);
1806 return get_irn_n(node, 0);
1810 set_Id_pred (ir_node *node, ir_node *pred) {
1811 assert (node->op == op_Id);
1812 set_irn_n(node, 0, pred);
1815 ir_node *get_Confirm_value (ir_node *node) {
1816 assert (node->op == op_Confirm);
1817 return get_irn_n(node, 0);
1819 void set_Confirm_value (ir_node *node, ir_node *value) {
1820 assert (node->op == op_Confirm);
1821 set_irn_n(node, 0, value);
1823 ir_node *get_Confirm_bound (ir_node *node) {
1824 assert (node->op == op_Confirm);
1825 return get_irn_n(node, 1);
1827 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1828 assert (node->op == op_Confirm);
1829 set_irn_n(node, 0, bound);
1831 pn_Cmp get_Confirm_cmp (ir_node *node) {
1832 assert (node->op == op_Confirm);
1833 return node->attr.confirm_cmp;
1835 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1836 assert (node->op == op_Confirm);
1837 node->attr.confirm_cmp = cmp;
/* Intraprocedural predecessor accessors of a Filter node.
   NOTE(review): the function bodies (the get_irn_n/set_irn_n calls)
   are missing from this excerpt — confirm against the repository. */
1842 get_Filter_pred (ir_node *node) {
1843 assert(node->op == op_Filter);
1847 set_Filter_pred (ir_node *node, ir_node *pred) {
1848 assert(node->op == op_Filter);
1852 get_Filter_proj(ir_node *node) {
1853 assert(node->op == op_Filter);
1854 return node->attr.filter.proj;
1857 set_Filter_proj (ir_node *node, long proj) {
1858 assert(node->op == op_Filter);
1859 node->attr.filter.proj = proj;
1862 /* Don't use get_irn_arity, get_irn_n in implementation as access
1863 shall work independent of view!!! */
1864 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1865 assert(node->op == op_Filter);
1866 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1867 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1868 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1869 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1870 node->attr.filter.in_cg[0] = node->in[0];
1872 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1875 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1876 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1877 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1878 node->attr.filter.in_cg[pos + 1] = pred;
1880 int get_Filter_n_cg_preds(ir_node *node) {
1881 assert(node->op == op_Filter && node->attr.filter.in_cg);
1882 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Returns interprocedural predecessor pos of a Filter node.
   NOTE(review): the declaration of `arity` and the tail of the first
   assert condition are missing from this excerpt — confirm against
   the repository. */
1884 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1886 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1888 arity = ARR_LEN(node->attr.filter.in_cg);
1889 assert(pos < arity - 1);
1890 return node->attr.filter.in_cg[pos + 1];
1895 get_irn_irg(ir_node *node) {
1896 if (get_irn_op(node) != op_Block)
1897 node = get_nodes_block(node);
1898 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1899 node = get_nodes_block(node);
1900 assert(get_irn_op(node) == op_Block);
1901 return node->attr.block.irg;
1905 /*----------------------------------------------------------------*/
1906 /* Auxiliary routines */
1907 /*----------------------------------------------------------------*/
1910 skip_Proj (ir_node *node) {
1911 /* don't assert node !!! */
1912 if (node && is_Proj(node)) {
1913 return get_Proj_pred(node);
1920 skip_Tuple (ir_node *node) {
1923 if (!get_opt_normalize()) return node;
1925 node = skip_Id(node);
1926 if (get_irn_op(node) == op_Proj) {
1927 pred = skip_Id(get_Proj_pred(node));
1928 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1929 pred = skip_Id(skip_Tuple(pred));
1930 if (get_irn_op(pred) == op_Tuple)
1931 return get_Tuple_pred(pred, get_Proj_proj(node));
1936 /** returns operand of node if node is a Cast */
1937 ir_node *skip_Cast (ir_node *node) {
1938 if (node && get_irn_op(node) == op_Cast) {
1939 return skip_Id(get_irn_n(node, 0));
/* Compacts Id-cycles to self-cycles.  Has the same (or less?)
   complexity than any other approach, as Id chains are resolved and
   all point to the real node, or all Ids are self loops.
   NOTE(review): this is the first of TWO skip_Id definitions in this
   excerpt; in the repository one of them is presumably disabled by the
   preprocessor.  The declaration of `res`, the return type line and
   the return statements are not visible here — confirm before editing. */
1950 skip_Id (ir_node *node) {
1951 /* don't assert node !!! */
1953 if (!get_opt_normalize()) return node;
1955 /* Don't use get_Id_pred: we get into an endless loop for
1956 self-referencing Ids. */
1957 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1958 ir_node *rem_pred = node->in[0+1];
/* turn node into a self-referencing Id to shorten Id cycles */
1961 assert (get_irn_arity (node) > 0);
1963 node->in[0+1] = node;
1964 res = skip_Id(rem_pred);
1965 if (res->op == op_Id) /* self-loop */ return node;
1967 node->in[0+1] = res;
/* Compacts Id-cycles to self-cycles.  Has the same (or less?)
   complexity than any other approach, as Id chains are resolved and
   all point to the real node, or all Ids are self loops.
   NOTE(review): second skip_Id definition (see note on the first one).
   In the visible text `rem_pred` is declared but never assigned before
   being passed to skip_Id, and the return statements are missing —
   lines were evidently lost in extraction; confirm against the
   repository before editing. */
1978 skip_Id (ir_node *node) {
1980 /* don't assert node !!! */
1982 if (!node || (node->op != op_Id)) return node;
1984 if (!get_opt_normalize()) return node;
1986 /* Don't use get_Id_pred: we get into an endless loop for
1987 self-referencing Ids. */
1988 pred = node->in[0+1];
1990 if (pred->op != op_Id) return pred;
1992 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1993 ir_node *rem_pred, *res;
1995 if (pred->op != op_Id) return pred; /* shortcut */
1998 assert (get_irn_arity (node) > 0);
2000 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2001 res = skip_Id(rem_pred);
2002 if (res->op == op_Id) /* self-loop */ return node;
2004 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2013 is_Bad (ir_node *node) {
2015 if ((node) && get_irn_opcode(node) == iro_Bad)
2021 is_no_Block (ir_node *node) {
2023 return (get_irn_opcode(node) != iro_Block);
2027 is_Block (ir_node *node) {
2029 return (get_irn_opcode(node) == iro_Block);
2032 /* returns true if node is a Unknown node. */
2034 is_Unknown (ir_node *node) {
2036 return (get_irn_opcode(node) == iro_Unknown);
2040 is_Proj (const ir_node *node) {
2042 return node->op == op_Proj
2043 || (!interprocedural_view && node->op == op_Filter);
2046 /* Returns true if the operation manipulates control flow. */
2048 is_cfop(ir_node *node) {
2049 return is_cfopcode(get_irn_op(node));
2052 /* Returns true if the operation manipulates interprocedural control flow:
2053 CallBegin, EndReg, EndExcept */
2054 int is_ip_cfop(ir_node *node) {
2055 return is_ip_cfopcode(get_irn_op(node));
2058 /* Returns true if the operation can change the control flow because
2061 is_fragile_op(ir_node *node) {
2062 return is_op_fragile(get_irn_op(node));
2065 /* Returns the memory operand of fragile operations. */
/* NOTE(review): the case labels of this switch (and the branch that
   falls through to the assert) are missing from this excerpt — only
   the common `return get_irn_n(node, 0);` arm and the unreachable
   assert are visible.  Confirm the full opcode list against the
   repository before editing. */
2066 ir_node *get_fragile_op_mem(ir_node *node) {
2067 assert(node && is_fragile_op(node));
2069 switch (get_irn_opcode (node)) {
2078 return get_irn_n(node, 0);
2083 assert(0 && "should not be reached");
2088 /* Returns true if the operation is a forking control flow operation. */
2090 is_forking_op(ir_node *node) {
2091 return is_op_forking(get_irn_op(node));
2094 #ifdef DEBUG_libfirm
/* Debug helper: prints a node, its block and all predecessors to
   stdout.  Compiled to an empty stub when DEBUG_libfirm is not set.
   NOTE(review): several structural lines (braces and, presumably, a
   guard around the block lookup for non-Block nodes) are missing from
   this excerpt — confirm against the repository before editing. */
2095 void dump_irn (ir_node *n) {
2096 int i, arity = get_irn_arity(n);
2097 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* get_irn_n(n, -1) yields the node's block */
2099 ir_node *pred = get_irn_n(n, -1);
2100 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2101 get_irn_node_nr(pred), (void *)pred);
2103 printf(" preds: \n");
2104 for (i = 0; i < arity; ++i) {
2105 ir_node *pred = get_irn_n(n, i);
2106 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2107 get_irn_node_nr(pred), (void *)pred);
2111 #else /* DEBUG_libfirm */
2112 void dump_irn (ir_node *n) {}
2113 #endif /* DEBUG_libfirm */