3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of a node's predecessors */
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46 * Returns the pnc name from a pnc constant
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
/* Returns the pnc constant whose comparison result is the logical
   negation of the given pnc.  Each ordered relation maps to its
   unordered complement and vice versa (e.g. Lt <-> Uge), because
   !(a < b) must also hold when a and b are unordered (NaN operands).
   NOTE(review): the 'switch (pnc)' header line is missing from this
   extraction; code tokens are left untouched. */
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
/* unreachable for any valid pnc value; only silences the compiler's
   missing-return warning */
75 return 99; /* to shut up gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
/* Creates a new ir_node on irg's obstack: allocates op-dependent size
   (common header + op->attr_size), zeroes it, and copies the 'arity'
   predecessors from 'in' into res->in starting at index 1 (index 0 is
   reserved for the block predecessor).  A negative arity allocates a
   growable (flexible) in-array instead of a fixed one.
   NOTE(review): several lines (declaration of 'res', the arity < 0
   branch, block/op/mode assignment, return) are missing from this
   extraction; code tokens are left untouched. */
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
/* zero the whole node including the op-specific attribute area */
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
/* dynamic in-array: one slot reserved for the block predecessor */
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
/* fixed in-array: arity predecessors plus the block slot at [0] */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
/* unique node number, used for debugging/dumping */
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
159 (get_irn_arity)(const ir_node *node) {
160 return __get_irn_arity(node);
163 /* Returns the array with ins. This array is shifted with respect to the
164 array accessed by get_irn_n: The block operand is at position 0 not -1.
165 (@@@ This should be changed.)
166 The order of the predecessors in this array is not guaranteed, except that
167 lists of operands as predecessors of Block or arguments of a Call are
170 get_irn_in (const ir_node *node) {
172 if (interprocedural_view) { /* handle Filter and Block specially */
173 if (get_irn_opcode(node) == iro_Filter) {
174 assert(node->attr.filter.in_cg);
175 return node->attr.filter.in_cg;
176 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
177 return node->attr.block.in_cg;
179 /* else fall through */
185 set_irn_in (ir_node *node, int arity, ir_node **in) {
188 if (interprocedural_view) { /* handle Filter and Block specially */
189 if (get_irn_opcode(node) == iro_Filter) {
190 assert(node->attr.filter.in_cg);
191 arr = &node->attr.filter.in_cg;
192 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
193 arr = &node->attr.block.in_cg;
200 if (arity != ARR_LEN(*arr) - 1) {
201 ir_node * block = (*arr)[0];
202 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
205 fix_backedges(current_ir_graph->obst, node);
206 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
210 (get_irn_intra_n)(ir_node *node, int n) {
211 return __get_irn_intra_n (node, n);
215 (get_irn_inter_n)(ir_node *node, int n) {
216 return __get_irn_inter_n (node, n);
220 (get_irn_n)(ir_node *node, int n) {
221 return __get_irn_n (node, n);
/* Sets predecessor n of a node.  n == -1 addresses the block
   predecessor (stored at in[0], hence the n + 1 indexing throughout).
   Filter nodes keep a second predecessor array for the interprocedural
   view (attr.filter.in_cg); a Filter's block predecessor must be kept
   identical in both views, so that case updates both arrays and
   returns early.  With interprocedural_view enabled, ordinary
   predecessors of Filter (and of Blocks that have an in_cg array) are
   written to the in_cg array instead of the normal in array.
   NOTE(review): some closing braces/returns between the branches are
   missing from this extraction; code tokens are left untouched. */
225 set_irn_n (ir_node *node, int n, ir_node *in) {
226 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
227 assert(in && in->kind == k_ir_node);
228 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
229 /* Change block pred in both views! */
230 node->in[n + 1] = in;
231 assert(node->attr.filter.in_cg);
232 node->attr.filter.in_cg[n + 1] = in;
235 if (interprocedural_view) { /* handle Filter and Block specially */
236 if (get_irn_opcode(node) == iro_Filter) {
237 assert(node->attr.filter.in_cg);
238 node->attr.filter.in_cg[n + 1] = in;
240 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
241 node->attr.block.in_cg[n + 1] = in;
244 /* else fall through */
/* intraprocedural view: ordinary predecessor update */
246 node->in[n + 1] = in;
250 (get_irn_mode)(const ir_node *node) {
251 return __get_irn_mode(node);
255 (set_irn_mode)(ir_node *node, ir_mode *mode)
257 __set_irn_mode(node, mode);
261 get_irn_modecode (const ir_node *node)
264 return node->mode->code;
267 /** Gets the string representation of the mode .*/
269 get_irn_modename (const ir_node *node)
272 return get_mode_name(node->mode);
276 get_irn_modeident (const ir_node *node)
279 return get_mode_ident(node->mode);
283 (get_irn_op)(const ir_node *node)
285 return __get_irn_op(node);
288 /* should be private to the library: */
290 set_irn_op (ir_node *node, ir_op *op)
297 (get_irn_opcode)(const ir_node *node)
299 return __get_irn_opcode(node);
303 get_irn_opname (const ir_node *node)
306 if ((get_irn_op((ir_node *)node) == op_Phi) &&
307 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
308 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
309 return get_id_str(node->op->name);
313 get_irn_opident (const ir_node *node)
316 return node->op->name;
320 (get_irn_visited)(const ir_node *node)
322 return __get_irn_visited(node);
326 (set_irn_visited)(ir_node *node, unsigned long visited)
328 __set_irn_visited(node, visited);
332 (mark_irn_visited)(ir_node *node) {
333 __mark_irn_visited(node);
337 (irn_not_visited)(const ir_node *node) {
338 return __irn_not_visited(node);
342 (irn_visited)(const ir_node *node) {
343 return __irn_visited(node);
347 (set_irn_link)(ir_node *node, void *link) {
348 __set_irn_link(node, link);
352 (get_irn_link)(const ir_node *node) {
353 return __get_irn_link(node);
357 #ifdef DO_HEAPANALYSIS
358 /* Access the abstract interpretation information of a node.
359 Returns NULL if no such information is available. */
360 struct abstval *get_irn_abst_value(ir_node *n) {
363 /* Set the abstract interpretation information of a node. */
364 void set_irn_abst_value(ir_node *n, struct abstval *os) {
367 struct section *firm_get_irn_section(ir_node *n) {
370 void firm_set_irn_section(ir_node *n, struct section *s) {
373 #endif /* DO_HEAPANALYSIS */
376 /* Outputs a unique number for this node */
378 get_irn_node_nr(const ir_node *node) {
381 return node->node_nr;
388 get_irn_const_attr (ir_node *node)
390 assert (node->op == op_Const);
391 return node->attr.con;
395 get_irn_proj_attr (ir_node *node)
397 assert (node->op == op_Proj);
398 return node->attr.proj;
402 get_irn_alloc_attr (ir_node *node)
404 assert (node->op == op_Alloc);
409 get_irn_free_attr (ir_node *node)
411 assert (node->op == op_Free);
412 return node->attr.f = skip_tid(node->attr.f);
416 get_irn_symconst_attr (ir_node *node)
418 assert (node->op == op_SymConst);
423 get_irn_call_attr (ir_node *node)
425 assert (node->op == op_Call);
426 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
430 get_irn_funccall_attr (ir_node *node)
432 assert (node->op == op_FuncCall);
433 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
437 get_irn_sel_attr (ir_node *node)
439 assert (node->op == op_Sel);
444 get_irn_phi_attr (ir_node *node)
446 assert (node->op == op_Phi);
447 return node->attr.phi0_pos;
451 get_irn_block_attr (ir_node *node)
453 assert (node->op == op_Block);
454 return node->attr.block;
458 get_irn_load_attr (ir_node *node)
460 assert (node->op == op_Load);
461 return node->attr.load;
465 get_irn_store_attr (ir_node *node)
467 assert (node->op == op_Store);
468 return node->attr.store;
472 get_irn_except_attr (ir_node *node)
474 assert (node->op == op_Div || node->op == op_Quot ||
475 node->op == op_DivMod || node->op == op_Mod);
476 return node->attr.except;
479 /** manipulate fields of individual nodes **/
481 /* this works for all except Block */
483 get_nodes_block (ir_node *node) {
484 assert (!(node->op == op_Block));
485 return get_irn_n(node, -1);
489 set_nodes_block (ir_node *node, ir_node *block) {
490 assert (!(node->op == op_Block));
491 set_irn_n(node, -1, block);
494 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
495 * from Start. If so returns frame type, else Null. */
496 type *is_frame_pointer(ir_node *n) {
497 if ((get_irn_op(n) == op_Proj) &&
498 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
499 ir_node *start = get_Proj_pred(n);
500 if (get_irn_op(start) == op_Start) {
501 return get_irg_frame_type(get_irn_irg(start));
507 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
508 * from Start. If so returns global type, else Null. */
509 type *is_globals_pointer(ir_node *n) {
510 if ((get_irn_op(n) == op_Proj) &&
511 (get_Proj_proj(n) == pn_Start_P_globals)) {
512 ir_node *start = get_Proj_pred(n);
513 if (get_irn_op(start) == op_Start) {
514 return get_glob_type();
520 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
521 * from Start. If so returns 1, else 0. */
522 int is_value_arg_pointer(ir_node *n) {
523 if ((get_irn_op(n) == op_Proj) &&
524 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
525 (get_irn_op(get_Proj_pred(n)) == op_Start))
530 /* Returns an array with the predecessors of the Block. Depending on
531 the implementation of the graph data structure this can be a copy of
532 the internal representation of predecessors as well as the internal
533 array itself. Therefore writing to this array might obstruct the ir. */
535 get_Block_cfgpred_arr (ir_node *node)
537 assert ((node->op == op_Block));
538 return (ir_node **)&(get_irn_in(node)[1]);
543 get_Block_n_cfgpreds (ir_node *node) {
544 assert ((node->op == op_Block));
545 return get_irn_arity(node);
549 get_Block_cfgpred (ir_node *node, int pos) {
551 assert (node->op == op_Block);
552 assert(-1 <= pos && pos < get_irn_arity(node));
553 return get_irn_n(node, pos);
557 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
558 assert (node->op == op_Block);
559 set_irn_n(node, pos, pred);
563 get_Block_matured (ir_node *node) {
564 assert (node->op == op_Block);
565 return node->attr.block.matured;
569 set_Block_matured (ir_node *node, bool matured) {
570 assert (node->op == op_Block);
571 node->attr.block.matured = matured;
574 get_Block_block_visited (ir_node *node) {
575 assert (node->op == op_Block);
576 return node->attr.block.block_visited;
580 set_Block_block_visited (ir_node *node, unsigned long visit) {
581 assert (node->op == op_Block);
582 node->attr.block.block_visited = visit;
585 /* For this current_ir_graph must be set. */
587 mark_Block_block_visited (ir_node *node) {
588 assert (node->op == op_Block);
589 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
593 Block_not_block_visited(ir_node *node) {
594 assert (node->op == op_Block);
595 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
599 get_Block_graph_arr (ir_node *node, int pos) {
600 assert (node->op == op_Block);
601 return node->attr.block.graph_arr[pos+1];
605 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
606 assert (node->op == op_Block);
607 node->attr.block.graph_arr[pos+1] = value;
610 /* handler handling for Blocks */
612 set_Block_handler (ir_node *block, ir_node *handler) {
613 assert ((block->op == op_Block));
614 assert ((handler->op == op_Block));
618 get_Block_handler (ir_node *block) {
619 assert ((block->op == op_Block));
623 /* handler handling for Nodes */
625 set_Node_handler (ir_node *node, ir_node *handler) {
630 get_Node_handler (ir_node *node) {
634 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
635 assert(node->op == op_Block);
636 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
637 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
638 node->attr.block.in_cg[0] = NULL;
639 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
641 /* Fix backedge array. fix_backedges operates depending on
642 interprocedural_view. */
643 bool ipv = interprocedural_view;
644 interprocedural_view = true;
645 fix_backedges(current_ir_graph->obst, node);
646 interprocedural_view = ipv;
649 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
652 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
653 assert(node->op == op_Block &&
654 node->attr.block.in_cg &&
655 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
656 node->attr.block.in_cg[pos + 1] = pred;
659 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
660 assert(node->op == op_Block);
661 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
664 int get_Block_cg_n_cfgpreds(ir_node * node) {
665 assert(node->op == op_Block);
666 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
669 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
670 assert(node->op == op_Block && node->attr.block.in_cg);
671 return node->attr.block.in_cg[pos + 1];
674 void remove_Block_cg_cfgpred_arr(ir_node * node) {
675 assert(node->op == op_Block);
676 node->attr.block.in_cg = NULL;
680 set_Start_irg(ir_node *node, ir_graph *irg) {
681 assert(node->op == op_Start);
682 assert(is_ir_graph(irg));
683 assert(0 && " Why set irg? -- use set_irn_irg");
687 get_End_n_keepalives(ir_node *end) {
688 assert (end->op == op_End);
689 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
693 get_End_keepalive(ir_node *end, int pos) {
694 assert (end->op == op_End);
695 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
699 add_End_keepalive (ir_node *end, ir_node *ka) {
700 assert (end->op == op_End);
701 ARR_APP1 (ir_node *, end->in, ka);
705 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
706 assert (end->op == op_End);
707 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
711 free_End (ir_node *end) {
712 assert (end->op == op_End);
714 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
715 end->in = NULL; /* @@@ make sure we get an error if we use the
716 in array afterwards ... */
721 > Implementing the case construct (which is where the constant Proj node is
722 > important) involves far more than simply determining the constant values.
723 > We could argue that this is more properly a function of the translator from
724 > Firm to the target machine. That could be done if there was some way of
725 > projecting "default" out of the Cond node.
726 I know it's complicated.
727 Basically there are two problems:
728 - determining the gaps between the projs
729 - determining the biggest case constant to know the proj number for
731 I see several solutions:
732 1. Introduce a ProjDefault node. Solves both problems.
733 This means to extend all optimizations executed during construction.
734 2. Give the Cond node for switch two flavors:
735 a) there are no gaps in the projs (existing flavor)
736 b) gaps may exist, default proj is still the Proj with the largest
737 projection number. This covers also the gaps.
738 3. Fix the semantic of the Cond to that of 2b)
740 Solution 2 seems to be the best:
741 Computing the gaps in the Firm representation is not too hard, i.e.,
742 libFIRM can implement a routine that transforms between the two
743 flavours. This is also possible for 1) but 2) does not require to
744 change any existing optimization.
745 Further it should be far simpler to determine the biggest constant than
747 I don't want to choose 3) as 2a) seems to have advantages for
748 dataflow analysis and 3) does not allow to convert the representation to
752 get_Cond_selector (ir_node *node) {
753 assert (node->op == op_Cond);
754 return get_irn_n(node, 0);
758 set_Cond_selector (ir_node *node, ir_node *selector) {
759 assert (node->op == op_Cond);
760 set_irn_n(node, 0, selector);
764 get_Cond_kind (ir_node *node) {
765 assert (node->op == op_Cond);
766 return node->attr.c.kind;
770 set_Cond_kind (ir_node *node, cond_kind kind) {
771 assert (node->op == op_Cond);
772 node->attr.c.kind = kind;
776 get_Cond_defaultProj (ir_node *node) {
777 assert (node->op == op_Cond);
778 return node->attr.c.default_proj;
782 get_Return_mem (ir_node *node) {
783 assert (node->op == op_Return);
784 return get_irn_n(node, 0);
788 set_Return_mem (ir_node *node, ir_node *mem) {
789 assert (node->op == op_Return);
790 set_irn_n(node, 0, mem);
794 get_Return_n_ress (ir_node *node) {
795 assert (node->op == op_Return);
796 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
800 get_Return_res_arr (ir_node *node)
802 assert ((node->op == op_Return));
803 if (get_Return_n_ress(node) > 0)
804 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
811 set_Return_n_res (ir_node *node, int results) {
812 assert (node->op == op_Return);
817 get_Return_res (ir_node *node, int pos) {
818 assert (node->op == op_Return);
819 assert (get_Return_n_ress(node) > pos);
820 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
824 set_Return_res (ir_node *node, int pos, ir_node *res){
825 assert (node->op == op_Return);
826 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
830 get_Raise_mem (ir_node *node) {
831 assert (node->op == op_Raise);
832 return get_irn_n(node, 0);
836 set_Raise_mem (ir_node *node, ir_node *mem) {
837 assert (node->op == op_Raise);
838 set_irn_n(node, 0, mem);
842 get_Raise_exo_ptr (ir_node *node) {
843 assert (node->op == op_Raise);
844 return get_irn_n(node, 1);
848 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
849 assert (node->op == op_Raise);
850 set_irn_n(node, 1, exo_ptr);
853 tarval *get_Const_tarval (ir_node *node) {
854 assert (node->op == op_Const);
855 return node->attr.con.tv;
859 set_Const_tarval (ir_node *node, tarval *con) {
860 assert (node->op == op_Const);
861 node->attr.con.tv = con;
865 /* The source language type. Must be an atomic type. Mode of type must
866 be mode of node. For tarvals from entities type must be pointer to
869 get_Const_type (ir_node *node) {
870 assert (node->op == op_Const);
871 return node->attr.con.tp;
875 set_Const_type (ir_node *node, type *tp) {
876 assert (node->op == op_Const);
877 if (tp != unknown_type) {
878 assert (is_atomic_type(tp));
879 assert (get_type_mode(tp) == get_irn_mode(node));
882 node->attr.con.tp = tp;
887 get_SymConst_kind (const ir_node *node) {
888 assert (node->op == op_SymConst);
889 return node->attr.i.num;
893 set_SymConst_kind (ir_node *node, symconst_kind num) {
894 assert (node->op == op_SymConst);
895 node->attr.i.num = num;
899 get_SymConst_type (ir_node *node) {
900 assert ( (node->op == op_SymConst)
901 && ( get_SymConst_kind(node) == symconst_type_tag
902 || get_SymConst_kind(node) == symconst_size));
903 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
907 set_SymConst_type (ir_node *node, type *tp) {
908 assert ( (node->op == op_SymConst)
909 && ( get_SymConst_kind(node) == symconst_type_tag
910 || get_SymConst_kind(node) == symconst_size));
911 node->attr.i.sym.type_p = tp;
915 get_SymConst_name (ir_node *node) {
916 assert ( (node->op == op_SymConst)
917 && (get_SymConst_kind(node) == symconst_addr_name));
918 return node->attr.i.sym.ident_p;
922 set_SymConst_name (ir_node *node, ident *name) {
923 assert ( (node->op == op_SymConst)
924 && (get_SymConst_kind(node) == symconst_addr_name));
925 node->attr.i.sym.ident_p = name;
929 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
930 entity *get_SymConst_entity (ir_node *node) {
931 assert ( (node->op == op_SymConst)
932 && (get_SymConst_kind (node) == symconst_addr_ent));
933 return node->attr.i.sym.entity_p;
936 void set_SymConst_entity (ir_node *node, entity *ent) {
937 assert ( (node->op == op_SymConst)
938 && (get_SymConst_kind(node) == symconst_addr_ent));
939 node->attr.i.sym.entity_p = ent;
943 union symconst_symbol
944 get_SymConst_symbol (ir_node *node) {
945 assert (node->op == op_SymConst);
946 return node->attr.i.sym;
950 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
951 assert (node->op == op_SymConst);
952 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
953 node->attr.i.sym = sym;
957 get_Sel_mem (ir_node *node) {
958 assert (node->op == op_Sel);
959 return get_irn_n(node, 0);
963 set_Sel_mem (ir_node *node, ir_node *mem) {
964 assert (node->op == op_Sel);
965 set_irn_n(node, 0, mem);
969 get_Sel_ptr (ir_node *node) {
970 assert (node->op == op_Sel);
971 return get_irn_n(node, 1);
975 set_Sel_ptr (ir_node *node, ir_node *ptr) {
976 assert (node->op == op_Sel);
977 set_irn_n(node, 1, ptr);
981 get_Sel_n_indexs (ir_node *node) {
982 assert (node->op == op_Sel);
983 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
987 get_Sel_index_arr (ir_node *node)
989 assert ((node->op == op_Sel));
990 if (get_Sel_n_indexs(node) > 0)
991 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
997 get_Sel_index (ir_node *node, int pos) {
998 assert (node->op == op_Sel);
999 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1003 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1004 assert (node->op == op_Sel);
1005 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1009 get_Sel_entity (ir_node *node) {
1010 assert (node->op == op_Sel);
1011 return node->attr.s.ent;
1015 set_Sel_entity (ir_node *node, entity *ent) {
1016 assert (node->op == op_Sel);
1017 node->attr.s.ent = ent;
1021 get_InstOf_ent (ir_node *node) {
1022 assert (node->op = op_InstOf);
1023 return (node->attr.io.ent);
1027 set_InstOf_ent (ir_node *node, type *ent) {
1028 assert (node->op = op_InstOf);
1029 node->attr.io.ent = ent;
1033 get_InstOf_store (ir_node *node) {
1034 assert (node->op = op_InstOf);
1035 return (get_irn_n (node, 0));
1039 set_InstOf_store (ir_node *node, ir_node *obj) {
1040 assert (node->op = op_InstOf);
1041 set_irn_n (node, 0, obj);
1045 get_InstOf_obj (ir_node *node) {
1046 assert (node->op = op_InstOf);
1047 return (get_irn_n (node, 1));
1051 set_InstOf_obj (ir_node *node, ir_node *obj) {
1052 assert (node->op = op_InstOf);
1053 set_irn_n (node, 1, obj);
1057 /* For unary and binary arithmetic operations the access to the
1058 operands can be factored out. Left is the first, right the
1059 second arithmetic value as listed in tech report 0999-33.
1060 unops are: Minus, Abs, Not, Conv, Cast
1061 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1062 Shr, Shrs, Rotate, Cmp */
1066 get_Call_mem (ir_node *node) {
1067 assert (node->op == op_Call);
1068 return get_irn_n(node, 0);
1072 set_Call_mem (ir_node *node, ir_node *mem) {
1073 assert (node->op == op_Call);
1074 set_irn_n(node, 0, mem);
1078 get_Call_ptr (ir_node *node) {
1079 assert (node->op == op_Call);
1080 return get_irn_n(node, 1);
1084 set_Call_ptr (ir_node *node, ir_node *ptr) {
1085 assert (node->op == op_Call);
1086 set_irn_n(node, 1, ptr);
1090 get_Call_param_arr (ir_node *node) {
1091 assert (node->op == op_Call);
1092 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1096 get_Call_n_params (ir_node *node) {
1097 assert (node->op == op_Call);
1098 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1102 get_Call_arity (ir_node *node) {
1103 assert (node->op == op_Call);
1104 return get_Call_n_params(node);
1108 set_Call_arity (ir_node *node, ir_node *arity) {
1109 assert (node->op == op_Call);
1114 get_Call_param (ir_node *node, int pos) {
1115 assert (node->op == op_Call);
1116 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1120 set_Call_param (ir_node *node, int pos, ir_node *param) {
1121 assert (node->op == op_Call);
1122 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1126 get_Call_type (ir_node *node) {
1127 assert (node->op == op_Call);
1128 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1132 set_Call_type (ir_node *node, type *tp) {
1133 assert (node->op == op_Call);
1134 assert (is_method_type(tp));
1135 node->attr.call.cld_tp = tp;
1138 int Call_has_callees(ir_node *node) {
1139 assert(node && node->op == op_Call);
1140 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1141 (node->attr.call.callee_arr != NULL));
1144 int get_Call_n_callees(ir_node * node) {
1145 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1146 return ARR_LEN(node->attr.call.callee_arr);
1149 entity * get_Call_callee(ir_node * node, int pos) {
1150 assert(pos >= 0 && pos < get_Call_n_callees(node));
1151 return node->attr.call.callee_arr[pos];
1154 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1155 assert(node->op == op_Call);
1156 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1157 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1159 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1162 void remove_Call_callee_arr(ir_node * node) {
1163 assert(node->op == op_Call);
1164 node->attr.call.callee_arr = NULL;
1167 ir_node * get_CallBegin_ptr (ir_node *node) {
1168 assert(node->op == op_CallBegin);
1169 return get_irn_n(node, 0);
1171 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1172 assert(node->op == op_CallBegin);
1173 set_irn_n(node, 0, ptr);
1175 ir_node * get_CallBegin_call (ir_node *node) {
1176 assert(node->op == op_CallBegin);
1177 return node->attr.callbegin.call;
1179 void set_CallBegin_call (ir_node *node, ir_node *call) {
1180 assert(node->op == op_CallBegin);
1181 node->attr.callbegin.call = call;
1185 get_FuncCall_ptr (ir_node *node) {
1186 assert (node->op == op_FuncCall);
1187 return get_irn_n(node, 0);
1191 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1192 assert (node->op == op_FuncCall);
1193 set_irn_n(node, 0, ptr);
1197 get_FuncCall_param_arr (ir_node *node) {
1198 assert (node->op == op_FuncCall);
1199 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1203 get_FuncCall_n_params (ir_node *node) {
1204 assert (node->op == op_FuncCall);
1205 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1209 get_FuncCall_arity (ir_node *node) {
1210 assert (node->op == op_FuncCall);
1211 return get_FuncCall_n_params(node);
1215 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1216 assert (node->op == op_FuncCall);
1221 get_FuncCall_param (ir_node *node, int pos) {
1222 assert (node->op == op_FuncCall);
1223 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1227 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1228 assert (node->op == op_FuncCall);
1229 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1233 get_FuncCall_type (ir_node *node) {
1234 assert (node->op == op_FuncCall);
1235 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1239 set_FuncCall_type (ir_node *node, type *tp) {
1240 assert (node->op == op_FuncCall);
1241 assert (is_method_type(tp));
1242 node->attr.call.cld_tp = tp;
1245 int FuncCall_has_callees(ir_node *node) {
1246 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1247 (node->attr.call.callee_arr != NULL));
1250 int get_FuncCall_n_callees(ir_node * node) {
1251 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1252 return ARR_LEN(node->attr.call.callee_arr);
1255 entity * get_FuncCall_callee(ir_node * node, int pos) {
1256 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1257 return node->attr.call.callee_arr[pos];
1260 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1261 assert(node->op == op_FuncCall);
1262 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1263 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1265 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1268 void remove_FuncCall_callee_arr(ir_node * node) {
1269 assert(node->op == op_FuncCall);
1270 node->attr.call.callee_arr = NULL;
1275 ir_node * get_##OP##_left(ir_node *node) { \
1276 assert(node->op == op_##OP); \
1277 return get_irn_n(node, node->op->op_index); \
1279 void set_##OP##_left(ir_node *node, ir_node *left) { \
1280 assert(node->op == op_##OP); \
1281 set_irn_n(node, node->op->op_index, left); \
1283 ir_node *get_##OP##_right(ir_node *node) { \
1284 assert(node->op == op_##OP); \
1285 return get_irn_n(node, node->op->op_index + 1); \
1287 void set_##OP##_right(ir_node *node, ir_node *right) { \
1288 assert(node->op == op_##OP); \
1289 set_irn_n(node, node->op->op_index + 1, right); \
1293 ir_node *get_##OP##_op(ir_node *node) { \
1294 assert(node->op == op_##OP); \
1295 return get_irn_n(node, node->op->op_index); \
1297 void set_##OP##_op (ir_node *node, ir_node *op) { \
1298 assert(node->op == op_##OP); \
1299 set_irn_n(node, node->op->op_index, op); \
1309 get_Quot_mem (ir_node *node) {
1310 assert (node->op == op_Quot);
1311 return get_irn_n(node, 0);
1315 set_Quot_mem (ir_node *node, ir_node *mem) {
1316 assert (node->op == op_Quot);
1317 set_irn_n(node, 0, mem);
1323 get_DivMod_mem (ir_node *node) {
1324 assert (node->op == op_DivMod);
1325 return get_irn_n(node, 0);
1329 set_DivMod_mem (ir_node *node, ir_node *mem) {
1330 assert (node->op == op_DivMod);
1331 set_irn_n(node, 0, mem);
1337 get_Div_mem (ir_node *node) {
1338 assert (node->op == op_Div);
1339 return get_irn_n(node, 0);
1343 set_Div_mem (ir_node *node, ir_node *mem) {
1344 assert (node->op == op_Div);
1345 set_irn_n(node, 0, mem);
1351 get_Mod_mem (ir_node *node) {
1352 assert (node->op == op_Mod);
1353 return get_irn_n(node, 0);
1357 set_Mod_mem (ir_node *node, ir_node *mem) {
1358 assert (node->op == op_Mod);
1359 set_irn_n(node, 0, mem);
1376 get_Cast_type (ir_node *node) {
1377 assert (node->op == op_Cast);
1378 return node->attr.cast.totype;
1382 set_Cast_type (ir_node *node, type *to_tp) {
1383 assert (node->op == op_Cast);
1384 node->attr.cast.totype = to_tp;
1388 is_unop (ir_node *node) {
1389 return (node->op->opar == oparity_unary);
1393 get_unop_op (ir_node *node) {
1394 if (node->op->opar == oparity_unary)
1395 return get_irn_n(node, node->op->op_index);
1397 assert(node->op->opar == oparity_unary);
1402 set_unop_op (ir_node *node, ir_node *op) {
1403 if (node->op->opar == oparity_unary)
1404 set_irn_n(node, node->op->op_index, op);
1406 assert(node->op->opar == oparity_unary);
1410 is_binop (ir_node *node) {
1411 return (node->op->opar == oparity_binary);
1415 get_binop_left (ir_node *node) {
1416 if (node->op->opar == oparity_binary)
1417 return get_irn_n(node, node->op->op_index);
1419 assert(node->op->opar == oparity_binary);
1424 set_binop_left (ir_node *node, ir_node *left) {
1425 if (node->op->opar == oparity_binary)
1426 set_irn_n(node, node->op->op_index, left);
1428 assert (node->op->opar == oparity_binary);
1432 get_binop_right (ir_node *node) {
1433 if (node->op->opar == oparity_binary)
1434 return get_irn_n(node, node->op->op_index + 1);
1436 assert(node->op->opar == oparity_binary);
1441 set_binop_right (ir_node *node, ir_node *right) {
1442 if (node->op->opar == oparity_binary)
1443 set_irn_n(node, node->op->op_index + 1, right);
1445 assert (node->op->opar == oparity_binary);
/* Returns true if n is a Phi node; in the interprocedural view a Filter
   also counts as a Phi.  A zero-arity Phi only counts once graph
   construction is finished (otherwise it is a Phi0).
   NOTE(review): the declaration/initialization of `op` and the op_Phi
   test lie on lines elided from this extraction — verify in full file. */
1448 int is_Phi (ir_node *n) {
1454 if (op == op_Filter) return interprocedural_view;
1457 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1458 (get_irn_arity(n) > 0));
1463 int is_Phi0 (ir_node *n) {
1466 return ((get_irn_op(n) == op_Phi) &&
1467 (get_irn_arity(n) == 0) &&
1468 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1472 get_Phi_preds_arr (ir_node *node) {
1473 assert (node->op == op_Phi);
1474 return (ir_node **)&(get_irn_in(node)[1]);
1478 get_Phi_n_preds (ir_node *node) {
1479 assert (is_Phi(node) || is_Phi0(node));
1480 return (get_irn_arity(node));
/* Grows/shrinks the Phi's predecessor array to n_preds data entries.
   NOTE(review): the resize statement itself is on a line elided from
   this extraction — verify against the full file. */
1484 void set_Phi_n_preds (ir_node *node, int n_preds) {
1485 assert (node->op == op_Phi);
1490 get_Phi_pred (ir_node *node, int pos) {
1491 assert (is_Phi(node) || is_Phi0(node));
1492 return get_irn_n(node, pos);
1496 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1497 assert (is_Phi(node) || is_Phi0(node));
1498 set_irn_n(node, pos, pred);
1502 int is_memop(ir_node *node) {
1503 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1506 ir_node *get_memop_mem (ir_node *node) {
1507 assert(is_memop(node));
1508 return get_irn_n(node, 0);
1511 void set_memop_mem (ir_node *node, ir_node *mem) {
1512 assert(is_memop(node));
1513 set_irn_n(node, 0, mem);
1516 ir_node *get_memop_ptr (ir_node *node) {
1517 assert(is_memop(node));
1518 return get_irn_n(node, 1);
1521 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1522 assert(is_memop(node));
1523 set_irn_n(node, 1, ptr);
1527 get_Load_mem (ir_node *node) {
1528 assert (node->op == op_Load);
1529 return get_irn_n(node, 0);
1533 set_Load_mem (ir_node *node, ir_node *mem) {
1534 assert (node->op == op_Load);
1535 set_irn_n(node, 0, mem);
1539 get_Load_ptr (ir_node *node) {
1540 assert (node->op == op_Load);
1541 return get_irn_n(node, 1);
1545 set_Load_ptr (ir_node *node, ir_node *ptr) {
1546 assert (node->op == op_Load);
1547 set_irn_n(node, 1, ptr);
1551 get_Load_mode (ir_node *node) {
1552 assert (node->op == op_Load);
1553 return node->attr.load.load_mode;
1557 set_Load_mode (ir_node *node, ir_mode *mode) {
1558 assert (node->op == op_Load);
1559 node->attr.load.load_mode = mode;
1563 get_Load_volatility (ir_node *node) {
1564 assert (node->op == op_Load);
1565 return node->attr.load.volatility;
1569 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1570 assert (node->op == op_Load);
1571 node->attr.load.volatility = volatility;
1576 get_Store_mem (ir_node *node) {
1577 assert (node->op == op_Store);
1578 return get_irn_n(node, 0);
1582 set_Store_mem (ir_node *node, ir_node *mem) {
1583 assert (node->op == op_Store);
1584 set_irn_n(node, 0, mem);
1588 get_Store_ptr (ir_node *node) {
1589 assert (node->op == op_Store);
1590 return get_irn_n(node, 1);
1594 set_Store_ptr (ir_node *node, ir_node *ptr) {
1595 assert (node->op == op_Store);
1596 set_irn_n(node, 1, ptr);
1600 get_Store_value (ir_node *node) {
1601 assert (node->op == op_Store);
1602 return get_irn_n(node, 2);
1606 set_Store_value (ir_node *node, ir_node *value) {
1607 assert (node->op == op_Store);
1608 set_irn_n(node, 2, value);
1612 get_Store_volatility (ir_node *node) {
1613 assert (node->op == op_Store);
1614 return node->attr.store.volatility;
1618 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1619 assert (node->op == op_Store);
1620 node->attr.store.volatility = volatility;
1625 get_Alloc_mem (ir_node *node) {
1626 assert (node->op == op_Alloc);
1627 return get_irn_n(node, 0);
1631 set_Alloc_mem (ir_node *node, ir_node *mem) {
1632 assert (node->op == op_Alloc);
1633 set_irn_n(node, 0, mem);
1637 get_Alloc_size (ir_node *node) {
1638 assert (node->op == op_Alloc);
1639 return get_irn_n(node, 1);
1643 set_Alloc_size (ir_node *node, ir_node *size) {
1644 assert (node->op == op_Alloc);
1645 set_irn_n(node, 1, size);
1649 get_Alloc_type (ir_node *node) {
1650 assert (node->op == op_Alloc);
1651 return node->attr.a.type = skip_tid(node->attr.a.type);
1655 set_Alloc_type (ir_node *node, type *tp) {
1656 assert (node->op == op_Alloc);
1657 node->attr.a.type = tp;
1661 get_Alloc_where (ir_node *node) {
1662 assert (node->op == op_Alloc);
1663 return node->attr.a.where;
1667 set_Alloc_where (ir_node *node, where_alloc where) {
1668 assert (node->op == op_Alloc);
1669 node->attr.a.where = where;
1674 get_Free_mem (ir_node *node) {
1675 assert (node->op == op_Free);
1676 return get_irn_n(node, 0);
1680 set_Free_mem (ir_node *node, ir_node *mem) {
1681 assert (node->op == op_Free);
1682 set_irn_n(node, 0, mem);
1686 get_Free_ptr (ir_node *node) {
1687 assert (node->op == op_Free);
1688 return get_irn_n(node, 1);
1692 set_Free_ptr (ir_node *node, ir_node *ptr) {
1693 assert (node->op == op_Free);
1694 set_irn_n(node, 1, ptr);
1698 get_Free_size (ir_node *node) {
1699 assert (node->op == op_Free);
1700 return get_irn_n(node, 2);
1704 set_Free_size (ir_node *node, ir_node *size) {
1705 assert (node->op == op_Free);
1706 set_irn_n(node, 2, size);
1710 get_Free_type (ir_node *node) {
1711 assert (node->op == op_Free);
1712 return node->attr.f = skip_tid(node->attr.f);
/* Sets the type of the object being freed.
   NOTE(review): the assignment statement (presumably node->attr.f = tp;)
   is on a line elided from this extraction — verify in the full file. */
1716 set_Free_type (ir_node *node, type *tp) {
1717 assert (node->op == op_Free);
1722 get_Sync_preds_arr (ir_node *node) {
1723 assert (node->op == op_Sync);
1724 return (ir_node **)&(get_irn_in(node)[1]);
1728 get_Sync_n_preds (ir_node *node) {
1729 assert (node->op == op_Sync);
1730 return (get_irn_arity(node));
/* Grows/shrinks the Sync's predecessor array to n_preds entries.
   NOTE(review): the resize statement itself is on a line elided from
   this extraction — verify against the full file. */
1735 set_Sync_n_preds (ir_node *node, int n_preds) {
1736 assert (node->op == op_Sync);
1741 get_Sync_pred (ir_node *node, int pos) {
1742 assert (node->op == op_Sync);
1743 return get_irn_n(node, pos);
1747 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1748 assert (node->op == op_Sync);
1749 set_irn_n(node, pos, pred);
1753 get_Proj_pred (ir_node *node) {
1754 assert (is_Proj(node));
1755 return get_irn_n(node, 0);
1759 set_Proj_pred (ir_node *node, ir_node *pred) {
1760 assert (is_Proj(node));
1761 set_irn_n(node, 0, pred);
1765 get_Proj_proj (ir_node *node) {
1766 assert (is_Proj(node));
1767 if (get_irn_opcode(node) == iro_Proj) {
1768 return node->attr.proj;
1770 assert(get_irn_opcode(node) == iro_Filter);
1771 return node->attr.filter.proj;
1776 set_Proj_proj (ir_node *node, long proj) {
1777 assert (node->op == op_Proj);
1778 node->attr.proj = proj;
1782 get_Tuple_preds_arr (ir_node *node) {
1783 assert (node->op == op_Tuple);
1784 return (ir_node **)&(get_irn_in(node)[1]);
1788 get_Tuple_n_preds (ir_node *node) {
1789 assert (node->op == op_Tuple);
1790 return (get_irn_arity(node));
/* Grows/shrinks the Tuple's predecessor array to n_preds entries.
   NOTE(review): the resize statement itself is on a line elided from
   this extraction — verify against the full file. */
1795 set_Tuple_n_preds (ir_node *node, int n_preds) {
1796 assert (node->op == op_Tuple);
1801 get_Tuple_pred (ir_node *node, int pos) {
1802 assert (node->op == op_Tuple);
1803 return get_irn_n(node, pos);
1807 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1808 assert (node->op == op_Tuple);
1809 set_irn_n(node, pos, pred);
1813 get_Id_pred (ir_node *node) {
1814 assert (node->op == op_Id);
1815 return get_irn_n(node, 0);
1819 set_Id_pred (ir_node *node, ir_node *pred) {
1820 assert (node->op == op_Id);
1821 set_irn_n(node, 0, pred);
1824 ir_node *get_Confirm_value (ir_node *node) {
1825 assert (node->op == op_Confirm);
1826 return get_irn_n(node, 0);
1828 void set_Confirm_value (ir_node *node, ir_node *value) {
1829 assert (node->op == op_Confirm);
1830 set_irn_n(node, 0, value);
1832 ir_node *get_Confirm_bound (ir_node *node) {
1833 assert (node->op == op_Confirm);
1834 return get_irn_n(node, 1);
1836 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1837 assert (node->op == op_Confirm);
1838 set_irn_n(node, 0, bound);
1840 pn_Cmp get_Confirm_cmp (ir_node *node) {
1841 assert (node->op == op_Confirm);
1842 return node->attr.confirm_cmp;
1844 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1845 assert (node->op == op_Confirm);
1846 node->attr.confirm_cmp = cmp;
/* Returns/sets the intraprocedural data predecessor of a Filter.
   NOTE(review): the actual accessor statements are on lines elided from
   this extraction (presumably direct in[1] access, since these must work
   independent of the interprocedural view) — verify in the full file. */
1851 get_Filter_pred (ir_node *node) {
1852 assert(node->op == op_Filter);
1856 set_Filter_pred (ir_node *node, ir_node *pred) {
1857 assert(node->op == op_Filter);
1861 get_Filter_proj(ir_node *node) {
1862 assert(node->op == op_Filter);
1863 return node->attr.filter.proj;
1866 set_Filter_proj (ir_node *node, long proj) {
1867 assert(node->op == op_Filter);
1868 node->attr.filter.proj = proj;
1871 /* Don't use get_irn_arity, get_irn_n in implementation as access
1872 shall work independent of view!!! */
1873 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1874 assert(node->op == op_Filter);
1875 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1876 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1877 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1878 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1879 node->attr.filter.in_cg[0] = node->in[0];
1881 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1884 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1885 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1886 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1887 node->attr.filter.in_cg[pos + 1] = pred;
1889 int get_Filter_n_cg_preds(ir_node *node) {
1890 assert(node->op == op_Filter && node->attr.filter.in_cg);
1891 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1893 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1895 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1897 arity = ARR_LEN(node->attr.filter.in_cg);
1898 assert(pos < arity - 1);
1899 return node->attr.filter.in_cg[pos + 1];
1904 get_irn_irg(ir_node *node) {
1905 if (get_irn_op(node) != op_Block)
1906 node = get_nodes_block(node);
1907 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1908 node = get_nodes_block(node);
1909 assert(get_irn_op(node) == op_Block);
1910 return node->attr.block.irg;
1914 /*----------------------------------------------------------------*/
1915 /* Auxiliary routines */
1916 /*----------------------------------------------------------------*/
1919 skip_Proj (ir_node *node) {
1920 /* don't assert node !!! */
1921 if (node && is_Proj(node)) {
1922 return get_Proj_pred(node);
1929 skip_Tuple (ir_node *node) {
1932 if (!get_opt_normalize()) return node;
1934 node = skip_Id(node);
1935 if (get_irn_op(node) == op_Proj) {
1936 pred = skip_Id(get_Proj_pred(node));
1937 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1938 pred = skip_Id(skip_Tuple(pred));
1939 if (get_irn_op(pred) == op_Tuple)
1940 return get_Tuple_pred(pred, get_Proj_proj(node));
1945 /** returns operand of node if node is a Cast */
1946 ir_node *skip_Cast (ir_node *node) {
1947 if (node && get_irn_op(node) == op_Cast) {
1948 return skip_Id(get_irn_n(node, 0));
1955 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1956 than any other approach, as Id chains are resolved and all point to the real node, or
1957 all id's are self loops. */
/* Resolves an Id chain to its end, shortening the chain to self-cycles
   so later lookups are O(1); a self-loop is returned unchanged.
   NOTE(review): this file contains a second skip_Id definition below —
   one of the two is almost certainly disabled by preprocessor lines
   elided from this extraction; several declaration/brace lines of this
   body are elided as well.  Verify against the full file before editing. */
1959 skip_Id (ir_node *node) {
1960 /* don't assert node !!! */
1962 if (!get_opt_normalize()) return node;
1964 /* Don't use get_Id_pred: We get into an endless loop for
1965 self-referencing Ids. */
1966 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1967 ir_node *rem_pred = node->in[0+1];
1970 assert (get_irn_arity (node) > 0);
1972 node->in[0+1] = node;
1973 res = skip_Id(rem_pred);
1974 if (res->op == op_Id) /* self-loop */ return node;
1976 node->in[0+1] = res;
1983 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1984 than any other approach, as Id chains are resolved and all point to the real node, or
1985 all id's are self loops. */
/* Second variant of skip_Id: same contract as above (compact Id chains
   to self-cycles and return the chain end) with extra early-out
   shortcuts for non-Id nodes and one-step chains.
   NOTE(review): the duplicate definition above is presumably selected
   away by preprocessor lines elided from this extraction; declaration
   and brace lines of this body are elided too — verify in full file. */
1987 skip_Id (ir_node *node) {
1989 /* don't assert node !!! */
1991 if (!node || (node->op != op_Id)) return node;
1993 if (!get_opt_normalize()) return node;
1995 /* Don't use get_Id_pred: We get into an endless loop for
1996 self-referencing Ids. */
1997 pred = node->in[0+1];
1999 if (pred->op != op_Id) return pred;
2001 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2002 ir_node *rem_pred, *res;
2004 if (pred->op != op_Id) return pred; /* shortcut */
2007 assert (get_irn_arity (node) > 0);
2009 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2010 res = skip_Id(rem_pred);
2011 if (res->op == op_Id) /* self-loop */ return node;
2013 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2022 is_Bad (ir_node *node) {
2024 if ((node) && get_irn_opcode(node) == iro_Bad)
2030 is_no_Block (ir_node *node) {
2032 return (get_irn_opcode(node) != iro_Block);
2036 is_Block (ir_node *node) {
2038 return (get_irn_opcode(node) == iro_Block);
2041 /* returns true if node is a Unknown node. */
2043 is_Unknown (ir_node *node) {
2045 return (get_irn_opcode(node) == iro_Unknown);
2049 is_Proj (const ir_node *node) {
2051 return node->op == op_Proj
2052 || (!interprocedural_view && node->op == op_Filter);
2055 /* Returns true if the operation manipulates control flow. */
2057 is_cfop(ir_node *node) {
2058 return is_cfopcode(get_irn_op(node));
2061 /* Returns true if the operation manipulates interprocedural control flow:
2062 CallBegin, EndReg, EndExcept */
2063 int is_ip_cfop(ir_node *node) {
2064 return is_ip_cfopcode(get_irn_op(node));
2067 /* Returns true if the operation can change the control flow because
2070 is_fragile_op(ir_node *node) {
2071 return is_op_fragile(get_irn_op(node));
/* Returns the memory operand of fragile operations.  The visible branch
   returns predecessor 0; the switch's case labels (the concrete fragile
   opcodes) are on lines elided from this extraction — verify there.
   Falls through to an assert for opcodes that have no memory operand. */
2074 /* Returns the memory operand of fragile operations. */
2075 ir_node *get_fragile_op_mem(ir_node *node) {
2076 assert(node && is_fragile_op(node));
2078 switch (get_irn_opcode (node)) {
2087 return get_irn_n(node, 0);
2092 assert(0 && "should not be reached");
/* Debug helper: prints a node (op, mode, number, address), its block and
   all predecessors to stdout.  Compiled to a no-op without DEBUG_libfirm.
   NOTE(review): the closing braces of the loop/function and the guard
   around the block lookup are on lines elided from this extraction. */
2097 #ifdef DEBUG_libfirm
2098 void dump_irn (ir_node *n) {
2099 int i, arity = get_irn_arity(n);
2100 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2102 ir_node *pred = get_irn_n(n, -1);
2103 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2104 get_irn_node_nr(pred), (void *)pred);
2106 printf(" preds: \n");
2107 for (i = 0; i < arity; ++i) {
2108 ir_node *pred = get_irn_n(n, i);
2109 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2110 get_irn_node_nr(pred), (void *)pred);
2114 #else /* DEBUG_libfirm */
2115 void dump_irn (ir_node *n) {}
2116 #endif /* DEBUG_libfirm */