3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46 * returns the pnc name from an pnc constant
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
159 (get_irn_arity)(const ir_node *node) {
160 return __get_irn_arity(node);
163 /* Returns the array with ins. This array is shifted with respect to the
164 array accessed by get_irn_n: The block operand is at position 0 not -1.
165 (@@@ This should be changed.)
166 The order of the predecessors in this array is not guaranteed, except that
167 lists of operands as predecessors of Block or arguments of a Call are
170 get_irn_in (const ir_node *node) {
172 if (interprocedural_view) { /* handle Filter and Block specially */
173 if (get_irn_opcode(node) == iro_Filter) {
174 assert(node->attr.filter.in_cg);
175 return node->attr.filter.in_cg;
176 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
177 return node->attr.block.in_cg;
179 /* else fall through */
185 set_irn_in (ir_node *node, int arity, ir_node **in) {
188 if (interprocedural_view) { /* handle Filter and Block specially */
189 if (get_irn_opcode(node) == iro_Filter) {
190 assert(node->attr.filter.in_cg);
191 arr = &node->attr.filter.in_cg;
192 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
193 arr = &node->attr.block.in_cg;
200 if (arity != ARR_LEN(*arr) - 1) {
201 ir_node * block = (*arr)[0];
202 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
205 fix_backedges(current_ir_graph->obst, node);
206 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
210 (get_irn_intra_n)(ir_node *node, int n) {
211 return __get_irn_intra_n (node, n);
215 (get_irn_inter_n)(ir_node *node, int n) {
216 return __get_irn_inter_n (node, n);
220 (get_irn_n)(ir_node *node, int n) {
221 return __get_irn_n (node, n);
225 set_irn_n (ir_node *node, int n, ir_node *in) {
226 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
227 assert(in && in->kind == k_ir_node);
228 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
229 /* Change block pred in both views! */
230 node->in[n + 1] = in;
231 assert(node->attr.filter.in_cg);
232 node->attr.filter.in_cg[n + 1] = in;
235 if (interprocedural_view) { /* handle Filter and Block specially */
236 if (get_irn_opcode(node) == iro_Filter) {
237 assert(node->attr.filter.in_cg);
238 node->attr.filter.in_cg[n + 1] = in;
240 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
241 node->attr.block.in_cg[n + 1] = in;
244 /* else fall through */
246 node->in[n + 1] = in;
250 (get_irn_mode)(const ir_node *node) {
251 return __get_irn_mode(node);
255 (set_irn_mode)(ir_node *node, ir_mode *mode)
257 __set_irn_mode(node, mode);
261 get_irn_modecode (const ir_node *node)
264 return node->mode->code;
267 /** Gets the string representation of the mode .*/
269 get_irn_modename (const ir_node *node)
272 return get_mode_name(node->mode);
276 get_irn_modeident (const ir_node *node)
279 return get_mode_ident(node->mode);
283 (get_irn_op)(const ir_node *node)
285 return __get_irn_op(node);
288 /* should be private to the library: */
290 set_irn_op (ir_node *node, ir_op *op)
297 (get_irn_opcode)(const ir_node *node)
299 return __get_irn_opcode(node);
303 get_irn_opname (const ir_node *node)
306 if ((get_irn_op((ir_node *)node) == op_Phi) &&
307 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
308 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
309 return get_id_str(node->op->name);
313 get_irn_opident (const ir_node *node)
316 return node->op->name;
320 (get_irn_visited)(const ir_node *node)
322 return __get_irn_visited(node);
326 (set_irn_visited)(ir_node *node, unsigned long visited)
328 __set_irn_visited(node, visited);
332 (mark_irn_visited)(ir_node *node) {
333 __mark_irn_visited(node);
337 (irn_not_visited)(const ir_node *node) {
338 return __irn_not_visited(node);
342 (irn_visited)(const ir_node *node) {
343 return __irn_visited(node);
347 (set_irn_link)(ir_node *node, void *link) {
348 __set_irn_link(node, link);
352 (get_irn_link)(const ir_node *node) {
353 return __get_irn_link(node);
357 #ifdef DO_HEAPANALYSIS
358 /* Access the abstract interpretation information of a node.
359 Returns NULL if no such information is available. */
360 struct abstval *get_irn_abst_value(ir_node *n) {
363 /* Set the abstract interpretation information of a node. */
364 void set_irn_abst_value(ir_node *n, struct abstval *os) {
367 struct section *firm_get_irn_section(ir_node *n) {
370 void firm_set_irn_section(ir_node *n, struct section *s) {
373 #endif /* DO_HEAPANALYSIS */
376 /* Outputs a unique number for this node */
378 get_irn_node_nr(const ir_node *node) {
381 return node->node_nr;
388 get_irn_const_attr (ir_node *node)
390 assert (node->op == op_Const);
391 return node->attr.con;
395 get_irn_proj_attr (ir_node *node)
397 assert (node->op == op_Proj);
398 return node->attr.proj;
402 get_irn_alloc_attr (ir_node *node)
404 assert (node->op == op_Alloc);
409 get_irn_free_attr (ir_node *node)
411 assert (node->op == op_Free);
412 return node->attr.f = skip_tid(node->attr.f);
416 get_irn_symconst_attr (ir_node *node)
418 assert (node->op == op_SymConst);
423 get_irn_call_attr (ir_node *node)
425 assert (node->op == op_Call);
426 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
430 get_irn_funccall_attr (ir_node *node)
432 assert (node->op == op_FuncCall);
433 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
437 get_irn_sel_attr (ir_node *node)
439 assert (node->op == op_Sel);
444 get_irn_phi_attr (ir_node *node)
446 assert (node->op == op_Phi);
447 return node->attr.phi0_pos;
451 get_irn_block_attr (ir_node *node)
453 assert (node->op == op_Block);
454 return node->attr.block;
458 get_irn_load_attr (ir_node *node)
460 assert (node->op == op_Load);
461 return node->attr.load;
465 get_irn_store_attr (ir_node *node)
467 assert (node->op == op_Store);
468 return node->attr.store;
472 get_irn_except_attr (ir_node *node)
474 assert (node->op == op_Div || node->op == op_Quot ||
475 node->op == op_DivMod || node->op == op_Mod);
476 return node->attr.except;
479 /** manipulate fields of individual nodes **/
481 /* this works for all except Block */
483 get_nodes_block (ir_node *node) {
484 assert (!(node->op == op_Block));
485 return get_irn_n(node, -1);
489 set_nodes_block (ir_node *node, ir_node *block) {
490 assert (!(node->op == op_Block));
491 set_irn_n(node, -1, block);
494 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
495 * from Start. If so returns frame type, else Null. */
496 type *is_frame_pointer(ir_node *n) {
497 if ((get_irn_op(n) == op_Proj) &&
498 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
499 ir_node *start = get_Proj_pred(n);
500 if (get_irn_op(start) == op_Start) {
501 return get_irg_frame_type(get_irn_irg(start));
507 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
508 * from Start. If so returns global type, else Null. */
509 type *is_globals_pointer(ir_node *n) {
510 if ((get_irn_op(n) == op_Proj) &&
511 (get_Proj_proj(n) == pn_Start_P_globals)) {
512 ir_node *start = get_Proj_pred(n);
513 if (get_irn_op(start) == op_Start) {
514 return get_glob_type();
520 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
521 * from Start. If so returns 1, else 0. */
522 int is_value_arg_pointer(ir_node *n) {
523 if ((get_irn_op(n) == op_Proj) &&
524 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
525 (get_irn_op(get_Proj_pred(n)) == op_Start))
530 /* Returns an array with the predecessors of the Block. Depending on
531 the implementation of the graph data structure this can be a copy of
532 the internal representation of predecessors as well as the internal
533 array itself. Therefore writing to this array might obstruct the ir. */
535 get_Block_cfgpred_arr (ir_node *node)
537 assert ((node->op == op_Block));
538 return (ir_node **)&(get_irn_in(node)[1]);
543 get_Block_n_cfgpreds (ir_node *node) {
544 assert ((node->op == op_Block));
545 return get_irn_arity(node);
549 get_Block_cfgpred (ir_node *node, int pos) {
551 assert (node->op == op_Block);
552 assert(-1 <= pos && pos < get_irn_arity(node));
553 return get_irn_n(node, pos);
557 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
558 assert (node->op == op_Block);
559 set_irn_n(node, pos, pred);
563 get_Block_matured (ir_node *node) {
564 assert (node->op == op_Block);
565 return node->attr.block.matured;
569 set_Block_matured (ir_node *node, bool matured) {
570 assert (node->op == op_Block);
571 node->attr.block.matured = matured;
574 get_Block_block_visited (ir_node *node) {
575 assert (node->op == op_Block);
576 return node->attr.block.block_visited;
580 set_Block_block_visited (ir_node *node, unsigned long visit) {
581 assert (node->op == op_Block);
582 node->attr.block.block_visited = visit;
585 /* For this current_ir_graph must be set. */
587 mark_Block_block_visited (ir_node *node) {
588 assert (node->op == op_Block);
589 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
593 Block_not_block_visited(ir_node *node) {
594 assert (node->op == op_Block);
595 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
599 get_Block_graph_arr (ir_node *node, int pos) {
600 assert (node->op == op_Block);
601 return node->attr.block.graph_arr[pos+1];
605 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
606 assert (node->op == op_Block);
607 node->attr.block.graph_arr[pos+1] = value;
/* handler handling for Blocks */
612 set_Block_handler (ir_node *block, ir_node *handler) {
613 assert ((block->op == op_Block));
614 assert ((handler->op == op_Block));
615 block->attr.block.handler_entry = handler;
619 get_Block_handler (ir_node *block) {
620 assert ((block->op == op_Block));
621 return (block->attr.block.handler_entry);
/* handler handling for Nodes */
626 set_Node_handler (ir_node *node, ir_node *handler) {
627 set_Block_handler (get_nodes_block (node), handler);
631 get_Node_handler (ir_node *node) {
632 return (get_Block_handler (get_nodes_block (node)));
/* exc_t handling for Blocks */
636 void set_Block_exc (ir_node *block, exc_t exc) {
637 assert ((block->op == op_Block));
638 block->attr.block.exc = exc;
641 exc_t get_Block_exc (ir_node *block) {
642 assert ((block->op == op_Block));
643 return (block->attr.block.exc);
/* exc_t handling for Nodes */
647 void set_Node_exc (ir_node *node, exc_t exc) {
648 set_Block_exc (get_nodes_block (node), exc);
651 exc_t get_Node_exc (ir_node *node) {
652 return (get_Block_exc (get_nodes_block (node)));
656 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
657 assert(node->op == op_Block);
658 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
659 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
660 node->attr.block.in_cg[0] = NULL;
661 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
663 /* Fix backedge array. fix_backedges operates depending on
664 interprocedural_view. */
665 bool ipv = interprocedural_view;
666 interprocedural_view = true;
667 fix_backedges(current_ir_graph->obst, node);
668 interprocedural_view = ipv;
671 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
674 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
675 assert(node->op == op_Block &&
676 node->attr.block.in_cg &&
677 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
678 node->attr.block.in_cg[pos + 1] = pred;
681 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
682 assert(node->op == op_Block);
683 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
686 int get_Block_cg_n_cfgpreds(ir_node * node) {
687 assert(node->op == op_Block);
688 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
691 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
692 assert(node->op == op_Block && node->attr.block.in_cg);
693 return node->attr.block.in_cg[pos + 1];
696 void remove_Block_cg_cfgpred_arr(ir_node * node) {
697 assert(node->op == op_Block);
698 node->attr.block.in_cg = NULL;
702 set_Start_irg(ir_node *node, ir_graph *irg) {
703 assert(node->op == op_Start);
704 assert(is_ir_graph(irg));
705 assert(0 && " Why set irg? -- use set_irn_irg");
709 get_End_n_keepalives(ir_node *end) {
710 assert (end->op == op_End);
711 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
715 get_End_keepalive(ir_node *end, int pos) {
716 assert (end->op == op_End);
717 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
721 add_End_keepalive (ir_node *end, ir_node *ka) {
722 assert (end->op == op_End);
723 ARR_APP1 (ir_node *, end->in, ka);
727 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
728 assert (end->op == op_End);
729 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
/* Frees the keep-alive (in) array of an End node and poisons the pointer so
   any later access faults instead of reading freed memory. */
free_End (ir_node *end) {
assert (end->op == op_End);
DEL_ARR_F(end->in); /* GL @@@ does not work! */
end->in = NULL; /* @@@ make sure we get an error if we use the
in array afterwards ... */
743 > Implementing the case construct (which is where the constant Proj node is
744 > important) involves far more than simply determining the constant values.
745 > We could argue that this is more properly a function of the translator from
746 > Firm to the target machine. That could be done if there was some way of
747 > projecting "default" out of the Cond node.
748 I know it's complicated.
Basically there are two problems:
750 - determining the gaps between the projs
751 - determining the biggest case constant to know the proj number for
753 I see several solutions:
754 1. Introduce a ProjDefault node. Solves both problems.
755 This means to extend all optimizations executed during construction.
756 2. Give the Cond node for switch two flavors:
757 a) there are no gaps in the projs (existing flavor)
758 b) gaps may exist, default proj is still the Proj with the largest
759 projection number. This covers also the gaps.
760 3. Fix the semantic of the Cond to that of 2b)
762 Solution 2 seems to be the best:
763 Computing the gaps in the Firm representation is not too hard, i.e.,
764 libFIRM can implement a routine that transforms between the two
765 flavours. This is also possible for 1) but 2) does not require to
766 change any existing optimization.
767 Further it should be far simpler to determine the biggest constant than
769 I don't want to choose 3) as 2a) seems to have advantages for
770 dataflow analysis and 3) does not allow to convert the representation to
774 get_Cond_selector (ir_node *node) {
775 assert (node->op == op_Cond);
776 return get_irn_n(node, 0);
780 set_Cond_selector (ir_node *node, ir_node *selector) {
781 assert (node->op == op_Cond);
782 set_irn_n(node, 0, selector);
786 get_Cond_kind (ir_node *node) {
787 assert (node->op == op_Cond);
788 return node->attr.c.kind;
792 set_Cond_kind (ir_node *node, cond_kind kind) {
793 assert (node->op == op_Cond);
794 node->attr.c.kind = kind;
798 get_Cond_defaultProj (ir_node *node) {
799 assert (node->op == op_Cond);
800 return node->attr.c.default_proj;
804 get_Return_mem (ir_node *node) {
805 assert (node->op == op_Return);
806 return get_irn_n(node, 0);
810 set_Return_mem (ir_node *node, ir_node *mem) {
811 assert (node->op == op_Return);
812 set_irn_n(node, 0, mem);
816 get_Return_n_ress (ir_node *node) {
817 assert (node->op == op_Return);
818 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
822 get_Return_res_arr (ir_node *node)
824 assert ((node->op == op_Return));
825 if (get_Return_n_ress(node) > 0)
826 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
833 set_Return_n_res (ir_node *node, int results) {
834 assert (node->op == op_Return);
839 get_Return_res (ir_node *node, int pos) {
840 assert (node->op == op_Return);
841 assert (get_Return_n_ress(node) > pos);
842 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
846 set_Return_res (ir_node *node, int pos, ir_node *res){
847 assert (node->op == op_Return);
848 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
852 get_Raise_mem (ir_node *node) {
853 assert (node->op == op_Raise);
854 return get_irn_n(node, 0);
858 set_Raise_mem (ir_node *node, ir_node *mem) {
859 assert (node->op == op_Raise);
860 set_irn_n(node, 0, mem);
864 get_Raise_exo_ptr (ir_node *node) {
865 assert (node->op == op_Raise);
866 return get_irn_n(node, 1);
870 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
871 assert (node->op == op_Raise);
872 set_irn_n(node, 1, exo_ptr);
875 tarval *get_Const_tarval (ir_node *node) {
876 assert (node->op == op_Const);
877 return node->attr.con.tv;
881 set_Const_tarval (ir_node *node, tarval *con) {
882 assert (node->op == op_Const);
883 node->attr.con.tv = con;
887 /* The source language type. Must be an atomic type. Mode of type must
888 be mode of node. For tarvals from entities type must be pointer to
891 get_Const_type (ir_node *node) {
892 assert (node->op == op_Const);
893 return node->attr.con.tp;
897 set_Const_type (ir_node *node, type *tp) {
898 assert (node->op == op_Const);
899 if (tp != unknown_type) {
900 assert (is_atomic_type(tp));
901 assert (get_type_mode(tp) == get_irn_mode(node));
904 node->attr.con.tp = tp;
909 get_SymConst_kind (const ir_node *node) {
910 assert (node->op == op_SymConst);
911 return node->attr.i.num;
915 set_SymConst_kind (ir_node *node, symconst_kind num) {
916 assert (node->op == op_SymConst);
917 node->attr.i.num = num;
921 get_SymConst_type (ir_node *node) {
922 assert ( (node->op == op_SymConst)
923 && ( get_SymConst_kind(node) == symconst_type_tag
924 || get_SymConst_kind(node) == symconst_size));
925 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
929 set_SymConst_type (ir_node *node, type *tp) {
930 assert ( (node->op == op_SymConst)
931 && ( get_SymConst_kind(node) == symconst_type_tag
932 || get_SymConst_kind(node) == symconst_size));
933 node->attr.i.sym.type_p = tp;
937 get_SymConst_name (ir_node *node) {
938 assert ( (node->op == op_SymConst)
939 && (get_SymConst_kind(node) == symconst_addr_name));
940 return node->attr.i.sym.ident_p;
944 set_SymConst_name (ir_node *node, ident *name) {
945 assert ( (node->op == op_SymConst)
946 && (get_SymConst_kind(node) == symconst_addr_name));
947 node->attr.i.sym.ident_p = name;
951 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
952 entity *get_SymConst_entity (ir_node *node) {
953 assert ( (node->op == op_SymConst)
954 && (get_SymConst_kind (node) == symconst_addr_ent));
955 return node->attr.i.sym.entity_p;
958 void set_SymConst_entity (ir_node *node, entity *ent) {
959 assert ( (node->op == op_SymConst)
960 && (get_SymConst_kind(node) == symconst_addr_ent));
961 node->attr.i.sym.entity_p = ent;
965 union symconst_symbol
966 get_SymConst_symbol (ir_node *node) {
967 assert (node->op == op_SymConst);
968 return node->attr.i.sym;
972 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
973 assert (node->op == op_SymConst);
974 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
975 node->attr.i.sym = sym;
979 get_Sel_mem (ir_node *node) {
980 assert (node->op == op_Sel);
981 return get_irn_n(node, 0);
985 set_Sel_mem (ir_node *node, ir_node *mem) {
986 assert (node->op == op_Sel);
987 set_irn_n(node, 0, mem);
991 get_Sel_ptr (ir_node *node) {
992 assert (node->op == op_Sel);
993 return get_irn_n(node, 1);
997 set_Sel_ptr (ir_node *node, ir_node *ptr) {
998 assert (node->op == op_Sel);
999 set_irn_n(node, 1, ptr);
1003 get_Sel_n_indexs (ir_node *node) {
1004 assert (node->op == op_Sel);
1005 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1009 get_Sel_index_arr (ir_node *node)
1011 assert ((node->op == op_Sel));
1012 if (get_Sel_n_indexs(node) > 0)
1013 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1019 get_Sel_index (ir_node *node, int pos) {
1020 assert (node->op == op_Sel);
1021 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1025 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1026 assert (node->op == op_Sel);
1027 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1031 get_Sel_entity (ir_node *node) {
1032 assert (node->op == op_Sel);
1033 return node->attr.s.ent;
1037 set_Sel_entity (ir_node *node, entity *ent) {
1038 assert (node->op == op_Sel);
1039 node->attr.s.ent = ent;
1043 get_InstOf_ent (ir_node *node) {
1044 assert (node->op = op_InstOf);
1045 return (node->attr.io.ent);
1049 set_InstOf_ent (ir_node *node, type *ent) {
1050 assert (node->op = op_InstOf);
1051 node->attr.io.ent = ent;
1055 get_InstOf_store (ir_node *node) {
1056 assert (node->op = op_InstOf);
1057 return (get_irn_n (node, 0));
1061 set_InstOf_store (ir_node *node, ir_node *obj) {
1062 assert (node->op = op_InstOf);
1063 set_irn_n (node, 0, obj);
1067 get_InstOf_obj (ir_node *node) {
1068 assert (node->op = op_InstOf);
1069 return (get_irn_n (node, 1));
1073 set_InstOf_obj (ir_node *node, ir_node *obj) {
1074 assert (node->op = op_InstOf);
1075 set_irn_n (node, 1, obj);
1079 /* For unary and binary arithmetic operations the access to the
1080 operands can be factored out. Left is the first, right the
1081 second arithmetic value as listed in tech report 0999-33.
1082 unops are: Minus, Abs, Not, Conv, Cast
1083 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1084 Shr, Shrs, Rotate, Cmp */
1088 get_Call_mem (ir_node *node) {
1089 assert (node->op == op_Call);
1090 return get_irn_n(node, 0);
1094 set_Call_mem (ir_node *node, ir_node *mem) {
1095 assert (node->op == op_Call);
1096 set_irn_n(node, 0, mem);
1100 get_Call_ptr (ir_node *node) {
1101 assert (node->op == op_Call);
1102 return get_irn_n(node, 1);
1106 set_Call_ptr (ir_node *node, ir_node *ptr) {
1107 assert (node->op == op_Call);
1108 set_irn_n(node, 1, ptr);
1112 get_Call_param_arr (ir_node *node) {
1113 assert (node->op == op_Call);
1114 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1118 get_Call_n_params (ir_node *node) {
1119 assert (node->op == op_Call);
1120 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1124 get_Call_arity (ir_node *node) {
1125 assert (node->op == op_Call);
1126 return get_Call_n_params(node);
1130 set_Call_arity (ir_node *node, ir_node *arity) {
1131 assert (node->op == op_Call);
1136 get_Call_param (ir_node *node, int pos) {
1137 assert (node->op == op_Call);
1138 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1142 set_Call_param (ir_node *node, int pos, ir_node *param) {
1143 assert (node->op == op_Call);
1144 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1148 get_Call_type (ir_node *node) {
1149 assert (node->op == op_Call);
1150 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1154 set_Call_type (ir_node *node, type *tp) {
1155 assert (node->op == op_Call);
1156 assert (is_method_type(tp));
1157 node->attr.call.cld_tp = tp;
1160 int Call_has_callees(ir_node *node) {
1162 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1163 (node->attr.call.callee_arr != NULL));
1166 int get_Call_n_callees(ir_node * node) {
1167 assert(node->op == op_Call && node->attr.call.callee_arr);
1168 return ARR_LEN(node->attr.call.callee_arr);
1171 entity * get_Call_callee(ir_node * node, int pos) {
1172 assert(node->op == op_Call && node->attr.call.callee_arr);
1173 return node->attr.call.callee_arr[pos];
1176 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1177 assert(node->op == op_Call);
1178 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1179 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1181 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1184 void remove_Call_callee_arr(ir_node * node) {
1185 assert(node->op == op_Call);
1186 node->attr.call.callee_arr = NULL;
1189 ir_node * get_CallBegin_ptr (ir_node *node) {
1190 assert(node->op == op_CallBegin);
1191 return get_irn_n(node, 0);
1193 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1194 assert(node->op == op_CallBegin);
1195 set_irn_n(node, 0, ptr);
1197 ir_node * get_CallBegin_call (ir_node *node) {
1198 assert(node->op == op_CallBegin);
1199 return node->attr.callbegin.call;
1201 void set_CallBegin_call (ir_node *node, ir_node *call) {
1202 assert(node->op == op_CallBegin);
1203 node->attr.callbegin.call = call;
1207 get_FuncCall_ptr (ir_node *node) {
1208 assert (node->op == op_FuncCall);
1209 return get_irn_n(node, 0);
1213 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1214 assert (node->op == op_FuncCall);
1215 set_irn_n(node, 0, ptr);
1219 get_FuncCall_param_arr (ir_node *node) {
1220 assert (node->op == op_FuncCall);
1221 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1225 get_FuncCall_n_params (ir_node *node) {
1226 assert (node->op == op_FuncCall);
1227 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1231 get_FuncCall_arity (ir_node *node) {
1232 assert (node->op == op_FuncCall);
1233 return get_FuncCall_n_params(node);
1237 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1238 assert (node->op == op_FuncCall);
1243 get_FuncCall_param (ir_node *node, int pos) {
1244 assert (node->op == op_FuncCall);
1245 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1249 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1250 assert (node->op == op_FuncCall);
1251 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1255 get_FuncCall_type (ir_node *node) {
1256 assert (node->op == op_FuncCall);
1257 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1261 set_FuncCall_type (ir_node *node, type *tp) {
1262 assert (node->op == op_FuncCall);
1263 assert (is_method_type(tp));
1264 node->attr.call.cld_tp = tp;
1267 int FuncCall_has_callees(ir_node *node) {
1268 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1269 (node->attr.call.callee_arr != NULL));
1272 int get_FuncCall_n_callees(ir_node * node) {
1273 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1274 return ARR_LEN(node->attr.call.callee_arr);
1277 entity * get_FuncCall_callee(ir_node * node, int pos) {
1278 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1279 return node->attr.call.callee_arr[pos];
1282 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1283 assert(node->op == op_FuncCall);
1284 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1285 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1287 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1290 void remove_FuncCall_callee_arr(ir_node * node) {
1291 assert(node->op == op_FuncCall);
1292 node->attr.call.callee_arr = NULL;
1297 ir_node * get_##OP##_left(ir_node *node) { \
1298 assert(node->op == op_##OP); \
1299 return get_irn_n(node, node->op->op_index); \
1301 void set_##OP##_left(ir_node *node, ir_node *left) { \
1302 assert(node->op == op_##OP); \
1303 set_irn_n(node, node->op->op_index, left); \
1305 ir_node *get_##OP##_right(ir_node *node) { \
1306 assert(node->op == op_##OP); \
1307 return get_irn_n(node, node->op->op_index + 1); \
1309 void set_##OP##_right(ir_node *node, ir_node *right) { \
1310 assert(node->op == op_##OP); \
1311 set_irn_n(node, node->op->op_index + 1, right); \
1315 ir_node *get_##OP##_op(ir_node *node) { \
1316 assert(node->op == op_##OP); \
1317 return get_irn_n(node, node->op->op_index); \
1319 void set_##OP##_op (ir_node *node, ir_node *op) { \
1320 assert(node->op == op_##OP); \
1321 set_irn_n(node, node->op->op_index, op); \
1331 get_Quot_mem (ir_node *node) {
1332 assert (node->op == op_Quot);
1333 return get_irn_n(node, 0);
1337 set_Quot_mem (ir_node *node, ir_node *mem) {
1338 assert (node->op == op_Quot);
1339 set_irn_n(node, 0, mem);
1345 get_DivMod_mem (ir_node *node) {
1346 assert (node->op == op_DivMod);
1347 return get_irn_n(node, 0);
1351 set_DivMod_mem (ir_node *node, ir_node *mem) {
1352 assert (node->op == op_DivMod);
1353 set_irn_n(node, 0, mem);
1359 get_Div_mem (ir_node *node) {
1360 assert (node->op == op_Div);
1361 return get_irn_n(node, 0);
1365 set_Div_mem (ir_node *node, ir_node *mem) {
1366 assert (node->op == op_Div);
1367 set_irn_n(node, 0, mem);
1373 get_Mod_mem (ir_node *node) {
1374 assert (node->op == op_Mod);
1375 return get_irn_n(node, 0);
1379 set_Mod_mem (ir_node *node, ir_node *mem) {
1380 assert (node->op == op_Mod);
1381 set_irn_n(node, 0, mem);
1398 get_Cast_type (ir_node *node) {
1399 assert (node->op == op_Cast);
1400 return node->attr.cast.totype;
1404 set_Cast_type (ir_node *node, type *to_tp) {
1405 assert (node->op == op_Cast);
1406 node->attr.cast.totype = to_tp;
/* Generic operand accessors that work for any unary/binary operation.
   The position of the first data operand is taken from the op
   descriptor (node->op->op_index); for binary ops the right operand
   sits one slot after the left one.  Each accessor checks the op's
   declared arity class (oparity_unary / oparity_binary) and asserts
   when applied to a node of the wrong class. */
1410 is_unop (ir_node *node) {
1411 return (node->op->opar == oparity_unary);
1415 get_unop_op (ir_node *node) {
1416 if (node->op->opar == oparity_unary)
1417 return get_irn_n(node, node->op->op_index);
/* reached only for non-unary nodes: report misuse */
1419 assert(node->op->opar == oparity_unary);
1424 set_unop_op (ir_node *node, ir_node *op) {
1425 if (node->op->opar == oparity_unary)
1426 set_irn_n(node, node->op->op_index, op);
1428 assert(node->op->opar == oparity_unary);
1432 is_binop (ir_node *node) {
1433 return (node->op->opar == oparity_binary);
1437 get_binop_left (ir_node *node) {
1438 if (node->op->opar == oparity_binary)
1439 return get_irn_n(node, node->op->op_index);
1441 assert(node->op->opar == oparity_binary);
1446 set_binop_left (ir_node *node, ir_node *left) {
1447 if (node->op->opar == oparity_binary)
1448 set_irn_n(node, node->op->op_index, left);
1450 assert (node->op->opar == oparity_binary);
/* right operand lives at op_index + 1 */
1454 get_binop_right (ir_node *node) {
1455 if (node->op->opar == oparity_binary)
1456 return get_irn_n(node, node->op->op_index + 1);
1458 assert(node->op->opar == oparity_binary);
1463 set_binop_right (ir_node *node, ir_node *right) {
1464 if (node->op->opar == oparity_binary)
1465 set_irn_n(node, node->op->op_index + 1, right);
1467 assert (node->op->opar == oparity_binary);
/* True if n acts as a Phi node.  In the interprocedural view a Filter
   node plays the Phi role, so it counts as well.  During graph
   construction (phase_building) zero-arity Phis are the separate
   "Phi0" placeholder kind and are excluded here. */
1470 int is_Phi (ir_node *n) {
1476 if (op == op_Filter) return interprocedural_view;
1479 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1480 (get_irn_arity(n) > 0));
/* True only for the construction-time placeholder: a Phi with no
   predecessors while the graph is still in phase_building. */
1485 int is_Phi0 (ir_node *n) {
1488 return ((get_irn_op(n) == op_Phi) &&
1489 (get_irn_arity(n) == 0) &&
1490 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Returns the raw predecessor array, skipping slot 0 (the block). */
1494 get_Phi_preds_arr (ir_node *node) {
1495 assert (node->op == op_Phi);
1496 return (ir_node **)&(get_irn_in(node)[1]);
1500 get_Phi_n_preds (ir_node *node) {
1501 assert (is_Phi(node) || is_Phi0(node));
1502 return (get_irn_arity(node));
1506 void set_Phi_n_preds (ir_node *node, int n_preds) {
1507 assert (node->op == op_Phi);
1512 get_Phi_pred (ir_node *node, int pos) {
1513 assert (is_Phi(node) || is_Phi0(node));
1514 return get_irn_n(node, pos);
1518 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1519 assert (is_Phi(node) || is_Phi0(node));
1520 set_irn_n(node, pos, pred);
/* A "memop" is a Load or a Store; both share the same layout for
   their first two predecessors: memory at slot 0, address at slot 1.
   These accessors exploit that common layout. */
1524 int is_memop(ir_node *node) {
1525 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1528 ir_node *get_memop_mem (ir_node *node) {
1529 assert(is_memop(node));
1530 return get_irn_n(node, 0);
1533 void set_memop_mem (ir_node *node, ir_node *mem) {
1534 assert(is_memop(node));
1535 set_irn_n(node, 0, mem);
1538 ir_node *get_memop_ptr (ir_node *node) {
1539 assert(is_memop(node));
1540 return get_irn_n(node, 1);
1543 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1544 assert(is_memop(node));
1545 set_irn_n(node, 1, ptr);
/* Load accessors.  Predecessors: memory at 0, address at 1.
   The mode of the loaded value and the volatility flag are attributes
   (attr.load), not predecessors. */
1549 get_Load_mem (ir_node *node) {
1550 assert (node->op == op_Load);
1551 return get_irn_n(node, 0);
1555 set_Load_mem (ir_node *node, ir_node *mem) {
1556 assert (node->op == op_Load);
1557 set_irn_n(node, 0, mem);
1561 get_Load_ptr (ir_node *node) {
1562 assert (node->op == op_Load);
1563 return get_irn_n(node, 1);
1567 set_Load_ptr (ir_node *node, ir_node *ptr) {
1568 assert (node->op == op_Load);
1569 set_irn_n(node, 1, ptr);
/* Mode of the value this Load produces. */
1573 get_Load_mode (ir_node *node) {
1574 assert (node->op == op_Load);
1575 return node->attr.load.load_mode;
1579 set_Load_mode (ir_node *node, ir_mode *mode) {
1580 assert (node->op == op_Load);
1581 node->attr.load.load_mode = mode;
/* Volatility marker; volatile loads must not be optimized away. */
1585 get_Load_volatility (ir_node *node) {
1586 assert (node->op == op_Load);
1587 return node->attr.load.volatility;
1591 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1592 assert (node->op == op_Load);
1593 node->attr.load.volatility = volatility;
/* Store accessors.  Predecessors: memory at 0, address at 1, value to
   be stored at 2.  Volatility is an attribute (attr.store). */
1598 get_Store_mem (ir_node *node) {
1599 assert (node->op == op_Store);
1600 return get_irn_n(node, 0);
1604 set_Store_mem (ir_node *node, ir_node *mem) {
1605 assert (node->op == op_Store);
1606 set_irn_n(node, 0, mem);
1610 get_Store_ptr (ir_node *node) {
1611 assert (node->op == op_Store);
1612 return get_irn_n(node, 1);
1616 set_Store_ptr (ir_node *node, ir_node *ptr) {
1617 assert (node->op == op_Store);
1618 set_irn_n(node, 1, ptr);
1622 get_Store_value (ir_node *node) {
1623 assert (node->op == op_Store);
1624 return get_irn_n(node, 2);
1628 set_Store_value (ir_node *node, ir_node *value) {
1629 assert (node->op == op_Store);
1630 set_irn_n(node, 2, value);
/* Volatility marker; volatile stores must not be optimized away. */
1634 get_Store_volatility (ir_node *node) {
1635 assert (node->op == op_Store);
1636 return node->attr.store.volatility;
1640 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1641 assert (node->op == op_Store);
1642 node->attr.store.volatility = volatility;
/* Alloc accessors.  Predecessors: memory at 0, size at 1.  The
   allocated type and the allocation place (stack/heap) are attributes
   in attr.a. */
1647 get_Alloc_mem (ir_node *node) {
1648 assert (node->op == op_Alloc);
1649 return get_irn_n(node, 0);
1653 set_Alloc_mem (ir_node *node, ir_node *mem) {
1654 assert (node->op == op_Alloc);
1655 set_irn_n(node, 0, mem);
1659 get_Alloc_size (ir_node *node) {
1660 assert (node->op == op_Alloc);
1661 return get_irn_n(node, 1);
1665 set_Alloc_size (ir_node *node, ir_node *size) {
1666 assert (node->op == op_Alloc);
1667 set_irn_n(node, 1, size);
/* Getter normalizes the stored type through skip_tid() and caches the
   normalized pointer back into the attribute before returning it. */
1671 get_Alloc_type (ir_node *node) {
1672 assert (node->op == op_Alloc);
1673 return node->attr.a.type = skip_tid(node->attr.a.type);
1677 set_Alloc_type (ir_node *node, type *tp) {
1678 assert (node->op == op_Alloc);
1679 node->attr.a.type = tp;
/* Where the allocation happens (where_alloc: stack vs. heap). */
1683 get_Alloc_where (ir_node *node) {
1684 assert (node->op == op_Alloc);
1685 return node->attr.a.where;
1689 set_Alloc_where (ir_node *node, where_alloc where) {
1690 assert (node->op == op_Alloc);
1691 node->attr.a.where = where;
/* Free accessors.  Predecessors: memory at 0, pointer to be freed at
   1, size at 2.  The type of the freed object is the attribute
   attr.f. */
1696 get_Free_mem (ir_node *node) {
1697 assert (node->op == op_Free);
1698 return get_irn_n(node, 0);
1702 set_Free_mem (ir_node *node, ir_node *mem) {
1703 assert (node->op == op_Free);
1704 set_irn_n(node, 0, mem);
1708 get_Free_ptr (ir_node *node) {
1709 assert (node->op == op_Free);
1710 return get_irn_n(node, 1);
1714 set_Free_ptr (ir_node *node, ir_node *ptr) {
1715 assert (node->op == op_Free);
1716 set_irn_n(node, 1, ptr);
1720 get_Free_size (ir_node *node) {
1721 assert (node->op == op_Free);
1722 return get_irn_n(node, 2);
1726 set_Free_size (ir_node *node, ir_node *size) {
1727 assert (node->op == op_Free);
1728 set_irn_n(node, 2, size);
/* Like get_Alloc_type: normalize via skip_tid() and cache the result. */
1732 get_Free_type (ir_node *node) {
1733 assert (node->op == op_Free);
1734 return node->attr.f = skip_tid(node->attr.f);
1738 set_Free_type (ir_node *node, type *tp) {
1739 assert (node->op == op_Free);
/* Sync accessors.  A Sync joins several memory values; all its data
   predecessors are memory nodes.  The preds array skips slot 0 (the
   block predecessor). */
1744 get_Sync_preds_arr (ir_node *node) {
1745 assert (node->op == op_Sync);
1746 return (ir_node **)&(get_irn_in(node)[1]);
1750 get_Sync_n_preds (ir_node *node) {
1751 assert (node->op == op_Sync);
1752 return (get_irn_arity(node));
1757 set_Sync_n_preds (ir_node *node, int n_preds) {
1758 assert (node->op == op_Sync);
1763 get_Sync_pred (ir_node *node, int pos) {
1764 assert (node->op == op_Sync);
1765 return get_irn_n(node, pos);
1769 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1770 assert (node->op == op_Sync);
1771 set_irn_n(node, pos, pred);
/* Proj accessors.  is_Proj() also accepts Filter nodes outside the
   interprocedural view, so the proj-number getter must look at the
   right attribute for each opcode. */
1775 get_Proj_pred (ir_node *node) {
1776 assert (is_Proj(node));
1777 return get_irn_n(node, 0);
1781 set_Proj_pred (ir_node *node, ir_node *pred) {
1782 assert (is_Proj(node));
1783 set_irn_n(node, 0, pred);
/* Returns the projection number: attr.proj for a real Proj,
   attr.filter.proj for a Filter acting as a Proj. */
1787 get_Proj_proj (ir_node *node) {
1788 assert (is_Proj(node));
1789 if (get_irn_opcode(node) == iro_Proj) {
1790 return node->attr.proj;
1792 assert(get_irn_opcode(node) == iro_Filter);
1793 return node->attr.filter.proj;
/* Setter accepts only genuine Proj nodes (not Filters). */
1798 set_Proj_proj (ir_node *node, long proj) {
1799 assert (node->op == op_Proj);
1800 node->attr.proj = proj;
/* Tuple accessors.  A Tuple bundles several values selected later by
   Proj nodes.  The preds array view again skips slot 0 (the block). */
1804 get_Tuple_preds_arr (ir_node *node) {
1805 assert (node->op == op_Tuple);
1806 return (ir_node **)&(get_irn_in(node)[1]);
1810 get_Tuple_n_preds (ir_node *node) {
1811 assert (node->op == op_Tuple);
1812 return (get_irn_arity(node));
1817 set_Tuple_n_preds (ir_node *node, int n_preds) {
1818 assert (node->op == op_Tuple);
1823 get_Tuple_pred (ir_node *node, int pos) {
1824 assert (node->op == op_Tuple);
1825 return get_irn_n(node, pos);
1829 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1830 assert (node->op == op_Tuple);
1831 set_irn_n(node, pos, pred);
/* Id accessors: an Id node simply forwards its single predecessor. */
1835 get_Id_pred (ir_node *node) {
1836 assert (node->op == op_Id);
1837 return get_irn_n(node, 0);
1841 set_Id_pred (ir_node *node, ir_node *pred) {
1842 assert (node->op == op_Id);
1843 set_irn_n(node, 0, pred);
/* Confirm accessors.  A Confirm asserts a relation between its value
   operand (predecessor 0) and its bound operand (predecessor 1). */
1846 ir_node *get_Confirm_value (ir_node *node) {
1847 assert (node->op == op_Confirm);
1848 return get_irn_n(node, 0);
1850 void set_Confirm_value (ir_node *node, ir_node *value) {
1851 assert (node->op == op_Confirm);
1852 set_irn_n(node, 0, value);
/* The bound the value is compared against: predecessor 1. */
1854 ir_node *get_Confirm_bound (ir_node *node) {
1855 assert (node->op == op_Confirm);
1856 return get_irn_n(node, 1);
/* Sets the bound operand of a Confirm node.
   BUG FIX: this wrote predecessor 0, silently overwriting the value
   operand; the bound lives at predecessor 1, as get_Confirm_bound
   shows.  Now setter and getter agree. */
1858 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1859 assert (node->op == op_Confirm);
1860 set_irn_n(node, 1, bound);
/* The comparison relation (pn_Cmp) the Confirm guarantees between its
   value and bound; stored as the attribute attr.confirm_cmp. */
1862 pn_Cmp get_Confirm_cmp (ir_node *node) {
1863 assert (node->op == op_Confirm);
1864 return node->attr.confirm_cmp;
1866 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1867 assert (node->op == op_Confirm);
1868 node->attr.confirm_cmp = cmp;
/* Filter accessors.  A Filter is the interprocedural counterpart of
   Proj/Phi; besides its normal predecessors it keeps a separate
   interprocedural predecessor array attr.filter.in_cg whose slot 0
   mirrors the block predecessor (real preds start at index 1). */
1873 get_Filter_pred (ir_node *node) {
1874 assert(node->op == op_Filter);
1878 set_Filter_pred (ir_node *node, ir_node *pred) {
1879 assert(node->op == op_Filter);
1883 get_Filter_proj(ir_node *node) {
1884 assert(node->op == op_Filter);
1885 return node->attr.filter.proj;
1888 set_Filter_proj (ir_node *node, long proj) {
1889 assert(node->op == op_Filter);
1890 node->attr.filter.proj = proj;
1893 /* Don't use get_irn_arity, get_irn_n in implementation as access
1894 shall work independent of view!!! */
/* (Re)initializes the interprocedural predecessor array.  The arrays
   are (re)allocated on the graph's obstack only when the arity
   changed; slot 0 is filled with the block predecessor. */
1895 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1896 assert(node->op == op_Filter);
1897 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1898 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1899 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1900 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1901 node->attr.filter.in_cg[0] = node->in[0];
1903 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1906 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1907 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1908 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1909 node->attr.filter.in_cg[pos + 1] = pred;
/* Number of interprocedural preds = array length minus the block slot. */
1911 int get_Filter_n_cg_preds(ir_node *node) {
1912 assert(node->op == op_Filter && node->attr.filter.in_cg);
1913 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1915 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1917 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1919 arity = ARR_LEN(node->attr.filter.in_cg);
1920 assert(pos < arity - 1);
1921 return node->attr.filter.in_cg[pos + 1];
/* Returns the graph a node belongs to by walking to its block, whose
   attribute stores the irg.  A Bad predecessor may stand in for the
   block after optimizations, so one more hop is tried in that case. */
1926 get_irn_irg(ir_node *node) {
1927 if (get_irn_op(node) != op_Block)
1928 node = get_nodes_block(node);
1929 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1930 node = get_nodes_block(node);
1931 assert(get_irn_op(node) == op_Block);
1932 return node->attr.block.irg;
1936 /*----------------------------------------------------------------*/
1937 /* Auxiliary routines                                              */
1938 /*----------------------------------------------------------------*/
/* Returns the predecessor of a Proj, or the node itself otherwise.
   NULL-safe by design (see comment below). */
1941 skip_Proj (ir_node *node) {
1942 /* don't assert node !!! */
1943 if (node && is_Proj(node)) {
1944 return get_Proj_pred(node);
/* Looks through Proj(Tuple) chains: if node is a Proj whose
   (Id-skipped) predecessor is a Tuple, return the Tuple entry the
   Proj selects; nested Tuples are resolved recursively.  Disabled
   when normalization is switched off. */
1951 skip_Tuple (ir_node *node) {
1954 if (!get_opt_normalize()) return node;
1956 node = skip_Id(node);
1957 if (get_irn_op(node) == op_Proj) {
1958 pred = skip_Id(get_Proj_pred(node));
1959 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1960 pred = skip_Id(skip_Tuple(pred));
1961 if (get_irn_op(pred) == op_Tuple)
1962 return get_Tuple_pred(pred, get_Proj_proj(node));
1967 /** returns operand of node if node is a Cast */
1968 ir_node *skip_Cast (ir_node *node) {
1969 if (node && get_irn_op(node) == op_Cast) {
1970 return skip_Id(get_irn_n(node, 0));
1977 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1978 than any other approach, as Id chains are resolved and all point to the real node, or
1979 all id's are self loops. */
/* NOTE(review): two definitions of skip_Id appear in this region;
   presumably one of them is compiled out by preprocessor guards in
   lines omitted from this view -- confirm against the full file. */
1981 skip_Id (ir_node *node) {
1982 /* don't assert node !!! */
1984 if (!get_opt_normalize()) return node;
1986 /* Don't use get_Id_pred: We get into an endless loop for
1987 self-referencing Ids. */
1988 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
/* in[0+1] is the first data predecessor (in[0] is the block) */
1989 ir_node *rem_pred = node->in[0+1];
1992 assert (get_irn_arity (node) > 0);
/* temporarily make this node a self-loop so a cyclic Id chain
   terminates the recursion below instead of looping forever */
1994 node->in[0+1] = node;
1995 res = skip_Id(rem_pred);
1996 if (res->op == op_Id) /* self-loop */ return node;
/* shorten the chain: point directly at the resolved end node */
1998 node->in[0+1] = res;
2005 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2006 than any other approach, as Id chains are resolved and all point to the real node, or
2007 all id's are self loops. */
2009 skip_Id (ir_node *node) {
2011 /* don't assert node !!! */
2013 if (!node || (node->op != op_Id)) return node;
2015 if (!get_opt_normalize()) return node;
2017 /* Don't use get_Id_pred: We get into an endless loop for
2018 self-referencing Ids. */
2019 pred = node->in[0+1];
2021 if (pred->op != op_Id) return pred;
2023 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2024 ir_node *rem_pred, *res;
2026 if (pred->op != op_Id) return pred; /* shortcut */
2029 assert (get_irn_arity (node) > 0);
2031 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2032 res = skip_Id(rem_pred);
2033 if (res->op == op_Id) /* self-loop */ return node;
2035 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Opcode predicates.  Small helpers that classify a node by opcode. */
2044 is_Bad (ir_node *node) {
/* NULL-safe: a NULL node is not Bad */
2046 if ((node) && get_irn_opcode(node) == iro_Bad)
2052 is_no_Block (ir_node *node) {
2054 return (get_irn_opcode(node) != iro_Block);
2058 is_Block (ir_node *node) {
2060 return (get_irn_opcode(node) == iro_Block);
2063 /* returns true if node is a Unknown node. */
2065 is_Unknown (ir_node *node) {
2067 return (get_irn_opcode(node) == iro_Unknown);
/* Outside the interprocedural view a Filter also counts as a Proj. */
2071 is_Proj (const ir_node *node) {
2073 return node->op == op_Proj
2074 || (!interprocedural_view && node->op == op_Filter);
2077 /* Returns true if the operation manipulates control flow. */
2079 is_cfop(ir_node *node) {
2080 return is_cfopcode(get_irn_op(node));
2083 /* Returns true if the operation manipulates interprocedural control flow:
2084 CallBegin, EndReg, EndExcept */
2085 int is_ip_cfop(ir_node *node) {
2086 return is_ip_cfopcode(get_irn_op(node));
2089 /* Returns true if the operation can change the control flow because
2092 is_fragile_op(ir_node *node) {
2093 return is_op_fragile(get_irn_op(node));
2096 /* Returns the memory operand of fragile operations. */
/* Dispatches on the opcode; for the handled fragile ops the memory
   dependency is predecessor 0.  The default branch is unreachable for
   a genuine fragile op (guarded by the assert above). */
2097 ir_node *get_fragile_op_mem(ir_node *node) {
2098 assert(node && is_fragile_op(node));
2100 switch (get_irn_opcode (node)) {
2109 return get_irn_n(node, 0);
2114 assert(0 && "should not be reached");
2119 #ifdef DEBUG_libfirm
/* Debug helper: prints a node (op, mode, number, address), its block,
   and all predecessors to stdout.  Compiled to a no-op stub when
   DEBUG_libfirm is not defined so callers need no #ifdef. */
2120 void dump_irn (ir_node *n) {
2121 int i, arity = get_irn_arity(n);
2122 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* index -1 of get_irn_n is the block predecessor */
2124 ir_node *pred = get_irn_n(n, -1);
2125 printf("  block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2126 get_irn_node_nr(pred), (void *)pred);
2128 printf("  preds: \n");
2129 for (i = 0; i < arity; ++i) {
2130 ir_node *pred = get_irn_n(n, i);
2131 printf("    %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2132 get_irn_node_nr(pred), (void *)pred);
2136 #else /* DEBUG_libfirm */
2137 void dump_irn (ir_node *n) {}
2138 #endif /* DEBUG_libfirm */