3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
/* some constants fixing the positions of nodes predecessors */
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
/* Printable names for the pnc (comparison relation) constants,
   indexed by the pnc value; used by get_pnc_string(). */
static const char *pnc_name_arr [] = {
  "False", "Eq", "Lt", "Le",
  "Gt", "Ge", "Lg", "Leg", "Uo",
  "Ue", "Ul", "Ule", "Ug", "Uge",
 * Returns the pnc name from a pnc constant.
/** Returns the printable name of a pnc comparison constant.
    NOTE(review): no bounds check -- pnc must be a valid index into
    pnc_name_arr; verify callers never pass an out-of-range value. */
const char *get_pnc_string(int pnc) {
  return pnc_name_arr[pnc];
/**
 * Calculates the negated pnc condition: each relation maps to its
 * logical complement (e.g. Lt <-> Uge, Eq <-> Ne), so the ordered
 * relations negate to the unordered ones and vice versa.
 */
get_negated_pnc(int pnc) {
  case False: return True; break;
  case Eq: return Ne; break;
  case Lt: return Uge; break;
  case Le: return Ug; break;
  case Gt: return Ule; break;
  case Ge: return Ul; break;
  case Lg: return Ue; break;
  case Leg: return Uo; break;
  case Uo: return Leg; break;
  case Ue: return Lg; break;
  case Ul: return Ge; break;
  case Ule: return Gt; break;
  case Ug: return Le; break;
  case Uge: return Lt; break;
  case Ne: return Eq; break;
  case True: return False; break;
  return 99; /* unreachable for valid pnc values; silences gcc's
                missing-return warning */
/* Printable names for the Proj numbers of the Start node's results. */
const char *pns_name_arr [] = {
  "initial_exec", "global_store",
  "frame_base", "globals", "args"

/* Printable names for the symconst_kind values. */
const char *symconst_name_arr [] = {
  "type_tag", "size", "addr_name", "addr_ent"
/*
 * Create a new ir node in graph irg, with opcode op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic (growable) in array is
 * created, e.g. for Block or Phi nodes during construction.
 */
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
             int arity, ir_node **in)
  /* node size = common header plus the opcode-specific attribute area */
  int node_size = offsetof (ir_node, attr) + op->attr_size;
  assert(irg && op && mode);
  res = (ir_node *) obstack_alloc (irg->obst, node_size);
  memset((void *)res, 0, node_size);  /* zero header and attributes */
  res->kind = k_ir_node;
  /* dynamic-arity branch: flexible array, slot 0 reserved for the block */
  res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
  /* fixed-arity branch: obstack array; predecessors copied behind the
     block slot at index 0 */
  res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
  set_irn_dbg_info(res, db);
  res->node_nr = get_irp_new_node_nr();  /* unique number for debugging */
/* Copies all attributes stored in the old node to the new node.
   Both must have the same opcode (asserted); the copied size is the
   opcode's declared attribute size, so the attr areas match. */
copy_attrs (const ir_node *old_node, ir_node *new_node) {
  assert(get_irn_op(old_node) == get_irn_op(new_node));
  memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
159 (get_irn_arity)(const ir_node *node) {
160 return __get_irn_arity(node);
163 /* Returns the array with ins. This array is shifted with respect to the
164 array accessed by get_irn_n: The block operand is at position 0 not -1.
165 (@@@ This should be changed.)
166 The order of the predecessors in this array is not guaranteed, except that
167 lists of operands as predecessors of Block or arguments of a Call are
170 get_irn_in (const ir_node *node) {
172 if (interprocedural_view) { /* handle Filter and Block specially */
173 if (get_irn_opcode(node) == iro_Filter) {
174 assert(node->attr.filter.in_cg);
175 return node->attr.filter.in_cg;
176 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
177 return node->attr.block.in_cg;
179 /* else fall through */
/* Replaces the complete predecessor array of a node.  In the
   interprocedural view, Filter and Block nodes store their
   predecessors in the in_cg array instead of the ordinary in array. */
set_irn_in (ir_node *node, int arity, ir_node **in) {
  if (interprocedural_view) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      arr = &node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      arr = &node->attr.block.in_cg;
  /* arity changed: reallocate, preserving the block pointer at slot 0 */
  if (arity != ARR_LEN(*arr) - 1) {
    ir_node * block = (*arr)[0];
    *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
  fix_backedges(current_ir_graph->obst, node);
  memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
210 (get_irn_intra_n)(ir_node *node, int n) {
211 return __get_irn_intra_n (node, n);
215 (get_irn_inter_n)(ir_node *node, int n) {
216 return __get_irn_inter_n (node, n);
220 (get_irn_n)(ir_node *node, int n) {
221 return __get_irn_n (node, n);
/* Sets the n-th predecessor of a node; n == -1 addresses the block.
   In the interprocedural view Filter and Block keep their predecessors
   in the in_cg array, which is updated instead of (or, for the Filter
   block pred, in addition to) the ordinary in array. */
set_irn_n (ir_node *node, int n, ir_node *in) {
  assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
  assert(in && in->kind == k_ir_node);
  if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
    /* Change block pred in both views! */
    node->in[n + 1] = in;
    assert(node->attr.filter.in_cg);
    node->attr.filter.in_cg[n + 1] = in;
  if (interprocedural_view) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      node->attr.filter.in_cg[n + 1] = in;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      node->attr.block.in_cg[n + 1] = in;
  /* else fall through: ordinary predecessor, stored behind the block
     slot, i.e. at in[n + 1] */
  node->in[n + 1] = in;
250 (get_irn_mode)(const ir_node *node) {
251 return __get_irn_mode(node);
255 (set_irn_mode)(ir_node *node, ir_mode *mode)
257 __set_irn_mode(node, mode);
261 get_irn_modecode (const ir_node *node)
264 return node->mode->code;
267 /** Gets the string representation of the mode .*/
269 get_irn_modename (const ir_node *node)
272 return get_mode_name(node->mode);
276 get_irn_modeident (const ir_node *node)
279 return get_mode_ident(node->mode);
283 (get_irn_op)(const ir_node *node)
285 return __get_irn_op(node);
288 /* should be private to the library: */
290 set_irn_op (ir_node *node, ir_op *op)
297 (get_irn_opcode)(const ir_node *node)
299 return __get_irn_opcode(node);
303 get_irn_opname (const ir_node *node)
306 if ((get_irn_op((ir_node *)node) == op_Phi) &&
307 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
308 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
309 return get_id_str(node->op->name);
313 get_irn_opident (const ir_node *node)
316 return node->op->name;
320 (get_irn_visited)(const ir_node *node)
322 return __get_irn_visited(node);
326 (set_irn_visited)(ir_node *node, unsigned long visited)
328 __set_irn_visited(node, visited);
332 (mark_irn_visited)(ir_node *node) {
333 __mark_irn_visited(node);
337 (irn_not_visited)(const ir_node *node) {
338 return __irn_not_visited(node);
342 (irn_visited)(const ir_node *node) {
343 return __irn_visited(node);
347 (set_irn_link)(ir_node *node, void *link) {
348 __set_irn_link(node, link);
352 (get_irn_link)(const ir_node *node) {
353 return __get_irn_link(node);
357 #ifdef DO_HEAPANALYSIS
358 /* Access the abstract interpretation information of a node.
359 Returns NULL if no such information is available. */
360 struct abstval *get_irn_abst_value(ir_node *n) {
363 /* Set the abstract interpretation information of a node. */
364 void set_irn_abst_value(ir_node *n, struct abstval *os) {
367 struct section *firm_get_irn_section(ir_node *n) {
370 void firm_set_irn_section(ir_node *n, struct section *s) {
373 #endif /* DO_HEAPANALYSIS */
376 /* Outputs a unique number for this node */
378 get_irn_node_nr(const ir_node *node) {
381 return node->node_nr;
388 get_irn_const_attr (ir_node *node)
390 assert (node->op == op_Const);
391 return node->attr.con;
395 get_irn_proj_attr (ir_node *node)
397 assert (node->op == op_Proj);
398 return node->attr.proj;
402 get_irn_alloc_attr (ir_node *node)
404 assert (node->op == op_Alloc);
409 get_irn_free_attr (ir_node *node)
411 assert (node->op == op_Free);
412 return node->attr.f = skip_tid(node->attr.f);
416 get_irn_symconst_attr (ir_node *node)
418 assert (node->op == op_SymConst);
423 get_irn_call_attr (ir_node *node)
425 assert (node->op == op_Call);
426 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
430 get_irn_funccall_attr (ir_node *node)
432 assert (node->op == op_FuncCall);
433 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
437 get_irn_sel_attr (ir_node *node)
439 assert (node->op == op_Sel);
444 get_irn_phi_attr (ir_node *node)
446 assert (node->op == op_Phi);
447 return node->attr.phi0_pos;
451 get_irn_block_attr (ir_node *node)
453 assert (node->op == op_Block);
454 return node->attr.block;
458 get_irn_load_attr (ir_node *node)
460 assert (node->op == op_Load);
461 return node->attr.load;
465 get_irn_store_attr (ir_node *node)
467 assert (node->op == op_Store);
468 return node->attr.store;
472 get_irn_except_attr (ir_node *node)
474 assert (node->op == op_Div || node->op == op_Quot ||
475 node->op == op_DivMod || node->op == op_Mod);
476 return node->attr.except;
479 /** manipulate fields of individual nodes **/
481 /* this works for all except Block */
483 get_nodes_block (ir_node *node) {
484 assert (!(node->op == op_Block));
485 return get_irn_n(node, -1);
489 set_nodes_block (ir_node *node, ir_node *block) {
490 assert (!(node->op == op_Block));
491 set_irn_n(node, -1, block);
/* Test whether an arbitrary node is the frame pointer, i.e. a Proj
 * with number pn_Start_P_frame_base from a Start node.  If so returns
 * the frame type of the Start node's graph, else NULL. */
type *is_frame_pointer(ir_node *n) {
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_frame_base)) {
    ir_node *start = get_Proj_pred(n);
    if (get_irn_op(start) == op_Start) {
      return get_irg_frame_type(get_irn_irg(start));

/* Test whether an arbitrary node is the globals pointer, i.e. a Proj
 * with number pn_Start_P_globals from a Start node.  If so returns the
 * global type, else NULL. */
type *is_globals_pointer(ir_node *n) {
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_globals)) {
    ir_node *start = get_Proj_pred(n);
    if (get_irn_op(start) == op_Start) {
      return get_glob_type();

/* Test whether an arbitrary node is the value arg base, i.e. a Proj
 * with number pn_Start_P_value_arg_base from a Start node.  If so
 * returns 1, else 0. */
int is_value_arg_pointer(ir_node *n) {
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
      (get_irn_op(get_Proj_pred(n)) == op_Start))
530 /* Returns an array with the predecessors of the Block. Depending on
531 the implementation of the graph data structure this can be a copy of
532 the internal representation of predecessors as well as the internal
533 array itself. Therefore writing to this array might obstruct the ir. */
535 get_Block_cfgpred_arr (ir_node *node)
537 assert ((node->op == op_Block));
538 return (ir_node **)&(get_irn_in(node)[1]);
543 get_Block_n_cfgpreds (ir_node *node) {
544 assert ((node->op == op_Block));
545 return get_irn_arity(node);
549 get_Block_cfgpred (ir_node *node, int pos) {
551 assert (node->op == op_Block);
552 assert(-1 <= pos && pos < get_irn_arity(node));
553 return get_irn_n(node, pos);
557 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
558 assert (node->op == op_Block);
559 set_irn_n(node, pos, pred);
563 get_Block_matured (ir_node *node) {
564 assert (node->op == op_Block);
565 return node->attr.block.matured;
569 set_Block_matured (ir_node *node, bool matured) {
570 assert (node->op == op_Block);
571 node->attr.block.matured = matured;
574 get_Block_block_visited (ir_node *node) {
575 assert (node->op == op_Block);
576 return node->attr.block.block_visited;
580 set_Block_block_visited (ir_node *node, unsigned long visit) {
581 assert (node->op == op_Block);
582 node->attr.block.block_visited = visit;
585 /* For this current_ir_graph must be set. */
587 mark_Block_block_visited (ir_node *node) {
588 assert (node->op == op_Block);
589 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
593 Block_not_block_visited(ir_node *node) {
594 assert (node->op == op_Block);
595 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
599 get_Block_graph_arr (ir_node *node, int pos) {
600 assert (node->op == op_Block);
601 return node->attr.block.graph_arr[pos+1];
605 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
606 assert (node->op == op_Block);
607 node->attr.block.graph_arr[pos+1] = value;
610 /* handler handling for Blocks */
612 set_Block_handler (ir_node *block, ir_node *handler) {
613 assert ((block->op == op_Block));
614 assert ((handler->op == op_Block));
618 get_Block_handler (ir_node *block) {
619 assert ((block->op == op_Block));
623 /* handler handling for Nodes */
625 set_Node_handler (ir_node *node, ir_node *handler) {
630 get_Node_handler (ir_node *node) {
634 /* exc_t handling for Blocks */
635 void set_Block_exc (ir_node *block, exc_t exc) {
636 assert ((block->op == op_Block));
639 exc_t get_Block_exc (ir_node *block) {
640 assert ((block->op == op_Block));
644 /* exc_t handling for Nodes */
645 void set_Node_exc (ir_node *node, exc_t exc) {
649 exc_t get_Node_exc (ir_node *node) {
/* Sets the interprocedural (call graph) control flow predecessors of a
   Block, (re)allocating the in_cg and cg_backedge arrays when the arity
   changed.  Slot 0 of in_cg is the block slot and stays NULL. */
void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Block);
  if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
    node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.block.in_cg[0] = NULL;
    node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
    /* Fix backedge array.  fix_backedges operates depending on
       interprocedural_view, so switch it on temporarily and restore the
       caller's setting afterwards. */
    bool ipv = interprocedural_view;
    interprocedural_view = true;
    fix_backedges(current_ir_graph->obst, node);
    interprocedural_view = ipv;
  memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
671 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
672 assert(node->op == op_Block &&
673 node->attr.block.in_cg &&
674 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
675 node->attr.block.in_cg[pos + 1] = pred;
678 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
679 assert(node->op == op_Block);
680 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
683 int get_Block_cg_n_cfgpreds(ir_node * node) {
684 assert(node->op == op_Block);
685 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
688 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
689 assert(node->op == op_Block && node->attr.block.in_cg);
690 return node->attr.block.in_cg[pos + 1];
693 void remove_Block_cg_cfgpred_arr(ir_node * node) {
694 assert(node->op == op_Block);
695 node->attr.block.in_cg = NULL;
699 set_Start_irg(ir_node *node, ir_graph *irg) {
700 assert(node->op == op_Start);
701 assert(is_ir_graph(irg));
702 assert(0 && " Why set irg? -- use set_irn_irg");
706 get_End_n_keepalives(ir_node *end) {
707 assert (end->op == op_End);
708 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
712 get_End_keepalive(ir_node *end, int pos) {
713 assert (end->op == op_End);
714 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
718 add_End_keepalive (ir_node *end, ir_node *ka) {
719 assert (end->op == op_End);
720 ARR_APP1 (ir_node *, end->in, ka);
724 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
725 assert (end->op == op_End);
726 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
/* Frees the End node's dynamically allocated keep-alive (in) array.
   The node must not be used afterwards. */
free_End (ir_node *end) {
  assert (end->op == op_End);
  DEL_ARR_F(end->in); /* GL @@@ does not work! */
  end->in = NULL;   /* @@@ make sure we get an error if we use the
                       in array afterwards ... */
740 > Implementing the case construct (which is where the constant Proj node is
741 > important) involves far more than simply determining the constant values.
742 > We could argue that this is more properly a function of the translator from
743 > Firm to the target machine. That could be done if there was some way of
744 > projecting "default" out of the Cond node.
745 I know it's complicated.
Basically there are two problems:
747 - determining the gaps between the projs
748 - determining the biggest case constant to know the proj number for
750 I see several solutions:
751 1. Introduce a ProjDefault node. Solves both problems.
752 This means to extend all optimizations executed during construction.
753 2. Give the Cond node for switch two flavors:
754 a) there are no gaps in the projs (existing flavor)
755 b) gaps may exist, default proj is still the Proj with the largest
756 projection number. This covers also the gaps.
757 3. Fix the semantic of the Cond to that of 2b)
759 Solution 2 seems to be the best:
760 Computing the gaps in the Firm representation is not too hard, i.e.,
761 libFIRM can implement a routine that transforms between the two
762 flavours. This is also possible for 1) but 2) does not require to
763 change any existing optimization.
764 Further it should be far simpler to determine the biggest constant than
766 I don't want to choose 3) as 2a) seems to have advantages for
767 dataflow analysis and 3) does not allow to convert the representation to
771 get_Cond_selector (ir_node *node) {
772 assert (node->op == op_Cond);
773 return get_irn_n(node, 0);
777 set_Cond_selector (ir_node *node, ir_node *selector) {
778 assert (node->op == op_Cond);
779 set_irn_n(node, 0, selector);
783 get_Cond_kind (ir_node *node) {
784 assert (node->op == op_Cond);
785 return node->attr.c.kind;
789 set_Cond_kind (ir_node *node, cond_kind kind) {
790 assert (node->op == op_Cond);
791 node->attr.c.kind = kind;
795 get_Cond_defaultProj (ir_node *node) {
796 assert (node->op == op_Cond);
797 return node->attr.c.default_proj;
801 get_Return_mem (ir_node *node) {
802 assert (node->op == op_Return);
803 return get_irn_n(node, 0);
807 set_Return_mem (ir_node *node, ir_node *mem) {
808 assert (node->op == op_Return);
809 set_irn_n(node, 0, mem);
813 get_Return_n_ress (ir_node *node) {
814 assert (node->op == op_Return);
815 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
819 get_Return_res_arr (ir_node *node)
821 assert ((node->op == op_Return));
822 if (get_Return_n_ress(node) > 0)
823 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
830 set_Return_n_res (ir_node *node, int results) {
831 assert (node->op == op_Return);
836 get_Return_res (ir_node *node, int pos) {
837 assert (node->op == op_Return);
838 assert (get_Return_n_ress(node) > pos);
839 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
843 set_Return_res (ir_node *node, int pos, ir_node *res){
844 assert (node->op == op_Return);
845 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
849 get_Raise_mem (ir_node *node) {
850 assert (node->op == op_Raise);
851 return get_irn_n(node, 0);
855 set_Raise_mem (ir_node *node, ir_node *mem) {
856 assert (node->op == op_Raise);
857 set_irn_n(node, 0, mem);
861 get_Raise_exo_ptr (ir_node *node) {
862 assert (node->op == op_Raise);
863 return get_irn_n(node, 1);
867 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
868 assert (node->op == op_Raise);
869 set_irn_n(node, 1, exo_ptr);
872 tarval *get_Const_tarval (ir_node *node) {
873 assert (node->op == op_Const);
874 return node->attr.con.tv;
878 set_Const_tarval (ir_node *node, tarval *con) {
879 assert (node->op == op_Const);
880 node->attr.con.tv = con;
884 /* The source language type. Must be an atomic type. Mode of type must
885 be mode of node. For tarvals from entities type must be pointer to
888 get_Const_type (ir_node *node) {
889 assert (node->op == op_Const);
890 return node->attr.con.tp;
894 set_Const_type (ir_node *node, type *tp) {
895 assert (node->op == op_Const);
896 if (tp != unknown_type) {
897 assert (is_atomic_type(tp));
898 assert (get_type_mode(tp) == get_irn_mode(node));
901 node->attr.con.tp = tp;
906 get_SymConst_kind (const ir_node *node) {
907 assert (node->op == op_SymConst);
908 return node->attr.i.num;
912 set_SymConst_kind (ir_node *node, symconst_kind num) {
913 assert (node->op == op_SymConst);
914 node->attr.i.num = num;
918 get_SymConst_type (ir_node *node) {
919 assert ( (node->op == op_SymConst)
920 && ( get_SymConst_kind(node) == symconst_type_tag
921 || get_SymConst_kind(node) == symconst_size));
922 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
926 set_SymConst_type (ir_node *node, type *tp) {
927 assert ( (node->op == op_SymConst)
928 && ( get_SymConst_kind(node) == symconst_type_tag
929 || get_SymConst_kind(node) == symconst_size));
930 node->attr.i.sym.type_p = tp;
934 get_SymConst_name (ir_node *node) {
935 assert ( (node->op == op_SymConst)
936 && (get_SymConst_kind(node) == symconst_addr_name));
937 return node->attr.i.sym.ident_p;
941 set_SymConst_name (ir_node *node, ident *name) {
942 assert ( (node->op == op_SymConst)
943 && (get_SymConst_kind(node) == symconst_addr_name));
944 node->attr.i.sym.ident_p = name;
948 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
949 entity *get_SymConst_entity (ir_node *node) {
950 assert ( (node->op == op_SymConst)
951 && (get_SymConst_kind (node) == symconst_addr_ent));
952 return node->attr.i.sym.entity_p;
955 void set_SymConst_entity (ir_node *node, entity *ent) {
956 assert ( (node->op == op_SymConst)
957 && (get_SymConst_kind(node) == symconst_addr_ent));
958 node->attr.i.sym.entity_p = ent;
962 union symconst_symbol
963 get_SymConst_symbol (ir_node *node) {
964 assert (node->op == op_SymConst);
965 return node->attr.i.sym;
969 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
970 assert (node->op == op_SymConst);
971 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
972 node->attr.i.sym = sym;
976 get_Sel_mem (ir_node *node) {
977 assert (node->op == op_Sel);
978 return get_irn_n(node, 0);
982 set_Sel_mem (ir_node *node, ir_node *mem) {
983 assert (node->op == op_Sel);
984 set_irn_n(node, 0, mem);
988 get_Sel_ptr (ir_node *node) {
989 assert (node->op == op_Sel);
990 return get_irn_n(node, 1);
994 set_Sel_ptr (ir_node *node, ir_node *ptr) {
995 assert (node->op == op_Sel);
996 set_irn_n(node, 1, ptr);
1000 get_Sel_n_indexs (ir_node *node) {
1001 assert (node->op == op_Sel);
1002 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1006 get_Sel_index_arr (ir_node *node)
1008 assert ((node->op == op_Sel));
1009 if (get_Sel_n_indexs(node) > 0)
1010 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1016 get_Sel_index (ir_node *node, int pos) {
1017 assert (node->op == op_Sel);
1018 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1022 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1023 assert (node->op == op_Sel);
1024 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1028 get_Sel_entity (ir_node *node) {
1029 assert (node->op == op_Sel);
1030 return node->attr.s.ent;
1034 set_Sel_entity (ir_node *node, entity *ent) {
1035 assert (node->op == op_Sel);
1036 node->attr.s.ent = ent;
1040 get_InstOf_ent (ir_node *node) {
1041 assert (node->op = op_InstOf);
1042 return (node->attr.io.ent);
1046 set_InstOf_ent (ir_node *node, type *ent) {
1047 assert (node->op = op_InstOf);
1048 node->attr.io.ent = ent;
1052 get_InstOf_store (ir_node *node) {
1053 assert (node->op = op_InstOf);
1054 return (get_irn_n (node, 0));
1058 set_InstOf_store (ir_node *node, ir_node *obj) {
1059 assert (node->op = op_InstOf);
1060 set_irn_n (node, 0, obj);
1064 get_InstOf_obj (ir_node *node) {
1065 assert (node->op = op_InstOf);
1066 return (get_irn_n (node, 1));
1070 set_InstOf_obj (ir_node *node, ir_node *obj) {
1071 assert (node->op = op_InstOf);
1072 set_irn_n (node, 1, obj);
1076 /* For unary and binary arithmetic operations the access to the
1077 operands can be factored out. Left is the first, right the
1078 second arithmetic value as listed in tech report 0999-33.
1079 unops are: Minus, Abs, Not, Conv, Cast
1080 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1081 Shr, Shrs, Rotate, Cmp */
1085 get_Call_mem (ir_node *node) {
1086 assert (node->op == op_Call);
1087 return get_irn_n(node, 0);
1091 set_Call_mem (ir_node *node, ir_node *mem) {
1092 assert (node->op == op_Call);
1093 set_irn_n(node, 0, mem);
1097 get_Call_ptr (ir_node *node) {
1098 assert (node->op == op_Call);
1099 return get_irn_n(node, 1);
1103 set_Call_ptr (ir_node *node, ir_node *ptr) {
1104 assert (node->op == op_Call);
1105 set_irn_n(node, 1, ptr);
1109 get_Call_param_arr (ir_node *node) {
1110 assert (node->op == op_Call);
1111 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1115 get_Call_n_params (ir_node *node) {
1116 assert (node->op == op_Call);
1117 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1121 get_Call_arity (ir_node *node) {
1122 assert (node->op == op_Call);
1123 return get_Call_n_params(node);
1127 set_Call_arity (ir_node *node, ir_node *arity) {
1128 assert (node->op == op_Call);
1133 get_Call_param (ir_node *node, int pos) {
1134 assert (node->op == op_Call);
1135 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1139 set_Call_param (ir_node *node, int pos, ir_node *param) {
1140 assert (node->op == op_Call);
1141 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1145 get_Call_type (ir_node *node) {
1146 assert (node->op == op_Call);
1147 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1151 set_Call_type (ir_node *node, type *tp) {
1152 assert (node->op == op_Call);
1153 assert (is_method_type(tp));
1154 node->attr.call.cld_tp = tp;
/* Returns true iff callee information is available for this Call:
   the graph's callee info state is set and a callee array exists. */
int Call_has_callees(ir_node *node) {
  assert(node && node->op == op_Call);
  return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
      (node->attr.call.callee_arr != NULL));

/* Number of entries in the callee array; the array must exist (asserted). */
int get_Call_n_callees(ir_node * node) {
  assert(node && node->op == op_Call && node->attr.call.callee_arr);
  return ARR_LEN(node->attr.call.callee_arr);

/* Returns the pos-th possible callee entity. */
entity * get_Call_callee(ir_node * node, int pos) {
  assert(pos >= 0 && pos < get_Call_n_callees(node));
  return node->attr.call.callee_arr[pos];

/* Installs a copy of arr (n entities) as the callee array, reusing the
   existing obstack array when it already has the right length. */
void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
  assert(node->op == op_Call);
  if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
    node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
  memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));

/* Drops the callee array pointer (the obstack memory is not freed). */
void remove_Call_callee_arr(ir_node * node) {
  assert(node->op == op_Call);
  node->attr.call.callee_arr = NULL;
1186 ir_node * get_CallBegin_ptr (ir_node *node) {
1187 assert(node->op == op_CallBegin);
1188 return get_irn_n(node, 0);
1190 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1191 assert(node->op == op_CallBegin);
1192 set_irn_n(node, 0, ptr);
1194 ir_node * get_CallBegin_call (ir_node *node) {
1195 assert(node->op == op_CallBegin);
1196 return node->attr.callbegin.call;
1198 void set_CallBegin_call (ir_node *node, ir_node *call) {
1199 assert(node->op == op_CallBegin);
1200 node->attr.callbegin.call = call;
1204 get_FuncCall_ptr (ir_node *node) {
1205 assert (node->op == op_FuncCall);
1206 return get_irn_n(node, 0);
1210 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1211 assert (node->op == op_FuncCall);
1212 set_irn_n(node, 0, ptr);
1216 get_FuncCall_param_arr (ir_node *node) {
1217 assert (node->op == op_FuncCall);
1218 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1222 get_FuncCall_n_params (ir_node *node) {
1223 assert (node->op == op_FuncCall);
1224 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1228 get_FuncCall_arity (ir_node *node) {
1229 assert (node->op == op_FuncCall);
1230 return get_FuncCall_n_params(node);
1234 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1235 assert (node->op == op_FuncCall);
1240 get_FuncCall_param (ir_node *node, int pos) {
1241 assert (node->op == op_FuncCall);
1242 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1246 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1247 assert (node->op == op_FuncCall);
1248 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1252 get_FuncCall_type (ir_node *node) {
1253 assert (node->op == op_FuncCall);
1254 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1258 set_FuncCall_type (ir_node *node, type *tp) {
1259 assert (node->op == op_FuncCall);
1260 assert (is_method_type(tp));
1261 node->attr.call.cld_tp = tp;
1264 int FuncCall_has_callees(ir_node *node) {
1265 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1266 (node->attr.call.callee_arr != NULL));
1269 int get_FuncCall_n_callees(ir_node * node) {
1270 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1271 return ARR_LEN(node->attr.call.callee_arr);
1274 entity * get_FuncCall_callee(ir_node * node, int pos) {
1275 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1276 return node->attr.call.callee_arr[pos];
1279 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1280 assert(node->op == op_FuncCall);
1281 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1282 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1284 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1287 void remove_FuncCall_callee_arr(ir_node * node) {
1288 assert(node->op == op_FuncCall);
1289 node->attr.call.callee_arr = NULL;
1294 ir_node * get_##OP##_left(ir_node *node) { \
1295 assert(node->op == op_##OP); \
1296 return get_irn_n(node, node->op->op_index); \
1298 void set_##OP##_left(ir_node *node, ir_node *left) { \
1299 assert(node->op == op_##OP); \
1300 set_irn_n(node, node->op->op_index, left); \
1302 ir_node *get_##OP##_right(ir_node *node) { \
1303 assert(node->op == op_##OP); \
1304 return get_irn_n(node, node->op->op_index + 1); \
1306 void set_##OP##_right(ir_node *node, ir_node *right) { \
1307 assert(node->op == op_##OP); \
1308 set_irn_n(node, node->op->op_index + 1, right); \
1312 ir_node *get_##OP##_op(ir_node *node) { \
1313 assert(node->op == op_##OP); \
1314 return get_irn_n(node, node->op->op_index); \
1316 void set_##OP##_op (ir_node *node, ir_node *op) { \
1317 assert(node->op == op_##OP); \
1318 set_irn_n(node, node->op->op_index, op); \
1328 get_Quot_mem (ir_node *node) {
1329 assert (node->op == op_Quot);
1330 return get_irn_n(node, 0);
1334 set_Quot_mem (ir_node *node, ir_node *mem) {
1335 assert (node->op == op_Quot);
1336 set_irn_n(node, 0, mem);
1342 get_DivMod_mem (ir_node *node) {
1343 assert (node->op == op_DivMod);
1344 return get_irn_n(node, 0);
1348 set_DivMod_mem (ir_node *node, ir_node *mem) {
1349 assert (node->op == op_DivMod);
1350 set_irn_n(node, 0, mem);
1356 get_Div_mem (ir_node *node) {
1357 assert (node->op == op_Div);
1358 return get_irn_n(node, 0);
1362 set_Div_mem (ir_node *node, ir_node *mem) {
1363 assert (node->op == op_Div);
1364 set_irn_n(node, 0, mem);
1370 get_Mod_mem (ir_node *node) {
1371 assert (node->op == op_Mod);
1372 return get_irn_n(node, 0);
1376 set_Mod_mem (ir_node *node, ir_node *mem) {
1377 assert (node->op == op_Mod);
1378 set_irn_n(node, 0, mem);
1395 get_Cast_type (ir_node *node) {
1396 assert (node->op == op_Cast);
1397 return node->attr.cast.totype;
1401 set_Cast_type (ir_node *node, type *to_tp) {
1402 assert (node->op == op_Cast);
1403 node->attr.cast.totype = to_tp;
1407 is_unop (ir_node *node) {
1408 return (node->op->opar == oparity_unary);
1412 get_unop_op (ir_node *node) {
1413 if (node->op->opar == oparity_unary)
1414 return get_irn_n(node, node->op->op_index);
1416 assert(node->op->opar == oparity_unary);
1421 set_unop_op (ir_node *node, ir_node *op) {
1422 if (node->op->opar == oparity_unary)
1423 set_irn_n(node, node->op->op_index, op);
1425 assert(node->op->opar == oparity_unary);
/* Generic binary-operation accessors.  For oparity_binary ops the
   left operand sits at op_index and the right operand at
   op_index + 1 in the predecessor array. */
1429 is_binop (ir_node *node) {
1430 return (node->op->opar == oparity_binary);
1434 get_binop_left (ir_node *node) {
1435 if (node->op->opar == oparity_binary)
1436 return get_irn_n(node, node->op->op_index);
/* NOTE(review): failure-path returns after the asserts below are
   elided in this view — confirm against the full file. */
1438 assert(node->op->opar == oparity_binary);
1443 set_binop_left (ir_node *node, ir_node *left) {
1444 if (node->op->opar == oparity_binary)
1445 set_irn_n(node, node->op->op_index, left);
1447 assert (node->op->opar == oparity_binary);
1451 get_binop_right (ir_node *node) {
1452 if (node->op->opar == oparity_binary)
1453 return get_irn_n(node, node->op->op_index + 1);
1455 assert(node->op->opar == oparity_binary);
1460 set_binop_right (ir_node *node, ir_node *right) {
1461 if (node->op->opar == oparity_binary)
1462 set_irn_n(node, node->op->op_index + 1, right);
1464 assert (node->op->opar == oparity_binary);
/* Phi classification.  A "Phi0" is a 0-arity placeholder Phi that only
   exists while the graph is still being built (phase_building); a real
   Phi either belongs to a finished graph or has predecessors. */
1467 int is_Phi (ir_node *n) {
/* In the interprocedural view, Filter nodes play the role of Phis.
   NOTE(review): the declaration/assignment of `op` is elided in this
   view — confirm against the full file. */
1473 if (op == op_Filter) return interprocedural_view;
1476 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1477 (get_irn_arity(n) > 0));
1482 int is_Phi0 (ir_node *n) {
1485 return ((get_irn_op(n) == op_Phi) &&
1486 (get_irn_arity(n) == 0) &&
1487 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Phi predecessor accessors.  in[0] of a node is its block, so the
   data predecessors start at in[1]; get_Phi_preds_arr hands out a
   pointer directly into that array. */
1491 get_Phi_preds_arr (ir_node *node) {
1492 assert (node->op == op_Phi);
1493 return (ir_node **)&(get_irn_in(node)[1]);
1497 get_Phi_n_preds (ir_node *node) {
1498 assert (is_Phi(node) || is_Phi0(node));
1499 return (get_irn_arity(node));
/* NOTE(review): body of set_Phi_n_preds is elided in this view. */
1503 void set_Phi_n_preds (ir_node *node, int n_preds) {
1504 assert (node->op == op_Phi);
1509 get_Phi_pred (ir_node *node, int pos) {
1510 assert (is_Phi(node) || is_Phi0(node));
1511 return get_irn_n(node, pos);
1515 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1516 assert (is_Phi(node) || is_Phi0(node));
1517 set_irn_n(node, pos, pred);
/* Generic accessors for memory operations (Load and Store share the
   same layout for their first two inputs: 0 = memory, 1 = pointer). */
1521 int is_memop(ir_node *node) {
1522 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1525 ir_node *get_memop_mem (ir_node *node) {
1526 assert(is_memop(node));
1527 return get_irn_n(node, 0);
1530 void set_memop_mem (ir_node *node, ir_node *mem) {
1531 assert(is_memop(node));
1532 set_irn_n(node, 0, mem);
1535 ir_node *get_memop_ptr (ir_node *node) {
1536 assert(is_memop(node));
1537 return get_irn_n(node, 1);
1540 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1541 assert(is_memop(node));
1542 set_irn_n(node, 1, ptr);
/* Load node accessors.  Inputs: 0 = memory, 1 = address pointer.
   The loaded mode and the volatility flag live in the attribute
   union (attr.load). */
1546 get_Load_mem (ir_node *node) {
1547 assert (node->op == op_Load);
1548 return get_irn_n(node, 0);
1552 set_Load_mem (ir_node *node, ir_node *mem) {
1553 assert (node->op == op_Load);
1554 set_irn_n(node, 0, mem);
1558 get_Load_ptr (ir_node *node) {
1559 assert (node->op == op_Load);
1560 return get_irn_n(node, 1);
1564 set_Load_ptr (ir_node *node, ir_node *ptr) {
1565 assert (node->op == op_Load);
1566 set_irn_n(node, 1, ptr);
1570 get_Load_mode (ir_node *node) {
1571 assert (node->op == op_Load);
1572 return node->attr.load.load_mode;
1576 set_Load_mode (ir_node *node, ir_mode *mode) {
1577 assert (node->op == op_Load);
1578 node->attr.load.load_mode = mode;
1582 get_Load_volatility (ir_node *node) {
1583 assert (node->op == op_Load);
1584 return node->attr.load.volatility;
1588 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1589 assert (node->op == op_Load);
1590 node->attr.load.volatility = volatility;
/* Store node accessors.  Inputs: 0 = memory, 1 = address pointer,
   2 = value to store.  Volatility lives in attr.store. */
1595 get_Store_mem (ir_node *node) {
1596 assert (node->op == op_Store);
1597 return get_irn_n(node, 0);
1601 set_Store_mem (ir_node *node, ir_node *mem) {
1602 assert (node->op == op_Store);
1603 set_irn_n(node, 0, mem);
1607 get_Store_ptr (ir_node *node) {
1608 assert (node->op == op_Store);
1609 return get_irn_n(node, 1);
1613 set_Store_ptr (ir_node *node, ir_node *ptr) {
1614 assert (node->op == op_Store);
1615 set_irn_n(node, 1, ptr);
1619 get_Store_value (ir_node *node) {
1620 assert (node->op == op_Store);
1621 return get_irn_n(node, 2);
1625 set_Store_value (ir_node *node, ir_node *value) {
1626 assert (node->op == op_Store);
1627 set_irn_n(node, 2, value);
1631 get_Store_volatility (ir_node *node) {
1632 assert (node->op == op_Store);
1633 return node->attr.store.volatility;
1637 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1638 assert (node->op == op_Store);
1639 node->attr.store.volatility = volatility;
/* Alloc node accessors.  Inputs: 0 = memory, 1 = size.  The allocated
   type and the stack/heap flag live in attr.a. */
1644 get_Alloc_mem (ir_node *node) {
1645 assert (node->op == op_Alloc);
1646 return get_irn_n(node, 0);
1650 set_Alloc_mem (ir_node *node, ir_node *mem) {
1651 assert (node->op == op_Alloc);
1652 set_irn_n(node, 0, mem);
1656 get_Alloc_size (ir_node *node) {
1657 assert (node->op == op_Alloc);
1658 return get_irn_n(node, 1);
1662 set_Alloc_size (ir_node *node, ir_node *size) {
1663 assert (node->op == op_Alloc);
1664 set_irn_n(node, 1, size);
1668 get_Alloc_type (ir_node *node) {
1669 assert (node->op == op_Alloc);
/* The getter deliberately writes back: skip_tid normalizes the stored
   type reference, and caching the result avoids re-skipping later. */
1670 return node->attr.a.type = skip_tid(node->attr.a.type);
1674 set_Alloc_type (ir_node *node, type *tp) {
1675 assert (node->op == op_Alloc);
1676 node->attr.a.type = tp;
1680 get_Alloc_where (ir_node *node) {
1681 assert (node->op == op_Alloc);
1682 return node->attr.a.where;
1686 set_Alloc_where (ir_node *node, where_alloc where) {
1687 assert (node->op == op_Alloc);
1688 node->attr.a.where = where;
/* Free node accessors.  Inputs: 0 = memory, 1 = pointer to free,
   2 = size.  The freed type lives in attr.f. */
1693 get_Free_mem (ir_node *node) {
1694 assert (node->op == op_Free);
1695 return get_irn_n(node, 0);
1699 set_Free_mem (ir_node *node, ir_node *mem) {
1700 assert (node->op == op_Free);
1701 set_irn_n(node, 0, mem);
1705 get_Free_ptr (ir_node *node) {
1706 assert (node->op == op_Free);
1707 return get_irn_n(node, 1);
1711 set_Free_ptr (ir_node *node, ir_node *ptr) {
1712 assert (node->op == op_Free);
1713 set_irn_n(node, 1, ptr);
1717 get_Free_size (ir_node *node) {
1718 assert (node->op == op_Free);
1719 return get_irn_n(node, 2);
1723 set_Free_size (ir_node *node, ir_node *size) {
1724 assert (node->op == op_Free);
1725 set_irn_n(node, 2, size);
1729 get_Free_type (ir_node *node) {
1730 assert (node->op == op_Free);
/* Same write-back normalization idiom as get_Alloc_type. */
1731 return node->attr.f = skip_tid(node->attr.f);
/* NOTE(review): the assignment body of set_Free_type is elided in
   this view — confirm against the full file. */
1735 set_Free_type (ir_node *node, type *tp) {
1736 assert (node->op == op_Free);
/* Sync predecessor accessors; in[0] is the block, data predecessors
   start at in[1]. */
1741 get_Sync_preds_arr (ir_node *node) {
1742 assert (node->op == op_Sync);
1743 return (ir_node **)&(get_irn_in(node)[1]);
1747 get_Sync_n_preds (ir_node *node) {
1748 assert (node->op == op_Sync);
1749 return (get_irn_arity(node));
/* NOTE(review): body of set_Sync_n_preds is elided in this view. */
1754 set_Sync_n_preds (ir_node *node, int n_preds) {
1755 assert (node->op == op_Sync);
1760 get_Sync_pred (ir_node *node, int pos) {
1761 assert (node->op == op_Sync);
1762 return get_irn_n(node, pos);
1766 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1767 assert (node->op == op_Sync);
1768 set_irn_n(node, pos, pred);
/* Proj accessors.  Note that is_Proj() also accepts Filter nodes when
   not in the interprocedural view, so get_Proj_proj dispatches on the
   actual opcode to read the right attribute field. */
1772 get_Proj_pred (ir_node *node) {
1773 assert (is_Proj(node));
1774 return get_irn_n(node, 0);
1778 set_Proj_pred (ir_node *node, ir_node *pred) {
1779 assert (is_Proj(node));
1780 set_irn_n(node, 0, pred);
1784 get_Proj_proj (ir_node *node) {
1785 assert (is_Proj(node));
1786 if (get_irn_opcode(node) == iro_Proj) {
1787 return node->attr.proj;
1789 assert(get_irn_opcode(node) == iro_Filter);
1790 return node->attr.filter.proj;
/* The setter only accepts real Proj nodes; Filter projections are set
   via set_Filter_proj below. */
1795 set_Proj_proj (ir_node *node, long proj) {
1796 assert (node->op == op_Proj);
1797 node->attr.proj = proj;
/* Tuple predecessor accessors; in[0] is the block, data predecessors
   start at in[1]. */
1801 get_Tuple_preds_arr (ir_node *node) {
1802 assert (node->op == op_Tuple);
1803 return (ir_node **)&(get_irn_in(node)[1]);
1807 get_Tuple_n_preds (ir_node *node) {
1808 assert (node->op == op_Tuple);
1809 return (get_irn_arity(node));
/* NOTE(review): body of set_Tuple_n_preds is elided in this view. */
1814 set_Tuple_n_preds (ir_node *node, int n_preds) {
1815 assert (node->op == op_Tuple);
1820 get_Tuple_pred (ir_node *node, int pos) {
1821 assert (node->op == op_Tuple);
1822 return get_irn_n(node, pos);
1826 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1827 assert (node->op == op_Tuple);
1828 set_irn_n(node, pos, pred);
/* Id node accessors: an Id has a single predecessor at input 0. */
1832 get_Id_pred (ir_node *node) {
1833 assert (node->op == op_Id);
1834 return get_irn_n(node, 0);
1838 set_Id_pred (ir_node *node, ir_node *pred) {
1839 assert (node->op == op_Id);
1840 set_irn_n(node, 0, pred);
/* Confirm node accessors.  Inputs: 0 = the confirmed value,
   1 = the bound it is compared against. */
1843 ir_node *get_Confirm_value (ir_node *node) {
1844 assert (node->op == op_Confirm);
1845 return get_irn_n(node, 0);
1847 void set_Confirm_value (ir_node *node, ir_node *value) {
1848 assert (node->op == op_Confirm);
1849 set_irn_n(node, 0, value);
1851 ir_node *get_Confirm_bound (ir_node *node) {
1852 assert (node->op == op_Confirm);
1853 return get_irn_n(node, 1);
1855 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1856 assert (node->op == op_Confirm);
1857 set_irn_n(node, 0, bound);
/* Accessors for the comparison relation (pn_Cmp) a Confirm asserts
   between its value and its bound; stored in attr.confirm_cmp. */
1859 pn_Cmp get_Confirm_cmp (ir_node *node) {
1860 assert (node->op == op_Confirm);
1861 return node->attr.confirm_cmp;
1863 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1864 assert (node->op == op_Confirm);
1865 node->attr.confirm_cmp = cmp;
/* Filter node accessors.  NOTE(review): the bodies of get/set
   _Filter_pred are elided in this view — confirm against the full
   file.  The projection number lives in attr.filter.proj. */
1870 get_Filter_pred (ir_node *node) {
1871 assert(node->op == op_Filter);
1875 set_Filter_pred (ir_node *node, ir_node *pred) {
1876 assert(node->op == op_Filter);
1880 get_Filter_proj(ir_node *node) {
1881 assert(node->op == op_Filter);
1882 return node->attr.filter.proj;
1885 set_Filter_proj (ir_node *node, long proj) {
1886 assert(node->op == op_Filter);
1887 node->attr.filter.proj = proj;
1890 /* Don't use get_irn_arity, get_irn_n in implementation as access
1891 shall work independent of view!!! */
/* Installs the interprocedural (call-graph) predecessor array of a
   Filter.  in_cg mirrors the normal in[] layout: slot 0 is the block
   (copied from node->in[0]), data predecessors start at slot 1.  The
   arrays are (re)allocated on the graph's obstack only when the arity
   changes; backedge flags are zero-initialized. */
1892 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1893 assert(node->op == op_Filter);
1894 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1895 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1896 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1897 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1898 node->attr.filter.in_cg[0] = node->in[0];
1900 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets a single call-graph predecessor (pos is 0-based over the data
   predecessors, hence the +1 to skip the block slot). */
1903 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1904 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1905 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1906 node->attr.filter.in_cg[pos + 1] = pred;
/* Number of call-graph data predecessors (array length minus the
   block slot). */
1908 int get_Filter_n_cg_preds(ir_node *node) {
1909 assert(node->op == op_Filter && node->attr.filter.in_cg);
1910 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* NOTE(review): the declaration of `arity` is elided in this view. */
1912 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1914 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1916 arity = ARR_LEN(node->attr.filter.in_cg);
1917 assert(pos < arity - 1);
1918 return node->attr.filter.in_cg[pos + 1];
/* Returns the graph a node belongs to by walking up to its Block,
   which caches the ir_graph pointer in attr.block.irg. */
1923 get_irn_irg(ir_node *node) {
1924 if (get_irn_op(node) != op_Block)
1925 node = get_nodes_block(node);
1926 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1927 node = get_nodes_block(node);
1928 assert(get_irn_op(node) == op_Block);
1929 return node->attr.block.irg;
1933 /*----------------------------------------------------------------*/
1934 /* Auxiliary routines */
1935 /*----------------------------------------------------------------*/
/* Returns the predecessor of a Proj; any other node (including NULL)
   passes through unchanged.  NOTE(review): the fall-through return is
   elided in this view. */
1938 skip_Proj (ir_node *node) {
1939 /* don't assert node !!! */
1940 if (node && is_Proj(node)) {
1941 return get_Proj_pred(node);
/* Resolves Proj(Tuple) chains to the selected Tuple predecessor,
   skipping Ids along the way and recursing through nested Tuples.
   Disabled (identity) when normalization optimizations are off. */
1948 skip_Tuple (ir_node *node) {
1951 if (!get_opt_normalize()) return node;
1953 node = skip_Id(node);
1954 if (get_irn_op(node) == op_Proj) {
1955 pred = skip_Id(get_Proj_pred(node));
1956 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1957 pred = skip_Id(skip_Tuple(pred));
1958 if (get_irn_op(pred) == op_Tuple)
1959 return get_Tuple_pred(pred, get_Proj_proj(node));
1964 /** returns operand of node if node is a Cast */
/* Non-Cast nodes (including NULL) pass through; NOTE(review): the
   fall-through return is elided in this view. */
1965 ir_node *skip_Cast (ir_node *node) {
1966 if (node && get_irn_op(node) == op_Cast) {
1967 return skip_Id(get_irn_n(node, 0));
1974 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1975 than any other approach, as Id chains are resolved and all point to the real node, or
1976 all id's are self loops. */
/* NOTE(review): a second definition of skip_Id follows below; in the
   full file one of the two is presumably disabled (#if/comment) —
   confirm which variant is live. */
1978 skip_Id (ir_node *node) {
1979 /* don't assert node !!! */
1981 if (!get_opt_normalize()) return node;
1983 /* Don't use get_Id_pred: We get into an endless loop for
1984 self-referencing Ids. */
/* in[0+1] is the first data predecessor (in[0] is the block). */
1985 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1986 ir_node *rem_pred = node->in[0+1];
1989 assert (get_irn_arity (node) > 0);
/* Temporarily make this Id self-referencing so the recursion below
   terminates on Id cycles, then path-compress to the chain end. */
1991 node->in[0+1] = node;
1992 res = skip_Id(rem_pred);
1993 if (res->op == op_Id) /* self-loop */ return node;
1995 node->in[0+1] = res;
2002 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2003 than any other approach, as Id chains are resolved and all point to the real node, or
2004 all id's are self loops. */
/* NOTE(review): duplicate definition of skip_Id (see above) — in the
   full file one variant is presumably disabled.  Also, `pred` is used
   without a visible declaration and `rem_pred` is used without a
   visible assignment; those lines are elided in this view — confirm
   against the full file before editing. */
2006 skip_Id (ir_node *node) {
2008 /* don't assert node !!! */
2010 if (!node || (node->op != op_Id)) return node;
2012 if (!get_opt_normalize()) return node;
2014 /* Don't use get_Id_pred: We get into an endless loop for
2015 self-referencing Ids. */
2016 pred = node->in[0+1];
2018 if (pred->op != op_Id) return pred;
2020 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2021 ir_node *rem_pred, *res;
2023 if (pred->op != op_Id) return pred; /* shortcut */
2026 assert (get_irn_arity (node) > 0);
2028 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2029 res = skip_Id(rem_pred);
2030 if (res->op == op_Id) /* self-loop */ return node;
2032 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Opcode predicates.  is_Bad tolerates NULL; NOTE(review): its return
   statements are elided in this view. */
2041 is_Bad (ir_node *node) {
2043 if ((node) && get_irn_opcode(node) == iro_Bad)
2049 is_no_Block (ir_node *node) {
2051 return (get_irn_opcode(node) != iro_Block);
2055 is_Block (ir_node *node) {
2057 return (get_irn_opcode(node) == iro_Block);
2060 /* returns true if node is a Unknown node. */
2062 is_Unknown (ir_node *node) {
2064 return (get_irn_opcode(node) == iro_Unknown);
/* A Filter counts as a Proj outside the interprocedural view. */
2068 is_Proj (const ir_node *node) {
2070 return node->op == op_Proj
2071 || (!interprocedural_view && node->op == op_Filter);
2074 /* Returns true if the operation manipulates control flow. */
2076 is_cfop(ir_node *node) {
2077 return is_cfopcode(get_irn_op(node));
2080 /* Returns true if the operation manipulates interprocedural control flow:
2081 CallBegin, EndReg, EndExcept */
2082 int is_ip_cfop(ir_node *node) {
2083 return is_ip_cfopcode(get_irn_op(node));
2086 /* Returns true if the operation can change the control flow because
2089 is_fragile_op(ir_node *node) {
2090 return is_op_fragile(get_irn_op(node));
2093 /* Returns the memory operand of fragile operations. */
/* NOTE(review): the case labels of this switch are elided in this
   view; for the visible cases the memory operand is input 0. */
2094 ir_node *get_fragile_op_mem(ir_node *node) {
2095 assert(node && is_fragile_op(node));
2097 switch (get_irn_opcode (node)) {
2106 return get_irn_n(node, 0);
2111 assert(0 && "should not be reached");
2116 #ifdef DEBUG_libfirm
/* Debug helper: prints a node (op, mode, number, address), its block
   (get_irn_n(n, -1) fetches the block predecessor) and all data
   predecessors to stdout.  Compiled to a no-op without DEBUG_libfirm. */
2117 void dump_irn (ir_node *n) {
2118 int i, arity = get_irn_arity(n);
2119 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2121 ir_node *pred = get_irn_n(n, -1);
2122 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2123 get_irn_node_nr(pred), (void *)pred);
2125 printf(" preds: \n");
2126 for (i = 0; i < arity; ++i) {
2127 ir_node *pred = get_irn_n(n, i);
2128 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2129 get_irn_node_nr(pred), (void *)pred);
2133 #else /* DEBUG_libfirm */
2134 void dump_irn (ir_node *n) {}
2135 #endif /* DEBUG_libfirm */