3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
* Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
34 /* some constants fixing the positions of nodes predecessors
36 #define CALL_PARAM_OFFSET 2
37 #define FUNCCALL_PARAM_OFFSET 1
38 #define SEL_INDEX_OFFSET 2
39 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
40 #define END_KEEPALIVE_OFFSET 0
42 static const char *pnc_name_arr [] = {
43 "False", "Eq", "Lt", "Le",
44 "Gt", "Ge", "Lg", "Leg", "Uo",
45 "Ue", "Ul", "Ule", "Ug", "Uge",
50 * returns the pnc name from an pnc constant
52 const char *get_pnc_string(int pnc) {
53 return pnc_name_arr[pnc];
57 * Calculates the negated pnc condition.
60 get_negated_pnc(int pnc) {
62 case False: return True; break;
63 case Eq: return Ne; break;
64 case Lt: return Uge; break;
65 case Le: return Ug; break;
66 case Gt: return Ule; break;
67 case Ge: return Ul; break;
68 case Lg: return Ue; break;
69 case Leg: return Uo; break;
70 case Uo: return Leg; break;
71 case Ue: return Lg; break;
72 case Ul: return Ge; break;
73 case Ule: return Gt; break;
74 case Ug: return Le; break;
75 case Uge: return Lt; break;
76 case Ne: return Eq; break;
77 case True: return False; break;
79 return 99; /* to shut up gcc */
82 const char *pns_name_arr [] = {
83 "initial_exec", "global_store",
84 "frame_base", "globals", "args"
87 const char *symconst_name_arr [] = {
88 "type_tag", "size", "addr_name", "addr_ent"
92 * Indicates, whether additional data can be registered to ir nodes.
93 * If set to 1, this is not possible anymore.
95 static int forbid_new_data = 0;
98 * The amount of additional space for custom data to be allocated upon
99 * creating a new node.
101 unsigned firm_add_node_size = 0;
104 /* register new space for every node */
105 unsigned register_additional_node_data(unsigned size) {
106 assert(!forbid_new_data && "Too late to register additional node data");
111 return firm_add_node_size += size;
117 /* Forbid the addition of new data to an ir node. */
122 * irnode constructor.
123 * Create a new irnode in irg, with an op, mode, arity and
124 * some incoming irnodes.
125 * If arity is negative, a node with a dynamic array is created.
128 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
129 int arity, ir_node **in)
132 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
135 assert(irg && op && mode);
136 p = obstack_alloc (irg->obst, node_size);
137 memset(p, 0, node_size);
138 res = (ir_node *) (p + firm_add_node_size);
140 res->kind = k_ir_node;
146 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
148 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
149 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
153 set_irn_dbg_info(res, db);
157 res->node_nr = get_irp_new_node_nr();
160 #if FIRM_EDGES_INPLACE
163 int not_a_block = is_no_Block(res);
165 INIT_LIST_HEAD(&res->edge_info.outs_head);
167 for (i = 0, n = arity + not_a_block; i < n; ++i)
168 edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
172 hook_new_node(irg, res);
177 /*-- getting some parameters from ir_nodes --*/
180 (is_ir_node)(const void *thing) {
181 return _is_ir_node(thing);
185 (get_irn_intra_arity)(const ir_node *node) {
186 return _get_irn_intra_arity(node);
190 (get_irn_inter_arity)(const ir_node *node) {
191 return _get_irn_inter_arity(node);
194 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
197 (get_irn_arity)(const ir_node *node) {
198 return _get_irn_arity(node);
201 /* Returns the array with ins. This array is shifted with respect to the
202 array accessed by get_irn_n: The block operand is at position 0 not -1.
203 (@@@ This should be changed.)
204 The order of the predecessors in this array is not guaranteed, except that
205 lists of operands as predecessors of Block or arguments of a Call are
208 get_irn_in (const ir_node *node) {
210 if (get_interprocedural_view()) { /* handle Filter and Block specially */
211 if (get_irn_opcode(node) == iro_Filter) {
212 assert(node->attr.filter.in_cg);
213 return node->attr.filter.in_cg;
214 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
215 return node->attr.block.in_cg;
217 /* else fall through */
223 set_irn_in (ir_node *node, int arity, ir_node **in) {
226 if (get_interprocedural_view()) { /* handle Filter and Block specially */
227 if (get_irn_opcode(node) == iro_Filter) {
228 assert(node->attr.filter.in_cg);
229 arr = &node->attr.filter.in_cg;
230 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
231 arr = &node->attr.block.in_cg;
238 if (arity != ARR_LEN(*arr) - 1) {
239 ir_node * block = (*arr)[0];
240 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
243 fix_backedges(current_ir_graph->obst, node);
244 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
248 (get_irn_intra_n)(const ir_node *node, int n) {
249 return _get_irn_intra_n (node, n);
253 (get_irn_inter_n)(const ir_node *node, int n) {
254 return _get_irn_inter_n (node, n);
257 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
260 (get_irn_n)(const ir_node *node, int n) {
261 return _get_irn_n(node, n);
265 set_irn_n (ir_node *node, int n, ir_node *in) {
266 assert(node && node->kind == k_ir_node);
268 assert(n < get_irn_arity(node));
269 assert(in && in->kind == k_ir_node);
271 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
272 /* Change block pred in both views! */
273 node->in[n + 1] = in;
274 assert(node->attr.filter.in_cg);
275 node->attr.filter.in_cg[n + 1] = in;
278 if (get_interprocedural_view()) { /* handle Filter and Block specially */
279 if (get_irn_opcode(node) == iro_Filter) {
280 assert(node->attr.filter.in_cg);
281 node->attr.filter.in_cg[n + 1] = in;
283 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
284 node->attr.block.in_cg[n + 1] = in;
287 /* else fall through */
291 hook_set_irn_n(node, n, in, node->in[n + 1]);
293 /* Here, we rely on src and tgt being in the current ir graph */
294 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
296 node->in[n + 1] = in;
300 (get_irn_mode)(const ir_node *node) {
301 return _get_irn_mode(node);
305 (set_irn_mode)(ir_node *node, ir_mode *mode)
307 _set_irn_mode(node, mode);
311 get_irn_modecode (const ir_node *node)
314 return node->mode->code;
317 /** Gets the string representation of the mode .*/
319 get_irn_modename (const ir_node *node)
322 return get_mode_name(node->mode);
326 get_irn_modeident (const ir_node *node)
329 return get_mode_ident(node->mode);
333 (get_irn_op)(const ir_node *node)
335 return _get_irn_op(node);
338 /* should be private to the library: */
340 set_irn_op (ir_node *node, ir_op *op)
347 (get_irn_opcode)(const ir_node *node)
349 return _get_irn_opcode(node);
353 get_irn_opname (const ir_node *node)
356 if ((get_irn_op((ir_node *)node) == op_Phi) &&
357 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
358 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
359 return get_id_str(node->op->name);
363 get_irn_opident (const ir_node *node)
366 return node->op->name;
370 (get_irn_visited)(const ir_node *node)
372 return _get_irn_visited(node);
376 (set_irn_visited)(ir_node *node, unsigned long visited)
378 _set_irn_visited(node, visited);
382 (mark_irn_visited)(ir_node *node) {
383 _mark_irn_visited(node);
387 (irn_not_visited)(const ir_node *node) {
388 return _irn_not_visited(node);
392 (irn_visited)(const ir_node *node) {
393 return _irn_visited(node);
397 (set_irn_link)(ir_node *node, void *link) {
398 _set_irn_link(node, link);
402 (get_irn_link)(const ir_node *node) {
403 return _get_irn_link(node);
407 (get_irn_pinned)(const ir_node *node) {
408 return _get_irn_pinned(node);
411 void set_irn_pinned(ir_node *node, op_pin_state state) {
412 /* due to optimization an opt may be turned into a Tuple */
413 if (get_irn_op(node) == op_Tuple)
416 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
417 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
419 node->attr.except.pin_state = state;
422 #ifdef DO_HEAPANALYSIS
423 /* Access the abstract interpretation information of a node.
424 Returns NULL if no such information is available. */
425 struct abstval *get_irn_abst_value(ir_node *n) {
428 /* Set the abstract interpretation information of a node. */
429 void set_irn_abst_value(ir_node *n, struct abstval *os) {
432 struct section *firm_get_irn_section(ir_node *n) {
435 void firm_set_irn_section(ir_node *n, struct section *s) {
439 /* Dummies needed for firmjni. */
440 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
441 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
442 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
443 void firm_set_irn_section(ir_node *n, struct section *s) {}
444 #endif /* DO_HEAPANALYSIS */
447 /* Outputs a unique number for this node */
449 get_irn_node_nr(const ir_node *node) {
452 return node->node_nr;
459 get_irn_const_attr (ir_node *node)
461 assert (node->op == op_Const);
462 return node->attr.con;
466 get_irn_proj_attr (ir_node *node)
468 assert (node->op == op_Proj);
469 return node->attr.proj;
473 get_irn_alloc_attr (ir_node *node)
475 assert (node->op == op_Alloc);
480 get_irn_free_attr (ir_node *node)
482 assert (node->op == op_Free);
487 get_irn_symconst_attr (ir_node *node)
489 assert (node->op == op_SymConst);
494 get_irn_call_attr (ir_node *node)
496 assert (node->op == op_Call);
497 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
501 get_irn_sel_attr (ir_node *node)
503 assert (node->op == op_Sel);
508 get_irn_phi_attr (ir_node *node)
510 assert (node->op == op_Phi);
511 return node->attr.phi0_pos;
515 get_irn_block_attr (ir_node *node)
517 assert (node->op == op_Block);
518 return node->attr.block;
522 get_irn_load_attr (ir_node *node)
524 assert (node->op == op_Load);
525 return node->attr.load;
529 get_irn_store_attr (ir_node *node)
531 assert (node->op == op_Store);
532 return node->attr.store;
536 get_irn_except_attr (ir_node *node)
538 assert (node->op == op_Div || node->op == op_Quot ||
539 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
540 return node->attr.except;
543 /** manipulate fields of individual nodes **/
545 /* this works for all except Block */
547 get_nodes_block (const ir_node *node) {
548 assert (!(node->op == op_Block));
549 return get_irn_n(node, -1);
553 set_nodes_block (ir_node *node, ir_node *block) {
554 assert (!(node->op == op_Block));
555 set_irn_n(node, -1, block);
558 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
559 * from Start. If so returns frame type, else Null. */
560 type *is_frame_pointer(ir_node *n) {
561 if ((get_irn_op(n) == op_Proj) &&
562 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
563 ir_node *start = get_Proj_pred(n);
564 if (get_irn_op(start) == op_Start) {
565 return get_irg_frame_type(get_irn_irg(start));
571 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
572 * from Start. If so returns global type, else Null. */
573 type *is_globals_pointer(ir_node *n) {
574 if ((get_irn_op(n) == op_Proj) &&
575 (get_Proj_proj(n) == pn_Start_P_globals)) {
576 ir_node *start = get_Proj_pred(n);
577 if (get_irn_op(start) == op_Start) {
578 return get_glob_type();
584 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
585 * from Start. If so returns 1, else 0. */
586 int is_value_arg_pointer(ir_node *n) {
587 if ((get_irn_op(n) == op_Proj) &&
588 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
589 (get_irn_op(get_Proj_pred(n)) == op_Start))
594 /* Returns an array with the predecessors of the Block. Depending on
595 the implementation of the graph data structure this can be a copy of
596 the internal representation of predecessors as well as the internal
597 array itself. Therefore writing to this array might obstruct the ir. */
599 get_Block_cfgpred_arr (ir_node *node)
601 assert ((node->op == op_Block));
602 return (ir_node **)&(get_irn_in(node)[1]);
607 get_Block_n_cfgpreds (ir_node *node) {
608 assert ((node->op == op_Block));
609 return get_irn_arity(node);
613 get_Block_cfgpred (ir_node *node, int pos) {
614 assert(-1 <= pos && pos < get_irn_arity(node));
615 assert(node->op == op_Block);
616 return get_irn_n(node, pos);
620 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
621 assert (node->op == op_Block);
622 set_irn_n(node, pos, pred);
626 get_Block_matured (ir_node *node) {
627 assert (node->op == op_Block);
628 return node->attr.block.matured;
632 set_Block_matured (ir_node *node, bool matured) {
633 assert (node->op == op_Block);
634 node->attr.block.matured = matured;
637 get_Block_block_visited (ir_node *node) {
638 assert (node->op == op_Block);
639 return node->attr.block.block_visited;
643 set_Block_block_visited (ir_node *node, unsigned long visit) {
644 assert (node->op == op_Block);
645 node->attr.block.block_visited = visit;
648 /* For this current_ir_graph must be set. */
650 mark_Block_block_visited (ir_node *node) {
651 assert (node->op == op_Block);
652 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
656 Block_not_block_visited(ir_node *node) {
657 assert (node->op == op_Block);
658 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
662 get_Block_graph_arr (ir_node *node, int pos) {
663 assert (node->op == op_Block);
664 return node->attr.block.graph_arr[pos+1];
668 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
669 assert (node->op == op_Block);
670 node->attr.block.graph_arr[pos+1] = value;
673 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
674 assert(node->op == op_Block);
675 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
676 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
677 node->attr.block.in_cg[0] = NULL;
678 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
680 /* Fix backedge array. fix_backedges operates depending on
681 interprocedural_view. */
682 int ipv = get_interprocedural_view();
683 set_interprocedural_view(true);
684 fix_backedges(current_ir_graph->obst, node);
685 set_interprocedural_view(ipv);
688 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
691 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
692 assert(node->op == op_Block &&
693 node->attr.block.in_cg &&
694 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
695 node->attr.block.in_cg[pos + 1] = pred;
698 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
699 assert(node->op == op_Block);
700 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
703 int get_Block_cg_n_cfgpreds(ir_node * node) {
704 assert(node->op == op_Block);
705 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
708 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
709 assert(node->op == op_Block && node->attr.block.in_cg);
710 return node->attr.block.in_cg[pos + 1];
713 void remove_Block_cg_cfgpred_arr(ir_node * node) {
714 assert(node->op == op_Block);
715 node->attr.block.in_cg = NULL;
718 ir_node *(set_Block_dead)(ir_node *block) {
719 return _set_Block_dead(block);
722 int (is_Block_dead)(const ir_node *block) {
723 return _is_Block_dead(block);
727 set_Start_irg(ir_node *node, ir_graph *irg) {
728 assert(node->op == op_Start);
729 assert(is_ir_graph(irg));
730 assert(0 && " Why set irg? -- use set_irn_irg");
734 get_End_n_keepalives(ir_node *end) {
735 assert (end->op == op_End);
736 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
740 get_End_keepalive(ir_node *end, int pos) {
741 assert (end->op == op_End);
742 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
746 add_End_keepalive (ir_node *end, ir_node *ka) {
747 assert (end->op == op_End);
748 ARR_APP1 (ir_node *, end->in, ka);
752 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
753 assert (end->op == op_End);
754 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
758 free_End (ir_node *end) {
759 assert (end->op == op_End);
761 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
762 end->in = NULL; /* @@@ make sure we get an error if we use the
763 in array afterwards ... */
768 > Implementing the case construct (which is where the constant Proj node is
769 > important) involves far more than simply determining the constant values.
770 > We could argue that this is more properly a function of the translator from
771 > Firm to the target machine. That could be done if there was some way of
772 > projecting "default" out of the Cond node.
773 I know it's complicated.
Basically there are two problems:
775 - determining the gaps between the projs
776 - determining the biggest case constant to know the proj number for
778 I see several solutions:
779 1. Introduce a ProjDefault node. Solves both problems.
780 This means to extend all optimizations executed during construction.
781 2. Give the Cond node for switch two flavors:
782 a) there are no gaps in the projs (existing flavor)
783 b) gaps may exist, default proj is still the Proj with the largest
784 projection number. This covers also the gaps.
785 3. Fix the semantic of the Cond to that of 2b)
787 Solution 2 seems to be the best:
788 Computing the gaps in the Firm representation is not too hard, i.e.,
789 libFIRM can implement a routine that transforms between the two
790 flavours. This is also possible for 1) but 2) does not require to
791 change any existing optimization.
792 Further it should be far simpler to determine the biggest constant than
794 I don't want to choose 3) as 2a) seems to have advantages for
795 dataflow analysis and 3) does not allow to convert the representation to
799 get_Cond_selector (ir_node *node) {
800 assert (node->op == op_Cond);
801 return get_irn_n(node, 0);
805 set_Cond_selector (ir_node *node, ir_node *selector) {
806 assert (node->op == op_Cond);
807 set_irn_n(node, 0, selector);
811 get_Cond_kind (ir_node *node) {
812 assert (node->op == op_Cond);
813 return node->attr.c.kind;
817 set_Cond_kind (ir_node *node, cond_kind kind) {
818 assert (node->op == op_Cond);
819 node->attr.c.kind = kind;
823 get_Cond_defaultProj (ir_node *node) {
824 assert (node->op == op_Cond);
825 return node->attr.c.default_proj;
829 get_Return_mem (ir_node *node) {
830 assert (node->op == op_Return);
831 return get_irn_n(node, 0);
835 set_Return_mem (ir_node *node, ir_node *mem) {
836 assert (node->op == op_Return);
837 set_irn_n(node, 0, mem);
841 get_Return_n_ress (ir_node *node) {
842 assert (node->op == op_Return);
843 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
847 get_Return_res_arr (ir_node *node)
849 assert ((node->op == op_Return));
850 if (get_Return_n_ress(node) > 0)
851 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
858 set_Return_n_res (ir_node *node, int results) {
859 assert (node->op == op_Return);
864 get_Return_res (ir_node *node, int pos) {
865 assert (node->op == op_Return);
866 assert (get_Return_n_ress(node) > pos);
867 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
871 set_Return_res (ir_node *node, int pos, ir_node *res){
872 assert (node->op == op_Return);
873 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
877 get_Raise_mem (ir_node *node) {
878 assert (node->op == op_Raise);
879 return get_irn_n(node, 0);
883 set_Raise_mem (ir_node *node, ir_node *mem) {
884 assert (node->op == op_Raise);
885 set_irn_n(node, 0, mem);
889 get_Raise_exo_ptr (ir_node *node) {
890 assert (node->op == op_Raise);
891 return get_irn_n(node, 1);
895 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
896 assert (node->op == op_Raise);
897 set_irn_n(node, 1, exo_ptr);
900 tarval *(get_Const_tarval)(ir_node *node) {
901 return _get_Const_tarval(node);
905 set_Const_tarval (ir_node *node, tarval *con) {
906 assert (node->op == op_Const);
907 node->attr.con.tv = con;
910 cnst_classify_t (classify_Const)(ir_node *node)
912 return _classify_Const(node);
916 /* The source language type. Must be an atomic type. Mode of type must
917 be mode of node. For tarvals from entities type must be pointer to
920 get_Const_type (ir_node *node) {
921 assert (node->op == op_Const);
922 return node->attr.con.tp;
926 set_Const_type (ir_node *node, type *tp) {
927 assert (node->op == op_Const);
928 if (tp != firm_unknown_type) {
929 assert (is_atomic_type(tp));
930 assert (get_type_mode(tp) == get_irn_mode(node));
932 node->attr.con.tp = tp;
937 get_SymConst_kind (const ir_node *node) {
938 assert (node->op == op_SymConst);
939 return node->attr.i.num;
943 set_SymConst_kind (ir_node *node, symconst_kind num) {
944 assert (node->op == op_SymConst);
945 node->attr.i.num = num;
949 get_SymConst_type (ir_node *node) {
950 assert ( (node->op == op_SymConst)
951 && ( get_SymConst_kind(node) == symconst_type_tag
952 || get_SymConst_kind(node) == symconst_size));
953 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
957 set_SymConst_type (ir_node *node, type *tp) {
958 assert ( (node->op == op_SymConst)
959 && ( get_SymConst_kind(node) == symconst_type_tag
960 || get_SymConst_kind(node) == symconst_size));
961 node->attr.i.sym.type_p = tp;
965 get_SymConst_name (ir_node *node) {
966 assert ( (node->op == op_SymConst)
967 && (get_SymConst_kind(node) == symconst_addr_name));
968 return node->attr.i.sym.ident_p;
972 set_SymConst_name (ir_node *node, ident *name) {
973 assert ( (node->op == op_SymConst)
974 && (get_SymConst_kind(node) == symconst_addr_name));
975 node->attr.i.sym.ident_p = name;
979 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
980 entity *get_SymConst_entity (ir_node *node) {
981 assert ( (node->op == op_SymConst)
982 && (get_SymConst_kind (node) == symconst_addr_ent));
983 return node->attr.i.sym.entity_p;
986 void set_SymConst_entity (ir_node *node, entity *ent) {
987 assert ( (node->op == op_SymConst)
988 && (get_SymConst_kind(node) == symconst_addr_ent));
989 node->attr.i.sym.entity_p = ent;
992 union symconst_symbol
993 get_SymConst_symbol (ir_node *node) {
994 assert (node->op == op_SymConst);
995 return node->attr.i.sym;
999 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1000 assert (node->op == op_SymConst);
1001 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1002 node->attr.i.sym = sym;
1006 get_SymConst_value_type (ir_node *node) {
1007 assert (node->op == op_SymConst);
1008 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1009 return node->attr.i.tp;
1013 set_SymConst_value_type (ir_node *node, type *tp) {
1014 assert (node->op == op_SymConst);
1015 node->attr.i.tp = tp;
1019 get_Sel_mem (ir_node *node) {
1020 assert (node->op == op_Sel);
1021 return get_irn_n(node, 0);
1025 set_Sel_mem (ir_node *node, ir_node *mem) {
1026 assert (node->op == op_Sel);
1027 set_irn_n(node, 0, mem);
1031 get_Sel_ptr (ir_node *node) {
1032 assert (node->op == op_Sel);
1033 return get_irn_n(node, 1);
1037 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1038 assert (node->op == op_Sel);
1039 set_irn_n(node, 1, ptr);
1043 get_Sel_n_indexs (ir_node *node) {
1044 assert (node->op == op_Sel);
1045 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1049 get_Sel_index_arr (ir_node *node)
1051 assert ((node->op == op_Sel));
1052 if (get_Sel_n_indexs(node) > 0)
1053 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1059 get_Sel_index (ir_node *node, int pos) {
1060 assert (node->op == op_Sel);
1061 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1065 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1066 assert (node->op == op_Sel);
1067 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1071 get_Sel_entity (ir_node *node) {
1072 assert (node->op == op_Sel);
1073 return node->attr.s.ent;
1077 set_Sel_entity (ir_node *node, entity *ent) {
1078 assert (node->op == op_Sel);
1079 node->attr.s.ent = ent;
1083 get_InstOf_ent (ir_node *node) {
1084 assert (node->op = op_InstOf);
1085 return (node->attr.io.ent);
1089 set_InstOf_ent (ir_node *node, type *ent) {
1090 assert (node->op = op_InstOf);
1091 node->attr.io.ent = ent;
1095 get_InstOf_store (ir_node *node) {
1096 assert (node->op = op_InstOf);
1097 return (get_irn_n (node, 0));
1101 set_InstOf_store (ir_node *node, ir_node *obj) {
1102 assert (node->op = op_InstOf);
1103 set_irn_n (node, 0, obj);
1107 get_InstOf_obj (ir_node *node) {
1108 assert (node->op = op_InstOf);
1109 return (get_irn_n (node, 1));
1113 set_InstOf_obj (ir_node *node, ir_node *obj) {
1114 assert (node->op = op_InstOf);
1115 set_irn_n (node, 1, obj);
1119 /* For unary and binary arithmetic operations the access to the
1120 operands can be factored out. Left is the first, right the
1121 second arithmetic value as listed in tech report 0999-33.
1122 unops are: Minus, Abs, Not, Conv, Cast
1123 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1124 Shr, Shrs, Rotate, Cmp */
1128 get_Call_mem (ir_node *node) {
1129 assert (node->op == op_Call);
1130 return get_irn_n(node, 0);
1134 set_Call_mem (ir_node *node, ir_node *mem) {
1135 assert (node->op == op_Call);
1136 set_irn_n(node, 0, mem);
1140 get_Call_ptr (ir_node *node) {
1141 assert (node->op == op_Call);
1142 return get_irn_n(node, 1);
1146 set_Call_ptr (ir_node *node, ir_node *ptr) {
1147 assert (node->op == op_Call);
1148 set_irn_n(node, 1, ptr);
1152 get_Call_param_arr (ir_node *node) {
1153 assert (node->op == op_Call);
1154 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1158 get_Call_n_params (ir_node *node) {
1159 assert (node->op == op_Call);
1160 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1164 get_Call_arity (ir_node *node) {
1165 assert (node->op == op_Call);
1166 return get_Call_n_params(node);
1170 set_Call_arity (ir_node *node, ir_node *arity) {
1171 assert (node->op == op_Call);
1176 get_Call_param (ir_node *node, int pos) {
1177 assert (node->op == op_Call);
1178 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1182 set_Call_param (ir_node *node, int pos, ir_node *param) {
1183 assert (node->op == op_Call);
1184 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1188 get_Call_type (ir_node *node) {
1189 assert (node->op == op_Call);
1190 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1194 set_Call_type (ir_node *node, type *tp) {
1195 assert (node->op == op_Call);
1196 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1197 node->attr.call.cld_tp = tp;
1200 int Call_has_callees(ir_node *node) {
1201 assert(node && node->op == op_Call);
1202 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1203 (node->attr.call.callee_arr != NULL));
1206 int get_Call_n_callees(ir_node * node) {
1207 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1208 return ARR_LEN(node->attr.call.callee_arr);
1211 entity * get_Call_callee(ir_node * node, int pos) {
1212 assert(pos >= 0 && pos < get_Call_n_callees(node));
1213 return node->attr.call.callee_arr[pos];
1216 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1217 assert(node->op == op_Call);
1218 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1219 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1221 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1224 void remove_Call_callee_arr(ir_node * node) {
1225 assert(node->op == op_Call);
1226 node->attr.call.callee_arr = NULL;
1229 ir_node * get_CallBegin_ptr (ir_node *node) {
1230 assert(node->op == op_CallBegin);
1231 return get_irn_n(node, 0);
1233 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1234 assert(node->op == op_CallBegin);
1235 set_irn_n(node, 0, ptr);
1237 ir_node * get_CallBegin_call (ir_node *node) {
1238 assert(node->op == op_CallBegin);
1239 return node->attr.callbegin.call;
1241 void set_CallBegin_call (ir_node *node, ir_node *call) {
1242 assert(node->op == op_CallBegin);
1243 node->attr.callbegin.call = call;
1248 ir_node * get_##OP##_left(ir_node *node) { \
1249 assert(node->op == op_##OP); \
1250 return get_irn_n(node, node->op->op_index); \
1252 void set_##OP##_left(ir_node *node, ir_node *left) { \
1253 assert(node->op == op_##OP); \
1254 set_irn_n(node, node->op->op_index, left); \
1256 ir_node *get_##OP##_right(ir_node *node) { \
1257 assert(node->op == op_##OP); \
1258 return get_irn_n(node, node->op->op_index + 1); \
1260 void set_##OP##_right(ir_node *node, ir_node *right) { \
1261 assert(node->op == op_##OP); \
1262 set_irn_n(node, node->op->op_index + 1, right); \
1266 ir_node *get_##OP##_op(ir_node *node) { \
1267 assert(node->op == op_##OP); \
1268 return get_irn_n(node, node->op->op_index); \
1270 void set_##OP##_op (ir_node *node, ir_node *op) { \
1271 assert(node->op == op_##OP); \
1272 set_irn_n(node, node->op->op_index, op); \
1282 get_Quot_mem (ir_node *node) {
1283 assert (node->op == op_Quot);
1284 return get_irn_n(node, 0);
1288 set_Quot_mem (ir_node *node, ir_node *mem) {
1289 assert (node->op == op_Quot);
1290 set_irn_n(node, 0, mem);
1296 get_DivMod_mem (ir_node *node) {
1297 assert (node->op == op_DivMod);
1298 return get_irn_n(node, 0);
1302 set_DivMod_mem (ir_node *node, ir_node *mem) {
1303 assert (node->op == op_DivMod);
1304 set_irn_n(node, 0, mem);
1310 get_Div_mem (ir_node *node) {
1311 assert (node->op == op_Div);
1312 return get_irn_n(node, 0);
1316 set_Div_mem (ir_node *node, ir_node *mem) {
1317 assert (node->op == op_Div);
1318 set_irn_n(node, 0, mem);
1324 get_Mod_mem (ir_node *node) {
1325 assert (node->op == op_Mod);
1326 return get_irn_n(node, 0);
1330 set_Mod_mem (ir_node *node, ir_node *mem) {
1331 assert (node->op == op_Mod);
1332 set_irn_n(node, 0, mem);
1349 get_Cast_type (ir_node *node) {
1350 assert (node->op == op_Cast);
1351 return node->attr.cast.totype;
1355 set_Cast_type (ir_node *node, type *to_tp) {
1356 assert (node->op == op_Cast);
1357 node->attr.cast.totype = to_tp;
1361 (is_unop)(const ir_node *node) {
1362 return _is_unop(node);
1366 get_unop_op (ir_node *node) {
1367 if (node->op->opar == oparity_unary)
1368 return get_irn_n(node, node->op->op_index);
1370 assert(node->op->opar == oparity_unary);
1375 set_unop_op (ir_node *node, ir_node *op) {
1376 if (node->op->opar == oparity_unary)
1377 set_irn_n(node, node->op->op_index, op);
1379 assert(node->op->opar == oparity_unary);
1383 (is_binop)(const ir_node *node) {
1384 return _is_binop(node);
1388 get_binop_left (ir_node *node) {
1389 if (node->op->opar == oparity_binary)
1390 return get_irn_n(node, node->op->op_index);
1392 assert(node->op->opar == oparity_binary);
1397 set_binop_left (ir_node *node, ir_node *left) {
1398 if (node->op->opar == oparity_binary)
1399 set_irn_n(node, node->op->op_index, left);
1401 assert (node->op->opar == oparity_binary);
1405 get_binop_right (ir_node *node) {
1406 if (node->op->opar == oparity_binary)
1407 return get_irn_n(node, node->op->op_index + 1);
1409 assert(node->op->opar == oparity_binary);
1414 set_binop_right (ir_node *node, ir_node *right) {
1415 if (node->op->opar == oparity_binary)
1416 set_irn_n(node, node->op->op_index + 1, right);
1418 assert (node->op->opar == oparity_binary);
/* Returns true if n behaves as a Phi.  In the interprocedural view Filter
   nodes count as Phis; during graph construction a Phi with arity 0 is a
   Phi0 and is excluded here (see is_Phi0 below). */
1421 int is_Phi (const ir_node *n) {
1427 if (op == op_Filter) return get_interprocedural_view();
1430 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1431 (get_irn_arity(n) > 0));

/* Returns true for a zero-arity Phi of a graph still under construction. */
1436 int is_Phi0 (const ir_node *n) {
1439 return ((get_irn_op(n) == op_Phi) &&
1440 (get_irn_arity(n) == 0) &&
1441 (get_irg_phase_state(get_irn_irg(n)) == phase_building));

/* Returns the Phi's predecessor array, skipping the block entry in[0]. */
1445 get_Phi_preds_arr (ir_node *node) {
1446 assert (node->op == op_Phi);
1447 return (ir_node **)&(get_irn_in(node)[1]);

/* Number of data predecessors of a Phi (or Phi0). */
1451 get_Phi_n_preds (ir_node *node) {
1452 assert (is_Phi(node) || is_Phi0(node));
1453 return (get_irn_arity(node));

1457 void set_Phi_n_preds (ir_node *node, int n_preds) {
1458 assert (node->op == op_Phi);

/* Returns the pos-th data predecessor of a Phi (or Phi0). */
1463 get_Phi_pred (ir_node *node, int pos) {
1464 assert (is_Phi(node) || is_Phi0(node));
1465 return get_irn_n(node, pos);

/* Sets the pos-th data predecessor of a Phi (or Phi0). */
1469 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1470 assert (is_Phi(node) || is_Phi0(node));
1471 set_irn_n(node, pos, pred);
/* True for the two memory operations Load and Store; the generic memop
   accessors below rely on both sharing the layout mem = in 0, ptr = in 1. */
1475 int is_memop(ir_node *node) {
1476 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));

/* Memory operand (input 0) of a Load or Store. */
1479 ir_node *get_memop_mem (ir_node *node) {
1480 assert(is_memop(node));
1481 return get_irn_n(node, 0);

1484 void set_memop_mem (ir_node *node, ir_node *mem) {
1485 assert(is_memop(node));
1486 set_irn_n(node, 0, mem);

/* Address operand (input 1) of a Load or Store. */
1489 ir_node *get_memop_ptr (ir_node *node) {
1490 assert(is_memop(node));
1491 return get_irn_n(node, 1);

1494 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1495 assert(is_memop(node));
1496 set_irn_n(node, 1, ptr);
/* Load accessors: mem = input 0, ptr = input 1; mode and volatility live in
   the node's load attributes. */
1500 get_Load_mem (ir_node *node) {
1501 assert (node->op == op_Load);
1502 return get_irn_n(node, 0);

1506 set_Load_mem (ir_node *node, ir_node *mem) {
1507 assert (node->op == op_Load);
1508 set_irn_n(node, 0, mem);

1512 get_Load_ptr (ir_node *node) {
1513 assert (node->op == op_Load);
1514 return get_irn_n(node, 1);

1518 set_Load_ptr (ir_node *node, ir_node *ptr) {
1519 assert (node->op == op_Load);
1520 set_irn_n(node, 1, ptr);

/* Mode of the value being loaded. */
1524 get_Load_mode (ir_node *node) {
1525 assert (node->op == op_Load);
1526 return node->attr.load.load_mode;

1530 set_Load_mode (ir_node *node, ir_mode *mode) {
1531 assert (node->op == op_Load);
1532 node->attr.load.load_mode = mode;

/* Volatility flag of the Load (see ent_volatility). */
1536 get_Load_volatility (ir_node *node) {
1537 assert (node->op == op_Load);
1538 return node->attr.load.volatility;

1542 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1543 assert (node->op == op_Load);
1544 node->attr.load.volatility = volatility;
/* Store accessors: mem = input 0, ptr = input 1, value = input 2;
   volatility lives in the node's store attributes. */
1549 get_Store_mem (ir_node *node) {
1550 assert (node->op == op_Store);
1551 return get_irn_n(node, 0);

1555 set_Store_mem (ir_node *node, ir_node *mem) {
1556 assert (node->op == op_Store);
1557 set_irn_n(node, 0, mem);

1561 get_Store_ptr (ir_node *node) {
1562 assert (node->op == op_Store);
1563 return get_irn_n(node, 1);

1567 set_Store_ptr (ir_node *node, ir_node *ptr) {
1568 assert (node->op == op_Store);
1569 set_irn_n(node, 1, ptr);

/* The value being stored (input 2). */
1573 get_Store_value (ir_node *node) {
1574 assert (node->op == op_Store);
1575 return get_irn_n(node, 2);

1579 set_Store_value (ir_node *node, ir_node *value) {
1580 assert (node->op == op_Store);
1581 set_irn_n(node, 2, value);

1585 get_Store_volatility (ir_node *node) {
1586 assert (node->op == op_Store);
1587 return node->attr.store.volatility;

1591 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1592 assert (node->op == op_Store);
1593 node->attr.store.volatility = volatility;
/* Alloc accessors: mem = input 0, size = input 1; allocated type and
   placement (stack/heap, see where_alloc) live in the 'a' attributes. */
1598 get_Alloc_mem (ir_node *node) {
1599 assert (node->op == op_Alloc);
1600 return get_irn_n(node, 0);

1604 set_Alloc_mem (ir_node *node, ir_node *mem) {
1605 assert (node->op == op_Alloc);
1606 set_irn_n(node, 0, mem);

1610 get_Alloc_size (ir_node *node) {
1611 assert (node->op == op_Alloc);
1612 return get_irn_n(node, 1);

1616 set_Alloc_size (ir_node *node, ir_node *size) {
1617 assert (node->op == op_Alloc);
1618 set_irn_n(node, 1, size);

/* Returns the allocated type; normalizes stored type-ids on the way
   (the assignment caches the skip_tid result back into the attribute). */
1622 get_Alloc_type (ir_node *node) {
1623 assert (node->op == op_Alloc);
1624 return node->attr.a.type = skip_tid(node->attr.a.type);

1628 set_Alloc_type (ir_node *node, type *tp) {
1629 assert (node->op == op_Alloc);
1630 node->attr.a.type = tp;

1634 get_Alloc_where (ir_node *node) {
1635 assert (node->op == op_Alloc);
1636 return node->attr.a.where;

1640 set_Alloc_where (ir_node *node, where_alloc where) {
1641 assert (node->op == op_Alloc);
1642 node->attr.a.where = where;
/* Free accessors: mem = input 0, ptr = input 1, size = input 2; freed type
   and placement live in the 'f' attributes (mirrors the Alloc layout). */
1647 get_Free_mem (ir_node *node) {
1648 assert (node->op == op_Free);
1649 return get_irn_n(node, 0);

1653 set_Free_mem (ir_node *node, ir_node *mem) {
1654 assert (node->op == op_Free);
1655 set_irn_n(node, 0, mem);

1659 get_Free_ptr (ir_node *node) {
1660 assert (node->op == op_Free);
1661 return get_irn_n(node, 1);

1665 set_Free_ptr (ir_node *node, ir_node *ptr) {
1666 assert (node->op == op_Free);
1667 set_irn_n(node, 1, ptr);

1671 get_Free_size (ir_node *node) {
1672 assert (node->op == op_Free);
1673 return get_irn_n(node, 2);

1677 set_Free_size (ir_node *node, ir_node *size) {
1678 assert (node->op == op_Free);
1679 set_irn_n(node, 2, size);

/* Returns the freed type; caches the skip_tid normalization like
   get_Alloc_type above. */
1683 get_Free_type (ir_node *node) {
1684 assert (node->op == op_Free);
1685 return node->attr.f.type = skip_tid(node->attr.f.type);

1689 set_Free_type (ir_node *node, type *tp) {
1690 assert (node->op == op_Free);
1691 node->attr.f.type = tp;

1695 get_Free_where (ir_node *node) {
1696 assert (node->op == op_Free);
1697 return node->attr.f.where;

1701 set_Free_where (ir_node *node, where_alloc where) {
1702 assert (node->op == op_Free);
1703 node->attr.f.where = where;
/* Returns the Sync's predecessor array, skipping the block entry in[0]. */
1707 get_Sync_preds_arr (ir_node *node) {
1708 assert (node->op == op_Sync);
1709 return (ir_node **)&(get_irn_in(node)[1]);

/* Number of memory predecessors of a Sync. */
1713 get_Sync_n_preds (ir_node *node) {
1714 assert (node->op == op_Sync);
1715 return (get_irn_arity(node));

1720 set_Sync_n_preds (ir_node *node, int n_preds) {
1721 assert (node->op == op_Sync);

1726 get_Sync_pred (ir_node *node, int pos) {
1727 assert (node->op == op_Sync);
1728 return get_irn_n(node, pos);

1732 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1733 assert (node->op == op_Sync);
1734 set_irn_n(node, pos, pred);
/* Predecessor (input 0) of a Proj.  Note is_Proj also accepts Filter nodes
   outside the interprocedural view (see is_Proj below). */
1738 get_Proj_pred (ir_node *node) {
1739 assert (is_Proj(node));
1740 return get_irn_n(node, 0);

1744 set_Proj_pred (ir_node *node, ir_node *pred) {
1745 assert (is_Proj(node));
1746 set_irn_n(node, 0, pred);

/* Projection number; for a Filter (which acts as Proj in the
   intraprocedural view) it is read from the filter attributes instead. */
1750 get_Proj_proj (ir_node *node) {
1751 assert (is_Proj(node));
1752 if (get_irn_opcode(node) == iro_Proj) {
1753 return node->attr.proj;
1755 assert(get_irn_opcode(node) == iro_Filter);
1756 return node->attr.filter.proj;

/* Setter only accepts real Proj nodes (use set_Filter_proj for Filters). */
1761 set_Proj_proj (ir_node *node, long proj) {
1762 assert (node->op == op_Proj);
1763 node->attr.proj = proj;
/* Returns the Tuple's predecessor array, skipping the block entry in[0]. */
1767 get_Tuple_preds_arr (ir_node *node) {
1768 assert (node->op == op_Tuple);
1769 return (ir_node **)&(get_irn_in(node)[1]);

1773 get_Tuple_n_preds (ir_node *node) {
1774 assert (node->op == op_Tuple);
1775 return (get_irn_arity(node));

1780 set_Tuple_n_preds (ir_node *node, int n_preds) {
1781 assert (node->op == op_Tuple);

1786 get_Tuple_pred (ir_node *node, int pos) {
1787 assert (node->op == op_Tuple);
1788 return get_irn_n(node, pos);

1792 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1793 assert (node->op == op_Tuple);
1794 set_irn_n(node, pos, pred);

/* Id nodes forward their single predecessor (input 0). */
1798 get_Id_pred (ir_node *node) {
1799 assert (node->op == op_Id);
1800 return get_irn_n(node, 0);

1804 set_Id_pred (ir_node *node, ir_node *pred) {
1805 assert (node->op == op_Id);
1806 set_irn_n(node, 0, pred);
/* Confirm layout: operand 0 is the confirmed value, operand 1 the bound
   it is compared against. */
1809 ir_node *get_Confirm_value (ir_node *node) {
1810 assert (node->op == op_Confirm);
1811 return get_irn_n(node, 0);

1813 void set_Confirm_value (ir_node *node, ir_node *value) {
1814 assert (node->op == op_Confirm);
1815 set_irn_n(node, 0, value);

1817 ir_node *get_Confirm_bound (ir_node *node) {
1818 assert (node->op == op_Confirm);
1819 return get_irn_n(node, 1);
1821 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1822 assert (node->op == op_Confirm);
1823 set_irn_n(node, 0, bound);
/* The comparison relation (pn_Cmp) asserted by the Confirm. */
1825 pn_Cmp get_Confirm_cmp (ir_node *node) {
1826 assert (node->op == op_Confirm);
1827 return node->attr.confirm_cmp;

1829 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1830 assert (node->op == op_Confirm);
1831 node->attr.confirm_cmp = cmp;
/* Filter accessors.  A Filter acts as a Proj in the intraprocedural view
   and keeps a separate interprocedural predecessor array in_cg. */
1836 get_Filter_pred (ir_node *node) {
1837 assert(node->op == op_Filter);

1841 set_Filter_pred (ir_node *node, ir_node *pred) {
1842 assert(node->op == op_Filter);

/* Projection number of the Filter (Proj role). */
1846 get_Filter_proj(ir_node *node) {
1847 assert(node->op == op_Filter);
1848 return node->attr.filter.proj;

1851 set_Filter_proj (ir_node *node, long proj) {
1852 assert(node->op == op_Filter);
1853 node->attr.filter.proj = proj;

1856 /* Don't use get_irn_arity, get_irn_n in implementation as access
1857 shall work independent of view!!! */
/* (Re)builds the interprocedural predecessor array in_cg on the graph's
   obstack; slot 0 mirrors the block, real predecessors start at index 1. */
1858 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1859 assert(node->op == op_Filter);
1860 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1861 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1862 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1863 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1864 node->attr.filter.in_cg[0] = node->in[0];
1866 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);

1869 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1870 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1871 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1872 node->attr.filter.in_cg[pos + 1] = pred;

/* Number of interprocedural predecessors (in_cg minus the block slot). */
1874 int get_Filter_n_cg_preds(ir_node *node) {
1875 assert(node->op == op_Filter && node->attr.filter.in_cg);
1876 return (ARR_LEN(node->attr.filter.in_cg) - 1);

1878 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1880 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1882 arity = ARR_LEN(node->attr.filter.in_cg);
1883 assert(pos < arity - 1);
1884 return node->attr.filter.in_cg[pos + 1];
/* Mux accessors.  The setters write the in[] array directly:
   in[1] = selector, in[2] = false value, in[3] = true value. */
1888 ir_node *get_Mux_sel (ir_node *node) {
1889 assert(node->op == op_Mux);

1892 void set_Mux_sel (ir_node *node, ir_node *sel) {
1893 assert(node->op == op_Mux);

1897 ir_node *get_Mux_false (ir_node *node) {
1898 assert(node->op == op_Mux);

1901 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1902 assert(node->op == op_Mux);
1903 node->in[2] = ir_false;

1906 ir_node *get_Mux_true (ir_node *node) {
1907 assert(node->op == op_Mux);

1910 void set_Mux_true (ir_node *node, ir_node *ir_true) {
1911 assert(node->op == op_Mux);
1912 node->in[3] = ir_true;
/* Returns the graph a node belongs to by walking to its block, whose
   attributes store the irg.  Steps twice because during optimization a
   Bad node may stand in for the block itself. */
1917 get_irn_irg(const ir_node *node) {
1918 if (! is_Block(node))
1919 node = get_nodes_block(node);
1920 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1921 node = get_nodes_block(node);
1922 assert(get_irn_op(node) == op_Block);
1923 return node->attr.block.irg;
1927 /*----------------------------------------------------------------*/
1928 /* Auxiliary routines */
1929 /*----------------------------------------------------------------*/
/* Returns the predecessor if node is a Proj; tolerates NULL input. */
1932 skip_Proj (ir_node *node) {
1933 /* don't assert node !!! */
1934 if (node && is_Proj(node)) {
1935 return get_Proj_pred(node);

/* Resolves Proj-over-Tuple chains to the Tuple element the Proj selects;
   recurses through nested Tuples.  Disabled when normalization is off. */
1942 skip_Tuple (ir_node *node) {
1945 if (!get_opt_normalize()) return node;
1947 node = skip_Id(node);
1948 if (get_irn_op(node) == op_Proj) {
1949 pred = skip_Id(get_Proj_pred(node));
1950 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1951 pred = skip_Id(skip_Tuple(pred));
1952 if (get_irn_op(pred) == op_Tuple)
1953 return get_Tuple_pred(pred, get_Proj_proj(node));
1958 /** returns operand of node if node is a Cast */
1959 ir_node *skip_Cast (ir_node *node) {
1960 if (node && get_irn_op(node) == op_Cast) {
1961 return skip_Id(get_irn_n(node, 0));
1968 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1969 than any other approach, as Id chains are resolved and all point to the real node, or
1970 all id's are self loops. */
1972 skip_Id (ir_node *node) {
1973 /* don't assert node !!! */
1975 if (!get_opt_normalize()) return node;
1977 /* Don't use get_Id_pred: We get into an endless loop for
1978 self-referencing Ids. */
/* in[0+1] is the Id's predecessor slot (in[0] is the block). */
1979 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1980 ir_node *rem_pred = node->in[0+1];
1983 assert (get_irn_arity (node) > 0);
/* Temporarily make this Id self-referencing so the recursion below
   terminates even on Id cycles. */
1985 node->in[0+1] = node;
1986 res = skip_Id(rem_pred);
1987 if (res->op == op_Id) /* self-loop */ return node;
1989 node->in[0+1] = res;
1996 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1997 than any other approach, as Id chains are resolved and all point to the real node, or
1998 all id's are self loops. */
/* NOTE(review): this is a second definition of skip_Id — presumably one of
   the two versions is preprocessor-disabled in the full file; confirm. */
2000 skip_Id (ir_node *node) {
2002 /* don't assert node !!! */
2004 if (!node || (node->op != op_Id)) return node;
2006 if (!get_opt_normalize()) return node;
2008 /* Don't use get_Id_pred: We get into an endless loop for
2009 self-referencing Ids. */
2010 pred = node->in[0+1];
2012 if (pred->op != op_Id) return pred;
2014 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2015 ir_node *rem_pred, *res;
2017 if (pred->op != op_Id) return pred; /* shortcut */
2020 assert (get_irn_arity (node) > 0);
2022 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2023 res = skip_Id(rem_pred);
2024 if (res->op == op_Id) /* self-loop */ return node;
2026 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Out-of-line instances of the node-kind predicate macros/inlines. */
2035 (is_Bad)(const ir_node *node) {
2036 return _is_Bad(node);

2040 (is_no_Block)(const ir_node *node) {
2041 return _is_no_Block(node);

2045 (is_Block)(const ir_node *node) {
2046 return _is_Block(node);

2049 /* returns true if node is a Unknown node. */
2051 is_Unknown (const ir_node *node) {
2053 return (get_irn_op(node) == op_Unknown);

/* Filters count as Projs outside the interprocedural view. */
2057 is_Proj (const ir_node *node) {
2059 return node->op == op_Proj
2060 || (!get_interprocedural_view() && node->op == op_Filter);

2063 /* Returns true if the operation manipulates control flow. */
2065 is_cfop(const ir_node *node) {
2066 return is_cfopcode(get_irn_op(node));

2069 /* Returns true if the operation manipulates interprocedural control flow:
2070 CallBegin, EndReg, EndExcept */
2071 int is_ip_cfop(const ir_node *node) {
2072 return is_ip_cfopcode(get_irn_op(node));

2075 /* Returns true if the operation can change the control flow because
2078 is_fragile_op(const ir_node *node) {
2079 return is_op_fragile(get_irn_op(node));
2082 /* Returns the memory operand of fragile operations. */
/* Dispatches on the opcode; for the cases visible here the memory operand
   sits in input slot 0.  Unknown fragile ops hit the assert below. */
2083 ir_node *get_fragile_op_mem(ir_node *node) {
2084 assert(node && is_fragile_op(node));
2086 switch (get_irn_opcode (node)) {
2095 return get_irn_n(node, 0);
2100 assert(0 && "should not be reached");

2105 /* Returns true if the operation is a forking control flow operation. */
2107 is_forking_op(const ir_node *node) {
2108 return is_op_forking(get_irn_op(node));
2112 #ifdef DEBUG_libfirm
/* Debug helper: prints a node's op, mode, number and address, followed by
   its block (input -1) and all data predecessors.  Compiled to a no-op
   stub when DEBUG_libfirm is not defined. */
2113 void dump_irn (ir_node *n) {
2114 int i, arity = get_irn_arity(n);
2115 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2117 ir_node *pred = get_irn_n(n, -1);
2118 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2119 get_irn_node_nr(pred), (void *)pred);
2121 printf(" preds: \n");
2122 for (i = 0; i < arity; ++i) {
2123 ir_node *pred = get_irn_n(n, i);
2124 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2125 get_irn_node_nr(pred), (void *)pred);
2129 #else /* DEBUG_libfirm */
2130 void dump_irn (ir_node *n) {}
2131 #endif /* DEBUG_libfirm */