3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
34 /* some constants fixing the positions of nodes predecessors
36 #define CALL_PARAM_OFFSET 2
37 #define FUNCCALL_PARAM_OFFSET 1
38 #define SEL_INDEX_OFFSET 2
39 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
40 #define END_KEEPALIVE_OFFSET 0
42 static const char *pnc_name_arr [] = {
43 "False", "Eq", "Lt", "Le",
44 "Gt", "Ge", "Lg", "Leg", "Uo",
45 "Ue", "Ul", "Ule", "Ug", "Uge",
50 * returns the pnc name from an pnc constant
52 const char *get_pnc_string(int pnc) {
53 return pnc_name_arr[pnc];
57 * Calculates the negated pnc condition.
60 get_negated_pnc(int pnc) {
62 case False: return True; break;
63 case Eq: return Ne; break;
64 case Lt: return Uge; break;
65 case Le: return Ug; break;
66 case Gt: return Ule; break;
67 case Ge: return Ul; break;
68 case Lg: return Ue; break;
69 case Leg: return Uo; break;
70 case Uo: return Leg; break;
71 case Ue: return Lg; break;
72 case Ul: return Ge; break;
73 case Ule: return Gt; break;
74 case Ug: return Le; break;
75 case Uge: return Lt; break;
76 case Ne: return Eq; break;
77 case True: return False; break;
79 return 99; /* to shut up gcc */
/** Printable names for the Start node's projection numbers, indexed by proj. */
const char *pns_name_arr [] = {
  "initial_exec", "global_store",
  "frame_base", "globals", "args"
};
/** Printable names for the SymConst kinds, indexed by symconst_kind. */
const char *symconst_name_arr [] = {
  "type_tag", "size", "addr_name", "addr_ent"
};
92 * Indicates, whether additional data can be registered to ir nodes.
93 * If set to 1, this is not possible anymore.
95 static int forbid_new_data = 0;
98 * The amount of additional space for custom data to be allocated upon
99 * creating a new node.
101 unsigned firm_add_node_size = 0;
104 /* register new space for every node */
105 unsigned register_additional_node_data(unsigned size) {
106 assert(!forbid_new_data && "Too late to register additional node data");
111 return firm_add_node_size += size;
117 /* Forbid the addition of new data to an ir node. */
122 * irnode constructor.
123 * Create a new irnode in irg, with an op, mode, arity and
124 * some incoming irnodes.
125 * If arity is negative, a node with a dynamic array is created.
128 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
129 int arity, ir_node **in)
132 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
135 assert(irg && op && mode);
136 p = obstack_alloc (irg->obst, node_size);
137 memset(p, 0, node_size);
138 res = (ir_node *) (p + firm_add_node_size);
140 res->kind = k_ir_node;
146 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
148 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
149 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
153 set_irn_dbg_info(res, db);
157 res->node_nr = get_irp_new_node_nr();
160 #ifdef FIRM_EDGES_INPLACE
163 int not_a_block = !is_Block(res);
165 INIT_LIST_HEAD(&res->edge_info.outs_head);
167 for(i = 0, n = arity + not_a_block; i < n; ++i)
168 edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
172 hook_new_node(irg, res);
177 /*-- getting some parameters from ir_nodes --*/
180 (is_ir_node)(const void *thing) {
181 return _is_ir_node(thing);
185 (get_irn_intra_arity)(const ir_node *node) {
186 return _get_irn_intra_arity(node);
190 (get_irn_inter_arity)(const ir_node *node) {
191 return _get_irn_inter_arity(node);
194 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
197 (get_irn_arity)(const ir_node *node) {
198 return _get_irn_arity(node);
201 /* Returns the array with ins. This array is shifted with respect to the
202 array accessed by get_irn_n: The block operand is at position 0 not -1.
203 (@@@ This should be changed.)
204 The order of the predecessors in this array is not guaranteed, except that
205 lists of operands as predecessors of Block or arguments of a Call are
208 get_irn_in (const ir_node *node) {
210 if (get_interprocedural_view()) { /* handle Filter and Block specially */
211 if (get_irn_opcode(node) == iro_Filter) {
212 assert(node->attr.filter.in_cg);
213 return node->attr.filter.in_cg;
214 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
215 return node->attr.block.in_cg;
217 /* else fall through */
223 set_irn_in (ir_node *node, int arity, ir_node **in) {
226 if (get_interprocedural_view()) { /* handle Filter and Block specially */
227 if (get_irn_opcode(node) == iro_Filter) {
228 assert(node->attr.filter.in_cg);
229 arr = &node->attr.filter.in_cg;
230 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
231 arr = &node->attr.block.in_cg;
238 if (arity != ARR_LEN(*arr) - 1) {
239 ir_node * block = (*arr)[0];
240 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
243 fix_backedges(current_ir_graph->obst, node);
244 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
248 (get_irn_intra_n)(const ir_node *node, int n) {
249 return _get_irn_intra_n (node, n);
253 (get_irn_inter_n)(const ir_node *node, int n) {
254 return _get_irn_inter_n (node, n);
257 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
260 (get_irn_n)(const ir_node *node, int n) {
261 return _get_irn_n(node, n);
265 set_irn_n (ir_node *node, int n, ir_node *in) {
266 assert(node && node->kind == k_ir_node);
268 assert(n < get_irn_arity(node));
269 assert(in && in->kind == k_ir_node);
271 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
272 /* Change block pred in both views! */
273 node->in[n + 1] = in;
274 assert(node->attr.filter.in_cg);
275 node->attr.filter.in_cg[n + 1] = in;
278 if (get_interprocedural_view()) { /* handle Filter and Block specially */
279 if (get_irn_opcode(node) == iro_Filter) {
280 assert(node->attr.filter.in_cg);
281 node->attr.filter.in_cg[n + 1] = in;
283 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
284 node->attr.block.in_cg[n + 1] = in;
287 /* else fall through */
291 hook_set_irn_n(node, n, in, node->in[n + 1]);
293 #ifdef FIRM_EDGES_INPLACE
294 /* Here, we rely on src and tgt being in the current ir graph */
295 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
298 node->in[n + 1] = in;
302 (get_irn_mode)(const ir_node *node) {
303 return _get_irn_mode(node);
307 (set_irn_mode)(ir_node *node, ir_mode *mode)
309 _set_irn_mode(node, mode);
313 get_irn_modecode (const ir_node *node)
316 return node->mode->code;
319 /** Gets the string representation of the mode .*/
321 get_irn_modename (const ir_node *node)
324 return get_mode_name(node->mode);
328 get_irn_modeident (const ir_node *node)
331 return get_mode_ident(node->mode);
335 (get_irn_op)(const ir_node *node)
337 return _get_irn_op(node);
340 /* should be private to the library: */
342 set_irn_op (ir_node *node, ir_op *op)
349 (get_irn_opcode)(const ir_node *node)
351 return _get_irn_opcode(node);
355 get_irn_opname (const ir_node *node)
358 if ((get_irn_op((ir_node *)node) == op_Phi) &&
359 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
360 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
361 return get_id_str(node->op->name);
365 get_irn_opident (const ir_node *node)
368 return node->op->name;
372 (get_irn_visited)(const ir_node *node)
374 return _get_irn_visited(node);
378 (set_irn_visited)(ir_node *node, unsigned long visited)
380 _set_irn_visited(node, visited);
384 (mark_irn_visited)(ir_node *node) {
385 _mark_irn_visited(node);
389 (irn_not_visited)(const ir_node *node) {
390 return _irn_not_visited(node);
394 (irn_visited)(const ir_node *node) {
395 return _irn_visited(node);
399 (set_irn_link)(ir_node *node, void *link) {
400 _set_irn_link(node, link);
404 (get_irn_link)(const ir_node *node) {
405 return _get_irn_link(node);
409 (get_irn_pinned)(const ir_node *node) {
410 return _get_irn_pinned(node);
413 void set_irn_pinned(ir_node *node, op_pin_state state) {
414 /* due to optimization an opt may be turned into a Tuple */
415 if (get_irn_op(node) == op_Tuple)
418 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
419 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
421 node->attr.except.pin_state = state;
424 #ifdef DO_HEAPANALYSIS
425 /* Access the abstract interpretation information of a node.
426 Returns NULL if no such information is available. */
427 struct abstval *get_irn_abst_value(ir_node *n) {
430 /* Set the abstract interpretation information of a node. */
431 void set_irn_abst_value(ir_node *n, struct abstval *os) {
434 struct section *firm_get_irn_section(ir_node *n) {
437 void firm_set_irn_section(ir_node *n, struct section *s) {
441 /* Dummies needed for firmjni. */
442 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
443 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
444 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
445 void firm_set_irn_section(ir_node *n, struct section *s) {}
446 #endif /* DO_HEAPANALYSIS */
449 /* Outputs a unique number for this node */
451 get_irn_node_nr(const ir_node *node) {
454 return node->node_nr;
461 get_irn_const_attr (ir_node *node)
463 assert (node->op == op_Const);
464 return node->attr.con;
468 get_irn_proj_attr (ir_node *node)
470 assert (node->op == op_Proj);
471 return node->attr.proj;
475 get_irn_alloc_attr (ir_node *node)
477 assert (node->op == op_Alloc);
482 get_irn_free_attr (ir_node *node)
484 assert (node->op == op_Free);
489 get_irn_symconst_attr (ir_node *node)
491 assert (node->op == op_SymConst);
496 get_irn_call_attr (ir_node *node)
498 assert (node->op == op_Call);
499 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
503 get_irn_sel_attr (ir_node *node)
505 assert (node->op == op_Sel);
510 get_irn_phi_attr (ir_node *node)
512 assert (node->op == op_Phi);
513 return node->attr.phi0_pos;
517 get_irn_block_attr (ir_node *node)
519 assert (node->op == op_Block);
520 return node->attr.block;
524 get_irn_load_attr (ir_node *node)
526 assert (node->op == op_Load);
527 return node->attr.load;
531 get_irn_store_attr (ir_node *node)
533 assert (node->op == op_Store);
534 return node->attr.store;
538 get_irn_except_attr (ir_node *node)
540 assert (node->op == op_Div || node->op == op_Quot ||
541 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
542 return node->attr.except;
545 /** manipulate fields of individual nodes **/
547 /* this works for all except Block */
549 get_nodes_block (const ir_node *node) {
550 assert (!(node->op == op_Block));
551 return get_irn_n(node, -1);
555 set_nodes_block (ir_node *node, ir_node *block) {
556 assert (!(node->op == op_Block));
557 set_irn_n(node, -1, block);
560 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
561 * from Start. If so returns frame type, else Null. */
562 type *is_frame_pointer(ir_node *n) {
563 if ((get_irn_op(n) == op_Proj) &&
564 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
565 ir_node *start = get_Proj_pred(n);
566 if (get_irn_op(start) == op_Start) {
567 return get_irg_frame_type(get_irn_irg(start));
573 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
574 * from Start. If so returns global type, else Null. */
575 type *is_globals_pointer(ir_node *n) {
576 if ((get_irn_op(n) == op_Proj) &&
577 (get_Proj_proj(n) == pn_Start_P_globals)) {
578 ir_node *start = get_Proj_pred(n);
579 if (get_irn_op(start) == op_Start) {
580 return get_glob_type();
586 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
587 * from Start. If so returns 1, else 0. */
588 int is_value_arg_pointer(ir_node *n) {
589 if ((get_irn_op(n) == op_Proj) &&
590 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
591 (get_irn_op(get_Proj_pred(n)) == op_Start))
596 /* Returns an array with the predecessors of the Block. Depending on
597 the implementation of the graph data structure this can be a copy of
598 the internal representation of predecessors as well as the internal
599 array itself. Therefore writing to this array might obstruct the ir. */
601 get_Block_cfgpred_arr (ir_node *node)
603 assert ((node->op == op_Block));
604 return (ir_node **)&(get_irn_in(node)[1]);
609 get_Block_n_cfgpreds (ir_node *node) {
610 assert ((node->op == op_Block));
611 return get_irn_arity(node);
615 get_Block_cfgpred (ir_node *node, int pos) {
616 assert(-1 <= pos && pos < get_irn_arity(node));
617 assert(node->op == op_Block);
618 return get_irn_n(node, pos);
622 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
623 assert (node->op == op_Block);
624 set_irn_n(node, pos, pred);
628 get_Block_matured (ir_node *node) {
629 assert (node->op == op_Block);
630 return node->attr.block.matured;
634 set_Block_matured (ir_node *node, bool matured) {
635 assert (node->op == op_Block);
636 node->attr.block.matured = matured;
639 get_Block_block_visited (ir_node *node) {
640 assert (node->op == op_Block);
641 return node->attr.block.block_visited;
645 set_Block_block_visited (ir_node *node, unsigned long visit) {
646 assert (node->op == op_Block);
647 node->attr.block.block_visited = visit;
650 /* For this current_ir_graph must be set. */
652 mark_Block_block_visited (ir_node *node) {
653 assert (node->op == op_Block);
654 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
658 Block_not_block_visited(ir_node *node) {
659 assert (node->op == op_Block);
660 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
664 get_Block_graph_arr (ir_node *node, int pos) {
665 assert (node->op == op_Block);
666 return node->attr.block.graph_arr[pos+1];
670 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
671 assert (node->op == op_Block);
672 node->attr.block.graph_arr[pos+1] = value;
675 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
676 assert(node->op == op_Block);
677 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
678 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
679 node->attr.block.in_cg[0] = NULL;
680 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
682 /* Fix backedge array. fix_backedges operates depending on
683 interprocedural_view. */
684 int ipv = get_interprocedural_view();
685 set_interprocedural_view(true);
686 fix_backedges(current_ir_graph->obst, node);
687 set_interprocedural_view(ipv);
690 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
693 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
694 assert(node->op == op_Block &&
695 node->attr.block.in_cg &&
696 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
697 node->attr.block.in_cg[pos + 1] = pred;
700 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
701 assert(node->op == op_Block);
702 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
705 int get_Block_cg_n_cfgpreds(ir_node * node) {
706 assert(node->op == op_Block);
707 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
710 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
711 assert(node->op == op_Block && node->attr.block.in_cg);
712 return node->attr.block.in_cg[pos + 1];
715 void remove_Block_cg_cfgpred_arr(ir_node * node) {
716 assert(node->op == op_Block);
717 node->attr.block.in_cg = NULL;
720 ir_node *(set_Block_dead)(ir_node *block) {
721 return _set_Block_dead(block);
724 int (is_Block_dead)(const ir_node *block) {
725 return _is_Block_dead(block);
729 set_Start_irg(ir_node *node, ir_graph *irg) {
730 assert(node->op == op_Start);
731 assert(is_ir_graph(irg));
732 assert(0 && " Why set irg? -- use set_irn_irg");
736 get_End_n_keepalives(ir_node *end) {
737 assert (end->op == op_End);
738 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
742 get_End_keepalive(ir_node *end, int pos) {
743 assert (end->op == op_End);
744 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
748 add_End_keepalive (ir_node *end, ir_node *ka) {
749 assert (end->op == op_End);
750 ARR_APP1 (ir_node *, end->in, ka);
754 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
755 assert (end->op == op_End);
756 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
760 free_End (ir_node *end) {
761 assert (end->op == op_End);
763 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
764 end->in = NULL; /* @@@ make sure we get an error if we use the
765 in array afterwards ... */
770 > Implementing the case construct (which is where the constant Proj node is
771 > important) involves far more than simply determining the constant values.
772 > We could argue that this is more properly a function of the translator from
773 > Firm to the target machine. That could be done if there was some way of
774 > projecting "default" out of the Cond node.
775 I know it's complicated.
776 Basically there are two problems:
777 - determining the gaps between the projs
778 - determining the biggest case constant to know the proj number for
780 I see several solutions:
781 1. Introduce a ProjDefault node. Solves both problems.
782 This means to extend all optimizations executed during construction.
783 2. Give the Cond node for switch two flavors:
784 a) there are no gaps in the projs (existing flavor)
785 b) gaps may exist, default proj is still the Proj with the largest
786 projection number. This covers also the gaps.
787 3. Fix the semantic of the Cond to that of 2b)
789 Solution 2 seems to be the best:
790 Computing the gaps in the Firm representation is not too hard, i.e.,
791 libFIRM can implement a routine that transforms between the two
792 flavours. This is also possible for 1) but 2) does not require to
793 change any existing optimization.
794 Further it should be far simpler to determine the biggest constant than
796 I don't want to choose 3) as 2a) seems to have advantages for
797 dataflow analysis and 3) does not allow to convert the representation to
801 get_Cond_selector (ir_node *node) {
802 assert (node->op == op_Cond);
803 return get_irn_n(node, 0);
807 set_Cond_selector (ir_node *node, ir_node *selector) {
808 assert (node->op == op_Cond);
809 set_irn_n(node, 0, selector);
813 get_Cond_kind (ir_node *node) {
814 assert (node->op == op_Cond);
815 return node->attr.c.kind;
819 set_Cond_kind (ir_node *node, cond_kind kind) {
820 assert (node->op == op_Cond);
821 node->attr.c.kind = kind;
825 get_Cond_defaultProj (ir_node *node) {
826 assert (node->op == op_Cond);
827 return node->attr.c.default_proj;
831 get_Return_mem (ir_node *node) {
832 assert (node->op == op_Return);
833 return get_irn_n(node, 0);
837 set_Return_mem (ir_node *node, ir_node *mem) {
838 assert (node->op == op_Return);
839 set_irn_n(node, 0, mem);
843 get_Return_n_ress (ir_node *node) {
844 assert (node->op == op_Return);
845 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
849 get_Return_res_arr (ir_node *node)
851 assert ((node->op == op_Return));
852 if (get_Return_n_ress(node) > 0)
853 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
860 set_Return_n_res (ir_node *node, int results) {
861 assert (node->op == op_Return);
866 get_Return_res (ir_node *node, int pos) {
867 assert (node->op == op_Return);
868 assert (get_Return_n_ress(node) > pos);
869 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
873 set_Return_res (ir_node *node, int pos, ir_node *res){
874 assert (node->op == op_Return);
875 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
879 get_Raise_mem (ir_node *node) {
880 assert (node->op == op_Raise);
881 return get_irn_n(node, 0);
885 set_Raise_mem (ir_node *node, ir_node *mem) {
886 assert (node->op == op_Raise);
887 set_irn_n(node, 0, mem);
891 get_Raise_exo_ptr (ir_node *node) {
892 assert (node->op == op_Raise);
893 return get_irn_n(node, 1);
897 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
898 assert (node->op == op_Raise);
899 set_irn_n(node, 1, exo_ptr);
902 tarval *(get_Const_tarval)(ir_node *node) {
903 return _get_Const_tarval(node);
907 set_Const_tarval (ir_node *node, tarval *con) {
908 assert (node->op == op_Const);
909 node->attr.con.tv = con;
912 cnst_classify_t (classify_Const)(ir_node *node)
914 return _classify_Const(node);
918 /* The source language type. Must be an atomic type. Mode of type must
919 be mode of node. For tarvals from entities type must be pointer to
922 get_Const_type (ir_node *node) {
923 assert (node->op == op_Const);
924 return node->attr.con.tp;
928 set_Const_type (ir_node *node, type *tp) {
929 assert (node->op == op_Const);
930 if (tp != firm_unknown_type) {
931 assert (is_atomic_type(tp));
932 assert (get_type_mode(tp) == get_irn_mode(node));
934 node->attr.con.tp = tp;
939 get_SymConst_kind (const ir_node *node) {
940 assert (node->op == op_SymConst);
941 return node->attr.i.num;
945 set_SymConst_kind (ir_node *node, symconst_kind num) {
946 assert (node->op == op_SymConst);
947 node->attr.i.num = num;
951 get_SymConst_type (ir_node *node) {
952 assert ( (node->op == op_SymConst)
953 && ( get_SymConst_kind(node) == symconst_type_tag
954 || get_SymConst_kind(node) == symconst_size));
955 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
959 set_SymConst_type (ir_node *node, type *tp) {
960 assert ( (node->op == op_SymConst)
961 && ( get_SymConst_kind(node) == symconst_type_tag
962 || get_SymConst_kind(node) == symconst_size));
963 node->attr.i.sym.type_p = tp;
967 get_SymConst_name (ir_node *node) {
968 assert ( (node->op == op_SymConst)
969 && (get_SymConst_kind(node) == symconst_addr_name));
970 return node->attr.i.sym.ident_p;
974 set_SymConst_name (ir_node *node, ident *name) {
975 assert ( (node->op == op_SymConst)
976 && (get_SymConst_kind(node) == symconst_addr_name));
977 node->attr.i.sym.ident_p = name;
981 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
982 entity *get_SymConst_entity (ir_node *node) {
983 assert ( (node->op == op_SymConst)
984 && (get_SymConst_kind (node) == symconst_addr_ent));
985 return node->attr.i.sym.entity_p;
988 void set_SymConst_entity (ir_node *node, entity *ent) {
989 assert ( (node->op == op_SymConst)
990 && (get_SymConst_kind(node) == symconst_addr_ent));
991 node->attr.i.sym.entity_p = ent;
994 union symconst_symbol
995 get_SymConst_symbol (ir_node *node) {
996 assert (node->op == op_SymConst);
997 return node->attr.i.sym;
1001 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1002 assert (node->op == op_SymConst);
1003 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1004 node->attr.i.sym = sym;
1008 get_SymConst_value_type (ir_node *node) {
1009 assert (node->op == op_SymConst);
1010 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1011 return node->attr.i.tp;
1015 set_SymConst_value_type (ir_node *node, type *tp) {
1016 assert (node->op == op_SymConst);
1017 node->attr.i.tp = tp;
1021 get_Sel_mem (ir_node *node) {
1022 assert (node->op == op_Sel);
1023 return get_irn_n(node, 0);
1027 set_Sel_mem (ir_node *node, ir_node *mem) {
1028 assert (node->op == op_Sel);
1029 set_irn_n(node, 0, mem);
1033 get_Sel_ptr (ir_node *node) {
1034 assert (node->op == op_Sel);
1035 return get_irn_n(node, 1);
1039 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1040 assert (node->op == op_Sel);
1041 set_irn_n(node, 1, ptr);
1045 get_Sel_n_indexs (ir_node *node) {
1046 assert (node->op == op_Sel);
1047 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1051 get_Sel_index_arr (ir_node *node)
1053 assert ((node->op == op_Sel));
1054 if (get_Sel_n_indexs(node) > 0)
1055 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1061 get_Sel_index (ir_node *node, int pos) {
1062 assert (node->op == op_Sel);
1063 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1067 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1068 assert (node->op == op_Sel);
1069 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1073 get_Sel_entity (ir_node *node) {
1074 assert (node->op == op_Sel);
1075 return node->attr.s.ent;
1079 set_Sel_entity (ir_node *node, entity *ent) {
1080 assert (node->op == op_Sel);
1081 node->attr.s.ent = ent;
1085 get_InstOf_ent (ir_node *node) {
1086 assert (node->op = op_InstOf);
1087 return (node->attr.io.ent);
1091 set_InstOf_ent (ir_node *node, type *ent) {
1092 assert (node->op = op_InstOf);
1093 node->attr.io.ent = ent;
1097 get_InstOf_store (ir_node *node) {
1098 assert (node->op = op_InstOf);
1099 return (get_irn_n (node, 0));
1103 set_InstOf_store (ir_node *node, ir_node *obj) {
1104 assert (node->op = op_InstOf);
1105 set_irn_n (node, 0, obj);
1109 get_InstOf_obj (ir_node *node) {
1110 assert (node->op = op_InstOf);
1111 return (get_irn_n (node, 1));
1115 set_InstOf_obj (ir_node *node, ir_node *obj) {
1116 assert (node->op = op_InstOf);
1117 set_irn_n (node, 1, obj);
1121 /* For unary and binary arithmetic operations the access to the
1122 operands can be factored out. Left is the first, right the
1123 second arithmetic value as listed in tech report 0999-33.
1124 unops are: Minus, Abs, Not, Conv, Cast
1125 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1126 Shr, Shrs, Rotate, Cmp */
1130 get_Call_mem (ir_node *node) {
1131 assert (node->op == op_Call);
1132 return get_irn_n(node, 0);
1136 set_Call_mem (ir_node *node, ir_node *mem) {
1137 assert (node->op == op_Call);
1138 set_irn_n(node, 0, mem);
1142 get_Call_ptr (ir_node *node) {
1143 assert (node->op == op_Call);
1144 return get_irn_n(node, 1);
1148 set_Call_ptr (ir_node *node, ir_node *ptr) {
1149 assert (node->op == op_Call);
1150 set_irn_n(node, 1, ptr);
1154 get_Call_param_arr (ir_node *node) {
1155 assert (node->op == op_Call);
1156 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1160 get_Call_n_params (ir_node *node) {
1161 assert (node->op == op_Call);
1162 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1166 get_Call_arity (ir_node *node) {
1167 assert (node->op == op_Call);
1168 return get_Call_n_params(node);
1172 set_Call_arity (ir_node *node, ir_node *arity) {
1173 assert (node->op == op_Call);
1178 get_Call_param (ir_node *node, int pos) {
1179 assert (node->op == op_Call);
1180 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1184 set_Call_param (ir_node *node, int pos, ir_node *param) {
1185 assert (node->op == op_Call);
1186 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1190 get_Call_type (ir_node *node) {
1191 assert (node->op == op_Call);
1192 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1196 set_Call_type (ir_node *node, type *tp) {
1197 assert (node->op == op_Call);
1198 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1199 node->attr.call.cld_tp = tp;
1202 int Call_has_callees(ir_node *node) {
1203 assert(node && node->op == op_Call);
1204 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1205 (node->attr.call.callee_arr != NULL));
1208 int get_Call_n_callees(ir_node * node) {
1209 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1210 return ARR_LEN(node->attr.call.callee_arr);
1213 entity * get_Call_callee(ir_node * node, int pos) {
1214 assert(pos >= 0 && pos < get_Call_n_callees(node));
1215 return node->attr.call.callee_arr[pos];
1218 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1219 assert(node->op == op_Call);
1220 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1221 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1223 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1226 void remove_Call_callee_arr(ir_node * node) {
1227 assert(node->op == op_Call);
1228 node->attr.call.callee_arr = NULL;
1231 ir_node * get_CallBegin_ptr (ir_node *node) {
1232 assert(node->op == op_CallBegin);
1233 return get_irn_n(node, 0);
1235 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1236 assert(node->op == op_CallBegin);
1237 set_irn_n(node, 0, ptr);
1239 ir_node * get_CallBegin_call (ir_node *node) {
1240 assert(node->op == op_CallBegin);
1241 return node->attr.callbegin.call;
1243 void set_CallBegin_call (ir_node *node, ir_node *call) {
1244 assert(node->op == op_CallBegin);
1245 node->attr.callbegin.call = call;
1250 ir_node * get_##OP##_left(ir_node *node) { \
1251 assert(node->op == op_##OP); \
1252 return get_irn_n(node, node->op->op_index); \
1254 void set_##OP##_left(ir_node *node, ir_node *left) { \
1255 assert(node->op == op_##OP); \
1256 set_irn_n(node, node->op->op_index, left); \
1258 ir_node *get_##OP##_right(ir_node *node) { \
1259 assert(node->op == op_##OP); \
1260 return get_irn_n(node, node->op->op_index + 1); \
1262 void set_##OP##_right(ir_node *node, ir_node *right) { \
1263 assert(node->op == op_##OP); \
1264 set_irn_n(node, node->op->op_index + 1, right); \
1268 ir_node *get_##OP##_op(ir_node *node) { \
1269 assert(node->op == op_##OP); \
1270 return get_irn_n(node, node->op->op_index); \
1272 void set_##OP##_op (ir_node *node, ir_node *op) { \
1273 assert(node->op == op_##OP); \
1274 set_irn_n(node, node->op->op_index, op); \
1284 get_Quot_mem (ir_node *node) {
1285 assert (node->op == op_Quot);
1286 return get_irn_n(node, 0);
1290 set_Quot_mem (ir_node *node, ir_node *mem) {
1291 assert (node->op == op_Quot);
1292 set_irn_n(node, 0, mem);
1298 get_DivMod_mem (ir_node *node) {
1299 assert (node->op == op_DivMod);
1300 return get_irn_n(node, 0);
1304 set_DivMod_mem (ir_node *node, ir_node *mem) {
1305 assert (node->op == op_DivMod);
1306 set_irn_n(node, 0, mem);
1312 get_Div_mem (ir_node *node) {
1313 assert (node->op == op_Div);
1314 return get_irn_n(node, 0);
1318 set_Div_mem (ir_node *node, ir_node *mem) {
1319 assert (node->op == op_Div);
1320 set_irn_n(node, 0, mem);
1326 get_Mod_mem (ir_node *node) {
1327 assert (node->op == op_Mod);
1328 return get_irn_n(node, 0);
1332 set_Mod_mem (ir_node *node, ir_node *mem) {
1333 assert (node->op == op_Mod);
1334 set_irn_n(node, 0, mem);
1351 get_Cast_type (ir_node *node) {
1352 assert (node->op == op_Cast);
1353 return node->attr.cast.totype;
1357 set_Cast_type (ir_node *node, type *to_tp) {
1358 assert (node->op == op_Cast);
1359 node->attr.cast.totype = to_tp;
1363 (is_unop)(const ir_node *node) {
1364 return _is_unop(node);
1368 get_unop_op (ir_node *node) {
1369 if (node->op->opar == oparity_unary)
1370 return get_irn_n(node, node->op->op_index);
1372 assert(node->op->opar == oparity_unary);
1377 set_unop_op (ir_node *node, ir_node *op) {
1378 if (node->op->opar == oparity_unary)
1379 set_irn_n(node, node->op->op_index, op);
1381 assert(node->op->opar == oparity_unary);
1385 (is_binop)(const ir_node *node) {
1386 return _is_binop(node);
1390 get_binop_left (ir_node *node) {
1391 if (node->op->opar == oparity_binary)
1392 return get_irn_n(node, node->op->op_index);
1394 assert(node->op->opar == oparity_binary);
1399 set_binop_left (ir_node *node, ir_node *left) {
1400 if (node->op->opar == oparity_binary)
1401 set_irn_n(node, node->op->op_index, left);
1403 assert (node->op->opar == oparity_binary);
1407 get_binop_right (ir_node *node) {
1408 if (node->op->opar == oparity_binary)
1409 return get_irn_n(node, node->op->op_index + 1);
1411 assert(node->op->opar == oparity_binary);
1416 set_binop_right (ir_node *node, ir_node *right) {
1417 if (node->op->opar == oparity_binary)
1418 set_irn_n(node, node->op->op_index + 1, right);
1420 assert (node->op->opar == oparity_binary);
1423 int is_Phi (const ir_node *n) {
1429 if (op == op_Filter) return get_interprocedural_view();
1432 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1433 (get_irn_arity(n) > 0));
1438 int is_Phi0 (const ir_node *n) {
1441 return ((get_irn_op(n) == op_Phi) &&
1442 (get_irn_arity(n) == 0) &&
1443 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Returns the predecessor array of a Phi; &in[1] skips the block
   entry at in[0]. */
1447 get_Phi_preds_arr (ir_node *node) {
1448 assert (node->op == op_Phi);
1449 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of data predecessors of a Phi (or Phi0). */
1453 get_Phi_n_preds (ir_node *node) {
1454 assert (is_Phi(node) || is_Phi0(node));
1455 return (get_irn_arity(node));
/* NOTE(review): body elided in this extract — appears to be a stub or
   unimplemented; confirm against the full file. */
1459 void set_Phi_n_preds (ir_node *node, int n_preds) {
1460 assert (node->op == op_Phi);
/* Returns predecessor pos of a Phi (or Phi0). */
1465 get_Phi_pred (ir_node *node, int pos) {
1466 assert (is_Phi(node) || is_Phi0(node));
1467 return get_irn_n(node, pos);
/* Sets predecessor pos of a Phi (or Phi0). */
1471 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1472 assert (is_Phi(node) || is_Phi0(node));
1473 set_irn_n(node, pos, pred);
/* True for the two memory operations Load and Store.  Both share the
   layout mem = input 0, ptr = input 1, which the generic accessors
   below rely on. */
1477 int is_memop(ir_node *node) {
1478 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
/* Returns the memory input (pred 0) of a Load or Store. */
1481 ir_node *get_memop_mem (ir_node *node) {
1482 assert(is_memop(node));
1483 return get_irn_n(node, 0);
/* Sets the memory input (pred 0) of a Load or Store. */
1486 void set_memop_mem (ir_node *node, ir_node *mem) {
1487 assert(is_memop(node));
1488 set_irn_n(node, 0, mem);
/* Returns the address input (pred 1) of a Load or Store. */
1491 ir_node *get_memop_ptr (ir_node *node) {
1492 assert(is_memop(node));
1493 return get_irn_n(node, 1);
/* Sets the address input (pred 1) of a Load or Store. */
1496 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1497 assert(is_memop(node));
1498 set_irn_n(node, 1, ptr);
/* Load accessors.  Input layout: 0 = memory, 1 = address; the loaded
   mode and the volatility flag live in the node's load attributes. */
1502 get_Load_mem (ir_node *node) {
1503 assert (node->op == op_Load);
1504 return get_irn_n(node, 0);
1508 set_Load_mem (ir_node *node, ir_node *mem) {
1509 assert (node->op == op_Load);
1510 set_irn_n(node, 0, mem);
1514 get_Load_ptr (ir_node *node) {
1515 assert (node->op == op_Load);
1516 return get_irn_n(node, 1);
1520 set_Load_ptr (ir_node *node, ir_node *ptr) {
1521 assert (node->op == op_Load);
1522 set_irn_n(node, 1, ptr);
/* Mode of the value being loaded (attr.load.load_mode). */
1526 get_Load_mode (ir_node *node) {
1527 assert (node->op == op_Load);
1528 return node->attr.load.load_mode;
1532 set_Load_mode (ir_node *node, ir_mode *mode) {
1533 assert (node->op == op_Load);
1534 node->attr.load.load_mode = mode;
/* Volatility flag of the Load (attr.load.volatility). */
1538 get_Load_volatility (ir_node *node) {
1539 assert (node->op == op_Load);
1540 return node->attr.load.volatility;
1544 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1545 assert (node->op == op_Load);
1546 node->attr.load.volatility = volatility;
/* Store accessors.  Input layout: 0 = memory, 1 = address,
   2 = value to store; volatility lives in the store attributes. */
1551 get_Store_mem (ir_node *node) {
1552 assert (node->op == op_Store);
1553 return get_irn_n(node, 0);
1557 set_Store_mem (ir_node *node, ir_node *mem) {
1558 assert (node->op == op_Store);
1559 set_irn_n(node, 0, mem);
1563 get_Store_ptr (ir_node *node) {
1564 assert (node->op == op_Store);
1565 return get_irn_n(node, 1);
1569 set_Store_ptr (ir_node *node, ir_node *ptr) {
1570 assert (node->op == op_Store);
1571 set_irn_n(node, 1, ptr);
1575 get_Store_value (ir_node *node) {
1576 assert (node->op == op_Store);
1577 return get_irn_n(node, 2);
1581 set_Store_value (ir_node *node, ir_node *value) {
1582 assert (node->op == op_Store);
1583 set_irn_n(node, 2, value);
/* Volatility flag of the Store (attr.store.volatility). */
1587 get_Store_volatility (ir_node *node) {
1588 assert (node->op == op_Store);
1589 return node->attr.store.volatility;
1593 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1594 assert (node->op == op_Store);
1595 node->attr.store.volatility = volatility;
/* Alloc accessors.  Input layout: 0 = memory, 1 = size; the allocated
   type and the heap/stack flag live in the alloc attributes (attr.a). */
1600 get_Alloc_mem (ir_node *node) {
1601 assert (node->op == op_Alloc);
1602 return get_irn_n(node, 0);
1606 set_Alloc_mem (ir_node *node, ir_node *mem) {
1607 assert (node->op == op_Alloc);
1608 set_irn_n(node, 0, mem);
1612 get_Alloc_size (ir_node *node) {
1613 assert (node->op == op_Alloc);
1614 return get_irn_n(node, 1);
1618 set_Alloc_size (ir_node *node, ir_node *size) {
1619 assert (node->op == op_Alloc);
1620 set_irn_n(node, 1, size);
/* Returns the allocated type.  The assignment-in-return deliberately
   caches the result of skip_tid so later reads skip the id chain. */
1624 get_Alloc_type (ir_node *node) {
1625 assert (node->op == op_Alloc);
1626 return node->attr.a.type = skip_tid(node->attr.a.type);
1630 set_Alloc_type (ir_node *node, type *tp) {
1631 assert (node->op == op_Alloc);
1632 node->attr.a.type = tp;
/* Where the allocation happens (stack or heap), see where_alloc. */
1636 get_Alloc_where (ir_node *node) {
1637 assert (node->op == op_Alloc);
1638 return node->attr.a.where;
1642 set_Alloc_where (ir_node *node, where_alloc where) {
1643 assert (node->op == op_Alloc);
1644 node->attr.a.where = where;
/* Free accessors.  Input layout: 0 = memory, 1 = pointer to free,
   2 = size; the freed type and location flag live in attr.f. */
1649 get_Free_mem (ir_node *node) {
1650 assert (node->op == op_Free);
1651 return get_irn_n(node, 0);
1655 set_Free_mem (ir_node *node, ir_node *mem) {
1656 assert (node->op == op_Free);
1657 set_irn_n(node, 0, mem);
1661 get_Free_ptr (ir_node *node) {
1662 assert (node->op == op_Free);
1663 return get_irn_n(node, 1);
1667 set_Free_ptr (ir_node *node, ir_node *ptr) {
1668 assert (node->op == op_Free);
1669 set_irn_n(node, 1, ptr);
1673 get_Free_size (ir_node *node) {
1674 assert (node->op == op_Free);
1675 return get_irn_n(node, 2);
1679 set_Free_size (ir_node *node, ir_node *size) {
1680 assert (node->op == op_Free);
1681 set_irn_n(node, 2, size);
/* Returns the freed type; caches the skip_tid result like
   get_Alloc_type above. */
1685 get_Free_type (ir_node *node) {
1686 assert (node->op == op_Free);
1687 return node->attr.f.type = skip_tid(node->attr.f.type);
1691 set_Free_type (ir_node *node, type *tp) {
1692 assert (node->op == op_Free);
1693 node->attr.f.type = tp;
1697 get_Free_where (ir_node *node) {
1698 assert (node->op == op_Free);
1699 return node->attr.f.where;
1703 set_Free_where (ir_node *node, where_alloc where) {
1704 assert (node->op == op_Free);
1705 node->attr.f.where = where;
/* Returns the predecessor array of a Sync; &in[1] skips the block
   entry at in[0]. */
1709 get_Sync_preds_arr (ir_node *node) {
1710 assert (node->op == op_Sync);
1711 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of memory predecessors of a Sync. */
1715 get_Sync_n_preds (ir_node *node) {
1716 assert (node->op == op_Sync);
1717 return (get_irn_arity(node));
/* NOTE(review): body elided in this extract — appears to be a stub or
   unimplemented; confirm against the full file. */
1722 set_Sync_n_preds (ir_node *node, int n_preds) {
1723 assert (node->op == op_Sync);
/* Returns predecessor pos of a Sync. */
1728 get_Sync_pred (ir_node *node, int pos) {
1729 assert (node->op == op_Sync);
1730 return get_irn_n(node, pos);
/* Sets predecessor pos of a Sync. */
1734 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1735 assert (node->op == op_Sync);
1736 set_irn_n(node, pos, pred);
/* Returns the single predecessor (input 0) of a Proj.  is_Proj also
   accepts Filter nodes outside the interprocedural view. */
1740 get_Proj_pred (ir_node *node) {
1741 assert (is_Proj(node));
1742 return get_irn_n(node, 0);
1746 set_Proj_pred (ir_node *node, ir_node *pred) {
1747 assert (is_Proj(node));
1748 set_irn_n(node, 0, pred);
/* Returns the projection number.  A plain Proj stores it in attr.proj;
   a Filter (acting as Proj) stores it in attr.filter.proj. */
1752 get_Proj_proj (ir_node *node) {
1753 assert (is_Proj(node));
1754 if (get_irn_opcode(node) == iro_Proj) {
1755 return node->attr.proj;
1757 assert(get_irn_opcode(node) == iro_Filter);
1758 return node->attr.filter.proj;
/* Sets the projection number.  NOTE(review): unlike the getter this
   asserts op_Proj only, so it cannot be used on a Filter — presumably
   intentional; confirm. */
1763 set_Proj_proj (ir_node *node, long proj) {
1764 assert (node->op == op_Proj);
1765 node->attr.proj = proj;
/* Returns the predecessor array of a Tuple; &in[1] skips the block
   entry at in[0]. */
1769 get_Tuple_preds_arr (ir_node *node) {
1770 assert (node->op == op_Tuple);
1771 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of predecessors of a Tuple. */
1775 get_Tuple_n_preds (ir_node *node) {
1776 assert (node->op == op_Tuple);
1777 return (get_irn_arity(node));
/* NOTE(review): body elided in this extract — appears to be a stub or
   unimplemented; confirm against the full file. */
1782 set_Tuple_n_preds (ir_node *node, int n_preds) {
1783 assert (node->op == op_Tuple);
/* Returns predecessor pos of a Tuple. */
1788 get_Tuple_pred (ir_node *node, int pos) {
1789 assert (node->op == op_Tuple);
1790 return get_irn_n(node, pos);
/* Sets predecessor pos of a Tuple. */
1794 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1795 assert (node->op == op_Tuple);
1796 set_irn_n(node, pos, pred);
/* Returns the single predecessor (input 0) of an Id node. */
1800 get_Id_pred (ir_node *node) {
1801 assert (node->op == op_Id);
1802 return get_irn_n(node, 0);
/* Sets the predecessor of an Id node. */
1806 set_Id_pred (ir_node *node, ir_node *pred) {
1807 assert (node->op == op_Id);
1808 set_irn_n(node, 0, pred);
/* Returns the confirmed value (input 0) of a Confirm node. */
1811 ir_node *get_Confirm_value (ir_node *node) {
1812 assert (node->op == op_Confirm);
1813 return get_irn_n(node, 0);
/* Sets the confirmed value (input 0) of a Confirm node. */
1815 void set_Confirm_value (ir_node *node, ir_node *value) {
1816 assert (node->op == op_Confirm);
1817 set_irn_n(node, 0, value);
/* Returns the bound operand (input 1) of a Confirm node. */
1819 ir_node *get_Confirm_bound (ir_node *node) {
1820 assert (node->op == op_Confirm);
1821 return get_irn_n(node, 1);
1823 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1824 assert (node->op == op_Confirm);
1825 set_irn_n(node, 0, bound);
/* Returns the compare relation (pn_Cmp) stored in a Confirm node. */
1827 pn_Cmp get_Confirm_cmp (ir_node *node) {
1828 assert (node->op == op_Confirm);
1829 return node->attr.confirm_cmp;
/* Sets the compare relation of a Confirm node. */
1831 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1832 assert (node->op == op_Confirm);
1833 node->attr.confirm_cmp = cmp;
/* Returns the predecessor of a Filter node.
   NOTE(review): body elided in this extract — confirm against the
   full file. */
1838 get_Filter_pred (ir_node *node) {
1839 assert(node->op == op_Filter);
/* Sets the predecessor of a Filter node (body elided here too). */
1843 set_Filter_pred (ir_node *node, ir_node *pred) {
1844 assert(node->op == op_Filter);
/* Returns the projection number of a Filter (attr.filter.proj). */
1848 get_Filter_proj(ir_node *node) {
1849 assert(node->op == op_Filter);
1850 return node->attr.filter.proj;
/* Sets the projection number of a Filter. */
1853 set_Filter_proj (ir_node *node, long proj) {
1854 assert(node->op == op_Filter);
1855 node->attr.filter.proj = proj;
1858 /* Don't use get_irn_arity, get_irn_n in implementation as access
1859 shall work independent of view!!! */
/* Replaces the interprocedural (call-graph) predecessor array of a
   Filter.  in_cg[0] mirrors the block entry in[0]; the arity data
   predecessors follow at in_cg[1..arity].  Reallocates the in_cg and
   backedge arrays on the graph's obstack when the arity changes. */
1860 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1861 assert(node->op == op_Filter);
1862 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1863 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1864 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1865 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1866 node->attr.filter.in_cg[0] = node->in[0];
1868 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets a single interprocedural predecessor; pos is 0-based over the
   data predecessors, hence the +1 to skip the block entry. */
1871 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1872 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1873 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1874 node->attr.filter.in_cg[pos + 1] = pred;
/* Number of interprocedural data predecessors (array length minus the
   block entry). */
1876 int get_Filter_n_cg_preds(ir_node *node) {
1877 assert(node->op == op_Filter && node->attr.filter.in_cg);
1878 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Returns interprocedural predecessor pos (0-based, +1 skips block). */
1880 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1882 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1884 arity = ARR_LEN(node->attr.filter.in_cg);
1885 assert(pos < arity - 1);
1886 return node->attr.filter.in_cg[pos + 1];
/* Mux accessors.  The setters write node->in directly:
   in[1] = selector, in[2] = false value, in[3] = true value
   (in[0] is the block entry).  Getter bodies are elided in this
   extract — NOTE(review): confirm they read the same slots. */
1890 ir_node *get_Mux_sel (ir_node *node) {
1891 assert(node->op == op_Mux);
1894 void set_Mux_sel (ir_node *node, ir_node *sel) {
1895 assert(node->op == op_Mux);
1899 ir_node *get_Mux_false (ir_node *node) {
1900 assert(node->op == op_Mux);
/* Sets the value chosen when the selector is false (slot 2). */
1903 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1904 assert(node->op == op_Mux);
1905 node->in[2] = ir_false;
1908 ir_node *get_Mux_true (ir_node *node) {
1909 assert(node->op == op_Mux);
/* Sets the value chosen when the selector is true (slot 3). */
1912 void set_Mux_true (ir_node *node, ir_node *ir_true) {
1913 assert(node->op == op_Mux);
1914 node->in[3] = ir_true;
/* Returns the graph a node belongs to by walking to its Block, which
   stores the irg in its attributes.  Steps twice when the first hop
   lands on a Bad node (optimization may leave Bad as predecessor). */
1919 get_irn_irg(const ir_node *node) {
1920 if (! is_Block(node))
1921 node = get_nodes_block(node);
1922 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1923 node = get_nodes_block(node);
1924 assert(get_irn_op(node) == op_Block);
1925 return node->attr.block.irg;
1929 /*----------------------------------------------------------------*/
1930 /* Auxiliary routines */
1931 /*----------------------------------------------------------------*/
/* Returns the predecessor of a Proj, or the node itself otherwise.
   Accepts NULL (no assert by design, see comment below). */
1934 skip_Proj (ir_node *node) {
1935 /* don't assert node !!! */
1936 if (node && is_Proj(node)) {
1937 return get_Proj_pred(node);
/* If node is a Proj of a Tuple (possibly through Id chains and nested
   Tuples), returns the Tuple entry the Proj selects; otherwise the
   node itself.  A no-op when normalization is disabled. */
1944 skip_Tuple (ir_node *node) {
1947 if (!get_opt_normalize()) return node;
1949 node = skip_Id(node);
1950 if (get_irn_op(node) == op_Proj) {
1951 pred = skip_Id(get_Proj_pred(node));
1952 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1953 pred = skip_Id(skip_Tuple(pred));
1954 if (get_irn_op(pred) == op_Tuple)
1955 return get_Tuple_pred(pred, get_Proj_proj(node));
1960 /** returns operand of node if node is a Cast */
1961 ir_node *skip_Cast (ir_node *node) {
1962 if (node && get_irn_op(node) == op_Cast) {
/* return the Cast's operand, skipping any Id chain on it */
1963 return skip_Id(get_irn_n(node, 0));
1970 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1971 than any other approach, as Id chains are resolved and all point to the real node, or
1972 all id's are self loops. */
/* Follows a chain of Id nodes to the real node, path-compressing the
   chain as it goes.  Id cycles are compacted to self-loops.
   NOTE(review): this extract contains a second skip_Id definition
   below with the same contract — presumably one of the two is
   disabled (e.g. #if 0 / comment) in the full file; confirm. */
1974 skip_Id (ir_node *node) {
1975 /* don't assert node !!! */
1977 if (!get_opt_normalize()) return node;
1979 /* Don't use get_Id_pred: We get into an endless loop for
1980 self-referencing Ids. */
1981 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1982 ir_node *rem_pred = node->in[0+1];
/* arity must be at least 1: in[0] is the block, in[1] the pred */
1985 assert (get_irn_arity (node) > 0);
/* temporarily make this node a self-loop so recursion terminates
   on cycles */
1987 node->in[0+1] = node;
1988 res = skip_Id(rem_pred);
1989 if (res->op == op_Id) /* self-loop */ return node;
/* path compression: point directly at the chain end */
1991 node->in[0+1] = res;
1998 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1999 than any other approach, as Id chains are resolved and all point to the real node, or
2000 all id's are self loops. */
/* Second variant of skip_Id: adds fast-path exits for non-Id nodes
   and single-hop chains before falling back to the path-compressing
   resolution.  Same contract as the variant above. */
2002 skip_Id (ir_node *node) {
2004 /* don't assert node !!! */
/* fast path: nothing to skip */
2006 if (!node || (node->op != op_Id)) return node;
2008 if (!get_opt_normalize()) return node;
2010 /* Don't use get_Id_pred: We get into an endless loop for
2011 self-referencing Ids. */
2012 pred = node->in[0+1];
/* fast path: one-hop chain */
2014 if (pred->op != op_Id) return pred;
2016 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2017 ir_node *rem_pred, *res;
2019 if (pred->op != op_Id) return pred; /* shortcut */
2022 assert (get_irn_arity (node) > 0);
2024 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2025 res = skip_Id(rem_pred);
2026 if (res->op == op_Id) /* self-loop */ return node;
2028 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Out-of-line versions of the node-kind predicates; parenthesized
   names suppress the same-named function-like macros and forward to
   the underscore-prefixed inline implementations. */
2037 (is_Bad)(const ir_node *node) {
2038 return _is_Bad(node);
2042 (is_no_Block)(const ir_node *node) {
2043 return _is_no_Block(node);
2047 (is_Block)(const ir_node *node) {
2048 return _is_Block(node);
2051 /* returns true if node is a Unknown node. */
2053 is_Unknown (const ir_node *node) {
2055 return (get_irn_op(node) == op_Unknown);
/* A node acts as a Proj if it is a Proj, or a Filter outside the
   interprocedural view. */
2059 is_Proj (const ir_node *node) {
2061 return node->op == op_Proj
2062 || (!get_interprocedural_view() && node->op == op_Filter);
2065 /* Returns true if the operation manipulates control flow. */
2067 is_cfop(const ir_node *node) {
2068 return is_cfopcode(get_irn_op(node));
2071 /* Returns true if the operation manipulates interprocedural control flow:
2072 CallBegin, EndReg, EndExcept */
2073 int is_ip_cfop(const ir_node *node) {
2074 return is_ip_cfopcode(get_irn_op(node));
2077 /* Returns true if the operation can change the control flow because
2080 is_fragile_op(const ir_node *node) {
2081 return is_op_fragile(get_irn_op(node));
2084 /* Returns the memory operand of fragile operations. */
2085 ir_node *get_fragile_op_mem(ir_node *node) {
2086 assert(node && is_fragile_op(node));
/* NOTE(review): the case labels of this switch are elided in this
   extract; the visible arm returns input 0 (the memory input shared
   by the handled opcodes) — confirm against the full file. */
2088 switch (get_irn_opcode (node)) {
2097 return get_irn_n(node, 0);
2102 assert(0 && "should not be reached");
2107 /* Returns true if the operation is a forking control flow operation. */
2109 is_forking_op(const ir_node *node) {
2110 return is_op_forking(get_irn_op(node));
2114 #ifdef DEBUG_libfirm
/* Debug helper: prints a node's op, mode, number and address, its
   block (input -1), and all predecessors to stdout.  Compiled to an
   empty stub when DEBUG_libfirm is not defined. */
2115 void dump_irn (ir_node *n) {
2116 int i, arity = get_irn_arity(n);
2117 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* input -1 is the node's block */
2119 ir_node *pred = get_irn_n(n, -1);
2120 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2121 get_irn_node_nr(pred), (void *)pred);
2123 printf(" preds: \n");
2124 for (i = 0; i < arity; ++i) {
2125 ir_node *pred = get_irn_n(n, i);
2126 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2127 get_irn_node_nr(pred), (void *)pred);
2131 #else /* DEBUG_libfirm */
2132 void dump_irn (ir_node *n) {}
2133 #endif /* DEBUG_libfirm */