3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
34 /* some constants fixing the positions of nodes predecessors
36 #define CALL_PARAM_OFFSET 2
37 #define FUNCCALL_PARAM_OFFSET 1
38 #define SEL_INDEX_OFFSET 2
39 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
40 #define END_KEEPALIVE_OFFSET 0
/* Printable names of the pn_Cmp relation constants, indexed by pnc value.
   NOTE: the table must cover all 16 relations (False .. True); the original
   was missing the trailing "Ne"/"True" entries, so get_pnc_string() read
   past the end of the array for those constants. */
static const char *pnc_name_arr [] = {
  "False", "Eq", "Lt", "Le",
  "Gt", "Ge", "Lg", "Leg", "Uo",
  "Ue", "Ul", "Ule", "Ug", "Uge",
  "Ne", "True"
};

/**
 * Returns the pnc name from a pnc constant.
 *
 * @param pnc  a pn_Cmp relation value, must be in [0, 15]
 * @return     a static, read-only name string; do not free
 */
const char *get_pnc_string(int pnc) {
  assert(pnc >= 0 &&
         (size_t)pnc < sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0]));
  return pnc_name_arr[pnc];
}
57 * Calculates the negated pnc condition.
60 get_negated_pnc(int pnc) {
62 case pn_Cmp_False: return pn_Cmp_True;
63 case pn_Cmp_Eq: return pn_Cmp_Ne;
64 case pn_Cmp_Lt: return pn_Cmp_Uge;
65 case pn_Cmp_Le: return pn_Cmp_Ug;
66 case pn_Cmp_Gt: return pn_Cmp_Ule;
67 case pn_Cmp_Ge: return pn_Cmp_Ul;
68 case pn_Cmp_Lg: return pn_Cmp_Ue;
69 case pn_Cmp_Leg: return pn_Cmp_Uo;
70 case pn_Cmp_Uo: return pn_Cmp_Leg;
71 case pn_Cmp_Ue: return pn_Cmp_Lg;
72 case pn_Cmp_Ul: return pn_Cmp_Ge;
73 case pn_Cmp_Ule: return pn_Cmp_Gt;
74 case pn_Cmp_Ug: return pn_Cmp_Le;
75 case pn_Cmp_Uge: return pn_Cmp_Lt;
76 case pn_Cmp_Ne: return pn_Cmp_Eq;
77 case pn_Cmp_True: return pn_Cmp_False;
79 return 99; /* to shut up gcc */
82 /* Calculates the swapped pnc condition, i.e., "<" --> ">" */
84 get_swapped_pnc(int pnc) {
85 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
86 int lesser = pnc & pn_Cmp_Lt;
87 int greater = pnc & pn_Cmp_Gt;
89 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/* Printable names of the Proj-from-Start positions. */
const char *pns_name_arr [] = {
  "initial_exec", "global_store",
  "frame_base", "globals", "args"
};

/* Printable names of the SymConst kinds (symconst_kind order). */
const char *symconst_name_arr [] = {
  "type_tag", "size", "addr_name", "addr_ent"
};
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;


/* Register new space for every node; returns the new total extra size.
   Must be called before the first node is created (i.e. while
   forbid_new_data is still 0). */
unsigned register_additional_node_data(unsigned size) {
  assert(!forbid_new_data && "Too late to register additional node data");

  /* release-mode guard: with NDEBUG the assert vanishes, so refuse the
     late registration explicitly instead of corrupting node layout */
  if (forbid_new_data)
    return 0;

  return firm_add_node_size += size;
}
129 /* Forbid the addition of new data to an ir node. */
134 * irnode constructor.
135 * Create a new irnode in irg, with an op, mode, arity and
136 * some incoming irnodes.
137 * If arity is negative, a node with a dynamic array is created.
140 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
141 int arity, ir_node **in)
144 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
147 assert(irg && op && mode);
148 p = obstack_alloc (irg->obst, node_size);
149 memset(p, 0, node_size);
150 res = (ir_node *) (p + firm_add_node_size);
152 res->kind = k_ir_node;
158 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
160 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
161 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
165 set_irn_dbg_info(res, db);
169 res->node_nr = get_irp_new_node_nr();
172 #if FIRM_EDGES_INPLACE
175 int not_a_block = is_no_Block(res);
177 INIT_LIST_HEAD(&res->edge_info.outs_head);
179 for (i = 0, n = arity + not_a_block; i < n; ++i)
180 edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
184 hook_new_node(irg, res);
189 /*-- getting some parameters from ir_nodes --*/
192 (is_ir_node)(const void *thing) {
193 return _is_ir_node(thing);
197 (get_irn_intra_arity)(const ir_node *node) {
198 return _get_irn_intra_arity(node);
202 (get_irn_inter_arity)(const ir_node *node) {
203 return _get_irn_inter_arity(node);
206 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
209 (get_irn_arity)(const ir_node *node) {
210 return _get_irn_arity(node);
213 /* Returns the array with ins. This array is shifted with respect to the
214 array accessed by get_irn_n: The block operand is at position 0 not -1.
215 (@@@ This should be changed.)
216 The order of the predecessors in this array is not guaranteed, except that
217 lists of operands as predecessors of Block or arguments of a Call are
220 get_irn_in (const ir_node *node) {
222 if (get_interprocedural_view()) { /* handle Filter and Block specially */
223 if (get_irn_opcode(node) == iro_Filter) {
224 assert(node->attr.filter.in_cg);
225 return node->attr.filter.in_cg;
226 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
227 return node->attr.block.in_cg;
229 /* else fall through */
235 set_irn_in (ir_node *node, int arity, ir_node **in) {
238 if (get_interprocedural_view()) { /* handle Filter and Block specially */
239 if (get_irn_opcode(node) == iro_Filter) {
240 assert(node->attr.filter.in_cg);
241 arr = &node->attr.filter.in_cg;
242 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
243 arr = &node->attr.block.in_cg;
250 if (arity != ARR_LEN(*arr) - 1) {
251 ir_node * block = (*arr)[0];
252 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
255 fix_backedges(current_ir_graph->obst, node);
256 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
260 (get_irn_intra_n)(const ir_node *node, int n) {
261 return _get_irn_intra_n (node, n);
265 (get_irn_inter_n)(const ir_node *node, int n) {
266 return _get_irn_inter_n (node, n);
269 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
272 (get_irn_n)(const ir_node *node, int n) {
273 return _get_irn_n(node, n);
277 set_irn_n (ir_node *node, int n, ir_node *in) {
278 assert(node && node->kind == k_ir_node);
280 assert(n < get_irn_arity(node));
281 assert(in && in->kind == k_ir_node);
283 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
284 /* Change block pred in both views! */
285 node->in[n + 1] = in;
286 assert(node->attr.filter.in_cg);
287 node->attr.filter.in_cg[n + 1] = in;
290 if (get_interprocedural_view()) { /* handle Filter and Block specially */
291 if (get_irn_opcode(node) == iro_Filter) {
292 assert(node->attr.filter.in_cg);
293 node->attr.filter.in_cg[n + 1] = in;
295 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
296 node->attr.block.in_cg[n + 1] = in;
299 /* else fall through */
303 hook_set_irn_n(node, n, in, node->in[n + 1]);
305 /* Here, we rely on src and tgt being in the current ir graph */
306 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
308 node->in[n + 1] = in;
312 (get_irn_mode)(const ir_node *node) {
313 return _get_irn_mode(node);
317 (set_irn_mode)(ir_node *node, ir_mode *mode)
319 _set_irn_mode(node, mode);
323 get_irn_modecode (const ir_node *node)
326 return node->mode->code;
329 /** Gets the string representation of the mode .*/
331 get_irn_modename (const ir_node *node)
334 return get_mode_name(node->mode);
338 get_irn_modeident (const ir_node *node)
341 return get_mode_ident(node->mode);
345 (get_irn_op)(const ir_node *node)
347 return _get_irn_op(node);
350 /* should be private to the library: */
352 set_irn_op (ir_node *node, ir_op *op)
359 (get_irn_opcode)(const ir_node *node)
361 return _get_irn_opcode(node);
365 get_irn_opname (const ir_node *node)
368 if ((get_irn_op((ir_node *)node) == op_Phi) &&
369 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
370 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
371 return get_id_str(node->op->name);
375 get_irn_opident (const ir_node *node)
378 return node->op->name;
382 (get_irn_visited)(const ir_node *node)
384 return _get_irn_visited(node);
388 (set_irn_visited)(ir_node *node, unsigned long visited)
390 _set_irn_visited(node, visited);
394 (mark_irn_visited)(ir_node *node) {
395 _mark_irn_visited(node);
399 (irn_not_visited)(const ir_node *node) {
400 return _irn_not_visited(node);
404 (irn_visited)(const ir_node *node) {
405 return _irn_visited(node);
409 (set_irn_link)(ir_node *node, void *link) {
410 _set_irn_link(node, link);
414 (get_irn_link)(const ir_node *node) {
415 return _get_irn_link(node);
419 (get_irn_pinned)(const ir_node *node) {
420 return _get_irn_pinned(node);
423 void set_irn_pinned(ir_node *node, op_pin_state state) {
424 /* due to optimization an opt may be turned into a Tuple */
425 if (get_irn_op(node) == op_Tuple)
428 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
429 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
431 node->attr.except.pin_state = state;
434 #ifdef DO_HEAPANALYSIS
435 /* Access the abstract interpretation information of a node.
436 Returns NULL if no such information is available. */
437 struct abstval *get_irn_abst_value(ir_node *n) {
440 /* Set the abstract interpretation information of a node. */
441 void set_irn_abst_value(ir_node *n, struct abstval *os) {
444 struct section *firm_get_irn_section(ir_node *n) {
447 void firm_set_irn_section(ir_node *n, struct section *s) {
451 /* Dummies needed for firmjni. */
452 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
453 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
454 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
455 void firm_set_irn_section(ir_node *n, struct section *s) {}
456 #endif /* DO_HEAPANALYSIS */
459 /* Outputs a unique number for this node */
461 get_irn_node_nr(const ir_node *node) {
464 return node->node_nr;
471 get_irn_const_attr (ir_node *node)
473 assert (node->op == op_Const);
474 return node->attr.con;
478 get_irn_proj_attr (ir_node *node)
480 assert (node->op == op_Proj);
481 return node->attr.proj;
485 get_irn_alloc_attr (ir_node *node)
487 assert (node->op == op_Alloc);
492 get_irn_free_attr (ir_node *node)
494 assert (node->op == op_Free);
499 get_irn_symconst_attr (ir_node *node)
501 assert (node->op == op_SymConst);
506 get_irn_call_attr (ir_node *node)
508 assert (node->op == op_Call);
509 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
513 get_irn_sel_attr (ir_node *node)
515 assert (node->op == op_Sel);
520 get_irn_phi_attr (ir_node *node)
522 assert (node->op == op_Phi);
523 return node->attr.phi0_pos;
527 get_irn_block_attr (ir_node *node)
529 assert (node->op == op_Block);
530 return node->attr.block;
534 get_irn_load_attr (ir_node *node)
536 assert (node->op == op_Load);
537 return node->attr.load;
541 get_irn_store_attr (ir_node *node)
543 assert (node->op == op_Store);
544 return node->attr.store;
548 get_irn_except_attr (ir_node *node)
550 assert (node->op == op_Div || node->op == op_Quot ||
551 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
552 return node->attr.except;
555 /** manipulate fields of individual nodes **/
557 /* this works for all except Block */
559 get_nodes_block (const ir_node *node) {
560 assert (!(node->op == op_Block));
561 return get_irn_n(node, -1);
565 set_nodes_block (ir_node *node, ir_node *block) {
566 assert (!(node->op == op_Block));
567 set_irn_n(node, -1, block);
570 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
571 * from Start. If so returns frame type, else Null. */
572 type *is_frame_pointer(ir_node *n) {
573 if ((get_irn_op(n) == op_Proj) &&
574 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
575 ir_node *start = get_Proj_pred(n);
576 if (get_irn_op(start) == op_Start) {
577 return get_irg_frame_type(get_irn_irg(start));
583 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
584 * from Start. If so returns global type, else Null. */
585 type *is_globals_pointer(ir_node *n) {
586 if ((get_irn_op(n) == op_Proj) &&
587 (get_Proj_proj(n) == pn_Start_P_globals)) {
588 ir_node *start = get_Proj_pred(n);
589 if (get_irn_op(start) == op_Start) {
590 return get_glob_type();
596 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
597 * from Start. If so returns 1, else 0. */
598 int is_value_arg_pointer(ir_node *n) {
599 if ((get_irn_op(n) == op_Proj) &&
600 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
601 (get_irn_op(get_Proj_pred(n)) == op_Start))
606 /* Returns an array with the predecessors of the Block. Depending on
607 the implementation of the graph data structure this can be a copy of
608 the internal representation of predecessors as well as the internal
609 array itself. Therefore writing to this array might obstruct the ir. */
611 get_Block_cfgpred_arr (ir_node *node)
613 assert ((node->op == op_Block));
614 return (ir_node **)&(get_irn_in(node)[1]);
618 get_Block_n_cfgpreds (ir_node *node) {
619 assert ((node->op == op_Block));
620 return get_irn_arity(node);
624 get_Block_cfgpred (ir_node *node, int pos) {
625 assert(-1 <= pos && pos < get_irn_arity(node));
626 assert(node->op == op_Block);
627 return get_irn_n(node, pos);
631 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
632 assert (node->op == op_Block);
633 set_irn_n(node, pos, pred);
637 get_Block_matured (ir_node *node) {
638 assert (node->op == op_Block);
639 return node->attr.block.matured;
643 set_Block_matured (ir_node *node, bool matured) {
644 assert (node->op == op_Block);
645 node->attr.block.matured = matured;
649 (get_Block_block_visited)(ir_node *node) {
650 return _get_Block_block_visited(node);
654 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
655 _set_Block_block_visited(node, visit);
658 /* For this current_ir_graph must be set. */
660 (mark_Block_block_visited)(ir_node *node) {
661 _mark_Block_block_visited(node);
665 (Block_not_block_visited)(ir_node *node) {
666 return _Block_not_block_visited(node);
670 get_Block_graph_arr (ir_node *node, int pos) {
671 assert (node->op == op_Block);
672 return node->attr.block.graph_arr[pos+1];
676 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
677 assert (node->op == op_Block);
678 node->attr.block.graph_arr[pos+1] = value;
681 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
682 assert(node->op == op_Block);
683 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
684 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
685 node->attr.block.in_cg[0] = NULL;
686 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
688 /* Fix backedge array. fix_backedges operates depending on
689 interprocedural_view. */
690 int ipv = get_interprocedural_view();
691 set_interprocedural_view(true);
692 fix_backedges(current_ir_graph->obst, node);
693 set_interprocedural_view(ipv);
696 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
699 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
700 assert(node->op == op_Block &&
701 node->attr.block.in_cg &&
702 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
703 node->attr.block.in_cg[pos + 1] = pred;
706 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
707 assert(node->op == op_Block);
708 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
711 int get_Block_cg_n_cfgpreds(ir_node * node) {
712 assert(node->op == op_Block);
713 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
716 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
717 assert(node->op == op_Block && node->attr.block.in_cg);
718 return node->attr.block.in_cg[pos + 1];
721 void remove_Block_cg_cfgpred_arr(ir_node * node) {
722 assert(node->op == op_Block);
723 node->attr.block.in_cg = NULL;
726 ir_node *(set_Block_dead)(ir_node *block) {
727 return _set_Block_dead(block);
730 int (is_Block_dead)(const ir_node *block) {
731 return _is_Block_dead(block);
734 ir_extblk *get_Block_extbb(const ir_node *block) {
735 assert(is_Block(block));
736 return block->attr.block.extblk;
739 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
740 assert(is_Block(block));
741 block->attr.block.extblk = extblk;
745 set_Start_irg(ir_node *node, ir_graph *irg) {
746 assert(node->op == op_Start);
747 assert(is_ir_graph(irg));
748 assert(0 && " Why set irg? -- use set_irn_irg");
752 get_End_n_keepalives(ir_node *end) {
753 assert (end->op == op_End);
754 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
758 get_End_keepalive(ir_node *end, int pos) {
759 assert (end->op == op_End);
760 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
764 add_End_keepalive (ir_node *end, ir_node *ka) {
765 assert (end->op == op_End);
766 ARR_APP1 (ir_node *, end->in, ka);
770 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
771 assert (end->op == op_End);
772 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
776 free_End (ir_node *end) {
777 assert (end->op == op_End);
779 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
780 end->in = NULL; /* @@@ make sure we get an error if we use the
781 in array afterwards ... */
786 > Implementing the case construct (which is where the constant Proj node is
787 > important) involves far more than simply determining the constant values.
788 > We could argue that this is more properly a function of the translator from
789 > Firm to the target machine. That could be done if there was some way of
790 > projecting "default" out of the Cond node.
791 I know it's complicated.
Basically there are two problems:
793 - determining the gaps between the projs
794 - determining the biggest case constant to know the proj number for
796 I see several solutions:
797 1. Introduce a ProjDefault node. Solves both problems.
798 This means to extend all optimizations executed during construction.
799 2. Give the Cond node for switch two flavors:
800 a) there are no gaps in the projs (existing flavor)
801 b) gaps may exist, default proj is still the Proj with the largest
802 projection number. This covers also the gaps.
803 3. Fix the semantic of the Cond to that of 2b)
805 Solution 2 seems to be the best:
806 Computing the gaps in the Firm representation is not too hard, i.e.,
807 libFIRM can implement a routine that transforms between the two
808 flavours. This is also possible for 1) but 2) does not require to
809 change any existing optimization.
810 Further it should be far simpler to determine the biggest constant than
812 I don't want to choose 3) as 2a) seems to have advantages for
813 dataflow analysis and 3) does not allow to convert the representation to
817 get_Cond_selector (ir_node *node) {
818 assert (node->op == op_Cond);
819 return get_irn_n(node, 0);
823 set_Cond_selector (ir_node *node, ir_node *selector) {
824 assert (node->op == op_Cond);
825 set_irn_n(node, 0, selector);
829 get_Cond_kind (ir_node *node) {
830 assert (node->op == op_Cond);
831 return node->attr.c.kind;
835 set_Cond_kind (ir_node *node, cond_kind kind) {
836 assert (node->op == op_Cond);
837 node->attr.c.kind = kind;
841 get_Cond_defaultProj (ir_node *node) {
842 assert (node->op == op_Cond);
843 return node->attr.c.default_proj;
847 get_Return_mem (ir_node *node) {
848 assert (node->op == op_Return);
849 return get_irn_n(node, 0);
853 set_Return_mem (ir_node *node, ir_node *mem) {
854 assert (node->op == op_Return);
855 set_irn_n(node, 0, mem);
859 get_Return_n_ress (ir_node *node) {
860 assert (node->op == op_Return);
861 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
865 get_Return_res_arr (ir_node *node)
867 assert ((node->op == op_Return));
868 if (get_Return_n_ress(node) > 0)
869 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
876 set_Return_n_res (ir_node *node, int results) {
877 assert (node->op == op_Return);
882 get_Return_res (ir_node *node, int pos) {
883 assert (node->op == op_Return);
884 assert (get_Return_n_ress(node) > pos);
885 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
889 set_Return_res (ir_node *node, int pos, ir_node *res){
890 assert (node->op == op_Return);
891 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
895 get_Raise_mem (ir_node *node) {
896 assert (node->op == op_Raise);
897 return get_irn_n(node, 0);
901 set_Raise_mem (ir_node *node, ir_node *mem) {
902 assert (node->op == op_Raise);
903 set_irn_n(node, 0, mem);
907 get_Raise_exo_ptr (ir_node *node) {
908 assert (node->op == op_Raise);
909 return get_irn_n(node, 1);
913 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
914 assert (node->op == op_Raise);
915 set_irn_n(node, 1, exo_ptr);
918 tarval *(get_Const_tarval)(ir_node *node) {
919 return _get_Const_tarval(node);
923 set_Const_tarval (ir_node *node, tarval *con) {
924 assert (node->op == op_Const);
925 node->attr.con.tv = con;
928 cnst_classify_t (classify_Const)(ir_node *node)
930 return _classify_Const(node);
934 /* The source language type. Must be an atomic type. Mode of type must
935 be mode of node. For tarvals from entities type must be pointer to
938 get_Const_type (ir_node *node) {
939 assert (node->op == op_Const);
940 return node->attr.con.tp;
944 set_Const_type (ir_node *node, type *tp) {
945 assert (node->op == op_Const);
946 if (tp != firm_unknown_type) {
947 assert (is_atomic_type(tp));
948 assert (get_type_mode(tp) == get_irn_mode(node));
950 node->attr.con.tp = tp;
955 get_SymConst_kind (const ir_node *node) {
956 assert (node->op == op_SymConst);
957 return node->attr.i.num;
961 set_SymConst_kind (ir_node *node, symconst_kind num) {
962 assert (node->op == op_SymConst);
963 node->attr.i.num = num;
967 get_SymConst_type (ir_node *node) {
968 assert ( (node->op == op_SymConst)
969 && ( get_SymConst_kind(node) == symconst_type_tag
970 || get_SymConst_kind(node) == symconst_size));
971 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
975 set_SymConst_type (ir_node *node, type *tp) {
976 assert ( (node->op == op_SymConst)
977 && ( get_SymConst_kind(node) == symconst_type_tag
978 || get_SymConst_kind(node) == symconst_size));
979 node->attr.i.sym.type_p = tp;
983 get_SymConst_name (ir_node *node) {
984 assert ( (node->op == op_SymConst)
985 && (get_SymConst_kind(node) == symconst_addr_name));
986 return node->attr.i.sym.ident_p;
990 set_SymConst_name (ir_node *node, ident *name) {
991 assert ( (node->op == op_SymConst)
992 && (get_SymConst_kind(node) == symconst_addr_name));
993 node->attr.i.sym.ident_p = name;
997 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
998 entity *get_SymConst_entity (ir_node *node) {
999 assert ( (node->op == op_SymConst)
1000 && (get_SymConst_kind (node) == symconst_addr_ent));
1001 return node->attr.i.sym.entity_p;
1004 void set_SymConst_entity (ir_node *node, entity *ent) {
1005 assert ( (node->op == op_SymConst)
1006 && (get_SymConst_kind(node) == symconst_addr_ent));
1007 node->attr.i.sym.entity_p = ent;
1010 union symconst_symbol
1011 get_SymConst_symbol (ir_node *node) {
1012 assert (node->op == op_SymConst);
1013 return node->attr.i.sym;
1017 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1018 assert (node->op == op_SymConst);
1019 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1020 node->attr.i.sym = sym;
1024 get_SymConst_value_type (ir_node *node) {
1025 assert (node->op == op_SymConst);
1026 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1027 return node->attr.i.tp;
1031 set_SymConst_value_type (ir_node *node, type *tp) {
1032 assert (node->op == op_SymConst);
1033 node->attr.i.tp = tp;
1037 get_Sel_mem (ir_node *node) {
1038 assert (node->op == op_Sel);
1039 return get_irn_n(node, 0);
1043 set_Sel_mem (ir_node *node, ir_node *mem) {
1044 assert (node->op == op_Sel);
1045 set_irn_n(node, 0, mem);
1049 get_Sel_ptr (ir_node *node) {
1050 assert (node->op == op_Sel);
1051 return get_irn_n(node, 1);
1055 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1056 assert (node->op == op_Sel);
1057 set_irn_n(node, 1, ptr);
1061 get_Sel_n_indexs (ir_node *node) {
1062 assert (node->op == op_Sel);
1063 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1067 get_Sel_index_arr (ir_node *node)
1069 assert ((node->op == op_Sel));
1070 if (get_Sel_n_indexs(node) > 0)
1071 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1077 get_Sel_index (ir_node *node, int pos) {
1078 assert (node->op == op_Sel);
1079 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1083 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1084 assert (node->op == op_Sel);
1085 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1089 get_Sel_entity (ir_node *node) {
1090 assert (node->op == op_Sel);
1091 return node->attr.s.ent;
1095 set_Sel_entity (ir_node *node, entity *ent) {
1096 assert (node->op == op_Sel);
1097 node->attr.s.ent = ent;
1101 get_InstOf_ent (ir_node *node) {
1102 assert (node->op = op_InstOf);
1103 return (node->attr.io.ent);
1107 set_InstOf_ent (ir_node *node, type *ent) {
1108 assert (node->op = op_InstOf);
1109 node->attr.io.ent = ent;
1113 get_InstOf_store (ir_node *node) {
1114 assert (node->op = op_InstOf);
1115 return (get_irn_n (node, 0));
1119 set_InstOf_store (ir_node *node, ir_node *obj) {
1120 assert (node->op = op_InstOf);
1121 set_irn_n (node, 0, obj);
1125 get_InstOf_obj (ir_node *node) {
1126 assert (node->op = op_InstOf);
1127 return (get_irn_n (node, 1));
1131 set_InstOf_obj (ir_node *node, ir_node *obj) {
1132 assert (node->op = op_InstOf);
1133 set_irn_n (node, 1, obj);
1137 /* For unary and binary arithmetic operations the access to the
1138 operands can be factored out. Left is the first, right the
1139 second arithmetic value as listed in tech report 0999-33.
1140 unops are: Minus, Abs, Not, Conv, Cast
1141 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1142 Shr, Shrs, Rotate, Cmp */
1146 get_Call_mem (ir_node *node) {
1147 assert (node->op == op_Call);
1148 return get_irn_n(node, 0);
1152 set_Call_mem (ir_node *node, ir_node *mem) {
1153 assert (node->op == op_Call);
1154 set_irn_n(node, 0, mem);
1158 get_Call_ptr (ir_node *node) {
1159 assert (node->op == op_Call);
1160 return get_irn_n(node, 1);
1164 set_Call_ptr (ir_node *node, ir_node *ptr) {
1165 assert (node->op == op_Call);
1166 set_irn_n(node, 1, ptr);
1170 get_Call_param_arr (ir_node *node) {
1171 assert (node->op == op_Call);
1172 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1176 get_Call_n_params (ir_node *node) {
1177 assert (node->op == op_Call);
1178 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1182 get_Call_arity (ir_node *node) {
1183 assert (node->op == op_Call);
1184 return get_Call_n_params(node);
1188 set_Call_arity (ir_node *node, ir_node *arity) {
1189 assert (node->op == op_Call);
1194 get_Call_param (ir_node *node, int pos) {
1195 assert (node->op == op_Call);
1196 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1200 set_Call_param (ir_node *node, int pos, ir_node *param) {
1201 assert (node->op == op_Call);
1202 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1206 get_Call_type (ir_node *node) {
1207 assert (node->op == op_Call);
1208 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1212 set_Call_type (ir_node *node, type *tp) {
1213 assert (node->op == op_Call);
1214 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1215 node->attr.call.cld_tp = tp;
1218 int Call_has_callees(ir_node *node) {
1219 assert(node && node->op == op_Call);
1220 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1221 (node->attr.call.callee_arr != NULL));
1224 int get_Call_n_callees(ir_node * node) {
1225 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1226 return ARR_LEN(node->attr.call.callee_arr);
1229 entity * get_Call_callee(ir_node * node, int pos) {
1230 assert(pos >= 0 && pos < get_Call_n_callees(node));
1231 return node->attr.call.callee_arr[pos];
1234 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1235 assert(node->op == op_Call);
1236 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1237 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1239 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1242 void remove_Call_callee_arr(ir_node * node) {
1243 assert(node->op == op_Call);
1244 node->attr.call.callee_arr = NULL;
1247 ir_node * get_CallBegin_ptr (ir_node *node) {
1248 assert(node->op == op_CallBegin);
1249 return get_irn_n(node, 0);
1251 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1252 assert(node->op == op_CallBegin);
1253 set_irn_n(node, 0, ptr);
1255 ir_node * get_CallBegin_call (ir_node *node) {
1256 assert(node->op == op_CallBegin);
1257 return node->attr.callbegin.call;
1259 void set_CallBegin_call (ir_node *node, ir_node *call) {
1260 assert(node->op == op_CallBegin);
1261 node->attr.callbegin.call = call;
1266 ir_node * get_##OP##_left(ir_node *node) { \
1267 assert(node->op == op_##OP); \
1268 return get_irn_n(node, node->op->op_index); \
1270 void set_##OP##_left(ir_node *node, ir_node *left) { \
1271 assert(node->op == op_##OP); \
1272 set_irn_n(node, node->op->op_index, left); \
1274 ir_node *get_##OP##_right(ir_node *node) { \
1275 assert(node->op == op_##OP); \
1276 return get_irn_n(node, node->op->op_index + 1); \
1278 void set_##OP##_right(ir_node *node, ir_node *right) { \
1279 assert(node->op == op_##OP); \
1280 set_irn_n(node, node->op->op_index + 1, right); \
1284 ir_node *get_##OP##_op(ir_node *node) { \
1285 assert(node->op == op_##OP); \
1286 return get_irn_n(node, node->op->op_index); \
1288 void set_##OP##_op (ir_node *node, ir_node *op) { \
1289 assert(node->op == op_##OP); \
1290 set_irn_n(node, node->op->op_index, op); \
1300 get_Quot_mem (ir_node *node) {
1301 assert (node->op == op_Quot);
1302 return get_irn_n(node, 0);
1306 set_Quot_mem (ir_node *node, ir_node *mem) {
1307 assert (node->op == op_Quot);
1308 set_irn_n(node, 0, mem);
1314 get_DivMod_mem (ir_node *node) {
1315 assert (node->op == op_DivMod);
1316 return get_irn_n(node, 0);
1320 set_DivMod_mem (ir_node *node, ir_node *mem) {
1321 assert (node->op == op_DivMod);
1322 set_irn_n(node, 0, mem);
1328 get_Div_mem (ir_node *node) {
1329 assert (node->op == op_Div);
1330 return get_irn_n(node, 0);
1334 set_Div_mem (ir_node *node, ir_node *mem) {
1335 assert (node->op == op_Div);
1336 set_irn_n(node, 0, mem);
1342 get_Mod_mem (ir_node *node) {
1343 assert (node->op == op_Mod);
1344 return get_irn_n(node, 0);
1348 set_Mod_mem (ir_node *node, ir_node *mem) {
1349 assert (node->op == op_Mod);
1350 set_irn_n(node, 0, mem);
1367 get_Cast_type (ir_node *node) {
1368 assert (node->op == op_Cast);
1369 return node->attr.cast.totype;
1373 set_Cast_type (ir_node *node, type *to_tp) {
1374 assert (node->op == op_Cast);
1375 node->attr.cast.totype = to_tp;
1379 /* Checks for upcast.
1381 * Returns true if the Cast node casts a class type to a super type.
1383 int is_Cast_upcast(ir_node *node) {
1384 type *totype = get_Cast_type(node);
1385 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1386 ir_graph *myirg = get_irn_irg(node);
1388 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1391 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1392 totype = get_pointer_points_to_type(totype);
1393 fromtype = get_pointer_points_to_type(fromtype);
1398 if (!is_Class_type(totype)) return false;
1399 return is_subclass_of(fromtype, totype);
1402 /* Checks for downcast.
1404 * Returns true if the Cast node casts a class type to a sub type.
1406 int is_Cast_downcast(ir_node *node) {
1407 type *totype = get_Cast_type(node);
1408 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1410 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1413 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1414 totype = get_pointer_points_to_type(totype);
1415 fromtype = get_pointer_points_to_type(fromtype);
1420 if (!is_Class_type(totype)) return false;
1421 return is_subclass_of(totype, fromtype);
1425 (is_unop)(const ir_node *node) {
1426 return _is_unop(node);
1430 get_unop_op (ir_node *node) {
1431 if (node->op->opar == oparity_unary)
1432 return get_irn_n(node, node->op->op_index);
1434 assert(node->op->opar == oparity_unary);
1439 set_unop_op (ir_node *node, ir_node *op) {
1440 if (node->op->opar == oparity_unary)
1441 set_irn_n(node, node->op->op_index, op);
1443 assert(node->op->opar == oparity_unary);
1447 (is_binop)(const ir_node *node) {
1448 return _is_binop(node);
1452 get_binop_left (ir_node *node) {
1453 if (node->op->opar == oparity_binary)
1454 return get_irn_n(node, node->op->op_index);
1456 assert(node->op->opar == oparity_binary);
1461 set_binop_left (ir_node *node, ir_node *left) {
1462 if (node->op->opar == oparity_binary)
1463 set_irn_n(node, node->op->op_index, left);
1465 assert (node->op->opar == oparity_binary);
1469 get_binop_right (ir_node *node) {
1470 if (node->op->opar == oparity_binary)
1471 return get_irn_n(node, node->op->op_index + 1);
1473 assert(node->op->opar == oparity_binary);
1478 set_binop_right (ir_node *node, ir_node *right) {
1479 if (node->op->opar == oparity_binary)
1480 set_irn_n(node, node->op->op_index + 1, right);
1482 assert (node->op->opar == oparity_binary);
1485 int is_Phi (const ir_node *n) {
1491 if (op == op_Filter) return get_interprocedural_view();
1494 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1495 (get_irn_arity(n) > 0));
1500 int is_Phi0 (const ir_node *n) {
1503 return ((get_irn_op(n) == op_Phi) &&
1504 (get_irn_arity(n) == 0) &&
1505 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1509 get_Phi_preds_arr (ir_node *node) {
1510 assert (node->op == op_Phi);
1511 return (ir_node **)&(get_irn_in(node)[1]);
1515 get_Phi_n_preds (ir_node *node) {
1516 assert (is_Phi(node) || is_Phi0(node));
1517 return (get_irn_arity(node));
1521 void set_Phi_n_preds (ir_node *node, int n_preds) {
1522 assert (node->op == op_Phi);
1527 get_Phi_pred (ir_node *node, int pos) {
1528 assert (is_Phi(node) || is_Phi0(node));
1529 return get_irn_n(node, pos);
1533 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1534 assert (is_Phi(node) || is_Phi0(node));
1535 set_irn_n(node, pos, pred);
1539 int is_memop(ir_node *node) {
1540 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1543 ir_node *get_memop_mem (ir_node *node) {
1544 assert(is_memop(node));
1545 return get_irn_n(node, 0);
1548 void set_memop_mem (ir_node *node, ir_node *mem) {
1549 assert(is_memop(node));
1550 set_irn_n(node, 0, mem);
1553 ir_node *get_memop_ptr (ir_node *node) {
1554 assert(is_memop(node));
1555 return get_irn_n(node, 1);
1558 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1559 assert(is_memop(node));
1560 set_irn_n(node, 1, ptr);
1564 get_Load_mem (ir_node *node) {
1565 assert (node->op == op_Load);
1566 return get_irn_n(node, 0);
1570 set_Load_mem (ir_node *node, ir_node *mem) {
1571 assert (node->op == op_Load);
1572 set_irn_n(node, 0, mem);
1576 get_Load_ptr (ir_node *node) {
1577 assert (node->op == op_Load);
1578 return get_irn_n(node, 1);
1582 set_Load_ptr (ir_node *node, ir_node *ptr) {
1583 assert (node->op == op_Load);
1584 set_irn_n(node, 1, ptr);
1588 get_Load_mode (ir_node *node) {
1589 assert (node->op == op_Load);
1590 return node->attr.load.load_mode;
1594 set_Load_mode (ir_node *node, ir_mode *mode) {
1595 assert (node->op == op_Load);
1596 node->attr.load.load_mode = mode;
1600 get_Load_volatility (ir_node *node) {
1601 assert (node->op == op_Load);
1602 return node->attr.load.volatility;
1606 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1607 assert (node->op == op_Load);
1608 node->attr.load.volatility = volatility;
1613 get_Store_mem (ir_node *node) {
1614 assert (node->op == op_Store);
1615 return get_irn_n(node, 0);
1619 set_Store_mem (ir_node *node, ir_node *mem) {
1620 assert (node->op == op_Store);
1621 set_irn_n(node, 0, mem);
1625 get_Store_ptr (ir_node *node) {
1626 assert (node->op == op_Store);
1627 return get_irn_n(node, 1);
1631 set_Store_ptr (ir_node *node, ir_node *ptr) {
1632 assert (node->op == op_Store);
1633 set_irn_n(node, 1, ptr);
1637 get_Store_value (ir_node *node) {
1638 assert (node->op == op_Store);
1639 return get_irn_n(node, 2);
1643 set_Store_value (ir_node *node, ir_node *value) {
1644 assert (node->op == op_Store);
1645 set_irn_n(node, 2, value);
1649 get_Store_volatility (ir_node *node) {
1650 assert (node->op == op_Store);
1651 return node->attr.store.volatility;
1655 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1656 assert (node->op == op_Store);
1657 node->attr.store.volatility = volatility;
1662 get_Alloc_mem (ir_node *node) {
1663 assert (node->op == op_Alloc);
1664 return get_irn_n(node, 0);
1668 set_Alloc_mem (ir_node *node, ir_node *mem) {
1669 assert (node->op == op_Alloc);
1670 set_irn_n(node, 0, mem);
1674 get_Alloc_size (ir_node *node) {
1675 assert (node->op == op_Alloc);
1676 return get_irn_n(node, 1);
1680 set_Alloc_size (ir_node *node, ir_node *size) {
1681 assert (node->op == op_Alloc);
1682 set_irn_n(node, 1, size);
1686 get_Alloc_type (ir_node *node) {
1687 assert (node->op == op_Alloc);
1688 return node->attr.a.type = skip_tid(node->attr.a.type);
1692 set_Alloc_type (ir_node *node, type *tp) {
1693 assert (node->op == op_Alloc);
1694 node->attr.a.type = tp;
1698 get_Alloc_where (ir_node *node) {
1699 assert (node->op == op_Alloc);
1700 return node->attr.a.where;
1704 set_Alloc_where (ir_node *node, where_alloc where) {
1705 assert (node->op == op_Alloc);
1706 node->attr.a.where = where;
1711 get_Free_mem (ir_node *node) {
1712 assert (node->op == op_Free);
1713 return get_irn_n(node, 0);
1717 set_Free_mem (ir_node *node, ir_node *mem) {
1718 assert (node->op == op_Free);
1719 set_irn_n(node, 0, mem);
1723 get_Free_ptr (ir_node *node) {
1724 assert (node->op == op_Free);
1725 return get_irn_n(node, 1);
1729 set_Free_ptr (ir_node *node, ir_node *ptr) {
1730 assert (node->op == op_Free);
1731 set_irn_n(node, 1, ptr);
1735 get_Free_size (ir_node *node) {
1736 assert (node->op == op_Free);
1737 return get_irn_n(node, 2);
1741 set_Free_size (ir_node *node, ir_node *size) {
1742 assert (node->op == op_Free);
1743 set_irn_n(node, 2, size);
1747 get_Free_type (ir_node *node) {
1748 assert (node->op == op_Free);
1749 return node->attr.f.type = skip_tid(node->attr.f.type);
1753 set_Free_type (ir_node *node, type *tp) {
1754 assert (node->op == op_Free);
1755 node->attr.f.type = tp;
1759 get_Free_where (ir_node *node) {
1760 assert (node->op == op_Free);
1761 return node->attr.f.where;
1765 set_Free_where (ir_node *node, where_alloc where) {
1766 assert (node->op == op_Free);
1767 node->attr.f.where = where;
1771 get_Sync_preds_arr (ir_node *node) {
1772 assert (node->op == op_Sync);
1773 return (ir_node **)&(get_irn_in(node)[1]);
1777 get_Sync_n_preds (ir_node *node) {
1778 assert (node->op == op_Sync);
1779 return (get_irn_arity(node));
1784 set_Sync_n_preds (ir_node *node, int n_preds) {
1785 assert (node->op == op_Sync);
1790 get_Sync_pred (ir_node *node, int pos) {
1791 assert (node->op == op_Sync);
1792 return get_irn_n(node, pos);
1796 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1797 assert (node->op == op_Sync);
1798 set_irn_n(node, pos, pred);
1801 type *get_Proj_type(ir_node *n)
1804 ir_node *pred = get_Proj_pred(n);
1806 switch (get_irn_opcode(pred)) {
1809 /* Deal with Start / Call here: we need to know the Proj Nr. */
1810 assert(get_irn_mode(pred) == mode_T);
1811 pred_pred = get_Proj_pred(pred);
1812 if (get_irn_op(pred_pred) == op_Start) {
1813 type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1814 tp = get_method_param_type(mtp, get_Proj_proj(n));
1815 } else if (get_irn_op(pred_pred) == op_Call) {
1816 type *mtp = get_Call_type(pred_pred);
1817 tp = get_method_res_type(mtp, get_Proj_proj(n));
1820 case iro_Start: break;
1821 case iro_Call: break;
1823 ir_node *a = get_Load_ptr(pred);
1824 if (get_irn_op(a) == op_Sel)
1825 tp = get_entity_type(get_Sel_entity(a));
1834 get_Proj_pred (const ir_node *node) {
1835 assert (is_Proj(node));
1836 return get_irn_n(node, 0);
1840 set_Proj_pred (ir_node *node, ir_node *pred) {
1841 assert (is_Proj(node));
1842 set_irn_n(node, 0, pred);
1846 get_Proj_proj (const ir_node *node) {
1847 assert (is_Proj(node));
1848 if (get_irn_opcode(node) == iro_Proj) {
1849 return node->attr.proj;
1851 assert(get_irn_opcode(node) == iro_Filter);
1852 return node->attr.filter.proj;
1857 set_Proj_proj (ir_node *node, long proj) {
1858 assert (node->op == op_Proj);
1859 node->attr.proj = proj;
1863 get_Tuple_preds_arr (ir_node *node) {
1864 assert (node->op == op_Tuple);
1865 return (ir_node **)&(get_irn_in(node)[1]);
1869 get_Tuple_n_preds (ir_node *node) {
1870 assert (node->op == op_Tuple);
1871 return (get_irn_arity(node));
1876 set_Tuple_n_preds (ir_node *node, int n_preds) {
1877 assert (node->op == op_Tuple);
1882 get_Tuple_pred (ir_node *node, int pos) {
1883 assert (node->op == op_Tuple);
1884 return get_irn_n(node, pos);
1888 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1889 assert (node->op == op_Tuple);
1890 set_irn_n(node, pos, pred);
1894 get_Id_pred (ir_node *node) {
1895 assert (node->op == op_Id);
1896 return get_irn_n(node, 0);
1900 set_Id_pred (ir_node *node, ir_node *pred) {
1901 assert (node->op == op_Id);
1902 set_irn_n(node, 0, pred);
1905 ir_node *get_Confirm_value (ir_node *node) {
1906 assert (node->op == op_Confirm);
1907 return get_irn_n(node, 0);
1909 void set_Confirm_value (ir_node *node, ir_node *value) {
1910 assert (node->op == op_Confirm);
1911 set_irn_n(node, 0, value);
1913 ir_node *get_Confirm_bound (ir_node *node) {
1914 assert (node->op == op_Confirm);
1915 return get_irn_n(node, 1);
1917 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1918 assert (node->op == op_Confirm);
1919 set_irn_n(node, 0, bound);
1921 pn_Cmp get_Confirm_cmp (ir_node *node) {
1922 assert (node->op == op_Confirm);
1923 return node->attr.confirm_cmp;
1925 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1926 assert (node->op == op_Confirm);
1927 node->attr.confirm_cmp = cmp;
1932 get_Filter_pred (ir_node *node) {
1933 assert(node->op == op_Filter);
1937 set_Filter_pred (ir_node *node, ir_node *pred) {
1938 assert(node->op == op_Filter);
1942 get_Filter_proj(ir_node *node) {
1943 assert(node->op == op_Filter);
1944 return node->attr.filter.proj;
1947 set_Filter_proj (ir_node *node, long proj) {
1948 assert(node->op == op_Filter);
1949 node->attr.filter.proj = proj;
1952 /* Don't use get_irn_arity, get_irn_n in implementation as access
1953 shall work independent of view!!! */
1954 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1955 assert(node->op == op_Filter);
1956 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1957 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1958 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1959 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1960 node->attr.filter.in_cg[0] = node->in[0];
1962 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1965 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1966 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1967 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1968 node->attr.filter.in_cg[pos + 1] = pred;
1970 int get_Filter_n_cg_preds(ir_node *node) {
1971 assert(node->op == op_Filter && node->attr.filter.in_cg);
1972 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1974 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1976 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1978 arity = ARR_LEN(node->attr.filter.in_cg);
1979 assert(pos < arity - 1);
1980 return node->attr.filter.in_cg[pos + 1];
1984 ir_node *get_Mux_sel (ir_node *node) {
1985 assert(node->op == op_Mux);
1988 void set_Mux_sel (ir_node *node, ir_node *sel) {
1989 assert(node->op == op_Mux);
1993 ir_node *get_Mux_false (ir_node *node) {
1994 assert(node->op == op_Mux);
1997 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1998 assert(node->op == op_Mux);
1999 node->in[2] = ir_false;
2002 ir_node *get_Mux_true (ir_node *node) {
2003 assert(node->op == op_Mux);
2006 void set_Mux_true (ir_node *node, ir_node *ir_true) {
2007 assert(node->op == op_Mux);
2008 node->in[3] = ir_true;
2013 get_irn_irg(const ir_node *node) {
2014 if (! is_Block(node))
2015 node = get_nodes_block(node);
2016 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2017 node = get_nodes_block(node);
2018 assert(get_irn_op(node) == op_Block);
2019 return node->attr.block.irg;
2023 /*----------------------------------------------------------------*/
2024 /* Auxiliary routines */
2025 /*----------------------------------------------------------------*/
2028 skip_Proj (ir_node *node) {
2029 /* don't assert node !!! */
2030 if (node && is_Proj(node)) {
2031 return get_Proj_pred(node);
2038 skip_Tuple (ir_node *node) {
2041 if (!get_opt_normalize()) return node;
2043 node = skip_Id(node);
2044 if (get_irn_op(node) == op_Proj) {
2045 pred = skip_Id(get_Proj_pred(node));
2046 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
2047 pred = skip_Id(skip_Tuple(pred));
2048 else if (get_irn_op(pred) == op_Tuple)
2049 return get_Tuple_pred(pred, get_Proj_proj(node));
2054 /* returns operand of node if node is a Cast */
2055 ir_node *skip_Cast (ir_node *node) {
2056 if (node && get_irn_op(node) == op_Cast)
2057 return get_Cast_op(node);
2061 /* returns operand of node if node is a Confirm */
2062 ir_node *skip_Confirm (ir_node *node) {
2063 if (node && get_irn_op(node) == op_Confirm)
2064 return get_Confirm_value(node);
2068 /* skip all high-level ops */
2069 ir_node *skip_HighLevel(ir_node *node) {
2070 if (node && is_op_highlevel(get_irn_op(node)))
2071 return get_irn_n(node, 0);
#if 0
/* Disabled: a second, live definition of skip_Id follows below — two
   definitions of the same external symbol cannot coexist, so this older
   variant is compiled out (NOTE(review): the original guard was lost in
   extraction; confirm against repository history). */
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops. */
ir_node *
skip_Id (ir_node *node) {
  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != node->in[0+1])) {
    ir_node *rem_pred = node->in[0+1];
    ir_node *res;

    assert (get_irn_arity (node) > 0);

    node->in[0+1] = node;
    res = skip_Id(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res;
    return res;
  } else {
    return node;
  }
}
#endif
2104 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2105 than any other approach, as Id chains are resolved and all point to the real node, or
2106 all id's are self loops. */
2108 skip_Id (ir_node *node) {
2110 /* don't assert node !!! */
2112 if (!node || (node->op != op_Id)) return node;
2114 if (!get_opt_normalize()) return node;
2116 /* Don't use get_Id_pred: We get into an endless loop for
2117 self-referencing Ids. */
2118 pred = node->in[0+1];
2120 if (pred->op != op_Id) return pred;
2122 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2123 ir_node *rem_pred, *res;
2125 if (pred->op != op_Id) return pred; /* shortcut */
2128 assert (get_irn_arity (node) > 0);
2130 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2131 res = skip_Id(rem_pred);
2132 if (res->op == op_Id) /* self-loop */ return node;
2134 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2143 (is_Bad)(const ir_node *node) {
2144 return _is_Bad(node);
2148 (is_Const)(const ir_node *node) {
2149 return _is_Const(node);
2153 (is_no_Block)(const ir_node *node) {
2154 return _is_no_Block(node);
2158 (is_Block)(const ir_node *node) {
2159 return _is_Block(node);
2162 /* returns true if node is a Unknown node. */
2164 (is_Unknown)(const ir_node *node) {
2165 return _is_Unknown(node);
2169 is_Proj (const ir_node *node) {
2171 return node->op == op_Proj
2172 || (!get_interprocedural_view() && node->op == op_Filter);
2175 /* Returns true if the operation manipulates control flow. */
2177 is_cfop(const ir_node *node) {
2178 return is_cfopcode(get_irn_op(node));
2181 /* Returns true if the operation manipulates interprocedural control flow:
2182 CallBegin, EndReg, EndExcept */
2183 int is_ip_cfop(const ir_node *node) {
2184 return is_ip_cfopcode(get_irn_op(node));
2187 /* Returns true if the operation can change the control flow because
2190 is_fragile_op(const ir_node *node) {
2191 return is_op_fragile(get_irn_op(node));
2194 /* Returns the memory operand of fragile operations. */
2195 ir_node *get_fragile_op_mem(ir_node *node) {
2196 assert(node && is_fragile_op(node));
2198 switch (get_irn_opcode (node)) {
2207 return get_irn_n(node, 0);
2212 assert(0 && "should not be reached");
2217 /* Returns true if the operation is a forking control flow operation. */
2219 is_forking_op(const ir_node *node) {
2220 return is_op_forking(get_irn_op(node));
2223 type *(get_irn_type)(ir_node *node) {
2224 return _get_irn_type(node);
2227 /** the get_type operation must be always implemented */
2228 static type *get_Null_type(ir_node *n) {
2232 /* set the get_type operation */
2233 ir_op *firm_set_default_get_type(ir_op *op)
2236 case iro_Const: op->get_type = get_Const_type; break;
2237 case iro_SymConst: op->get_type = get_SymConst_value_type; break;
2238 case iro_Cast: op->get_type = get_Cast_type; break;
2239 case iro_Proj: op->get_type = get_Proj_type; break;
2240 default: op->get_type = get_Null_type; break;
2245 #ifdef DEBUG_libfirm
2246 void dump_irn (ir_node *n) {
2247 int i, arity = get_irn_arity(n);
2248 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2250 ir_node *pred = get_irn_n(n, -1);
2251 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2252 get_irn_node_nr(pred), (void *)pred);
2254 printf(" preds: \n");
2255 for (i = 0; i < arity; ++i) {
2256 ir_node *pred = get_irn_n(n, i);
2257 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2258 get_irn_node_nr(pred), (void *)pred);
2262 #else /* DEBUG_libfirm */
2263 void dump_irn (ir_node *n) {}
2264 #endif /* DEBUG_libfirm */