3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
34 /* some constants fixing the positions of nodes predecessors
36 #define CALL_PARAM_OFFSET 2
37 #define FUNCCALL_PARAM_OFFSET 1
38 #define SEL_INDEX_OFFSET 2
39 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
40 #define END_KEEPALIVE_OFFSET 0
42 static const char *pnc_name_arr [] = {
43 "False", "Eq", "Lt", "Le",
44 "Gt", "Ge", "Lg", "Leg", "Uo",
45 "Ue", "Ul", "Ule", "Ug", "Uge",
 * Returns the pnc name from a pnc constant.
52 const char *get_pnc_string(int pnc) {
53 return pnc_name_arr[pnc];
57 * Calculates the negated (Complement(R)) pnc condition.
59 int get_negated_pnc(int pnc, ir_mode *mode) {
62 /* do NOT add the Uo bit for non-floating point values */
63 if (! mode_is_float(mode))
69 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
71 get_inversed_pnc(int pnc) {
72 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
73 int lesser = pnc & pn_Cmp_Lt;
74 int greater = pnc & pn_Cmp_Gt;
76 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
81 const char *pns_name_arr [] = {
82 "initial_exec", "global_store",
83 "frame_base", "globals", "args"
86 const char *symconst_name_arr [] = {
87 "type_tag", "size", "addr_name", "addr_ent"
91 * Indicates, whether additional data can be registered to ir nodes.
92 * If set to 1, this is not possible anymore.
94 static int forbid_new_data = 0;
97 * The amount of additional space for custom data to be allocated upon
98 * creating a new node.
100 unsigned firm_add_node_size = 0;
103 /* register new space for every node */
104 unsigned register_additional_node_data(unsigned size) {
105 assert(!forbid_new_data && "Too late to register additional node data");
110 return firm_add_node_size += size;
116 /* Forbid the addition of new data to an ir node. */
121 * irnode constructor.
122 * Create a new irnode in irg, with an op, mode, arity and
123 * some incoming irnodes.
124 * If arity is negative, a node with a dynamic array is created.
127 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
128 int arity, ir_node **in)
131 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
134 assert(irg && op && mode);
135 p = obstack_alloc (irg->obst, node_size);
136 memset(p, 0, node_size);
137 res = (ir_node *) (p + firm_add_node_size);
139 res->kind = k_ir_node;
145 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
147 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
148 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
152 set_irn_dbg_info(res, db);
156 res->node_nr = get_irp_new_node_nr();
159 #if FIRM_EDGES_INPLACE
162 int not_a_block = is_no_Block(res);
164 INIT_LIST_HEAD(&res->edge_info.outs_head);
166 INIT_LIST_HEAD(&res->attr.block.succ_head);
169 for (i = 0, n = arity + not_a_block; i < n; ++i)
170 edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
174 hook_new_node(irg, res);
179 /*-- getting some parameters from ir_nodes --*/
182 (is_ir_node)(const void *thing) {
183 return _is_ir_node(thing);
187 (get_irn_intra_arity)(const ir_node *node) {
188 return _get_irn_intra_arity(node);
192 (get_irn_inter_arity)(const ir_node *node) {
193 return _get_irn_inter_arity(node);
196 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
199 (get_irn_arity)(const ir_node *node) {
200 return _get_irn_arity(node);
203 /* Returns the array with ins. This array is shifted with respect to the
204 array accessed by get_irn_n: The block operand is at position 0 not -1.
205 (@@@ This should be changed.)
206 The order of the predecessors in this array is not guaranteed, except that
207 lists of operands as predecessors of Block or arguments of a Call are
210 get_irn_in (const ir_node *node) {
212 if (get_interprocedural_view()) { /* handle Filter and Block specially */
213 if (get_irn_opcode(node) == iro_Filter) {
214 assert(node->attr.filter.in_cg);
215 return node->attr.filter.in_cg;
216 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
217 return node->attr.block.in_cg;
219 /* else fall through */
225 set_irn_in (ir_node *node, int arity, ir_node **in) {
228 if (get_interprocedural_view()) { /* handle Filter and Block specially */
229 if (get_irn_opcode(node) == iro_Filter) {
230 assert(node->attr.filter.in_cg);
231 arr = &node->attr.filter.in_cg;
232 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
233 arr = &node->attr.block.in_cg;
240 if (arity != ARR_LEN(*arr) - 1) {
241 ir_node * block = (*arr)[0];
242 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
245 fix_backedges(current_ir_graph->obst, node);
246 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
250 (get_irn_intra_n)(const ir_node *node, int n) {
251 return _get_irn_intra_n (node, n);
255 (get_irn_inter_n)(const ir_node *node, int n) {
256 return _get_irn_inter_n (node, n);
259 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
262 (get_irn_n)(const ir_node *node, int n) {
263 return _get_irn_n(node, n);
267 set_irn_n (ir_node *node, int n, ir_node *in) {
268 assert(node && node->kind == k_ir_node);
270 assert(n < get_irn_arity(node));
271 assert(in && in->kind == k_ir_node);
273 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
274 /* Change block pred in both views! */
275 node->in[n + 1] = in;
276 assert(node->attr.filter.in_cg);
277 node->attr.filter.in_cg[n + 1] = in;
280 if (get_interprocedural_view()) { /* handle Filter and Block specially */
281 if (get_irn_opcode(node) == iro_Filter) {
282 assert(node->attr.filter.in_cg);
283 node->attr.filter.in_cg[n + 1] = in;
285 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
286 node->attr.block.in_cg[n + 1] = in;
289 /* else fall through */
293 hook_set_irn_n(node, n, in, node->in[n + 1]);
295 /* Here, we rely on src and tgt being in the current ir graph */
296 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
298 node->in[n + 1] = in;
302 (get_irn_mode)(const ir_node *node) {
303 return _get_irn_mode(node);
307 (set_irn_mode)(ir_node *node, ir_mode *mode)
309 _set_irn_mode(node, mode);
313 get_irn_modecode (const ir_node *node)
316 return node->mode->code;
319 /** Gets the string representation of the mode .*/
321 get_irn_modename (const ir_node *node)
324 return get_mode_name(node->mode);
328 get_irn_modeident (const ir_node *node)
331 return get_mode_ident(node->mode);
335 (get_irn_op)(const ir_node *node)
337 return _get_irn_op(node);
340 /* should be private to the library: */
342 set_irn_op (ir_node *node, ir_op *op)
349 (get_irn_opcode)(const ir_node *node)
351 return _get_irn_opcode(node);
355 get_irn_opname (const ir_node *node)
358 if ((get_irn_op((ir_node *)node) == op_Phi) &&
359 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
360 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
361 return get_id_str(node->op->name);
365 get_irn_opident (const ir_node *node)
368 return node->op->name;
372 (get_irn_visited)(const ir_node *node)
374 return _get_irn_visited(node);
378 (set_irn_visited)(ir_node *node, unsigned long visited)
380 _set_irn_visited(node, visited);
384 (mark_irn_visited)(ir_node *node) {
385 _mark_irn_visited(node);
389 (irn_not_visited)(const ir_node *node) {
390 return _irn_not_visited(node);
394 (irn_visited)(const ir_node *node) {
395 return _irn_visited(node);
399 (set_irn_link)(ir_node *node, void *link) {
400 _set_irn_link(node, link);
404 (get_irn_link)(const ir_node *node) {
405 return _get_irn_link(node);
409 (get_irn_pinned)(const ir_node *node) {
410 return _get_irn_pinned(node);
414 (is_irn_pinned_in_irg) (const ir_node *node) {
415 return _is_irn_pinned_in_irg(node);
418 void set_irn_pinned(ir_node *node, op_pin_state state) {
419 /* due to optimization an opt may be turned into a Tuple */
420 if (get_irn_op(node) == op_Tuple)
423 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
424 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
426 node->attr.except.pin_state = state;
429 #ifdef DO_HEAPANALYSIS
430 /* Access the abstract interpretation information of a node.
431 Returns NULL if no such information is available. */
432 struct abstval *get_irn_abst_value(ir_node *n) {
435 /* Set the abstract interpretation information of a node. */
436 void set_irn_abst_value(ir_node *n, struct abstval *os) {
439 struct section *firm_get_irn_section(ir_node *n) {
442 void firm_set_irn_section(ir_node *n, struct section *s) {
/* Dummies needed for firmjni: compiled when DO_HEAPANALYSIS is disabled so
   the JNI bindings still link.  The parameters are intentionally unused and
   the getters report "no information available" (NULL). */
struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
void set_irn_abst_value(ir_node *n, struct abstval *os) {}
struct section *firm_get_irn_section(ir_node *n) { return NULL; }
void firm_set_irn_section(ir_node *n, struct section *s) {}
451 #endif /* DO_HEAPANALYSIS */
454 /* Outputs a unique number for this node */
456 get_irn_node_nr(const ir_node *node) {
459 return node->node_nr;
466 get_irn_const_attr (ir_node *node)
468 assert (node->op == op_Const);
469 return node->attr.con;
473 get_irn_proj_attr (ir_node *node)
475 assert (node->op == op_Proj);
476 return node->attr.proj;
480 get_irn_alloc_attr (ir_node *node)
482 assert (node->op == op_Alloc);
487 get_irn_free_attr (ir_node *node)
489 assert (node->op == op_Free);
494 get_irn_symconst_attr (ir_node *node)
496 assert (node->op == op_SymConst);
501 get_irn_call_attr (ir_node *node)
503 assert (node->op == op_Call);
504 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
508 get_irn_sel_attr (ir_node *node)
510 assert (node->op == op_Sel);
515 get_irn_phi_attr (ir_node *node)
517 assert (node->op == op_Phi);
518 return node->attr.phi0_pos;
522 get_irn_block_attr (ir_node *node)
524 assert (node->op == op_Block);
525 return node->attr.block;
529 get_irn_load_attr (ir_node *node)
531 assert (node->op == op_Load);
532 return node->attr.load;
536 get_irn_store_attr (ir_node *node)
538 assert (node->op == op_Store);
539 return node->attr.store;
543 get_irn_except_attr (ir_node *node)
545 assert (node->op == op_Div || node->op == op_Quot ||
546 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
547 return node->attr.except;
550 /** manipulate fields of individual nodes **/
552 /* this works for all except Block */
554 get_nodes_block (const ir_node *node) {
555 assert (!(node->op == op_Block));
556 return get_irn_n(node, -1);
560 set_nodes_block (ir_node *node, ir_node *block) {
561 assert (!(node->op == op_Block));
562 set_irn_n(node, -1, block);
565 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
566 * from Start. If so returns frame type, else Null. */
567 type *is_frame_pointer(ir_node *n) {
568 if ((get_irn_op(n) == op_Proj) &&
569 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
570 ir_node *start = get_Proj_pred(n);
571 if (get_irn_op(start) == op_Start) {
572 return get_irg_frame_type(get_irn_irg(start));
578 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
579 * from Start. If so returns global type, else Null. */
580 type *is_globals_pointer(ir_node *n) {
581 if ((get_irn_op(n) == op_Proj) &&
582 (get_Proj_proj(n) == pn_Start_P_globals)) {
583 ir_node *start = get_Proj_pred(n);
584 if (get_irn_op(start) == op_Start) {
585 return get_glob_type();
591 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
592 * from Start. If so returns 1, else 0. */
593 int is_value_arg_pointer(ir_node *n) {
594 if ((get_irn_op(n) == op_Proj) &&
595 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
596 (get_irn_op(get_Proj_pred(n)) == op_Start))
601 /* Returns an array with the predecessors of the Block. Depending on
602 the implementation of the graph data structure this can be a copy of
603 the internal representation of predecessors as well as the internal
604 array itself. Therefore writing to this array might obstruct the ir. */
606 get_Block_cfgpred_arr (ir_node *node)
608 assert ((node->op == op_Block));
609 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of control-flow predecessors of a Block.
   NOTE(review): the parenthesized name suppresses macro expansion for this
   out-of-line definition; the call in the body is presumably expanded by a
   get_Block_n_cfgpreds macro from irnode_t.h (as with the _-prefixed
   accessors elsewhere in this file).  If no such macro is in scope this
   recurses forever -- confirm against irnode_t.h. */
(get_Block_n_cfgpreds)(ir_node *node) {
  return get_Block_n_cfgpreds(node);
/* Returns the control-flow predecessor of a Block at position pos.
   NOTE(review): same idiom as get_Block_n_cfgpreds above -- the body relies
   on a get_Block_cfgpred macro from irnode_t.h expanding the inner call;
   without that macro this is infinite recursion.  Confirm against
   irnode_t.h. */
(get_Block_cfgpred)(ir_node *node, int pos) {
  return get_Block_cfgpred(node, pos);
623 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
624 assert (node->op == op_Block);
625 set_irn_n(node, pos, pred);
629 get_Block_matured (ir_node *node) {
630 assert (node->op == op_Block);
631 return node->attr.block.matured;
635 set_Block_matured (ir_node *node, bool matured) {
636 assert (node->op == op_Block);
637 node->attr.block.matured = matured;
641 (get_Block_block_visited)(ir_node *node) {
642 return _get_Block_block_visited(node);
646 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
647 _set_Block_block_visited(node, visit);
650 /* For this current_ir_graph must be set. */
652 (mark_Block_block_visited)(ir_node *node) {
653 _mark_Block_block_visited(node);
657 (Block_not_block_visited)(ir_node *node) {
658 return _Block_not_block_visited(node);
662 get_Block_graph_arr (ir_node *node, int pos) {
663 assert (node->op == op_Block);
664 return node->attr.block.graph_arr[pos+1];
668 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
669 assert (node->op == op_Block);
670 node->attr.block.graph_arr[pos+1] = value;
673 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
674 assert(node->op == op_Block);
675 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
676 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
677 node->attr.block.in_cg[0] = NULL;
678 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
680 /* Fix backedge array. fix_backedges operates depending on
681 interprocedural_view. */
682 int ipv = get_interprocedural_view();
683 set_interprocedural_view(true);
684 fix_backedges(current_ir_graph->obst, node);
685 set_interprocedural_view(ipv);
688 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
691 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
692 assert(node->op == op_Block &&
693 node->attr.block.in_cg &&
694 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
695 node->attr.block.in_cg[pos + 1] = pred;
698 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
699 assert(node->op == op_Block);
700 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
703 int get_Block_cg_n_cfgpreds(ir_node * node) {
704 assert(node->op == op_Block);
705 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
708 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
709 assert(node->op == op_Block && node->attr.block.in_cg);
710 return node->attr.block.in_cg[pos + 1];
713 void remove_Block_cg_cfgpred_arr(ir_node * node) {
714 assert(node->op == op_Block);
715 node->attr.block.in_cg = NULL;
718 ir_node *(set_Block_dead)(ir_node *block) {
719 return _set_Block_dead(block);
722 int (is_Block_dead)(const ir_node *block) {
723 return _is_Block_dead(block);
726 ir_extblk *get_Block_extbb(const ir_node *block) {
727 assert(is_Block(block));
728 return block->attr.block.extblk;
731 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
732 assert(is_Block(block));
733 block->attr.block.extblk = extblk;
737 set_Start_irg(ir_node *node, ir_graph *irg) {
738 assert(node->op == op_Start);
739 assert(is_ir_graph(irg));
740 assert(0 && " Why set irg? -- use set_irn_irg");
744 get_End_n_keepalives(ir_node *end) {
745 assert (end->op == op_End);
746 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
750 get_End_keepalive(ir_node *end, int pos) {
751 assert (end->op == op_End);
752 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
756 add_End_keepalive (ir_node *end, ir_node *ka) {
757 assert (end->op == op_End);
758 ARR_APP1 (ir_node *, end->in, ka);
762 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
763 assert (end->op == op_End);
764 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
/* Releases the End node's flexible in-array (keep-alives live there) and
   poisons the pointer so that any later access to this node's predecessors
   fails loudly instead of reading stale data. */
free_End (ir_node *end) {
  assert (end->op == op_End);
  DEL_ARR_F(end->in); /* GL @@@ does not work! */
  end->in = NULL;   /* @@@ make sure we get an error if we use the
                       in array afterwards ... */
778 > Implementing the case construct (which is where the constant Proj node is
779 > important) involves far more than simply determining the constant values.
780 > We could argue that this is more properly a function of the translator from
781 > Firm to the target machine. That could be done if there was some way of
782 > projecting "default" out of the Cond node.
783 I know it's complicated.
Basically there are two problems:
785 - determining the gaps between the projs
786 - determining the biggest case constant to know the proj number for
788 I see several solutions:
789 1. Introduce a ProjDefault node. Solves both problems.
790 This means to extend all optimizations executed during construction.
791 2. Give the Cond node for switch two flavors:
792 a) there are no gaps in the projs (existing flavor)
793 b) gaps may exist, default proj is still the Proj with the largest
794 projection number. This covers also the gaps.
795 3. Fix the semantic of the Cond to that of 2b)
797 Solution 2 seems to be the best:
798 Computing the gaps in the Firm representation is not too hard, i.e.,
799 libFIRM can implement a routine that transforms between the two
800 flavours. This is also possible for 1) but 2) does not require to
801 change any existing optimization.
802 Further it should be far simpler to determine the biggest constant than
804 I don't want to choose 3) as 2a) seems to have advantages for
805 dataflow analysis and 3) does not allow to convert the representation to
809 get_Cond_selector (ir_node *node) {
810 assert (node->op == op_Cond);
811 return get_irn_n(node, 0);
815 set_Cond_selector (ir_node *node, ir_node *selector) {
816 assert (node->op == op_Cond);
817 set_irn_n(node, 0, selector);
821 get_Cond_kind (ir_node *node) {
822 assert (node->op == op_Cond);
823 return node->attr.c.kind;
827 set_Cond_kind (ir_node *node, cond_kind kind) {
828 assert (node->op == op_Cond);
829 node->attr.c.kind = kind;
833 get_Cond_defaultProj (ir_node *node) {
834 assert (node->op == op_Cond);
835 return node->attr.c.default_proj;
839 get_Return_mem (ir_node *node) {
840 assert (node->op == op_Return);
841 return get_irn_n(node, 0);
845 set_Return_mem (ir_node *node, ir_node *mem) {
846 assert (node->op == op_Return);
847 set_irn_n(node, 0, mem);
851 get_Return_n_ress (ir_node *node) {
852 assert (node->op == op_Return);
853 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
857 get_Return_res_arr (ir_node *node)
859 assert ((node->op == op_Return));
860 if (get_Return_n_ress(node) > 0)
861 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
868 set_Return_n_res (ir_node *node, int results) {
869 assert (node->op == op_Return);
874 get_Return_res (ir_node *node, int pos) {
875 assert (node->op == op_Return);
876 assert (get_Return_n_ress(node) > pos);
877 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
881 set_Return_res (ir_node *node, int pos, ir_node *res){
882 assert (node->op == op_Return);
883 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
887 get_Raise_mem (ir_node *node) {
888 assert (node->op == op_Raise);
889 return get_irn_n(node, 0);
893 set_Raise_mem (ir_node *node, ir_node *mem) {
894 assert (node->op == op_Raise);
895 set_irn_n(node, 0, mem);
899 get_Raise_exo_ptr (ir_node *node) {
900 assert (node->op == op_Raise);
901 return get_irn_n(node, 1);
905 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
906 assert (node->op == op_Raise);
907 set_irn_n(node, 1, exo_ptr);
910 tarval *(get_Const_tarval)(ir_node *node) {
911 return _get_Const_tarval(node);
915 set_Const_tarval (ir_node *node, tarval *con) {
916 assert (node->op == op_Const);
917 node->attr.con.tv = con;
920 cnst_classify_t (classify_Const)(ir_node *node)
922 return _classify_Const(node);
926 /* The source language type. Must be an atomic type. Mode of type must
927 be mode of node. For tarvals from entities type must be pointer to
930 get_Const_type (ir_node *node) {
931 assert (node->op == op_Const);
932 return node->attr.con.tp;
936 set_Const_type (ir_node *node, type *tp) {
937 assert (node->op == op_Const);
938 if (tp != firm_unknown_type) {
939 assert (is_atomic_type(tp));
940 assert (get_type_mode(tp) == get_irn_mode(node));
942 node->attr.con.tp = tp;
947 get_SymConst_kind (const ir_node *node) {
948 assert (node->op == op_SymConst);
949 return node->attr.i.num;
953 set_SymConst_kind (ir_node *node, symconst_kind num) {
954 assert (node->op == op_SymConst);
955 node->attr.i.num = num;
959 get_SymConst_type (ir_node *node) {
960 assert ( (node->op == op_SymConst)
961 && ( get_SymConst_kind(node) == symconst_type_tag
962 || get_SymConst_kind(node) == symconst_size));
963 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
967 set_SymConst_type (ir_node *node, type *tp) {
968 assert ( (node->op == op_SymConst)
969 && ( get_SymConst_kind(node) == symconst_type_tag
970 || get_SymConst_kind(node) == symconst_size));
971 node->attr.i.sym.type_p = tp;
975 get_SymConst_name (ir_node *node) {
976 assert ( (node->op == op_SymConst)
977 && (get_SymConst_kind(node) == symconst_addr_name));
978 return node->attr.i.sym.ident_p;
982 set_SymConst_name (ir_node *node, ident *name) {
983 assert ( (node->op == op_SymConst)
984 && (get_SymConst_kind(node) == symconst_addr_name));
985 node->attr.i.sym.ident_p = name;
989 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
990 entity *get_SymConst_entity (ir_node *node) {
991 assert ( (node->op == op_SymConst)
992 && (get_SymConst_kind (node) == symconst_addr_ent));
993 return node->attr.i.sym.entity_p;
996 void set_SymConst_entity (ir_node *node, entity *ent) {
997 assert ( (node->op == op_SymConst)
998 && (get_SymConst_kind(node) == symconst_addr_ent));
999 node->attr.i.sym.entity_p = ent;
1002 union symconst_symbol
1003 get_SymConst_symbol (ir_node *node) {
1004 assert (node->op == op_SymConst);
1005 return node->attr.i.sym;
1009 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1010 assert (node->op == op_SymConst);
1011 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1012 node->attr.i.sym = sym;
1016 get_SymConst_value_type (ir_node *node) {
1017 assert (node->op == op_SymConst);
1018 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1019 return node->attr.i.tp;
1023 set_SymConst_value_type (ir_node *node, type *tp) {
1024 assert (node->op == op_SymConst);
1025 node->attr.i.tp = tp;
1029 get_Sel_mem (ir_node *node) {
1030 assert (node->op == op_Sel);
1031 return get_irn_n(node, 0);
1035 set_Sel_mem (ir_node *node, ir_node *mem) {
1036 assert (node->op == op_Sel);
1037 set_irn_n(node, 0, mem);
1041 get_Sel_ptr (ir_node *node) {
1042 assert (node->op == op_Sel);
1043 return get_irn_n(node, 1);
1047 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1048 assert (node->op == op_Sel);
1049 set_irn_n(node, 1, ptr);
1053 get_Sel_n_indexs (ir_node *node) {
1054 assert (node->op == op_Sel);
1055 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1059 get_Sel_index_arr (ir_node *node)
1061 assert ((node->op == op_Sel));
1062 if (get_Sel_n_indexs(node) > 0)
1063 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1069 get_Sel_index (ir_node *node, int pos) {
1070 assert (node->op == op_Sel);
1071 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1075 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1076 assert (node->op == op_Sel);
1077 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1081 get_Sel_entity (ir_node *node) {
1082 assert (node->op == op_Sel);
1083 return node->attr.s.ent;
1087 set_Sel_entity (ir_node *node, entity *ent) {
1088 assert (node->op == op_Sel);
1089 node->attr.s.ent = ent;
1093 get_InstOf_ent (ir_node *node) {
1094 assert (node->op = op_InstOf);
1095 return (node->attr.io.ent);
1099 set_InstOf_ent (ir_node *node, type *ent) {
1100 assert (node->op = op_InstOf);
1101 node->attr.io.ent = ent;
1105 get_InstOf_store (ir_node *node) {
1106 assert (node->op = op_InstOf);
1107 return (get_irn_n (node, 0));
1111 set_InstOf_store (ir_node *node, ir_node *obj) {
1112 assert (node->op = op_InstOf);
1113 set_irn_n (node, 0, obj);
1117 get_InstOf_obj (ir_node *node) {
1118 assert (node->op = op_InstOf);
1119 return (get_irn_n (node, 1));
1123 set_InstOf_obj (ir_node *node, ir_node *obj) {
1124 assert (node->op = op_InstOf);
1125 set_irn_n (node, 1, obj);
1129 /* For unary and binary arithmetic operations the access to the
1130 operands can be factored out. Left is the first, right the
1131 second arithmetic value as listed in tech report 0999-33.
1132 unops are: Minus, Abs, Not, Conv, Cast
1133 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1134 Shr, Shrs, Rotate, Cmp */
1138 get_Call_mem (ir_node *node) {
1139 assert (node->op == op_Call);
1140 return get_irn_n(node, 0);
1144 set_Call_mem (ir_node *node, ir_node *mem) {
1145 assert (node->op == op_Call);
1146 set_irn_n(node, 0, mem);
1150 get_Call_ptr (ir_node *node) {
1151 assert (node->op == op_Call);
1152 return get_irn_n(node, 1);
1156 set_Call_ptr (ir_node *node, ir_node *ptr) {
1157 assert (node->op == op_Call);
1158 set_irn_n(node, 1, ptr);
1162 get_Call_param_arr (ir_node *node) {
1163 assert (node->op == op_Call);
1164 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1168 get_Call_n_params (ir_node *node) {
1169 assert (node->op == op_Call);
1170 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1174 get_Call_arity (ir_node *node) {
1175 assert (node->op == op_Call);
1176 return get_Call_n_params(node);
1180 set_Call_arity (ir_node *node, ir_node *arity) {
1181 assert (node->op == op_Call);
1186 get_Call_param (ir_node *node, int pos) {
1187 assert (node->op == op_Call);
1188 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1192 set_Call_param (ir_node *node, int pos, ir_node *param) {
1193 assert (node->op == op_Call);
1194 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1198 get_Call_type (ir_node *node) {
1199 assert (node->op == op_Call);
1200 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1204 set_Call_type (ir_node *node, type *tp) {
1205 assert (node->op == op_Call);
1206 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1207 node->attr.call.cld_tp = tp;
1210 int Call_has_callees(ir_node *node) {
1211 assert(node && node->op == op_Call);
1212 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1213 (node->attr.call.callee_arr != NULL));
1216 int get_Call_n_callees(ir_node * node) {
1217 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1218 return ARR_LEN(node->attr.call.callee_arr);
1221 entity * get_Call_callee(ir_node * node, int pos) {
1222 assert(pos >= 0 && pos < get_Call_n_callees(node));
1223 return node->attr.call.callee_arr[pos];
1226 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1227 assert(node->op == op_Call);
1228 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1229 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1231 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1234 void remove_Call_callee_arr(ir_node * node) {
1235 assert(node->op == op_Call);
1236 node->attr.call.callee_arr = NULL;
1239 ir_node * get_CallBegin_ptr (ir_node *node) {
1240 assert(node->op == op_CallBegin);
1241 return get_irn_n(node, 0);
1243 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1244 assert(node->op == op_CallBegin);
1245 set_irn_n(node, 0, ptr);
1247 ir_node * get_CallBegin_call (ir_node *node) {
1248 assert(node->op == op_CallBegin);
1249 return node->attr.callbegin.call;
1251 void set_CallBegin_call (ir_node *node, ir_node *call) {
1252 assert(node->op == op_CallBegin);
1253 node->attr.callbegin.call = call;
1258 ir_node * get_##OP##_left(ir_node *node) { \
1259 assert(node->op == op_##OP); \
1260 return get_irn_n(node, node->op->op_index); \
1262 void set_##OP##_left(ir_node *node, ir_node *left) { \
1263 assert(node->op == op_##OP); \
1264 set_irn_n(node, node->op->op_index, left); \
1266 ir_node *get_##OP##_right(ir_node *node) { \
1267 assert(node->op == op_##OP); \
1268 return get_irn_n(node, node->op->op_index + 1); \
1270 void set_##OP##_right(ir_node *node, ir_node *right) { \
1271 assert(node->op == op_##OP); \
1272 set_irn_n(node, node->op->op_index + 1, right); \
1276 ir_node *get_##OP##_op(ir_node *node) { \
1277 assert(node->op == op_##OP); \
1278 return get_irn_n(node, node->op->op_index); \
1280 void set_##OP##_op (ir_node *node, ir_node *op) { \
1281 assert(node->op == op_##OP); \
1282 set_irn_n(node, node->op->op_index, op); \
1292 get_Quot_mem (ir_node *node) {
1293 assert (node->op == op_Quot);
1294 return get_irn_n(node, 0);
1298 set_Quot_mem (ir_node *node, ir_node *mem) {
1299 assert (node->op == op_Quot);
1300 set_irn_n(node, 0, mem);
1306 get_DivMod_mem (ir_node *node) {
1307 assert (node->op == op_DivMod);
1308 return get_irn_n(node, 0);
1312 set_DivMod_mem (ir_node *node, ir_node *mem) {
1313 assert (node->op == op_DivMod);
1314 set_irn_n(node, 0, mem);
1320 get_Div_mem (ir_node *node) {
1321 assert (node->op == op_Div);
1322 return get_irn_n(node, 0);
1326 set_Div_mem (ir_node *node, ir_node *mem) {
1327 assert (node->op == op_Div);
1328 set_irn_n(node, 0, mem);
1334 get_Mod_mem (ir_node *node) {
1335 assert (node->op == op_Mod);
1336 return get_irn_n(node, 0);
1340 set_Mod_mem (ir_node *node, ir_node *mem) {
1341 assert (node->op == op_Mod);
1342 set_irn_n(node, 0, mem);
1359 get_Cast_type (ir_node *node) {
1360 assert (node->op == op_Cast);
1361 return node->attr.cast.totype;
1365 set_Cast_type (ir_node *node, type *to_tp) {
1366 assert (node->op == op_Cast);
1367 node->attr.cast.totype = to_tp;
1371 /* Checks for upcast.
1373 * Returns true if the Cast node casts a class type to a super type.
1375 int is_Cast_upcast(ir_node *node) {
1376 type *totype = get_Cast_type(node);
1377 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1378 ir_graph *myirg = get_irn_irg(node);
1380 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1383 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1384 totype = get_pointer_points_to_type(totype);
1385 fromtype = get_pointer_points_to_type(fromtype);
1390 if (!is_Class_type(totype)) return false;
1391 return is_subclass_of(fromtype, totype);
1394 /* Checks for downcast.
1396 * Returns true if the Cast node casts a class type to a sub type.
1398 int is_Cast_downcast(ir_node *node) {
1399 type *totype = get_Cast_type(node);
1400 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1402 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1405 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1406 totype = get_pointer_points_to_type(totype);
1407 fromtype = get_pointer_points_to_type(fromtype);
1412 if (!is_Class_type(totype)) return false;
1413 return is_subclass_of(totype, fromtype);
1417 (is_unop)(const ir_node *node) {
1418 return _is_unop(node);
1422 get_unop_op (ir_node *node) {
1423 if (node->op->opar == oparity_unary)
1424 return get_irn_n(node, node->op->op_index);
1426 assert(node->op->opar == oparity_unary);
1431 set_unop_op (ir_node *node, ir_node *op) {
1432 if (node->op->opar == oparity_unary)
1433 set_irn_n(node, node->op->op_index, op);
1435 assert(node->op->opar == oparity_unary);
1439 (is_binop)(const ir_node *node) {
1440 return _is_binop(node);
1444 get_binop_left (ir_node *node) {
1445 if (node->op->opar == oparity_binary)
1446 return get_irn_n(node, node->op->op_index);
1448 assert(node->op->opar == oparity_binary);
1453 set_binop_left (ir_node *node, ir_node *left) {
1454 if (node->op->opar == oparity_binary)
1455 set_irn_n(node, node->op->op_index, left);
1457 assert (node->op->opar == oparity_binary);
1461 get_binop_right (ir_node *node) {
1462 if (node->op->opar == oparity_binary)
1463 return get_irn_n(node, node->op->op_index + 1);
1465 assert(node->op->opar == oparity_binary);
1470 set_binop_right (ir_node *node, ir_node *right) {
1471 if (node->op->opar == oparity_binary)
1472 set_irn_n(node, node->op->op_index + 1, right);
1474 assert (node->op->opar == oparity_binary);
1477 int is_Phi (const ir_node *n) {
1483 if (op == op_Filter) return get_interprocedural_view();
1486 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1487 (get_irn_arity(n) > 0));
1492 int is_Phi0 (const ir_node *n) {
1495 return ((get_irn_op(n) == op_Phi) &&
1496 (get_irn_arity(n) == 0) &&
1497 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1501 get_Phi_preds_arr (ir_node *node) {
1502 assert (node->op == op_Phi);
1503 return (ir_node **)&(get_irn_in(node)[1]);
1507 get_Phi_n_preds (ir_node *node) {
1508 assert (is_Phi(node) || is_Phi0(node));
1509 return (get_irn_arity(node));
1513 void set_Phi_n_preds (ir_node *node, int n_preds) {
1514 assert (node->op == op_Phi);
1519 get_Phi_pred (ir_node *node, int pos) {
1520 assert (is_Phi(node) || is_Phi0(node));
1521 return get_irn_n(node, pos);
1525 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1526 assert (is_Phi(node) || is_Phi0(node));
1527 set_irn_n(node, pos, pred);
1531 int is_memop(ir_node *node) {
1532 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1535 ir_node *get_memop_mem (ir_node *node) {
1536 assert(is_memop(node));
1537 return get_irn_n(node, 0);
1540 void set_memop_mem (ir_node *node, ir_node *mem) {
1541 assert(is_memop(node));
1542 set_irn_n(node, 0, mem);
1545 ir_node *get_memop_ptr (ir_node *node) {
1546 assert(is_memop(node));
1547 return get_irn_n(node, 1);
1550 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1551 assert(is_memop(node));
1552 set_irn_n(node, 1, ptr);
1556 get_Load_mem (ir_node *node) {
1557 assert (node->op == op_Load);
1558 return get_irn_n(node, 0);
1562 set_Load_mem (ir_node *node, ir_node *mem) {
1563 assert (node->op == op_Load);
1564 set_irn_n(node, 0, mem);
1568 get_Load_ptr (ir_node *node) {
1569 assert (node->op == op_Load);
1570 return get_irn_n(node, 1);
1574 set_Load_ptr (ir_node *node, ir_node *ptr) {
1575 assert (node->op == op_Load);
1576 set_irn_n(node, 1, ptr);
1580 get_Load_mode (ir_node *node) {
1581 assert (node->op == op_Load);
1582 return node->attr.load.load_mode;
1586 set_Load_mode (ir_node *node, ir_mode *mode) {
1587 assert (node->op == op_Load);
1588 node->attr.load.load_mode = mode;
1592 get_Load_volatility (ir_node *node) {
1593 assert (node->op == op_Load);
1594 return node->attr.load.volatility;
1598 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1599 assert (node->op == op_Load);
1600 node->attr.load.volatility = volatility;
1605 get_Store_mem (ir_node *node) {
1606 assert (node->op == op_Store);
1607 return get_irn_n(node, 0);
1611 set_Store_mem (ir_node *node, ir_node *mem) {
1612 assert (node->op == op_Store);
1613 set_irn_n(node, 0, mem);
1617 get_Store_ptr (ir_node *node) {
1618 assert (node->op == op_Store);
1619 return get_irn_n(node, 1);
1623 set_Store_ptr (ir_node *node, ir_node *ptr) {
1624 assert (node->op == op_Store);
1625 set_irn_n(node, 1, ptr);
1629 get_Store_value (ir_node *node) {
1630 assert (node->op == op_Store);
1631 return get_irn_n(node, 2);
1635 set_Store_value (ir_node *node, ir_node *value) {
1636 assert (node->op == op_Store);
1637 set_irn_n(node, 2, value);
1641 get_Store_volatility (ir_node *node) {
1642 assert (node->op == op_Store);
1643 return node->attr.store.volatility;
1647 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1648 assert (node->op == op_Store);
1649 node->attr.store.volatility = volatility;
1654 get_Alloc_mem (ir_node *node) {
1655 assert (node->op == op_Alloc);
1656 return get_irn_n(node, 0);
1660 set_Alloc_mem (ir_node *node, ir_node *mem) {
1661 assert (node->op == op_Alloc);
1662 set_irn_n(node, 0, mem);
1666 get_Alloc_size (ir_node *node) {
1667 assert (node->op == op_Alloc);
1668 return get_irn_n(node, 1);
1672 set_Alloc_size (ir_node *node, ir_node *size) {
1673 assert (node->op == op_Alloc);
1674 set_irn_n(node, 1, size);
1678 get_Alloc_type (ir_node *node) {
1679 assert (node->op == op_Alloc);
1680 return node->attr.a.type = skip_tid(node->attr.a.type);
1684 set_Alloc_type (ir_node *node, type *tp) {
1685 assert (node->op == op_Alloc);
1686 node->attr.a.type = tp;
1690 get_Alloc_where (ir_node *node) {
1691 assert (node->op == op_Alloc);
1692 return node->attr.a.where;
1696 set_Alloc_where (ir_node *node, where_alloc where) {
1697 assert (node->op == op_Alloc);
1698 node->attr.a.where = where;
1703 get_Free_mem (ir_node *node) {
1704 assert (node->op == op_Free);
1705 return get_irn_n(node, 0);
1709 set_Free_mem (ir_node *node, ir_node *mem) {
1710 assert (node->op == op_Free);
1711 set_irn_n(node, 0, mem);
1715 get_Free_ptr (ir_node *node) {
1716 assert (node->op == op_Free);
1717 return get_irn_n(node, 1);
1721 set_Free_ptr (ir_node *node, ir_node *ptr) {
1722 assert (node->op == op_Free);
1723 set_irn_n(node, 1, ptr);
1727 get_Free_size (ir_node *node) {
1728 assert (node->op == op_Free);
1729 return get_irn_n(node, 2);
1733 set_Free_size (ir_node *node, ir_node *size) {
1734 assert (node->op == op_Free);
1735 set_irn_n(node, 2, size);
1739 get_Free_type (ir_node *node) {
1740 assert (node->op == op_Free);
1741 return node->attr.f.type = skip_tid(node->attr.f.type);
1745 set_Free_type (ir_node *node, type *tp) {
1746 assert (node->op == op_Free);
1747 node->attr.f.type = tp;
1751 get_Free_where (ir_node *node) {
1752 assert (node->op == op_Free);
1753 return node->attr.f.where;
1757 set_Free_where (ir_node *node, where_alloc where) {
1758 assert (node->op == op_Free);
1759 node->attr.f.where = where;
1763 get_Sync_preds_arr (ir_node *node) {
1764 assert (node->op == op_Sync);
1765 return (ir_node **)&(get_irn_in(node)[1]);
1769 get_Sync_n_preds (ir_node *node) {
1770 assert (node->op == op_Sync);
1771 return (get_irn_arity(node));
1776 set_Sync_n_preds (ir_node *node, int n_preds) {
1777 assert (node->op == op_Sync);
1782 get_Sync_pred (ir_node *node, int pos) {
1783 assert (node->op == op_Sync);
1784 return get_irn_n(node, pos);
1788 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1789 assert (node->op == op_Sync);
1790 set_irn_n(node, pos, pred);
1793 type *get_Proj_type(ir_node *n)
1796 ir_node *pred = get_Proj_pred(n);
1798 switch (get_irn_opcode(pred)) {
1801 /* Deal with Start / Call here: we need to know the Proj Nr. */
1802 assert(get_irn_mode(pred) == mode_T);
1803 pred_pred = get_Proj_pred(pred);
1804 if (get_irn_op(pred_pred) == op_Start) {
1805 type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1806 tp = get_method_param_type(mtp, get_Proj_proj(n));
1807 } else if (get_irn_op(pred_pred) == op_Call) {
1808 type *mtp = get_Call_type(pred_pred);
1809 tp = get_method_res_type(mtp, get_Proj_proj(n));
1812 case iro_Start: break;
1813 case iro_Call: break;
1815 ir_node *a = get_Load_ptr(pred);
1816 if (get_irn_op(a) == op_Sel)
1817 tp = get_entity_type(get_Sel_entity(a));
1826 get_Proj_pred (const ir_node *node) {
1827 assert (is_Proj(node));
1828 return get_irn_n(node, 0);
1832 set_Proj_pred (ir_node *node, ir_node *pred) {
1833 assert (is_Proj(node));
1834 set_irn_n(node, 0, pred);
1838 get_Proj_proj (const ir_node *node) {
1839 assert (is_Proj(node));
1840 if (get_irn_opcode(node) == iro_Proj) {
1841 return node->attr.proj;
1843 assert(get_irn_opcode(node) == iro_Filter);
1844 return node->attr.filter.proj;
1849 set_Proj_proj (ir_node *node, long proj) {
1850 assert (node->op == op_Proj);
1851 node->attr.proj = proj;
1855 get_Tuple_preds_arr (ir_node *node) {
1856 assert (node->op == op_Tuple);
1857 return (ir_node **)&(get_irn_in(node)[1]);
1861 get_Tuple_n_preds (ir_node *node) {
1862 assert (node->op == op_Tuple);
1863 return (get_irn_arity(node));
1868 set_Tuple_n_preds (ir_node *node, int n_preds) {
1869 assert (node->op == op_Tuple);
1874 get_Tuple_pred (ir_node *node, int pos) {
1875 assert (node->op == op_Tuple);
1876 return get_irn_n(node, pos);
1880 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1881 assert (node->op == op_Tuple);
1882 set_irn_n(node, pos, pred);
1886 get_Id_pred (ir_node *node) {
1887 assert (node->op == op_Id);
1888 return get_irn_n(node, 0);
1892 set_Id_pred (ir_node *node, ir_node *pred) {
1893 assert (node->op == op_Id);
1894 set_irn_n(node, 0, pred);
1897 ir_node *get_Confirm_value (ir_node *node) {
1898 assert (node->op == op_Confirm);
1899 return get_irn_n(node, 0);
1901 void set_Confirm_value (ir_node *node, ir_node *value) {
1902 assert (node->op == op_Confirm);
1903 set_irn_n(node, 0, value);
1905 ir_node *get_Confirm_bound (ir_node *node) {
1906 assert (node->op == op_Confirm);
1907 return get_irn_n(node, 1);
1909 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1910 assert (node->op == op_Confirm);
1911 set_irn_n(node, 0, bound);
1913 pn_Cmp get_Confirm_cmp (ir_node *node) {
1914 assert (node->op == op_Confirm);
1915 return node->attr.confirm_cmp;
1917 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1918 assert (node->op == op_Confirm);
1919 node->attr.confirm_cmp = cmp;
1924 get_Filter_pred (ir_node *node) {
1925 assert(node->op == op_Filter);
1929 set_Filter_pred (ir_node *node, ir_node *pred) {
1930 assert(node->op == op_Filter);
1934 get_Filter_proj(ir_node *node) {
1935 assert(node->op == op_Filter);
1936 return node->attr.filter.proj;
1939 set_Filter_proj (ir_node *node, long proj) {
1940 assert(node->op == op_Filter);
1941 node->attr.filter.proj = proj;
1944 /* Don't use get_irn_arity, get_irn_n in implementation as access
1945 shall work independent of view!!! */
1946 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1947 assert(node->op == op_Filter);
1948 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1949 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1950 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1951 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1952 node->attr.filter.in_cg[0] = node->in[0];
1954 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1957 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1958 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1959 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1960 node->attr.filter.in_cg[pos + 1] = pred;
1962 int get_Filter_n_cg_preds(ir_node *node) {
1963 assert(node->op == op_Filter && node->attr.filter.in_cg);
1964 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1966 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1968 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1970 arity = ARR_LEN(node->attr.filter.in_cg);
1971 assert(pos < arity - 1);
1972 return node->attr.filter.in_cg[pos + 1];
1976 ir_node *get_Mux_sel (ir_node *node) {
1977 assert(node->op == op_Mux);
1980 void set_Mux_sel (ir_node *node, ir_node *sel) {
1981 assert(node->op == op_Mux);
1985 ir_node *get_Mux_false (ir_node *node) {
1986 assert(node->op == op_Mux);
1989 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1990 assert(node->op == op_Mux);
1991 node->in[2] = ir_false;
1994 ir_node *get_Mux_true (ir_node *node) {
1995 assert(node->op == op_Mux);
1998 void set_Mux_true (ir_node *node, ir_node *ir_true) {
1999 assert(node->op == op_Mux);
2000 node->in[3] = ir_true;
2005 get_irn_irg(const ir_node *node) {
2006 if (! is_Block(node))
2007 node = get_nodes_block(node);
2008 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2009 node = get_nodes_block(node);
2010 assert(get_irn_op(node) == op_Block);
2011 return node->attr.block.irg;
2015 /*----------------------------------------------------------------*/
2016 /* Auxiliary routines */
2017 /*----------------------------------------------------------------*/
2020 skip_Proj (ir_node *node) {
2021 /* don't assert node !!! */
2022 if (node && is_Proj(node)) {
2023 return get_Proj_pred(node);
2030 skip_Tuple (ir_node *node) {
2033 if (!get_opt_normalize()) return node;
2035 node = skip_Id(node);
2036 if (get_irn_op(node) == op_Proj) {
2037 pred = skip_Id(get_Proj_pred(node));
2038 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
2039 pred = skip_Id(skip_Tuple(pred));
2040 else if (get_irn_op(pred) == op_Tuple)
2041 return get_Tuple_pred(pred, get_Proj_proj(node));
2046 /* returns operand of node if node is a Cast */
2047 ir_node *skip_Cast (ir_node *node) {
2048 if (node && get_irn_op(node) == op_Cast)
2049 return get_Cast_op(node);
2053 /* returns operand of node if node is a Confirm */
2054 ir_node *skip_Confirm (ir_node *node) {
2055 if (node && get_irn_op(node) == op_Confirm)
2056 return get_Confirm_value(node);
2060 /* skip all high-level ops */
2061 ir_node *skip_HighLevel(ir_node *node) {
2062 if (node && is_op_highlevel(get_irn_op(node)))
2063 return get_irn_n(node, 0);
#if 0
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops.

   NOTE(review): this older variant duplicates the skip_Id definition below;
   it is compiled out to avoid a duplicate symbol at link/compile time. */
ir_node *
skip_Id (ir_node *node) {
  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != node->in[0+1])) {
    ir_node *rem_pred = node->in[0+1];
    ir_node *res;

    assert (get_irn_arity (node) > 0);

    node->in[0+1] = node;
    res = skip_Id(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res;
    return res;
  } else {
    return node;
  }
}
#endif
2096 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2097 than any other approach, as Id chains are resolved and all point to the real node, or
2098 all id's are self loops. */
2100 skip_Id (ir_node *node) {
2102 /* don't assert node !!! */
2104 if (!node || (node->op != op_Id)) return node;
2106 if (!get_opt_normalize()) return node;
2108 /* Don't use get_Id_pred: We get into an endless loop for
2109 self-referencing Ids. */
2110 pred = node->in[0+1];
2112 if (pred->op != op_Id) return pred;
2114 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2115 ir_node *rem_pred, *res;
2117 if (pred->op != op_Id) return pred; /* shortcut */
2120 assert (get_irn_arity (node) > 0);
2122 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2123 res = skip_Id(rem_pred);
2124 if (res->op == op_Id) /* self-loop */ return node;
2126 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2135 (is_Bad)(const ir_node *node) {
2136 return _is_Bad(node);
2140 (is_Const)(const ir_node *node) {
2141 return _is_Const(node);
2145 (is_no_Block)(const ir_node *node) {
2146 return _is_no_Block(node);
2150 (is_Block)(const ir_node *node) {
2151 return _is_Block(node);
2154 /* returns true if node is a Unknown node. */
2156 (is_Unknown)(const ir_node *node) {
2157 return _is_Unknown(node);
2161 is_Proj (const ir_node *node) {
2163 return node->op == op_Proj
2164 || (!get_interprocedural_view() && node->op == op_Filter);
2167 /* Returns true if the operation manipulates control flow. */
2169 is_cfop(const ir_node *node) {
2170 return is_cfopcode(get_irn_op(node));
2173 /* Returns true if the operation manipulates interprocedural control flow:
2174 CallBegin, EndReg, EndExcept */
2175 int is_ip_cfop(const ir_node *node) {
2176 return is_ip_cfopcode(get_irn_op(node));
2179 /* Returns true if the operation can change the control flow because
2182 is_fragile_op(const ir_node *node) {
2183 return is_op_fragile(get_irn_op(node));
2186 /* Returns the memory operand of fragile operations. */
2187 ir_node *get_fragile_op_mem(ir_node *node) {
2188 assert(node && is_fragile_op(node));
2190 switch (get_irn_opcode (node)) {
2199 return get_irn_n(node, 0);
2204 assert(0 && "should not be reached");
2209 /* Returns true if the operation is a forking control flow operation. */
2211 is_forking_op(const ir_node *node) {
2212 return is_op_forking(get_irn_op(node));
2215 type *(get_irn_type)(ir_node *node) {
2216 return _get_irn_type(node);
2219 /** the get_type operation must be always implemented */
2220 static type *get_Null_type(ir_node *n) {
2224 /* set the get_type operation */
2225 ir_op *firm_set_default_get_type(ir_op *op)
2228 case iro_Const: op->get_type = get_Const_type; break;
2229 case iro_SymConst: op->get_type = get_SymConst_value_type; break;
2230 case iro_Cast: op->get_type = get_Cast_type; break;
2231 case iro_Proj: op->get_type = get_Proj_type; break;
2232 default: op->get_type = get_Null_type; break;
2237 #ifdef DEBUG_libfirm
2238 void dump_irn (ir_node *n) {
2239 int i, arity = get_irn_arity(n);
2240 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2242 ir_node *pred = get_irn_n(n, -1);
2243 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2244 get_irn_node_nr(pred), (void *)pred);
2246 printf(" preds: \n");
2247 for (i = 0; i < arity; ++i) {
2248 ir_node *pred = get_irn_n(n, i);
2249 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2250 get_irn_node_nr(pred), (void *)pred);
2254 #else /* DEBUG_libfirm */
2255 void dump_irn (ir_node *n) {}
2256 #endif /* DEBUG_libfirm */