3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
34 /* some constants fixing the positions of nodes predecessors
36 #define CALL_PARAM_OFFSET 2
37 #define FUNCCALL_PARAM_OFFSET 1
38 #define SEL_INDEX_OFFSET 2
39 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
40 #define END_KEEPALIVE_OFFSET 0
42 static const char *pnc_name_arr [] = {
43 "False", "Eq", "Lt", "Le",
44 "Gt", "Ge", "Lg", "Leg", "Uo",
45 "Ue", "Ul", "Ule", "Ug", "Uge",
50  * returns the pnc name from a pnc constant
52 const char *get_pnc_string(int pnc) {
53 return pnc_name_arr[pnc];
57 * Calculates the negated (Complement(R)) pnc condition.
59 int get_negated_pnc(int pnc, ir_mode *mode) {
62 /* do NOT add the Uo bit for non-floating point values */
63 if (! mode_is_float(mode))
69 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
71 get_inversed_pnc(int pnc) {
72 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
73 int lesser = pnc & pn_Cmp_Lt;
74 int greater = pnc & pn_Cmp_Gt;
76 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
81 const char *pns_name_arr [] = {
82 "initial_exec", "global_store",
83 "frame_base", "globals", "args"
86 const char *symconst_name_arr [] = {
87 "type_tag", "size", "addr_name", "addr_ent"
91 * Indicates, whether additional data can be registered to ir nodes.
92 * If set to 1, this is not possible anymore.
94 static int forbid_new_data = 0;
97 * The amount of additional space for custom data to be allocated upon
98 * creating a new node.
100 unsigned firm_add_node_size = 0;
103 /* register new space for every node */
104 unsigned register_additional_node_data(unsigned size) {
105 assert(!forbid_new_data && "Too late to register additional node data");
110 return firm_add_node_size += size;
116 /* Forbid the addition of new data to an ir node. */
121 * irnode constructor.
122 * Create a new irnode in irg, with an op, mode, arity and
123 * some incoming irnodes.
124 * If arity is negative, a node with a dynamic array is created.
/**
 * irnode constructor.
 * Creates a new irnode in irg, with an op, mode, arity and some incoming
 * irnodes.  If arity is negative a node with a dynamic array is created
 * (NEW_ARR_F branch), otherwise the in-array lives on irg's obstack.
 * NOTE(review): this block appears truncated by extraction (missing local
 * declarations, '{', branch keywords and the final return) — comments below
 * describe the visible statements only.
 */
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
int arity, ir_node **in)
/* Total allocation: node header up to 'attr', plus the op-specific
   attribute size, plus globally registered custom data. */
size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
assert(irg && op && mode);
p = obstack_alloc (irg->obst, node_size);
memset(p, 0, node_size);
/* Custom data precedes the node; the ir_node itself starts after it. */
res = (ir_node *) (p + firm_add_node_size);
res->kind = k_ir_node;
/* Dynamic-arity case: flexible array with one slot reserved for the block. */
res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
/* Fixed-arity case: obstack array, slot 0 is the block predecessor. */
res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
set_irn_dbg_info(res, db);
res->node_nr = get_irp_new_node_nr();
#if FIRM_EDGES_INPLACE
/* With inplace edges: initialize out-edge lists and notify the edge
   module about every initial predecessor.  Blocks have no -1 (block)
   input, hence the not_a_block offset. */
int not_a_block = is_no_Block(res);
INIT_LIST_HEAD(&res->edge_info.outs_head);
INIT_LIST_HEAD(&res->attr.block.succ_head);
for (i = 0, n = arity + not_a_block; i < n; ++i)
edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
hook_new_node(irg, res);
179 /*-- getting some parameters from ir_nodes --*/
182 (is_ir_node)(const void *thing) {
183 return _is_ir_node(thing);
187 (get_irn_intra_arity)(const ir_node *node) {
188 return _get_irn_intra_arity(node);
192 (get_irn_inter_arity)(const ir_node *node) {
193 return _get_irn_inter_arity(node);
196 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
199 (get_irn_arity)(const ir_node *node) {
200 return _get_irn_arity(node);
203 /* Returns the array with ins. This array is shifted with respect to the
204 array accessed by get_irn_n: The block operand is at position 0 not -1.
205 (@@@ This should be changed.)
206 The order of the predecessors in this array is not guaranteed, except that
207 lists of operands as predecessors of Block or arguments of a Call are
210 get_irn_in (const ir_node *node) {
212 if (get_interprocedural_view()) { /* handle Filter and Block specially */
213 if (get_irn_opcode(node) == iro_Filter) {
214 assert(node->attr.filter.in_cg);
215 return node->attr.filter.in_cg;
216 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
217 return node->attr.block.in_cg;
219 /* else fall through */
225 set_irn_in (ir_node *node, int arity, ir_node **in) {
228 if (get_interprocedural_view()) { /* handle Filter and Block specially */
229 if (get_irn_opcode(node) == iro_Filter) {
230 assert(node->attr.filter.in_cg);
231 arr = &node->attr.filter.in_cg;
232 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
233 arr = &node->attr.block.in_cg;
240 if (arity != ARR_LEN(*arr) - 1) {
241 ir_node * block = (*arr)[0];
242 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
245 fix_backedges(current_ir_graph->obst, node);
246 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
250 (get_irn_intra_n)(const ir_node *node, int n) {
251 return _get_irn_intra_n (node, n);
255 (get_irn_inter_n)(const ir_node *node, int n) {
256 return _get_irn_inter_n (node, n);
259 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
262 (get_irn_n)(const ir_node *node, int n) {
263 return _get_irn_n(node, n);
267 set_irn_n (ir_node *node, int n, ir_node *in) {
268 assert(node && node->kind == k_ir_node);
270 assert(n < get_irn_arity(node));
271 assert(in && in->kind == k_ir_node);
273 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
274 /* Change block pred in both views! */
275 node->in[n + 1] = in;
276 assert(node->attr.filter.in_cg);
277 node->attr.filter.in_cg[n + 1] = in;
280 if (get_interprocedural_view()) { /* handle Filter and Block specially */
281 if (get_irn_opcode(node) == iro_Filter) {
282 assert(node->attr.filter.in_cg);
283 node->attr.filter.in_cg[n + 1] = in;
285 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
286 node->attr.block.in_cg[n + 1] = in;
289 /* else fall through */
293 hook_set_irn_n(node, n, in, node->in[n + 1]);
295 /* Here, we rely on src and tgt being in the current ir graph */
296 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
298 node->in[n + 1] = in;
302 (get_irn_mode)(const ir_node *node) {
303 return _get_irn_mode(node);
307 (set_irn_mode)(ir_node *node, ir_mode *mode)
309 _set_irn_mode(node, mode);
313 get_irn_modecode (const ir_node *node)
316 return node->mode->code;
319 /** Gets the string representation of the mode .*/
321 get_irn_modename (const ir_node *node)
324 return get_mode_name(node->mode);
328 get_irn_modeident (const ir_node *node)
331 return get_mode_ident(node->mode);
335 (get_irn_op)(const ir_node *node)
337 return _get_irn_op(node);
340 /* should be private to the library: */
342 set_irn_op (ir_node *node, ir_op *op)
349 (get_irn_opcode)(const ir_node *node)
351 return _get_irn_opcode(node);
355 get_irn_opname (const ir_node *node)
358 if ((get_irn_op((ir_node *)node) == op_Phi) &&
359 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
360 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
361 return get_id_str(node->op->name);
365 get_irn_opident (const ir_node *node)
368 return node->op->name;
372 (get_irn_visited)(const ir_node *node)
374 return _get_irn_visited(node);
378 (set_irn_visited)(ir_node *node, unsigned long visited)
380 _set_irn_visited(node, visited);
384 (mark_irn_visited)(ir_node *node) {
385 _mark_irn_visited(node);
389 (irn_not_visited)(const ir_node *node) {
390 return _irn_not_visited(node);
394 (irn_visited)(const ir_node *node) {
395 return _irn_visited(node);
399 (set_irn_link)(ir_node *node, void *link) {
400 _set_irn_link(node, link);
404 (get_irn_link)(const ir_node *node) {
405 return _get_irn_link(node);
409 (get_irn_pinned)(const ir_node *node) {
410 return _get_irn_pinned(node);
414 (is_irn_pinned_in_irg) (const ir_node *node) {
415 return _is_irn_pinned_in_irg(node);
/* Sets the pin state of a node that carries exception attributes.
 * NOTE(review): the body of the Tuple check looks truncated here — the
 * intent (per the comment) is to ignore nodes optimized into Tuples;
 * presumably an early 'return;' belongs after the op_Tuple test. */
void set_irn_pinned(ir_node *node, op_pin_state state) {
/* due to optimization an opt may be turned into a Tuple */
if (get_irn_op(node) == op_Tuple)
/* Only ops that can at least be exception-pinned carry a pin state. */
assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
assert(state == op_pin_state_pinned || state == op_pin_state_floats);
node->attr.except.pin_state = state;
429 #ifdef DO_HEAPANALYSIS
430 /* Access the abstract interpretation information of a node.
431 Returns NULL if no such information is available. */
432 struct abstval *get_irn_abst_value(ir_node *n) {
435 /* Set the abstract interpretation information of a node. */
436 void set_irn_abst_value(ir_node *n, struct abstval *os) {
439 struct section *firm_get_irn_section(ir_node *n) {
442 void firm_set_irn_section(ir_node *n, struct section *s) {
446 /* Dummies needed for firmjni. */
447 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
448 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
449 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
450 void firm_set_irn_section(ir_node *n, struct section *s) {}
451 #endif /* DO_HEAPANALYSIS */
454 /* Outputs a unique number for this node */
456 get_irn_node_nr(const ir_node *node) {
459 return node->node_nr;
466 get_irn_const_attr (ir_node *node)
468 assert (node->op == op_Const);
469 return node->attr.con;
473 get_irn_proj_attr (ir_node *node)
475 assert (node->op == op_Proj);
476 return node->attr.proj;
480 get_irn_alloc_attr (ir_node *node)
482 assert (node->op == op_Alloc);
487 get_irn_free_attr (ir_node *node)
489 assert (node->op == op_Free);
494 get_irn_symconst_attr (ir_node *node)
496 assert (node->op == op_SymConst);
501 get_irn_call_attr (ir_node *node)
503 assert (node->op == op_Call);
504 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
508 get_irn_sel_attr (ir_node *node)
510 assert (node->op == op_Sel);
515 get_irn_phi_attr (ir_node *node)
517 assert (node->op == op_Phi);
518 return node->attr.phi0_pos;
522 get_irn_block_attr (ir_node *node)
524 assert (node->op == op_Block);
525 return node->attr.block;
529 get_irn_load_attr (ir_node *node)
531 assert (node->op == op_Load);
532 return node->attr.load;
536 get_irn_store_attr (ir_node *node)
538 assert (node->op == op_Store);
539 return node->attr.store;
543 get_irn_except_attr (ir_node *node)
545 assert (node->op == op_Div || node->op == op_Quot ||
546 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
547 return node->attr.except;
550 /** manipulate fields of individual nodes **/
552 /* this works for all except Block */
554 get_nodes_block (const ir_node *node) {
555 assert (!(node->op == op_Block));
556 assert (is_irn_pinned_in_irg(node) && "block info may be incorrect");
557 return get_irn_n(node, -1);
561 set_nodes_block (ir_node *node, ir_node *block) {
562 assert (!(node->op == op_Block));
563 set_irn_n(node, -1, block);
566 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
567 * from Start. If so returns frame type, else Null. */
568 type *is_frame_pointer(ir_node *n) {
569 if ((get_irn_op(n) == op_Proj) &&
570 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
571 ir_node *start = get_Proj_pred(n);
572 if (get_irn_op(start) == op_Start) {
573 return get_irg_frame_type(get_irn_irg(start));
579 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
580 * from Start. If so returns global type, else Null. */
581 type *is_globals_pointer(ir_node *n) {
582 if ((get_irn_op(n) == op_Proj) &&
583 (get_Proj_proj(n) == pn_Start_P_globals)) {
584 ir_node *start = get_Proj_pred(n);
585 if (get_irn_op(start) == op_Start) {
586 return get_glob_type();
592 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
593 * from Start. If so returns 1, else 0. */
594 int is_value_arg_pointer(ir_node *n) {
595 if ((get_irn_op(n) == op_Proj) &&
596 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
597 (get_irn_op(get_Proj_pred(n)) == op_Start))
602 /* Returns an array with the predecessors of the Block. Depending on
603 the implementation of the graph data structure this can be a copy of
604 the internal representation of predecessors as well as the internal
605 array itself. Therefore writing to this array might obstruct the ir. */
607 get_Block_cfgpred_arr (ir_node *node)
609 assert ((node->op == op_Block));
610 return (ir_node **)&(get_irn_in(node)[1]);
614 (get_Block_n_cfgpreds)(ir_node *node) {
615 return get_Block_n_cfgpreds(node);
619 (get_Block_cfgpred)(ir_node *node, int pos) {
620 return get_Block_cfgpred(node, pos);
624 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
625 assert (node->op == op_Block);
626 set_irn_n(node, pos, pred);
630 (get_Block_cfgpred_block)(ir_node *node, int pos) {
631 return _get_Block_cfgpred_block(node, pos);
635 get_Block_matured (ir_node *node) {
636 assert (node->op == op_Block);
637 return node->attr.block.matured;
641 set_Block_matured (ir_node *node, bool matured) {
642 assert (node->op == op_Block);
643 node->attr.block.matured = matured;
647 (get_Block_block_visited)(ir_node *node) {
648 return _get_Block_block_visited(node);
652 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
653 _set_Block_block_visited(node, visit);
656 /* For this current_ir_graph must be set. */
658 (mark_Block_block_visited)(ir_node *node) {
659 _mark_Block_block_visited(node);
663 (Block_not_block_visited)(ir_node *node) {
664 return _Block_not_block_visited(node);
668 get_Block_graph_arr (ir_node *node, int pos) {
669 assert (node->op == op_Block);
670 return node->attr.block.graph_arr[pos+1];
674 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
675 assert (node->op == op_Block);
676 node->attr.block.graph_arr[pos+1] = value;
679 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
680 assert(node->op == op_Block);
681 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
682 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
683 node->attr.block.in_cg[0] = NULL;
684 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
686 /* Fix backedge array. fix_backedges operates depending on
687 interprocedural_view. */
688 int ipv = get_interprocedural_view();
689 set_interprocedural_view(true);
690 fix_backedges(current_ir_graph->obst, node);
691 set_interprocedural_view(ipv);
694 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
697 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
698 assert(node->op == op_Block &&
699 node->attr.block.in_cg &&
700 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
701 node->attr.block.in_cg[pos + 1] = pred;
704 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
705 assert(node->op == op_Block);
706 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
709 int get_Block_cg_n_cfgpreds(ir_node * node) {
710 assert(node->op == op_Block);
711 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
714 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
715 assert(node->op == op_Block && node->attr.block.in_cg);
716 return node->attr.block.in_cg[pos + 1];
719 void remove_Block_cg_cfgpred_arr(ir_node * node) {
720 assert(node->op == op_Block);
721 node->attr.block.in_cg = NULL;
724 ir_node *(set_Block_dead)(ir_node *block) {
725 return _set_Block_dead(block);
728 int (is_Block_dead)(const ir_node *block) {
729 return _is_Block_dead(block);
732 ir_extblk *get_Block_extbb(const ir_node *block) {
733 assert(is_Block(block));
734 return block->attr.block.extblk;
737 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
738 assert(is_Block(block));
739 block->attr.block.extblk = extblk;
743 set_Start_irg(ir_node *node, ir_graph *irg) {
744 assert(node->op == op_Start);
745 assert(is_ir_graph(irg));
746 assert(0 && " Why set irg? -- use set_irn_irg");
750 get_End_n_keepalives(ir_node *end) {
751 assert (end->op == op_End);
752 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
756 get_End_keepalive(ir_node *end, int pos) {
757 assert (end->op == op_End);
758 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
762 add_End_keepalive (ir_node *end, ir_node *ka) {
763 assert (end->op == op_End);
764 ARR_APP1 (ir_node *, end->in, ka);
768 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
769 assert (end->op == op_End);
770 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
/* Frees the keep-alive array of an End node and poisons the in pointer
 * so later accesses fault instead of reading freed memory. */
free_End (ir_node *end) {
assert (end->op == op_End);
DEL_ARR_F(end->in); /* GL @@@ does not work! */
end->in = NULL; /* @@@ make sure we get an error if we use the
in array afterwards ... */
784 > Implementing the case construct (which is where the constant Proj node is
785 > important) involves far more than simply determining the constant values.
786 > We could argue that this is more properly a function of the translator from
787 > Firm to the target machine. That could be done if there was some way of
788 > projecting "default" out of the Cond node.
789 I know it's complicated.
790  Basically there are two problems:
791 - determining the gaps between the projs
792 - determining the biggest case constant to know the proj number for
794 I see several solutions:
795 1. Introduce a ProjDefault node. Solves both problems.
796 This means to extend all optimizations executed during construction.
797 2. Give the Cond node for switch two flavors:
798 a) there are no gaps in the projs (existing flavor)
799 b) gaps may exist, default proj is still the Proj with the largest
800 projection number. This covers also the gaps.
801 3. Fix the semantic of the Cond to that of 2b)
803 Solution 2 seems to be the best:
804 Computing the gaps in the Firm representation is not too hard, i.e.,
805 libFIRM can implement a routine that transforms between the two
806 flavours. This is also possible for 1) but 2) does not require to
807 change any existing optimization.
808 Further it should be far simpler to determine the biggest constant than
810 I don't want to choose 3) as 2a) seems to have advantages for
811 dataflow analysis and 3) does not allow to convert the representation to
815 get_Cond_selector (ir_node *node) {
816 assert (node->op == op_Cond);
817 return get_irn_n(node, 0);
821 set_Cond_selector (ir_node *node, ir_node *selector) {
822 assert (node->op == op_Cond);
823 set_irn_n(node, 0, selector);
827 get_Cond_kind (ir_node *node) {
828 assert (node->op == op_Cond);
829 return node->attr.c.kind;
833 set_Cond_kind (ir_node *node, cond_kind kind) {
834 assert (node->op == op_Cond);
835 node->attr.c.kind = kind;
839 get_Cond_defaultProj (ir_node *node) {
840 assert (node->op == op_Cond);
841 return node->attr.c.default_proj;
845 get_Return_mem (ir_node *node) {
846 assert (node->op == op_Return);
847 return get_irn_n(node, 0);
851 set_Return_mem (ir_node *node, ir_node *mem) {
852 assert (node->op == op_Return);
853 set_irn_n(node, 0, mem);
857 get_Return_n_ress (ir_node *node) {
858 assert (node->op == op_Return);
859 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
863 get_Return_res_arr (ir_node *node)
865 assert ((node->op == op_Return));
866 if (get_Return_n_ress(node) > 0)
867 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
874 set_Return_n_res (ir_node *node, int results) {
875 assert (node->op == op_Return);
880 get_Return_res (ir_node *node, int pos) {
881 assert (node->op == op_Return);
882 assert (get_Return_n_ress(node) > pos);
883 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
887 set_Return_res (ir_node *node, int pos, ir_node *res){
888 assert (node->op == op_Return);
889 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
893 get_Raise_mem (ir_node *node) {
894 assert (node->op == op_Raise);
895 return get_irn_n(node, 0);
899 set_Raise_mem (ir_node *node, ir_node *mem) {
900 assert (node->op == op_Raise);
901 set_irn_n(node, 0, mem);
905 get_Raise_exo_ptr (ir_node *node) {
906 assert (node->op == op_Raise);
907 return get_irn_n(node, 1);
911 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
912 assert (node->op == op_Raise);
913 set_irn_n(node, 1, exo_ptr);
916 tarval *(get_Const_tarval)(ir_node *node) {
917 return _get_Const_tarval(node);
921 set_Const_tarval (ir_node *node, tarval *con) {
922 assert (node->op == op_Const);
923 node->attr.con.tv = con;
926 cnst_classify_t (classify_Const)(ir_node *node)
928 return _classify_Const(node);
932 /* The source language type. Must be an atomic type. Mode of type must
933 be mode of node. For tarvals from entities type must be pointer to
936 get_Const_type (ir_node *node) {
937 assert (node->op == op_Const);
938 return node->attr.con.tp;
942 set_Const_type (ir_node *node, type *tp) {
943 assert (node->op == op_Const);
944 if (tp != firm_unknown_type) {
945 assert (is_atomic_type(tp));
946 assert (get_type_mode(tp) == get_irn_mode(node));
948 node->attr.con.tp = tp;
953 get_SymConst_kind (const ir_node *node) {
954 assert (node->op == op_SymConst);
955 return node->attr.i.num;
959 set_SymConst_kind (ir_node *node, symconst_kind num) {
960 assert (node->op == op_SymConst);
961 node->attr.i.num = num;
965 get_SymConst_type (ir_node *node) {
966 assert ( (node->op == op_SymConst)
967 && ( get_SymConst_kind(node) == symconst_type_tag
968 || get_SymConst_kind(node) == symconst_size));
969 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
973 set_SymConst_type (ir_node *node, type *tp) {
974 assert ( (node->op == op_SymConst)
975 && ( get_SymConst_kind(node) == symconst_type_tag
976 || get_SymConst_kind(node) == symconst_size));
977 node->attr.i.sym.type_p = tp;
981 get_SymConst_name (ir_node *node) {
982 assert ( (node->op == op_SymConst)
983 && (get_SymConst_kind(node) == symconst_addr_name));
984 return node->attr.i.sym.ident_p;
988 set_SymConst_name (ir_node *node, ident *name) {
989 assert ( (node->op == op_SymConst)
990 && (get_SymConst_kind(node) == symconst_addr_name));
991 node->attr.i.sym.ident_p = name;
995 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
996 entity *get_SymConst_entity (ir_node *node) {
997 assert ( (node->op == op_SymConst)
998 && (get_SymConst_kind (node) == symconst_addr_ent));
999 return node->attr.i.sym.entity_p;
1002 void set_SymConst_entity (ir_node *node, entity *ent) {
1003 assert ( (node->op == op_SymConst)
1004 && (get_SymConst_kind(node) == symconst_addr_ent));
1005 node->attr.i.sym.entity_p = ent;
1008 union symconst_symbol
1009 get_SymConst_symbol (ir_node *node) {
1010 assert (node->op == op_SymConst);
1011 return node->attr.i.sym;
1015 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1016 assert (node->op == op_SymConst);
1017 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1018 node->attr.i.sym = sym;
1022 get_SymConst_value_type (ir_node *node) {
1023 assert (node->op == op_SymConst);
1024 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1025 return node->attr.i.tp;
1029 set_SymConst_value_type (ir_node *node, type *tp) {
1030 assert (node->op == op_SymConst);
1031 node->attr.i.tp = tp;
1035 get_Sel_mem (ir_node *node) {
1036 assert (node->op == op_Sel);
1037 return get_irn_n(node, 0);
1041 set_Sel_mem (ir_node *node, ir_node *mem) {
1042 assert (node->op == op_Sel);
1043 set_irn_n(node, 0, mem);
1047 get_Sel_ptr (ir_node *node) {
1048 assert (node->op == op_Sel);
1049 return get_irn_n(node, 1);
1053 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1054 assert (node->op == op_Sel);
1055 set_irn_n(node, 1, ptr);
1059 get_Sel_n_indexs (ir_node *node) {
1060 assert (node->op == op_Sel);
1061 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1065 get_Sel_index_arr (ir_node *node)
1067 assert ((node->op == op_Sel));
1068 if (get_Sel_n_indexs(node) > 0)
1069 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1075 get_Sel_index (ir_node *node, int pos) {
1076 assert (node->op == op_Sel);
1077 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1081 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1082 assert (node->op == op_Sel);
1083 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1087 get_Sel_entity (ir_node *node) {
1088 assert (node->op == op_Sel);
1089 return node->attr.s.ent;
1093 set_Sel_entity (ir_node *node, entity *ent) {
1094 assert (node->op == op_Sel);
1095 node->attr.s.ent = ent;
1099 get_InstOf_ent (ir_node *node) {
1100 assert (node->op = op_InstOf);
1101 return (node->attr.io.ent);
1105 set_InstOf_ent (ir_node *node, type *ent) {
1106 assert (node->op = op_InstOf);
1107 node->attr.io.ent = ent;
1111 get_InstOf_store (ir_node *node) {
1112 assert (node->op = op_InstOf);
1113 return (get_irn_n (node, 0));
1117 set_InstOf_store (ir_node *node, ir_node *obj) {
1118 assert (node->op = op_InstOf);
1119 set_irn_n (node, 0, obj);
1123 get_InstOf_obj (ir_node *node) {
1124 assert (node->op = op_InstOf);
1125 return (get_irn_n (node, 1));
1129 set_InstOf_obj (ir_node *node, ir_node *obj) {
1130 assert (node->op = op_InstOf);
1131 set_irn_n (node, 1, obj);
1135 /* For unary and binary arithmetic operations the access to the
1136 operands can be factored out. Left is the first, right the
1137 second arithmetic value as listed in tech report 0999-33.
1138 unops are: Minus, Abs, Not, Conv, Cast
1139 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1140 Shr, Shrs, Rotate, Cmp */
1144 get_Call_mem (ir_node *node) {
1145 assert (node->op == op_Call);
1146 return get_irn_n(node, 0);
1150 set_Call_mem (ir_node *node, ir_node *mem) {
1151 assert (node->op == op_Call);
1152 set_irn_n(node, 0, mem);
1156 get_Call_ptr (ir_node *node) {
1157 assert (node->op == op_Call);
1158 return get_irn_n(node, 1);
1162 set_Call_ptr (ir_node *node, ir_node *ptr) {
1163 assert (node->op == op_Call);
1164 set_irn_n(node, 1, ptr);
1168 get_Call_param_arr (ir_node *node) {
1169 assert (node->op == op_Call);
1170 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1174 get_Call_n_params (ir_node *node) {
1175 assert (node->op == op_Call);
1176 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1180 get_Call_arity (ir_node *node) {
1181 assert (node->op == op_Call);
1182 return get_Call_n_params(node);
1186 set_Call_arity (ir_node *node, ir_node *arity) {
1187 assert (node->op == op_Call);
1192 get_Call_param (ir_node *node, int pos) {
1193 assert (node->op == op_Call);
1194 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1198 set_Call_param (ir_node *node, int pos, ir_node *param) {
1199 assert (node->op == op_Call);
1200 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1204 get_Call_type (ir_node *node) {
1205 assert (node->op == op_Call);
1206 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1210 set_Call_type (ir_node *node, type *tp) {
1211 assert (node->op == op_Call);
1212 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1213 node->attr.call.cld_tp = tp;
1216 int Call_has_callees(ir_node *node) {
1217 assert(node && node->op == op_Call);
1218 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1219 (node->attr.call.callee_arr != NULL));
1222 int get_Call_n_callees(ir_node * node) {
1223 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1224 return ARR_LEN(node->attr.call.callee_arr);
1227 entity * get_Call_callee(ir_node * node, int pos) {
1228 assert(pos >= 0 && pos < get_Call_n_callees(node));
1229 return node->attr.call.callee_arr[pos];
1232 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1233 assert(node->op == op_Call);
1234 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1235 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1237 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1240 void remove_Call_callee_arr(ir_node * node) {
1241 assert(node->op == op_Call);
1242 node->attr.call.callee_arr = NULL;
1245 ir_node * get_CallBegin_ptr (ir_node *node) {
1246 assert(node->op == op_CallBegin);
1247 return get_irn_n(node, 0);
1249 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1250 assert(node->op == op_CallBegin);
1251 set_irn_n(node, 0, ptr);
1253 ir_node * get_CallBegin_call (ir_node *node) {
1254 assert(node->op == op_CallBegin);
1255 return node->attr.callbegin.call;
1257 void set_CallBegin_call (ir_node *node, ir_node *call) {
1258 assert(node->op == op_CallBegin);
1259 node->attr.callbegin.call = call;
1264 ir_node * get_##OP##_left(ir_node *node) { \
1265 assert(node->op == op_##OP); \
1266 return get_irn_n(node, node->op->op_index); \
1268 void set_##OP##_left(ir_node *node, ir_node *left) { \
1269 assert(node->op == op_##OP); \
1270 set_irn_n(node, node->op->op_index, left); \
1272 ir_node *get_##OP##_right(ir_node *node) { \
1273 assert(node->op == op_##OP); \
1274 return get_irn_n(node, node->op->op_index + 1); \
1276 void set_##OP##_right(ir_node *node, ir_node *right) { \
1277 assert(node->op == op_##OP); \
1278 set_irn_n(node, node->op->op_index + 1, right); \
1282 ir_node *get_##OP##_op(ir_node *node) { \
1283 assert(node->op == op_##OP); \
1284 return get_irn_n(node, node->op->op_index); \
1286 void set_##OP##_op (ir_node *node, ir_node *op) { \
1287 assert(node->op == op_##OP); \
1288 set_irn_n(node, node->op->op_index, op); \
1298 get_Quot_mem (ir_node *node) {
1299 assert (node->op == op_Quot);
1300 return get_irn_n(node, 0);
1304 set_Quot_mem (ir_node *node, ir_node *mem) {
1305 assert (node->op == op_Quot);
1306 set_irn_n(node, 0, mem);
1312 get_DivMod_mem (ir_node *node) {
1313 assert (node->op == op_DivMod);
1314 return get_irn_n(node, 0);
1318 set_DivMod_mem (ir_node *node, ir_node *mem) {
1319 assert (node->op == op_DivMod);
1320 set_irn_n(node, 0, mem);
1326 get_Div_mem (ir_node *node) {
1327 assert (node->op == op_Div);
1328 return get_irn_n(node, 0);
1332 set_Div_mem (ir_node *node, ir_node *mem) {
1333 assert (node->op == op_Div);
1334 set_irn_n(node, 0, mem);
1340 get_Mod_mem (ir_node *node) {
1341 assert (node->op == op_Mod);
1342 return get_irn_n(node, 0);
1346 set_Mod_mem (ir_node *node, ir_node *mem) {
1347 assert (node->op == op_Mod);
1348 set_irn_n(node, 0, mem);
1365 get_Cast_type (ir_node *node) {
1366 assert (node->op == op_Cast);
1367 return node->attr.cast.totype;
1371 set_Cast_type (ir_node *node, type *to_tp) {
1372 assert (node->op == op_Cast);
1373 node->attr.cast.totype = to_tp;
1377 /* Checks for upcast.
1379 * Returns true if the Cast node casts a class type to a super type.
1381 int is_Cast_upcast(ir_node *node) {
1382 type *totype = get_Cast_type(node);
1383 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1384 ir_graph *myirg = get_irn_irg(node);
1386 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1389 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1390 totype = get_pointer_points_to_type(totype);
1391 fromtype = get_pointer_points_to_type(fromtype);
1396 if (!is_Class_type(totype)) return false;
1397 return is_subclass_of(fromtype, totype);
1400 /* Checks for downcast.
1402 * Returns true if the Cast node casts a class type to a sub type.
1404 int is_Cast_downcast(ir_node *node) {
1405 type *totype = get_Cast_type(node);
1406 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1408 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1411 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1412 totype = get_pointer_points_to_type(totype);
1413 fromtype = get_pointer_points_to_type(fromtype);
1418 if (!is_Class_type(totype)) return false;
1419 return is_subclass_of(totype, fromtype);
/* is_unop: out-of-line wrapper around the _is_unop macro/inline. */
1423 (is_unop)(const ir_node *node) {
1424 return _is_unop(node);
/* get_unop_op: return the single operand of a unary operation.
   The operand position is taken from the op descriptor (op_index). */
1428 get_unop_op (ir_node *node) {
1429 if (node->op->opar == oparity_unary)
1430 return get_irn_n(node, node->op->op_index);
/* Reaching here means the node was not unary; fail in debug builds. */
1432 assert(node->op->opar == oparity_unary);
/* set_unop_op: replace the single operand of a unary operation. */
1437 set_unop_op (ir_node *node, ir_node *op) {
1438 if (node->op->opar == oparity_unary)
1439 set_irn_n(node, node->op->op_index, op);
1441 assert(node->op->opar == oparity_unary);
/* is_binop: out-of-line wrapper around the _is_binop macro/inline. */
1445 (is_binop)(const ir_node *node) {
1446 return _is_binop(node);
/* get_binop_left: left operand of a binary op, at op_index. */
1450 get_binop_left (ir_node *node) {
1451 if (node->op->opar == oparity_binary)
1452 return get_irn_n(node, node->op->op_index);
1454 assert(node->op->opar == oparity_binary);
/* set_binop_left: replace the left operand of a binary op. */
1459 set_binop_left (ir_node *node, ir_node *left) {
1460 if (node->op->opar == oparity_binary)
1461 set_irn_n(node, node->op->op_index, left);
1463 assert (node->op->opar == oparity_binary);
/* get_binop_right: right operand of a binary op, at op_index + 1. */
1467 get_binop_right (ir_node *node) {
1468 if (node->op->opar == oparity_binary)
1469 return get_irn_n(node, node->op->op_index + 1);
1471 assert(node->op->opar == oparity_binary);
/* set_binop_right: replace the right operand of a binary op. */
1476 set_binop_right (ir_node *node, ir_node *right) {
1477 if (node->op->opar == oparity_binary)
1478 set_irn_n(node, node->op->op_index + 1, right);
1480 assert (node->op->opar == oparity_binary);
/* is_Phi: true for proper Phi nodes (and, in interprocedural view, Filter
   nodes, which act as Phis there).  During graph construction a Phi with
   arity 0 is a placeholder (Phi0) and is NOT considered a real Phi. */
1483 int is_Phi (const ir_node *n) {
1489 if (op == op_Filter) return get_interprocedural_view();
1492 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1493 (get_irn_arity(n) > 0));
/* is_Phi0: true only for the zero-arity Phi placeholders that exist
   while the graph is still being built. */
1498 int is_Phi0 (const ir_node *n) {
1501 return ((get_irn_op(n) == op_Phi) &&
1502 (get_irn_arity(n) == 0) &&
1503 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* get_Phi_preds_arr: raw access to the predecessor array; in[0] is the
   block, so the data predecessors start at index 1. */
1507 get_Phi_preds_arr (ir_node *node) {
1508 assert (node->op == op_Phi);
1509 return (ir_node **)&(get_irn_in(node)[1]);
/* get_Phi_n_preds: number of data predecessors (also valid for Phi0). */
1513 get_Phi_n_preds (ir_node *node) {
1514 assert (is_Phi(node) || is_Phi0(node));
1515 return (get_irn_arity(node));
/* set_Phi_n_preds: body elided in this listing — appears to be a stub. */
1519 void set_Phi_n_preds (ir_node *node, int n_preds) {
1520 assert (node->op == op_Phi);
/* get_Phi_pred / set_Phi_pred: positional predecessor access. */
1525 get_Phi_pred (ir_node *node, int pos) {
1526 assert (is_Phi(node) || is_Phi0(node));
1527 return get_irn_n(node, pos);
1531 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1532 assert (is_Phi(node) || is_Phi0(node));
1533 set_irn_n(node, pos, pred);
/* is_memop: Load and Store share the input layout mem=0, ptr=1. */
1537 int is_memop(ir_node *node) {
1538 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
/* Generic accessors valid for both Load and Store. */
1541 ir_node *get_memop_mem (ir_node *node) {
1542 assert(is_memop(node));
1543 return get_irn_n(node, 0);
1546 void set_memop_mem (ir_node *node, ir_node *mem) {
1547 assert(is_memop(node));
1548 set_irn_n(node, 0, mem);
1551 ir_node *get_memop_ptr (ir_node *node) {
1552 assert(is_memop(node));
1553 return get_irn_n(node, 1);
1556 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1557 assert(is_memop(node));
1558 set_irn_n(node, 1, ptr);
/* Load accessors: inputs are mem=0, ptr=1; the result mode and the
   volatility flag live in the node attribute. */
1562 get_Load_mem (ir_node *node) {
1563 assert (node->op == op_Load);
1564 return get_irn_n(node, 0);
1568 set_Load_mem (ir_node *node, ir_node *mem) {
1569 assert (node->op == op_Load);
1570 set_irn_n(node, 0, mem);
1574 get_Load_ptr (ir_node *node) {
1575 assert (node->op == op_Load);
1576 return get_irn_n(node, 1);
1580 set_Load_ptr (ir_node *node, ir_node *ptr) {
1581 assert (node->op == op_Load);
1582 set_irn_n(node, 1, ptr);
/* Mode of the value being loaded (loads are mode_T; this is the data mode). */
1586 get_Load_mode (ir_node *node) {
1587 assert (node->op == op_Load);
1588 return node->attr.load.load_mode;
1592 set_Load_mode (ir_node *node, ir_mode *mode) {
1593 assert (node->op == op_Load);
1594 node->attr.load.load_mode = mode;
/* Volatility: whether optimizers may remove/reorder this Load. */
1598 get_Load_volatility (ir_node *node) {
1599 assert (node->op == op_Load);
1600 return node->attr.load.volatility;
1604 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1605 assert (node->op == op_Load);
1606 node->attr.load.volatility = volatility;
/* Store accessors: inputs are mem=0, ptr=1, value=2; volatility flag in
   the node attribute. */
1611 get_Store_mem (ir_node *node) {
1612 assert (node->op == op_Store);
1613 return get_irn_n(node, 0);
1617 set_Store_mem (ir_node *node, ir_node *mem) {
1618 assert (node->op == op_Store);
1619 set_irn_n(node, 0, mem);
1623 get_Store_ptr (ir_node *node) {
1624 assert (node->op == op_Store);
1625 return get_irn_n(node, 1);
1629 set_Store_ptr (ir_node *node, ir_node *ptr) {
1630 assert (node->op == op_Store);
1631 set_irn_n(node, 1, ptr);
1635 get_Store_value (ir_node *node) {
1636 assert (node->op == op_Store);
1637 return get_irn_n(node, 2);
1641 set_Store_value (ir_node *node, ir_node *value) {
1642 assert (node->op == op_Store);
1643 set_irn_n(node, 2, value);
/* Volatility: whether optimizers may remove/reorder this Store. */
1647 get_Store_volatility (ir_node *node) {
1648 assert (node->op == op_Store);
1649 return node->attr.store.volatility;
1653 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1654 assert (node->op == op_Store);
1655 node->attr.store.volatility = volatility;
/* Alloc accessors: inputs are mem=0, size=1; the allocated type and the
   where-flag (stack/heap) live in the 'a' attribute. */
1660 get_Alloc_mem (ir_node *node) {
1661 assert (node->op == op_Alloc);
1662 return get_irn_n(node, 0);
1666 set_Alloc_mem (ir_node *node, ir_node *mem) {
1667 assert (node->op == op_Alloc);
1668 set_irn_n(node, 0, mem);
1672 get_Alloc_size (ir_node *node) {
1673 assert (node->op == op_Alloc);
1674 return get_irn_n(node, 1);
1678 set_Alloc_size (ir_node *node, ir_node *size) {
1679 assert (node->op == op_Alloc);
1680 set_irn_n(node, 1, size);
/* The getter resolves type-id indirections via skip_tid and caches the
   resolved type back into the attribute before returning it. */
1684 get_Alloc_type (ir_node *node) {
1685 assert (node->op == op_Alloc);
1686 return node->attr.a.type = skip_tid(node->attr.a.type);
1690 set_Alloc_type (ir_node *node, type *tp) {
1691 assert (node->op == op_Alloc);
1692 node->attr.a.type = tp;
1696 get_Alloc_where (ir_node *node) {
1697 assert (node->op == op_Alloc);
1698 return node->attr.a.where;
1702 set_Alloc_where (ir_node *node, where_alloc where) {
1703 assert (node->op == op_Alloc);
1704 node->attr.a.where = where;
/* Free accessors: inputs are mem=0, ptr=1, size=2; freed type and the
   where-flag live in the 'f' attribute (mirrors the Alloc layout). */
1709 get_Free_mem (ir_node *node) {
1710 assert (node->op == op_Free);
1711 return get_irn_n(node, 0);
1715 set_Free_mem (ir_node *node, ir_node *mem) {
1716 assert (node->op == op_Free);
1717 set_irn_n(node, 0, mem);
1721 get_Free_ptr (ir_node *node) {
1722 assert (node->op == op_Free);
1723 return get_irn_n(node, 1);
1727 set_Free_ptr (ir_node *node, ir_node *ptr) {
1728 assert (node->op == op_Free);
1729 set_irn_n(node, 1, ptr);
1733 get_Free_size (ir_node *node) {
1734 assert (node->op == op_Free);
1735 return get_irn_n(node, 2);
1739 set_Free_size (ir_node *node, ir_node *size) {
1740 assert (node->op == op_Free);
1741 set_irn_n(node, 2, size);
/* As with Alloc: resolve type-id indirections and cache the result. */
1745 get_Free_type (ir_node *node) {
1746 assert (node->op == op_Free);
1747 return node->attr.f.type = skip_tid(node->attr.f.type);
1751 set_Free_type (ir_node *node, type *tp) {
1752 assert (node->op == op_Free);
1753 node->attr.f.type = tp;
1757 get_Free_where (ir_node *node) {
1758 assert (node->op == op_Free);
1759 return node->attr.f.where;
1763 set_Free_where (ir_node *node, where_alloc where) {
1764 assert (node->op == op_Free);
1765 node->attr.f.where = where;
/* Sync accessors: variadic memory-join node.  in[0] is the block, so the
   raw predecessor array starts at index 1. */
1769 get_Sync_preds_arr (ir_node *node) {
1770 assert (node->op == op_Sync);
1771 return (ir_node **)&(get_irn_in(node)[1]);
1775 get_Sync_n_preds (ir_node *node) {
1776 assert (node->op == op_Sync);
1777 return (get_irn_arity(node));
/* set_Sync_n_preds: body elided in this listing — appears to be a stub. */
1782 set_Sync_n_preds (ir_node *node, int n_preds) {
1783 assert (node->op == op_Sync);
1788 get_Sync_pred (ir_node *node, int pos) {
1789 assert (node->op == op_Sync);
1790 return get_irn_n(node, pos);
1794 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1795 assert (node->op == op_Sync);
1796 set_irn_n(node, pos, pred);
/* get_Proj_type: best-effort recovery of the type a Proj node produces,
   by inspecting what it projects from (Start parameter, Call result, or
   a Load from a Sel-addressed entity).
   NOTE(review): several lines of this switch are elided in this listing
   (default case, returns); read against the full source before editing. */
1799 type *get_Proj_type(ir_node *n)
1802 ir_node *pred = get_Proj_pred(n);
1804 switch (get_irn_opcode(pred)) {
1807 /* Deal with Start / Call here: we need to know the Proj Nr. */
1808 assert(get_irn_mode(pred) == mode_T);
1809 pred_pred = get_Proj_pred(pred);
1810 if (get_irn_op(pred_pred) == op_Start) {
1811 type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1812 tp = get_method_param_type(mtp, get_Proj_proj(n));
1813 } else if (get_irn_op(pred_pred) == op_Call) {
1814 type *mtp = get_Call_type(pred_pred);
1815 tp = get_method_res_type(mtp, get_Proj_proj(n));
1818 case iro_Start: break;
1819 case iro_Call: break;
/* Load case: if the address is a Sel, the loaded type is the entity's. */
1821 ir_node *a = get_Load_ptr(pred);
1822 if (get_irn_op(a) == op_Sel)
1823 tp = get_entity_type(get_Sel_entity(a));
/* Proj accessors: input 0 is the projected (mode_T) predecessor. */
1832 get_Proj_pred (const ir_node *node) {
1833 assert (is_Proj(node));
1834 return get_irn_n(node, 0);
1838 set_Proj_pred (ir_node *node, ir_node *pred) {
1839 assert (is_Proj(node));
1840 set_irn_n(node, 0, pred);
/* get_Proj_proj: the projection number.  is_Proj also accepts Filter
   nodes (outside interprocedural view), whose number is stored in a
   different attribute field. */
1844 get_Proj_proj (const ir_node *node) {
1845 assert (is_Proj(node));
1846 if (get_irn_opcode(node) == iro_Proj) {
1847 return node->attr.proj;
1849 assert(get_irn_opcode(node) == iro_Filter);
1850 return node->attr.filter.proj;
/* set_Proj_proj: only defined for real Proj nodes, not Filter. */
1855 set_Proj_proj (ir_node *node, long proj) {
1856 assert (node->op == op_Proj);
1857 node->attr.proj = proj;
/* Tuple accessors: variadic node; in[0] is the block, data preds follow. */
1861 get_Tuple_preds_arr (ir_node *node) {
1862 assert (node->op == op_Tuple);
1863 return (ir_node **)&(get_irn_in(node)[1]);
1867 get_Tuple_n_preds (ir_node *node) {
1868 assert (node->op == op_Tuple);
1869 return (get_irn_arity(node));
/* set_Tuple_n_preds: body elided in this listing — appears to be a stub. */
1874 set_Tuple_n_preds (ir_node *node, int n_preds) {
1875 assert (node->op == op_Tuple);
1880 get_Tuple_pred (ir_node *node, int pos) {
1881 assert (node->op == op_Tuple);
1882 return get_irn_n(node, pos);
1886 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1887 assert (node->op == op_Tuple);
1888 set_irn_n(node, pos, pred);
/* Id accessors: an Id node forwards its single predecessor (input 0). */
1892 get_Id_pred (ir_node *node) {
1893 assert (node->op == op_Id);
1894 return get_irn_n(node, 0);
1898 set_Id_pred (ir_node *node, ir_node *pred) {
1899 assert (node->op == op_Id);
1900 set_irn_n(node, 0, pred);
1903 ir_node *get_Confirm_value (ir_node *node) {
1904 assert (node->op == op_Confirm);
1905 return get_irn_n(node, 0);
1907 void set_Confirm_value (ir_node *node, ir_node *value) {
1908 assert (node->op == op_Confirm);
1909 set_irn_n(node, 0, value);
1911 ir_node *get_Confirm_bound (ir_node *node) {
1912 assert (node->op == op_Confirm);
1913 return get_irn_n(node, 1);
1915 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1916 assert (node->op == op_Confirm);
1917 set_irn_n(node, 0, bound);
1919 pn_Cmp get_Confirm_cmp (ir_node *node) {
1920 assert (node->op == op_Confirm);
1921 return node->attr.confirm_cmp;
1923 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1924 assert (node->op == op_Confirm);
1925 node->attr.confirm_cmp = cmp;
/* Filter pred/proj accessors.  The pred getter/setter bodies are elided
   in this listing; the proj number lives in attr.filter.proj. */
1930 get_Filter_pred (ir_node *node) {
1931 assert(node->op == op_Filter);
1935 set_Filter_pred (ir_node *node, ir_node *pred) {
1936 assert(node->op == op_Filter);
1940 get_Filter_proj(ir_node *node) {
1941 assert(node->op == op_Filter);
1942 return node->attr.filter.proj;
1945 set_Filter_proj (ir_node *node, long proj) {
1946 assert(node->op == op_Filter);
1947 node->attr.filter.proj = proj;
1950 /* Don't use get_irn_arity, get_irn_n in implementation as access
1951 shall work independent of view!!! */
/* set_Filter_cg_pred_arr: install the interprocedural (call-graph)
   predecessor array.  Reallocates in_cg (and a zeroed backedge array)
   on the graph obstack when the arity changes; slot 0 mirrors the
   block predecessor, data preds are copied to slots 1..arity. */
1952 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1953 assert(node->op == op_Filter);
1954 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1955 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1956 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1957 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1958 node->attr.filter.in_cg[0] = node->in[0];
1960 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* set_Filter_cg_pred: positional write into the cg predecessor array
   (offset by 1 for the block slot). */
1963 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1964 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1965 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1966 node->attr.filter.in_cg[pos + 1] = pred;
/* get_Filter_n_cg_preds: arity of the cg view (array length minus the
   block slot). */
1968 int get_Filter_n_cg_preds(ir_node *node) {
1969 assert(node->op == op_Filter && node->attr.filter.in_cg);
1970 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1972 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1974 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1976 arity = ARR_LEN(node->attr.filter.in_cg);
1977 assert(pos < arity - 1);
1978 return node->attr.filter.in_cg[pos + 1];
/* Mux accessors.  Input layout: block=0, sel=1, false=2, true=3.
   Getter bodies for sel/false/true are elided in this listing.
   NOTE(review): the setters write node->in[] directly instead of going
   through set_irn_n, bypassing any edge bookkeeping — confirm this is
   intentional before relying on out-edge consistency after a set. */
1982 ir_node *get_Mux_sel (ir_node *node) {
1983 assert(node->op == op_Mux);
1986 void set_Mux_sel (ir_node *node, ir_node *sel) {
1987 assert(node->op == op_Mux);
1991 ir_node *get_Mux_false (ir_node *node) {
1992 assert(node->op == op_Mux);
1995 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1996 assert(node->op == op_Mux);
1997 node->in[2] = ir_false;
2000 ir_node *get_Mux_true (ir_node *node) {
2001 assert(node->op == op_Mux);
2004 void set_Mux_true (ir_node *node, ir_node *ir_true) {
2005 assert(node->op == op_Mux);
2006 node->in[3] = ir_true;
/* get_irn_irg: find the graph a node belongs to by walking to its block
   (which stores the irg in its attribute). */
2010 get_irn_irg(const ir_node *node) {
2012 * Do not use get_nodes_Block() here, because this
2013 * will check the pinned state.
2014 * However even a 'wrong' block is always in the proper
2017 if (! is_Block(node))
2018 node = get_irn_n(node, -1);
/* A Bad may stand in for the block after optimization; step once more. */
2019 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2020 node = get_irn_n(node, -1);
2021 assert(get_irn_op(node) == op_Block);
2022 return node->attr.block.irg;
2026 /*----------------------------------------------------------------*/
2027 /* Auxiliary routines */
2028 /*----------------------------------------------------------------*/
/* skip_Proj: if node is a Proj (or Filter in non-ip view), return what it
   projects from; otherwise return node unchanged.  NULL-safe. */
2031 skip_Proj (ir_node *node) {
2032 /* don't assert node !!! */
2033 if (node && is_Proj(node)) {
2034 return get_Proj_pred(node);
/* skip_Tuple: resolve Proj-of-Tuple chains to the selected Tuple entry,
   following Id chains along the way.  No-op when normalization is off.
   NOTE(review): trailing return lines are elided in this listing. */
2041 skip_Tuple (ir_node *node) {
2044 if (!get_opt_normalize()) return node;
2046 node = skip_Id(node);
2047 if (get_irn_op(node) == op_Proj) {
2048 pred = skip_Id(get_Proj_pred(node));
2049 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
2050 pred = skip_Id(skip_Tuple(pred));
2051 else if (get_irn_op(pred) == op_Tuple)
2052 return get_Tuple_pred(pred, get_Proj_proj(node));
2057 /* returns operand of node if node is a Cast */
/* NULL-safe; returns node unchanged when it is not a Cast. */
2058 ir_node *skip_Cast (ir_node *node) {
2059 if (node && get_irn_op(node) == op_Cast)
2060 return get_Cast_op(node);
2064 /* returns operand of node if node is a Confirm */
2065 ir_node *skip_Confirm (ir_node *node) {
2066 if (node && get_irn_op(node) == op_Confirm)
2067 return get_Confirm_value(node);
2071 /* skip all high-level ops */
/* Steps over one high-level op by returning its input 0. */
2072 ir_node *skip_HighLevel(ir_node *node) {
2073 if (node && is_op_highlevel(get_irn_op(node)))
2074 return get_irn_n(node, 0);
2079 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2080 than any other approach, as Id chains are resolved and all point to the real node, or
2081 all id's are self loops. */
/* First of two skip_Id variants in this file — presumably one is disabled
   by a preprocessor conditional on lines elided here; verify in full source. */
2083 skip_Id (ir_node *node) {
2084 /* don't assert node !!! */
2086 if (!get_opt_normalize()) return node;
2088 /* Don't use get_Id_pred: We get into an endless loop for
2089 self-referencing Ids. */
/* in[0+1] is the first data predecessor (in[0] is the block). */
2090 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2091 ir_node *rem_pred = node->in[0+1];
2094 assert (get_irn_arity (node) > 0);
/* Temporarily self-loop this node to break cycles during recursion. */
2096 node->in[0+1] = node;
2097 res = skip_Id(rem_pred);
2098 if (res->op == op_Id) /* self-loop */ return node;
2100 node->in[0+1] = res;
2107 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2108 than any other approach, as Id chains are resolved and all point to the real node, or
2109 all id's are self loops. */
/* Second skip_Id variant (see note at the first) with fast paths for
   non-Id nodes and length-1 chains before the chain-compacting recursion. */
2111 skip_Id (ir_node *node) {
2113 /* don't assert node !!! */
2115 if (!node || (node->op != op_Id)) return node;
2117 if (!get_opt_normalize()) return node;
2119 /* Don't use get_Id_pred: We get into an endless loop for
2120 self-referencing Ids. */
2121 pred = node->in[0+1];
2123 if (pred->op != op_Id) return pred;
2125 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2126 ir_node *rem_pred, *res;
2128 if (pred->op != op_Id) return pred; /* shortcut */
2131 assert (get_irn_arity (node) > 0);
2133 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2134 res = skip_Id(rem_pred);
2135 if (res->op == op_Id) /* self-loop */ return node;
2137 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Out-of-line wrappers for the inline node-kind predicates. */
2146 (is_Bad)(const ir_node *node) {
2147 return _is_Bad(node);
2151 (is_Const)(const ir_node *node) {
2152 return _is_Const(node);
2156 (is_no_Block)(const ir_node *node) {
2157 return _is_no_Block(node);
2161 (is_Block)(const ir_node *node) {
2162 return _is_Block(node);
2165 /* returns true if node is a Unknown node. */
2167 (is_Unknown)(const ir_node *node) {
2168 return _is_Unknown(node);
/* is_Proj: Filter nodes count as Projs outside the interprocedural view. */
2172 is_Proj (const ir_node *node) {
2174 return node->op == op_Proj
2175 || (!get_interprocedural_view() && node->op == op_Filter);
2178 /* Returns true if the operation manipulates control flow. */
2180 is_cfop(const ir_node *node) {
2181 return is_cfopcode(get_irn_op(node));
2184 /* Returns true if the operation manipulates interprocedural control flow:
2185 CallBegin, EndReg, EndExcept */
2186 int is_ip_cfop(const ir_node *node) {
2187 return is_ip_cfopcode(get_irn_op(node));
2190 /* Returns true if the operation can change the control flow because
2193 is_fragile_op(const ir_node *node) {
2194 return is_op_fragile(get_irn_op(node));
2197 /* Returns the memory operand of fragile operations. */
/* NOTE(review): the case labels of this switch are elided in this
   listing; only the shared "input 0" return and the unreachable default
   are visible. */
2198 ir_node *get_fragile_op_mem(ir_node *node) {
2199 assert(node && is_fragile_op(node));
2201 switch (get_irn_opcode (node)) {
2210 return get_irn_n(node, 0);
2215 assert(0 && "should not be reached");
2220 /* Returns true if the operation is a forking control flow operation. */
2222 is_forking_op(const ir_node *node) {
2223 return is_op_forking(get_irn_op(node));
/* Out-of-line wrapper for the inline get_irn_type. */
2226 type *(get_irn_type)(ir_node *node) {
2227 return _get_irn_type(node);
2230 /** the get_type operation must be always implemented */
/* Fallback for ops without a meaningful type (body elided in listing). */
2231 static type *get_Null_type(ir_node *n) {
2235 /* set the get_type operation */
/* Installs the per-opcode get_type callback; only Const, SymConst, Cast
   and Proj carry real type information, everything else gets the null
   fallback. */
2236 ir_op *firm_set_default_get_type(ir_op *op)
2239 case iro_Const: op->get_type = get_Const_type; break;
2240 case iro_SymConst: op->get_type = get_SymConst_value_type; break;
2241 case iro_Cast: op->get_type = get_Cast_type; break;
2242 case iro_Proj: op->get_type = get_Proj_type; break;
2243 default: op->get_type = get_Null_type; break;
2248 #ifdef DEBUG_libfirm
/* dump_irn: debugging aid — print a node, its block (input -1) and all
   predecessors to stdout.  Compiled to a no-op in release builds. */
2249 void dump_irn (ir_node *n) {
2250 int i, arity = get_irn_arity(n);
2251 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2253 ir_node *pred = get_irn_n(n, -1);
2254 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2255 get_irn_node_nr(pred), (void *)pred);
2257 printf(" preds: \n");
2258 for (i = 0; i < arity; ++i) {
2259 ir_node *pred = get_irn_n(n, i);
2260 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2261 get_irn_node_nr(pred), (void *)pred);
2265 #else /* DEBUG_libfirm */
2266 void dump_irn (ir_node *n) {}
2267 #endif /* DEBUG_libfirm */