3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
34 /* some constants fixing the positions of nodes predecessors
36 #define CALL_PARAM_OFFSET 2
37 #define FUNCCALL_PARAM_OFFSET 1
38 #define SEL_INDEX_OFFSET 2
39 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
40 #define END_KEEPALIVE_OFFSET 0
42 static const char *pnc_name_arr [] = {
43 "False", "Eq", "Lt", "Le",
44 "Gt", "Ge", "Lg", "Leg", "Uo",
45 "Ue", "Ul", "Ule", "Ug", "Uge",
50 * Returns the pnc name from a pnc constant
/** Returns the printable name of the pnc condition code by indexing the
    pnc_name_arr table.  No bounds check is performed: pnc must be a valid
    index into that table. */
const char *get_pnc_string(int pnc) {
  return pnc_name_arr[pnc];
57 * Calculates the negated (Complement(R)) pnc condition.
59 int get_negated_pnc(int pnc, ir_mode *mode) {
62 /* do NOT add the Uo bit for non-floating point values */
63 if (! mode_is_float(mode))
69 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
71 get_inversed_pnc(int pnc) {
72 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
73 int lesser = pnc & pn_Cmp_Lt;
74 int greater = pnc & pn_Cmp_Gt;
76 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
81 const char *pns_name_arr [] = {
82 "initial_exec", "global_store",
83 "frame_base", "globals", "args"
86 const char *symconst_name_arr [] = {
87 "type_tag", "size", "addr_name", "addr_ent"
91 * Indicates, whether additional data can be registered to ir nodes.
92 * If set to 1, this is not possible anymore.
94 static int forbid_new_data = 0;
97 * The amount of additional space for custom data to be allocated upon
98 * creating a new node.
100 unsigned firm_add_node_size = 0;
103 /* register new space for every node */
104 unsigned register_additional_node_data(unsigned size) {
105 assert(!forbid_new_data && "Too late to register additional node data");
110 return firm_add_node_size += size;
116 /* Forbid the addition of new data to an ir node. */
121 * irnode constructor.
122 * Create a new irnode in irg, with an op, mode, arity and
123 * some incoming irnodes.
124 * If arity is negative, a node with a dynamic array is created.
127 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
128 int arity, ir_node **in)
131 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
134 assert(irg && op && mode);
135 p = obstack_alloc (irg->obst, node_size);
136 memset(p, 0, node_size);
137 res = (ir_node *) (p + firm_add_node_size);
139 res->kind = k_ir_node;
145 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
147 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
148 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
152 set_irn_dbg_info(res, db);
156 res->node_nr = get_irp_new_node_nr();
159 #if FIRM_EDGES_INPLACE
162 int not_a_block = is_no_Block(res);
164 INIT_LIST_HEAD(&res->edge_info.outs_head);
166 INIT_LIST_HEAD(&res->attr.block.succ_head);
169 for (i = 0, n = arity + not_a_block; i < n; ++i)
170 edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
174 hook_new_node(irg, res);
179 /*-- getting some parameters from ir_nodes --*/
182 (is_ir_node)(const void *thing) {
183 return _is_ir_node(thing);
187 (get_irn_intra_arity)(const ir_node *node) {
188 return _get_irn_intra_arity(node);
192 (get_irn_inter_arity)(const ir_node *node) {
193 return _get_irn_inter_arity(node);
196 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
199 (get_irn_arity)(const ir_node *node) {
200 return _get_irn_arity(node);
203 /* Returns the array with ins. This array is shifted with respect to the
204 array accessed by get_irn_n: The block operand is at position 0 not -1.
205 (@@@ This should be changed.)
206 The order of the predecessors in this array is not guaranteed, except that
207 lists of operands as predecessors of Block or arguments of a Call are
210 get_irn_in (const ir_node *node) {
212 if (get_interprocedural_view()) { /* handle Filter and Block specially */
213 if (get_irn_opcode(node) == iro_Filter) {
214 assert(node->attr.filter.in_cg);
215 return node->attr.filter.in_cg;
216 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
217 return node->attr.block.in_cg;
219 /* else fall through */
225 set_irn_in (ir_node *node, int arity, ir_node **in) {
228 if (get_interprocedural_view()) { /* handle Filter and Block specially */
229 if (get_irn_opcode(node) == iro_Filter) {
230 assert(node->attr.filter.in_cg);
231 arr = &node->attr.filter.in_cg;
232 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
233 arr = &node->attr.block.in_cg;
240 if (arity != ARR_LEN(*arr) - 1) {
241 ir_node * block = (*arr)[0];
242 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
245 fix_backedges(current_ir_graph->obst, node);
246 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
250 (get_irn_intra_n)(const ir_node *node, int n) {
251 return _get_irn_intra_n (node, n);
255 (get_irn_inter_n)(const ir_node *node, int n) {
256 return _get_irn_inter_n (node, n);
259 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
262 (get_irn_n)(const ir_node *node, int n) {
263 return _get_irn_n(node, n);
267 set_irn_n (ir_node *node, int n, ir_node *in) {
268 assert(node && node->kind == k_ir_node);
270 assert(n < get_irn_arity(node));
271 assert(in && in->kind == k_ir_node);
273 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
274 /* Change block pred in both views! */
275 node->in[n + 1] = in;
276 assert(node->attr.filter.in_cg);
277 node->attr.filter.in_cg[n + 1] = in;
280 if (get_interprocedural_view()) { /* handle Filter and Block specially */
281 if (get_irn_opcode(node) == iro_Filter) {
282 assert(node->attr.filter.in_cg);
283 node->attr.filter.in_cg[n + 1] = in;
285 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
286 node->attr.block.in_cg[n + 1] = in;
289 /* else fall through */
293 hook_set_irn_n(node, n, in, node->in[n + 1]);
295 /* Here, we rely on src and tgt being in the current ir graph */
296 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
298 node->in[n + 1] = in;
302 (get_irn_mode)(const ir_node *node) {
303 return _get_irn_mode(node);
307 (set_irn_mode)(ir_node *node, ir_mode *mode)
309 _set_irn_mode(node, mode);
313 get_irn_modecode (const ir_node *node)
316 return node->mode->code;
319 /** Gets the string representation of the mode .*/
321 get_irn_modename (const ir_node *node)
324 return get_mode_name(node->mode);
328 get_irn_modeident (const ir_node *node)
331 return get_mode_ident(node->mode);
335 (get_irn_op)(const ir_node *node)
337 return _get_irn_op(node);
340 /* should be private to the library: */
342 set_irn_op (ir_node *node, ir_op *op)
349 (get_irn_opcode)(const ir_node *node)
351 return _get_irn_opcode(node);
355 get_irn_opname (const ir_node *node)
358 if ((get_irn_op((ir_node *)node) == op_Phi) &&
359 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
360 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
361 return get_id_str(node->op->name);
365 get_irn_opident (const ir_node *node)
368 return node->op->name;
372 (get_irn_visited)(const ir_node *node)
374 return _get_irn_visited(node);
378 (set_irn_visited)(ir_node *node, unsigned long visited)
380 _set_irn_visited(node, visited);
384 (mark_irn_visited)(ir_node *node) {
385 _mark_irn_visited(node);
389 (irn_not_visited)(const ir_node *node) {
390 return _irn_not_visited(node);
394 (irn_visited)(const ir_node *node) {
395 return _irn_visited(node);
399 (set_irn_link)(ir_node *node, void *link) {
400 _set_irn_link(node, link);
404 (get_irn_link)(const ir_node *node) {
405 return _get_irn_link(node);
409 (get_irn_pinned)(const ir_node *node) {
410 return _get_irn_pinned(node);
414 (is_irn_pinned_in_irg) (const ir_node *node) {
415 return _is_irn_pinned_in_irg(node);
418 void set_irn_pinned(ir_node *node, op_pin_state state) {
419 /* due to optimization an opt may be turned into a Tuple */
420 if (get_irn_op(node) == op_Tuple)
423 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
424 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
426 node->attr.except.pin_state = state;
429 #ifdef DO_HEAPANALYSIS
430 /* Access the abstract interpretation information of a node.
431 Returns NULL if no such information is available. */
432 struct abstval *get_irn_abst_value(ir_node *n) {
435 /* Set the abstract interpretation information of a node. */
436 void set_irn_abst_value(ir_node *n, struct abstval *os) {
439 struct section *firm_get_irn_section(ir_node *n) {
442 void firm_set_irn_section(ir_node *n, struct section *s) {
446 /* Dummies needed for firmjni. */
447 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
448 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
449 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
450 void firm_set_irn_section(ir_node *n, struct section *s) {}
451 #endif /* DO_HEAPANALYSIS */
454 /* Outputs a unique number for this node */
456 get_irn_node_nr(const ir_node *node) {
459 return node->node_nr;
466 get_irn_const_attr (ir_node *node)
468 assert (node->op == op_Const);
469 return node->attr.con;
473 get_irn_proj_attr (ir_node *node)
475 assert (node->op == op_Proj);
476 return node->attr.proj;
480 get_irn_alloc_attr (ir_node *node)
482 assert (node->op == op_Alloc);
487 get_irn_free_attr (ir_node *node)
489 assert (node->op == op_Free);
494 get_irn_symconst_attr (ir_node *node)
496 assert (node->op == op_SymConst);
501 get_irn_call_attr (ir_node *node)
503 assert (node->op == op_Call);
504 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
508 get_irn_sel_attr (ir_node *node)
510 assert (node->op == op_Sel);
515 get_irn_phi_attr (ir_node *node)
517 assert (node->op == op_Phi);
518 return node->attr.phi0_pos;
522 get_irn_block_attr (ir_node *node)
524 assert (node->op == op_Block);
525 return node->attr.block;
529 get_irn_load_attr (ir_node *node)
531 assert (node->op == op_Load);
532 return node->attr.load;
536 get_irn_store_attr (ir_node *node)
538 assert (node->op == op_Store);
539 return node->attr.store;
543 get_irn_except_attr (ir_node *node)
545 assert (node->op == op_Div || node->op == op_Quot ||
546 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
547 return node->attr.except;
550 /** manipulate fields of individual nodes **/
552 /* this works for all except Block */
554 get_nodes_block (const ir_node *node) {
555 assert (!(node->op == op_Block));
556 return get_irn_n(node, -1);
560 set_nodes_block (ir_node *node, ir_node *block) {
561 assert (!(node->op == op_Block));
562 set_irn_n(node, -1, block);
565 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
566 * from Start. If so returns frame type, else Null. */
567 type *is_frame_pointer(ir_node *n) {
568 if ((get_irn_op(n) == op_Proj) &&
569 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
570 ir_node *start = get_Proj_pred(n);
571 if (get_irn_op(start) == op_Start) {
572 return get_irg_frame_type(get_irn_irg(start));
578 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
579 * from Start. If so returns global type, else Null. */
580 type *is_globals_pointer(ir_node *n) {
581 if ((get_irn_op(n) == op_Proj) &&
582 (get_Proj_proj(n) == pn_Start_P_globals)) {
583 ir_node *start = get_Proj_pred(n);
584 if (get_irn_op(start) == op_Start) {
585 return get_glob_type();
591 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
592 * from Start. If so returns 1, else 0. */
593 int is_value_arg_pointer(ir_node *n) {
594 if ((get_irn_op(n) == op_Proj) &&
595 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
596 (get_irn_op(get_Proj_pred(n)) == op_Start))
601 /* Returns an array with the predecessors of the Block. Depending on
602 the implementation of the graph data structure this can be a copy of
603 the internal representation of predecessors as well as the internal
604 array itself. Therefore writing to this array might obstruct the ir. */
606 get_Block_cfgpred_arr (ir_node *node)
608 assert ((node->op == op_Block));
609 return (ir_node **)&(get_irn_in(node)[1]);
613 (get_Block_n_cfgpreds)(ir_node *node) {
614 return get_Block_n_cfgpreds(node);
618 (get_Block_cfgpred)(ir_node *node, int pos) {
619 return get_Block_cfgpred(node, pos);
623 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
624 assert (node->op == op_Block);
625 set_irn_n(node, pos, pred);
629 (get_Block_cfgpred_block)(ir_node *node, int pos) {
630 return _get_Block_cfgpred_block(node, pos);
634 get_Block_matured (ir_node *node) {
635 assert (node->op == op_Block);
636 return node->attr.block.matured;
640 set_Block_matured (ir_node *node, bool matured) {
641 assert (node->op == op_Block);
642 node->attr.block.matured = matured;
646 (get_Block_block_visited)(ir_node *node) {
647 return _get_Block_block_visited(node);
651 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
652 _set_Block_block_visited(node, visit);
655 /* For this current_ir_graph must be set. */
657 (mark_Block_block_visited)(ir_node *node) {
658 _mark_Block_block_visited(node);
662 (Block_not_block_visited)(ir_node *node) {
663 return _Block_not_block_visited(node);
667 get_Block_graph_arr (ir_node *node, int pos) {
668 assert (node->op == op_Block);
669 return node->attr.block.graph_arr[pos+1];
673 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
674 assert (node->op == op_Block);
675 node->attr.block.graph_arr[pos+1] = value;
678 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
679 assert(node->op == op_Block);
680 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
681 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
682 node->attr.block.in_cg[0] = NULL;
683 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
685 /* Fix backedge array. fix_backedges operates depending on
686 interprocedural_view. */
687 int ipv = get_interprocedural_view();
688 set_interprocedural_view(true);
689 fix_backedges(current_ir_graph->obst, node);
690 set_interprocedural_view(ipv);
693 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
696 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
697 assert(node->op == op_Block &&
698 node->attr.block.in_cg &&
699 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
700 node->attr.block.in_cg[pos + 1] = pred;
703 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
704 assert(node->op == op_Block);
705 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
708 int get_Block_cg_n_cfgpreds(ir_node * node) {
709 assert(node->op == op_Block);
710 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
713 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
714 assert(node->op == op_Block && node->attr.block.in_cg);
715 return node->attr.block.in_cg[pos + 1];
718 void remove_Block_cg_cfgpred_arr(ir_node * node) {
719 assert(node->op == op_Block);
720 node->attr.block.in_cg = NULL;
723 ir_node *(set_Block_dead)(ir_node *block) {
724 return _set_Block_dead(block);
727 int (is_Block_dead)(const ir_node *block) {
728 return _is_Block_dead(block);
/** Returns the extended basic block this Block belongs to.
    NOTE(review): presumably NULL when extended blocks have not been
    computed yet — confirm against the extblk construction code. */
ir_extblk *get_Block_extbb(const ir_node *block) {
  assert(is_Block(block));
  return block->attr.block.extblk;

/** Sets the extended basic block this Block belongs to. */
void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
  assert(is_Block(block));
  block->attr.block.extblk = extblk;
742 set_Start_irg(ir_node *node, ir_graph *irg) {
743 assert(node->op == op_Start);
744 assert(is_ir_graph(irg));
745 assert(0 && " Why set irg? -- use set_irn_irg");
749 get_End_n_keepalives(ir_node *end) {
750 assert (end->op == op_End);
751 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
755 get_End_keepalive(ir_node *end, int pos) {
756 assert (end->op == op_End);
757 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
761 add_End_keepalive (ir_node *end, ir_node *ka) {
762 assert (end->op == op_End);
763 ARR_APP1 (ir_node *, end->in, ka);
767 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
768 assert (end->op == op_End);
769 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
773 free_End (ir_node *end) {
774 assert (end->op == op_End);
776 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
777 end->in = NULL; /* @@@ make sure we get an error if we use the
778 in array afterwards ... */
783 > Implementing the case construct (which is where the constant Proj node is
784 > important) involves far more than simply determining the constant values.
785 > We could argue that this is more properly a function of the translator from
786 > Firm to the target machine. That could be done if there was some way of
787 > projecting "default" out of the Cond node.
788 I know it's complicated.
789   Basically there are two problems:
790 - determining the gaps between the projs
791 - determining the biggest case constant to know the proj number for
793 I see several solutions:
794 1. Introduce a ProjDefault node. Solves both problems.
795 This means to extend all optimizations executed during construction.
796 2. Give the Cond node for switch two flavors:
797 a) there are no gaps in the projs (existing flavor)
798 b) gaps may exist, default proj is still the Proj with the largest
799 projection number. This covers also the gaps.
800 3. Fix the semantic of the Cond to that of 2b)
802 Solution 2 seems to be the best:
803 Computing the gaps in the Firm representation is not too hard, i.e.,
804 libFIRM can implement a routine that transforms between the two
805 flavours. This is also possible for 1) but 2) does not require to
806 change any existing optimization.
807 Further it should be far simpler to determine the biggest constant than
809 I don't want to choose 3) as 2a) seems to have advantages for
810 dataflow analysis and 3) does not allow to convert the representation to
814 get_Cond_selector (ir_node *node) {
815 assert (node->op == op_Cond);
816 return get_irn_n(node, 0);
820 set_Cond_selector (ir_node *node, ir_node *selector) {
821 assert (node->op == op_Cond);
822 set_irn_n(node, 0, selector);
826 get_Cond_kind (ir_node *node) {
827 assert (node->op == op_Cond);
828 return node->attr.c.kind;
832 set_Cond_kind (ir_node *node, cond_kind kind) {
833 assert (node->op == op_Cond);
834 node->attr.c.kind = kind;
838 get_Cond_defaultProj (ir_node *node) {
839 assert (node->op == op_Cond);
840 return node->attr.c.default_proj;
844 get_Return_mem (ir_node *node) {
845 assert (node->op == op_Return);
846 return get_irn_n(node, 0);
850 set_Return_mem (ir_node *node, ir_node *mem) {
851 assert (node->op == op_Return);
852 set_irn_n(node, 0, mem);
856 get_Return_n_ress (ir_node *node) {
857 assert (node->op == op_Return);
858 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
862 get_Return_res_arr (ir_node *node)
864 assert ((node->op == op_Return));
865 if (get_Return_n_ress(node) > 0)
866 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
873 set_Return_n_res (ir_node *node, int results) {
874 assert (node->op == op_Return);
879 get_Return_res (ir_node *node, int pos) {
880 assert (node->op == op_Return);
881 assert (get_Return_n_ress(node) > pos);
882 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
886 set_Return_res (ir_node *node, int pos, ir_node *res){
887 assert (node->op == op_Return);
888 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
892 get_Raise_mem (ir_node *node) {
893 assert (node->op == op_Raise);
894 return get_irn_n(node, 0);
898 set_Raise_mem (ir_node *node, ir_node *mem) {
899 assert (node->op == op_Raise);
900 set_irn_n(node, 0, mem);
904 get_Raise_exo_ptr (ir_node *node) {
905 assert (node->op == op_Raise);
906 return get_irn_n(node, 1);
910 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
911 assert (node->op == op_Raise);
912 set_irn_n(node, 1, exo_ptr);
915 tarval *(get_Const_tarval)(ir_node *node) {
916 return _get_Const_tarval(node);
920 set_Const_tarval (ir_node *node, tarval *con) {
921 assert (node->op == op_Const);
922 node->attr.con.tv = con;
925 cnst_classify_t (classify_Const)(ir_node *node)
927 return _classify_Const(node);
931 /* The source language type. Must be an atomic type. Mode of type must
932 be mode of node. For tarvals from entities type must be pointer to
935 get_Const_type (ir_node *node) {
936 assert (node->op == op_Const);
937 return node->attr.con.tp;
941 set_Const_type (ir_node *node, type *tp) {
942 assert (node->op == op_Const);
943 if (tp != firm_unknown_type) {
944 assert (is_atomic_type(tp));
945 assert (get_type_mode(tp) == get_irn_mode(node));
947 node->attr.con.tp = tp;
952 get_SymConst_kind (const ir_node *node) {
953 assert (node->op == op_SymConst);
954 return node->attr.i.num;
958 set_SymConst_kind (ir_node *node, symconst_kind num) {
959 assert (node->op == op_SymConst);
960 node->attr.i.num = num;
964 get_SymConst_type (ir_node *node) {
965 assert ( (node->op == op_SymConst)
966 && ( get_SymConst_kind(node) == symconst_type_tag
967 || get_SymConst_kind(node) == symconst_size));
968 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
972 set_SymConst_type (ir_node *node, type *tp) {
973 assert ( (node->op == op_SymConst)
974 && ( get_SymConst_kind(node) == symconst_type_tag
975 || get_SymConst_kind(node) == symconst_size));
976 node->attr.i.sym.type_p = tp;
980 get_SymConst_name (ir_node *node) {
981 assert ( (node->op == op_SymConst)
982 && (get_SymConst_kind(node) == symconst_addr_name));
983 return node->attr.i.sym.ident_p;
987 set_SymConst_name (ir_node *node, ident *name) {
988 assert ( (node->op == op_SymConst)
989 && (get_SymConst_kind(node) == symconst_addr_name));
990 node->attr.i.sym.ident_p = name;
994 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
995 entity *get_SymConst_entity (ir_node *node) {
996 assert ( (node->op == op_SymConst)
997 && (get_SymConst_kind (node) == symconst_addr_ent));
998 return node->attr.i.sym.entity_p;
1001 void set_SymConst_entity (ir_node *node, entity *ent) {
1002 assert ( (node->op == op_SymConst)
1003 && (get_SymConst_kind(node) == symconst_addr_ent));
1004 node->attr.i.sym.entity_p = ent;
1007 union symconst_symbol
1008 get_SymConst_symbol (ir_node *node) {
1009 assert (node->op == op_SymConst);
1010 return node->attr.i.sym;
1014 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1015 assert (node->op == op_SymConst);
1016 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1017 node->attr.i.sym = sym;
1021 get_SymConst_value_type (ir_node *node) {
1022 assert (node->op == op_SymConst);
1023 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1024 return node->attr.i.tp;
1028 set_SymConst_value_type (ir_node *node, type *tp) {
1029 assert (node->op == op_SymConst);
1030 node->attr.i.tp = tp;
1034 get_Sel_mem (ir_node *node) {
1035 assert (node->op == op_Sel);
1036 return get_irn_n(node, 0);
1040 set_Sel_mem (ir_node *node, ir_node *mem) {
1041 assert (node->op == op_Sel);
1042 set_irn_n(node, 0, mem);
1046 get_Sel_ptr (ir_node *node) {
1047 assert (node->op == op_Sel);
1048 return get_irn_n(node, 1);
1052 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1053 assert (node->op == op_Sel);
1054 set_irn_n(node, 1, ptr);
1058 get_Sel_n_indexs (ir_node *node) {
1059 assert (node->op == op_Sel);
1060 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1064 get_Sel_index_arr (ir_node *node)
1066 assert ((node->op == op_Sel));
1067 if (get_Sel_n_indexs(node) > 0)
1068 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1074 get_Sel_index (ir_node *node, int pos) {
1075 assert (node->op == op_Sel);
1076 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1080 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1081 assert (node->op == op_Sel);
1082 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1086 get_Sel_entity (ir_node *node) {
1087 assert (node->op == op_Sel);
1088 return node->attr.s.ent;
1092 set_Sel_entity (ir_node *node, entity *ent) {
1093 assert (node->op == op_Sel);
1094 node->attr.s.ent = ent;
1098 get_InstOf_ent (ir_node *node) {
1099 assert (node->op = op_InstOf);
1100 return (node->attr.io.ent);
1104 set_InstOf_ent (ir_node *node, type *ent) {
1105 assert (node->op = op_InstOf);
1106 node->attr.io.ent = ent;
1110 get_InstOf_store (ir_node *node) {
1111 assert (node->op = op_InstOf);
1112 return (get_irn_n (node, 0));
1116 set_InstOf_store (ir_node *node, ir_node *obj) {
1117 assert (node->op = op_InstOf);
1118 set_irn_n (node, 0, obj);
1122 get_InstOf_obj (ir_node *node) {
1123 assert (node->op = op_InstOf);
1124 return (get_irn_n (node, 1));
1128 set_InstOf_obj (ir_node *node, ir_node *obj) {
1129 assert (node->op = op_InstOf);
1130 set_irn_n (node, 1, obj);
1134 /* For unary and binary arithmetic operations the access to the
1135 operands can be factored out. Left is the first, right the
1136 second arithmetic value as listed in tech report 0999-33.
1137 unops are: Minus, Abs, Not, Conv, Cast
1138 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1139 Shr, Shrs, Rotate, Cmp */
1143 get_Call_mem (ir_node *node) {
1144 assert (node->op == op_Call);
1145 return get_irn_n(node, 0);
1149 set_Call_mem (ir_node *node, ir_node *mem) {
1150 assert (node->op == op_Call);
1151 set_irn_n(node, 0, mem);
1155 get_Call_ptr (ir_node *node) {
1156 assert (node->op == op_Call);
1157 return get_irn_n(node, 1);
1161 set_Call_ptr (ir_node *node, ir_node *ptr) {
1162 assert (node->op == op_Call);
1163 set_irn_n(node, 1, ptr);
1167 get_Call_param_arr (ir_node *node) {
1168 assert (node->op == op_Call);
1169 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1173 get_Call_n_params (ir_node *node) {
1174 assert (node->op == op_Call);
1175 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1179 get_Call_arity (ir_node *node) {
1180 assert (node->op == op_Call);
1181 return get_Call_n_params(node);
1185 set_Call_arity (ir_node *node, ir_node *arity) {
1186 assert (node->op == op_Call);
1191 get_Call_param (ir_node *node, int pos) {
1192 assert (node->op == op_Call);
1193 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1197 set_Call_param (ir_node *node, int pos, ir_node *param) {
1198 assert (node->op == op_Call);
1199 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1203 get_Call_type (ir_node *node) {
1204 assert (node->op == op_Call);
1205 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1209 set_Call_type (ir_node *node, type *tp) {
1210 assert (node->op == op_Call);
1211 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1212 node->attr.call.cld_tp = tp;
1215 int Call_has_callees(ir_node *node) {
1216 assert(node && node->op == op_Call);
1217 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1218 (node->attr.call.callee_arr != NULL));
1221 int get_Call_n_callees(ir_node * node) {
1222 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1223 return ARR_LEN(node->attr.call.callee_arr);
1226 entity * get_Call_callee(ir_node * node, int pos) {
1227 assert(pos >= 0 && pos < get_Call_n_callees(node));
1228 return node->attr.call.callee_arr[pos];
1231 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1232 assert(node->op == op_Call);
1233 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1234 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1236 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1239 void remove_Call_callee_arr(ir_node * node) {
1240 assert(node->op == op_Call);
1241 node->attr.call.callee_arr = NULL;
/** Returns the ptr operand (input 0) of a CallBegin node. */
ir_node * get_CallBegin_ptr (ir_node *node) {
  assert(node->op == op_CallBegin);
  return get_irn_n(node, 0);
/** Sets the ptr operand (input 0) of a CallBegin node. */
void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
  assert(node->op == op_CallBegin);
  set_irn_n(node, 0, ptr);
/** Returns the Call node this CallBegin is associated with. */
ir_node * get_CallBegin_call (ir_node *node) {
  assert(node->op == op_CallBegin);
  return node->attr.callbegin.call;
/** Sets the Call node this CallBegin is associated with. */
void set_CallBegin_call (ir_node *node, ir_node *call) {
  assert(node->op == op_CallBegin);
  node->attr.callbegin.call = call;
1263 ir_node * get_##OP##_left(ir_node *node) { \
1264 assert(node->op == op_##OP); \
1265 return get_irn_n(node, node->op->op_index); \
1267 void set_##OP##_left(ir_node *node, ir_node *left) { \
1268 assert(node->op == op_##OP); \
1269 set_irn_n(node, node->op->op_index, left); \
1271 ir_node *get_##OP##_right(ir_node *node) { \
1272 assert(node->op == op_##OP); \
1273 return get_irn_n(node, node->op->op_index + 1); \
1275 void set_##OP##_right(ir_node *node, ir_node *right) { \
1276 assert(node->op == op_##OP); \
1277 set_irn_n(node, node->op->op_index + 1, right); \
1281 ir_node *get_##OP##_op(ir_node *node) { \
1282 assert(node->op == op_##OP); \
1283 return get_irn_n(node, node->op->op_index); \
1285 void set_##OP##_op (ir_node *node, ir_node *op) { \
1286 assert(node->op == op_##OP); \
1287 set_irn_n(node, node->op->op_index, op); \
1297 get_Quot_mem (ir_node *node) {
1298 assert (node->op == op_Quot);
1299 return get_irn_n(node, 0);
1303 set_Quot_mem (ir_node *node, ir_node *mem) {
1304 assert (node->op == op_Quot);
1305 set_irn_n(node, 0, mem);
1311 get_DivMod_mem (ir_node *node) {
1312 assert (node->op == op_DivMod);
1313 return get_irn_n(node, 0);
1317 set_DivMod_mem (ir_node *node, ir_node *mem) {
1318 assert (node->op == op_DivMod);
1319 set_irn_n(node, 0, mem);
1325 get_Div_mem (ir_node *node) {
1326 assert (node->op == op_Div);
1327 return get_irn_n(node, 0);
1331 set_Div_mem (ir_node *node, ir_node *mem) {
1332 assert (node->op == op_Div);
1333 set_irn_n(node, 0, mem);
1339 get_Mod_mem (ir_node *node) {
1340 assert (node->op == op_Mod);
1341 return get_irn_n(node, 0);
1345 set_Mod_mem (ir_node *node, ir_node *mem) {
1346 assert (node->op == op_Mod);
1347 set_irn_n(node, 0, mem);
1364 get_Cast_type (ir_node *node) {
1365 assert (node->op == op_Cast);
1366 return node->attr.cast.totype;
1370 set_Cast_type (ir_node *node, type *to_tp) {
1371 assert (node->op == op_Cast);
1372 node->attr.cast.totype = to_tp;
1376 /* Checks for upcast.
1378 * Returns true if the Cast node casts a class type to a super type.
1380 int is_Cast_upcast(ir_node *node) {
1381 type *totype = get_Cast_type(node);
1382 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1383 ir_graph *myirg = get_irn_irg(node);
1385 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1388 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1389 totype = get_pointer_points_to_type(totype);
1390 fromtype = get_pointer_points_to_type(fromtype);
1395 if (!is_Class_type(totype)) return false;
1396 return is_subclass_of(fromtype, totype);
1399 /* Checks for downcast.
1401 * Returns true if the Cast node casts a class type to a sub type.
1403 int is_Cast_downcast(ir_node *node) {
1404 type *totype = get_Cast_type(node);
1405 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1407 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1410 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1411 totype = get_pointer_points_to_type(totype);
1412 fromtype = get_pointer_points_to_type(fromtype);
1417 if (!is_Class_type(totype)) return false;
1418 return is_subclass_of(totype, fromtype);
1422 (is_unop)(const ir_node *node) {
1423 return _is_unop(node);
1427 get_unop_op (ir_node *node) {
1428 if (node->op->opar == oparity_unary)
1429 return get_irn_n(node, node->op->op_index);
1431 assert(node->op->opar == oparity_unary);
1436 set_unop_op (ir_node *node, ir_node *op) {
1437 if (node->op->opar == oparity_unary)
1438 set_irn_n(node, node->op->op_index, op);
1440 assert(node->op->opar == oparity_unary);
/* Out-of-line variant of _is_binop (parenthesized to suppress a
   same-named macro). */
(is_binop)(const ir_node *node) {
  return _is_binop(node);
/* Returns the left operand of a binary operation (at op_index). */
get_binop_left (ir_node *node) {
  if (node->op->opar == oparity_binary)
    return get_irn_n(node, node->op->op_index);

  assert(node->op->opar == oparity_binary);

/* Sets the left operand of a binary operation. */
set_binop_left (ir_node *node, ir_node *left) {
  if (node->op->opar == oparity_binary)
    set_irn_n(node, node->op->op_index, left);

  assert (node->op->opar == oparity_binary);

/* Returns the right operand of a binary operation (at op_index + 1). */
get_binop_right (ir_node *node) {
  if (node->op->opar == oparity_binary)
    return get_irn_n(node, node->op->op_index + 1);

  assert(node->op->opar == oparity_binary);

/* Sets the right operand of a binary operation. */
set_binop_right (ir_node *node, ir_node *right) {
  if (node->op->opar == oparity_binary)
    set_irn_n(node, node->op->op_index + 1, right);

  assert (node->op->opar == oparity_binary);
/* Returns true if n behaves as a Phi node.  In interprocedural view a
   Filter counts as Phi.  While the graph is still being built
   (phase_building), an arity-0 Phi (Phi0) is NOT considered a Phi. */
int is_Phi (const ir_node *n) {
  if (op == op_Filter) return get_interprocedural_view();

  return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
          (get_irn_arity(n) > 0));

/* Returns true if n is a Phi0: an arity-0 Phi that exists only while
   the graph is in phase_building. */
int is_Phi0 (const ir_node *n) {
  return ((get_irn_op(n) == op_Phi) &&
          (get_irn_arity(n) == 0) &&
          (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Returns the raw predecessor array of a Phi; [1] skips the block
   entry at in[0]. */
get_Phi_preds_arr (ir_node *node) {
  assert (node->op == op_Phi);
  return (ir_node **)&(get_irn_in(node)[1]);

/* Returns the number of data predecessors of a Phi (or Phi0). */
get_Phi_n_preds (ir_node *node) {
  assert (is_Phi(node) || is_Phi0(node));
  return (get_irn_arity(node));

void set_Phi_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Phi);

/* Returns the Phi predecessor at position pos. */
get_Phi_pred (ir_node *node, int pos) {
  assert (is_Phi(node) || is_Phi0(node));
  return get_irn_n(node, pos);

/* Sets the Phi predecessor at position pos. */
set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
  assert (is_Phi(node) || is_Phi0(node));
  set_irn_n(node, pos, pred);
/* Returns true for the two memory operations, Load and Store. */
int is_memop(ir_node *node) {
  return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));

/* Load and Store share an operand layout: mem at 0, ptr at 1.
   These accessors work uniformly on both. */
ir_node *get_memop_mem (ir_node *node) {
  assert(is_memop(node));
  return get_irn_n(node, 0);

void set_memop_mem (ir_node *node, ir_node *mem) {
  assert(is_memop(node));
  set_irn_n(node, 0, mem);

ir_node *get_memop_ptr (ir_node *node) {
  assert(is_memop(node));
  return get_irn_n(node, 1);

void set_memop_ptr (ir_node *node, ir_node *ptr) {
  assert(is_memop(node));
  set_irn_n(node, 1, ptr);
/* Load: memory operand is predecessor 0. */
get_Load_mem (ir_node *node) {
  assert (node->op == op_Load);
  return get_irn_n(node, 0);

set_Load_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Load);
  set_irn_n(node, 0, mem);

/* Load: address operand is predecessor 1. */
get_Load_ptr (ir_node *node) {
  assert (node->op == op_Load);
  return get_irn_n(node, 1);

set_Load_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Load);
  set_irn_n(node, 1, ptr);

/* Load: mode of the value being loaded (node attribute, not an
   operand). */
get_Load_mode (ir_node *node) {
  assert (node->op == op_Load);
  return node->attr.load.load_mode;

set_Load_mode (ir_node *node, ir_mode *mode) {
  assert (node->op == op_Load);
  node->attr.load.load_mode = mode;

/* Load: volatility flag stored in the node attribute. */
get_Load_volatility (ir_node *node) {
  assert (node->op == op_Load);
  return node->attr.load.volatility;

set_Load_volatility (ir_node *node, ent_volatility volatility) {
  assert (node->op == op_Load);
  node->attr.load.volatility = volatility;
/* Store: memory operand is predecessor 0. */
get_Store_mem (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 0);

set_Store_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Store);
  set_irn_n(node, 0, mem);

/* Store: address operand is predecessor 1. */
get_Store_ptr (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 1);

set_Store_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Store);
  set_irn_n(node, 1, ptr);

/* Store: the value to store is predecessor 2. */
get_Store_value (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 2);

set_Store_value (ir_node *node, ir_node *value) {
  assert (node->op == op_Store);
  set_irn_n(node, 2, value);

/* Store: volatility flag stored in the node attribute. */
get_Store_volatility (ir_node *node) {
  assert (node->op == op_Store);
  return node->attr.store.volatility;

set_Store_volatility (ir_node *node, ent_volatility volatility) {
  assert (node->op == op_Store);
  node->attr.store.volatility = volatility;
/* Alloc: memory operand is predecessor 0. */
get_Alloc_mem (ir_node *node) {
  assert (node->op == op_Alloc);
  return get_irn_n(node, 0);

set_Alloc_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Alloc);
  set_irn_n(node, 0, mem);

/* Alloc: size operand is predecessor 1. */
get_Alloc_size (ir_node *node) {
  assert (node->op == op_Alloc);
  return get_irn_n(node, 1);

set_Alloc_size (ir_node *node, ir_node *size) {
  assert (node->op == op_Alloc);
  set_irn_n(node, 1, size);

/* Alloc: allocated type.  The getter normalizes the stored type
   through skip_tid and caches the result back into the attribute. */
get_Alloc_type (ir_node *node) {
  assert (node->op == op_Alloc);
  return node->attr.a.type = skip_tid(node->attr.a.type);

set_Alloc_type (ir_node *node, type *tp) {
  assert (node->op == op_Alloc);
  node->attr.a.type = tp;

/* Alloc: where the allocation happens (stack or heap attribute). */
get_Alloc_where (ir_node *node) {
  assert (node->op == op_Alloc);
  return node->attr.a.where;

set_Alloc_where (ir_node *node, where_alloc where) {
  assert (node->op == op_Alloc);
  node->attr.a.where = where;
/* Free: memory operand is predecessor 0. */
get_Free_mem (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 0);

set_Free_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Free);
  set_irn_n(node, 0, mem);

/* Free: pointer to the freed object is predecessor 1. */
get_Free_ptr (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 1);

set_Free_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Free);
  set_irn_n(node, 1, ptr);

/* Free: size operand is predecessor 2. */
get_Free_size (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 2);

set_Free_size (ir_node *node, ir_node *size) {
  assert (node->op == op_Free);
  set_irn_n(node, 2, size);

/* Free: freed type; getter normalizes via skip_tid and caches the
   result, same pattern as get_Alloc_type. */
get_Free_type (ir_node *node) {
  assert (node->op == op_Free);
  return node->attr.f.type = skip_tid(node->attr.f.type);

set_Free_type (ir_node *node, type *tp) {
  assert (node->op == op_Free);
  node->attr.f.type = tp;

/* Free: where the freed object was allocated. */
get_Free_where (ir_node *node) {
  assert (node->op == op_Free);
  return node->attr.f.where;

set_Free_where (ir_node *node, where_alloc where) {
  assert (node->op == op_Free);
  node->attr.f.where = where;
/* Sync: raw predecessor array; [1] skips the block entry at in[0]. */
get_Sync_preds_arr (ir_node *node) {
  assert (node->op == op_Sync);
  return (ir_node **)&(get_irn_in(node)[1]);

/* Sync: number of predecessors. */
get_Sync_n_preds (ir_node *node) {
  assert (node->op == op_Sync);
  return (get_irn_arity(node));

set_Sync_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Sync);

/* Sync: predecessor at position pos. */
get_Sync_pred (ir_node *node, int pos) {
  assert (node->op == op_Sync);
  return get_irn_n(node, pos);

set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
  assert (node->op == op_Sync);
  set_irn_n(node, pos, pred);
/* Tries to determine the type a Proj node projects out, by inspecting
   its predecessor: method param/result types for Proj-Proj-Start and
   Proj-Proj-Call chains, entity types for Loads through a Sel. */
type *get_Proj_type(ir_node *n)
  ir_node *pred = get_Proj_pred(n);

  switch (get_irn_opcode(pred)) {

  /* Deal with Start / Call here: we need to know the Proj Nr. */
  assert(get_irn_mode(pred) == mode_T);
  pred_pred = get_Proj_pred(pred);
  if (get_irn_op(pred_pred) == op_Start) {
    /* projection out of the argument tuple: a method parameter type */
    type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
    tp = get_method_param_type(mtp, get_Proj_proj(n));
  } else if (get_irn_op(pred_pred) == op_Call) {
    /* projection out of the result tuple: a method result type */
    type *mtp = get_Call_type(pred_pred);
    tp = get_method_res_type(mtp, get_Proj_proj(n));

  case iro_Start: break;
  case iro_Call: break;

  /* Load: if the address comes from a Sel, use the selected
     entity's type. */
  ir_node *a = get_Load_ptr(pred);
  if (get_irn_op(a) == op_Sel)
    tp = get_entity_type(get_Sel_entity(a));
/* Proj: the projected node is predecessor 0.  Note is_Proj also
   accepts Filter nodes outside interprocedural view. */
get_Proj_pred (const ir_node *node) {
  assert (is_Proj(node));
  return get_irn_n(node, 0);

set_Proj_pred (ir_node *node, ir_node *pred) {
  assert (is_Proj(node));
  set_irn_n(node, 0, pred);

/* Returns the projection number; reads the matching attribute for
   real Proj nodes and for Filters acting as Projs. */
get_Proj_proj (const ir_node *node) {
  assert (is_Proj(node));
  if (get_irn_opcode(node) == iro_Proj) {
    return node->attr.proj;

  assert(get_irn_opcode(node) == iro_Filter);
  return node->attr.filter.proj;

/* Sets the projection number.  NOTE(review): unlike the getter, this
   only accepts real Proj nodes, not Filters. */
set_Proj_proj (ir_node *node, long proj) {
  assert (node->op == op_Proj);
  node->attr.proj = proj;
/* Tuple: raw predecessor array; [1] skips the block entry at in[0]. */
get_Tuple_preds_arr (ir_node *node) {
  assert (node->op == op_Tuple);
  return (ir_node **)&(get_irn_in(node)[1]);

/* Tuple: number of predecessors. */
get_Tuple_n_preds (ir_node *node) {
  assert (node->op == op_Tuple);
  return (get_irn_arity(node));

set_Tuple_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Tuple);

/* Tuple: predecessor at position pos. */
get_Tuple_pred (ir_node *node, int pos) {
  assert (node->op == op_Tuple);
  return get_irn_n(node, pos);

set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
  assert (node->op == op_Tuple);
  set_irn_n(node, pos, pred);
/* Id: the forwarded node is predecessor 0. */
get_Id_pred (ir_node *node) {
  assert (node->op == op_Id);
  return get_irn_n(node, 0);

set_Id_pred (ir_node *node, ir_node *pred) {
  assert (node->op == op_Id);
  set_irn_n(node, 0, pred);
/* Confirm: the confirmed value is predecessor 0. */
ir_node *get_Confirm_value (ir_node *node) {
  assert (node->op == op_Confirm);
  return get_irn_n(node, 0);

void set_Confirm_value (ir_node *node, ir_node *value) {
  assert (node->op == op_Confirm);
  set_irn_n(node, 0, value);

/* Confirm: the bound the value is compared against is predecessor 1. */
ir_node *get_Confirm_bound (ir_node *node) {
  assert (node->op == op_Confirm);
  return get_irn_n(node, 1);
1914 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1915 assert (node->op == op_Confirm);
1916 set_irn_n(node, 0, bound);
/* Confirm: the comparison relation (pn_Cmp) between value and bound,
   stored as a node attribute. */
pn_Cmp get_Confirm_cmp (ir_node *node) {
  assert (node->op == op_Confirm);
  return node->attr.confirm_cmp;

void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
  assert (node->op == op_Confirm);
  node->attr.confirm_cmp = cmp;
/* Filter: predecessor accessor (intraprocedural view). */
get_Filter_pred (ir_node *node) {
  assert(node->op == op_Filter);

set_Filter_pred (ir_node *node, ir_node *pred) {
  assert(node->op == op_Filter);

/* Filter: the projection number it represents when acting as a Proj. */
get_Filter_proj(ir_node *node) {
  assert(node->op == op_Filter);
  return node->attr.filter.proj;

set_Filter_proj (ir_node *node, long proj) {
  assert(node->op == op_Filter);
  node->attr.filter.proj = proj;
/* Don't use get_irn_arity, get_irn_n in implementation as access
   shall work independent of view!!! */
/* Replaces the interprocedural (call-graph) predecessor array of a
   Filter.  Reallocates in_cg/backedge on the graph obstack when the
   arity changed; slot 0 mirrors the node's block entry. */
void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Filter);
  if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
    node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
    memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
    node->attr.filter.in_cg[0] = node->in[0];

  memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);

/* Sets a single interprocedural predecessor (pos is 0-based;
   +1 skips the block entry). */
void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
  assert(node->op == op_Filter && node->attr.filter.in_cg &&
         0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
  node->attr.filter.in_cg[pos + 1] = pred;

/* Number of interprocedural predecessors (array length minus the
   block entry). */
int get_Filter_n_cg_preds(ir_node *node) {
  assert(node->op == op_Filter && node->attr.filter.in_cg);
  return (ARR_LEN(node->attr.filter.in_cg) - 1);

/* Interprocedural predecessor at pos. */
ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
  assert(node->op == op_Filter && node->attr.filter.in_cg &&
  arity = ARR_LEN(node->attr.filter.in_cg);
  assert(pos < arity - 1);
  return node->attr.filter.in_cg[pos + 1];
/* Mux: selector operand. */
ir_node *get_Mux_sel (ir_node *node) {
  assert(node->op == op_Mux);

void set_Mux_sel (ir_node *node, ir_node *sel) {
  assert(node->op == op_Mux);

/* Mux: the value chosen when the selector is false. */
ir_node *get_Mux_false (ir_node *node) {
  assert(node->op == op_Mux);

/* NOTE(review): setters write node->in directly (false at in[2],
   true at in[3]) instead of going through set_irn_n. */
void set_Mux_false (ir_node *node, ir_node *ir_false) {
  assert(node->op == op_Mux);
  node->in[2] = ir_false;

/* Mux: the value chosen when the selector is true. */
ir_node *get_Mux_true (ir_node *node) {
  assert(node->op == op_Mux);

void set_Mux_true (ir_node *node, ir_node *ir_true) {
  assert(node->op == op_Mux);
  node->in[3] = ir_true;
/* Returns the graph a node belongs to, by walking to its Block and
   reading the block attribute. */
get_irn_irg(const ir_node *node) {
  if (! is_Block(node))
    node = get_nodes_block(node);
  if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
    node = get_nodes_block(node);
  assert(get_irn_op(node) == op_Block);
  return node->attr.block.irg;
2020 /*----------------------------------------------------------------*/
2021 /* Auxiliary routines */
2022 /*----------------------------------------------------------------*/
/* Returns the predecessor if node is a Proj; tolerates NULL input. */
skip_Proj (ir_node *node) {
  /* don't assert node !!! */
  if (node && is_Proj(node)) {
    return get_Proj_pred(node);
/* Resolves Proj(Tuple) chains to the selected Tuple entry, recursing
   through nested Proj-of-Proj combinations.  A no-op when
   normalization is switched off. */
skip_Tuple (ir_node *node) {

  if (!get_opt_normalize()) return node;

  node = skip_Id(node);
  if (get_irn_op(node) == op_Proj) {
    pred = skip_Id(get_Proj_pred(node));
    if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
      pred = skip_Id(skip_Tuple(pred));
    else if (get_irn_op(pred) == op_Tuple)
      return get_Tuple_pred(pred, get_Proj_proj(node));
/* returns operand of node if node is a Cast (otherwise the node
   itself; tolerates NULL) */
ir_node *skip_Cast (ir_node *node) {
  if (node && get_irn_op(node) == op_Cast)
    return get_Cast_op(node);
/* returns operand of node if node is a Confirm (otherwise the node
   itself; tolerates NULL) */
ir_node *skip_Confirm (ir_node *node) {
  if (node && get_irn_op(node) == op_Confirm)
    return get_Confirm_value(node);
/* skip all high-level ops: returns predecessor 0 of any op flagged
   as high-level (tolerates NULL) */
ir_node *skip_HighLevel(ir_node *node) {
  if (node && is_op_highlevel(get_irn_op(node)))
    return get_irn_n(node, 0);
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops. */
skip_Id (ir_node *node) {
  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != node->in[0+1])) {
    ir_node *rem_pred = node->in[0+1];

    assert (get_irn_arity (node) > 0);

    /* Temporarily turn the node into a self loop so the recursion
       below terminates on cycles. */
    node->in[0+1] = node;
    res = skip_Id(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    /* Path compression: point directly at the chain end. */
    node->in[0+1] = res;
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops.
   NOTE(review): second variant of skip_Id; presumably one of the two
   is conditionally compiled — confirm against the full file. */
skip_Id (ir_node *node) {

  /* don't assert node !!! */

  if (!node || (node->op != op_Id)) return node;

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  pred = node->in[0+1];

  if (pred->op != op_Id) return pred;

  if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
    ir_node *rem_pred, *res;

    if (pred->op != op_Id) return pred; /* shortcut */

    assert (get_irn_arity (node) > 0);

    node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
    res = skip_Id(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Out-of-line variants of the node-kind predicates; parenthesized
   names keep same-named macros from expanding here. */
(is_Bad)(const ir_node *node) {
  return _is_Bad(node);

(is_Const)(const ir_node *node) {
  return _is_Const(node);

(is_no_Block)(const ir_node *node) {
  return _is_no_Block(node);

(is_Block)(const ir_node *node) {
  return _is_Block(node);

/* returns true if node is a Unknown node. */
(is_Unknown)(const ir_node *node) {
  return _is_Unknown(node);

/* A node acts as a Proj if it is a Proj, or if it is a Filter and we
   are NOT in interprocedural view. */
is_Proj (const ir_node *node) {
  return node->op == op_Proj
    || (!get_interprocedural_view() && node->op == op_Filter);
/* Returns true if the operation manipulates control flow. */
is_cfop(const ir_node *node) {
  return is_cfopcode(get_irn_op(node));

/* Returns true if the operation manipulates interprocedural control flow:
   CallBegin, EndReg, EndExcept */
int is_ip_cfop(const ir_node *node) {
  return is_ip_cfopcode(get_irn_op(node));

/* Returns true if the operation can change the control flow because
   it may raise an exception (delegates to the op flag). */
is_fragile_op(const ir_node *node) {
  return is_op_fragile(get_irn_op(node));

/* Returns the memory operand of fragile operations. */
ir_node *get_fragile_op_mem(ir_node *node) {
  assert(node && is_fragile_op(node));

  switch (get_irn_opcode (node)) {
    /* for the visible opcodes mem is predecessor 0 */
    return get_irn_n(node, 0);

    assert(0 && "should not be reached");

/* Returns true if the operation is a forking control flow operation. */
is_forking_op(const ir_node *node) {
  return is_op_forking(get_irn_op(node));
/* Out-of-line variant of _get_irn_type. */
type *(get_irn_type)(ir_node *node) {
  return _get_irn_type(node);

/** the get_type operation must be always implemented */
static type *get_Null_type(ir_node *n) {

/* set the get_type operation: install a per-op callback; ops without
   a real type accessor fall back to get_Null_type */
ir_op *firm_set_default_get_type(ir_op *op)
  case iro_Const: op->get_type = get_Const_type; break;
  case iro_SymConst: op->get_type = get_SymConst_value_type; break;
  case iro_Cast: op->get_type = get_Cast_type; break;
  case iro_Proj: op->get_type = get_Proj_type; break;
  default: op->get_type = get_Null_type; break;
#ifdef DEBUG_libfirm
/* Debug helper: prints a node, its block, and all predecessors to
   stdout.  Compiled empty in non-debug builds. */
void dump_irn (ir_node *n) {
  int i, arity = get_irn_arity(n);
  printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
  /* index -1 yields the node's block */
  ir_node *pred = get_irn_n(n, -1);
  printf("  block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
         get_irn_node_nr(pred), (void *)pred);

  printf("  preds: \n");
  for (i = 0; i < arity; ++i) {
    ir_node *pred = get_irn_n(n, i);
    printf("    %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
           get_irn_node_nr(pred), (void *)pred);
#else  /* DEBUG_libfirm */
void dump_irn (ir_node *n) {}
#endif /* DEBUG_libfirm */