3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
* Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
34 /* some constants fixing the positions of nodes predecessors
36 #define CALL_PARAM_OFFSET 2
37 #define FUNCCALL_PARAM_OFFSET 1
38 #define SEL_INDEX_OFFSET 2
39 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
40 #define END_KEEPALIVE_OFFSET 0
42 static const char *pnc_name_arr [] = {
43 "False", "Eq", "Lt", "Le",
44 "Gt", "Ge", "Lg", "Leg", "Uo",
45 "Ue", "Ul", "Ule", "Ug", "Uge",
50 * returns the pnc name from an pnc constant
52 const char *get_pnc_string(int pnc) {
53 return pnc_name_arr[pnc];
57 * Calculates the negated (Complement(R)) pnc condition.
59 int get_negated_pnc(int pnc, ir_mode *mode) {
62 /* do NOT add the Uo bit for non-floating point values */
63 if (! mode_is_float(mode))
69 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
71 get_inversed_pnc(int pnc) {
72 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
73 int lesser = pnc & pn_Cmp_Lt;
74 int greater = pnc & pn_Cmp_Gt;
76 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
81 const char *pns_name_arr [] = {
82 "initial_exec", "global_store",
83 "frame_base", "globals", "args"
86 const char *symconst_name_arr [] = {
87 "type_tag", "size", "addr_name", "addr_ent"
91 * Indicates, whether additional data can be registered to ir nodes.
92 * If set to 1, this is not possible anymore.
94 static int forbid_new_data = 0;
97 * The amount of additional space for custom data to be allocated upon
98 * creating a new node.
100 unsigned firm_add_node_size = 0;
103 /* register new space for every node */
104 unsigned register_additional_node_data(unsigned size) {
105 assert(!forbid_new_data && "Too late to register additional node data");
110 return firm_add_node_size += size;
116 /* Forbid the addition of new data to an ir node. */
121 * irnode constructor.
122 * Create a new irnode in irg, with an op, mode, arity and
123 * some incoming irnodes.
124 * If arity is negative, a node with a dynamic array is created.
127 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
128 int arity, ir_node **in)
131 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
134 assert(irg && op && mode);
135 p = obstack_alloc (irg->obst, node_size);
136 memset(p, 0, node_size);
137 res = (ir_node *) (p + firm_add_node_size);
139 res->kind = k_ir_node;
145 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
147 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
148 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
152 set_irn_dbg_info(res, db);
156 res->node_nr = get_irp_new_node_nr();
159 #if FIRM_EDGES_INPLACE
162 int not_a_block = is_no_Block(res);
164 INIT_LIST_HEAD(&res->edge_info.outs_head);
166 INIT_LIST_HEAD(&res->attr.block.succ_head);
169 for (i = 0, n = arity + not_a_block; i < n; ++i)
170 edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
174 hook_new_node(irg, res);
179 /*-- getting some parameters from ir_nodes --*/
182 (is_ir_node)(const void *thing) {
183 return _is_ir_node(thing);
187 (get_irn_intra_arity)(const ir_node *node) {
188 return _get_irn_intra_arity(node);
192 (get_irn_inter_arity)(const ir_node *node) {
193 return _get_irn_inter_arity(node);
196 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
199 (get_irn_arity)(const ir_node *node) {
200 return _get_irn_arity(node);
203 /* Returns the array with ins. This array is shifted with respect to the
204 array accessed by get_irn_n: The block operand is at position 0 not -1.
205 (@@@ This should be changed.)
206 The order of the predecessors in this array is not guaranteed, except that
207 lists of operands as predecessors of Block or arguments of a Call are
210 get_irn_in (const ir_node *node) {
212 if (get_interprocedural_view()) { /* handle Filter and Block specially */
213 if (get_irn_opcode(node) == iro_Filter) {
214 assert(node->attr.filter.in_cg);
215 return node->attr.filter.in_cg;
216 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
217 return node->attr.block.in_cg;
219 /* else fall through */
225 set_irn_in (ir_node *node, int arity, ir_node **in) {
228 if (get_interprocedural_view()) { /* handle Filter and Block specially */
229 if (get_irn_opcode(node) == iro_Filter) {
230 assert(node->attr.filter.in_cg);
231 arr = &node->attr.filter.in_cg;
232 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
233 arr = &node->attr.block.in_cg;
240 if (arity != ARR_LEN(*arr) - 1) {
241 ir_node * block = (*arr)[0];
242 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
245 fix_backedges(current_ir_graph->obst, node);
246 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
250 (get_irn_intra_n)(const ir_node *node, int n) {
251 return _get_irn_intra_n (node, n);
255 (get_irn_inter_n)(const ir_node *node, int n) {
256 return _get_irn_inter_n (node, n);
259 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
262 (get_irn_n)(const ir_node *node, int n) {
263 return _get_irn_n(node, n);
267 set_irn_n (ir_node *node, int n, ir_node *in) {
268 assert(node && node->kind == k_ir_node);
270 assert(n < get_irn_arity(node));
271 assert(in && in->kind == k_ir_node);
273 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
274 /* Change block pred in both views! */
275 node->in[n + 1] = in;
276 assert(node->attr.filter.in_cg);
277 node->attr.filter.in_cg[n + 1] = in;
280 if (get_interprocedural_view()) { /* handle Filter and Block specially */
281 if (get_irn_opcode(node) == iro_Filter) {
282 assert(node->attr.filter.in_cg);
283 node->attr.filter.in_cg[n + 1] = in;
285 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
286 node->attr.block.in_cg[n + 1] = in;
289 /* else fall through */
293 hook_set_irn_n(node, n, in, node->in[n + 1]);
295 /* Here, we rely on src and tgt being in the current ir graph */
296 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
298 node->in[n + 1] = in;
302 (get_irn_mode)(const ir_node *node) {
303 return _get_irn_mode(node);
307 (set_irn_mode)(ir_node *node, ir_mode *mode)
309 _set_irn_mode(node, mode);
313 get_irn_modecode (const ir_node *node)
316 return node->mode->code;
319 /** Gets the string representation of the mode .*/
321 get_irn_modename (const ir_node *node)
324 return get_mode_name(node->mode);
328 get_irn_modeident (const ir_node *node)
331 return get_mode_ident(node->mode);
335 (get_irn_op)(const ir_node *node)
337 return _get_irn_op(node);
340 /* should be private to the library: */
342 set_irn_op (ir_node *node, ir_op *op)
349 (get_irn_opcode)(const ir_node *node)
351 return _get_irn_opcode(node);
355 get_irn_opname (const ir_node *node)
358 if ((get_irn_op((ir_node *)node) == op_Phi) &&
359 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
360 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
361 return get_id_str(node->op->name);
365 get_irn_opident (const ir_node *node)
368 return node->op->name;
372 (get_irn_visited)(const ir_node *node)
374 return _get_irn_visited(node);
378 (set_irn_visited)(ir_node *node, unsigned long visited)
380 _set_irn_visited(node, visited);
384 (mark_irn_visited)(ir_node *node) {
385 _mark_irn_visited(node);
389 (irn_not_visited)(const ir_node *node) {
390 return _irn_not_visited(node);
394 (irn_visited)(const ir_node *node) {
395 return _irn_visited(node);
399 (set_irn_link)(ir_node *node, void *link) {
400 _set_irn_link(node, link);
404 (get_irn_link)(const ir_node *node) {
405 return _get_irn_link(node);
409 (get_irn_pinned)(const ir_node *node) {
410 return _get_irn_pinned(node);
414 (is_irn_pinned_in_irg) (const ir_node *node) {
415 return _is_irn_pinned_in_irg(node);
418 void set_irn_pinned(ir_node *node, op_pin_state state) {
419 /* due to optimization an opt may be turned into a Tuple */
420 if (get_irn_op(node) == op_Tuple)
423 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
424 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
426 node->attr.except.pin_state = state;
429 #ifdef DO_HEAPANALYSIS
430 /* Access the abstract interpretation information of a node.
431 Returns NULL if no such information is available. */
432 struct abstval *get_irn_abst_value(ir_node *n) {
435 /* Set the abstract interpretation information of a node. */
436 void set_irn_abst_value(ir_node *n, struct abstval *os) {
439 struct section *firm_get_irn_section(ir_node *n) {
442 void firm_set_irn_section(ir_node *n, struct section *s) {
446 /* Dummies needed for firmjni. */
447 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
448 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
449 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
450 void firm_set_irn_section(ir_node *n, struct section *s) {}
451 #endif /* DO_HEAPANALYSIS */
454 /* Outputs a unique number for this node */
456 get_irn_node_nr(const ir_node *node) {
459 return node->node_nr;
466 get_irn_const_attr (ir_node *node)
468 assert (node->op == op_Const);
469 return node->attr.con;
473 get_irn_proj_attr (ir_node *node)
475 assert (node->op == op_Proj);
476 return node->attr.proj;
480 get_irn_alloc_attr (ir_node *node)
482 assert (node->op == op_Alloc);
487 get_irn_free_attr (ir_node *node)
489 assert (node->op == op_Free);
494 get_irn_symconst_attr (ir_node *node)
496 assert (node->op == op_SymConst);
501 get_irn_call_attr (ir_node *node)
503 assert (node->op == op_Call);
504 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
508 get_irn_sel_attr (ir_node *node)
510 assert (node->op == op_Sel);
515 get_irn_phi_attr (ir_node *node)
517 assert (node->op == op_Phi);
518 return node->attr.phi0_pos;
522 get_irn_block_attr (ir_node *node)
524 assert (node->op == op_Block);
525 return node->attr.block;
529 get_irn_load_attr (ir_node *node)
531 assert (node->op == op_Load);
532 return node->attr.load;
536 get_irn_store_attr (ir_node *node)
538 assert (node->op == op_Store);
539 return node->attr.store;
543 get_irn_except_attr (ir_node *node)
545 assert (node->op == op_Div || node->op == op_Quot ||
546 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
547 return node->attr.except;
550 /** manipulate fields of individual nodes **/
552 /* this works for all except Block */
554 get_nodes_block (const ir_node *node) {
555 assert (!(node->op == op_Block));
556 assert (is_irn_pinned_in_irg(node) && "block info may be incorrect");
557 return get_irn_n(node, -1);
561 set_nodes_block (ir_node *node, ir_node *block) {
562 assert (!(node->op == op_Block));
563 set_irn_n(node, -1, block);
566 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
567 * from Start. If so returns frame type, else Null. */
568 type *is_frame_pointer(ir_node *n) {
569 if ((get_irn_op(n) == op_Proj) &&
570 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
571 ir_node *start = get_Proj_pred(n);
572 if (get_irn_op(start) == op_Start) {
573 return get_irg_frame_type(get_irn_irg(start));
579 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
580 * from Start. If so returns global type, else Null. */
581 type *is_globals_pointer(ir_node *n) {
582 if ((get_irn_op(n) == op_Proj) &&
583 (get_Proj_proj(n) == pn_Start_P_globals)) {
584 ir_node *start = get_Proj_pred(n);
585 if (get_irn_op(start) == op_Start) {
586 return get_glob_type();
592 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
593 * from Start. If so returns 1, else 0. */
594 int is_value_arg_pointer(ir_node *n) {
595 if ((get_irn_op(n) == op_Proj) &&
596 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
597 (get_irn_op(get_Proj_pred(n)) == op_Start))
602 /* Returns an array with the predecessors of the Block. Depending on
603 the implementation of the graph data structure this can be a copy of
604 the internal representation of predecessors as well as the internal
605 array itself. Therefore writing to this array might obstruct the ir. */
607 get_Block_cfgpred_arr (ir_node *node)
609 assert ((node->op == op_Block));
610 return (ir_node **)&(get_irn_in(node)[1]);
614 (get_Block_n_cfgpreds)(ir_node *node) {
615 return get_Block_n_cfgpreds(node);
619 (get_Block_cfgpred)(ir_node *node, int pos) {
620 return get_Block_cfgpred(node, pos);
624 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
625 assert (node->op == op_Block);
626 set_irn_n(node, pos, pred);
630 (get_Block_cfgpred_block)(ir_node *node, int pos) {
631 return _get_Block_cfgpred_block(node, pos);
635 get_Block_matured (ir_node *node) {
636 assert (node->op == op_Block);
637 return node->attr.block.matured;
641 set_Block_matured (ir_node *node, bool matured) {
642 assert (node->op == op_Block);
643 node->attr.block.matured = matured;
647 (get_Block_block_visited)(ir_node *node) {
648 return _get_Block_block_visited(node);
652 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
653 _set_Block_block_visited(node, visit);
656 /* For this current_ir_graph must be set. */
658 (mark_Block_block_visited)(ir_node *node) {
659 _mark_Block_block_visited(node);
663 (Block_not_block_visited)(ir_node *node) {
664 return _Block_not_block_visited(node);
668 get_Block_graph_arr (ir_node *node, int pos) {
669 assert (node->op == op_Block);
670 return node->attr.block.graph_arr[pos+1];
674 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
675 assert (node->op == op_Block);
676 node->attr.block.graph_arr[pos+1] = value;
679 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
680 assert(node->op == op_Block);
681 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
682 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
683 node->attr.block.in_cg[0] = NULL;
684 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
686 /* Fix backedge array. fix_backedges operates depending on
687 interprocedural_view. */
688 int ipv = get_interprocedural_view();
689 set_interprocedural_view(true);
690 fix_backedges(current_ir_graph->obst, node);
691 set_interprocedural_view(ipv);
694 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
697 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
698 assert(node->op == op_Block &&
699 node->attr.block.in_cg &&
700 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
701 node->attr.block.in_cg[pos + 1] = pred;
704 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
705 assert(node->op == op_Block);
706 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
709 int get_Block_cg_n_cfgpreds(ir_node * node) {
710 assert(node->op == op_Block);
711 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
714 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
715 assert(node->op == op_Block && node->attr.block.in_cg);
716 return node->attr.block.in_cg[pos + 1];
719 void remove_Block_cg_cfgpred_arr(ir_node * node) {
720 assert(node->op == op_Block);
721 node->attr.block.in_cg = NULL;
724 ir_node *(set_Block_dead)(ir_node *block) {
725 return _set_Block_dead(block);
728 int (is_Block_dead)(const ir_node *block) {
729 return _is_Block_dead(block);
732 ir_extblk *get_Block_extbb(const ir_node *block) {
733 assert(is_Block(block));
734 return block->attr.block.extblk;
737 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
738 assert(is_Block(block));
739 block->attr.block.extblk = extblk;
743 set_Start_irg(ir_node *node, ir_graph *irg) {
744 assert(node->op == op_Start);
745 assert(is_ir_graph(irg));
746 assert(0 && " Why set irg? -- use set_irn_irg");
750 get_End_n_keepalives(ir_node *end) {
751 assert (end->op == op_End);
752 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
756 get_End_keepalive(ir_node *end, int pos) {
757 assert (end->op == op_End);
758 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
762 add_End_keepalive (ir_node *end, ir_node *ka) {
763 assert (end->op == op_End);
764 ARR_APP1 (ir_node *, end->in, ka);
768 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
769 assert (end->op == op_End);
770 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
774 free_End (ir_node *end) {
775 assert (end->op == op_End);
777 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
778 end->in = NULL; /* @@@ make sure we get an error if we use the
779 in array afterwards ... */
782 /* Return the target address of an IJmp */
783 ir_node *get_IJmp_target(ir_node *ijmp) {
784 assert(ijmp->op == op_IJmp);
785 return get_irn_n(ijmp, 0);
788 /** Sets the target address of an IJmp */
789 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
790 assert(ijmp->op == op_IJmp);
791 set_irn_n(ijmp, 0, tgt);
795 > Implementing the case construct (which is where the constant Proj node is
796 > important) involves far more than simply determining the constant values.
797 > We could argue that this is more properly a function of the translator from
798 > Firm to the target machine. That could be done if there was some way of
799 > projecting "default" out of the Cond node.
800 I know it's complicated.
Basically there are two problems:
802 - determining the gaps between the projs
803 - determining the biggest case constant to know the proj number for
805 I see several solutions:
806 1. Introduce a ProjDefault node. Solves both problems.
807 This means to extend all optimizations executed during construction.
808 2. Give the Cond node for switch two flavors:
809 a) there are no gaps in the projs (existing flavor)
810 b) gaps may exist, default proj is still the Proj with the largest
811 projection number. This covers also the gaps.
812 3. Fix the semantic of the Cond to that of 2b)
814 Solution 2 seems to be the best:
815 Computing the gaps in the Firm representation is not too hard, i.e.,
816 libFIRM can implement a routine that transforms between the two
817 flavours. This is also possible for 1) but 2) does not require to
818 change any existing optimization.
819 Further it should be far simpler to determine the biggest constant than
821 I don't want to choose 3) as 2a) seems to have advantages for
822 dataflow analysis and 3) does not allow to convert the representation to
826 get_Cond_selector (ir_node *node) {
827 assert (node->op == op_Cond);
828 return get_irn_n(node, 0);
832 set_Cond_selector (ir_node *node, ir_node *selector) {
833 assert (node->op == op_Cond);
834 set_irn_n(node, 0, selector);
838 get_Cond_kind (ir_node *node) {
839 assert (node->op == op_Cond);
840 return node->attr.c.kind;
844 set_Cond_kind (ir_node *node, cond_kind kind) {
845 assert (node->op == op_Cond);
846 node->attr.c.kind = kind;
850 get_Cond_defaultProj (ir_node *node) {
851 assert (node->op == op_Cond);
852 return node->attr.c.default_proj;
856 get_Return_mem (ir_node *node) {
857 assert (node->op == op_Return);
858 return get_irn_n(node, 0);
862 set_Return_mem (ir_node *node, ir_node *mem) {
863 assert (node->op == op_Return);
864 set_irn_n(node, 0, mem);
868 get_Return_n_ress (ir_node *node) {
869 assert (node->op == op_Return);
870 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
874 get_Return_res_arr (ir_node *node)
876 assert ((node->op == op_Return));
877 if (get_Return_n_ress(node) > 0)
878 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
885 set_Return_n_res (ir_node *node, int results) {
886 assert (node->op == op_Return);
891 get_Return_res (ir_node *node, int pos) {
892 assert (node->op == op_Return);
893 assert (get_Return_n_ress(node) > pos);
894 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
898 set_Return_res (ir_node *node, int pos, ir_node *res){
899 assert (node->op == op_Return);
900 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
904 get_Raise_mem (ir_node *node) {
905 assert (node->op == op_Raise);
906 return get_irn_n(node, 0);
910 set_Raise_mem (ir_node *node, ir_node *mem) {
911 assert (node->op == op_Raise);
912 set_irn_n(node, 0, mem);
916 get_Raise_exo_ptr (ir_node *node) {
917 assert (node->op == op_Raise);
918 return get_irn_n(node, 1);
922 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
923 assert (node->op == op_Raise);
924 set_irn_n(node, 1, exo_ptr);
927 tarval *(get_Const_tarval)(ir_node *node) {
928 return _get_Const_tarval(node);
932 set_Const_tarval (ir_node *node, tarval *con) {
933 assert (node->op == op_Const);
934 node->attr.con.tv = con;
937 cnst_classify_t (classify_Const)(ir_node *node)
939 return _classify_Const(node);
943 /* The source language type. Must be an atomic type. Mode of type must
944 be mode of node. For tarvals from entities type must be pointer to
947 get_Const_type (ir_node *node) {
948 assert (node->op == op_Const);
949 return node->attr.con.tp;
953 set_Const_type (ir_node *node, type *tp) {
954 assert (node->op == op_Const);
955 if (tp != firm_unknown_type) {
956 assert (is_atomic_type(tp));
957 assert (get_type_mode(tp) == get_irn_mode(node));
959 node->attr.con.tp = tp;
964 get_SymConst_kind (const ir_node *node) {
965 assert (node->op == op_SymConst);
966 return node->attr.i.num;
970 set_SymConst_kind (ir_node *node, symconst_kind num) {
971 assert (node->op == op_SymConst);
972 node->attr.i.num = num;
976 get_SymConst_type (ir_node *node) {
977 assert ( (node->op == op_SymConst)
978 && ( get_SymConst_kind(node) == symconst_type_tag
979 || get_SymConst_kind(node) == symconst_size));
980 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
984 set_SymConst_type (ir_node *node, type *tp) {
985 assert ( (node->op == op_SymConst)
986 && ( get_SymConst_kind(node) == symconst_type_tag
987 || get_SymConst_kind(node) == symconst_size));
988 node->attr.i.sym.type_p = tp;
992 get_SymConst_name (ir_node *node) {
993 assert ( (node->op == op_SymConst)
994 && (get_SymConst_kind(node) == symconst_addr_name));
995 return node->attr.i.sym.ident_p;
999 set_SymConst_name (ir_node *node, ident *name) {
1000 assert ( (node->op == op_SymConst)
1001 && (get_SymConst_kind(node) == symconst_addr_name));
1002 node->attr.i.sym.ident_p = name;
1006 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1007 entity *get_SymConst_entity (ir_node *node) {
1008 assert ( (node->op == op_SymConst)
1009 && (get_SymConst_kind (node) == symconst_addr_ent));
1010 return node->attr.i.sym.entity_p;
1013 void set_SymConst_entity (ir_node *node, entity *ent) {
1014 assert ( (node->op == op_SymConst)
1015 && (get_SymConst_kind(node) == symconst_addr_ent));
1016 node->attr.i.sym.entity_p = ent;
1019 union symconst_symbol
1020 get_SymConst_symbol (ir_node *node) {
1021 assert (node->op == op_SymConst);
1022 return node->attr.i.sym;
1026 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1027 assert (node->op == op_SymConst);
1028 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1029 node->attr.i.sym = sym;
1033 get_SymConst_value_type (ir_node *node) {
1034 assert (node->op == op_SymConst);
1035 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1036 return node->attr.i.tp;
1040 set_SymConst_value_type (ir_node *node, type *tp) {
1041 assert (node->op == op_SymConst);
1042 node->attr.i.tp = tp;
1046 get_Sel_mem (ir_node *node) {
1047 assert (node->op == op_Sel);
1048 return get_irn_n(node, 0);
1052 set_Sel_mem (ir_node *node, ir_node *mem) {
1053 assert (node->op == op_Sel);
1054 set_irn_n(node, 0, mem);
1058 get_Sel_ptr (ir_node *node) {
1059 assert (node->op == op_Sel);
1060 return get_irn_n(node, 1);
1064 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1065 assert (node->op == op_Sel);
1066 set_irn_n(node, 1, ptr);
1070 get_Sel_n_indexs (ir_node *node) {
1071 assert (node->op == op_Sel);
1072 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1076 get_Sel_index_arr (ir_node *node)
1078 assert ((node->op == op_Sel));
1079 if (get_Sel_n_indexs(node) > 0)
1080 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1086 get_Sel_index (ir_node *node, int pos) {
1087 assert (node->op == op_Sel);
1088 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1092 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1093 assert (node->op == op_Sel);
1094 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1098 get_Sel_entity (ir_node *node) {
1099 assert (node->op == op_Sel);
1100 return node->attr.s.ent;
1104 set_Sel_entity (ir_node *node, entity *ent) {
1105 assert (node->op == op_Sel);
1106 node->attr.s.ent = ent;
1110 get_InstOf_ent (ir_node *node) {
1111 assert (node->op = op_InstOf);
1112 return (node->attr.io.ent);
1116 set_InstOf_ent (ir_node *node, type *ent) {
1117 assert (node->op = op_InstOf);
1118 node->attr.io.ent = ent;
1122 get_InstOf_store (ir_node *node) {
1123 assert (node->op = op_InstOf);
1124 return (get_irn_n (node, 0));
1128 set_InstOf_store (ir_node *node, ir_node *obj) {
1129 assert (node->op = op_InstOf);
1130 set_irn_n (node, 0, obj);
1134 get_InstOf_obj (ir_node *node) {
1135 assert (node->op = op_InstOf);
1136 return (get_irn_n (node, 1));
1140 set_InstOf_obj (ir_node *node, ir_node *obj) {
1141 assert (node->op = op_InstOf);
1142 set_irn_n (node, 1, obj);
1146 /* For unary and binary arithmetic operations the access to the
1147 operands can be factored out. Left is the first, right the
1148 second arithmetic value as listed in tech report 0999-33.
1149 unops are: Minus, Abs, Not, Conv, Cast
1150 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1151 Shr, Shrs, Rotate, Cmp */
1155 get_Call_mem (ir_node *node) {
1156 assert (node->op == op_Call);
1157 return get_irn_n(node, 0);
1161 set_Call_mem (ir_node *node, ir_node *mem) {
1162 assert (node->op == op_Call);
1163 set_irn_n(node, 0, mem);
1167 get_Call_ptr (ir_node *node) {
1168 assert (node->op == op_Call);
1169 return get_irn_n(node, 1);
1173 set_Call_ptr (ir_node *node, ir_node *ptr) {
1174 assert (node->op == op_Call);
1175 set_irn_n(node, 1, ptr);
1179 get_Call_param_arr (ir_node *node) {
1180 assert (node->op == op_Call);
1181 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1185 get_Call_n_params (ir_node *node) {
1186 assert (node->op == op_Call);
1187 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1191 get_Call_arity (ir_node *node) {
1192 assert (node->op == op_Call);
1193 return get_Call_n_params(node);
1197 set_Call_arity (ir_node *node, ir_node *arity) {
1198 assert (node->op == op_Call);
1203 get_Call_param (ir_node *node, int pos) {
1204 assert (node->op == op_Call);
1205 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1209 set_Call_param (ir_node *node, int pos, ir_node *param) {
1210 assert (node->op == op_Call);
1211 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1215 get_Call_type (ir_node *node) {
1216 assert (node->op == op_Call);
1217 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1221 set_Call_type (ir_node *node, type *tp) {
1222 assert (node->op == op_Call);
1223 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1224 node->attr.call.cld_tp = tp;
1227 int Call_has_callees(ir_node *node) {
1228 assert(node && node->op == op_Call);
1229 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1230 (node->attr.call.callee_arr != NULL));
1233 int get_Call_n_callees(ir_node * node) {
1234 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1235 return ARR_LEN(node->attr.call.callee_arr);
1238 entity * get_Call_callee(ir_node * node, int pos) {
1239 assert(pos >= 0 && pos < get_Call_n_callees(node));
1240 return node->attr.call.callee_arr[pos];
1243 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1244 assert(node->op == op_Call);
1245 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1246 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1248 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1251 void remove_Call_callee_arr(ir_node * node) {
1252 assert(node->op == op_Call);
1253 node->attr.call.callee_arr = NULL;
1256 ir_node * get_CallBegin_ptr (ir_node *node) {
1257 assert(node->op == op_CallBegin);
1258 return get_irn_n(node, 0);
1260 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1261 assert(node->op == op_CallBegin);
1262 set_irn_n(node, 0, ptr);
1264 ir_node * get_CallBegin_call (ir_node *node) {
1265 assert(node->op == op_CallBegin);
1266 return node->attr.callbegin.call;
1268 void set_CallBegin_call (ir_node *node, ir_node *call) {
1269 assert(node->op == op_CallBegin);
1270 node->attr.callbegin.call = call;
1275 ir_node * get_##OP##_left(ir_node *node) { \
1276 assert(node->op == op_##OP); \
1277 return get_irn_n(node, node->op->op_index); \
1279 void set_##OP##_left(ir_node *node, ir_node *left) { \
1280 assert(node->op == op_##OP); \
1281 set_irn_n(node, node->op->op_index, left); \
1283 ir_node *get_##OP##_right(ir_node *node) { \
1284 assert(node->op == op_##OP); \
1285 return get_irn_n(node, node->op->op_index + 1); \
1287 void set_##OP##_right(ir_node *node, ir_node *right) { \
1288 assert(node->op == op_##OP); \
1289 set_irn_n(node, node->op->op_index + 1, right); \
1293 ir_node *get_##OP##_op(ir_node *node) { \
1294 assert(node->op == op_##OP); \
1295 return get_irn_n(node, node->op->op_index); \
1297 void set_##OP##_op (ir_node *node, ir_node *op) { \
1298 assert(node->op == op_##OP); \
1299 set_irn_n(node, node->op->op_index, op); \
1309 get_Quot_mem (ir_node *node) {
1310 assert (node->op == op_Quot);
1311 return get_irn_n(node, 0);
1315 set_Quot_mem (ir_node *node, ir_node *mem) {
1316 assert (node->op == op_Quot);
1317 set_irn_n(node, 0, mem);
1323 get_DivMod_mem (ir_node *node) {
1324 assert (node->op == op_DivMod);
1325 return get_irn_n(node, 0);
1329 set_DivMod_mem (ir_node *node, ir_node *mem) {
1330 assert (node->op == op_DivMod);
1331 set_irn_n(node, 0, mem);
1337 get_Div_mem (ir_node *node) {
1338 assert (node->op == op_Div);
1339 return get_irn_n(node, 0);
1343 set_Div_mem (ir_node *node, ir_node *mem) {
1344 assert (node->op == op_Div);
1345 set_irn_n(node, 0, mem);
1351 get_Mod_mem (ir_node *node) {
1352 assert (node->op == op_Mod);
1353 return get_irn_n(node, 0);
1357 set_Mod_mem (ir_node *node, ir_node *mem) {
1358 assert (node->op == op_Mod);
1359 set_irn_n(node, 0, mem);
1376 get_Cast_type (ir_node *node) {
1377 assert (node->op == op_Cast);
1378 return node->attr.cast.totype;
1382 set_Cast_type (ir_node *node, type *to_tp) {
1383 assert (node->op == op_Cast);
1384 node->attr.cast.totype = to_tp;
1388 /* Checks for upcast.
1390 * Returns true if the Cast node casts a class type to a super type.
1392 int is_Cast_upcast(ir_node *node) {
1393 type *totype = get_Cast_type(node);
1394 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1395 ir_graph *myirg = get_irn_irg(node);
1397 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1400 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1401 totype = get_pointer_points_to_type(totype);
1402 fromtype = get_pointer_points_to_type(fromtype);
1407 if (!is_Class_type(totype)) return false;
1408 return is_subclass_of(fromtype, totype);
1411 /* Checks for downcast.
1413 * Returns true if the Cast node casts a class type to a sub type.
1415 int is_Cast_downcast(ir_node *node) {
1416 type *totype = get_Cast_type(node);
1417 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1419 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1422 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1423 totype = get_pointer_points_to_type(totype);
1424 fromtype = get_pointer_points_to_type(fromtype);
1429 if (!is_Class_type(totype)) return false;
1430 return is_subclass_of(totype, fromtype);
/* is_unop: true iff the node's opcode has unary arity.  The parenthesized
   name suppresses a possible function-like macro of the same name so this
   emits a real out-of-line definition. */
1434 (is_unop)(const ir_node *node) {
1435 return _is_unop(node);
/* Return the single operand of a unary operation; the operand's input
   slot is taken from the opcode descriptor (op_index). */
1439 get_unop_op (ir_node *node) {
1440 if (node->op->opar == oparity_unary)
1441 return get_irn_n(node, node->op->op_index);
/* Reached only for non-unary nodes: fail loudly in debug builds. */
1443 assert(node->op->opar == oparity_unary);
1448 set_unop_op (ir_node *node, ir_node *op) {
1449 if (node->op->opar == oparity_unary)
1450 set_irn_n(node, node->op->op_index, op);
1452 assert(node->op->opar == oparity_unary);
/* is_binop: true iff the node's opcode has binary arity (macro-suppressed,
   see is_unop above). */
1456 (is_binop)(const ir_node *node) {
1457 return _is_binop(node);
/* Left operand lives at op_index, right operand at op_index + 1. */
1461 get_binop_left (ir_node *node) {
1462 if (node->op->opar == oparity_binary)
1463 return get_irn_n(node, node->op->op_index);
1465 assert(node->op->opar == oparity_binary);
1470 set_binop_left (ir_node *node, ir_node *left) {
1471 if (node->op->opar == oparity_binary)
1472 set_irn_n(node, node->op->op_index, left);
1474 assert (node->op->opar == oparity_binary);
1478 get_binop_right (ir_node *node) {
1479 if (node->op->opar == oparity_binary)
1480 return get_irn_n(node, node->op->op_index + 1);
1482 assert(node->op->opar == oparity_binary);
1487 set_binop_right (ir_node *node, ir_node *right) {
1488 if (node->op->opar == oparity_binary)
1489 set_irn_n(node, node->op->op_index + 1, right);
1491 assert (node->op->opar == oparity_binary);
/* is_Phi: in the interprocedural view a Filter node acts as a Phi.
   A Phi with arity 0 during graph construction is a placeholder (Phi0),
   not yet a real Phi. */
1494 int is_Phi (const ir_node *n) {
1500 if (op == op_Filter) return get_interprocedural_view();
1503 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1504 (get_irn_arity(n) > 0));
/* is_Phi0: the construction-time placeholder — a Phi with no predecessors
   while the graph is still being built. */
1509 int is_Phi0 (const ir_node *n) {
1512 return ((get_irn_op(n) == op_Phi) &&
1513 (get_irn_arity(n) == 0) &&
1514 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Raw access to the predecessor array; entry [0] of in[] is the block,
   so the data predecessors start at index 1. */
1518 get_Phi_preds_arr (ir_node *node) {
1519 assert (node->op == op_Phi);
1520 return (ir_node **)&(get_irn_in(node)[1]);
1524 get_Phi_n_preds (ir_node *node) {
1525 assert (is_Phi(node) || is_Phi0(node));
1526 return (get_irn_arity(node));
/* NOTE(review): body appears intentionally empty apart from the assert —
   confirm against the full source. */
1530 void set_Phi_n_preds (ir_node *node, int n_preds) {
1531 assert (node->op == op_Phi);
1536 get_Phi_pred (ir_node *node, int pos) {
1537 assert (is_Phi(node) || is_Phi0(node));
1538 return get_irn_n(node, pos);
1542 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1543 assert (is_Phi(node) || is_Phi0(node));
1544 set_irn_n(node, pos, pred);
1548 int is_memop(ir_node *node) {
1549 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1552 ir_node *get_memop_mem (ir_node *node) {
1553 assert(is_memop(node));
1554 return get_irn_n(node, 0);
1557 void set_memop_mem (ir_node *node, ir_node *mem) {
1558 assert(is_memop(node));
1559 set_irn_n(node, 0, mem);
1562 ir_node *get_memop_ptr (ir_node *node) {
1563 assert(is_memop(node));
1564 return get_irn_n(node, 1);
1567 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1568 assert(is_memop(node));
1569 set_irn_n(node, 1, ptr);
1573 get_Load_mem (ir_node *node) {
1574 assert (node->op == op_Load);
1575 return get_irn_n(node, 0);
1579 set_Load_mem (ir_node *node, ir_node *mem) {
1580 assert (node->op == op_Load);
1581 set_irn_n(node, 0, mem);
1585 get_Load_ptr (ir_node *node) {
1586 assert (node->op == op_Load);
1587 return get_irn_n(node, 1);
1591 set_Load_ptr (ir_node *node, ir_node *ptr) {
1592 assert (node->op == op_Load);
1593 set_irn_n(node, 1, ptr);
1597 get_Load_mode (ir_node *node) {
1598 assert (node->op == op_Load);
1599 return node->attr.load.load_mode;
1603 set_Load_mode (ir_node *node, ir_mode *mode) {
1604 assert (node->op == op_Load);
1605 node->attr.load.load_mode = mode;
1609 get_Load_volatility (ir_node *node) {
1610 assert (node->op == op_Load);
1611 return node->attr.load.volatility;
1615 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1616 assert (node->op == op_Load);
1617 node->attr.load.volatility = volatility;
1622 get_Store_mem (ir_node *node) {
1623 assert (node->op == op_Store);
1624 return get_irn_n(node, 0);
1628 set_Store_mem (ir_node *node, ir_node *mem) {
1629 assert (node->op == op_Store);
1630 set_irn_n(node, 0, mem);
1634 get_Store_ptr (ir_node *node) {
1635 assert (node->op == op_Store);
1636 return get_irn_n(node, 1);
1640 set_Store_ptr (ir_node *node, ir_node *ptr) {
1641 assert (node->op == op_Store);
1642 set_irn_n(node, 1, ptr);
1646 get_Store_value (ir_node *node) {
1647 assert (node->op == op_Store);
1648 return get_irn_n(node, 2);
1652 set_Store_value (ir_node *node, ir_node *value) {
1653 assert (node->op == op_Store);
1654 set_irn_n(node, 2, value);
1658 get_Store_volatility (ir_node *node) {
1659 assert (node->op == op_Store);
1660 return node->attr.store.volatility;
1664 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1665 assert (node->op == op_Store);
1666 node->attr.store.volatility = volatility;
1671 get_Alloc_mem (ir_node *node) {
1672 assert (node->op == op_Alloc);
1673 return get_irn_n(node, 0);
1677 set_Alloc_mem (ir_node *node, ir_node *mem) {
1678 assert (node->op == op_Alloc);
1679 set_irn_n(node, 0, mem);
1683 get_Alloc_size (ir_node *node) {
1684 assert (node->op == op_Alloc);
1685 return get_irn_n(node, 1);
1689 set_Alloc_size (ir_node *node, ir_node *size) {
1690 assert (node->op == op_Alloc);
1691 set_irn_n(node, 1, size);
1695 get_Alloc_type (ir_node *node) {
1696 assert (node->op == op_Alloc);
1697 return node->attr.a.type = skip_tid(node->attr.a.type);
1701 set_Alloc_type (ir_node *node, type *tp) {
1702 assert (node->op == op_Alloc);
1703 node->attr.a.type = tp;
1707 get_Alloc_where (ir_node *node) {
1708 assert (node->op == op_Alloc);
1709 return node->attr.a.where;
1713 set_Alloc_where (ir_node *node, where_alloc where) {
1714 assert (node->op == op_Alloc);
1715 node->attr.a.where = where;
1720 get_Free_mem (ir_node *node) {
1721 assert (node->op == op_Free);
1722 return get_irn_n(node, 0);
1726 set_Free_mem (ir_node *node, ir_node *mem) {
1727 assert (node->op == op_Free);
1728 set_irn_n(node, 0, mem);
1732 get_Free_ptr (ir_node *node) {
1733 assert (node->op == op_Free);
1734 return get_irn_n(node, 1);
1738 set_Free_ptr (ir_node *node, ir_node *ptr) {
1739 assert (node->op == op_Free);
1740 set_irn_n(node, 1, ptr);
1744 get_Free_size (ir_node *node) {
1745 assert (node->op == op_Free);
1746 return get_irn_n(node, 2);
1750 set_Free_size (ir_node *node, ir_node *size) {
1751 assert (node->op == op_Free);
1752 set_irn_n(node, 2, size);
1756 get_Free_type (ir_node *node) {
1757 assert (node->op == op_Free);
1758 return node->attr.f.type = skip_tid(node->attr.f.type);
1762 set_Free_type (ir_node *node, type *tp) {
1763 assert (node->op == op_Free);
1764 node->attr.f.type = tp;
1768 get_Free_where (ir_node *node) {
1769 assert (node->op == op_Free);
1770 return node->attr.f.where;
1774 set_Free_where (ir_node *node, where_alloc where) {
1775 assert (node->op == op_Free);
1776 node->attr.f.where = where;
/* Raw predecessor array of a Sync node; in[0] is the block, so the data
   predecessors start at index 1. */
1780 get_Sync_preds_arr (ir_node *node) {
1781 assert (node->op == op_Sync);
1782 return (ir_node **)&(get_irn_in(node)[1]);
1786 get_Sync_n_preds (ir_node *node) {
1787 assert (node->op == op_Sync);
1788 return (get_irn_arity(node));
/* NOTE(review): body appears intentionally empty apart from the assert —
   confirm against the full source. */
1793 set_Sync_n_preds (ir_node *node, int n_preds) {
1794 assert (node->op == op_Sync);
1799 get_Sync_pred (ir_node *node, int pos) {
1800 assert (node->op == op_Sync);
1801 return get_irn_n(node, pos);
1805 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1806 assert (node->op == op_Sync);
1807 set_irn_n(node, pos, pred);
/* Derives the high-level type of the value produced by a Proj node by
   inspecting what it projects from:
   - Proj of a Proj of Start: a method parameter type,
   - Proj of a Proj of Call: a method result type,
   - Proj of a Load whose address is a Sel: the selected entity's type.
   NOTE(review): some switch cases/returns are on lines not visible here. */
1810 type *get_Proj_type(ir_node *n)
1813 ir_node *pred = get_Proj_pred(n);
1815 switch (get_irn_opcode(pred)) {
1818 /* Deal with Start / Call here: we need to know the Proj Nr. */
1819 assert(get_irn_mode(pred) == mode_T);
1820 pred_pred = get_Proj_pred(pred);
1821 if (get_irn_op(pred_pred) == op_Start) {
1822 type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1823 tp = get_method_param_type(mtp, get_Proj_proj(n));
1824 } else if (get_irn_op(pred_pred) == op_Call) {
1825 type *mtp = get_Call_type(pred_pred);
1826 tp = get_method_res_type(mtp, get_Proj_proj(n));
1829 case iro_Start: break;
1830 case iro_Call: break;
/* Load case: the type comes from the entity selected by the address. */
1832 ir_node *a = get_Load_ptr(pred);
1833 if (get_irn_op(a) == op_Sel)
1834 tp = get_entity_type(get_Sel_entity(a));
1843 get_Proj_pred (const ir_node *node) {
1844 assert (is_Proj(node));
1845 return get_irn_n(node, 0);
1849 set_Proj_pred (ir_node *node, ir_node *pred) {
1850 assert (is_Proj(node));
1851 set_irn_n(node, 0, pred);
1855 get_Proj_proj (const ir_node *node) {
1856 assert (is_Proj(node));
1857 if (get_irn_opcode(node) == iro_Proj) {
1858 return node->attr.proj;
1860 assert(get_irn_opcode(node) == iro_Filter);
1861 return node->attr.filter.proj;
1866 set_Proj_proj (ir_node *node, long proj) {
1867 assert (node->op == op_Proj);
1868 node->attr.proj = proj;
/* Raw predecessor array of a Tuple node; in[0] is the block, so the data
   predecessors start at index 1. */
1872 get_Tuple_preds_arr (ir_node *node) {
1873 assert (node->op == op_Tuple);
1874 return (ir_node **)&(get_irn_in(node)[1]);
1878 get_Tuple_n_preds (ir_node *node) {
1879 assert (node->op == op_Tuple);
1880 return (get_irn_arity(node));
/* NOTE(review): body appears intentionally empty apart from the assert —
   confirm against the full source. */
1885 set_Tuple_n_preds (ir_node *node, int n_preds) {
1886 assert (node->op == op_Tuple);
1891 get_Tuple_pred (ir_node *node, int pos) {
1892 assert (node->op == op_Tuple);
1893 return get_irn_n(node, pos);
1897 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1898 assert (node->op == op_Tuple);
1899 set_irn_n(node, pos, pred);
1903 get_Id_pred (ir_node *node) {
1904 assert (node->op == op_Id);
1905 return get_irn_n(node, 0);
1909 set_Id_pred (ir_node *node, ir_node *pred) {
1910 assert (node->op == op_Id);
1911 set_irn_n(node, 0, pred);
1914 ir_node *get_Confirm_value (ir_node *node) {
1915 assert (node->op == op_Confirm);
1916 return get_irn_n(node, 0);
1918 void set_Confirm_value (ir_node *node, ir_node *value) {
1919 assert (node->op == op_Confirm);
1920 set_irn_n(node, 0, value);
1922 ir_node *get_Confirm_bound (ir_node *node) {
1923 assert (node->op == op_Confirm);
1924 return get_irn_n(node, 1);
1926 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1927 assert (node->op == op_Confirm);
1928 set_irn_n(node, 0, bound);
1930 pn_Cmp get_Confirm_cmp (ir_node *node) {
1931 assert (node->op == op_Confirm);
1932 return node->attr.confirm_cmp;
1934 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1935 assert (node->op == op_Confirm);
1936 node->attr.confirm_cmp = cmp;
/* Filter: interprocedural analogue of Proj/Phi.  Intraprocedural
   predecessor accessors first; NOTE(review): the return/assignment lines
   of get/set_Filter_pred are on lines not visible here. */
1941 get_Filter_pred (ir_node *node) {
1942 assert(node->op == op_Filter);
1946 set_Filter_pred (ir_node *node, ir_node *pred) {
1947 assert(node->op == op_Filter);
1951 get_Filter_proj(ir_node *node) {
1952 assert(node->op == op_Filter);
1953 return node->attr.filter.proj;
1956 set_Filter_proj (ir_node *node, long proj) {
1957 assert(node->op == op_Filter);
1958 node->attr.filter.proj = proj;
1961 /* Don't use get_irn_arity, get_irn_n in implementation as access
1962 shall work independent of view!!! */
/* Install the interprocedural (call-graph) predecessor array.  The array
   is (re)allocated on the graph's obstack when absent or when the arity
   changes; slot [0] mirrors the node's block, data starts at [1]. */
1963 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1964 assert(node->op == op_Filter);
1965 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1966 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
/* Fresh backedge flags, all cleared. */
1967 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1968 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1969 node->attr.filter.in_cg[0] = node->in[0];
1971 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1974 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1975 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1976 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
/* +1 skips the block entry at in_cg[0]. */
1977 node->attr.filter.in_cg[pos + 1] = pred;
1979 int get_Filter_n_cg_preds(ir_node *node) {
1980 assert(node->op == op_Filter && node->attr.filter.in_cg);
1981 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1983 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1985 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1987 arity = ARR_LEN(node->attr.filter.in_cg);
1988 assert(pos < arity - 1);
1989 return node->attr.filter.in_cg[pos + 1];
/* Mux node layout (as evidenced by the setters below): in[1] = selector,
   in[2] = false value, in[3] = true value; in[0] is the block.
   NOTE(review): the getters' return lines are on lines not visible here. */
1993 ir_node *get_Mux_sel (ir_node *node) {
1994 assert(node->op == op_Mux);
1997 void set_Mux_sel (ir_node *node, ir_node *sel) {
1998 assert(node->op == op_Mux);
2002 ir_node *get_Mux_false (ir_node *node) {
2003 assert(node->op == op_Mux);
2006 void set_Mux_false (ir_node *node, ir_node *ir_false) {
2007 assert(node->op == op_Mux);
2008 node->in[2] = ir_false;
2011 ir_node *get_Mux_true (ir_node *node) {
2012 assert(node->op == op_Mux);
2015 void set_Mux_true (ir_node *node, ir_node *ir_true) {
2016 assert(node->op == op_Mux);
2017 node->in[3] = ir_true;
/* Return the graph a node belongs to by walking to its block, which
   carries the irg pointer in its attributes. */
2021 get_irn_irg(const ir_node *node) {
2023 * Do not use get_nodes_Block() here, because this
2024 * will check the pinned state.
2025 * However even a 'wrong' block is always in the proper
/* Step from a non-block node to its block (predecessor -1). */
2028 if (! is_Block(node))
2029 node = get_irn_n(node, -1);
2030 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2031 node = get_irn_n(node, -1);
2032 assert(get_irn_op(node) == op_Block);
2033 return node->attr.block.irg;
2037 /*----------------------------------------------------------------*/
2038 /* Auxiliary routines */
2039 /*----------------------------------------------------------------*/
/* skip_Proj: step over a Proj node to the node it projects from; any other
   node (including NULL) is presumably returned unchanged — the tail of the
   function is on lines not visible here. */
2042 skip_Proj (ir_node *node) {
2043 /* don't assert node !!! */
2044 if (node && is_Proj(node)) {
2045 return get_Proj_pred(node);
/* skip_Tuple: resolve Proj-of-Tuple chains to the Tuple's selected
   predecessor, following Id nodes and recursing through nested Tuples.
   Only active when normalization optimizations are enabled. */
2052 skip_Tuple (ir_node *node) {
2055 if (!get_opt_normalize()) return node;
2057 node = skip_Id(node);
2058 if (get_irn_op(node) == op_Proj) {
2059 pred = skip_Id(get_Proj_pred(node));
2060 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
2061 pred = skip_Id(skip_Tuple(pred));
2062 else if (get_irn_op(pred) == op_Tuple)
2063 return get_Tuple_pred(pred, get_Proj_proj(node));
2068 /* returns operand of node if node is a Cast */
2069 ir_node *skip_Cast (ir_node *node) {
2070 if (node && get_irn_op(node) == op_Cast)
2071 return get_Cast_op(node);
2075 /* returns operand of node if node is a Confirm */
2076 ir_node *skip_Confirm (ir_node *node) {
2077 if (node && get_irn_op(node) == op_Confirm)
2078 return get_Confirm_value(node);
2082 /* skip all high-level ops */
2083 ir_node *skip_HighLevel(ir_node *node) {
2084 if (node && is_op_highlevel(get_irn_op(node)))
2085 return get_irn_n(node, 0);
2090 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2091 than any other approach, as Id chains are resolved and all point to the real node, or
2092 all id's are self loops. */
/* NOTE(review): two definitions of skip_Id appear below; in the full file
   one of them is presumably disabled by a preprocessor conditional on
   lines not visible here — confirm before editing either. */
2094 skip_Id (ir_node *node) {
2095 /* don't assert node !!! */
2097 if (!get_opt_normalize()) return node;
2099 /* Don't use get_Id_pred: We get into an endless loop for
2100 self-referencing Ids. */
/* in[0+1] is the first data predecessor (in[0] is the block). */
2101 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2102 ir_node *rem_pred = node->in[0+1];
2105 assert (get_irn_arity (node) > 0);
/* Temporarily make this Id self-referencing so the recursion below
   terminates on cycles, then path-compress to the chain's end. */
2107 node->in[0+1] = node;
2108 res = skip_Id(rem_pred);
2109 if (res->op == op_Id) /* self-loop */ return node;
2111 node->in[0+1] = res;
2118 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2119 than any other approach, as Id chains are resolved and all point to the real node, or
2120 all id's are self loops. */
2122 skip_Id (ir_node *node) {
2124 /* don't assert node !!! */
/* Fast paths: not an Id, normalization off, or predecessor already real. */
2126 if (!node || (node->op != op_Id)) return node;
2128 if (!get_opt_normalize()) return node;
2130 /* Don't use get_Id_pred: We get into an endless loop for
2131 self-referencing Ids. */
2132 pred = node->in[0+1];
2134 if (pred->op != op_Id) return pred;
2136 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2137 ir_node *rem_pred, *res;
2139 if (pred->op != op_Id) return pred; /* shortcut */
2142 assert (get_irn_arity (node) > 0);
2144 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2145 res = skip_Id(rem_pred);
2146 if (res->op == op_Id) /* self-loop */ return node;
2148 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Out-of-line definitions of the node-kind predicates.  The parenthesized
   names suppress the function-like macro versions so these emit real
   symbols; each simply forwards to the _-prefixed inline implementation. */
2157 (is_Bad)(const ir_node *node) {
2158 return _is_Bad(node);
2162 (is_Const)(const ir_node *node) {
2163 return _is_Const(node);
2167 (is_no_Block)(const ir_node *node) {
2168 return _is_no_Block(node);
2172 (is_Block)(const ir_node *node) {
2173 return _is_Block(node);
2176 /* returns true if node is a Unknown node. */
2178 (is_Unknown)(const ir_node *node) {
2179 return _is_Unknown(node);
/* In the interprocedural view a Filter node plays the role of a Proj. */
2183 is_Proj (const ir_node *node) {
2185 return node->op == op_Proj
2186 || (!get_interprocedural_view() && node->op == op_Filter);
2189 /* Returns true if the operation manipulates control flow. */
2191 is_cfop(const ir_node *node) {
2192 return is_cfopcode(get_irn_op(node));
2195 /* Returns true if the operation manipulates interprocedural control flow:
2196 CallBegin, EndReg, EndExcept */
2197 int is_ip_cfop(const ir_node *node) {
2198 return is_ip_cfopcode(get_irn_op(node));
2201 /* Returns true if the operation can change the control flow because
2204 is_fragile_op(const ir_node *node) {
2205 return is_op_fragile(get_irn_op(node));
2208 /* Returns the memory operand of fragile operations. */
/* For every fragile opcode handled here the memory input sits in slot 0;
   NOTE(review): most case labels of the switch are on lines not visible. */
2209 ir_node *get_fragile_op_mem(ir_node *node) {
2210 assert(node && is_fragile_op(node));
2212 switch (get_irn_opcode (node)) {
2221 return get_irn_n(node, 0);
2226 assert(0 && "should not be reached");
2231 /* Returns true if the operation is a forking control flow operation. */
2233 is_forking_op(const ir_node *node) {
2234 return is_op_forking(get_irn_op(node));
/* Out-of-line version of get_irn_type (macro-suppressed, forwards to the
   inline implementation). */
2237 type *(get_irn_type)(ir_node *node) {
2238 return _get_irn_type(node);
2241 /** the get_type operation must be always implemented */
/* Fallback for opcodes with no meaningful high-level type.
   NOTE(review): its return line is not visible here. */
2242 static type *get_Null_type(ir_node *n) {
2246 /* set the get_type operation */
/* Install the per-opcode get_type callback: only Const, SymConst, Cast
   and Proj have real type information; everything else gets the fallback. */
2247 ir_op *firm_set_default_get_type(ir_op *op)
2250 case iro_Const: op->get_type = get_Const_type; break;
2251 case iro_SymConst: op->get_type = get_SymConst_value_type; break;
2252 case iro_Cast: op->get_type = get_Cast_type; break;
2253 case iro_Proj: op->get_type = get_Proj_type; break;
2254 default: op->get_type = get_Null_type; break;
2259 #ifdef DEBUG_libfirm
/* Debugging aid: print a node's opcode, mode, number and address, followed
   by its block and all predecessors, to stdout. */
2260 void dump_irn (ir_node *n) {
2261 int i, arity = get_irn_arity(n);
2262 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* Predecessor -1 is the node's block. */
2264 ir_node *pred = get_irn_n(n, -1);
2265 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2266 get_irn_node_nr(pred), (void *)pred);
2268 printf(" preds: \n");
2269 for (i = 0; i < arity; ++i) {
2270 ir_node *pred = get_irn_n(n, i);
2271 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2272 get_irn_node_nr(pred), (void *)pred);
/* Release builds keep the symbol but do nothing. */
2276 #else /* DEBUG_libfirm */
2277 void dump_irn (ir_node *n) {}
2278 #endif /* DEBUG_libfirm */