3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
34 /* some constants fixing the positions of nodes predecessors
36 #define CALL_PARAM_OFFSET 2
37 #define FUNCCALL_PARAM_OFFSET 1
38 #define SEL_INDEX_OFFSET 2
39 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
40 #define END_KEEPALIVE_OFFSET 0
42 static const char *pnc_name_arr [] = {
43 "False", "Eq", "Lt", "Le",
44 "Gt", "Ge", "Lg", "Leg", "Uo",
45 "Ue", "Ul", "Ule", "Ug", "Uge",
 * Returns the pnc name from a pnc constant.
52 const char *get_pnc_string(int pnc) {
53 return pnc_name_arr[pnc];
57 * Calculates the negated pnc condition.
60 get_negated_pnc(int pnc) {
62 case pn_Cmp_False: return pn_Cmp_True;
63 case pn_Cmp_Eq: return pn_Cmp_Ne;
64 case pn_Cmp_Lt: return pn_Cmp_Uge;
65 case pn_Cmp_Le: return pn_Cmp_Ug;
66 case pn_Cmp_Gt: return pn_Cmp_Ule;
67 case pn_Cmp_Ge: return pn_Cmp_Ul;
68 case pn_Cmp_Lg: return pn_Cmp_Ue;
69 case pn_Cmp_Leg: return pn_Cmp_Uo;
70 case pn_Cmp_Uo: return pn_Cmp_Leg;
71 case pn_Cmp_Ue: return pn_Cmp_Lg;
72 case pn_Cmp_Ul: return pn_Cmp_Ge;
73 case pn_Cmp_Ule: return pn_Cmp_Gt;
74 case pn_Cmp_Ug: return pn_Cmp_Le;
75 case pn_Cmp_Uge: return pn_Cmp_Lt;
76 case pn_Cmp_Ne: return pn_Cmp_Eq;
77 case pn_Cmp_True: return pn_Cmp_False;
79 return 99; /* to shut up gcc */
82 /* Calculates the swapped pnc condition, i.e., "<" --> ">" */
84 get_swapped_pnc(int pnc) {
85 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
86 int lesser = pnc & pn_Cmp_Lt;
87 int greater = pnc & pn_Cmp_Gt;
89 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
94 const char *pns_name_arr [] = {
95 "initial_exec", "global_store",
96 "frame_base", "globals", "args"
99 const char *symconst_name_arr [] = {
100 "type_tag", "size", "addr_name", "addr_ent"
104 * Indicates, whether additional data can be registered to ir nodes.
105 * If set to 1, this is not possible anymore.
107 static int forbid_new_data = 0;
110 * The amount of additional space for custom data to be allocated upon
111 * creating a new node.
113 unsigned firm_add_node_size = 0;
116 /* register new space for every node */
117 unsigned register_additional_node_data(unsigned size) {
118 assert(!forbid_new_data && "Too late to register additional node data");
123 return firm_add_node_size += size;
129 /* Forbid the addition of new data to an ir node. */
/**
 * irnode constructor.
 * Create a new irnode in irg, with an op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic array is created.
 *
 * NOTE(review): this view of the source is sampled; declarations of
 * `res`/`p`, the `if (arity < 0)` branch and several braces live on
 * lines not shown here.
 */
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
         int arity, ir_node **in)
  /* node layout: [custom data][ir_node header + op attributes];
     the returned pointer points past the custom-data area */
  size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;

  assert(irg && op && mode);
  p = obstack_alloc (irg->obst, node_size);
  memset(p, 0, node_size);   /* zero header, attributes and custom data */
  res = (ir_node *) (p + firm_add_node_size);

  res->kind = k_ir_node;
  /* dynamic in-array branch (arity < 0): */
  res->in = NEW_ARR_F (ir_node *, 1);  /* 1: space for block */
  /* fixed-arity branch: in-array on the graph's obstack */
  res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);

  set_irn_dbg_info(res, db);
  res->node_nr = get_irp_new_node_nr();  /* unique number for debugging */

#if FIRM_EDGES_INPLACE
  int not_a_block = is_no_Block(res);

  INIT_LIST_HEAD(&res->edge_info.outs_head);
  /* notify the edge infrastructure of every initial dependency;
     non-block nodes have an extra block input at position -1,
     hence the not_a_block offset */
  for (i = 0, n = arity + not_a_block; i < n; ++i)
    edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);

  hook_new_node(irg, res);
189 /*-- getting some parameters from ir_nodes --*/
192 (is_ir_node)(const void *thing) {
193 return _is_ir_node(thing);
197 (get_irn_intra_arity)(const ir_node *node) {
198 return _get_irn_intra_arity(node);
202 (get_irn_inter_arity)(const ir_node *node) {
203 return _get_irn_inter_arity(node);
206 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
209 (get_irn_arity)(const ir_node *node) {
210 return _get_irn_arity(node);
213 /* Returns the array with ins. This array is shifted with respect to the
214 array accessed by get_irn_n: The block operand is at position 0 not -1.
215 (@@@ This should be changed.)
216 The order of the predecessors in this array is not guaranteed, except that
217 lists of operands as predecessors of Block or arguments of a Call are
220 get_irn_in (const ir_node *node) {
222 if (get_interprocedural_view()) { /* handle Filter and Block specially */
223 if (get_irn_opcode(node) == iro_Filter) {
224 assert(node->attr.filter.in_cg);
225 return node->attr.filter.in_cg;
226 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
227 return node->attr.block.in_cg;
229 /* else fall through */
/**
 * Sets the complete in-array (all predecessors) of a node.
 *
 * NOTE(review): sampled view — declarations of `arr`/`i`, the
 * non-interprocedural default branch and several closing braces are
 * on lines not shown here.
 */
set_irn_in (ir_node *node, int arity, ir_node **in) {
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      arr = &node->attr.filter.in_cg;  /* interprocedural in-array of the Filter */
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      arr = &node->attr.block.in_cg;   /* interprocedural in-array of the Block */
  /* reallocate only if the arity actually changed; slot 0 keeps the block */
  if (arity != ARR_LEN(*arr) - 1) {
    ir_node * block = (*arr)[0];
    *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
  /* backedge info must be rebuilt for the new arity */
  fix_backedges(current_ir_graph->obst, node);
  memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
260 (get_irn_intra_n)(const ir_node *node, int n) {
261 return _get_irn_intra_n (node, n);
265 (get_irn_inter_n)(const ir_node *node, int n) {
266 return _get_irn_inter_n (node, n);
269 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
272 (get_irn_n)(const ir_node *node, int n) {
273 return _get_irn_n(node, n);
/**
 * Sets (exchanges) the n-th predecessor of a node.
 * n == -1 addresses the block predecessor.
 *
 * NOTE(review): sampled view — `return` statements of the special-case
 * branches and some closing braces are on lines not shown here.
 */
set_irn_n (ir_node *node, int n, ir_node *in) {
  assert(node && node->kind == k_ir_node);
  assert(n < get_irn_arity(node));
  assert(in && in->kind == k_ir_node);

  if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
    /* Change block pred in both views! */
    node->in[n + 1] = in;           /* intraprocedural view */
    assert(node->attr.filter.in_cg);
    node->attr.filter.in_cg[n + 1] = in;  /* interprocedural view */
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      node->attr.filter.in_cg[n + 1] = in;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      node->attr.block.in_cg[n + 1] = in;
    /* else fall through */
  /* report the change before it happens: old pred is still node->in[n+1] */
  hook_set_irn_n(node, n, in, node->in[n + 1]);

  /* Here, we rely on src and tgt being in the current ir graph */
  edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);

  node->in[n + 1] = in;   /* in[0] is the block, hence the +1 shift */
312 (get_irn_mode)(const ir_node *node) {
313 return _get_irn_mode(node);
317 (set_irn_mode)(ir_node *node, ir_mode *mode)
319 _set_irn_mode(node, mode);
323 get_irn_modecode (const ir_node *node)
326 return node->mode->code;
329 /** Gets the string representation of the mode .*/
331 get_irn_modename (const ir_node *node)
334 return get_mode_name(node->mode);
338 get_irn_modeident (const ir_node *node)
341 return get_mode_ident(node->mode);
345 (get_irn_op)(const ir_node *node)
347 return _get_irn_op(node);
350 /* should be private to the library: */
352 set_irn_op (ir_node *node, ir_op *op)
359 (get_irn_opcode)(const ir_node *node)
361 return _get_irn_opcode(node);
365 get_irn_opname (const ir_node *node)
368 if ((get_irn_op((ir_node *)node) == op_Phi) &&
369 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
370 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
371 return get_id_str(node->op->name);
375 get_irn_opident (const ir_node *node)
378 return node->op->name;
382 (get_irn_visited)(const ir_node *node)
384 return _get_irn_visited(node);
388 (set_irn_visited)(ir_node *node, unsigned long visited)
390 _set_irn_visited(node, visited);
394 (mark_irn_visited)(ir_node *node) {
395 _mark_irn_visited(node);
399 (irn_not_visited)(const ir_node *node) {
400 return _irn_not_visited(node);
404 (irn_visited)(const ir_node *node) {
405 return _irn_visited(node);
409 (set_irn_link)(ir_node *node, void *link) {
410 _set_irn_link(node, link);
414 (get_irn_link)(const ir_node *node) {
415 return _get_irn_link(node);
419 (get_irn_pinned)(const ir_node *node) {
420 return _get_irn_pinned(node);
/**
 * Sets the pin state of a node that may float (exception-pinned ops).
 *
 * @param node   the node; Tuples are ignored (see below)
 * @param state  op_pin_state_pinned or op_pin_state_floats
 */
void set_irn_pinned(ir_node *node, op_pin_state state) {
  /* due to optimization an opt may be turned into a Tuple */
  if (get_irn_op(node) == op_Tuple)
    /* NOTE(review): the body of this if (an early return) is on a
       source line not visible in this view. */
  assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
  assert(state == op_pin_state_pinned || state == op_pin_state_floats);

  node->attr.except.pin_state = state;
434 #ifdef DO_HEAPANALYSIS
435 /* Access the abstract interpretation information of a node.
436 Returns NULL if no such information is available. */
437 struct abstval *get_irn_abst_value(ir_node *n) {
440 /* Set the abstract interpretation information of a node. */
441 void set_irn_abst_value(ir_node *n, struct abstval *os) {
444 struct section *firm_get_irn_section(ir_node *n) {
447 void firm_set_irn_section(ir_node *n, struct section *s) {
451 /* Dummies needed for firmjni. */
452 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
453 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
454 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
455 void firm_set_irn_section(ir_node *n, struct section *s) {}
456 #endif /* DO_HEAPANALYSIS */
459 /* Outputs a unique number for this node */
461 get_irn_node_nr(const ir_node *node) {
464 return node->node_nr;
471 get_irn_const_attr (ir_node *node)
473 assert (node->op == op_Const);
474 return node->attr.con;
478 get_irn_proj_attr (ir_node *node)
480 assert (node->op == op_Proj);
481 return node->attr.proj;
485 get_irn_alloc_attr (ir_node *node)
487 assert (node->op == op_Alloc);
492 get_irn_free_attr (ir_node *node)
494 assert (node->op == op_Free);
499 get_irn_symconst_attr (ir_node *node)
501 assert (node->op == op_SymConst);
506 get_irn_call_attr (ir_node *node)
508 assert (node->op == op_Call);
509 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
513 get_irn_sel_attr (ir_node *node)
515 assert (node->op == op_Sel);
520 get_irn_phi_attr (ir_node *node)
522 assert (node->op == op_Phi);
523 return node->attr.phi0_pos;
527 get_irn_block_attr (ir_node *node)
529 assert (node->op == op_Block);
530 return node->attr.block;
534 get_irn_load_attr (ir_node *node)
536 assert (node->op == op_Load);
537 return node->attr.load;
541 get_irn_store_attr (ir_node *node)
543 assert (node->op == op_Store);
544 return node->attr.store;
548 get_irn_except_attr (ir_node *node)
550 assert (node->op == op_Div || node->op == op_Quot ||
551 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
552 return node->attr.except;
555 /** manipulate fields of individual nodes **/
557 /* this works for all except Block */
559 get_nodes_block (const ir_node *node) {
560 assert (!(node->op == op_Block));
561 return get_irn_n(node, -1);
565 set_nodes_block (ir_node *node, ir_node *block) {
566 assert (!(node->op == op_Block));
567 set_irn_n(node, -1, block);
570 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
571 * from Start. If so returns frame type, else Null. */
572 type *is_frame_pointer(ir_node *n) {
573 if ((get_irn_op(n) == op_Proj) &&
574 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
575 ir_node *start = get_Proj_pred(n);
576 if (get_irn_op(start) == op_Start) {
577 return get_irg_frame_type(get_irn_irg(start));
583 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
584 * from Start. If so returns global type, else Null. */
585 type *is_globals_pointer(ir_node *n) {
586 if ((get_irn_op(n) == op_Proj) &&
587 (get_Proj_proj(n) == pn_Start_P_globals)) {
588 ir_node *start = get_Proj_pred(n);
589 if (get_irn_op(start) == op_Start) {
590 return get_glob_type();
596 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
597 * from Start. If so returns 1, else 0. */
598 int is_value_arg_pointer(ir_node *n) {
599 if ((get_irn_op(n) == op_Proj) &&
600 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
601 (get_irn_op(get_Proj_pred(n)) == op_Start))
606 /* Returns an array with the predecessors of the Block. Depending on
607 the implementation of the graph data structure this can be a copy of
608 the internal representation of predecessors as well as the internal
609 array itself. Therefore writing to this array might obstruct the ir. */
611 get_Block_cfgpred_arr (ir_node *node)
613 assert ((node->op == op_Block));
614 return (ir_node **)&(get_irn_in(node)[1]);
619 get_Block_n_cfgpreds (ir_node *node) {
620 assert ((node->op == op_Block));
621 return get_irn_arity(node);
625 get_Block_cfgpred (ir_node *node, int pos) {
626 assert(-1 <= pos && pos < get_irn_arity(node));
627 assert(node->op == op_Block);
628 return get_irn_n(node, pos);
632 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
633 assert (node->op == op_Block);
634 set_irn_n(node, pos, pred);
638 get_Block_matured (ir_node *node) {
639 assert (node->op == op_Block);
640 return node->attr.block.matured;
644 set_Block_matured (ir_node *node, bool matured) {
645 assert (node->op == op_Block);
646 node->attr.block.matured = matured;
650 (get_Block_block_visited)(ir_node *node) {
651 return _get_Block_block_visited(node);
655 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
656 _set_Block_block_visited(node, visit);
659 /* For this current_ir_graph must be set. */
661 (mark_Block_block_visited)(ir_node *node) {
662 _mark_Block_block_visited(node);
666 (Block_not_block_visited)(ir_node *node) {
667 return _Block_not_block_visited(node);
671 get_Block_graph_arr (ir_node *node, int pos) {
672 assert (node->op == op_Block);
673 return node->attr.block.graph_arr[pos+1];
677 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
678 assert (node->op == op_Block);
679 node->attr.block.graph_arr[pos+1] = value;
/**
 * Sets the complete interprocedural (call-graph) predecessor array of a Block.
 *
 * NOTE(review): sampled view — the closing brace of the reallocation
 * branch is on a line not shown here.
 */
void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Block);
  /* (re)allocate only if the array is missing or its arity changed;
     slot 0 is reserved for the block operand */
  if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
    node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.block.in_cg[0] = NULL;
    node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
    /* Fix backedge array. fix_backedges operates depending on
       interprocedural_view, so temporarily force it on. */
    int ipv = get_interprocedural_view();
    set_interprocedural_view(true);
    fix_backedges(current_ir_graph->obst, node);
    set_interprocedural_view(ipv);   /* restore caller's view */
  memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
700 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
701 assert(node->op == op_Block &&
702 node->attr.block.in_cg &&
703 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
704 node->attr.block.in_cg[pos + 1] = pred;
707 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
708 assert(node->op == op_Block);
709 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
712 int get_Block_cg_n_cfgpreds(ir_node * node) {
713 assert(node->op == op_Block);
714 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
717 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
718 assert(node->op == op_Block && node->attr.block.in_cg);
719 return node->attr.block.in_cg[pos + 1];
722 void remove_Block_cg_cfgpred_arr(ir_node * node) {
723 assert(node->op == op_Block);
724 node->attr.block.in_cg = NULL;
727 ir_node *(set_Block_dead)(ir_node *block) {
728 return _set_Block_dead(block);
731 int (is_Block_dead)(const ir_node *block) {
732 return _is_Block_dead(block);
736 set_Start_irg(ir_node *node, ir_graph *irg) {
737 assert(node->op == op_Start);
738 assert(is_ir_graph(irg));
739 assert(0 && " Why set irg? -- use set_irn_irg");
743 get_End_n_keepalives(ir_node *end) {
744 assert (end->op == op_End);
745 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
749 get_End_keepalive(ir_node *end, int pos) {
750 assert (end->op == op_End);
751 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
755 add_End_keepalive (ir_node *end, ir_node *ka) {
756 assert (end->op == op_End);
757 ARR_APP1 (ir_node *, end->in, ka);
761 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
762 assert (end->op == op_End);
763 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
/**
 * Frees the in-array of an End node. The node must not be used
 * afterwards; NULLing `in` makes later accesses fail loudly.
 */
free_End (ir_node *end) {
  assert (end->op == op_End);
  DEL_ARR_F(end->in);  /* GL @@@ does not work! */
  end->in = NULL;      /* @@@ make sure we get an error if we use the
                          in array afterwards ... */
777 > Implementing the case construct (which is where the constant Proj node is
778 > important) involves far more than simply determining the constant values.
779 > We could argue that this is more properly a function of the translator from
780 > Firm to the target machine. That could be done if there was some way of
781 > projecting "default" out of the Cond node.
782 I know it's complicated.
Basically there are two problems:
784 - determining the gaps between the projs
785 - determining the biggest case constant to know the proj number for
787 I see several solutions:
788 1. Introduce a ProjDefault node. Solves both problems.
789 This means to extend all optimizations executed during construction.
790 2. Give the Cond node for switch two flavors:
791 a) there are no gaps in the projs (existing flavor)
792 b) gaps may exist, default proj is still the Proj with the largest
793 projection number. This covers also the gaps.
794 3. Fix the semantic of the Cond to that of 2b)
796 Solution 2 seems to be the best:
797 Computing the gaps in the Firm representation is not too hard, i.e.,
798 libFIRM can implement a routine that transforms between the two
799 flavours. This is also possible for 1) but 2) does not require to
800 change any existing optimization.
801 Further it should be far simpler to determine the biggest constant than
803 I don't want to choose 3) as 2a) seems to have advantages for
804 dataflow analysis and 3) does not allow to convert the representation to
808 get_Cond_selector (ir_node *node) {
809 assert (node->op == op_Cond);
810 return get_irn_n(node, 0);
814 set_Cond_selector (ir_node *node, ir_node *selector) {
815 assert (node->op == op_Cond);
816 set_irn_n(node, 0, selector);
820 get_Cond_kind (ir_node *node) {
821 assert (node->op == op_Cond);
822 return node->attr.c.kind;
826 set_Cond_kind (ir_node *node, cond_kind kind) {
827 assert (node->op == op_Cond);
828 node->attr.c.kind = kind;
832 get_Cond_defaultProj (ir_node *node) {
833 assert (node->op == op_Cond);
834 return node->attr.c.default_proj;
838 get_Return_mem (ir_node *node) {
839 assert (node->op == op_Return);
840 return get_irn_n(node, 0);
844 set_Return_mem (ir_node *node, ir_node *mem) {
845 assert (node->op == op_Return);
846 set_irn_n(node, 0, mem);
850 get_Return_n_ress (ir_node *node) {
851 assert (node->op == op_Return);
852 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
856 get_Return_res_arr (ir_node *node)
858 assert ((node->op == op_Return));
859 if (get_Return_n_ress(node) > 0)
860 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
867 set_Return_n_res (ir_node *node, int results) {
868 assert (node->op == op_Return);
873 get_Return_res (ir_node *node, int pos) {
874 assert (node->op == op_Return);
875 assert (get_Return_n_ress(node) > pos);
876 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
880 set_Return_res (ir_node *node, int pos, ir_node *res){
881 assert (node->op == op_Return);
882 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
886 get_Raise_mem (ir_node *node) {
887 assert (node->op == op_Raise);
888 return get_irn_n(node, 0);
892 set_Raise_mem (ir_node *node, ir_node *mem) {
893 assert (node->op == op_Raise);
894 set_irn_n(node, 0, mem);
898 get_Raise_exo_ptr (ir_node *node) {
899 assert (node->op == op_Raise);
900 return get_irn_n(node, 1);
904 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
905 assert (node->op == op_Raise);
906 set_irn_n(node, 1, exo_ptr);
909 tarval *(get_Const_tarval)(ir_node *node) {
910 return _get_Const_tarval(node);
914 set_Const_tarval (ir_node *node, tarval *con) {
915 assert (node->op == op_Const);
916 node->attr.con.tv = con;
919 cnst_classify_t (classify_Const)(ir_node *node)
921 return _classify_Const(node);
925 /* The source language type. Must be an atomic type. Mode of type must
926 be mode of node. For tarvals from entities type must be pointer to
929 get_Const_type (ir_node *node) {
930 assert (node->op == op_Const);
931 return node->attr.con.tp;
935 set_Const_type (ir_node *node, type *tp) {
936 assert (node->op == op_Const);
937 if (tp != firm_unknown_type) {
938 assert (is_atomic_type(tp));
939 assert (get_type_mode(tp) == get_irn_mode(node));
941 node->attr.con.tp = tp;
946 get_SymConst_kind (const ir_node *node) {
947 assert (node->op == op_SymConst);
948 return node->attr.i.num;
952 set_SymConst_kind (ir_node *node, symconst_kind num) {
953 assert (node->op == op_SymConst);
954 node->attr.i.num = num;
958 get_SymConst_type (ir_node *node) {
959 assert ( (node->op == op_SymConst)
960 && ( get_SymConst_kind(node) == symconst_type_tag
961 || get_SymConst_kind(node) == symconst_size));
962 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
966 set_SymConst_type (ir_node *node, type *tp) {
967 assert ( (node->op == op_SymConst)
968 && ( get_SymConst_kind(node) == symconst_type_tag
969 || get_SymConst_kind(node) == symconst_size));
970 node->attr.i.sym.type_p = tp;
974 get_SymConst_name (ir_node *node) {
975 assert ( (node->op == op_SymConst)
976 && (get_SymConst_kind(node) == symconst_addr_name));
977 return node->attr.i.sym.ident_p;
981 set_SymConst_name (ir_node *node, ident *name) {
982 assert ( (node->op == op_SymConst)
983 && (get_SymConst_kind(node) == symconst_addr_name));
984 node->attr.i.sym.ident_p = name;
988 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
989 entity *get_SymConst_entity (ir_node *node) {
990 assert ( (node->op == op_SymConst)
991 && (get_SymConst_kind (node) == symconst_addr_ent));
992 return node->attr.i.sym.entity_p;
995 void set_SymConst_entity (ir_node *node, entity *ent) {
996 assert ( (node->op == op_SymConst)
997 && (get_SymConst_kind(node) == symconst_addr_ent));
998 node->attr.i.sym.entity_p = ent;
1001 union symconst_symbol
1002 get_SymConst_symbol (ir_node *node) {
1003 assert (node->op == op_SymConst);
1004 return node->attr.i.sym;
1008 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1009 assert (node->op == op_SymConst);
1010 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1011 node->attr.i.sym = sym;
1015 get_SymConst_value_type (ir_node *node) {
1016 assert (node->op == op_SymConst);
1017 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1018 return node->attr.i.tp;
1022 set_SymConst_value_type (ir_node *node, type *tp) {
1023 assert (node->op == op_SymConst);
1024 node->attr.i.tp = tp;
1028 get_Sel_mem (ir_node *node) {
1029 assert (node->op == op_Sel);
1030 return get_irn_n(node, 0);
1034 set_Sel_mem (ir_node *node, ir_node *mem) {
1035 assert (node->op == op_Sel);
1036 set_irn_n(node, 0, mem);
1040 get_Sel_ptr (ir_node *node) {
1041 assert (node->op == op_Sel);
1042 return get_irn_n(node, 1);
1046 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1047 assert (node->op == op_Sel);
1048 set_irn_n(node, 1, ptr);
1052 get_Sel_n_indexs (ir_node *node) {
1053 assert (node->op == op_Sel);
1054 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1058 get_Sel_index_arr (ir_node *node)
1060 assert ((node->op == op_Sel));
1061 if (get_Sel_n_indexs(node) > 0)
1062 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1068 get_Sel_index (ir_node *node, int pos) {
1069 assert (node->op == op_Sel);
1070 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1074 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1075 assert (node->op == op_Sel);
1076 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1080 get_Sel_entity (ir_node *node) {
1081 assert (node->op == op_Sel);
1082 return node->attr.s.ent;
1086 set_Sel_entity (ir_node *node, entity *ent) {
1087 assert (node->op == op_Sel);
1088 node->attr.s.ent = ent;
1092 get_InstOf_ent (ir_node *node) {
1093 assert (node->op = op_InstOf);
1094 return (node->attr.io.ent);
1098 set_InstOf_ent (ir_node *node, type *ent) {
1099 assert (node->op = op_InstOf);
1100 node->attr.io.ent = ent;
1104 get_InstOf_store (ir_node *node) {
1105 assert (node->op = op_InstOf);
1106 return (get_irn_n (node, 0));
1110 set_InstOf_store (ir_node *node, ir_node *obj) {
1111 assert (node->op = op_InstOf);
1112 set_irn_n (node, 0, obj);
1116 get_InstOf_obj (ir_node *node) {
1117 assert (node->op = op_InstOf);
1118 return (get_irn_n (node, 1));
1122 set_InstOf_obj (ir_node *node, ir_node *obj) {
1123 assert (node->op = op_InstOf);
1124 set_irn_n (node, 1, obj);
1128 /* For unary and binary arithmetic operations the access to the
1129 operands can be factored out. Left is the first, right the
1130 second arithmetic value as listed in tech report 0999-33.
1131 unops are: Minus, Abs, Not, Conv, Cast
1132 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1133 Shr, Shrs, Rotate, Cmp */
1137 get_Call_mem (ir_node *node) {
1138 assert (node->op == op_Call);
1139 return get_irn_n(node, 0);
1143 set_Call_mem (ir_node *node, ir_node *mem) {
1144 assert (node->op == op_Call);
1145 set_irn_n(node, 0, mem);
1149 get_Call_ptr (ir_node *node) {
1150 assert (node->op == op_Call);
1151 return get_irn_n(node, 1);
1155 set_Call_ptr (ir_node *node, ir_node *ptr) {
1156 assert (node->op == op_Call);
1157 set_irn_n(node, 1, ptr);
1161 get_Call_param_arr (ir_node *node) {
1162 assert (node->op == op_Call);
1163 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1167 get_Call_n_params (ir_node *node) {
1168 assert (node->op == op_Call);
1169 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1173 get_Call_arity (ir_node *node) {
1174 assert (node->op == op_Call);
1175 return get_Call_n_params(node);
1179 set_Call_arity (ir_node *node, ir_node *arity) {
1180 assert (node->op == op_Call);
1185 get_Call_param (ir_node *node, int pos) {
1186 assert (node->op == op_Call);
1187 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1191 set_Call_param (ir_node *node, int pos, ir_node *param) {
1192 assert (node->op == op_Call);
1193 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1197 get_Call_type (ir_node *node) {
1198 assert (node->op == op_Call);
1199 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1203 set_Call_type (ir_node *node, type *tp) {
1204 assert (node->op == op_Call);
1205 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1206 node->attr.call.cld_tp = tp;
1209 int Call_has_callees(ir_node *node) {
1210 assert(node && node->op == op_Call);
1211 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1212 (node->attr.call.callee_arr != NULL));
1215 int get_Call_n_callees(ir_node * node) {
1216 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1217 return ARR_LEN(node->attr.call.callee_arr);
1220 entity * get_Call_callee(ir_node * node, int pos) {
1221 assert(pos >= 0 && pos < get_Call_n_callees(node));
1222 return node->attr.call.callee_arr[pos];
1225 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1226 assert(node->op == op_Call);
1227 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1228 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1230 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1233 void remove_Call_callee_arr(ir_node * node) {
1234 assert(node->op == op_Call);
1235 node->attr.call.callee_arr = NULL;
1238 ir_node * get_CallBegin_ptr (ir_node *node) {
1239 assert(node->op == op_CallBegin);
1240 return get_irn_n(node, 0);
1242 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1243 assert(node->op == op_CallBegin);
1244 set_irn_n(node, 0, ptr);
1246 ir_node * get_CallBegin_call (ir_node *node) {
1247 assert(node->op == op_CallBegin);
1248 return node->attr.callbegin.call;
1250 void set_CallBegin_call (ir_node *node, ir_node *call) {
1251 assert(node->op == op_CallBegin);
1252 node->attr.callbegin.call = call;
1257 ir_node * get_##OP##_left(ir_node *node) { \
1258 assert(node->op == op_##OP); \
1259 return get_irn_n(node, node->op->op_index); \
1261 void set_##OP##_left(ir_node *node, ir_node *left) { \
1262 assert(node->op == op_##OP); \
1263 set_irn_n(node, node->op->op_index, left); \
1265 ir_node *get_##OP##_right(ir_node *node) { \
1266 assert(node->op == op_##OP); \
1267 return get_irn_n(node, node->op->op_index + 1); \
1269 void set_##OP##_right(ir_node *node, ir_node *right) { \
1270 assert(node->op == op_##OP); \
1271 set_irn_n(node, node->op->op_index + 1, right); \
1275 ir_node *get_##OP##_op(ir_node *node) { \
1276 assert(node->op == op_##OP); \
1277 return get_irn_n(node, node->op->op_index); \
1279 void set_##OP##_op (ir_node *node, ir_node *op) { \
1280 assert(node->op == op_##OP); \
1281 set_irn_n(node, node->op->op_index, op); \
1291 get_Quot_mem (ir_node *node) {
1292 assert (node->op == op_Quot);
1293 return get_irn_n(node, 0);
1297 set_Quot_mem (ir_node *node, ir_node *mem) {
1298 assert (node->op == op_Quot);
1299 set_irn_n(node, 0, mem);
1305 get_DivMod_mem (ir_node *node) {
1306 assert (node->op == op_DivMod);
1307 return get_irn_n(node, 0);
1311 set_DivMod_mem (ir_node *node, ir_node *mem) {
1312 assert (node->op == op_DivMod);
1313 set_irn_n(node, 0, mem);
1319 get_Div_mem (ir_node *node) {
1320 assert (node->op == op_Div);
1321 return get_irn_n(node, 0);
1325 set_Div_mem (ir_node *node, ir_node *mem) {
1326 assert (node->op == op_Div);
1327 set_irn_n(node, 0, mem);
1333 get_Mod_mem (ir_node *node) {
1334 assert (node->op == op_Mod);
1335 return get_irn_n(node, 0);
1339 set_Mod_mem (ir_node *node, ir_node *mem) {
1340 assert (node->op == op_Mod);
1341 set_irn_n(node, 0, mem);
1358 get_Cast_type (ir_node *node) {
1359 assert (node->op == op_Cast);
1360 return node->attr.cast.totype;
1364 set_Cast_type (ir_node *node, type *to_tp) {
1365 assert (node->op == op_Cast);
1366 node->attr.cast.totype = to_tp;
1370 /* Checks for upcast.
1372 * Returns true if the Cast node casts a class type to a super type.
1374 int is_Cast_upcast(ir_node *node) {
1375 type *totype = get_Cast_type(node);
1376 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1377 ir_graph *myirg = get_irn_irg(node);
1379 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1382 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1383 totype = get_pointer_points_to_type(totype);
1384 fromtype = get_pointer_points_to_type(fromtype);
1389 if (!is_Class_type(totype)) return false;
1390 return is_subclass_of(fromtype, totype);
1393 /* Checks for downcast.
1395 * Returns true if the Cast node casts a class type to a sub type.
1397 int is_Cast_downcast(ir_node *node) {
1398 type *totype = get_Cast_type(node);
1399 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1401 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1404 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1405 totype = get_pointer_points_to_type(totype);
1406 fromtype = get_pointer_points_to_type(fromtype);
1411 if (!is_Class_type(totype)) return false;
1412 return is_subclass_of(totype, fromtype);
1416 (is_unop)(const ir_node *node) {
1417 return _is_unop(node);
1421 get_unop_op (ir_node *node) {
1422 if (node->op->opar == oparity_unary)
1423 return get_irn_n(node, node->op->op_index);
1425 assert(node->op->opar == oparity_unary);
1430 set_unop_op (ir_node *node, ir_node *op) {
1431 if (node->op->opar == oparity_unary)
1432 set_irn_n(node, node->op->op_index, op);
1434 assert(node->op->opar == oparity_unary);
1438 (is_binop)(const ir_node *node) {
1439 return _is_binop(node);
1443 get_binop_left (ir_node *node) {
1444 if (node->op->opar == oparity_binary)
1445 return get_irn_n(node, node->op->op_index);
1447 assert(node->op->opar == oparity_binary);
1452 set_binop_left (ir_node *node, ir_node *left) {
1453 if (node->op->opar == oparity_binary)
1454 set_irn_n(node, node->op->op_index, left);
1456 assert (node->op->opar == oparity_binary);
1460 get_binop_right (ir_node *node) {
1461 if (node->op->opar == oparity_binary)
1462 return get_irn_n(node, node->op->op_index + 1);
1464 assert(node->op->opar == oparity_binary);
1469 set_binop_right (ir_node *node, ir_node *right) {
1470 if (node->op->opar == oparity_binary)
1471 set_irn_n(node, node->op->op_index + 1, right);
1473 assert (node->op->opar == oparity_binary);
1476 int is_Phi (const ir_node *n) {
1482 if (op == op_Filter) return get_interprocedural_view();
1485 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1486 (get_irn_arity(n) > 0));
1491 int is_Phi0 (const ir_node *n) {
1494 return ((get_irn_op(n) == op_Phi) &&
1495 (get_irn_arity(n) == 0) &&
1496 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1500 get_Phi_preds_arr (ir_node *node) {
1501 assert (node->op == op_Phi);
1502 return (ir_node **)&(get_irn_in(node)[1]);
1506 get_Phi_n_preds (ir_node *node) {
1507 assert (is_Phi(node) || is_Phi0(node));
1508 return (get_irn_arity(node));
1512 void set_Phi_n_preds (ir_node *node, int n_preds) {
1513 assert (node->op == op_Phi);
1518 get_Phi_pred (ir_node *node, int pos) {
1519 assert (is_Phi(node) || is_Phi0(node));
1520 return get_irn_n(node, pos);
1524 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1525 assert (is_Phi(node) || is_Phi0(node));
1526 set_irn_n(node, pos, pred);
1530 int is_memop(ir_node *node) {
1531 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1534 ir_node *get_memop_mem (ir_node *node) {
1535 assert(is_memop(node));
1536 return get_irn_n(node, 0);
1539 void set_memop_mem (ir_node *node, ir_node *mem) {
1540 assert(is_memop(node));
1541 set_irn_n(node, 0, mem);
1544 ir_node *get_memop_ptr (ir_node *node) {
1545 assert(is_memop(node));
1546 return get_irn_n(node, 1);
1549 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1550 assert(is_memop(node));
1551 set_irn_n(node, 1, ptr);
1555 get_Load_mem (ir_node *node) {
1556 assert (node->op == op_Load);
1557 return get_irn_n(node, 0);
1561 set_Load_mem (ir_node *node, ir_node *mem) {
1562 assert (node->op == op_Load);
1563 set_irn_n(node, 0, mem);
1567 get_Load_ptr (ir_node *node) {
1568 assert (node->op == op_Load);
1569 return get_irn_n(node, 1);
1573 set_Load_ptr (ir_node *node, ir_node *ptr) {
1574 assert (node->op == op_Load);
1575 set_irn_n(node, 1, ptr);
1579 get_Load_mode (ir_node *node) {
1580 assert (node->op == op_Load);
1581 return node->attr.load.load_mode;
1585 set_Load_mode (ir_node *node, ir_mode *mode) {
1586 assert (node->op == op_Load);
1587 node->attr.load.load_mode = mode;
1591 get_Load_volatility (ir_node *node) {
1592 assert (node->op == op_Load);
1593 return node->attr.load.volatility;
1597 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1598 assert (node->op == op_Load);
1599 node->attr.load.volatility = volatility;
1604 get_Store_mem (ir_node *node) {
1605 assert (node->op == op_Store);
1606 return get_irn_n(node, 0);
1610 set_Store_mem (ir_node *node, ir_node *mem) {
1611 assert (node->op == op_Store);
1612 set_irn_n(node, 0, mem);
1616 get_Store_ptr (ir_node *node) {
1617 assert (node->op == op_Store);
1618 return get_irn_n(node, 1);
1622 set_Store_ptr (ir_node *node, ir_node *ptr) {
1623 assert (node->op == op_Store);
1624 set_irn_n(node, 1, ptr);
1628 get_Store_value (ir_node *node) {
1629 assert (node->op == op_Store);
1630 return get_irn_n(node, 2);
1634 set_Store_value (ir_node *node, ir_node *value) {
1635 assert (node->op == op_Store);
1636 set_irn_n(node, 2, value);
1640 get_Store_volatility (ir_node *node) {
1641 assert (node->op == op_Store);
1642 return node->attr.store.volatility;
1646 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1647 assert (node->op == op_Store);
1648 node->attr.store.volatility = volatility;
1653 get_Alloc_mem (ir_node *node) {
1654 assert (node->op == op_Alloc);
1655 return get_irn_n(node, 0);
1659 set_Alloc_mem (ir_node *node, ir_node *mem) {
1660 assert (node->op == op_Alloc);
1661 set_irn_n(node, 0, mem);
1665 get_Alloc_size (ir_node *node) {
1666 assert (node->op == op_Alloc);
1667 return get_irn_n(node, 1);
1671 set_Alloc_size (ir_node *node, ir_node *size) {
1672 assert (node->op == op_Alloc);
1673 set_irn_n(node, 1, size);
1677 get_Alloc_type (ir_node *node) {
1678 assert (node->op == op_Alloc);
1679 return node->attr.a.type = skip_tid(node->attr.a.type);
1683 set_Alloc_type (ir_node *node, type *tp) {
1684 assert (node->op == op_Alloc);
1685 node->attr.a.type = tp;
1689 get_Alloc_where (ir_node *node) {
1690 assert (node->op == op_Alloc);
1691 return node->attr.a.where;
1695 set_Alloc_where (ir_node *node, where_alloc where) {
1696 assert (node->op == op_Alloc);
1697 node->attr.a.where = where;
1702 get_Free_mem (ir_node *node) {
1703 assert (node->op == op_Free);
1704 return get_irn_n(node, 0);
1708 set_Free_mem (ir_node *node, ir_node *mem) {
1709 assert (node->op == op_Free);
1710 set_irn_n(node, 0, mem);
1714 get_Free_ptr (ir_node *node) {
1715 assert (node->op == op_Free);
1716 return get_irn_n(node, 1);
1720 set_Free_ptr (ir_node *node, ir_node *ptr) {
1721 assert (node->op == op_Free);
1722 set_irn_n(node, 1, ptr);
1726 get_Free_size (ir_node *node) {
1727 assert (node->op == op_Free);
1728 return get_irn_n(node, 2);
1732 set_Free_size (ir_node *node, ir_node *size) {
1733 assert (node->op == op_Free);
1734 set_irn_n(node, 2, size);
1738 get_Free_type (ir_node *node) {
1739 assert (node->op == op_Free);
1740 return node->attr.f.type = skip_tid(node->attr.f.type);
1744 set_Free_type (ir_node *node, type *tp) {
1745 assert (node->op == op_Free);
1746 node->attr.f.type = tp;
1750 get_Free_where (ir_node *node) {
1751 assert (node->op == op_Free);
1752 return node->attr.f.where;
1756 set_Free_where (ir_node *node, where_alloc where) {
1757 assert (node->op == op_Free);
1758 node->attr.f.where = where;
1762 get_Sync_preds_arr (ir_node *node) {
1763 assert (node->op == op_Sync);
1764 return (ir_node **)&(get_irn_in(node)[1]);
1768 get_Sync_n_preds (ir_node *node) {
1769 assert (node->op == op_Sync);
1770 return (get_irn_arity(node));
1775 set_Sync_n_preds (ir_node *node, int n_preds) {
1776 assert (node->op == op_Sync);
1781 get_Sync_pred (ir_node *node, int pos) {
1782 assert (node->op == op_Sync);
1783 return get_irn_n(node, pos);
1787 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1788 assert (node->op == op_Sync);
1789 set_irn_n(node, pos, pred);
1792 type *get_Proj_type(ir_node *n)
1795 ir_node *pred = get_Proj_pred(n);
1797 switch (get_irn_opcode(pred)) {
1800 /* Deal with Start / Call here: we need to know the Proj Nr. */
1801 assert(get_irn_mode(pred) == mode_T);
1802 pred_pred = get_Proj_pred(pred);
1803 if (get_irn_op(pred_pred) == op_Start) {
1804 type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1805 tp = get_method_param_type(mtp, get_Proj_proj(n));
1806 } else if (get_irn_op(pred_pred) == op_Call) {
1807 type *mtp = get_Call_type(pred_pred);
1808 tp = get_method_res_type(mtp, get_Proj_proj(n));
1811 case iro_Start: break;
1812 case iro_Call: break;
1814 ir_node *a = get_Load_ptr(pred);
1815 if (get_irn_op(a) == op_Sel)
1816 tp = get_entity_type(get_Sel_entity(a));
1825 get_Proj_pred (ir_node *node) {
1826 assert (is_Proj(node));
1827 return get_irn_n(node, 0);
1831 set_Proj_pred (ir_node *node, ir_node *pred) {
1832 assert (is_Proj(node));
1833 set_irn_n(node, 0, pred);
1837 get_Proj_proj (ir_node *node) {
1838 assert (is_Proj(node));
1839 if (get_irn_opcode(node) == iro_Proj) {
1840 return node->attr.proj;
1842 assert(get_irn_opcode(node) == iro_Filter);
1843 return node->attr.filter.proj;
1848 set_Proj_proj (ir_node *node, long proj) {
1849 assert (node->op == op_Proj);
1850 node->attr.proj = proj;
1854 get_Tuple_preds_arr (ir_node *node) {
1855 assert (node->op == op_Tuple);
1856 return (ir_node **)&(get_irn_in(node)[1]);
1860 get_Tuple_n_preds (ir_node *node) {
1861 assert (node->op == op_Tuple);
1862 return (get_irn_arity(node));
1867 set_Tuple_n_preds (ir_node *node, int n_preds) {
1868 assert (node->op == op_Tuple);
1873 get_Tuple_pred (ir_node *node, int pos) {
1874 assert (node->op == op_Tuple);
1875 return get_irn_n(node, pos);
1879 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1880 assert (node->op == op_Tuple);
1881 set_irn_n(node, pos, pred);
1885 get_Id_pred (ir_node *node) {
1886 assert (node->op == op_Id);
1887 return get_irn_n(node, 0);
1891 set_Id_pred (ir_node *node, ir_node *pred) {
1892 assert (node->op == op_Id);
1893 set_irn_n(node, 0, pred);
1896 ir_node *get_Confirm_value (ir_node *node) {
1897 assert (node->op == op_Confirm);
1898 return get_irn_n(node, 0);
1900 void set_Confirm_value (ir_node *node, ir_node *value) {
1901 assert (node->op == op_Confirm);
1902 set_irn_n(node, 0, value);
1904 ir_node *get_Confirm_bound (ir_node *node) {
1905 assert (node->op == op_Confirm);
1906 return get_irn_n(node, 1);
1908 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1909 assert (node->op == op_Confirm);
1910 set_irn_n(node, 0, bound);
1912 pn_Cmp get_Confirm_cmp (ir_node *node) {
1913 assert (node->op == op_Confirm);
1914 return node->attr.confirm_cmp;
1916 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1917 assert (node->op == op_Confirm);
1918 node->attr.confirm_cmp = cmp;
1923 get_Filter_pred (ir_node *node) {
1924 assert(node->op == op_Filter);
1928 set_Filter_pred (ir_node *node, ir_node *pred) {
1929 assert(node->op == op_Filter);
1933 get_Filter_proj(ir_node *node) {
1934 assert(node->op == op_Filter);
1935 return node->attr.filter.proj;
1938 set_Filter_proj (ir_node *node, long proj) {
1939 assert(node->op == op_Filter);
1940 node->attr.filter.proj = proj;
1943 /* Don't use get_irn_arity, get_irn_n in implementation as access
1944 shall work independent of view!!! */
1945 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1946 assert(node->op == op_Filter);
1947 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1948 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1949 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1950 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1951 node->attr.filter.in_cg[0] = node->in[0];
1953 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1956 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1957 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1958 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1959 node->attr.filter.in_cg[pos + 1] = pred;
1961 int get_Filter_n_cg_preds(ir_node *node) {
1962 assert(node->op == op_Filter && node->attr.filter.in_cg);
1963 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1965 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1967 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1969 arity = ARR_LEN(node->attr.filter.in_cg);
1970 assert(pos < arity - 1);
1971 return node->attr.filter.in_cg[pos + 1];
1975 ir_node *get_Mux_sel (ir_node *node) {
1976 assert(node->op == op_Mux);
1979 void set_Mux_sel (ir_node *node, ir_node *sel) {
1980 assert(node->op == op_Mux);
1984 ir_node *get_Mux_false (ir_node *node) {
1985 assert(node->op == op_Mux);
1988 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1989 assert(node->op == op_Mux);
1990 node->in[2] = ir_false;
1993 ir_node *get_Mux_true (ir_node *node) {
1994 assert(node->op == op_Mux);
1997 void set_Mux_true (ir_node *node, ir_node *ir_true) {
1998 assert(node->op == op_Mux);
1999 node->in[3] = ir_true;
2004 get_irn_irg(const ir_node *node) {
2005 if (! is_Block(node))
2006 node = get_nodes_block(node);
2007 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2008 node = get_nodes_block(node);
2009 assert(get_irn_op(node) == op_Block);
2010 return node->attr.block.irg;
2014 /*----------------------------------------------------------------*/
2015 /* Auxiliary routines */
2016 /*----------------------------------------------------------------*/
2019 skip_Proj (ir_node *node) {
2020 /* don't assert node !!! */
2021 if (node && is_Proj(node)) {
2022 return get_Proj_pred(node);
2029 skip_Tuple (ir_node *node) {
2032 if (!get_opt_normalize()) return node;
2034 node = skip_Id(node);
2035 if (get_irn_op(node) == op_Proj) {
2036 pred = skip_Id(get_Proj_pred(node));
2037 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
2038 pred = skip_Id(skip_Tuple(pred));
2039 if (get_irn_op(pred) == op_Tuple)
2040 return get_Tuple_pred(pred, get_Proj_proj(node));
2045 /** returns operand of node if node is a Cast */
2046 ir_node *skip_Cast (ir_node *node) {
2047 if (node && get_irn_op(node) == op_Cast) {
2048 return skip_Id(get_irn_n(node, 0));
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops.
   NOTE(review): a second, live skip_Id follows below; this earlier
   variant is kept disabled to avoid a duplicate definition — confirm
   against the original file's #if guards. */
#if 0
ir_node *skip_Id (ir_node *node) {
  /* don't assert node !!! */

  if (!get_opt_normalize()) return node;

  /* Don't use get_Id_pred:  We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != node->in[0+1])) {
    ir_node *rem_pred = node->in[0+1];
    ir_node *res;

    assert (get_irn_arity (node) > 0);

    node->in[0+1] = node;
    res = skip_Id(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res;
    return res;
  }
  return node;
}
#endif
2083 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2084 than any other approach, as Id chains are resolved and all point to the real node, or
2085 all id's are self loops. */
2087 skip_Id (ir_node *node) {
2089 /* don't assert node !!! */
2091 if (!node || (node->op != op_Id)) return node;
2093 if (!get_opt_normalize()) return node;
2095 /* Don't use get_Id_pred: We get into an endless loop for
2096 self-referencing Ids. */
2097 pred = node->in[0+1];
2099 if (pred->op != op_Id) return pred;
2101 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2102 ir_node *rem_pred, *res;
2104 if (pred->op != op_Id) return pred; /* shortcut */
2107 assert (get_irn_arity (node) > 0);
2109 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2110 res = skip_Id(rem_pred);
2111 if (res->op == op_Id) /* self-loop */ return node;
2113 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2122 (is_Bad)(const ir_node *node) {
2123 return _is_Bad(node);
2127 (is_no_Block)(const ir_node *node) {
2128 return _is_no_Block(node);
2132 (is_Block)(const ir_node *node) {
2133 return _is_Block(node);
2136 /* returns true if node is a Unknown node. */
2138 is_Unknown (const ir_node *node) {
2140 return (get_irn_op(node) == op_Unknown);
2144 is_Proj (const ir_node *node) {
2146 return node->op == op_Proj
2147 || (!get_interprocedural_view() && node->op == op_Filter);
2150 /* Returns true if the operation manipulates control flow. */
2152 is_cfop(const ir_node *node) {
2153 return is_cfopcode(get_irn_op(node));
2156 /* Returns true if the operation manipulates interprocedural control flow:
2157 CallBegin, EndReg, EndExcept */
2158 int is_ip_cfop(const ir_node *node) {
2159 return is_ip_cfopcode(get_irn_op(node));
2162 /* Returns true if the operation can change the control flow because
2165 is_fragile_op(const ir_node *node) {
2166 return is_op_fragile(get_irn_op(node));
2169 /* Returns the memory operand of fragile operations. */
2170 ir_node *get_fragile_op_mem(ir_node *node) {
2171 assert(node && is_fragile_op(node));
2173 switch (get_irn_opcode (node)) {
2182 return get_irn_n(node, 0);
2187 assert(0 && "should not be reached");
2192 /* Returns true if the operation is a forking control flow operation. */
2194 is_forking_op(const ir_node *node) {
2195 return is_op_forking(get_irn_op(node));
2198 type *(get_irn_type)(ir_node *node) {
2199 return _get_irn_type(node);
2202 /** the get_type operation must be always implemented */
2203 static type *get_Null_type(ir_node *n) {
2207 /* set the get_type operation */
2208 ir_op *firm_set_default_get_type(ir_op *op)
2211 case iro_Const: op->get_type = get_Const_type; break;
2212 case iro_SymConst: op->get_type = get_SymConst_value_type; break;
2213 case iro_Cast: op->get_type = get_Cast_type; break;
2214 case iro_Proj: op->get_type = get_Proj_type; break;
2215 default: op->get_type = get_Null_type; break;
2220 #ifdef DEBUG_libfirm
2221 void dump_irn (ir_node *n) {
2222 int i, arity = get_irn_arity(n);
2223 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2225 ir_node *pred = get_irn_n(n, -1);
2226 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2227 get_irn_node_nr(pred), (void *)pred);
2229 printf(" preds: \n");
2230 for (i = 0; i < arity; ++i) {
2231 ir_node *pred = get_irn_n(n, i);
2232 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2233 get_irn_node_nr(pred), (void *)pred);
2237 #else /* DEBUG_libfirm */
2238 void dump_irn (ir_node *n) {}
2239 #endif /* DEBUG_libfirm */