3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
/* Some constants fixing the positions of nodes predecessors
   in the in array. */
#define CALL_PARAM_OFFSET     2
#define FUNCCALL_PARAM_OFFSET 1
#define SEL_INDEX_OFFSET      2
#define RETURN_RESULT_OFFSET  1  /* mem is not a result */
#define END_KEEPALIVE_OFFSET  0
/* Name strings of the pn_Cmp constants, indexed by the pnc number.
   Must stay in sync with the pn_Cmp enumeration. */
static const char *pnc_name_arr [] = {
  "False", "Eq", "Lt", "Le",
  "Gt", "Ge", "Lg", "Leg", "Uo",
  "Ue", "Ul", "Ule", "Ug", "Uge",
  "Ne", "True"
};

/**
 * Returns the pnc name from a pnc constant.
 */
const char *get_pnc_string(int pnc) {
  /* guard against out-of-range projection numbers */
  assert(0 <= pnc && pnc < (int)(sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0])));
  return pnc_name_arr[pnc];
}
57 * Calculates the negated pnc condition.
60 get_negated_pnc(int pnc) {
62 case pn_Cmp_False: return pn_Cmp_True; break;
63 case pn_Cmp_Eq: return pn_Cmp_Ne; break;
64 case pn_Cmp_Lt: return pn_Cmp_Uge; break;
65 case pn_Cmp_Le: return pn_Cmp_Ug; break;
66 case pn_Cmp_Gt: return pn_Cmp_Ule; break;
67 case pn_Cmp_Ge: return pn_Cmp_Ul; break;
68 case pn_Cmp_Lg: return pn_Cmp_Ue; break;
69 case pn_Cmp_Leg: return pn_Cmp_Uo; break;
70 case pn_Cmp_Uo: return pn_Cmp_Leg; break;
71 case pn_Cmp_Ue: return pn_Cmp_Lg; break;
72 case pn_Cmp_Ul: return pn_Cmp_Ge; break;
73 case pn_Cmp_Ule: return pn_Cmp_Gt; break;
74 case pn_Cmp_Ug: return pn_Cmp_Le; break;
75 case pn_Cmp_Uge: return pn_Cmp_Lt; break;
76 case pn_Cmp_Ne: return pn_Cmp_Eq; break;
77 case pn_Cmp_True: return pn_Cmp_False; break;
79 return 99; /* to shut up gcc */
82 const char *pns_name_arr [] = {
83 "initial_exec", "global_store",
84 "frame_base", "globals", "args"
87 const char *symconst_name_arr [] = {
88 "type_tag", "size", "addr_name", "addr_ent"
92 * Indicates, whether additional data can be registered to ir nodes.
93 * If set to 1, this is not possible anymore.
95 static int forbid_new_data = 0;
98 * The amount of additional space for custom data to be allocated upon
99 * creating a new node.
101 unsigned firm_add_node_size = 0;
104 /* register new space for every node */
105 unsigned register_additional_node_data(unsigned size) {
106 assert(!forbid_new_data && "Too late to register additional node data");
111 return firm_add_node_size += size;
117 /* Forbid the addition of new data to an ir node. */
122 * irnode constructor.
123 * Create a new irnode in irg, with an op, mode, arity and
124 * some incoming irnodes.
125 * If arity is negative, a node with a dynamic array is created.
128 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
129 int arity, ir_node **in)
132 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
135 assert(irg && op && mode);
136 p = obstack_alloc (irg->obst, node_size);
137 memset(p, 0, node_size);
138 res = (ir_node *) (p + firm_add_node_size);
140 res->kind = k_ir_node;
146 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
148 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
149 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
153 set_irn_dbg_info(res, db);
157 res->node_nr = get_irp_new_node_nr();
160 #if FIRM_EDGES_INPLACE
163 int not_a_block = is_no_Block(res);
165 INIT_LIST_HEAD(&res->edge_info.outs_head);
167 for (i = 0, n = arity + not_a_block; i < n; ++i)
168 edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
172 hook_new_node(irg, res);
177 /*-- getting some parameters from ir_nodes --*/
180 (is_ir_node)(const void *thing) {
181 return _is_ir_node(thing);
185 (get_irn_intra_arity)(const ir_node *node) {
186 return _get_irn_intra_arity(node);
190 (get_irn_inter_arity)(const ir_node *node) {
191 return _get_irn_inter_arity(node);
194 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
197 (get_irn_arity)(const ir_node *node) {
198 return _get_irn_arity(node);
201 /* Returns the array with ins. This array is shifted with respect to the
202 array accessed by get_irn_n: The block operand is at position 0 not -1.
203 (@@@ This should be changed.)
204 The order of the predecessors in this array is not guaranteed, except that
205 lists of operands as predecessors of Block or arguments of a Call are
208 get_irn_in (const ir_node *node) {
210 if (get_interprocedural_view()) { /* handle Filter and Block specially */
211 if (get_irn_opcode(node) == iro_Filter) {
212 assert(node->attr.filter.in_cg);
213 return node->attr.filter.in_cg;
214 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
215 return node->attr.block.in_cg;
217 /* else fall through */
223 set_irn_in (ir_node *node, int arity, ir_node **in) {
226 if (get_interprocedural_view()) { /* handle Filter and Block specially */
227 if (get_irn_opcode(node) == iro_Filter) {
228 assert(node->attr.filter.in_cg);
229 arr = &node->attr.filter.in_cg;
230 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
231 arr = &node->attr.block.in_cg;
238 if (arity != ARR_LEN(*arr) - 1) {
239 ir_node * block = (*arr)[0];
240 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
243 fix_backedges(current_ir_graph->obst, node);
244 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
248 (get_irn_intra_n)(const ir_node *node, int n) {
249 return _get_irn_intra_n (node, n);
253 (get_irn_inter_n)(const ir_node *node, int n) {
254 return _get_irn_inter_n (node, n);
257 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
260 (get_irn_n)(const ir_node *node, int n) {
261 return _get_irn_n(node, n);
265 set_irn_n (ir_node *node, int n, ir_node *in) {
266 assert(node && node->kind == k_ir_node);
268 assert(n < get_irn_arity(node));
269 assert(in && in->kind == k_ir_node);
271 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
272 /* Change block pred in both views! */
273 node->in[n + 1] = in;
274 assert(node->attr.filter.in_cg);
275 node->attr.filter.in_cg[n + 1] = in;
278 if (get_interprocedural_view()) { /* handle Filter and Block specially */
279 if (get_irn_opcode(node) == iro_Filter) {
280 assert(node->attr.filter.in_cg);
281 node->attr.filter.in_cg[n + 1] = in;
283 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
284 node->attr.block.in_cg[n + 1] = in;
287 /* else fall through */
291 hook_set_irn_n(node, n, in, node->in[n + 1]);
293 /* Here, we rely on src and tgt being in the current ir graph */
294 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
296 node->in[n + 1] = in;
300 (get_irn_mode)(const ir_node *node) {
301 return _get_irn_mode(node);
305 (set_irn_mode)(ir_node *node, ir_mode *mode)
307 _set_irn_mode(node, mode);
311 get_irn_modecode (const ir_node *node)
314 return node->mode->code;
317 /** Gets the string representation of the mode .*/
319 get_irn_modename (const ir_node *node)
322 return get_mode_name(node->mode);
326 get_irn_modeident (const ir_node *node)
329 return get_mode_ident(node->mode);
333 (get_irn_op)(const ir_node *node)
335 return _get_irn_op(node);
338 /* should be private to the library: */
340 set_irn_op (ir_node *node, ir_op *op)
347 (get_irn_opcode)(const ir_node *node)
349 return _get_irn_opcode(node);
353 get_irn_opname (const ir_node *node)
356 if ((get_irn_op((ir_node *)node) == op_Phi) &&
357 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
358 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
359 return get_id_str(node->op->name);
363 get_irn_opident (const ir_node *node)
366 return node->op->name;
370 (get_irn_visited)(const ir_node *node)
372 return _get_irn_visited(node);
376 (set_irn_visited)(ir_node *node, unsigned long visited)
378 _set_irn_visited(node, visited);
382 (mark_irn_visited)(ir_node *node) {
383 _mark_irn_visited(node);
387 (irn_not_visited)(const ir_node *node) {
388 return _irn_not_visited(node);
392 (irn_visited)(const ir_node *node) {
393 return _irn_visited(node);
397 (set_irn_link)(ir_node *node, void *link) {
398 _set_irn_link(node, link);
402 (get_irn_link)(const ir_node *node) {
403 return _get_irn_link(node);
407 (get_irn_pinned)(const ir_node *node) {
408 return _get_irn_pinned(node);
411 void set_irn_pinned(ir_node *node, op_pin_state state) {
412 /* due to optimization an opt may be turned into a Tuple */
413 if (get_irn_op(node) == op_Tuple)
416 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
417 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
419 node->attr.except.pin_state = state;
422 #ifdef DO_HEAPANALYSIS
423 /* Access the abstract interpretation information of a node.
424 Returns NULL if no such information is available. */
425 struct abstval *get_irn_abst_value(ir_node *n) {
428 /* Set the abstract interpretation information of a node. */
429 void set_irn_abst_value(ir_node *n, struct abstval *os) {
432 struct section *firm_get_irn_section(ir_node *n) {
435 void firm_set_irn_section(ir_node *n, struct section *s) {
439 /* Dummies needed for firmjni. */
440 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
441 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
442 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
443 void firm_set_irn_section(ir_node *n, struct section *s) {}
444 #endif /* DO_HEAPANALYSIS */
447 /* Outputs a unique number for this node */
449 get_irn_node_nr(const ir_node *node) {
452 return node->node_nr;
459 get_irn_const_attr (ir_node *node)
461 assert (node->op == op_Const);
462 return node->attr.con;
466 get_irn_proj_attr (ir_node *node)
468 assert (node->op == op_Proj);
469 return node->attr.proj;
473 get_irn_alloc_attr (ir_node *node)
475 assert (node->op == op_Alloc);
480 get_irn_free_attr (ir_node *node)
482 assert (node->op == op_Free);
487 get_irn_symconst_attr (ir_node *node)
489 assert (node->op == op_SymConst);
494 get_irn_call_attr (ir_node *node)
496 assert (node->op == op_Call);
497 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
501 get_irn_sel_attr (ir_node *node)
503 assert (node->op == op_Sel);
508 get_irn_phi_attr (ir_node *node)
510 assert (node->op == op_Phi);
511 return node->attr.phi0_pos;
515 get_irn_block_attr (ir_node *node)
517 assert (node->op == op_Block);
518 return node->attr.block;
522 get_irn_load_attr (ir_node *node)
524 assert (node->op == op_Load);
525 return node->attr.load;
529 get_irn_store_attr (ir_node *node)
531 assert (node->op == op_Store);
532 return node->attr.store;
536 get_irn_except_attr (ir_node *node)
538 assert (node->op == op_Div || node->op == op_Quot ||
539 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
540 return node->attr.except;
543 /** manipulate fields of individual nodes **/
545 /* this works for all except Block */
547 get_nodes_block (const ir_node *node) {
548 assert (!(node->op == op_Block));
549 return get_irn_n(node, -1);
553 set_nodes_block (ir_node *node, ir_node *block) {
554 assert (!(node->op == op_Block));
555 set_irn_n(node, -1, block);
558 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
559 * from Start. If so returns frame type, else Null. */
560 type *is_frame_pointer(ir_node *n) {
561 if ((get_irn_op(n) == op_Proj) &&
562 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
563 ir_node *start = get_Proj_pred(n);
564 if (get_irn_op(start) == op_Start) {
565 return get_irg_frame_type(get_irn_irg(start));
571 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
572 * from Start. If so returns global type, else Null. */
573 type *is_globals_pointer(ir_node *n) {
574 if ((get_irn_op(n) == op_Proj) &&
575 (get_Proj_proj(n) == pn_Start_P_globals)) {
576 ir_node *start = get_Proj_pred(n);
577 if (get_irn_op(start) == op_Start) {
578 return get_glob_type();
584 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
585 * from Start. If so returns 1, else 0. */
586 int is_value_arg_pointer(ir_node *n) {
587 if ((get_irn_op(n) == op_Proj) &&
588 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
589 (get_irn_op(get_Proj_pred(n)) == op_Start))
594 /* Returns an array with the predecessors of the Block. Depending on
595 the implementation of the graph data structure this can be a copy of
596 the internal representation of predecessors as well as the internal
597 array itself. Therefore writing to this array might obstruct the ir. */
599 get_Block_cfgpred_arr (ir_node *node)
601 assert ((node->op == op_Block));
602 return (ir_node **)&(get_irn_in(node)[1]);
607 get_Block_n_cfgpreds (ir_node *node) {
608 assert ((node->op == op_Block));
609 return get_irn_arity(node);
613 get_Block_cfgpred (ir_node *node, int pos) {
614 assert(-1 <= pos && pos < get_irn_arity(node));
615 assert(node->op == op_Block);
616 return get_irn_n(node, pos);
620 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
621 assert (node->op == op_Block);
622 set_irn_n(node, pos, pred);
626 get_Block_matured (ir_node *node) {
627 assert (node->op == op_Block);
628 return node->attr.block.matured;
632 set_Block_matured (ir_node *node, bool matured) {
633 assert (node->op == op_Block);
634 node->attr.block.matured = matured;
638 (get_Block_block_visited)(ir_node *node) {
639 return _get_Block_block_visited(node);
643 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
644 _set_Block_block_visited(node, visit);
647 /* For this current_ir_graph must be set. */
649 (mark_Block_block_visited)(ir_node *node) {
650 _mark_Block_block_visited(node);
654 (Block_not_block_visited)(ir_node *node) {
655 return _Block_not_block_visited(node);
659 get_Block_graph_arr (ir_node *node, int pos) {
660 assert (node->op == op_Block);
661 return node->attr.block.graph_arr[pos+1];
665 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
666 assert (node->op == op_Block);
667 node->attr.block.graph_arr[pos+1] = value;
670 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
671 assert(node->op == op_Block);
672 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
673 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
674 node->attr.block.in_cg[0] = NULL;
675 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
677 /* Fix backedge array. fix_backedges operates depending on
678 interprocedural_view. */
679 int ipv = get_interprocedural_view();
680 set_interprocedural_view(true);
681 fix_backedges(current_ir_graph->obst, node);
682 set_interprocedural_view(ipv);
685 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
688 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
689 assert(node->op == op_Block &&
690 node->attr.block.in_cg &&
691 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
692 node->attr.block.in_cg[pos + 1] = pred;
695 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
696 assert(node->op == op_Block);
697 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
700 int get_Block_cg_n_cfgpreds(ir_node * node) {
701 assert(node->op == op_Block);
702 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
705 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
706 assert(node->op == op_Block && node->attr.block.in_cg);
707 return node->attr.block.in_cg[pos + 1];
710 void remove_Block_cg_cfgpred_arr(ir_node * node) {
711 assert(node->op == op_Block);
712 node->attr.block.in_cg = NULL;
715 ir_node *(set_Block_dead)(ir_node *block) {
716 return _set_Block_dead(block);
719 int (is_Block_dead)(const ir_node *block) {
720 return _is_Block_dead(block);
724 set_Start_irg(ir_node *node, ir_graph *irg) {
725 assert(node->op == op_Start);
726 assert(is_ir_graph(irg));
727 assert(0 && " Why set irg? -- use set_irn_irg");
731 get_End_n_keepalives(ir_node *end) {
732 assert (end->op == op_End);
733 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
737 get_End_keepalive(ir_node *end, int pos) {
738 assert (end->op == op_End);
739 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
743 add_End_keepalive (ir_node *end, ir_node *ka) {
744 assert (end->op == op_End);
745 ARR_APP1 (ir_node *, end->in, ka);
749 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
750 assert (end->op == op_End);
751 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
755 free_End (ir_node *end) {
756 assert (end->op == op_End);
758 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
759 end->in = NULL; /* @@@ make sure we get an error if we use the
760 in array afterwards ... */
765 > Implementing the case construct (which is where the constant Proj node is
766 > important) involves far more than simply determining the constant values.
767 > We could argue that this is more properly a function of the translator from
768 > Firm to the target machine. That could be done if there was some way of
769 > projecting "default" out of the Cond node.
770 I know it's complicated.
771 Basically there are two proglems:
772 - determining the gaps between the projs
773 - determining the biggest case constant to know the proj number for
775 I see several solutions:
776 1. Introduce a ProjDefault node. Solves both problems.
777 This means to extend all optimizations executed during construction.
778 2. Give the Cond node for switch two flavors:
779 a) there are no gaps in the projs (existing flavor)
780 b) gaps may exist, default proj is still the Proj with the largest
781 projection number. This covers also the gaps.
782 3. Fix the semantic of the Cond to that of 2b)
784 Solution 2 seems to be the best:
785 Computing the gaps in the Firm representation is not too hard, i.e.,
786 libFIRM can implement a routine that transforms between the two
787 flavours. This is also possible for 1) but 2) does not require to
788 change any existing optimization.
789 Further it should be far simpler to determine the biggest constant than
791 I don't want to choose 3) as 2a) seems to have advantages for
792 dataflow analysis and 3) does not allow to convert the representation to
796 get_Cond_selector (ir_node *node) {
797 assert (node->op == op_Cond);
798 return get_irn_n(node, 0);
802 set_Cond_selector (ir_node *node, ir_node *selector) {
803 assert (node->op == op_Cond);
804 set_irn_n(node, 0, selector);
808 get_Cond_kind (ir_node *node) {
809 assert (node->op == op_Cond);
810 return node->attr.c.kind;
814 set_Cond_kind (ir_node *node, cond_kind kind) {
815 assert (node->op == op_Cond);
816 node->attr.c.kind = kind;
820 get_Cond_defaultProj (ir_node *node) {
821 assert (node->op == op_Cond);
822 return node->attr.c.default_proj;
826 get_Return_mem (ir_node *node) {
827 assert (node->op == op_Return);
828 return get_irn_n(node, 0);
832 set_Return_mem (ir_node *node, ir_node *mem) {
833 assert (node->op == op_Return);
834 set_irn_n(node, 0, mem);
838 get_Return_n_ress (ir_node *node) {
839 assert (node->op == op_Return);
840 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
844 get_Return_res_arr (ir_node *node)
846 assert ((node->op == op_Return));
847 if (get_Return_n_ress(node) > 0)
848 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
855 set_Return_n_res (ir_node *node, int results) {
856 assert (node->op == op_Return);
861 get_Return_res (ir_node *node, int pos) {
862 assert (node->op == op_Return);
863 assert (get_Return_n_ress(node) > pos);
864 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
868 set_Return_res (ir_node *node, int pos, ir_node *res){
869 assert (node->op == op_Return);
870 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
874 get_Raise_mem (ir_node *node) {
875 assert (node->op == op_Raise);
876 return get_irn_n(node, 0);
880 set_Raise_mem (ir_node *node, ir_node *mem) {
881 assert (node->op == op_Raise);
882 set_irn_n(node, 0, mem);
886 get_Raise_exo_ptr (ir_node *node) {
887 assert (node->op == op_Raise);
888 return get_irn_n(node, 1);
892 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
893 assert (node->op == op_Raise);
894 set_irn_n(node, 1, exo_ptr);
897 tarval *(get_Const_tarval)(ir_node *node) {
898 return _get_Const_tarval(node);
902 set_Const_tarval (ir_node *node, tarval *con) {
903 assert (node->op == op_Const);
904 node->attr.con.tv = con;
907 cnst_classify_t (classify_Const)(ir_node *node)
909 return _classify_Const(node);
913 /* The source language type. Must be an atomic type. Mode of type must
914 be mode of node. For tarvals from entities type must be pointer to
917 get_Const_type (ir_node *node) {
918 assert (node->op == op_Const);
919 return node->attr.con.tp;
923 set_Const_type (ir_node *node, type *tp) {
924 assert (node->op == op_Const);
925 if (tp != firm_unknown_type) {
926 assert (is_atomic_type(tp));
927 assert (get_type_mode(tp) == get_irn_mode(node));
929 node->attr.con.tp = tp;
934 get_SymConst_kind (const ir_node *node) {
935 assert (node->op == op_SymConst);
936 return node->attr.i.num;
940 set_SymConst_kind (ir_node *node, symconst_kind num) {
941 assert (node->op == op_SymConst);
942 node->attr.i.num = num;
946 get_SymConst_type (ir_node *node) {
947 assert ( (node->op == op_SymConst)
948 && ( get_SymConst_kind(node) == symconst_type_tag
949 || get_SymConst_kind(node) == symconst_size));
950 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
954 set_SymConst_type (ir_node *node, type *tp) {
955 assert ( (node->op == op_SymConst)
956 && ( get_SymConst_kind(node) == symconst_type_tag
957 || get_SymConst_kind(node) == symconst_size));
958 node->attr.i.sym.type_p = tp;
962 get_SymConst_name (ir_node *node) {
963 assert ( (node->op == op_SymConst)
964 && (get_SymConst_kind(node) == symconst_addr_name));
965 return node->attr.i.sym.ident_p;
969 set_SymConst_name (ir_node *node, ident *name) {
970 assert ( (node->op == op_SymConst)
971 && (get_SymConst_kind(node) == symconst_addr_name));
972 node->attr.i.sym.ident_p = name;
976 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
977 entity *get_SymConst_entity (ir_node *node) {
978 assert ( (node->op == op_SymConst)
979 && (get_SymConst_kind (node) == symconst_addr_ent));
980 return node->attr.i.sym.entity_p;
983 void set_SymConst_entity (ir_node *node, entity *ent) {
984 assert ( (node->op == op_SymConst)
985 && (get_SymConst_kind(node) == symconst_addr_ent));
986 node->attr.i.sym.entity_p = ent;
989 union symconst_symbol
990 get_SymConst_symbol (ir_node *node) {
991 assert (node->op == op_SymConst);
992 return node->attr.i.sym;
996 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
997 assert (node->op == op_SymConst);
998 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
999 node->attr.i.sym = sym;
1003 get_SymConst_value_type (ir_node *node) {
1004 assert (node->op == op_SymConst);
1005 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1006 return node->attr.i.tp;
1010 set_SymConst_value_type (ir_node *node, type *tp) {
1011 assert (node->op == op_SymConst);
1012 node->attr.i.tp = tp;
1016 get_Sel_mem (ir_node *node) {
1017 assert (node->op == op_Sel);
1018 return get_irn_n(node, 0);
1022 set_Sel_mem (ir_node *node, ir_node *mem) {
1023 assert (node->op == op_Sel);
1024 set_irn_n(node, 0, mem);
1028 get_Sel_ptr (ir_node *node) {
1029 assert (node->op == op_Sel);
1030 return get_irn_n(node, 1);
1034 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1035 assert (node->op == op_Sel);
1036 set_irn_n(node, 1, ptr);
1040 get_Sel_n_indexs (ir_node *node) {
1041 assert (node->op == op_Sel);
1042 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1046 get_Sel_index_arr (ir_node *node)
1048 assert ((node->op == op_Sel));
1049 if (get_Sel_n_indexs(node) > 0)
1050 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1056 get_Sel_index (ir_node *node, int pos) {
1057 assert (node->op == op_Sel);
1058 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1062 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1063 assert (node->op == op_Sel);
1064 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1068 get_Sel_entity (ir_node *node) {
1069 assert (node->op == op_Sel);
1070 return node->attr.s.ent;
1074 set_Sel_entity (ir_node *node, entity *ent) {
1075 assert (node->op == op_Sel);
1076 node->attr.s.ent = ent;
1080 get_InstOf_ent (ir_node *node) {
1081 assert (node->op = op_InstOf);
1082 return (node->attr.io.ent);
1086 set_InstOf_ent (ir_node *node, type *ent) {
1087 assert (node->op = op_InstOf);
1088 node->attr.io.ent = ent;
1092 get_InstOf_store (ir_node *node) {
1093 assert (node->op = op_InstOf);
1094 return (get_irn_n (node, 0));
1098 set_InstOf_store (ir_node *node, ir_node *obj) {
1099 assert (node->op = op_InstOf);
1100 set_irn_n (node, 0, obj);
1104 get_InstOf_obj (ir_node *node) {
1105 assert (node->op = op_InstOf);
1106 return (get_irn_n (node, 1));
1110 set_InstOf_obj (ir_node *node, ir_node *obj) {
1111 assert (node->op = op_InstOf);
1112 set_irn_n (node, 1, obj);
1116 /* For unary and binary arithmetic operations the access to the
1117 operands can be factored out. Left is the first, right the
1118 second arithmetic value as listed in tech report 0999-33.
1119 unops are: Minus, Abs, Not, Conv, Cast
1120 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1121 Shr, Shrs, Rotate, Cmp */
1125 get_Call_mem (ir_node *node) {
1126 assert (node->op == op_Call);
1127 return get_irn_n(node, 0);
1131 set_Call_mem (ir_node *node, ir_node *mem) {
1132 assert (node->op == op_Call);
1133 set_irn_n(node, 0, mem);
1137 get_Call_ptr (ir_node *node) {
1138 assert (node->op == op_Call);
1139 return get_irn_n(node, 1);
1143 set_Call_ptr (ir_node *node, ir_node *ptr) {
1144 assert (node->op == op_Call);
1145 set_irn_n(node, 1, ptr);
1149 get_Call_param_arr (ir_node *node) {
1150 assert (node->op == op_Call);
1151 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1155 get_Call_n_params (ir_node *node) {
1156 assert (node->op == op_Call);
1157 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1161 get_Call_arity (ir_node *node) {
1162 assert (node->op == op_Call);
1163 return get_Call_n_params(node);
1167 set_Call_arity (ir_node *node, ir_node *arity) {
1168 assert (node->op == op_Call);
1173 get_Call_param (ir_node *node, int pos) {
1174 assert (node->op == op_Call);
1175 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1179 set_Call_param (ir_node *node, int pos, ir_node *param) {
1180 assert (node->op == op_Call);
1181 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1185 get_Call_type (ir_node *node) {
1186 assert (node->op == op_Call);
1187 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1191 set_Call_type (ir_node *node, type *tp) {
1192 assert (node->op == op_Call);
1193 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1194 node->attr.call.cld_tp = tp;
1197 int Call_has_callees(ir_node *node) {
1198 assert(node && node->op == op_Call);
1199 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1200 (node->attr.call.callee_arr != NULL));
1203 int get_Call_n_callees(ir_node * node) {
1204 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1205 return ARR_LEN(node->attr.call.callee_arr);
1208 entity * get_Call_callee(ir_node * node, int pos) {
1209 assert(pos >= 0 && pos < get_Call_n_callees(node));
1210 return node->attr.call.callee_arr[pos];
1213 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1214 assert(node->op == op_Call);
1215 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1216 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1218 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1221 void remove_Call_callee_arr(ir_node * node) {
1222 assert(node->op == op_Call);
1223 node->attr.call.callee_arr = NULL;
1226 ir_node * get_CallBegin_ptr (ir_node *node) {
1227 assert(node->op == op_CallBegin);
1228 return get_irn_n(node, 0);
1230 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1231 assert(node->op == op_CallBegin);
1232 set_irn_n(node, 0, ptr);
1234 ir_node * get_CallBegin_call (ir_node *node) {
1235 assert(node->op == op_CallBegin);
1236 return node->attr.callbegin.call;
1238 void set_CallBegin_call (ir_node *node, ir_node *call) {
1239 assert(node->op == op_CallBegin);
1240 node->attr.callbegin.call = call;
1245 ir_node * get_##OP##_left(ir_node *node) { \
1246 assert(node->op == op_##OP); \
1247 return get_irn_n(node, node->op->op_index); \
1249 void set_##OP##_left(ir_node *node, ir_node *left) { \
1250 assert(node->op == op_##OP); \
1251 set_irn_n(node, node->op->op_index, left); \
1253 ir_node *get_##OP##_right(ir_node *node) { \
1254 assert(node->op == op_##OP); \
1255 return get_irn_n(node, node->op->op_index + 1); \
1257 void set_##OP##_right(ir_node *node, ir_node *right) { \
1258 assert(node->op == op_##OP); \
1259 set_irn_n(node, node->op->op_index + 1, right); \
1263 ir_node *get_##OP##_op(ir_node *node) { \
1264 assert(node->op == op_##OP); \
1265 return get_irn_n(node, node->op->op_index); \
1267 void set_##OP##_op (ir_node *node, ir_node *op) { \
1268 assert(node->op == op_##OP); \
1269 set_irn_n(node, node->op->op_index, op); \
1279 get_Quot_mem (ir_node *node) {
1280 assert (node->op == op_Quot);
1281 return get_irn_n(node, 0);
1285 set_Quot_mem (ir_node *node, ir_node *mem) {
1286 assert (node->op == op_Quot);
1287 set_irn_n(node, 0, mem);
1293 get_DivMod_mem (ir_node *node) {
1294 assert (node->op == op_DivMod);
1295 return get_irn_n(node, 0);
1299 set_DivMod_mem (ir_node *node, ir_node *mem) {
1300 assert (node->op == op_DivMod);
1301 set_irn_n(node, 0, mem);
1307 get_Div_mem (ir_node *node) {
1308 assert (node->op == op_Div);
1309 return get_irn_n(node, 0);
1313 set_Div_mem (ir_node *node, ir_node *mem) {
1314 assert (node->op == op_Div);
1315 set_irn_n(node, 0, mem);
1321 get_Mod_mem (ir_node *node) {
1322 assert (node->op == op_Mod);
1323 return get_irn_n(node, 0);
1327 set_Mod_mem (ir_node *node, ir_node *mem) {
1328 assert (node->op == op_Mod);
1329 set_irn_n(node, 0, mem);
1346 get_Cast_type (ir_node *node) {
1347 assert (node->op == op_Cast);
1348 return node->attr.cast.totype;
1352 set_Cast_type (ir_node *node, type *to_tp) {
1353 assert (node->op == op_Cast);
1354 node->attr.cast.totype = to_tp;
1358 /* Checks for upcast.
1360 * Returns true if the Cast node casts a class type to a super type.
1362 int is_Cast_upcast(ir_node *node) {
1363 type *totype = get_Cast_type(node);
1364 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1365 ir_graph *myirg = get_irn_irg(node);
1367 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1370 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1371 totype = get_pointer_points_to_type(totype);
1372 fromtype = get_pointer_points_to_type(fromtype);
1377 if (!is_Class_type(totype)) return false;
1378 return is_subclass_of(fromtype, totype);
1381 /* Checks for downcast.
1383 * Returns true if the Cast node casts a class type to a sub type.
1385 int is_Cast_downcast(ir_node *node) {
1386 type *totype = get_Cast_type(node);
1387 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1389 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1392 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1393 totype = get_pointer_points_to_type(totype);
1394 fromtype = get_pointer_points_to_type(fromtype);
1399 if (!is_Class_type(totype)) return false;
1400 return is_subclass_of(totype, fromtype);
1404 (is_unop)(const ir_node *node) {
1405 return _is_unop(node);
1409 get_unop_op (ir_node *node) {
1410 if (node->op->opar == oparity_unary)
1411 return get_irn_n(node, node->op->op_index);
1413 assert(node->op->opar == oparity_unary);
1418 set_unop_op (ir_node *node, ir_node *op) {
1419 if (node->op->opar == oparity_unary)
1420 set_irn_n(node, node->op->op_index, op);
1422 assert(node->op->opar == oparity_unary);
1426 (is_binop)(const ir_node *node) {
1427 return _is_binop(node);
/* Returns the left operand of a binary operation.
   op_index from the op descriptor names the first operand slot;
   the right operand lives at op_index + 1 (see get_binop_right). */
1431 get_binop_left (ir_node *node) {
1432 if (node->op->opar == oparity_binary)
1433 return get_irn_n(node, node->op->op_index);
/* Reaching this point means the node is not binary: fail loudly. */
1435 assert(node->op->opar == oparity_binary);
1440 set_binop_left (ir_node *node, ir_node *left) {
1441 if (node->op->opar == oparity_binary)
1442 set_irn_n(node, node->op->op_index, left);
1444 assert (node->op->opar == oparity_binary);
1448 get_binop_right (ir_node *node) {
1449 if (node->op->opar == oparity_binary)
1450 return get_irn_n(node, node->op->op_index + 1);
1452 assert(node->op->opar == oparity_binary);
1457 set_binop_right (ir_node *node, ir_node *right) {
1458 if (node->op->opar == oparity_binary)
1459 set_irn_n(node, node->op->op_index + 1, right);
1461 assert (node->op->opar == oparity_binary);
/* Returns true if the node is a proper Phi.
   A Filter node acts as a Phi when the interprocedural view is active.
   While the graph is still being built, zero-arity Phi0 placeholders
   exist; those are excluded here (see is_Phi0). */
1464 int is_Phi (const ir_node *n) {
1470 if (op == op_Filter) return get_interprocedural_view();
1473 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1474 (get_irn_arity(n) > 0));
/* Returns true if the node is a Phi0: a zero-arity Phi placeholder
   that only exists while the graph is in the building phase. */
1479 int is_Phi0 (const ir_node *n) {
1482 return ((get_irn_op(n) == op_Phi) &&
1483 (get_irn_arity(n) == 0) &&
1484 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1488 get_Phi_preds_arr (ir_node *node) {
1489 assert (node->op == op_Phi);
1490 return (ir_node **)&(get_irn_in(node)[1]);
1494 get_Phi_n_preds (ir_node *node) {
1495 assert (is_Phi(node) || is_Phi0(node));
1496 return (get_irn_arity(node));
1500 void set_Phi_n_preds (ir_node *node, int n_preds) {
1501 assert (node->op == op_Phi);
1506 get_Phi_pred (ir_node *node, int pos) {
1507 assert (is_Phi(node) || is_Phi0(node));
1508 return get_irn_n(node, pos);
1512 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1513 assert (is_Phi(node) || is_Phi0(node));
1514 set_irn_n(node, pos, pred);
/* Returns true for the two memory operations that share the
   mem/ptr operand layout used by the generic memop accessors below. */
1518 int is_memop(ir_node *node) {
1519 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1522 ir_node *get_memop_mem (ir_node *node) {
1523 assert(is_memop(node));
1524 return get_irn_n(node, 0);
1527 void set_memop_mem (ir_node *node, ir_node *mem) {
1528 assert(is_memop(node));
1529 set_irn_n(node, 0, mem);
1532 ir_node *get_memop_ptr (ir_node *node) {
1533 assert(is_memop(node));
1534 return get_irn_n(node, 1);
1537 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1538 assert(is_memop(node));
1539 set_irn_n(node, 1, ptr);
1543 get_Load_mem (ir_node *node) {
1544 assert (node->op == op_Load);
1545 return get_irn_n(node, 0);
1549 set_Load_mem (ir_node *node, ir_node *mem) {
1550 assert (node->op == op_Load);
1551 set_irn_n(node, 0, mem);
1555 get_Load_ptr (ir_node *node) {
1556 assert (node->op == op_Load);
1557 return get_irn_n(node, 1);
1561 set_Load_ptr (ir_node *node, ir_node *ptr) {
1562 assert (node->op == op_Load);
1563 set_irn_n(node, 1, ptr);
1567 get_Load_mode (ir_node *node) {
1568 assert (node->op == op_Load);
1569 return node->attr.load.load_mode;
1573 set_Load_mode (ir_node *node, ir_mode *mode) {
1574 assert (node->op == op_Load);
1575 node->attr.load.load_mode = mode;
1579 get_Load_volatility (ir_node *node) {
1580 assert (node->op == op_Load);
1581 return node->attr.load.volatility;
1585 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1586 assert (node->op == op_Load);
1587 node->attr.load.volatility = volatility;
1592 get_Store_mem (ir_node *node) {
1593 assert (node->op == op_Store);
1594 return get_irn_n(node, 0);
1598 set_Store_mem (ir_node *node, ir_node *mem) {
1599 assert (node->op == op_Store);
1600 set_irn_n(node, 0, mem);
1604 get_Store_ptr (ir_node *node) {
1605 assert (node->op == op_Store);
1606 return get_irn_n(node, 1);
1610 set_Store_ptr (ir_node *node, ir_node *ptr) {
1611 assert (node->op == op_Store);
1612 set_irn_n(node, 1, ptr);
1616 get_Store_value (ir_node *node) {
1617 assert (node->op == op_Store);
1618 return get_irn_n(node, 2);
1622 set_Store_value (ir_node *node, ir_node *value) {
1623 assert (node->op == op_Store);
1624 set_irn_n(node, 2, value);
1628 get_Store_volatility (ir_node *node) {
1629 assert (node->op == op_Store);
1630 return node->attr.store.volatility;
1634 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1635 assert (node->op == op_Store);
1636 node->attr.store.volatility = volatility;
1641 get_Alloc_mem (ir_node *node) {
1642 assert (node->op == op_Alloc);
1643 return get_irn_n(node, 0);
1647 set_Alloc_mem (ir_node *node, ir_node *mem) {
1648 assert (node->op == op_Alloc);
1649 set_irn_n(node, 0, mem);
1653 get_Alloc_size (ir_node *node) {
1654 assert (node->op == op_Alloc);
1655 return get_irn_n(node, 1);
1659 set_Alloc_size (ir_node *node, ir_node *size) {
1660 assert (node->op == op_Alloc);
1661 set_irn_n(node, 1, size);
1665 get_Alloc_type (ir_node *node) {
1666 assert (node->op == op_Alloc);
1667 return node->attr.a.type = skip_tid(node->attr.a.type);
1671 set_Alloc_type (ir_node *node, type *tp) {
1672 assert (node->op == op_Alloc);
1673 node->attr.a.type = tp;
1677 get_Alloc_where (ir_node *node) {
1678 assert (node->op == op_Alloc);
1679 return node->attr.a.where;
1683 set_Alloc_where (ir_node *node, where_alloc where) {
1684 assert (node->op == op_Alloc);
1685 node->attr.a.where = where;
1690 get_Free_mem (ir_node *node) {
1691 assert (node->op == op_Free);
1692 return get_irn_n(node, 0);
1696 set_Free_mem (ir_node *node, ir_node *mem) {
1697 assert (node->op == op_Free);
1698 set_irn_n(node, 0, mem);
1702 get_Free_ptr (ir_node *node) {
1703 assert (node->op == op_Free);
1704 return get_irn_n(node, 1);
1708 set_Free_ptr (ir_node *node, ir_node *ptr) {
1709 assert (node->op == op_Free);
1710 set_irn_n(node, 1, ptr);
1714 get_Free_size (ir_node *node) {
1715 assert (node->op == op_Free);
1716 return get_irn_n(node, 2);
1720 set_Free_size (ir_node *node, ir_node *size) {
1721 assert (node->op == op_Free);
1722 set_irn_n(node, 2, size);
1726 get_Free_type (ir_node *node) {
1727 assert (node->op == op_Free);
1728 return node->attr.f.type = skip_tid(node->attr.f.type);
1732 set_Free_type (ir_node *node, type *tp) {
1733 assert (node->op == op_Free);
1734 node->attr.f.type = tp;
1738 get_Free_where (ir_node *node) {
1739 assert (node->op == op_Free);
1740 return node->attr.f.where;
1744 set_Free_where (ir_node *node, where_alloc where) {
1745 assert (node->op == op_Free);
1746 node->attr.f.where = where;
1750 get_Sync_preds_arr (ir_node *node) {
1751 assert (node->op == op_Sync);
1752 return (ir_node **)&(get_irn_in(node)[1]);
1756 get_Sync_n_preds (ir_node *node) {
1757 assert (node->op == op_Sync);
1758 return (get_irn_arity(node));
1763 set_Sync_n_preds (ir_node *node, int n_preds) {
1764 assert (node->op == op_Sync);
1769 get_Sync_pred (ir_node *node, int pos) {
1770 assert (node->op == op_Sync);
1771 return get_irn_n(node, pos);
1775 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1776 assert (node->op == op_Sync);
1777 set_irn_n(node, pos, pred);
1780 type *get_Proj_type(ir_node *n)
1783 ir_node *pred = get_Proj_pred(n);
1785 switch (get_irn_opcode(pred)) {
1788 /* Deal with Start / Call here: we need to know the Proj Nr. */
1789 assert(get_irn_mode(pred) == mode_T);
1790 pred_pred = get_Proj_pred(pred);
1791 if (get_irn_op(pred_pred) == op_Start) {
1792 type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1793 tp = get_method_param_type(mtp, get_Proj_proj(n));
1794 } else if (get_irn_op(pred_pred) == op_Call) {
1795 type *mtp = get_Call_type(pred_pred);
1796 tp = get_method_res_type(mtp, get_Proj_proj(n));
1799 case iro_Start: break;
1800 case iro_Call: break;
1802 ir_node *a = get_Load_ptr(pred);
1803 if (get_irn_op(a) == op_Sel)
1804 tp = get_entity_type(get_Sel_entity(a));
1813 get_Proj_pred (ir_node *node) {
1814 assert (is_Proj(node));
1815 return get_irn_n(node, 0);
1819 set_Proj_pred (ir_node *node, ir_node *pred) {
1820 assert (is_Proj(node));
1821 set_irn_n(node, 0, pred);
/* Returns the projection number of a Proj node.
   Filter nodes count as Proj in the non-interprocedural view (see
   is_Proj), but store their number in a different attribute slot,
   hence the opcode dispatch. */
1825 get_Proj_proj (ir_node *node) {
1826 assert (is_Proj(node));
1827 if (get_irn_opcode(node) == iro_Proj) {
1828 return node->attr.proj;
/* Otherwise it must be a Filter acting as a Proj. */
1830 assert(get_irn_opcode(node) == iro_Filter);
1831 return node->attr.filter.proj;
1836 set_Proj_proj (ir_node *node, long proj) {
1837 assert (node->op == op_Proj);
1838 node->attr.proj = proj;
1842 get_Tuple_preds_arr (ir_node *node) {
1843 assert (node->op == op_Tuple);
1844 return (ir_node **)&(get_irn_in(node)[1]);
1848 get_Tuple_n_preds (ir_node *node) {
1849 assert (node->op == op_Tuple);
1850 return (get_irn_arity(node));
1855 set_Tuple_n_preds (ir_node *node, int n_preds) {
1856 assert (node->op == op_Tuple);
1861 get_Tuple_pred (ir_node *node, int pos) {
1862 assert (node->op == op_Tuple);
1863 return get_irn_n(node, pos);
1867 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1868 assert (node->op == op_Tuple);
1869 set_irn_n(node, pos, pred);
1873 get_Id_pred (ir_node *node) {
1874 assert (node->op == op_Id);
1875 return get_irn_n(node, 0);
1879 set_Id_pred (ir_node *node, ir_node *pred) {
1880 assert (node->op == op_Id);
1881 set_irn_n(node, 0, pred);
1884 ir_node *get_Confirm_value (ir_node *node) {
1885 assert (node->op == op_Confirm);
1886 return get_irn_n(node, 0);
1888 void set_Confirm_value (ir_node *node, ir_node *value) {
1889 assert (node->op == op_Confirm);
1890 set_irn_n(node, 0, value);
1892 ir_node *get_Confirm_bound (ir_node *node) {
1893 assert (node->op == op_Confirm);
1894 return get_irn_n(node, 1);
1896 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1897 assert (node->op == op_Confirm);
1898 set_irn_n(node, 0, bound);
1900 pn_Cmp get_Confirm_cmp (ir_node *node) {
1901 assert (node->op == op_Confirm);
1902 return node->attr.confirm_cmp;
1904 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1905 assert (node->op == op_Confirm);
1906 node->attr.confirm_cmp = cmp;
1911 get_Filter_pred (ir_node *node) {
1912 assert(node->op == op_Filter);
1916 set_Filter_pred (ir_node *node, ir_node *pred) {
1917 assert(node->op == op_Filter);
1921 get_Filter_proj(ir_node *node) {
1922 assert(node->op == op_Filter);
1923 return node->attr.filter.proj;
1926 set_Filter_proj (ir_node *node, long proj) {
1927 assert(node->op == op_Filter);
1928 node->attr.filter.proj = proj;
1931 /* Don't use get_irn_arity, get_irn_n in implementation as access
1932 shall work independent of view!!! */
1933 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1934 assert(node->op == op_Filter);
1935 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1936 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1937 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1938 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1939 node->attr.filter.in_cg[0] = node->in[0];
1941 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1944 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1945 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1946 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1947 node->attr.filter.in_cg[pos + 1] = pred;
1949 int get_Filter_n_cg_preds(ir_node *node) {
1950 assert(node->op == op_Filter && node->attr.filter.in_cg);
1951 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1953 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1955 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1957 arity = ARR_LEN(node->attr.filter.in_cg);
1958 assert(pos < arity - 1);
1959 return node->attr.filter.in_cg[pos + 1];
1963 ir_node *get_Mux_sel (ir_node *node) {
1964 assert(node->op == op_Mux);
1967 void set_Mux_sel (ir_node *node, ir_node *sel) {
1968 assert(node->op == op_Mux);
1972 ir_node *get_Mux_false (ir_node *node) {
1973 assert(node->op == op_Mux);
1976 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1977 assert(node->op == op_Mux);
1978 node->in[2] = ir_false;
1981 ir_node *get_Mux_true (ir_node *node) {
1982 assert(node->op == op_Mux);
1985 void set_Mux_true (ir_node *node, ir_node *ir_true) {
1986 assert(node->op == op_Mux);
1987 node->in[3] = ir_true;
/* Returns the graph a node belongs to.
   The irg handle is stored only in Block attributes, so non-Block nodes
   first hop to their block.  After some optimizations a Bad may stand in
   for the block predecessor; in that case hop once more. */
1992 get_irn_irg(const ir_node *node) {
1993 if (! is_Block(node))
1994 node = get_nodes_block(node);
1995 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1996 node = get_nodes_block(node);
1997 assert(get_irn_op(node) == op_Block);
1998 return node->attr.block.irg;
2002 /*----------------------------------------------------------------*/
2003 /* Auxiliary routines */
2004 /*----------------------------------------------------------------*/
2007 skip_Proj (ir_node *node) {
2008 /* don't assert node !!! */
2009 if (node && is_Proj(node)) {
2010 return get_Proj_pred(node);
/* If the node is Proj(Tuple), returns the selected Tuple predecessor;
   otherwise returns the node unchanged.  Id nodes are skipped along the
   way, and nested Proj(Proj(...)) chains are resolved recursively.
   Does nothing when normalization is disabled. */
2017 skip_Tuple (ir_node *node) {
2020 if (!get_opt_normalize()) return node;
2022 node = skip_Id(node);
2023 if (get_irn_op(node) == op_Proj) {
2024 pred = skip_Id(get_Proj_pred(node));
2025 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
2026 pred = skip_Id(skip_Tuple(pred));
2027 if (get_irn_op(pred) == op_Tuple)
2028 return get_Tuple_pred(pred, get_Proj_proj(node));
2033 /** returns operand of node if node is a Cast */
2034 ir_node *skip_Cast (ir_node *node) {
2035 if (node && get_irn_op(node) == op_Cast) {
2036 return skip_Id(get_irn_n(node, 0));
2043 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2044 than any other approach, as Id chains are resolved and all point to the real node, or
2045 all id's are self loops. */
2047 skip_Id (ir_node *node) {
2048 /* don't assert node !!! */
2050 if (!get_opt_normalize()) return node;
2052 /* Don't use get_Id_pred: We get into an endless loop for
2053 self-referencing Ids. */
/* in[0+1] is the Id's single predecessor: in[] slot 0 holds the block,
   data operands start at slot 1. */
2054 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2055 ir_node *rem_pred = node->in[0+1];
2058 assert (get_irn_arity (node) > 0);
/* Temporarily make this Id self-referencing so the recursion below
   terminates on cycles, then path-compress to the chain's real end. */
2060 node->in[0+1] = node;
2061 res = skip_Id(rem_pred);
2062 if (res->op == op_Id) /* self-loop */ return node;
2064 node->in[0+1] = res;
2071 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2072 than any other approach, as Id chains are resolved and all point to the real node, or
2073 all id's are self loops. */
2075 skip_Id (ir_node *node) {
2077 /* don't assert node !!! */
2079 if (!node || (node->op != op_Id)) return node;
2081 if (!get_opt_normalize()) return node;
2083 /* Don't use get_Id_pred: We get into an endless loop for
2084 self-referencing Ids. */
2085 pred = node->in[0+1];
2087 if (pred->op != op_Id) return pred;
2089 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2090 ir_node *rem_pred, *res;
2092 if (pred->op != op_Id) return pred; /* shortcut */
2095 assert (get_irn_arity (node) > 0);
2097 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2098 res = skip_Id(rem_pred);
2099 if (res->op == op_Id) /* self-loop */ return node;
2101 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2110 (is_Bad)(const ir_node *node) {
2111 return _is_Bad(node);
2115 (is_no_Block)(const ir_node *node) {
2116 return _is_no_Block(node);
2120 (is_Block)(const ir_node *node) {
2121 return _is_Block(node);
2124 /* returns true if node is a Unknown node. */
2126 is_Unknown (const ir_node *node) {
2128 return (get_irn_op(node) == op_Unknown);
/* Returns true for Proj nodes.  In the non-interprocedural view a
   Filter node behaves exactly like a Proj and is counted here too
   (its projection number is read via get_Proj_proj). */
2132 is_Proj (const ir_node *node) {
2134 return node->op == op_Proj
2135 || (!get_interprocedural_view() && node->op == op_Filter);
2138 /* Returns true if the operation manipulates control flow. */
2140 is_cfop(const ir_node *node) {
2141 return is_cfopcode(get_irn_op(node));
2144 /* Returns true if the operation manipulates interprocedural control flow:
2145 CallBegin, EndReg, EndExcept */
2146 int is_ip_cfop(const ir_node *node) {
2147 return is_ip_cfopcode(get_irn_op(node));
2150 /* Returns true if the operation can change the control flow because
2153 is_fragile_op(const ir_node *node) {
2154 return is_op_fragile(get_irn_op(node));
2157 /* Returns the memory operand of fragile operations. */
2158 ir_node *get_fragile_op_mem(ir_node *node) {
2159 assert(node && is_fragile_op(node));
2161 switch (get_irn_opcode (node)) {
2170 return get_irn_n(node, 0);
2175 assert(0 && "should not be reached");
2180 /* Returns true if the operation is a forking control flow operation. */
2182 is_forking_op(const ir_node *node) {
2183 return is_op_forking(get_irn_op(node));
2186 type *(get_irn_type)(ir_node *node) {
2187 return _get_irn_type(node);
2190 /** the get_type operation must be always implemented */
2191 static type *get_Null_type(ir_node *n) {
2195 /* set the get_type operation */
2196 ir_op *firm_set_default_get_type(ir_op *op)
2199 case iro_Const: op->get_type = get_Const_type; break;
2200 case iro_SymConst: op->get_type = get_SymConst_value_type; break;
2201 case iro_Cast: op->get_type = get_Cast_type; break;
2202 case iro_Proj: op->get_type = get_Proj_type; break;
2203 default: op->get_type = get_Null_type; break;
2208 #ifdef DEBUG_libfirm
2209 void dump_irn (ir_node *n) {
2210 int i, arity = get_irn_arity(n);
2211 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2213 ir_node *pred = get_irn_n(n, -1);
2214 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2215 get_irn_node_nr(pred), (void *)pred);
2217 printf(" preds: \n");
2218 for (i = 0; i < arity; ++i) {
2219 ir_node *pred = get_irn_n(n, i);
2220 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2221 get_irn_node_nr(pred), (void *)pred);
2225 #else /* DEBUG_libfirm */
2226 void dump_irn (ir_node *n) {}
2227 #endif /* DEBUG_libfirm */