3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
34 /* some constants fixing the positions of nodes predecessors
36 #define CALL_PARAM_OFFSET 2
37 #define FUNCCALL_PARAM_OFFSET 1
38 #define SEL_INDEX_OFFSET 2
39 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
40 #define END_KEEPALIVE_OFFSET 0
/* String names of the pn_Cmp relations, indexed by the pnc constant.
   Order must match the pn_Cmp_* enumeration (see get_negated_pnc below,
   which covers exactly these sixteen relations). */
static const char *pnc_name_arr [] = {
  "False", "Eq", "Lt", "Le",
  "Gt", "Ge", "Lg", "Leg", "Uo",
  "Ue", "Ul", "Ule", "Ug", "Uge",
  "Ne", "True"
};
50 * returns the pnc name from an pnc constant
52 const char *get_pnc_string(int pnc) {
53 return pnc_name_arr[pnc];
57 * Calculates the negated pnc condition.
60 get_negated_pnc(int pnc) {
62 case pn_Cmp_False: return pn_Cmp_True; break;
63 case pn_Cmp_Eq: return pn_Cmp_Ne; break;
64 case pn_Cmp_Lt: return pn_Cmp_Uge; break;
65 case pn_Cmp_Le: return pn_Cmp_Ug; break;
66 case pn_Cmp_Gt: return pn_Cmp_Ule; break;
67 case pn_Cmp_Ge: return pn_Cmp_Ul; break;
68 case pn_Cmp_Lg: return pn_Cmp_Ue; break;
69 case pn_Cmp_Leg: return pn_Cmp_Uo; break;
70 case pn_Cmp_Uo: return pn_Cmp_Leg; break;
71 case pn_Cmp_Ue: return pn_Cmp_Lg; break;
72 case pn_Cmp_Ul: return pn_Cmp_Ge; break;
73 case pn_Cmp_Ule: return pn_Cmp_Gt; break;
74 case pn_Cmp_Ug: return pn_Cmp_Le; break;
75 case pn_Cmp_Uge: return pn_Cmp_Lt; break;
76 case pn_Cmp_Ne: return pn_Cmp_Eq; break;
77 case pn_Cmp_True: return pn_Cmp_False; break;
79 return 99; /* to shut up gcc */
82 const char *pns_name_arr [] = {
83 "initial_exec", "global_store",
84 "frame_base", "globals", "args"
/* String names of the SymConst kinds, indexed by symconst_kind. */
const char *symconst_name_arr [] = {
  "type_tag", "size", "addr_name", "addr_ent"
};
92 * Indicates, whether additional data can be registered to ir nodes.
93 * If set to 1, this is not possible anymore.
95 static int forbid_new_data = 0;
98 * The amount of additional space for custom data to be allocated upon
99 * creating a new node.
101 unsigned firm_add_node_size = 0;
104 /* register new space for every node */
105 unsigned register_additional_node_data(unsigned size) {
106 assert(!forbid_new_data && "Too late to register additional node data");
111 return firm_add_node_size += size;
117 /* Forbid the addition of new data to an ir node. */
122 * irnode constructor.
123 * Create a new irnode in irg, with an op, mode, arity and
124 * some incoming irnodes.
125 * If arity is negative, a node with a dynamic array is created.
128 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
129 int arity, ir_node **in)
132 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
135 assert(irg && op && mode);
136 p = obstack_alloc (irg->obst, node_size);
137 memset(p, 0, node_size);
138 res = (ir_node *) (p + firm_add_node_size);
140 res->kind = k_ir_node;
146 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
148 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
149 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
153 set_irn_dbg_info(res, db);
157 res->node_nr = get_irp_new_node_nr();
160 #if FIRM_EDGES_INPLACE
163 int not_a_block = is_no_Block(res);
165 INIT_LIST_HEAD(&res->edge_info.outs_head);
167 for (i = 0, n = arity + not_a_block; i < n; ++i)
168 edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
172 hook_new_node(irg, res);
177 /*-- getting some parameters from ir_nodes --*/
180 (is_ir_node)(const void *thing) {
181 return _is_ir_node(thing);
185 (get_irn_intra_arity)(const ir_node *node) {
186 return _get_irn_intra_arity(node);
190 (get_irn_inter_arity)(const ir_node *node) {
191 return _get_irn_inter_arity(node);
194 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
197 (get_irn_arity)(const ir_node *node) {
198 return _get_irn_arity(node);
201 /* Returns the array with ins. This array is shifted with respect to the
202 array accessed by get_irn_n: The block operand is at position 0 not -1.
203 (@@@ This should be changed.)
204 The order of the predecessors in this array is not guaranteed, except that
205 lists of operands as predecessors of Block or arguments of a Call are
208 get_irn_in (const ir_node *node) {
210 if (get_interprocedural_view()) { /* handle Filter and Block specially */
211 if (get_irn_opcode(node) == iro_Filter) {
212 assert(node->attr.filter.in_cg);
213 return node->attr.filter.in_cg;
214 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
215 return node->attr.block.in_cg;
217 /* else fall through */
223 set_irn_in (ir_node *node, int arity, ir_node **in) {
226 if (get_interprocedural_view()) { /* handle Filter and Block specially */
227 if (get_irn_opcode(node) == iro_Filter) {
228 assert(node->attr.filter.in_cg);
229 arr = &node->attr.filter.in_cg;
230 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
231 arr = &node->attr.block.in_cg;
238 if (arity != ARR_LEN(*arr) - 1) {
239 ir_node * block = (*arr)[0];
240 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
243 fix_backedges(current_ir_graph->obst, node);
244 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
248 (get_irn_intra_n)(const ir_node *node, int n) {
249 return _get_irn_intra_n (node, n);
253 (get_irn_inter_n)(const ir_node *node, int n) {
254 return _get_irn_inter_n (node, n);
257 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
260 (get_irn_n)(const ir_node *node, int n) {
261 return _get_irn_n(node, n);
265 set_irn_n (ir_node *node, int n, ir_node *in) {
266 assert(node && node->kind == k_ir_node);
268 assert(n < get_irn_arity(node));
269 assert(in && in->kind == k_ir_node);
271 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
272 /* Change block pred in both views! */
273 node->in[n + 1] = in;
274 assert(node->attr.filter.in_cg);
275 node->attr.filter.in_cg[n + 1] = in;
278 if (get_interprocedural_view()) { /* handle Filter and Block specially */
279 if (get_irn_opcode(node) == iro_Filter) {
280 assert(node->attr.filter.in_cg);
281 node->attr.filter.in_cg[n + 1] = in;
283 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
284 node->attr.block.in_cg[n + 1] = in;
287 /* else fall through */
291 hook_set_irn_n(node, n, in, node->in[n + 1]);
293 /* Here, we rely on src and tgt being in the current ir graph */
294 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
296 node->in[n + 1] = in;
300 (get_irn_mode)(const ir_node *node) {
301 return _get_irn_mode(node);
305 (set_irn_mode)(ir_node *node, ir_mode *mode)
307 _set_irn_mode(node, mode);
311 get_irn_modecode (const ir_node *node)
314 return node->mode->code;
317 /** Gets the string representation of the mode .*/
319 get_irn_modename (const ir_node *node)
322 return get_mode_name(node->mode);
326 get_irn_modeident (const ir_node *node)
329 return get_mode_ident(node->mode);
333 (get_irn_op)(const ir_node *node)
335 return _get_irn_op(node);
338 /* should be private to the library: */
340 set_irn_op (ir_node *node, ir_op *op)
347 (get_irn_opcode)(const ir_node *node)
349 return _get_irn_opcode(node);
353 get_irn_opname (const ir_node *node)
356 if ((get_irn_op((ir_node *)node) == op_Phi) &&
357 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
358 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
359 return get_id_str(node->op->name);
363 get_irn_opident (const ir_node *node)
366 return node->op->name;
370 (get_irn_visited)(const ir_node *node)
372 return _get_irn_visited(node);
376 (set_irn_visited)(ir_node *node, unsigned long visited)
378 _set_irn_visited(node, visited);
382 (mark_irn_visited)(ir_node *node) {
383 _mark_irn_visited(node);
387 (irn_not_visited)(const ir_node *node) {
388 return _irn_not_visited(node);
392 (irn_visited)(const ir_node *node) {
393 return _irn_visited(node);
397 (set_irn_link)(ir_node *node, void *link) {
398 _set_irn_link(node, link);
402 (get_irn_link)(const ir_node *node) {
403 return _get_irn_link(node);
407 (get_irn_pinned)(const ir_node *node) {
408 return _get_irn_pinned(node);
411 void set_irn_pinned(ir_node *node, op_pin_state state) {
412 /* due to optimization an opt may be turned into a Tuple */
413 if (get_irn_op(node) == op_Tuple)
416 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
417 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
419 node->attr.except.pin_state = state;
422 #ifdef DO_HEAPANALYSIS
423 /* Access the abstract interpretation information of a node.
424 Returns NULL if no such information is available. */
425 struct abstval *get_irn_abst_value(ir_node *n) {
428 /* Set the abstract interpretation information of a node. */
429 void set_irn_abst_value(ir_node *n, struct abstval *os) {
432 struct section *firm_get_irn_section(ir_node *n) {
435 void firm_set_irn_section(ir_node *n, struct section *s) {
439 /* Dummies needed for firmjni. */
440 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
441 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
442 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
443 void firm_set_irn_section(ir_node *n, struct section *s) {}
444 #endif /* DO_HEAPANALYSIS */
447 /* Outputs a unique number for this node */
449 get_irn_node_nr(const ir_node *node) {
452 return node->node_nr;
459 get_irn_const_attr (ir_node *node)
461 assert (node->op == op_Const);
462 return node->attr.con;
466 get_irn_proj_attr (ir_node *node)
468 assert (node->op == op_Proj);
469 return node->attr.proj;
473 get_irn_alloc_attr (ir_node *node)
475 assert (node->op == op_Alloc);
480 get_irn_free_attr (ir_node *node)
482 assert (node->op == op_Free);
487 get_irn_symconst_attr (ir_node *node)
489 assert (node->op == op_SymConst);
494 get_irn_call_attr (ir_node *node)
496 assert (node->op == op_Call);
497 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
501 get_irn_sel_attr (ir_node *node)
503 assert (node->op == op_Sel);
508 get_irn_phi_attr (ir_node *node)
510 assert (node->op == op_Phi);
511 return node->attr.phi0_pos;
515 get_irn_block_attr (ir_node *node)
517 assert (node->op == op_Block);
518 return node->attr.block;
522 get_irn_load_attr (ir_node *node)
524 assert (node->op == op_Load);
525 return node->attr.load;
529 get_irn_store_attr (ir_node *node)
531 assert (node->op == op_Store);
532 return node->attr.store;
536 get_irn_except_attr (ir_node *node)
538 assert (node->op == op_Div || node->op == op_Quot ||
539 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
540 return node->attr.except;
543 /** manipulate fields of individual nodes **/
545 /* this works for all except Block */
547 get_nodes_block (const ir_node *node) {
548 assert (!(node->op == op_Block));
549 return get_irn_n(node, -1);
553 set_nodes_block (ir_node *node, ir_node *block) {
554 assert (!(node->op == op_Block));
555 set_irn_n(node, -1, block);
558 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
559 * from Start. If so returns frame type, else Null. */
560 type *is_frame_pointer(ir_node *n) {
561 if ((get_irn_op(n) == op_Proj) &&
562 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
563 ir_node *start = get_Proj_pred(n);
564 if (get_irn_op(start) == op_Start) {
565 return get_irg_frame_type(get_irn_irg(start));
571 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
572 * from Start. If so returns global type, else Null. */
573 type *is_globals_pointer(ir_node *n) {
574 if ((get_irn_op(n) == op_Proj) &&
575 (get_Proj_proj(n) == pn_Start_P_globals)) {
576 ir_node *start = get_Proj_pred(n);
577 if (get_irn_op(start) == op_Start) {
578 return get_glob_type();
584 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
585 * from Start. If so returns 1, else 0. */
586 int is_value_arg_pointer(ir_node *n) {
587 if ((get_irn_op(n) == op_Proj) &&
588 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
589 (get_irn_op(get_Proj_pred(n)) == op_Start))
594 /* Returns an array with the predecessors of the Block. Depending on
595 the implementation of the graph data structure this can be a copy of
596 the internal representation of predecessors as well as the internal
597 array itself. Therefore writing to this array might obstruct the ir. */
599 get_Block_cfgpred_arr (ir_node *node)
601 assert ((node->op == op_Block));
602 return (ir_node **)&(get_irn_in(node)[1]);
607 get_Block_n_cfgpreds (ir_node *node) {
608 assert ((node->op == op_Block));
609 return get_irn_arity(node);
613 get_Block_cfgpred (ir_node *node, int pos) {
614 assert(-1 <= pos && pos < get_irn_arity(node));
615 assert(node->op == op_Block);
616 return get_irn_n(node, pos);
620 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
621 assert (node->op == op_Block);
622 set_irn_n(node, pos, pred);
626 get_Block_matured (ir_node *node) {
627 assert (node->op == op_Block);
628 return node->attr.block.matured;
632 set_Block_matured (ir_node *node, bool matured) {
633 assert (node->op == op_Block);
634 node->attr.block.matured = matured;
637 get_Block_block_visited (ir_node *node) {
638 assert (node->op == op_Block);
639 return node->attr.block.block_visited;
643 set_Block_block_visited (ir_node *node, unsigned long visit) {
644 assert (node->op == op_Block);
645 node->attr.block.block_visited = visit;
648 /* For this current_ir_graph must be set. */
650 mark_Block_block_visited (ir_node *node) {
651 assert (node->op == op_Block);
652 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
656 Block_not_block_visited(ir_node *node) {
657 assert (node->op == op_Block);
658 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
662 get_Block_graph_arr (ir_node *node, int pos) {
663 assert (node->op == op_Block);
664 return node->attr.block.graph_arr[pos+1];
668 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
669 assert (node->op == op_Block);
670 node->attr.block.graph_arr[pos+1] = value;
673 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
674 assert(node->op == op_Block);
675 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
676 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
677 node->attr.block.in_cg[0] = NULL;
678 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
680 /* Fix backedge array. fix_backedges operates depending on
681 interprocedural_view. */
682 int ipv = get_interprocedural_view();
683 set_interprocedural_view(true);
684 fix_backedges(current_ir_graph->obst, node);
685 set_interprocedural_view(ipv);
688 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
691 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
692 assert(node->op == op_Block &&
693 node->attr.block.in_cg &&
694 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
695 node->attr.block.in_cg[pos + 1] = pred;
698 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
699 assert(node->op == op_Block);
700 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
703 int get_Block_cg_n_cfgpreds(ir_node * node) {
704 assert(node->op == op_Block);
705 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
708 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
709 assert(node->op == op_Block && node->attr.block.in_cg);
710 return node->attr.block.in_cg[pos + 1];
713 void remove_Block_cg_cfgpred_arr(ir_node * node) {
714 assert(node->op == op_Block);
715 node->attr.block.in_cg = NULL;
718 ir_node *(set_Block_dead)(ir_node *block) {
719 return _set_Block_dead(block);
722 int (is_Block_dead)(const ir_node *block) {
723 return _is_Block_dead(block);
727 set_Start_irg(ir_node *node, ir_graph *irg) {
728 assert(node->op == op_Start);
729 assert(is_ir_graph(irg));
730 assert(0 && " Why set irg? -- use set_irn_irg");
734 get_End_n_keepalives(ir_node *end) {
735 assert (end->op == op_End);
736 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
740 get_End_keepalive(ir_node *end, int pos) {
741 assert (end->op == op_End);
742 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
746 add_End_keepalive (ir_node *end, ir_node *ka) {
747 assert (end->op == op_End);
748 ARR_APP1 (ir_node *, end->in, ka);
752 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
753 assert (end->op == op_End);
754 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
758 free_End (ir_node *end) {
759 assert (end->op == op_End);
761 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
762 end->in = NULL; /* @@@ make sure we get an error if we use the
763 in array afterwards ... */
768 > Implementing the case construct (which is where the constant Proj node is
769 > important) involves far more than simply determining the constant values.
770 > We could argue that this is more properly a function of the translator from
771 > Firm to the target machine. That could be done if there was some way of
772 > projecting "default" out of the Cond node.
773 I know it's complicated.
774 Basically there are two proglems:
775 - determining the gaps between the projs
776 - determining the biggest case constant to know the proj number for
778 I see several solutions:
779 1. Introduce a ProjDefault node. Solves both problems.
780 This means to extend all optimizations executed during construction.
781 2. Give the Cond node for switch two flavors:
782 a) there are no gaps in the projs (existing flavor)
783 b) gaps may exist, default proj is still the Proj with the largest
784 projection number. This covers also the gaps.
785 3. Fix the semantic of the Cond to that of 2b)
787 Solution 2 seems to be the best:
788 Computing the gaps in the Firm representation is not too hard, i.e.,
789 libFIRM can implement a routine that transforms between the two
790 flavours. This is also possible for 1) but 2) does not require to
791 change any existing optimization.
792 Further it should be far simpler to determine the biggest constant than
794 I don't want to choose 3) as 2a) seems to have advantages for
795 dataflow analysis and 3) does not allow to convert the representation to
799 get_Cond_selector (ir_node *node) {
800 assert (node->op == op_Cond);
801 return get_irn_n(node, 0);
805 set_Cond_selector (ir_node *node, ir_node *selector) {
806 assert (node->op == op_Cond);
807 set_irn_n(node, 0, selector);
811 get_Cond_kind (ir_node *node) {
812 assert (node->op == op_Cond);
813 return node->attr.c.kind;
817 set_Cond_kind (ir_node *node, cond_kind kind) {
818 assert (node->op == op_Cond);
819 node->attr.c.kind = kind;
823 get_Cond_defaultProj (ir_node *node) {
824 assert (node->op == op_Cond);
825 return node->attr.c.default_proj;
829 get_Return_mem (ir_node *node) {
830 assert (node->op == op_Return);
831 return get_irn_n(node, 0);
835 set_Return_mem (ir_node *node, ir_node *mem) {
836 assert (node->op == op_Return);
837 set_irn_n(node, 0, mem);
841 get_Return_n_ress (ir_node *node) {
842 assert (node->op == op_Return);
843 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
847 get_Return_res_arr (ir_node *node)
849 assert ((node->op == op_Return));
850 if (get_Return_n_ress(node) > 0)
851 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
858 set_Return_n_res (ir_node *node, int results) {
859 assert (node->op == op_Return);
864 get_Return_res (ir_node *node, int pos) {
865 assert (node->op == op_Return);
866 assert (get_Return_n_ress(node) > pos);
867 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
871 set_Return_res (ir_node *node, int pos, ir_node *res){
872 assert (node->op == op_Return);
873 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
877 get_Raise_mem (ir_node *node) {
878 assert (node->op == op_Raise);
879 return get_irn_n(node, 0);
883 set_Raise_mem (ir_node *node, ir_node *mem) {
884 assert (node->op == op_Raise);
885 set_irn_n(node, 0, mem);
889 get_Raise_exo_ptr (ir_node *node) {
890 assert (node->op == op_Raise);
891 return get_irn_n(node, 1);
895 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
896 assert (node->op == op_Raise);
897 set_irn_n(node, 1, exo_ptr);
900 tarval *(get_Const_tarval)(ir_node *node) {
901 return _get_Const_tarval(node);
905 set_Const_tarval (ir_node *node, tarval *con) {
906 assert (node->op == op_Const);
907 node->attr.con.tv = con;
910 cnst_classify_t (classify_Const)(ir_node *node)
912 return _classify_Const(node);
916 /* The source language type. Must be an atomic type. Mode of type must
917 be mode of node. For tarvals from entities type must be pointer to
920 get_Const_type (ir_node *node) {
921 assert (node->op == op_Const);
922 return node->attr.con.tp;
926 set_Const_type (ir_node *node, type *tp) {
927 assert (node->op == op_Const);
928 if (tp != firm_unknown_type) {
929 assert (is_atomic_type(tp));
930 assert (get_type_mode(tp) == get_irn_mode(node));
932 node->attr.con.tp = tp;
937 get_SymConst_kind (const ir_node *node) {
938 assert (node->op == op_SymConst);
939 return node->attr.i.num;
943 set_SymConst_kind (ir_node *node, symconst_kind num) {
944 assert (node->op == op_SymConst);
945 node->attr.i.num = num;
949 get_SymConst_type (ir_node *node) {
950 assert ( (node->op == op_SymConst)
951 && ( get_SymConst_kind(node) == symconst_type_tag
952 || get_SymConst_kind(node) == symconst_size));
953 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
957 set_SymConst_type (ir_node *node, type *tp) {
958 assert ( (node->op == op_SymConst)
959 && ( get_SymConst_kind(node) == symconst_type_tag
960 || get_SymConst_kind(node) == symconst_size));
961 node->attr.i.sym.type_p = tp;
965 get_SymConst_name (ir_node *node) {
966 assert ( (node->op == op_SymConst)
967 && (get_SymConst_kind(node) == symconst_addr_name));
968 return node->attr.i.sym.ident_p;
972 set_SymConst_name (ir_node *node, ident *name) {
973 assert ( (node->op == op_SymConst)
974 && (get_SymConst_kind(node) == symconst_addr_name));
975 node->attr.i.sym.ident_p = name;
979 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
980 entity *get_SymConst_entity (ir_node *node) {
981 assert ( (node->op == op_SymConst)
982 && (get_SymConst_kind (node) == symconst_addr_ent));
983 return node->attr.i.sym.entity_p;
986 void set_SymConst_entity (ir_node *node, entity *ent) {
987 assert ( (node->op == op_SymConst)
988 && (get_SymConst_kind(node) == symconst_addr_ent));
989 node->attr.i.sym.entity_p = ent;
992 union symconst_symbol
993 get_SymConst_symbol (ir_node *node) {
994 assert (node->op == op_SymConst);
995 return node->attr.i.sym;
999 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1000 assert (node->op == op_SymConst);
1001 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1002 node->attr.i.sym = sym;
1006 get_SymConst_value_type (ir_node *node) {
1007 assert (node->op == op_SymConst);
1008 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1009 return node->attr.i.tp;
1013 set_SymConst_value_type (ir_node *node, type *tp) {
1014 assert (node->op == op_SymConst);
1015 node->attr.i.tp = tp;
1019 get_Sel_mem (ir_node *node) {
1020 assert (node->op == op_Sel);
1021 return get_irn_n(node, 0);
1025 set_Sel_mem (ir_node *node, ir_node *mem) {
1026 assert (node->op == op_Sel);
1027 set_irn_n(node, 0, mem);
1031 get_Sel_ptr (ir_node *node) {
1032 assert (node->op == op_Sel);
1033 return get_irn_n(node, 1);
1037 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1038 assert (node->op == op_Sel);
1039 set_irn_n(node, 1, ptr);
1043 get_Sel_n_indexs (ir_node *node) {
1044 assert (node->op == op_Sel);
1045 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1049 get_Sel_index_arr (ir_node *node)
1051 assert ((node->op == op_Sel));
1052 if (get_Sel_n_indexs(node) > 0)
1053 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1059 get_Sel_index (ir_node *node, int pos) {
1060 assert (node->op == op_Sel);
1061 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1065 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1066 assert (node->op == op_Sel);
1067 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1071 get_Sel_entity (ir_node *node) {
1072 assert (node->op == op_Sel);
1073 return node->attr.s.ent;
1077 set_Sel_entity (ir_node *node, entity *ent) {
1078 assert (node->op == op_Sel);
1079 node->attr.s.ent = ent;
1083 get_InstOf_ent (ir_node *node) {
1084 assert (node->op = op_InstOf);
1085 return (node->attr.io.ent);
1089 set_InstOf_ent (ir_node *node, type *ent) {
1090 assert (node->op = op_InstOf);
1091 node->attr.io.ent = ent;
1095 get_InstOf_store (ir_node *node) {
1096 assert (node->op = op_InstOf);
1097 return (get_irn_n (node, 0));
1101 set_InstOf_store (ir_node *node, ir_node *obj) {
1102 assert (node->op = op_InstOf);
1103 set_irn_n (node, 0, obj);
1107 get_InstOf_obj (ir_node *node) {
1108 assert (node->op = op_InstOf);
1109 return (get_irn_n (node, 1));
1113 set_InstOf_obj (ir_node *node, ir_node *obj) {
1114 assert (node->op = op_InstOf);
1115 set_irn_n (node, 1, obj);
1119 /* For unary and binary arithmetic operations the access to the
1120 operands can be factored out. Left is the first, right the
1121 second arithmetic value as listed in tech report 0999-33.
1122 unops are: Minus, Abs, Not, Conv, Cast
1123 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1124 Shr, Shrs, Rotate, Cmp */
1128 get_Call_mem (ir_node *node) {
1129 assert (node->op == op_Call);
1130 return get_irn_n(node, 0);
1134 set_Call_mem (ir_node *node, ir_node *mem) {
1135 assert (node->op == op_Call);
1136 set_irn_n(node, 0, mem);
1140 get_Call_ptr (ir_node *node) {
1141 assert (node->op == op_Call);
1142 return get_irn_n(node, 1);
1146 set_Call_ptr (ir_node *node, ir_node *ptr) {
1147 assert (node->op == op_Call);
1148 set_irn_n(node, 1, ptr);
1152 get_Call_param_arr (ir_node *node) {
1153 assert (node->op == op_Call);
1154 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1158 get_Call_n_params (ir_node *node) {
1159 assert (node->op == op_Call);
1160 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1164 get_Call_arity (ir_node *node) {
1165 assert (node->op == op_Call);
1166 return get_Call_n_params(node);
1170 set_Call_arity (ir_node *node, ir_node *arity) {
1171 assert (node->op == op_Call);
1176 get_Call_param (ir_node *node, int pos) {
1177 assert (node->op == op_Call);
1178 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1182 set_Call_param (ir_node *node, int pos, ir_node *param) {
1183 assert (node->op == op_Call);
1184 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1188 get_Call_type (ir_node *node) {
1189 assert (node->op == op_Call);
1190 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1194 set_Call_type (ir_node *node, type *tp) {
1195 assert (node->op == op_Call);
1196 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1197 node->attr.call.cld_tp = tp;
1200 int Call_has_callees(ir_node *node) {
1201 assert(node && node->op == op_Call);
1202 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1203 (node->attr.call.callee_arr != NULL));
1206 int get_Call_n_callees(ir_node * node) {
1207 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1208 return ARR_LEN(node->attr.call.callee_arr);
1211 entity * get_Call_callee(ir_node * node, int pos) {
1212 assert(pos >= 0 && pos < get_Call_n_callees(node));
1213 return node->attr.call.callee_arr[pos];
1216 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1217 assert(node->op == op_Call);
1218 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1219 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1221 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1224 void remove_Call_callee_arr(ir_node * node) {
1225 assert(node->op == op_Call);
1226 node->attr.call.callee_arr = NULL;
1229 ir_node * get_CallBegin_ptr (ir_node *node) {
1230 assert(node->op == op_CallBegin);
1231 return get_irn_n(node, 0);
1233 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1234 assert(node->op == op_CallBegin);
1235 set_irn_n(node, 0, ptr);
1237 ir_node * get_CallBegin_call (ir_node *node) {
1238 assert(node->op == op_CallBegin);
1239 return node->attr.callbegin.call;
1241 void set_CallBegin_call (ir_node *node, ir_node *call) {
1242 assert(node->op == op_CallBegin);
1243 node->attr.callbegin.call = call;
1248 ir_node * get_##OP##_left(ir_node *node) { \
1249 assert(node->op == op_##OP); \
1250 return get_irn_n(node, node->op->op_index); \
1252 void set_##OP##_left(ir_node *node, ir_node *left) { \
1253 assert(node->op == op_##OP); \
1254 set_irn_n(node, node->op->op_index, left); \
1256 ir_node *get_##OP##_right(ir_node *node) { \
1257 assert(node->op == op_##OP); \
1258 return get_irn_n(node, node->op->op_index + 1); \
1260 void set_##OP##_right(ir_node *node, ir_node *right) { \
1261 assert(node->op == op_##OP); \
1262 set_irn_n(node, node->op->op_index + 1, right); \
1266 ir_node *get_##OP##_op(ir_node *node) { \
1267 assert(node->op == op_##OP); \
1268 return get_irn_n(node, node->op->op_index); \
1270 void set_##OP##_op (ir_node *node, ir_node *op) { \
1271 assert(node->op == op_##OP); \
1272 set_irn_n(node, node->op->op_index, op); \
1282 get_Quot_mem (ir_node *node) {
1283 assert (node->op == op_Quot);
1284 return get_irn_n(node, 0);
1288 set_Quot_mem (ir_node *node, ir_node *mem) {
1289 assert (node->op == op_Quot);
1290 set_irn_n(node, 0, mem);
1296 get_DivMod_mem (ir_node *node) {
1297 assert (node->op == op_DivMod);
1298 return get_irn_n(node, 0);
1302 set_DivMod_mem (ir_node *node, ir_node *mem) {
1303 assert (node->op == op_DivMod);
1304 set_irn_n(node, 0, mem);
1310 get_Div_mem (ir_node *node) {
1311 assert (node->op == op_Div);
1312 return get_irn_n(node, 0);
1316 set_Div_mem (ir_node *node, ir_node *mem) {
1317 assert (node->op == op_Div);
1318 set_irn_n(node, 0, mem);
1324 get_Mod_mem (ir_node *node) {
1325 assert (node->op == op_Mod);
1326 return get_irn_n(node, 0);
1330 set_Mod_mem (ir_node *node, ir_node *mem) {
1331 assert (node->op == op_Mod);
1332 set_irn_n(node, 0, mem);
1349 get_Cast_type (ir_node *node) {
1350 assert (node->op == op_Cast);
1351 return node->attr.cast.totype;
1355 set_Cast_type (ir_node *node, type *to_tp) {
1356 assert (node->op == op_Cast);
1357 node->attr.cast.totype = to_tp;
1361 /* Checks for upcast.
1363 * Returns true if the Cast node casts a class type to a super type.
1365 int is_Cast_upcast(ir_node *node) {
1366 type *totype = get_Cast_type(node);
1367 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1368 ir_graph *myirg = get_irn_irg(node);
1370 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1373 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1374 totype = get_pointer_points_to_type(totype);
1375 fromtype = get_pointer_points_to_type(fromtype);
1380 if (!is_Class_type(totype)) return false;
1381 return is_subclass_of(fromtype, totype);
1384 /* Checks for downcast.
1386 * Returns true if the Cast node casts a class type to a sub type.
1388 int is_Cast_downcast(ir_node *node) {
1389 type *totype = get_Cast_type(node);
1390 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1392 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1395 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1396 totype = get_pointer_points_to_type(totype);
1397 fromtype = get_pointer_points_to_type(fromtype);
1402 if (!is_Class_type(totype)) return false;
1403 return is_subclass_of(totype, fromtype);
/* Out-of-line version of the _is_unop() macro/inline. */
1407 (is_unop)(const ir_node *node) {
1408 return _is_unop(node);
/* Return the single operand of a unary operation; the operand's
   predecessor position is taken from the node's opcode (op_index). */
1412 get_unop_op (ir_node *node) {
1413 if (node->op->opar == oparity_unary)
1414 return get_irn_n(node, node->op->op_index);
/* Reached only for non-unary nodes: fail loudly in debug builds. */
1416 assert(node->op->opar == oparity_unary);
/* Set the single operand of a unary operation. */
1421 set_unop_op (ir_node *node, ir_node *op) {
1422 if (node->op->opar == oparity_unary)
1423 set_irn_n(node, node->op->op_index, op);
1425 assert(node->op->opar == oparity_unary);
/* Out-of-line version of the _is_binop() macro/inline. */
1429 (is_binop)(const ir_node *node) {
1430 return _is_binop(node);
/* Left operand of a binary operation sits at op_index,
   the right operand at op_index + 1. */
1434 get_binop_left (ir_node *node) {
1435 if (node->op->opar == oparity_binary)
1436 return get_irn_n(node, node->op->op_index);
/* Reached only for non-binary nodes: fail loudly in debug builds. */
1438 assert(node->op->opar == oparity_binary);
/* Set the left operand of a binary operation. */
1443 set_binop_left (ir_node *node, ir_node *left) {
1444 if (node->op->opar == oparity_binary)
1445 set_irn_n(node, node->op->op_index, left);
1447 assert (node->op->opar == oparity_binary);
/* Return the right operand (op_index + 1) of a binary operation. */
1451 get_binop_right (ir_node *node) {
1452 if (node->op->opar == oparity_binary)
1453 return get_irn_n(node, node->op->op_index + 1);
1455 assert(node->op->opar == oparity_binary);
/* Set the right operand (op_index + 1) of a binary operation. */
1460 set_binop_right (ir_node *node, ir_node *right) {
1461 if (node->op->opar == oparity_binary)
1462 set_irn_n(node, node->op->op_index + 1, right);
1464 assert (node->op->opar == oparity_binary);
/* True for "real" Phi nodes.  In the interprocedural view Filter nodes
   also act as Phis.  While the graph is still being built, a Phi with
   arity 0 is an unfinished Phi0 and is excluded here. */
1467 int is_Phi (const ir_node *n) {
1473 if (op == op_Filter) return get_interprocedural_view();
1476 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1477 (get_irn_arity(n) > 0));
/* True for the zero-arity placeholder Phi that only exists while the
   graph is in the building phase. */
1482 int is_Phi0 (const ir_node *n) {
1485 return ((get_irn_op(n) == op_Phi) &&
1486 (get_irn_arity(n) == 0) &&
1487 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Return the predecessor array of a Phi; index 0 of the in-array is the
   block, so the data predecessors start at &in[1]. */
1491 get_Phi_preds_arr (ir_node *node) {
1492 assert (node->op == op_Phi);
1493 return (ir_node **)&(get_irn_in(node)[1]);
/* Number of data predecessors of a Phi (or Phi0). */
1497 get_Phi_n_preds (ir_node *node) {
1498 assert (is_Phi(node) || is_Phi0(node));
1499 return (get_irn_arity(node));
/* NOTE(review): body not visible in this view — presumably unimplemented
   or asserts only; confirm against the full source. */
1503 void set_Phi_n_preds (ir_node *node, int n_preds) {
1504 assert (node->op == op_Phi);
/* Return the pos-th data predecessor of a Phi. */
1509 get_Phi_pred (ir_node *node, int pos) {
1510 assert (is_Phi(node) || is_Phi0(node));
1511 return get_irn_n(node, pos);
/* Set the pos-th data predecessor of a Phi. */
1515 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1516 assert (is_Phi(node) || is_Phi0(node));
1517 set_irn_n(node, pos, pred);
/* A "memop" is a Load or Store: both share the layout mem at position 0,
   pointer at position 1, which the generic accessors below rely on. */
1521 int is_memop(ir_node *node) {
1522 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
/* Memory input of a Load/Store. */
1525 ir_node *get_memop_mem (ir_node *node) {
1526 assert(is_memop(node));
1527 return get_irn_n(node, 0);
/* Set the memory input of a Load/Store. */
1530 void set_memop_mem (ir_node *node, ir_node *mem) {
1531 assert(is_memop(node));
1532 set_irn_n(node, 0, mem);
/* Address input of a Load/Store. */
1535 ir_node *get_memop_ptr (ir_node *node) {
1536 assert(is_memop(node));
1537 return get_irn_n(node, 1);
/* Set the address input of a Load/Store. */
1540 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1541 assert(is_memop(node));
1542 set_irn_n(node, 1, ptr);
/* Load accessors: mem at position 0, address at position 1; the loaded
   mode and the volatility flag live in the node's load attribute. */
1546 get_Load_mem (ir_node *node) {
1547 assert (node->op == op_Load);
1548 return get_irn_n(node, 0);
1552 set_Load_mem (ir_node *node, ir_node *mem) {
1553 assert (node->op == op_Load);
1554 set_irn_n(node, 0, mem);
1558 get_Load_ptr (ir_node *node) {
1559 assert (node->op == op_Load);
1560 return get_irn_n(node, 1);
1564 set_Load_ptr (ir_node *node, ir_node *ptr) {
1565 assert (node->op == op_Load);
1566 set_irn_n(node, 1, ptr);
/* Mode of the value this Load produces. */
1570 get_Load_mode (ir_node *node) {
1571 assert (node->op == op_Load);
1572 return node->attr.load.load_mode;
1576 set_Load_mode (ir_node *node, ir_mode *mode) {
1577 assert (node->op == op_Load);
1578 node->attr.load.load_mode = mode;
/* Volatility flag: a volatile Load must not be optimized away. */
1582 get_Load_volatility (ir_node *node) {
1583 assert (node->op == op_Load);
1584 return node->attr.load.volatility;
1588 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1589 assert (node->op == op_Load);
1590 node->attr.load.volatility = volatility;
/* Store accessors: mem at position 0, address at position 1, the stored
   value at position 2; volatility lives in the store attribute. */
1595 get_Store_mem (ir_node *node) {
1596 assert (node->op == op_Store);
1597 return get_irn_n(node, 0);
1601 set_Store_mem (ir_node *node, ir_node *mem) {
1602 assert (node->op == op_Store);
1603 set_irn_n(node, 0, mem);
1607 get_Store_ptr (ir_node *node) {
1608 assert (node->op == op_Store);
1609 return get_irn_n(node, 1);
1613 set_Store_ptr (ir_node *node, ir_node *ptr) {
1614 assert (node->op == op_Store);
1615 set_irn_n(node, 1, ptr);
1619 get_Store_value (ir_node *node) {
1620 assert (node->op == op_Store);
1621 return get_irn_n(node, 2);
1625 set_Store_value (ir_node *node, ir_node *value) {
1626 assert (node->op == op_Store);
1627 set_irn_n(node, 2, value);
/* Volatility flag: a volatile Store must not be optimized away. */
1631 get_Store_volatility (ir_node *node) {
1632 assert (node->op == op_Store);
1633 return node->attr.store.volatility;
1637 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1638 assert (node->op == op_Store);
1639 node->attr.store.volatility = volatility;
/* Alloc accessors: mem at position 0, size at position 1; the allocated
   type and the stack/heap flag live in the node's "a" attribute. */
1644 get_Alloc_mem (ir_node *node) {
1645 assert (node->op == op_Alloc);
1646 return get_irn_n(node, 0);
1650 set_Alloc_mem (ir_node *node, ir_node *mem) {
1651 assert (node->op == op_Alloc);
1652 set_irn_n(node, 0, mem);
1656 get_Alloc_size (ir_node *node) {
1657 assert (node->op == op_Alloc);
1658 return get_irn_n(node, 1);
1662 set_Alloc_size (ir_node *node, ir_node *size) {
1663 assert (node->op == op_Alloc);
1664 set_irn_n(node, 1, size);
/* Returns the allocated type; skip_tid() resolves type-id indirections
   and the result is cached back into the attribute. */
1668 get_Alloc_type (ir_node *node) {
1669 assert (node->op == op_Alloc);
1670 return node->attr.a.type = skip_tid(node->attr.a.type);
1674 set_Alloc_type (ir_node *node, type *tp) {
1675 assert (node->op == op_Alloc);
1676 node->attr.a.type = tp;
/* where_alloc: whether allocation happens on stack or heap. */
1680 get_Alloc_where (ir_node *node) {
1681 assert (node->op == op_Alloc);
1682 return node->attr.a.where;
1686 set_Alloc_where (ir_node *node, where_alloc where) {
1687 assert (node->op == op_Alloc);
1688 node->attr.a.where = where;
/* Free accessors: mem at position 0, pointer at position 1, size at
   position 2; type and stack/heap flag live in the "f" attribute. */
1693 get_Free_mem (ir_node *node) {
1694 assert (node->op == op_Free);
1695 return get_irn_n(node, 0);
1699 set_Free_mem (ir_node *node, ir_node *mem) {
1700 assert (node->op == op_Free);
1701 set_irn_n(node, 0, mem);
1705 get_Free_ptr (ir_node *node) {
1706 assert (node->op == op_Free);
1707 return get_irn_n(node, 1);
1711 set_Free_ptr (ir_node *node, ir_node *ptr) {
1712 assert (node->op == op_Free);
1713 set_irn_n(node, 1, ptr);
1717 get_Free_size (ir_node *node) {
1718 assert (node->op == op_Free);
1719 return get_irn_n(node, 2);
1723 set_Free_size (ir_node *node, ir_node *size) {
1724 assert (node->op == op_Free);
1725 set_irn_n(node, 2, size);
/* Returns the freed type; skip_tid() resolves type-id indirections and
   the result is cached back into the attribute (mirrors get_Alloc_type). */
1729 get_Free_type (ir_node *node) {
1730 assert (node->op == op_Free);
1731 return node->attr.f.type = skip_tid(node->attr.f.type);
1735 set_Free_type (ir_node *node, type *tp) {
1736 assert (node->op == op_Free);
1737 node->attr.f.type = tp;
1741 get_Free_where (ir_node *node) {
1742 assert (node->op == op_Free);
1743 return node->attr.f.where;
1747 set_Free_where (ir_node *node, where_alloc where) {
1748 assert (node->op == op_Free);
1749 node->attr.f.where = where;
/* Predecessor array of a Sync; in[0] is the block, so the memory
   predecessors start at &in[1]. */
1753 get_Sync_preds_arr (ir_node *node) {
1754 assert (node->op == op_Sync);
1755 return (ir_node **)&(get_irn_in(node)[1]);
/* Number of memory predecessors of a Sync. */
1759 get_Sync_n_preds (ir_node *node) {
1760 assert (node->op == op_Sync);
1761 return (get_irn_arity(node));
/* NOTE(review): body not visible in this view — presumably unimplemented
   or asserts only; confirm against the full source. */
1766 set_Sync_n_preds (ir_node *node, int n_preds) {
1767 assert (node->op == op_Sync);
/* Return / set the pos-th memory predecessor of a Sync. */
1772 get_Sync_pred (ir_node *node, int pos) {
1773 assert (node->op == op_Sync);
1774 return get_irn_n(node, pos);
1778 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1779 assert (node->op == op_Sync);
1780 set_irn_n(node, pos, pred);
/* Try to recover the source-level type a Proj node selects.
   Handles Proj-of-Proj on Start (a method parameter: look it up in the
   entity's method type) and on Call (a method result: look it up in the
   call type), and Proj of Load where the address is a Sel (the entity's
   type).  Other cases fall through; the default handling is not visible
   in this view. */
1783 type *get_Proj_type(ir_node *n)
1786 ir_node *pred = get_Proj_pred(n);
1788 switch (get_irn_opcode(pred)) {
1791 /* Deal with Start / Call here: we need to know the Proj Nr. */
1792 assert(get_irn_mode(pred) == mode_T);
1793 pred_pred = get_Proj_pred(pred);
1794 if (get_irn_op(pred_pred) == op_Start) {
/* Parameter projection: type comes from the graph entity's method type. */
1795 type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1796 tp = get_method_param_type(mtp, get_Proj_proj(n));
1797 } else if (get_irn_op(pred_pred) == op_Call) {
/* Result projection: type comes from the Call's method type. */
1798 type *mtp = get_Call_type(pred_pred);
1799 tp = get_method_res_type(mtp, get_Proj_proj(n));
1802 case iro_Start: break;
1803 case iro_Call: break;
/* Load case: if the address is a Sel, use the selected entity's type. */
1805 ir_node *a = get_Load_ptr(pred);
1806 if (get_irn_op(a) == op_Sel)
1807 tp = get_entity_type(get_Sel_entity(a));
/* The projected node is the single predecessor (position 0) of a Proj. */
1816 get_Proj_pred (ir_node *node) {
1817 assert (is_Proj(node));
1818 return get_irn_n(node, 0);
1822 set_Proj_pred (ir_node *node, ir_node *pred) {
1823 assert (is_Proj(node));
1824 set_irn_n(node, 0, pred);
/* Return the projection number.  is_Proj() also accepts Filter nodes
   (outside the interprocedural view), whose number is stored in a
   different attribute slot. */
1828 get_Proj_proj (ir_node *node) {
1829 assert (is_Proj(node));
1830 if (get_irn_opcode(node) == iro_Proj) {
1831 return node->attr.proj;
1833 assert(get_irn_opcode(node) == iro_Filter);
1834 return node->attr.filter.proj;
/* NOTE(review): unlike the getter, the setter asserts op_Proj only and
   does not handle Filter — confirm this asymmetry is intended. */
1839 set_Proj_proj (ir_node *node, long proj) {
1840 assert (node->op == op_Proj);
1841 node->attr.proj = proj;
/* Predecessor array of a Tuple; in[0] is the block, so the data
   predecessors start at &in[1]. */
1845 get_Tuple_preds_arr (ir_node *node) {
1846 assert (node->op == op_Tuple);
1847 return (ir_node **)&(get_irn_in(node)[1]);
/* Number of data predecessors of a Tuple. */
1851 get_Tuple_n_preds (ir_node *node) {
1852 assert (node->op == op_Tuple);
1853 return (get_irn_arity(node));
/* NOTE(review): body not visible in this view — presumably unimplemented
   or asserts only; confirm against the full source. */
1858 set_Tuple_n_preds (ir_node *node, int n_preds) {
1859 assert (node->op == op_Tuple);
/* Return / set the pos-th data predecessor of a Tuple. */
1864 get_Tuple_pred (ir_node *node, int pos) {
1865 assert (node->op == op_Tuple);
1866 return get_irn_n(node, pos);
1870 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1871 assert (node->op == op_Tuple);
1872 set_irn_n(node, pos, pred);
/* An Id node forwards its single predecessor (position 0). */
1876 get_Id_pred (ir_node *node) {
1877 assert (node->op == op_Id);
1878 return get_irn_n(node, 0);
1882 set_Id_pred (ir_node *node, ir_node *pred) {
1883 assert (node->op == op_Id);
1884 set_irn_n(node, 0, pred);
/* Confirm operand layout: the confirmed value is predecessor 0. */
1887 ir_node *get_Confirm_value (ir_node *node) {
1888 assert (node->op == op_Confirm);
1889 return get_irn_n(node, 0);
1891 void set_Confirm_value (ir_node *node, ir_node *value) {
1892 assert (node->op == op_Confirm);
1893 set_irn_n(node, 0, value);
/* The comparison bound is predecessor 1 of a Confirm. */
1895 ir_node *get_Confirm_bound (ir_node *node) {
1896 assert (node->op == op_Confirm);
1897 return get_irn_n(node, 1);
1899 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1900 assert (node->op == op_Confirm);
1901 set_irn_n(node, 0, bound);
/* The pn_Cmp relation asserted between value and bound is stored in the
   confirm_cmp attribute. */
1903 pn_Cmp get_Confirm_cmp (ir_node *node) {
1904 assert (node->op == op_Confirm);
1905 return node->attr.confirm_cmp;
1907 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1908 assert (node->op == op_Confirm);
1909 node->attr.confirm_cmp = cmp;
/* Filter accessors.  Filter nodes behave like Proj in the normal view
   and like Phi in the interprocedural view; the interprocedural
   predecessors live in the separate attr.filter.in_cg array. */
1914 get_Filter_pred (ir_node *node) {
1915 assert(node->op == op_Filter);
1919 set_Filter_pred (ir_node *node, ir_node *pred) {
1920 assert(node->op == op_Filter);
/* Projection number of the Filter (Proj role). */
1924 get_Filter_proj(ir_node *node) {
1925 assert(node->op == op_Filter);
1926 return node->attr.filter.proj;
1929 set_Filter_proj (ir_node *node, long proj) {
1930 assert(node->op == op_Filter);
1931 node->attr.filter.proj = proj;
1934 /* Don't use get_irn_arity, get_irn_n in implementation as access
1935    shall work independent of view!!! */
/* Install the interprocedural predecessor array.  in_cg mirrors the
   normal in-array layout: slot 0 holds the block, data preds follow.
   The arrays are (re)allocated on the graph's obstack only when the
   arity changes; backedge flags are reset to 0. */
1936 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1937 assert(node->op == op_Filter);
1938 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1939 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1940 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1941 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1942 node->attr.filter.in_cg[0] = node->in[0];
1944 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Set a single interprocedural predecessor (pos is 0-based, +1 skips
   the block slot). */
1947 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1948 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1949 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1950 node->attr.filter.in_cg[pos + 1] = pred;
/* Number of interprocedural predecessors (array length minus block slot). */
1952 int get_Filter_n_cg_preds(ir_node *node) {
1953 assert(node->op == op_Filter && node->attr.filter.in_cg);
1954 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Return the pos-th interprocedural predecessor. */
1956 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1958 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1960 arity = ARR_LEN(node->attr.filter.in_cg);
1961 assert(pos < arity - 1);
1962 return node->attr.filter.in_cg[pos + 1];
/* Mux accessors.  Operand layout in the raw in-array: in[0] block,
   in[1] sel, in[2] false value, in[3] true value. */
1966 ir_node *get_Mux_sel (ir_node *node) {
1967 assert(node->op == op_Mux);
1970 void set_Mux_sel (ir_node *node, ir_node *sel) {
1971 assert(node->op == op_Mux);
1975 ir_node *get_Mux_false (ir_node *node) {
1976 assert(node->op == op_Mux);
/* NOTE(review): writes node->in[] directly instead of going through
   set_irn_n() like the other setters in this file — this bypasses any
   bookkeeping set_irn_n performs (e.g. edge updates); confirm intended. */
1979 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1980 assert(node->op == op_Mux);
1981 node->in[2] = ir_false;
1984 ir_node *get_Mux_true (ir_node *node) {
1985 assert(node->op == op_Mux);
1988 void set_Mux_true (ir_node *node, ir_node *ir_true) {
1989 assert(node->op == op_Mux);
1990 node->in[3] = ir_true;
/* Return the graph a node belongs to.  Only Block nodes store the irg,
   so walk up to the node's block first; a Bad predecessor may stand in
   for the block after optimization, requiring one more hop. */
1995 get_irn_irg(const ir_node *node) {
1996 if (! is_Block(node))
1997 node = get_nodes_block(node);
1998 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1999 node = get_nodes_block(node);
2000 assert(get_irn_op(node) == op_Block);
2001 return node->attr.block.irg;
2005 /*----------------------------------------------------------------*/
2006 /* Auxiliary routines */
2007 /*----------------------------------------------------------------*/
/* If node is a Proj (or Filter outside the interprocedural view),
   return the projected node; otherwise the fall-through (not visible
   here) returns node itself.  Accepts NULL. */
2010 skip_Proj (ir_node *node) {
2011 /* don't assert node !!! */
2012 if (node && is_Proj(node)) {
2013 return get_Proj_pred(node);
/* Resolve Proj(Tuple) combinations to the Tuple's selected predecessor,
   skipping Id nodes and recursing through nested Tuples.  Disabled when
   normalization is switched off. */
2020 skip_Tuple (ir_node *node) {
2023 if (!get_opt_normalize()) return node;
2025 node = skip_Id(node);
2026 if (get_irn_op(node) == op_Proj) {
2027 pred = skip_Id(get_Proj_pred(node));
2028 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
2029 pred = skip_Id(skip_Tuple(pred));
2030 if (get_irn_op(pred) == op_Tuple)
2031 return get_Tuple_pred(pred, get_Proj_proj(node));
2036 /** returns operand of node if node is a Cast */
2037 ir_node *skip_Cast (ir_node *node) {
2038 if (node && get_irn_op(node) == op_Cast) {
/* Cast's operand is predecessor 0; skip Ids on the way. */
2039 return skip_Id(get_irn_n(node, 0));
2046 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2047    than any other approach, as Id chains are resolved and all point to the real node, or
2048    all id's are self loops. */
/* Follow a chain of Id nodes to the real node, path-compressing the
   chain as it goes.  A cycle of Ids is detected by temporarily turning
   this node into a self-loop; in that case the node itself is returned.
   NOTE(review): a second skip_Id definition follows below — presumably
   one of the two is compiled out by a preprocessor guard not visible in
   this view. */
2050 skip_Id (ir_node *node) {
2051 /* don't assert node !!! */
2053 if (!get_opt_normalize()) return node;
2055 /* Don't use get_Id_pred: We get into an endless loop for
2056    self-referencing Ids. */
/* in[0] is the block, so in[0+1] is the Id's single data predecessor. */
2057 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2058 ir_node *rem_pred = node->in[0+1];
2061 assert (get_irn_arity (node) > 0);
/* Temporary self-loop marks this node as "in progress" for cycle detection. */
2063 node->in[0+1] = node;
2064 res = skip_Id(rem_pred);
2065 if (res->op == op_Id) /* self-loop */ return node;
/* Path compression: point directly at the chain's end. */
2067 node->in[0+1] = res;
2074 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2075    than any other approach, as Id chains are resolved and all point to the real node, or
2076    all id's are self loops. */
/* Alternate skip_Id implementation with fast paths for the common cases
   (not an Id; predecessor already resolved).  NOTE(review): duplicates
   the definition above — presumably selected by a preprocessor guard not
   visible in this view. */
2078 skip_Id (ir_node *node) {
2080 /* don't assert node !!! */
2082 if (!node || (node->op != op_Id)) return node;
2084 if (!get_opt_normalize()) return node;
2086 /* Don't use get_Id_pred: We get into an endless loop for
2087    self-referencing Ids. */
/* in[0] is the block, so in[0+1] is the Id's single data predecessor. */
2088 pred = node->in[0+1];
2090 if (pred->op != op_Id) return pred;
2092 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2093 ir_node *rem_pred, *res;
2095 if (pred->op != op_Id) return pred; /* shortcut */
2098 assert (get_irn_arity (node) > 0);
2100 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2101 res = skip_Id(rem_pred);
2102 if (res->op == op_Id) /* self-loop */ return node;
2104 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Out-of-line versions of the corresponding _is_* inlines/macros. */
2113 (is_Bad)(const ir_node *node) {
2114 return _is_Bad(node);
2118 (is_no_Block)(const ir_node *node) {
2119 return _is_no_Block(node);
2123 (is_Block)(const ir_node *node) {
2124 return _is_Block(node);
2127 /* returns true if node is a Unknown node. */
2129 is_Unknown (const ir_node *node) {
2131 return (get_irn_op(node) == op_Unknown);
/* Proj test: outside the interprocedural view a Filter also counts as
   a Proj (it plays the Proj role there). */
2135 is_Proj (const ir_node *node) {
2137 return node->op == op_Proj
2138 || (!get_interprocedural_view() && node->op == op_Filter);
2141 /* Returns true if the operation manipulates control flow. */
2143 is_cfop(const ir_node *node) {
2144 return is_cfopcode(get_irn_op(node));
2147 /* Returns true if the operation manipulates interprocedural control flow:
2148    CallBegin, EndReg, EndExcept */
2149 int is_ip_cfop(const ir_node *node) {
2150 return is_ip_cfopcode(get_irn_op(node));
2153 /* Returns true if the operation can change the control flow because
/* Fragile ops may raise an exception and thus alter control flow. */
2156 is_fragile_op(const ir_node *node) {
2157 return is_op_fragile(get_irn_op(node));
2160 /* Returns the memory operand of fragile operations. */
2161 ir_node *get_fragile_op_mem(ir_node *node) {
2162 assert(node && is_fragile_op(node));
/* For the opcodes listed in the (elided) case labels, mem is input 0. */
2164 switch (get_irn_opcode (node)) {
2173 return get_irn_n(node, 0);
/* Any opcode reaching here is not a fragile op — the assert above
   should have fired first. */
2178 assert(0 && "should not be reached");
2183 /* Returns true if the operation is a forking control flow operation. */
2185 is_forking_op(const ir_node *node) {
2186 return is_op_forking(get_irn_op(node));
/* Out-of-line version of the _get_irn_type inline: dispatches through
   the opcode's get_type callback installed below. */
2189 type *(get_irn_type)(ir_node *node) {
2190 return _get_irn_type(node);
2193 /** the get_type operation must be always implemented */
/* Fallback for opcodes without a type (return value elided in this view). */
2194 static type *get_Null_type(ir_node *n) {
2198 /* set the get_type operation */
/* Install the per-opcode get_type callback; only Const, SymConst, Cast
   and Proj carry a meaningful type, everything else gets the null stub. */
2199 ir_op *firm_set_default_get_type(ir_op *op)
2202 case iro_Const: op->get_type = get_Const_type; break;
2203 case iro_SymConst: op->get_type = get_SymConst_value_type; break;
2204 case iro_Cast: op->get_type = get_Cast_type; break;
2205 case iro_Proj: op->get_type = get_Proj_type; break;
2206 default: op->get_type = get_Null_type; break;
2211 #ifdef DEBUG_libfirm
/* Debugging helper: print a node's opcode, mode, number and address,
   followed by its block and all data predecessors, to stdout. */
2212 void dump_irn (ir_node *n) {
2213 int i, arity = get_irn_arity(n);
2214 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* Position -1 of get_irn_n is the node's block. */
2216 ir_node *pred = get_irn_n(n, -1);
2217 printf("  block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2218 get_irn_node_nr(pred), (void *)pred);
2220 printf("  preds: \n");
2221 for (i = 0; i < arity; ++i) {
2222 ir_node *pred = get_irn_n(n, i);
2223 printf("    %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2224 get_irn_node_nr(pred), (void *)pred);
2228 #else /* DEBUG_libfirm */
/* Release builds: keep the symbol but do nothing. */
2229 void dump_irn (ir_node *n) {}
2230 #endif /* DEBUG_libfirm */