3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
34 /* some constants fixing the positions of nodes predecessors
36 #define CALL_PARAM_OFFSET 2
37 #define FUNCCALL_PARAM_OFFSET 1
38 #define SEL_INDEX_OFFSET 2
39 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
40 #define END_KEEPALIVE_OFFSET 0
42 static const char *pnc_name_arr [] = {
43 "False", "Eq", "Lt", "Le",
44 "Gt", "Ge", "Lg", "Leg", "Uo",
45 "Ue", "Ul", "Ule", "Ug", "Uge",
50 * returns the pnc name from a pnc constant
52 const char *get_pnc_string(int pnc) {
53 return pnc_name_arr[pnc];
57 * Calculates the negated (Complement(R)) pnc condition.
59 int get_negated_pnc(int pnc, ir_mode *mode) {
62 /* do NOT add the Uo bit for non-floating point values */
63 if (! mode_is_float(mode))
69 /* Calculates the inverse (R^-1) pnc condition, i.e., "<" --> ">" */
71 get_inversed_pnc(int pnc) {
72 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
73 int lesser = pnc & pn_Cmp_Lt;
74 int greater = pnc & pn_Cmp_Gt;
76 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
81 const char *pns_name_arr [] = {
82 "initial_exec", "global_store",
83 "frame_base", "globals", "args"
86 const char *symconst_name_arr [] = {
87 "type_tag", "size", "addr_name", "addr_ent"
91 * Indicates, whether additional data can be registered to ir nodes.
92 * If set to 1, this is not possible anymore.
94 static int forbid_new_data = 0;
97 * The amount of additional space for custom data to be allocated upon
98 * creating a new node.
100 unsigned firm_add_node_size = 0;
103 /* register new space for every node */
104 unsigned register_additional_node_data(unsigned size) {
105 assert(!forbid_new_data && "Too late to register additional node data");
110 return firm_add_node_size += size;
116 /* Forbid the addition of new data to an ir node. */
121 * irnode constructor.
122 * Create a new irnode in irg, with an op, mode, arity and
123 * some incoming irnodes.
124 * If arity is negative, a node with a dynamic array is created.
127 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
128 int arity, ir_node **in)
131 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
134 assert(irg && op && mode);
135 p = obstack_alloc (irg->obst, node_size);
136 memset(p, 0, node_size);
137 res = (ir_node *) (p + firm_add_node_size);
139 res->kind = k_ir_node;
145 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
147 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
148 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
152 set_irn_dbg_info(res, db);
156 res->node_nr = get_irp_new_node_nr();
159 #if FIRM_EDGES_INPLACE
162 int not_a_block = is_no_Block(res);
164 INIT_LIST_HEAD(&res->edge_info.outs_head);
166 for (i = 0, n = arity + not_a_block; i < n; ++i)
167 edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
171 hook_new_node(irg, res);
176 /*-- getting some parameters from ir_nodes --*/
179 (is_ir_node)(const void *thing) {
180 return _is_ir_node(thing);
184 (get_irn_intra_arity)(const ir_node *node) {
185 return _get_irn_intra_arity(node);
189 (get_irn_inter_arity)(const ir_node *node) {
190 return _get_irn_inter_arity(node);
193 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
196 (get_irn_arity)(const ir_node *node) {
197 return _get_irn_arity(node);
200 /* Returns the array with ins. This array is shifted with respect to the
201 array accessed by get_irn_n: The block operand is at position 0 not -1.
202 (@@@ This should be changed.)
203 The order of the predecessors in this array is not guaranteed, except that
204 lists of operands as predecessors of Block or arguments of a Call are
207 get_irn_in (const ir_node *node) {
209 if (get_interprocedural_view()) { /* handle Filter and Block specially */
210 if (get_irn_opcode(node) == iro_Filter) {
211 assert(node->attr.filter.in_cg);
212 return node->attr.filter.in_cg;
213 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
214 return node->attr.block.in_cg;
216 /* else fall through */
222 set_irn_in (ir_node *node, int arity, ir_node **in) {
225 if (get_interprocedural_view()) { /* handle Filter and Block specially */
226 if (get_irn_opcode(node) == iro_Filter) {
227 assert(node->attr.filter.in_cg);
228 arr = &node->attr.filter.in_cg;
229 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
230 arr = &node->attr.block.in_cg;
237 if (arity != ARR_LEN(*arr) - 1) {
238 ir_node * block = (*arr)[0];
239 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
242 fix_backedges(current_ir_graph->obst, node);
243 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
247 (get_irn_intra_n)(const ir_node *node, int n) {
248 return _get_irn_intra_n (node, n);
252 (get_irn_inter_n)(const ir_node *node, int n) {
253 return _get_irn_inter_n (node, n);
256 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
259 (get_irn_n)(const ir_node *node, int n) {
260 return _get_irn_n(node, n);
264 set_irn_n (ir_node *node, int n, ir_node *in) {
265 assert(node && node->kind == k_ir_node);
267 assert(n < get_irn_arity(node));
268 assert(in && in->kind == k_ir_node);
270 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
271 /* Change block pred in both views! */
272 node->in[n + 1] = in;
273 assert(node->attr.filter.in_cg);
274 node->attr.filter.in_cg[n + 1] = in;
277 if (get_interprocedural_view()) { /* handle Filter and Block specially */
278 if (get_irn_opcode(node) == iro_Filter) {
279 assert(node->attr.filter.in_cg);
280 node->attr.filter.in_cg[n + 1] = in;
282 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
283 node->attr.block.in_cg[n + 1] = in;
286 /* else fall through */
290 hook_set_irn_n(node, n, in, node->in[n + 1]);
292 /* Here, we rely on src and tgt being in the current ir graph */
293 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
295 node->in[n + 1] = in;
299 (get_irn_mode)(const ir_node *node) {
300 return _get_irn_mode(node);
304 (set_irn_mode)(ir_node *node, ir_mode *mode)
306 _set_irn_mode(node, mode);
310 get_irn_modecode (const ir_node *node)
313 return node->mode->code;
316 /** Gets the string representation of the mode .*/
318 get_irn_modename (const ir_node *node)
321 return get_mode_name(node->mode);
325 get_irn_modeident (const ir_node *node)
328 return get_mode_ident(node->mode);
332 (get_irn_op)(const ir_node *node)
334 return _get_irn_op(node);
337 /* should be private to the library: */
339 set_irn_op (ir_node *node, ir_op *op)
346 (get_irn_opcode)(const ir_node *node)
348 return _get_irn_opcode(node);
352 get_irn_opname (const ir_node *node)
355 if ((get_irn_op((ir_node *)node) == op_Phi) &&
356 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
357 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
358 return get_id_str(node->op->name);
362 get_irn_opident (const ir_node *node)
365 return node->op->name;
369 (get_irn_visited)(const ir_node *node)
371 return _get_irn_visited(node);
375 (set_irn_visited)(ir_node *node, unsigned long visited)
377 _set_irn_visited(node, visited);
381 (mark_irn_visited)(ir_node *node) {
382 _mark_irn_visited(node);
386 (irn_not_visited)(const ir_node *node) {
387 return _irn_not_visited(node);
391 (irn_visited)(const ir_node *node) {
392 return _irn_visited(node);
396 (set_irn_link)(ir_node *node, void *link) {
397 _set_irn_link(node, link);
401 (get_irn_link)(const ir_node *node) {
402 return _get_irn_link(node);
406 (get_irn_pinned)(const ir_node *node) {
407 return _get_irn_pinned(node);
411 (is_irn_pinned_in_irg) (const ir_node *node) {
412 return _is_irn_pinned_in_irg(node);
415 void set_irn_pinned(ir_node *node, op_pin_state state) {
416 /* due to optimization an opt may be turned into a Tuple */
417 if (get_irn_op(node) == op_Tuple)
420 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
421 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
423 node->attr.except.pin_state = state;
426 #ifdef DO_HEAPANALYSIS
427 /* Access the abstract interpretation information of a node.
428 Returns NULL if no such information is available. */
429 struct abstval *get_irn_abst_value(ir_node *n) {
432 /* Set the abstract interpretation information of a node. */
433 void set_irn_abst_value(ir_node *n, struct abstval *os) {
436 struct section *firm_get_irn_section(ir_node *n) {
439 void firm_set_irn_section(ir_node *n, struct section *s) {
443 /* Dummies needed for firmjni. */
444 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
445 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
446 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
447 void firm_set_irn_section(ir_node *n, struct section *s) {}
448 #endif /* DO_HEAPANALYSIS */
451 /* Outputs a unique number for this node */
453 get_irn_node_nr(const ir_node *node) {
456 return node->node_nr;
463 get_irn_const_attr (ir_node *node)
465 assert (node->op == op_Const);
466 return node->attr.con;
470 get_irn_proj_attr (ir_node *node)
472 assert (node->op == op_Proj);
473 return node->attr.proj;
477 get_irn_alloc_attr (ir_node *node)
479 assert (node->op == op_Alloc);
484 get_irn_free_attr (ir_node *node)
486 assert (node->op == op_Free);
491 get_irn_symconst_attr (ir_node *node)
493 assert (node->op == op_SymConst);
498 get_irn_call_attr (ir_node *node)
500 assert (node->op == op_Call);
501 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
505 get_irn_sel_attr (ir_node *node)
507 assert (node->op == op_Sel);
512 get_irn_phi_attr (ir_node *node)
514 assert (node->op == op_Phi);
515 return node->attr.phi0_pos;
519 get_irn_block_attr (ir_node *node)
521 assert (node->op == op_Block);
522 return node->attr.block;
526 get_irn_load_attr (ir_node *node)
528 assert (node->op == op_Load);
529 return node->attr.load;
533 get_irn_store_attr (ir_node *node)
535 assert (node->op == op_Store);
536 return node->attr.store;
540 get_irn_except_attr (ir_node *node)
542 assert (node->op == op_Div || node->op == op_Quot ||
543 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
544 return node->attr.except;
547 /** manipulate fields of individual nodes **/
549 /* this works for all except Block */
551 get_nodes_block (const ir_node *node) {
552 assert (!(node->op == op_Block));
553 return get_irn_n(node, -1);
557 set_nodes_block (ir_node *node, ir_node *block) {
558 assert (!(node->op == op_Block));
559 set_irn_n(node, -1, block);
562 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
563 * from Start. If so returns frame type, else Null. */
564 type *is_frame_pointer(ir_node *n) {
565 if ((get_irn_op(n) == op_Proj) &&
566 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
567 ir_node *start = get_Proj_pred(n);
568 if (get_irn_op(start) == op_Start) {
569 return get_irg_frame_type(get_irn_irg(start));
575 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
576 * from Start. If so returns global type, else Null. */
577 type *is_globals_pointer(ir_node *n) {
578 if ((get_irn_op(n) == op_Proj) &&
579 (get_Proj_proj(n) == pn_Start_P_globals)) {
580 ir_node *start = get_Proj_pred(n);
581 if (get_irn_op(start) == op_Start) {
582 return get_glob_type();
588 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
589 * from Start. If so returns 1, else 0. */
590 int is_value_arg_pointer(ir_node *n) {
591 if ((get_irn_op(n) == op_Proj) &&
592 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
593 (get_irn_op(get_Proj_pred(n)) == op_Start))
598 /* Returns an array with the predecessors of the Block. Depending on
599 the implementation of the graph data structure this can be a copy of
600 the internal representation of predecessors as well as the internal
601 array itself. Therefore writing to this array might obstruct the ir. */
603 get_Block_cfgpred_arr (ir_node *node)
605 assert ((node->op == op_Block));
606 return (ir_node **)&(get_irn_in(node)[1]);
610 (get_Block_n_cfgpreds)(ir_node *node) {
611 return get_Block_n_cfgpreds(node);
615 (get_Block_cfgpred)(ir_node *node, int pos) {
616 return get_Block_cfgpred(node, pos);
620 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
621 assert (node->op == op_Block);
622 set_irn_n(node, pos, pred);
626 get_Block_matured (ir_node *node) {
627 assert (node->op == op_Block);
628 return node->attr.block.matured;
632 set_Block_matured (ir_node *node, bool matured) {
633 assert (node->op == op_Block);
634 node->attr.block.matured = matured;
638 (get_Block_block_visited)(ir_node *node) {
639 return _get_Block_block_visited(node);
643 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
644 _set_Block_block_visited(node, visit);
647 /* For this current_ir_graph must be set. */
649 (mark_Block_block_visited)(ir_node *node) {
650 _mark_Block_block_visited(node);
654 (Block_not_block_visited)(ir_node *node) {
655 return _Block_not_block_visited(node);
659 get_Block_graph_arr (ir_node *node, int pos) {
660 assert (node->op == op_Block);
661 return node->attr.block.graph_arr[pos+1];
665 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
666 assert (node->op == op_Block);
667 node->attr.block.graph_arr[pos+1] = value;
670 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
671 assert(node->op == op_Block);
672 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
673 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
674 node->attr.block.in_cg[0] = NULL;
675 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
677 /* Fix backedge array. fix_backedges operates depending on
678 interprocedural_view. */
679 int ipv = get_interprocedural_view();
680 set_interprocedural_view(true);
681 fix_backedges(current_ir_graph->obst, node);
682 set_interprocedural_view(ipv);
685 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
688 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
689 assert(node->op == op_Block &&
690 node->attr.block.in_cg &&
691 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
692 node->attr.block.in_cg[pos + 1] = pred;
695 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
696 assert(node->op == op_Block);
697 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
700 int get_Block_cg_n_cfgpreds(ir_node * node) {
701 assert(node->op == op_Block);
702 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
705 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
706 assert(node->op == op_Block && node->attr.block.in_cg);
707 return node->attr.block.in_cg[pos + 1];
710 void remove_Block_cg_cfgpred_arr(ir_node * node) {
711 assert(node->op == op_Block);
712 node->attr.block.in_cg = NULL;
715 ir_node *(set_Block_dead)(ir_node *block) {
716 return _set_Block_dead(block);
719 int (is_Block_dead)(const ir_node *block) {
720 return _is_Block_dead(block);
723 ir_extblk *get_Block_extbb(const ir_node *block) {
724 assert(is_Block(block));
725 return block->attr.block.extblk;
728 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
729 assert(is_Block(block));
730 block->attr.block.extblk = extblk;
734 set_Start_irg(ir_node *node, ir_graph *irg) {
735 assert(node->op == op_Start);
736 assert(is_ir_graph(irg));
737 assert(0 && " Why set irg? -- use set_irn_irg");
741 get_End_n_keepalives(ir_node *end) {
742 assert (end->op == op_End);
743 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
747 get_End_keepalive(ir_node *end, int pos) {
748 assert (end->op == op_End);
749 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
753 add_End_keepalive (ir_node *end, ir_node *ka) {
754 assert (end->op == op_End);
755 ARR_APP1 (ir_node *, end->in, ka);
759 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
760 assert (end->op == op_End);
761 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
765 free_End (ir_node *end) {
766 assert (end->op == op_End);
768 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
769 end->in = NULL; /* @@@ make sure we get an error if we use the
770 in array afterwards ... */
775 > Implementing the case construct (which is where the constant Proj node is
776 > important) involves far more than simply determining the constant values.
777 > We could argue that this is more properly a function of the translator from
778 > Firm to the target machine. That could be done if there was some way of
779 > projecting "default" out of the Cond node.
780 I know it's complicated.
781 Basically there are two problems:
782 - determining the gaps between the projs
783 - determining the biggest case constant to know the proj number for
785 I see several solutions:
786 1. Introduce a ProjDefault node. Solves both problems.
787 This means to extend all optimizations executed during construction.
788 2. Give the Cond node for switch two flavors:
789 a) there are no gaps in the projs (existing flavor)
790 b) gaps may exist, default proj is still the Proj with the largest
791 projection number. This covers also the gaps.
792 3. Fix the semantic of the Cond to that of 2b)
794 Solution 2 seems to be the best:
795 Computing the gaps in the Firm representation is not too hard, i.e.,
796 libFIRM can implement a routine that transforms between the two
797 flavours. This is also possible for 1) but 2) does not require to
798 change any existing optimization.
799 Further it should be far simpler to determine the biggest constant than
801 I don't want to choose 3) as 2a) seems to have advantages for
802 dataflow analysis and 3) does not allow to convert the representation to
806 get_Cond_selector (ir_node *node) {
807 assert (node->op == op_Cond);
808 return get_irn_n(node, 0);
812 set_Cond_selector (ir_node *node, ir_node *selector) {
813 assert (node->op == op_Cond);
814 set_irn_n(node, 0, selector);
818 get_Cond_kind (ir_node *node) {
819 assert (node->op == op_Cond);
820 return node->attr.c.kind;
824 set_Cond_kind (ir_node *node, cond_kind kind) {
825 assert (node->op == op_Cond);
826 node->attr.c.kind = kind;
830 get_Cond_defaultProj (ir_node *node) {
831 assert (node->op == op_Cond);
832 return node->attr.c.default_proj;
836 get_Return_mem (ir_node *node) {
837 assert (node->op == op_Return);
838 return get_irn_n(node, 0);
842 set_Return_mem (ir_node *node, ir_node *mem) {
843 assert (node->op == op_Return);
844 set_irn_n(node, 0, mem);
848 get_Return_n_ress (ir_node *node) {
849 assert (node->op == op_Return);
850 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
854 get_Return_res_arr (ir_node *node)
856 assert ((node->op == op_Return));
857 if (get_Return_n_ress(node) > 0)
858 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
865 set_Return_n_res (ir_node *node, int results) {
866 assert (node->op == op_Return);
871 get_Return_res (ir_node *node, int pos) {
872 assert (node->op == op_Return);
873 assert (get_Return_n_ress(node) > pos);
874 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
878 set_Return_res (ir_node *node, int pos, ir_node *res){
879 assert (node->op == op_Return);
880 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
884 get_Raise_mem (ir_node *node) {
885 assert (node->op == op_Raise);
886 return get_irn_n(node, 0);
890 set_Raise_mem (ir_node *node, ir_node *mem) {
891 assert (node->op == op_Raise);
892 set_irn_n(node, 0, mem);
896 get_Raise_exo_ptr (ir_node *node) {
897 assert (node->op == op_Raise);
898 return get_irn_n(node, 1);
902 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
903 assert (node->op == op_Raise);
904 set_irn_n(node, 1, exo_ptr);
907 tarval *(get_Const_tarval)(ir_node *node) {
908 return _get_Const_tarval(node);
912 set_Const_tarval (ir_node *node, tarval *con) {
913 assert (node->op == op_Const);
914 node->attr.con.tv = con;
917 cnst_classify_t (classify_Const)(ir_node *node)
919 return _classify_Const(node);
923 /* The source language type. Must be an atomic type. Mode of type must
924 be mode of node. For tarvals from entities type must be pointer to
927 get_Const_type (ir_node *node) {
928 assert (node->op == op_Const);
929 return node->attr.con.tp;
933 set_Const_type (ir_node *node, type *tp) {
934 assert (node->op == op_Const);
935 if (tp != firm_unknown_type) {
936 assert (is_atomic_type(tp));
937 assert (get_type_mode(tp) == get_irn_mode(node));
939 node->attr.con.tp = tp;
944 get_SymConst_kind (const ir_node *node) {
945 assert (node->op == op_SymConst);
946 return node->attr.i.num;
950 set_SymConst_kind (ir_node *node, symconst_kind num) {
951 assert (node->op == op_SymConst);
952 node->attr.i.num = num;
956 get_SymConst_type (ir_node *node) {
957 assert ( (node->op == op_SymConst)
958 && ( get_SymConst_kind(node) == symconst_type_tag
959 || get_SymConst_kind(node) == symconst_size));
960 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
964 set_SymConst_type (ir_node *node, type *tp) {
965 assert ( (node->op == op_SymConst)
966 && ( get_SymConst_kind(node) == symconst_type_tag
967 || get_SymConst_kind(node) == symconst_size));
968 node->attr.i.sym.type_p = tp;
972 get_SymConst_name (ir_node *node) {
973 assert ( (node->op == op_SymConst)
974 && (get_SymConst_kind(node) == symconst_addr_name));
975 return node->attr.i.sym.ident_p;
979 set_SymConst_name (ir_node *node, ident *name) {
980 assert ( (node->op == op_SymConst)
981 && (get_SymConst_kind(node) == symconst_addr_name));
982 node->attr.i.sym.ident_p = name;
986 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
987 entity *get_SymConst_entity (ir_node *node) {
988 assert ( (node->op == op_SymConst)
989 && (get_SymConst_kind (node) == symconst_addr_ent));
990 return node->attr.i.sym.entity_p;
993 void set_SymConst_entity (ir_node *node, entity *ent) {
994 assert ( (node->op == op_SymConst)
995 && (get_SymConst_kind(node) == symconst_addr_ent));
996 node->attr.i.sym.entity_p = ent;
999 union symconst_symbol
1000 get_SymConst_symbol (ir_node *node) {
1001 assert (node->op == op_SymConst);
1002 return node->attr.i.sym;
1006 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1007 assert (node->op == op_SymConst);
1008 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1009 node->attr.i.sym = sym;
1013 get_SymConst_value_type (ir_node *node) {
1014 assert (node->op == op_SymConst);
1015 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1016 return node->attr.i.tp;
1020 set_SymConst_value_type (ir_node *node, type *tp) {
1021 assert (node->op == op_SymConst);
1022 node->attr.i.tp = tp;
1026 get_Sel_mem (ir_node *node) {
1027 assert (node->op == op_Sel);
1028 return get_irn_n(node, 0);
1032 set_Sel_mem (ir_node *node, ir_node *mem) {
1033 assert (node->op == op_Sel);
1034 set_irn_n(node, 0, mem);
1038 get_Sel_ptr (ir_node *node) {
1039 assert (node->op == op_Sel);
1040 return get_irn_n(node, 1);
1044 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1045 assert (node->op == op_Sel);
1046 set_irn_n(node, 1, ptr);
1050 get_Sel_n_indexs (ir_node *node) {
1051 assert (node->op == op_Sel);
1052 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1056 get_Sel_index_arr (ir_node *node)
1058 assert ((node->op == op_Sel));
1059 if (get_Sel_n_indexs(node) > 0)
1060 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1066 get_Sel_index (ir_node *node, int pos) {
1067 assert (node->op == op_Sel);
1068 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1072 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1073 assert (node->op == op_Sel);
1074 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1078 get_Sel_entity (ir_node *node) {
1079 assert (node->op == op_Sel);
1080 return node->attr.s.ent;
1084 set_Sel_entity (ir_node *node, entity *ent) {
1085 assert (node->op == op_Sel);
1086 node->attr.s.ent = ent;
/* Accessors for the InstOf node (entity attribute, store and obj operands).
 *
 * NOTE(review): every assert below uses '=' (assignment) instead of '==',
 * i.e. `assert (node->op = op_InstOf);`.  The assertion therefore always
 * succeeds and, worse, silently OVERWRITES node->op with op_InstOf as a
 * side effect whenever asserts are enabled.  Each of the six asserts
 * should read `assert (node->op == op_InstOf);`.  Flagged only — this is
 * a documentation-only annotation; the code bytes are unchanged. */
1090 get_InstOf_ent (ir_node *node) {
1091 assert (node->op = op_InstOf);
1092 return (node->attr.io.ent);
1096 set_InstOf_ent (ir_node *node, type *ent) {
1097 assert (node->op = op_InstOf);
1098 node->attr.io.ent = ent;
/* store operand is predecessor 0 (memory-like input). */
1102 get_InstOf_store (ir_node *node) {
1103 assert (node->op = op_InstOf);
1104 return (get_irn_n (node, 0));
1108 set_InstOf_store (ir_node *node, ir_node *obj) {
1109 assert (node->op = op_InstOf);
1110 set_irn_n (node, 0, obj);
/* obj operand is predecessor 1 (the object pointer being tested). */
1114 get_InstOf_obj (ir_node *node) {
1115 assert (node->op = op_InstOf);
1116 return (get_irn_n (node, 1));
1120 set_InstOf_obj (ir_node *node, ir_node *obj) {
1121 assert (node->op = op_InstOf);
1122 set_irn_n (node, 1, obj);
1126 /* For unary and binary arithmetic operations the access to the
1127 operands can be factored out. Left is the first, right the
1128 second arithmetic value as listed in tech report 0999-33.
1129 unops are: Minus, Abs, Not, Conv, Cast
1130 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1131 Shr, Shrs, Rotate, Cmp */
1135 get_Call_mem (ir_node *node) {
1136 assert (node->op == op_Call);
1137 return get_irn_n(node, 0);
1141 set_Call_mem (ir_node *node, ir_node *mem) {
1142 assert (node->op == op_Call);
1143 set_irn_n(node, 0, mem);
1147 get_Call_ptr (ir_node *node) {
1148 assert (node->op == op_Call);
1149 return get_irn_n(node, 1);
1153 set_Call_ptr (ir_node *node, ir_node *ptr) {
1154 assert (node->op == op_Call);
1155 set_irn_n(node, 1, ptr);
1159 get_Call_param_arr (ir_node *node) {
1160 assert (node->op == op_Call);
1161 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1165 get_Call_n_params (ir_node *node) {
1166 assert (node->op == op_Call);
1167 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1171 get_Call_arity (ir_node *node) {
1172 assert (node->op == op_Call);
1173 return get_Call_n_params(node);
1177 set_Call_arity (ir_node *node, ir_node *arity) {
1178 assert (node->op == op_Call);
1183 get_Call_param (ir_node *node, int pos) {
1184 assert (node->op == op_Call);
1185 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1189 set_Call_param (ir_node *node, int pos, ir_node *param) {
1190 assert (node->op == op_Call);
1191 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1195 get_Call_type (ir_node *node) {
1196 assert (node->op == op_Call);
1197 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1201 set_Call_type (ir_node *node, type *tp) {
1202 assert (node->op == op_Call);
1203 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1204 node->attr.call.cld_tp = tp;
1207 int Call_has_callees(ir_node *node) {
1208 assert(node && node->op == op_Call);
1209 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1210 (node->attr.call.callee_arr != NULL));
1213 int get_Call_n_callees(ir_node * node) {
1214 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1215 return ARR_LEN(node->attr.call.callee_arr);
1218 entity * get_Call_callee(ir_node * node, int pos) {
1219 assert(pos >= 0 && pos < get_Call_n_callees(node));
1220 return node->attr.call.callee_arr[pos];
1223 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1224 assert(node->op == op_Call);
1225 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1226 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1228 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1231 void remove_Call_callee_arr(ir_node * node) {
1232 assert(node->op == op_Call);
1233 node->attr.call.callee_arr = NULL;
1236 ir_node * get_CallBegin_ptr (ir_node *node) {
1237 assert(node->op == op_CallBegin);
1238 return get_irn_n(node, 0);
1240 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1241 assert(node->op == op_CallBegin);
1242 set_irn_n(node, 0, ptr);
1244 ir_node * get_CallBegin_call (ir_node *node) {
1245 assert(node->op == op_CallBegin);
1246 return node->attr.callbegin.call;
1248 void set_CallBegin_call (ir_node *node, ir_node *call) {
1249 assert(node->op == op_CallBegin);
1250 node->attr.callbegin.call = call;
1255 ir_node * get_##OP##_left(ir_node *node) { \
1256 assert(node->op == op_##OP); \
1257 return get_irn_n(node, node->op->op_index); \
1259 void set_##OP##_left(ir_node *node, ir_node *left) { \
1260 assert(node->op == op_##OP); \
1261 set_irn_n(node, node->op->op_index, left); \
1263 ir_node *get_##OP##_right(ir_node *node) { \
1264 assert(node->op == op_##OP); \
1265 return get_irn_n(node, node->op->op_index + 1); \
1267 void set_##OP##_right(ir_node *node, ir_node *right) { \
1268 assert(node->op == op_##OP); \
1269 set_irn_n(node, node->op->op_index + 1, right); \
1273 ir_node *get_##OP##_op(ir_node *node) { \
1274 assert(node->op == op_##OP); \
1275 return get_irn_n(node, node->op->op_index); \
1277 void set_##OP##_op (ir_node *node, ir_node *op) { \
1278 assert(node->op == op_##OP); \
1279 set_irn_n(node, node->op->op_index, op); \
1289 get_Quot_mem (ir_node *node) {
1290 assert (node->op == op_Quot);
1291 return get_irn_n(node, 0);
1295 set_Quot_mem (ir_node *node, ir_node *mem) {
1296 assert (node->op == op_Quot);
1297 set_irn_n(node, 0, mem);
1303 get_DivMod_mem (ir_node *node) {
1304 assert (node->op == op_DivMod);
1305 return get_irn_n(node, 0);
1309 set_DivMod_mem (ir_node *node, ir_node *mem) {
1310 assert (node->op == op_DivMod);
1311 set_irn_n(node, 0, mem);
1317 get_Div_mem (ir_node *node) {
1318 assert (node->op == op_Div);
1319 return get_irn_n(node, 0);
1323 set_Div_mem (ir_node *node, ir_node *mem) {
1324 assert (node->op == op_Div);
1325 set_irn_n(node, 0, mem);
1331 get_Mod_mem (ir_node *node) {
1332 assert (node->op == op_Mod);
1333 return get_irn_n(node, 0);
1337 set_Mod_mem (ir_node *node, ir_node *mem) {
1338 assert (node->op == op_Mod);
1339 set_irn_n(node, 0, mem);
1356 get_Cast_type (ir_node *node) {
1357 assert (node->op == op_Cast);
1358 return node->attr.cast.totype;
1362 set_Cast_type (ir_node *node, type *to_tp) {
1363 assert (node->op == op_Cast);
1364 node->attr.cast.totype = to_tp;
1368 /* Checks for upcast.
1370 * Returns true if the Cast node casts a class type to a super type.
1372 int is_Cast_upcast(ir_node *node) {
1373 type *totype = get_Cast_type(node);
1374 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1375 ir_graph *myirg = get_irn_irg(node);
1377 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1380 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1381 totype = get_pointer_points_to_type(totype);
1382 fromtype = get_pointer_points_to_type(fromtype);
1387 if (!is_Class_type(totype)) return false;
1388 return is_subclass_of(fromtype, totype);
1391 /* Checks for downcast.
1393 * Returns true if the Cast node casts a class type to a sub type.
1395 int is_Cast_downcast(ir_node *node) {
1396 type *totype = get_Cast_type(node);
1397 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1399 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1402 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1403 totype = get_pointer_points_to_type(totype);
1404 fromtype = get_pointer_points_to_type(fromtype);
1409 if (!is_Class_type(totype)) return false;
1410 return is_subclass_of(totype, fromtype);
1414 (is_unop)(const ir_node *node) {
1415 return _is_unop(node);
1419 get_unop_op (ir_node *node) {
1420 if (node->op->opar == oparity_unary)
1421 return get_irn_n(node, node->op->op_index);
1423 assert(node->op->opar == oparity_unary);
1428 set_unop_op (ir_node *node, ir_node *op) {
1429 if (node->op->opar == oparity_unary)
1430 set_irn_n(node, node->op->op_index, op);
1432 assert(node->op->opar == oparity_unary);
/* Out-of-line wrapper for the _is_binop macro/inline. */
1436 (is_binop)(const ir_node *node) {
1437 return _is_binop(node);
/* Returns the left operand (at op_index) of a binary operation node.
 * The asserts after the ifs fire only on non-binary nodes. */
1441 get_binop_left (ir_node *node) {
1442 if (node->op->opar == oparity_binary)
1443 return get_irn_n(node, node->op->op_index);
1445 assert(node->op->opar == oparity_binary);
/* Sets the left operand of a binary operation node. */
1450 set_binop_left (ir_node *node, ir_node *left) {
1451 if (node->op->opar == oparity_binary)
1452 set_irn_n(node, node->op->op_index, left);
1454 assert (node->op->opar == oparity_binary);
/* Returns the right operand (at op_index + 1) of a binary operation node. */
1458 get_binop_right (ir_node *node) {
1459 if (node->op->opar == oparity_binary)
1460 return get_irn_n(node, node->op->op_index + 1);
1462 assert(node->op->opar == oparity_binary);
/* Sets the right operand of a binary operation node. */
1467 set_binop_right (ir_node *node, ir_node *right) {
1468 if (node->op->opar == oparity_binary)
1469 set_irn_n(node, node->op->op_index + 1, right);
1471 assert (node->op->opar == oparity_binary);
/* Tests whether a node acts as a Phi.  In the interprocedural view a
 * Filter node also counts as a Phi.  During graph construction
 * (phase_building) a zero-arity Phi is a placeholder (Phi0), not a
 * real Phi, hence the arity check. */
1474 int is_Phi (const ir_node *n) {
1480 if (op == op_Filter) return get_interprocedural_view();
1483 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1484 (get_irn_arity(n) > 0));
/* Tests for a Phi0 placeholder: a zero-arity Phi that only exists while
 * the graph is still being built. */
1489 int is_Phi0 (const ir_node *n) {
1492 return ((get_irn_op(n) == op_Phi) &&
1493 (get_irn_arity(n) == 0) &&
1494 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Returns the predecessor array of a Phi, skipping in[0] (the block). */
1498 get_Phi_preds_arr (ir_node *node) {
1499 assert (node->op == op_Phi);
1500 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of Phi predecessors (accepts Phi0 placeholders too). */
1504 get_Phi_n_preds (ir_node *node) {
1505 assert (is_Phi(node) || is_Phi0(node));
1506 return (get_irn_arity(node));
/* Visible body is empty apart from the assert; resizing is presumably
 * elided or unsupported here — TODO confirm against full source. */
1510 void set_Phi_n_preds (ir_node *node, int n_preds) {
1511 assert (node->op == op_Phi);
/* Returns the Phi predecessor at position pos. */
1516 get_Phi_pred (ir_node *node, int pos) {
1517 assert (is_Phi(node) || is_Phi0(node));
1518 return get_irn_n(node, pos);
/* Sets the Phi predecessor at position pos. */
1522 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1523 assert (is_Phi(node) || is_Phi0(node));
1524 set_irn_n(node, pos, pred);
/* A "memop" is a Load or Store: both have mem at in 0 and ptr at in 1. */
1528 int is_memop(ir_node *node) {
1529 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
/* Returns the memory operand (predecessor 0) of a Load/Store. */
1532 ir_node *get_memop_mem (ir_node *node) {
1533 assert(is_memop(node));
1534 return get_irn_n(node, 0);
/* Sets the memory operand (predecessor 0) of a Load/Store. */
1537 void set_memop_mem (ir_node *node, ir_node *mem) {
1538 assert(is_memop(node));
1539 set_irn_n(node, 0, mem);
/* Returns the address operand (predecessor 1) of a Load/Store. */
1542 ir_node *get_memop_ptr (ir_node *node) {
1543 assert(is_memop(node));
1544 return get_irn_n(node, 1);
/* Sets the address operand (predecessor 1) of a Load/Store. */
1547 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1548 assert(is_memop(node));
1549 set_irn_n(node, 1, ptr);
/* Load accessors: mem is predecessor 0, address is predecessor 1;
 * the loaded mode and volatility live in the node's attributes. */
1553 get_Load_mem (ir_node *node) {
1554 assert (node->op == op_Load);
1555 return get_irn_n(node, 0);
1559 set_Load_mem (ir_node *node, ir_node *mem) {
1560 assert (node->op == op_Load);
1561 set_irn_n(node, 0, mem);
1565 get_Load_ptr (ir_node *node) {
1566 assert (node->op == op_Load);
1567 return get_irn_n(node, 1);
1571 set_Load_ptr (ir_node *node, ir_node *ptr) {
1572 assert (node->op == op_Load);
1573 set_irn_n(node, 1, ptr);
/* Mode of the value being loaded (stored in attributes, not derivable
 * from the node's own mode, which is mode_T). */
1577 get_Load_mode (ir_node *node) {
1578 assert (node->op == op_Load);
1579 return node->attr.load.load_mode;
1583 set_Load_mode (ir_node *node, ir_mode *mode) {
1584 assert (node->op == op_Load);
1585 node->attr.load.load_mode = mode;
/* Volatility flag: a volatile Load must not be removed or reordered. */
1589 get_Load_volatility (ir_node *node) {
1590 assert (node->op == op_Load);
1591 return node->attr.load.volatility;
1595 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1596 assert (node->op == op_Load);
1597 node->attr.load.volatility = volatility;
/* Store accessors: mem is predecessor 0, address is predecessor 1,
 * stored value is predecessor 2; volatility lives in the attributes. */
1602 get_Store_mem (ir_node *node) {
1603 assert (node->op == op_Store);
1604 return get_irn_n(node, 0);
1608 set_Store_mem (ir_node *node, ir_node *mem) {
1609 assert (node->op == op_Store);
1610 set_irn_n(node, 0, mem);
1614 get_Store_ptr (ir_node *node) {
1615 assert (node->op == op_Store);
1616 return get_irn_n(node, 1);
1620 set_Store_ptr (ir_node *node, ir_node *ptr) {
1621 assert (node->op == op_Store);
1622 set_irn_n(node, 1, ptr);
1626 get_Store_value (ir_node *node) {
1627 assert (node->op == op_Store);
1628 return get_irn_n(node, 2);
1632 set_Store_value (ir_node *node, ir_node *value) {
1633 assert (node->op == op_Store);
1634 set_irn_n(node, 2, value);
/* Volatility flag: a volatile Store must not be removed or reordered. */
1638 get_Store_volatility (ir_node *node) {
1639 assert (node->op == op_Store);
1640 return node->attr.store.volatility;
1644 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1645 assert (node->op == op_Store);
1646 node->attr.store.volatility = volatility;
/* Alloc accessors: mem is predecessor 0, allocation size is predecessor 1;
 * the allocated type and placement (stack/heap) live in the attributes. */
1651 get_Alloc_mem (ir_node *node) {
1652 assert (node->op == op_Alloc);
1653 return get_irn_n(node, 0);
1657 set_Alloc_mem (ir_node *node, ir_node *mem) {
1658 assert (node->op == op_Alloc);
1659 set_irn_n(node, 0, mem);
1663 get_Alloc_size (ir_node *node) {
1664 assert (node->op == op_Alloc);
1665 return get_irn_n(node, 1);
1669 set_Alloc_size (ir_node *node, ir_node *size) {
1670 assert (node->op == op_Alloc);
1671 set_irn_n(node, 1, size);
/* Normalizes the stored type through skip_tid and caches the result
 * back into the attribute (hence the assignment inside the return). */
1675 get_Alloc_type (ir_node *node) {
1676 assert (node->op == op_Alloc);
1677 return node->attr.a.type = skip_tid(node->attr.a.type);
1681 set_Alloc_type (ir_node *node, type *tp) {
1682 assert (node->op == op_Alloc);
1683 node->attr.a.type = tp;
/* Placement of the allocation (where_alloc: e.g. stack vs. heap). */
1687 get_Alloc_where (ir_node *node) {
1688 assert (node->op == op_Alloc);
1689 return node->attr.a.where;
1693 set_Alloc_where (ir_node *node, where_alloc where) {
1694 assert (node->op == op_Alloc);
1695 node->attr.a.where = where;
/* Free accessors: mem is predecessor 0, the freed pointer is predecessor 1,
 * the size is predecessor 2; freed type and placement live in attributes. */
1700 get_Free_mem (ir_node *node) {
1701 assert (node->op == op_Free);
1702 return get_irn_n(node, 0);
1706 set_Free_mem (ir_node *node, ir_node *mem) {
1707 assert (node->op == op_Free);
1708 set_irn_n(node, 0, mem);
1712 get_Free_ptr (ir_node *node) {
1713 assert (node->op == op_Free);
1714 return get_irn_n(node, 1);
1718 set_Free_ptr (ir_node *node, ir_node *ptr) {
1719 assert (node->op == op_Free);
1720 set_irn_n(node, 1, ptr);
1724 get_Free_size (ir_node *node) {
1725 assert (node->op == op_Free);
1726 return get_irn_n(node, 2);
1730 set_Free_size (ir_node *node, ir_node *size) {
1731 assert (node->op == op_Free);
1732 set_irn_n(node, 2, size);
/* Normalizes the stored type through skip_tid and caches the result
 * back into the attribute, mirroring get_Alloc_type. */
1736 get_Free_type (ir_node *node) {
1737 assert (node->op == op_Free);
1738 return node->attr.f.type = skip_tid(node->attr.f.type);
1742 set_Free_type (ir_node *node, type *tp) {
1743 assert (node->op == op_Free);
1744 node->attr.f.type = tp;
1748 get_Free_where (ir_node *node) {
1749 assert (node->op == op_Free);
1750 return node->attr.f.where;
1754 set_Free_where (ir_node *node, where_alloc where) {
1755 assert (node->op == op_Free);
1756 node->attr.f.where = where;
/* Returns the predecessor array of a Sync, skipping in[0] (the block). */
1760 get_Sync_preds_arr (ir_node *node) {
1761 assert (node->op == op_Sync);
1762 return (ir_node **)&(get_irn_in(node)[1]);
/* Number of memory predecessors being synchronized. */
1766 get_Sync_n_preds (ir_node *node) {
1767 assert (node->op == op_Sync);
1768 return (get_irn_arity(node));
/* Visible body is empty apart from the assert; resizing is presumably
 * elided or unsupported here — TODO confirm against full source. */
1773 set_Sync_n_preds (ir_node *node, int n_preds) {
1774 assert (node->op == op_Sync);
1779 get_Sync_pred (ir_node *node, int pos) {
1780 assert (node->op == op_Sync);
1781 return get_irn_n(node, pos);
1785 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1786 assert (node->op == op_Sync);
1787 set_irn_n(node, pos, pred);
/* Computes the type produced by a Proj node by inspecting what it
 * projects from: a Proj of a Proj of Start yields a method parameter
 * type, of Call a method result type, of Load the type of the loaded
 * entity (when the address is a Sel). */
1790 type *get_Proj_type(ir_node *n)
1793 ir_node *pred = get_Proj_pred(n);
1795 switch (get_irn_opcode(pred)) {
1798 /* Deal with Start / Call here: we need to know the Proj Nr. */
1799 assert(get_irn_mode(pred) == mode_T);
1800 pred_pred = get_Proj_pred(pred);
1801 if (get_irn_op(pred_pred) == op_Start) {
/* Parameter projection: look the type up in the graph entity's method type. */
1802 type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1803 tp = get_method_param_type(mtp, get_Proj_proj(n));
1804 } else if (get_irn_op(pred_pred) == op_Call) {
/* Result projection: look the type up in the called method's type. */
1805 type *mtp = get_Call_type(pred_pred);
1806 tp = get_method_res_type(mtp, get_Proj_proj(n));
1809 case iro_Start: break;
1810 case iro_Call: break;
1812 ir_node *a = get_Load_ptr(pred);
1813 if (get_irn_op(a) == op_Sel)
1814 tp = get_entity_type(get_Sel_entity(a));
/* Returns the node a Proj projects from (its single predecessor). */
1823 get_Proj_pred (const ir_node *node) {
1824 assert (is_Proj(node));
1825 return get_irn_n(node, 0);
1829 set_Proj_pred (ir_node *node, ir_node *pred) {
1830 assert (is_Proj(node));
1831 set_irn_n(node, 0, pred);
/* Returns the projection number.  is_Proj also accepts Filter nodes
 * (outside the interprocedural view), whose proj lives in a different
 * attribute slot. */
1835 get_Proj_proj (const ir_node *node) {
1836 assert (is_Proj(node));
1837 if (get_irn_opcode(node) == iro_Proj) {
1838 return node->attr.proj;
1840 assert(get_irn_opcode(node) == iro_Filter);
1841 return node->attr.filter.proj;
/* Setter accepts plain Proj only (no Filter), unlike the getter. */
1846 set_Proj_proj (ir_node *node, long proj) {
1847 assert (node->op == op_Proj);
1848 node->attr.proj = proj;
/* Returns the predecessor array of a Tuple, skipping in[0] (the block). */
1852 get_Tuple_preds_arr (ir_node *node) {
1853 assert (node->op == op_Tuple);
1854 return (ir_node **)&(get_irn_in(node)[1]);
1858 get_Tuple_n_preds (ir_node *node) {
1859 assert (node->op == op_Tuple);
1860 return (get_irn_arity(node));
/* Visible body is empty apart from the assert; resizing is presumably
 * elided or unsupported here — TODO confirm against full source. */
1865 set_Tuple_n_preds (ir_node *node, int n_preds) {
1866 assert (node->op == op_Tuple);
1871 get_Tuple_pred (ir_node *node, int pos) {
1872 assert (node->op == op_Tuple);
1873 return get_irn_n(node, pos);
1877 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1878 assert (node->op == op_Tuple);
1879 set_irn_n(node, pos, pred);
/* Returns the node an Id node forwards to (its single predecessor). */
1883 get_Id_pred (ir_node *node) {
1884 assert (node->op == op_Id);
1885 return get_irn_n(node, 0);
1889 set_Id_pred (ir_node *node, ir_node *pred) {
1890 assert (node->op == op_Id);
1891 set_irn_n(node, 0, pred);
/* Confirm: predecessor 0 is the confirmed value, predecessor 1 the bound. */
1894 ir_node *get_Confirm_value (ir_node *node) {
1895 assert (node->op == op_Confirm);
1896 return get_irn_n(node, 0);
1898 void set_Confirm_value (ir_node *node, ir_node *value) {
1899 assert (node->op == op_Confirm);
1900 set_irn_n(node, 0, value);
1902 ir_node *get_Confirm_bound (ir_node *node) {
1903 assert (node->op == op_Confirm);
1904 return get_irn_n(node, 1);
/* Sets the bound operand of a Confirm node.  The bound is predecessor 1
 * (predecessor 0 is the confirmed value — see get_Confirm_bound above).
 * Fix: the original wrote to index 0, silently clobbering the value
 * operand and leaving the bound unchanged. */
1906 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1907 assert (node->op == op_Confirm);
1908 set_irn_n(node, 1, bound);
/* The comparison relation (pn_Cmp) the Confirm asserts between value
 * and bound, stored in the node's attributes. */
1910 pn_Cmp get_Confirm_cmp (ir_node *node) {
1911 assert (node->op == op_Confirm);
1912 return node->attr.confirm_cmp;
1914 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1915 assert (node->op == op_Confirm);
1916 node->attr.confirm_cmp = cmp;
/* Filter accessors: getter/setter bodies for the predecessor are elided
 * in this view; the projection number lives in the filter attributes. */
1921 get_Filter_pred (ir_node *node) {
1922 assert(node->op == op_Filter);
1926 set_Filter_pred (ir_node *node, ir_node *pred) {
1927 assert(node->op == op_Filter);
/* Projection number of the Filter (analogous to Proj's proj). */
1931 get_Filter_proj(ir_node *node) {
1932 assert(node->op == op_Filter);
1933 return node->attr.filter.proj;
1936 set_Filter_proj (ir_node *node, long proj) {
1937 assert(node->op == op_Filter);
1938 node->attr.filter.proj = proj;
1941 /* Don't use get_irn_arity, get_irn_n in implementation as access
1942 shall work independent of view!!! */
/* Installs the interprocedural (call-graph) predecessor array of a
 * Filter.  in_cg[0] mirrors the block predecessor; the arity entries
 * follow at offset 1.  Reallocates on the graph obstack only when the
 * array is missing or its length changed. */
1943 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1944 assert(node->op == op_Filter);
1945 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1946 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
/* Fresh backedge flags, all cleared. */
1947 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1948 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1949 node->attr.filter.in_cg[0] = node->in[0];
1951 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets one interprocedural predecessor (pos is 0-based; slot 0 is the block). */
1954 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1955 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1956 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1957 node->attr.filter.in_cg[pos + 1] = pred;
/* Number of interprocedural predecessors (excludes the block slot). */
1959 int get_Filter_n_cg_preds(ir_node *node) {
1960 assert(node->op == op_Filter && node->attr.filter.in_cg);
1961 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Returns one interprocedural predecessor. */
1963 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1965 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1967 arity = ARR_LEN(node->attr.filter.in_cg);
1968 assert(pos < arity - 1);
1969 return node->attr.filter.in_cg[pos + 1];
/* Mux accessors.  The setters show the predecessor layout: in[2] is the
 * false value and in[3] the true value; sel presumably sits at in[1] —
 * the getter bodies are elided in this view, TODO confirm. */
1973 ir_node *get_Mux_sel (ir_node *node) {
1974 assert(node->op == op_Mux);
1977 void set_Mux_sel (ir_node *node, ir_node *sel) {
1978 assert(node->op == op_Mux);
1982 ir_node *get_Mux_false (ir_node *node) {
1983 assert(node->op == op_Mux);
1986 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1987 assert(node->op == op_Mux);
1988 node->in[2] = ir_false;
1991 ir_node *get_Mux_true (ir_node *node) {
1992 assert(node->op == op_Mux);
1995 void set_Mux_true (ir_node *node, ir_node *ir_true) {
1996 assert(node->op == op_Mux);
1997 node->in[3] = ir_true;
/* Returns the graph a node belongs to by walking to its Block, which
 * stores the irg in its attributes.  A Bad block predecessor (left by
 * optimizations) is stepped over once more. */
2002 get_irn_irg(const ir_node *node) {
2003 if (! is_Block(node))
2004 node = get_nodes_block(node);
2005 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2006 node = get_nodes_block(node);
2007 assert(get_irn_op(node) == op_Block);
2008 return node->attr.block.irg;
2012 /*----------------------------------------------------------------*/
2013 /* Auxiliary routines */
2014 /*----------------------------------------------------------------*/
/* Returns the predecessor of a Proj, or the node itself otherwise.
 * Accepts NULL (no assert on node). */
2017 skip_Proj (ir_node *node) {
2018 /* don't assert node !!! */
2019 if (node && is_Proj(node)) {
2020 return get_Proj_pred(node);
/* Resolves Proj-of-Tuple chains to the real predecessor, following Id
 * nodes along the way.  Disabled when normalization is switched off. */
2027 skip_Tuple (ir_node *node) {
2030 if (!get_opt_normalize()) return node;
2032 node = skip_Id(node);
2033 if (get_irn_op(node) == op_Proj) {
2034 pred = skip_Id(get_Proj_pred(node));
2035 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
2036 pred = skip_Id(skip_Tuple(pred));
2037 else if (get_irn_op(pred) == op_Tuple)
2038 return get_Tuple_pred(pred, get_Proj_proj(node));
2043 /* returns operand of node if node is a Cast */
/* NULL-safe: non-Cast (or NULL) nodes are returned unchanged (elided). */
2044 ir_node *skip_Cast (ir_node *node) {
2045 if (node && get_irn_op(node) == op_Cast)
2046 return get_Cast_op(node);
2050 /* returns operand of node if node is a Confirm */
2051 ir_node *skip_Confirm (ir_node *node) {
2052 if (node && get_irn_op(node) == op_Confirm)
2053 return get_Confirm_value(node);
2057 /* skip all high-level ops */
/* Steps over one high-level op by returning its first predecessor. */
2058 ir_node *skip_HighLevel(ir_node *node) {
2059 if (node && is_op_highlevel(get_irn_op(node)))
2060 return get_irn_n(node, 0);
2065 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2066 than any other approach, as Id chains are resolved and all point to the real node, or
2067 all id's are self loops. */
/* First of two skip_Id variants in this file; one is presumably
 * preprocessor-disabled in the elided lines — TODO confirm.
 * Path-compresses Id chains recursively: each visited Id is temporarily
 * turned into a self-loop to break cycles, then repointed at the chain
 * end.  in[0+1] is the first real predecessor (in[0] is the block). */
2069 skip_Id (ir_node *node) {
2070 /* don't assert node !!! */
2072 if (!get_opt_normalize()) return node;
2074 /* Don't use get_Id_pred: We get into an endless loop for
2075 self-referencing Ids. */
2076 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2077 ir_node *rem_pred = node->in[0+1];
2080 assert (get_irn_arity (node) > 0);
2082 node->in[0+1] = node;
2083 res = skip_Id(rem_pred);
2084 if (res->op == op_Id) /* self-loop */ return node;
2086 node->in[0+1] = res;
2093 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2094 than any other approach, as Id chains are resolved and all point to the real node, or
2095 all id's are self loops. */
/* Second skip_Id variant (see note on the first); adds fast paths for
 * non-Id nodes and one-link chains before the recursive compression. */
2097 skip_Id (ir_node *node) {
2099 /* don't assert node !!! */
2101 if (!node || (node->op != op_Id)) return node;
2103 if (!get_opt_normalize()) return node;
2105 /* Don't use get_Id_pred: We get into an endless loop for
2106 self-referencing Ids. */
2107 pred = node->in[0+1];
2109 if (pred->op != op_Id) return pred;
2111 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2112 ir_node *rem_pred, *res;
2114 if (pred->op != op_Id) return pred; /* shortcut */
2117 assert (get_irn_arity (node) > 0);
2119 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2120 res = skip_Id(rem_pred);
2121 if (res->op == op_Id) /* self-loop */ return node;
2123 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Out-of-line wrappers for the corresponding _is_* macros/inlines. */
2132 (is_Bad)(const ir_node *node) {
2133 return _is_Bad(node);
2137 (is_Const)(const ir_node *node) {
2138 return _is_Const(node);
2142 (is_no_Block)(const ir_node *node) {
2143 return _is_no_Block(node);
2147 (is_Block)(const ir_node *node) {
2148 return _is_Block(node);
2151 /* returns true if node is a Unknown node. */
2153 (is_Unknown)(const ir_node *node) {
2154 return _is_Unknown(node);
/* A Filter counts as a Proj only outside the interprocedural view. */
2158 is_Proj (const ir_node *node) {
2160 return node->op == op_Proj
2161 || (!get_interprocedural_view() && node->op == op_Filter);
2164 /* Returns true if the operation manipulates control flow. */
2166 is_cfop(const ir_node *node) {
2167 return is_cfopcode(get_irn_op(node));
2170 /* Returns true if the operation manipulates interprocedural control flow:
2171 CallBegin, EndReg, EndExcept */
2172 int is_ip_cfop(const ir_node *node) {
2173 return is_ip_cfopcode(get_irn_op(node));
2176 /* Returns true if the operation can change the control flow because
2179 is_fragile_op(const ir_node *node) {
2180 return is_op_fragile(get_irn_op(node));
2183 /* Returns the memory operand of fragile operations. */
/* For the fragile opcodes handled in the (partially elided) switch the
 * memory operand is predecessor 0; anything else is a programming error. */
2184 ir_node *get_fragile_op_mem(ir_node *node) {
2185 assert(node && is_fragile_op(node));
2187 switch (get_irn_opcode (node)) {
2196 return get_irn_n(node, 0);
2201 assert(0 && "should not be reached");
2206 /* Returns true if the operation is a forking control flow operation. */
2208 is_forking_op(const ir_node *node) {
2209 return is_op_forking(get_irn_op(node));
/* Out-of-line wrapper for the _get_irn_type macro/inline. */
2212 type *(get_irn_type)(ir_node *node) {
2213 return _get_irn_type(node);
2216 /** the get_type operation must be always implemented */
/* Fallback get_type for opcodes without type information (body elided). */
2217 static type *get_Null_type(ir_node *n) {
2221 /* set the get_type operation */
/* Installs the per-opcode get_type callback; only Const, SymConst, Cast
 * and Proj carry real type information, all others get the null fallback. */
2222 ir_op *firm_set_default_get_type(ir_op *op)
2225 case iro_Const: op->get_type = get_Const_type; break;
2226 case iro_SymConst: op->get_type = get_SymConst_value_type; break;
2227 case iro_Cast: op->get_type = get_Cast_type; break;
2228 case iro_Proj: op->get_type = get_Proj_type; break;
2229 default: op->get_type = get_Null_type; break;
2234 #ifdef DEBUG_libfirm
/* Debug helper: prints a node, its block (predecessor -1) and all data
 * predecessors to stdout.  Compiled to a no-op without DEBUG_libfirm. */
2235 void dump_irn (ir_node *n) {
2236 int i, arity = get_irn_arity(n);
2237 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* Index -1 yields the block the node belongs to. */
2239 ir_node *pred = get_irn_n(n, -1);
2240 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2241 get_irn_node_nr(pred), (void *)pred);
2243 printf(" preds: \n");
2244 for (i = 0; i < arity; ++i) {
2245 ir_node *pred = get_irn_n(n, i);
2246 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2247 get_irn_node_nr(pred), (void *)pred);
2251 #else /* DEBUG_libfirm */
2252 void dump_irn (ir_node *n) {}
2253 #endif /* DEBUG_libfirm */