3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
* Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
34 /* some constants fixing the positions of nodes predecessors
36 #define CALL_PARAM_OFFSET 2
37 #define FUNCCALL_PARAM_OFFSET 1
38 #define SEL_INDEX_OFFSET 2
39 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
40 #define END_KEEPALIVE_OFFSET 0
42 static const char *pnc_name_arr [] = {
43 "False", "Eq", "Lt", "Le",
44 "Gt", "Ge", "Lg", "Leg", "Uo",
45 "Ue", "Ul", "Ule", "Ug", "Uge",
* Returns the pnc name from a pnc constant.
const char *get_pnc_string(int pnc) {
  /* direct table lookup -- no bounds check is done here; pnc must be a
     valid pn_Cmp value (caller's responsibility) */
  return pnc_name_arr[pnc];
57 * Calculates the negated (Complement(R)) pnc condition.
59 int get_negated_pnc(int pnc, ir_mode *mode) {
62 /* do NOT add the Uo bit for non-floating point values */
63 if (! mode_is_float(mode))
/* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
get_inversed_pnc(int pnc) {
  /* keep everything except the two order bits (Eq, Uo, ... survive) */
  int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
  int lesser = pnc & pn_Cmp_Lt;
  int greater = pnc & pn_Cmp_Gt;
  /* swap Lt and Gt: "a < b" inverts to "a > b" and vice versa */
  code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
81 const char *pns_name_arr [] = {
82 "initial_exec", "global_store",
83 "frame_base", "globals", "args"
86 const char *symconst_name_arr [] = {
87 "type_tag", "size", "addr_name", "addr_ent"
91 * Indicates, whether additional data can be registered to ir nodes.
92 * If set to 1, this is not possible anymore.
94 static int forbid_new_data = 0;
97 * The amount of additional space for custom data to be allocated upon
98 * creating a new node.
100 unsigned firm_add_node_size = 0;
103 /* register new space for every node */
unsigned register_additional_node_data(unsigned size) {
  /* once the first node exists the layout is frozen (forbid_new_data) */
  assert(!forbid_new_data && "Too late to register additional node data");
  /* accumulate and return the new total per-node extra size */
  return firm_add_node_size += size;
116 /* Forbid the addition of new data to an ir node. */
121 * irnode constructor.
122 * Create a new irnode in irg, with an op, mode, arity and
123 * some incoming irnodes.
124 * If arity is negative, a node with a dynamic array is created.
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
     int arity, ir_node **in)
  /* total size: node header + op-specific attributes + registered client data */
  size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
  assert(irg && op && mode);
  p = obstack_alloc (irg->obst, node_size);
  memset(p, 0, node_size);
  /* the registered client-data area lies in front of the ir_node itself */
  res = (ir_node *) (p + firm_add_node_size);
  res->kind = k_ir_node;
  /* dynamic in-array (negative arity): grows as predecessors are added */
  res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
  /* fixed in-array: slot 0 is the block, slots 1..arity the operands
     NOTE(review): the if/else selecting between these two allocations is
     not visible in this excerpt -- confirm against the full file */
  res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
  set_irn_dbg_info(res, db);
  res->node_nr = get_irp_new_node_nr();
#if FIRM_EDGES_INPLACE
  /* initialize the out-edge bookkeeping and announce all new edges;
     a Block has no block predecessor, hence the not_a_block offset */
  int not_a_block = is_no_Block(res);
  INIT_LIST_HEAD(&res->edge_info.outs_head);
  for (i = 0, n = arity + not_a_block; i < n; ++i)
    edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
  hook_new_node(irg, res);
176 /*-- getting some parameters from ir_nodes --*/
179 (is_ir_node)(const void *thing) {
180 return _is_ir_node(thing);
184 (get_irn_intra_arity)(const ir_node *node) {
185 return _get_irn_intra_arity(node);
189 (get_irn_inter_arity)(const ir_node *node) {
190 return _get_irn_inter_arity(node);
193 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
196 (get_irn_arity)(const ir_node *node) {
197 return _get_irn_arity(node);
200 /* Returns the array with ins. This array is shifted with respect to the
201 array accessed by get_irn_n: The block operand is at position 0 not -1.
202 (@@@ This should be changed.)
203 The order of the predecessors in this array is not guaranteed, except that
204 lists of operands as predecessors of Block or arguments of a Call are
get_irn_in (const ir_node *node) {
  /* in the interprocedural view Filter and Block keep a second,
     call-graph in-array (in_cg) that overrides the normal one */
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      return node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      return node->attr.block.in_cg;
  /* else fall through */
set_irn_in (ir_node *node, int arity, ir_node **in) {
  /* pick the array to replace: in the interprocedural view Filter and
     Block use their call-graph in-array (in_cg) instead of node->in */
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      arr = &node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      arr = &node->attr.block.in_cg;
  /* arity change: reallocate on the graph obstack, preserving slot 0
     (the block predecessor) */
  if (arity != ARR_LEN(*arr) - 1) {
    ir_node * block = (*arr)[0];
    *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
  /* backedge info may be invalidated by the new arity */
  fix_backedges(current_ir_graph->obst, node);
  memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
247 (get_irn_intra_n)(const ir_node *node, int n) {
248 return _get_irn_intra_n (node, n);
252 (get_irn_inter_n)(const ir_node *node, int n) {
253 return _get_irn_inter_n (node, n);
256 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
259 (get_irn_n)(const ir_node *node, int n) {
260 return _get_irn_n(node, n);
set_irn_n (ir_node *node, int n, ir_node *in) {
  assert(node && node->kind == k_ir_node);
  /* NOTE(review): only the upper bound is asserted; a lower-bound check
     (-1 <= n, with -1 addressing the block) seems intended -- confirm */
  assert(n < get_irn_arity(node));
  assert(in && in->kind == k_ir_node);
  /* n == -1 sets the block predecessor (stored at in[0]) */
  if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
    /* Change block pred in both views! */
    node->in[n + 1] = in;
    assert(node->attr.filter.in_cg);
    node->attr.filter.in_cg[n + 1] = in;
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      node->attr.filter.in_cg[n + 1] = in;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      node->attr.block.in_cg[n + 1] = in;
  /* else fall through */
  /* announce the change (debug hooks, then edge module) before the old
     predecessor node->in[n + 1] is overwritten */
  hook_set_irn_n(node, n, in, node->in[n + 1]);
  /* Here, we rely on src and tgt being in the current ir graph */
  edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
  node->in[n + 1] = in;
299 (get_irn_mode)(const ir_node *node) {
300 return _get_irn_mode(node);
304 (set_irn_mode)(ir_node *node, ir_mode *mode)
306 _set_irn_mode(node, mode);
310 get_irn_modecode (const ir_node *node)
313 return node->mode->code;
316 /** Gets the string representation of the mode .*/
318 get_irn_modename (const ir_node *node)
321 return get_mode_name(node->mode);
325 get_irn_modeident (const ir_node *node)
328 return get_mode_ident(node->mode);
332 (get_irn_op)(const ir_node *node)
334 return _get_irn_op(node);
337 /* should be private to the library: */
339 set_irn_op (ir_node *node, ir_op *op)
346 (get_irn_opcode)(const ir_node *node)
348 return _get_irn_opcode(node);
352 get_irn_opname (const ir_node *node)
355 if ((get_irn_op((ir_node *)node) == op_Phi) &&
356 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
357 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
358 return get_id_str(node->op->name);
362 get_irn_opident (const ir_node *node)
365 return node->op->name;
369 (get_irn_visited)(const ir_node *node)
371 return _get_irn_visited(node);
375 (set_irn_visited)(ir_node *node, unsigned long visited)
377 _set_irn_visited(node, visited);
381 (mark_irn_visited)(ir_node *node) {
382 _mark_irn_visited(node);
386 (irn_not_visited)(const ir_node *node) {
387 return _irn_not_visited(node);
391 (irn_visited)(const ir_node *node) {
392 return _irn_visited(node);
396 (set_irn_link)(ir_node *node, void *link) {
397 _set_irn_link(node, link);
401 (get_irn_link)(const ir_node *node) {
402 return _get_irn_link(node);
406 (get_irn_pinned)(const ir_node *node) {
407 return _get_irn_pinned(node);
void set_irn_pinned(ir_node *node, op_pin_state state) {
  /* due to optimization an opt may be turned into a Tuple */
  if (get_irn_op(node) == op_Tuple)
    /* NOTE(review): the statement guarded by this `if' (presumably an
       early `return;') is not visible in this excerpt -- confirm */
  /* only ops that are at least exception-pinned carry a pin state */
  assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
  assert(state == op_pin_state_pinned || state == op_pin_state_floats);
  node->attr.except.pin_state = state;
421 #ifdef DO_HEAPANALYSIS
422 /* Access the abstract interpretation information of a node.
423 Returns NULL if no such information is available. */
424 struct abstval *get_irn_abst_value(ir_node *n) {
427 /* Set the abstract interpretation information of a node. */
428 void set_irn_abst_value(ir_node *n, struct abstval *os) {
431 struct section *firm_get_irn_section(ir_node *n) {
434 void firm_set_irn_section(ir_node *n, struct section *s) {
438 /* Dummies needed for firmjni. */
439 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
440 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
441 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
442 void firm_set_irn_section(ir_node *n, struct section *s) {}
443 #endif /* DO_HEAPANALYSIS */
446 /* Outputs a unique number for this node */
448 get_irn_node_nr(const ir_node *node) {
451 return node->node_nr;
458 get_irn_const_attr (ir_node *node)
460 assert (node->op == op_Const);
461 return node->attr.con;
465 get_irn_proj_attr (ir_node *node)
467 assert (node->op == op_Proj);
468 return node->attr.proj;
472 get_irn_alloc_attr (ir_node *node)
474 assert (node->op == op_Alloc);
479 get_irn_free_attr (ir_node *node)
481 assert (node->op == op_Free);
486 get_irn_symconst_attr (ir_node *node)
488 assert (node->op == op_SymConst);
493 get_irn_call_attr (ir_node *node)
495 assert (node->op == op_Call);
496 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
500 get_irn_sel_attr (ir_node *node)
502 assert (node->op == op_Sel);
507 get_irn_phi_attr (ir_node *node)
509 assert (node->op == op_Phi);
510 return node->attr.phi0_pos;
514 get_irn_block_attr (ir_node *node)
516 assert (node->op == op_Block);
517 return node->attr.block;
521 get_irn_load_attr (ir_node *node)
523 assert (node->op == op_Load);
524 return node->attr.load;
528 get_irn_store_attr (ir_node *node)
530 assert (node->op == op_Store);
531 return node->attr.store;
535 get_irn_except_attr (ir_node *node)
537 assert (node->op == op_Div || node->op == op_Quot ||
538 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
539 return node->attr.except;
542 /** manipulate fields of individual nodes **/
544 /* this works for all except Block */
546 get_nodes_block (const ir_node *node) {
547 assert (!(node->op == op_Block));
548 return get_irn_n(node, -1);
552 set_nodes_block (ir_node *node, ir_node *block) {
553 assert (!(node->op == op_Block));
554 set_irn_n(node, -1, block);
557 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
558 * from Start. If so returns frame type, else Null. */
type *is_frame_pointer(ir_node *n) {
  /* frame pointer == Proj(Start, pn_Start_P_frame_base) */
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_frame_base)) {
    ir_node *start = get_Proj_pred(n);
    if (get_irn_op(start) == op_Start) {
      /* found it: answer with the frame type of the owning graph */
      return get_irg_frame_type(get_irn_irg(start));
570 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
571 * from Start. If so returns global type, else Null. */
572 type *is_globals_pointer(ir_node *n) {
573 if ((get_irn_op(n) == op_Proj) &&
574 (get_Proj_proj(n) == pn_Start_P_globals)) {
575 ir_node *start = get_Proj_pred(n);
576 if (get_irn_op(start) == op_Start) {
577 return get_glob_type();
583 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
584 * from Start. If so returns 1, else 0. */
585 int is_value_arg_pointer(ir_node *n) {
586 if ((get_irn_op(n) == op_Proj) &&
587 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
588 (get_irn_op(get_Proj_pred(n)) == op_Start))
593 /* Returns an array with the predecessors of the Block. Depending on
594 the implementation of the graph data structure this can be a copy of
595 the internal representation of predecessors as well as the internal
596 array itself. Therefore writing to this array might obstruct the ir. */
598 get_Block_cfgpred_arr (ir_node *node)
600 assert ((node->op == op_Block));
601 return (ir_node **)&(get_irn_in(node)[1]);
605 (get_Block_n_cfgpreds)(ir_node *node) {
606 return get_Block_n_cfgpreds(node);
610 (get_Block_cfgpred)(ir_node *node, int pos) {
611 return get_Block_cfgpred(node, pos);
615 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
616 assert (node->op == op_Block);
617 set_irn_n(node, pos, pred);
621 get_Block_matured (ir_node *node) {
622 assert (node->op == op_Block);
623 return node->attr.block.matured;
627 set_Block_matured (ir_node *node, bool matured) {
628 assert (node->op == op_Block);
629 node->attr.block.matured = matured;
633 (get_Block_block_visited)(ir_node *node) {
634 return _get_Block_block_visited(node);
638 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
639 _set_Block_block_visited(node, visit);
642 /* For this current_ir_graph must be set. */
644 (mark_Block_block_visited)(ir_node *node) {
645 _mark_Block_block_visited(node);
649 (Block_not_block_visited)(ir_node *node) {
650 return _Block_not_block_visited(node);
654 get_Block_graph_arr (ir_node *node, int pos) {
655 assert (node->op == op_Block);
656 return node->attr.block.graph_arr[pos+1];
660 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
661 assert (node->op == op_Block);
662 node->attr.block.graph_arr[pos+1] = value;
665 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
666 assert(node->op == op_Block);
667 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
668 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
669 node->attr.block.in_cg[0] = NULL;
670 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
672 /* Fix backedge array. fix_backedges operates depending on
673 interprocedural_view. */
674 int ipv = get_interprocedural_view();
675 set_interprocedural_view(true);
676 fix_backedges(current_ir_graph->obst, node);
677 set_interprocedural_view(ipv);
680 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
683 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
684 assert(node->op == op_Block &&
685 node->attr.block.in_cg &&
686 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
687 node->attr.block.in_cg[pos + 1] = pred;
690 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
691 assert(node->op == op_Block);
692 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
695 int get_Block_cg_n_cfgpreds(ir_node * node) {
696 assert(node->op == op_Block);
697 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
700 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
701 assert(node->op == op_Block && node->attr.block.in_cg);
702 return node->attr.block.in_cg[pos + 1];
705 void remove_Block_cg_cfgpred_arr(ir_node * node) {
706 assert(node->op == op_Block);
707 node->attr.block.in_cg = NULL;
710 ir_node *(set_Block_dead)(ir_node *block) {
711 return _set_Block_dead(block);
714 int (is_Block_dead)(const ir_node *block) {
715 return _is_Block_dead(block);
718 ir_extblk *get_Block_extbb(const ir_node *block) {
719 assert(is_Block(block));
720 return block->attr.block.extblk;
723 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
724 assert(is_Block(block));
725 block->attr.block.extblk = extblk;
729 set_Start_irg(ir_node *node, ir_graph *irg) {
730 assert(node->op == op_Start);
731 assert(is_ir_graph(irg));
732 assert(0 && " Why set irg? -- use set_irn_irg");
736 get_End_n_keepalives(ir_node *end) {
737 assert (end->op == op_End);
738 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
742 get_End_keepalive(ir_node *end, int pos) {
743 assert (end->op == op_End);
744 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
748 add_End_keepalive (ir_node *end, ir_node *ka) {
749 assert (end->op == op_End);
750 ARR_APP1 (ir_node *, end->in, ka);
754 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
755 assert (end->op == op_End);
756 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
760 free_End (ir_node *end) {
761 assert (end->op == op_End);
763 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
764 end->in = NULL; /* @@@ make sure we get an error if we use the
765 in array afterwards ... */
770 > Implementing the case construct (which is where the constant Proj node is
771 > important) involves far more than simply determining the constant values.
772 > We could argue that this is more properly a function of the translator from
773 > Firm to the target machine. That could be done if there was some way of
774 > projecting "default" out of the Cond node.
775 I know it's complicated.
Basically there are two problems:
777 - determining the gaps between the projs
778 - determining the biggest case constant to know the proj number for
780 I see several solutions:
781 1. Introduce a ProjDefault node. Solves both problems.
782 This means to extend all optimizations executed during construction.
783 2. Give the Cond node for switch two flavors:
784 a) there are no gaps in the projs (existing flavor)
785 b) gaps may exist, default proj is still the Proj with the largest
786 projection number. This covers also the gaps.
787 3. Fix the semantic of the Cond to that of 2b)
789 Solution 2 seems to be the best:
790 Computing the gaps in the Firm representation is not too hard, i.e.,
791 libFIRM can implement a routine that transforms between the two
792 flavours. This is also possible for 1) but 2) does not require to
793 change any existing optimization.
794 Further it should be far simpler to determine the biggest constant than
796 I don't want to choose 3) as 2a) seems to have advantages for
797 dataflow analysis and 3) does not allow to convert the representation to
801 get_Cond_selector (ir_node *node) {
802 assert (node->op == op_Cond);
803 return get_irn_n(node, 0);
807 set_Cond_selector (ir_node *node, ir_node *selector) {
808 assert (node->op == op_Cond);
809 set_irn_n(node, 0, selector);
813 get_Cond_kind (ir_node *node) {
814 assert (node->op == op_Cond);
815 return node->attr.c.kind;
819 set_Cond_kind (ir_node *node, cond_kind kind) {
820 assert (node->op == op_Cond);
821 node->attr.c.kind = kind;
825 get_Cond_defaultProj (ir_node *node) {
826 assert (node->op == op_Cond);
827 return node->attr.c.default_proj;
831 get_Return_mem (ir_node *node) {
832 assert (node->op == op_Return);
833 return get_irn_n(node, 0);
837 set_Return_mem (ir_node *node, ir_node *mem) {
838 assert (node->op == op_Return);
839 set_irn_n(node, 0, mem);
843 get_Return_n_ress (ir_node *node) {
844 assert (node->op == op_Return);
845 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
849 get_Return_res_arr (ir_node *node)
851 assert ((node->op == op_Return));
852 if (get_Return_n_ress(node) > 0)
853 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
860 set_Return_n_res (ir_node *node, int results) {
861 assert (node->op == op_Return);
866 get_Return_res (ir_node *node, int pos) {
867 assert (node->op == op_Return);
868 assert (get_Return_n_ress(node) > pos);
869 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
873 set_Return_res (ir_node *node, int pos, ir_node *res){
874 assert (node->op == op_Return);
875 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
879 get_Raise_mem (ir_node *node) {
880 assert (node->op == op_Raise);
881 return get_irn_n(node, 0);
885 set_Raise_mem (ir_node *node, ir_node *mem) {
886 assert (node->op == op_Raise);
887 set_irn_n(node, 0, mem);
891 get_Raise_exo_ptr (ir_node *node) {
892 assert (node->op == op_Raise);
893 return get_irn_n(node, 1);
897 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
898 assert (node->op == op_Raise);
899 set_irn_n(node, 1, exo_ptr);
902 tarval *(get_Const_tarval)(ir_node *node) {
903 return _get_Const_tarval(node);
907 set_Const_tarval (ir_node *node, tarval *con) {
908 assert (node->op == op_Const);
909 node->attr.con.tv = con;
912 cnst_classify_t (classify_Const)(ir_node *node)
914 return _classify_Const(node);
918 /* The source language type. Must be an atomic type. Mode of type must
919 be mode of node. For tarvals from entities type must be pointer to
922 get_Const_type (ir_node *node) {
923 assert (node->op == op_Const);
924 return node->attr.con.tp;
928 set_Const_type (ir_node *node, type *tp) {
929 assert (node->op == op_Const);
930 if (tp != firm_unknown_type) {
931 assert (is_atomic_type(tp));
932 assert (get_type_mode(tp) == get_irn_mode(node));
934 node->attr.con.tp = tp;
939 get_SymConst_kind (const ir_node *node) {
940 assert (node->op == op_SymConst);
941 return node->attr.i.num;
945 set_SymConst_kind (ir_node *node, symconst_kind num) {
946 assert (node->op == op_SymConst);
947 node->attr.i.num = num;
951 get_SymConst_type (ir_node *node) {
952 assert ( (node->op == op_SymConst)
953 && ( get_SymConst_kind(node) == symconst_type_tag
954 || get_SymConst_kind(node) == symconst_size));
955 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
959 set_SymConst_type (ir_node *node, type *tp) {
960 assert ( (node->op == op_SymConst)
961 && ( get_SymConst_kind(node) == symconst_type_tag
962 || get_SymConst_kind(node) == symconst_size));
963 node->attr.i.sym.type_p = tp;
967 get_SymConst_name (ir_node *node) {
968 assert ( (node->op == op_SymConst)
969 && (get_SymConst_kind(node) == symconst_addr_name));
970 return node->attr.i.sym.ident_p;
974 set_SymConst_name (ir_node *node, ident *name) {
975 assert ( (node->op == op_SymConst)
976 && (get_SymConst_kind(node) == symconst_addr_name));
977 node->attr.i.sym.ident_p = name;
981 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
982 entity *get_SymConst_entity (ir_node *node) {
983 assert ( (node->op == op_SymConst)
984 && (get_SymConst_kind (node) == symconst_addr_ent));
985 return node->attr.i.sym.entity_p;
988 void set_SymConst_entity (ir_node *node, entity *ent) {
989 assert ( (node->op == op_SymConst)
990 && (get_SymConst_kind(node) == symconst_addr_ent));
991 node->attr.i.sym.entity_p = ent;
994 union symconst_symbol
995 get_SymConst_symbol (ir_node *node) {
996 assert (node->op == op_SymConst);
997 return node->attr.i.sym;
1001 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1002 assert (node->op == op_SymConst);
1003 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1004 node->attr.i.sym = sym;
1008 get_SymConst_value_type (ir_node *node) {
1009 assert (node->op == op_SymConst);
1010 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1011 return node->attr.i.tp;
1015 set_SymConst_value_type (ir_node *node, type *tp) {
1016 assert (node->op == op_SymConst);
1017 node->attr.i.tp = tp;
1021 get_Sel_mem (ir_node *node) {
1022 assert (node->op == op_Sel);
1023 return get_irn_n(node, 0);
1027 set_Sel_mem (ir_node *node, ir_node *mem) {
1028 assert (node->op == op_Sel);
1029 set_irn_n(node, 0, mem);
1033 get_Sel_ptr (ir_node *node) {
1034 assert (node->op == op_Sel);
1035 return get_irn_n(node, 1);
1039 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1040 assert (node->op == op_Sel);
1041 set_irn_n(node, 1, ptr);
1045 get_Sel_n_indexs (ir_node *node) {
1046 assert (node->op == op_Sel);
1047 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1051 get_Sel_index_arr (ir_node *node)
1053 assert ((node->op == op_Sel));
1054 if (get_Sel_n_indexs(node) > 0)
1055 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1061 get_Sel_index (ir_node *node, int pos) {
1062 assert (node->op == op_Sel);
1063 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1067 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1068 assert (node->op == op_Sel);
1069 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1073 get_Sel_entity (ir_node *node) {
1074 assert (node->op == op_Sel);
1075 return node->attr.s.ent;
1079 set_Sel_entity (ir_node *node, entity *ent) {
1080 assert (node->op == op_Sel);
1081 node->attr.s.ent = ent;
1085 get_InstOf_ent (ir_node *node) {
1086 assert (node->op = op_InstOf);
1087 return (node->attr.io.ent);
1091 set_InstOf_ent (ir_node *node, type *ent) {
1092 assert (node->op = op_InstOf);
1093 node->attr.io.ent = ent;
1097 get_InstOf_store (ir_node *node) {
1098 assert (node->op = op_InstOf);
1099 return (get_irn_n (node, 0));
1103 set_InstOf_store (ir_node *node, ir_node *obj) {
1104 assert (node->op = op_InstOf);
1105 set_irn_n (node, 0, obj);
1109 get_InstOf_obj (ir_node *node) {
1110 assert (node->op = op_InstOf);
1111 return (get_irn_n (node, 1));
1115 set_InstOf_obj (ir_node *node, ir_node *obj) {
1116 assert (node->op = op_InstOf);
1117 set_irn_n (node, 1, obj);
1121 /* For unary and binary arithmetic operations the access to the
1122 operands can be factored out. Left is the first, right the
1123 second arithmetic value as listed in tech report 0999-33.
1124 unops are: Minus, Abs, Not, Conv, Cast
1125 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1126 Shr, Shrs, Rotate, Cmp */
1130 get_Call_mem (ir_node *node) {
1131 assert (node->op == op_Call);
1132 return get_irn_n(node, 0);
1136 set_Call_mem (ir_node *node, ir_node *mem) {
1137 assert (node->op == op_Call);
1138 set_irn_n(node, 0, mem);
1142 get_Call_ptr (ir_node *node) {
1143 assert (node->op == op_Call);
1144 return get_irn_n(node, 1);
1148 set_Call_ptr (ir_node *node, ir_node *ptr) {
1149 assert (node->op == op_Call);
1150 set_irn_n(node, 1, ptr);
1154 get_Call_param_arr (ir_node *node) {
1155 assert (node->op == op_Call);
1156 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1160 get_Call_n_params (ir_node *node) {
1161 assert (node->op == op_Call);
1162 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1166 get_Call_arity (ir_node *node) {
1167 assert (node->op == op_Call);
1168 return get_Call_n_params(node);
1172 set_Call_arity (ir_node *node, ir_node *arity) {
1173 assert (node->op == op_Call);
1178 get_Call_param (ir_node *node, int pos) {
1179 assert (node->op == op_Call);
1180 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1184 set_Call_param (ir_node *node, int pos, ir_node *param) {
1185 assert (node->op == op_Call);
1186 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1190 get_Call_type (ir_node *node) {
1191 assert (node->op == op_Call);
1192 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1196 set_Call_type (ir_node *node, type *tp) {
1197 assert (node->op == op_Call);
1198 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1199 node->attr.call.cld_tp = tp;
1202 int Call_has_callees(ir_node *node) {
1203 assert(node && node->op == op_Call);
1204 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1205 (node->attr.call.callee_arr != NULL));
1208 int get_Call_n_callees(ir_node * node) {
1209 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1210 return ARR_LEN(node->attr.call.callee_arr);
1213 entity * get_Call_callee(ir_node * node, int pos) {
1214 assert(pos >= 0 && pos < get_Call_n_callees(node));
1215 return node->attr.call.callee_arr[pos];
1218 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1219 assert(node->op == op_Call);
1220 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1221 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1223 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1226 void remove_Call_callee_arr(ir_node * node) {
1227 assert(node->op == op_Call);
1228 node->attr.call.callee_arr = NULL;
1231 ir_node * get_CallBegin_ptr (ir_node *node) {
1232 assert(node->op == op_CallBegin);
1233 return get_irn_n(node, 0);
1235 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1236 assert(node->op == op_CallBegin);
1237 set_irn_n(node, 0, ptr);
1239 ir_node * get_CallBegin_call (ir_node *node) {
1240 assert(node->op == op_CallBegin);
1241 return node->attr.callbegin.call;
1243 void set_CallBegin_call (ir_node *node, ir_node *call) {
1244 assert(node->op == op_CallBegin);
1245 node->attr.callbegin.call = call;
1250 ir_node * get_##OP##_left(ir_node *node) { \
1251 assert(node->op == op_##OP); \
1252 return get_irn_n(node, node->op->op_index); \
1254 void set_##OP##_left(ir_node *node, ir_node *left) { \
1255 assert(node->op == op_##OP); \
1256 set_irn_n(node, node->op->op_index, left); \
1258 ir_node *get_##OP##_right(ir_node *node) { \
1259 assert(node->op == op_##OP); \
1260 return get_irn_n(node, node->op->op_index + 1); \
1262 void set_##OP##_right(ir_node *node, ir_node *right) { \
1263 assert(node->op == op_##OP); \
1264 set_irn_n(node, node->op->op_index + 1, right); \
1268 ir_node *get_##OP##_op(ir_node *node) { \
1269 assert(node->op == op_##OP); \
1270 return get_irn_n(node, node->op->op_index); \
1272 void set_##OP##_op (ir_node *node, ir_node *op) { \
1273 assert(node->op == op_##OP); \
1274 set_irn_n(node, node->op->op_index, op); \
1284 get_Quot_mem (ir_node *node) {
1285 assert (node->op == op_Quot);
1286 return get_irn_n(node, 0);
1290 set_Quot_mem (ir_node *node, ir_node *mem) {
1291 assert (node->op == op_Quot);
1292 set_irn_n(node, 0, mem);
1298 get_DivMod_mem (ir_node *node) {
1299 assert (node->op == op_DivMod);
1300 return get_irn_n(node, 0);
1304 set_DivMod_mem (ir_node *node, ir_node *mem) {
1305 assert (node->op == op_DivMod);
1306 set_irn_n(node, 0, mem);
1312 get_Div_mem (ir_node *node) {
1313 assert (node->op == op_Div);
1314 return get_irn_n(node, 0);
1318 set_Div_mem (ir_node *node, ir_node *mem) {
1319 assert (node->op == op_Div);
1320 set_irn_n(node, 0, mem);
1326 get_Mod_mem (ir_node *node) {
1327 assert (node->op == op_Mod);
1328 return get_irn_n(node, 0);
1332 set_Mod_mem (ir_node *node, ir_node *mem) {
1333 assert (node->op == op_Mod);
1334 set_irn_n(node, 0, mem);
1351 get_Cast_type (ir_node *node) {
1352 assert (node->op == op_Cast);
1353 return node->attr.cast.totype;
1357 set_Cast_type (ir_node *node, type *to_tp) {
1358 assert (node->op == op_Cast);
1359 node->attr.cast.totype = to_tp;
1363 /* Checks for upcast.
1365 * Returns true if the Cast node casts a class type to a super type.
int is_Cast_upcast(ir_node *node) {
  type *totype = get_Cast_type(node);
  type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
  ir_graph *myirg = get_irn_irg(node);
  /* needs computed, consistent type information on the graph */
  assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
  /* strip matching levels of pointer indirection on both sides */
  while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
    totype = get_pointer_points_to_type(totype);
    fromtype = get_pointer_points_to_type(fromtype);
  /* upcast: the source class must be a subclass of the target class */
  if (!is_Class_type(totype)) return false;
  return is_subclass_of(fromtype, totype);
1386 /* Checks for downcast.
1388 * Returns true if the Cast node casts a class type to a sub type.
1390 int is_Cast_downcast(ir_node *node) {
1391 type *totype = get_Cast_type(node);
1392 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1394 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1397 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1398 totype = get_pointer_points_to_type(totype);
1399 fromtype = get_pointer_points_to_type(fromtype);
1404 if (!is_Class_type(totype)) return false;
1405 return is_subclass_of(totype, fromtype);
1409 (is_unop)(const ir_node *node) {
1410 return _is_unop(node);
1414 get_unop_op (ir_node *node) {
1415 if (node->op->opar == oparity_unary)
1416 return get_irn_n(node, node->op->op_index);
1418 assert(node->op->opar == oparity_unary);
1423 set_unop_op (ir_node *node, ir_node *op) {
1424 if (node->op->opar == oparity_unary)
1425 set_irn_n(node, node->op->op_index, op);
1427 assert(node->op->opar == oparity_unary);
1431 (is_binop)(const ir_node *node) {
1432 return _is_binop(node);
1436 get_binop_left (ir_node *node) {
1437 if (node->op->opar == oparity_binary)
1438 return get_irn_n(node, node->op->op_index);
1440 assert(node->op->opar == oparity_binary);
1445 set_binop_left (ir_node *node, ir_node *left) {
1446 if (node->op->opar == oparity_binary)
1447 set_irn_n(node, node->op->op_index, left);
1449 assert (node->op->opar == oparity_binary);
1453 get_binop_right (ir_node *node) {
1454 if (node->op->opar == oparity_binary)
1455 return get_irn_n(node, node->op->op_index + 1);
1457 assert(node->op->opar == oparity_binary);
1462 set_binop_right (ir_node *node, ir_node *right) {
1463 if (node->op->opar == oparity_binary)
1464 set_irn_n(node, node->op->op_index + 1, right);
1466 assert (node->op->opar == oparity_binary);
/* Returns true if the node is a data Phi.  In the interprocedural
   view a Filter node plays the Phi role, hence the special case. */
1469 int is_Phi (const ir_node *n) {
/* NOTE(review): 'op' comes from a line elided in this listing,
   presumably op = get_irn_op(n) — confirm against the full source. */
1475 if (op == op_Filter) return get_interprocedural_view();
/* While the graph is being built only a Phi with predecessors counts;
   after construction every Phi node qualifies. */
1478 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1479 (get_irn_arity(n) > 0));
/* Returns true for a Phi0: a zero-argument Phi placeholder that only
   exists while the graph is in phase_building. */
1484 int is_Phi0 (const ir_node *n) {
1487 return ((get_irn_op(n) == op_Phi) &&
1488 (get_irn_arity(n) == 0) &&
1489 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Returns the raw predecessor array of a Phi; the +1 skips in[0],
   which holds the block. */
1493 get_Phi_preds_arr (ir_node *node) {
1494 assert (node->op == op_Phi);
1495 return (ir_node **)&(get_irn_in(node)[1]);
/* Number of data predecessors of a Phi (or Phi0). */
1499 get_Phi_n_preds (ir_node *node) {
1500 assert (is_Phi(node) || is_Phi0(node));
1501 return (get_irn_arity(node));
/* NOTE(review): body elided in this listing. */
1505 void set_Phi_n_preds (ir_node *node, int n_preds) {
1506 assert (node->op == op_Phi);
/* Returns the data predecessor at position pos. */
1511 get_Phi_pred (ir_node *node, int pos) {
1512 assert (is_Phi(node) || is_Phi0(node));
1513 return get_irn_n(node, pos);
/* Sets the data predecessor at position pos. */
1517 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1518 assert (is_Phi(node) || is_Phi0(node));
1519 set_irn_n(node, pos, pred);
1523 int is_memop(ir_node *node) {
1524 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1527 ir_node *get_memop_mem (ir_node *node) {
1528 assert(is_memop(node));
1529 return get_irn_n(node, 0);
1532 void set_memop_mem (ir_node *node, ir_node *mem) {
1533 assert(is_memop(node));
1534 set_irn_n(node, 0, mem);
1537 ir_node *get_memop_ptr (ir_node *node) {
1538 assert(is_memop(node));
1539 return get_irn_n(node, 1);
1542 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1543 assert(is_memop(node));
1544 set_irn_n(node, 1, ptr);
1548 get_Load_mem (ir_node *node) {
1549 assert (node->op == op_Load);
1550 return get_irn_n(node, 0);
1554 set_Load_mem (ir_node *node, ir_node *mem) {
1555 assert (node->op == op_Load);
1556 set_irn_n(node, 0, mem);
1560 get_Load_ptr (ir_node *node) {
1561 assert (node->op == op_Load);
1562 return get_irn_n(node, 1);
1566 set_Load_ptr (ir_node *node, ir_node *ptr) {
1567 assert (node->op == op_Load);
1568 set_irn_n(node, 1, ptr);
1572 get_Load_mode (ir_node *node) {
1573 assert (node->op == op_Load);
1574 return node->attr.load.load_mode;
1578 set_Load_mode (ir_node *node, ir_mode *mode) {
1579 assert (node->op == op_Load);
1580 node->attr.load.load_mode = mode;
1584 get_Load_volatility (ir_node *node) {
1585 assert (node->op == op_Load);
1586 return node->attr.load.volatility;
1590 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1591 assert (node->op == op_Load);
1592 node->attr.load.volatility = volatility;
1597 get_Store_mem (ir_node *node) {
1598 assert (node->op == op_Store);
1599 return get_irn_n(node, 0);
1603 set_Store_mem (ir_node *node, ir_node *mem) {
1604 assert (node->op == op_Store);
1605 set_irn_n(node, 0, mem);
1609 get_Store_ptr (ir_node *node) {
1610 assert (node->op == op_Store);
1611 return get_irn_n(node, 1);
1615 set_Store_ptr (ir_node *node, ir_node *ptr) {
1616 assert (node->op == op_Store);
1617 set_irn_n(node, 1, ptr);
1621 get_Store_value (ir_node *node) {
1622 assert (node->op == op_Store);
1623 return get_irn_n(node, 2);
1627 set_Store_value (ir_node *node, ir_node *value) {
1628 assert (node->op == op_Store);
1629 set_irn_n(node, 2, value);
1633 get_Store_volatility (ir_node *node) {
1634 assert (node->op == op_Store);
1635 return node->attr.store.volatility;
1639 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1640 assert (node->op == op_Store);
1641 node->attr.store.volatility = volatility;
1646 get_Alloc_mem (ir_node *node) {
1647 assert (node->op == op_Alloc);
1648 return get_irn_n(node, 0);
1652 set_Alloc_mem (ir_node *node, ir_node *mem) {
1653 assert (node->op == op_Alloc);
1654 set_irn_n(node, 0, mem);
1658 get_Alloc_size (ir_node *node) {
1659 assert (node->op == op_Alloc);
1660 return get_irn_n(node, 1);
1664 set_Alloc_size (ir_node *node, ir_node *size) {
1665 assert (node->op == op_Alloc);
1666 set_irn_n(node, 1, size);
1670 get_Alloc_type (ir_node *node) {
1671 assert (node->op == op_Alloc);
1672 return node->attr.a.type = skip_tid(node->attr.a.type);
1676 set_Alloc_type (ir_node *node, type *tp) {
1677 assert (node->op == op_Alloc);
1678 node->attr.a.type = tp;
1682 get_Alloc_where (ir_node *node) {
1683 assert (node->op == op_Alloc);
1684 return node->attr.a.where;
1688 set_Alloc_where (ir_node *node, where_alloc where) {
1689 assert (node->op == op_Alloc);
1690 node->attr.a.where = where;
1695 get_Free_mem (ir_node *node) {
1696 assert (node->op == op_Free);
1697 return get_irn_n(node, 0);
1701 set_Free_mem (ir_node *node, ir_node *mem) {
1702 assert (node->op == op_Free);
1703 set_irn_n(node, 0, mem);
1707 get_Free_ptr (ir_node *node) {
1708 assert (node->op == op_Free);
1709 return get_irn_n(node, 1);
1713 set_Free_ptr (ir_node *node, ir_node *ptr) {
1714 assert (node->op == op_Free);
1715 set_irn_n(node, 1, ptr);
1719 get_Free_size (ir_node *node) {
1720 assert (node->op == op_Free);
1721 return get_irn_n(node, 2);
1725 set_Free_size (ir_node *node, ir_node *size) {
1726 assert (node->op == op_Free);
1727 set_irn_n(node, 2, size);
1731 get_Free_type (ir_node *node) {
1732 assert (node->op == op_Free);
1733 return node->attr.f.type = skip_tid(node->attr.f.type);
1737 set_Free_type (ir_node *node, type *tp) {
1738 assert (node->op == op_Free);
1739 node->attr.f.type = tp;
1743 get_Free_where (ir_node *node) {
1744 assert (node->op == op_Free);
1745 return node->attr.f.where;
1749 set_Free_where (ir_node *node, where_alloc where) {
1750 assert (node->op == op_Free);
1751 node->attr.f.where = where;
/* Returns the raw predecessor array of a Sync; the +1 skips in[0]. */
1755 get_Sync_preds_arr (ir_node *node) {
1756 assert (node->op == op_Sync);
1757 return (ir_node **)&(get_irn_in(node)[1]);
/* Number of memory predecessors of a Sync. */
1761 get_Sync_n_preds (ir_node *node) {
1762 assert (node->op == op_Sync);
1763 return (get_irn_arity(node));
/* NOTE(review): body elided in this listing. */
1768 set_Sync_n_preds (ir_node *node, int n_preds) {
1769 assert (node->op == op_Sync);
/* Predecessor access by position. */
1774 get_Sync_pred (ir_node *node, int pos) {
1775 assert (node->op == op_Sync);
1776 return get_irn_n(node, pos);
1780 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1781 assert (node->op == op_Sync);
1782 set_irn_n(node, pos, pred);
/* Computes the static type of the value a Proj node produces:
   Proj(Proj(Start)) yields the method parameter type,
   Proj(Proj(Call)) yields the method result type, and Proj(Load)
   yields the entity type when the load address is a Sel. */
1785 type *get_Proj_type(ir_node *n)
1788 ir_node *pred = get_Proj_pred(n);
/* NOTE(review): declarations of 'tp' and 'pred_pred' and several of
   this switch's case labels are elided in this listing. */
1790 switch (get_irn_opcode(pred)) {
1793 /* Deal with Start / Call here: we need to know the Proj Nr. */
1794 assert(get_irn_mode(pred) == mode_T);
1795 pred_pred = get_Proj_pred(pred);
1796 if (get_irn_op(pred_pred) == op_Start) {
/* Parameter projection: look the type up in the graph's method type. */
1797 type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1798 tp = get_method_param_type(mtp, get_Proj_proj(n));
1799 } else if (get_irn_op(pred_pred) == op_Call) {
/* Result projection: look the type up in the call's method type. */
1800 type *mtp = get_Call_type(pred_pred);
1801 tp = get_method_res_type(mtp, get_Proj_proj(n));
/* Direct projections of Start/Call carry no value type here. */
1804 case iro_Start: break;
1805 case iro_Call: break;
1807 ir_node *a = get_Load_ptr(pred);
1808 if (get_irn_op(a) == op_Sel)
1809 tp = get_entity_type(get_Sel_entity(a));
1818 get_Proj_pred (const ir_node *node) {
1819 assert (is_Proj(node));
1820 return get_irn_n(node, 0);
1824 set_Proj_pred (ir_node *node, ir_node *pred) {
1825 assert (is_Proj(node));
1826 set_irn_n(node, 0, pred);
1830 get_Proj_proj (const ir_node *node) {
1831 assert (is_Proj(node));
1832 if (get_irn_opcode(node) == iro_Proj) {
1833 return node->attr.proj;
1835 assert(get_irn_opcode(node) == iro_Filter);
1836 return node->attr.filter.proj;
1841 set_Proj_proj (ir_node *node, long proj) {
1842 assert (node->op == op_Proj);
1843 node->attr.proj = proj;
/* Returns the raw predecessor array of a Tuple; the +1 skips in[0]. */
1847 get_Tuple_preds_arr (ir_node *node) {
1848 assert (node->op == op_Tuple);
1849 return (ir_node **)&(get_irn_in(node)[1]);
/* Number of predecessors of a Tuple. */
1853 get_Tuple_n_preds (ir_node *node) {
1854 assert (node->op == op_Tuple);
1855 return (get_irn_arity(node));
/* NOTE(review): body elided in this listing. */
1860 set_Tuple_n_preds (ir_node *node, int n_preds) {
1861 assert (node->op == op_Tuple);
/* Predecessor access by position. */
1866 get_Tuple_pred (ir_node *node, int pos) {
1867 assert (node->op == op_Tuple);
1868 return get_irn_n(node, pos);
1872 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1873 assert (node->op == op_Tuple);
1874 set_irn_n(node, pos, pred);
1878 get_Id_pred (ir_node *node) {
1879 assert (node->op == op_Id);
1880 return get_irn_n(node, 0);
1884 set_Id_pred (ir_node *node, ir_node *pred) {
1885 assert (node->op == op_Id);
1886 set_irn_n(node, 0, pred);
1889 ir_node *get_Confirm_value (ir_node *node) {
1890 assert (node->op == op_Confirm);
1891 return get_irn_n(node, 0);
1893 void set_Confirm_value (ir_node *node, ir_node *value) {
1894 assert (node->op == op_Confirm);
1895 set_irn_n(node, 0, value);
1897 ir_node *get_Confirm_bound (ir_node *node) {
1898 assert (node->op == op_Confirm);
1899 return get_irn_n(node, 1);
1901 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1902 assert (node->op == op_Confirm);
1903 set_irn_n(node, 0, bound);
1905 pn_Cmp get_Confirm_cmp (ir_node *node) {
1906 assert (node->op == op_Confirm);
1907 return node->attr.confirm_cmp;
1909 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1910 assert (node->op == op_Confirm);
1911 node->attr.confirm_cmp = cmp;
/* Returns the predecessor of a Filter node.
   NOTE(review): the return statement is elided in this listing. */
1916 get_Filter_pred (ir_node *node) {
1917 assert(node->op == op_Filter);
/* Sets the predecessor of a Filter node.
   NOTE(review): the assignment is elided in this listing. */
1921 set_Filter_pred (ir_node *node, ir_node *pred) {
1922 assert(node->op == op_Filter);
/* The projection number of a Filter (it plays the Proj role in the
   interprocedural view). */
1926 get_Filter_proj(ir_node *node) {
1927 assert(node->op == op_Filter);
1928 return node->attr.filter.proj;
1931 set_Filter_proj (ir_node *node, long proj) {
1932 assert(node->op == op_Filter);
1933 node->attr.filter.proj = proj;
1936 /* Don't use get_irn_arity, get_irn_n in implementation as access
1937 shall work independent of view!!! */
/* Installs the interprocedural (call-graph) predecessor array of a
   Filter node.  in_cg[0] keeps the block pointer; the arity real
   predecessors follow at in_cg[1..arity]. */
1938 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1939 assert(node->op == op_Filter);
/* (Re)allocate on the graph's obstack only when the cg array is
   missing or its arity changed; backedge flags start out cleared. */
1940 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1941 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1942 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1943 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1944 node->attr.filter.in_cg[0] = node->in[0];
1946 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets a single interprocedural predecessor; pos is 0-based, the +1
   skips the block entry at in_cg[0]. */
1949 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1950 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1951 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1952 node->attr.filter.in_cg[pos + 1] = pred;
/* Number of interprocedural predecessors (block entry excluded). */
1954 int get_Filter_n_cg_preds(ir_node *node) {
1955 assert(node->op == op_Filter && node->attr.filter.in_cg);
1956 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Returns the interprocedural predecessor at pos (0-based). */
1958 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1960 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1962 arity = ARR_LEN(node->attr.filter.in_cg);
1963 assert(pos < arity - 1);
1964 return node->attr.filter.in_cg[pos + 1];
/* Selector operand of a Mux.
   NOTE(review): the return statement is elided in this listing;
   presumably it reads node->in[1] — confirm against the full source. */
1968 ir_node *get_Mux_sel (ir_node *node) {
1969 assert(node->op == op_Mux);
/* NOTE(review): the assignment is elided in this listing;
   presumably node->in[1] = sel — confirm against the full source. */
1972 void set_Mux_sel (ir_node *node, ir_node *sel) {
1973 assert(node->op == op_Mux);
/* Value produced when the selector is false (in[2]).
   NOTE(review): the getter's return statement is elided here. */
1977 ir_node *get_Mux_false (ir_node *node) {
1978 assert(node->op == op_Mux);
1981 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1982 assert(node->op == op_Mux);
1983 node->in[2] = ir_false;
/* Value produced when the selector is true (in[3]).
   NOTE(review): the getter's return statement is elided here. */
1986 ir_node *get_Mux_true (ir_node *node) {
1987 assert(node->op == op_Mux);
1990 void set_Mux_true (ir_node *node, ir_node *ir_true) {
1991 assert(node->op == op_Mux);
1992 node->in[3] = ir_true;
/* Returns the ir_graph a node belongs to by walking to its block,
   which stores the irg in its attributes.
   NOTE(review): the return type line is elided in this listing. */
1997 get_irn_irg(const ir_node *node) {
1998 if (! is_Block(node))
1999 node = get_nodes_block(node);
/* During optimization a Bad node may stand where a block is expected;
   step once more to reach the real block. */
2000 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2001 node = get_nodes_block(node);
2002 assert(get_irn_op(node) == op_Block);
2003 return node->attr.block.irg;
2007 /*----------------------------------------------------------------*/
2008 /* Auxiliary routines */
2009 /*----------------------------------------------------------------*/
/* Returns the predecessor of a Proj node, otherwise the node itself.
   NOTE(review): the 'else' fall-through return is elided in this
   listing, as are those of the other skip_* helpers below. */
2012 skip_Proj (ir_node *node) {
2013 /* don't assert node !!! */
2014 if (node && is_Proj(node)) {
2015 return get_Proj_pred(node);
/* Resolves Proj(Tuple) combinations to the selected Tuple operand,
   recursing through nested Tuples.  A no-op when the normalization
   optimization is switched off. */
2022 skip_Tuple (ir_node *node) {
2025 if (!get_opt_normalize()) return node;
2027 node = skip_Id(node);
2028 if (get_irn_op(node) == op_Proj) {
2029 pred = skip_Id(get_Proj_pred(node));
2030 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
2031 pred = skip_Id(skip_Tuple(pred));
2032 else if (get_irn_op(pred) == op_Tuple)
2033 return get_Tuple_pred(pred, get_Proj_proj(node));
2038 /* returns operand of node if node is a Cast */
2039 ir_node *skip_Cast (ir_node *node) {
2040 if (node && get_irn_op(node) == op_Cast)
2041 return get_Cast_op(node);
2045 /* returns operand of node if node is a Confirm */
2046 ir_node *skip_Confirm (ir_node *node) {
2047 if (node && get_irn_op(node) == op_Confirm)
2048 return get_Confirm_value(node);
2052 /* skip all high-level ops */
2053 ir_node *skip_HighLevel(ir_node *node) {
2054 if (node && is_op_highlevel(get_irn_op(node)))
2055 return get_irn_n(node, 0);
2060 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2061 than any other approach, as Id chains are resolved and all point to the real node, or
2062 all id's are self loops. */
/* NOTE(review): two definitions of skip_Id follow; in the full source
   one of them is presumably disabled by the preprocessor (the guard
   lines are elided in this listing) — confirm before editing. */
2064 skip_Id (ir_node *node) {
2065 /* don't assert node !!! */
2067 if (!get_opt_normalize()) return node;
2069 /* Don't use get_Id_pred: We get into an endless loop for
2070 self-referencing Ids. */
/* in[0+1] is the first real predecessor; in[0] holds the block. */
2071 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2072 ir_node *rem_pred = node->in[0+1];
2075 assert (get_irn_arity (node) > 0);
/* Temporarily make the node self-referencing so a cyclic Id chain
   terminates the recursion below. */
2077 node->in[0+1] = node;
2078 res = skip_Id(rem_pred);
2079 if (res->op == op_Id) /* self-loop */ return node;
2081 node->in[0+1] = res;
2088 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2089 than any other approach, as Id chains are resolved and all point to the real node, or
2090 all id's are self loops. */
2092 skip_Id (ir_node *node) {
2094 /* don't assert node !!! */
/* Fast paths: not an Id, normalization off, or predecessor already
   a non-Id node. */
2096 if (!node || (node->op != op_Id)) return node;
2098 if (!get_opt_normalize()) return node;
2100 /* Don't use get_Id_pred: We get into an endless loop for
2101 self-referencing Ids. */
2102 pred = node->in[0+1];
2104 if (pred->op != op_Id) return pred;
2106 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2107 ir_node *rem_pred, *res;
2109 if (pred->op != op_Id) return pred; /* shortcut */
2112 assert (get_irn_arity (node) > 0);
/* Shorten the chain: point at ourselves while resolving, then patch
   this Id to reference the chain's final target. */
2114 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2115 res = skip_Id(rem_pred);
2116 if (res->op == op_Id) /* self-loop */ return node;
2118 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Out-of-line predicate wrappers: the parentheses around the names
   suppress macro expansion, yielding callable functions for the
   corresponding _is_* inline versions.
   NOTE(review): the 'int' return-type lines are elided in this listing. */
2127 (is_Bad)(const ir_node *node) {
2128 return _is_Bad(node);
2132 (is_Const)(const ir_node *node) {
2133 return _is_Const(node);
2137 (is_no_Block)(const ir_node *node) {
2138 return _is_no_Block(node);
2142 (is_Block)(const ir_node *node) {
2143 return _is_Block(node);
2146 /* returns true if node is a Unknown node. */
2148 (is_Unknown)(const ir_node *node) {
2149 return _is_Unknown(node);
/* A Filter counts as a Proj outside the interprocedural view. */
2153 is_Proj (const ir_node *node) {
2155 return node->op == op_Proj
2156 || (!get_interprocedural_view() && node->op == op_Filter);
2159 /* Returns true if the operation manipulates control flow. */
2161 is_cfop(const ir_node *node) {
2162 return is_cfopcode(get_irn_op(node));
2165 /* Returns true if the operation manipulates interprocedural control flow:
2166 CallBegin, EndReg, EndExcept */
2167 int is_ip_cfop(const ir_node *node) {
2168 return is_ip_cfopcode(get_irn_op(node));
2171 /* Returns true if the operation can change the control flow because
2174 is_fragile_op(const ir_node *node) {
2175 return is_op_fragile(get_irn_op(node));
2178 /* Returns the memory operand of fragile operations. */
2179 ir_node *get_fragile_op_mem(ir_node *node) {
2180 assert(node && is_fragile_op(node));
/* NOTE(review): the case labels of this switch are elided in this
   listing; the visible branch returns operand 0 as the memory input. */
2182 switch (get_irn_opcode (node)) {
2191 return get_irn_n(node, 0);
2196 assert(0 && "should not be reached");
2201 /* Returns true if the operation is a forking control flow operation. */
2203 is_forking_op(const ir_node *node) {
2204 return is_op_forking(get_irn_op(node));
/* Out-of-line version of the get_irn_type macro. */
2207 type *(get_irn_type)(ir_node *node) {
2208 return _get_irn_type(node);
2211 /** the get_type operation must be always implemented */
/* NOTE(review): body elided in this listing; presumably returns a
   default/unknown type — confirm against the full source. */
2212 static type *get_Null_type(ir_node *n) {
2216 /* set the get_type operation */
/* Installs the opcode-specific get_type callback on an ir_op; all
   other opcodes fall back to get_Null_type.
   NOTE(review): the switch header and the return are elided here. */
2217 ir_op *firm_set_default_get_type(ir_op *op)
2220 case iro_Const: op->get_type = get_Const_type; break;
2221 case iro_SymConst: op->get_type = get_SymConst_value_type; break;
2222 case iro_Cast: op->get_type = get_Cast_type; break;
2223 case iro_Proj: op->get_type = get_Proj_type; break;
2224 default: op->get_type = get_Null_type; break;
2229 #ifdef DEBUG_libfirm
/* Debugging helper: prints a node, its block and all predecessors
   to stdout. */
2230 void dump_irn (ir_node *n) {
2231 int i, arity = get_irn_arity(n);
2232 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* get_irn_n(n, -1) yields the node's block. */
2234 ir_node *pred = get_irn_n(n, -1);
2235 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2236 get_irn_node_nr(pred), (void *)pred);
2238 printf(" preds: \n");
2239 for (i = 0; i < arity; ++i) {
2240 ir_node *pred = get_irn_n(n, i);
2241 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2242 get_irn_node_nr(pred), (void *)pred);
2246 #else /* DEBUG_libfirm */
/* Release build: dumping is a no-op. */
2247 void dump_irn (ir_node *n) {}
2248 #endif /* DEBUG_libfirm */