2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
36 #include "irgraph_t.h"
38 #include "irbackedge_t.h"
42 #include "iredgekinds.h"
43 #include "iredges_t.h"
/* some constants fixing the positions of nodes predecessors
   in the in array */
#define CALL_PARAM_OFFSET     2
#define FUNCCALL_PARAM_OFFSET 1
#define SEL_INDEX_OFFSET      2
#define RETURN_RESULT_OFFSET  1  /* mem is not a result */
#define END_KEEPALIVE_OFFSET  0
/** String names of the 16 pn_Cmp relation codes, indexed by code value. */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from an pnc constant.
 */
const char *get_pnc_string(int pnc) {
	assert(pnc >= 0 && pnc <
	       (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
	return pnc_name_arr[pnc];
}
74 * Calculates the negated (Complement(R)) pnc condition.
76 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode) {
79 /* do NOT add the Uo bit for non-floating point values */
80 if (! mode_is_float(mode))
86 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
87 pn_Cmp get_inversed_pnc(long pnc) {
88 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
89 long lesser = pnc & pn_Cmp_Lt;
90 long greater = pnc & pn_Cmp_Gt;
92 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;


/* register new space for every node */
unsigned firm_register_additional_node_data(unsigned size) {
	assert(!forbid_new_data && "Too late to register additional node data");

	if (forbid_new_data)
		return 0;

	return firm_add_node_size += size;
}
123 /* Forbid the addition of new data to an ir node. */
128 * irnode constructor.
129 * Create a new irnode in irg, with an op, mode, arity and
130 * some incoming irnodes.
131 * If arity is negative, a node with a dynamic array is created.
134 new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
135 int arity, ir_node **in)
138 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
142 assert(irg && op && mode);
143 p = obstack_alloc(irg->obst, node_size);
144 memset(p, 0, node_size);
145 res = (ir_node *)(p + firm_add_node_size);
147 res->kind = k_ir_node;
151 res->node_idx = irg_register_node_idx(irg, res);
156 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
158 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
159 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
163 set_irn_dbg_info(res, db);
167 res->node_nr = get_irp_new_node_nr();
170 for (i = 0; i < EDGE_KIND_LAST; ++i)
171 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
173 /* don't put this into the for loop, arity is -1 for some nodes! */
174 edges_notify_edge(res, -1, res->in[0], NULL, irg);
175 for (i = 1; i <= arity; ++i)
176 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
178 hook_new_node(irg, res);
183 /*-- getting some parameters from ir_nodes --*/
186 (is_ir_node)(const void *thing) {
187 return _is_ir_node(thing);
191 (get_irn_intra_arity)(const ir_node *node) {
192 return _get_irn_intra_arity(node);
196 (get_irn_inter_arity)(const ir_node *node) {
197 return _get_irn_inter_arity(node);
200 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
203 (get_irn_arity)(const ir_node *node) {
204 return _get_irn_arity(node);
207 /* Returns the array with ins. This array is shifted with respect to the
208 array accessed by get_irn_n: The block operand is at position 0 not -1.
209 (@@@ This should be changed.)
210 The order of the predecessors in this array is not guaranteed, except that
211 lists of operands as predecessors of Block or arguments of a Call are
214 get_irn_in(const ir_node *node) {
216 if (get_interprocedural_view()) { /* handle Filter and Block specially */
217 if (get_irn_opcode(node) == iro_Filter) {
218 assert(node->attr.filter.in_cg);
219 return node->attr.filter.in_cg;
220 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
221 return node->attr.block.in_cg;
223 /* else fall through */
229 set_irn_in(ir_node *node, int arity, ir_node **in) {
232 ir_graph *irg = current_ir_graph;
234 if (get_interprocedural_view()) { /* handle Filter and Block specially */
235 ir_opcode code = get_irn_opcode(node);
236 if (code == iro_Filter) {
237 assert(node->attr.filter.in_cg);
238 pOld_in = &node->attr.filter.in_cg;
239 } else if (code == iro_Block && node->attr.block.in_cg) {
240 pOld_in = &node->attr.block.in_cg;
248 for (i = 0; i < arity; i++) {
249 if (i < ARR_LEN(*pOld_in)-1)
250 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
252 edges_notify_edge(node, i, in[i], NULL, irg);
254 for (;i < ARR_LEN(*pOld_in)-1; i++) {
255 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
258 if (arity != ARR_LEN(*pOld_in) - 1) {
259 ir_node * block = (*pOld_in)[0];
260 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
261 (*pOld_in)[0] = block;
263 fix_backedges(irg->obst, node);
265 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
269 (get_irn_intra_n)(const ir_node *node, int n) {
270 return _get_irn_intra_n (node, n);
274 (get_irn_inter_n)(const ir_node *node, int n) {
275 return _get_irn_inter_n (node, n);
278 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
281 (get_irn_n)(const ir_node *node, int n) {
282 return _get_irn_n(node, n);
286 set_irn_n (ir_node *node, int n, ir_node *in) {
287 assert(node && node->kind == k_ir_node);
289 assert(n < get_irn_arity(node));
290 assert(in && in->kind == k_ir_node);
292 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
293 /* Change block pred in both views! */
294 node->in[n + 1] = in;
295 assert(node->attr.filter.in_cg);
296 node->attr.filter.in_cg[n + 1] = in;
299 if (get_interprocedural_view()) { /* handle Filter and Block specially */
300 if (get_irn_opcode(node) == iro_Filter) {
301 assert(node->attr.filter.in_cg);
302 node->attr.filter.in_cg[n + 1] = in;
304 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
305 node->attr.block.in_cg[n + 1] = in;
308 /* else fall through */
312 hook_set_irn_n(node, n, in, node->in[n + 1]);
314 /* Here, we rely on src and tgt being in the current ir graph */
315 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
317 node->in[n + 1] = in;
320 int add_irn_n(ir_node *node, ir_node *in)
323 ir_graph *irg = get_irn_irg(node);
325 assert(node->op->opar == oparity_dynamic);
326 pos = ARR_LEN(node->in) - 1;
327 ARR_APP1(ir_node *, node->in, in);
328 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
331 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
337 (get_irn_deps)(const ir_node *node)
339 return _get_irn_deps(node);
343 (get_irn_dep)(const ir_node *node, int pos)
345 return _get_irn_dep(node, pos);
349 (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
351 _set_irn_dep(node, pos, dep);
354 int add_irn_dep(ir_node *node, ir_node *dep)
358 if (node->deps == NULL) {
359 node->deps = NEW_ARR_F(ir_node *, 1);
365 for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
366 if(node->deps[i] == NULL)
369 if(node->deps[i] == dep)
373 if (first_zero >= 0) {
374 node->deps[first_zero] = dep;
377 ARR_APP1(ir_node *, node->deps, dep);
382 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
387 void add_irn_deps(ir_node *tgt, ir_node *src) {
390 for (i = 0, n = get_irn_deps(src); i < n; ++i)
391 add_irn_dep(tgt, get_irn_dep(src, i));
396 (get_irn_mode)(const ir_node *node) {
397 return _get_irn_mode(node);
401 (set_irn_mode)(ir_node *node, ir_mode *mode) {
402 _set_irn_mode(node, mode);
406 get_irn_modecode(const ir_node *node) {
408 return node->mode->code;
411 /** Gets the string representation of the mode .*/
413 get_irn_modename(const ir_node *node) {
415 return get_mode_name(node->mode);
419 get_irn_modeident(const ir_node *node) {
421 return get_mode_ident(node->mode);
425 (get_irn_op)(const ir_node *node) {
426 return _get_irn_op(node);
429 /* should be private to the library: */
431 (set_irn_op)(ir_node *node, ir_op *op) {
432 _set_irn_op(node, op);
436 (get_irn_opcode)(const ir_node *node) {
437 return _get_irn_opcode(node);
441 get_irn_opname(const ir_node *node) {
443 if (is_Phi0(node)) return "Phi0";
444 return get_id_str(node->op->name);
448 get_irn_opident(const ir_node *node) {
450 return node->op->name;
454 (get_irn_visited)(const ir_node *node) {
455 return _get_irn_visited(node);
459 (set_irn_visited)(ir_node *node, unsigned long visited) {
460 _set_irn_visited(node, visited);
464 (mark_irn_visited)(ir_node *node) {
465 _mark_irn_visited(node);
469 (irn_not_visited)(const ir_node *node) {
470 return _irn_not_visited(node);
474 (irn_visited)(const ir_node *node) {
475 return _irn_visited(node);
479 (set_irn_link)(ir_node *node, void *link) {
480 _set_irn_link(node, link);
484 (get_irn_link)(const ir_node *node) {
485 return _get_irn_link(node);
489 (get_irn_pinned)(const ir_node *node) {
490 return _get_irn_pinned(node);
494 (is_irn_pinned_in_irg) (const ir_node *node) {
495 return _is_irn_pinned_in_irg(node);
498 void set_irn_pinned(ir_node *node, op_pin_state state) {
499 /* due to optimization an opt may be turned into a Tuple */
500 if (get_irn_op(node) == op_Tuple)
503 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
504 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
506 node->attr.except.pin_state = state;
509 #ifdef DO_HEAPANALYSIS
510 /* Access the abstract interpretation information of a node.
511 Returns NULL if no such information is available. */
512 struct abstval *get_irn_abst_value(ir_node *n) {
515 /* Set the abstract interpretation information of a node. */
516 void set_irn_abst_value(ir_node *n, struct abstval *os) {
519 struct section *firm_get_irn_section(ir_node *n) {
522 void firm_set_irn_section(ir_node *n, struct section *s) {
526 /* Dummies needed for firmjni. */
527 struct abstval *get_irn_abst_value(ir_node *n) {
531 void set_irn_abst_value(ir_node *n, struct abstval *os) {
535 struct section *firm_get_irn_section(ir_node *n) {
539 void firm_set_irn_section(ir_node *n, struct section *s) {
543 #endif /* DO_HEAPANALYSIS */
546 /* Outputs a unique number for this node */
547 long get_irn_node_nr(const ir_node *node) {
550 return node->node_nr;
552 return (long)PTR_TO_INT(node);
557 get_irn_const_attr(ir_node *node) {
558 assert(node->op == op_Const);
559 return &node->attr.con;
563 get_irn_proj_attr(ir_node *node) {
564 assert(node->op == op_Proj);
565 return node->attr.proj;
569 get_irn_alloc_attr(ir_node *node) {
570 assert(node->op == op_Alloc);
571 return &node->attr.alloc;
575 get_irn_free_attr(ir_node *node) {
576 assert(node->op == op_Free);
577 return &node->attr.free;
581 get_irn_symconst_attr(ir_node *node) {
582 assert(node->op == op_SymConst);
583 return &node->attr.symc;
587 get_irn_call_attr(ir_node *node) {
588 assert(node->op == op_Call);
589 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
593 get_irn_sel_attr(ir_node *node) {
594 assert(node->op == op_Sel);
595 return &node->attr.sel;
599 get_irn_phi0_attr(ir_node *node) {
600 assert(is_Phi0(node));
601 return node->attr.phi0.pos;
605 get_irn_block_attr(ir_node *node) {
606 assert(node->op == op_Block);
607 return &node->attr.block;
611 get_irn_load_attr(ir_node *node) {
612 assert(node->op == op_Load);
613 return &node->attr.load;
617 get_irn_store_attr(ir_node *node) {
618 assert(node->op == op_Store);
619 return &node->attr.store;
623 get_irn_except_attr(ir_node *node) {
624 assert(node->op == op_Div || node->op == op_Quot ||
625 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
626 return &node->attr.except;
629 void *(get_irn_generic_attr)(ir_node *node) {
630 assert(is_ir_node(node));
631 return _get_irn_generic_attr(node);
634 const void *(get_irn_generic_attr_const)(const ir_node *node) {
635 assert(is_ir_node(node));
636 return _get_irn_generic_attr_const(node);
639 unsigned (get_irn_idx)(const ir_node *node) {
640 assert(is_ir_node(node));
641 return _get_irn_idx(node);
644 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
646 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
647 if (get_irn_n(node, i) == arg)
653 /** manipulate fields of individual nodes **/
655 /* this works for all except Block */
657 get_nodes_block(const ir_node *node) {
658 assert(node->op != op_Block);
659 return get_irn_n(node, -1);
663 set_nodes_block(ir_node *node, ir_node *block) {
664 assert(node->op != op_Block);
665 set_irn_n(node, -1, block);
668 /* this works for all except Block */
670 get_nodes_MacroBlock(const ir_node *node) {
671 assert(node->op != op_Block);
672 return get_Block_MacroBlock(get_irn_n(node, -1));
675 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
676 * from Start. If so returns frame type, else Null. */
677 ir_type *is_frame_pointer(const ir_node *n) {
678 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
679 ir_node *start = get_Proj_pred(n);
680 if (is_Start(start)) {
681 return get_irg_frame_type(get_irn_irg(start));
687 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
688 * from Start. If so returns global type, else Null. */
689 ir_type *is_globals_pointer(const ir_node *n) {
690 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
691 ir_node *start = get_Proj_pred(n);
692 if (is_Start(start)) {
693 return get_glob_type();
699 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
700 * from Start. If so returns tls type, else Null. */
701 ir_type *is_tls_pointer(const ir_node *n) {
702 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
703 ir_node *start = get_Proj_pred(n);
704 if (is_Start(start)) {
705 return get_tls_type();
711 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
712 * from Start. If so returns 1, else 0. */
713 int is_value_arg_pointer(const ir_node *n) {
715 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
716 is_Start(get_Proj_pred(n)))
721 /* Returns an array with the predecessors of the Block. Depending on
722 the implementation of the graph data structure this can be a copy of
723 the internal representation of predecessors as well as the internal
724 array itself. Therefore writing to this array might obstruct the ir. */
726 get_Block_cfgpred_arr(ir_node *node) {
727 assert((node->op == op_Block));
728 return (ir_node **)&(get_irn_in(node)[1]);
732 (get_Block_n_cfgpreds)(const ir_node *node) {
733 return _get_Block_n_cfgpreds(node);
737 (get_Block_cfgpred)(const ir_node *node, int pos) {
738 return _get_Block_cfgpred(node, pos);
742 set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
743 assert(node->op == op_Block);
744 set_irn_n(node, pos, pred);
748 (get_Block_cfgpred_block)(const ir_node *node, int pos) {
749 return _get_Block_cfgpred_block(node, pos);
753 get_Block_matured(const ir_node *node) {
754 assert(node->op == op_Block);
755 return (int)node->attr.block.is_matured;
759 set_Block_matured(ir_node *node, int matured) {
760 assert(node->op == op_Block);
761 node->attr.block.is_matured = matured;
765 (get_Block_block_visited)(const ir_node *node) {
766 return _get_Block_block_visited(node);
770 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
771 _set_Block_block_visited(node, visit);
774 /* For this current_ir_graph must be set. */
776 (mark_Block_block_visited)(ir_node *node) {
777 _mark_Block_block_visited(node);
781 (Block_not_block_visited)(const ir_node *node) {
782 return _Block_not_block_visited(node);
786 (Block_block_visited)(const ir_node *node) {
787 return _Block_block_visited(node);
791 get_Block_graph_arr(ir_node *node, int pos) {
792 assert(node->op == op_Block);
793 return node->attr.block.graph_arr[pos+1];
797 set_Block_graph_arr(ir_node *node, int pos, ir_node *value) {
798 assert(node->op == op_Block);
799 node->attr.block.graph_arr[pos+1] = value;
#ifdef INTERPROCEDURAL_VIEW
/* Sets the interprocedural (call-graph) predecessor array of a Block. */
void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[]) {
	assert(node->op == op_Block);
	if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
		node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
		node->attr.block.in_cg[0] = NULL;
		node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
	} else {
		/* Fix backedge array.  fix_backedges() operates depending on
		   interprocedural_view. */
		int ipv = get_interprocedural_view();
		set_interprocedural_view(1);
		fix_backedges(current_ir_graph->obst, node);
		set_interprocedural_view(ipv);
	}
	memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
}

void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred) {
	assert(node->op == op_Block &&
	       node->attr.block.in_cg &&
	       0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
	node->attr.block.in_cg[pos + 1] = pred;
}

ir_node **get_Block_cg_cfgpred_arr(ir_node *node) {
	assert(node->op == op_Block);
	return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
}

int get_Block_cg_n_cfgpreds(const ir_node *node) {
	assert(node->op == op_Block);
	return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
}

ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos) {
	assert(node->op == op_Block && node->attr.block.in_cg);
	return node->attr.block.in_cg[pos + 1];
}

void remove_Block_cg_cfgpred_arr(ir_node *node) {
	assert(node->op == op_Block);
	node->attr.block.in_cg = NULL;
}
#endif
849 ir_node *(set_Block_dead)(ir_node *block) {
850 return _set_Block_dead(block);
853 int (is_Block_dead)(const ir_node *block) {
854 return _is_Block_dead(block);
857 ir_extblk *get_Block_extbb(const ir_node *block) {
859 assert(is_Block(block));
860 res = block->attr.block.extblk;
861 assert(res == NULL || is_ir_extbb(res));
865 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
866 assert(is_Block(block));
867 assert(extblk == NULL || is_ir_extbb(extblk));
868 block->attr.block.extblk = extblk;
871 /* returns the macro block header of a block. */
872 ir_node *get_Block_MacroBlock(const ir_node *block) {
874 assert(is_Block(block));
875 mbh = get_irn_n(block, -1);
876 /* once macro block header is respected by all optimizations,
877 this assert can be removed */
882 /* returns the macro block header of a node. */
883 ir_node *get_irn_MacroBlock(const ir_node *n) {
885 n = get_nodes_block(n);
886 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
890 return get_Block_MacroBlock(n);
893 /* returns the graph of a Block. */
894 ir_graph *get_Block_irg(const ir_node *block) {
895 assert(is_Block(block));
896 return block->attr.block.irg;
899 int has_Block_label(const ir_node *block) {
900 assert(is_Block(block));
901 return block->attr.block.has_label;
904 ir_label_t get_Block_label(const ir_node *block) {
905 assert(is_Block(block));
906 return block->attr.block.label;
909 void set_Block_label(ir_node *block, ir_label_t label) {
910 assert(is_Block(block));
911 block->attr.block.has_label = 1;
912 block->attr.block.label = label;
916 get_End_n_keepalives(const ir_node *end) {
917 assert(end->op == op_End);
918 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
922 get_End_keepalive(const ir_node *end, int pos) {
923 assert(end->op == op_End);
924 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
928 add_End_keepalive(ir_node *end, ir_node *ka) {
929 assert(end->op == op_End);
930 assert((is_Phi(ka) || is_Proj(ka) || is_Block(ka) || is_irn_keep(ka)) && "Only Phi, Block or Keep nodes can be kept alive!");
935 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
936 assert(end->op == op_End);
937 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
940 /* Set new keep-alives */
941 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
943 ir_graph *irg = get_irn_irg(end);
945 /* notify that edges are deleted */
946 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
947 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
949 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
951 for (i = 0; i < n; ++i) {
952 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
953 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
957 /* Set new keep-alives from old keep-alives, skipping irn */
958 void remove_End_keepalive(ir_node *end, ir_node *irn) {
959 int n = get_End_n_keepalives(end);
963 NEW_ARR_A(ir_node *, in, n);
965 for (idx = i = 0; i < n; ++i) {
966 ir_node *old_ka = get_End_keepalive(end, i);
973 /* set new keep-alives */
974 set_End_keepalives(end, idx, in);
978 free_End(ir_node *end) {
979 assert(end->op == op_End);
982 end->in = NULL; /* @@@ make sure we get an error if we use the
983 in array afterwards ... */
986 /* Return the target address of an IJmp */
987 ir_node *get_IJmp_target(const ir_node *ijmp) {
988 assert(ijmp->op == op_IJmp);
989 return get_irn_n(ijmp, 0);
992 /** Sets the target address of an IJmp */
993 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
994 assert(ijmp->op == op_IJmp);
995 set_irn_n(ijmp, 0, tgt);
999 > Implementing the case construct (which is where the constant Proj node is
1000 > important) involves far more than simply determining the constant values.
1001 > We could argue that this is more properly a function of the translator from
1002 > Firm to the target machine. That could be done if there was some way of
1003 > projecting "default" out of the Cond node.
1004 I know it's complicated.
1005 Basically there are two problems:
1006 - determining the gaps between the Projs
1007 - determining the biggest case constant to know the proj number for
1009 I see several solutions:
1010 1. Introduce a ProjDefault node. Solves both problems.
1011 This means to extend all optimizations executed during construction.
1012 2. Give the Cond node for switch two flavors:
1013 a) there are no gaps in the Projs (existing flavor)
1014 b) gaps may exist, default proj is still the Proj with the largest
1015 projection number. This covers also the gaps.
1016 3. Fix the semantic of the Cond to that of 2b)
1018 Solution 2 seems to be the best:
1019 Computing the gaps in the Firm representation is not too hard, i.e.,
1020 libFIRM can implement a routine that transforms between the two
1021 flavours. This is also possible for 1) but 2) does not require to
1022 change any existing optimization.
1023 Further it should be far simpler to determine the biggest constant than
1024 to compute all gaps.
1025 I don't want to choose 3) as 2a) seems to have advantages for
1026 dataflow analysis and 3) does not allow to convert the representation to
1030 get_Cond_selector(const ir_node *node) {
1031 assert(node->op == op_Cond);
1032 return get_irn_n(node, 0);
1036 set_Cond_selector(ir_node *node, ir_node *selector) {
1037 assert(node->op == op_Cond);
1038 set_irn_n(node, 0, selector);
1042 get_Cond_kind(const ir_node *node) {
1043 assert(node->op == op_Cond);
1044 return node->attr.cond.kind;
1048 set_Cond_kind(ir_node *node, cond_kind kind) {
1049 assert(node->op == op_Cond);
1050 node->attr.cond.kind = kind;
1054 get_Cond_defaultProj(const ir_node *node) {
1055 assert(node->op == op_Cond);
1056 return node->attr.cond.default_proj;
1060 get_Return_mem(const ir_node *node) {
1061 assert(node->op == op_Return);
1062 return get_irn_n(node, 0);
1066 set_Return_mem(ir_node *node, ir_node *mem) {
1067 assert(node->op == op_Return);
1068 set_irn_n(node, 0, mem);
1072 get_Return_n_ress(const ir_node *node) {
1073 assert(node->op == op_Return);
1074 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1078 get_Return_res_arr(ir_node *node) {
1079 assert((node->op == op_Return));
1080 if (get_Return_n_ress(node) > 0)
1081 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1088 set_Return_n_res(ir_node *node, int results) {
1089 assert(node->op == op_Return);
1094 get_Return_res(const ir_node *node, int pos) {
1095 assert(node->op == op_Return);
1096 assert(get_Return_n_ress(node) > pos);
1097 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1101 set_Return_res(ir_node *node, int pos, ir_node *res){
1102 assert(node->op == op_Return);
1103 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1106 tarval *(get_Const_tarval)(const ir_node *node) {
1107 return _get_Const_tarval(node);
1111 set_Const_tarval(ir_node *node, tarval *con) {
1112 assert(node->op == op_Const);
1113 node->attr.con.tv = con;
1116 int (is_Const_null)(const ir_node *node) {
1117 return _is_Const_null(node);
1120 int (is_Const_one)(const ir_node *node) {
1121 return _is_Const_one(node);
1124 int (is_Const_all_one)(const ir_node *node) {
1125 return _is_Const_all_one(node);
1129 /* The source language type. Must be an atomic type. Mode of type must
1130 be mode of node. For tarvals from entities type must be pointer to
1133 get_Const_type(ir_node *node) {
1134 assert(node->op == op_Const);
1135 node->attr.con.tp = skip_tid(node->attr.con.tp);
1136 return node->attr.con.tp;
1140 set_Const_type(ir_node *node, ir_type *tp) {
1141 assert(node->op == op_Const);
1142 if (tp != firm_unknown_type) {
1143 assert(is_atomic_type(tp));
1144 assert(get_type_mode(tp) == get_irn_mode(node));
1146 node->attr.con.tp = tp;
1151 get_SymConst_kind(const ir_node *node) {
1152 assert(node->op == op_SymConst);
1153 return node->attr.symc.num;
1157 set_SymConst_kind(ir_node *node, symconst_kind num) {
1158 assert(node->op == op_SymConst);
1159 node->attr.symc.num = num;
1163 get_SymConst_type(ir_node *node) {
1164 assert((node->op == op_SymConst) &&
1165 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1166 return node->attr.symc.sym.type_p = skip_tid(node->attr.symc.sym.type_p);
1170 set_SymConst_type(ir_node *node, ir_type *tp) {
1171 assert((node->op == op_SymConst) &&
1172 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1173 node->attr.symc.sym.type_p = tp;
1177 get_SymConst_name(const ir_node *node) {
1178 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1179 return node->attr.symc.sym.ident_p;
1183 set_SymConst_name(ir_node *node, ident *name) {
1184 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1185 node->attr.symc.sym.ident_p = name;
1189 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1190 ir_entity *get_SymConst_entity(const ir_node *node) {
1191 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1192 return node->attr.symc.sym.entity_p;
1195 void set_SymConst_entity(ir_node *node, ir_entity *ent) {
1196 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1197 node->attr.symc.sym.entity_p = ent;
1200 ir_enum_const *get_SymConst_enum(const ir_node *node) {
1201 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1202 return node->attr.symc.sym.enum_p;
1205 void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
1206 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1207 node->attr.symc.sym.enum_p = ec;
1210 union symconst_symbol
1211 get_SymConst_symbol(const ir_node *node) {
1212 assert(node->op == op_SymConst);
1213 return node->attr.symc.sym;
1217 set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
1218 assert(node->op == op_SymConst);
1219 node->attr.symc.sym = sym;
1222 ir_label_t get_SymConst_label(const ir_node *node) {
1223 assert(node->op == op_SymConst && SYMCONST_HAS_LABEL(get_SymConst_kind(node)));
1224 return node->attr.symc.sym.label;
1227 void set_SymConst_label(ir_node *node, ir_label_t label) {
1228 assert(node->op == op_SymConst && SYMCONST_HAS_LABEL(get_SymConst_kind(node)));
1229 node->attr.symc.sym.label = label;
1233 get_SymConst_value_type(ir_node *node) {
1234 assert(node->op == op_SymConst);
1235 if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
1236 return node->attr.symc.tp;
1240 set_SymConst_value_type(ir_node *node, ir_type *tp) {
1241 assert(node->op == op_SymConst);
1242 node->attr.symc.tp = tp;
1246 get_Sel_mem(const ir_node *node) {
1247 assert(node->op == op_Sel);
1248 return get_irn_n(node, 0);
1252 set_Sel_mem(ir_node *node, ir_node *mem) {
1253 assert(node->op == op_Sel);
1254 set_irn_n(node, 0, mem);
1258 get_Sel_ptr(const ir_node *node) {
1259 assert(node->op == op_Sel);
1260 return get_irn_n(node, 1);
1264 set_Sel_ptr(ir_node *node, ir_node *ptr) {
1265 assert(node->op == op_Sel);
1266 set_irn_n(node, 1, ptr);
1270 get_Sel_n_indexs(const ir_node *node) {
1271 assert(node->op == op_Sel);
1272 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1276 get_Sel_index_arr(ir_node *node) {
1277 assert((node->op == op_Sel));
1278 if (get_Sel_n_indexs(node) > 0)
1279 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1285 get_Sel_index(const ir_node *node, int pos) {
1286 assert(node->op == op_Sel);
1287 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1291 set_Sel_index(ir_node *node, int pos, ir_node *index) {
1292 assert(node->op == op_Sel);
1293 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1297 get_Sel_entity(const ir_node *node) {
1298 assert(node->op == op_Sel);
1299 return node->attr.sel.ent;
1302 ir_entity *_get_Sel_entity(ir_node *node) {
1303 return get_Sel_entity(node);
1307 set_Sel_entity(ir_node *node, ir_entity *ent) {
1308 assert(node->op == op_Sel);
1309 node->attr.sel.ent = ent;
1313 /* For unary and binary arithmetic operations the access to the
1314 operands can be factored out. Left is the first, right the
1315 second arithmetic value as listed in tech report 0999-33.
1316 unops are: Minus, Abs, Not, Conv, Cast
1317 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1318 Shr, Shrs, Rotate, Cmp */
1322 get_Call_mem(const ir_node *node) {
1323 assert(node->op == op_Call);
1324 return get_irn_n(node, 0);
1328 set_Call_mem(ir_node *node, ir_node *mem) {
1329 assert(node->op == op_Call);
1330 set_irn_n(node, 0, mem);
1334 get_Call_ptr(const ir_node *node) {
1335 assert(node->op == op_Call);
1336 return get_irn_n(node, 1);
1340 set_Call_ptr(ir_node *node, ir_node *ptr) {
1341 assert(node->op == op_Call);
1342 set_irn_n(node, 1, ptr);
1346 get_Call_param_arr(ir_node *node) {
1347 assert(node->op == op_Call);
1348 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1352 get_Call_n_params(const ir_node *node) {
1353 assert(node->op == op_Call);
1354 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1358 get_Call_arity(const ir_node *node) {
1359 assert(node->op == op_Call);
1360 return get_Call_n_params(node);
1364 set_Call_arity(ir_node *node, ir_node *arity) {
1365 assert(node->op == op_Call);
1370 get_Call_param(const ir_node *node, int pos) {
1371 assert(node->op == op_Call);
1372 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1376 set_Call_param(ir_node *node, int pos, ir_node *param) {
1377 assert(node->op == op_Call);
1378 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1382 get_Call_type(ir_node *node) {
1383 assert(node->op == op_Call);
1384 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1388 set_Call_type(ir_node *node, ir_type *tp) {
1389 assert(node->op == op_Call);
1390 assert((get_unknown_type() == tp) || is_Method_type(tp));
1391 node->attr.call.cld_tp = tp;
1394 int Call_has_callees(const ir_node *node) {
1395 assert(node && node->op == op_Call);
1396 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1397 (node->attr.call.callee_arr != NULL));
1400 int get_Call_n_callees(const ir_node *node) {
1401 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1402 return ARR_LEN(node->attr.call.callee_arr);
1405 ir_entity *get_Call_callee(const ir_node *node, int pos) {
1406 assert(pos >= 0 && pos < get_Call_n_callees(node));
1407 return node->attr.call.callee_arr[pos];
1410 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr) {
1411 assert(node->op == op_Call);
1412 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1413 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1415 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1418 void remove_Call_callee_arr(ir_node *node) {
1419 assert(node->op == op_Call);
1420 node->attr.call.callee_arr = NULL;
1423 ir_node *get_CallBegin_ptr(const ir_node *node) {
1424 assert(node->op == op_CallBegin);
1425 return get_irn_n(node, 0);
1428 void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
1429 assert(node->op == op_CallBegin);
1430 set_irn_n(node, 0, ptr);
1433 ir_node *get_CallBegin_call(const ir_node *node) {
1434 assert(node->op == op_CallBegin);
1435 return node->attr.callbegin.call;
1438 void set_CallBegin_call(ir_node *node, ir_node *call) {
1439 assert(node->op == op_CallBegin);
1440 node->attr.callbegin.call = call;
1445 ir_node * get_##OP##_left(const ir_node *node) { \
1446 assert(node->op == op_##OP); \
1447 return get_irn_n(node, node->op->op_index); \
1449 void set_##OP##_left(ir_node *node, ir_node *left) { \
1450 assert(node->op == op_##OP); \
1451 set_irn_n(node, node->op->op_index, left); \
1453 ir_node *get_##OP##_right(const ir_node *node) { \
1454 assert(node->op == op_##OP); \
1455 return get_irn_n(node, node->op->op_index + 1); \
1457 void set_##OP##_right(ir_node *node, ir_node *right) { \
1458 assert(node->op == op_##OP); \
1459 set_irn_n(node, node->op->op_index + 1, right); \
1463 ir_node *get_##OP##_op(const ir_node *node) { \
1464 assert(node->op == op_##OP); \
1465 return get_irn_n(node, node->op->op_index); \
1467 void set_##OP##_op(ir_node *node, ir_node *op) { \
1468 assert(node->op == op_##OP); \
1469 set_irn_n(node, node->op->op_index, op); \
1472 #define BINOP_MEM(OP) \
1476 get_##OP##_mem(const ir_node *node) { \
1477 assert(node->op == op_##OP); \
1478 return get_irn_n(node, 0); \
1482 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1483 assert(node->op == op_##OP); \
1484 set_irn_n(node, 0, mem); \
1490 ir_mode *get_##OP##_resmode(const ir_node *node) { \
1491 assert(node->op == op_##OP); \
1492 return node->attr.divmod.res_mode; \
1495 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
1496 assert(node->op == op_##OP); \
1497 node->attr.divmod.res_mode = mode; \
1523 int get_Conv_strict(const ir_node *node) {
1524 assert(node->op == op_Conv);
1525 return node->attr.conv.strict;
1528 void set_Conv_strict(ir_node *node, int strict_flag) {
1529 assert(node->op == op_Conv);
1530 node->attr.conv.strict = (char)strict_flag;
1534 get_Cast_type(ir_node *node) {
1535 assert(node->op == op_Cast);
1536 node->attr.cast.totype = skip_tid(node->attr.cast.totype);
1537 return node->attr.cast.totype;
1541 set_Cast_type(ir_node *node, ir_type *to_tp) {
1542 assert(node->op == op_Cast);
1543 node->attr.cast.totype = to_tp;
1547 /* Checks for upcast.
1549 * Returns true if the Cast node casts a class type to a super type.
1551 int is_Cast_upcast(ir_node *node) {
1552 ir_type *totype = get_Cast_type(node);
1553 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1555 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1558 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1559 totype = get_pointer_points_to_type(totype);
1560 fromtype = get_pointer_points_to_type(fromtype);
1565 if (!is_Class_type(totype)) return 0;
1566 return is_SubClass_of(fromtype, totype);
1569 /* Checks for downcast.
1571 * Returns true if the Cast node casts a class type to a sub type.
1573 int is_Cast_downcast(ir_node *node) {
1574 ir_type *totype = get_Cast_type(node);
1575 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1577 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1580 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1581 totype = get_pointer_points_to_type(totype);
1582 fromtype = get_pointer_points_to_type(fromtype);
1587 if (!is_Class_type(totype)) return 0;
1588 return is_SubClass_of(totype, fromtype);
1592 (is_unop)(const ir_node *node) {
1593 return _is_unop(node);
1597 get_unop_op(const ir_node *node) {
1598 if (node->op->opar == oparity_unary)
1599 return get_irn_n(node, node->op->op_index);
1601 assert(node->op->opar == oparity_unary);
1606 set_unop_op(ir_node *node, ir_node *op) {
1607 if (node->op->opar == oparity_unary)
1608 set_irn_n(node, node->op->op_index, op);
1610 assert(node->op->opar == oparity_unary);
1614 (is_binop)(const ir_node *node) {
1615 return _is_binop(node);
1619 get_binop_left(const ir_node *node) {
1620 assert(node->op->opar == oparity_binary);
1621 return get_irn_n(node, node->op->op_index);
1625 set_binop_left(ir_node *node, ir_node *left) {
1626 assert(node->op->opar == oparity_binary);
1627 set_irn_n(node, node->op->op_index, left);
1631 get_binop_right(const ir_node *node) {
1632 assert(node->op->opar == oparity_binary);
1633 return get_irn_n(node, node->op->op_index + 1);
1637 set_binop_right(ir_node *node, ir_node *right) {
1638 assert(node->op->opar == oparity_binary);
1639 set_irn_n(node, node->op->op_index + 1, right);
1643 (is_Phi)(const ir_node *n) {
1647 int is_Phi0(const ir_node *n) {
1650 return ((get_irn_op(n) == op_Phi) &&
1651 (get_irn_arity(n) == 0) &&
1652 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1656 get_Phi_preds_arr(ir_node *node) {
1657 assert(node->op == op_Phi);
1658 return (ir_node **)&(get_irn_in(node)[1]);
1662 get_Phi_n_preds(const ir_node *node) {
1663 assert(is_Phi(node) || is_Phi0(node));
1664 return (get_irn_arity(node));
1668 void set_Phi_n_preds(ir_node *node, int n_preds) {
1669 assert(node->op == op_Phi);
1674 get_Phi_pred(const ir_node *node, int pos) {
1675 assert(is_Phi(node) || is_Phi0(node));
1676 return get_irn_n(node, pos);
1680 set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
1681 assert(is_Phi(node) || is_Phi0(node));
1682 set_irn_n(node, pos, pred);
1686 int is_memop(const ir_node *node) {
1687 ir_opcode code = get_irn_opcode(node);
1688 return (code == iro_Load || code == iro_Store);
1691 ir_node *get_memop_mem(const ir_node *node) {
1692 assert(is_memop(node));
1693 return get_irn_n(node, 0);
1696 void set_memop_mem(ir_node *node, ir_node *mem) {
1697 assert(is_memop(node));
1698 set_irn_n(node, 0, mem);
1701 ir_node *get_memop_ptr(const ir_node *node) {
1702 assert(is_memop(node));
1703 return get_irn_n(node, 1);
1706 void set_memop_ptr(ir_node *node, ir_node *ptr) {
1707 assert(is_memop(node));
1708 set_irn_n(node, 1, ptr);
1712 get_Load_mem(const ir_node *node) {
1713 assert(node->op == op_Load);
1714 return get_irn_n(node, 0);
1718 set_Load_mem(ir_node *node, ir_node *mem) {
1719 assert(node->op == op_Load);
1720 set_irn_n(node, 0, mem);
1724 get_Load_ptr(const ir_node *node) {
1725 assert(node->op == op_Load);
1726 return get_irn_n(node, 1);
1730 set_Load_ptr(ir_node *node, ir_node *ptr) {
1731 assert(node->op == op_Load);
1732 set_irn_n(node, 1, ptr);
1736 get_Load_mode(const ir_node *node) {
1737 assert(node->op == op_Load);
1738 return node->attr.load.load_mode;
1742 set_Load_mode(ir_node *node, ir_mode *mode) {
1743 assert(node->op == op_Load);
1744 node->attr.load.load_mode = mode;
1748 get_Load_volatility(const ir_node *node) {
1749 assert(node->op == op_Load);
1750 return node->attr.load.volatility;
1754 set_Load_volatility(ir_node *node, ir_volatility volatility) {
1755 assert(node->op == op_Load);
1756 node->attr.load.volatility = volatility;
1760 get_Load_align(const ir_node *node) {
1761 assert(node->op == op_Load);
1762 return node->attr.load.aligned;
1766 set_Load_align(ir_node *node, ir_align align) {
1767 assert(node->op == op_Load);
1768 node->attr.load.aligned = align;
1773 get_Store_mem(const ir_node *node) {
1774 assert(node->op == op_Store);
1775 return get_irn_n(node, 0);
1779 set_Store_mem(ir_node *node, ir_node *mem) {
1780 assert(node->op == op_Store);
1781 set_irn_n(node, 0, mem);
1785 get_Store_ptr(const ir_node *node) {
1786 assert(node->op == op_Store);
1787 return get_irn_n(node, 1);
1791 set_Store_ptr(ir_node *node, ir_node *ptr) {
1792 assert(node->op == op_Store);
1793 set_irn_n(node, 1, ptr);
1797 get_Store_value(const ir_node *node) {
1798 assert(node->op == op_Store);
1799 return get_irn_n(node, 2);
1803 set_Store_value(ir_node *node, ir_node *value) {
1804 assert(node->op == op_Store);
1805 set_irn_n(node, 2, value);
1809 get_Store_volatility(const ir_node *node) {
1810 assert(node->op == op_Store);
1811 return node->attr.store.volatility;
1815 set_Store_volatility(ir_node *node, ir_volatility volatility) {
1816 assert(node->op == op_Store);
1817 node->attr.store.volatility = volatility;
1821 get_Store_align(const ir_node *node) {
1822 assert(node->op == op_Store);
1823 return node->attr.store.aligned;
1827 set_Store_align(ir_node *node, ir_align align) {
1828 assert(node->op == op_Store);
1829 node->attr.store.aligned = align;
1834 get_Alloc_mem(const ir_node *node) {
1835 assert(node->op == op_Alloc);
1836 return get_irn_n(node, 0);
1840 set_Alloc_mem(ir_node *node, ir_node *mem) {
1841 assert(node->op == op_Alloc);
1842 set_irn_n(node, 0, mem);
1846 get_Alloc_size(const ir_node *node) {
1847 assert(node->op == op_Alloc);
1848 return get_irn_n(node, 1);
1852 set_Alloc_size(ir_node *node, ir_node *size) {
1853 assert(node->op == op_Alloc);
1854 set_irn_n(node, 1, size);
1858 get_Alloc_type(ir_node *node) {
1859 assert(node->op == op_Alloc);
1860 return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
1864 set_Alloc_type(ir_node *node, ir_type *tp) {
1865 assert(node->op == op_Alloc);
1866 node->attr.alloc.type = tp;
1870 get_Alloc_where(const ir_node *node) {
1871 assert(node->op == op_Alloc);
1872 return node->attr.alloc.where;
1876 set_Alloc_where(ir_node *node, ir_where_alloc where) {
1877 assert(node->op == op_Alloc);
1878 node->attr.alloc.where = where;
1883 get_Free_mem(const ir_node *node) {
1884 assert(node->op == op_Free);
1885 return get_irn_n(node, 0);
1889 set_Free_mem(ir_node *node, ir_node *mem) {
1890 assert(node->op == op_Free);
1891 set_irn_n(node, 0, mem);
1895 get_Free_ptr(const ir_node *node) {
1896 assert(node->op == op_Free);
1897 return get_irn_n(node, 1);
1901 set_Free_ptr(ir_node *node, ir_node *ptr) {
1902 assert(node->op == op_Free);
1903 set_irn_n(node, 1, ptr);
1907 get_Free_size(const ir_node *node) {
1908 assert(node->op == op_Free);
1909 return get_irn_n(node, 2);
1913 set_Free_size(ir_node *node, ir_node *size) {
1914 assert(node->op == op_Free);
1915 set_irn_n(node, 2, size);
1919 get_Free_type(ir_node *node) {
1920 assert(node->op == op_Free);
1921 return node->attr.free.type = skip_tid(node->attr.free.type);
1925 set_Free_type(ir_node *node, ir_type *tp) {
1926 assert(node->op == op_Free);
1927 node->attr.free.type = tp;
1931 get_Free_where(const ir_node *node) {
1932 assert(node->op == op_Free);
1933 return node->attr.free.where;
1937 set_Free_where(ir_node *node, ir_where_alloc where) {
1938 assert(node->op == op_Free);
1939 node->attr.free.where = where;
1942 ir_node **get_Sync_preds_arr(ir_node *node) {
1943 assert(node->op == op_Sync);
1944 return (ir_node **)&(get_irn_in(node)[1]);
1947 int get_Sync_n_preds(const ir_node *node) {
1948 assert(node->op == op_Sync);
1949 return (get_irn_arity(node));
1953 void set_Sync_n_preds(ir_node *node, int n_preds) {
1954 assert(node->op == op_Sync);
1958 ir_node *get_Sync_pred(const ir_node *node, int pos) {
1959 assert(node->op == op_Sync);
1960 return get_irn_n(node, pos);
1963 void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
1964 assert(node->op == op_Sync);
1965 set_irn_n(node, pos, pred);
1968 /* Add a new Sync predecessor */
1969 void add_Sync_pred(ir_node *node, ir_node *pred) {
1970 assert(node->op == op_Sync);
1971 add_irn_n(node, pred);
1974 /* Returns the source language type of a Proj node. */
1975 ir_type *get_Proj_type(ir_node *n) {
1976 ir_type *tp = firm_unknown_type;
1977 ir_node *pred = get_Proj_pred(n);
1979 switch (get_irn_opcode(pred)) {
1982 /* Deal with Start / Call here: we need to know the Proj Nr. */
1983 assert(get_irn_mode(pred) == mode_T);
1984 pred_pred = get_Proj_pred(pred);
1985 if (get_irn_op(pred_pred) == op_Start) {
1986 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1987 tp = get_method_param_type(mtp, get_Proj_proj(n));
1988 } else if (get_irn_op(pred_pred) == op_Call) {
1989 ir_type *mtp = get_Call_type(pred_pred);
1990 tp = get_method_res_type(mtp, get_Proj_proj(n));
1993 case iro_Start: break;
1994 case iro_Call: break;
1996 ir_node *a = get_Load_ptr(pred);
1998 tp = get_entity_type(get_Sel_entity(a));
2007 get_Proj_pred(const ir_node *node) {
2008 assert(is_Proj(node));
2009 return get_irn_n(node, 0);
2013 set_Proj_pred(ir_node *node, ir_node *pred) {
2014 assert(is_Proj(node));
2015 set_irn_n(node, 0, pred);
2019 get_Proj_proj(const ir_node *node) {
2020 assert(is_Proj(node));
2021 if (get_irn_opcode(node) == iro_Proj) {
2022 return node->attr.proj;
2024 assert(get_irn_opcode(node) == iro_Filter);
2025 return node->attr.filter.proj;
2030 set_Proj_proj(ir_node *node, long proj) {
2031 assert(node->op == op_Proj);
2032 node->attr.proj = proj;
2036 get_Tuple_preds_arr(ir_node *node) {
2037 assert(node->op == op_Tuple);
2038 return (ir_node **)&(get_irn_in(node)[1]);
2042 get_Tuple_n_preds(const ir_node *node) {
2043 assert(node->op == op_Tuple);
2044 return (get_irn_arity(node));
2049 set_Tuple_n_preds(ir_node *node, int n_preds) {
2050 assert(node->op == op_Tuple);
2055 get_Tuple_pred(const ir_node *node, int pos) {
2056 assert(node->op == op_Tuple);
2057 return get_irn_n(node, pos);
2061 set_Tuple_pred(ir_node *node, int pos, ir_node *pred) {
2062 assert(node->op == op_Tuple);
2063 set_irn_n(node, pos, pred);
2067 get_Id_pred(const ir_node *node) {
2068 assert(node->op == op_Id);
2069 return get_irn_n(node, 0);
2073 set_Id_pred(ir_node *node, ir_node *pred) {
2074 assert(node->op == op_Id);
2075 set_irn_n(node, 0, pred);
2078 ir_node *get_Confirm_value(const ir_node *node) {
2079 assert(node->op == op_Confirm);
2080 return get_irn_n(node, 0);
2083 void set_Confirm_value(ir_node *node, ir_node *value) {
2084 assert(node->op == op_Confirm);
2085 set_irn_n(node, 0, value);
2088 ir_node *get_Confirm_bound(const ir_node *node) {
2089 assert(node->op == op_Confirm);
2090 return get_irn_n(node, 1);
2093 void set_Confirm_bound(ir_node *node, ir_node *bound) {
2094 assert(node->op == op_Confirm);
2095 set_irn_n(node, 0, bound);
2098 pn_Cmp get_Confirm_cmp(const ir_node *node) {
2099 assert(node->op == op_Confirm);
2100 return node->attr.confirm.cmp;
2103 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
2104 assert(node->op == op_Confirm);
2105 node->attr.confirm.cmp = cmp;
2109 get_Filter_pred(ir_node *node) {
2110 assert(node->op == op_Filter);
2115 set_Filter_pred(ir_node *node, ir_node *pred) {
2116 assert(node->op == op_Filter);
2121 get_Filter_proj(ir_node *node) {
2122 assert(node->op == op_Filter);
2123 return node->attr.filter.proj;
2127 set_Filter_proj(ir_node *node, long proj) {
2128 assert(node->op == op_Filter);
2129 node->attr.filter.proj = proj;
2132 /* Don't use get_irn_arity, get_irn_n in implementation as access
2133 shall work independent of view!!! */
2134 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in) {
2135 assert(node->op == op_Filter);
2136 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2137 ir_graph *irg = get_irn_irg(node);
2138 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2139 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
2140 node->attr.filter.in_cg[0] = node->in[0];
2142 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2145 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2146 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2147 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2148 node->attr.filter.in_cg[pos + 1] = pred;
2151 int get_Filter_n_cg_preds(ir_node *node) {
2152 assert(node->op == op_Filter && node->attr.filter.in_cg);
2153 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2156 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2158 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2160 arity = ARR_LEN(node->attr.filter.in_cg);
2161 assert(pos < arity - 1);
2162 return node->attr.filter.in_cg[pos + 1];
2166 ir_node *get_Mux_sel(const ir_node *node) {
2167 if (node->op == op_Psi) {
2168 assert(get_irn_arity(node) == 3);
2169 return get_Psi_cond(node, 0);
2171 assert(node->op == op_Mux);
2175 void set_Mux_sel(ir_node *node, ir_node *sel) {
2176 if (node->op == op_Psi) {
2177 assert(get_irn_arity(node) == 3);
2178 set_Psi_cond(node, 0, sel);
2180 assert(node->op == op_Mux);
2185 ir_node *get_Mux_false(const ir_node *node) {
2186 if (node->op == op_Psi) {
2187 assert(get_irn_arity(node) == 3);
2188 return get_Psi_default(node);
2190 assert(node->op == op_Mux);
2194 void set_Mux_false(ir_node *node, ir_node *ir_false) {
2195 if (node->op == op_Psi) {
2196 assert(get_irn_arity(node) == 3);
2197 set_Psi_default(node, ir_false);
2199 assert(node->op == op_Mux);
2200 node->in[2] = ir_false;
2204 ir_node *get_Mux_true(const ir_node *node) {
2205 if (node->op == op_Psi) {
2206 assert(get_irn_arity(node) == 3);
2207 return get_Psi_val(node, 0);
2209 assert(node->op == op_Mux);
2213 void set_Mux_true(ir_node *node, ir_node *ir_true) {
2214 if (node->op == op_Psi) {
2215 assert(get_irn_arity(node) == 3);
2216 set_Psi_val(node, 0, ir_true);
2218 assert(node->op == op_Mux);
2219 node->in[3] = ir_true;
2224 ir_node *get_Psi_cond(const ir_node *node, int pos) {
2225 assert(node->op == op_Psi);
2226 assert(pos < get_Psi_n_conds(node));
2227 return get_irn_n(node, 2 * pos);
2230 void set_Psi_cond(ir_node *node, int pos, ir_node *cond) {
2231 assert(node->op == op_Psi);
2232 assert(pos < get_Psi_n_conds(node));
2233 set_irn_n(node, 2 * pos, cond);
2236 ir_node *get_Psi_val(const ir_node *node, int pos) {
2237 assert(node->op == op_Psi);
2238 assert(pos < get_Psi_n_conds(node));
2239 return get_irn_n(node, 2 * pos + 1);
2242 void set_Psi_val(ir_node *node, int pos, ir_node *val) {
2243 assert(node->op == op_Psi);
2244 assert(pos < get_Psi_n_conds(node));
2245 set_irn_n(node, 2 * pos + 1, val);
2248 ir_node *get_Psi_default(const ir_node *node) {
2249 int def_pos = get_irn_arity(node) - 1;
2250 assert(node->op == op_Psi);
2251 return get_irn_n(node, def_pos);
2254 void set_Psi_default(ir_node *node, ir_node *val) {
2255 int def_pos = get_irn_arity(node);
2256 assert(node->op == op_Psi);
2257 set_irn_n(node, def_pos, val);
2260 int (get_Psi_n_conds)(const ir_node *node) {
2261 return _get_Psi_n_conds(node);
2265 ir_node *get_CopyB_mem(const ir_node *node) {
2266 assert(node->op == op_CopyB);
2267 return get_irn_n(node, 0);
2270 void set_CopyB_mem(ir_node *node, ir_node *mem) {
2271 assert(node->op == op_CopyB);
2272 set_irn_n(node, 0, mem);
2275 ir_node *get_CopyB_dst(const ir_node *node) {
2276 assert(node->op == op_CopyB);
2277 return get_irn_n(node, 1);
2280 void set_CopyB_dst(ir_node *node, ir_node *dst) {
2281 assert(node->op == op_CopyB);
2282 set_irn_n(node, 1, dst);
2285 ir_node *get_CopyB_src(const ir_node *node) {
2286 assert(node->op == op_CopyB);
2287 return get_irn_n(node, 2);
2290 void set_CopyB_src(ir_node *node, ir_node *src) {
2291 assert(node->op == op_CopyB);
2292 set_irn_n(node, 2, src);
2295 ir_type *get_CopyB_type(ir_node *node) {
2296 assert(node->op == op_CopyB);
2297 return node->attr.copyb.data_type = skip_tid(node->attr.copyb.data_type);
2300 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2301 assert(node->op == op_CopyB && data_type);
2302 node->attr.copyb.data_type = data_type;
2307 get_InstOf_type(ir_node *node) {
2308 assert(node->op == op_InstOf);
2309 return node->attr.instof.type = skip_tid(node->attr.instof.type);
2313 set_InstOf_type(ir_node *node, ir_type *type) {
2314 assert(node->op == op_InstOf);
2315 node->attr.instof.type = type;
2319 get_InstOf_store(const ir_node *node) {
2320 assert(node->op == op_InstOf);
2321 return get_irn_n(node, 0);
2325 set_InstOf_store(ir_node *node, ir_node *obj) {
2326 assert(node->op == op_InstOf);
2327 set_irn_n(node, 0, obj);
2331 get_InstOf_obj(const ir_node *node) {
2332 assert(node->op == op_InstOf);
2333 return get_irn_n(node, 1);
2337 set_InstOf_obj(ir_node *node, ir_node *obj) {
2338 assert(node->op == op_InstOf);
2339 set_irn_n(node, 1, obj);
2342 /* Returns the memory input of a Raise operation. */
2344 get_Raise_mem(const ir_node *node) {
2345 assert(node->op == op_Raise);
2346 return get_irn_n(node, 0);
2350 set_Raise_mem(ir_node *node, ir_node *mem) {
2351 assert(node->op == op_Raise);
2352 set_irn_n(node, 0, mem);
2356 get_Raise_exo_ptr(const ir_node *node) {
2357 assert(node->op == op_Raise);
2358 return get_irn_n(node, 1);
2362 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr) {
2363 assert(node->op == op_Raise);
2364 set_irn_n(node, 1, exo_ptr);
2369 /* Returns the memory input of a Bound operation. */
2370 ir_node *get_Bound_mem(const ir_node *bound) {
2371 assert(bound->op == op_Bound);
2372 return get_irn_n(bound, 0);
2375 void set_Bound_mem(ir_node *bound, ir_node *mem) {
2376 assert(bound->op == op_Bound);
2377 set_irn_n(bound, 0, mem);
2380 /* Returns the index input of a Bound operation. */
2381 ir_node *get_Bound_index(const ir_node *bound) {
2382 assert(bound->op == op_Bound);
2383 return get_irn_n(bound, 1);
2386 void set_Bound_index(ir_node *bound, ir_node *idx) {
2387 assert(bound->op == op_Bound);
2388 set_irn_n(bound, 1, idx);
2391 /* Returns the lower bound input of a Bound operation. */
2392 ir_node *get_Bound_lower(const ir_node *bound) {
2393 assert(bound->op == op_Bound);
2394 return get_irn_n(bound, 2);
2397 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2398 assert(bound->op == op_Bound);
2399 set_irn_n(bound, 2, lower);
2402 /* Returns the upper bound input of a Bound operation. */
2403 ir_node *get_Bound_upper(const ir_node *bound) {
2404 assert(bound->op == op_Bound);
2405 return get_irn_n(bound, 3);
2408 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2409 assert(bound->op == op_Bound);
2410 set_irn_n(bound, 3, upper);
2413 /* Return the operand of a Pin node. */
2414 ir_node *get_Pin_op(const ir_node *pin) {
2415 assert(pin->op == op_Pin);
2416 return get_irn_n(pin, 0);
2419 void set_Pin_op(ir_node *pin, ir_node *node) {
2420 assert(pin->op == op_Pin);
2421 set_irn_n(pin, 0, node);
2424 /* Return the assembler text of an ASM pseudo node. */
2425 ident *get_ASM_text(const ir_node *node) {
2426 assert(node->op == op_ASM);
2427 return node->attr.assem.asm_text;
2430 /* Return the number of input constraints for an ASM node. */
2431 int get_ASM_n_input_constraints(const ir_node *node) {
2432 assert(node->op == op_ASM);
2433 return ARR_LEN(node->attr.assem.inputs);
2436 /* Return the input constraints for an ASM node. This is a flexible array. */
2437 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node) {
2438 assert(node->op == op_ASM);
2439 return node->attr.assem.inputs;
2442 /* Return the number of output constraints for an ASM node. */
2443 int get_ASM_n_output_constraints(const ir_node *node) {
2444 assert(node->op == op_ASM);
2445 return ARR_LEN(node->attr.assem.outputs);
2448 /* Return the output constraints for an ASM node. */
2449 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node) {
2450 assert(node->op == op_ASM);
2451 return node->attr.assem.outputs;
2454 /* Return the number of clobbered registers for an ASM node. */
2455 int get_ASM_n_clobbers(const ir_node *node) {
2456 assert(node->op == op_ASM);
2457 return ARR_LEN(node->attr.assem.clobber);
2460 /* Return the list of clobbered registers for an ASM node. */
2461 ident **get_ASM_clobbers(const ir_node *node) {
2462 assert(node->op == op_ASM);
2463 return node->attr.assem.clobber;
2466 /* returns the graph of a node */
2468 get_irn_irg(const ir_node *node) {
2470 * Do not use get_nodes_Block() here, because this
2471 * will check the pinned state.
2472 * However even a 'wrong' block is always in the proper
2475 if (! is_Block(node))
2476 node = get_irn_n(node, -1);
2477 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2478 node = get_irn_n(node, -1);
2479 assert(get_irn_op(node) == op_Block);
2480 return node->attr.block.irg;
2484 /*----------------------------------------------------------------*/
2485 /* Auxiliary routines */
2486 /*----------------------------------------------------------------*/
2489 skip_Proj(ir_node *node) {
2490 /* don't assert node !!! */
2495 node = get_Proj_pred(node);
2501 skip_Proj_const(const ir_node *node) {
2502 /* don't assert node !!! */
2507 node = get_Proj_pred(node);
2513 skip_Tuple(ir_node *node) {
2517 if (!get_opt_normalize()) return node;
2520 if (get_irn_op(node) == op_Proj) {
2521 pred = get_Proj_pred(node);
2522 op = get_irn_op(pred);
2525 * Looks strange but calls get_irn_op() only once
2526 * in most often cases.
2528 if (op == op_Proj) { /* nested Tuple ? */
2529 pred = skip_Tuple(pred);
2530 op = get_irn_op(pred);
2532 if (op == op_Tuple) {
2533 node = get_Tuple_pred(pred, get_Proj_proj(node));
2536 } else if (op == op_Tuple) {
2537 node = get_Tuple_pred(pred, get_Proj_proj(node));
2544 /* returns operand of node if node is a Cast */
2545 ir_node *skip_Cast(ir_node *node) {
2546 if (get_irn_op(node) == op_Cast)
2547 return get_Cast_op(node);
2551 /* returns operand of node if node is a Confirm */
2552 ir_node *skip_Confirm(ir_node *node) {
2553 if (get_irn_op(node) == op_Confirm)
2554 return get_Confirm_value(node);
2558 /* skip all high-level ops */
2559 ir_node *skip_HighLevel_ops(ir_node *node) {
2560 while (is_op_highlevel(get_irn_op(node))) {
2561 node = get_irn_n(node, 0);
2567 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2568 * than any other approach, as Id chains are resolved and all point to the real node, or
2569 * all id's are self loops.
2571 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2572 * a little bit "hand optimized".
2574 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2577 skip_Id(ir_node *node) {
2579 /* don't assert node !!! */
2581 if (!node || (node->op != op_Id)) return node;
2583 /* Don't use get_Id_pred(): We get into an endless loop for
2584 self-referencing Ids. */
2585 pred = node->in[0+1];
2587 if (pred->op != op_Id) return pred;
2589 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2590 ir_node *rem_pred, *res;
2592 if (pred->op != op_Id) return pred; /* shortcut */
2595 assert(get_irn_arity (node) > 0);
2597 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2598 res = skip_Id(rem_pred);
2599 if (res->op == op_Id) /* self-loop */ return node;
2601 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2608 void skip_Id_and_store(ir_node **node) {
2611 if (!n || (n->op != op_Id)) return;
2613 /* Don't use get_Id_pred(): We get into an endless loop for
2614 self-referencing Ids. */
2619 (is_Bad)(const ir_node *node) {
2620 return _is_Bad(node);
2624 (is_NoMem)(const ir_node *node) {
2625 return _is_NoMem(node);
2629 (is_Minus)(const ir_node *node) {
2630 return _is_Minus(node);
2634 (is_Mod)(const ir_node *node) {
2635 return _is_Mod(node);
2639 (is_Div)(const ir_node *node) {
2640 return _is_Div(node);
2644 (is_DivMod)(const ir_node *node) {
2645 return _is_DivMod(node);
2649 (is_Quot)(const ir_node *node) {
2650 return _is_Quot(node);
2654 (is_Add)(const ir_node *node) {
2655 return _is_Add(node);
2659 (is_And)(const ir_node *node) {
2660 return _is_And(node);
2664 (is_Or)(const ir_node *node) {
2665 return _is_Or(node);
2669 (is_Eor)(const ir_node *node) {
2670 return _is_Eor(node);
2674 (is_Sub)(const ir_node *node) {
2675 return _is_Sub(node);
2679 (is_Shl)(const ir_node *node) {
2680 return _is_Shl(node);
2684 (is_Shr)(const ir_node *node) {
2685 return _is_Shr(node);
2689 (is_Shrs)(const ir_node *node) {
2690 return _is_Shrs(node);
2694 (is_Rot)(const ir_node *node) {
2695 return _is_Rot(node);
2699 (is_Not)(const ir_node *node) {
2700 return _is_Not(node);
2704 (is_Psi)(const ir_node *node) {
2705 return _is_Psi(node);
2709 (is_Tuple)(const ir_node *node) {
2710 return _is_Tuple(node);
2714 (is_Bound)(const ir_node *node) {
2715 return _is_Bound(node);
2719 (is_Start)(const ir_node *node) {
2720 return _is_Start(node);
2724 (is_End)(const ir_node *node) {
2725 return _is_End(node);
2729 (is_Const)(const ir_node *node) {
2730 return _is_Const(node);
2734 (is_Conv)(const ir_node *node) {
2735 return _is_Conv(node);
2739 (is_strictConv)(const ir_node *node) {
2740 return _is_strictConv(node);
2744 (is_Cast)(const ir_node *node) {
2745 return _is_Cast(node);
2749 (is_no_Block)(const ir_node *node) {
2750 return _is_no_Block(node);
2754 (is_Block)(const ir_node *node) {
2755 return _is_Block(node);
2758 /* returns true if node is an Unknown node. */
2760 (is_Unknown)(const ir_node *node) {
2761 return _is_Unknown(node);
2764 /* returns true if node is a Return node. */
2766 (is_Return)(const ir_node *node) {
2767 return _is_Return(node);
2770 /* returns true if node is a Call node. */
2772 (is_Call)(const ir_node *node) {
2773 return _is_Call(node);
2776 /* returns true if node is a Sel node. */
2778 (is_Sel)(const ir_node *node) {
2779 return _is_Sel(node);
2782 /* returns true if node is a Mux node or a Psi with only one condition. */
2784 (is_Mux)(const ir_node *node) {
2785 return _is_Mux(node);
2788 /* returns true if node is a Load node. */
2790 (is_Load)(const ir_node *node) {
2791 return _is_Load(node);
2794 /* returns true if node is a Load node. */
2796 (is_Store)(const ir_node *node) {
2797 return _is_Store(node);
2800 /* returns true if node is a Sync node. */
2802 (is_Sync)(const ir_node *node) {
2803 return _is_Sync(node);
2806 /* Returns true if node is a Confirm node. */
2808 (is_Confirm)(const ir_node *node) {
2809 return _is_Confirm(node);
2812 /* Returns true if node is a Pin node. */
2814 (is_Pin)(const ir_node *node) {
2815 return _is_Pin(node);
2818 /* Returns true if node is a SymConst node. */
2820 (is_SymConst)(const ir_node *node) {
2821 return _is_SymConst(node);
2824 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
2826 (is_SymConst_addr_ent)(const ir_node *node) {
2827 return _is_SymConst_addr_ent(node);
2830 /* Returns true if node is a Cond node. */
2832 (is_Cond)(const ir_node *node) {
2833 return _is_Cond(node);
2837 (is_CopyB)(const ir_node *node) {
2838 return _is_CopyB(node);
2841 /* returns true if node is a Cmp node. */
2843 (is_Cmp)(const ir_node *node) {
2844 return _is_Cmp(node);
2847 /* returns true if node is an Alloc node. */
2849 (is_Alloc)(const ir_node *node) {
2850 return _is_Alloc(node);
2853 /* returns true if a node is a Jmp node. */
2855 (is_Jmp)(const ir_node *node) {
2856 return _is_Jmp(node);
2859 /* returns true if a node is a Raise node. */
2861 (is_Raise)(const ir_node *node) {
2862 return _is_Raise(node);
2865 /* returns true if a node is an ASM node. */
2867 (is_ASM)(const ir_node *node) {
2868 return _is_ASM(node);
2872 (is_Proj)(const ir_node *node) {
2874 return node->op == op_Proj ||
2875 (!get_interprocedural_view() && node->op == op_Filter);
2878 /* Returns true if the operation manipulates control flow. */
2879 int is_cfop(const ir_node *node) {
2880 return is_op_cfopcode(get_irn_op(node));
2883 /* Returns true if the operation manipulates interprocedural control flow:
2884 CallBegin, EndReg, EndExcept */
2885 int is_ip_cfop(const ir_node *node) {
2886 return is_ip_cfopcode(get_irn_op(node));
2889 /* Returns true if the operation can change the control flow because
2892 is_fragile_op(const ir_node *node) {
2893 return is_op_fragile(get_irn_op(node));
2896 /* Returns the memory operand of fragile operations. */
2897 ir_node *get_fragile_op_mem(ir_node *node) {
2898 	assert(node && is_fragile_op(node));
/* NOTE(review): the case labels of this switch (lines 2901-2910 of the
 * original file) are elided in this listing; presumably they enumerate the
 * fragile opcodes (Call, Div, Load, Store, ...) that all fall through to
 * the return below — confirm against the original source. */
2900 	switch (get_irn_opcode(node)) {
2911 		return get_irn_n(node, pn_Generic_M_regular);
/* Reaching here means a fragile op without a memory input — a logic error. */
2916 		assert(0 && "should not be reached");
2921 /* Returns the result mode of a Div operation. */
2922 ir_mode *get_divop_resmod(const ir_node *node) {
2923 switch (get_irn_opcode(node)) {
2924 case iro_Quot : return get_Quot_resmode(node);
2925 case iro_DivMod: return get_DivMod_resmode(node);
2926 case iro_Div : return get_Div_resmode(node);
2927 case iro_Mod : return get_Mod_resmode(node);
2929 assert(0 && "should not be reached");
2934 /* Returns true if the operation is a forking control flow operation. */
2935 int (is_irn_forking)(const ir_node *node) {
2936 return _is_irn_forking(node);
2939 /* Return the type associated with the value produced by n
2940 * if the node remarks this type as it is the case for
2941 * Cast, Const, SymConst and some Proj nodes. */
2942 ir_type *(get_irn_type)(ir_node *node) {
2943 return _get_irn_type(node);
2946 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2948 ir_type *(get_irn_type_attr)(ir_node *node) {
2949 return _get_irn_type_attr(node);
2952 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2953 ir_entity *(get_irn_entity_attr)(ir_node *node) {
2954 return _get_irn_entity_attr(node);
2957 /* Returns non-zero for constant-like nodes. */
2958 int (is_irn_constlike)(const ir_node *node) {
2959 return _is_irn_constlike(node);
2963 * Returns non-zero for nodes that are allowed to have keep-alives and
2964 * are neither Block nor PhiM.
2966 int (is_irn_keep)(const ir_node *node) {
2967 return _is_irn_keep(node);
2971 * Returns non-zero for nodes that are always placed in the start block.
2973 int (is_irn_start_block_placed)(const ir_node *node) {
2974 return _is_irn_start_block_placed(node);
2977 /* Returns non-zero for nodes that are machine operations. */
2978 int (is_irn_machine_op)(const ir_node *node) {
2979 return _is_irn_machine_op(node);
2982 /* Returns non-zero for nodes that are machine operands. */
2983 int (is_irn_machine_operand)(const ir_node *node) {
2984 return _is_irn_machine_operand(node);
2987 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2988 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2989 return _is_irn_machine_user(node, n);
2993 /* Gets the string representation of the jump prediction .*/
2994 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
2997 case COND_JMP_PRED_NONE: return "no prediction";
2998 case COND_JMP_PRED_TRUE: return "true taken";
2999 case COND_JMP_PRED_FALSE: return "false taken";
3003 /* Returns the conditional jump prediction of a Cond node. */
3004 cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond) {
3005 return _get_Cond_jmp_pred(cond);
3008 /* Sets a new conditional jump prediction. */
3009 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
3010 _set_Cond_jmp_pred(cond, pred);
3013 /** the get_type operation must be always implemented and return a firm type */
3014 static ir_type *get_Default_type(ir_node *n) {
3016 return get_unknown_type();
3019 /* Sets the get_type operation for an ir_op_ops. */
3020 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
3022 case iro_Const: ops->get_type = get_Const_type; break;
3023 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
3024 case iro_Cast: ops->get_type = get_Cast_type; break;
3025 case iro_Proj: ops->get_type = get_Proj_type; break;
3027 /* not allowed to be NULL */
3028 if (! ops->get_type)
3029 ops->get_type = get_Default_type;
3035 /** Return the attribute type of a SymConst node if exists */
3036 static ir_type *get_SymConst_attr_type(ir_node *self) {
3037 symconst_kind kind = get_SymConst_kind(self);
3038 if (SYMCONST_HAS_TYPE(kind))
3039 return get_SymConst_type(self);
3043 /** Return the attribute entity of a SymConst node if exists */
3044 static ir_entity *get_SymConst_attr_entity(ir_node *self) {
3045 symconst_kind kind = get_SymConst_kind(self);
3046 if (SYMCONST_HAS_ENT(kind))
3047 return get_SymConst_entity(self);
3051 /** the get_type_attr operation must be always implemented */
3052 static ir_type *get_Null_type(ir_node *n) {
3054 return firm_unknown_type;
3057 /* Sets the get_type operation for an ir_op_ops. */
3058 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
3060 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
3061 case iro_Call: ops->get_type_attr = get_Call_type; break;
3062 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
3063 case iro_Free: ops->get_type_attr = get_Free_type; break;
3064 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
3066 /* not allowed to be NULL */
3067 if (! ops->get_type_attr)
3068 ops->get_type_attr = get_Null_type;
3074 /** the get_entity_attr operation must be always implemented */
3075 static ir_entity *get_Null_ent(ir_node *n) {
3080 /* Sets the get_type operation for an ir_op_ops. */
3081 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
3083 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
3084 case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
3086 /* not allowed to be NULL */
3087 if (! ops->get_entity_attr)
3088 ops->get_entity_attr = get_Null_ent;
3094 /* Sets the debug information of a node. */
3095 void (set_irn_dbg_info)(ir_node *n, dbg_info *db) {
3096 _set_irn_dbg_info(n, db);
3100 * Returns the debug information of an node.
3102 * @param n The node.
3104 dbg_info *(get_irn_dbg_info)(const ir_node *n) {
3105 return _get_irn_dbg_info(n);
3110 #ifdef DEBUG_libfirm
/* Debug helper: prints a node, its block and all predecessors to stdout. */
3111 void dump_irn(const ir_node *n) {
3112 	int i, arity = get_irn_arity(n);
3113 	printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* NOTE(review): line 3114 of the original file is elided in this listing;
 * it presumably guards the block dump below (e.g. skipping it for Block or
 * Bad nodes, since get_irn_n(n, -1) fetches the block) — confirm against
 * the original source. */
3115 	ir_node *pred = get_irn_n(n, -1);
3116 	printf("  block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
3117 		get_irn_node_nr(pred), (void *)pred);
3119 	printf("  preds: \n");
/* Print every data/control predecessor with its position index. */
3120 	for (i = 0; i < arity; ++i) {
3121 		ir_node *pred = get_irn_n(n, i);
3122 		printf("    %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
3123 			get_irn_node_nr(pred), (void *)pred);
3127 #else /* DEBUG_libfirm */
/* Release build: keep the symbol but do nothing. */
3128 void dump_irn(const ir_node *n) { (void) n; }
3129 #endif /* DEBUG_libfirm */