 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
36 #include "irgraph_t.h"
38 #include "irbackedge_t.h"
42 #include "iredgekinds.h"
43 #include "iredges_t.h"
/* some constants fixing the positions of nodes predecessors in the in array */
50 #define CALL_PARAM_OFFSET 2
51 #define FUNCCALL_PARAM_OFFSET 1
52 #define SEL_INDEX_OFFSET 2
53 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
54 #define END_KEEPALIVE_OFFSET 0
/** Names of the pn_Cmp projection numbers, indexed by the pnc value. */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 *
 * @param pnc  the pn_Cmp value; must be in [0, 15]
 */
const char *get_pnc_string(int pnc) {
	assert(pnc >= 0 && pnc <
	       (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
	return pnc_name_arr[pnc];
}
73 * Calculates the negated (Complement(R)) pnc condition.
75 int get_negated_pnc(int pnc, ir_mode *mode) {
78 /* do NOT add the Uo bit for non-floating point values */
79 if (! mode_is_float(mode))
85 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
87 get_inversed_pnc(int pnc) {
88 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
89 int lesser = pnc & pn_Cmp_Lt;
90 int greater = pnc & pn_Cmp_Gt;
92 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
98 * Indicates, whether additional data can be registered to ir nodes.
99 * If set to 1, this is not possible anymore.
101 static int forbid_new_data = 0;
104 * The amount of additional space for custom data to be allocated upon
105 * creating a new node.
107 unsigned firm_add_node_size = 0;
110 /* register new space for every node */
111 unsigned firm_register_additional_node_data(unsigned size) {
112 assert(!forbid_new_data && "Too late to register additional node data");
117 return firm_add_node_size += size;
123 /* Forbid the addition of new data to an ir node. */
128 * irnode constructor.
129 * Create a new irnode in irg, with an op, mode, arity and
130 * some incoming irnodes.
131 * If arity is negative, a node with a dynamic array is created.
134 new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
135 int arity, ir_node **in)
138 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
142 assert(irg && op && mode);
143 p = obstack_alloc(irg->obst, node_size);
144 memset(p, 0, node_size);
145 res = (ir_node *)(p + firm_add_node_size);
147 res->kind = k_ir_node;
151 res->node_idx = irg_register_node_idx(irg, res);
156 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
158 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
159 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
163 set_irn_dbg_info(res, db);
167 res->node_nr = get_irp_new_node_nr();
170 for (i = 0; i < EDGE_KIND_LAST; ++i)
171 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
173 /* don't put this into the for loop, arity is -1 for some nodes! */
174 edges_notify_edge(res, -1, res->in[0], NULL, irg);
175 for (i = 1; i <= arity; ++i)
176 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
178 hook_new_node(irg, res);
183 /*-- getting some parameters from ir_nodes --*/
186 (is_ir_node)(const void *thing) {
187 return _is_ir_node(thing);
191 (get_irn_intra_arity)(const ir_node *node) {
192 return _get_irn_intra_arity(node);
196 (get_irn_inter_arity)(const ir_node *node) {
197 return _get_irn_inter_arity(node);
200 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
203 (get_irn_arity)(const ir_node *node) {
204 return _get_irn_arity(node);
207 /* Returns the array with ins. This array is shifted with respect to the
208 array accessed by get_irn_n: The block operand is at position 0 not -1.
209 (@@@ This should be changed.)
210 The order of the predecessors in this array is not guaranteed, except that
211 lists of operands as predecessors of Block or arguments of a Call are
214 get_irn_in(const ir_node *node) {
216 if (get_interprocedural_view()) { /* handle Filter and Block specially */
217 if (get_irn_opcode(node) == iro_Filter) {
218 assert(node->attr.filter.in_cg);
219 return node->attr.filter.in_cg;
220 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
221 return node->attr.block.in_cg;
223 /* else fall through */
229 set_irn_in(ir_node *node, int arity, ir_node **in) {
232 ir_graph *irg = current_ir_graph;
234 if (get_interprocedural_view()) { /* handle Filter and Block specially */
235 if (get_irn_opcode(node) == iro_Filter) {
236 assert(node->attr.filter.in_cg);
237 arr = &node->attr.filter.in_cg;
238 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
239 arr = &node->attr.block.in_cg;
247 for (i = 0; i < arity; i++) {
248 if (i < ARR_LEN(*arr)-1)
249 edges_notify_edge(node, i, in[i], (*arr)[i+1], irg);
251 edges_notify_edge(node, i, in[i], NULL, irg);
253 for(;i < ARR_LEN(*arr)-1; i++) {
254 edges_notify_edge(node, i, NULL, (*arr)[i+1], irg);
257 if (arity != ARR_LEN(*arr) - 1) {
258 ir_node * block = (*arr)[0];
259 *arr = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
262 fix_backedges(irg->obst, node);
264 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
268 (get_irn_intra_n)(const ir_node *node, int n) {
269 return _get_irn_intra_n (node, n);
273 (get_irn_inter_n)(const ir_node *node, int n) {
274 return _get_irn_inter_n (node, n);
277 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
280 (get_irn_n)(const ir_node *node, int n) {
281 return _get_irn_n(node, n);
285 set_irn_n(ir_node *node, int n, ir_node *in) {
286 assert(node && node->kind == k_ir_node);
288 assert(n < get_irn_arity(node));
289 assert(in && in->kind == k_ir_node);
291 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
292 /* Change block pred in both views! */
293 node->in[n + 1] = in;
294 assert(node->attr.filter.in_cg);
295 node->attr.filter.in_cg[n + 1] = in;
298 if (get_interprocedural_view()) { /* handle Filter and Block specially */
299 if (get_irn_opcode(node) == iro_Filter) {
300 assert(node->attr.filter.in_cg);
301 node->attr.filter.in_cg[n + 1] = in;
303 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
304 node->attr.block.in_cg[n + 1] = in;
307 /* else fall through */
311 hook_set_irn_n(node, n, in, node->in[n + 1]);
313 /* Here, we rely on src and tgt being in the current ir graph */
314 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
316 node->in[n + 1] = in;
319 int add_irn_n(ir_node *node, ir_node *in) {
321 ir_graph *irg = get_irn_irg(node);
323 assert(node->op->opar == oparity_dynamic);
324 pos = ARR_LEN(node->in) - 1;
325 ARR_APP1(ir_node *, node->in, in);
326 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
329 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
335 (get_irn_deps)(const ir_node *node) {
336 return _get_irn_deps(node);
340 (get_irn_dep)(const ir_node *node, int pos) {
341 return _get_irn_dep(node, pos);
345 (set_irn_dep)(ir_node *node, int pos, ir_node *dep) {
346 _set_irn_dep(node, pos, dep);
349 int add_irn_dep(ir_node *node, ir_node *dep) {
352 if (node->deps == NULL) {
353 node->deps = NEW_ARR_F(ir_node *, 1);
359 for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
360 if(node->deps[i] == NULL)
363 if(node->deps[i] == dep)
367 if (first_zero >= 0) {
368 node->deps[first_zero] = dep;
371 ARR_APP1(ir_node *, node->deps, dep);
376 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
381 void add_irn_deps(ir_node *tgt, ir_node *src) {
384 for (i = 0, n = get_irn_deps(src); i < n; ++i)
385 add_irn_dep(tgt, get_irn_dep(src, i));
390 (get_irn_mode)(const ir_node *node) {
391 return _get_irn_mode(node);
395 (set_irn_mode)(ir_node *node, ir_mode *mode) {
396 _set_irn_mode(node, mode);
400 get_irn_modecode(const ir_node *node) {
402 return node->mode->code;
/** Gets the string representation of the mode. */
407 get_irn_modename(const ir_node *node) {
409 return get_mode_name(node->mode);
413 get_irn_modeident(const ir_node *node) {
415 return get_mode_ident(node->mode);
419 (get_irn_op)(const ir_node *node) {
420 return _get_irn_op(node);
423 /* should be private to the library: */
425 (set_irn_op)(ir_node *node, ir_op *op) {
426 _set_irn_op(node, op);
430 (get_irn_opcode)(const ir_node *node) {
431 return _get_irn_opcode(node);
435 get_irn_opname(const ir_node *node) {
437 if (is_Phi0(node)) return "Phi0";
438 return get_id_str(node->op->name);
442 get_irn_opident(const ir_node *node) {
444 return node->op->name;
448 (get_irn_visited)(const ir_node *node) {
449 return _get_irn_visited(node);
453 (set_irn_visited)(ir_node *node, unsigned long visited) {
454 _set_irn_visited(node, visited);
458 (mark_irn_visited)(ir_node *node) {
459 _mark_irn_visited(node);
463 (irn_not_visited)(const ir_node *node) {
464 return _irn_not_visited(node);
468 (irn_visited)(const ir_node *node) {
469 return _irn_visited(node);
473 (set_irn_link)(ir_node *node, void *link) {
474 _set_irn_link(node, link);
478 (get_irn_link)(const ir_node *node) {
479 return _get_irn_link(node);
483 (get_irn_pinned)(const ir_node *node) {
484 return _get_irn_pinned(node);
488 (is_irn_pinned_in_irg) (const ir_node *node) {
489 return _is_irn_pinned_in_irg(node);
492 void set_irn_pinned(ir_node *node, op_pin_state state) {
493 /* due to optimization an opt may be turned into a Tuple */
494 if (get_irn_op(node) == op_Tuple)
497 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
498 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
500 node->attr.except.pin_state = state;
503 #ifdef DO_HEAPANALYSIS
504 /* Access the abstract interpretation information of a node.
505 Returns NULL if no such information is available. */
506 struct abstval *get_irn_abst_value(ir_node *n) {
509 /* Set the abstract interpretation information of a node. */
510 void set_irn_abst_value(ir_node *n, struct abstval *os) {
513 struct section *firm_get_irn_section(ir_node *n) {
516 void firm_set_irn_section(ir_node *n, struct section *s) {
520 /* Dummies needed for firmjni. */
521 struct abstval *get_irn_abst_value(ir_node *n) {
525 void set_irn_abst_value(ir_node *n, struct abstval *os) {
529 struct section *firm_get_irn_section(ir_node *n) {
533 void firm_set_irn_section(ir_node *n, struct section *s) {
537 #endif /* DO_HEAPANALYSIS */
540 /* Outputs a unique number for this node */
541 long get_irn_node_nr(const ir_node *node) {
544 return node->node_nr;
546 return (long)PTR_TO_INT(node);
551 get_irn_const_attr(ir_node *node) {
552 assert(node->op == op_Const);
553 return &node->attr.con;
557 get_irn_proj_attr(ir_node *node) {
558 assert(node->op == op_Proj);
559 return node->attr.proj;
563 get_irn_alloc_attr(ir_node *node) {
564 assert(node->op == op_Alloc);
565 return &node->attr.alloc;
569 get_irn_free_attr(ir_node *node) {
570 assert(node->op == op_Free);
571 return &node->attr.free;
575 get_irn_symconst_attr(ir_node *node) {
576 assert(node->op == op_SymConst);
577 return &node->attr.symc;
581 get_irn_call_attr(ir_node *node) {
582 assert(node->op == op_Call);
583 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
587 get_irn_sel_attr(ir_node *node) {
588 assert(node->op == op_Sel);
589 return &node->attr.sel;
593 get_irn_phi0_attr(ir_node *node) {
594 assert(is_Phi0(node));
595 return node->attr.phi0.pos;
599 get_irn_block_attr(ir_node *node) {
600 assert(node->op == op_Block);
601 return &node->attr.block;
605 get_irn_load_attr(ir_node *node) {
606 assert(node->op == op_Load);
607 return &node->attr.load;
611 get_irn_store_attr(ir_node *node) {
612 assert(node->op == op_Store);
613 return &node->attr.store;
617 get_irn_except_attr(ir_node *node) {
618 assert(node->op == op_Div || node->op == op_Quot ||
619 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
620 return &node->attr.except;
623 void *(get_irn_generic_attr)(ir_node *node) {
624 assert(is_ir_node(node));
625 return _get_irn_generic_attr(node);
628 const void *(get_irn_generic_attr_const)(const ir_node *node) {
629 assert(is_ir_node(node));
630 return _get_irn_generic_attr_const(node);
633 unsigned (get_irn_idx)(const ir_node *node) {
634 assert(is_ir_node(node));
635 return _get_irn_idx(node);
638 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
640 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
641 if (get_irn_n(node, i) == arg)
647 /** manipulate fields of individual nodes **/
650 (get_nodes_block)(const ir_node *node) {
651 return _get_nodes_block(node);
655 set_nodes_block(ir_node *node, ir_node *block) {
656 node->op->ops.set_block(node, block);
659 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
660 * from Start. If so returns frame type, else Null. */
661 ir_type *is_frame_pointer(ir_node *n) {
662 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
663 ir_node *start = get_Proj_pred(n);
664 if (get_irn_op(start) == op_Start) {
665 return get_irg_frame_type(get_irn_irg(start));
671 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
672 * from Start. If so returns global type, else Null. */
673 ir_type *is_globals_pointer(ir_node *n) {
674 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
675 ir_node *start = get_Proj_pred(n);
676 if (get_irn_op(start) == op_Start) {
677 return get_glob_type();
683 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
684 * from Start. If so returns tls type, else Null. */
685 ir_type *is_tls_pointer(ir_node *n) {
686 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
687 ir_node *start = get_Proj_pred(n);
688 if (get_irn_op(start) == op_Start) {
689 return get_tls_type();
695 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
696 * from Start. If so returns 1, else 0. */
697 int is_value_arg_pointer(ir_node *n) {
698 if ((get_irn_op(n) == op_Proj) &&
699 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
700 (get_irn_op(get_Proj_pred(n)) == op_Start))
705 /* Returns an array with the predecessors of the Block. Depending on
706 the implementation of the graph data structure this can be a copy of
707 the internal representation of predecessors as well as the internal
708 array itself. Therefore writing to this array might obstruct the ir. */
710 get_Block_cfgpred_arr(ir_node *node) {
711 assert((node->op == op_Block));
712 return (ir_node **)&(get_irn_in(node)[1]);
716 (get_Block_n_cfgpreds)(const ir_node *node) {
717 return _get_Block_n_cfgpreds(node);
721 (get_Block_cfgpred)(ir_node *node, int pos) {
722 return _get_Block_cfgpred(node, pos);
726 set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
727 assert(node->op == op_Block);
728 set_irn_n(node, pos, pred);
732 (get_Block_cfgpred_block)(ir_node *node, int pos) {
733 return _get_Block_cfgpred_block(node, pos);
737 get_Block_matured(ir_node *node) {
738 assert(node->op == op_Block);
739 return (int)node->attr.block.is_matured;
743 set_Block_matured(ir_node *node, int matured) {
744 assert(node->op == op_Block);
745 node->attr.block.is_matured = matured;
749 (get_Block_block_visited)(const ir_node *node) {
750 return _get_Block_block_visited(node);
754 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
755 _set_Block_block_visited(node, visit);
758 /* For this current_ir_graph must be set. */
760 (mark_Block_block_visited)(ir_node *node) {
761 _mark_Block_block_visited(node);
765 (Block_not_block_visited)(const ir_node *node) {
766 return _Block_not_block_visited(node);
770 (Block_block_visited)(const ir_node *node) {
771 return _Block_block_visited(node);
775 get_Block_graph_arr (ir_node *node, int pos) {
776 assert(node->op == op_Block);
777 return node->attr.block.graph_arr[pos+1];
781 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
782 assert(node->op == op_Block);
783 node->attr.block.graph_arr[pos+1] = value;
786 void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[]) {
787 assert(node->op == op_Block);
788 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
789 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
790 node->attr.block.in_cg[0] = NULL;
791 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
793 /* Fix backedge array. fix_backedges() operates depending on
794 interprocedural_view. */
795 int ipv = get_interprocedural_view();
796 set_interprocedural_view(1);
797 fix_backedges(current_ir_graph->obst, node);
798 set_interprocedural_view(ipv);
801 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
804 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred) {
805 assert(node->op == op_Block &&
806 node->attr.block.in_cg &&
807 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
808 node->attr.block.in_cg[pos + 1] = pred;
811 ir_node **get_Block_cg_cfgpred_arr(ir_node *node) {
812 assert(node->op == op_Block);
813 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
816 int get_Block_cg_n_cfgpreds(ir_node *node) {
817 assert(node->op == op_Block);
818 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
821 ir_node *get_Block_cg_cfgpred(ir_node *node, int pos) {
822 assert(node->op == op_Block && node->attr.block.in_cg);
823 return node->attr.block.in_cg[pos + 1];
826 void remove_Block_cg_cfgpred_arr(ir_node *node) {
827 assert(node->op == op_Block);
828 node->attr.block.in_cg = NULL;
831 ir_node *(set_Block_dead)(ir_node *block) {
832 return _set_Block_dead(block);
835 int (is_Block_dead)(const ir_node *block) {
836 return _is_Block_dead(block);
839 ir_extblk *get_Block_extbb(const ir_node *block) {
841 assert(is_Block(block));
842 res = block->attr.block.extblk;
843 assert(res == NULL || is_ir_extbb(res));
847 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
848 assert(is_Block(block));
849 assert(extblk == NULL || is_ir_extbb(extblk));
850 block->attr.block.extblk = extblk;
853 /* returns the macro block header of a block. */
854 ir_node *get_Block_MacroBlock(const ir_node *block) {
855 assert(is_Block(block));
856 return get_irn_n(block, -1);
859 /* returns the graph of a Block. */
860 ir_graph *get_Block_irg(const ir_node *block) {
861 assert(is_Block(block));
862 return block->attr.block.irg;
866 get_End_n_keepalives(ir_node *end) {
867 assert(end->op == op_End);
868 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
872 get_End_keepalive(ir_node *end, int pos) {
873 assert(end->op == op_End);
874 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
878 add_End_keepalive(ir_node *end, ir_node *ka) {
879 assert(end->op == op_End);
880 assert((is_Phi(ka) || is_Proj(ka) || is_Block(ka) || is_irn_keep(ka)) && "Only Phi, Block or Keep nodes can be kept alive!");
885 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
886 assert(end->op == op_End);
887 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
890 /* Set new keep-alives */
891 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
893 ir_graph *irg = get_irn_irg(end);
895 /* notify that edges are deleted */
896 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
897 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
899 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
901 for (i = 0; i < n; ++i) {
902 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
903 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
907 /* Set new keep-alives from old keep-alives, skipping irn */
908 void remove_End_keepalive(ir_node *end, ir_node *irn) {
909 int n = get_End_n_keepalives(end);
913 NEW_ARR_A(ir_node *, in, n);
915 for (idx = i = 0; i < n; ++i) {
916 ir_node *old_ka = get_End_keepalive(end, i);
923 /* set new keep-alives */
924 set_End_keepalives(end, idx, in);
928 free_End (ir_node *end) {
929 assert(end->op == op_End);
932 end->in = NULL; /* @@@ make sure we get an error if we use the
933 in array afterwards ... */
936 /* Return the target address of an IJmp */
937 ir_node *get_IJmp_target(ir_node *ijmp) {
938 assert(ijmp->op == op_IJmp);
939 return get_irn_n(ijmp, 0);
942 /** Sets the target address of an IJmp */
943 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
944 assert(ijmp->op == op_IJmp);
945 set_irn_n(ijmp, 0, tgt);
949 > Implementing the case construct (which is where the constant Proj node is
950 > important) involves far more than simply determining the constant values.
951 > We could argue that this is more properly a function of the translator from
952 > Firm to the target machine. That could be done if there was some way of
953 > projecting "default" out of the Cond node.
954 I know it's complicated.
Basically there are two problems:
956 - determining the gaps between the projs
957 - determining the biggest case constant to know the proj number for
959 I see several solutions:
960 1. Introduce a ProjDefault node. Solves both problems.
961 This means to extend all optimizations executed during construction.
962 2. Give the Cond node for switch two flavors:
963 a) there are no gaps in the projs (existing flavor)
964 b) gaps may exist, default proj is still the Proj with the largest
965 projection number. This covers also the gaps.
966 3. Fix the semantic of the Cond to that of 2b)
968 Solution 2 seems to be the best:
969 Computing the gaps in the Firm representation is not too hard, i.e.,
970 libFIRM can implement a routine that transforms between the two
971 flavours. This is also possible for 1) but 2) does not require to
972 change any existing optimization.
973 Further it should be far simpler to determine the biggest constant than
975 I don't want to choose 3) as 2a) seems to have advantages for
976 dataflow analysis and 3) does not allow to convert the representation to
980 get_Cond_selector(ir_node *node) {
981 assert(node->op == op_Cond);
982 return get_irn_n(node, 0);
986 set_Cond_selector(ir_node *node, ir_node *selector) {
987 assert(node->op == op_Cond);
988 set_irn_n(node, 0, selector);
992 get_Cond_kind(ir_node *node) {
993 assert(node->op == op_Cond);
994 return node->attr.cond.kind;
998 set_Cond_kind(ir_node *node, cond_kind kind) {
999 assert(node->op == op_Cond);
1000 node->attr.cond.kind = kind;
1004 get_Cond_defaultProj(ir_node *node) {
1005 assert(node->op == op_Cond);
1006 return node->attr.cond.default_proj;
1010 get_Return_mem(ir_node *node) {
1011 assert(node->op == op_Return);
1012 return get_irn_n(node, 0);
1016 set_Return_mem(ir_node *node, ir_node *mem) {
1017 assert(node->op == op_Return);
1018 set_irn_n(node, 0, mem);
1022 get_Return_n_ress(ir_node *node) {
1023 assert(node->op == op_Return);
1024 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1028 get_Return_res_arr (ir_node *node) {
1029 assert((node->op == op_Return));
1030 if (get_Return_n_ress(node) > 0)
1031 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1038 set_Return_n_res(ir_node *node, int results) {
1039 assert(node->op == op_Return);
1044 get_Return_res(ir_node *node, int pos) {
1045 assert(node->op == op_Return);
1046 assert(get_Return_n_ress(node) > pos);
1047 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1051 set_Return_res(ir_node *node, int pos, ir_node *res){
1052 assert(node->op == op_Return);
1053 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1056 tarval *(get_Const_tarval)(const ir_node *node) {
1057 return _get_Const_tarval(node);
1061 set_Const_tarval(ir_node *node, tarval *con) {
1062 assert(node->op == op_Const);
1063 node->attr.con.tv = con;
1066 cnst_classify_t (classify_Const)(ir_node *node) {
1067 return _classify_Const(node);
1071 /* The source language type. Must be an atomic type. Mode of type must
1072 be mode of node. For tarvals from entities type must be pointer to
1075 get_Const_type(ir_node *node) {
1076 assert(node->op == op_Const);
1077 return node->attr.con.tp;
1081 set_Const_type(ir_node *node, ir_type *tp) {
1082 assert(node->op == op_Const);
1083 if (tp != firm_unknown_type) {
1084 assert(is_atomic_type(tp));
1085 assert(get_type_mode(tp) == get_irn_mode(node));
1087 node->attr.con.tp = tp;
1092 get_SymConst_kind(const ir_node *node) {
1093 assert(node->op == op_SymConst);
1094 return node->attr.symc.num;
1098 set_SymConst_kind(ir_node *node, symconst_kind num) {
1099 assert(node->op == op_SymConst);
1100 node->attr.symc.num = num;
1104 get_SymConst_type(ir_node *node) {
1105 assert((node->op == op_SymConst) &&
1106 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1107 return node->attr.symc.sym.type_p = skip_tid(node->attr.symc.sym.type_p);
1111 set_SymConst_type(ir_node *node, ir_type *tp) {
1112 assert((node->op == op_SymConst) &&
1113 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1114 node->attr.symc.sym.type_p = tp;
1118 get_SymConst_name(const ir_node *node) {
1119 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1120 return node->attr.symc.sym.ident_p;
1124 set_SymConst_name(ir_node *node, ident *name) {
1125 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1126 node->attr.symc.sym.ident_p = name;
1130 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1131 ir_entity *get_SymConst_entity(const ir_node *node) {
1132 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1133 return node->attr.symc.sym.entity_p;
1136 void set_SymConst_entity(ir_node *node, ir_entity *ent) {
1137 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1138 node->attr.symc.sym.entity_p = ent;
1141 ir_enum_const *get_SymConst_enum(const ir_node *node) {
1142 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1143 return node->attr.symc.sym.enum_p;
1146 void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
1147 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1148 node->attr.symc.sym.enum_p = ec;
1151 union symconst_symbol
1152 get_SymConst_symbol(const ir_node *node) {
1153 assert(node->op == op_SymConst);
1154 return node->attr.symc.sym;
1158 set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
1159 assert(node->op == op_SymConst);
1160 node->attr.symc.sym = sym;
1164 get_SymConst_value_type(ir_node *node) {
1165 assert(node->op == op_SymConst);
1166 if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
1167 return node->attr.symc.tp;
1171 set_SymConst_value_type(ir_node *node, ir_type *tp) {
1172 assert(node->op == op_SymConst);
1173 node->attr.symc.tp = tp;
1177 get_Sel_mem(ir_node *node) {
1178 assert(node->op == op_Sel);
1179 return get_irn_n(node, 0);
1183 set_Sel_mem(ir_node *node, ir_node *mem) {
1184 assert(node->op == op_Sel);
1185 set_irn_n(node, 0, mem);
1189 get_Sel_ptr(ir_node *node) {
1190 assert(node->op == op_Sel);
1191 return get_irn_n(node, 1);
1195 set_Sel_ptr(ir_node *node, ir_node *ptr) {
1196 assert(node->op == op_Sel);
1197 set_irn_n(node, 1, ptr);
1201 get_Sel_n_indexs(ir_node *node) {
1202 assert(node->op == op_Sel);
1203 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1207 get_Sel_index_arr(ir_node *node) {
1208 assert((node->op == op_Sel));
1209 if (get_Sel_n_indexs(node) > 0)
1210 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1216 get_Sel_index(ir_node *node, int pos) {
1217 assert(node->op == op_Sel);
1218 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1222 set_Sel_index(ir_node *node, int pos, ir_node *index) {
1223 assert(node->op == op_Sel);
1224 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1228 get_Sel_entity(ir_node *node) {
1229 assert(node->op == op_Sel);
1230 return node->attr.sel.ent;
1234 set_Sel_entity(ir_node *node, ir_entity *ent) {
1235 assert(node->op == op_Sel);
1236 node->attr.sel.ent = ent;
1240 /* For unary and binary arithmetic operations the access to the
1241 operands can be factored out. Left is the first, right the
1242 second arithmetic value as listed in tech report 0999-33.
1243 unops are: Minus, Abs, Not, Conv, Cast
1244 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1245 Shr, Shrs, Rotate, Cmp */
1249 get_Call_mem(ir_node *node) {
1250 assert(node->op == op_Call);
1251 return get_irn_n(node, 0);
1255 set_Call_mem(ir_node *node, ir_node *mem) {
1256 assert(node->op == op_Call);
1257 set_irn_n(node, 0, mem);
1261 get_Call_ptr(ir_node *node) {
1262 assert(node->op == op_Call);
1263 return get_irn_n(node, 1);
1267 set_Call_ptr(ir_node *node, ir_node *ptr) {
1268 assert(node->op == op_Call);
1269 set_irn_n(node, 1, ptr);
1273 get_Call_param_arr(ir_node *node) {
1274 assert(node->op == op_Call);
1275 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1279 get_Call_n_params(ir_node *node) {
1280 assert(node->op == op_Call);
1281 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1285 get_Call_arity(ir_node *node) {
1286 assert(node->op == op_Call);
1287 return get_Call_n_params(node);
1291 set_Call_arity(ir_node *node, ir_node *arity) {
1292 assert(node->op == op_Call);
1297 get_Call_param(ir_node *node, int pos) {
1298 assert(node->op == op_Call);
1299 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1303 set_Call_param(ir_node *node, int pos, ir_node *param) {
1304 assert(node->op == op_Call);
1305 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1309 get_Call_type(ir_node *node) {
1310 assert(node->op == op_Call);
1311 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1315 set_Call_type(ir_node *node, ir_type *tp) {
1316 assert(node->op == op_Call);
1317 assert((get_unknown_type() == tp) || is_Method_type(tp));
1318 node->attr.call.cld_tp = tp;
1321 int Call_has_callees(ir_node *node) {
1322 assert(node && node->op == op_Call);
1323 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1324 (node->attr.call.callee_arr != NULL));
1327 int get_Call_n_callees(ir_node * node) {
1328 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1329 return ARR_LEN(node->attr.call.callee_arr);
1332 ir_entity * get_Call_callee(ir_node * node, int pos) {
1333 assert(pos >= 0 && pos < get_Call_n_callees(node));
1334 return node->attr.call.callee_arr[pos];
1337 void set_Call_callee_arr(ir_node * node, const int n, ir_entity ** arr) {
1338 assert(node->op == op_Call);
1339 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1340 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1342 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1345 void remove_Call_callee_arr(ir_node * node) {
1346 assert(node->op == op_Call);
1347 node->attr.call.callee_arr = NULL;
1350 ir_node * get_CallBegin_ptr(ir_node *node) {
1351 assert(node->op == op_CallBegin);
1352 return get_irn_n(node, 0);
1355 void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
1356 assert(node->op == op_CallBegin);
1357 set_irn_n(node, 0, ptr);
1360 ir_node * get_CallBegin_call(ir_node *node) {
1361 assert(node->op == op_CallBegin);
1362 return node->attr.callbegin.call;
1365 void set_CallBegin_call(ir_node *node, ir_node *call) {
1366 assert(node->op == op_CallBegin);
1367 node->attr.callbegin.call = call;
1372 ir_node * get_##OP##_left(const ir_node *node) { \
1373 assert(node->op == op_##OP); \
1374 return get_irn_n(node, node->op->op_index); \
1376 void set_##OP##_left(ir_node *node, ir_node *left) { \
1377 assert(node->op == op_##OP); \
1378 set_irn_n(node, node->op->op_index, left); \
1380 ir_node *get_##OP##_right(const ir_node *node) { \
1381 assert(node->op == op_##OP); \
1382 return get_irn_n(node, node->op->op_index + 1); \
1384 void set_##OP##_right(ir_node *node, ir_node *right) { \
1385 assert(node->op == op_##OP); \
1386 set_irn_n(node, node->op->op_index + 1, right); \
1390 ir_node *get_##OP##_op(const ir_node *node) { \
1391 assert(node->op == op_##OP); \
1392 return get_irn_n(node, node->op->op_index); \
1394 void set_##OP##_op (ir_node *node, ir_node *op) { \
1395 assert(node->op == op_##OP); \
1396 set_irn_n(node, node->op->op_index, op); \
1399 #define BINOP_MEM(OP) \
1403 get_##OP##_mem(ir_node *node) { \
1404 assert(node->op == op_##OP); \
1405 return get_irn_n(node, 0); \
1409 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1410 assert(node->op == op_##OP); \
1411 set_irn_n(node, 0, mem); \
1417 ir_mode *get_##OP##_resmode(const ir_node *node) { \
1418 assert(node->op == op_##OP); \
1419 return node->attr.divmod.res_mode; \
1422 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
1423 assert(node->op == op_##OP); \
1424 node->attr.divmod.res_mode = mode; \
1449 int get_Conv_strict(ir_node *node) {
1450 assert(node->op == op_Conv);
1451 return node->attr.conv.strict;
1454 void set_Conv_strict(ir_node *node, int strict_flag) {
1455 assert(node->op == op_Conv);
1456 node->attr.conv.strict = (char)strict_flag;
1460 get_Cast_type(ir_node *node) {
1461 assert(node->op == op_Cast);
1462 return node->attr.cast.totype;
1466 set_Cast_type(ir_node *node, ir_type *to_tp) {
1467 assert(node->op == op_Cast);
1468 node->attr.cast.totype = to_tp;
1472 /* Checks for upcast.
1474 * Returns true if the Cast node casts a class type to a super type.
1476 int is_Cast_upcast(ir_node *node) {
1477 ir_type *totype = get_Cast_type(node);
1478 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1479 ir_graph *myirg = get_irn_irg(node);
1481 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1484 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1485 totype = get_pointer_points_to_type(totype);
1486 fromtype = get_pointer_points_to_type(fromtype);
1491 if (!is_Class_type(totype)) return 0;
1492 return is_SubClass_of(fromtype, totype);
1495 /* Checks for downcast.
1497 * Returns true if the Cast node casts a class type to a sub type.
1499 int is_Cast_downcast(ir_node *node) {
1500 ir_type *totype = get_Cast_type(node);
1501 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1503 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1506 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1507 totype = get_pointer_points_to_type(totype);
1508 fromtype = get_pointer_points_to_type(fromtype);
1513 if (!is_Class_type(totype)) return 0;
1514 return is_SubClass_of(totype, fromtype);
1518 (is_unop)(const ir_node *node) {
1519 return _is_unop(node);
1523 get_unop_op(const ir_node *node) {
1524 if (node->op->opar == oparity_unary)
1525 return get_irn_n(node, node->op->op_index);
1527 assert(node->op->opar == oparity_unary);
1532 set_unop_op(ir_node *node, ir_node *op) {
1533 if (node->op->opar == oparity_unary)
1534 set_irn_n(node, node->op->op_index, op);
1536 assert(node->op->opar == oparity_unary);
1540 (is_binop)(const ir_node *node) {
1541 return _is_binop(node);
1545 get_binop_left(const ir_node *node) {
1546 assert(node->op->opar == oparity_binary);
1547 return get_irn_n(node, node->op->op_index);
1551 set_binop_left(ir_node *node, ir_node *left) {
1552 assert(node->op->opar == oparity_binary);
1553 set_irn_n(node, node->op->op_index, left);
1557 get_binop_right(const ir_node *node) {
1558 assert(node->op->opar == oparity_binary);
1559 return get_irn_n(node, node->op->op_index + 1);
1563 set_binop_right(ir_node *node, ir_node *right) {
1564 assert(node->op->opar == oparity_binary);
1565 set_irn_n(node, node->op->op_index + 1, right);
1568 int is_Phi(const ir_node *n) {
1574 if (op == op_Filter) return get_interprocedural_view();
1577 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1578 (get_irn_arity(n) > 0));
1583 int is_Phi0(const ir_node *n) {
1586 return ((get_irn_op(n) == op_Phi) &&
1587 (get_irn_arity(n) == 0) &&
1588 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1592 get_Phi_preds_arr(ir_node *node) {
1593 assert(node->op == op_Phi);
1594 return (ir_node **)&(get_irn_in(node)[1]);
1598 get_Phi_n_preds(const ir_node *node) {
1599 assert(is_Phi(node) || is_Phi0(node));
1600 return (get_irn_arity(node));
1604 void set_Phi_n_preds(ir_node *node, int n_preds) {
1605 assert(node->op == op_Phi);
1610 get_Phi_pred(const ir_node *node, int pos) {
1611 assert(is_Phi(node) || is_Phi0(node));
1612 return get_irn_n(node, pos);
1616 set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
1617 assert(is_Phi(node) || is_Phi0(node));
1618 set_irn_n(node, pos, pred);
1622 int is_memop(ir_node *node) {
1623 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1626 ir_node *get_memop_mem(ir_node *node) {
1627 assert(is_memop(node));
1628 return get_irn_n(node, 0);
1631 void set_memop_mem(ir_node *node, ir_node *mem) {
1632 assert(is_memop(node));
1633 set_irn_n(node, 0, mem);
1636 ir_node *get_memop_ptr(ir_node *node) {
1637 assert(is_memop(node));
1638 return get_irn_n(node, 1);
1641 void set_memop_ptr(ir_node *node, ir_node *ptr) {
1642 assert(is_memop(node));
1643 set_irn_n(node, 1, ptr);
1647 get_Load_mem(ir_node *node) {
1648 assert(node->op == op_Load);
1649 return get_irn_n(node, 0);
1653 set_Load_mem(ir_node *node, ir_node *mem) {
1654 assert(node->op == op_Load);
1655 set_irn_n(node, 0, mem);
1659 get_Load_ptr(ir_node *node) {
1660 assert(node->op == op_Load);
1661 return get_irn_n(node, 1);
1665 set_Load_ptr(ir_node *node, ir_node *ptr) {
1666 assert(node->op == op_Load);
1667 set_irn_n(node, 1, ptr);
1671 get_Load_mode(ir_node *node) {
1672 assert(node->op == op_Load);
1673 return node->attr.load.load_mode;
1677 set_Load_mode(ir_node *node, ir_mode *mode) {
1678 assert(node->op == op_Load);
1679 node->attr.load.load_mode = mode;
1683 get_Load_volatility(ir_node *node) {
1684 assert(node->op == op_Load);
1685 return node->attr.load.volatility;
1689 set_Load_volatility(ir_node *node, ir_volatility volatility) {
1690 assert(node->op == op_Load);
1691 node->attr.load.volatility = volatility;
1696 get_Store_mem(ir_node *node) {
1697 assert(node->op == op_Store);
1698 return get_irn_n(node, 0);
1702 set_Store_mem(ir_node *node, ir_node *mem) {
1703 assert(node->op == op_Store);
1704 set_irn_n(node, 0, mem);
1708 get_Store_ptr(ir_node *node) {
1709 assert(node->op == op_Store);
1710 return get_irn_n(node, 1);
1714 set_Store_ptr(ir_node *node, ir_node *ptr) {
1715 assert(node->op == op_Store);
1716 set_irn_n(node, 1, ptr);
1720 get_Store_value(ir_node *node) {
1721 assert(node->op == op_Store);
1722 return get_irn_n(node, 2);
1726 set_Store_value(ir_node *node, ir_node *value) {
1727 assert(node->op == op_Store);
1728 set_irn_n(node, 2, value);
1732 get_Store_volatility(ir_node *node) {
1733 assert(node->op == op_Store);
1734 return node->attr.store.volatility;
1738 set_Store_volatility(ir_node *node, ir_volatility volatility) {
1739 assert(node->op == op_Store);
1740 node->attr.store.volatility = volatility;
1745 get_Alloc_mem(ir_node *node) {
1746 assert(node->op == op_Alloc);
1747 return get_irn_n(node, 0);
1751 set_Alloc_mem(ir_node *node, ir_node *mem) {
1752 assert(node->op == op_Alloc);
1753 set_irn_n(node, 0, mem);
1757 get_Alloc_size(ir_node *node) {
1758 assert(node->op == op_Alloc);
1759 return get_irn_n(node, 1);
1763 set_Alloc_size(ir_node *node, ir_node *size) {
1764 assert(node->op == op_Alloc);
1765 set_irn_n(node, 1, size);
1769 get_Alloc_type(ir_node *node) {
1770 assert(node->op == op_Alloc);
1771 return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
1775 set_Alloc_type(ir_node *node, ir_type *tp) {
1776 assert(node->op == op_Alloc);
1777 node->attr.alloc.type = tp;
1781 get_Alloc_where(ir_node *node) {
1782 assert(node->op == op_Alloc);
1783 return node->attr.alloc.where;
1787 set_Alloc_where(ir_node *node, where_alloc where) {
1788 assert(node->op == op_Alloc);
1789 node->attr.alloc.where = where;
1794 get_Free_mem(ir_node *node) {
1795 assert(node->op == op_Free);
1796 return get_irn_n(node, 0);
1800 set_Free_mem(ir_node *node, ir_node *mem) {
1801 assert(node->op == op_Free);
1802 set_irn_n(node, 0, mem);
1806 get_Free_ptr(ir_node *node) {
1807 assert(node->op == op_Free);
1808 return get_irn_n(node, 1);
1812 set_Free_ptr(ir_node *node, ir_node *ptr) {
1813 assert(node->op == op_Free);
1814 set_irn_n(node, 1, ptr);
1818 get_Free_size(ir_node *node) {
1819 assert(node->op == op_Free);
1820 return get_irn_n(node, 2);
1824 set_Free_size(ir_node *node, ir_node *size) {
1825 assert(node->op == op_Free);
1826 set_irn_n(node, 2, size);
1830 get_Free_type(ir_node *node) {
1831 assert(node->op == op_Free);
1832 return node->attr.free.type = skip_tid(node->attr.free.type);
1836 set_Free_type(ir_node *node, ir_type *tp) {
1837 assert(node->op == op_Free);
1838 node->attr.free.type = tp;
1842 get_Free_where(ir_node *node) {
1843 assert(node->op == op_Free);
1844 return node->attr.free.where;
1848 set_Free_where(ir_node *node, where_alloc where) {
1849 assert(node->op == op_Free);
1850 node->attr.free.where = where;
1853 ir_node **get_Sync_preds_arr(ir_node *node) {
1854 assert(node->op == op_Sync);
1855 return (ir_node **)&(get_irn_in(node)[1]);
1858 int get_Sync_n_preds(ir_node *node) {
1859 assert(node->op == op_Sync);
1860 return (get_irn_arity(node));
1864 void set_Sync_n_preds(ir_node *node, int n_preds) {
1865 assert(node->op == op_Sync);
1869 ir_node *get_Sync_pred(ir_node *node, int pos) {
1870 assert(node->op == op_Sync);
1871 return get_irn_n(node, pos);
1874 void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
1875 assert(node->op == op_Sync);
1876 set_irn_n(node, pos, pred);
1879 /* Add a new Sync predecessor */
1880 void add_Sync_pred(ir_node *node, ir_node *pred) {
1881 assert(node->op == op_Sync);
1882 add_irn_n(node, pred);
1885 /* Returns the source language type of a Proj node. */
1886 ir_type *get_Proj_type(ir_node *n) {
1887 ir_type *tp = firm_unknown_type;
1888 ir_node *pred = get_Proj_pred(n);
1890 switch (get_irn_opcode(pred)) {
1893 /* Deal with Start / Call here: we need to know the Proj Nr. */
1894 assert(get_irn_mode(pred) == mode_T);
1895 pred_pred = get_Proj_pred(pred);
1896 if (get_irn_op(pred_pred) == op_Start) {
1897 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1898 tp = get_method_param_type(mtp, get_Proj_proj(n));
1899 } else if (get_irn_op(pred_pred) == op_Call) {
1900 ir_type *mtp = get_Call_type(pred_pred);
1901 tp = get_method_res_type(mtp, get_Proj_proj(n));
1904 case iro_Start: break;
1905 case iro_Call: break;
1907 ir_node *a = get_Load_ptr(pred);
1909 tp = get_entity_type(get_Sel_entity(a));
1918 get_Proj_pred(const ir_node *node) {
1919 assert(is_Proj(node));
1920 return get_irn_n(node, 0);
1924 set_Proj_pred(ir_node *node, ir_node *pred) {
1925 assert(is_Proj(node));
1926 set_irn_n(node, 0, pred);
/* Returns the projection number of a Proj node.  In interprocedural view a
 * Filter node may stand in for a Proj (see is_Proj()); its projection number
 * lives in a different attribute slot. */
get_Proj_proj(const ir_node *node) {
	assert(is_Proj(node));
	if (get_irn_opcode(node) == iro_Proj) {
		return node->attr.proj;
	assert(get_irn_opcode(node) == iro_Filter);
	return node->attr.filter.proj;
1941 set_Proj_proj(ir_node *node, long proj) {
1942 assert(node->op == op_Proj);
1943 node->attr.proj = proj;
1947 get_Tuple_preds_arr(ir_node *node) {
1948 assert(node->op == op_Tuple);
1949 return (ir_node **)&(get_irn_in(node)[1]);
1953 get_Tuple_n_preds(ir_node *node) {
1954 assert(node->op == op_Tuple);
1955 return (get_irn_arity(node));
1960 set_Tuple_n_preds(ir_node *node, int n_preds) {
1961 assert(node->op == op_Tuple);
1966 get_Tuple_pred (ir_node *node, int pos) {
1967 assert(node->op == op_Tuple);
1968 return get_irn_n(node, pos);
1972 set_Tuple_pred(ir_node *node, int pos, ir_node *pred) {
1973 assert(node->op == op_Tuple);
1974 set_irn_n(node, pos, pred);
1978 get_Id_pred(ir_node *node) {
1979 assert(node->op == op_Id);
1980 return get_irn_n(node, 0);
1984 set_Id_pred(ir_node *node, ir_node *pred) {
1985 assert(node->op == op_Id);
1986 set_irn_n(node, 0, pred);
1989 ir_node *get_Confirm_value(ir_node *node) {
1990 assert(node->op == op_Confirm);
1991 return get_irn_n(node, 0);
1994 void set_Confirm_value(ir_node *node, ir_node *value) {
1995 assert(node->op == op_Confirm);
1996 set_irn_n(node, 0, value);
1999 ir_node *get_Confirm_bound(ir_node *node) {
2000 assert(node->op == op_Confirm);
2001 return get_irn_n(node, 1);
2004 void set_Confirm_bound(ir_node *node, ir_node *bound) {
2005 assert(node->op == op_Confirm);
2006 set_irn_n(node, 0, bound);
2009 pn_Cmp get_Confirm_cmp(const ir_node *node) {
2010 assert(node->op == op_Confirm);
2011 return node->attr.confirm.cmp;
2014 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
2015 assert(node->op == op_Confirm);
2016 node->attr.confirm.cmp = cmp;
2020 get_Filter_pred(ir_node *node) {
2021 assert(node->op == op_Filter);
2026 set_Filter_pred(ir_node *node, ir_node *pred) {
2027 assert(node->op == op_Filter);
2032 get_Filter_proj(ir_node *node) {
2033 assert(node->op == op_Filter);
2034 return node->attr.filter.proj;
2038 set_Filter_proj(ir_node *node, long proj) {
2039 assert(node->op == op_Filter);
2040 node->attr.filter.proj = proj;
2043 /* Don't use get_irn_arity, get_irn_n in implementation as access
2044 shall work independent of view!!! */
void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
	assert(node->op == op_Filter);
	/* (re)allocate the interprocedural predecessor array if the arity changed */
	if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
		node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
		node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
		memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
		node->attr.filter.in_cg[0] = node->in[0]; /* slot 0 keeps the block predecessor */
	memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2056 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2057 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2058 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2059 node->attr.filter.in_cg[pos + 1] = pred;
2062 int get_Filter_n_cg_preds(ir_node *node) {
2063 assert(node->op == op_Filter && node->attr.filter.in_cg);
2064 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2067 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2069 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2071 arity = ARR_LEN(node->attr.filter.in_cg);
2072 assert(pos < arity - 1);
2073 return node->attr.filter.in_cg[pos + 1];
2077 ir_node *get_Mux_sel(ir_node *node) {
2078 if (node->op == op_Psi) {
2079 assert(get_irn_arity(node) == 3);
2080 return get_Psi_cond(node, 0);
2082 assert(node->op == op_Mux);
2086 void set_Mux_sel(ir_node *node, ir_node *sel) {
2087 if (node->op == op_Psi) {
2088 assert(get_irn_arity(node) == 3);
2089 set_Psi_cond(node, 0, sel);
2091 assert(node->op == op_Mux);
2096 ir_node *get_Mux_false(ir_node *node) {
2097 if (node->op == op_Psi) {
2098 assert(get_irn_arity(node) == 3);
2099 return get_Psi_default(node);
2101 assert(node->op == op_Mux);
/* Sets the false operand of a Mux (for a 3-operand Psi this is the
 * default value). */
void set_Mux_false(ir_node *node, ir_node *ir_false) {
	if (node->op == op_Psi) {
		assert(get_irn_arity(node) == 3);
		set_Psi_default(node, ir_false);
	assert(node->op == op_Mux);
	node->in[2] = ir_false; /* NOTE(review): writes in[] directly, bypassing set_irn_n() bookkeeping — confirm intentional */
2115 ir_node *get_Mux_true(ir_node *node) {
2116 if (node->op == op_Psi) {
2117 assert(get_irn_arity(node) == 3);
2118 return get_Psi_val(node, 0);
2120 assert(node->op == op_Mux);
/* Sets the true operand of a Mux (for a 3-operand Psi this is the value
 * of the first cond/val pair). */
void set_Mux_true(ir_node *node, ir_node *ir_true) {
	if (node->op == op_Psi) {
		assert(get_irn_arity(node) == 3);
		set_Psi_val(node, 0, ir_true);
	assert(node->op == op_Mux);
	node->in[3] = ir_true; /* NOTE(review): writes in[] directly, bypassing set_irn_n() bookkeeping — confirm intentional */
2135 ir_node *get_Psi_cond(ir_node *node, int pos) {
2136 int num_conds = get_Psi_n_conds(node);
2137 assert(node->op == op_Psi);
2138 assert(pos < num_conds);
2139 return get_irn_n(node, 2 * pos);
2142 void set_Psi_cond(ir_node *node, int pos, ir_node *cond) {
2143 int num_conds = get_Psi_n_conds(node);
2144 assert(node->op == op_Psi);
2145 assert(pos < num_conds);
2146 set_irn_n(node, 2 * pos, cond);
2149 ir_node *get_Psi_val(ir_node *node, int pos) {
2150 int num_vals = get_Psi_n_conds(node);
2151 assert(node->op == op_Psi);
2152 assert(pos < num_vals);
2153 return get_irn_n(node, 2 * pos + 1);
2156 void set_Psi_val(ir_node *node, int pos, ir_node *val) {
2157 int num_vals = get_Psi_n_conds(node);
2158 assert(node->op == op_Psi);
2159 assert(pos < num_vals);
2160 set_irn_n(node, 2 * pos + 1, val);
/* Returns the default value of a Psi node: its LAST input
 * (the inputs are cond/val pairs followed by one default value). */
ir_node *get_Psi_default(ir_node *node) {
	int def_pos = get_irn_arity(node) - 1;
	assert(node->op == op_Psi);
	return get_irn_n(node, def_pos);
2169 void set_Psi_default(ir_node *node, ir_node *val) {
2170 int def_pos = get_irn_arity(node);
2171 assert(node->op == op_Psi);
2172 set_irn_n(node, def_pos, val);
2175 int (get_Psi_n_conds)(ir_node *node) {
2176 return _get_Psi_n_conds(node);
2180 ir_node *get_CopyB_mem(ir_node *node) {
2181 assert(node->op == op_CopyB);
2182 return get_irn_n(node, 0);
2185 void set_CopyB_mem(ir_node *node, ir_node *mem) {
2186 assert(node->op == op_CopyB);
2187 set_irn_n(node, 0, mem);
2190 ir_node *get_CopyB_dst(ir_node *node) {
2191 assert(node->op == op_CopyB);
2192 return get_irn_n(node, 1);
2195 void set_CopyB_dst(ir_node *node, ir_node *dst) {
2196 assert(node->op == op_CopyB);
2197 set_irn_n(node, 1, dst);
2200 ir_node *get_CopyB_src (ir_node *node) {
2201 assert(node->op == op_CopyB);
2202 return get_irn_n(node, 2);
2205 void set_CopyB_src(ir_node *node, ir_node *src) {
2206 assert(node->op == op_CopyB);
2207 set_irn_n(node, 2, src);
2210 ir_type *get_CopyB_type(ir_node *node) {
2211 assert(node->op == op_CopyB);
2212 return node->attr.copyb.data_type;
2215 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2216 assert(node->op == op_CopyB && data_type);
2217 node->attr.copyb.data_type = data_type;
2222 get_InstOf_type(ir_node *node) {
2223 assert(node->op = op_InstOf);
2224 return node->attr.instof.type;
2228 set_InstOf_type(ir_node *node, ir_type *type) {
2229 assert(node->op = op_InstOf);
2230 node->attr.instof.type = type;
2234 get_InstOf_store(ir_node *node) {
2235 assert(node->op = op_InstOf);
2236 return get_irn_n(node, 0);
2240 set_InstOf_store(ir_node *node, ir_node *obj) {
2241 assert(node->op = op_InstOf);
2242 set_irn_n(node, 0, obj);
2246 get_InstOf_obj(ir_node *node) {
2247 assert(node->op = op_InstOf);
2248 return get_irn_n(node, 1);
2252 set_InstOf_obj(ir_node *node, ir_node *obj) {
2253 assert(node->op = op_InstOf);
2254 set_irn_n(node, 1, obj);
2257 /* Returns the memory input of a Raise operation. */
2259 get_Raise_mem(ir_node *node) {
2260 assert(node->op == op_Raise);
2261 return get_irn_n(node, 0);
2265 set_Raise_mem(ir_node *node, ir_node *mem) {
2266 assert(node->op == op_Raise);
2267 set_irn_n(node, 0, mem);
2271 get_Raise_exo_ptr(ir_node *node) {
2272 assert(node->op == op_Raise);
2273 return get_irn_n(node, 1);
2277 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr) {
2278 assert(node->op == op_Raise);
2279 set_irn_n(node, 1, exo_ptr);
2284 /* Returns the memory input of a Bound operation. */
2285 ir_node *get_Bound_mem(ir_node *bound) {
2286 assert(bound->op == op_Bound);
2287 return get_irn_n(bound, 0);
2290 void set_Bound_mem(ir_node *bound, ir_node *mem) {
2291 assert(bound->op == op_Bound);
2292 set_irn_n(bound, 0, mem);
2295 /* Returns the index input of a Bound operation. */
2296 ir_node *get_Bound_index(ir_node *bound) {
2297 assert(bound->op == op_Bound);
2298 return get_irn_n(bound, 1);
2301 void set_Bound_index(ir_node *bound, ir_node *idx) {
2302 assert(bound->op == op_Bound);
2303 set_irn_n(bound, 1, idx);
2306 /* Returns the lower bound input of a Bound operation. */
2307 ir_node *get_Bound_lower(ir_node *bound) {
2308 assert(bound->op == op_Bound);
2309 return get_irn_n(bound, 2);
2312 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2313 assert(bound->op == op_Bound);
2314 set_irn_n(bound, 2, lower);
2317 /* Returns the upper bound input of a Bound operation. */
2318 ir_node *get_Bound_upper(ir_node *bound) {
2319 assert(bound->op == op_Bound);
2320 return get_irn_n(bound, 3);
2323 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2324 assert(bound->op == op_Bound);
2325 set_irn_n(bound, 3, upper);
2328 /* Return the operand of a Pin node. */
2329 ir_node *get_Pin_op(const ir_node *pin) {
2330 assert(pin->op == op_Pin);
2331 return get_irn_n(pin, 0);
2334 void set_Pin_op(ir_node *pin, ir_node *node) {
2335 assert(pin->op == op_Pin);
2336 set_irn_n(pin, 0, node);
2339 /* Return the assembler text of an ASM pseudo node. */
2340 ident *get_ASM_text(const ir_node *node) {
2341 assert(node->op == op_ASM);
2342 return node->attr.assem.asm_text;
2345 /* Return the number of input constraints for an ASM node. */
2346 int get_ASM_n_input_constraints(const ir_node *node) {
2347 assert(node->op == op_ASM);
2348 return ARR_LEN(node->attr.assem.inputs);
2351 /* Return the input constraints for an ASM node. This is a flexible array. */
2352 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node) {
2353 assert(node->op == op_ASM);
2354 return node->attr.assem.inputs;
2357 /* Return the number of output constraints for an ASM node. */
2358 int get_ASM_n_output_constraints(const ir_node *node) {
2359 assert(node->op == op_ASM);
2360 return ARR_LEN(node->attr.assem.outputs);
2363 /* Return the output constraints for an ASM node. */
2364 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node) {
2365 assert(node->op == op_ASM);
2366 return node->attr.assem.outputs;
2369 /* Return the number of clobbered registers for an ASM node. */
2370 int get_ASM_n_clobbers(const ir_node *node) {
2371 assert(node->op == op_ASM);
2372 return ARR_LEN(node->attr.assem.clobber);
2375 /* Return the list of clobbered registers for an ASM node. */
2376 ident **get_ASM_clobbers(const ir_node *node) {
2377 assert(node->op == op_ASM);
2378 return node->attr.assem.clobber;
2381 /* returns the graph of a node */
2383 get_irn_irg(const ir_node *node) {
2385 * Do not use get_nodes_Block() here, because this
2386 * will check the pinned state.
2387 * However even a 'wrong' block is always in the proper
2390 if (! is_Block(node))
2391 node = get_nodes_block(node);
2392 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2393 node = get_nodes_block(node);
2394 assert(get_irn_op(node) == op_Block);
2395 return node->attr.block.irg;
2399 /*----------------------------------------------------------------*/
2400 /* Auxiliary routines */
2401 /*----------------------------------------------------------------*/
2404 skip_Proj(ir_node *node) {
2405 /* don't assert node !!! */
2410 node = get_Proj_pred(node);
2416 skip_Proj_const(const ir_node *node) {
2417 /* don't assert node !!! */
2422 node = get_Proj_pred(node);
2428 skip_Tuple(ir_node *node) {
2432 if (!get_opt_normalize()) return node;
2435 if (get_irn_op(node) == op_Proj) {
2436 pred = get_Proj_pred(node);
2437 op = get_irn_op(pred);
2440 * Looks strange but calls get_irn_op() only once
2441 * in most often cases.
2443 if (op == op_Proj) { /* nested Tuple ? */
2444 pred = skip_Tuple(pred);
2445 op = get_irn_op(pred);
2447 if (op == op_Tuple) {
2448 node = get_Tuple_pred(pred, get_Proj_proj(node));
2451 } else if (op == op_Tuple) {
2452 node = get_Tuple_pred(pred, get_Proj_proj(node));
2459 /* returns operand of node if node is a Cast */
2460 ir_node *skip_Cast(ir_node *node) {
2461 if (get_irn_op(node) == op_Cast)
2462 return get_Cast_op(node);
2466 /* returns operand of node if node is a Confirm */
2467 ir_node *skip_Confirm(ir_node *node) {
2468 if (get_irn_op(node) == op_Confirm)
2469 return get_Confirm_value(node);
2473 /* skip all high-level ops */
2474 ir_node *skip_HighLevel(ir_node *node) {
2475 if (is_op_highlevel(get_irn_op(node)))
2476 return get_irn_n(node, 0);
2481 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2482 * than any other approach, as Id chains are resolved and all point to the real node, or
2483 * all id's are self loops.
2485 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2486 * a little bit "hand optimized".
2488 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2491 skip_Id(ir_node *node) {
2493 /* don't assert node !!! */
2495 if (!node || (node->op != op_Id)) return node;
2497 /* Don't use get_Id_pred(): We get into an endless loop for
2498 self-referencing Ids. */
2499 pred = node->in[0+1];
2501 if (pred->op != op_Id) return pred;
2503 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2504 ir_node *rem_pred, *res;
2506 if (pred->op != op_Id) return pred; /* shortcut */
2509 assert(get_irn_arity (node) > 0);
2511 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2512 res = skip_Id(rem_pred);
2513 if (res->op == op_Id) /* self-loop */ return node;
2515 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2522 void skip_Id_and_store(ir_node **node) {
2525 if (!n || (n->op != op_Id)) return;
2527 /* Don't use get_Id_pred(): We get into an endless loop for
2528 self-referencing Ids. */
2533 (is_Bad)(const ir_node *node) {
2534 return _is_Bad(node);
2538 (is_NoMem)(const ir_node *node) {
2539 return _is_NoMem(node);
2543 (is_Minus)(const ir_node *node) {
2544 return _is_Minus(node);
2548 (is_Mod)(const ir_node *node) {
2549 return _is_Mod(node);
2553 (is_Div)(const ir_node *node) {
2554 return _is_Div(node);
2558 (is_DivMod)(const ir_node *node) {
2559 return _is_DivMod(node);
2563 (is_Quot)(const ir_node *node) {
2564 return _is_Quot(node);
2568 (is_Add)(const ir_node *node) {
2569 return _is_Add(node);
2573 (is_And)(const ir_node *node) {
2574 return _is_And(node);
2578 (is_Or)(const ir_node *node) {
2579 return _is_Or(node);
2583 (is_Eor)(const ir_node *node) {
2584 return _is_Eor(node);
2588 (is_Sub)(const ir_node *node) {
2589 return _is_Sub(node);
2593 (is_Not)(const ir_node *node) {
2594 return _is_Not(node);
2598 (is_Psi)(const ir_node *node) {
2599 return _is_Psi(node);
2603 (is_Tuple)(const ir_node *node) {
2604 return _is_Tuple(node);
2608 (is_Start)(const ir_node *node) {
2609 return _is_Start(node);
2613 (is_End)(const ir_node *node) {
2614 return _is_End(node);
2618 (is_Const)(const ir_node *node) {
2619 return _is_Const(node);
2623 (is_Conv)(const ir_node *node) {
2624 return _is_Conv(node);
2628 (is_no_Block)(const ir_node *node) {
2629 return _is_no_Block(node);
2633 (is_Block)(const ir_node *node) {
2634 return _is_Block(node);
2637 /* returns true if node is an Unknown node. */
2639 (is_Unknown)(const ir_node *node) {
2640 return _is_Unknown(node);
2643 /* returns true if node is a Return node. */
2645 (is_Return)(const ir_node *node) {
2646 return _is_Return(node);
2649 /* returns true if node is a Call node. */
2651 (is_Call)(const ir_node *node) {
2652 return _is_Call(node);
2655 /* returns true if node is a Sel node. */
2657 (is_Sel)(const ir_node *node) {
2658 return _is_Sel(node);
2661 /* returns true if node is a Mux node or a Psi with only one condition. */
2663 (is_Mux)(const ir_node *node) {
2664 return _is_Mux(node);
2667 /* returns true if node is a Load node. */
2669 (is_Load)(const ir_node *node) {
2670 return _is_Load(node);
2673 /* returns true if node is a Load node. */
2675 (is_Store)(const ir_node *node) {
2676 return _is_Store(node);
2679 /* returns true if node is a Sync node. */
2681 (is_Sync)(const ir_node *node) {
2682 return _is_Sync(node);
2685 /* returns true if node is a Confirm node. */
2687 (is_Confirm)(const ir_node *node) {
2688 return _is_Confirm(node);
2691 /* returns true if node is a Pin node. */
2693 (is_Pin)(const ir_node *node) {
2694 return _is_Pin(node);
2697 /* returns true if node is a SymConst node. */
2699 (is_SymConst)(const ir_node *node) {
2700 return _is_SymConst(node);
2703 /* returns true if node is a Cond node. */
2705 (is_Cond)(const ir_node *node) {
2706 return _is_Cond(node);
2710 (is_CopyB)(const ir_node *node) {
2711 return _is_CopyB(node);
2714 /* returns true if node is a Cmp node. */
2716 (is_Cmp)(const ir_node *node) {
2717 return _is_Cmp(node);
2720 /* returns true if node is an Alloc node. */
2722 (is_Alloc)(const ir_node *node) {
2723 return _is_Alloc(node);
2726 /* returns true if a node is a Jmp node. */
2728 (is_Jmp)(const ir_node *node) {
2729 return _is_Jmp(node);
2732 /* returns true if a node is a Raise node. */
2734 (is_Raise)(const ir_node *node) {
2735 return _is_Raise(node);
2738 /* returns true if a node is an ASM node. */
2740 (is_ASM)(const ir_node *node) {
2741 return _is_ASM(node);
2745 is_Proj(const ir_node *node) {
2747 return node->op == op_Proj ||
2748 (!get_interprocedural_view() && node->op == op_Filter);
2751 /* Returns true if the operation manipulates control flow. */
2753 is_cfop(const ir_node *node) {
2754 return is_cfopcode(get_irn_op(node));
2757 /* Returns true if the operation manipulates interprocedural control flow:
2758 CallBegin, EndReg, EndExcept */
2759 int is_ip_cfop(const ir_node *node) {
2760 return is_ip_cfopcode(get_irn_op(node));
2763 /* Returns true if the operation can change the control flow because
2766 is_fragile_op(const ir_node *node) {
2767 return is_op_fragile(get_irn_op(node));
2770 /* Returns the memory operand of fragile operations. */
2771 ir_node *get_fragile_op_mem(ir_node *node) {
2772 assert(node && is_fragile_op(node));
2774 switch (get_irn_opcode(node)) {
2784 return get_irn_n(node, 0);
2789 assert(0 && "should not be reached");
2794 /* Returns the result mode of a Div operation. */
2795 ir_mode *get_divop_resmod(const ir_node *node) {
2796 switch (get_irn_opcode(node)) {
2797 case iro_Quot : return get_Quot_resmode(node);
2798 case iro_DivMod: return get_DivMod_resmode(node);
2799 case iro_Div : return get_Div_resmode(node);
2800 case iro_Mod : return get_Mod_resmode(node);
2802 assert(0 && "should not be reached");
2807 /* Returns true if the operation is a forking control flow operation. */
2808 int (is_irn_forking)(const ir_node *node) {
2809 return _is_irn_forking(node);
2812 /* Return the type associated with the value produced by n
2813 * if the node remarks this type as it is the case for
2814 * Cast, Const, SymConst and some Proj nodes. */
2815 ir_type *(get_irn_type)(ir_node *node) {
2816 return _get_irn_type(node);
2819 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2821 ir_type *(get_irn_type_attr)(ir_node *node) {
2822 return _get_irn_type_attr(node);
2825 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2826 ir_entity *(get_irn_entity_attr)(ir_node *node) {
2827 return _get_irn_entity_attr(node);
2830 /* Returns non-zero for constant-like nodes. */
2831 int (is_irn_constlike)(const ir_node *node) {
2832 return _is_irn_constlike(node);
2836 * Returns non-zero for nodes that are allowed to have keep-alives and
2837 * are neither Block nor PhiM.
2839 int (is_irn_keep)(const ir_node *node) {
2840 return _is_irn_keep(node);
2844 * Returns non-zero for nodes that are always placed in the start block.
2846 int (is_irn_start_block_placed)(const ir_node *node) {
2847 return _is_irn_start_block_placed(node);
2850 /* Returns non-zero for nodes that are machine operations. */
2851 int (is_irn_machine_op)(const ir_node *node) {
2852 return _is_irn_machine_op(node);
2855 /* Returns non-zero for nodes that are machine operands. */
2856 int (is_irn_machine_operand)(const ir_node *node) {
2857 return _is_irn_machine_operand(node);
2860 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2861 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2862 return _is_irn_machine_user(node, n);
2866 /* Gets the string representation of the jump prediction .*/
2867 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
2870 case COND_JMP_PRED_NONE: return "no prediction";
2871 case COND_JMP_PRED_TRUE: return "true taken";
2872 case COND_JMP_PRED_FALSE: return "false taken";
2876 /* Returns the conditional jump prediction of a Cond node. */
2877 cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond) {
2878 return _get_Cond_jmp_pred(cond);
2881 /* Sets a new conditional jump prediction. */
2882 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2883 _set_Cond_jmp_pred(cond, pred);
2886 /** the get_type operation must be always implemented and return a firm type */
2887 static ir_type *get_Default_type(ir_node *n) {
2889 return get_unknown_type();
2892 /* Sets the get_type operation for an ir_op_ops. */
2893 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
2895 case iro_Const: ops->get_type = get_Const_type; break;
2896 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2897 case iro_Cast: ops->get_type = get_Cast_type; break;
2898 case iro_Proj: ops->get_type = get_Proj_type; break;
2900 /* not allowed to be NULL */
2901 if (! ops->get_type)
2902 ops->get_type = get_Default_type;
2908 /** Return the attribute type of a SymConst node if exists */
2909 static ir_type *get_SymConst_attr_type(ir_node *self) {
2910 symconst_kind kind = get_SymConst_kind(self);
2911 if (SYMCONST_HAS_TYPE(kind))
2912 return get_SymConst_type(self);
2916 /** Return the attribute entity of a SymConst node if exists */
2917 static ir_entity *get_SymConst_attr_entity(ir_node *self) {
2918 symconst_kind kind = get_SymConst_kind(self);
2919 if (SYMCONST_HAS_ENT(kind))
2920 return get_SymConst_entity(self);
2924 /** the get_type_attr operation must be always implemented */
2925 static ir_type *get_Null_type(ir_node *n) {
2927 return firm_unknown_type;
2930 /* Sets the get_type operation for an ir_op_ops. */
2931 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
2933 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2934 case iro_Call: ops->get_type_attr = get_Call_type; break;
2935 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2936 case iro_Free: ops->get_type_attr = get_Free_type; break;
2937 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2939 /* not allowed to be NULL */
2940 if (! ops->get_type_attr)
2941 ops->get_type_attr = get_Null_type;
2947 /** the get_entity_attr operation must be always implemented */
2948 static ir_entity *get_Null_ent(ir_node *n) {
2953 /* Sets the get_type operation for an ir_op_ops. */
2954 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
2956 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2957 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
2959 /* not allowed to be NULL */
2960 if (! ops->get_entity_attr)
2961 ops->get_entity_attr = get_Null_ent;
2967 /* Sets the debug information of a node. */
2968 void (set_irn_dbg_info)(ir_node *n, dbg_info *db) {
2969 _set_irn_dbg_info(n, db);
2973 * Returns the debug information of an node.
2975 * @param n The node.
2977 dbg_info *(get_irn_dbg_info)(const ir_node *n) {
2978 return _get_irn_dbg_info(n);
2983 #ifdef DEBUG_libfirm
2984 void dump_irn(ir_node *n) {
2985 int i, arity = get_irn_arity(n);
2986 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2988 ir_node *pred = get_nodes_block(n);
2989 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2990 get_irn_node_nr(pred), (void *)pred);
2992 printf(" preds: \n");
2993 for (i = 0; i < arity; ++i) {
2994 ir_node *pred = get_irn_n(n, i);
2995 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2996 get_irn_node_nr(pred), (void *)pred);
3000 #else /* DEBUG_libfirm */
3001 void dump_irn(ir_node *n) {}
3002 #endif /* DEBUG_libfirm */