2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * File name: ir/ir/irnode.c
23 * Purpose: Representation of an intermediate operation.
24 * Author: Martin Trapp, Christian Schaefer
25 * Modified by: Goetz Lindenmaier, Michael Beck
28 * Copyright: (c) 1998-2006 Universität Karlsruhe
41 #include "irgraph_t.h"
44 #include "irbackedge_t.h"
48 #include "iredgekinds.h"
49 #include "iredges_t.h"
54 /* some constants fixing the positions of nodes predecessors */
56 #define CALL_PARAM_OFFSET 2
57 #define FUNCCALL_PARAM_OFFSET 1
58 #define SEL_INDEX_OFFSET 2
59 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
60 #define END_KEEPALIVE_OFFSET 0
/* Names of the pn_Cmp projection numbers, indexed by the pnc value. */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the name of a pnc constant.
 *
 * @param pnc  a pn_Cmp value; must be in the range [0, 15]
 * @return a statically allocated, read-only name string
 */
const char *get_pnc_string(int pnc) {
	/* guard against out-of-range indices instead of reading past the table */
	assert(pnc >= 0 &&
	       pnc < (int)(sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0])) &&
	       "invalid pnc constant");
	return pnc_name_arr[pnc];
}
/* NOTE(review): this region is garbled by extraction -- each line carries a
   stray original line number and interior lines (the actual negation and the
   function's return/close) are missing.  Only comments added here. */
77 * Calculates the negated (Complement(R)) pnc condition.
79 int get_negated_pnc(int pnc, ir_mode *mode) {
/* presumably the negation flips the relation bits before this mask -- the
   flipping statement is not visible in this view; TODO confirm */
82 /* do NOT add the Uo bit for non-floating point values */
/* integer modes cannot compare Unordered, so the Uo bit must stay clear */
83 if (! mode_is_float(mode))
89 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
91 get_inversed_pnc(int pnc) {
92 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
93 int lesser = pnc & pn_Cmp_Lt;
94 int greater = pnc & pn_Cmp_Gt;
96 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
102 * Indicates, whether additional data can be registered to ir nodes.
103 * If set to 1, this is not possible anymore.
105 static int forbid_new_data = 0;
108 * The amount of additional space for custom data to be allocated upon
109 * creating a new node.
111 unsigned firm_add_node_size = 0;
114 /* register new space for every node */
115 unsigned register_additional_node_data(unsigned size) {
116 assert(!forbid_new_data && "Too late to register additional node data");
121 return firm_add_node_size += size;
/* NOTE(review): fragmentary extraction -- stray line-number prefixes and
   missing interior lines (declarations, braces, final return).  Comments
   only; all original tokens left untouched. */
127 /* Forbid the addition of new data to an ir node. */
132 * irnode constructor.
133 * Create a new irnode in irg, with an op, mode, arity and
134 * some incoming irnodes.
135 * If arity is negative, a node with a dynamic array is created.
138 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
139 int arity, ir_node **in)
/* total size: common header up to 'attr', plus the op-specific attribute
   size, plus any space registered via register_additional_node_data() */
142 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
146 assert(irg && op && mode);
/* allocate from the graph's obstack and zero the whole node */
147 p = obstack_alloc (irg->obst, node_size);
148 memset(p, 0, node_size);
/* the custom data registered by users lives BEFORE the ir_node itself */
149 res = (ir_node *) (p + firm_add_node_size);
151 res->kind = k_ir_node;
155 res->node_idx = irg_register_node_idx(irg, res);
/* negative arity: dynamic in-array, slot 0 reserved for the block */
160 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
/* fixed arity: obstack-allocated in-array, predecessors copied after slot 0 */
162 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
163 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
167 set_irn_dbg_info(res, db);
171 res->node_nr = get_irp_new_node_nr();
/* initialize the out-edge list heads for every edge kind */
174 for(i = 0; i < EDGE_KIND_LAST; ++i)
175 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
177 // don't put this into the for loop, arity is -1 for some nodes!
/* position -1 is the block edge; data edges follow at 0..arity-1 */
178 edges_notify_edge(res, -1, res->in[0], NULL, irg);
179 for (i = 1; i <= arity; ++i)
180 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
182 hook_new_node(irg, res);
187 /*-- getting some parameters from ir_nodes --*/
190 (is_ir_node)(const void *thing) {
191 return _is_ir_node(thing);
195 (get_irn_intra_arity)(const ir_node *node) {
196 return _get_irn_intra_arity(node);
200 (get_irn_inter_arity)(const ir_node *node) {
201 return _get_irn_inter_arity(node);
204 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
207 (get_irn_arity)(const ir_node *node) {
208 return _get_irn_arity(node);
211 /* Returns the array with ins. This array is shifted with respect to the
212 array accessed by get_irn_n: The block operand is at position 0 not -1.
213 (@@@ This should be changed.)
214 The order of the predecessors in this array is not guaranteed, except that
215 lists of operands as predecessors of Block or arguments of a Call are
218 get_irn_in(const ir_node *node) {
220 if (get_interprocedural_view()) { /* handle Filter and Block specially */
221 if (get_irn_opcode(node) == iro_Filter) {
222 assert(node->attr.filter.in_cg);
223 return node->attr.filter.in_cg;
224 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
225 return node->attr.block.in_cg;
227 /* else fall through */
/* NOTE(review): fragmentary extraction -- stray line-number prefixes and
   missing interior lines.  Comments only; original tokens untouched. */
233 set_irn_in(ir_node *node, int arity, ir_node **in) {
236 ir_graph *irg = current_ir_graph;
/* in the interprocedural view, Filter and Block keep a separate in_cg
   array that replaces the normal in-array */
238 if (get_interprocedural_view()) { /* handle Filter and Block specially */
239 if (get_irn_opcode(node) == iro_Filter) {
240 assert(node->attr.filter.in_cg);
241 arr = &node->attr.filter.in_cg;
242 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
243 arr = &node->attr.block.in_cg;
/* notify the edge module about every changed predecessor; (*arr)[i+1]
   because slot 0 of the in-array holds the block */
251 for (i = 0; i < arity; i++) {
252 if (i < ARR_LEN(*arr)-1)
253 edges_notify_edge(node, i, in[i], (*arr)[i+1], irg);
255 edges_notify_edge(node, i, in[i], NULL, irg);
/* edges beyond the new arity are deleted */
257 for(;i < ARR_LEN(*arr)-1; i++) {
258 edges_notify_edge(node, i, NULL, (*arr)[i+1], irg);
/* arity changed: reallocate the in-array, preserving the block in slot 0 */
261 if (arity != ARR_LEN(*arr) - 1) {
262 ir_node * block = (*arr)[0];
263 *arr = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
266 fix_backedges(irg->obst, node);
268 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
272 (get_irn_intra_n)(const ir_node *node, int n) {
273 return _get_irn_intra_n (node, n);
277 (get_irn_inter_n)(const ir_node *node, int n) {
278 return _get_irn_inter_n (node, n);
281 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
284 (get_irn_n)(const ir_node *node, int n) {
285 return _get_irn_n(node, n);
289 set_irn_n (ir_node *node, int n, ir_node *in) {
290 assert(node && node->kind == k_ir_node);
292 assert(n < get_irn_arity(node));
293 assert(in && in->kind == k_ir_node);
295 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
296 /* Change block pred in both views! */
297 node->in[n + 1] = in;
298 assert(node->attr.filter.in_cg);
299 node->attr.filter.in_cg[n + 1] = in;
302 if (get_interprocedural_view()) { /* handle Filter and Block specially */
303 if (get_irn_opcode(node) == iro_Filter) {
304 assert(node->attr.filter.in_cg);
305 node->attr.filter.in_cg[n + 1] = in;
307 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
308 node->attr.block.in_cg[n + 1] = in;
311 /* else fall through */
315 hook_set_irn_n(node, n, in, node->in[n + 1]);
317 /* Here, we rely on src and tgt being in the current ir graph */
318 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
320 node->in[n + 1] = in;
323 int add_irn_n(ir_node *node, ir_node *in)
326 ir_graph *irg = get_irn_irg(node);
328 assert(node->op->opar == oparity_dynamic);
329 pos = ARR_LEN(node->in) - 1;
330 ARR_APP1(ir_node *, node->in, in);
331 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
334 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
340 (get_irn_deps)(const ir_node *node)
342 return _get_irn_deps(node);
346 (get_irn_dep)(const ir_node *node, int pos)
348 return _get_irn_dep(node, pos);
352 (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
354 _set_irn_dep(node, pos, dep);
/* NOTE(review): fragmentary extraction -- stray line-number prefixes and
   missing interior lines.  Comments only; original tokens untouched. */
357 int add_irn_dep(ir_node *node, ir_node *dep)
/* lazily create the dependency array on first use */
361 if (node->deps == NULL) {
362 node->deps = NEW_ARR_F(ir_node *, 1);
/* scan for a free (NULL) slot and check whether dep is already present */
368 for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
369 if(node->deps[i] == NULL)
372 if(node->deps[i] == dep)
/* reuse a free slot if one was found, otherwise append */
376 if (first_zero >= 0) {
377 node->deps[first_zero] = dep;
380 ARR_APP1(ir_node *, node->deps, dep);
/* tell the edge module about the new dependency edge */
385 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));

/* Copies all dependency edges of src onto tgt. */
390 void add_irn_deps(ir_node *tgt, ir_node *src)
394 for(i = 0, n = get_irn_deps(src); i < n; ++i)
395 add_irn_dep(tgt, get_irn_dep(src, i));
400 (get_irn_mode)(const ir_node *node) {
401 return _get_irn_mode(node);
405 (set_irn_mode)(ir_node *node, ir_mode *mode) {
406 _set_irn_mode(node, mode);
410 get_irn_modecode(const ir_node *node) {
412 return node->mode->code;
415 /** Gets the string representation of the mode. */
417 get_irn_modename(const ir_node *node) {
419 return get_mode_name(node->mode);
423 get_irn_modeident(const ir_node *node) {
425 return get_mode_ident(node->mode);
429 (get_irn_op)(const ir_node *node) {
430 return _get_irn_op(node);
433 /* should be private to the library: */
435 (set_irn_op)(ir_node *node, ir_op *op) {
436 _set_irn_op(node, op);
440 (get_irn_opcode)(const ir_node *node) {
441 return _get_irn_opcode(node);
445 get_irn_opname(const ir_node *node) {
447 if ((get_irn_op((ir_node *)node) == op_Phi) &&
448 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
449 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
450 return get_id_str(node->op->name);
454 get_irn_opident(const ir_node *node) {
456 return node->op->name;
460 (get_irn_visited)(const ir_node *node) {
461 return _get_irn_visited(node);
465 (set_irn_visited)(ir_node *node, unsigned long visited) {
466 _set_irn_visited(node, visited);
470 (mark_irn_visited)(ir_node *node) {
471 _mark_irn_visited(node);
475 (irn_not_visited)(const ir_node *node) {
476 return _irn_not_visited(node);
480 (irn_visited)(const ir_node *node) {
481 return _irn_visited(node);
485 (set_irn_link)(ir_node *node, void *link) {
486 _set_irn_link(node, link);
490 (get_irn_link)(const ir_node *node) {
491 return _get_irn_link(node);
495 (get_irn_pinned)(const ir_node *node) {
496 return _get_irn_pinned(node);
500 (is_irn_pinned_in_irg) (const ir_node *node) {
501 return _is_irn_pinned_in_irg(node);
/* Sets the pin state of a node whose op allows a per-node pin state.
   NOTE(review): fragmentary extraction -- stray line-number prefixes and
   missing interior lines.  Comments only; original tokens untouched. */
504 void set_irn_pinned(ir_node *node, op_pin_state state) {
505 /* due to optimization an opt may be turned into a Tuple */
506 if (get_irn_op(node) == op_Tuple)
/* only ops with an exception pin state carry a per-node setting */
509 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
510 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
512 node->attr.except.pin_state = state;
515 #ifdef DO_HEAPANALYSIS
516 /* Access the abstract interpretation information of a node.
517 Returns NULL if no such information is available. */
518 struct abstval *get_irn_abst_value(ir_node *n) {
521 /* Set the abstract interpretation information of a node. */
522 void set_irn_abst_value(ir_node *n, struct abstval *os) {
525 struct section *firm_get_irn_section(ir_node *n) {
528 void firm_set_irn_section(ir_node *n, struct section *s) {
532 /* Dummies needed for firmjni. */
533 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
534 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
535 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
536 void firm_set_irn_section(ir_node *n, struct section *s) {}
537 #endif /* DO_HEAPANALYSIS */
540 /* Outputs a unique number for this node */
541 long get_irn_node_nr(const ir_node *node) {
544 return node->node_nr;
546 return (long)PTR_TO_INT(node);
551 get_irn_const_attr(ir_node *node) {
552 assert(node->op == op_Const);
553 return node->attr.con;
557 get_irn_proj_attr(ir_node *node) {
558 assert(node->op == op_Proj);
559 return node->attr.proj;
563 get_irn_alloc_attr(ir_node *node) {
564 assert(node->op == op_Alloc);
565 return node->attr.alloc;
569 get_irn_free_attr(ir_node *node) {
570 assert(node->op == op_Free);
571 return node->attr.free;
575 get_irn_symconst_attr(ir_node *node) {
576 assert(node->op == op_SymConst);
577 return node->attr.symc;
581 get_irn_call_attr(ir_node *node) {
582 assert(node->op == op_Call);
583 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
587 get_irn_sel_attr(ir_node *node) {
588 assert(node->op == op_Sel);
589 return node->attr.sel;
593 get_irn_phi_attr(ir_node *node) {
594 assert(node->op == op_Phi);
595 return node->attr.phi0_pos;
599 get_irn_block_attr(ir_node *node) {
600 assert(node->op == op_Block);
601 return node->attr.block;
605 get_irn_load_attr(ir_node *node)
607 assert(node->op == op_Load);
608 return node->attr.load;
612 get_irn_store_attr(ir_node *node)
614 assert(node->op == op_Store);
615 return node->attr.store;
619 get_irn_except_attr(ir_node *node) {
620 assert(node->op == op_Div || node->op == op_Quot ||
621 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
622 return node->attr.except;
626 get_irn_generic_attr(ir_node *node) {
630 unsigned (get_irn_idx)(const ir_node *node) {
631 assert(is_ir_node(node));
632 return _get_irn_idx(node);
635 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
637 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
638 if (get_irn_n(node, i) == arg)
644 /** manipulate fields of individual nodes **/
646 /* this works for all except Block */
648 get_nodes_block(const ir_node *node) {
649 assert(node->op != op_Block);
650 assert(is_irn_pinned_in_irg(node) && "block info may be incorrect");
651 return get_irn_n(node, -1);
655 set_nodes_block(ir_node *node, ir_node *block) {
656 assert(node->op != op_Block);
657 set_irn_n(node, -1, block);
660 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
661 * from Start. If so returns frame type, else Null. */
662 ir_type *is_frame_pointer(ir_node *n) {
663 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
664 ir_node *start = get_Proj_pred(n);
665 if (get_irn_op(start) == op_Start) {
666 return get_irg_frame_type(get_irn_irg(start));
672 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
673 * from Start. If so returns global type, else Null. */
674 ir_type *is_globals_pointer(ir_node *n) {
675 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
676 ir_node *start = get_Proj_pred(n);
677 if (get_irn_op(start) == op_Start) {
678 return get_glob_type();
684 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
685 * from Start. If so returns tls type, else Null. */
686 ir_type *is_tls_pointer(ir_node *n) {
687 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
688 ir_node *start = get_Proj_pred(n);
689 if (get_irn_op(start) == op_Start) {
690 return get_tls_type();
696 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
697 * from Start. If so returns 1, else 0. */
698 int is_value_arg_pointer(ir_node *n) {
699 if ((get_irn_op(n) == op_Proj) &&
700 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
701 (get_irn_op(get_Proj_pred(n)) == op_Start))
706 /* Returns an array with the predecessors of the Block. Depending on
707 the implementation of the graph data structure this can be a copy of
708 the internal representation of predecessors as well as the internal
709 array itself. Therefore writing to this array might obstruct the ir. */
711 get_Block_cfgpred_arr(ir_node *node) {
712 assert((node->op == op_Block));
713 return (ir_node **)&(get_irn_in(node)[1]);
717 (get_Block_n_cfgpreds)(const ir_node *node) {
718 return _get_Block_n_cfgpreds(node);
722 (get_Block_cfgpred)(ir_node *node, int pos) {
723 return _get_Block_cfgpred(node, pos);
727 set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
728 assert(node->op == op_Block);
729 set_irn_n(node, pos, pred);
733 (get_Block_cfgpred_block)(ir_node *node, int pos) {
734 return _get_Block_cfgpred_block(node, pos);
738 get_Block_matured(ir_node *node) {
739 assert(node->op == op_Block);
740 return (int)node->attr.block.matured;
744 set_Block_matured(ir_node *node, int matured) {
745 assert(node->op == op_Block);
746 node->attr.block.matured = matured;
750 (get_Block_block_visited)(const ir_node *node) {
751 return _get_Block_block_visited(node);
755 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
756 _set_Block_block_visited(node, visit);
759 /* For this current_ir_graph must be set. */
761 (mark_Block_block_visited)(ir_node *node) {
762 _mark_Block_block_visited(node);
766 (Block_not_block_visited)(const ir_node *node) {
767 return _Block_not_block_visited(node);
771 (Block_block_visited)(const ir_node *node) {
772 return _Block_block_visited(node);
776 get_Block_graph_arr (ir_node *node, int pos) {
777 assert(node->op == op_Block);
778 return node->attr.block.graph_arr[pos+1];
782 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
783 assert(node->op == op_Block);
784 node->attr.block.graph_arr[pos+1] = value;
787 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
788 assert(node->op == op_Block);
789 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
790 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
791 node->attr.block.in_cg[0] = NULL;
792 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
794 /* Fix backedge array. fix_backedges() operates depending on
795 interprocedural_view. */
796 int ipv = get_interprocedural_view();
797 set_interprocedural_view(1);
798 fix_backedges(current_ir_graph->obst, node);
799 set_interprocedural_view(ipv);
802 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
805 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
806 assert(node->op == op_Block &&
807 node->attr.block.in_cg &&
808 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
809 node->attr.block.in_cg[pos + 1] = pred;
812 ir_node **get_Block_cg_cfgpred_arr(ir_node * node) {
813 assert(node->op == op_Block);
814 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
817 int get_Block_cg_n_cfgpreds(ir_node * node) {
818 assert(node->op == op_Block);
819 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
822 ir_node *get_Block_cg_cfgpred(ir_node * node, int pos) {
823 assert(node->op == op_Block && node->attr.block.in_cg);
824 return node->attr.block.in_cg[pos + 1];
827 void remove_Block_cg_cfgpred_arr(ir_node * node) {
828 assert(node->op == op_Block);
829 node->attr.block.in_cg = NULL;
832 ir_node *(set_Block_dead)(ir_node *block) {
833 return _set_Block_dead(block);
836 int (is_Block_dead)(const ir_node *block) {
837 return _is_Block_dead(block);
840 ir_extblk *get_Block_extbb(const ir_node *block) {
842 assert(is_Block(block));
843 res = block->attr.block.extblk;
844 assert(res == NULL || is_ir_extbb(res));
848 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
849 assert(is_Block(block));
850 assert(extblk == NULL || is_ir_extbb(extblk));
851 block->attr.block.extblk = extblk;
855 get_End_n_keepalives(ir_node *end) {
856 assert(end->op == op_End);
857 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
861 get_End_keepalive(ir_node *end, int pos) {
862 assert(end->op == op_End);
863 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
867 add_End_keepalive (ir_node *end, ir_node *ka) {
868 assert(end->op == op_End);
873 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
874 assert(end->op == op_End);
875 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
878 /* Set new keep-alives */
879 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
881 ir_graph *irg = get_irn_irg(end);
883 /* notify that edges are deleted */
884 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
885 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
887 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
889 for (i = 0; i < n; ++i) {
890 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
891 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
895 /* Set new keep-alives from old keep-alives, skipping irn */
896 void remove_End_keepalive(ir_node *end, ir_node *irn) {
897 int n = get_End_n_keepalives(end);
901 NEW_ARR_A(ir_node *, in, n);
903 for (idx = i = 0; i < n; ++i) {
904 ir_node *old_ka = get_End_keepalive(end, i);
911 /* set new keep-alives */
912 set_End_keepalives(end, idx, in);
916 free_End (ir_node *end) {
917 assert(end->op == op_End);
920 end->in = NULL; /* @@@ make sure we get an error if we use the
921 in array afterwards ... */
924 /* Return the target address of an IJmp */
925 ir_node *get_IJmp_target(ir_node *ijmp) {
926 assert(ijmp->op == op_IJmp);
927 return get_irn_n(ijmp, 0);
930 /** Sets the target address of an IJmp */
931 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
932 assert(ijmp->op == op_IJmp);
933 set_irn_n(ijmp, 0, tgt);
937 > Implementing the case construct (which is where the constant Proj node is
938 > important) involves far more than simply determining the constant values.
939 > We could argue that this is more properly a function of the translator from
940 > Firm to the target machine. That could be done if there was some way of
941 > projecting "default" out of the Cond node.
942 I know it's complicated.
943 Basically there are two problems:
944 - determining the gaps between the projs
945 - determining the biggest case constant to know the proj number for
947 I see several solutions:
948 1. Introduce a ProjDefault node. Solves both problems.
949 This means to extend all optimizations executed during construction.
950 2. Give the Cond node for switch two flavors:
951 a) there are no gaps in the projs (existing flavor)
952 b) gaps may exist, default proj is still the Proj with the largest
953 projection number. This covers also the gaps.
954 3. Fix the semantic of the Cond to that of 2b)
956 Solution 2 seems to be the best:
957 Computing the gaps in the Firm representation is not too hard, i.e.,
958 libFIRM can implement a routine that transforms between the two
959 flavours. This is also possible for 1) but 2) does not require to
960 change any existing optimization.
961 Further it should be far simpler to determine the biggest constant than
963 I don't want to choose 3) as 2a) seems to have advantages for
964 dataflow analysis and 3) does not allow to convert the representation to
968 get_Cond_selector(ir_node *node) {
969 assert(node->op == op_Cond);
970 return get_irn_n(node, 0);
974 set_Cond_selector(ir_node *node, ir_node *selector) {
975 assert(node->op == op_Cond);
976 set_irn_n(node, 0, selector);
980 get_Cond_kind(ir_node *node) {
981 assert(node->op == op_Cond);
982 return node->attr.cond.kind;
986 set_Cond_kind(ir_node *node, cond_kind kind) {
987 assert(node->op == op_Cond);
988 node->attr.cond.kind = kind;
992 get_Cond_defaultProj(ir_node *node) {
993 assert(node->op == op_Cond);
994 return node->attr.cond.default_proj;
998 get_Return_mem(ir_node *node) {
999 assert(node->op == op_Return);
1000 return get_irn_n(node, 0);
1004 set_Return_mem(ir_node *node, ir_node *mem) {
1005 assert(node->op == op_Return);
1006 set_irn_n(node, 0, mem);
1010 get_Return_n_ress(ir_node *node) {
1011 assert(node->op == op_Return);
1012 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1016 get_Return_res_arr (ir_node *node) {
1017 assert((node->op == op_Return));
1018 if (get_Return_n_ress(node) > 0)
1019 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1026 set_Return_n_res(ir_node *node, int results) {
1027 assert(node->op == op_Return);
1032 get_Return_res(ir_node *node, int pos) {
1033 assert(node->op == op_Return);
1034 assert(get_Return_n_ress(node) > pos);
1035 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1039 set_Return_res(ir_node *node, int pos, ir_node *res){
1040 assert(node->op == op_Return);
1041 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1044 tarval *(get_Const_tarval)(ir_node *node) {
1045 return _get_Const_tarval(node);
1049 set_Const_tarval(ir_node *node, tarval *con) {
1050 assert(node->op == op_Const);
1051 node->attr.con.tv = con;
1054 cnst_classify_t (classify_Const)(ir_node *node) {
1055 return _classify_Const(node);
1059 /* The source language type. Must be an atomic type. Mode of type must
1060 be mode of node. For tarvals from entities type must be pointer to
1063 get_Const_type(ir_node *node) {
1064 assert(node->op == op_Const);
1065 return node->attr.con.tp;
1069 set_Const_type(ir_node *node, ir_type *tp) {
1070 assert(node->op == op_Const);
1071 if (tp != firm_unknown_type) {
1072 assert(is_atomic_type(tp));
1073 assert(get_type_mode(tp) == get_irn_mode(node));
1075 node->attr.con.tp = tp;
1080 get_SymConst_kind(const ir_node *node) {
1081 assert(node->op == op_SymConst);
1082 return node->attr.symc.num;
1086 set_SymConst_kind(ir_node *node, symconst_kind num) {
1087 assert(node->op == op_SymConst);
1088 node->attr.symc.num = num;
1092 get_SymConst_type(ir_node *node) {
1093 assert((node->op == op_SymConst) &&
1094 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1095 return node->attr.symc.sym.type_p = skip_tid(node->attr.symc.sym.type_p);
1099 set_SymConst_type(ir_node *node, ir_type *tp) {
1100 assert((node->op == op_SymConst) &&
1101 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1102 node->attr.symc.sym.type_p = tp;
1106 get_SymConst_name(ir_node *node) {
1107 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1108 return node->attr.symc.sym.ident_p;
1112 set_SymConst_name(ir_node *node, ident *name) {
1113 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1114 node->attr.symc.sym.ident_p = name;
1118 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1119 ir_entity *get_SymConst_entity(ir_node *node) {
1120 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1121 return node->attr.symc.sym.entity_p;
1124 void set_SymConst_entity(ir_node *node, ir_entity *ent) {
1125 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1126 node->attr.symc.sym.entity_p = ent;
1129 ir_enum_const *get_SymConst_enum(ir_node *node) {
1130 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1131 return node->attr.symc.sym.enum_p;
1134 void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
1135 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1136 node->attr.symc.sym.enum_p = ec;
1139 union symconst_symbol
1140 get_SymConst_symbol(ir_node *node) {
1141 assert(node->op == op_SymConst);
1142 return node->attr.symc.sym;
1146 set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
1147 assert(node->op == op_SymConst);
1148 node->attr.symc.sym = sym;
1152 get_SymConst_value_type(ir_node *node) {
1153 assert(node->op == op_SymConst);
1154 if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
1155 return node->attr.symc.tp;
1159 set_SymConst_value_type(ir_node *node, ir_type *tp) {
1160 assert(node->op == op_SymConst);
1161 node->attr.symc.tp = tp;
1165 get_Sel_mem(ir_node *node) {
1166 assert(node->op == op_Sel);
1167 return get_irn_n(node, 0);
1171 set_Sel_mem(ir_node *node, ir_node *mem) {
1172 assert(node->op == op_Sel);
1173 set_irn_n(node, 0, mem);
1177 get_Sel_ptr(ir_node *node) {
1178 assert(node->op == op_Sel);
1179 return get_irn_n(node, 1);
1183 set_Sel_ptr(ir_node *node, ir_node *ptr) {
1184 assert(node->op == op_Sel);
1185 set_irn_n(node, 1, ptr);
1189 get_Sel_n_indexs(ir_node *node) {
1190 assert(node->op == op_Sel);
1191 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1195 get_Sel_index_arr(ir_node *node) {
1196 assert((node->op == op_Sel));
1197 if (get_Sel_n_indexs(node) > 0)
1198 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1204 get_Sel_index(ir_node *node, int pos) {
1205 assert(node->op == op_Sel);
1206 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1210 set_Sel_index(ir_node *node, int pos, ir_node *index) {
1211 assert(node->op == op_Sel);
1212 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1216 get_Sel_entity(ir_node *node) {
1217 assert(node->op == op_Sel);
1218 return node->attr.sel.ent;
1222 set_Sel_entity(ir_node *node, ir_entity *ent) {
1223 assert(node->op == op_Sel);
1224 node->attr.sel.ent = ent;
1228 /* For unary and binary arithmetic operations the access to the
1229 operands can be factored out. Left is the first, right the
1230 second arithmetic value as listed in tech report 0999-33.
1231 unops are: Minus, Abs, Not, Conv, Cast
1232 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1233 Shr, Shrs, Rotate, Cmp */
1237 get_Call_mem(ir_node *node) {
1238 assert(node->op == op_Call);
1239 return get_irn_n(node, 0);
1243 set_Call_mem(ir_node *node, ir_node *mem) {
1244 assert(node->op == op_Call);
1245 set_irn_n(node, 0, mem);
1249 get_Call_ptr(ir_node *node) {
1250 assert(node->op == op_Call);
1251 return get_irn_n(node, 1);
1255 set_Call_ptr(ir_node *node, ir_node *ptr) {
1256 assert(node->op == op_Call);
1257 set_irn_n(node, 1, ptr);
1261 get_Call_param_arr(ir_node *node) {
1262 assert(node->op == op_Call);
1263 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1267 get_Call_n_params(ir_node *node) {
1268 assert(node->op == op_Call);
1269 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1273 get_Call_arity(ir_node *node) {
1274 assert(node->op == op_Call);
1275 return get_Call_n_params(node);
1279 set_Call_arity(ir_node *node, ir_node *arity) {
1280 assert(node->op == op_Call);
1285 get_Call_param(ir_node *node, int pos) {
1286 assert(node->op == op_Call);
1287 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1291 set_Call_param(ir_node *node, int pos, ir_node *param) {
1292 assert(node->op == op_Call);
1293 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1297 get_Call_type(ir_node *node) {
1298 assert(node->op == op_Call);
1299 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1303 set_Call_type(ir_node *node, ir_type *tp) {
1304 assert(node->op == op_Call);
1305 assert((get_unknown_type() == tp) || is_Method_type(tp));
1306 node->attr.call.cld_tp = tp;
1309 int Call_has_callees(ir_node *node) {
1310 assert(node && node->op == op_Call);
1311 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1312 (node->attr.call.callee_arr != NULL));
1315 int get_Call_n_callees(ir_node * node) {
1316 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1317 return ARR_LEN(node->attr.call.callee_arr);
1320 ir_entity * get_Call_callee(ir_node * node, int pos) {
1321 assert(pos >= 0 && pos < get_Call_n_callees(node));
1322 return node->attr.call.callee_arr[pos];
1325 void set_Call_callee_arr(ir_node * node, const int n, ir_entity ** arr) {
1326 assert(node->op == op_Call);
1327 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1328 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1330 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1333 void remove_Call_callee_arr(ir_node * node) {
1334 assert(node->op == op_Call);
1335 node->attr.call.callee_arr = NULL;
1338 ir_node * get_CallBegin_ptr(ir_node *node) {
1339 assert(node->op == op_CallBegin);
1340 return get_irn_n(node, 0);
1343 void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
1344 assert(node->op == op_CallBegin);
1345 set_irn_n(node, 0, ptr);
1348 ir_node * get_CallBegin_call(ir_node *node) {
1349 assert(node->op == op_CallBegin);
1350 return node->attr.callbegin.call;
1353 void set_CallBegin_call(ir_node *node, ir_node *call) {
1354 assert(node->op == op_CallBegin);
1355 node->attr.callbegin.call = call;
1360 ir_node * get_##OP##_left(ir_node *node) { \
1361 assert(node->op == op_##OP); \
1362 return get_irn_n(node, node->op->op_index); \
1364 void set_##OP##_left(ir_node *node, ir_node *left) { \
1365 assert(node->op == op_##OP); \
1366 set_irn_n(node, node->op->op_index, left); \
1368 ir_node *get_##OP##_right(ir_node *node) { \
1369 assert(node->op == op_##OP); \
1370 return get_irn_n(node, node->op->op_index + 1); \
1372 void set_##OP##_right(ir_node *node, ir_node *right) { \
1373 assert(node->op == op_##OP); \
1374 set_irn_n(node, node->op->op_index + 1, right); \
1378 ir_node *get_##OP##_op(ir_node *node) { \
1379 assert(node->op == op_##OP); \
1380 return get_irn_n(node, node->op->op_index); \
1382 void set_##OP##_op (ir_node *node, ir_node *op) { \
1383 assert(node->op == op_##OP); \
1384 set_irn_n(node, node->op->op_index, op); \
1387 #define BINOP_MEM(OP) \
1391 get_##OP##_mem(ir_node *node) { \
1392 assert(node->op == op_##OP); \
1393 return get_irn_n(node, 0); \
1397 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1398 assert(node->op == op_##OP); \
1399 set_irn_n(node, 0, mem); \
1423 int get_Conv_strict(ir_node *node) {
1424 assert(node->op == op_Conv);
1425 return node->attr.conv.strict;
1428 void set_Conv_strict(ir_node *node, int strict_flag) {
1429 assert(node->op == op_Conv);
1430 node->attr.conv.strict = (char)strict_flag;
1434 get_Cast_type(ir_node *node) {
1435 assert(node->op == op_Cast);
1436 return node->attr.cast.totype;
1440 set_Cast_type(ir_node *node, ir_type *to_tp) {
1441 assert(node->op == op_Cast);
1442 node->attr.cast.totype = to_tp;
1446 /* Checks for upcast.
1448 * Returns true if the Cast node casts a class type to a super type.
int is_Cast_upcast(ir_node *node) {
	ir_type *totype = get_Cast_type(node);
	ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
	ir_graph *myirg = get_irn_irg(node);
	/* The typeinfo type of the Cast operand is only meaningful when
	 * type information for this graph has been computed. */
	assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
	/* Strip matching levels of pointer indirection so the pointed-to
	 * class types themselves get compared below. */
	while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
		totype = get_pointer_points_to_type(totype);
		fromtype = get_pointer_points_to_type(fromtype);
	/* Only class types take part in inheritance relations. */
	if (!is_Class_type(totype)) return 0;
	/* Upcast: the source class is a subclass of the target class. */
	return is_SubClass_of(fromtype, totype);
1469 /* Checks for downcast.
1471 * Returns true if the Cast node casts a class type to a sub type.
int is_Cast_downcast(ir_node *node) {
	ir_type *totype = get_Cast_type(node);
	ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
	/* The typeinfo type of the Cast operand is only meaningful when
	 * type information for this graph has been computed. */
	assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
	/* Strip matching levels of pointer indirection so the pointed-to
	 * class types themselves get compared below. */
	while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
		totype = get_pointer_points_to_type(totype);
		fromtype = get_pointer_points_to_type(fromtype);
	/* Only class types take part in inheritance relations. */
	if (!is_Class_type(totype)) return 0;
	/* Downcast: the target class is a subclass of the source class
	 * (note the swapped argument order compared to is_Cast_upcast). */
	return is_SubClass_of(totype, fromtype);
1492 (is_unop)(const ir_node *node) {
1493 return _is_unop(node);
1497 get_unop_op(ir_node *node) {
1498 if (node->op->opar == oparity_unary)
1499 return get_irn_n(node, node->op->op_index);
1501 assert(node->op->opar == oparity_unary);
1506 set_unop_op(ir_node *node, ir_node *op) {
1507 if (node->op->opar == oparity_unary)
1508 set_irn_n(node, node->op->op_index, op);
1510 assert(node->op->opar == oparity_unary);
1514 (is_binop)(const ir_node *node) {
1515 return _is_binop(node);
1519 get_binop_left(ir_node *node) {
1520 assert(node->op->opar == oparity_binary);
1521 return get_irn_n(node, node->op->op_index);
1525 set_binop_left(ir_node *node, ir_node *left) {
1526 assert(node->op->opar == oparity_binary);
1527 set_irn_n(node, node->op->op_index, left);
1531 get_binop_right(ir_node *node) {
1532 assert(node->op->opar == oparity_binary);
1533 return get_irn_n(node, node->op->op_index + 1);
1537 set_binop_right(ir_node *node, ir_node *right) {
1538 assert(node->op->opar == oparity_binary);
1539 set_irn_n(node, node->op->op_index + 1, right);
1542 int is_Phi(const ir_node *n) {
1548 if (op == op_Filter) return get_interprocedural_view();
1551 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1552 (get_irn_arity(n) > 0));
1557 int is_Phi0(const ir_node *n) {
1560 return ((get_irn_op(n) == op_Phi) &&
1561 (get_irn_arity(n) == 0) &&
1562 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1566 get_Phi_preds_arr(ir_node *node) {
1567 assert(node->op == op_Phi);
1568 return (ir_node **)&(get_irn_in(node)[1]);
1572 get_Phi_n_preds(ir_node *node) {
1573 assert(is_Phi(node) || is_Phi0(node));
1574 return (get_irn_arity(node));
1578 void set_Phi_n_preds(ir_node *node, int n_preds) {
1579 assert(node->op == op_Phi);
1584 get_Phi_pred(ir_node *node, int pos) {
1585 assert(is_Phi(node) || is_Phi0(node));
1586 return get_irn_n(node, pos);
1590 set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
1591 assert(is_Phi(node) || is_Phi0(node));
1592 set_irn_n(node, pos, pred);
1596 int is_memop(ir_node *node) {
1597 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1600 ir_node *get_memop_mem(ir_node *node) {
1601 assert(is_memop(node));
1602 return get_irn_n(node, 0);
1605 void set_memop_mem(ir_node *node, ir_node *mem) {
1606 assert(is_memop(node));
1607 set_irn_n(node, 0, mem);
1610 ir_node *get_memop_ptr(ir_node *node) {
1611 assert(is_memop(node));
1612 return get_irn_n(node, 1);
1615 void set_memop_ptr(ir_node *node, ir_node *ptr) {
1616 assert(is_memop(node));
1617 set_irn_n(node, 1, ptr);
1621 get_Load_mem(ir_node *node) {
1622 assert(node->op == op_Load);
1623 return get_irn_n(node, 0);
1627 set_Load_mem(ir_node *node, ir_node *mem) {
1628 assert(node->op == op_Load);
1629 set_irn_n(node, 0, mem);
1633 get_Load_ptr(ir_node *node) {
1634 assert(node->op == op_Load);
1635 return get_irn_n(node, 1);
1639 set_Load_ptr(ir_node *node, ir_node *ptr) {
1640 assert(node->op == op_Load);
1641 set_irn_n(node, 1, ptr);
1645 get_Load_mode(ir_node *node) {
1646 assert(node->op == op_Load);
1647 return node->attr.load.load_mode;
1651 set_Load_mode(ir_node *node, ir_mode *mode) {
1652 assert(node->op == op_Load);
1653 node->attr.load.load_mode = mode;
1657 get_Load_volatility(ir_node *node) {
1658 assert(node->op == op_Load);
1659 return node->attr.load.volatility;
1663 set_Load_volatility(ir_node *node, ir_volatility volatility) {
1664 assert(node->op == op_Load);
1665 node->attr.load.volatility = volatility;
1670 get_Store_mem(ir_node *node) {
1671 assert(node->op == op_Store);
1672 return get_irn_n(node, 0);
1676 set_Store_mem(ir_node *node, ir_node *mem) {
1677 assert(node->op == op_Store);
1678 set_irn_n(node, 0, mem);
1682 get_Store_ptr(ir_node *node) {
1683 assert(node->op == op_Store);
1684 return get_irn_n(node, 1);
1688 set_Store_ptr(ir_node *node, ir_node *ptr) {
1689 assert(node->op == op_Store);
1690 set_irn_n(node, 1, ptr);
1694 get_Store_value(ir_node *node) {
1695 assert(node->op == op_Store);
1696 return get_irn_n(node, 2);
1700 set_Store_value(ir_node *node, ir_node *value) {
1701 assert(node->op == op_Store);
1702 set_irn_n(node, 2, value);
1706 get_Store_volatility(ir_node *node) {
1707 assert(node->op == op_Store);
1708 return node->attr.store.volatility;
1712 set_Store_volatility(ir_node *node, ir_volatility volatility) {
1713 assert(node->op == op_Store);
1714 node->attr.store.volatility = volatility;
1719 get_Alloc_mem(ir_node *node) {
1720 assert(node->op == op_Alloc);
1721 return get_irn_n(node, 0);
1725 set_Alloc_mem(ir_node *node, ir_node *mem) {
1726 assert(node->op == op_Alloc);
1727 set_irn_n(node, 0, mem);
1731 get_Alloc_size(ir_node *node) {
1732 assert(node->op == op_Alloc);
1733 return get_irn_n(node, 1);
1737 set_Alloc_size(ir_node *node, ir_node *size) {
1738 assert(node->op == op_Alloc);
1739 set_irn_n(node, 1, size);
1743 get_Alloc_type(ir_node *node) {
1744 assert(node->op == op_Alloc);
1745 return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
1749 set_Alloc_type(ir_node *node, ir_type *tp) {
1750 assert(node->op == op_Alloc);
1751 node->attr.alloc.type = tp;
1755 get_Alloc_where(ir_node *node) {
1756 assert(node->op == op_Alloc);
1757 return node->attr.alloc.where;
1761 set_Alloc_where(ir_node *node, where_alloc where) {
1762 assert(node->op == op_Alloc);
1763 node->attr.alloc.where = where;
1768 get_Free_mem(ir_node *node) {
1769 assert(node->op == op_Free);
1770 return get_irn_n(node, 0);
1774 set_Free_mem(ir_node *node, ir_node *mem) {
1775 assert(node->op == op_Free);
1776 set_irn_n(node, 0, mem);
1780 get_Free_ptr(ir_node *node) {
1781 assert(node->op == op_Free);
1782 return get_irn_n(node, 1);
1786 set_Free_ptr(ir_node *node, ir_node *ptr) {
1787 assert(node->op == op_Free);
1788 set_irn_n(node, 1, ptr);
1792 get_Free_size(ir_node *node) {
1793 assert(node->op == op_Free);
1794 return get_irn_n(node, 2);
1798 set_Free_size(ir_node *node, ir_node *size) {
1799 assert(node->op == op_Free);
1800 set_irn_n(node, 2, size);
1804 get_Free_type(ir_node *node) {
1805 assert(node->op == op_Free);
1806 return node->attr.free.type = skip_tid(node->attr.free.type);
1810 set_Free_type(ir_node *node, ir_type *tp) {
1811 assert(node->op == op_Free);
1812 node->attr.free.type = tp;
1816 get_Free_where(ir_node *node) {
1817 assert(node->op == op_Free);
1818 return node->attr.free.where;
1822 set_Free_where(ir_node *node, where_alloc where) {
1823 assert(node->op == op_Free);
1824 node->attr.free.where = where;
1827 ir_node **get_Sync_preds_arr(ir_node *node) {
1828 assert(node->op == op_Sync);
1829 return (ir_node **)&(get_irn_in(node)[1]);
1832 int get_Sync_n_preds(ir_node *node) {
1833 assert(node->op == op_Sync);
1834 return (get_irn_arity(node));
1838 void set_Sync_n_preds(ir_node *node, int n_preds) {
1839 assert(node->op == op_Sync);
1843 ir_node *get_Sync_pred(ir_node *node, int pos) {
1844 assert(node->op == op_Sync);
1845 return get_irn_n(node, pos);
1848 void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
1849 assert(node->op == op_Sync);
1850 set_irn_n(node, pos, pred);
1853 /* Add a new Sync predecessor */
1854 void add_Sync_pred(ir_node *node, ir_node *pred) {
1855 assert(node->op == op_Sync);
1856 add_irn_n(node, pred);
1859 /* Returns the source language type of a Proj node. */
1860 ir_type *get_Proj_type(ir_node *n) {
1861 ir_type *tp = firm_unknown_type;
1862 ir_node *pred = get_Proj_pred(n);
1864 switch (get_irn_opcode(pred)) {
1867 /* Deal with Start / Call here: we need to know the Proj Nr. */
1868 assert(get_irn_mode(pred) == mode_T);
1869 pred_pred = get_Proj_pred(pred);
1870 if (get_irn_op(pred_pred) == op_Start) {
1871 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1872 tp = get_method_param_type(mtp, get_Proj_proj(n));
1873 } else if (get_irn_op(pred_pred) == op_Call) {
1874 ir_type *mtp = get_Call_type(pred_pred);
1875 tp = get_method_res_type(mtp, get_Proj_proj(n));
1878 case iro_Start: break;
1879 case iro_Call: break;
1881 ir_node *a = get_Load_ptr(pred);
1883 tp = get_entity_type(get_Sel_entity(a));
1892 get_Proj_pred(const ir_node *node) {
1893 assert(is_Proj(node));
1894 return get_irn_n(node, 0);
1898 set_Proj_pred(ir_node *node, ir_node *pred) {
1899 assert(is_Proj(node));
1900 set_irn_n(node, 0, pred);
1903 long get_VProj_proj(const ir_node *node)
1905 return node->attr.proj;
1908 void set_VProj_proj(ir_node *node, long value)
1910 node->attr.proj = value;
1914 get_Proj_proj(const ir_node *node) {
1915 assert(is_Proj(node));
1916 if (get_irn_opcode(node) == iro_Proj) {
1917 return node->attr.proj;
1919 assert(get_irn_opcode(node) == iro_Filter);
1920 return node->attr.filter.proj;
1925 set_Proj_proj(ir_node *node, long proj) {
1926 assert(node->op == op_Proj);
1927 node->attr.proj = proj;
1931 get_Tuple_preds_arr(ir_node *node) {
1932 assert(node->op == op_Tuple);
1933 return (ir_node **)&(get_irn_in(node)[1]);
1937 get_Tuple_n_preds(ir_node *node) {
1938 assert(node->op == op_Tuple);
1939 return (get_irn_arity(node));
1944 set_Tuple_n_preds(ir_node *node, int n_preds) {
1945 assert(node->op == op_Tuple);
1950 get_Tuple_pred (ir_node *node, int pos) {
1951 assert(node->op == op_Tuple);
1952 return get_irn_n(node, pos);
1956 set_Tuple_pred(ir_node *node, int pos, ir_node *pred) {
1957 assert(node->op == op_Tuple);
1958 set_irn_n(node, pos, pred);
1962 get_Id_pred(ir_node *node) {
1963 assert(node->op == op_Id);
1964 return get_irn_n(node, 0);
1968 set_Id_pred(ir_node *node, ir_node *pred) {
1969 assert(node->op == op_Id);
1970 set_irn_n(node, 0, pred);
1973 ir_node *get_Confirm_value(ir_node *node) {
1974 assert(node->op == op_Confirm);
1975 return get_irn_n(node, 0);
1978 void set_Confirm_value(ir_node *node, ir_node *value) {
1979 assert(node->op == op_Confirm);
1980 set_irn_n(node, 0, value);
1983 ir_node *get_Confirm_bound(ir_node *node) {
1984 assert(node->op == op_Confirm);
1985 return get_irn_n(node, 1);
1988 void set_Confirm_bound(ir_node *node, ir_node *bound) {
1989 assert(node->op == op_Confirm);
1990 set_irn_n(node, 0, bound);
1993 pn_Cmp get_Confirm_cmp(ir_node *node) {
1994 assert(node->op == op_Confirm);
1995 return node->attr.confirm_cmp;
1998 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
1999 assert(node->op == op_Confirm);
2000 node->attr.confirm_cmp = cmp;
2005 get_Filter_pred(ir_node *node) {
2006 assert(node->op == op_Filter);
2011 set_Filter_pred(ir_node *node, ir_node *pred) {
2012 assert(node->op == op_Filter);
2017 get_Filter_proj(ir_node *node) {
2018 assert(node->op == op_Filter);
2019 return node->attr.filter.proj;
2023 set_Filter_proj(ir_node *node, long proj) {
2024 assert(node->op == op_Filter);
2025 node->attr.filter.proj = proj;
2028 /* Don't use get_irn_arity, get_irn_n in implementation as access
2029 shall work independent of view!!! */
2030 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
2031 assert(node->op == op_Filter);
2032 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2033 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2034 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
2035 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
2036 node->attr.filter.in_cg[0] = node->in[0];
2038 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2041 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2042 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2043 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2044 node->attr.filter.in_cg[pos + 1] = pred;
2047 int get_Filter_n_cg_preds(ir_node *node) {
2048 assert(node->op == op_Filter && node->attr.filter.in_cg);
2049 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2052 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2054 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2056 arity = ARR_LEN(node->attr.filter.in_cg);
2057 assert(pos < arity - 1);
2058 return node->attr.filter.in_cg[pos + 1];
2062 ir_node *get_Mux_sel(ir_node *node) {
2063 if (node->op == op_Psi) {
2064 assert(get_irn_arity(node) == 3);
2065 return get_Psi_cond(node, 0);
2067 assert(node->op == op_Mux);
2071 void set_Mux_sel(ir_node *node, ir_node *sel) {
2072 if (node->op == op_Psi) {
2073 assert(get_irn_arity(node) == 3);
2074 set_Psi_cond(node, 0, sel);
2076 assert(node->op == op_Mux);
2081 ir_node *get_Mux_false(ir_node *node) {
2082 if (node->op == op_Psi) {
2083 assert(get_irn_arity(node) == 3);
2084 return get_Psi_default(node);
2086 assert(node->op == op_Mux);
2090 void set_Mux_false(ir_node *node, ir_node *ir_false) {
2091 if (node->op == op_Psi) {
2092 assert(get_irn_arity(node) == 3);
2093 set_Psi_default(node, ir_false);
2095 assert(node->op == op_Mux);
2096 node->in[2] = ir_false;
2100 ir_node *get_Mux_true(ir_node *node) {
2101 if (node->op == op_Psi) {
2102 assert(get_irn_arity(node) == 3);
2103 return get_Psi_val(node, 0);
2105 assert(node->op == op_Mux);
2109 void set_Mux_true(ir_node *node, ir_node *ir_true) {
2110 if (node->op == op_Psi) {
2111 assert(get_irn_arity(node) == 3);
2112 set_Psi_val(node, 0, ir_true);
2114 assert(node->op == op_Mux);
2115 node->in[3] = ir_true;
2120 ir_node *get_Psi_cond(ir_node *node, int pos) {
2121 int num_conds = get_Psi_n_conds(node);
2122 assert(node->op == op_Psi);
2123 assert(pos < num_conds);
2124 return get_irn_n(node, 2 * pos);
2127 void set_Psi_cond(ir_node *node, int pos, ir_node *cond) {
2128 int num_conds = get_Psi_n_conds(node);
2129 assert(node->op == op_Psi);
2130 assert(pos < num_conds);
2131 set_irn_n(node, 2 * pos, cond);
2134 ir_node *get_Psi_val(ir_node *node, int pos) {
2135 int num_vals = get_Psi_n_conds(node);
2136 assert(node->op == op_Psi);
2137 assert(pos < num_vals);
2138 return get_irn_n(node, 2 * pos + 1);
2141 void set_Psi_val(ir_node *node, int pos, ir_node *val) {
2142 int num_vals = get_Psi_n_conds(node);
2143 assert(node->op == op_Psi);
2144 assert(pos < num_vals);
2145 set_irn_n(node, 2 * pos + 1, val);
2148 ir_node *get_Psi_default(ir_node *node) {
2149 int def_pos = get_irn_arity(node) - 1;
2150 assert(node->op == op_Psi);
2151 return get_irn_n(node, def_pos);
2154 void set_Psi_default(ir_node *node, ir_node *val) {
2155 int def_pos = get_irn_arity(node);
2156 assert(node->op == op_Psi);
2157 set_irn_n(node, def_pos, val);
2160 int (get_Psi_n_conds)(ir_node *node) {
2161 return _get_Psi_n_conds(node);
2165 ir_node *get_CopyB_mem(ir_node *node) {
2166 assert(node->op == op_CopyB);
2167 return get_irn_n(node, 0);
2170 void set_CopyB_mem(ir_node *node, ir_node *mem) {
2171 assert(node->op == op_CopyB);
2172 set_irn_n(node, 0, mem);
2175 ir_node *get_CopyB_dst(ir_node *node) {
2176 assert(node->op == op_CopyB);
2177 return get_irn_n(node, 1);
2180 void set_CopyB_dst(ir_node *node, ir_node *dst) {
2181 assert(node->op == op_CopyB);
2182 set_irn_n(node, 1, dst);
2185 ir_node *get_CopyB_src (ir_node *node) {
2186 assert(node->op == op_CopyB);
2187 return get_irn_n(node, 2);
2190 void set_CopyB_src(ir_node *node, ir_node *src) {
2191 assert(node->op == op_CopyB);
2192 set_irn_n(node, 2, src);
2195 ir_type *get_CopyB_type(ir_node *node) {
2196 assert(node->op == op_CopyB);
2197 return node->attr.copyb.data_type;
2200 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2201 assert(node->op == op_CopyB && data_type);
2202 node->attr.copyb.data_type = data_type;
2207 get_InstOf_type(ir_node *node) {
2208 assert(node->op = op_InstOf);
2209 return node->attr.instof.type;
2213 set_InstOf_type(ir_node *node, ir_type *type) {
2214 assert(node->op = op_InstOf);
2215 node->attr.instof.type = type;
2219 get_InstOf_store(ir_node *node) {
2220 assert(node->op = op_InstOf);
2221 return get_irn_n(node, 0);
2225 set_InstOf_store(ir_node *node, ir_node *obj) {
2226 assert(node->op = op_InstOf);
2227 set_irn_n(node, 0, obj);
2231 get_InstOf_obj(ir_node *node) {
2232 assert(node->op = op_InstOf);
2233 return get_irn_n(node, 1);
2237 set_InstOf_obj(ir_node *node, ir_node *obj) {
2238 assert(node->op = op_InstOf);
2239 set_irn_n(node, 1, obj);
2242 /* Returns the memory input of a Raise operation. */
2244 get_Raise_mem(ir_node *node) {
2245 assert(node->op == op_Raise);
2246 return get_irn_n(node, 0);
2250 set_Raise_mem(ir_node *node, ir_node *mem) {
2251 assert(node->op == op_Raise);
2252 set_irn_n(node, 0, mem);
2256 get_Raise_exo_ptr(ir_node *node) {
2257 assert(node->op == op_Raise);
2258 return get_irn_n(node, 1);
2262 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr) {
2263 assert(node->op == op_Raise);
2264 set_irn_n(node, 1, exo_ptr);
2269 /* Returns the memory input of a Bound operation. */
2270 ir_node *get_Bound_mem(ir_node *bound) {
2271 assert(bound->op == op_Bound);
2272 return get_irn_n(bound, 0);
2275 void set_Bound_mem(ir_node *bound, ir_node *mem) {
2276 assert(bound->op == op_Bound);
2277 set_irn_n(bound, 0, mem);
2280 /* Returns the index input of a Bound operation. */
2281 ir_node *get_Bound_index(ir_node *bound) {
2282 assert(bound->op == op_Bound);
2283 return get_irn_n(bound, 1);
2286 void set_Bound_index(ir_node *bound, ir_node *idx) {
2287 assert(bound->op == op_Bound);
2288 set_irn_n(bound, 1, idx);
2291 /* Returns the lower bound input of a Bound operation. */
2292 ir_node *get_Bound_lower(ir_node *bound) {
2293 assert(bound->op == op_Bound);
2294 return get_irn_n(bound, 2);
2297 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2298 assert(bound->op == op_Bound);
2299 set_irn_n(bound, 2, lower);
2302 /* Returns the upper bound input of a Bound operation. */
2303 ir_node *get_Bound_upper(ir_node *bound) {
2304 assert(bound->op == op_Bound);
2305 return get_irn_n(bound, 3);
2308 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2309 assert(bound->op == op_Bound);
2310 set_irn_n(bound, 3, upper);
2313 /* Return the operand of a Pin node. */
2314 ir_node *get_Pin_op(ir_node *pin) {
2315 assert(pin->op == op_Pin);
2316 return get_irn_n(pin, 0);
2319 void set_Pin_op(ir_node *pin, ir_node *node) {
2320 assert(pin->op == op_Pin);
2321 set_irn_n(pin, 0, node);
2325 /* returns the graph of a node */
2327 get_irn_irg(const ir_node *node) {
2329 * Do not use get_nodes_Block() here, because this
2330 * will check the pinned state.
2331 * However even a 'wrong' block is always in the proper
2334 if (! is_Block(node))
2335 node = get_irn_n(node, -1);
2336 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2337 node = get_irn_n(node, -1);
2338 assert(get_irn_op(node) == op_Block);
2339 return node->attr.block.irg;
2343 /*----------------------------------------------------------------*/
2344 /* Auxiliary routines */
2345 /*----------------------------------------------------------------*/
2348 skip_Proj(ir_node *node) {
2349 /* don't assert node !!! */
2354 node = get_Proj_pred(node);
2360 skip_Proj_const(const ir_node *node) {
2361 /* don't assert node !!! */
2366 node = get_Proj_pred(node);
2372 skip_Tuple(ir_node *node) {
2376 if (!get_opt_normalize()) return node;
2379 if (get_irn_op(node) == op_Proj) {
2380 pred = get_Proj_pred(node);
2381 op = get_irn_op(pred);
2384 * Looks strange but calls get_irn_op() only once
2385 * in most often cases.
2387 if (op == op_Proj) { /* nested Tuple ? */
2388 pred = skip_Tuple(pred);
2389 op = get_irn_op(pred);
2391 if (op == op_Tuple) {
2392 node = get_Tuple_pred(pred, get_Proj_proj(node));
2395 } else if (op == op_Tuple) {
2396 node = get_Tuple_pred(pred, get_Proj_proj(node));
2403 /* returns operand of node if node is a Cast */
2404 ir_node *skip_Cast(ir_node *node) {
2405 if (get_irn_op(node) == op_Cast)
2406 return get_Cast_op(node);
2410 /* returns operand of node if node is a Confirm */
2411 ir_node *skip_Confirm(ir_node *node) {
2412 if (get_irn_op(node) == op_Confirm)
2413 return get_Confirm_value(node);
2417 /* skip all high-level ops */
2418 ir_node *skip_HighLevel(ir_node *node) {
2419 if (is_op_highlevel(get_irn_op(node)))
2420 return get_irn_n(node, 0);
2425 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2426 * than any other approach, as Id chains are resolved and all point to the real node, or
2427 * all id's are self loops.
2429 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2430 * a little bit "hand optimized".
2432 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2435 skip_Id(ir_node *node) {
2437 /* don't assert node !!! */
2439 if (!node || (node->op != op_Id)) return node;
2441 /* Don't use get_Id_pred(): We get into an endless loop for
2442 self-referencing Ids. */
2443 pred = node->in[0+1];
2445 if (pred->op != op_Id) return pred;
2447 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2448 ir_node *rem_pred, *res;
2450 if (pred->op != op_Id) return pred; /* shortcut */
2453 assert(get_irn_arity (node) > 0);
2455 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2456 res = skip_Id(rem_pred);
2457 if (res->op == op_Id) /* self-loop */ return node;
2459 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2466 void skip_Id_and_store(ir_node **node) {
2469 if (!n || (n->op != op_Id)) return;
2471 /* Don't use get_Id_pred(): We get into an endless loop for
2472 self-referencing Ids. */
2477 (is_Bad)(const ir_node *node) {
2478 return _is_Bad(node);
2482 (is_NoMem)(const ir_node *node) {
2483 return _is_NoMem(node);
2487 (is_Mod)(const ir_node *node) {
2488 return _is_Mod(node);
2492 (is_Div)(const ir_node *node) {
2493 return _is_Div(node);
2497 (is_DivMod)(const ir_node *node) {
2498 return _is_DivMod(node);
2502 (is_Quot)(const ir_node *node) {
2503 return _is_Quot(node);
2507 (is_Start)(const ir_node *node) {
2508 return _is_Start(node);
2512 (is_End)(const ir_node *node) {
2513 return _is_End(node);
2517 (is_Const)(const ir_node *node) {
2518 return _is_Const(node);
2522 (is_no_Block)(const ir_node *node) {
2523 return _is_no_Block(node);
2527 (is_Block)(const ir_node *node) {
2528 return _is_Block(node);
2531 /* returns true if node is an Unknown node. */
2533 (is_Unknown)(const ir_node *node) {
2534 return _is_Unknown(node);
2537 /* returns true if node is a Return node. */
2539 (is_Return)(const ir_node *node) {
2540 return _is_Return(node);
2543 /* returns true if node is a Call node. */
2545 (is_Call)(const ir_node *node) {
2546 return _is_Call(node);
2549 /* returns true if node is a Sel node. */
2551 (is_Sel)(const ir_node *node) {
2552 return _is_Sel(node);
2555 /* returns true if node is a Mux node or a Psi with only one condition. */
2557 (is_Mux)(const ir_node *node) {
2558 return _is_Mux(node);
2561 /* returns true if node is a Load node. */
2563 (is_Load)(const ir_node *node) {
2564 return _is_Load(node);
2567 /* returns true if node is a Load node. */
2569 (is_Store)(const ir_node *node) {
2570 return _is_Store(node);
2573 /* returns true if node is a Sync node. */
2575 (is_Sync)(const ir_node *node) {
2576 return _is_Sync(node);
2579 /* returns true if node is a Confirm node. */
2581 (is_Confirm)(const ir_node *node) {
2582 return _is_Confirm(node);
2585 /* returns true if node is a Pin node. */
2587 (is_Pin)(const ir_node *node) {
2588 return _is_Pin(node);
2591 /* returns true if node is a SymConst node. */
2593 (is_SymConst)(const ir_node *node) {
2594 return _is_SymConst(node);
2597 /* returns true if node is a Cond node. */
2599 (is_Cond)(const ir_node *node) {
2600 return _is_Cond(node);
2604 (is_CopyB)(const ir_node *node) {
2605 return _is_CopyB(node);
2608 /* returns true if node is a Cmp node. */
2610 (is_Cmp)(const ir_node *node) {
2611 return _is_Cmp(node);
2614 /* returns true if node is an Alloc node. */
2616 (is_Alloc)(const ir_node *node) {
2617 return _is_Alloc(node);
2620 /* returns true if a node is a Jmp node. */
2622 (is_Jmp)(const ir_node *node) {
2623 return _is_Jmp(node);
2626 /* returns true if a node is a Raise node. */
2628 (is_Raise)(const ir_node *node) {
2629 return _is_Raise(node);
2633 is_Proj(const ir_node *node) {
2635 return node->op == op_Proj ||
2636 (!get_interprocedural_view() && node->op == op_Filter);
2639 /* Returns true if the operation manipulates control flow. */
2641 is_cfop(const ir_node *node) {
2642 return is_cfopcode(get_irn_op(node));
2645 /* Returns true if the operation manipulates interprocedural control flow:
2646 CallBegin, EndReg, EndExcept */
2647 int is_ip_cfop(const ir_node *node) {
2648 return is_ip_cfopcode(get_irn_op(node));
2651 /* Returns true if the operation can change the control flow because
2654 is_fragile_op(const ir_node *node) {
2655 return is_op_fragile(get_irn_op(node));
2658 /* Returns the memory operand of fragile operations. */
2659 ir_node *get_fragile_op_mem(ir_node *node) {
2660 assert(node && is_fragile_op(node));
2662 switch (get_irn_opcode (node)) {
2672 return get_irn_n(node, 0);
2677 assert(0 && "should not be reached");
2682 /* Returns true if the operation is a forking control flow operation. */
2683 int (is_irn_forking)(const ir_node *node) {
2684 return _is_irn_forking(node);
2687 /* Return the type associated with the value produced by n
2688 * if the node remarks this type as it is the case for
2689 * Cast, Const, SymConst and some Proj nodes. */
2690 ir_type *(get_irn_type)(ir_node *node) {
2691 return _get_irn_type(node);
2694 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2696 ir_type *(get_irn_type_attr)(ir_node *node) {
2697 return _get_irn_type_attr(node);
2700 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2701 ir_entity *(get_irn_entity_attr)(ir_node *node) {
2702 return _get_irn_entity_attr(node);
2705 /* Returns non-zero for constant-like nodes. */
2706 int (is_irn_constlike)(const ir_node *node) {
2707 return _is_irn_constlike(node);
2711 * Returns non-zero for nodes that are allowed to have keep-alives and
2712 * are neither Block nor PhiM.
2714 int (is_irn_keep)(const ir_node *node) {
2715 return _is_irn_keep(node);
2719 * Returns non-zero for nodes that are always placed in the start block.
2721 int (is_irn_start_block_placed)(const ir_node *node) {
2722 return _is_irn_start_block_placed(node);
2725 /* Returns non-zero for nodes that are machine operations. */
2726 int (is_irn_machine_op)(const ir_node *node) {
2727 return _is_irn_machine_op(node);
2730 /* Returns non-zero for nodes that are machine operands. */
2731 int (is_irn_machine_operand)(const ir_node *node) {
2732 return _is_irn_machine_operand(node);
2735 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2736 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2737 return _is_irn_machine_user(node, n);
2741 /* Gets the string representation of the jump prediction .*/
2742 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
2745 case COND_JMP_PRED_NONE: return "no prediction";
2746 case COND_JMP_PRED_TRUE: return "true taken";
2747 case COND_JMP_PRED_FALSE: return "false taken";
2751 /* Returns the conditional jump prediction of a Cond node. */
2752 cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
2753 return _get_Cond_jmp_pred(cond);
2756 /* Sets a new conditional jump prediction. */
2757 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2758 _set_Cond_jmp_pred(cond, pred);
2761 /** the get_type operation must be always implemented and return a firm type */
2762 static ir_type *get_Default_type(ir_node *n) {
2763 return get_unknown_type();
2766 /* Sets the get_type operation for an ir_op_ops. */
2767 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
2769 case iro_Const: ops->get_type = get_Const_type; break;
2770 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2771 case iro_Cast: ops->get_type = get_Cast_type; break;
2772 case iro_Proj: ops->get_type = get_Proj_type; break;
2774 /* not allowed to be NULL */
2775 if (! ops->get_type)
2776 ops->get_type = get_Default_type;
2782 /** Return the attribute type of a SymConst node if exists */
2783 static ir_type *get_SymConst_attr_type(ir_node *self) {
2784 symconst_kind kind = get_SymConst_kind(self);
2785 if (SYMCONST_HAS_TYPE(kind))
2786 return get_SymConst_type(self);
2790 /** Return the attribute entity of a SymConst node if exists */
2791 static ir_entity *get_SymConst_attr_entity(ir_node *self) {
2792 symconst_kind kind = get_SymConst_kind(self);
2793 if (SYMCONST_HAS_ENT(kind))
2794 return get_SymConst_entity(self);
2798 /** the get_type_attr operation must be always implemented */
2799 static ir_type *get_Null_type(ir_node *n) {
2800 return firm_unknown_type;
2803 /* Sets the get_type operation for an ir_op_ops. */
2804 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
2806 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2807 case iro_Call: ops->get_type_attr = get_Call_type; break;
2808 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2809 case iro_Free: ops->get_type_attr = get_Free_type; break;
2810 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2812 /* not allowed to be NULL */
2813 if (! ops->get_type_attr)
2814 ops->get_type_attr = get_Null_type;
2820 /** the get_entity_attr operation must be always implemented */
2821 static ir_entity *get_Null_ent(ir_node *n) {
2825 /* Sets the get_type operation for an ir_op_ops. */
2826 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
2828 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2829 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
2831 /* not allowed to be NULL */
2832 if (! ops->get_entity_attr)
2833 ops->get_entity_attr = get_Null_ent;
2839 #ifdef DEBUG_libfirm
2840 void dump_irn(ir_node *n) {
2841 int i, arity = get_irn_arity(n);
2842 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2844 ir_node *pred = get_irn_n(n, -1);
2845 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2846 get_irn_node_nr(pred), (void *)pred);
2848 printf(" preds: \n");
2849 for (i = 0; i < arity; ++i) {
2850 ir_node *pred = get_irn_n(n, i);
2851 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2852 get_irn_node_nr(pred), (void *)pred);
2856 #else /* DEBUG_libfirm */
2857 void dump_irn(ir_node *n) {}
2858 #endif /* DEBUG_libfirm */