2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
48 /* some constants fixing the positions of nodes predecessors
50 #define CALL_PARAM_OFFSET 2 /* Call: in[0] = mem, in[1] = callee ptr, params follow */
51 #define BUILDIN_PARAM_OFFSET 1 /* Builtin: in[0] = mem, params follow ("BUILDIN" is a historical misspelling of BUILTIN, kept for API compatibility) */
52 #define SEL_INDEX_OFFSET 2 /* Sel: in[0] = mem, in[1] = ptr, index operands follow */
53 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
54 #define END_KEEPALIVE_OFFSET 0 /* End: every operand is a keep-alive */
/* Textual names of the pn_Cmp relation constants, indexed by pn_Cmp value
 * (see get_pnc_string() below, which bounds-checks against this table). */
56 static const char *pnc_name_arr [] = {
57 "pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
58 "pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
59 "pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
60 "pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
64 * returns the pnc name from an pnc constant
66 const char *get_pnc_string(int pnc)
68 assert(pnc >= 0 && pnc <
69 (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
70 return pnc_name_arr[pnc];
74 * Calculates the negated (Complement(R)) pnc condition.
76 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode)
/* NOTE(review): body partially elided in this view — presumably complements
 * the relation bits of pnc; for float modes the unordered (Uo) bit
 * participates in the complement, for integer modes it must not (see below).
 * Confirm against the full source. */
80 /* do NOT add the Uo bit for non-floating point values */
81 if (! mode_is_float(mode))
87 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
88 pn_Cmp get_inversed_pnc(long pnc)
90 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
91 long lesser = pnc & pn_Cmp_Lt;
92 long greater = pnc & pn_Cmp_Gt;
94 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
100 * Indicates, whether additional data can be registered to ir nodes.
101 * If set to 1, this is not possible anymore.
/* NOTE(review): presumably set by init_irnode(), which "forbids the addition
 * of new data to an ir node" (see its comment below) — confirm in full source. */
103 static int forbid_new_data = 0;
106 * The amount of additional space for custom data to be allocated upon
107 * creating a new node.
/* Accumulated by firm_register_additional_node_data(); new_ir_node() places
 * this many extra bytes in front of each ir_node (see the p + firm_add_node_size
 * offset there). */
109 unsigned firm_add_node_size = 0;
112 /* register new space for every node */
/* Reserves 'size' extra bytes of per-node custom data. Must be called before
 * any node is created (asserts on forbid_new_data). Returns the cumulative
 * amount of registered extra space. */
113 unsigned firm_register_additional_node_data(unsigned size)
115 assert(!forbid_new_data && "Too late to register additional node data");
120 return firm_add_node_size += size;
/* Module initialization: freezes the per-node custom-data layout. */
124 void init_irnode(void)
126 /* Forbid the addition of new data to an ir node. */
131 * irnode constructor.
132 * Create a new irnode in irg, with an op, mode, arity and
133 * some incoming irnodes.
134 * If arity is negative, a node with a dynamic array is created.
136 ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
137 ir_mode *mode, int arity, ir_node **in)
/* Total allocation: the ir_node header + op-specific attributes + any
 * custom data registered via firm_register_additional_node_data. */
140 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
147 p = obstack_alloc(irg->obst, node_size);
148 memset(p, 0, node_size);
/* the registered custom data lives in FRONT of the ir_node itself */
149 res = (ir_node *)(p + firm_add_node_size);
151 res->kind = k_ir_node;
155 res->node_idx = irg_register_node_idx(irg, res);
/* in[0] always holds the block; real operands start at in[1] */
160 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
162 /* not nice but necessary: End and Sync must always have a flexible array */
163 if (op == op_End || op == op_Sync)
164 res->in = NEW_ARR_F(ir_node *, (arity+1));
166 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
167 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
171 set_irn_dbg_info(res, db);
173 res->node_nr = get_irp_new_node_nr();
175 for (i = 0; i < EDGE_KIND_LAST; ++i) {
176 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
177 /* edges will be build immediately */
178 res->edge_info[i].edges_built = 1;
179 res->edge_info[i].out_count = 0;
182 /* don't put this into the for loop, arity is -1 for some nodes! */
183 edges_notify_edge(res, -1, res->in[0], NULL, irg);
184 for (i = 1; i <= arity; ++i)
185 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
187 hook_new_node(irg, res);
188 if (get_irg_phase_state(irg) == phase_backend) {
189 be_info_new_node(res);
191 // Init the VRP structures
192 res->vrp.range_type = VRP_UNDEFINED;
194 if (mode_is_int(mode)) {
195 // We are assuming that 0 is always represented as 0x0000
196 res->vrp.bits_set = new_tarval_from_long(0, mode);
197 res->vrp.bits_not_set = new_tarval_from_long(0, mode);
198 res->vrp.range_bottom = get_tarval_top();
199 res->vrp.range_top = get_tarval_top();
/* non-integer modes: VRP carries no usable bit/range information */
201 res->vrp.bits_set = get_tarval_bad();
202 res->vrp.bits_not_set = get_tarval_bad();
203 res->vrp.range_bottom = get_tarval_bad();
204 res->vrp.range_top = get_tarval_bad();
206 res->vrp.bits_node = NULL;
207 res->vrp.range_node = NULL;
208 res->vrp.range_op = VRP_NONE;
214 /*-- getting some parameters from ir_nodes --*/
216 int (is_ir_node)(const void *thing)
218 return _is_ir_node(thing);
221 int (get_irn_intra_arity)(const ir_node *node)
223 return _get_irn_intra_arity(node);
226 int (get_irn_inter_arity)(const ir_node *node)
228 return _get_irn_inter_arity(node);
231 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
233 int (get_irn_arity)(const ir_node *node)
235 return _get_irn_arity(node);
238 /* Returns the array with ins. This array is shifted with respect to the
239 array accessed by get_irn_n: The block operand is at position 0 not -1.
240 (@@@ This should be changed.)
241 The order of the predecessors in this array is not guaranteed, except that
242 lists of operands as predecessors of Block or arguments of a Call are
244 ir_node **get_irn_in(const ir_node *node)
247 #ifdef INTERPROCEDURAL_VIEW
/* In interprocedural view Filter and Block may carry a separate call-graph
 * predecessor array (in_cg), which is returned instead of the normal ins. */
248 if (get_interprocedural_view()) { /* handle Filter and Block specially */
249 if (get_irn_opcode(node) == iro_Filter) {
250 assert(node->attr.filter.in_cg);
251 return node->attr.filter.in_cg;
252 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
253 return node->attr.block.in_cg;
255 /* else fall through */
257 #endif /* INTERPROCEDURAL_VIEW */
/* Replaces the complete predecessor array of a node with 'in' (arity entries),
 * keeping the out-edge bookkeeping consistent via edges_notify_edge. */
261 void set_irn_in(ir_node *node, int arity, ir_node **in)
265 ir_graph *irg = get_irn_irg(node);
268 #ifdef INTERPROCEDURAL_VIEW
/* In interprocedural view, Filter/Block may use the in_cg array instead */
269 if (get_interprocedural_view()) { /* handle Filter and Block specially */
270 ir_opcode code = get_irn_opcode(node);
271 if (code == iro_Filter) {
272 assert(node->attr.filter.in_cg);
273 pOld_in = &node->attr.filter.in_cg;
274 } else if (code == iro_Block && node->attr.block.in_cg) {
275 pOld_in = &node->attr.block.in_cg;
280 #endif /* INTERPROCEDURAL_VIEW */
/* notify replaced edges (old pred known) resp. newly added edges (no old pred) */
284 for (i = 0; i < arity; i++) {
285 if (i < ARR_LEN(*pOld_in)-1)
286 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
288 edges_notify_edge(node, i, in[i], NULL, irg);
/* notify removal of any surplus old edges beyond the new arity */
290 for (;i < ARR_LEN(*pOld_in)-1; i++) {
291 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
/* arity changed: reallocate the in array, preserving the block at slot 0 */
294 if (arity != ARR_LEN(*pOld_in) - 1) {
295 ir_node * block = (*pOld_in)[0];
296 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
297 (*pOld_in)[0] = block;
299 fix_backedges(irg->obst, node);
301 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
304 ir_node *(get_irn_intra_n)(const ir_node *node, int n)
306 return _get_irn_intra_n(node, n);
309 ir_node *(get_irn_inter_n)(const ir_node *node, int n)
311 return _get_irn_inter_n(node, n);
314 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
316 ir_node *(get_irn_n)(const ir_node *node, int n)
318 return _get_irn_n(node, n);
/* Sets the n-th predecessor of node to 'in'. n == -1 addresses the block
 * operand (stored at in[0]); operand k lives at in[k + 1]. */
321 void set_irn_n(ir_node *node, int n, ir_node *in)
323 assert(node && node->kind == k_ir_node);
325 assert(n < get_irn_arity(node));
326 assert(in && in->kind == k_ir_node);
328 #ifdef INTERPROCEDURAL_VIEW
329 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
330 /* Change block pred in both views! */
331 node->in[n + 1] = in;
332 assert(node->attr.filter.in_cg);
333 node->attr.filter.in_cg[n + 1] = in;
336 if (get_interprocedural_view()) { /* handle Filter and Block specially */
337 if (get_irn_opcode(node) == iro_Filter) {
338 assert(node->attr.filter.in_cg);
339 node->attr.filter.in_cg[n + 1] = in;
341 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
342 node->attr.block.in_cg[n + 1] = in;
345 /* else fall through */
347 #endif /* INTERPROCEDURAL_VIEW */
/* notify hook and edge bookkeeping BEFORE overwriting the old predecessor */
350 hook_set_irn_n(node, n, in, node->in[n + 1]);
352 /* Here, we rely on src and tgt being in the current ir graph */
353 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
355 node->in[n + 1] = in;
/* Appends 'in' as a new predecessor of a dynamic-arity node.
 * NOTE(review): presumably returns the new operand position 'pos' —
 * the return statement is elided in this view; confirm in full source. */
358 int add_irn_n(ir_node *node, ir_node *in)
361 ir_graph *irg = get_irn_irg(node);
363 assert(node->op->opar == oparity_dynamic);
/* operand index = array length minus the block slot at in[0] */
364 pos = ARR_LEN(node->in) - 1;
365 ARR_APP1(ir_node *, node->in, in);
366 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
369 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
374 void del_Sync_n(ir_node *n, int i)
376 int arity = get_Sync_n_preds(n);
377 ir_node *last_pred = get_Sync_pred(n, arity - 1);
378 set_Sync_pred(n, i, last_pred);
379 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
380 ARR_SHRINKLEN(get_irn_in(n), arity);
383 int (get_irn_deps)(const ir_node *node)
385 return _get_irn_deps(node);
388 ir_node *(get_irn_dep)(const ir_node *node, int pos)
390 return _get_irn_dep(node, pos);
393 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
395 _set_irn_dep(node, pos, dep);
/* Adds a dependency (scheduling) edge from node to dep, avoiding duplicates.
 * Reuses a NULL hole in the deps array if one exists, else appends.
 * NOTE(review): declarations of i/n/first_zero/res and the early-exit on a
 * duplicate are elided in this view; 'res' is presumably the position at
 * which dep was stored — confirm in full source. */
398 int add_irn_dep(ir_node *node, ir_node *dep)
402 /* DEP edges are only allowed in backend phase */
403 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
404 if (node->deps == NULL) {
405 node->deps = NEW_ARR_F(ir_node *, 1);
411 for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
412 if (node->deps[i] == NULL)
415 if (node->deps[i] == dep)
419 if (first_zero >= 0) {
/* fill the first free (NULL) slot instead of growing the array */
420 node->deps[first_zero] = dep;
423 ARR_APP1(ir_node *, node->deps, dep);
/* keep the DEP-kind out-edge bookkeeping in sync with the new entry */
428 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
433 void add_irn_deps(ir_node *tgt, ir_node *src)
437 for (i = 0, n = get_irn_deps(src); i < n; ++i)
438 add_irn_dep(tgt, get_irn_dep(src, i));
442 ir_mode *(get_irn_mode)(const ir_node *node)
444 return _get_irn_mode(node);
447 void (set_irn_mode)(ir_node *node, ir_mode *mode)
449 _set_irn_mode(node, mode);
452 /** Gets the string representation of the mode .*/
453 const char *get_irn_modename(const ir_node *node)
456 return get_mode_name(node->mode);
459 ident *get_irn_modeident(const ir_node *node)
462 return get_mode_ident(node->mode);
465 ir_op *(get_irn_op)(const ir_node *node)
467 return _get_irn_op(node);
470 /* should be private to the library: */
471 void (set_irn_op)(ir_node *node, ir_op *op)
473 _set_irn_op(node, op);
476 unsigned (get_irn_opcode)(const ir_node *node)
478 return _get_irn_opcode(node);
481 const char *get_irn_opname(const ir_node *node)
484 if (is_Phi0(node)) return "Phi0";
485 return get_id_str(node->op->name);
488 ident *get_irn_opident(const ir_node *node)
491 return node->op->name;
494 ir_visited_t (get_irn_visited)(const ir_node *node)
496 return _get_irn_visited(node);
499 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
501 _set_irn_visited(node, visited);
504 void (mark_irn_visited)(ir_node *node)
506 _mark_irn_visited(node);
509 int (irn_visited)(const ir_node *node)
511 return _irn_visited(node);
514 int (irn_visited_else_mark)(ir_node *node)
516 return _irn_visited_else_mark(node);
519 void (set_irn_link)(ir_node *node, void *link)
521 _set_irn_link(node, link);
524 void *(get_irn_link)(const ir_node *node)
526 return _get_irn_link(node);
529 op_pin_state (get_irn_pinned)(const ir_node *node)
531 return _get_irn_pinned(node);
534 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
536 return _is_irn_pinned_in_irg(node);
539 void set_irn_pinned(ir_node *node, op_pin_state state)
541 /* due to optimization an opt may be turned into a Tuple */
545 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
546 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
548 node->attr.except.pin_state = state;
551 /* Outputs a unique number for this node */
552 long get_irn_node_nr(const ir_node *node)
555 return node->node_nr;
558 const_attr *get_irn_const_attr(ir_node *node)
560 assert(is_Const(node));
561 return &node->attr.con;
564 long get_irn_proj_attr(ir_node *node)
566 /* BEWARE: check for true Proj node here, no Filter */
567 assert(node->op == op_Proj);
568 return node->attr.proj;
571 alloc_attr *get_irn_alloc_attr(ir_node *node)
573 assert(is_Alloc(node));
574 return &node->attr.alloc;
577 free_attr *get_irn_free_attr(ir_node *node)
579 assert(is_Free(node));
580 return &node->attr.free;
583 symconst_attr *get_irn_symconst_attr(ir_node *node)
585 assert(is_SymConst(node));
586 return &node->attr.symc;
589 call_attr *get_irn_call_attr(ir_node *node)
591 assert(is_Call(node));
592 return &node->attr.call;
595 sel_attr *get_irn_sel_attr(ir_node *node)
597 assert(is_Sel(node));
598 return &node->attr.sel;
601 phi_attr *get_irn_phi_attr(ir_node *node)
603 return &node->attr.phi;
606 block_attr *get_irn_block_attr(ir_node *node)
608 assert(is_Block(node));
609 return &node->attr.block;
612 load_attr *get_irn_load_attr(ir_node *node)
614 assert(is_Load(node));
615 return &node->attr.load;
618 store_attr *get_irn_store_attr(ir_node *node)
620 assert(is_Store(node));
621 return &node->attr.store;
624 except_attr *get_irn_except_attr(ir_node *node)
626 assert(node->op == op_Div || node->op == op_Quot ||
627 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
628 return &node->attr.except;
631 divmod_attr *get_irn_divmod_attr(ir_node *node)
633 assert(node->op == op_Div || node->op == op_Quot ||
634 node->op == op_DivMod || node->op == op_Mod);
635 return &node->attr.divmod;
638 builtin_attr *get_irn_builtin_attr(ir_node *node)
640 assert(is_Builtin(node));
641 return &node->attr.builtin;
644 void *(get_irn_generic_attr)(ir_node *node)
646 assert(is_ir_node(node));
647 return _get_irn_generic_attr(node);
650 const void *(get_irn_generic_attr_const)(const ir_node *node)
652 assert(is_ir_node(node));
653 return _get_irn_generic_attr_const(node);
656 unsigned (get_irn_idx)(const ir_node *node)
658 assert(is_ir_node(node));
659 return _get_irn_idx(node);
662 int get_irn_pred_pos(ir_node *node, ir_node *arg)
665 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
666 if (get_irn_n(node, i) == arg)
672 /** manipulate fields of individual nodes **/
674 /* this works for all except Block */
675 ir_node *get_nodes_block(const ir_node *node)
677 assert(node->op != op_Block);
678 return get_irn_n(node, -1);
681 void set_nodes_block(ir_node *node, ir_node *block)
683 assert(node->op != op_Block);
684 set_irn_n(node, -1, block);
687 /* this works for all except Block */
688 ir_node *get_nodes_MacroBlock(const ir_node *node)
690 assert(node->op != op_Block);
691 return get_Block_MacroBlock(get_irn_n(node, -1));
694 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
695 * from Start. If so returns frame type, else Null. */
696 ir_type *is_frame_pointer(const ir_node *n)
698 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
699 ir_node *start = get_Proj_pred(n);
700 if (is_Start(start)) {
701 return get_irg_frame_type(get_irn_irg(start));
707 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
708 * from Start. If so returns tls type, else Null. */
709 ir_type *is_tls_pointer(const ir_node *n)
711 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
712 ir_node *start = get_Proj_pred(n);
713 if (is_Start(start)) {
714 return get_tls_type();
720 ir_node **get_Block_cfgpred_arr(ir_node *node)
722 assert(is_Block(node));
723 return (ir_node **)&(get_irn_in(node)[1]);
726 int (get_Block_n_cfgpreds)(const ir_node *node)
728 return _get_Block_n_cfgpreds(node);
731 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
733 return _get_Block_cfgpred(node, pos);
736 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
738 assert(is_Block(node));
739 set_irn_n(node, pos, pred);
742 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
746 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
747 if (get_Block_cfgpred_block(block, i) == pred)
753 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
755 return _get_Block_cfgpred_block(node, pos);
758 int get_Block_matured(const ir_node *node)
760 assert(is_Block(node));
761 return (int)node->attr.block.is_matured;
764 void set_Block_matured(ir_node *node, int matured)
766 assert(is_Block(node));
767 node->attr.block.is_matured = matured;
770 ir_visited_t (get_Block_block_visited)(const ir_node *node)
772 return _get_Block_block_visited(node);
775 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
777 _set_Block_block_visited(node, visit);
780 /* For this current_ir_graph must be set. */
781 void (mark_Block_block_visited)(ir_node *node)
783 _mark_Block_block_visited(node);
786 int (Block_block_visited)(const ir_node *node)
788 return _Block_block_visited(node);
791 ir_node *get_Block_graph_arr(ir_node *node, int pos)
793 assert(is_Block(node));
794 return node->attr.block.graph_arr[pos+1];
797 void set_Block_graph_arr(ir_node *node, int pos, ir_node *value)
799 assert(is_Block(node));
800 node->attr.block.graph_arr[pos+1] = value;
803 #ifdef INTERPROCEDURAL_VIEW
804 void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[])
806 assert(is_Block(node));
807 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
808 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
809 node->attr.block.in_cg[0] = NULL;
810 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
812 /* Fix backedge array. fix_backedges() operates depending on
813 interprocedural_view. */
814 int ipv = get_interprocedural_view();
815 set_interprocedural_view(1);
816 fix_backedges(current_ir_graph->obst, node);
817 set_interprocedural_view(ipv);
820 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
823 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred)
825 assert(is_Block(node) && node->attr.block.in_cg &&
826 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
827 node->attr.block.in_cg[pos + 1] = pred;
830 ir_node **get_Block_cg_cfgpred_arr(ir_node *node)
832 assert(is_Block(node));
833 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
836 int get_Block_cg_n_cfgpreds(const ir_node *node)
838 assert(is_Block(node));
839 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
842 ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos)
844 assert(is_Block(node) && node->attr.block.in_cg);
845 return node->attr.block.in_cg[pos + 1];
848 void remove_Block_cg_cfgpred_arr(ir_node *node)
850 assert(is_Block(node));
851 node->attr.block.in_cg = NULL;
853 #endif /* INTERPROCEDURAL_VIEW */
855 ir_node *(set_Block_dead)(ir_node *block)
857 return _set_Block_dead(block);
860 int (is_Block_dead)(const ir_node *block)
862 return _is_Block_dead(block);
865 ir_extblk *get_Block_extbb(const ir_node *block)
868 assert(is_Block(block));
869 res = block->attr.block.extblk;
870 assert(res == NULL || is_ir_extbb(res));
874 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
876 assert(is_Block(block));
877 assert(extblk == NULL || is_ir_extbb(extblk));
878 block->attr.block.extblk = extblk;
881 /* Returns the macro block header of a block.*/
882 ir_node *get_Block_MacroBlock(const ir_node *block)
885 assert(is_Block(block));
886 mbh = get_irn_n(block, -1);
887 /* once macro block header is respected by all optimizations,
888 this assert can be removed */
893 /* Sets the macro block header of a block. */
894 void set_Block_MacroBlock(ir_node *block, ir_node *mbh)
896 assert(is_Block(block));
898 assert(is_Block(mbh));
899 set_irn_n(block, -1, mbh);
902 /* returns the macro block header of a node. */
903 ir_node *get_irn_MacroBlock(const ir_node *n)
906 n = get_nodes_block(n);
907 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
911 return get_Block_MacroBlock(n);
914 /* returns the graph of a Block. */
915 ir_graph *(get_Block_irg)(const ir_node *block)
917 return _get_Block_irg(block);
920 ir_entity *create_Block_entity(ir_node *block)
923 assert(is_Block(block));
925 entity = block->attr.block.entity;
926 if (entity == NULL) {
930 glob = get_glob_type();
931 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
932 set_entity_visibility(entity, ir_visibility_local);
933 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
934 nr = get_irp_next_label_nr();
935 set_entity_label(entity, nr);
936 set_entity_compiler_generated(entity, 1);
938 block->attr.block.entity = entity;
943 ir_entity *get_Block_entity(const ir_node *block)
945 assert(is_Block(block));
946 return block->attr.block.entity;
949 void set_Block_entity(ir_node *block, ir_entity *entity)
951 assert(is_Block(block));
952 assert(get_entity_type(entity) == get_code_type());
953 block->attr.block.entity = entity;
956 int has_Block_entity(const ir_node *block)
958 return block->attr.block.entity != NULL;
961 ir_node *(get_Block_phis)(const ir_node *block)
963 return _get_Block_phis(block);
966 void (set_Block_phis)(ir_node *block, ir_node *phi)
968 _set_Block_phis(block, phi);
971 void (add_Block_phi)(ir_node *block, ir_node *phi)
973 _add_Block_phi(block, phi);
976 /* Get the Block mark (single bit). */
977 unsigned (get_Block_mark)(const ir_node *block)
979 return _get_Block_mark(block);
982 /* Set the Block mark (single bit). */
983 void (set_Block_mark)(ir_node *block, unsigned mark)
985 _set_Block_mark(block, mark);
988 int get_End_n_keepalives(const ir_node *end)
991 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
994 ir_node *get_End_keepalive(const ir_node *end, int pos)
997 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
1000 void add_End_keepalive(ir_node *end, ir_node *ka)
1002 assert(is_End(end));
1006 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
1008 assert(is_End(end));
1009 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
1012 /* Set new keep-alives */
1013 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
1016 ir_graph *irg = get_irn_irg(end);
1018 /* notify that edges are deleted */
1019 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
1020 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
1022 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
1024 for (i = 0; i < n; ++i) {
1025 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
1026 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
1030 /* Set new keep-alives from old keep-alives, skipping irn */
1031 void remove_End_keepalive(ir_node *end, ir_node *irn)
1033 int n = get_End_n_keepalives(end);
1038 for (i = n -1; i >= 0; --i) {
1039 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
1042 if (old_ka == irn) {
1049 irg = get_irn_irg(end);
1051 /* remove the edge */
1052 edges_notify_edge(end, idx, NULL, irn, irg);
1055 /* exchange with the last one */
1056 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
1057 edges_notify_edge(end, n - 1, NULL, old, irg);
1058 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
1059 edges_notify_edge(end, idx, old, NULL, irg);
1061 /* now n - 1 keeps, 1 block input */
1062 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
1065 /* remove Bads, NoMems and doublets from the keep-alive set */
1066 void remove_End_Bads_and_doublets(ir_node *end)
1069 int idx, n = get_End_n_keepalives(end);
1075 irg = get_irn_irg(end);
1076 pset_new_init(&keeps);
1078 for (idx = n - 1; idx >= 0; --idx) {
1079 ir_node *ka = get_End_keepalive(end, idx);
1081 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
1082 /* remove the edge */
1083 edges_notify_edge(end, idx, NULL, ka, irg);
1086 /* exchange with the last one */
1087 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
1088 edges_notify_edge(end, n - 1, NULL, old, irg);
1089 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
1090 edges_notify_edge(end, idx, old, NULL, irg);
1094 pset_new_insert(&keeps, ka);
1097 /* n keeps, 1 block input */
1098 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
1100 pset_new_destroy(&keeps);
1103 void free_End(ir_node *end)
1105 assert(is_End(end));
1108 end->in = NULL; /* @@@ make sure we get an error if we use the
1109 in array afterwards ... */
1112 /* Return the target address of an IJmp */
1113 ir_node *get_IJmp_target(const ir_node *ijmp)
1115 assert(is_IJmp(ijmp));
1116 return get_irn_n(ijmp, 0);
1119 /** Sets the target address of an IJmp */
1120 void set_IJmp_target(ir_node *ijmp, ir_node *tgt)
1122 assert(is_IJmp(ijmp));
1123 set_irn_n(ijmp, 0, tgt);
1126 ir_node *get_Cond_selector(const ir_node *node)
1128 assert(is_Cond(node));
1129 return get_irn_n(node, 0);
1132 void set_Cond_selector(ir_node *node, ir_node *selector)
1134 assert(is_Cond(node));
1135 set_irn_n(node, 0, selector);
1138 long get_Cond_default_proj(const ir_node *node)
1140 assert(is_Cond(node));
1141 return node->attr.cond.default_proj;
1144 void set_Cond_default_proj(ir_node *node, long defproj)
1146 assert(is_Cond(node));
1147 node->attr.cond.default_proj = defproj;
1150 ir_node *get_Return_mem(const ir_node *node)
1152 assert(is_Return(node));
1153 return get_irn_n(node, 0);
1156 void set_Return_mem(ir_node *node, ir_node *mem)
1158 assert(is_Return(node));
1159 set_irn_n(node, 0, mem);
1162 int get_Return_n_ress(const ir_node *node)
1164 assert(is_Return(node));
1165 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1168 ir_node **get_Return_res_arr(ir_node *node)
1170 assert(is_Return(node));
1171 if (get_Return_n_ress(node) > 0)
1172 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1178 void set_Return_n_res(ir_node *node, int results)
1180 assert(is_Return(node));
1184 ir_node *get_Return_res(const ir_node *node, int pos)
1186 assert(is_Return(node));
1187 assert(get_Return_n_ress(node) > pos);
1188 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1191 void set_Return_res(ir_node *node, int pos, ir_node *res)
1193 assert(is_Return(node));
1194 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1197 tarval *(get_Const_tarval)(const ir_node *node)
1199 return _get_Const_tarval(node);
1202 void set_Const_tarval(ir_node *node, tarval *con)
1204 assert(is_Const(node));
1205 node->attr.con.tv = con;
1208 int (is_Const_null)(const ir_node *node)
1210 return _is_Const_null(node);
1213 int (is_Const_one)(const ir_node *node)
1215 return _is_Const_one(node);
1218 int (is_Const_all_one)(const ir_node *node)
1220 return _is_Const_all_one(node);
1224 /* The source language type. Must be an atomic type. Mode of type must
1225 be mode of node. For tarvals from entities type must be pointer to
1227 ir_type *get_Const_type(ir_node *node)
1229 assert(is_Const(node));
1230 return node->attr.con.tp;
1233 void set_Const_type(ir_node *node, ir_type *tp)
1235 assert(is_Const(node));
1236 if (tp != firm_unknown_type) {
1237 assert(is_atomic_type(tp));
1238 assert(get_type_mode(tp) == get_irn_mode(node));
1240 node->attr.con.tp = tp;
1244 symconst_kind get_SymConst_kind(const ir_node *node)
1246 assert(is_SymConst(node));
1247 return node->attr.symc.kind;
1250 void set_SymConst_kind(ir_node *node, symconst_kind kind)
1252 assert(is_SymConst(node));
1253 node->attr.symc.kind = kind;
1256 ir_type *get_SymConst_type(const ir_node *node)
1258 /* the cast here is annoying, but we have to compensate for
1260 ir_node *irn = (ir_node *)node;
1261 assert(is_SymConst(node) &&
1262 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1263 return irn->attr.symc.sym.type_p;
1266 void set_SymConst_type(ir_node *node, ir_type *tp)
1268 assert(is_SymConst(node) &&
1269 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1270 node->attr.symc.sym.type_p = tp;
1274 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1275 ir_entity *get_SymConst_entity(const ir_node *node)
1277 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1278 return node->attr.symc.sym.entity_p;
1281 void set_SymConst_entity(ir_node *node, ir_entity *ent)
1283 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1284 node->attr.symc.sym.entity_p = ent;
1287 ir_enum_const *get_SymConst_enum(const ir_node *node)
1289 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1290 return node->attr.symc.sym.enum_p;
1293 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
1295 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1296 node->attr.symc.sym.enum_p = ec;
1299 union symconst_symbol
1300 get_SymConst_symbol(const ir_node *node)
1302 assert(is_SymConst(node));
1303 return node->attr.symc.sym;
1306 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
1308 assert(is_SymConst(node));
1309 node->attr.symc.sym = sym;
1312 ir_type *get_SymConst_value_type(ir_node *node)
1314 assert(is_SymConst(node));
1315 return node->attr.symc.tp;
1318 void set_SymConst_value_type(ir_node *node, ir_type *tp)
1320 assert(is_SymConst(node));
1321 node->attr.symc.tp = tp;
1324 ir_node *get_Sel_mem(const ir_node *node)
1326 assert(is_Sel(node));
1327 return get_irn_n(node, 0);
1330 void set_Sel_mem(ir_node *node, ir_node *mem)
1332 assert(is_Sel(node));
1333 set_irn_n(node, 0, mem);
1336 ir_node *get_Sel_ptr(const ir_node *node)
1338 assert(is_Sel(node));
1339 return get_irn_n(node, 1);
1342 void set_Sel_ptr(ir_node *node, ir_node *ptr)
1344 assert(is_Sel(node));
1345 set_irn_n(node, 1, ptr);
1348 int get_Sel_n_indexs(const ir_node *node)
1350 assert(is_Sel(node));
1351 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1354 ir_node **get_Sel_index_arr(ir_node *node)
1356 assert(is_Sel(node));
1357 if (get_Sel_n_indexs(node) > 0)
1358 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1363 ir_node *get_Sel_index(const ir_node *node, int pos)
1365 assert(is_Sel(node));
1366 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1369 void set_Sel_index(ir_node *node, int pos, ir_node *index)
1371 assert(is_Sel(node));
1372 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1375 ir_entity *get_Sel_entity(const ir_node *node)
1377 assert(is_Sel(node));
1378 return node->attr.sel.entity;
1381 /* need a version without const to prevent warning */
1382 static ir_entity *_get_Sel_entity(ir_node *node)
1384 return get_Sel_entity(node);
1387 void set_Sel_entity(ir_node *node, ir_entity *ent)
1389 assert(is_Sel(node));
1390 node->attr.sel.entity = ent;
1394 /* For unary and binary arithmetic operations the access to the
1395 operands can be factored out. Left is the first, right the
1396 second arithmetic value as listed in tech report 0999-33.
1397 unops are: Minus, Abs, Not, Conv, Cast
1398 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1399 Shr, Shrs, Rotate, Cmp */
1402 ir_node *get_Call_mem(const ir_node *node)
1404 assert(is_Call(node));
1405 return get_irn_n(node, 0);
1408 void set_Call_mem(ir_node *node, ir_node *mem)
1410 assert(is_Call(node));
1411 set_irn_n(node, 0, mem);
1414 ir_node *get_Call_ptr(const ir_node *node)
1416 assert(is_Call(node));
1417 return get_irn_n(node, 1);
1420 void set_Call_ptr(ir_node *node, ir_node *ptr)
1422 assert(is_Call(node));
1423 set_irn_n(node, 1, ptr);
1426 ir_node **get_Call_param_arr(ir_node *node)
1428 assert(is_Call(node));
1429 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1432 int get_Call_n_params(const ir_node *node)
1434 assert(is_Call(node));
1435 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1438 ir_node *get_Call_param(const ir_node *node, int pos)
1440 assert(is_Call(node));
1441 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1444 void set_Call_param(ir_node *node, int pos, ir_node *param)
1446 assert(is_Call(node));
1447 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1450 ir_type *get_Call_type(ir_node *node)
1452 assert(is_Call(node));
1453 return node->attr.call.type;
1456 void set_Call_type(ir_node *node, ir_type *tp)
1458 assert(is_Call(node));
1459 assert((get_unknown_type() == tp) || is_Method_type(tp));
1460 node->attr.call.type = tp;
1463 unsigned get_Call_tail_call(const ir_node *node)
1465 assert(is_Call(node));
1466 return node->attr.call.tail_call;
1469 void set_Call_tail_call(ir_node *node, unsigned tail_call)
1471 assert(is_Call(node));
1472 node->attr.call.tail_call = tail_call != 0;
1475 ir_node *get_Builtin_mem(const ir_node *node)
1477 assert(is_Builtin(node));
1478 return get_irn_n(node, 0);
1481 void set_Builin_mem(ir_node *node, ir_node *mem)
1483 assert(is_Builtin(node));
1484 set_irn_n(node, 0, mem);
1487 ir_builtin_kind get_Builtin_kind(const ir_node *node)
1489 assert(is_Builtin(node));
1490 return node->attr.builtin.kind;
1493 void set_Builtin_kind(ir_node *node, ir_builtin_kind kind)
1495 assert(is_Builtin(node));
1496 node->attr.builtin.kind = kind;
1499 ir_node **get_Builtin_param_arr(ir_node *node)
1501 assert(is_Builtin(node));
1502 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1505 int get_Builtin_n_params(const ir_node *node)
1507 assert(is_Builtin(node));
1508 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1511 ir_node *get_Builtin_param(const ir_node *node, int pos)
1513 assert(is_Builtin(node));
1514 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1517 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
1519 assert(is_Builtin(node));
1520 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1523 ir_type *get_Builtin_type(ir_node *node)
1525 assert(is_Builtin(node));
1526 return node->attr.builtin.type;
1529 void set_Builtin_type(ir_node *node, ir_type *tp)
1531 assert(is_Builtin(node));
1532 assert((get_unknown_type() == tp) || is_Method_type(tp));
1533 node->attr.builtin.type = tp;
1536 /* Returns a human readable string for the ir_builtin_kind. */
1537 const char *get_builtin_kind_name(ir_builtin_kind kind)
1539 #define X(a) case a: return #a;
1542 X(ir_bk_debugbreak);
1543 X(ir_bk_return_address);
1544 X(ir_bk_frame_address);
1554 X(ir_bk_inner_trampoline);
1561 int Call_has_callees(const ir_node *node)
1563 assert(is_Call(node));
1564 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1565 (node->attr.call.callee_arr != NULL));
1568 int get_Call_n_callees(const ir_node *node)
1570 assert(is_Call(node) && node->attr.call.callee_arr);
1571 return ARR_LEN(node->attr.call.callee_arr);
1574 ir_entity *get_Call_callee(const ir_node *node, int pos)
1576 assert(pos >= 0 && pos < get_Call_n_callees(node));
1577 return node->attr.call.callee_arr[pos];
1580 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr)
1582 assert(is_Call(node));
1583 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1584 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1586 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1589 void remove_Call_callee_arr(ir_node *node)
1591 assert(is_Call(node));
1592 node->attr.call.callee_arr = NULL;
1595 ir_node *get_CallBegin_ptr(const ir_node *node)
1597 assert(is_CallBegin(node));
1598 return get_irn_n(node, 0);
1601 void set_CallBegin_ptr(ir_node *node, ir_node *ptr)
1603 assert(is_CallBegin(node));
1604 set_irn_n(node, 0, ptr);
1607 ir_node *get_CallBegin_call(const ir_node *node)
1609 assert(is_CallBegin(node));
1610 return node->attr.callbegin.call;
1613 void set_CallBegin_call(ir_node *node, ir_node *call)
1615 assert(is_CallBegin(node));
1616 node->attr.callbegin.call = call;
1620 * Returns non-zero if a Call is surely a self-recursive Call.
1621 * Beware: if this functions returns 0, the call might be self-recursive!
1623 int is_self_recursive_Call(const ir_node *call)
1625 const ir_node *callee = get_Call_ptr(call);
1627 if (is_SymConst_addr_ent(callee)) {
1628 const ir_entity *ent = get_SymConst_entity(callee);
1629 const ir_graph *irg = get_entity_irg(ent);
1630 if (irg == get_irn_irg(call))
1637 ir_node * get_##OP##_left(const ir_node *node) { \
1638 assert(is_##OP(node)); \
1639 return get_irn_n(node, node->op->op_index); \
1641 void set_##OP##_left(ir_node *node, ir_node *left) { \
1642 assert(is_##OP(node)); \
1643 set_irn_n(node, node->op->op_index, left); \
1645 ir_node *get_##OP##_right(const ir_node *node) { \
1646 assert(is_##OP(node)); \
1647 return get_irn_n(node, node->op->op_index + 1); \
1649 void set_##OP##_right(ir_node *node, ir_node *right) { \
1650 assert(is_##OP(node)); \
1651 set_irn_n(node, node->op->op_index + 1, right); \
1655 ir_node *get_##OP##_op(const ir_node *node) { \
1656 assert(is_##OP(node)); \
1657 return get_irn_n(node, node->op->op_index); \
1659 void set_##OP##_op(ir_node *node, ir_node *op) { \
1660 assert(is_##OP(node)); \
1661 set_irn_n(node, node->op->op_index, op); \
1664 #define BINOP_MEM(OP) \
1668 get_##OP##_mem(const ir_node *node) { \
1669 assert(is_##OP(node)); \
1670 return get_irn_n(node, 0); \
1674 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1675 assert(is_##OP(node)); \
1676 set_irn_n(node, 0, mem); \
1682 ir_mode *get_##OP##_resmode(const ir_node *node) { \
1683 assert(is_##OP(node)); \
1684 return node->attr.divmod.resmode; \
1687 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
1688 assert(is_##OP(node)); \
1689 node->attr.divmod.resmode = mode; \
1717 int get_Div_no_remainder(const ir_node *node)
1719 assert(is_Div(node));
1720 return node->attr.divmod.no_remainder;
1723 void set_Div_no_remainder(ir_node *node, int no_remainder)
1725 assert(is_Div(node));
1726 node->attr.divmod.no_remainder = no_remainder;
1729 int get_Conv_strict(const ir_node *node)
1731 assert(is_Conv(node));
1732 return node->attr.conv.strict;
1735 void set_Conv_strict(ir_node *node, int strict_flag)
1737 assert(is_Conv(node));
1738 node->attr.conv.strict = (char)strict_flag;
1741 ir_type *get_Cast_type(ir_node *node)
1743 assert(is_Cast(node));
1744 return node->attr.cast.type;
1747 void set_Cast_type(ir_node *node, ir_type *to_tp)
1749 assert(is_Cast(node));
1750 node->attr.cast.type = to_tp;
1754 /* Checks for upcast.
1756 * Returns true if the Cast node casts a class type to a super type.
1758 int is_Cast_upcast(ir_node *node)
1760 ir_type *totype = get_Cast_type(node);
1761 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1763 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1766 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1767 totype = get_pointer_points_to_type(totype);
1768 fromtype = get_pointer_points_to_type(fromtype);
1773 if (!is_Class_type(totype)) return 0;
1774 return is_SubClass_of(fromtype, totype);
1777 /* Checks for downcast.
1779 * Returns true if the Cast node casts a class type to a sub type.
1781 int is_Cast_downcast(ir_node *node)
1783 ir_type *totype = get_Cast_type(node);
1784 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1786 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1789 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1790 totype = get_pointer_points_to_type(totype);
1791 fromtype = get_pointer_points_to_type(fromtype);
1796 if (!is_Class_type(totype)) return 0;
1797 return is_SubClass_of(totype, fromtype);
1800 int (is_unop)(const ir_node *node)
1802 return _is_unop(node);
1805 ir_node *get_unop_op(const ir_node *node)
1807 if (node->op->opar == oparity_unary)
1808 return get_irn_n(node, node->op->op_index);
1810 assert(node->op->opar == oparity_unary);
1814 void set_unop_op(ir_node *node, ir_node *op)
1816 if (node->op->opar == oparity_unary)
1817 set_irn_n(node, node->op->op_index, op);
1819 assert(node->op->opar == oparity_unary);
1822 int (is_binop)(const ir_node *node)
1824 return _is_binop(node);
1827 ir_node *get_binop_left(const ir_node *node)
1829 assert(node->op->opar == oparity_binary);
1830 return get_irn_n(node, node->op->op_index);
1833 void set_binop_left(ir_node *node, ir_node *left)
1835 assert(node->op->opar == oparity_binary);
1836 set_irn_n(node, node->op->op_index, left);
1839 ir_node *get_binop_right(const ir_node *node)
1841 assert(node->op->opar == oparity_binary);
1842 return get_irn_n(node, node->op->op_index + 1);
1845 void set_binop_right(ir_node *node, ir_node *right)
1847 assert(node->op->opar == oparity_binary);
1848 set_irn_n(node, node->op->op_index + 1, right);
1851 int is_Phi0(const ir_node *n)
1855 return ((get_irn_op(n) == op_Phi) &&
1856 (get_irn_arity(n) == 0) &&
1857 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1860 ir_node **get_Phi_preds_arr(ir_node *node)
1862 assert(node->op == op_Phi);
1863 return (ir_node **)&(get_irn_in(node)[1]);
1866 int get_Phi_n_preds(const ir_node *node)
1868 assert(is_Phi(node) || is_Phi0(node));
1869 return (get_irn_arity(node));
1873 void set_Phi_n_preds(ir_node *node, int n_preds)
1875 assert(node->op == op_Phi);
1879 ir_node *get_Phi_pred(const ir_node *node, int pos)
1881 assert(is_Phi(node) || is_Phi0(node));
1882 return get_irn_n(node, pos);
1885 void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1887 assert(is_Phi(node) || is_Phi0(node));
1888 set_irn_n(node, pos, pred);
1891 ir_node *(get_Phi_next)(const ir_node *phi)
1893 return _get_Phi_next(phi);
1896 void (set_Phi_next)(ir_node *phi, ir_node *next)
1898 _set_Phi_next(phi, next);
1901 int is_memop(const ir_node *node)
1903 ir_opcode code = get_irn_opcode(node);
1904 return (code == iro_Load || code == iro_Store);
1907 ir_node *get_memop_mem(const ir_node *node)
1909 assert(is_memop(node));
1910 return get_irn_n(node, 0);
1913 void set_memop_mem(ir_node *node, ir_node *mem)
1915 assert(is_memop(node));
1916 set_irn_n(node, 0, mem);
1919 ir_node *get_memop_ptr(const ir_node *node)
1921 assert(is_memop(node));
1922 return get_irn_n(node, 1);
1925 void set_memop_ptr(ir_node *node, ir_node *ptr)
1927 assert(is_memop(node));
1928 set_irn_n(node, 1, ptr);
1931 ir_node *get_Load_mem(const ir_node *node)
1933 assert(is_Load(node));
1934 return get_irn_n(node, 0);
1937 void set_Load_mem(ir_node *node, ir_node *mem)
1939 assert(is_Load(node));
1940 set_irn_n(node, 0, mem);
1943 ir_node *get_Load_ptr(const ir_node *node)
1945 assert(is_Load(node));
1946 return get_irn_n(node, 1);
1949 void set_Load_ptr(ir_node *node, ir_node *ptr)
1951 assert(is_Load(node));
1952 set_irn_n(node, 1, ptr);
1955 ir_mode *get_Load_mode(const ir_node *node)
1957 assert(is_Load(node));
1958 return node->attr.load.mode;
1961 void set_Load_mode(ir_node *node, ir_mode *mode)
1963 assert(is_Load(node));
1964 node->attr.load.mode = mode;
1967 ir_volatility get_Load_volatility(const ir_node *node)
1969 assert(is_Load(node));
1970 return node->attr.load.volatility;
1973 void set_Load_volatility(ir_node *node, ir_volatility volatility)
1975 assert(is_Load(node));
1976 node->attr.load.volatility = volatility;
1979 ir_align get_Load_align(const ir_node *node)
1981 assert(is_Load(node));
1982 return node->attr.load.aligned;
1985 void set_Load_align(ir_node *node, ir_align align)
1987 assert(is_Load(node));
1988 node->attr.load.aligned = align;
1992 ir_node *get_Store_mem(const ir_node *node)
1994 assert(is_Store(node));
1995 return get_irn_n(node, 0);
1998 void set_Store_mem(ir_node *node, ir_node *mem)
2000 assert(is_Store(node));
2001 set_irn_n(node, 0, mem);
2004 ir_node *get_Store_ptr(const ir_node *node)
2006 assert(is_Store(node));
2007 return get_irn_n(node, 1);
2010 void set_Store_ptr(ir_node *node, ir_node *ptr)
2012 assert(is_Store(node));
2013 set_irn_n(node, 1, ptr);
2016 ir_node *get_Store_value(const ir_node *node)
2018 assert(is_Store(node));
2019 return get_irn_n(node, 2);
2022 void set_Store_value(ir_node *node, ir_node *value)
2024 assert(is_Store(node));
2025 set_irn_n(node, 2, value);
2028 ir_volatility get_Store_volatility(const ir_node *node)
2030 assert(is_Store(node));
2031 return node->attr.store.volatility;
2034 void set_Store_volatility(ir_node *node, ir_volatility volatility)
2036 assert(is_Store(node));
2037 node->attr.store.volatility = volatility;
2040 ir_align get_Store_align(const ir_node *node)
2042 assert(is_Store(node));
2043 return node->attr.store.aligned;
2046 void set_Store_align(ir_node *node, ir_align align)
2048 assert(is_Store(node));
2049 node->attr.store.aligned = align;
2053 ir_node *get_Alloc_mem(const ir_node *node)
2055 assert(is_Alloc(node));
2056 return get_irn_n(node, 0);
2059 void set_Alloc_mem(ir_node *node, ir_node *mem)
2061 assert(is_Alloc(node));
2062 set_irn_n(node, 0, mem);
2065 ir_node *get_Alloc_size(const ir_node *node)
2067 assert(is_Alloc(node));
2068 return get_irn_n(node, 1);
2071 void set_Alloc_size(ir_node *node, ir_node *size)
2073 assert(is_Alloc(node));
2074 set_irn_n(node, 1, size);
2077 ir_type *get_Alloc_type(ir_node *node)
2079 assert(is_Alloc(node));
2080 return node->attr.alloc.type;
2083 void set_Alloc_type(ir_node *node, ir_type *tp)
2085 assert(is_Alloc(node));
2086 node->attr.alloc.type = tp;
2089 ir_where_alloc get_Alloc_where(const ir_node *node)
2091 assert(is_Alloc(node));
2092 return node->attr.alloc.where;
2095 void set_Alloc_where(ir_node *node, ir_where_alloc where)
2097 assert(is_Alloc(node));
2098 node->attr.alloc.where = where;
2102 ir_node *get_Free_mem(const ir_node *node)
2104 assert(is_Free(node));
2105 return get_irn_n(node, 0);
2108 void set_Free_mem(ir_node *node, ir_node *mem)
2110 assert(is_Free(node));
2111 set_irn_n(node, 0, mem);
2114 ir_node *get_Free_ptr(const ir_node *node)
2116 assert(is_Free(node));
2117 return get_irn_n(node, 1);
2120 void set_Free_ptr(ir_node *node, ir_node *ptr)
2122 assert(is_Free(node));
2123 set_irn_n(node, 1, ptr);
2126 ir_node *get_Free_size(const ir_node *node)
2128 assert(is_Free(node));
2129 return get_irn_n(node, 2);
2132 void set_Free_size(ir_node *node, ir_node *size)
2134 assert(is_Free(node));
2135 set_irn_n(node, 2, size);
2138 ir_type *get_Free_type(ir_node *node)
2140 assert(is_Free(node));
2141 return node->attr.free.type;
2144 void set_Free_type(ir_node *node, ir_type *tp)
2146 assert(is_Free(node));
2147 node->attr.free.type = tp;
2150 ir_where_alloc get_Free_where(const ir_node *node)
2152 assert(is_Free(node));
2153 return node->attr.free.where;
2156 void set_Free_where(ir_node *node, ir_where_alloc where)
2158 assert(is_Free(node));
2159 node->attr.free.where = where;
2162 ir_node **get_Sync_preds_arr(ir_node *node)
2164 assert(is_Sync(node));
2165 return (ir_node **)&(get_irn_in(node)[1]);
2168 int get_Sync_n_preds(const ir_node *node)
2170 assert(is_Sync(node));
2171 return (get_irn_arity(node));
2175 void set_Sync_n_preds(ir_node *node, int n_preds)
2177 assert(is_Sync(node));
2181 ir_node *get_Sync_pred(const ir_node *node, int pos)
2183 assert(is_Sync(node));
2184 return get_irn_n(node, pos);
2187 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
2189 assert(is_Sync(node));
2190 set_irn_n(node, pos, pred);
2193 /* Add a new Sync predecessor */
2194 void add_Sync_pred(ir_node *node, ir_node *pred)
2196 assert(is_Sync(node));
2197 add_irn_n(node, pred);
2200 /* Returns the source language type of a Proj node. */
2201 ir_type *get_Proj_type(ir_node *n)
2203 ir_type *tp = firm_unknown_type;
2204 ir_node *pred = get_Proj_pred(n);
2206 switch (get_irn_opcode(pred)) {
2209 /* Deal with Start / Call here: we need to know the Proj Nr. */
2210 assert(get_irn_mode(pred) == mode_T);
2211 pred_pred = get_Proj_pred(pred);
2213 if (is_Start(pred_pred)) {
2214 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
2215 tp = get_method_param_type(mtp, get_Proj_proj(n));
2216 } else if (is_Call(pred_pred)) {
2217 ir_type *mtp = get_Call_type(pred_pred);
2218 tp = get_method_res_type(mtp, get_Proj_proj(n));
2221 case iro_Start: break;
2222 case iro_Call: break;
2224 ir_node *a = get_Load_ptr(pred);
2226 tp = get_entity_type(get_Sel_entity(a));
2234 ir_node *get_Proj_pred(const ir_node *node)
2236 assert(is_Proj(node));
2237 return get_irn_n(node, 0);
2240 void set_Proj_pred(ir_node *node, ir_node *pred)
2242 assert(is_Proj(node));
2243 set_irn_n(node, 0, pred);
2246 long get_Proj_proj(const ir_node *node)
2248 #ifdef INTERPROCEDURAL_VIEW
2249 ir_opcode code = get_irn_opcode(node);
2251 if (code == iro_Proj) {
2252 return node->attr.proj;
2255 assert(code == iro_Filter);
2256 return node->attr.filter.proj;
2259 assert(is_Proj(node));
2260 return node->attr.proj;
2261 #endif /* INTERPROCEDURAL_VIEW */
2264 void set_Proj_proj(ir_node *node, long proj)
2266 #ifdef INTERPROCEDURAL_VIEW
2267 ir_opcode code = get_irn_opcode(node);
2269 if (code == iro_Proj) {
2270 node->attr.proj = proj;
2273 assert(code == iro_Filter);
2274 node->attr.filter.proj = proj;
2277 assert(is_Proj(node));
2278 node->attr.proj = proj;
2279 #endif /* INTERPROCEDURAL_VIEW */
2282 /* Returns non-zero if a node is a routine parameter. */
2283 int (is_arg_Proj)(const ir_node *node)
2285 return _is_arg_Proj(node);
2288 ir_node **get_Tuple_preds_arr(ir_node *node)
2290 assert(is_Tuple(node));
2291 return (ir_node **)&(get_irn_in(node)[1]);
2294 int get_Tuple_n_preds(const ir_node *node)
2296 assert(is_Tuple(node));
2297 return get_irn_arity(node);
2301 void set_Tuple_n_preds(ir_node *node, int n_preds)
2303 assert(is_Tuple(node));
2307 ir_node *get_Tuple_pred(const ir_node *node, int pos)
2309 assert(is_Tuple(node));
2310 return get_irn_n(node, pos);
2313 void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
2315 assert(is_Tuple(node));
2316 set_irn_n(node, pos, pred);
2319 ir_node *get_Id_pred(const ir_node *node)
2321 assert(is_Id(node));
2322 return get_irn_n(node, 0);
2325 void set_Id_pred(ir_node *node, ir_node *pred)
2327 assert(is_Id(node));
2328 set_irn_n(node, 0, pred);
2331 ir_node *get_Confirm_value(const ir_node *node)
2333 assert(is_Confirm(node));
2334 return get_irn_n(node, 0);
2337 void set_Confirm_value(ir_node *node, ir_node *value)
2339 assert(is_Confirm(node));
2340 set_irn_n(node, 0, value);
2343 ir_node *get_Confirm_bound(const ir_node *node)
2345 assert(is_Confirm(node));
2346 return get_irn_n(node, 1);
2349 void set_Confirm_bound(ir_node *node, ir_node *bound)
2351 assert(is_Confirm(node));
2352 set_irn_n(node, 0, bound);
2355 pn_Cmp get_Confirm_cmp(const ir_node *node)
2357 assert(is_Confirm(node));
2358 return node->attr.confirm.cmp;
2361 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp)
2363 assert(is_Confirm(node));
2364 node->attr.confirm.cmp = cmp;
2367 ir_node *get_Filter_pred(ir_node *node)
2369 assert(is_Filter(node));
2373 void set_Filter_pred(ir_node *node, ir_node *pred)
2375 assert(is_Filter(node));
2379 long get_Filter_proj(ir_node *node)
2381 assert(is_Filter(node));
2382 return node->attr.filter.proj;
2385 void set_Filter_proj(ir_node *node, long proj)
2387 assert(is_Filter(node));
2388 node->attr.filter.proj = proj;
2391 /* Don't use get_irn_arity, get_irn_n in implementation as access
2392 shall work independent of view!!! */
2393 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in)
2395 assert(is_Filter(node));
2396 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2397 ir_graph *irg = get_irn_irg(node);
2398 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2399 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
2400 node->attr.filter.in_cg[0] = node->in[0];
2402 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2405 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred)
2407 assert(is_Filter(node) && node->attr.filter.in_cg &&
2408 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2409 node->attr.filter.in_cg[pos + 1] = pred;
2412 int get_Filter_n_cg_preds(ir_node *node)
2414 assert(is_Filter(node) && node->attr.filter.in_cg);
2415 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2418 ir_node *get_Filter_cg_pred(ir_node *node, int pos)
2421 assert(is_Filter(node) && node->attr.filter.in_cg &&
2423 arity = ARR_LEN(node->attr.filter.in_cg);
2424 assert(pos < arity - 1);
2425 return node->attr.filter.in_cg[pos + 1];
2429 ir_node *get_Mux_sel(const ir_node *node)
2431 assert(is_Mux(node));
2435 void set_Mux_sel(ir_node *node, ir_node *sel)
2437 assert(is_Mux(node));
2441 ir_node *get_Mux_false(const ir_node *node)
2443 assert(is_Mux(node));
2447 void set_Mux_false(ir_node *node, ir_node *ir_false)
2449 assert(is_Mux(node));
2450 node->in[2] = ir_false;
2453 ir_node *get_Mux_true(const ir_node *node)
2455 assert(is_Mux(node));
2459 void set_Mux_true(ir_node *node, ir_node *ir_true)
2461 assert(is_Mux(node));
2462 node->in[3] = ir_true;
2466 ir_node *get_CopyB_mem(const ir_node *node)
2468 assert(is_CopyB(node));
2469 return get_irn_n(node, 0);
2472 void set_CopyB_mem(ir_node *node, ir_node *mem)
2474 assert(node->op == op_CopyB);
2475 set_irn_n(node, 0, mem);
2478 ir_node *get_CopyB_dst(const ir_node *node)
2480 assert(is_CopyB(node));
2481 return get_irn_n(node, 1);
2484 void set_CopyB_dst(ir_node *node, ir_node *dst)
2486 assert(is_CopyB(node));
2487 set_irn_n(node, 1, dst);
2490 ir_node *get_CopyB_src(const ir_node *node)
2492 assert(is_CopyB(node));
2493 return get_irn_n(node, 2);
2496 void set_CopyB_src(ir_node *node, ir_node *src)
2498 assert(is_CopyB(node));
2499 set_irn_n(node, 2, src);
2502 ir_type *get_CopyB_type(ir_node *node)
2504 assert(is_CopyB(node));
2505 return node->attr.copyb.type;
2508 void set_CopyB_type(ir_node *node, ir_type *data_type)
2510 assert(is_CopyB(node) && data_type);
2511 node->attr.copyb.type = data_type;
2515 ir_type *get_InstOf_type(ir_node *node)
2517 assert(node->op == op_InstOf);
2518 return node->attr.instof.type;
2521 void set_InstOf_type(ir_node *node, ir_type *type)
2523 assert(node->op == op_InstOf);
2524 node->attr.instof.type = type;
2527 ir_node *get_InstOf_store(const ir_node *node)
2529 assert(node->op == op_InstOf);
2530 return get_irn_n(node, 0);
2533 void set_InstOf_store(ir_node *node, ir_node *obj)
2535 assert(node->op == op_InstOf);
2536 set_irn_n(node, 0, obj);
2539 ir_node *get_InstOf_obj(const ir_node *node)
2541 assert(node->op == op_InstOf);
2542 return get_irn_n(node, 1);
2545 void set_InstOf_obj(ir_node *node, ir_node *obj)
2547 assert(node->op == op_InstOf);
2548 set_irn_n(node, 1, obj);
2551 /* Returns the memory input of a Raise operation. */
2552 ir_node *get_Raise_mem(const ir_node *node)
2554 assert(is_Raise(node));
2555 return get_irn_n(node, 0);
2558 void set_Raise_mem(ir_node *node, ir_node *mem)
2560 assert(is_Raise(node));
2561 set_irn_n(node, 0, mem);
2564 ir_node *get_Raise_exo_ptr(const ir_node *node)
2566 assert(is_Raise(node));
2567 return get_irn_n(node, 1);
2570 void set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr)
2572 assert(is_Raise(node));
2573 set_irn_n(node, 1, exo_ptr);
2578 /* Returns the memory input of a Bound operation. */
2579 ir_node *get_Bound_mem(const ir_node *bound)
2581 assert(is_Bound(bound));
2582 return get_irn_n(bound, 0);
2585 void set_Bound_mem(ir_node *bound, ir_node *mem)
2587 assert(is_Bound(bound));
2588 set_irn_n(bound, 0, mem);
2591 /* Returns the index input of a Bound operation. */
2592 ir_node *get_Bound_index(const ir_node *bound)
2594 assert(is_Bound(bound));
2595 return get_irn_n(bound, 1);
2598 void set_Bound_index(ir_node *bound, ir_node *idx)
2600 assert(is_Bound(bound));
2601 set_irn_n(bound, 1, idx);
2604 /* Returns the lower bound input of a Bound operation. */
2605 ir_node *get_Bound_lower(const ir_node *bound)
2607 assert(is_Bound(bound));
2608 return get_irn_n(bound, 2);
2611 void set_Bound_lower(ir_node *bound, ir_node *lower)
2613 assert(is_Bound(bound));
2614 set_irn_n(bound, 2, lower);
2617 /* Returns the upper bound input of a Bound operation. */
2618 ir_node *get_Bound_upper(const ir_node *bound)
2620 assert(is_Bound(bound));
2621 return get_irn_n(bound, 3);
2624 void set_Bound_upper(ir_node *bound, ir_node *upper)
2626 assert(is_Bound(bound));
2627 set_irn_n(bound, 3, upper);
2630 /* Return the operand of a Pin node. */
2631 ir_node *get_Pin_op(const ir_node *pin)
2633 assert(is_Pin(pin));
2634 return get_irn_n(pin, 0);
2637 void set_Pin_op(ir_node *pin, ir_node *node)
2639 assert(is_Pin(pin));
2640 set_irn_n(pin, 0, node);
2643 /* Return the assembler text of an ASM pseudo node. */
2644 ident *get_ASM_text(const ir_node *node)
2646 assert(is_ASM(node));
2647 return node->attr.assem.asm_text;
2650 /* Return the number of input constraints for an ASM node. */
2651 int get_ASM_n_input_constraints(const ir_node *node)
2653 assert(is_ASM(node));
2654 return ARR_LEN(node->attr.assem.inputs);
2657 /* Return the input constraints for an ASM node. This is a flexible array. */
2658 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node)
2660 assert(is_ASM(node));
2661 return node->attr.assem.inputs;
2664 /* Return the number of output constraints for an ASM node. */
2665 int get_ASM_n_output_constraints(const ir_node *node)
2667 assert(is_ASM(node));
2668 return ARR_LEN(node->attr.assem.outputs);
2671 /* Return the output constraints for an ASM node. */
2672 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node)
2674 assert(is_ASM(node));
2675 return node->attr.assem.outputs;
2678 /* Return the number of clobbered registers for an ASM node. */
2679 int get_ASM_n_clobbers(const ir_node *node)
2681 assert(is_ASM(node));
2682 return ARR_LEN(node->attr.assem.clobber);
2685 /* Return the list of clobbered registers for an ASM node. */
2686 ident **get_ASM_clobbers(const ir_node *node)
2688 assert(is_ASM(node));
2689 return node->attr.assem.clobber;
2692 /* returns the graph of a node */
2693 ir_graph *get_irn_irg(const ir_node *node)
2696 * Do not use get_nodes_Block() here, because this
2697 * will check the pinned state.
2698 * However even a 'wrong' block is always in the proper
2701 if (! is_Block(node))
2702 node = get_irn_n(node, -1);
2703 /* note that get_Block_irg() can handle Bad nodes */
2704 return get_Block_irg(node);
2708 /*----------------------------------------------------------------*/
2709 /* Auxiliary routines */
2710 /*----------------------------------------------------------------*/
2712 ir_node *skip_Proj(ir_node *node)
2714 /* don't assert node !!! */
2719 node = get_Proj_pred(node);
2725 skip_Proj_const(const ir_node *node)
2727 /* don't assert node !!! */
2732 node = get_Proj_pred(node);
2737 ir_node *skip_Tuple(ir_node *node)
2743 if (is_Proj(node)) {
2744 pred = get_Proj_pred(node);
2745 op = get_irn_op(pred);
2748 * Looks strange but calls get_irn_op() only once
2749 * in most often cases.
2751 if (op == op_Proj) { /* nested Tuple ? */
2752 pred = skip_Tuple(pred);
2754 if (is_Tuple(pred)) {
2755 node = get_Tuple_pred(pred, get_Proj_proj(node));
2758 } else if (op == op_Tuple) {
2759 node = get_Tuple_pred(pred, get_Proj_proj(node));
2766 /* returns operand of node if node is a Cast */
2767 ir_node *skip_Cast(ir_node *node)
2770 return get_Cast_op(node);
2774 /* returns operand of node if node is a Cast */
2775 const ir_node *skip_Cast_const(const ir_node *node)
2778 return get_Cast_op(node);
2782 /* returns operand of node if node is a Pin */
2783 ir_node *skip_Pin(ir_node *node)
2786 return get_Pin_op(node);
2790 /* returns operand of node if node is a Confirm */
2791 ir_node *skip_Confirm(ir_node *node)
2793 if (is_Confirm(node))
2794 return get_Confirm_value(node);
2798 /* skip all high-level ops */
2799 ir_node *skip_HighLevel_ops(ir_node *node)
2801 while (is_op_highlevel(get_irn_op(node))) {
2802 node = get_irn_n(node, 0);
2808 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2809 * than any other approach, as Id chains are resolved and all point to the real node, or
2810 * all id's are self loops.
2812 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2813 * a little bit "hand optimized".
2815 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2817 ir_node *skip_Id(ir_node *node)
2820 /* don't assert node !!! */
2822 if (!node || (node->op != op_Id)) return node;
2824 /* Don't use get_Id_pred(): We get into an endless loop for
2825 self-referencing Ids. */
2826 pred = node->in[0+1];
2828 if (pred->op != op_Id) return pred;
2830 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2831 ir_node *rem_pred, *res;
2833 if (pred->op != op_Id) return pred; /* shortcut */
2836 assert(get_irn_arity (node) > 0);
2838 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2839 res = skip_Id(rem_pred);
2840 if (res->op == op_Id) /* self-loop */ return node;
2842 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2849 void skip_Id_and_store(ir_node **node)
2853 if (!n || (n->op != op_Id)) return;
2855 /* Don't use get_Id_pred(): We get into an endless loop for
2856 self-referencing Ids. */
2860 int (is_strictConv)(const ir_node *node)
2862 return _is_strictConv(node);
2865 int (is_no_Block)(const ir_node *node)
2867 return _is_no_Block(node);
2870 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
2871 int (is_SymConst_addr_ent)(const ir_node *node)
2873 return _is_SymConst_addr_ent(node);
2876 /* Returns true if the operation manipulates control flow. */
2877 int is_cfop(const ir_node *node)
2879 return is_op_cfopcode(get_irn_op(node));
2882 /* Returns true if the operation manipulates interprocedural control flow:
2883 CallBegin, EndReg, EndExcept */
2884 int is_ip_cfop(const ir_node *node)
2886 return is_ip_cfopcode(get_irn_op(node));
2889 /* Returns true if the operation can change the control flow because
2891 int is_fragile_op(const ir_node *node)
2893 return is_op_fragile(get_irn_op(node));
2896 /* Returns the memory operand of fragile operations. */
2897 ir_node *get_fragile_op_mem(ir_node *node)
2899 assert(node && is_fragile_op(node));
2901 switch (get_irn_opcode(node)) {
2912 return get_irn_n(node, pn_Generic_M);
2917 assert(0 && "should not be reached");
2922 /* Returns the result mode of a Div operation. */
2923 ir_mode *get_divop_resmod(const ir_node *node)
2925 switch (get_irn_opcode(node)) {
2926 case iro_Quot : return get_Quot_resmode(node);
2927 case iro_DivMod: return get_DivMod_resmode(node);
2928 case iro_Div : return get_Div_resmode(node);
2929 case iro_Mod : return get_Mod_resmode(node);
2931 assert(0 && "should not be reached");
2936 /* Returns true if the operation is a forking control flow operation. */
2937 int (is_irn_forking)(const ir_node *node)
2939 return _is_irn_forking(node);
2942 void (copy_node_attr)(const ir_node *old_node, ir_node *new_node)
2944 _copy_node_attr(old_node, new_node);
2947 /* Return the type associated with the value produced by n
2948 * if the node remarks this type as it is the case for
2949 * Cast, Const, SymConst and some Proj nodes. */
2950 ir_type *(get_irn_type)(ir_node *node)
2952 return _get_irn_type(node);
2955 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2957 ir_type *(get_irn_type_attr)(ir_node *node)
2959 return _get_irn_type_attr(node);
2962 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2963 ir_entity *(get_irn_entity_attr)(ir_node *node)
2965 return _get_irn_entity_attr(node);
2968 /* Returns non-zero for constant-like nodes. */
2969 int (is_irn_constlike)(const ir_node *node)
2971 return _is_irn_constlike(node);
2975 * Returns non-zero for nodes that are allowed to have keep-alives and
2976 * are neither Block nor PhiM.
2978 int (is_irn_keep)(const ir_node *node)
2980 return _is_irn_keep(node);
2984 * Returns non-zero for nodes that are always placed in the start block.
2986 int (is_irn_start_block_placed)(const ir_node *node)
2988 return _is_irn_start_block_placed(node);
2991 /* Returns non-zero for nodes that are machine operations. */
2992 int (is_irn_machine_op)(const ir_node *node)
2994 return _is_irn_machine_op(node);
2997 /* Returns non-zero for nodes that are machine operands. */
2998 int (is_irn_machine_operand)(const ir_node *node)
3000 return _is_irn_machine_operand(node);
3003 /* Returns non-zero for nodes that have the n'th user machine flag set. */
3004 int (is_irn_machine_user)(const ir_node *node, unsigned n)
3006 return _is_irn_machine_user(node, n);
3009 /* Returns non-zero for nodes that are CSE neutral to its users. */
3010 int (is_irn_cse_neutral)(const ir_node *node)
3012 return _is_irn_cse_neutral(node);
3015 /* Gets the string representation of the jump prediction .*/
3016 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
3018 #define X(a) case a: return #a;
3020 X(COND_JMP_PRED_NONE);
3021 X(COND_JMP_PRED_TRUE);
3022 X(COND_JMP_PRED_FALSE);
3028 /* Returns the conditional jump prediction of a Cond node. */
3029 cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond)
3031 return _get_Cond_jmp_pred(cond);
3034 /* Sets a new conditional jump prediction. */
3035 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred)
3037 _set_Cond_jmp_pred(cond, pred);
3040 /** the get_type operation must be always implemented and return a firm type */
3041 static ir_type *get_Default_type(ir_node *n)
3044 return get_unknown_type();
3047 /* Sets the get_type operation for an ir_op_ops. */
3048 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops)
3051 case iro_Const: ops->get_type = get_Const_type; break;
3052 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
3053 case iro_Cast: ops->get_type = get_Cast_type; break;
3054 case iro_Proj: ops->get_type = get_Proj_type; break;
3056 /* not allowed to be NULL */
3057 if (! ops->get_type)
3058 ops->get_type = get_Default_type;
3064 /** Return the attribute type of a SymConst node if exists */
3065 static ir_type *get_SymConst_attr_type(ir_node *self)
3067 symconst_kind kind = get_SymConst_kind(self);
3068 if (SYMCONST_HAS_TYPE(kind))
3069 return get_SymConst_type(self);
3073 /** Return the attribute entity of a SymConst node if exists */
3074 static ir_entity *get_SymConst_attr_entity(ir_node *self)
3076 symconst_kind kind = get_SymConst_kind(self);
3077 if (SYMCONST_HAS_ENT(kind))
3078 return get_SymConst_entity(self);
3082 /** the get_type_attr operation must be always implemented */
3083 static ir_type *get_Null_type(ir_node *n)
3086 return firm_unknown_type;
3089 /* Sets the get_type operation for an ir_op_ops. */
3090 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops)
3093 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
3094 case iro_Call: ops->get_type_attr = get_Call_type; break;
3095 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
3096 case iro_Free: ops->get_type_attr = get_Free_type; break;
3097 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
3099 /* not allowed to be NULL */
3100 if (! ops->get_type_attr)
3101 ops->get_type_attr = get_Null_type;
3107 /** the get_entity_attr operation must be always implemented */
3108 static ir_entity *get_Null_ent(ir_node *n)
3114 /* Sets the get_type operation for an ir_op_ops. */
3115 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops)
3118 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
3119 case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
3121 /* not allowed to be NULL */
3122 if (! ops->get_entity_attr)
3123 ops->get_entity_attr = get_Null_ent;
3129 /* Sets the debug information of a node. */
3130 void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
3132 _set_irn_dbg_info(n, db);
3136 * Returns the debug information of an node.
3138 * @param n The node.
3140 dbg_info *(get_irn_dbg_info)(const ir_node *n)
3142 return _get_irn_dbg_info(n);
3145 /* checks whether a node represents a global address */
3146 int is_Global(const ir_node *node)
3148 return is_SymConst_addr_ent(node);
3151 /* returns the entity of a global address */
3152 ir_entity *get_Global_entity(const ir_node *node)
3154 return get_SymConst_entity(node);
3158 * Calculate a hash value of a node.
3160 unsigned firm_default_hash(const ir_node *node)
3165 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
3166 h = irn_arity = get_irn_intra_arity(node);
3168 /* consider all in nodes... except the block if not a control flow. */
3169 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
3170 ir_node *pred = get_irn_intra_n(node, i);
3171 if (is_irn_cse_neutral(pred))
3174 h = 9*h + HASH_PTR(pred);
3178 h = 9*h + HASH_PTR(get_irn_mode(node));
3180 h = 9*h + HASH_PTR(get_irn_op(node));
3183 } /* firm_default_hash */
3185 /* include generated code */
3186 #include "gen_irnode.c.inl"