2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
48 /* some constants fixing the positions of nodes predecessors
50 #define CALL_PARAM_OFFSET 2
51 #define BUILDIN_PARAM_OFFSET 1
52 #define SEL_INDEX_OFFSET 2
53 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
54 #define END_KEEPALIVE_OFFSET 0
/** Textual names of the pn_Cmp relation constants, indexed by their value. */
static const char *pnc_name_arr[] = {
	"pn_Cmp_False", "pn_Cmp_Eq",  "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt",    "pn_Cmp_Ge",  "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo",    "pn_Cmp_Ue",  "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug",    "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the name string of a pnc constant.
 *
 * @param pnc  the relation constant; must be a valid index into the table
 */
const char *get_pnc_string(int pnc)
{
	assert(pnc >= 0 &&
	       pnc < (int) (sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0])));
	return pnc_name_arr[pnc];
}
 * Calculates the negated (Complement(R)) pnc condition.
pn_Cmp get_negated_pnc(long pnc, ir_mode *mode)
/* NOTE(review): the body is only partially visible in this chunk; the
   negation presumably flips the relation bits of pnc -- confirm against
   the full source before relying on details. */
	/* do NOT add the Uo bit for non-floating point values */
	if (! mode_is_float(mode))
87 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
88 pn_Cmp get_inversed_pnc(long pnc)
90 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
91 long lesser = pnc & pn_Cmp_Lt;
92 long greater = pnc & pn_Cmp_Gt;
94 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;


/* register new space for every node */
unsigned firm_register_additional_node_data(unsigned size)
{
	assert(!forbid_new_data && "Too late to register additional node data");

	/* In release builds (asserts compiled out) refuse the late registration
	 * instead of silently growing the node size after nodes were created. */
	if (forbid_new_data)
		return 0;

	/* returns the new total amount of custom data per node */
	return firm_add_node_size += size;
}
void init_irnode(void)
/* One-time module initialization. */
	/* Forbid the addition of new data to an ir node. */
/* NOTE(review): the rest of this body and of struct_align is not visible in
   this chunk; struct_align is used below (new_ir_node) to compute a
   worst-case alignment via offsetof. */
struct struct_align {
 * irnode constructor.
 * Create a new irnode in irg, with an op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic array is created.
ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
ir_mode *mode, int arity, ir_node **in)
/* NOTE(review): incomplete extraction -- declarations (res, p, i),
   else-branches and closing braces of the original body are not visible
   in this chunk. */
	/* round the registered custom-data size up to worst-case alignment */
	unsigned align = offsetof(struct struct_align, s) - 1;
	unsigned add_node_size = (firm_add_node_size + align) & ~align;
	/* total size: custom data area + node header + op-specific attributes */
	size_t node_size = offsetof(ir_node, attr) + op->attr_size + add_node_size;
	/* allocate on the graph's obstack and zero-initialize everything */
	p = obstack_alloc(irg->obst, node_size);
	memset(p, 0, node_size);
	/* the ir_node itself starts after the registered custom data */
	res = (ir_node *)(p + add_node_size);
	res->kind = k_ir_node;
	res->node_idx = irg_register_node_idx(irg, res);
	/* dynamic arity: flexible array with only the block slot for now */
	res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
	/* not nice but necessary: End and Sync must always have a flexible array */
	if (op == op_End || op == op_Sync)
	res->in = NEW_ARR_F(ir_node *, (arity+1));
	res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
	/* in[0] is the block; real predecessors start at in[1] */
	memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
	set_irn_dbg_info(res, db);
	res->node_nr = get_irp_new_node_nr();
	/* initialize out-edge bookkeeping for every edge kind */
	for (i = 0; i < EDGE_KIND_LAST; ++i) {
	INIT_LIST_HEAD(&res->edge_info[i].outs_head);
	/* edges will be build immediately */
	res->edge_info[i].edges_built = 1;
	res->edge_info[i].out_count = 0;
	/* don't put this into the for loop, arity is -1 for some nodes! */
	edges_notify_edge(res, -1, res->in[0], NULL, irg);
	for (i = 1; i <= arity; ++i)
	edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
	hook_new_node(irg, res);
	if (get_irg_phase_state(irg) == phase_backend) {
	be_info_new_node(res);
	/* Init the VRP structures */
	res->vrp.range_type = VRP_UNDEFINED;
	if (mode_is_int(mode)) {
	/* We are assuming that 0 is always represented by this modes 0 */
	res->vrp.bits_not_set = get_mode_null(mode);
	res->vrp.range_bottom =
	res->vrp.range_top = get_tarval_top();
	/* non-integer modes carry no usable value-range information */
	res->vrp.bits_not_set =
	res->vrp.range_bottom =
	res->vrp.range_top = get_tarval_bad();
	res->vrp.bits_node = NULL;
	res->vrp.range_node = NULL;
	res->vrp.range_op = VRP_NONE;
226 /*-- getting some parameters from ir_nodes --*/
228 int (is_ir_node)(const void *thing)
230 return _is_ir_node(thing);
233 int (get_irn_intra_arity)(const ir_node *node)
235 return _get_irn_intra_arity(node);
238 int (get_irn_inter_arity)(const ir_node *node)
240 return _get_irn_inter_arity(node);
243 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
245 int (get_irn_arity)(const ir_node *node)
247 return _get_irn_arity(node);
250 /* Returns the array with ins. This array is shifted with respect to the
251 array accessed by get_irn_n: The block operand is at position 0 not -1.
252 (@@@ This should be changed.)
253 The order of the predecessors in this array is not guaranteed, except that
254 lists of operands as predecessors of Block or arguments of a Call are
256 ir_node **get_irn_in(const ir_node *node)
259 #ifdef INTERPROCEDURAL_VIEW
260 if (get_interprocedural_view()) { /* handle Filter and Block specially */
261 if (get_irn_opcode(node) == iro_Filter) {
262 assert(node->attr.filter.in_cg);
263 return node->attr.filter.in_cg;
264 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
265 return node->attr.block.in_cg;
267 /* else fall through */
269 #endif /* INTERPROCEDURAL_VIEW */
void set_irn_in(ir_node *node, int arity, ir_node **in)
/* Replace the complete predecessor array of a node; the block input
   (slot 0) is preserved.
   NOTE(review): incomplete extraction -- declarations of i and pOld_in and
   several closing braces are not visible in this chunk. */
	ir_graph *irg = get_irn_irg(node);
#ifdef INTERPROCEDURAL_VIEW
	if (get_interprocedural_view()) { /* handle Filter and Block specially */
	ir_opcode code = get_irn_opcode(node);
	if (code == iro_Filter) {
	assert(node->attr.filter.in_cg);
	pOld_in = &node->attr.filter.in_cg;
	} else if (code == iro_Block && node->attr.block.in_cg) {
	pOld_in = &node->attr.block.in_cg;
#endif /* INTERPROCEDURAL_VIEW */
	/* notify the edge machinery: changed edges for the common prefix ... */
	for (i = 0; i < arity; i++) {
	if (i < ARR_LEN(*pOld_in)-1)
	edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
	edges_notify_edge(node, i, in[i], NULL, irg);
	/* ... and deleted edges for any surplus old predecessors */
	for (;i < ARR_LEN(*pOld_in)-1; i++) {
	edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
	/* re-allocate the in array when the arity changed; slot 0 (block) kept */
	if (arity != ARR_LEN(*pOld_in) - 1) {
	ir_node * block = (*pOld_in)[0];
	*pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
	(*pOld_in)[0] = block;
	/* the backedge info must be updated to match the new arity */
	fix_backedges(irg->obst, node);
	memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
316 ir_node *(get_irn_intra_n)(const ir_node *node, int n)
318 return _get_irn_intra_n(node, n);
321 ir_node *(get_irn_inter_n)(const ir_node *node, int n)
323 return _get_irn_inter_n(node, n);
326 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
328 ir_node *(get_irn_n)(const ir_node *node, int n)
330 return _get_irn_n(node, n);
void set_irn_n(ir_node *node, int n, ir_node *in)
/* Set predecessor n of a node; n == -1 addresses the block input. */
	assert(node && node->kind == k_ir_node);
	assert(n < get_irn_arity(node));
	assert(in && in->kind == k_ir_node);
#ifdef INTERPROCEDURAL_VIEW
	if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
	/* Change block pred in both views! */
	node->in[n + 1] = in;
	assert(node->attr.filter.in_cg);
	node->attr.filter.in_cg[n + 1] = in;
	if (get_interprocedural_view()) { /* handle Filter and Block specially */
	if (get_irn_opcode(node) == iro_Filter) {
	assert(node->attr.filter.in_cg);
	node->attr.filter.in_cg[n + 1] = in;
	} else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
	node->attr.block.in_cg[n + 1] = in;
	/* else fall through */
#endif /* INTERPROCEDURAL_VIEW */
	/* the old predecessor is still stored at in[n + 1] at this point */
	hook_set_irn_n(node, n, in, node->in[n + 1]);
	/* Here, we rely on src and tgt being in the current ir graph */
	edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
	node->in[n + 1] = in;
int add_irn_n(ir_node *node, ir_node *in)
/* Append a predecessor to a node with dynamic arity.
   NOTE(review): the declaration of pos and the return statement are not
   visible in this chunk; presumably the new position is returned. */
	ir_graph *irg = get_irn_irg(node);
	/* only ops declared with dynamic arity may grow their in array */
	assert(node->op->opar == oparity_dynamic);
	/* position of the new predecessor (in[0] is the block slot) */
	pos = ARR_LEN(node->in) - 1;
	ARR_APP1(ir_node *, node->in, in);
	edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
	/* call the hook with the same (new, old=NULL) pair as the edge update */
	hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
386 void del_Sync_n(ir_node *n, int i)
388 int arity = get_Sync_n_preds(n);
389 ir_node *last_pred = get_Sync_pred(n, arity - 1);
390 set_Sync_pred(n, i, last_pred);
391 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
392 ARR_SHRINKLEN(get_irn_in(n), arity);
395 int (get_irn_deps)(const ir_node *node)
397 return _get_irn_deps(node);
400 ir_node *(get_irn_dep)(const ir_node *node, int pos)
402 return _get_irn_dep(node, pos);
405 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
407 _set_irn_dep(node, pos, dep);
int add_irn_dep(ir_node *node, ir_node *dep)
/* Add a dependency (DEP) edge to a node, reusing a NULL slot if present.
   NOTE(review): incomplete extraction -- the declarations of i, n,
   first_zero and res, the loop break/returns, and the final return are
   not visible in this chunk. */
	/* DEP edges are only allowed in backend phase */
	assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
	if (node->deps == NULL) {
	/* lazily create the flexible deps array */
	node->deps = NEW_ARR_F(ir_node *, 1);
	/* scan for a free (NULL) slot and bail out if dep is already present */
	for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
	if (node->deps[i] == NULL)
	if (node->deps[i] == dep)
	if (first_zero >= 0) {
	node->deps[first_zero] = dep;
	ARR_APP1(ir_node *, node->deps, dep);
	/* inform the out-edge machinery about the new DEP edge */
	edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
445 void add_irn_deps(ir_node *tgt, ir_node *src)
449 for (i = 0, n = get_irn_deps(src); i < n; ++i)
450 add_irn_dep(tgt, get_irn_dep(src, i));
454 ir_mode *(get_irn_mode)(const ir_node *node)
456 return _get_irn_mode(node);
459 void (set_irn_mode)(ir_node *node, ir_mode *mode)
461 _set_irn_mode(node, mode);
464 /** Gets the string representation of the mode .*/
465 const char *get_irn_modename(const ir_node *node)
468 return get_mode_name(node->mode);
471 ident *get_irn_modeident(const ir_node *node)
474 return get_mode_ident(node->mode);
477 ir_op *(get_irn_op)(const ir_node *node)
479 return _get_irn_op(node);
482 /* should be private to the library: */
483 void (set_irn_op)(ir_node *node, ir_op *op)
485 _set_irn_op(node, op);
488 unsigned (get_irn_opcode)(const ir_node *node)
490 return _get_irn_opcode(node);
493 const char *get_irn_opname(const ir_node *node)
496 if (is_Phi0(node)) return "Phi0";
497 return get_id_str(node->op->name);
500 ident *get_irn_opident(const ir_node *node)
503 return node->op->name;
506 ir_visited_t (get_irn_visited)(const ir_node *node)
508 return _get_irn_visited(node);
511 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
513 _set_irn_visited(node, visited);
516 void (mark_irn_visited)(ir_node *node)
518 _mark_irn_visited(node);
521 int (irn_visited)(const ir_node *node)
523 return _irn_visited(node);
526 int (irn_visited_else_mark)(ir_node *node)
528 return _irn_visited_else_mark(node);
531 void (set_irn_link)(ir_node *node, void *link)
533 _set_irn_link(node, link);
536 void *(get_irn_link)(const ir_node *node)
538 return _get_irn_link(node);
541 op_pin_state (get_irn_pinned)(const ir_node *node)
543 return _get_irn_pinned(node);
546 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
548 return _is_irn_pinned_in_irg(node);
551 void set_irn_pinned(ir_node *node, op_pin_state state)
553 /* due to optimization an opt may be turned into a Tuple */
557 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
558 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
560 node->attr.except.pin_state = state;
563 /* Outputs a unique number for this node */
564 long get_irn_node_nr(const ir_node *node)
567 return node->node_nr;
570 const_attr *get_irn_const_attr(ir_node *node)
572 assert(is_Const(node));
573 return &node->attr.con;
576 long get_irn_proj_attr(ir_node *node)
578 /* BEWARE: check for true Proj node here, no Filter */
579 assert(node->op == op_Proj);
580 return node->attr.proj;
583 alloc_attr *get_irn_alloc_attr(ir_node *node)
585 assert(is_Alloc(node));
586 return &node->attr.alloc;
589 free_attr *get_irn_free_attr(ir_node *node)
591 assert(is_Free(node));
592 return &node->attr.free;
595 symconst_attr *get_irn_symconst_attr(ir_node *node)
597 assert(is_SymConst(node));
598 return &node->attr.symc;
601 call_attr *get_irn_call_attr(ir_node *node)
603 assert(is_Call(node));
604 return &node->attr.call;
607 sel_attr *get_irn_sel_attr(ir_node *node)
609 assert(is_Sel(node));
610 return &node->attr.sel;
613 phi_attr *get_irn_phi_attr(ir_node *node)
615 return &node->attr.phi;
618 block_attr *get_irn_block_attr(ir_node *node)
620 assert(is_Block(node));
621 return &node->attr.block;
624 load_attr *get_irn_load_attr(ir_node *node)
626 assert(is_Load(node));
627 return &node->attr.load;
630 store_attr *get_irn_store_attr(ir_node *node)
632 assert(is_Store(node));
633 return &node->attr.store;
636 except_attr *get_irn_except_attr(ir_node *node)
638 assert(node->op == op_Div || node->op == op_Quot ||
639 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
640 return &node->attr.except;
643 divmod_attr *get_irn_divmod_attr(ir_node *node)
645 assert(node->op == op_Div || node->op == op_Quot ||
646 node->op == op_DivMod || node->op == op_Mod);
647 return &node->attr.divmod;
650 builtin_attr *get_irn_builtin_attr(ir_node *node)
652 assert(is_Builtin(node));
653 return &node->attr.builtin;
656 void *(get_irn_generic_attr)(ir_node *node)
658 assert(is_ir_node(node));
659 return _get_irn_generic_attr(node);
662 const void *(get_irn_generic_attr_const)(const ir_node *node)
664 assert(is_ir_node(node));
665 return _get_irn_generic_attr_const(node);
668 unsigned (get_irn_idx)(const ir_node *node)
670 assert(is_ir_node(node));
671 return _get_irn_idx(node);
674 int get_irn_pred_pos(ir_node *node, ir_node *arg)
677 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
678 if (get_irn_n(node, i) == arg)
684 /** manipulate fields of individual nodes **/
686 /* this works for all except Block */
687 ir_node *get_nodes_block(const ir_node *node)
689 assert(node->op != op_Block);
690 return get_irn_n(node, -1);
693 void set_nodes_block(ir_node *node, ir_node *block)
695 assert(node->op != op_Block);
696 set_irn_n(node, -1, block);
699 /* this works for all except Block */
700 ir_node *get_nodes_MacroBlock(const ir_node *node)
702 assert(node->op != op_Block);
703 return get_Block_MacroBlock(get_irn_n(node, -1));
706 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
707 * from Start. If so returns frame type, else Null. */
708 ir_type *is_frame_pointer(const ir_node *n)
710 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
711 ir_node *start = get_Proj_pred(n);
712 if (is_Start(start)) {
713 return get_irg_frame_type(get_irn_irg(start));
719 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
720 * from Start. If so returns tls type, else Null. */
721 ir_type *is_tls_pointer(const ir_node *n)
723 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
724 ir_node *start = get_Proj_pred(n);
725 if (is_Start(start)) {
726 return get_tls_type();
732 ir_node **get_Block_cfgpred_arr(ir_node *node)
734 assert(is_Block(node));
735 return (ir_node **)&(get_irn_in(node)[1]);
738 int (get_Block_n_cfgpreds)(const ir_node *node)
740 return _get_Block_n_cfgpreds(node);
743 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
745 return _get_Block_cfgpred(node, pos);
748 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
750 assert(is_Block(node));
751 set_irn_n(node, pos, pred);
754 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
758 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
759 if (get_Block_cfgpred_block(block, i) == pred)
765 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
767 return _get_Block_cfgpred_block(node, pos);
770 int get_Block_matured(const ir_node *node)
772 assert(is_Block(node));
773 return (int)node->attr.block.is_matured;
776 void set_Block_matured(ir_node *node, int matured)
778 assert(is_Block(node));
779 node->attr.block.is_matured = matured;
782 ir_visited_t (get_Block_block_visited)(const ir_node *node)
784 return _get_Block_block_visited(node);
787 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
789 _set_Block_block_visited(node, visit);
792 /* For this current_ir_graph must be set. */
793 void (mark_Block_block_visited)(ir_node *node)
795 _mark_Block_block_visited(node);
798 int (Block_block_visited)(const ir_node *node)
800 return _Block_block_visited(node);
803 ir_node *get_Block_graph_arr(ir_node *node, int pos)
805 assert(is_Block(node));
806 return node->attr.block.graph_arr[pos+1];
809 void set_Block_graph_arr(ir_node *node, int pos, ir_node *value)
811 assert(is_Block(node));
812 node->attr.block.graph_arr[pos+1] = value;
815 #ifdef INTERPROCEDURAL_VIEW
816 void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[])
818 assert(is_Block(node));
819 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
820 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
821 node->attr.block.in_cg[0] = NULL;
822 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
824 /* Fix backedge array. fix_backedges() operates depending on
825 interprocedural_view. */
826 int ipv = get_interprocedural_view();
827 set_interprocedural_view(1);
828 fix_backedges(current_ir_graph->obst, node);
829 set_interprocedural_view(ipv);
832 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
835 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred)
837 assert(is_Block(node) && node->attr.block.in_cg &&
838 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
839 node->attr.block.in_cg[pos + 1] = pred;
842 ir_node **get_Block_cg_cfgpred_arr(ir_node *node)
844 assert(is_Block(node));
845 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
848 int get_Block_cg_n_cfgpreds(const ir_node *node)
850 assert(is_Block(node));
851 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
854 ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos)
856 assert(is_Block(node) && node->attr.block.in_cg);
857 return node->attr.block.in_cg[pos + 1];
860 void remove_Block_cg_cfgpred_arr(ir_node *node)
862 assert(is_Block(node));
863 node->attr.block.in_cg = NULL;
865 #endif /* INTERPROCEDURAL_VIEW */
867 ir_node *(set_Block_dead)(ir_node *block)
869 return _set_Block_dead(block);
872 int (is_Block_dead)(const ir_node *block)
874 return _is_Block_dead(block);
877 ir_extblk *get_Block_extbb(const ir_node *block)
880 assert(is_Block(block));
881 res = block->attr.block.extblk;
882 assert(res == NULL || is_ir_extbb(res));
886 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
888 assert(is_Block(block));
889 assert(extblk == NULL || is_ir_extbb(extblk));
890 block->attr.block.extblk = extblk;
893 /* Returns the macro block header of a block.*/
894 ir_node *get_Block_MacroBlock(const ir_node *block)
897 assert(is_Block(block));
898 mbh = get_irn_n(block, -1);
899 /* once macro block header is respected by all optimizations,
900 this assert can be removed */
905 /* Sets the macro block header of a block. */
906 void set_Block_MacroBlock(ir_node *block, ir_node *mbh)
908 assert(is_Block(block));
910 assert(is_Block(mbh));
911 set_irn_n(block, -1, mbh);
914 /* returns the macro block header of a node. */
915 ir_node *get_irn_MacroBlock(const ir_node *n)
918 n = get_nodes_block(n);
919 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
923 return get_Block_MacroBlock(n);
926 /* returns the graph of a Block. */
927 ir_graph *(get_Block_irg)(const ir_node *block)
929 return _get_Block_irg(block);
932 ir_entity *create_Block_entity(ir_node *block)
935 assert(is_Block(block));
937 entity = block->attr.block.entity;
938 if (entity == NULL) {
942 glob = get_glob_type();
943 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
944 set_entity_visibility(entity, ir_visibility_local);
945 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
946 nr = get_irp_next_label_nr();
947 set_entity_label(entity, nr);
948 set_entity_compiler_generated(entity, 1);
950 block->attr.block.entity = entity;
955 ir_entity *get_Block_entity(const ir_node *block)
957 assert(is_Block(block));
958 return block->attr.block.entity;
961 void set_Block_entity(ir_node *block, ir_entity *entity)
963 assert(is_Block(block));
964 assert(get_entity_type(entity) == get_code_type());
965 block->attr.block.entity = entity;
968 int has_Block_entity(const ir_node *block)
970 return block->attr.block.entity != NULL;
973 ir_node *(get_Block_phis)(const ir_node *block)
975 return _get_Block_phis(block);
978 void (set_Block_phis)(ir_node *block, ir_node *phi)
980 _set_Block_phis(block, phi);
983 void (add_Block_phi)(ir_node *block, ir_node *phi)
985 _add_Block_phi(block, phi);
988 /* Get the Block mark (single bit). */
989 unsigned (get_Block_mark)(const ir_node *block)
991 return _get_Block_mark(block);
994 /* Set the Block mark (single bit). */
995 void (set_Block_mark)(ir_node *block, unsigned mark)
997 _set_Block_mark(block, mark);
1000 int get_End_n_keepalives(const ir_node *end)
1002 assert(is_End(end));
1003 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
1006 ir_node *get_End_keepalive(const ir_node *end, int pos)
1008 assert(is_End(end));
1009 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
1012 void add_End_keepalive(ir_node *end, ir_node *ka)
1014 assert(is_End(end));
1018 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
1020 assert(is_End(end));
1021 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
1024 /* Set new keep-alives */
1025 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
1028 ir_graph *irg = get_irn_irg(end);
1030 /* notify that edges are deleted */
1031 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
1032 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
1034 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
1036 for (i = 0; i < n; ++i) {
1037 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
1038 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
1042 /* Set new keep-alives from old keep-alives, skipping irn */
1043 void remove_End_keepalive(ir_node *end, ir_node *irn)
1045 int n = get_End_n_keepalives(end);
1050 for (i = n -1; i >= 0; --i) {
1051 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
1054 if (old_ka == irn) {
1061 irg = get_irn_irg(end);
1063 /* remove the edge */
1064 edges_notify_edge(end, idx, NULL, irn, irg);
1067 /* exchange with the last one */
1068 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
1069 edges_notify_edge(end, n - 1, NULL, old, irg);
1070 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
1071 edges_notify_edge(end, idx, old, NULL, irg);
1073 /* now n - 1 keeps, 1 block input */
1074 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
1077 /* remove Bads, NoMems and doublets from the keep-alive set */
1078 void remove_End_Bads_and_doublets(ir_node *end)
1081 int idx, n = get_End_n_keepalives(end);
1087 irg = get_irn_irg(end);
1088 pset_new_init(&keeps);
1090 for (idx = n - 1; idx >= 0; --idx) {
1091 ir_node *ka = get_End_keepalive(end, idx);
1093 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
1094 /* remove the edge */
1095 edges_notify_edge(end, idx, NULL, ka, irg);
1098 /* exchange with the last one */
1099 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
1100 edges_notify_edge(end, n - 1, NULL, old, irg);
1101 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
1102 edges_notify_edge(end, idx, old, NULL, irg);
1106 pset_new_insert(&keeps, ka);
1109 /* n keeps, 1 block input */
1110 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
1112 pset_new_destroy(&keeps);
1115 void free_End(ir_node *end)
1117 assert(is_End(end));
1120 end->in = NULL; /* @@@ make sure we get an error if we use the
1121 in array afterwards ... */
1124 /* Return the target address of an IJmp */
1125 ir_node *get_IJmp_target(const ir_node *ijmp)
1127 assert(is_IJmp(ijmp));
1128 return get_irn_n(ijmp, 0);
1131 /** Sets the target address of an IJmp */
1132 void set_IJmp_target(ir_node *ijmp, ir_node *tgt)
1134 assert(is_IJmp(ijmp));
1135 set_irn_n(ijmp, 0, tgt);
1138 ir_node *get_Cond_selector(const ir_node *node)
1140 assert(is_Cond(node));
1141 return get_irn_n(node, 0);
1144 void set_Cond_selector(ir_node *node, ir_node *selector)
1146 assert(is_Cond(node));
1147 set_irn_n(node, 0, selector);
1150 long get_Cond_default_proj(const ir_node *node)
1152 assert(is_Cond(node));
1153 return node->attr.cond.default_proj;
1156 void set_Cond_default_proj(ir_node *node, long defproj)
1158 assert(is_Cond(node));
1159 node->attr.cond.default_proj = defproj;
1162 ir_node *get_Return_mem(const ir_node *node)
1164 assert(is_Return(node));
1165 return get_irn_n(node, 0);
1168 void set_Return_mem(ir_node *node, ir_node *mem)
1170 assert(is_Return(node));
1171 set_irn_n(node, 0, mem);
1174 int get_Return_n_ress(const ir_node *node)
1176 assert(is_Return(node));
1177 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1180 ir_node **get_Return_res_arr(ir_node *node)
1182 assert(is_Return(node));
1183 if (get_Return_n_ress(node) > 0)
1184 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1190 void set_Return_n_res(ir_node *node, int results)
1192 assert(is_Return(node));
1196 ir_node *get_Return_res(const ir_node *node, int pos)
1198 assert(is_Return(node));
1199 assert(get_Return_n_ress(node) > pos);
1200 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1203 void set_Return_res(ir_node *node, int pos, ir_node *res)
1205 assert(is_Return(node));
1206 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1209 tarval *(get_Const_tarval)(const ir_node *node)
1211 return _get_Const_tarval(node);
1214 void set_Const_tarval(ir_node *node, tarval *con)
1216 assert(is_Const(node));
1217 node->attr.con.tv = con;
1220 int (is_Const_null)(const ir_node *node)
1222 return _is_Const_null(node);
1225 int (is_Const_one)(const ir_node *node)
1227 return _is_Const_one(node);
1230 int (is_Const_all_one)(const ir_node *node)
1232 return _is_Const_all_one(node);
1236 /* The source language type. Must be an atomic type. Mode of type must
1237 be mode of node. For tarvals from entities type must be pointer to
1239 ir_type *get_Const_type(ir_node *node)
1241 assert(is_Const(node));
1242 return node->attr.con.tp;
1245 void set_Const_type(ir_node *node, ir_type *tp)
1247 assert(is_Const(node));
1248 if (tp != firm_unknown_type) {
1249 assert(is_atomic_type(tp));
1250 assert(get_type_mode(tp) == get_irn_mode(node));
1252 node->attr.con.tp = tp;
1256 symconst_kind get_SymConst_kind(const ir_node *node)
1258 assert(is_SymConst(node));
1259 return node->attr.symc.kind;
1262 void set_SymConst_kind(ir_node *node, symconst_kind kind)
1264 assert(is_SymConst(node));
1265 node->attr.symc.kind = kind;
1268 ir_type *get_SymConst_type(const ir_node *node)
1270 /* the cast here is annoying, but we have to compensate for
1272 ir_node *irn = (ir_node *)node;
1273 assert(is_SymConst(node) &&
1274 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1275 return irn->attr.symc.sym.type_p;
1278 void set_SymConst_type(ir_node *node, ir_type *tp)
1280 assert(is_SymConst(node) &&
1281 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1282 node->attr.symc.sym.type_p = tp;
1286 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1287 ir_entity *get_SymConst_entity(const ir_node *node)
1289 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1290 return node->attr.symc.sym.entity_p;
1293 void set_SymConst_entity(ir_node *node, ir_entity *ent)
1295 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1296 node->attr.symc.sym.entity_p = ent;
1299 ir_enum_const *get_SymConst_enum(const ir_node *node)
1301 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1302 return node->attr.symc.sym.enum_p;
1305 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
1307 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1308 node->attr.symc.sym.enum_p = ec;
1311 union symconst_symbol
1312 get_SymConst_symbol(const ir_node *node)
1314 assert(is_SymConst(node));
1315 return node->attr.symc.sym;
1318 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
1320 assert(is_SymConst(node));
1321 node->attr.symc.sym = sym;
1324 ir_type *get_SymConst_value_type(ir_node *node)
1326 assert(is_SymConst(node));
1327 return node->attr.symc.tp;
1330 void set_SymConst_value_type(ir_node *node, ir_type *tp)
1332 assert(is_SymConst(node));
1333 node->attr.symc.tp = tp;
1336 ir_node *get_Sel_mem(const ir_node *node)
1338 assert(is_Sel(node));
1339 return get_irn_n(node, 0);
1342 void set_Sel_mem(ir_node *node, ir_node *mem)
1344 assert(is_Sel(node));
1345 set_irn_n(node, 0, mem);
1348 ir_node *get_Sel_ptr(const ir_node *node)
1350 assert(is_Sel(node));
1351 return get_irn_n(node, 1);
1354 void set_Sel_ptr(ir_node *node, ir_node *ptr)
1356 assert(is_Sel(node));
1357 set_irn_n(node, 1, ptr);
1360 int get_Sel_n_indexs(const ir_node *node)
1362 assert(is_Sel(node));
1363 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1366 ir_node **get_Sel_index_arr(ir_node *node)
1368 assert(is_Sel(node));
1369 if (get_Sel_n_indexs(node) > 0)
1370 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1375 ir_node *get_Sel_index(const ir_node *node, int pos)
1377 assert(is_Sel(node));
1378 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1381 void set_Sel_index(ir_node *node, int pos, ir_node *index)
1383 assert(is_Sel(node));
1384 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1387 ir_entity *get_Sel_entity(const ir_node *node)
1389 assert(is_Sel(node));
1390 return node->attr.sel.entity;
1393 /* need a version without const to prevent warning */
1394 static ir_entity *_get_Sel_entity(ir_node *node)
1396 return get_Sel_entity(node);
1399 void set_Sel_entity(ir_node *node, ir_entity *ent)
1401 assert(is_Sel(node));
1402 node->attr.sel.entity = ent;
1406 /* For unary and binary arithmetic operations the access to the
1407 operands can be factored out. Left is the first, right the
1408 second arithmetic value as listed in tech report 0999-33.
1409 unops are: Minus, Abs, Not, Conv, Cast
1410 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1411 Shr, Shrs, Rotate, Cmp */
1414 ir_node *get_Call_mem(const ir_node *node)
1416 assert(is_Call(node));
1417 return get_irn_n(node, 0);
1420 void set_Call_mem(ir_node *node, ir_node *mem)
1422 assert(is_Call(node));
1423 set_irn_n(node, 0, mem);
1426 ir_node *get_Call_ptr(const ir_node *node)
1428 assert(is_Call(node));
1429 return get_irn_n(node, 1);
1432 void set_Call_ptr(ir_node *node, ir_node *ptr)
1434 assert(is_Call(node));
1435 set_irn_n(node, 1, ptr);
1438 ir_node **get_Call_param_arr(ir_node *node)
1440 assert(is_Call(node));
1441 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1444 int get_Call_n_params(const ir_node *node)
1446 assert(is_Call(node));
1447 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1450 ir_node *get_Call_param(const ir_node *node, int pos)
1452 assert(is_Call(node));
1453 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1456 void set_Call_param(ir_node *node, int pos, ir_node *param)
1458 assert(is_Call(node));
1459 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1462 ir_type *get_Call_type(ir_node *node)
1464 assert(is_Call(node));
1465 return node->attr.call.type;
1468 void set_Call_type(ir_node *node, ir_type *tp)
1470 assert(is_Call(node));
1471 assert((get_unknown_type() == tp) || is_Method_type(tp));
1472 node->attr.call.type = tp;
1475 unsigned get_Call_tail_call(const ir_node *node)
1477 assert(is_Call(node));
1478 return node->attr.call.tail_call;
1481 void set_Call_tail_call(ir_node *node, unsigned tail_call)
1483 assert(is_Call(node));
1484 node->attr.call.tail_call = tail_call != 0;
1487 ir_node *get_Builtin_mem(const ir_node *node)
1489 assert(is_Builtin(node));
1490 return get_irn_n(node, 0);
1493 void set_Builin_mem(ir_node *node, ir_node *mem)
1495 assert(is_Builtin(node));
1496 set_irn_n(node, 0, mem);
1499 ir_builtin_kind get_Builtin_kind(const ir_node *node)
1501 assert(is_Builtin(node));
1502 return node->attr.builtin.kind;
1505 void set_Builtin_kind(ir_node *node, ir_builtin_kind kind)
1507 assert(is_Builtin(node));
1508 node->attr.builtin.kind = kind;
1511 ir_node **get_Builtin_param_arr(ir_node *node)
1513 assert(is_Builtin(node));
1514 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1517 int get_Builtin_n_params(const ir_node *node)
1519 assert(is_Builtin(node));
1520 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1523 ir_node *get_Builtin_param(const ir_node *node, int pos)
1525 assert(is_Builtin(node));
1526 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1529 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
1531 assert(is_Builtin(node));
1532 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1535 ir_type *get_Builtin_type(ir_node *node)
1537 assert(is_Builtin(node));
1538 return node->attr.builtin.type;
1541 void set_Builtin_type(ir_node *node, ir_type *tp)
1543 assert(is_Builtin(node));
1544 assert((get_unknown_type() == tp) || is_Method_type(tp));
1545 node->attr.builtin.type = tp;
1548 /* Returns a human readable string for the ir_builtin_kind. */
1549 const char *get_builtin_kind_name(ir_builtin_kind kind)
/* Stringify each enum constant via a temporary X() macro.
 * NOTE(review): the switch header, several X(...) cases, the #undef X and
 * the fallback return are elided from this view — confirm all ir_bk_*
 * values are covered in the full source. */
1551 #define X(a) case a: return #a;
1554 X(ir_bk_debugbreak);
1555 X(ir_bk_return_address);
1556 X(ir_bk_frame_address);
1566 X(ir_bk_inner_trampoline);
1573 int Call_has_callees(const ir_node *node)
1575 assert(is_Call(node));
1576 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1577 (node->attr.call.callee_arr != NULL));
1580 int get_Call_n_callees(const ir_node *node)
1582 assert(is_Call(node) && node->attr.call.callee_arr);
1583 return ARR_LEN(node->attr.call.callee_arr);
1586 ir_entity *get_Call_callee(const ir_node *node, int pos)
1588 assert(pos >= 0 && pos < get_Call_n_callees(node));
1589 return node->attr.call.callee_arr[pos];
/* Install the callee array of a Call, copying n entries from arr. */
1592 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr)
1594 assert(is_Call(node));
1595 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
/* (Re)allocate on the obstack only when the size changed.
 * NOTE(review): allocation uses current_ir_graph rather than
 * get_irn_irg(node) — verify this is intentional. */
1596 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1598 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
/* Drop the callee information; the array memory stays on the obstack and
 * is reclaimed together with the graph. */
1601 void remove_Call_callee_arr(ir_node *node)
1603 assert(is_Call(node));
1604 node->attr.call.callee_arr = NULL;
1607 ir_node *get_CallBegin_ptr(const ir_node *node)
1609 assert(is_CallBegin(node));
1610 return get_irn_n(node, 0);
1613 void set_CallBegin_ptr(ir_node *node, ir_node *ptr)
1615 assert(is_CallBegin(node));
1616 set_irn_n(node, 0, ptr);
1619 ir_node *get_CallBegin_call(const ir_node *node)
1621 assert(is_CallBegin(node));
1622 return node->attr.callbegin.call;
1625 void set_CallBegin_call(ir_node *node, ir_node *call)
1627 assert(is_CallBegin(node));
1628 node->attr.callbegin.call = call;
1632 * Returns non-zero if a Call is surely a self-recursive Call.
1633 * Beware: if this functions returns 0, the call might be self-recursive!
1635 int is_self_recursive_Call(const ir_node *call)
1637 const ir_node *callee = get_Call_ptr(call);
/* Only a direct call through a SymConst entity address can be proven
 * self-recursive: the callee's graph must be the caller's own graph. */
1639 if (is_SymConst_addr_ent(callee)) {
1640 const ir_entity *ent = get_SymConst_entity(callee);
1641 const ir_graph *irg = get_entity_irg(ent);
1642 if (irg == get_irn_irg(call))
/* NOTE(review): the 'return 1;' and final 'return 0;' lines are elided
 * from this view. */
1649 ir_node * get_##OP##_left(const ir_node *node) { \
1650 assert(is_##OP(node)); \
1651 return get_irn_n(node, node->op->op_index); \
1653 void set_##OP##_left(ir_node *node, ir_node *left) { \
1654 assert(is_##OP(node)); \
1655 set_irn_n(node, node->op->op_index, left); \
1657 ir_node *get_##OP##_right(const ir_node *node) { \
1658 assert(is_##OP(node)); \
1659 return get_irn_n(node, node->op->op_index + 1); \
1661 void set_##OP##_right(ir_node *node, ir_node *right) { \
1662 assert(is_##OP(node)); \
1663 set_irn_n(node, node->op->op_index + 1, right); \
1667 ir_node *get_##OP##_op(const ir_node *node) { \
1668 assert(is_##OP(node)); \
1669 return get_irn_n(node, node->op->op_index); \
1671 void set_##OP##_op(ir_node *node, ir_node *op) { \
1672 assert(is_##OP(node)); \
1673 set_irn_n(node, node->op->op_index, op); \
1676 #define BINOP_MEM(OP) \
1680 get_##OP##_mem(const ir_node *node) { \
1681 assert(is_##OP(node)); \
1682 return get_irn_n(node, 0); \
1686 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1687 assert(is_##OP(node)); \
1688 set_irn_n(node, 0, mem); \
1694 ir_mode *get_##OP##_resmode(const ir_node *node) { \
1695 assert(is_##OP(node)); \
1696 return node->attr.divmod.resmode; \
1699 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
1700 assert(is_##OP(node)); \
1701 node->attr.divmod.resmode = mode; \
1729 int get_Div_no_remainder(const ir_node *node)
1731 assert(is_Div(node));
1732 return node->attr.divmod.no_remainder;
1735 void set_Div_no_remainder(ir_node *node, int no_remainder)
1737 assert(is_Div(node));
1738 node->attr.divmod.no_remainder = no_remainder;
1741 int get_Conv_strict(const ir_node *node)
1743 assert(is_Conv(node));
1744 return node->attr.conv.strict;
1747 void set_Conv_strict(ir_node *node, int strict_flag)
1749 assert(is_Conv(node));
1750 node->attr.conv.strict = (char)strict_flag;
1753 ir_type *get_Cast_type(ir_node *node)
1755 assert(is_Cast(node));
1756 return node->attr.cast.type;
1759 void set_Cast_type(ir_node *node, ir_type *to_tp)
1761 assert(is_Cast(node));
1762 node->attr.cast.type = to_tp;
1766 /* Checks for upcast.
1768 * Returns true if the Cast node casts a class type to a super type.
/* Requires consistent typeinfo; strips matching levels of pointer
 * indirection from both types before the class comparison. */
1770 int is_Cast_upcast(ir_node *node)
1772 ir_type *totype = get_Cast_type(node);
1773 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1775 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1778 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1779 totype = get_pointer_points_to_type(totype);
1780 fromtype = get_pointer_points_to_type(fromtype);
/* NOTE(review): the loop's closing brace and an additional check between
 * lines 1781-1784 are elided from this view. */
1785 if (!is_Class_type(totype)) return 0;
/* upcast: the source class must be a subclass of the destination class */
1786 return is_SubClass_of(fromtype, totype);
1789 /* Checks for downcast.
1791 * Returns true if the Cast node casts a class type to a sub type.
1793 int is_Cast_downcast(ir_node *node)
1795 ir_type *totype = get_Cast_type(node);
1796 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1798 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1801 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1802 totype = get_pointer_points_to_type(totype);
1803 fromtype = get_pointer_points_to_type(fromtype);
1808 if (!is_Class_type(totype)) return 0;
/* downcast: the destination class must be a subclass of the source class */
1809 return is_SubClass_of(totype, fromtype);
/* Out-of-line version of the inline _is_unop(); the parenthesised name
 * prevents macro expansion. */
1812 int (is_unop)(const ir_node *node)
1814 return _is_unop(node);
/* Return the single operand of a unary operation.
 * The trailing assert is only reached for non-unary ops and then always
 * fails (debug-build guard).  NOTE(review): the release-mode fallback
 * return is elided from this view. */
1817 ir_node *get_unop_op(const ir_node *node)
1819 if (node->op->opar == oparity_unary)
1820 return get_irn_n(node, node->op->op_index);
1822 assert(node->op->opar == oparity_unary);
/* Set the single operand of a unary operation; silently does nothing for
 * non-unary ops in release builds (the assert fires in debug builds). */
1826 void set_unop_op(ir_node *node, ir_node *op)
1828 if (node->op->opar == oparity_unary)
1829 set_irn_n(node, node->op->op_index, op);
1831 assert(node->op->opar == oparity_unary);
1834 int (is_binop)(const ir_node *node)
1836 return _is_binop(node);
1839 ir_node *get_binop_left(const ir_node *node)
1841 assert(node->op->opar == oparity_binary);
1842 return get_irn_n(node, node->op->op_index);
1845 void set_binop_left(ir_node *node, ir_node *left)
1847 assert(node->op->opar == oparity_binary);
1848 set_irn_n(node, node->op->op_index, left);
1851 ir_node *get_binop_right(const ir_node *node)
1853 assert(node->op->opar == oparity_binary);
1854 return get_irn_n(node, node->op->op_index + 1);
1857 void set_binop_right(ir_node *node, ir_node *right)
1859 assert(node->op->opar == oparity_binary);
1860 set_irn_n(node, node->op->op_index + 1, right);
1863 int is_Phi0(const ir_node *n)
1867 return ((get_irn_op(n) == op_Phi) &&
1868 (get_irn_arity(n) == 0) &&
1869 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1872 ir_node **get_Phi_preds_arr(ir_node *node)
1874 assert(node->op == op_Phi);
1875 return (ir_node **)&(get_irn_in(node)[1]);
1878 int get_Phi_n_preds(const ir_node *node)
1880 assert(is_Phi(node) || is_Phi0(node));
1881 return (get_irn_arity(node));
/* NOTE(review): the body of this function is elided from this view;
 * presumably it resizes the in-array of the Phi node — confirm against
 * the full source. */
1885 void set_Phi_n_preds(ir_node *node, int n_preds)
1887 assert(node->op == op_Phi);
1891 ir_node *get_Phi_pred(const ir_node *node, int pos)
1893 assert(is_Phi(node) || is_Phi0(node));
1894 return get_irn_n(node, pos);
1897 void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1899 assert(is_Phi(node) || is_Phi0(node));
1900 set_irn_n(node, pos, pred);
1903 ir_node *(get_Phi_next)(const ir_node *phi)
1905 return _get_Phi_next(phi);
1908 void (set_Phi_next)(ir_node *phi, ir_node *next)
1910 _set_Phi_next(phi, next);
1913 int is_memop(const ir_node *node)
1915 ir_opcode code = get_irn_opcode(node);
1916 return (code == iro_Load || code == iro_Store);
1919 ir_node *get_memop_mem(const ir_node *node)
1921 assert(is_memop(node));
1922 return get_irn_n(node, 0);
1925 void set_memop_mem(ir_node *node, ir_node *mem)
1927 assert(is_memop(node));
1928 set_irn_n(node, 0, mem);
1931 ir_node *get_memop_ptr(const ir_node *node)
1933 assert(is_memop(node));
1934 return get_irn_n(node, 1);
1937 void set_memop_ptr(ir_node *node, ir_node *ptr)
1939 assert(is_memop(node));
1940 set_irn_n(node, 1, ptr);
1943 ir_node *get_Load_mem(const ir_node *node)
1945 assert(is_Load(node));
1946 return get_irn_n(node, 0);
1949 void set_Load_mem(ir_node *node, ir_node *mem)
1951 assert(is_Load(node));
1952 set_irn_n(node, 0, mem);
1955 ir_node *get_Load_ptr(const ir_node *node)
1957 assert(is_Load(node));
1958 return get_irn_n(node, 1);
1961 void set_Load_ptr(ir_node *node, ir_node *ptr)
1963 assert(is_Load(node));
1964 set_irn_n(node, 1, ptr);
1967 ir_mode *get_Load_mode(const ir_node *node)
1969 assert(is_Load(node));
1970 return node->attr.load.mode;
1973 void set_Load_mode(ir_node *node, ir_mode *mode)
1975 assert(is_Load(node));
1976 node->attr.load.mode = mode;
1979 ir_volatility get_Load_volatility(const ir_node *node)
1981 assert(is_Load(node));
1982 return node->attr.load.volatility;
1985 void set_Load_volatility(ir_node *node, ir_volatility volatility)
1987 assert(is_Load(node));
1988 node->attr.load.volatility = volatility;
1991 ir_align get_Load_align(const ir_node *node)
1993 assert(is_Load(node));
1994 return node->attr.load.aligned;
1997 void set_Load_align(ir_node *node, ir_align align)
1999 assert(is_Load(node));
2000 node->attr.load.aligned = align;
2004 ir_node *get_Store_mem(const ir_node *node)
2006 assert(is_Store(node));
2007 return get_irn_n(node, 0);
2010 void set_Store_mem(ir_node *node, ir_node *mem)
2012 assert(is_Store(node));
2013 set_irn_n(node, 0, mem);
2016 ir_node *get_Store_ptr(const ir_node *node)
2018 assert(is_Store(node));
2019 return get_irn_n(node, 1);
2022 void set_Store_ptr(ir_node *node, ir_node *ptr)
2024 assert(is_Store(node));
2025 set_irn_n(node, 1, ptr);
2028 ir_node *get_Store_value(const ir_node *node)
2030 assert(is_Store(node));
2031 return get_irn_n(node, 2);
2034 void set_Store_value(ir_node *node, ir_node *value)
2036 assert(is_Store(node));
2037 set_irn_n(node, 2, value);
2040 ir_volatility get_Store_volatility(const ir_node *node)
2042 assert(is_Store(node));
2043 return node->attr.store.volatility;
2046 void set_Store_volatility(ir_node *node, ir_volatility volatility)
2048 assert(is_Store(node));
2049 node->attr.store.volatility = volatility;
2052 ir_align get_Store_align(const ir_node *node)
2054 assert(is_Store(node));
2055 return node->attr.store.aligned;
2058 void set_Store_align(ir_node *node, ir_align align)
2060 assert(is_Store(node));
2061 node->attr.store.aligned = align;
2065 ir_node *get_Alloc_mem(const ir_node *node)
2067 assert(is_Alloc(node));
2068 return get_irn_n(node, 0);
2071 void set_Alloc_mem(ir_node *node, ir_node *mem)
2073 assert(is_Alloc(node));
2074 set_irn_n(node, 0, mem);
2077 ir_node *get_Alloc_size(const ir_node *node)
2079 assert(is_Alloc(node));
2080 return get_irn_n(node, 1);
2083 void set_Alloc_size(ir_node *node, ir_node *size)
2085 assert(is_Alloc(node));
2086 set_irn_n(node, 1, size);
2089 ir_type *get_Alloc_type(ir_node *node)
2091 assert(is_Alloc(node));
2092 return node->attr.alloc.type;
2095 void set_Alloc_type(ir_node *node, ir_type *tp)
2097 assert(is_Alloc(node));
2098 node->attr.alloc.type = tp;
2101 ir_where_alloc get_Alloc_where(const ir_node *node)
2103 assert(is_Alloc(node));
2104 return node->attr.alloc.where;
2107 void set_Alloc_where(ir_node *node, ir_where_alloc where)
2109 assert(is_Alloc(node));
2110 node->attr.alloc.where = where;
2114 ir_node *get_Free_mem(const ir_node *node)
2116 assert(is_Free(node));
2117 return get_irn_n(node, 0);
2120 void set_Free_mem(ir_node *node, ir_node *mem)
2122 assert(is_Free(node));
2123 set_irn_n(node, 0, mem);
2126 ir_node *get_Free_ptr(const ir_node *node)
2128 assert(is_Free(node));
2129 return get_irn_n(node, 1);
2132 void set_Free_ptr(ir_node *node, ir_node *ptr)
2134 assert(is_Free(node));
2135 set_irn_n(node, 1, ptr);
2138 ir_node *get_Free_size(const ir_node *node)
2140 assert(is_Free(node));
2141 return get_irn_n(node, 2);
2144 void set_Free_size(ir_node *node, ir_node *size)
2146 assert(is_Free(node));
2147 set_irn_n(node, 2, size);
2150 ir_type *get_Free_type(ir_node *node)
2152 assert(is_Free(node));
2153 return node->attr.free.type;
2156 void set_Free_type(ir_node *node, ir_type *tp)
2158 assert(is_Free(node));
2159 node->attr.free.type = tp;
2162 ir_where_alloc get_Free_where(const ir_node *node)
2164 assert(is_Free(node));
2165 return node->attr.free.where;
2168 void set_Free_where(ir_node *node, ir_where_alloc where)
2170 assert(is_Free(node));
2171 node->attr.free.where = where;
2174 ir_node **get_Sync_preds_arr(ir_node *node)
2176 assert(is_Sync(node));
2177 return (ir_node **)&(get_irn_in(node)[1]);
2180 int get_Sync_n_preds(const ir_node *node)
2182 assert(is_Sync(node));
2183 return (get_irn_arity(node));
/* NOTE(review): the body of this function is elided from this view;
 * presumably it resizes the in-array of the Sync node — confirm against
 * the full source. */
2187 void set_Sync_n_preds(ir_node *node, int n_preds)
2189 assert(is_Sync(node));
2193 ir_node *get_Sync_pred(const ir_node *node, int pos)
2195 assert(is_Sync(node));
2196 return get_irn_n(node, pos);
2199 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
2201 assert(is_Sync(node));
2202 set_irn_n(node, pos, pred);
2205 /* Add a new Sync predecessor */
2206 void add_Sync_pred(ir_node *node, ir_node *pred)
2208 assert(is_Sync(node));
2209 add_irn_n(node, pred);
2212 /* Returns the source language type of a Proj node. */
/* Falls back to firm_unknown_type when no type can be derived.
 * NOTE(review): the case labels, local declarations and closing braces of
 * this switch are partially elided from this view. */
2213 ir_type *get_Proj_type(ir_node *n)
2215 ir_type *tp = firm_unknown_type;
2216 ir_node *pred = get_Proj_pred(n);
2218 switch (get_irn_opcode(pred)) {
2221 /* Deal with Start / Call here: we need to know the Proj Nr. */
2222 assert(get_irn_mode(pred) == mode_T);
2223 pred_pred = get_Proj_pred(pred);
/* Proj of Proj: parameter (behind Start) or call result (behind Call) */
2225 if (is_Start(pred_pred)) {
2226 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
2227 tp = get_method_param_type(mtp, get_Proj_proj(n));
2228 } else if (is_Call(pred_pred)) {
2229 ir_type *mtp = get_Call_type(pred_pred);
2230 tp = get_method_res_type(mtp, get_Proj_proj(n));
2233 case iro_Start: break;
2234 case iro_Call: break;
/* Proj of a Load: take the type of the selected entity, if the address
 * is a Sel (the guarding is_Sel() check is elided from this view). */
2236 ir_node *a = get_Load_ptr(pred);
2238 tp = get_entity_type(get_Sel_entity(a));
2246 ir_node *get_Proj_pred(const ir_node *node)
2248 assert(is_Proj(node));
2249 return get_irn_n(node, 0);
2252 void set_Proj_pred(ir_node *node, ir_node *pred)
2254 assert(is_Proj(node));
2255 set_irn_n(node, 0, pred);
/* Return the projection number of a Proj node.  In the interprocedural
 * view Filter nodes are handled as well.
 * NOTE(review): the #else line and several closing braces of the #ifdef
 * branches are elided from this view. */
2258 long get_Proj_proj(const ir_node *node)
2260 #ifdef INTERPROCEDURAL_VIEW
2261 ir_opcode code = get_irn_opcode(node);
2263 if (code == iro_Proj) {
2264 return node->attr.proj;
2267 assert(code == iro_Filter);
2268 return node->attr.filter.proj;
2271 assert(is_Proj(node));
2272 return node->attr.proj;
2273 #endif /* INTERPROCEDURAL_VIEW */
/* Set the projection number of a Proj node (Filter in the
 * interprocedural view). */
2276 void set_Proj_proj(ir_node *node, long proj)
2278 #ifdef INTERPROCEDURAL_VIEW
2279 ir_opcode code = get_irn_opcode(node);
2281 if (code == iro_Proj) {
2282 node->attr.proj = proj;
2285 assert(code == iro_Filter);
2286 node->attr.filter.proj = proj;
2289 assert(is_Proj(node));
2290 node->attr.proj = proj;
2291 #endif /* INTERPROCEDURAL_VIEW */
2294 /* Returns non-zero if a node is a routine parameter. */
2295 int (is_arg_Proj)(const ir_node *node)
2297 return _is_arg_Proj(node);
2300 ir_node **get_Tuple_preds_arr(ir_node *node)
2302 assert(is_Tuple(node));
2303 return (ir_node **)&(get_irn_in(node)[1]);
2306 int get_Tuple_n_preds(const ir_node *node)
2308 assert(is_Tuple(node));
2309 return get_irn_arity(node);
/* NOTE(review): the body of this function is elided from this view;
 * presumably it resizes the in-array of the Tuple node — confirm against
 * the full source. */
2313 void set_Tuple_n_preds(ir_node *node, int n_preds)
2315 assert(is_Tuple(node));
2319 ir_node *get_Tuple_pred(const ir_node *node, int pos)
2321 assert(is_Tuple(node));
2322 return get_irn_n(node, pos);
2325 void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
2327 assert(is_Tuple(node));
2328 set_irn_n(node, pos, pred);
2331 ir_node *get_Id_pred(const ir_node *node)
2333 assert(is_Id(node));
2334 return get_irn_n(node, 0);
2337 void set_Id_pred(ir_node *node, ir_node *pred)
2339 assert(is_Id(node));
2340 set_irn_n(node, 0, pred);
2343 ir_node *get_Confirm_value(const ir_node *node)
2345 assert(is_Confirm(node));
2346 return get_irn_n(node, 0);
2349 void set_Confirm_value(ir_node *node, ir_node *value)
2351 assert(is_Confirm(node));
2352 set_irn_n(node, 0, value);
2355 ir_node *get_Confirm_bound(const ir_node *node)
2357 assert(is_Confirm(node));
2358 return get_irn_n(node, 1);
2361 void set_Confirm_bound(ir_node *node, ir_node *bound)
2363 assert(is_Confirm(node));
2364 set_irn_n(node, 0, bound);
2367 pn_Cmp get_Confirm_cmp(const ir_node *node)
2369 assert(is_Confirm(node));
2370 return node->attr.confirm.cmp;
2373 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp)
2375 assert(is_Confirm(node));
2376 node->attr.confirm.cmp = cmp;
/* Return the (intraprocedural) predecessor of a Filter.
 * NOTE(review): the return statement is elided from this view
 * (presumably get_irn_n(node, 0)). */
2379 ir_node *get_Filter_pred(ir_node *node)
2381 assert(is_Filter(node));
/* NOTE(review): the set statement is elided from this view
 * (presumably set_irn_n(node, 0, pred)). */
2385 void set_Filter_pred(ir_node *node, ir_node *pred)
2387 assert(is_Filter(node));
/* Return the projection number of a Filter. */
2391 long get_Filter_proj(ir_node *node)
2393 assert(is_Filter(node));
2394 return node->attr.filter.proj;
/* Set the projection number of a Filter. */
2397 void set_Filter_proj(ir_node *node, long proj)
2399 assert(is_Filter(node));
2400 node->attr.filter.proj = proj;
2403 /* Don't use get_irn_arity, get_irn_n in implementation as access
2404 shall work independent of view!!! */
/* Install the interprocedural predecessor array of a Filter; slot 0 keeps
 * the block, the arity entries follow.
 * NOTE(review): allocation mixes current_ir_graph->obst and irg->obst —
 * verify both refer to the same graph. */
2405 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in)
2407 assert(is_Filter(node));
2408 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2409 ir_graph *irg = get_irn_irg(node);
2410 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2411 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
2412 node->attr.filter.in_cg[0] = node->in[0];
2414 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Exchange a single interprocedural predecessor (pos is 0-based; +1 skips
 * the block entry). */
2417 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred)
2419 assert(is_Filter(node) && node->attr.filter.in_cg &&
2420 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2421 node->attr.filter.in_cg[pos + 1] = pred;
/* Number of interprocedural predecessors (block entry excluded). */
2424 int get_Filter_n_cg_preds(ir_node *node)
2426 assert(is_Filter(node) && node->attr.filter.in_cg);
2427 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Return the interprocedural predecessor at position pos. */
2430 ir_node *get_Filter_cg_pred(ir_node *node, int pos)
2433 assert(is_Filter(node) && node->attr.filter.in_cg &&
2435 arity = ARR_LEN(node->attr.filter.in_cg);
2436 assert(pos < arity - 1);
2437 return node->attr.filter.in_cg[pos + 1];
/* Mux accessors.  From the setters below the input layout appears to be
 * in[2] = false value, in[3] = true value (raw in-array, entry 0 is the
 * block, so these are inputs 1 and 2); the selector comes first.
 * NOTE(review): the return statements of the getters and the statements
 * of set_Mux_sel are elided from this view — confirm the exact indices. */
2441 ir_node *get_Mux_sel(const ir_node *node)
2443 assert(is_Mux(node));
2447 void set_Mux_sel(ir_node *node, ir_node *sel)
2449 assert(is_Mux(node));
2453 ir_node *get_Mux_false(const ir_node *node)
2455 assert(is_Mux(node));
/* NOTE(review): writes node->in directly instead of using set_irn_n() —
 * verify that out-edge bookkeeping is not required here. */
2459 void set_Mux_false(ir_node *node, ir_node *ir_false)
2461 assert(is_Mux(node));
2462 node->in[2] = ir_false;
2465 ir_node *get_Mux_true(const ir_node *node)
2467 assert(is_Mux(node));
2471 void set_Mux_true(ir_node *node, ir_node *ir_true)
2473 assert(is_Mux(node));
2474 node->in[3] = ir_true;
2478 ir_node *get_CopyB_mem(const ir_node *node)
2480 assert(is_CopyB(node));
2481 return get_irn_n(node, 0);
2484 void set_CopyB_mem(ir_node *node, ir_node *mem)
2486 assert(node->op == op_CopyB);
2487 set_irn_n(node, 0, mem);
2490 ir_node *get_CopyB_dst(const ir_node *node)
2492 assert(is_CopyB(node));
2493 return get_irn_n(node, 1);
2496 void set_CopyB_dst(ir_node *node, ir_node *dst)
2498 assert(is_CopyB(node));
2499 set_irn_n(node, 1, dst);
2502 ir_node *get_CopyB_src(const ir_node *node)
2504 assert(is_CopyB(node));
2505 return get_irn_n(node, 2);
2508 void set_CopyB_src(ir_node *node, ir_node *src)
2510 assert(is_CopyB(node));
2511 set_irn_n(node, 2, src);
2514 ir_type *get_CopyB_type(ir_node *node)
2516 assert(is_CopyB(node));
2517 return node->attr.copyb.type;
2520 void set_CopyB_type(ir_node *node, ir_type *data_type)
2522 assert(is_CopyB(node) && data_type);
2523 node->attr.copyb.type = data_type;
2527 ir_type *get_InstOf_type(ir_node *node)
2529 assert(node->op == op_InstOf);
2530 return node->attr.instof.type;
2533 void set_InstOf_type(ir_node *node, ir_type *type)
2535 assert(node->op == op_InstOf);
2536 node->attr.instof.type = type;
2539 ir_node *get_InstOf_store(const ir_node *node)
2541 assert(node->op == op_InstOf);
2542 return get_irn_n(node, 0);
2545 void set_InstOf_store(ir_node *node, ir_node *obj)
2547 assert(node->op == op_InstOf);
2548 set_irn_n(node, 0, obj);
2551 ir_node *get_InstOf_obj(const ir_node *node)
2553 assert(node->op == op_InstOf);
2554 return get_irn_n(node, 1);
2557 void set_InstOf_obj(ir_node *node, ir_node *obj)
2559 assert(node->op == op_InstOf);
2560 set_irn_n(node, 1, obj);
2563 /* Returns the memory input of a Raise operation. */
2564 ir_node *get_Raise_mem(const ir_node *node)
2566 assert(is_Raise(node));
2567 return get_irn_n(node, 0);
2570 void set_Raise_mem(ir_node *node, ir_node *mem)
2572 assert(is_Raise(node));
2573 set_irn_n(node, 0, mem);
2576 ir_node *get_Raise_exo_ptr(const ir_node *node)
2578 assert(is_Raise(node));
2579 return get_irn_n(node, 1);
2582 void set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr)
2584 assert(is_Raise(node));
2585 set_irn_n(node, 1, exo_ptr);
2590 /* Returns the memory input of a Bound operation. */
2591 ir_node *get_Bound_mem(const ir_node *bound)
2593 assert(is_Bound(bound));
2594 return get_irn_n(bound, 0);
2597 void set_Bound_mem(ir_node *bound, ir_node *mem)
2599 assert(is_Bound(bound));
2600 set_irn_n(bound, 0, mem);
2603 /* Returns the index input of a Bound operation. */
2604 ir_node *get_Bound_index(const ir_node *bound)
2606 assert(is_Bound(bound));
2607 return get_irn_n(bound, 1);
2610 void set_Bound_index(ir_node *bound, ir_node *idx)
2612 assert(is_Bound(bound));
2613 set_irn_n(bound, 1, idx);
2616 /* Returns the lower bound input of a Bound operation. */
2617 ir_node *get_Bound_lower(const ir_node *bound)
2619 assert(is_Bound(bound));
2620 return get_irn_n(bound, 2);
2623 void set_Bound_lower(ir_node *bound, ir_node *lower)
2625 assert(is_Bound(bound));
2626 set_irn_n(bound, 2, lower);
2629 /* Returns the upper bound input of a Bound operation. */
2630 ir_node *get_Bound_upper(const ir_node *bound)
2632 assert(is_Bound(bound));
2633 return get_irn_n(bound, 3);
2636 void set_Bound_upper(ir_node *bound, ir_node *upper)
2638 assert(is_Bound(bound));
2639 set_irn_n(bound, 3, upper);
2642 /* Return the operand of a Pin node. */
2643 ir_node *get_Pin_op(const ir_node *pin)
2645 assert(is_Pin(pin));
2646 return get_irn_n(pin, 0);
2649 void set_Pin_op(ir_node *pin, ir_node *node)
2651 assert(is_Pin(pin));
2652 set_irn_n(pin, 0, node);
2655 /* Return the assembler text of an ASM pseudo node. */
2656 ident *get_ASM_text(const ir_node *node)
2658 assert(is_ASM(node));
2659 return node->attr.assem.asm_text;
2662 /* Return the number of input constraints for an ASM node. */
2663 int get_ASM_n_input_constraints(const ir_node *node)
2665 assert(is_ASM(node));
2666 return ARR_LEN(node->attr.assem.inputs);
2669 /* Return the input constraints for an ASM node. This is a flexible array. */
2670 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node)
2672 assert(is_ASM(node));
2673 return node->attr.assem.inputs;
2676 /* Return the number of output constraints for an ASM node. */
2677 int get_ASM_n_output_constraints(const ir_node *node)
2679 assert(is_ASM(node));
2680 return ARR_LEN(node->attr.assem.outputs);
2683 /* Return the output constraints for an ASM node. */
2684 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node)
2686 assert(is_ASM(node));
2687 return node->attr.assem.outputs;
2690 /* Return the number of clobbered registers for an ASM node. */
2691 int get_ASM_n_clobbers(const ir_node *node)
2693 assert(is_ASM(node));
2694 return ARR_LEN(node->attr.assem.clobber);
2697 /* Return the list of clobbered registers for an ASM node. */
2698 ident **get_ASM_clobbers(const ir_node *node)
2700 assert(is_ASM(node));
2701 return node->attr.assem.clobber;
2704 /* returns the graph of a node */
2705 ir_graph *get_irn_irg(const ir_node *node)
/* NOTE(review): the opening of this comment block is elided from view. */
2708 * Do not use get_nodes_Block() here, because this
2709 * will check the pinned state.
2710 * However even a 'wrong' block is always in the proper
/* step to the node's block (input -1), then ask the block */
2713 if (! is_Block(node))
2714 node = get_irn_n(node, -1);
2715 /* note that get_Block_irg() can handle Bad nodes */
2716 return get_Block_irg(node);
2720 /*----------------------------------------------------------------*/
2721 /* Auxiliary routines */
2722 /*----------------------------------------------------------------*/
/* Skip a Proj node and return its predecessor; other nodes are returned
 * unchanged.  NOTE(review): the surrounding if/return statements are
 * partially elided from this view. */
2724 ir_node *skip_Proj(ir_node *node)
2726 /* don't assert node !!! */
2731 node = get_Proj_pred(node);
/* const variant of skip_Proj() */
2737 skip_Proj_const(const ir_node *node)
2739 /* don't assert node !!! */
2744 node = get_Proj_pred(node);
/* Resolve Proj-of-Tuple chains to the real predecessor; handles nested
 * Tuples recursively.  NOTE(review): local declarations, closing braces
 * and the final return are elided from this view. */
2749 ir_node *skip_Tuple(ir_node *node)
2755 if (is_Proj(node)) {
2756 pred = get_Proj_pred(node);
2757 op = get_irn_op(pred);
2760 * Looks strange but calls get_irn_op() only once
2761 * in most often cases.
2763 if (op == op_Proj) { /* nested Tuple ? */
2764 pred = skip_Tuple(pred);
2766 if (is_Tuple(pred)) {
2767 node = get_Tuple_pred(pred, get_Proj_proj(node));
2770 } else if (op == op_Tuple) {
2771 node = get_Tuple_pred(pred, get_Proj_proj(node));
2778 /* returns operand of node if node is a Cast */
/* NOTE(review): the guarding 'if (is_Cast(node))' and the fall-through
 * 'return node;' of these skip helpers are elided from this view. */
2779 ir_node *skip_Cast(ir_node *node)
2782 return get_Cast_op(node);
2786 /* returns operand of node if node is a Cast */
2787 const ir_node *skip_Cast_const(const ir_node *node)
2790 return get_Cast_op(node);
2794 /* returns operand of node if node is a Pin */
2795 ir_node *skip_Pin(ir_node *node)
2798 return get_Pin_op(node);
2802 /* returns operand of node if node is a Confirm */
2803 ir_node *skip_Confirm(ir_node *node)
2805 if (is_Confirm(node))
2806 return get_Confirm_value(node);
2810 /* skip all high-level ops */
/* Walks down input 0 while the operation is marked high-level.
 * NOTE(review): the final 'return node;' is elided from this view. */
2811 ir_node *skip_HighLevel_ops(ir_node *node)
2813 while (is_op_highlevel(get_irn_op(node))) {
2814 node = get_irn_n(node, 0);
2820 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2821 * than any other approach, as Id chains are resolved and all point to the real node, or
2822 * all id's are self loops.
2824 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2825 * a little bit "hand optimized".
2827 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
/* Path-compressing Id resolution: accesses node->in[0+1] directly to
 * avoid the endless loop get_Id_pred() would cause on self-referencing
 * Ids.  NOTE(review): the rem_pred assignment and some returns are
 * elided from this view. */
2829 ir_node *skip_Id(ir_node *node)
2832 /* don't assert node !!! */
2834 if (!node || (node->op != op_Id)) return node;
2836 /* Don't use get_Id_pred(): We get into an endless loop for
2837 self-referencing Ids. */
2838 pred = node->in[0+1];
2840 if (pred->op != op_Id) return pred;
2842 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2843 ir_node *rem_pred, *res;
2845 if (pred->op != op_Id) return pred; /* shortcut */
2848 assert(get_irn_arity (node) > 0);
2850 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2851 res = skip_Id(rem_pred);
2852 if (res->op == op_Id) /* self-loop */ return node;
2854 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* In-place variant: replaces *node by its Id-resolved value.
 * NOTE(review): the tail of this function is elided from this view. */
2861 void skip_Id_and_store(ir_node **node)
2865 if (!n || (n->op != op_Id)) return;
2867 /* Don't use get_Id_pred(): We get into an endless loop for
2868 self-referencing Ids. */
2872 int (is_strictConv)(const ir_node *node)
2874 return _is_strictConv(node);
2877 int (is_no_Block)(const ir_node *node)
2879 return _is_no_Block(node);
2882 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
2883 int (is_SymConst_addr_ent)(const ir_node *node)
2885 return _is_SymConst_addr_ent(node);
2888 /* Returns true if the operation manipulates control flow. */
2889 int is_cfop(const ir_node *node)
2891 return is_op_cfopcode(get_irn_op(node));
2894 /* Returns true if the operation manipulates interprocedural control flow:
2895 CallBegin, EndReg, EndExcept */
2896 int is_ip_cfop(const ir_node *node)
2898 return is_ip_cfopcode(get_irn_op(node));
2901 /* Returns true if the operation can change the control flow because
2903 int is_fragile_op(const ir_node *node)
2905 return is_op_fragile(get_irn_op(node));
2908 /* Returns the memory operand of fragile operations. */
/* NOTE(review): the case labels of this switch (Call, Load, Store, Div,
 * ... presumably) are elided from this view; visible is only the common
 * pn_Generic_M access and the unreachable default. */
2909 ir_node *get_fragile_op_mem(ir_node *node)
2911 assert(node && is_fragile_op(node));
2913 switch (get_irn_opcode(node)) {
2924 return get_irn_n(node, pn_Generic_M);
2929 assert(0 && "should not be reached");
2934 /* Returns the result mode of a Div operation. */
/* Dispatches on the concrete division opcode.  NOTE(review): the public
 * name carries a typo ("resmod" instead of "resmode"); it is kept for
 * API compatibility.  The 'default' label and release-mode fallback
 * return are elided from this view. */
2935 ir_mode *get_divop_resmod(const ir_node *node)
2937 switch (get_irn_opcode(node)) {
2938 case iro_Quot : return get_Quot_resmode(node);
2939 case iro_DivMod: return get_DivMod_resmode(node);
2940 case iro_Div : return get_Div_resmode(node);
2941 case iro_Mod : return get_Mod_resmode(node);
2943 assert(0 && "should not be reached");
2948 /* Returns true if the operation is a forking control flow operation. */
2949 int (is_irn_forking)(const ir_node *node)
2951 return _is_irn_forking(node);
2954 void (copy_node_attr)(const ir_node *old_node, ir_node *new_node)
2956 _copy_node_attr(old_node, new_node);
2959 /* Return the type associated with the value produced by n
2960 * if the node remarks this type as it is the case for
2961 * Cast, Const, SymConst and some Proj nodes. */
2962 ir_type *(get_irn_type)(ir_node *node)
2964 return _get_irn_type(node);
2967 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2969 ir_type *(get_irn_type_attr)(ir_node *node)
2971 return _get_irn_type_attr(node);
2974 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2975 ir_entity *(get_irn_entity_attr)(ir_node *node)
2977 return _get_irn_entity_attr(node);
2980 /* Returns non-zero for constant-like nodes. */
2981 int (is_irn_constlike)(const ir_node *node)
2983 return _is_irn_constlike(node);
2987 * Returns non-zero for nodes that are allowed to have keep-alives and
2988 * are neither Block nor PhiM.
2990 int (is_irn_keep)(const ir_node *node)
2992 return _is_irn_keep(node);
2996 * Returns non-zero for nodes that are always placed in the start block.
2998 int (is_irn_start_block_placed)(const ir_node *node)
3000 return _is_irn_start_block_placed(node);
3003 /* Returns non-zero for nodes that are machine operations. */
3004 int (is_irn_machine_op)(const ir_node *node)
3006 return _is_irn_machine_op(node);
3009 /* Returns non-zero for nodes that are machine operands. */
3010 int (is_irn_machine_operand)(const ir_node *node)
3012 return _is_irn_machine_operand(node);
3015 /* Returns non-zero for nodes that have the n'th user machine flag set. */
3016 int (is_irn_machine_user)(const ir_node *node, unsigned n)
3018 return _is_irn_machine_user(node, n);
3021 /* Returns non-zero for nodes that are CSE neutral to its users. */
3022 int (is_irn_cse_neutral)(const ir_node *node)
3024 return _is_irn_cse_neutral(node);
3027 /* Gets the string representation of the jump prediction .*/
3028 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
3030 #define X(a) case a: return #a;
3032 X(COND_JMP_PRED_NONE);
3033 X(COND_JMP_PRED_TRUE);
3034 X(COND_JMP_PRED_FALSE);
3040 /* Returns the conditional jump prediction of a Cond node. */
3041 cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond)
3043 return _get_Cond_jmp_pred(cond);
3046 /* Sets a new conditional jump prediction. */
3047 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred)
3049 _set_Cond_jmp_pred(cond, pred);
3052 /** the get_type operation must be always implemented and return a firm type */
3053 static ir_type *get_Default_type(ir_node *n)
3056 return get_unknown_type();
3059 /* Sets the get_type operation for an ir_op_ops. */
3060 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops)
3063 case iro_Const: ops->get_type = get_Const_type; break;
3064 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
3065 case iro_Cast: ops->get_type = get_Cast_type; break;
3066 case iro_Proj: ops->get_type = get_Proj_type; break;
3068 /* not allowed to be NULL */
3069 if (! ops->get_type)
3070 ops->get_type = get_Default_type;
3076 /** Return the attribute type of a SymConst node if exists */
3077 static ir_type *get_SymConst_attr_type(ir_node *self)
3079 symconst_kind kind = get_SymConst_kind(self);
3080 if (SYMCONST_HAS_TYPE(kind))
3081 return get_SymConst_type(self);
3085 /** Return the attribute entity of a SymConst node if exists */
3086 static ir_entity *get_SymConst_attr_entity(ir_node *self)
3088 symconst_kind kind = get_SymConst_kind(self);
3089 if (SYMCONST_HAS_ENT(kind))
3090 return get_SymConst_entity(self);
3094 /** the get_type_attr operation must be always implemented */
3095 static ir_type *get_Null_type(ir_node *n)
3098 return firm_unknown_type;
3101 /* Sets the get_type operation for an ir_op_ops. */
3102 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops)
3105 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
3106 case iro_Call: ops->get_type_attr = get_Call_type; break;
3107 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
3108 case iro_Free: ops->get_type_attr = get_Free_type; break;
3109 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
3111 /* not allowed to be NULL */
3112 if (! ops->get_type_attr)
3113 ops->get_type_attr = get_Null_type;
3119 /** the get_entity_attr operation must be always implemented */
3120 static ir_entity *get_Null_ent(ir_node *n)
3126 /* Sets the get_type operation for an ir_op_ops. */
3127 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops)
3130 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
3131 case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
3133 /* not allowed to be NULL */
3134 if (! ops->get_entity_attr)
3135 ops->get_entity_attr = get_Null_ent;
3141 /* Sets the debug information of a node. */
3142 void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
3144 _set_irn_dbg_info(n, db);
3148 * Returns the debug information of an node.
3150 * @param n The node.
3152 dbg_info *(get_irn_dbg_info)(const ir_node *n)
3154 return _get_irn_dbg_info(n);
3157 /* checks whether a node represents a global address */
3158 int is_Global(const ir_node *node)
3160 return is_SymConst_addr_ent(node);
3163 /* returns the entity of a global address */
3164 ir_entity *get_Global_entity(const ir_node *node)
3166 return get_SymConst_entity(node);
3170 * Calculate a hash value of a node.
3172 unsigned firm_default_hash(const ir_node *node)
3177 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
3178 h = irn_arity = get_irn_intra_arity(node);
3180 /* consider all in nodes... except the block if not a control flow. */
3181 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
3182 ir_node *pred = get_irn_intra_n(node, i);
3183 if (is_irn_cse_neutral(pred))
3186 h = 9*h + HASH_PTR(pred);
3190 h = 9*h + HASH_PTR(get_irn_mode(node));
3192 h = 9*h + HASH_PTR(get_irn_op(node));
3195 } /* firm_default_hash */
3197 /* include generated code */
3198 #include "gen_irnode.c.inl"