 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
/* Some constants fixing the positions of nodes' predecessors. */
50 #define CALL_PARAM_OFFSET 2
51 #define BUILDIN_PARAM_OFFSET 1
52 #define SEL_INDEX_OFFSET 2
53 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
54 #define END_KEEPALIVE_OFFSET 0
/**
 * Human-readable names of the pn_Cmp projection numbers,
 * indexed by their numeric value.
 */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq",  "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt",    "pn_Cmp_Ge",  "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo",    "pn_Cmp_Ue",  "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug",    "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 */
const char *get_pnc_string(int pnc) {
	const int n_names = (int) (sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0]));
	assert(pnc >= 0 && pnc < n_names);
	return pnc_name_arr[pnc];
}
73 * Calculates the negated (Complement(R)) pnc condition.
75 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode) {
78 /* do NOT add the Uo bit for non-floating point values */
79 if (! mode_is_float(mode))
85 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
86 pn_Cmp get_inversed_pnc(long pnc) {
87 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
88 long lesser = pnc & pn_Cmp_Lt;
89 long greater = pnc & pn_Cmp_Gt;
91 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;


/* register new space for every node */
unsigned firm_register_additional_node_data(unsigned size) {
	assert(!forbid_new_data && "Too late to register additional node data");

	/* in release builds fail softly instead of corrupting node layout */
	if (forbid_new_data)
		return 0;

	/* returns the new total amount of per-node custom data */
	firm_add_node_size += size;
	return firm_add_node_size;
}
120 void init_irnode(void) {
121 /* Forbid the addition of new data to an ir node. */
126 * irnode constructor.
127 * Create a new irnode in irg, with an op, mode, arity and
128 * some incoming irnodes.
129 * If arity is negative, a node with a dynamic array is created.
132 new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
133 int arity, ir_node **in)
136 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
143 p = obstack_alloc(irg->obst, node_size);
144 memset(p, 0, node_size);
145 res = (ir_node *)(p + firm_add_node_size);
147 res->kind = k_ir_node;
151 res->node_idx = irg_register_node_idx(irg, res);
156 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
158 /* not nice but necessary: End and Sync must always have a flexible array */
159 if (op == op_End || op == op_Sync)
160 res->in = NEW_ARR_F(ir_node *, (arity+1));
162 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
163 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
167 set_irn_dbg_info(res, db);
169 res->node_nr = get_irp_new_node_nr();
171 for (i = 0; i < EDGE_KIND_LAST; ++i) {
172 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
173 /* edges will be build immediately */
174 res->edge_info[i].edges_built = 1;
175 res->edge_info[i].out_count = 0;
178 /* don't put this into the for loop, arity is -1 for some nodes! */
179 edges_notify_edge(res, -1, res->in[0], NULL, irg);
180 for (i = 1; i <= arity; ++i)
181 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
183 hook_new_node(irg, res);
184 if (get_irg_phase_state(irg) == phase_backend) {
185 be_info_new_node(res);
191 /*-- getting some parameters from ir_nodes --*/
193 int (is_ir_node)(const void *thing) {
194 return _is_ir_node(thing);
197 int (get_irn_intra_arity)(const ir_node *node) {
198 return _get_irn_intra_arity(node);
201 int (get_irn_inter_arity)(const ir_node *node) {
202 return _get_irn_inter_arity(node);
205 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
207 int (get_irn_arity)(const ir_node *node) {
208 return _get_irn_arity(node);
211 /* Returns the array with ins. This array is shifted with respect to the
212 array accessed by get_irn_n: The block operand is at position 0 not -1.
213 (@@@ This should be changed.)
214 The order of the predecessors in this array is not guaranteed, except that
215 lists of operands as predecessors of Block or arguments of a Call are
217 ir_node **get_irn_in(const ir_node *node) {
219 #ifdef INTERPROCEDURAL_VIEW
220 if (get_interprocedural_view()) { /* handle Filter and Block specially */
221 if (get_irn_opcode(node) == iro_Filter) {
222 assert(node->attr.filter.in_cg);
223 return node->attr.filter.in_cg;
224 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
225 return node->attr.block.in_cg;
227 /* else fall through */
229 #endif /* INTERPROCEDURAL_VIEW */
233 void set_irn_in(ir_node *node, int arity, ir_node **in) {
236 ir_graph *irg = get_irn_irg(node);
239 #ifdef INTERPROCEDURAL_VIEW
240 if (get_interprocedural_view()) { /* handle Filter and Block specially */
241 ir_opcode code = get_irn_opcode(node);
242 if (code == iro_Filter) {
243 assert(node->attr.filter.in_cg);
244 pOld_in = &node->attr.filter.in_cg;
245 } else if (code == iro_Block && node->attr.block.in_cg) {
246 pOld_in = &node->attr.block.in_cg;
251 #endif /* INTERPROCEDURAL_VIEW */
255 for (i = 0; i < arity; i++) {
256 if (i < ARR_LEN(*pOld_in)-1)
257 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
259 edges_notify_edge(node, i, in[i], NULL, irg);
261 for (;i < ARR_LEN(*pOld_in)-1; i++) {
262 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
265 if (arity != ARR_LEN(*pOld_in) - 1) {
266 ir_node * block = (*pOld_in)[0];
267 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
268 (*pOld_in)[0] = block;
270 fix_backedges(irg->obst, node);
272 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
275 ir_node *(get_irn_intra_n)(const ir_node *node, int n) {
276 return _get_irn_intra_n(node, n);
279 ir_node *(get_irn_inter_n)(const ir_node *node, int n) {
280 return _get_irn_inter_n(node, n);
283 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
285 ir_node *(get_irn_n)(const ir_node *node, int n) {
286 return _get_irn_n(node, n);
289 void set_irn_n(ir_node *node, int n, ir_node *in) {
290 assert(node && node->kind == k_ir_node);
292 assert(n < get_irn_arity(node));
293 assert(in && in->kind == k_ir_node);
295 #ifdef INTERPROCEDURAL_VIEW
296 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
297 /* Change block pred in both views! */
298 node->in[n + 1] = in;
299 assert(node->attr.filter.in_cg);
300 node->attr.filter.in_cg[n + 1] = in;
303 if (get_interprocedural_view()) { /* handle Filter and Block specially */
304 if (get_irn_opcode(node) == iro_Filter) {
305 assert(node->attr.filter.in_cg);
306 node->attr.filter.in_cg[n + 1] = in;
308 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
309 node->attr.block.in_cg[n + 1] = in;
312 /* else fall through */
314 #endif /* INTERPROCEDURAL_VIEW */
317 hook_set_irn_n(node, n, in, node->in[n + 1]);
319 /* Here, we rely on src and tgt being in the current ir graph */
320 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
322 node->in[n + 1] = in;
325 int add_irn_n(ir_node *node, ir_node *in) {
327 ir_graph *irg = get_irn_irg(node);
329 assert(node->op->opar == oparity_dynamic);
330 pos = ARR_LEN(node->in) - 1;
331 ARR_APP1(ir_node *, node->in, in);
332 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
335 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
340 void del_Sync_n(ir_node *n, int i)
342 int arity = get_Sync_n_preds(n);
343 ir_node *last_pred = get_Sync_pred(n, arity - 1);
344 set_Sync_pred(n, i, last_pred);
345 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
346 ARR_SHRINKLEN(get_irn_in(n), arity);
349 int (get_irn_deps)(const ir_node *node) {
350 return _get_irn_deps(node);
353 ir_node *(get_irn_dep)(const ir_node *node, int pos) {
354 return _get_irn_dep(node, pos);
357 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep) {
358 _set_irn_dep(node, pos, dep);
361 int add_irn_dep(ir_node *node, ir_node *dep) {
364 /* DEP edges are only allowed in backend phase */
365 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
366 if (node->deps == NULL) {
367 node->deps = NEW_ARR_F(ir_node *, 1);
373 for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
374 if(node->deps[i] == NULL)
377 if(node->deps[i] == dep)
381 if (first_zero >= 0) {
382 node->deps[first_zero] = dep;
385 ARR_APP1(ir_node *, node->deps, dep);
390 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
395 void add_irn_deps(ir_node *tgt, ir_node *src) {
398 for (i = 0, n = get_irn_deps(src); i < n; ++i)
399 add_irn_dep(tgt, get_irn_dep(src, i));
403 ir_mode *(get_irn_mode)(const ir_node *node) {
404 return _get_irn_mode(node);
407 void (set_irn_mode)(ir_node *node, ir_mode *mode) {
408 _set_irn_mode(node, mode);
411 /** Gets the string representation of the mode .*/
412 const char *get_irn_modename(const ir_node *node) {
414 return get_mode_name(node->mode);
417 ident *get_irn_modeident(const ir_node *node) {
419 return get_mode_ident(node->mode);
422 ir_op *(get_irn_op)(const ir_node *node) {
423 return _get_irn_op(node);
426 /* should be private to the library: */
427 void (set_irn_op)(ir_node *node, ir_op *op) {
428 _set_irn_op(node, op);
431 unsigned (get_irn_opcode)(const ir_node *node) {
432 return _get_irn_opcode(node);
435 const char *get_irn_opname(const ir_node *node) {
437 if (is_Phi0(node)) return "Phi0";
438 return get_id_str(node->op->name);
441 ident *get_irn_opident(const ir_node *node) {
443 return node->op->name;
446 ir_visited_t (get_irn_visited)(const ir_node *node) {
447 return _get_irn_visited(node);
450 void (set_irn_visited)(ir_node *node, ir_visited_t visited) {
451 _set_irn_visited(node, visited);
454 void (mark_irn_visited)(ir_node *node) {
455 _mark_irn_visited(node);
458 int (irn_visited)(const ir_node *node) {
459 return _irn_visited(node);
462 int (irn_visited_else_mark)(ir_node *node) {
463 return _irn_visited_else_mark(node);
466 void (set_irn_link)(ir_node *node, void *link) {
467 _set_irn_link(node, link);
470 void *(get_irn_link)(const ir_node *node) {
471 return _get_irn_link(node);
474 op_pin_state (get_irn_pinned)(const ir_node *node) {
475 return _get_irn_pinned(node);
478 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node) {
479 return _is_irn_pinned_in_irg(node);
482 void set_irn_pinned(ir_node *node, op_pin_state state) {
483 /* due to optimization an opt may be turned into a Tuple */
487 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
488 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
490 node->attr.except.pin_state = state;
493 /* Outputs a unique number for this node */
494 long get_irn_node_nr(const ir_node *node) {
496 return node->node_nr;
499 const_attr *get_irn_const_attr(ir_node *node) {
500 assert(is_Const(node));
501 return &node->attr.con;
504 long get_irn_proj_attr(ir_node *node) {
505 /* BEWARE: check for true Proj node here, no Filter */
506 assert(node->op == op_Proj);
507 return node->attr.proj;
510 alloc_attr *get_irn_alloc_attr(ir_node *node) {
511 assert(is_Alloc(node));
512 return &node->attr.alloc;
515 free_attr *get_irn_free_attr(ir_node *node) {
516 assert(is_Free(node));
517 return &node->attr.free;
520 symconst_attr *get_irn_symconst_attr(ir_node *node) {
521 assert(is_SymConst(node));
522 return &node->attr.symc;
525 call_attr *get_irn_call_attr(ir_node *node) {
526 assert(is_Call(node));
527 return &node->attr.call;
530 sel_attr *get_irn_sel_attr(ir_node *node) {
531 assert(is_Sel(node));
532 return &node->attr.sel;
535 phi_attr *get_irn_phi_attr(ir_node *node) {
536 return &node->attr.phi;
539 block_attr *get_irn_block_attr(ir_node *node) {
540 assert(is_Block(node));
541 return &node->attr.block;
544 load_attr *get_irn_load_attr(ir_node *node) {
545 assert(is_Load(node));
546 return &node->attr.load;
549 store_attr *get_irn_store_attr(ir_node *node) {
550 assert(is_Store(node));
551 return &node->attr.store;
554 except_attr *get_irn_except_attr(ir_node *node) {
555 assert(node->op == op_Div || node->op == op_Quot ||
556 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
557 return &node->attr.except;
560 divmod_attr *get_irn_divmod_attr(ir_node *node) {
561 assert(node->op == op_Div || node->op == op_Quot ||
562 node->op == op_DivMod || node->op == op_Mod);
563 return &node->attr.divmod;
566 builtin_attr *get_irn_builtin_attr(ir_node *node) {
567 assert(is_Builtin(node));
568 return &node->attr.builtin;
571 void *(get_irn_generic_attr)(ir_node *node) {
572 assert(is_ir_node(node));
573 return _get_irn_generic_attr(node);
576 const void *(get_irn_generic_attr_const)(const ir_node *node) {
577 assert(is_ir_node(node));
578 return _get_irn_generic_attr_const(node);
581 unsigned (get_irn_idx)(const ir_node *node) {
582 assert(is_ir_node(node));
583 return _get_irn_idx(node);
586 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
588 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
589 if (get_irn_n(node, i) == arg)
595 /** manipulate fields of individual nodes **/
597 /* this works for all except Block */
598 ir_node *get_nodes_block(const ir_node *node) {
599 assert(node->op != op_Block);
600 return get_irn_n(node, -1);
603 void set_nodes_block(ir_node *node, ir_node *block) {
604 assert(node->op != op_Block);
605 set_irn_n(node, -1, block);
608 /* this works for all except Block */
609 ir_node *get_nodes_MacroBlock(const ir_node *node) {
610 assert(node->op != op_Block);
611 return get_Block_MacroBlock(get_irn_n(node, -1));
614 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
615 * from Start. If so returns frame type, else Null. */
616 ir_type *is_frame_pointer(const ir_node *n) {
617 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
618 ir_node *start = get_Proj_pred(n);
619 if (is_Start(start)) {
620 return get_irg_frame_type(get_irn_irg(start));
626 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
627 * from Start. If so returns tls type, else Null. */
628 ir_type *is_tls_pointer(const ir_node *n) {
629 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
630 ir_node *start = get_Proj_pred(n);
631 if (is_Start(start)) {
632 return get_tls_type();
638 ir_node **get_Block_cfgpred_arr(ir_node *node) {
639 assert(is_Block(node));
640 return (ir_node **)&(get_irn_in(node)[1]);
643 int (get_Block_n_cfgpreds)(const ir_node *node) {
644 return _get_Block_n_cfgpreds(node);
647 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos) {
648 return _get_Block_cfgpred(node, pos);
651 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
652 assert(is_Block(node));
653 set_irn_n(node, pos, pred);
656 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred) {
659 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
660 if (get_Block_cfgpred_block(block, i) == pred)
666 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos) {
667 return _get_Block_cfgpred_block(node, pos);
670 int get_Block_matured(const ir_node *node) {
671 assert(is_Block(node));
672 return (int)node->attr.block.is_matured;
675 void set_Block_matured(ir_node *node, int matured) {
676 assert(is_Block(node));
677 node->attr.block.is_matured = matured;
680 ir_visited_t (get_Block_block_visited)(const ir_node *node) {
681 return _get_Block_block_visited(node);
684 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit) {
685 _set_Block_block_visited(node, visit);
688 /* For this current_ir_graph must be set. */
689 void (mark_Block_block_visited)(ir_node *node) {
690 _mark_Block_block_visited(node);
693 int (Block_block_visited)(const ir_node *node) {
694 return _Block_block_visited(node);
697 ir_node *get_Block_graph_arr(ir_node *node, int pos) {
698 assert(is_Block(node));
699 return node->attr.block.graph_arr[pos+1];
702 void set_Block_graph_arr(ir_node *node, int pos, ir_node *value) {
703 assert(is_Block(node));
704 node->attr.block.graph_arr[pos+1] = value;
707 #ifdef INTERPROCEDURAL_VIEW
708 void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[]) {
709 assert(is_Block(node));
710 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
711 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
712 node->attr.block.in_cg[0] = NULL;
713 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
715 /* Fix backedge array. fix_backedges() operates depending on
716 interprocedural_view. */
717 int ipv = get_interprocedural_view();
718 set_interprocedural_view(1);
719 fix_backedges(current_ir_graph->obst, node);
720 set_interprocedural_view(ipv);
723 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
726 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred) {
727 assert(is_Block(node) && node->attr.block.in_cg &&
728 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
729 node->attr.block.in_cg[pos + 1] = pred;
732 ir_node **get_Block_cg_cfgpred_arr(ir_node *node) {
733 assert(is_Block(node));
734 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
737 int get_Block_cg_n_cfgpreds(const ir_node *node) {
738 assert(is_Block(node));
739 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
742 ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos) {
743 assert(is_Block(node) && node->attr.block.in_cg);
744 return node->attr.block.in_cg[pos + 1];
747 void remove_Block_cg_cfgpred_arr(ir_node *node) {
748 assert(is_Block(node));
749 node->attr.block.in_cg = NULL;
751 #endif /* INTERPROCEDURAL_VIEW */
753 ir_node *(set_Block_dead)(ir_node *block) {
754 return _set_Block_dead(block);
757 int (is_Block_dead)(const ir_node *block) {
758 return _is_Block_dead(block);
761 ir_extblk *get_Block_extbb(const ir_node *block) {
763 assert(is_Block(block));
764 res = block->attr.block.extblk;
765 assert(res == NULL || is_ir_extbb(res));
769 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
770 assert(is_Block(block));
771 assert(extblk == NULL || is_ir_extbb(extblk));
772 block->attr.block.extblk = extblk;
775 /* Returns the macro block header of a block.*/
776 ir_node *get_Block_MacroBlock(const ir_node *block) {
778 assert(is_Block(block));
779 mbh = get_irn_n(block, -1);
780 /* once macro block header is respected by all optimizations,
781 this assert can be removed */
786 /* Sets the macro block header of a block. */
787 void set_Block_MacroBlock(ir_node *block, ir_node *mbh) {
788 assert(is_Block(block));
790 assert(is_Block(mbh));
791 set_irn_n(block, -1, mbh);
794 /* returns the macro block header of a node. */
795 ir_node *get_irn_MacroBlock(const ir_node *n) {
797 n = get_nodes_block(n);
798 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
802 return get_Block_MacroBlock(n);
805 /* returns the graph of a Block. */
806 ir_graph *(get_Block_irg)(const ir_node *block) {
807 return _get_Block_irg(block);
810 ir_entity *create_Block_entity(ir_node *block) {
812 assert(is_Block(block));
814 entity = block->attr.block.entity;
815 if (entity == NULL) {
819 glob = get_glob_type();
820 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
821 set_entity_visibility(entity, ir_visibility_local);
822 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
823 nr = get_irp_next_label_nr();
824 set_entity_label(entity, nr);
825 set_entity_compiler_generated(entity, 1);
827 block->attr.block.entity = entity;
832 ir_entity *get_Block_entity(const ir_node *block) {
833 assert(is_Block(block));
834 return block->attr.block.entity;
837 void set_Block_entity(ir_node *block, ir_entity *entity)
839 assert(is_Block(block));
840 assert(get_entity_type(entity) == get_code_type());
841 block->attr.block.entity = entity;
844 int has_Block_entity(const ir_node *block)
846 return block->attr.block.entity != NULL;
849 ir_node *(get_Block_phis)(const ir_node *block) {
850 return _get_Block_phis(block);
853 void (set_Block_phis)(ir_node *block, ir_node *phi) {
854 _set_Block_phis(block, phi);
857 void (add_Block_phi)(ir_node *block, ir_node *phi) {
858 _add_Block_phi(block, phi);
861 /* Get the Block mark (single bit). */
862 unsigned (get_Block_mark)(const ir_node *block) {
863 return _get_Block_mark(block);
866 /* Set the Block mark (single bit). */
867 void (set_Block_mark)(ir_node *block, unsigned mark) {
868 _set_Block_mark(block, mark);
871 int get_End_n_keepalives(const ir_node *end) {
873 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
876 ir_node *get_End_keepalive(const ir_node *end, int pos) {
878 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
881 void add_End_keepalive(ir_node *end, ir_node *ka) {
886 void set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
888 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
891 /* Set new keep-alives */
892 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
894 ir_graph *irg = get_irn_irg(end);
896 /* notify that edges are deleted */
897 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
898 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
900 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
902 for (i = 0; i < n; ++i) {
903 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
904 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
908 /* Set new keep-alives from old keep-alives, skipping irn */
909 void remove_End_keepalive(ir_node *end, ir_node *irn) {
910 int n = get_End_n_keepalives(end);
915 for (i = n -1; i >= 0; --i) {
916 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
926 irg = get_irn_irg(end);
928 /* remove the edge */
929 edges_notify_edge(end, idx, NULL, irn, irg);
932 /* exchange with the last one */
933 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
934 edges_notify_edge(end, n - 1, NULL, old, irg);
935 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
936 edges_notify_edge(end, idx, old, NULL, irg);
938 /* now n - 1 keeps, 1 block input */
939 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
942 /* remove Bads, NoMems and doublets from the keep-alive set */
943 void remove_End_Bads_and_doublets(ir_node *end) {
945 int idx, n = get_End_n_keepalives(end);
951 irg = get_irn_irg(end);
952 pset_new_init(&keeps);
954 for (idx = n - 1; idx >= 0; --idx) {
955 ir_node *ka = get_End_keepalive(end, idx);
957 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
958 /* remove the edge */
959 edges_notify_edge(end, idx, NULL, ka, irg);
962 /* exchange with the last one */
963 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
964 edges_notify_edge(end, n - 1, NULL, old, irg);
965 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
966 edges_notify_edge(end, idx, old, NULL, irg);
970 pset_new_insert(&keeps, ka);
973 /* n keeps, 1 block input */
974 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
976 pset_new_destroy(&keeps);
979 void free_End(ir_node *end) {
983 end->in = NULL; /* @@@ make sure we get an error if we use the
984 in array afterwards ... */
987 /* Return the target address of an IJmp */
988 ir_node *get_IJmp_target(const ir_node *ijmp) {
989 assert(is_IJmp(ijmp));
990 return get_irn_n(ijmp, 0);
993 /** Sets the target address of an IJmp */
994 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
995 assert(is_IJmp(ijmp));
996 set_irn_n(ijmp, 0, tgt);
1000 get_Cond_selector(const ir_node *node) {
1001 assert(is_Cond(node));
1002 return get_irn_n(node, 0);
1006 set_Cond_selector(ir_node *node, ir_node *selector) {
1007 assert(is_Cond(node));
1008 set_irn_n(node, 0, selector);
1012 get_Cond_default_proj(const ir_node *node) {
1013 assert(is_Cond(node));
1014 return node->attr.cond.default_proj;
1017 void set_Cond_default_proj(ir_node *node, long defproj) {
1018 assert(is_Cond(node));
1019 node->attr.cond.default_proj = defproj;
1023 get_Return_mem(const ir_node *node) {
1024 assert(is_Return(node));
1025 return get_irn_n(node, 0);
1029 set_Return_mem(ir_node *node, ir_node *mem) {
1030 assert(is_Return(node));
1031 set_irn_n(node, 0, mem);
1035 get_Return_n_ress(const ir_node *node) {
1036 assert(is_Return(node));
1037 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1041 get_Return_res_arr(ir_node *node) {
1042 assert(is_Return(node));
1043 if (get_Return_n_ress(node) > 0)
1044 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1051 set_Return_n_res(ir_node *node, int results) {
1052 assert(is_Return(node));
1057 get_Return_res(const ir_node *node, int pos) {
1058 assert(is_Return(node));
1059 assert(get_Return_n_ress(node) > pos);
1060 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1064 set_Return_res(ir_node *node, int pos, ir_node *res){
1065 assert(is_Return(node));
1066 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1069 tarval *(get_Const_tarval)(const ir_node *node) {
1070 return _get_Const_tarval(node);
1074 set_Const_tarval(ir_node *node, tarval *con) {
1075 assert(is_Const(node));
1076 node->attr.con.tv = con;
1079 int (is_Const_null)(const ir_node *node) {
1080 return _is_Const_null(node);
1083 int (is_Const_one)(const ir_node *node) {
1084 return _is_Const_one(node);
1087 int (is_Const_all_one)(const ir_node *node) {
1088 return _is_Const_all_one(node);
1092 /* The source language type. Must be an atomic type. Mode of type must
1093 be mode of node. For tarvals from entities type must be pointer to
1096 get_Const_type(ir_node *node) {
1097 assert(is_Const(node));
1098 return node->attr.con.tp;
1102 set_Const_type(ir_node *node, ir_type *tp) {
1103 assert(is_Const(node));
1104 if (tp != firm_unknown_type) {
1105 assert(is_atomic_type(tp));
1106 assert(get_type_mode(tp) == get_irn_mode(node));
1108 node->attr.con.tp = tp;
1113 get_SymConst_kind(const ir_node *node) {
1114 assert(is_SymConst(node));
1115 return node->attr.symc.kind;
1119 set_SymConst_kind(ir_node *node, symconst_kind kind) {
1120 assert(is_SymConst(node));
1121 node->attr.symc.kind = kind;
1125 get_SymConst_type(const ir_node *node) {
1126 /* the cast here is annoying, but we have to compensate for
1128 ir_node *irn = (ir_node *)node;
1129 assert(is_SymConst(node) &&
1130 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1131 return irn->attr.symc.sym.type_p;
1135 set_SymConst_type(ir_node *node, ir_type *tp) {
1136 assert(is_SymConst(node) &&
1137 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1138 node->attr.symc.sym.type_p = tp;
1142 get_SymConst_name(const ir_node *node) {
1143 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1144 return node->attr.symc.sym.ident_p;
1148 set_SymConst_name(ir_node *node, ident *name) {
1149 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1150 node->attr.symc.sym.ident_p = name;
1154 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1155 ir_entity *get_SymConst_entity(const ir_node *node) {
1156 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1157 return node->attr.symc.sym.entity_p;
1160 void set_SymConst_entity(ir_node *node, ir_entity *ent) {
1161 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1162 node->attr.symc.sym.entity_p = ent;
1165 ir_enum_const *get_SymConst_enum(const ir_node *node) {
1166 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1167 return node->attr.symc.sym.enum_p;
1170 void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
1171 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1172 node->attr.symc.sym.enum_p = ec;
1175 union symconst_symbol
1176 get_SymConst_symbol(const ir_node *node) {
1177 assert(is_SymConst(node));
1178 return node->attr.symc.sym;
1182 set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
1183 assert(is_SymConst(node));
1184 node->attr.symc.sym = sym;
1188 get_SymConst_value_type(ir_node *node) {
1189 assert(is_SymConst(node));
1190 return node->attr.symc.tp;
1194 set_SymConst_value_type(ir_node *node, ir_type *tp) {
1195 assert(is_SymConst(node));
1196 node->attr.symc.tp = tp;
1200 get_Sel_mem(const ir_node *node) {
1201 assert(is_Sel(node));
1202 return get_irn_n(node, 0);
1206 set_Sel_mem(ir_node *node, ir_node *mem) {
1207 assert(is_Sel(node));
1208 set_irn_n(node, 0, mem);
1212 get_Sel_ptr(const ir_node *node) {
1213 assert(is_Sel(node));
1214 return get_irn_n(node, 1);
1218 set_Sel_ptr(ir_node *node, ir_node *ptr) {
1219 assert(is_Sel(node));
1220 set_irn_n(node, 1, ptr);
1224 get_Sel_n_indexs(const ir_node *node) {
1225 assert(is_Sel(node));
1226 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1230 get_Sel_index_arr(ir_node *node) {
1231 assert(is_Sel(node));
1232 if (get_Sel_n_indexs(node) > 0)
1233 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1239 get_Sel_index(const ir_node *node, int pos) {
1240 assert(is_Sel(node));
1241 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1245 set_Sel_index(ir_node *node, int pos, ir_node *index) {
1246 assert(is_Sel(node));
1247 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1251 get_Sel_entity(const ir_node *node) {
1252 assert(is_Sel(node));
1253 return node->attr.sel.entity;
1256 /* need a version without const to prevent warning */
1257 static ir_entity *_get_Sel_entity(ir_node *node) {
1258 return get_Sel_entity(node);
1262 set_Sel_entity(ir_node *node, ir_entity *ent) {
1263 assert(is_Sel(node));
1264 node->attr.sel.entity = ent;
1268 /* For unary and binary arithmetic operations the access to the
1269 operands can be factored out. Left is the first, right the
1270 second arithmetic value as listed in tech report 0999-33.
1271 unops are: Minus, Abs, Not, Conv, Cast
1272 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1273 Shr, Shrs, Rotate, Cmp */
1277 get_Call_mem(const ir_node *node) {
1278 assert(is_Call(node));
1279 return get_irn_n(node, 0);
1283 set_Call_mem(ir_node *node, ir_node *mem) {
1284 assert(is_Call(node));
1285 set_irn_n(node, 0, mem);
1289 get_Call_ptr(const ir_node *node) {
1290 assert(is_Call(node));
1291 return get_irn_n(node, 1);
1295 set_Call_ptr(ir_node *node, ir_node *ptr) {
1296 assert(is_Call(node));
1297 set_irn_n(node, 1, ptr);
1301 get_Call_param_arr(ir_node *node) {
1302 assert(is_Call(node));
1303 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1307 get_Call_n_params(const ir_node *node) {
1308 assert(is_Call(node));
1309 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1313 get_Call_param(const ir_node *node, int pos) {
1314 assert(is_Call(node));
1315 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1319 set_Call_param(ir_node *node, int pos, ir_node *param) {
1320 assert(is_Call(node));
1321 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1325 get_Call_type(ir_node *node) {
1326 assert(is_Call(node));
1327 return node->attr.call.type;
1331 set_Call_type(ir_node *node, ir_type *tp) {
1332 assert(is_Call(node));
1333 assert((get_unknown_type() == tp) || is_Method_type(tp));
1334 node->attr.call.type = tp;
1338 get_Call_tail_call(const ir_node *node) {
1339 assert(is_Call(node));
1340 return node->attr.call.tail_call;
1344 set_Call_tail_call(ir_node *node, unsigned tail_call) {
1345 assert(is_Call(node));
1346 node->attr.call.tail_call = tail_call != 0;
1350 get_Builtin_mem(const ir_node *node) {
1351 assert(is_Builtin(node));
1352 return get_irn_n(node, 0);
1356 set_Builin_mem(ir_node *node, ir_node *mem) {
1357 assert(is_Builtin(node));
1358 set_irn_n(node, 0, mem);
1362 get_Builtin_kind(const ir_node *node) {
1363 assert(is_Builtin(node));
1364 return node->attr.builtin.kind;
1368 set_Builtin_kind(ir_node *node, ir_builtin_kind kind) {
1369 assert(is_Builtin(node));
1370 node->attr.builtin.kind = kind;
1374 get_Builtin_param_arr(ir_node *node) {
1375 assert(is_Builtin(node));
1376 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1380 get_Builtin_n_params(const ir_node *node) {
1381 assert(is_Builtin(node));
1382 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1386 get_Builtin_param(const ir_node *node, int pos) {
1387 assert(is_Builtin(node));
1388 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1392 set_Builtin_param(ir_node *node, int pos, ir_node *param) {
1393 assert(is_Builtin(node));
1394 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1398 get_Builtin_type(ir_node *node) {
1399 assert(is_Builtin(node));
1400 return node->attr.builtin.type;
1404 set_Builtin_type(ir_node *node, ir_type *tp) {
1405 assert(is_Builtin(node));
1406 assert((get_unknown_type() == tp) || is_Method_type(tp));
1407 node->attr.builtin.type = tp;
/* NOTE(review): incomplete excerpt -- the `switch (kind)` opener, most X()
 * case lines and the function tail are not visible here.  The X macro
 * stringizes each enum constant to produce its printable name. */
1410 /* Returns a human readable string for the ir_builtin_kind. */
1411 const char *get_builtin_kind_name(ir_builtin_kind kind) {
1412 #define X(a) case a: return #a;
1415 	X(ir_bk_debugbreak);
1416 	X(ir_bk_return_address);
1417 	X(ir_bk_frame_address);
1427 	X(ir_bk_inner_trampoline);
1434 int Call_has_callees(const ir_node *node) {
1435 assert(is_Call(node));
1436 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1437 (node->attr.call.callee_arr != NULL));
1440 int get_Call_n_callees(const ir_node *node) {
1441 assert(is_Call(node) && node->attr.call.callee_arr);
1442 return ARR_LEN(node->attr.call.callee_arr);
1445 ir_entity *get_Call_callee(const ir_node *node, int pos) {
1446 assert(pos >= 0 && pos < get_Call_n_callees(node));
1447 return node->attr.call.callee_arr[pos];
1450 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr) {
1451 assert(is_Call(node));
1452 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1453 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1455 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1458 void remove_Call_callee_arr(ir_node *node) {
1459 assert(is_Call(node));
1460 node->attr.call.callee_arr = NULL;
1463 ir_node *get_CallBegin_ptr(const ir_node *node) {
1464 assert(is_CallBegin(node));
1465 return get_irn_n(node, 0);
1468 void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
1469 assert(is_CallBegin(node));
1470 set_irn_n(node, 0, ptr);
1473 ir_node *get_CallBegin_call(const ir_node *node) {
1474 assert(is_CallBegin(node));
1475 return node->attr.callbegin.call;
1478 void set_CallBegin_call(ir_node *node, ir_node *call) {
1479 assert(is_CallBegin(node));
1480 node->attr.callbegin.call = call;
/* NOTE(review): incomplete excerpt -- the return statements of this function
 * are not visible here; presumably it returns 1 when the callee's graph
 * equals the Call's own graph, and 0 otherwise (confirm in the full file). */
1484 * Returns non-zero if a Call is surely a self-recursive Call.
1485 * Beware: if this functions returns 0, the call might be self-recursive!
1487 int is_self_recursive_Call(const ir_node *call) {
1488 	const ir_node *callee = get_Call_ptr(call);
1490 	if (is_SymConst_addr_ent(callee)) {
1491 		const ir_entity *ent = get_SymConst_entity(callee);
1492 		const ir_graph *irg = get_entity_irg(ent);
1493 		if (irg == get_irn_irg(call))
/* NOTE(review): fragments of the accessor-generator macros (BINOP, UNOP,
 * BINOP_MEM, DIVOP).  The `#define BINOP(OP)`/`#define UNOP(OP)`/
 * `#define DIVOP(OP)` header lines are not visible in this excerpt; each
 * macro expands to get/set accessors for the node's operand slots, using
 * the op's op_index as the first data input. */
1500 ir_node * get_##OP##_left(const ir_node *node) { \
1501 	assert(is_##OP(node)); \
1502 	return get_irn_n(node, node->op->op_index); \
1504 void set_##OP##_left(ir_node *node, ir_node *left) { \
1505 	assert(is_##OP(node)); \
1506 	set_irn_n(node, node->op->op_index, left); \
1508 ir_node *get_##OP##_right(const ir_node *node) { \
1509 	assert(is_##OP(node)); \
1510 	return get_irn_n(node, node->op->op_index + 1); \
1512 void set_##OP##_right(ir_node *node, ir_node *right) { \
1513 	assert(is_##OP(node)); \
1514 	set_irn_n(node, node->op->op_index + 1, right); \
1518 ir_node *get_##OP##_op(const ir_node *node) { \
1519 	assert(is_##OP(node)); \
1520 	return get_irn_n(node, node->op->op_index); \
1522 void set_##OP##_op(ir_node *node, ir_node *op) { \
1523 	assert(is_##OP(node)); \
1524 	set_irn_n(node, node->op->op_index, op); \
1527 #define BINOP_MEM(OP) \
1531 get_##OP##_mem(const ir_node *node) { \
1532 	assert(is_##OP(node)); \
1533 	return get_irn_n(node, 0); \
1537 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1538 	assert(is_##OP(node)); \
1539 	set_irn_n(node, 0, mem); \
1545 ir_mode *get_##OP##_resmode(const ir_node *node) { \
1546 	assert(is_##OP(node)); \
1547 	return node->attr.divmod.resmode; \
1550 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
1551 	assert(is_##OP(node)); \
1552 	node->attr.divmod.resmode = mode; \
1580 int get_Div_no_remainder(const ir_node *node) {
1581 assert(is_Div(node));
1582 return node->attr.divmod.no_remainder;
1585 void set_Div_no_remainder(ir_node *node, int no_remainder) {
1586 assert(is_Div(node));
1587 node->attr.divmod.no_remainder = no_remainder;
1590 int get_Conv_strict(const ir_node *node) {
1591 assert(is_Conv(node));
1592 return node->attr.conv.strict;
1595 void set_Conv_strict(ir_node *node, int strict_flag) {
1596 assert(is_Conv(node));
1597 node->attr.conv.strict = (char)strict_flag;
1601 get_Cast_type(ir_node *node) {
1602 assert(is_Cast(node));
1603 return node->attr.cast.type;
1607 set_Cast_type(ir_node *node, ir_type *to_tp) {
1608 assert(is_Cast(node));
1609 node->attr.cast.type = to_tp;
/* NOTE(review): incomplete excerpt -- closing braces and possibly further
 * asserts between the pointer-stripping loop and the class check are not
 * visible here.  Both functions require consistent typeinfo. */
1613 /* Checks for upcast.
1615  * Returns true if the Cast node casts a class type to a super type.
1617 int is_Cast_upcast(ir_node *node) {
1618 	ir_type *totype   = get_Cast_type(node);
1619 	ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1621 	assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1624 	while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1625 		totype   = get_pointer_points_to_type(totype);
1626 		fromtype = get_pointer_points_to_type(fromtype);
1631 	if (!is_Class_type(totype)) return 0;
1632 	return is_SubClass_of(fromtype, totype);
1635 /* Checks for downcast.
1637  * Returns true if the Cast node casts a class type to a sub type.
1639 int is_Cast_downcast(ir_node *node) {
1640 	ir_type *totype   = get_Cast_type(node);
1641 	ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1643 	assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1646 	while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1647 		totype   = get_pointer_points_to_type(totype);
1648 		fromtype = get_pointer_points_to_type(fromtype);
1653 	if (!is_Class_type(totype)) return 0;
1654 	return is_SubClass_of(totype, fromtype);
/* NOTE(review): incomplete excerpt -- the fallback paths of get_unop_op /
 * set_unop_op (what happens when the op is not unary, after the failing
 * assert) are not visible here.  The parenthesized definition of is_unop
 * suppresses expansion of the same-named macro. */
1658 (is_unop)(const ir_node *node) {
1659 	return _is_unop(node);
1663 get_unop_op(const ir_node *node) {
1664 	if (node->op->opar == oparity_unary)
1665 		return get_irn_n(node, node->op->op_index);
1667 	assert(node->op->opar == oparity_unary);
1672 set_unop_op(ir_node *node, ir_node *op) {
1673 	if (node->op->opar == oparity_unary)
1674 		set_irn_n(node, node->op->op_index, op);
1676 	assert(node->op->opar == oparity_unary);
1680 (is_binop)(const ir_node *node) {
1681 return _is_binop(node);
1685 get_binop_left(const ir_node *node) {
1686 assert(node->op->opar == oparity_binary);
1687 return get_irn_n(node, node->op->op_index);
1691 set_binop_left(ir_node *node, ir_node *left) {
1692 assert(node->op->opar == oparity_binary);
1693 set_irn_n(node, node->op->op_index, left);
1697 get_binop_right(const ir_node *node) {
1698 assert(node->op->opar == oparity_binary);
1699 return get_irn_n(node, node->op->op_index + 1);
1703 set_binop_right(ir_node *node, ir_node *right) {
1704 assert(node->op->opar == oparity_binary);
1705 set_irn_n(node, node->op->op_index + 1, right);
/* NOTE(review): incomplete excerpt -- the body of set_Phi_n_preds is not
 * visible here.  A Phi0 is a zero-arity Phi that may only exist while the
 * graph is still being constructed (phase_building). */
1708 int is_Phi0(const ir_node *n) {
1711 	return ((get_irn_op(n) == op_Phi) &&
1712 	        (get_irn_arity(n) == 0) &&
1713 	        (get_irg_phase_state(get_irn_irg(n)) ==  phase_building));
1717 get_Phi_preds_arr(ir_node *node) {
1718 	assert(node->op == op_Phi);
1719 	return (ir_node **)&(get_irn_in(node)[1]);
1723 get_Phi_n_preds(const ir_node *node) {
1724 	assert(is_Phi(node) || is_Phi0(node));
1725 	return (get_irn_arity(node));
1729 void set_Phi_n_preds(ir_node *node, int n_preds) {
1730 	assert(node->op == op_Phi);
1735 get_Phi_pred(const ir_node *node, int pos) {
1736 	assert(is_Phi(node) || is_Phi0(node));
1737 	return get_irn_n(node, pos);
1741 set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
1742 	assert(is_Phi(node) || is_Phi0(node));
1743 	set_irn_n(node, pos, pred);
1746 ir_node *(get_Phi_next)(const ir_node *phi) {
1747 	return _get_Phi_next(phi);
1750 void (set_Phi_next)(ir_node *phi, ir_node *next) {
1751 	_set_Phi_next(phi, next);
1754 int is_memop(const ir_node *node) {
1755 ir_opcode code = get_irn_opcode(node);
1756 return (code == iro_Load || code == iro_Store);
1759 ir_node *get_memop_mem(const ir_node *node) {
1760 assert(is_memop(node));
1761 return get_irn_n(node, 0);
1764 void set_memop_mem(ir_node *node, ir_node *mem) {
1765 assert(is_memop(node));
1766 set_irn_n(node, 0, mem);
1769 ir_node *get_memop_ptr(const ir_node *node) {
1770 assert(is_memop(node));
1771 return get_irn_n(node, 1);
1774 void set_memop_ptr(ir_node *node, ir_node *ptr) {
1775 assert(is_memop(node));
1776 set_irn_n(node, 1, ptr);
1780 get_Load_mem(const ir_node *node) {
1781 assert(is_Load(node));
1782 return get_irn_n(node, 0);
1786 set_Load_mem(ir_node *node, ir_node *mem) {
1787 assert(is_Load(node));
1788 set_irn_n(node, 0, mem);
1792 get_Load_ptr(const ir_node *node) {
1793 assert(is_Load(node));
1794 return get_irn_n(node, 1);
1798 set_Load_ptr(ir_node *node, ir_node *ptr) {
1799 assert(is_Load(node));
1800 set_irn_n(node, 1, ptr);
1804 get_Load_mode(const ir_node *node) {
1805 assert(is_Load(node));
1806 return node->attr.load.mode;
1810 set_Load_mode(ir_node *node, ir_mode *mode) {
1811 assert(is_Load(node));
1812 node->attr.load.mode = mode;
1816 get_Load_volatility(const ir_node *node) {
1817 assert(is_Load(node));
1818 return node->attr.load.volatility;
1822 set_Load_volatility(ir_node *node, ir_volatility volatility) {
1823 assert(is_Load(node));
1824 node->attr.load.volatility = volatility;
1828 get_Load_align(const ir_node *node) {
1829 assert(is_Load(node));
1830 return node->attr.load.aligned;
1834 set_Load_align(ir_node *node, ir_align align) {
1835 assert(is_Load(node));
1836 node->attr.load.aligned = align;
1841 get_Store_mem(const ir_node *node) {
1842 assert(is_Store(node));
1843 return get_irn_n(node, 0);
1847 set_Store_mem(ir_node *node, ir_node *mem) {
1848 assert(is_Store(node));
1849 set_irn_n(node, 0, mem);
1853 get_Store_ptr(const ir_node *node) {
1854 assert(is_Store(node));
1855 return get_irn_n(node, 1);
1859 set_Store_ptr(ir_node *node, ir_node *ptr) {
1860 assert(is_Store(node));
1861 set_irn_n(node, 1, ptr);
1865 get_Store_value(const ir_node *node) {
1866 assert(is_Store(node));
1867 return get_irn_n(node, 2);
1871 set_Store_value(ir_node *node, ir_node *value) {
1872 assert(is_Store(node));
1873 set_irn_n(node, 2, value);
1877 get_Store_volatility(const ir_node *node) {
1878 assert(is_Store(node));
1879 return node->attr.store.volatility;
1883 set_Store_volatility(ir_node *node, ir_volatility volatility) {
1884 assert(is_Store(node));
1885 node->attr.store.volatility = volatility;
1889 get_Store_align(const ir_node *node) {
1890 assert(is_Store(node));
1891 return node->attr.store.aligned;
1895 set_Store_align(ir_node *node, ir_align align) {
1896 assert(is_Store(node));
1897 node->attr.store.aligned = align;
1902 get_Alloc_mem(const ir_node *node) {
1903 assert(is_Alloc(node));
1904 return get_irn_n(node, 0);
1908 set_Alloc_mem(ir_node *node, ir_node *mem) {
1909 assert(is_Alloc(node));
1910 set_irn_n(node, 0, mem);
1914 get_Alloc_size(const ir_node *node) {
1915 assert(is_Alloc(node));
1916 return get_irn_n(node, 1);
1920 set_Alloc_size(ir_node *node, ir_node *size) {
1921 assert(is_Alloc(node));
1922 set_irn_n(node, 1, size);
1926 get_Alloc_type(ir_node *node) {
1927 assert(is_Alloc(node));
1928 return node->attr.alloc.type;
1932 set_Alloc_type(ir_node *node, ir_type *tp) {
1933 assert(is_Alloc(node));
1934 node->attr.alloc.type = tp;
1938 get_Alloc_where(const ir_node *node) {
1939 assert(is_Alloc(node));
1940 return node->attr.alloc.where;
1944 set_Alloc_where(ir_node *node, ir_where_alloc where) {
1945 assert(is_Alloc(node));
1946 node->attr.alloc.where = where;
1951 get_Free_mem(const ir_node *node) {
1952 assert(is_Free(node));
1953 return get_irn_n(node, 0);
1957 set_Free_mem(ir_node *node, ir_node *mem) {
1958 assert(is_Free(node));
1959 set_irn_n(node, 0, mem);
1963 get_Free_ptr(const ir_node *node) {
1964 assert(is_Free(node));
1965 return get_irn_n(node, 1);
1969 set_Free_ptr(ir_node *node, ir_node *ptr) {
1970 assert(is_Free(node));
1971 set_irn_n(node, 1, ptr);
1975 get_Free_size(const ir_node *node) {
1976 assert(is_Free(node));
1977 return get_irn_n(node, 2);
1981 set_Free_size(ir_node *node, ir_node *size) {
1982 assert(is_Free(node));
1983 set_irn_n(node, 2, size);
1987 get_Free_type(ir_node *node) {
1988 assert(is_Free(node));
1989 return node->attr.free.type;
1993 set_Free_type(ir_node *node, ir_type *tp) {
1994 assert(is_Free(node));
1995 node->attr.free.type = tp;
1999 get_Free_where(const ir_node *node) {
2000 assert(is_Free(node));
2001 return node->attr.free.where;
2005 set_Free_where(ir_node *node, ir_where_alloc where) {
2006 assert(is_Free(node));
2007 node->attr.free.where = where;
2010 ir_node **get_Sync_preds_arr(ir_node *node) {
2011 assert(is_Sync(node));
2012 return (ir_node **)&(get_irn_in(node)[1]);
2015 int get_Sync_n_preds(const ir_node *node) {
2016 assert(is_Sync(node));
2017 return (get_irn_arity(node));
/* NOTE(review): incomplete excerpt -- the body of set_Sync_n_preds is not
 * visible here. */
2021 void set_Sync_n_preds(ir_node *node, int n_preds) {
2022 	assert(is_Sync(node));
2026 ir_node *get_Sync_pred(const ir_node *node, int pos) {
2027 assert(is_Sync(node));
2028 return get_irn_n(node, pos);
2031 void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
2032 assert(is_Sync(node));
2033 set_irn_n(node, pos, pred);
2036 /* Add a new Sync predecessor */
2037 void add_Sync_pred(ir_node *node, ir_node *pred) {
2038 assert(is_Sync(node));
2039 add_irn_n(node, pred);
/* NOTE(review): incomplete excerpt -- the switch skeleton (case labels for
 * iro_Proj/iro_Load, the pred_pred declaration, default branch and the final
 * return of tp) is not visible here.  The visible logic derives the source
 * type from Start (parameter types), Call (result types) or Load (entity
 * type of a Sel address). */
2042 /* Returns the source language type of a Proj node. */
2043 ir_type *get_Proj_type(ir_node *n) {
2044 	ir_type *tp   = firm_unknown_type;
2045 	ir_node *pred = get_Proj_pred(n);
2047 	switch (get_irn_opcode(pred)) {
2050 		/* Deal with Start / Call here: we need to know the Proj Nr. */
2051 		assert(get_irn_mode(pred) == mode_T);
2052 		pred_pred = get_Proj_pred(pred);
2054 		if (is_Start(pred_pred)) {
2055 			ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
2056 			tp = get_method_param_type(mtp, get_Proj_proj(n));
2057 		} else if (is_Call(pred_pred)) {
2058 			ir_type *mtp = get_Call_type(pred_pred);
2059 			tp = get_method_res_type(mtp, get_Proj_proj(n));
2062 	case iro_Start: break;
2063 	case iro_Call: break;
2065 		ir_node *a = get_Load_ptr(pred);
2067 			tp = get_entity_type(get_Sel_entity(a));
2076 get_Proj_pred(const ir_node *node) {
2077 assert(is_Proj(node));
2078 return get_irn_n(node, 0);
2082 set_Proj_pred(ir_node *node, ir_node *pred) {
2083 assert(is_Proj(node));
2084 set_irn_n(node, 0, pred);
/* NOTE(review): incomplete excerpt -- the `#else` directive and closing
 * braces inside the INTERPROCEDURAL_VIEW conditional are not visible here.
 * With the interprocedural view enabled, Filter nodes also carry a proj
 * number; otherwise only plain Proj nodes do. */
2088 get_Proj_proj(const ir_node *node) {
2089 #ifdef INTERPROCEDURAL_VIEW
2090 	ir_opcode code = get_irn_opcode(node);
2092 	if (code == iro_Proj) {
2093 		return node->attr.proj;
2096 		assert(code == iro_Filter);
2097 		return node->attr.filter.proj;
2100 	assert(is_Proj(node));
2101 	return node->attr.proj;
2102 #endif /* INTERPROCEDURAL_VIEW */
2106 set_Proj_proj(ir_node *node, long proj) {
2107 #ifdef INTERPROCEDURAL_VIEW
2108 	ir_opcode code = get_irn_opcode(node);
2110 	if (code == iro_Proj) {
2111 		node->attr.proj = proj;
2114 		assert(code == iro_Filter);
2115 		node->attr.filter.proj = proj;
2118 	assert(is_Proj(node));
2119 	node->attr.proj = proj;
2120 #endif /* INTERPROCEDURAL_VIEW */
2123 /* Returns non-zero if a node is a routine parameter. */
2124 int (is_arg_Proj)(const ir_node *node) {
2125 return _is_arg_Proj(node);
2129 get_Tuple_preds_arr(ir_node *node) {
2130 assert(is_Tuple(node));
2131 return (ir_node **)&(get_irn_in(node)[1]);
2135 get_Tuple_n_preds(const ir_node *node) {
2136 assert(is_Tuple(node));
2137 return get_irn_arity(node);
/* NOTE(review): incomplete excerpt -- the body of set_Tuple_n_preds is not
 * visible here. */
2142 set_Tuple_n_preds(ir_node *node, int n_preds) {
2143 	assert(is_Tuple(node));
2148 get_Tuple_pred(const ir_node *node, int pos) {
2149 assert(is_Tuple(node));
2150 return get_irn_n(node, pos);
2154 set_Tuple_pred(ir_node *node, int pos, ir_node *pred) {
2155 assert(is_Tuple(node));
2156 set_irn_n(node, pos, pred);
2160 get_Id_pred(const ir_node *node) {
2161 assert(is_Id(node));
2162 return get_irn_n(node, 0);
2166 set_Id_pred(ir_node *node, ir_node *pred) {
2167 assert(is_Id(node));
2168 set_irn_n(node, 0, pred);
2171 ir_node *get_Confirm_value(const ir_node *node) {
2172 assert(is_Confirm(node));
2173 return get_irn_n(node, 0);
2176 void set_Confirm_value(ir_node *node, ir_node *value) {
2177 assert(is_Confirm(node));
2178 set_irn_n(node, 0, value);
2181 ir_node *get_Confirm_bound(const ir_node *node) {
2182 assert(is_Confirm(node));
2183 return get_irn_n(node, 1);
2186 void set_Confirm_bound(ir_node *node, ir_node *bound) {
2187 assert(is_Confirm(node));
2188 set_irn_n(node, 0, bound);
2191 pn_Cmp get_Confirm_cmp(const ir_node *node) {
2192 assert(is_Confirm(node));
2193 return node->attr.confirm.cmp;
2196 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
2197 assert(is_Confirm(node));
2198 node->attr.confirm.cmp = cmp;
/* NOTE(review): incomplete excerpt -- the bodies of get_Filter_pred /
 * set_Filter_pred, several closing braces and local declarations (e.g. the
 * `arity` variable in get_Filter_cg_pred) are not visible here.  The in_cg
 * array mirrors the interprocedural predecessors; index 0 holds the block,
 * so user-visible positions are shifted by one. */
2202 get_Filter_pred(ir_node *node) {
2203 	assert(is_Filter(node));
2208 set_Filter_pred(ir_node *node, ir_node *pred) {
2209 	assert(is_Filter(node));
2214 get_Filter_proj(ir_node *node) {
2215 	assert(is_Filter(node));
2216 	return node->attr.filter.proj;
2220 set_Filter_proj(ir_node *node, long proj) {
2221 	assert(is_Filter(node));
2222 	node->attr.filter.proj = proj;
2225 /* Don't use get_irn_arity, get_irn_n in implementation as access
2226    shall work independent of view!!! */
2227 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in) {
2228 	assert(is_Filter(node));
2229 	if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2230 		ir_graph *irg = get_irn_irg(node);
2231 		node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2232 		node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
2233 		node->attr.filter.in_cg[0] = node->in[0];
2235 	memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2238 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2239 	assert(is_Filter(node) && node->attr.filter.in_cg &&
2240 	       0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2241 	node->attr.filter.in_cg[pos + 1] = pred;
2244 int get_Filter_n_cg_preds(ir_node *node) {
2245 	assert(is_Filter(node) && node->attr.filter.in_cg);
2246 	return (ARR_LEN(node->attr.filter.in_cg) - 1);
2249 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2251 	assert(is_Filter(node) && node->attr.filter.in_cg &&
2253 	arity = ARR_LEN(node->attr.filter.in_cg);
2254 	assert(pos < arity - 1);
2255 	return node->attr.filter.in_cg[pos + 1];
/* NOTE(review): incomplete excerpt -- the getter/setter bodies for sel and
 * the getters for false/true are not visible here.  From the visible
 * setters, the false operand lives in in[2] and the true operand in in[3];
 * presumably sel is in[1] -- confirm against the full file. */
2259 ir_node *get_Mux_sel(const ir_node *node) {
2260 	assert(is_Mux(node));
2264 void set_Mux_sel(ir_node *node, ir_node *sel) {
2265 	assert(is_Mux(node));
2269 ir_node *get_Mux_false(const ir_node *node) {
2270 	assert(is_Mux(node));
2274 void set_Mux_false(ir_node *node, ir_node *ir_false) {
2275 	assert(is_Mux(node));
2276 	node->in[2] = ir_false;
2279 ir_node *get_Mux_true(const ir_node *node) {
2280 	assert(is_Mux(node));
2284 void set_Mux_true(ir_node *node, ir_node *ir_true) {
2285 	assert(is_Mux(node));
2286 	node->in[3] = ir_true;
2290 ir_node *get_CopyB_mem(const ir_node *node) {
2291 assert(is_CopyB(node));
2292 return get_irn_n(node, 0);
2295 void set_CopyB_mem(ir_node *node, ir_node *mem) {
2296 assert(node->op == op_CopyB);
2297 set_irn_n(node, 0, mem);
2300 ir_node *get_CopyB_dst(const ir_node *node) {
2301 assert(is_CopyB(node));
2302 return get_irn_n(node, 1);
2305 void set_CopyB_dst(ir_node *node, ir_node *dst) {
2306 assert(is_CopyB(node));
2307 set_irn_n(node, 1, dst);
2310 ir_node *get_CopyB_src(const ir_node *node) {
2311 assert(is_CopyB(node));
2312 return get_irn_n(node, 2);
2315 void set_CopyB_src(ir_node *node, ir_node *src) {
2316 assert(is_CopyB(node));
2317 set_irn_n(node, 2, src);
2320 ir_type *get_CopyB_type(ir_node *node) {
2321 assert(is_CopyB(node));
2322 return node->attr.copyb.type;
2325 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2326 assert(is_CopyB(node) && data_type);
2327 node->attr.copyb.type = data_type;
2332 get_InstOf_type(ir_node *node) {
2333 assert(node->op == op_InstOf);
2334 return node->attr.instof.type;
2338 set_InstOf_type(ir_node *node, ir_type *type) {
2339 assert(node->op == op_InstOf);
2340 node->attr.instof.type = type;
2344 get_InstOf_store(const ir_node *node) {
2345 assert(node->op == op_InstOf);
2346 return get_irn_n(node, 0);
2350 set_InstOf_store(ir_node *node, ir_node *obj) {
2351 assert(node->op == op_InstOf);
2352 set_irn_n(node, 0, obj);
2356 get_InstOf_obj(const ir_node *node) {
2357 assert(node->op == op_InstOf);
2358 return get_irn_n(node, 1);
2362 set_InstOf_obj(ir_node *node, ir_node *obj) {
2363 assert(node->op == op_InstOf);
2364 set_irn_n(node, 1, obj);
2367 /* Returns the memory input of a Raise operation. */
2369 get_Raise_mem(const ir_node *node) {
2370 assert(is_Raise(node));
2371 return get_irn_n(node, 0);
2375 set_Raise_mem(ir_node *node, ir_node *mem) {
2376 assert(is_Raise(node));
2377 set_irn_n(node, 0, mem);
2381 get_Raise_exo_ptr(const ir_node *node) {
2382 assert(is_Raise(node));
2383 return get_irn_n(node, 1);
2387 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr) {
2388 assert(is_Raise(node));
2389 set_irn_n(node, 1, exo_ptr);
2394 /* Returns the memory input of a Bound operation. */
2395 ir_node *get_Bound_mem(const ir_node *bound) {
2396 assert(is_Bound(bound));
2397 return get_irn_n(bound, 0);
2400 void set_Bound_mem(ir_node *bound, ir_node *mem) {
2401 assert(is_Bound(bound));
2402 set_irn_n(bound, 0, mem);
2405 /* Returns the index input of a Bound operation. */
2406 ir_node *get_Bound_index(const ir_node *bound) {
2407 assert(is_Bound(bound));
2408 return get_irn_n(bound, 1);
2411 void set_Bound_index(ir_node *bound, ir_node *idx) {
2412 assert(is_Bound(bound));
2413 set_irn_n(bound, 1, idx);
2416 /* Returns the lower bound input of a Bound operation. */
2417 ir_node *get_Bound_lower(const ir_node *bound) {
2418 assert(is_Bound(bound));
2419 return get_irn_n(bound, 2);
2422 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2423 assert(is_Bound(bound));
2424 set_irn_n(bound, 2, lower);
2427 /* Returns the upper bound input of a Bound operation. */
2428 ir_node *get_Bound_upper(const ir_node *bound) {
2429 assert(is_Bound(bound));
2430 return get_irn_n(bound, 3);
2433 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2434 assert(is_Bound(bound));
2435 set_irn_n(bound, 3, upper);
2438 /* Return the operand of a Pin node. */
2439 ir_node *get_Pin_op(const ir_node *pin) {
2440 assert(is_Pin(pin));
2441 return get_irn_n(pin, 0);
2444 void set_Pin_op(ir_node *pin, ir_node *node) {
2445 assert(is_Pin(pin));
2446 set_irn_n(pin, 0, node);
2449 /* Return the assembler text of an ASM pseudo node. */
2450 ident *get_ASM_text(const ir_node *node) {
2451 assert(is_ASM(node));
2452 return node->attr.assem.asm_text;
2455 /* Return the number of input constraints for an ASM node. */
2456 int get_ASM_n_input_constraints(const ir_node *node) {
2457 assert(is_ASM(node));
2458 return ARR_LEN(node->attr.assem.inputs);
2461 /* Return the input constraints for an ASM node. This is a flexible array. */
2462 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node) {
2463 assert(is_ASM(node));
2464 return node->attr.assem.inputs;
2467 /* Return the number of output constraints for an ASM node. */
2468 int get_ASM_n_output_constraints(const ir_node *node) {
2469 assert(is_ASM(node));
2470 return ARR_LEN(node->attr.assem.outputs);
2473 /* Return the output constraints for an ASM node. */
2474 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node) {
2475 assert(is_ASM(node));
2476 return node->attr.assem.outputs;
2479 /* Return the number of clobbered registers for an ASM node. */
2480 int get_ASM_n_clobbers(const ir_node *node) {
2481 assert(is_ASM(node));
2482 return ARR_LEN(node->attr.assem.clobber);
2485 /* Return the list of clobbered registers for an ASM node. */
2486 ident **get_ASM_clobbers(const ir_node *node) {
2487 assert(is_ASM(node));
2488 return node->attr.assem.clobber;
2491 /* returns the graph of a node */
2493 get_irn_irg(const ir_node *node) {
2495 * Do not use get_nodes_Block() here, because this
2496 * will check the pinned state.
2497 * However even a 'wrong' block is always in the proper
2500 if (! is_Block(node))
2501 node = get_irn_n(node, -1);
2502 /* note that get_Block_irg() can handle Bad nodes */
2503 return get_Block_irg(node);
2507 /*----------------------------------------------------------------*/
2508 /* Auxiliary routines */
2509 /*----------------------------------------------------------------*/
/* NOTE(review): incomplete excerpt -- the loop/condition headers of
 * skip_Proj and skip_Proj_const, several return statements and parts of
 * skip_Tuple's control flow are not visible here.  These helpers walk past
 * projection/forwarding nodes to the underlying value. */
2512 skip_Proj(ir_node *node) {
2513 	/* don't assert node !!! */
2518 	node = get_Proj_pred(node);
2524 skip_Proj_const(const ir_node *node) {
2525 	/* don't assert node !!! */
2530 	node = get_Proj_pred(node);
2536 skip_Tuple(ir_node *node) {
2541 	if (is_Proj(node)) {
2542 		pred = get_Proj_pred(node);
2543 		op = get_irn_op(pred);
2546 		 * Looks strange but calls get_irn_op() only once
2547 		 * in most often cases.
2549 		if (op == op_Proj) { /* nested Tuple ? */
2550 			pred = skip_Tuple(pred);
2552 			if (is_Tuple(pred)) {
2553 				node = get_Tuple_pred(pred, get_Proj_proj(node));
2556 		} else if (op == op_Tuple) {
2557 			node = get_Tuple_pred(pred, get_Proj_proj(node));
2564 /* returns operand of node if node is a Cast */
2565 ir_node *skip_Cast(ir_node *node) {
2567 		return get_Cast_op(node);
2571 /* returns operand of node if node is a Cast */
2572 const ir_node *skip_Cast_const(const ir_node *node) {
2574 		return get_Cast_op(node);
2578 /* returns operand of node if node is a Pin */
2579 ir_node *skip_Pin(ir_node *node) {
2581 		return get_Pin_op(node);
2585 /* returns operand of node if node is a Confirm */
2586 ir_node *skip_Confirm(ir_node *node) {
2587 	if (is_Confirm(node))
2588 		return get_Confirm_value(node);
2592 /* skip all high-level ops */
2593 ir_node *skip_HighLevel_ops(ir_node *node) {
2594 	while (is_op_highlevel(get_irn_op(node))) {
2595 		node = get_irn_n(node, 0);
/* NOTE(review): hand-optimized Id-chain compaction; several lines (the
 * `pred` declaration, rem_pred assignment, final returns and braces) are
 * not visible in this excerpt.  The statement order is load-bearing: the
 * node is temporarily turned into a self-loop to cut cycles while the
 * chain tail is resolved recursively.  Do not restyle without the full
 * function body. */
2601 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2602  * than any other approach, as Id chains are resolved and all point to the real node, or
2603  * all id's are self loops.
2605  * Note: This function takes 10% of mostly ANY the compiler run, so it's
2606  * a little bit "hand optimized".
2608  * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2611 skip_Id(ir_node *node) {
2613 	/* don't assert node !!! */
2615 	if (!node || (node->op != op_Id)) return node;
2617 	/* Don't use get_Id_pred(): We get into an endless loop for
2618 	   self-referencing Ids. */
2619 	pred = node->in[0+1];
2621 	if (pred->op != op_Id) return pred;
2623 	if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2624 		ir_node *rem_pred, *res;
2626 		if (pred->op != op_Id) return pred; /* shortcut */
2629 		assert(get_irn_arity (node) > 0);
2631 		node->in[0+1] = node;  /* turn us into a self referencing Id:  shorten Id cycles. */
2632 		res = skip_Id(rem_pred);
2633 		if (res->op == op_Id) /* self-loop */ return node;
2635 		node->in[0+1] = res;  /* Turn Id chain into Ids all referencing the chain end. */
/* NOTE(review): incomplete excerpt -- the declaration of `n` and the tail
 * of this function (resolving the Id and storing back through *node) are
 * not visible here. */
2642 void skip_Id_and_store(ir_node **node) {
2645 	if (!n || (n->op != op_Id)) return;
2647 	/* Don't use get_Id_pred(): We get into an endless loop for
2648 	   self-referencing Ids. */
2653 (is_strictConv)(const ir_node *node) {
2654 return _is_strictConv(node);
2658 (is_no_Block)(const ir_node *node) {
2659 return _is_no_Block(node);
2662 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
2664 (is_SymConst_addr_ent)(const ir_node *node) {
2665 return _is_SymConst_addr_ent(node);
2668 /* Returns true if the operation manipulates control flow. */
2669 int is_cfop(const ir_node *node) {
2670 return is_op_cfopcode(get_irn_op(node));
2673 /* Returns true if the operation manipulates interprocedural control flow:
2674 CallBegin, EndReg, EndExcept */
2675 int is_ip_cfop(const ir_node *node) {
2676 return is_ip_cfopcode(get_irn_op(node));
2679 /* Returns true if the operation can change the control flow because
2682 is_fragile_op(const ir_node *node) {
2683 return is_op_fragile(get_irn_op(node));
/* NOTE(review): incomplete excerpt -- the case labels of this switch (the
 * fragile opcodes that share the pn_Generic_M memory input) and the
 * function tail are not visible here. */
2686 /* Returns the memory operand of fragile operations. */
2687 ir_node *get_fragile_op_mem(ir_node *node) {
2688 	assert(node && is_fragile_op(node));
2690 	switch (get_irn_opcode(node)) {
2701 		return get_irn_n(node, pn_Generic_M);
2706 		assert(0 && "should not be reached");
/* NOTE(review): incomplete excerpt -- the default label and the function
 * tail (presumably returning NULL after the failing assert) are not
 * visible here.  The function name spells "resmod" while the per-op
 * accessors spell "resmode"; it is the public symbol, so keep it. */
2711 /* Returns the result mode of a Div operation. */
2712 ir_mode *get_divop_resmod(const ir_node *node) {
2713 	switch (get_irn_opcode(node)) {
2714 	case iro_Quot  : return get_Quot_resmode(node);
2715 	case iro_DivMod: return get_DivMod_resmode(node);
2716 	case iro_Div   : return get_Div_resmode(node);
2717 	case iro_Mod   : return get_Mod_resmode(node);
2719 		assert(0 && "should not be reached");
2724 /* Returns true if the operation is a forking control flow operation. */
2725 int (is_irn_forking)(const ir_node *node) {
2726 return _is_irn_forking(node);
2729 void (copy_node_attr)(const ir_node *old_node, ir_node *new_node) {
2730 _copy_node_attr(old_node, new_node);
2733 /* Return the type associated with the value produced by n
2734 * if the node remarks this type as it is the case for
2735 * Cast, Const, SymConst and some Proj nodes. */
2736 ir_type *(get_irn_type)(ir_node *node) {
2737 return _get_irn_type(node);
2740 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2742 ir_type *(get_irn_type_attr)(ir_node *node) {
2743 return _get_irn_type_attr(node);
2746 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2747 ir_entity *(get_irn_entity_attr)(ir_node *node) {
2748 return _get_irn_entity_attr(node);
2751 /* Returns non-zero for constant-like nodes. */
2752 int (is_irn_constlike)(const ir_node *node) {
2753 return _is_irn_constlike(node);
2757 * Returns non-zero for nodes that are allowed to have keep-alives and
2758 * are neither Block nor PhiM.
2760 int (is_irn_keep)(const ir_node *node) {
2761 return _is_irn_keep(node);
2765 * Returns non-zero for nodes that are always placed in the start block.
2767 int (is_irn_start_block_placed)(const ir_node *node) {
2768 return _is_irn_start_block_placed(node);
2771 /* Returns non-zero for nodes that are machine operations. */
2772 int (is_irn_machine_op)(const ir_node *node) {
2773 return _is_irn_machine_op(node);
2776 /* Returns non-zero for nodes that are machine operands. */
2777 int (is_irn_machine_operand)(const ir_node *node) {
2778 return _is_irn_machine_operand(node);
2781 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2782 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2783 return _is_irn_machine_user(node, n);
2786 /* Returns non-zero for nodes that are CSE neutral to its users. */
2787 int (is_irn_cse_neutral)(const ir_node *node) {
2788 return _is_irn_cse_neutral(node);
/* NOTE(review): incomplete excerpt -- the `switch (pred)` opener, default
 * branch, X undef and function tail are not visible here.  X stringizes
 * each enum constant to its printable name. */
2791 /* Gets the string representation of the jump prediction .*/
2792 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
2793 #define X(a)    case a: return #a;
2795 	X(COND_JMP_PRED_NONE);
2796 	X(COND_JMP_PRED_TRUE);
2797 	X(COND_JMP_PRED_FALSE);
2803 /* Returns the conditional jump prediction of a Cond node. */
2804 cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond) {
2805 return _get_Cond_jmp_pred(cond);
2808 /* Sets a new conditional jump prediction. */
2809 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2810 _set_Cond_jmp_pred(cond, pred);
2813 /** the get_type operation must be always implemented and return a firm type */
2814 static ir_type *get_Default_type(ir_node *n) {
2816 return get_unknown_type();
2819 /* Sets the get_type operation for an ir_op_ops. */
2820 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
2822 case iro_Const: ops->get_type = get_Const_type; break;
2823 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2824 case iro_Cast: ops->get_type = get_Cast_type; break;
2825 case iro_Proj: ops->get_type = get_Proj_type; break;
2827 /* not allowed to be NULL */
2828 if (! ops->get_type)
2829 ops->get_type = get_Default_type;
2835 /** Return the attribute type of a SymConst node if exists */
2836 static ir_type *get_SymConst_attr_type(ir_node *self) {
2837 symconst_kind kind = get_SymConst_kind(self);
2838 if (SYMCONST_HAS_TYPE(kind))
2839 return get_SymConst_type(self);
2843 /** Return the attribute entity of a SymConst node if exists */
2844 static ir_entity *get_SymConst_attr_entity(ir_node *self) {
2845 symconst_kind kind = get_SymConst_kind(self);
2846 if (SYMCONST_HAS_ENT(kind))
2847 return get_SymConst_entity(self);
2851 /** the get_type_attr operation must be always implemented */
2852 static ir_type *get_Null_type(ir_node *n) {
2854 return firm_unknown_type;
2857 /* Sets the get_type operation for an ir_op_ops. */
2858 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
2860 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2861 case iro_Call: ops->get_type_attr = get_Call_type; break;
2862 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2863 case iro_Free: ops->get_type_attr = get_Free_type; break;
2864 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2866 /* not allowed to be NULL */
2867 if (! ops->get_type_attr)
2868 ops->get_type_attr = get_Null_type;
2874 /** the get_entity_attr operation must be always implemented */
2875 static ir_entity *get_Null_ent(ir_node *n) {
2880 /* Sets the get_type operation for an ir_op_ops. */
2881 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
2883 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2884 case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
2886 /* not allowed to be NULL */
2887 if (! ops->get_entity_attr)
2888 ops->get_entity_attr = get_Null_ent;
2894 /* Sets the debug information of a node. */
2895 void (set_irn_dbg_info)(ir_node *n, dbg_info *db) {
2896 _set_irn_dbg_info(n, db);
2900 * Returns the debug information of an node.
2902 * @param n The node.
2904 dbg_info *(get_irn_dbg_info)(const ir_node *n) {
2905 return _get_irn_dbg_info(n);
2908 /* checks whether a node represents a global address */
2909 int is_Global(const ir_node *node) {
2910 return is_SymConst_addr_ent(node);
2913 /* returns the entity of a global address */
2914 ir_entity *get_Global_entity(const ir_node *node) {
2915 return get_SymConst_entity(node);
2919 * Calculate a hash value of a node.
2921 unsigned firm_default_hash(const ir_node *node) {
2925 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
2926 h = irn_arity = get_irn_intra_arity(node);
2928 /* consider all in nodes... except the block if not a control flow. */
2929 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
2930 ir_node *pred = get_irn_intra_n(node, i);
2931 if (is_irn_cse_neutral(pred))
2934 h = 9*h + HASH_PTR(pred);
2938 h = 9*h + HASH_PTR(get_irn_mode(node));
2940 h = 9*h + HASH_PTR(get_irn_op(node));
2943 } /* firm_default_hash */
2945 /* include generated code */
2946 #include "gen_irnode.c.inl"