2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
48 /* some constants fixing the positions of nodes predecessors in the in array */
50 #define CALL_PARAM_OFFSET 2
/* NOTE(review): "BUILDIN" is a historical misspelling of "Builtin"; kept because
   the name is used throughout the file. */
51 #define BUILDIN_PARAM_OFFSET 1
52 #define SEL_INDEX_OFFSET 2
53 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
54 #define END_KEEPALIVE_OFFSET 0
/**
 * Printable names of the pn_Cmp relations, indexed by the pn_Cmp constant.
 */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 * @param pnc  a pn_Cmp constant; must be a valid index into pnc_name_arr.
 */
const char *get_pnc_string(int pnc) {
	assert(pnc >= 0 && pnc <
	       (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
	return pnc_name_arr[pnc];
}
73 * Calculates the negated (Complement(R)) pnc condition.
/* NOTE(review): body truncated in this excerpt — the actual negation of the
   relation bits (including the Uo bit for float modes) is not visible here;
   confirm against the full source before modifying. */
75 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode) {
78 /* do NOT add the Uo bit for non-floating point values */
79 if (! mode_is_float(mode))
85 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
86 pn_Cmp get_inversed_pnc(long pnc) {
87 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
88 long lesser = pnc & pn_Cmp_Lt;
89 long greater = pnc & pn_Cmp_Gt;
91 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
97 * Indicates, whether additional data can be registered to ir nodes.
98 * If set to 1, this is not possible anymore.
*/
100 static int forbid_new_data = 0;

/**
103 * The amount of additional space for custom data to be allocated upon
104 * creating a new node.
*/
106 unsigned firm_add_node_size = 0;

109 /* register new space for every node */
/* Reserve `size` extra bytes in front of every node; must happen before
   forbid_new_data is set (i.e. before the first node is created).
   Returns the accumulated extra size. (Alignment handling truncated in
   this excerpt.) */
110 unsigned firm_register_additional_node_data(unsigned size) {
111 assert(!forbid_new_data && "Too late to register additional node data");
116 return firm_add_node_size += size;
/* One-time module initialization. */
120 void init_irnode(void) {
121 /* Forbid the addition of new data to an ir node. */
/**
126 * irnode constructor.
127 * Create a new irnode in irg, with an op, mode, arity and
128 * some incoming irnodes.
129 * If arity is negative, a node with a dynamic array is created.
*/
132 new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
133 int arity, ir_node **in)
/* total size: node header + op-specific attributes + registered custom data */
136 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
143 p = obstack_alloc(irg->obst, node_size);
144 memset(p, 0, node_size);
/* custom data lives in FRONT of the node; the ir_node* points past it */
145 res = (ir_node *)(p + firm_add_node_size);
147 res->kind = k_ir_node;
151 res->node_idx = irg_register_node_idx(irg, res);
/* in[0] always holds the block predecessor */
156 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
158 /* not nice but necessary: End and Sync must always have a flexible array */
159 if (op == op_End || op == op_Sync)
160 res->in = NEW_ARR_F(ir_node *, (arity+1));
162 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
163 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
167 set_irn_dbg_info(res, db);
169 res->node_nr = get_irp_new_node_nr();
171 for (i = 0; i < EDGE_KIND_LAST; ++i) {
172 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
173 /* edges will be build immediately */
174 res->edge_info[i].edges_built = 1;
175 res->edge_info[i].out_count = 0;
/* notify the out-edge module about the new block edge and the data edges */
178 /* don't put this into the for loop, arity is -1 for some nodes! */
179 edges_notify_edge(res, -1, res->in[0], NULL, irg);
180 for (i = 1; i <= arity; ++i)
181 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
183 hook_new_node(irg, res);
/* in the backend phase, scheduling/register info is attached per node */
184 if (get_irg_phase_state(irg) == phase_backend) {
185 be_info_new_node(res);
191 /*-- getting some parameters from ir_nodes --*/
193 int (is_ir_node)(const void *thing) {
194 return _is_ir_node(thing);
197 int (get_irn_intra_arity)(const ir_node *node) {
198 return _get_irn_intra_arity(node);
201 int (get_irn_inter_arity)(const ir_node *node) {
202 return _get_irn_inter_arity(node);
205 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
207 int (get_irn_arity)(const ir_node *node) {
208 return _get_irn_arity(node);
211 /* Returns the array with ins. This array is shifted with respect to the
212 array accessed by get_irn_n: The block operand is at position 0 not -1.
213 (@@@ This should be changed.)
214 The order of the predecessors in this array is not guaranteed, except that
215 lists of operands as predecessors of Block or arguments of a Call are
   consecutive. */
217 ir_node **get_irn_in(const ir_node *node) {
219 #ifdef INTERPROCEDURAL_VIEW
220 if (get_interprocedural_view()) { /* handle Filter and Block specially */
221 if (get_irn_opcode(node) == iro_Filter) {
222 assert(node->attr.filter.in_cg);
223 return node->attr.filter.in_cg;
224 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
225 return node->attr.block.in_cg;
227 /* else fall through */
229 #endif /* INTERPROCEDURAL_VIEW */
/* NOTE(review): the intraprocedural return (presumably node->in) is
   truncated in this excerpt. */
/* Replace the whole predecessor array of a node by `in` (arity entries),
   keeping the block predecessor at slot 0 and notifying the edge module
   about every changed edge. (Some lines truncated in this excerpt.) */
233 void set_irn_in(ir_node *node, int arity, ir_node **in) {
236 ir_graph *irg = get_irn_irg(node);
239 #ifdef INTERPROCEDURAL_VIEW
240 if (get_interprocedural_view()) { /* handle Filter and Block specially */
241 ir_opcode code = get_irn_opcode(node);
242 if (code == iro_Filter) {
243 assert(node->attr.filter.in_cg);
244 pOld_in = &node->attr.filter.in_cg;
245 } else if (code == iro_Block && node->attr.block.in_cg) {
246 pOld_in = &node->attr.block.in_cg;
251 #endif /* INTERPROCEDURAL_VIEW */
/* first: pairwise notify for positions present in both old and new array */
255 for (i = 0; i < arity; i++) {
256 if (i < ARR_LEN(*pOld_in)-1)
257 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
259 edges_notify_edge(node, i, in[i], NULL, irg);
/* then: delete edges for old positions beyond the new arity */
261 for (;i < ARR_LEN(*pOld_in)-1; i++) {
262 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
/* reallocate only if the arity actually changed; slot 0 (block) is kept */
265 if (arity != ARR_LEN(*pOld_in) - 1) {
266 ir_node * block = (*pOld_in)[0];
267 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
268 (*pOld_in)[0] = block;
270 fix_backedges(irg->obst, node);
272 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
275 ir_node *(get_irn_intra_n)(const ir_node *node, int n) {
276 return _get_irn_intra_n(node, n);
279 ir_node *(get_irn_inter_n)(const ir_node *node, int n) {
280 return _get_irn_inter_n(node, n);
283 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
285 ir_node *(get_irn_n)(const ir_node *node, int n) {
286 return _get_irn_n(node, n);
/* Set predecessor n of a node (n == -1 addresses the block predecessor,
   stored at in[0]; data predecessors live at in[n+1]).
   (Some lines truncated in this excerpt.) */
289 void set_irn_n(ir_node *node, int n, ir_node *in) {
290 assert(node && node->kind == k_ir_node);
292 assert(n < get_irn_arity(node));
293 assert(in && in->kind == k_ir_node);
295 #ifdef INTERPROCEDURAL_VIEW
296 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
297 /* Change block pred in both views! */
298 node->in[n + 1] = in;
299 assert(node->attr.filter.in_cg);
300 node->attr.filter.in_cg[n + 1] = in;
303 if (get_interprocedural_view()) { /* handle Filter and Block specially */
304 if (get_irn_opcode(node) == iro_Filter) {
305 assert(node->attr.filter.in_cg);
306 node->attr.filter.in_cg[n + 1] = in;
308 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
309 node->attr.block.in_cg[n + 1] = in;
312 /* else fall through */
314 #endif /* INTERPROCEDURAL_VIEW */
/* notify hooks/edge module with the OLD predecessor before overwriting */
317 hook_set_irn_n(node, n, in, node->in[n + 1]);
319 /* Here, we rely on src and tgt being in the current ir graph */
320 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
322 node->in[n + 1] = in;
/* Append a new predecessor to a dynamic-arity node; returns the new
   predecessor position (return statement truncated in this excerpt). */
325 int add_irn_n(ir_node *node, ir_node *in) {
327 ir_graph *irg = get_irn_irg(node);
329 assert(node->op->opar == oparity_dynamic);
/* pos is the index of the new data predecessor (in[] is block-shifted by 1) */
330 pos = ARR_LEN(node->in) - 1;
331 ARR_APP1(ir_node *, node->in, in);
332 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
335 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
340 void del_Sync_n(ir_node *n, int i)
342 int arity = get_Sync_n_preds(n);
343 ir_node *last_pred = get_Sync_pred(n, arity - 1);
344 set_Sync_pred(n, i, last_pred);
345 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
346 ARR_SHRINKLEN(get_irn_in(n), arity);
349 int (get_irn_deps)(const ir_node *node) {
350 return _get_irn_deps(node);
353 ir_node *(get_irn_dep)(const ir_node *node, int pos) {
354 return _get_irn_dep(node, pos);
357 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep) {
358 _set_irn_dep(node, pos, dep);
/* Add a dependency edge node -> dep, reusing a freed (NULL) slot if one
   exists; returns the slot used (some lines truncated in this excerpt). */
361 int add_irn_dep(ir_node *node, ir_node *dep) {
364 /* DEP edges are only allowed in backend phase */
365 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
366 if (node->deps == NULL) {
367 node->deps = NEW_ARR_F(ir_node *, 1);
/* scan for an empty slot and check that dep is not already present */
373 for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
374 if(node->deps[i] == NULL)
377 if(node->deps[i] == dep)
381 if (first_zero >= 0) {
382 node->deps[first_zero] = dep;
385 ARR_APP1(ir_node *, node->deps, dep);
/* notify the edge module about the new DEP edge */
390 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
395 void add_irn_deps(ir_node *tgt, ir_node *src) {
398 for (i = 0, n = get_irn_deps(src); i < n; ++i)
399 add_irn_dep(tgt, get_irn_dep(src, i));
403 ir_mode *(get_irn_mode)(const ir_node *node) {
404 return _get_irn_mode(node);
407 void (set_irn_mode)(ir_node *node, ir_mode *mode) {
408 _set_irn_mode(node, mode);
411 /** Gets the string representation of the mode. */
412 const char *get_irn_modename(const ir_node *node) {
414 return get_mode_name(node->mode);
/** Gets the mode name as an ident. */
417 ident *get_irn_modeident(const ir_node *node) {
419 return get_mode_ident(node->mode);
422 ir_op *(get_irn_op)(const ir_node *node) {
423 return _get_irn_op(node);
426 /* should be private to the library: */
427 void (set_irn_op)(ir_node *node, ir_op *op) {
428 _set_irn_op(node, op);
431 unsigned (get_irn_opcode)(const ir_node *node) {
432 return _get_irn_opcode(node);
/** Returns the opcode name of a node; Phi0 (0-ary Phi during construction)
    is reported specially. */
435 const char *get_irn_opname(const ir_node *node) {
437 if (is_Phi0(node)) return "Phi0";
438 return get_id_str(node->op->name);
/** Returns the opcode name as an ident. */
441 ident *get_irn_opident(const ir_node *node) {
443 return node->op->name;
446 ir_visited_t (get_irn_visited)(const ir_node *node) {
447 return _get_irn_visited(node);
450 void (set_irn_visited)(ir_node *node, ir_visited_t visited) {
451 _set_irn_visited(node, visited);
454 void (mark_irn_visited)(ir_node *node) {
455 _mark_irn_visited(node);
458 int (irn_visited)(const ir_node *node) {
459 return _irn_visited(node);
462 int (irn_visited_else_mark)(ir_node *node) {
463 return _irn_visited_else_mark(node);
466 void (set_irn_link)(ir_node *node, void *link) {
467 _set_irn_link(node, link);
470 void *(get_irn_link)(const ir_node *node) {
471 return _get_irn_link(node);
474 op_pin_state (get_irn_pinned)(const ir_node *node) {
475 return _get_irn_pinned(node);
478 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node) {
479 return _is_irn_pinned_in_irg(node);
/* Set the pin state of a node whose op allows exception pinning.
   (Some lines truncated in this excerpt.) */
482 void set_irn_pinned(ir_node *node, op_pin_state state) {
483 /* due to optimization an opt may be turned into a Tuple */
487 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
488 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
490 node->attr.except.pin_state = state;

493 /* Outputs a unique number for this node */
494 long get_irn_node_nr(const ir_node *node) {
496 return node->node_nr;
499 const_attr *get_irn_const_attr(ir_node *node) {
500 assert(is_Const(node));
501 return &node->attr.con;
504 long get_irn_proj_attr(ir_node *node) {
505 /* BEWARE: check for true Proj node here, no Filter */
506 assert(node->op == op_Proj);
507 return node->attr.proj;
510 alloc_attr *get_irn_alloc_attr(ir_node *node) {
511 assert(is_Alloc(node));
512 return &node->attr.alloc;
515 free_attr *get_irn_free_attr(ir_node *node) {
516 assert(is_Free(node));
517 return &node->attr.free;
520 symconst_attr *get_irn_symconst_attr(ir_node *node) {
521 assert(is_SymConst(node));
522 return &node->attr.symc;
525 call_attr *get_irn_call_attr(ir_node *node) {
526 assert(is_Call(node));
527 return &node->attr.call;
530 sel_attr *get_irn_sel_attr(ir_node *node) {
531 assert(is_Sel(node));
532 return &node->attr.sel;
535 phi_attr *get_irn_phi_attr(ir_node *node) {
536 return &node->attr.phi;
539 block_attr *get_irn_block_attr(ir_node *node) {
540 assert(is_Block(node));
541 return &node->attr.block;
544 load_attr *get_irn_load_attr(ir_node *node) {
545 assert(is_Load(node));
546 return &node->attr.load;
549 store_attr *get_irn_store_attr(ir_node *node) {
550 assert(is_Store(node));
551 return &node->attr.store;
554 except_attr *get_irn_except_attr(ir_node *node) {
555 assert(node->op == op_Div || node->op == op_Quot ||
556 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
557 return &node->attr.except;
560 divmod_attr *get_irn_divmod_attr(ir_node *node) {
561 assert(node->op == op_Div || node->op == op_Quot ||
562 node->op == op_DivMod || node->op == op_Mod);
563 return &node->attr.divmod;
566 builtin_attr *get_irn_builtin_attr(ir_node *node) {
567 assert(is_Builtin(node));
568 return &node->attr.builtin;
571 void *(get_irn_generic_attr)(ir_node *node) {
572 assert(is_ir_node(node));
573 return _get_irn_generic_attr(node);
576 const void *(get_irn_generic_attr_const)(const ir_node *node) {
577 assert(is_ir_node(node));
578 return _get_irn_generic_attr_const(node);
581 unsigned (get_irn_idx)(const ir_node *node) {
582 assert(is_ir_node(node));
583 return _get_irn_idx(node);
/* Search for arg among the predecessors of node; presumably returns its
   position or -1 if absent — the return statements are truncated in this
   excerpt, confirm against the full source. */
586 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
588 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
589 if (get_irn_n(node, i) == arg)
595 /** manipulate fields of individual nodes **/
597 /* this works for all except Block */
598 ir_node *get_nodes_block(const ir_node *node) {
599 assert(node->op != op_Block);
600 return get_irn_n(node, -1);
603 void set_nodes_block(ir_node *node, ir_node *block) {
604 assert(node->op != op_Block);
605 set_irn_n(node, -1, block);
608 /* this works for all except Block */
609 ir_node *get_nodes_MacroBlock(const ir_node *node) {
610 assert(node->op != op_Block);
611 return get_Block_MacroBlock(get_irn_n(node, -1));
614 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
615 * from Start. If so returns frame type, else Null. */
616 ir_type *is_frame_pointer(const ir_node *n) {
617 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
618 ir_node *start = get_Proj_pred(n);
619 if (is_Start(start)) {
620 return get_irg_frame_type(get_irn_irg(start));
/* NOTE(review): the fall-through NULL return is truncated in this excerpt. */

626 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
627 * from Start. If so returns tls type, else Null. */
628 ir_type *is_tls_pointer(const ir_node *n) {
629 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
630 ir_node *start = get_Proj_pred(n);
631 if (is_Start(start)) {
632 return get_tls_type();
638 ir_node **get_Block_cfgpred_arr(ir_node *node) {
639 assert(is_Block(node));
640 return (ir_node **)&(get_irn_in(node)[1]);
643 int (get_Block_n_cfgpreds)(const ir_node *node) {
644 return _get_Block_n_cfgpreds(node);
647 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos) {
648 return _get_Block_cfgpred(node, pos);
651 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
652 assert(is_Block(node));
653 set_irn_n(node, pos, pred);
/* Find the position of control-flow predecessor block `pred` among the
   cfg predecessors of `block` (note: compares predecessor BLOCKS, not the
   jump nodes). Return statements truncated in this excerpt. */
656 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred) {
659 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
660 if (get_Block_cfgpred_block(block, i) == pred)
666 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos) {
667 return _get_Block_cfgpred_block(node, pos);
670 int get_Block_matured(const ir_node *node) {
671 assert(is_Block(node));
672 return (int)node->attr.block.is_matured;
675 void set_Block_matured(ir_node *node, int matured) {
676 assert(is_Block(node));
677 node->attr.block.is_matured = matured;
680 ir_visited_t (get_Block_block_visited)(const ir_node *node) {
681 return _get_Block_block_visited(node);
684 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit) {
685 _set_Block_block_visited(node, visit);
688 /* For this current_ir_graph must be set. */
689 void (mark_Block_block_visited)(ir_node *node) {
690 _mark_Block_block_visited(node);
693 int (Block_block_visited)(const ir_node *node) {
694 return _Block_block_visited(node);
697 ir_node *get_Block_graph_arr(ir_node *node, int pos) {
698 assert(is_Block(node));
699 return node->attr.block.graph_arr[pos+1];
702 void set_Block_graph_arr(ir_node *node, int pos, ir_node *value) {
703 assert(is_Block(node));
704 node->attr.block.graph_arr[pos+1] = value;
707 #ifdef INTERPROCEDURAL_VIEW
/* Install the interprocedural cfg predecessor array of a Block.
   (Some lines truncated in this excerpt.) */
708 void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[]) {
709 assert(is_Block(node));
/* reallocate only when the arity changed; slot 0 mirrors the block slot */
710 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
711 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
712 node->attr.block.in_cg[0] = NULL;
713 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
715 /* Fix backedge array. fix_backedges() operates depending on
716 interprocedural_view. */
717 int ipv = get_interprocedural_view();
718 set_interprocedural_view(1);
719 fix_backedges(current_ir_graph->obst, node);
720 set_interprocedural_view(ipv);
723 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
726 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred) {
727 assert(is_Block(node) && node->attr.block.in_cg &&
728 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
729 node->attr.block.in_cg[pos + 1] = pred;
732 ir_node **get_Block_cg_cfgpred_arr(ir_node *node) {
733 assert(is_Block(node));
734 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
737 int get_Block_cg_n_cfgpreds(const ir_node *node) {
738 assert(is_Block(node));
739 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
742 ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos) {
743 assert(is_Block(node) && node->attr.block.in_cg);
744 return node->attr.block.in_cg[pos + 1];
747 void remove_Block_cg_cfgpred_arr(ir_node *node) {
748 assert(is_Block(node));
749 node->attr.block.in_cg = NULL;
751 #endif /* INTERPROCEDURAL_VIEW */
753 ir_node *(set_Block_dead)(ir_node *block) {
754 return _set_Block_dead(block);
757 int (is_Block_dead)(const ir_node *block) {
758 return _is_Block_dead(block);
/* Extended-basic-block accessors (return statement of the getter is
   truncated in this excerpt). */
761 ir_extblk *get_Block_extbb(const ir_node *block) {
763 assert(is_Block(block));
764 res = block->attr.block.extblk;
765 assert(res == NULL || is_ir_extbb(res));

769 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
770 assert(is_Block(block));
771 assert(extblk == NULL || is_ir_extbb(extblk));
772 block->attr.block.extblk = extblk;
775 /* Returns the macro block header of a block.*/
776 ir_node *get_Block_MacroBlock(const ir_node *block) {
778 assert(is_Block(block));
/* the macro block header is stored in the block slot (pred -1) */
779 mbh = get_irn_n(block, -1);
780 /* once macro block header is respected by all optimizations,
781 this assert can be removed */
/* NOTE(review): the assert and return are truncated in this excerpt. */

786 /* Sets the macro block header of a block. */
787 void set_Block_MacroBlock(ir_node *block, ir_node *mbh) {
788 assert(is_Block(block));
790 assert(is_Block(mbh));
791 set_irn_n(block, -1, mbh);

794 /* returns the macro block header of a node. */
795 ir_node *get_irn_MacroBlock(const ir_node *n) {
797 n = get_nodes_block(n);
798 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
802 return get_Block_MacroBlock(n);
805 /* returns the graph of a Block. */
806 ir_graph *(get_Block_irg)(const ir_node *block) {
807 return _get_Block_irg(block);
/* Return the label entity of a Block, creating a compiler-generated one
   in the global type on first request. (Declarations and the return are
   truncated in this excerpt.) */
810 ir_entity *create_Block_entity(ir_node *block) {
812 assert(is_Block(block));
814 entity = block->attr.block.entity;
815 if (entity == NULL) {
819 glob = get_glob_type();
820 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
821 nr = get_irp_next_label_nr();
822 set_entity_label(entity, nr);
823 set_entity_compiler_generated(entity, 1);
824 set_entity_allocation(entity, allocation_static);
/* cache the entity on the block so subsequent calls return the same one */
826 block->attr.block.entity = entity;
831 ir_entity *get_Block_entity(const ir_node *block) {
832 assert(is_Block(block));
833 return block->attr.block.entity;
836 void set_Block_entity(ir_node *block, ir_entity *entity)
838 assert(is_Block(block));
839 assert(get_entity_type(entity) == get_code_type());
840 block->attr.block.entity = entity;
843 int has_Block_entity(const ir_node *block)
845 return block->attr.block.entity != NULL;
848 ir_node *(get_Block_phis)(const ir_node *block) {
849 return _get_Block_phis(block);
852 void (set_Block_phis)(ir_node *block, ir_node *phi) {
853 _set_Block_phis(block, phi);
856 void (add_Block_phi)(ir_node *block, ir_node *phi) {
857 _add_Block_phi(block, phi);
860 /* Get the Block mark (single bit). */
861 unsigned (get_Block_mark)(const ir_node *block) {
862 return _get_Block_mark(block);
865 /* Set the Block mark (single bit). */
866 void (set_Block_mark)(ir_node *block, unsigned mark) {
867 _set_Block_mark(block, mark);
/* --- keep-alive edges of the End node ------------------------------- */
870 int get_End_n_keepalives(const ir_node *end) {
872 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);

875 ir_node *get_End_keepalive(const ir_node *end, int pos) {
877 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);

/* body truncated in this excerpt */
880 void add_End_keepalive(ir_node *end, ir_node *ka) {

885 void set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
887 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);

890 /* Set new keep-alives */
891 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
893 ir_graph *irg = get_irn_irg(end);
895 /* notify that edges are deleted */
896 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
897 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
/* resize to the new keep-alive count (+1 for the block slot) */
899 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
901 for (i = 0; i < n; ++i) {
902 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
903 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);

907 /* Set new keep-alives from old keep-alives, skipping irn */
908 void remove_End_keepalive(ir_node *end, ir_node *irn) {
909 int n = get_End_n_keepalives(end);
/* search irn from the back (some lines truncated in this excerpt) */
914 for (i = n -1; i >= 0; --i) {
915 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
925 irg = get_irn_irg(end);
927 /* remove the edge */
928 edges_notify_edge(end, idx, NULL, irn, irg);
931 /* exchange with the last one */
932 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
933 edges_notify_edge(end, n - 1, NULL, old, irg);
934 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
935 edges_notify_edge(end, idx, old, NULL, irg);
937 /* now n - 1 keeps, 1 block input */
938 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);

941 /* remove Bads, NoMems and doublets from the keep-alive set */
942 void remove_End_Bads_and_doublets(ir_node *end) {
944 int idx, n = get_End_n_keepalives(end);
950 irg = get_irn_irg(end);
951 pset_new_init(&keeps);
/* walk backwards so swap-removal does not disturb unvisited entries */
953 for (idx = n - 1; idx >= 0; --idx) {
954 ir_node *ka = get_End_keepalive(end, idx);
956 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
957 /* remove the edge */
958 edges_notify_edge(end, idx, NULL, ka, irg);
961 /* exchange with the last one */
962 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
963 edges_notify_edge(end, n - 1, NULL, old, irg);
964 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
965 edges_notify_edge(end, idx, old, NULL, irg);
969 pset_new_insert(&keeps, ka);
972 /* n keeps, 1 block input */
973 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
975 pset_new_destroy(&keeps);

/* free the End node's predecessor array (body partially truncated) */
978 void free_End(ir_node *end) {
982 end->in = NULL; /* @@@ make sure we get an error if we use the
983 in array afterwards ... */
986 /* Return the target address of an IJmp */
987 ir_node *get_IJmp_target(const ir_node *ijmp) {
988 assert(is_IJmp(ijmp));
989 return get_irn_n(ijmp, 0);
992 /** Sets the target address of an IJmp */
993 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
994 assert(is_IJmp(ijmp));
995 set_irn_n(ijmp, 0, tgt);
/* --- Cond accessors (return-type lines of several signatures are
   truncated in this excerpt) --- */
999 get_Cond_selector(const ir_node *node) {
1000 assert(is_Cond(node));
1001 return get_irn_n(node, 0);

1005 set_Cond_selector(ir_node *node, ir_node *selector) {
1006 assert(is_Cond(node));
1007 set_irn_n(node, 0, selector);

1011 get_Cond_default_proj(const ir_node *node) {
1012 assert(is_Cond(node));
1013 return node->attr.cond.default_proj;

1016 void set_Cond_default_proj(ir_node *node, long defproj) {
1017 assert(is_Cond(node));
1018 node->attr.cond.default_proj = defproj;

/* --- Return accessors: pred 0 is memory, results follow at
   RETURN_RESULT_OFFSET --- */
1022 get_Return_mem(const ir_node *node) {
1023 assert(is_Return(node));
1024 return get_irn_n(node, 0);

1028 set_Return_mem(ir_node *node, ir_node *mem) {
1029 assert(is_Return(node));
1030 set_irn_n(node, 0, mem);

1034 get_Return_n_ress(const ir_node *node) {
1035 assert(is_Return(node));
1036 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);

1040 get_Return_res_arr(ir_node *node) {
1041 assert(is_Return(node));
1042 if (get_Return_n_ress(node) > 0)
1043 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);

/* body truncated in this excerpt */
1050 set_Return_n_res(ir_node *node, int results) {
1051 assert(is_Return(node));

1056 get_Return_res(const ir_node *node, int pos) {
1057 assert(is_Return(node));
1058 assert(get_Return_n_ress(node) > pos);
1059 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);

1063 set_Return_res(ir_node *node, int pos, ir_node *res){
1064 assert(is_Return(node));
1065 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
/* --- Const accessors --- */
1068 tarval *(get_Const_tarval)(const ir_node *node) {
1069 return _get_Const_tarval(node);

1073 set_Const_tarval(ir_node *node, tarval *con) {
1074 assert(is_Const(node));
1075 node->attr.con.tv = con;

1078 int (is_Const_null)(const ir_node *node) {
1079 return _is_Const_null(node);

1082 int (is_Const_one)(const ir_node *node) {
1083 return _is_Const_one(node);

1086 int (is_Const_all_one)(const ir_node *node) {
1087 return _is_Const_all_one(node);

1091 /* The source language type. Must be an atomic type. Mode of type must
1092 be mode of node. For tarvals from entities type must be pointer to
   entity type. */
1095 get_Const_type(ir_node *node) {
1096 assert(is_Const(node));
1097 return node->attr.con.tp;

1101 set_Const_type(ir_node *node, ir_type *tp) {
1102 assert(is_Const(node));
/* the unknown type is always accepted; otherwise the type's mode must match */
1103 if (tp != firm_unknown_type) {
1104 assert(is_atomic_type(tp));
1105 assert(get_type_mode(tp) == get_irn_mode(node));
1107 node->attr.con.tp = tp;
/* --- SymConst accessors: the symbol union's active member depends on
   the symconst kind, checked via SYMCONST_HAS_* --- */
1112 get_SymConst_kind(const ir_node *node) {
1113 assert(is_SymConst(node));
1114 return node->attr.symc.kind;

1118 set_SymConst_kind(ir_node *node, symconst_kind kind) {
1119 assert(is_SymConst(node));
1120 node->attr.symc.kind = kind;

1124 get_SymConst_type(const ir_node *node) {
1125 /* the cast here is annoying, but we have to compensate for
   the const qualifier of the parameter */
1127 ir_node *irn = (ir_node *)node;
1128 assert(is_SymConst(node) &&
1129 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1130 return irn->attr.symc.sym.type_p;

1134 set_SymConst_type(ir_node *node, ir_type *tp) {
1135 assert(is_SymConst(node) &&
1136 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1137 node->attr.symc.sym.type_p = tp;

1141 get_SymConst_name(const ir_node *node) {
1142 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1143 return node->attr.symc.sym.ident_p;

1147 set_SymConst_name(ir_node *node, ident *name) {
1148 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1149 node->attr.symc.sym.ident_p = name;

1153 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1154 ir_entity *get_SymConst_entity(const ir_node *node) {
1155 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1156 return node->attr.symc.sym.entity_p;

1159 void set_SymConst_entity(ir_node *node, ir_entity *ent) {
1160 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1161 node->attr.symc.sym.entity_p = ent;

1164 ir_enum_const *get_SymConst_enum(const ir_node *node) {
1165 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1166 return node->attr.symc.sym.enum_p;

1169 void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
1170 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1171 node->attr.symc.sym.enum_p = ec;

1174 union symconst_symbol
1175 get_SymConst_symbol(const ir_node *node) {
1176 assert(is_SymConst(node));
1177 return node->attr.symc.sym;

1181 set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
1182 assert(is_SymConst(node));
1183 node->attr.symc.sym = sym;

1187 get_SymConst_value_type(ir_node *node) {
1188 assert(is_SymConst(node));
1189 return node->attr.symc.tp;

1193 set_SymConst_value_type(ir_node *node, ir_type *tp) {
1194 assert(is_SymConst(node));
1195 node->attr.symc.tp = tp;
/* --- Sel accessors: pred 0 = memory, pred 1 = pointer, indices follow
   at SEL_INDEX_OFFSET --- */
1199 get_Sel_mem(const ir_node *node) {
1200 assert(is_Sel(node));
1201 return get_irn_n(node, 0);

1205 set_Sel_mem(ir_node *node, ir_node *mem) {
1206 assert(is_Sel(node));
1207 set_irn_n(node, 0, mem);

1211 get_Sel_ptr(const ir_node *node) {
1212 assert(is_Sel(node));
1213 return get_irn_n(node, 1);

1217 set_Sel_ptr(ir_node *node, ir_node *ptr) {
1218 assert(is_Sel(node));
1219 set_irn_n(node, 1, ptr);

1223 get_Sel_n_indexs(const ir_node *node) {
1224 assert(is_Sel(node));
1225 return (get_irn_arity(node) - SEL_INDEX_OFFSET);

1229 get_Sel_index_arr(ir_node *node) {
1230 assert(is_Sel(node));
1231 if (get_Sel_n_indexs(node) > 0)
1232 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];

1238 get_Sel_index(const ir_node *node, int pos) {
1239 assert(is_Sel(node));
1240 return get_irn_n(node, pos + SEL_INDEX_OFFSET);

1244 set_Sel_index(ir_node *node, int pos, ir_node *index) {
1245 assert(is_Sel(node));
1246 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);

1250 get_Sel_entity(const ir_node *node) {
1251 assert(is_Sel(node));
1252 return node->attr.sel.entity;

1255 /* need a version without const to prevent warning */
1256 static ir_entity *_get_Sel_entity(ir_node *node) {
1257 return get_Sel_entity(node);

1261 set_Sel_entity(ir_node *node, ir_entity *ent) {
1262 assert(is_Sel(node));
1263 node->attr.sel.entity = ent;

/*
1267 /* For unary and binary arithmetic operations the access to the
1268 operands can be factored out. Left is the first, right the
1269 second arithmetic value as listed in tech report 0999-33.
1270 unops are: Minus, Abs, Not, Conv, Cast
1271 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1272 Shr, Shrs, Rotate, Cmp */
/* --- Call accessors: pred 0 = memory, pred 1 = callee pointer,
   parameters follow at CALL_PARAM_OFFSET --- */
1276 get_Call_mem(const ir_node *node) {
1277 assert(is_Call(node));
1278 return get_irn_n(node, 0);

1282 set_Call_mem(ir_node *node, ir_node *mem) {
1283 assert(is_Call(node));
1284 set_irn_n(node, 0, mem);

1288 get_Call_ptr(const ir_node *node) {
1289 assert(is_Call(node));
1290 return get_irn_n(node, 1);

1294 set_Call_ptr(ir_node *node, ir_node *ptr) {
1295 assert(is_Call(node));
1296 set_irn_n(node, 1, ptr);

1300 get_Call_param_arr(ir_node *node) {
1301 assert(is_Call(node));
1302 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];

1306 get_Call_n_params(const ir_node *node) {
1307 assert(is_Call(node));
1308 return (get_irn_arity(node) - CALL_PARAM_OFFSET);

1312 get_Call_param(const ir_node *node, int pos) {
1313 assert(is_Call(node));
1314 return get_irn_n(node, pos + CALL_PARAM_OFFSET);

1318 set_Call_param(ir_node *node, int pos, ir_node *param) {
1319 assert(is_Call(node));
1320 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);

1324 get_Call_type(ir_node *node) {
1325 assert(is_Call(node));
1326 return node->attr.call.type;

1330 set_Call_type(ir_node *node, ir_type *tp) {
1331 assert(is_Call(node));
1332 assert((get_unknown_type() == tp) || is_Method_type(tp));
1333 node->attr.call.type = tp;

1337 get_Call_tail_call(const ir_node *node) {
1338 assert(is_Call(node));
1339 return node->attr.call.tail_call;

1343 set_Call_tail_call(ir_node *node, unsigned tail_call) {
1344 assert(is_Call(node));
/* normalize to a single bit */
1345 node->attr.call.tail_call = tail_call != 0;
1349 get_Builtin_mem(const ir_node *node) {
1350 assert(is_Builtin(node));
1351 return get_irn_n(node, 0);
1355 set_Builin_mem(ir_node *node, ir_node *mem) {
1356 assert(is_Builtin(node));
1357 set_irn_n(node, 0, mem);
1361 get_Builtin_kind(const ir_node *node) {
1362 assert(is_Builtin(node));
1363 return node->attr.builtin.kind;
1367 set_Builtin_kind(ir_node *node, ir_builtin_kind kind) {
1368 assert(is_Builtin(node));
1369 node->attr.builtin.kind = kind;
1373 get_Builtin_param_arr(ir_node *node) {
1374 assert(is_Builtin(node));
1375 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1379 get_Builtin_n_params(const ir_node *node) {
1380 assert(is_Builtin(node));
1381 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1385 get_Builtin_param(const ir_node *node, int pos) {
1386 assert(is_Builtin(node));
1387 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1391 set_Builtin_param(ir_node *node, int pos, ir_node *param) {
1392 assert(is_Builtin(node));
1393 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1397 get_Builtin_type(ir_node *node) {
1398 assert(is_Builtin(node));
1399 return node->attr.builtin.type;
1403 set_Builtin_type(ir_node *node, ir_type *tp) {
1404 assert(is_Builtin(node));
1405 assert((get_unknown_type() == tp) || is_Method_type(tp));
1406 node->attr.builtin.type = tp;
1409 /* Returns a human readable string for the ir_builtin_kind. */
1410 const char *get_builtin_kind_name(ir_builtin_kind kind) {
1411 #define X(a) case a: return #a;
1414 X(ir_bk_debugbreak);
1415 X(ir_bk_return_address);
1416 X(ir_bk_frame_address);
1426 X(ir_bk_inner_trampoline);
1433 int Call_has_callees(const ir_node *node) {
1434 assert(is_Call(node));
1435 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1436 (node->attr.call.callee_arr != NULL));
1439 int get_Call_n_callees(const ir_node *node) {
1440 assert(is_Call(node) && node->attr.call.callee_arr);
1441 return ARR_LEN(node->attr.call.callee_arr);
1444 ir_entity *get_Call_callee(const ir_node *node, int pos) {
1445 assert(pos >= 0 && pos < get_Call_n_callees(node));
1446 return node->attr.call.callee_arr[pos];
1449 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr) {
1450 assert(is_Call(node));
1451 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1452 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1454 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1457 void remove_Call_callee_arr(ir_node *node) {
1458 assert(is_Call(node));
1459 node->attr.call.callee_arr = NULL;
1462 ir_node *get_CallBegin_ptr(const ir_node *node) {
1463 assert(is_CallBegin(node));
1464 return get_irn_n(node, 0);
1467 void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
1468 assert(is_CallBegin(node));
1469 set_irn_n(node, 0, ptr);
1472 ir_node *get_CallBegin_call(const ir_node *node) {
1473 assert(is_CallBegin(node));
1474 return node->attr.callbegin.call;
1477 void set_CallBegin_call(ir_node *node, ir_node *call) {
1478 assert(is_CallBegin(node));
1479 node->attr.callbegin.call = call;
1483 * Returns non-zero if a Call is surely a self-recursive Call.
1484 * Beware: if this functions returns 0, the call might be self-recursive!
1486 int is_self_recursive_Call(const ir_node *call) {
1487 const ir_node *callee = get_Call_ptr(call);
1489 if (is_SymConst_addr_ent(callee)) {
1490 const ir_entity *ent = get_SymConst_entity(callee);
1491 const ir_graph *irg = get_entity_irg(ent);
1492 if (irg == get_irn_irg(call))
/* Generates left/right accessors for a binary operation OP.
 * op_index is the position of the first data operand. */
#define BINOP(OP)                                               \
ir_node * get_##OP##_left(const ir_node *node) {                \
	assert(is_##OP(node));                                      \
	return get_irn_n(node, node->op->op_index);                 \
}                                                               \
void set_##OP##_left(ir_node *node, ir_node *left) {            \
	assert(is_##OP(node));                                      \
	set_irn_n(node, node->op->op_index, left);                  \
}                                                               \
ir_node *get_##OP##_right(const ir_node *node) {                \
	assert(is_##OP(node));                                      \
	return get_irn_n(node, node->op->op_index + 1);             \
}                                                               \
void set_##OP##_right(ir_node *node, ir_node *right) {          \
	assert(is_##OP(node));                                      \
	set_irn_n(node, node->op->op_index + 1, right);             \
}

/* Generates the single-operand accessors for a unary operation OP. */
#define UNOP(OP)                                                \
ir_node *get_##OP##_op(const ir_node *node) {                   \
	assert(is_##OP(node));                                      \
	return get_irn_n(node, node->op->op_index);                 \
}                                                               \
void set_##OP##_op(ir_node *node, ir_node *op) {                \
	assert(is_##OP(node));                                      \
	set_irn_n(node, node->op->op_index, op);                    \
}

/* Binary operation with a memory input at in 0. */
#define BINOP_MEM(OP)                                           \
BINOP(OP)                                                       \
                                                                \
ir_node *                                                       \
get_##OP##_mem(const ir_node *node) {                           \
	assert(is_##OP(node));                                      \
	return get_irn_n(node, 0);                                  \
}                                                               \
                                                                \
void                                                            \
set_##OP##_mem(ir_node *node, ir_node *mem) {                   \
	assert(is_##OP(node));                                      \
	set_irn_n(node, 0, mem);                                    \
}

/* Division-like operation: memory input plus a result mode attribute. */
#define DIVOP(OP)                                               \
BINOP_MEM(OP)                                                   \
                                                                \
ir_mode *get_##OP##_resmode(const ir_node *node) {              \
	assert(is_##OP(node));                                      \
	return node->attr.divmod.resmode;                           \
}                                                               \
                                                                \
void set_##OP##_resmode(ir_node *node, ir_mode *mode) {         \
	assert(is_##OP(node));                                      \
	node->attr.divmod.resmode = mode;                           \
}
1579 int get_Div_no_remainder(const ir_node *node) {
1580 assert(is_Div(node));
1581 return node->attr.divmod.no_remainder;
1584 void set_Div_no_remainder(ir_node *node, int no_remainder) {
1585 assert(is_Div(node));
1586 node->attr.divmod.no_remainder = no_remainder;
1589 int get_Conv_strict(const ir_node *node) {
1590 assert(is_Conv(node));
1591 return node->attr.conv.strict;
1594 void set_Conv_strict(ir_node *node, int strict_flag) {
1595 assert(is_Conv(node));
1596 node->attr.conv.strict = (char)strict_flag;
1600 get_Cast_type(ir_node *node) {
1601 assert(is_Cast(node));
1602 return node->attr.cast.type;
1606 set_Cast_type(ir_node *node, ir_type *to_tp) {
1607 assert(is_Cast(node));
1608 node->attr.cast.type = to_tp;
1612 /* Checks for upcast.
1614 * Returns true if the Cast node casts a class type to a super type.
1616 int is_Cast_upcast(ir_node *node) {
1617 ir_type *totype = get_Cast_type(node);
1618 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1620 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1623 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1624 totype = get_pointer_points_to_type(totype);
1625 fromtype = get_pointer_points_to_type(fromtype);
1630 if (!is_Class_type(totype)) return 0;
1631 return is_SubClass_of(fromtype, totype);
1634 /* Checks for downcast.
1636 * Returns true if the Cast node casts a class type to a sub type.
1638 int is_Cast_downcast(ir_node *node) {
1639 ir_type *totype = get_Cast_type(node);
1640 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1642 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1645 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1646 totype = get_pointer_points_to_type(totype);
1647 fromtype = get_pointer_points_to_type(fromtype);
1652 if (!is_Class_type(totype)) return 0;
1653 return is_SubClass_of(totype, fromtype);
1657 (is_unop)(const ir_node *node) {
1658 return _is_unop(node);
1662 get_unop_op(const ir_node *node) {
1663 if (node->op->opar == oparity_unary)
1664 return get_irn_n(node, node->op->op_index);
1666 assert(node->op->opar == oparity_unary);
1671 set_unop_op(ir_node *node, ir_node *op) {
1672 if (node->op->opar == oparity_unary)
1673 set_irn_n(node, node->op->op_index, op);
1675 assert(node->op->opar == oparity_unary);
1679 (is_binop)(const ir_node *node) {
1680 return _is_binop(node);
1684 get_binop_left(const ir_node *node) {
1685 assert(node->op->opar == oparity_binary);
1686 return get_irn_n(node, node->op->op_index);
1690 set_binop_left(ir_node *node, ir_node *left) {
1691 assert(node->op->opar == oparity_binary);
1692 set_irn_n(node, node->op->op_index, left);
1696 get_binop_right(const ir_node *node) {
1697 assert(node->op->opar == oparity_binary);
1698 return get_irn_n(node, node->op->op_index + 1);
1702 set_binop_right(ir_node *node, ir_node *right) {
1703 assert(node->op->opar == oparity_binary);
1704 set_irn_n(node, node->op->op_index + 1, right);
1707 int is_Phi0(const ir_node *n) {
1710 return ((get_irn_op(n) == op_Phi) &&
1711 (get_irn_arity(n) == 0) &&
1712 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1716 get_Phi_preds_arr(ir_node *node) {
1717 assert(node->op == op_Phi);
1718 return (ir_node **)&(get_irn_in(node)[1]);
1722 get_Phi_n_preds(const ir_node *node) {
1723 assert(is_Phi(node) || is_Phi0(node));
1724 return (get_irn_arity(node));
1728 void set_Phi_n_preds(ir_node *node, int n_preds) {
1729 assert(node->op == op_Phi);
1734 get_Phi_pred(const ir_node *node, int pos) {
1735 assert(is_Phi(node) || is_Phi0(node));
1736 return get_irn_n(node, pos);
1740 set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
1741 assert(is_Phi(node) || is_Phi0(node));
1742 set_irn_n(node, pos, pred);
1745 ir_node *(get_Phi_next)(const ir_node *phi) {
1746 return _get_Phi_next(phi);
1749 void (set_Phi_next)(ir_node *phi, ir_node *next) {
1750 _set_Phi_next(phi, next);
1753 int is_memop(const ir_node *node) {
1754 ir_opcode code = get_irn_opcode(node);
1755 return (code == iro_Load || code == iro_Store);
1758 ir_node *get_memop_mem(const ir_node *node) {
1759 assert(is_memop(node));
1760 return get_irn_n(node, 0);
1763 void set_memop_mem(ir_node *node, ir_node *mem) {
1764 assert(is_memop(node));
1765 set_irn_n(node, 0, mem);
1768 ir_node *get_memop_ptr(const ir_node *node) {
1769 assert(is_memop(node));
1770 return get_irn_n(node, 1);
1773 void set_memop_ptr(ir_node *node, ir_node *ptr) {
1774 assert(is_memop(node));
1775 set_irn_n(node, 1, ptr);
1779 get_Load_mem(const ir_node *node) {
1780 assert(is_Load(node));
1781 return get_irn_n(node, 0);
1785 set_Load_mem(ir_node *node, ir_node *mem) {
1786 assert(is_Load(node));
1787 set_irn_n(node, 0, mem);
1791 get_Load_ptr(const ir_node *node) {
1792 assert(is_Load(node));
1793 return get_irn_n(node, 1);
1797 set_Load_ptr(ir_node *node, ir_node *ptr) {
1798 assert(is_Load(node));
1799 set_irn_n(node, 1, ptr);
1803 get_Load_mode(const ir_node *node) {
1804 assert(is_Load(node));
1805 return node->attr.load.mode;
1809 set_Load_mode(ir_node *node, ir_mode *mode) {
1810 assert(is_Load(node));
1811 node->attr.load.mode = mode;
1815 get_Load_volatility(const ir_node *node) {
1816 assert(is_Load(node));
1817 return node->attr.load.volatility;
1821 set_Load_volatility(ir_node *node, ir_volatility volatility) {
1822 assert(is_Load(node));
1823 node->attr.load.volatility = volatility;
1827 get_Load_align(const ir_node *node) {
1828 assert(is_Load(node));
1829 return node->attr.load.aligned;
1833 set_Load_align(ir_node *node, ir_align align) {
1834 assert(is_Load(node));
1835 node->attr.load.aligned = align;
1840 get_Store_mem(const ir_node *node) {
1841 assert(is_Store(node));
1842 return get_irn_n(node, 0);
1846 set_Store_mem(ir_node *node, ir_node *mem) {
1847 assert(is_Store(node));
1848 set_irn_n(node, 0, mem);
1852 get_Store_ptr(const ir_node *node) {
1853 assert(is_Store(node));
1854 return get_irn_n(node, 1);
1858 set_Store_ptr(ir_node *node, ir_node *ptr) {
1859 assert(is_Store(node));
1860 set_irn_n(node, 1, ptr);
1864 get_Store_value(const ir_node *node) {
1865 assert(is_Store(node));
1866 return get_irn_n(node, 2);
1870 set_Store_value(ir_node *node, ir_node *value) {
1871 assert(is_Store(node));
1872 set_irn_n(node, 2, value);
1876 get_Store_volatility(const ir_node *node) {
1877 assert(is_Store(node));
1878 return node->attr.store.volatility;
1882 set_Store_volatility(ir_node *node, ir_volatility volatility) {
1883 assert(is_Store(node));
1884 node->attr.store.volatility = volatility;
1888 get_Store_align(const ir_node *node) {
1889 assert(is_Store(node));
1890 return node->attr.store.aligned;
1894 set_Store_align(ir_node *node, ir_align align) {
1895 assert(is_Store(node));
1896 node->attr.store.aligned = align;
1901 get_Alloc_mem(const ir_node *node) {
1902 assert(is_Alloc(node));
1903 return get_irn_n(node, 0);
1907 set_Alloc_mem(ir_node *node, ir_node *mem) {
1908 assert(is_Alloc(node));
1909 set_irn_n(node, 0, mem);
1913 get_Alloc_size(const ir_node *node) {
1914 assert(is_Alloc(node));
1915 return get_irn_n(node, 1);
1919 set_Alloc_size(ir_node *node, ir_node *size) {
1920 assert(is_Alloc(node));
1921 set_irn_n(node, 1, size);
1925 get_Alloc_type(ir_node *node) {
1926 assert(is_Alloc(node));
1927 return node->attr.alloc.type;
1931 set_Alloc_type(ir_node *node, ir_type *tp) {
1932 assert(is_Alloc(node));
1933 node->attr.alloc.type = tp;
1937 get_Alloc_where(const ir_node *node) {
1938 assert(is_Alloc(node));
1939 return node->attr.alloc.where;
1943 set_Alloc_where(ir_node *node, ir_where_alloc where) {
1944 assert(is_Alloc(node));
1945 node->attr.alloc.where = where;
1950 get_Free_mem(const ir_node *node) {
1951 assert(is_Free(node));
1952 return get_irn_n(node, 0);
1956 set_Free_mem(ir_node *node, ir_node *mem) {
1957 assert(is_Free(node));
1958 set_irn_n(node, 0, mem);
1962 get_Free_ptr(const ir_node *node) {
1963 assert(is_Free(node));
1964 return get_irn_n(node, 1);
1968 set_Free_ptr(ir_node *node, ir_node *ptr) {
1969 assert(is_Free(node));
1970 set_irn_n(node, 1, ptr);
1974 get_Free_size(const ir_node *node) {
1975 assert(is_Free(node));
1976 return get_irn_n(node, 2);
1980 set_Free_size(ir_node *node, ir_node *size) {
1981 assert(is_Free(node));
1982 set_irn_n(node, 2, size);
1986 get_Free_type(ir_node *node) {
1987 assert(is_Free(node));
1988 return node->attr.free.type;
1992 set_Free_type(ir_node *node, ir_type *tp) {
1993 assert(is_Free(node));
1994 node->attr.free.type = tp;
1998 get_Free_where(const ir_node *node) {
1999 assert(is_Free(node));
2000 return node->attr.free.where;
2004 set_Free_where(ir_node *node, ir_where_alloc where) {
2005 assert(is_Free(node));
2006 node->attr.free.where = where;
2009 ir_node **get_Sync_preds_arr(ir_node *node) {
2010 assert(is_Sync(node));
2011 return (ir_node **)&(get_irn_in(node)[1]);
2014 int get_Sync_n_preds(const ir_node *node) {
2015 assert(is_Sync(node));
2016 return (get_irn_arity(node));
2020 void set_Sync_n_preds(ir_node *node, int n_preds) {
2021 assert(is_Sync(node));
2025 ir_node *get_Sync_pred(const ir_node *node, int pos) {
2026 assert(is_Sync(node));
2027 return get_irn_n(node, pos);
2030 void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
2031 assert(is_Sync(node));
2032 set_irn_n(node, pos, pred);
2035 /* Add a new Sync predecessor */
2036 void add_Sync_pred(ir_node *node, ir_node *pred) {
2037 assert(is_Sync(node));
2038 add_irn_n(node, pred);
2041 /* Returns the source language type of a Proj node. */
2042 ir_type *get_Proj_type(ir_node *n) {
2043 ir_type *tp = firm_unknown_type;
2044 ir_node *pred = get_Proj_pred(n);
2046 switch (get_irn_opcode(pred)) {
2049 /* Deal with Start / Call here: we need to know the Proj Nr. */
2050 assert(get_irn_mode(pred) == mode_T);
2051 pred_pred = get_Proj_pred(pred);
2053 if (is_Start(pred_pred)) {
2054 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
2055 tp = get_method_param_type(mtp, get_Proj_proj(n));
2056 } else if (is_Call(pred_pred)) {
2057 ir_type *mtp = get_Call_type(pred_pred);
2058 tp = get_method_res_type(mtp, get_Proj_proj(n));
2061 case iro_Start: break;
2062 case iro_Call: break;
2064 ir_node *a = get_Load_ptr(pred);
2066 tp = get_entity_type(get_Sel_entity(a));
2075 get_Proj_pred(const ir_node *node) {
2076 assert(is_Proj(node));
2077 return get_irn_n(node, 0);
2081 set_Proj_pred(ir_node *node, ir_node *pred) {
2082 assert(is_Proj(node));
2083 set_irn_n(node, 0, pred);
2087 get_Proj_proj(const ir_node *node) {
2088 #ifdef INTERPROCEDURAL_VIEW
2089 ir_opcode code = get_irn_opcode(node);
2091 if (code == iro_Proj) {
2092 return node->attr.proj;
2095 assert(code == iro_Filter);
2096 return node->attr.filter.proj;
2099 assert(is_Proj(node));
2100 return node->attr.proj;
2101 #endif /* INTERPROCEDURAL_VIEW */
2105 set_Proj_proj(ir_node *node, long proj) {
2106 #ifdef INTERPROCEDURAL_VIEW
2107 ir_opcode code = get_irn_opcode(node);
2109 if (code == iro_Proj) {
2110 node->attr.proj = proj;
2113 assert(code == iro_Filter);
2114 node->attr.filter.proj = proj;
2117 assert(is_Proj(node));
2118 node->attr.proj = proj;
2119 #endif /* INTERPROCEDURAL_VIEW */
2122 /* Returns non-zero if a node is a routine parameter. */
2123 int (is_arg_Proj)(const ir_node *node) {
2124 return _is_arg_Proj(node);
2128 get_Tuple_preds_arr(ir_node *node) {
2129 assert(is_Tuple(node));
2130 return (ir_node **)&(get_irn_in(node)[1]);
2134 get_Tuple_n_preds(const ir_node *node) {
2135 assert(is_Tuple(node));
2136 return get_irn_arity(node);
2141 set_Tuple_n_preds(ir_node *node, int n_preds) {
2142 assert(is_Tuple(node));
2147 get_Tuple_pred(const ir_node *node, int pos) {
2148 assert(is_Tuple(node));
2149 return get_irn_n(node, pos);
2153 set_Tuple_pred(ir_node *node, int pos, ir_node *pred) {
2154 assert(is_Tuple(node));
2155 set_irn_n(node, pos, pred);
2159 get_Id_pred(const ir_node *node) {
2160 assert(is_Id(node));
2161 return get_irn_n(node, 0);
2165 set_Id_pred(ir_node *node, ir_node *pred) {
2166 assert(is_Id(node));
2167 set_irn_n(node, 0, pred);
2170 ir_node *get_Confirm_value(const ir_node *node) {
2171 assert(is_Confirm(node));
2172 return get_irn_n(node, 0);
2175 void set_Confirm_value(ir_node *node, ir_node *value) {
2176 assert(is_Confirm(node));
2177 set_irn_n(node, 0, value);
2180 ir_node *get_Confirm_bound(const ir_node *node) {
2181 assert(is_Confirm(node));
2182 return get_irn_n(node, 1);
2185 void set_Confirm_bound(ir_node *node, ir_node *bound) {
2186 assert(is_Confirm(node));
2187 set_irn_n(node, 0, bound);
2190 pn_Cmp get_Confirm_cmp(const ir_node *node) {
2191 assert(is_Confirm(node));
2192 return node->attr.confirm.cmp;
2195 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
2196 assert(is_Confirm(node));
2197 node->attr.confirm.cmp = cmp;
2201 get_Filter_pred(ir_node *node) {
2202 assert(is_Filter(node));
2207 set_Filter_pred(ir_node *node, ir_node *pred) {
2208 assert(is_Filter(node));
2213 get_Filter_proj(ir_node *node) {
2214 assert(is_Filter(node));
2215 return node->attr.filter.proj;
2219 set_Filter_proj(ir_node *node, long proj) {
2220 assert(is_Filter(node));
2221 node->attr.filter.proj = proj;
2224 /* Don't use get_irn_arity, get_irn_n in implementation as access
2225 shall work independent of view!!! */
2226 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in) {
2227 assert(is_Filter(node));
2228 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2229 ir_graph *irg = get_irn_irg(node);
2230 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2231 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
2232 node->attr.filter.in_cg[0] = node->in[0];
2234 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2237 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2238 assert(is_Filter(node) && node->attr.filter.in_cg &&
2239 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2240 node->attr.filter.in_cg[pos + 1] = pred;
2243 int get_Filter_n_cg_preds(ir_node *node) {
2244 assert(is_Filter(node) && node->attr.filter.in_cg);
2245 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2248 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2250 assert(is_Filter(node) && node->attr.filter.in_cg &&
2252 arity = ARR_LEN(node->attr.filter.in_cg);
2253 assert(pos < arity - 1);
2254 return node->attr.filter.in_cg[pos + 1];
2258 ir_node *get_Mux_sel(const ir_node *node) {
2259 assert(is_Mux(node));
2263 void set_Mux_sel(ir_node *node, ir_node *sel) {
2264 assert(is_Mux(node));
2268 ir_node *get_Mux_false(const ir_node *node) {
2269 assert(is_Mux(node));
2273 void set_Mux_false(ir_node *node, ir_node *ir_false) {
2274 assert(is_Mux(node));
2275 node->in[2] = ir_false;
2278 ir_node *get_Mux_true(const ir_node *node) {
2279 assert(is_Mux(node));
2283 void set_Mux_true(ir_node *node, ir_node *ir_true) {
2284 assert(is_Mux(node));
2285 node->in[3] = ir_true;
2289 ir_node *get_CopyB_mem(const ir_node *node) {
2290 assert(is_CopyB(node));
2291 return get_irn_n(node, 0);
2294 void set_CopyB_mem(ir_node *node, ir_node *mem) {
2295 assert(node->op == op_CopyB);
2296 set_irn_n(node, 0, mem);
2299 ir_node *get_CopyB_dst(const ir_node *node) {
2300 assert(is_CopyB(node));
2301 return get_irn_n(node, 1);
2304 void set_CopyB_dst(ir_node *node, ir_node *dst) {
2305 assert(is_CopyB(node));
2306 set_irn_n(node, 1, dst);
2309 ir_node *get_CopyB_src(const ir_node *node) {
2310 assert(is_CopyB(node));
2311 return get_irn_n(node, 2);
2314 void set_CopyB_src(ir_node *node, ir_node *src) {
2315 assert(is_CopyB(node));
2316 set_irn_n(node, 2, src);
2319 ir_type *get_CopyB_type(ir_node *node) {
2320 assert(is_CopyB(node));
2321 return node->attr.copyb.type;
2324 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2325 assert(is_CopyB(node) && data_type);
2326 node->attr.copyb.type = data_type;
2331 get_InstOf_type(ir_node *node) {
2332 assert(node->op == op_InstOf);
2333 return node->attr.instof.type;
2337 set_InstOf_type(ir_node *node, ir_type *type) {
2338 assert(node->op == op_InstOf);
2339 node->attr.instof.type = type;
2343 get_InstOf_store(const ir_node *node) {
2344 assert(node->op == op_InstOf);
2345 return get_irn_n(node, 0);
2349 set_InstOf_store(ir_node *node, ir_node *obj) {
2350 assert(node->op == op_InstOf);
2351 set_irn_n(node, 0, obj);
2355 get_InstOf_obj(const ir_node *node) {
2356 assert(node->op == op_InstOf);
2357 return get_irn_n(node, 1);
2361 set_InstOf_obj(ir_node *node, ir_node *obj) {
2362 assert(node->op == op_InstOf);
2363 set_irn_n(node, 1, obj);
2366 /* Returns the memory input of a Raise operation. */
2368 get_Raise_mem(const ir_node *node) {
2369 assert(is_Raise(node));
2370 return get_irn_n(node, 0);
2374 set_Raise_mem(ir_node *node, ir_node *mem) {
2375 assert(is_Raise(node));
2376 set_irn_n(node, 0, mem);
2380 get_Raise_exo_ptr(const ir_node *node) {
2381 assert(is_Raise(node));
2382 return get_irn_n(node, 1);
2386 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr) {
2387 assert(is_Raise(node));
2388 set_irn_n(node, 1, exo_ptr);
2393 /* Returns the memory input of a Bound operation. */
2394 ir_node *get_Bound_mem(const ir_node *bound) {
2395 assert(is_Bound(bound));
2396 return get_irn_n(bound, 0);
2399 void set_Bound_mem(ir_node *bound, ir_node *mem) {
2400 assert(is_Bound(bound));
2401 set_irn_n(bound, 0, mem);
2404 /* Returns the index input of a Bound operation. */
2405 ir_node *get_Bound_index(const ir_node *bound) {
2406 assert(is_Bound(bound));
2407 return get_irn_n(bound, 1);
2410 void set_Bound_index(ir_node *bound, ir_node *idx) {
2411 assert(is_Bound(bound));
2412 set_irn_n(bound, 1, idx);
2415 /* Returns the lower bound input of a Bound operation. */
2416 ir_node *get_Bound_lower(const ir_node *bound) {
2417 assert(is_Bound(bound));
2418 return get_irn_n(bound, 2);
2421 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2422 assert(is_Bound(bound));
2423 set_irn_n(bound, 2, lower);
2426 /* Returns the upper bound input of a Bound operation. */
2427 ir_node *get_Bound_upper(const ir_node *bound) {
2428 assert(is_Bound(bound));
2429 return get_irn_n(bound, 3);
2432 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2433 assert(is_Bound(bound));
2434 set_irn_n(bound, 3, upper);
2437 /* Return the operand of a Pin node. */
2438 ir_node *get_Pin_op(const ir_node *pin) {
2439 assert(is_Pin(pin));
2440 return get_irn_n(pin, 0);
2443 void set_Pin_op(ir_node *pin, ir_node *node) {
2444 assert(is_Pin(pin));
2445 set_irn_n(pin, 0, node);
2448 /* Return the assembler text of an ASM pseudo node. */
2449 ident *get_ASM_text(const ir_node *node) {
2450 assert(is_ASM(node));
2451 return node->attr.assem.asm_text;
2454 /* Return the number of input constraints for an ASM node. */
2455 int get_ASM_n_input_constraints(const ir_node *node) {
2456 assert(is_ASM(node));
2457 return ARR_LEN(node->attr.assem.inputs);
2460 /* Return the input constraints for an ASM node. This is a flexible array. */
2461 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node) {
2462 assert(is_ASM(node));
2463 return node->attr.assem.inputs;
2466 /* Return the number of output constraints for an ASM node. */
2467 int get_ASM_n_output_constraints(const ir_node *node) {
2468 assert(is_ASM(node));
2469 return ARR_LEN(node->attr.assem.outputs);
2472 /* Return the output constraints for an ASM node. */
2473 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node) {
2474 assert(is_ASM(node));
2475 return node->attr.assem.outputs;
2478 /* Return the number of clobbered registers for an ASM node. */
2479 int get_ASM_n_clobbers(const ir_node *node) {
2480 assert(is_ASM(node));
2481 return ARR_LEN(node->attr.assem.clobber);
2484 /* Return the list of clobbered registers for an ASM node. */
2485 ident **get_ASM_clobbers(const ir_node *node) {
2486 assert(is_ASM(node));
2487 return node->attr.assem.clobber;
2490 /* returns the graph of a node */
2492 get_irn_irg(const ir_node *node) {
2494 * Do not use get_nodes_Block() here, because this
2495 * will check the pinned state.
2496 * However even a 'wrong' block is always in the proper
2499 if (! is_Block(node))
2500 node = get_irn_n(node, -1);
2501 /* note that get_Block_irg() can handle Bad nodes */
2502 return get_Block_irg(node);
2506 /*----------------------------------------------------------------*/
2507 /* Auxiliary routines */
2508 /*----------------------------------------------------------------*/
2511 skip_Proj(ir_node *node) {
2512 /* don't assert node !!! */
2517 node = get_Proj_pred(node);
2523 skip_Proj_const(const ir_node *node) {
2524 /* don't assert node !!! */
2529 node = get_Proj_pred(node);
2535 skip_Tuple(ir_node *node) {
2540 if (is_Proj(node)) {
2541 pred = get_Proj_pred(node);
2542 op = get_irn_op(pred);
2545 * Looks strange but calls get_irn_op() only once
2546 * in most often cases.
2548 if (op == op_Proj) { /* nested Tuple ? */
2549 pred = skip_Tuple(pred);
2551 if (is_Tuple(pred)) {
2552 node = get_Tuple_pred(pred, get_Proj_proj(node));
2555 } else if (op == op_Tuple) {
2556 node = get_Tuple_pred(pred, get_Proj_proj(node));
2563 /* returns operand of node if node is a Cast */
2564 ir_node *skip_Cast(ir_node *node) {
2566 return get_Cast_op(node);
2570 /* returns operand of node if node is a Cast */
2571 const ir_node *skip_Cast_const(const ir_node *node) {
2573 return get_Cast_op(node);
2577 /* returns operand of node if node is a Pin */
2578 ir_node *skip_Pin(ir_node *node) {
2580 return get_Pin_op(node);
2584 /* returns operand of node if node is a Confirm */
2585 ir_node *skip_Confirm(ir_node *node) {
2586 if (is_Confirm(node))
2587 return get_Confirm_value(node);
2591 /* skip all high-level ops */
2592 ir_node *skip_HighLevel_ops(ir_node *node) {
2593 while (is_op_highlevel(get_irn_op(node))) {
2594 node = get_irn_n(node, 0);
2600 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2601 * than any other approach, as Id chains are resolved and all point to the real node, or
2602 * all id's are self loops.
2604 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2605 * a little bit "hand optimized".
2607 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2610 skip_Id(ir_node *node) {
2612 /* don't assert node !!! */
2614 if (!node || (node->op != op_Id)) return node;
2616 /* Don't use get_Id_pred(): We get into an endless loop for
2617 self-referencing Ids. */
2618 pred = node->in[0+1];
2620 if (pred->op != op_Id) return pred;
2622 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2623 ir_node *rem_pred, *res;
2625 if (pred->op != op_Id) return pred; /* shortcut */
2628 assert(get_irn_arity (node) > 0);
2630 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2631 res = skip_Id(rem_pred);
2632 if (res->op == op_Id) /* self-loop */ return node;
2634 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2641 void skip_Id_and_store(ir_node **node) {
2644 if (!n || (n->op != op_Id)) return;
2646 /* Don't use get_Id_pred(): We get into an endless loop for
2647 self-referencing Ids. */
2652 (is_strictConv)(const ir_node *node) {
2653 return _is_strictConv(node);
2657 (is_no_Block)(const ir_node *node) {
2658 return _is_no_Block(node);
2661 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
2663 (is_SymConst_addr_ent)(const ir_node *node) {
2664 return _is_SymConst_addr_ent(node);
2667 /* Returns true if the operation manipulates control flow. */
2668 int is_cfop(const ir_node *node) {
2669 return is_op_cfopcode(get_irn_op(node));
2672 /* Returns true if the operation manipulates interprocedural control flow:
2673 CallBegin, EndReg, EndExcept */
2674 int is_ip_cfop(const ir_node *node) {
2675 return is_ip_cfopcode(get_irn_op(node));
2678 /* Returns true if the operation can change the control flow because
2681 is_fragile_op(const ir_node *node) {
2682 return is_op_fragile(get_irn_op(node));
2685 /* Returns the memory operand of fragile operations. */
2686 ir_node *get_fragile_op_mem(ir_node *node) {
2687 assert(node && is_fragile_op(node));
2689 switch (get_irn_opcode(node)) {
2700 return get_irn_n(node, pn_Generic_M);
2705 assert(0 && "should not be reached");
2710 /* Returns the result mode of a Div operation. */
2711 ir_mode *get_divop_resmod(const ir_node *node) {
2712 switch (get_irn_opcode(node)) {
2713 case iro_Quot : return get_Quot_resmode(node);
2714 case iro_DivMod: return get_DivMod_resmode(node);
2715 case iro_Div : return get_Div_resmode(node);
2716 case iro_Mod : return get_Mod_resmode(node);
2718 assert(0 && "should not be reached");
2723 /* Returns true if the operation is a forking control flow operation. */
2724 int (is_irn_forking)(const ir_node *node) {
2725 return _is_irn_forking(node);
2728 void (copy_node_attr)(const ir_node *old_node, ir_node *new_node) {
2729 _copy_node_attr(old_node, new_node);
2732 /* Return the type associated with the value produced by n
2733 * if the node remarks this type as it is the case for
2734 * Cast, Const, SymConst and some Proj nodes. */
2735 ir_type *(get_irn_type)(ir_node *node) {
2736 return _get_irn_type(node);
2739 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2741 ir_type *(get_irn_type_attr)(ir_node *node) {
2742 return _get_irn_type_attr(node);
2745 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2746 ir_entity *(get_irn_entity_attr)(ir_node *node) {
2747 return _get_irn_entity_attr(node);
2750 /* Returns non-zero for constant-like nodes. */
2751 int (is_irn_constlike)(const ir_node *node) {
2752 return _is_irn_constlike(node);
2756 * Returns non-zero for nodes that are allowed to have keep-alives and
2757 * are neither Block nor PhiM.
2759 int (is_irn_keep)(const ir_node *node) {
2760 return _is_irn_keep(node);
2764 * Returns non-zero for nodes that are always placed in the start block.
2766 int (is_irn_start_block_placed)(const ir_node *node) {
2767 return _is_irn_start_block_placed(node);
2770 /* Returns non-zero for nodes that are machine operations. */
2771 int (is_irn_machine_op)(const ir_node *node) {
2772 return _is_irn_machine_op(node);
2775 /* Returns non-zero for nodes that are machine operands. */
2776 int (is_irn_machine_operand)(const ir_node *node) {
2777 return _is_irn_machine_operand(node);
2780 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2781 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2782 return _is_irn_machine_user(node, n);
2785 /* Returns non-zero for nodes that are CSE neutral to its users. */
2786 int (is_irn_cse_neutral)(const ir_node *node) {
2787 return _is_irn_cse_neutral(node);
2790 /* Gets the string representation of the jump prediction .*/
2791 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
2792 #define X(a) case a: return #a;
2794 X(COND_JMP_PRED_NONE);
2795 X(COND_JMP_PRED_TRUE);
2796 X(COND_JMP_PRED_FALSE);
2802 /* Returns the conditional jump prediction of a Cond node. */
2803 cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond) {
2804 return _get_Cond_jmp_pred(cond);
2807 /* Sets a new conditional jump prediction. */
2808 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2809 _set_Cond_jmp_pred(cond, pred);
2812 /** the get_type operation must be always implemented and return a firm type */
2813 static ir_type *get_Default_type(ir_node *n) {
2815 return get_unknown_type();
2818 /* Sets the get_type operation for an ir_op_ops. */
2819 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
2821 case iro_Const: ops->get_type = get_Const_type; break;
2822 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2823 case iro_Cast: ops->get_type = get_Cast_type; break;
2824 case iro_Proj: ops->get_type = get_Proj_type; break;
2826 /* not allowed to be NULL */
2827 if (! ops->get_type)
2828 ops->get_type = get_Default_type;
2834 /** Return the attribute type of a SymConst node if exists */
2835 static ir_type *get_SymConst_attr_type(ir_node *self) {
2836 symconst_kind kind = get_SymConst_kind(self);
2837 if (SYMCONST_HAS_TYPE(kind))
2838 return get_SymConst_type(self);
2842 /** Return the attribute entity of a SymConst node if exists */
2843 static ir_entity *get_SymConst_attr_entity(ir_node *self) {
2844 symconst_kind kind = get_SymConst_kind(self);
2845 if (SYMCONST_HAS_ENT(kind))
2846 return get_SymConst_entity(self);
2850 /** the get_type_attr operation must be always implemented */
2851 static ir_type *get_Null_type(ir_node *n) {
2853 return firm_unknown_type;
2856 /* Sets the get_type operation for an ir_op_ops. */
2857 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
2859 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2860 case iro_Call: ops->get_type_attr = get_Call_type; break;
2861 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2862 case iro_Free: ops->get_type_attr = get_Free_type; break;
2863 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2865 /* not allowed to be NULL */
2866 if (! ops->get_type_attr)
2867 ops->get_type_attr = get_Null_type;
2873 /** the get_entity_attr operation must be always implemented */
2874 static ir_entity *get_Null_ent(ir_node *n) {
2879 /* Sets the get_type operation for an ir_op_ops. */
2880 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
2882 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2883 case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
2885 /* not allowed to be NULL */
2886 if (! ops->get_entity_attr)
2887 ops->get_entity_attr = get_Null_ent;
2893 /* Sets the debug information of a node. */
2894 void (set_irn_dbg_info)(ir_node *n, dbg_info *db) {
2895 _set_irn_dbg_info(n, db);
2899 * Returns the debug information of an node.
2901 * @param n The node.
2903 dbg_info *(get_irn_dbg_info)(const ir_node *n) {
2904 return _get_irn_dbg_info(n);
2907 /* checks whether a node represents a global address */
2908 int is_Global(const ir_node *node) {
2909 return is_SymConst_addr_ent(node);
2912 /* returns the entity of a global address */
2913 ir_entity *get_Global_entity(const ir_node *node) {
2914 return get_SymConst_entity(node);
2918 * Calculate a hash value of a node.
2920 unsigned firm_default_hash(const ir_node *node) {
2924 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
2925 h = irn_arity = get_irn_intra_arity(node);
2927 /* consider all in nodes... except the block if not a control flow. */
2928 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
2929 ir_node *pred = get_irn_intra_n(node, i);
2930 if (is_irn_cse_neutral(pred))
2933 h = 9*h + HASH_PTR(pred);
2937 h = 9*h + HASH_PTR(get_irn_mode(node));
2939 h = 9*h + HASH_PTR(get_irn_op(node));
2942 } /* firm_default_hash */
2944 /* include generated code */
2945 #include "gen_irnode.c.inl"