2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
/* some constants fixing the positions of node predecessors */
50 #define CALL_PARAM_OFFSET 2
51 #define BUILDIN_PARAM_OFFSET 1
52 #define SEL_INDEX_OFFSET 2
53 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
54 #define END_KEEPALIVE_OFFSET 0
/* Human-readable names for the pn_Cmp_* comparison relations,
 * indexed by the numeric value of the relation constant. */
static const char *pnc_name_arr[] = {
	"pn_Cmp_False", "pn_Cmp_Eq",  "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt",    "pn_Cmp_Ge",  "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo",    "pn_Cmp_Ue",  "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug",    "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the name of a pnc (comparison relation) constant.
 * The constant must lie in [0, 15], i.e. be a valid table index.
 */
const char *get_pnc_string(int pnc)
{
	size_t n_names = sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0]);

	assert(pnc >= 0 && (size_t)pnc < n_names);
	return pnc_name_arr[pnc];
}
74 * Calculates the negated (Complement(R)) pnc condition.
76 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode)
80 /* do NOT add the Uo bit for non-floating point values */
81 if (! mode_is_float(mode))
87 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
88 pn_Cmp get_inversed_pnc(long pnc)
90 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
91 long lesser = pnc & pn_Cmp_Lt;
92 long greater = pnc & pn_Cmp_Gt;
94 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
 * Indicates whether additional data can still be registered to ir nodes.
 * Once set to 1, further registration is no longer possible.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;

/* Register new space reserved in every node; returns the total size of
 * all registered custom data so far. */
unsigned firm_register_additional_node_data(unsigned size)
{
	assert(!forbid_new_data && "Too late to register additional node data");

	/* release builds: silently refuse a registration that came too late */
	if (forbid_new_data)
		return 0;

	return firm_add_node_size += size;
}
124 void init_irnode(void)
126 /* Forbid the addition of new data to an ir node. */
/**
 * irnode constructor.
 * Create a new irnode in irg, with an op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic array is created.
 *
 * NOTE(review): several lines of this function are missing from this
 * excerpt (local declarations of p/res/i, else-branches, closing braces
 * and the final return). The comments below only describe what the
 * visible statements demonstrably do.
 */
ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
                     ir_mode *mode, int arity, ir_node **in)
	/* attr is the last member: total size = node header + op-specific
	 * attributes + user-registered custom data */
	size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
	p = obstack_alloc(irg->obst, node_size);
	memset(p, 0, node_size);
	/* the ir_node proper starts after the custom-data prefix */
	res = (ir_node *)(p + firm_add_node_size);
	res->kind = k_ir_node;
	res->node_idx = irg_register_node_idx(irg, res);
	res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
	/* not nice but necessary: End and Sync must always have a flexible array */
	if (op == op_End || op == op_Sync)
		res->in = NEW_ARR_F(ir_node *, (arity+1));
	res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
	memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
	set_irn_dbg_info(res, db);
	res->node_nr = get_irp_new_node_nr();
	/* initialize the out-edge bookkeeping for every edge kind */
	for (i = 0; i < EDGE_KIND_LAST; ++i) {
		INIT_LIST_HEAD(&res->edge_info[i].outs_head);
		/* edges will be built immediately */
		res->edge_info[i].edges_built = 1;
		res->edge_info[i].out_count = 0;
	/* don't put this into the for loop, arity is -1 for some nodes! */
	edges_notify_edge(res, -1, res->in[0], NULL, irg);
	for (i = 1; i <= arity; ++i)
		edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
	hook_new_node(irg, res);
	if (get_irg_phase_state(irg) == phase_backend) {
		be_info_new_node(res);
	// Init the VRP structures
	res->vrp.range_type = VRP_UNDEFINED;
	if (mode_is_int(mode)) {
		// We are assuming that 0 is always represented as 0x0000
		res->vrp.bits_set = new_tarval_from_long(0, mode);
		res->vrp.bits_not_set = new_tarval_from_long(0, mode);
		res->vrp.range_bottom = get_tarval_top();
		res->vrp.range_top = get_tarval_top();
		/* NOTE(review): the lines below appear to belong to an elided
		 * else-branch (non-integer modes get "bad" tarvals) — confirm
		 * against the full source. */
		res->vrp.bits_set = get_tarval_bad();
		res->vrp.bits_not_set = get_tarval_bad();
		res->vrp.range_bottom = get_tarval_bad();
		res->vrp.range_top = get_tarval_bad();
	res->vrp.bits_node = NULL;
	res->vrp.range_node = NULL;
	res->vrp.range_op = VRP_NONE;
214 /*-- getting some parameters from ir_nodes --*/
216 int (is_ir_node)(const void *thing)
218 return _is_ir_node(thing);
221 int (get_irn_intra_arity)(const ir_node *node)
223 return _get_irn_intra_arity(node);
226 int (get_irn_inter_arity)(const ir_node *node)
228 return _get_irn_inter_arity(node);
231 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
233 int (get_irn_arity)(const ir_node *node)
235 return _get_irn_arity(node);
238 /* Returns the array with ins. This array is shifted with respect to the
239 array accessed by get_irn_n: The block operand is at position 0 not -1.
240 (@@@ This should be changed.)
241 The order of the predecessors in this array is not guaranteed, except that
242 lists of operands as predecessors of Block or arguments of a Call are
244 ir_node **get_irn_in(const ir_node *node)
247 #ifdef INTERPROCEDURAL_VIEW
248 if (get_interprocedural_view()) { /* handle Filter and Block specially */
249 if (get_irn_opcode(node) == iro_Filter) {
250 assert(node->attr.filter.in_cg);
251 return node->attr.filter.in_cg;
252 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
253 return node->attr.block.in_cg;
255 /* else fall through */
257 #endif /* INTERPROCEDURAL_VIEW */
261 void set_irn_in(ir_node *node, int arity, ir_node **in)
265 ir_graph *irg = get_irn_irg(node);
268 #ifdef INTERPROCEDURAL_VIEW
269 if (get_interprocedural_view()) { /* handle Filter and Block specially */
270 ir_opcode code = get_irn_opcode(node);
271 if (code == iro_Filter) {
272 assert(node->attr.filter.in_cg);
273 pOld_in = &node->attr.filter.in_cg;
274 } else if (code == iro_Block && node->attr.block.in_cg) {
275 pOld_in = &node->attr.block.in_cg;
280 #endif /* INTERPROCEDURAL_VIEW */
284 for (i = 0; i < arity; i++) {
285 if (i < ARR_LEN(*pOld_in)-1)
286 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
288 edges_notify_edge(node, i, in[i], NULL, irg);
290 for (;i < ARR_LEN(*pOld_in)-1; i++) {
291 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
294 if (arity != ARR_LEN(*pOld_in) - 1) {
295 ir_node * block = (*pOld_in)[0];
296 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
297 (*pOld_in)[0] = block;
299 fix_backedges(irg->obst, node);
301 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
304 ir_node *(get_irn_intra_n)(const ir_node *node, int n)
306 return _get_irn_intra_n(node, n);
309 ir_node *(get_irn_inter_n)(const ir_node *node, int n)
311 return _get_irn_inter_n(node, n);
314 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
316 ir_node *(get_irn_n)(const ir_node *node, int n)
318 return _get_irn_n(node, n);
321 void set_irn_n(ir_node *node, int n, ir_node *in)
323 assert(node && node->kind == k_ir_node);
325 assert(n < get_irn_arity(node));
326 assert(in && in->kind == k_ir_node);
328 #ifdef INTERPROCEDURAL_VIEW
329 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
330 /* Change block pred in both views! */
331 node->in[n + 1] = in;
332 assert(node->attr.filter.in_cg);
333 node->attr.filter.in_cg[n + 1] = in;
336 if (get_interprocedural_view()) { /* handle Filter and Block specially */
337 if (get_irn_opcode(node) == iro_Filter) {
338 assert(node->attr.filter.in_cg);
339 node->attr.filter.in_cg[n + 1] = in;
341 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
342 node->attr.block.in_cg[n + 1] = in;
345 /* else fall through */
347 #endif /* INTERPROCEDURAL_VIEW */
350 hook_set_irn_n(node, n, in, node->in[n + 1]);
352 /* Here, we rely on src and tgt being in the current ir graph */
353 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
355 node->in[n + 1] = in;
358 int add_irn_n(ir_node *node, ir_node *in)
361 ir_graph *irg = get_irn_irg(node);
363 assert(node->op->opar == oparity_dynamic);
364 pos = ARR_LEN(node->in) - 1;
365 ARR_APP1(ir_node *, node->in, in);
366 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
369 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
374 void del_Sync_n(ir_node *n, int i)
376 int arity = get_Sync_n_preds(n);
377 ir_node *last_pred = get_Sync_pred(n, arity - 1);
378 set_Sync_pred(n, i, last_pred);
379 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
380 ARR_SHRINKLEN(get_irn_in(n), arity);
383 int (get_irn_deps)(const ir_node *node)
385 return _get_irn_deps(node);
388 ir_node *(get_irn_dep)(const ir_node *node, int pos)
390 return _get_irn_dep(node, pos);
393 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
395 _set_irn_dep(node, pos, dep);
398 int add_irn_dep(ir_node *node, ir_node *dep)
402 /* DEP edges are only allowed in backend phase */
403 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
404 if (node->deps == NULL) {
405 node->deps = NEW_ARR_F(ir_node *, 1);
411 for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
412 if (node->deps[i] == NULL)
415 if (node->deps[i] == dep)
419 if (first_zero >= 0) {
420 node->deps[first_zero] = dep;
423 ARR_APP1(ir_node *, node->deps, dep);
428 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
433 void add_irn_deps(ir_node *tgt, ir_node *src)
437 for (i = 0, n = get_irn_deps(src); i < n; ++i)
438 add_irn_dep(tgt, get_irn_dep(src, i));
442 ir_mode *(get_irn_mode)(const ir_node *node)
444 return _get_irn_mode(node);
447 void (set_irn_mode)(ir_node *node, ir_mode *mode)
449 _set_irn_mode(node, mode);
452 /** Gets the string representation of the mode .*/
453 const char *get_irn_modename(const ir_node *node)
456 return get_mode_name(node->mode);
459 ident *get_irn_modeident(const ir_node *node)
462 return get_mode_ident(node->mode);
465 ir_op *(get_irn_op)(const ir_node *node)
467 return _get_irn_op(node);
470 /* should be private to the library: */
471 void (set_irn_op)(ir_node *node, ir_op *op)
473 _set_irn_op(node, op);
476 unsigned (get_irn_opcode)(const ir_node *node)
478 return _get_irn_opcode(node);
481 const char *get_irn_opname(const ir_node *node)
484 if (is_Phi0(node)) return "Phi0";
485 return get_id_str(node->op->name);
488 ident *get_irn_opident(const ir_node *node)
491 return node->op->name;
494 ir_visited_t (get_irn_visited)(const ir_node *node)
496 return _get_irn_visited(node);
499 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
501 _set_irn_visited(node, visited);
504 void (mark_irn_visited)(ir_node *node)
506 _mark_irn_visited(node);
509 int (irn_visited)(const ir_node *node)
511 return _irn_visited(node);
514 int (irn_visited_else_mark)(ir_node *node)
516 return _irn_visited_else_mark(node);
519 void (set_irn_link)(ir_node *node, void *link)
521 _set_irn_link(node, link);
524 void *(get_irn_link)(const ir_node *node)
526 return _get_irn_link(node);
529 op_pin_state (get_irn_pinned)(const ir_node *node)
531 return _get_irn_pinned(node);
534 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
536 return _is_irn_pinned_in_irg(node);
539 void set_irn_pinned(ir_node *node, op_pin_state state)
541 /* due to optimization an opt may be turned into a Tuple */
545 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
546 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
548 node->attr.except.pin_state = state;
551 /* Outputs a unique number for this node */
552 long get_irn_node_nr(const ir_node *node)
555 return node->node_nr;
558 const_attr *get_irn_const_attr(ir_node *node)
560 assert(is_Const(node));
561 return &node->attr.con;
564 long get_irn_proj_attr(ir_node *node)
566 /* BEWARE: check for true Proj node here, no Filter */
567 assert(node->op == op_Proj);
568 return node->attr.proj;
571 alloc_attr *get_irn_alloc_attr(ir_node *node)
573 assert(is_Alloc(node));
574 return &node->attr.alloc;
577 free_attr *get_irn_free_attr(ir_node *node)
579 assert(is_Free(node));
580 return &node->attr.free;
583 symconst_attr *get_irn_symconst_attr(ir_node *node)
585 assert(is_SymConst(node));
586 return &node->attr.symc;
589 call_attr *get_irn_call_attr(ir_node *node)
591 assert(is_Call(node));
592 return &node->attr.call;
595 sel_attr *get_irn_sel_attr(ir_node *node)
597 assert(is_Sel(node));
598 return &node->attr.sel;
601 phi_attr *get_irn_phi_attr(ir_node *node)
603 return &node->attr.phi;
606 block_attr *get_irn_block_attr(ir_node *node)
608 assert(is_Block(node));
609 return &node->attr.block;
612 load_attr *get_irn_load_attr(ir_node *node)
614 assert(is_Load(node));
615 return &node->attr.load;
618 store_attr *get_irn_store_attr(ir_node *node)
620 assert(is_Store(node));
621 return &node->attr.store;
624 except_attr *get_irn_except_attr(ir_node *node)
626 assert(node->op == op_Div || node->op == op_Quot ||
627 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
628 return &node->attr.except;
631 divmod_attr *get_irn_divmod_attr(ir_node *node)
633 assert(node->op == op_Div || node->op == op_Quot ||
634 node->op == op_DivMod || node->op == op_Mod);
635 return &node->attr.divmod;
638 builtin_attr *get_irn_builtin_attr(ir_node *node)
640 assert(is_Builtin(node));
641 return &node->attr.builtin;
644 void *(get_irn_generic_attr)(ir_node *node)
646 assert(is_ir_node(node));
647 return _get_irn_generic_attr(node);
650 const void *(get_irn_generic_attr_const)(const ir_node *node)
652 assert(is_ir_node(node));
653 return _get_irn_generic_attr_const(node);
656 unsigned (get_irn_idx)(const ir_node *node)
658 assert(is_ir_node(node));
659 return _get_irn_idx(node);
662 int get_irn_pred_pos(ir_node *node, ir_node *arg)
665 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
666 if (get_irn_n(node, i) == arg)
672 /** manipulate fields of individual nodes **/
674 /* this works for all except Block */
675 ir_node *get_nodes_block(const ir_node *node)
677 assert(node->op != op_Block);
678 return get_irn_n(node, -1);
681 void set_nodes_block(ir_node *node, ir_node *block)
683 assert(node->op != op_Block);
684 set_irn_n(node, -1, block);
687 /* this works for all except Block */
688 ir_node *get_nodes_MacroBlock(const ir_node *node)
690 assert(node->op != op_Block);
691 return get_Block_MacroBlock(get_irn_n(node, -1));
694 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
695 * from Start. If so returns frame type, else Null. */
696 ir_type *is_frame_pointer(const ir_node *n)
698 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
699 ir_node *start = get_Proj_pred(n);
700 if (is_Start(start)) {
701 return get_irg_frame_type(get_irn_irg(start));
707 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
708 * from Start. If so returns tls type, else Null. */
709 ir_type *is_tls_pointer(const ir_node *n)
711 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
712 ir_node *start = get_Proj_pred(n);
713 if (is_Start(start)) {
714 return get_tls_type();
720 ir_node **get_Block_cfgpred_arr(ir_node *node)
722 assert(is_Block(node));
723 return (ir_node **)&(get_irn_in(node)[1]);
726 int (get_Block_n_cfgpreds)(const ir_node *node)
728 return _get_Block_n_cfgpreds(node);
731 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
733 return _get_Block_cfgpred(node, pos);
736 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
738 assert(is_Block(node));
739 set_irn_n(node, pos, pred);
742 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
746 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
747 if (get_Block_cfgpred_block(block, i) == pred)
753 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
755 return _get_Block_cfgpred_block(node, pos);
758 int get_Block_matured(const ir_node *node)
760 assert(is_Block(node));
761 return (int)node->attr.block.is_matured;
764 void set_Block_matured(ir_node *node, int matured)
766 assert(is_Block(node));
767 node->attr.block.is_matured = matured;
770 ir_visited_t (get_Block_block_visited)(const ir_node *node)
772 return _get_Block_block_visited(node);
775 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
777 _set_Block_block_visited(node, visit);
780 /* For this current_ir_graph must be set. */
781 void (mark_Block_block_visited)(ir_node *node)
783 _mark_Block_block_visited(node);
786 int (Block_block_visited)(const ir_node *node)
788 return _Block_block_visited(node);
791 ir_node *get_Block_graph_arr(ir_node *node, int pos)
793 assert(is_Block(node));
794 return node->attr.block.graph_arr[pos+1];
797 void set_Block_graph_arr(ir_node *node, int pos, ir_node *value)
799 assert(is_Block(node));
800 node->attr.block.graph_arr[pos+1] = value;
803 #ifdef INTERPROCEDURAL_VIEW
804 void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[])
806 assert(is_Block(node));
807 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
808 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
809 node->attr.block.in_cg[0] = NULL;
810 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
812 /* Fix backedge array. fix_backedges() operates depending on
813 interprocedural_view. */
814 int ipv = get_interprocedural_view();
815 set_interprocedural_view(1);
816 fix_backedges(current_ir_graph->obst, node);
817 set_interprocedural_view(ipv);
820 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
823 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred)
825 assert(is_Block(node) && node->attr.block.in_cg &&
826 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
827 node->attr.block.in_cg[pos + 1] = pred;
830 ir_node **get_Block_cg_cfgpred_arr(ir_node *node)
832 assert(is_Block(node));
833 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
836 int get_Block_cg_n_cfgpreds(const ir_node *node)
838 assert(is_Block(node));
839 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
842 ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos)
844 assert(is_Block(node) && node->attr.block.in_cg);
845 return node->attr.block.in_cg[pos + 1];
848 void remove_Block_cg_cfgpred_arr(ir_node *node)
850 assert(is_Block(node));
851 node->attr.block.in_cg = NULL;
853 #endif /* INTERPROCEDURAL_VIEW */
855 ir_node *(set_Block_dead)(ir_node *block)
857 return _set_Block_dead(block);
860 int (is_Block_dead)(const ir_node *block)
862 return _is_Block_dead(block);
865 ir_extblk *get_Block_extbb(const ir_node *block)
868 assert(is_Block(block));
869 res = block->attr.block.extblk;
870 assert(res == NULL || is_ir_extbb(res));
874 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
876 assert(is_Block(block));
877 assert(extblk == NULL || is_ir_extbb(extblk));
878 block->attr.block.extblk = extblk;
881 /* Returns the macro block header of a block.*/
882 ir_node *get_Block_MacroBlock(const ir_node *block)
885 assert(is_Block(block));
886 mbh = get_irn_n(block, -1);
887 /* once macro block header is respected by all optimizations,
888 this assert can be removed */
893 /* Sets the macro block header of a block. */
894 void set_Block_MacroBlock(ir_node *block, ir_node *mbh)
896 assert(is_Block(block));
898 assert(is_Block(mbh));
899 set_irn_n(block, -1, mbh);
902 /* returns the macro block header of a node. */
903 ir_node *get_irn_MacroBlock(const ir_node *n)
906 n = get_nodes_block(n);
907 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
911 return get_Block_MacroBlock(n);
914 /* returns the graph of a Block. */
915 ir_graph *(get_Block_irg)(const ir_node *block)
917 return _get_Block_irg(block);
920 ir_entity *create_Block_entity(ir_node *block)
923 assert(is_Block(block));
925 entity = block->attr.block.entity;
926 if (entity == NULL) {
930 glob = get_glob_type();
931 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
932 set_entity_visibility(entity, ir_visibility_local);
933 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
934 nr = get_irp_next_label_nr();
935 set_entity_label(entity, nr);
936 set_entity_compiler_generated(entity, 1);
938 block->attr.block.entity = entity;
943 ir_entity *get_Block_entity(const ir_node *block)
945 assert(is_Block(block));
946 return block->attr.block.entity;
949 void set_Block_entity(ir_node *block, ir_entity *entity)
951 assert(is_Block(block));
952 assert(get_entity_type(entity) == get_code_type());
953 block->attr.block.entity = entity;
956 int has_Block_entity(const ir_node *block)
958 return block->attr.block.entity != NULL;
961 ir_node *(get_Block_phis)(const ir_node *block)
963 return _get_Block_phis(block);
966 void (set_Block_phis)(ir_node *block, ir_node *phi)
968 _set_Block_phis(block, phi);
971 void (add_Block_phi)(ir_node *block, ir_node *phi)
973 _add_Block_phi(block, phi);
976 /* Get the Block mark (single bit). */
977 unsigned (get_Block_mark)(const ir_node *block)
979 return _get_Block_mark(block);
982 /* Set the Block mark (single bit). */
983 void (set_Block_mark)(ir_node *block, unsigned mark)
985 _set_Block_mark(block, mark);
988 int get_End_n_keepalives(const ir_node *end)
991 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
994 ir_node *get_End_keepalive(const ir_node *end, int pos)
997 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
1000 void add_End_keepalive(ir_node *end, ir_node *ka)
1002 assert(is_End(end));
1006 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
1008 assert(is_End(end));
1009 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
1012 /* Set new keep-alives */
1013 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
1016 ir_graph *irg = get_irn_irg(end);
1018 /* notify that edges are deleted */
1019 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
1020 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
1022 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
1024 for (i = 0; i < n; ++i) {
1025 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
1026 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
1030 /* Set new keep-alives from old keep-alives, skipping irn */
1031 void remove_End_keepalive(ir_node *end, ir_node *irn)
1033 int n = get_End_n_keepalives(end);
1038 for (i = n -1; i >= 0; --i) {
1039 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
1042 if (old_ka == irn) {
1049 irg = get_irn_irg(end);
1051 /* remove the edge */
1052 edges_notify_edge(end, idx, NULL, irn, irg);
1055 /* exchange with the last one */
1056 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
1057 edges_notify_edge(end, n - 1, NULL, old, irg);
1058 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
1059 edges_notify_edge(end, idx, old, NULL, irg);
1061 /* now n - 1 keeps, 1 block input */
1062 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
1065 /* remove Bads, NoMems and doublets from the keep-alive set */
1066 void remove_End_Bads_and_doublets(ir_node *end)
1069 int idx, n = get_End_n_keepalives(end);
1075 irg = get_irn_irg(end);
1076 pset_new_init(&keeps);
1078 for (idx = n - 1; idx >= 0; --idx) {
1079 ir_node *ka = get_End_keepalive(end, idx);
1081 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
1082 /* remove the edge */
1083 edges_notify_edge(end, idx, NULL, ka, irg);
1086 /* exchange with the last one */
1087 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
1088 edges_notify_edge(end, n - 1, NULL, old, irg);
1089 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
1090 edges_notify_edge(end, idx, old, NULL, irg);
1094 pset_new_insert(&keeps, ka);
1097 /* n keeps, 1 block input */
1098 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
1100 pset_new_destroy(&keeps);
1103 void free_End(ir_node *end)
1105 assert(is_End(end));
1108 end->in = NULL; /* @@@ make sure we get an error if we use the
1109 in array afterwards ... */
1112 /* Return the target address of an IJmp */
1113 ir_node *get_IJmp_target(const ir_node *ijmp)
1115 assert(is_IJmp(ijmp));
1116 return get_irn_n(ijmp, 0);
1119 /** Sets the target address of an IJmp */
1120 void set_IJmp_target(ir_node *ijmp, ir_node *tgt)
1122 assert(is_IJmp(ijmp));
1123 set_irn_n(ijmp, 0, tgt);
1126 ir_node *get_Cond_selector(const ir_node *node)
1128 assert(is_Cond(node));
1129 return get_irn_n(node, 0);
1132 void set_Cond_selector(ir_node *node, ir_node *selector)
1134 assert(is_Cond(node));
1135 set_irn_n(node, 0, selector);
1138 long get_Cond_default_proj(const ir_node *node)
1140 assert(is_Cond(node));
1141 return node->attr.cond.default_proj;
1144 void set_Cond_default_proj(ir_node *node, long defproj)
1146 assert(is_Cond(node));
1147 node->attr.cond.default_proj = defproj;
1150 ir_node *get_Return_mem(const ir_node *node)
1152 assert(is_Return(node));
1153 return get_irn_n(node, 0);
1156 void set_Return_mem(ir_node *node, ir_node *mem)
1158 assert(is_Return(node));
1159 set_irn_n(node, 0, mem);
1162 int get_Return_n_ress(const ir_node *node)
1164 assert(is_Return(node));
1165 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1168 ir_node **get_Return_res_arr(ir_node *node)
1170 assert(is_Return(node));
1171 if (get_Return_n_ress(node) > 0)
1172 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1178 void set_Return_n_res(ir_node *node, int results)
1180 assert(is_Return(node));
1184 ir_node *get_Return_res(const ir_node *node, int pos)
1186 assert(is_Return(node));
1187 assert(get_Return_n_ress(node) > pos);
1188 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1191 void set_Return_res(ir_node *node, int pos, ir_node *res)
1193 assert(is_Return(node));
1194 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1197 tarval *(get_Const_tarval)(const ir_node *node)
1199 return _get_Const_tarval(node);
1202 void set_Const_tarval(ir_node *node, tarval *con)
1204 assert(is_Const(node));
1205 node->attr.con.tv = con;
1208 int (is_Const_null)(const ir_node *node)
1210 return _is_Const_null(node);
1213 int (is_Const_one)(const ir_node *node)
1215 return _is_Const_one(node);
1218 int (is_Const_all_one)(const ir_node *node)
1220 return _is_Const_all_one(node);
1224 /* The source language type. Must be an atomic type. Mode of type must
1225 be mode of node. For tarvals from entities type must be pointer to
1227 ir_type *get_Const_type(ir_node *node)
1229 assert(is_Const(node));
1230 return node->attr.con.tp;
1233 void set_Const_type(ir_node *node, ir_type *tp)
1235 assert(is_Const(node));
1236 if (tp != firm_unknown_type) {
1237 assert(is_atomic_type(tp));
1238 assert(get_type_mode(tp) == get_irn_mode(node));
1240 node->attr.con.tp = tp;
1244 symconst_kind get_SymConst_kind(const ir_node *node)
1246 assert(is_SymConst(node));
1247 return node->attr.symc.kind;
1250 void set_SymConst_kind(ir_node *node, symconst_kind kind)
1252 assert(is_SymConst(node));
1253 node->attr.symc.kind = kind;
1256 ir_type *get_SymConst_type(const ir_node *node)
1258 /* the cast here is annoying, but we have to compensate for
1260 ir_node *irn = (ir_node *)node;
1261 assert(is_SymConst(node) &&
1262 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1263 return irn->attr.symc.sym.type_p;
1266 void set_SymConst_type(ir_node *node, ir_type *tp)
1268 assert(is_SymConst(node) &&
1269 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1270 node->attr.symc.sym.type_p = tp;
1273 ident *get_SymConst_name(const ir_node *node)
1275 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1276 return node->attr.symc.sym.ident_p;
1279 void set_SymConst_name(ir_node *node, ident *name)
1281 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1282 node->attr.symc.sym.ident_p = name;
1286 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1287 ir_entity *get_SymConst_entity(const ir_node *node)
1289 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1290 return node->attr.symc.sym.entity_p;
1293 void set_SymConst_entity(ir_node *node, ir_entity *ent)
1295 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1296 node->attr.symc.sym.entity_p = ent;
1299 ir_enum_const *get_SymConst_enum(const ir_node *node)
1301 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1302 return node->attr.symc.sym.enum_p;
1305 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
1307 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1308 node->attr.symc.sym.enum_p = ec;
1311 union symconst_symbol
1312 get_SymConst_symbol(const ir_node *node)
1314 assert(is_SymConst(node));
1315 return node->attr.symc.sym;
1318 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
1320 assert(is_SymConst(node));
1321 node->attr.symc.sym = sym;
1324 ir_type *get_SymConst_value_type(ir_node *node)
1326 assert(is_SymConst(node));
1327 return node->attr.symc.tp;
1330 void set_SymConst_value_type(ir_node *node, ir_type *tp)
1332 assert(is_SymConst(node));
1333 node->attr.symc.tp = tp;
1336 ir_node *get_Sel_mem(const ir_node *node)
1338 assert(is_Sel(node));
1339 return get_irn_n(node, 0);
1342 void set_Sel_mem(ir_node *node, ir_node *mem)
1344 assert(is_Sel(node));
1345 set_irn_n(node, 0, mem);
1348 ir_node *get_Sel_ptr(const ir_node *node)
1350 assert(is_Sel(node));
1351 return get_irn_n(node, 1);
1354 void set_Sel_ptr(ir_node *node, ir_node *ptr)
1356 assert(is_Sel(node));
1357 set_irn_n(node, 1, ptr);
1360 int get_Sel_n_indexs(const ir_node *node)
1362 assert(is_Sel(node));
1363 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1366 ir_node **get_Sel_index_arr(ir_node *node)
1368 assert(is_Sel(node));
1369 if (get_Sel_n_indexs(node) > 0)
1370 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1375 ir_node *get_Sel_index(const ir_node *node, int pos)
1377 assert(is_Sel(node));
1378 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1381 void set_Sel_index(ir_node *node, int pos, ir_node *index)
1383 assert(is_Sel(node));
1384 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1387 ir_entity *get_Sel_entity(const ir_node *node)
1389 assert(is_Sel(node));
1390 return node->attr.sel.entity;
1393 /* need a version without const to prevent warning */
1394 static ir_entity *_get_Sel_entity(ir_node *node)
1396 return get_Sel_entity(node);
1399 void set_Sel_entity(ir_node *node, ir_entity *ent)
1401 assert(is_Sel(node));
1402 node->attr.sel.entity = ent;
1406 /* For unary and binary arithmetic operations the access to the
1407 operands can be factored out. Left is the first, right the
1408 second arithmetic value as listed in tech report 0999-33.
1409 unops are: Minus, Abs, Not, Conv, Cast
1410 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1411 Shr, Shrs, Rotate, Cmp */
1414 ir_node *get_Call_mem(const ir_node *node)
1416 assert(is_Call(node));
1417 return get_irn_n(node, 0);
1420 void set_Call_mem(ir_node *node, ir_node *mem)
1422 assert(is_Call(node));
1423 set_irn_n(node, 0, mem);
1426 ir_node *get_Call_ptr(const ir_node *node)
1428 assert(is_Call(node));
1429 return get_irn_n(node, 1);
1432 void set_Call_ptr(ir_node *node, ir_node *ptr)
1434 assert(is_Call(node));
1435 set_irn_n(node, 1, ptr);
1438 ir_node **get_Call_param_arr(ir_node *node)
1440 assert(is_Call(node));
1441 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1444 int get_Call_n_params(const ir_node *node)
1446 assert(is_Call(node));
1447 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1450 ir_node *get_Call_param(const ir_node *node, int pos)
1452 assert(is_Call(node));
1453 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1456 void set_Call_param(ir_node *node, int pos, ir_node *param)
1458 assert(is_Call(node));
1459 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1462 ir_type *get_Call_type(ir_node *node)
1464 assert(is_Call(node));
1465 return node->attr.call.type;
1468 void set_Call_type(ir_node *node, ir_type *tp)
1470 assert(is_Call(node));
1471 assert((get_unknown_type() == tp) || is_Method_type(tp));
1472 node->attr.call.type = tp;
1475 unsigned get_Call_tail_call(const ir_node *node)
1477 assert(is_Call(node));
1478 return node->attr.call.tail_call;
1481 void set_Call_tail_call(ir_node *node, unsigned tail_call)
1483 assert(is_Call(node));
1484 node->attr.call.tail_call = tail_call != 0;
1487 ir_node *get_Builtin_mem(const ir_node *node)
1489 assert(is_Builtin(node));
1490 return get_irn_n(node, 0);
1493 void set_Builin_mem(ir_node *node, ir_node *mem)
1495 assert(is_Builtin(node));
1496 set_irn_n(node, 0, mem);
1499 ir_builtin_kind get_Builtin_kind(const ir_node *node)
1501 assert(is_Builtin(node));
1502 return node->attr.builtin.kind;
1505 void set_Builtin_kind(ir_node *node, ir_builtin_kind kind)
1507 assert(is_Builtin(node));
1508 node->attr.builtin.kind = kind;
1511 ir_node **get_Builtin_param_arr(ir_node *node)
1513 assert(is_Builtin(node));
1514 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1517 int get_Builtin_n_params(const ir_node *node)
1519 assert(is_Builtin(node));
1520 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1523 ir_node *get_Builtin_param(const ir_node *node, int pos)
1525 assert(is_Builtin(node));
1526 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1529 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
1531 assert(is_Builtin(node));
1532 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1535 ir_type *get_Builtin_type(ir_node *node)
1537 assert(is_Builtin(node));
1538 return node->attr.builtin.type;
1541 void set_Builtin_type(ir_node *node, ir_type *tp)
1543 assert(is_Builtin(node));
1544 assert((get_unknown_type() == tp) || is_Method_type(tp));
1545 node->attr.builtin.type = tp;
1548 /* Returns a human readable string for the ir_builtin_kind. */
1549 const char *get_builtin_kind_name(ir_builtin_kind kind)
/* NOTE(review): the switch header, the majority of the X(...) case
 * lines, the fallback return for unknown kinds, and the trailing
 * `#undef X` appear to have been lost in extraction -- only four cases
 * survive below.  Restore the full switch from version control before
 * relying on this function. */
1551 #define X(a) case a: return #a;
1554 X(ir_bk_debugbreak);
1555 X(ir_bk_return_address);
1556 X(ir_bk_frame_address);
1566 X(ir_bk_inner_trampoline);
1573 int Call_has_callees(const ir_node *node)
1575 assert(is_Call(node));
1576 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1577 (node->attr.call.callee_arr != NULL));
1580 int get_Call_n_callees(const ir_node *node)
1582 assert(is_Call(node) && node->attr.call.callee_arr);
1583 return ARR_LEN(node->attr.call.callee_arr);
1586 ir_entity *get_Call_callee(const ir_node *node, int pos)
1588 assert(pos >= 0 && pos < get_Call_n_callees(node));
1589 return node->attr.call.callee_arr[pos];
1592 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr)
1594 assert(is_Call(node));
1595 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1596 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1598 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1601 void remove_Call_callee_arr(ir_node *node)
1603 assert(is_Call(node));
1604 node->attr.call.callee_arr = NULL;
1607 ir_node *get_CallBegin_ptr(const ir_node *node)
1609 assert(is_CallBegin(node));
1610 return get_irn_n(node, 0);
1613 void set_CallBegin_ptr(ir_node *node, ir_node *ptr)
1615 assert(is_CallBegin(node));
1616 set_irn_n(node, 0, ptr);
1619 ir_node *get_CallBegin_call(const ir_node *node)
1621 assert(is_CallBegin(node));
1622 return node->attr.callbegin.call;
1625 void set_CallBegin_call(ir_node *node, ir_node *call)
1627 assert(is_CallBegin(node));
1628 node->attr.callbegin.call = call;
1632 * Returns non-zero if a Call is surely a self-recursive Call.
1633 * Beware: if this functions returns 0, the call might be self-recursive!
1635 int is_self_recursive_Call(const ir_node *call)
1637 const ir_node *callee = get_Call_ptr(call);
1639 if (is_SymConst_addr_ent(callee)) {
1640 const ir_entity *ent = get_SymConst_entity(callee);
1641 const ir_graph *irg = get_entity_irg(ent);
1642 if (irg == get_irn_irg(call))
/* NOTE(review): the lines below are the continuation bodies of two
 * function-like macros whose `#define` header lines (presumably
 * BINOP(OP) and UNOP(OP)) were lost in extraction.  The first macro
 * expands to get/set accessors for the left (op_index) and right
 * (op_index + 1) operands of a binary operation; the second to get/set
 * accessors for the single operand of a unary operation.  Restore the
 * missing header lines from version control. */
1649 ir_node * get_##OP##_left(const ir_node *node) { \
1650 assert(is_##OP(node)); \
1651 return get_irn_n(node, node->op->op_index); \
1653 void set_##OP##_left(ir_node *node, ir_node *left) { \
1654 assert(is_##OP(node)); \
1655 set_irn_n(node, node->op->op_index, left); \
1657 ir_node *get_##OP##_right(const ir_node *node) { \
1658 assert(is_##OP(node)); \
1659 return get_irn_n(node, node->op->op_index + 1); \
1661 void set_##OP##_right(ir_node *node, ir_node *right) { \
1662 assert(is_##OP(node)); \
1663 set_irn_n(node, node->op->op_index + 1, right); \
1667 ir_node *get_##OP##_op(const ir_node *node) { \
1668 assert(is_##OP(node)); \
1669 return get_irn_n(node, node->op->op_index); \
1671 void set_##OP##_op(ir_node *node, ir_node *op) { \
1672 assert(is_##OP(node)); \
1673 set_irn_n(node, node->op->op_index, op); \
/* NOTE(review): macro bodies below are incomplete -- continuation
 * lines (return types, braces, the DIVOP/resmode macro header) were
 * lost in extraction.  BINOP_MEM(OP) expands to get/set accessors for
 * the memory input (input 0); the second fragment expands to get/set
 * accessors for the divmod result mode attribute.  Restore from
 * version control. */
1676 #define BINOP_MEM(OP) \
1680 get_##OP##_mem(const ir_node *node) { \
1681 assert(is_##OP(node)); \
1682 return get_irn_n(node, 0); \
1686 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1687 assert(is_##OP(node)); \
1688 set_irn_n(node, 0, mem); \
1694 ir_mode *get_##OP##_resmode(const ir_node *node) { \
1695 assert(is_##OP(node)); \
1696 return node->attr.divmod.resmode; \
1699 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
1700 assert(is_##OP(node)); \
1701 node->attr.divmod.resmode = mode; \
1729 int get_Div_no_remainder(const ir_node *node)
1731 assert(is_Div(node));
1732 return node->attr.divmod.no_remainder;
1735 void set_Div_no_remainder(ir_node *node, int no_remainder)
1737 assert(is_Div(node));
1738 node->attr.divmod.no_remainder = no_remainder;
1741 int get_Conv_strict(const ir_node *node)
1743 assert(is_Conv(node));
1744 return node->attr.conv.strict;
1747 void set_Conv_strict(ir_node *node, int strict_flag)
1749 assert(is_Conv(node));
1750 node->attr.conv.strict = (char)strict_flag;
1753 ir_type *get_Cast_type(ir_node *node)
1755 assert(is_Cast(node));
1756 return node->attr.cast.type;
1759 void set_Cast_type(ir_node *node, ir_type *to_tp)
1761 assert(is_Cast(node));
1762 node->attr.cast.type = to_tp;
1766 /* Checks for upcast.
1768 * Returns true if the Cast node casts a class type to a super type.
1770 int is_Cast_upcast(ir_node *node)
1772 ir_type *totype = get_Cast_type(node);
1773 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1775 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1778 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1779 totype = get_pointer_points_to_type(totype);
1780 fromtype = get_pointer_points_to_type(fromtype);
1785 if (!is_Class_type(totype)) return 0;
1786 return is_SubClass_of(fromtype, totype);
1789 /* Checks for downcast.
1791 * Returns true if the Cast node casts a class type to a sub type.
1793 int is_Cast_downcast(ir_node *node)
1795 ir_type *totype = get_Cast_type(node);
1796 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1798 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1801 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1802 totype = get_pointer_points_to_type(totype);
1803 fromtype = get_pointer_points_to_type(fromtype);
1808 if (!is_Class_type(totype)) return 0;
1809 return is_SubClass_of(totype, fromtype);
1812 int (is_unop)(const ir_node *node)
1814 return _is_unop(node);
1817 ir_node *get_unop_op(const ir_node *node)
1819 if (node->op->opar == oparity_unary)
1820 return get_irn_n(node, node->op->op_index);
1822 assert(node->op->opar == oparity_unary);
1826 void set_unop_op(ir_node *node, ir_node *op)
1828 if (node->op->opar == oparity_unary)
1829 set_irn_n(node, node->op->op_index, op);
1831 assert(node->op->opar == oparity_unary);
1834 int (is_binop)(const ir_node *node)
1836 return _is_binop(node);
1839 ir_node *get_binop_left(const ir_node *node)
1841 assert(node->op->opar == oparity_binary);
1842 return get_irn_n(node, node->op->op_index);
1845 void set_binop_left(ir_node *node, ir_node *left)
1847 assert(node->op->opar == oparity_binary);
1848 set_irn_n(node, node->op->op_index, left);
1851 ir_node *get_binop_right(const ir_node *node)
1853 assert(node->op->opar == oparity_binary);
1854 return get_irn_n(node, node->op->op_index + 1);
1857 void set_binop_right(ir_node *node, ir_node *right)
1859 assert(node->op->opar == oparity_binary);
1860 set_irn_n(node, node->op->op_index + 1, right);
1863 int is_Phi0(const ir_node *n)
1867 return ((get_irn_op(n) == op_Phi) &&
1868 (get_irn_arity(n) == 0) &&
1869 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1872 ir_node **get_Phi_preds_arr(ir_node *node)
1874 assert(node->op == op_Phi);
1875 return (ir_node **)&(get_irn_in(node)[1]);
1878 int get_Phi_n_preds(const ir_node *node)
1880 assert(is_Phi(node) || is_Phi0(node));
1881 return (get_irn_arity(node));
/* NOTE(review): the body of this function beyond the assert appears
 * to have been lost in extraction (braces and any statements that set
 * the predecessor count are missing).  Restore from version control. */
1885 void set_Phi_n_preds(ir_node *node, int n_preds)
1887 assert(node->op == op_Phi);
1891 ir_node *get_Phi_pred(const ir_node *node, int pos)
1893 assert(is_Phi(node) || is_Phi0(node));
1894 return get_irn_n(node, pos);
1897 void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1899 assert(is_Phi(node) || is_Phi0(node));
1900 set_irn_n(node, pos, pred);
1903 ir_node *(get_Phi_next)(const ir_node *phi)
1905 return _get_Phi_next(phi);
1908 void (set_Phi_next)(ir_node *phi, ir_node *next)
1910 _set_Phi_next(phi, next);
1913 int is_memop(const ir_node *node)
1915 ir_opcode code = get_irn_opcode(node);
1916 return (code == iro_Load || code == iro_Store);
1919 ir_node *get_memop_mem(const ir_node *node)
1921 assert(is_memop(node));
1922 return get_irn_n(node, 0);
1925 void set_memop_mem(ir_node *node, ir_node *mem)
1927 assert(is_memop(node));
1928 set_irn_n(node, 0, mem);
1931 ir_node *get_memop_ptr(const ir_node *node)
1933 assert(is_memop(node));
1934 return get_irn_n(node, 1);
1937 void set_memop_ptr(ir_node *node, ir_node *ptr)
1939 assert(is_memop(node));
1940 set_irn_n(node, 1, ptr);
1943 ir_node *get_Load_mem(const ir_node *node)
1945 assert(is_Load(node));
1946 return get_irn_n(node, 0);
1949 void set_Load_mem(ir_node *node, ir_node *mem)
1951 assert(is_Load(node));
1952 set_irn_n(node, 0, mem);
1955 ir_node *get_Load_ptr(const ir_node *node)
1957 assert(is_Load(node));
1958 return get_irn_n(node, 1);
1961 void set_Load_ptr(ir_node *node, ir_node *ptr)
1963 assert(is_Load(node));
1964 set_irn_n(node, 1, ptr);
1967 ir_mode *get_Load_mode(const ir_node *node)
1969 assert(is_Load(node));
1970 return node->attr.load.mode;
1973 void set_Load_mode(ir_node *node, ir_mode *mode)
1975 assert(is_Load(node));
1976 node->attr.load.mode = mode;
1979 ir_volatility get_Load_volatility(const ir_node *node)
1981 assert(is_Load(node));
1982 return node->attr.load.volatility;
1985 void set_Load_volatility(ir_node *node, ir_volatility volatility)
1987 assert(is_Load(node));
1988 node->attr.load.volatility = volatility;
1991 ir_align get_Load_align(const ir_node *node)
1993 assert(is_Load(node));
1994 return node->attr.load.aligned;
1997 void set_Load_align(ir_node *node, ir_align align)
1999 assert(is_Load(node));
2000 node->attr.load.aligned = align;
2004 ir_node *get_Store_mem(const ir_node *node)
2006 assert(is_Store(node));
2007 return get_irn_n(node, 0);
2010 void set_Store_mem(ir_node *node, ir_node *mem)
2012 assert(is_Store(node));
2013 set_irn_n(node, 0, mem);
2016 ir_node *get_Store_ptr(const ir_node *node)
2018 assert(is_Store(node));
2019 return get_irn_n(node, 1);
2022 void set_Store_ptr(ir_node *node, ir_node *ptr)
2024 assert(is_Store(node));
2025 set_irn_n(node, 1, ptr);
2028 ir_node *get_Store_value(const ir_node *node)
2030 assert(is_Store(node));
2031 return get_irn_n(node, 2);
2034 void set_Store_value(ir_node *node, ir_node *value)
2036 assert(is_Store(node));
2037 set_irn_n(node, 2, value);
2040 ir_volatility get_Store_volatility(const ir_node *node)
2042 assert(is_Store(node));
2043 return node->attr.store.volatility;
2046 void set_Store_volatility(ir_node *node, ir_volatility volatility)
2048 assert(is_Store(node));
2049 node->attr.store.volatility = volatility;
2052 ir_align get_Store_align(const ir_node *node)
2054 assert(is_Store(node));
2055 return node->attr.store.aligned;
2058 void set_Store_align(ir_node *node, ir_align align)
2060 assert(is_Store(node));
2061 node->attr.store.aligned = align;
2065 ir_node *get_Alloc_mem(const ir_node *node)
2067 assert(is_Alloc(node));
2068 return get_irn_n(node, 0);
2071 void set_Alloc_mem(ir_node *node, ir_node *mem)
2073 assert(is_Alloc(node));
2074 set_irn_n(node, 0, mem);
2077 ir_node *get_Alloc_size(const ir_node *node)
2079 assert(is_Alloc(node));
2080 return get_irn_n(node, 1);
2083 void set_Alloc_size(ir_node *node, ir_node *size)
2085 assert(is_Alloc(node));
2086 set_irn_n(node, 1, size);
2089 ir_type *get_Alloc_type(ir_node *node)
2091 assert(is_Alloc(node));
2092 return node->attr.alloc.type;
2095 void set_Alloc_type(ir_node *node, ir_type *tp)
2097 assert(is_Alloc(node));
2098 node->attr.alloc.type = tp;
2101 ir_where_alloc get_Alloc_where(const ir_node *node)
2103 assert(is_Alloc(node));
2104 return node->attr.alloc.where;
2107 void set_Alloc_where(ir_node *node, ir_where_alloc where)
2109 assert(is_Alloc(node));
2110 node->attr.alloc.where = where;
2114 ir_node *get_Free_mem(const ir_node *node)
2116 assert(is_Free(node));
2117 return get_irn_n(node, 0);
2120 void set_Free_mem(ir_node *node, ir_node *mem)
2122 assert(is_Free(node));
2123 set_irn_n(node, 0, mem);
2126 ir_node *get_Free_ptr(const ir_node *node)
2128 assert(is_Free(node));
2129 return get_irn_n(node, 1);
2132 void set_Free_ptr(ir_node *node, ir_node *ptr)
2134 assert(is_Free(node));
2135 set_irn_n(node, 1, ptr);
2138 ir_node *get_Free_size(const ir_node *node)
2140 assert(is_Free(node));
2141 return get_irn_n(node, 2);
2144 void set_Free_size(ir_node *node, ir_node *size)
2146 assert(is_Free(node));
2147 set_irn_n(node, 2, size);
2150 ir_type *get_Free_type(ir_node *node)
2152 assert(is_Free(node));
2153 return node->attr.free.type;
2156 void set_Free_type(ir_node *node, ir_type *tp)
2158 assert(is_Free(node));
2159 node->attr.free.type = tp;
2162 ir_where_alloc get_Free_where(const ir_node *node)
2164 assert(is_Free(node));
2165 return node->attr.free.where;
2168 void set_Free_where(ir_node *node, ir_where_alloc where)
2170 assert(is_Free(node));
2171 node->attr.free.where = where;
2174 ir_node **get_Sync_preds_arr(ir_node *node)
2176 assert(is_Sync(node));
2177 return (ir_node **)&(get_irn_in(node)[1]);
2180 int get_Sync_n_preds(const ir_node *node)
2182 assert(is_Sync(node));
2183 return (get_irn_arity(node));
/* NOTE(review): the body of this function beyond the assert appears
 * to have been lost in extraction.  Restore from version control. */
2187 void set_Sync_n_preds(ir_node *node, int n_preds)
2189 assert(is_Sync(node));
2193 ir_node *get_Sync_pred(const ir_node *node, int pos)
2195 assert(is_Sync(node));
2196 return get_irn_n(node, pos);
2199 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
2201 assert(is_Sync(node));
2202 set_irn_n(node, pos, pred);
2205 /* Add a new Sync predecessor */
2206 void add_Sync_pred(ir_node *node, ir_node *pred)
2208 assert(is_Sync(node));
2209 add_irn_n(node, pred);
2212 /* Returns the source language type of a Proj node. */
2213 ir_type *get_Proj_type(ir_node *n)
/* NOTE(review): this switch is badly gutted by extraction -- the
 * declaration of pred_pred, several case labels (e.g. the iro_Proj and
 * iro_Load arms and their braces), the closing of the switch, and the
 * final `return tp;` are missing.  The surviving lines show the
 * intent: for a Proj of a Proj of Start/Call look up the method
 * parameter/result type, for a Proj of a Load derive the type from the
 * Sel entity of the load address.  Restore from version control. */
2215 ir_type *tp = firm_unknown_type;
2216 ir_node *pred = get_Proj_pred(n);
2218 switch (get_irn_opcode(pred)) {
2221 /* Deal with Start / Call here: we need to know the Proj Nr. */
2222 assert(get_irn_mode(pred) == mode_T);
2223 pred_pred = get_Proj_pred(pred);
2225 if (is_Start(pred_pred)) {
2226 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
2227 tp = get_method_param_type(mtp, get_Proj_proj(n));
2228 } else if (is_Call(pred_pred)) {
2229 ir_type *mtp = get_Call_type(pred_pred);
2230 tp = get_method_res_type(mtp, get_Proj_proj(n));
2233 case iro_Start: break;
2234 case iro_Call: break;
2236 ir_node *a = get_Load_ptr(pred);
2238 tp = get_entity_type(get_Sel_entity(a));
2246 ir_node *get_Proj_pred(const ir_node *node)
2248 assert(is_Proj(node));
2249 return get_irn_n(node, 0);
2252 void set_Proj_pred(ir_node *node, ir_node *pred)
2254 assert(is_Proj(node));
2255 set_irn_n(node, 0, pred);
2258 long get_Proj_proj(const ir_node *node)
2260 #ifdef INTERPROCEDURAL_VIEW
2261 ir_opcode code = get_irn_opcode(node);
2263 if (code == iro_Proj) {
2264 return node->attr.proj;
2267 assert(code == iro_Filter);
2268 return node->attr.filter.proj;
2271 assert(is_Proj(node));
2272 return node->attr.proj;
2273 #endif /* INTERPROCEDURAL_VIEW */
2276 void set_Proj_proj(ir_node *node, long proj)
2278 #ifdef INTERPROCEDURAL_VIEW
2279 ir_opcode code = get_irn_opcode(node);
2281 if (code == iro_Proj) {
2282 node->attr.proj = proj;
2285 assert(code == iro_Filter);
2286 node->attr.filter.proj = proj;
2289 assert(is_Proj(node));
2290 node->attr.proj = proj;
2291 #endif /* INTERPROCEDURAL_VIEW */
2294 /* Returns non-zero if a node is a routine parameter. */
2295 int (is_arg_Proj)(const ir_node *node)
2297 return _is_arg_Proj(node);
2300 ir_node **get_Tuple_preds_arr(ir_node *node)
2302 assert(is_Tuple(node));
2303 return (ir_node **)&(get_irn_in(node)[1]);
2306 int get_Tuple_n_preds(const ir_node *node)
2308 assert(is_Tuple(node));
2309 return get_irn_arity(node);
/* NOTE(review): the body of this function beyond the assert appears
 * to have been lost in extraction.  Restore from version control. */
2313 void set_Tuple_n_preds(ir_node *node, int n_preds)
2315 assert(is_Tuple(node));
2319 ir_node *get_Tuple_pred(const ir_node *node, int pos)
2321 assert(is_Tuple(node));
2322 return get_irn_n(node, pos);
2325 void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
2327 assert(is_Tuple(node));
2328 set_irn_n(node, pos, pred);
2331 ir_node *get_Id_pred(const ir_node *node)
2333 assert(is_Id(node));
2334 return get_irn_n(node, 0);
2337 void set_Id_pred(ir_node *node, ir_node *pred)
2339 assert(is_Id(node));
2340 set_irn_n(node, 0, pred);
2343 ir_node *get_Confirm_value(const ir_node *node)
2345 assert(is_Confirm(node));
2346 return get_irn_n(node, 0);
2349 void set_Confirm_value(ir_node *node, ir_node *value)
2351 assert(is_Confirm(node));
2352 set_irn_n(node, 0, value);
2355 ir_node *get_Confirm_bound(const ir_node *node)
2357 assert(is_Confirm(node));
2358 return get_irn_n(node, 1);
2361 void set_Confirm_bound(ir_node *node, ir_node *bound)
2363 assert(is_Confirm(node));
2364 set_irn_n(node, 0, bound);
2367 pn_Cmp get_Confirm_cmp(const ir_node *node)
2369 assert(is_Confirm(node));
2370 return node->attr.confirm.cmp;
2373 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp)
2375 assert(is_Confirm(node));
2376 node->attr.confirm.cmp = cmp;
2379 ir_node *get_Filter_pred(ir_node *node)
2381 assert(is_Filter(node));
2385 void set_Filter_pred(ir_node *node, ir_node *pred)
2387 assert(is_Filter(node));
2391 long get_Filter_proj(ir_node *node)
2393 assert(is_Filter(node));
2394 return node->attr.filter.proj;
2397 void set_Filter_proj(ir_node *node, long proj)
2399 assert(is_Filter(node));
2400 node->attr.filter.proj = proj;
2403 /* Don't use get_irn_arity, get_irn_n in implementation as access
2404 shall work independent of view!!! */
2405 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in)
2407 assert(is_Filter(node));
2408 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2409 ir_graph *irg = get_irn_irg(node);
2410 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2411 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
2412 node->attr.filter.in_cg[0] = node->in[0];
2414 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2417 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred)
2419 assert(is_Filter(node) && node->attr.filter.in_cg &&
2420 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2421 node->attr.filter.in_cg[pos + 1] = pred;
2424 int get_Filter_n_cg_preds(ir_node *node)
2426 assert(is_Filter(node) && node->attr.filter.in_cg);
2427 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2430 ir_node *get_Filter_cg_pred(ir_node *node, int pos)
2433 assert(is_Filter(node) && node->attr.filter.in_cg &&
2435 arity = ARR_LEN(node->attr.filter.in_cg);
2436 assert(pos < arity - 1);
2437 return node->attr.filter.in_cg[pos + 1];
2441 ir_node *get_Mux_sel(const ir_node *node)
2443 assert(is_Mux(node));
2447 void set_Mux_sel(ir_node *node, ir_node *sel)
2449 assert(is_Mux(node));
2453 ir_node *get_Mux_false(const ir_node *node)
2455 assert(is_Mux(node));
2459 void set_Mux_false(ir_node *node, ir_node *ir_false)
2461 assert(is_Mux(node));
2462 node->in[2] = ir_false;
2465 ir_node *get_Mux_true(const ir_node *node)
2467 assert(is_Mux(node));
2471 void set_Mux_true(ir_node *node, ir_node *ir_true)
2473 assert(is_Mux(node));
2474 node->in[3] = ir_true;
2478 ir_node *get_CopyB_mem(const ir_node *node)
2480 assert(is_CopyB(node));
2481 return get_irn_n(node, 0);
2484 void set_CopyB_mem(ir_node *node, ir_node *mem)
2486 assert(node->op == op_CopyB);
2487 set_irn_n(node, 0, mem);
2490 ir_node *get_CopyB_dst(const ir_node *node)
2492 assert(is_CopyB(node));
2493 return get_irn_n(node, 1);
2496 void set_CopyB_dst(ir_node *node, ir_node *dst)
2498 assert(is_CopyB(node));
2499 set_irn_n(node, 1, dst);
2502 ir_node *get_CopyB_src(const ir_node *node)
2504 assert(is_CopyB(node));
2505 return get_irn_n(node, 2);
2508 void set_CopyB_src(ir_node *node, ir_node *src)
2510 assert(is_CopyB(node));
2511 set_irn_n(node, 2, src);
2514 ir_type *get_CopyB_type(ir_node *node)
2516 assert(is_CopyB(node));
2517 return node->attr.copyb.type;
2520 void set_CopyB_type(ir_node *node, ir_type *data_type)
2522 assert(is_CopyB(node) && data_type);
2523 node->attr.copyb.type = data_type;
2527 ir_type *get_InstOf_type(ir_node *node)
2529 assert(node->op == op_InstOf);
2530 return node->attr.instof.type;
2533 void set_InstOf_type(ir_node *node, ir_type *type)
2535 assert(node->op == op_InstOf);
2536 node->attr.instof.type = type;
2539 ir_node *get_InstOf_store(const ir_node *node)
2541 assert(node->op == op_InstOf);
2542 return get_irn_n(node, 0);
2545 void set_InstOf_store(ir_node *node, ir_node *obj)
2547 assert(node->op == op_InstOf);
2548 set_irn_n(node, 0, obj);
2551 ir_node *get_InstOf_obj(const ir_node *node)
2553 assert(node->op == op_InstOf);
2554 return get_irn_n(node, 1);
2557 void set_InstOf_obj(ir_node *node, ir_node *obj)
2559 assert(node->op == op_InstOf);
2560 set_irn_n(node, 1, obj);
2563 /* Returns the memory input of a Raise operation. */
2564 ir_node *get_Raise_mem(const ir_node *node)
2566 assert(is_Raise(node));
2567 return get_irn_n(node, 0);
2570 void set_Raise_mem(ir_node *node, ir_node *mem)
2572 assert(is_Raise(node));
2573 set_irn_n(node, 0, mem);
2576 ir_node *get_Raise_exo_ptr(const ir_node *node)
2578 assert(is_Raise(node));
2579 return get_irn_n(node, 1);
2582 void set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr)
2584 assert(is_Raise(node));
2585 set_irn_n(node, 1, exo_ptr);
2590 /* Returns the memory input of a Bound operation. */
2591 ir_node *get_Bound_mem(const ir_node *bound)
2593 assert(is_Bound(bound));
2594 return get_irn_n(bound, 0);
2597 void set_Bound_mem(ir_node *bound, ir_node *mem)
2599 assert(is_Bound(bound));
2600 set_irn_n(bound, 0, mem);
2603 /* Returns the index input of a Bound operation. */
2604 ir_node *get_Bound_index(const ir_node *bound)
2606 assert(is_Bound(bound));
2607 return get_irn_n(bound, 1);
2610 void set_Bound_index(ir_node *bound, ir_node *idx)
2612 assert(is_Bound(bound));
2613 set_irn_n(bound, 1, idx);
2616 /* Returns the lower bound input of a Bound operation. */
2617 ir_node *get_Bound_lower(const ir_node *bound)
2619 assert(is_Bound(bound));
2620 return get_irn_n(bound, 2);
2623 void set_Bound_lower(ir_node *bound, ir_node *lower)
2625 assert(is_Bound(bound));
2626 set_irn_n(bound, 2, lower);
2629 /* Returns the upper bound input of a Bound operation. */
2630 ir_node *get_Bound_upper(const ir_node *bound)
2632 assert(is_Bound(bound));
2633 return get_irn_n(bound, 3);
2636 void set_Bound_upper(ir_node *bound, ir_node *upper)
2638 assert(is_Bound(bound));
2639 set_irn_n(bound, 3, upper);
2642 /* Return the operand of a Pin node. */
2643 ir_node *get_Pin_op(const ir_node *pin)
2645 assert(is_Pin(pin));
2646 return get_irn_n(pin, 0);
2649 void set_Pin_op(ir_node *pin, ir_node *node)
2651 assert(is_Pin(pin));
2652 set_irn_n(pin, 0, node);
2655 /* Return the assembler text of an ASM pseudo node. */
2656 ident *get_ASM_text(const ir_node *node)
2658 assert(is_ASM(node));
2659 return node->attr.assem.asm_text;
2662 /* Return the number of input constraints for an ASM node. */
2663 int get_ASM_n_input_constraints(const ir_node *node)
2665 assert(is_ASM(node));
2666 return ARR_LEN(node->attr.assem.inputs);
2669 /* Return the input constraints for an ASM node. This is a flexible array. */
2670 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node)
2672 assert(is_ASM(node));
2673 return node->attr.assem.inputs;
2676 /* Return the number of output constraints for an ASM node. */
2677 int get_ASM_n_output_constraints(const ir_node *node)
2679 assert(is_ASM(node));
2680 return ARR_LEN(node->attr.assem.outputs);
2683 /* Return the output constraints for an ASM node. */
2684 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node)
2686 assert(is_ASM(node));
2687 return node->attr.assem.outputs;
2690 /* Return the number of clobbered registers for an ASM node. */
2691 int get_ASM_n_clobbers(const ir_node *node)
2693 assert(is_ASM(node));
2694 return ARR_LEN(node->attr.assem.clobber);
2697 /* Return the list of clobbered registers for an ASM node. */
2698 ident **get_ASM_clobbers(const ir_node *node)
2700 assert(is_ASM(node));
2701 return node->attr.assem.clobber;
2704 /* returns the graph of a node */
2705 ir_graph *get_irn_irg(const ir_node *node)
2708 * Do not use get_nodes_Block() here, because this
2709 * will check the pinned state.
2710 * However even a 'wrong' block is always in the proper
2713 if (! is_Block(node))
2714 node = get_irn_n(node, -1);
2715 /* note that get_Block_irg() can handle Bad nodes */
2716 return get_Block_irg(node);
2720 /*----------------------------------------------------------------*/
2721 /* Auxiliary routines */
2722 /*----------------------------------------------------------------*/
/* NOTE(review): the three skip functions below are incomplete -- the
 * NULL guards, the is_Proj() conditions, the return type line of
 * skip_Proj_const, the local declarations of skip_Tuple (pred, op),
 * and the final `return node;` statements were lost in extraction.
 * The surviving lines show the intent: step from a Proj to its
 * predecessor, and resolve Proj-of-Tuple chains (including nested
 * Tuples) to the real predecessor.  Restore from version control. */
2724 ir_node *skip_Proj(ir_node *node)
2726 /* don't assert node !!! */
2731 node = get_Proj_pred(node);
2737 skip_Proj_const(const ir_node *node)
2739 /* don't assert node !!! */
2744 node = get_Proj_pred(node);
2749 ir_node *skip_Tuple(ir_node *node)
2755 if (is_Proj(node)) {
2756 pred = get_Proj_pred(node);
2757 op = get_irn_op(pred);
2760 * Looks strange but calls get_irn_op() only once
2761 * in most often cases.
2763 if (op == op_Proj) { /* nested Tuple ? */
2764 pred = skip_Tuple(pred);
2766 if (is_Tuple(pred)) {
2767 node = get_Tuple_pred(pred, get_Proj_proj(node));
2770 } else if (op == op_Tuple) {
2771 node = get_Tuple_pred(pred, get_Proj_proj(node));
2778 /* returns operand of node if node is a Cast */
2779 ir_node *skip_Cast(ir_node *node)
2782 return get_Cast_op(node);
2786 /* returns operand of node if node is a Cast */
2787 const ir_node *skip_Cast_const(const ir_node *node)
2790 return get_Cast_op(node);
2794 /* returns operand of node if node is a Pin */
2795 ir_node *skip_Pin(ir_node *node)
2798 return get_Pin_op(node);
2802 /* returns operand of node if node is a Confirm */
2803 ir_node *skip_Confirm(ir_node *node)
2805 if (is_Confirm(node))
2806 return get_Confirm_value(node);
2810 /* skip all high-level ops */
2811 ir_node *skip_HighLevel_ops(ir_node *node)
2813 while (is_op_highlevel(get_irn_op(node))) {
2814 node = get_irn_n(node, 0);
2820 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2821 * than any other approach, as Id chains are resolved and all point to the real node, or
2822 * all id's are self loops.
2824 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2825 * a little bit "hand optimized".
2827 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
/* NOTE(review): this hand-optimized, order-sensitive function is
 * incomplete -- the declaration of pred, the assignment of rem_pred,
 * the closing of the resolve branch and the final return were lost in
 * extraction.  Do not attempt to hand-reconstruct; restore from
 * version control. */
2829 ir_node *skip_Id(ir_node *node)
2832 /* don't assert node !!! */
2834 if (!node || (node->op != op_Id)) return node;
2836 /* Don't use get_Id_pred(): We get into an endless loop for
2837 self-referencing Ids. */
2838 pred = node->in[0+1];
2840 if (pred->op != op_Id) return pred;
2842 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2843 ir_node *rem_pred, *res;
2845 if (pred->op != op_Id) return pred; /* shortcut */
2848 assert(get_irn_arity (node) > 0);
2850 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2851 res = skip_Id(rem_pred);
2852 if (res->op == op_Id) /* self-loop */ return node;
2854 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* NOTE(review): skip_Id_and_store below is also incomplete -- the
 * declaration of n, the predecessor load and the store back through
 * the node pointer are missing. */
2861 void skip_Id_and_store(ir_node **node)
2865 if (!n || (n->op != op_Id)) return;
2867 /* Don't use get_Id_pred(): We get into an endless loop for
2868 self-referencing Ids. */
2872 int (is_strictConv)(const ir_node *node)
2874 return _is_strictConv(node);
2877 int (is_no_Block)(const ir_node *node)
2879 return _is_no_Block(node);
2882 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
2883 int (is_SymConst_addr_ent)(const ir_node *node)
2885 return _is_SymConst_addr_ent(node);
2888 /* Returns true if the operation manipulates control flow. */
2889 int is_cfop(const ir_node *node)
2891 return is_op_cfopcode(get_irn_op(node));
2894 /* Returns true if the operation manipulates interprocedural control flow:
2895 CallBegin, EndReg, EndExcept */
2896 int is_ip_cfop(const ir_node *node)
2898 return is_ip_cfopcode(get_irn_op(node));
2901 /* Returns true if the operation can change the control flow because
2903 int is_fragile_op(const ir_node *node)
2905 return is_op_fragile(get_irn_op(node));
2908 /* Returns the memory operand of fragile operations. */
2909 ir_node *get_fragile_op_mem(ir_node *node)
/* NOTE(review): the case labels of this switch (the list of fragile
 * opcodes that share the pn_Generic_M memory input), the braces, and
 * the function's NULL/fallback return were lost in extraction.
 * Restore from version control. */
2911 assert(node && is_fragile_op(node));
2913 switch (get_irn_opcode(node)) {
2924 return get_irn_n(node, pn_Generic_M);
2929 assert(0 && "should not be reached");
2934 /* Returns the result mode of a Div operation. */
2935 ir_mode *get_divop_resmod(const ir_node *node)
2937 switch (get_irn_opcode(node)) {
2938 case iro_Quot : return get_Quot_resmode(node);
2939 case iro_DivMod: return get_DivMod_resmode(node);
2940 case iro_Div : return get_Div_resmode(node);
2941 case iro_Mod : return get_Mod_resmode(node);
2943 assert(0 && "should not be reached");
/* Returns true if the operation is a forking control flow operation. */
int (is_irn_forking)(const ir_node *node)
{
	/* The parentheses around the function name suppress expansion of
	 * the function-like macro of the same name, so a real linkable
	 * function wrapping the inline _xxx variant is emitted here.
	 * The same idiom applies to all wrappers below. */
	return _is_irn_forking(node);
}

/* Copies the node attributes of old_node to new_node. */
void (copy_node_attr)(const ir_node *old_node, ir_node *new_node)
{
	_copy_node_attr(old_node, new_node);
}

/* Return the type associated with the value produced by n
 * if the node remarks this type as it is the case for
 * Cast, Const, SymConst and some Proj nodes. */
ir_type *(get_irn_type)(ir_node *node)
{
	return _get_irn_type(node);
}

/* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
 * Cast) or NULL. */
ir_type *(get_irn_type_attr)(ir_node *node)
{
	return _get_irn_type_attr(node);
}

/* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
ir_entity *(get_irn_entity_attr)(ir_node *node)
{
	return _get_irn_entity_attr(node);
}

/* Returns non-zero for constant-like nodes. */
int (is_irn_constlike)(const ir_node *node)
{
	return _is_irn_constlike(node);
}

/*
 * Returns non-zero for nodes that are allowed to have keep-alives and
 * are neither Block nor PhiM.
 */
int (is_irn_keep)(const ir_node *node)
{
	return _is_irn_keep(node);
}

/*
 * Returns non-zero for nodes that are always placed in the start block.
 */
int (is_irn_start_block_placed)(const ir_node *node)
{
	return _is_irn_start_block_placed(node);
}

/* Returns non-zero for nodes that are machine operations. */
int (is_irn_machine_op)(const ir_node *node)
{
	return _is_irn_machine_op(node);
}

/* Returns non-zero for nodes that are machine operands. */
int (is_irn_machine_operand)(const ir_node *node)
{
	return _is_irn_machine_operand(node);
}

/* Returns non-zero for nodes that have the n'th user machine flag set. */
int (is_irn_machine_user)(const ir_node *node, unsigned n)
{
	return _is_irn_machine_user(node, n);
}

/* Returns non-zero for nodes that are CSE neutral to their users. */
int (is_irn_cse_neutral)(const ir_node *node)
{
	return _is_irn_cse_neutral(node);
}
3027 /* Gets the string representation of the jump prediction .*/
3028 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
3030 #define X(a) case a: return #a;
3032 X(COND_JMP_PRED_NONE);
3033 X(COND_JMP_PRED_TRUE);
3034 X(COND_JMP_PRED_FALSE);
/* Returns the conditional jump prediction of a Cond node.
 * (Parenthesized name: suppress the same-named macro so a real
 * out-of-line function is emitted.) */
cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond)
{
	return _get_Cond_jmp_pred(cond);
}

/* Sets a new conditional jump prediction on a Cond node. */
void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred)
{
	_set_Cond_jmp_pred(cond, pred);
}
3052 /** the get_type operation must be always implemented and return a firm type */
3053 static ir_type *get_Default_type(ir_node *n)
3056 return get_unknown_type();
3059 /* Sets the get_type operation for an ir_op_ops. */
3060 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops)
3063 case iro_Const: ops->get_type = get_Const_type; break;
3064 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
3065 case iro_Cast: ops->get_type = get_Cast_type; break;
3066 case iro_Proj: ops->get_type = get_Proj_type; break;
3068 /* not allowed to be NULL */
3069 if (! ops->get_type)
3070 ops->get_type = get_Default_type;
3076 /** Return the attribute type of a SymConst node if exists */
3077 static ir_type *get_SymConst_attr_type(ir_node *self)
3079 symconst_kind kind = get_SymConst_kind(self);
3080 if (SYMCONST_HAS_TYPE(kind))
3081 return get_SymConst_type(self);
3085 /** Return the attribute entity of a SymConst node if exists */
3086 static ir_entity *get_SymConst_attr_entity(ir_node *self)
3088 symconst_kind kind = get_SymConst_kind(self);
3089 if (SYMCONST_HAS_ENT(kind))
3090 return get_SymConst_entity(self);
3094 /** the get_type_attr operation must be always implemented */
3095 static ir_type *get_Null_type(ir_node *n)
3098 return firm_unknown_type;
3101 /* Sets the get_type operation for an ir_op_ops. */
3102 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops)
3105 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
3106 case iro_Call: ops->get_type_attr = get_Call_type; break;
3107 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
3108 case iro_Free: ops->get_type_attr = get_Free_type; break;
3109 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
3111 /* not allowed to be NULL */
3112 if (! ops->get_type_attr)
3113 ops->get_type_attr = get_Null_type;
3119 /** the get_entity_attr operation must be always implemented */
3120 static ir_entity *get_Null_ent(ir_node *n)
3126 /* Sets the get_type operation for an ir_op_ops. */
3127 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops)
3130 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
3131 case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
3133 /* not allowed to be NULL */
3134 if (! ops->get_entity_attr)
3135 ops->get_entity_attr = get_Null_ent;
/* Sets the debug information of a node.
 * (Parenthesized name: suppress the same-named macro so a real
 * out-of-line function is emitted.) */
void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
{
	_set_irn_dbg_info(n, db);
}

/**
 * Returns the debug information of a node.
 *
 * @param n   The node.
 */
dbg_info *(get_irn_dbg_info)(const ir_node *n)
{
	return _get_irn_dbg_info(n);
}

/* Checks whether a node represents a global address:
 * i.e. a SymConst addressing an entity. */
int is_Global(const ir_node *node)
{
	return is_SymConst_addr_ent(node);
}

/* Returns the entity of a global address (the SymConst's entity). */
ir_entity *get_Global_entity(const ir_node *node)
{
	return get_SymConst_entity(node);
}
3170 * Calculate a hash value of a node.
3172 unsigned firm_default_hash(const ir_node *node)
3177 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
3178 h = irn_arity = get_irn_intra_arity(node);
3180 /* consider all in nodes... except the block if not a control flow. */
3181 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
3182 ir_node *pred = get_irn_intra_n(node, i);
3183 if (is_irn_cse_neutral(pred))
3186 h = 9*h + HASH_PTR(pred);
3190 h = 9*h + HASH_PTR(get_irn_mode(node));
3192 h = 9*h + HASH_PTR(get_irn_op(node));
3195 } /* firm_default_hash */
3197 /* include generated code */
3198 #include "gen_irnode.c.inl"