2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
/* some constants fixing the positions of nodes' predecessors
   in the in array */
#define CALL_PARAM_OFFSET     2
#define BUILDIN_PARAM_OFFSET  1
#define SEL_INDEX_OFFSET      2
#define RETURN_RESULT_OFFSET  1  /* mem is not a result */
#define END_KEEPALIVE_OFFSET  0
/** Names of the pn_Cmp constants, indexed by their numeric value. */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 *
 * @param pnc  the pn_Cmp constant; must lie in [0, 16)
 * @return     the static name string; do not free
 */
const char *get_pnc_string(int pnc)
{
	assert(pnc >= 0 && pnc <
	       (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
	return pnc_name_arr[pnc];
}
75 * Calculates the negated (Complement(R)) pnc condition.
77 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode)
81 /* do NOT add the Uo bit for non-floating point values */
82 if (! mode_is_float(mode))
88 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
89 pn_Cmp get_inversed_pnc(long pnc)
91 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
92 long lesser = pnc & pn_Cmp_Lt;
93 long greater = pnc & pn_Cmp_Gt;
95 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;

/* register new space for every node */
unsigned firm_register_additional_node_data(unsigned size)
{
	assert(!forbid_new_data && "Too late to register additional node data");

	/* the assert vanishes in release builds: refuse late registration
	   gracefully instead of corrupting already-created nodes */
	if (forbid_new_data)
		return 0;

	/* returns the accumulated extra size, i.e. this request's offset + size */
	return firm_add_node_size += size;
}
125 void init_irnode(void)
127 /* Forbid the addition of new data to an ir node. */
131 struct struct_align {
141 * irnode constructor.
142 * Create a new irnode in irg, with an op, mode, arity and
143 * some incoming irnodes.
144 * If arity is negative, a node with a dynamic array is created.
146 ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
147 ir_mode *mode, int arity, ir_node **in)
150 unsigned align = offsetof(struct struct_align, s) - 1;
151 unsigned add_node_size = (firm_add_node_size + align) & ~align;
152 size_t node_size = offsetof(ir_node, attr) + op->attr_size + add_node_size;
159 p = obstack_alloc(irg->obst, node_size);
160 memset(p, 0, node_size);
161 res = (ir_node *)(p + add_node_size);
163 res->kind = k_ir_node;
167 res->node_idx = irg_register_node_idx(irg, res);
172 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
174 /* not nice but necessary: End and Sync must always have a flexible array */
175 if (op == op_End || op == op_Sync)
176 res->in = NEW_ARR_F(ir_node *, (arity+1));
178 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
179 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
183 set_irn_dbg_info(res, db);
185 res->node_nr = get_irp_new_node_nr();
187 for (i = 0; i < EDGE_KIND_LAST; ++i) {
188 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
189 /* edges will be build immediately */
190 res->edge_info[i].edges_built = 1;
191 res->edge_info[i].out_count = 0;
194 /* don't put this into the for loop, arity is -1 for some nodes! */
195 edges_notify_edge(res, -1, res->in[0], NULL, irg);
196 for (i = 1; i <= arity; ++i)
197 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
199 hook_new_node(irg, res);
200 if (get_irg_phase_state(irg) == phase_backend) {
201 be_info_new_node(res);
207 /*-- getting some parameters from ir_nodes --*/
209 int (is_ir_node)(const void *thing)
211 return _is_ir_node(thing);
214 int (get_irn_intra_arity)(const ir_node *node)
216 return _get_irn_intra_arity(node);
219 int (get_irn_inter_arity)(const ir_node *node)
221 return _get_irn_inter_arity(node);
224 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
226 int (get_irn_arity)(const ir_node *node)
228 return _get_irn_arity(node);
231 /* Returns the array with ins. This array is shifted with respect to the
232 array accessed by get_irn_n: The block operand is at position 0 not -1.
233 (@@@ This should be changed.)
234 The order of the predecessors in this array is not guaranteed, except that
235 lists of operands as predecessors of Block or arguments of a Call are
237 ir_node **get_irn_in(const ir_node *node)
240 #ifdef INTERPROCEDURAL_VIEW
241 if (get_interprocedural_view()) { /* handle Filter and Block specially */
242 if (get_irn_opcode(node) == iro_Filter) {
243 assert(node->attr.filter.in_cg);
244 return node->attr.filter.in_cg;
245 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
246 return node->attr.block.in_cg;
248 /* else fall through */
250 #endif /* INTERPROCEDURAL_VIEW */
254 void set_irn_in(ir_node *node, int arity, ir_node **in)
258 ir_graph *irg = get_irn_irg(node);
261 #ifdef INTERPROCEDURAL_VIEW
262 if (get_interprocedural_view()) { /* handle Filter and Block specially */
263 ir_opcode code = get_irn_opcode(node);
264 if (code == iro_Filter) {
265 assert(node->attr.filter.in_cg);
266 pOld_in = &node->attr.filter.in_cg;
267 } else if (code == iro_Block && node->attr.block.in_cg) {
268 pOld_in = &node->attr.block.in_cg;
273 #endif /* INTERPROCEDURAL_VIEW */
277 for (i = 0; i < arity; i++) {
278 if (i < ARR_LEN(*pOld_in)-1)
279 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
281 edges_notify_edge(node, i, in[i], NULL, irg);
283 for (;i < ARR_LEN(*pOld_in)-1; i++) {
284 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
287 if (arity != ARR_LEN(*pOld_in) - 1) {
288 ir_node * block = (*pOld_in)[0];
289 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
290 (*pOld_in)[0] = block;
292 fix_backedges(irg->obst, node);
294 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
297 ir_node *(get_irn_intra_n)(const ir_node *node, int n)
299 return _get_irn_intra_n(node, n);
302 ir_node *(get_irn_inter_n)(const ir_node *node, int n)
304 return _get_irn_inter_n(node, n);
307 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
309 ir_node *(get_irn_n)(const ir_node *node, int n)
311 return _get_irn_n(node, n);
314 void set_irn_n(ir_node *node, int n, ir_node *in)
316 assert(node && node->kind == k_ir_node);
318 assert(n < get_irn_arity(node));
319 assert(in && in->kind == k_ir_node);
321 #ifdef INTERPROCEDURAL_VIEW
322 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
323 /* Change block pred in both views! */
324 node->in[n + 1] = in;
325 assert(node->attr.filter.in_cg);
326 node->attr.filter.in_cg[n + 1] = in;
329 if (get_interprocedural_view()) { /* handle Filter and Block specially */
330 if (get_irn_opcode(node) == iro_Filter) {
331 assert(node->attr.filter.in_cg);
332 node->attr.filter.in_cg[n + 1] = in;
334 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
335 node->attr.block.in_cg[n + 1] = in;
338 /* else fall through */
340 #endif /* INTERPROCEDURAL_VIEW */
343 hook_set_irn_n(node, n, in, node->in[n + 1]);
345 /* Here, we rely on src and tgt being in the current ir graph */
346 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
348 node->in[n + 1] = in;
351 int add_irn_n(ir_node *node, ir_node *in)
354 ir_graph *irg = get_irn_irg(node);
356 assert(node->op->opar == oparity_dynamic);
357 pos = ARR_LEN(node->in) - 1;
358 ARR_APP1(ir_node *, node->in, in);
359 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
362 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
367 void del_Sync_n(ir_node *n, int i)
369 int arity = get_Sync_n_preds(n);
370 ir_node *last_pred = get_Sync_pred(n, arity - 1);
371 set_Sync_pred(n, i, last_pred);
372 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
373 ARR_SHRINKLEN(get_irn_in(n), arity);
376 int (get_irn_deps)(const ir_node *node)
378 return _get_irn_deps(node);
381 ir_node *(get_irn_dep)(const ir_node *node, int pos)
383 return _get_irn_dep(node, pos);
386 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
388 _set_irn_dep(node, pos, dep);
391 int add_irn_dep(ir_node *node, ir_node *dep)
395 /* DEP edges are only allowed in backend phase */
396 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
397 if (node->deps == NULL) {
398 node->deps = NEW_ARR_F(ir_node *, 1);
404 for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
405 if (node->deps[i] == NULL)
408 if (node->deps[i] == dep)
412 if (first_zero >= 0) {
413 node->deps[first_zero] = dep;
416 ARR_APP1(ir_node *, node->deps, dep);
421 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
426 void add_irn_deps(ir_node *tgt, ir_node *src)
430 for (i = 0, n = get_irn_deps(src); i < n; ++i)
431 add_irn_dep(tgt, get_irn_dep(src, i));
435 ir_mode *(get_irn_mode)(const ir_node *node)
437 return _get_irn_mode(node);
440 void (set_irn_mode)(ir_node *node, ir_mode *mode)
442 _set_irn_mode(node, mode);
445 /** Gets the string representation of the mode .*/
446 const char *get_irn_modename(const ir_node *node)
449 return get_mode_name(node->mode);
452 ident *get_irn_modeident(const ir_node *node)
455 return get_mode_ident(node->mode);
458 ir_op *(get_irn_op)(const ir_node *node)
460 return _get_irn_op(node);
463 /* should be private to the library: */
464 void (set_irn_op)(ir_node *node, ir_op *op)
466 _set_irn_op(node, op);
469 unsigned (get_irn_opcode)(const ir_node *node)
471 return _get_irn_opcode(node);
474 const char *get_irn_opname(const ir_node *node)
477 if (is_Phi0(node)) return "Phi0";
478 return get_id_str(node->op->name);
481 ident *get_irn_opident(const ir_node *node)
484 return node->op->name;
487 ir_visited_t (get_irn_visited)(const ir_node *node)
489 return _get_irn_visited(node);
492 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
494 _set_irn_visited(node, visited);
497 void (mark_irn_visited)(ir_node *node)
499 _mark_irn_visited(node);
502 int (irn_visited)(const ir_node *node)
504 return _irn_visited(node);
507 int (irn_visited_else_mark)(ir_node *node)
509 return _irn_visited_else_mark(node);
512 void (set_irn_link)(ir_node *node, void *link)
514 _set_irn_link(node, link);
517 void *(get_irn_link)(const ir_node *node)
519 return _get_irn_link(node);
522 op_pin_state (get_irn_pinned)(const ir_node *node)
524 return _get_irn_pinned(node);
527 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
529 return _is_irn_pinned_in_irg(node);
532 void set_irn_pinned(ir_node *node, op_pin_state state)
534 /* due to optimization an opt may be turned into a Tuple */
538 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
539 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
541 node->attr.except.pin_state = state;
544 /* Outputs a unique number for this node */
545 long get_irn_node_nr(const ir_node *node)
548 return node->node_nr;
551 const_attr *get_irn_const_attr(ir_node *node)
553 assert(is_Const(node));
554 return &node->attr.con;
557 long get_irn_proj_attr(ir_node *node)
559 /* BEWARE: check for true Proj node here, no Filter */
560 assert(node->op == op_Proj);
561 return node->attr.proj;
564 alloc_attr *get_irn_alloc_attr(ir_node *node)
566 assert(is_Alloc(node));
567 return &node->attr.alloc;
570 free_attr *get_irn_free_attr(ir_node *node)
572 assert(is_Free(node));
573 return &node->attr.free;
576 symconst_attr *get_irn_symconst_attr(ir_node *node)
578 assert(is_SymConst(node));
579 return &node->attr.symc;
582 call_attr *get_irn_call_attr(ir_node *node)
584 assert(is_Call(node));
585 return &node->attr.call;
588 sel_attr *get_irn_sel_attr(ir_node *node)
590 assert(is_Sel(node));
591 return &node->attr.sel;
594 phi_attr *get_irn_phi_attr(ir_node *node)
596 return &node->attr.phi;
599 block_attr *get_irn_block_attr(ir_node *node)
601 assert(is_Block(node));
602 return &node->attr.block;
605 load_attr *get_irn_load_attr(ir_node *node)
607 assert(is_Load(node));
608 return &node->attr.load;
611 store_attr *get_irn_store_attr(ir_node *node)
613 assert(is_Store(node));
614 return &node->attr.store;
617 except_attr *get_irn_except_attr(ir_node *node)
619 assert(node->op == op_Div || node->op == op_Quot ||
620 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
621 return &node->attr.except;
624 divmod_attr *get_irn_divmod_attr(ir_node *node)
626 assert(node->op == op_Div || node->op == op_Quot ||
627 node->op == op_DivMod || node->op == op_Mod);
628 return &node->attr.divmod;
631 builtin_attr *get_irn_builtin_attr(ir_node *node)
633 assert(is_Builtin(node));
634 return &node->attr.builtin;
637 void *(get_irn_generic_attr)(ir_node *node)
639 assert(is_ir_node(node));
640 return _get_irn_generic_attr(node);
643 const void *(get_irn_generic_attr_const)(const ir_node *node)
645 assert(is_ir_node(node));
646 return _get_irn_generic_attr_const(node);
649 unsigned (get_irn_idx)(const ir_node *node)
651 assert(is_ir_node(node));
652 return _get_irn_idx(node);
655 int get_irn_pred_pos(ir_node *node, ir_node *arg)
658 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
659 if (get_irn_n(node, i) == arg)
665 /** manipulate fields of individual nodes **/
667 /* this works for all except Block */
668 ir_node *get_nodes_block(const ir_node *node)
670 assert(node->op != op_Block);
671 return get_irn_n(node, -1);
674 void set_nodes_block(ir_node *node, ir_node *block)
676 assert(node->op != op_Block);
677 set_irn_n(node, -1, block);
680 /* this works for all except Block */
681 ir_node *get_nodes_MacroBlock(const ir_node *node)
683 assert(node->op != op_Block);
684 return get_Block_MacroBlock(get_irn_n(node, -1));
687 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
688 * from Start. If so returns frame type, else Null. */
689 ir_type *is_frame_pointer(const ir_node *n)
691 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
692 ir_node *start = get_Proj_pred(n);
693 if (is_Start(start)) {
694 return get_irg_frame_type(get_irn_irg(start));
700 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
701 * from Start. If so returns tls type, else Null. */
702 ir_type *is_tls_pointer(const ir_node *n)
704 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
705 ir_node *start = get_Proj_pred(n);
706 if (is_Start(start)) {
707 return get_tls_type();
713 ir_node **get_Block_cfgpred_arr(ir_node *node)
715 assert(is_Block(node));
716 return (ir_node **)&(get_irn_in(node)[1]);
719 int (get_Block_n_cfgpreds)(const ir_node *node)
721 return _get_Block_n_cfgpreds(node);
724 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
726 return _get_Block_cfgpred(node, pos);
729 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
731 assert(is_Block(node));
732 set_irn_n(node, pos, pred);
735 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
739 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
740 if (get_Block_cfgpred_block(block, i) == pred)
746 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
748 return _get_Block_cfgpred_block(node, pos);
751 int get_Block_matured(const ir_node *node)
753 assert(is_Block(node));
754 return (int)node->attr.block.is_matured;
757 void set_Block_matured(ir_node *node, int matured)
759 assert(is_Block(node));
760 node->attr.block.is_matured = matured;
763 ir_visited_t (get_Block_block_visited)(const ir_node *node)
765 return _get_Block_block_visited(node);
768 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
770 _set_Block_block_visited(node, visit);
773 /* For this current_ir_graph must be set. */
774 void (mark_Block_block_visited)(ir_node *node)
776 _mark_Block_block_visited(node);
779 int (Block_block_visited)(const ir_node *node)
781 return _Block_block_visited(node);
784 #ifdef INTERPROCEDURAL_VIEW
785 void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[])
787 assert(is_Block(node));
788 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
789 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
790 node->attr.block.in_cg[0] = NULL;
791 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
793 /* Fix backedge array. fix_backedges() operates depending on
794 interprocedural_view. */
795 int ipv = get_interprocedural_view();
796 set_interprocedural_view(1);
797 fix_backedges(current_ir_graph->obst, node);
798 set_interprocedural_view(ipv);
801 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
804 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred)
806 assert(is_Block(node) && node->attr.block.in_cg &&
807 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
808 node->attr.block.in_cg[pos + 1] = pred;
811 ir_node **get_Block_cg_cfgpred_arr(ir_node *node)
813 assert(is_Block(node));
814 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
817 int get_Block_cg_n_cfgpreds(const ir_node *node)
819 assert(is_Block(node));
820 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
823 ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos)
825 assert(is_Block(node) && node->attr.block.in_cg);
826 return node->attr.block.in_cg[pos + 1];
829 void remove_Block_cg_cfgpred_arr(ir_node *node)
831 assert(is_Block(node));
832 node->attr.block.in_cg = NULL;
834 #endif /* INTERPROCEDURAL_VIEW */
836 ir_node *(set_Block_dead)(ir_node *block)
838 return _set_Block_dead(block);
841 int (is_Block_dead)(const ir_node *block)
843 return _is_Block_dead(block);
846 ir_extblk *get_Block_extbb(const ir_node *block)
849 assert(is_Block(block));
850 res = block->attr.block.extblk;
851 assert(res == NULL || is_ir_extbb(res));
855 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
857 assert(is_Block(block));
858 assert(extblk == NULL || is_ir_extbb(extblk));
859 block->attr.block.extblk = extblk;
862 /* Returns the macro block header of a block.*/
863 ir_node *get_Block_MacroBlock(const ir_node *block)
866 assert(is_Block(block));
867 mbh = get_irn_n(block, -1);
868 /* once macro block header is respected by all optimizations,
869 this assert can be removed */
874 /* Sets the macro block header of a block. */
875 void set_Block_MacroBlock(ir_node *block, ir_node *mbh)
877 assert(is_Block(block));
879 assert(is_Block(mbh));
880 set_irn_n(block, -1, mbh);
883 /* returns the macro block header of a node. */
884 ir_node *get_irn_MacroBlock(const ir_node *n)
887 n = get_nodes_block(n);
888 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
892 return get_Block_MacroBlock(n);
895 /* returns the graph of a Block. */
896 ir_graph *(get_Block_irg)(const ir_node *block)
898 return _get_Block_irg(block);
901 ir_entity *create_Block_entity(ir_node *block)
904 assert(is_Block(block));
906 entity = block->attr.block.entity;
907 if (entity == NULL) {
911 glob = get_glob_type();
912 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
913 set_entity_visibility(entity, ir_visibility_local);
914 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
915 nr = get_irp_next_label_nr();
916 set_entity_label(entity, nr);
917 set_entity_compiler_generated(entity, 1);
919 block->attr.block.entity = entity;
924 ir_entity *get_Block_entity(const ir_node *block)
926 assert(is_Block(block));
927 return block->attr.block.entity;
930 void set_Block_entity(ir_node *block, ir_entity *entity)
932 assert(is_Block(block));
933 assert(get_entity_type(entity) == get_code_type());
934 block->attr.block.entity = entity;
937 int has_Block_entity(const ir_node *block)
939 return block->attr.block.entity != NULL;
942 ir_node *(get_Block_phis)(const ir_node *block)
944 return _get_Block_phis(block);
947 void (set_Block_phis)(ir_node *block, ir_node *phi)
949 _set_Block_phis(block, phi);
952 void (add_Block_phi)(ir_node *block, ir_node *phi)
954 _add_Block_phi(block, phi);
957 /* Get the Block mark (single bit). */
958 unsigned (get_Block_mark)(const ir_node *block)
960 return _get_Block_mark(block);
963 /* Set the Block mark (single bit). */
964 void (set_Block_mark)(ir_node *block, unsigned mark)
966 _set_Block_mark(block, mark);
969 int get_End_n_keepalives(const ir_node *end)
972 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
975 ir_node *get_End_keepalive(const ir_node *end, int pos)
978 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
981 void add_End_keepalive(ir_node *end, ir_node *ka)
987 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
990 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
993 /* Set new keep-alives */
994 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
997 ir_graph *irg = get_irn_irg(end);
999 /* notify that edges are deleted */
1000 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
1001 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
1003 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
1005 for (i = 0; i < n; ++i) {
1006 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
1007 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
1011 /* Set new keep-alives from old keep-alives, skipping irn */
1012 void remove_End_keepalive(ir_node *end, ir_node *irn)
1014 int n = get_End_n_keepalives(end);
1019 for (i = n -1; i >= 0; --i) {
1020 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
1023 if (old_ka == irn) {
1030 irg = get_irn_irg(end);
1032 /* remove the edge */
1033 edges_notify_edge(end, idx, NULL, irn, irg);
1036 /* exchange with the last one */
1037 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
1038 edges_notify_edge(end, n - 1, NULL, old, irg);
1039 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
1040 edges_notify_edge(end, idx, old, NULL, irg);
1042 /* now n - 1 keeps, 1 block input */
1043 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
1046 /* remove Bads, NoMems and doublets from the keep-alive set */
1047 void remove_End_Bads_and_doublets(ir_node *end)
1050 int idx, n = get_End_n_keepalives(end);
1056 irg = get_irn_irg(end);
1057 pset_new_init(&keeps);
1059 for (idx = n - 1; idx >= 0; --idx) {
1060 ir_node *ka = get_End_keepalive(end, idx);
1062 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
1063 /* remove the edge */
1064 edges_notify_edge(end, idx, NULL, ka, irg);
1067 /* exchange with the last one */
1068 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
1069 edges_notify_edge(end, n - 1, NULL, old, irg);
1070 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
1071 edges_notify_edge(end, idx, old, NULL, irg);
1075 pset_new_insert(&keeps, ka);
1078 /* n keeps, 1 block input */
1079 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
1081 pset_new_destroy(&keeps);
1084 void free_End(ir_node *end)
1086 assert(is_End(end));
1089 end->in = NULL; /* @@@ make sure we get an error if we use the
1090 in array afterwards ... */
1093 /* Return the target address of an IJmp */
1094 ir_node *get_IJmp_target(const ir_node *ijmp)
1096 assert(is_IJmp(ijmp));
1097 return get_irn_n(ijmp, 0);
1100 /** Sets the target address of an IJmp */
1101 void set_IJmp_target(ir_node *ijmp, ir_node *tgt)
1103 assert(is_IJmp(ijmp));
1104 set_irn_n(ijmp, 0, tgt);
1107 ir_node *get_Cond_selector(const ir_node *node)
1109 assert(is_Cond(node));
1110 return get_irn_n(node, 0);
1113 void set_Cond_selector(ir_node *node, ir_node *selector)
1115 assert(is_Cond(node));
1116 set_irn_n(node, 0, selector);
1119 long get_Cond_default_proj(const ir_node *node)
1121 assert(is_Cond(node));
1122 return node->attr.cond.default_proj;
1125 void set_Cond_default_proj(ir_node *node, long defproj)
1127 assert(is_Cond(node));
1128 node->attr.cond.default_proj = defproj;
1131 ir_node *get_Return_mem(const ir_node *node)
1133 assert(is_Return(node));
1134 return get_irn_n(node, 0);
1137 void set_Return_mem(ir_node *node, ir_node *mem)
1139 assert(is_Return(node));
1140 set_irn_n(node, 0, mem);
1143 int get_Return_n_ress(const ir_node *node)
1145 assert(is_Return(node));
1146 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1149 ir_node **get_Return_res_arr(ir_node *node)
1151 assert(is_Return(node));
1152 if (get_Return_n_ress(node) > 0)
1153 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1159 void set_Return_n_res(ir_node *node, int results)
1161 assert(is_Return(node));
1165 ir_node *get_Return_res(const ir_node *node, int pos)
1167 assert(is_Return(node));
1168 assert(get_Return_n_ress(node) > pos);
1169 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1172 void set_Return_res(ir_node *node, int pos, ir_node *res)
1174 assert(is_Return(node));
1175 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1178 tarval *(get_Const_tarval)(const ir_node *node)
1180 return _get_Const_tarval(node);
1183 void set_Const_tarval(ir_node *node, tarval *con)
1185 assert(is_Const(node));
1186 node->attr.con.tv = con;
1189 int (is_Const_null)(const ir_node *node)
1191 return _is_Const_null(node);
1194 int (is_Const_one)(const ir_node *node)
1196 return _is_Const_one(node);
1199 int (is_Const_all_one)(const ir_node *node)
1201 return _is_Const_all_one(node);
1205 /* The source language type. Must be an atomic type. Mode of type must
1206 be mode of node. For tarvals from entities type must be pointer to
1208 ir_type *get_Const_type(ir_node *node)
1210 assert(is_Const(node));
1211 return node->attr.con.tp;
1214 void set_Const_type(ir_node *node, ir_type *tp)
1216 assert(is_Const(node));
1217 if (tp != firm_unknown_type) {
1218 assert(is_atomic_type(tp));
1219 assert(get_type_mode(tp) == get_irn_mode(node));
1221 node->attr.con.tp = tp;
1225 symconst_kind get_SymConst_kind(const ir_node *node)
1227 assert(is_SymConst(node));
1228 return node->attr.symc.kind;
1231 void set_SymConst_kind(ir_node *node, symconst_kind kind)
1233 assert(is_SymConst(node));
1234 node->attr.symc.kind = kind;
1237 ir_type *get_SymConst_type(const ir_node *node)
1239 /* the cast here is annoying, but we have to compensate for
1241 ir_node *irn = (ir_node *)node;
1242 assert(is_SymConst(node) &&
1243 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1244 return irn->attr.symc.sym.type_p;
1247 void set_SymConst_type(ir_node *node, ir_type *tp)
1249 assert(is_SymConst(node) &&
1250 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1251 node->attr.symc.sym.type_p = tp;
1255 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1256 ir_entity *get_SymConst_entity(const ir_node *node)
1258 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1259 return node->attr.symc.sym.entity_p;
1262 void set_SymConst_entity(ir_node *node, ir_entity *ent)
1264 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1265 node->attr.symc.sym.entity_p = ent;
1268 ir_enum_const *get_SymConst_enum(const ir_node *node)
1270 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1271 return node->attr.symc.sym.enum_p;
1274 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
1276 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1277 node->attr.symc.sym.enum_p = ec;
1280 union symconst_symbol
1281 get_SymConst_symbol(const ir_node *node)
1283 assert(is_SymConst(node));
1284 return node->attr.symc.sym;
1287 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
1289 assert(is_SymConst(node));
1290 node->attr.symc.sym = sym;
1293 ir_type *get_SymConst_value_type(ir_node *node)
1295 assert(is_SymConst(node));
1296 return node->attr.symc.tp;
1299 void set_SymConst_value_type(ir_node *node, ir_type *tp)
1301 assert(is_SymConst(node));
1302 node->attr.symc.tp = tp;
1305 ir_node *get_Sel_mem(const ir_node *node)
1307 assert(is_Sel(node));
1308 return get_irn_n(node, 0);
1311 void set_Sel_mem(ir_node *node, ir_node *mem)
1313 assert(is_Sel(node));
1314 set_irn_n(node, 0, mem);
1317 ir_node *get_Sel_ptr(const ir_node *node)
1319 assert(is_Sel(node));
1320 return get_irn_n(node, 1);
1323 void set_Sel_ptr(ir_node *node, ir_node *ptr)
1325 assert(is_Sel(node));
1326 set_irn_n(node, 1, ptr);
1329 int get_Sel_n_indexs(const ir_node *node)
1331 assert(is_Sel(node));
1332 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1335 ir_node **get_Sel_index_arr(ir_node *node)
1337 assert(is_Sel(node));
1338 if (get_Sel_n_indexs(node) > 0)
1339 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1344 ir_node *get_Sel_index(const ir_node *node, int pos)
1346 assert(is_Sel(node));
1347 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1350 void set_Sel_index(ir_node *node, int pos, ir_node *index)
1352 assert(is_Sel(node));
1353 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1356 ir_entity *get_Sel_entity(const ir_node *node)
1358 assert(is_Sel(node));
1359 return node->attr.sel.entity;
1362 /* need a version without const to prevent warning */
1363 static ir_entity *_get_Sel_entity(ir_node *node)
1365 return get_Sel_entity(node);
1368 void set_Sel_entity(ir_node *node, ir_entity *ent)
1370 assert(is_Sel(node));
1371 node->attr.sel.entity = ent;
1375 /* For unary and binary arithmetic operations the access to the
1376 operands can be factored out. Left is the first, right the
1377 second arithmetic value as listed in tech report 0999-33.
1378 unops are: Minus, Abs, Not, Conv, Cast
1379 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1380 Shr, Shrs, Rotate, Cmp */
1383 ir_node *get_Call_mem(const ir_node *node)
1385 assert(is_Call(node));
1386 return get_irn_n(node, 0);
1389 void set_Call_mem(ir_node *node, ir_node *mem)
1391 assert(is_Call(node));
1392 set_irn_n(node, 0, mem);
1395 ir_node *get_Call_ptr(const ir_node *node)
1397 assert(is_Call(node));
1398 return get_irn_n(node, 1);
1401 void set_Call_ptr(ir_node *node, ir_node *ptr)
1403 assert(is_Call(node));
1404 set_irn_n(node, 1, ptr);
1407 ir_node **get_Call_param_arr(ir_node *node)
1409 assert(is_Call(node));
1410 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1413 int get_Call_n_params(const ir_node *node)
1415 assert(is_Call(node));
1416 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1419 ir_node *get_Call_param(const ir_node *node, int pos)
1421 assert(is_Call(node));
1422 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1425 void set_Call_param(ir_node *node, int pos, ir_node *param)
1427 assert(is_Call(node));
1428 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1431 ir_type *get_Call_type(ir_node *node)
1433 assert(is_Call(node));
1434 return node->attr.call.type;
1437 void set_Call_type(ir_node *node, ir_type *tp)
1439 assert(is_Call(node));
1440 assert((get_unknown_type() == tp) || is_Method_type(tp));
1441 node->attr.call.type = tp;
1444 unsigned get_Call_tail_call(const ir_node *node)
1446 assert(is_Call(node));
1447 return node->attr.call.tail_call;
1450 void set_Call_tail_call(ir_node *node, unsigned tail_call)
1452 assert(is_Call(node));
1453 node->attr.call.tail_call = tail_call != 0;
1456 ir_node *get_Builtin_mem(const ir_node *node)
1458 assert(is_Builtin(node));
1459 return get_irn_n(node, 0);
1462 void set_Builtin_mem(ir_node *node, ir_node *mem)
1464 assert(is_Builtin(node));
1465 set_irn_n(node, 0, mem);
1468 ir_builtin_kind get_Builtin_kind(const ir_node *node)
1470 assert(is_Builtin(node));
1471 return node->attr.builtin.kind;
1474 void set_Builtin_kind(ir_node *node, ir_builtin_kind kind)
1476 assert(is_Builtin(node));
1477 node->attr.builtin.kind = kind;
1480 ir_node **get_Builtin_param_arr(ir_node *node)
1482 assert(is_Builtin(node));
1483 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1486 int get_Builtin_n_params(const ir_node *node)
1488 assert(is_Builtin(node));
1489 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1492 ir_node *get_Builtin_param(const ir_node *node, int pos)
1494 assert(is_Builtin(node));
1495 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1498 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
1500 assert(is_Builtin(node));
1501 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1504 ir_type *get_Builtin_type(ir_node *node)
1506 assert(is_Builtin(node));
1507 return node->attr.builtin.type;
1510 void set_Builtin_type(ir_node *node, ir_type *tp)
1512 assert(is_Builtin(node));
1513 assert((get_unknown_type() == tp) || is_Method_type(tp));
1514 node->attr.builtin.type = tp;
1517 /* Returns a human readable string for the ir_builtin_kind. */
1518 const char *get_builtin_kind_name(ir_builtin_kind kind)
1520 #define X(a) case a: return #a;
1523 X(ir_bk_debugbreak);
1524 X(ir_bk_return_address);
1525 X(ir_bk_frame_address);
1535 X(ir_bk_inner_trampoline);
1542 int Call_has_callees(const ir_node *node)
1544 assert(is_Call(node));
1545 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1546 (node->attr.call.callee_arr != NULL));
1549 int get_Call_n_callees(const ir_node *node)
1551 assert(is_Call(node) && node->attr.call.callee_arr);
1552 return ARR_LEN(node->attr.call.callee_arr);
1555 ir_entity *get_Call_callee(const ir_node *node, int pos)
1557 assert(pos >= 0 && pos < get_Call_n_callees(node));
1558 return node->attr.call.callee_arr[pos];
1561 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr)
1563 assert(is_Call(node));
1564 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1565 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1567 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1570 void remove_Call_callee_arr(ir_node *node)
1572 assert(is_Call(node));
1573 node->attr.call.callee_arr = NULL;
1576 ir_node *get_CallBegin_ptr(const ir_node *node)
1578 assert(is_CallBegin(node));
1579 return get_irn_n(node, 0);
1582 void set_CallBegin_ptr(ir_node *node, ir_node *ptr)
1584 assert(is_CallBegin(node));
1585 set_irn_n(node, 0, ptr);
1588 ir_node *get_CallBegin_call(const ir_node *node)
1590 assert(is_CallBegin(node));
1591 return node->attr.callbegin.call;
1594 void set_CallBegin_call(ir_node *node, ir_node *call)
1596 assert(is_CallBegin(node));
1597 node->attr.callbegin.call = call;
1601 * Returns non-zero if a Call is surely a self-recursive Call.
1602 * Beware: if this functions returns 0, the call might be self-recursive!
1604 int is_self_recursive_Call(const ir_node *call)
1606 const ir_node *callee = get_Call_ptr(call);
1608 if (is_SymConst_addr_ent(callee)) {
1609 const ir_entity *ent = get_SymConst_entity(callee);
1610 const ir_graph *irg = get_entity_irg(ent);
1611 if (irg == get_irn_irg(call))
1618 ir_node * get_##OP##_left(const ir_node *node) { \
1619 assert(is_##OP(node)); \
1620 return get_irn_n(node, node->op->op_index); \
1622 void set_##OP##_left(ir_node *node, ir_node *left) { \
1623 assert(is_##OP(node)); \
1624 set_irn_n(node, node->op->op_index, left); \
1626 ir_node *get_##OP##_right(const ir_node *node) { \
1627 assert(is_##OP(node)); \
1628 return get_irn_n(node, node->op->op_index + 1); \
1630 void set_##OP##_right(ir_node *node, ir_node *right) { \
1631 assert(is_##OP(node)); \
1632 set_irn_n(node, node->op->op_index + 1, right); \
1636 ir_node *get_##OP##_op(const ir_node *node) { \
1637 assert(is_##OP(node)); \
1638 return get_irn_n(node, node->op->op_index); \
1640 void set_##OP##_op(ir_node *node, ir_node *op) { \
1641 assert(is_##OP(node)); \
1642 set_irn_n(node, node->op->op_index, op); \
1645 #define BINOP_MEM(OP) \
1649 get_##OP##_mem(const ir_node *node) { \
1650 assert(is_##OP(node)); \
1651 return get_irn_n(node, 0); \
1655 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1656 assert(is_##OP(node)); \
1657 set_irn_n(node, 0, mem); \
1663 ir_mode *get_##OP##_resmode(const ir_node *node) { \
1664 assert(is_##OP(node)); \
1665 return node->attr.divmod.resmode; \
1668 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
1669 assert(is_##OP(node)); \
1670 node->attr.divmod.resmode = mode; \
1698 int get_Div_no_remainder(const ir_node *node)
1700 assert(is_Div(node));
1701 return node->attr.divmod.no_remainder;
1704 void set_Div_no_remainder(ir_node *node, int no_remainder)
1706 assert(is_Div(node));
1707 node->attr.divmod.no_remainder = no_remainder;
1710 int get_Conv_strict(const ir_node *node)
1712 assert(is_Conv(node));
1713 return node->attr.conv.strict;
1716 void set_Conv_strict(ir_node *node, int strict_flag)
1718 assert(is_Conv(node));
1719 node->attr.conv.strict = (char)strict_flag;
1722 ir_type *get_Cast_type(ir_node *node)
1724 assert(is_Cast(node));
1725 return node->attr.cast.type;
1728 void set_Cast_type(ir_node *node, ir_type *to_tp)
1730 assert(is_Cast(node));
1731 node->attr.cast.type = to_tp;
1735 /* Checks for upcast.
1737 * Returns true if the Cast node casts a class type to a super type.
1739 int is_Cast_upcast(ir_node *node)
1741 ir_type *totype = get_Cast_type(node);
1742 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1744 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1747 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1748 totype = get_pointer_points_to_type(totype);
1749 fromtype = get_pointer_points_to_type(fromtype);
1754 if (!is_Class_type(totype)) return 0;
1755 return is_SubClass_of(fromtype, totype);
1758 /* Checks for downcast.
1760 * Returns true if the Cast node casts a class type to a sub type.
1762 int is_Cast_downcast(ir_node *node)
1764 ir_type *totype = get_Cast_type(node);
1765 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1767 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1770 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1771 totype = get_pointer_points_to_type(totype);
1772 fromtype = get_pointer_points_to_type(fromtype);
1777 if (!is_Class_type(totype)) return 0;
1778 return is_SubClass_of(totype, fromtype);
1781 int (is_unop)(const ir_node *node)
1783 return _is_unop(node);
1786 ir_node *get_unop_op(const ir_node *node)
1788 if (node->op->opar == oparity_unary)
1789 return get_irn_n(node, node->op->op_index);
1791 assert(node->op->opar == oparity_unary);
1795 void set_unop_op(ir_node *node, ir_node *op)
1797 if (node->op->opar == oparity_unary)
1798 set_irn_n(node, node->op->op_index, op);
1800 assert(node->op->opar == oparity_unary);
1803 int (is_binop)(const ir_node *node)
1805 return _is_binop(node);
1808 ir_node *get_binop_left(const ir_node *node)
1810 assert(node->op->opar == oparity_binary);
1811 return get_irn_n(node, node->op->op_index);
1814 void set_binop_left(ir_node *node, ir_node *left)
1816 assert(node->op->opar == oparity_binary);
1817 set_irn_n(node, node->op->op_index, left);
1820 ir_node *get_binop_right(const ir_node *node)
1822 assert(node->op->opar == oparity_binary);
1823 return get_irn_n(node, node->op->op_index + 1);
1826 void set_binop_right(ir_node *node, ir_node *right)
1828 assert(node->op->opar == oparity_binary);
1829 set_irn_n(node, node->op->op_index + 1, right);
1832 int is_Phi0(const ir_node *n)
1836 return ((get_irn_op(n) == op_Phi) &&
1837 (get_irn_arity(n) == 0) &&
1838 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1841 ir_node **get_Phi_preds_arr(ir_node *node)
1843 assert(node->op == op_Phi);
1844 return (ir_node **)&(get_irn_in(node)[1]);
1847 int get_Phi_n_preds(const ir_node *node)
1849 assert(is_Phi(node) || is_Phi0(node));
1850 return (get_irn_arity(node));
1854 void set_Phi_n_preds(ir_node *node, int n_preds)
1856 assert(node->op == op_Phi);
1860 ir_node *get_Phi_pred(const ir_node *node, int pos)
1862 assert(is_Phi(node) || is_Phi0(node));
1863 return get_irn_n(node, pos);
1866 void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1868 assert(is_Phi(node) || is_Phi0(node));
1869 set_irn_n(node, pos, pred);
1872 ir_node *(get_Phi_next)(const ir_node *phi)
1874 return _get_Phi_next(phi);
1877 void (set_Phi_next)(ir_node *phi, ir_node *next)
1879 _set_Phi_next(phi, next);
1882 int is_memop(const ir_node *node)
1884 ir_opcode code = get_irn_opcode(node);
1885 return (code == iro_Load || code == iro_Store);
1888 ir_node *get_memop_mem(const ir_node *node)
1890 assert(is_memop(node));
1891 return get_irn_n(node, 0);
1894 void set_memop_mem(ir_node *node, ir_node *mem)
1896 assert(is_memop(node));
1897 set_irn_n(node, 0, mem);
1900 ir_node *get_memop_ptr(const ir_node *node)
1902 assert(is_memop(node));
1903 return get_irn_n(node, 1);
1906 void set_memop_ptr(ir_node *node, ir_node *ptr)
1908 assert(is_memop(node));
1909 set_irn_n(node, 1, ptr);
1912 ir_node *get_Load_mem(const ir_node *node)
1914 assert(is_Load(node));
1915 return get_irn_n(node, 0);
1918 void set_Load_mem(ir_node *node, ir_node *mem)
1920 assert(is_Load(node));
1921 set_irn_n(node, 0, mem);
1924 ir_node *get_Load_ptr(const ir_node *node)
1926 assert(is_Load(node));
1927 return get_irn_n(node, 1);
1930 void set_Load_ptr(ir_node *node, ir_node *ptr)
1932 assert(is_Load(node));
1933 set_irn_n(node, 1, ptr);
1936 ir_mode *get_Load_mode(const ir_node *node)
1938 assert(is_Load(node));
1939 return node->attr.load.mode;
1942 void set_Load_mode(ir_node *node, ir_mode *mode)
1944 assert(is_Load(node));
1945 node->attr.load.mode = mode;
1948 ir_volatility get_Load_volatility(const ir_node *node)
1950 assert(is_Load(node));
1951 return node->attr.load.volatility;
1954 void set_Load_volatility(ir_node *node, ir_volatility volatility)
1956 assert(is_Load(node));
1957 node->attr.load.volatility = volatility;
1960 ir_align get_Load_align(const ir_node *node)
1962 assert(is_Load(node));
1963 return node->attr.load.aligned;
1966 void set_Load_align(ir_node *node, ir_align align)
1968 assert(is_Load(node));
1969 node->attr.load.aligned = align;
1973 ir_node *get_Store_mem(const ir_node *node)
1975 assert(is_Store(node));
1976 return get_irn_n(node, 0);
1979 void set_Store_mem(ir_node *node, ir_node *mem)
1981 assert(is_Store(node));
1982 set_irn_n(node, 0, mem);
1985 ir_node *get_Store_ptr(const ir_node *node)
1987 assert(is_Store(node));
1988 return get_irn_n(node, 1);
1991 void set_Store_ptr(ir_node *node, ir_node *ptr)
1993 assert(is_Store(node));
1994 set_irn_n(node, 1, ptr);
1997 ir_node *get_Store_value(const ir_node *node)
1999 assert(is_Store(node));
2000 return get_irn_n(node, 2);
2003 void set_Store_value(ir_node *node, ir_node *value)
2005 assert(is_Store(node));
2006 set_irn_n(node, 2, value);
2009 ir_volatility get_Store_volatility(const ir_node *node)
2011 assert(is_Store(node));
2012 return node->attr.store.volatility;
2015 void set_Store_volatility(ir_node *node, ir_volatility volatility)
2017 assert(is_Store(node));
2018 node->attr.store.volatility = volatility;
2021 ir_align get_Store_align(const ir_node *node)
2023 assert(is_Store(node));
2024 return node->attr.store.aligned;
2027 void set_Store_align(ir_node *node, ir_align align)
2029 assert(is_Store(node));
2030 node->attr.store.aligned = align;
2034 ir_node *get_Alloc_mem(const ir_node *node)
2036 assert(is_Alloc(node));
2037 return get_irn_n(node, 0);
2040 void set_Alloc_mem(ir_node *node, ir_node *mem)
2042 assert(is_Alloc(node));
2043 set_irn_n(node, 0, mem);
2046 ir_node *get_Alloc_count(const ir_node *node)
2048 assert(is_Alloc(node));
2049 return get_irn_n(node, 1);
2052 void set_Alloc_count(ir_node *node, ir_node *count)
2054 assert(is_Alloc(node));
2055 set_irn_n(node, 1, count);
2058 ir_type *get_Alloc_type(ir_node *node)
2060 assert(is_Alloc(node));
2061 return node->attr.alloc.type;
2064 void set_Alloc_type(ir_node *node, ir_type *tp)
2066 assert(is_Alloc(node));
2067 node->attr.alloc.type = tp;
2070 ir_where_alloc get_Alloc_where(const ir_node *node)
2072 assert(is_Alloc(node));
2073 return node->attr.alloc.where;
2076 void set_Alloc_where(ir_node *node, ir_where_alloc where)
2078 assert(is_Alloc(node));
2079 node->attr.alloc.where = where;
2083 ir_node *get_Free_mem(const ir_node *node)
2085 assert(is_Free(node));
2086 return get_irn_n(node, 0);
2089 void set_Free_mem(ir_node *node, ir_node *mem)
2091 assert(is_Free(node));
2092 set_irn_n(node, 0, mem);
2095 ir_node *get_Free_ptr(const ir_node *node)
2097 assert(is_Free(node));
2098 return get_irn_n(node, 1);
2101 void set_Free_ptr(ir_node *node, ir_node *ptr)
2103 assert(is_Free(node));
2104 set_irn_n(node, 1, ptr);
2107 ir_node *get_Free_size(const ir_node *node)
2109 assert(is_Free(node));
2110 return get_irn_n(node, 2);
2113 void set_Free_size(ir_node *node, ir_node *size)
2115 assert(is_Free(node));
2116 set_irn_n(node, 2, size);
2119 ir_type *get_Free_type(ir_node *node)
2121 assert(is_Free(node));
2122 return node->attr.free.type;
2125 void set_Free_type(ir_node *node, ir_type *tp)
2127 assert(is_Free(node));
2128 node->attr.free.type = tp;
2131 ir_where_alloc get_Free_where(const ir_node *node)
2133 assert(is_Free(node));
2134 return node->attr.free.where;
2137 void set_Free_where(ir_node *node, ir_where_alloc where)
2139 assert(is_Free(node));
2140 node->attr.free.where = where;
2143 ir_node **get_Sync_preds_arr(ir_node *node)
2145 assert(is_Sync(node));
2146 return (ir_node **)&(get_irn_in(node)[1]);
2149 int get_Sync_n_preds(const ir_node *node)
2151 assert(is_Sync(node));
2152 return (get_irn_arity(node));
2156 void set_Sync_n_preds(ir_node *node, int n_preds)
2158 assert(is_Sync(node));
2162 ir_node *get_Sync_pred(const ir_node *node, int pos)
2164 assert(is_Sync(node));
2165 return get_irn_n(node, pos);
2168 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
2170 assert(is_Sync(node));
2171 set_irn_n(node, pos, pred);
2174 /* Add a new Sync predecessor */
2175 void add_Sync_pred(ir_node *node, ir_node *pred)
2177 assert(is_Sync(node));
2178 add_irn_n(node, pred);
2181 /* Returns the source language type of a Proj node. */
2182 ir_type *get_Proj_type(ir_node *n)
2184 ir_type *tp = firm_unknown_type;
2185 ir_node *pred = get_Proj_pred(n);
2187 switch (get_irn_opcode(pred)) {
2190 /* Deal with Start / Call here: we need to know the Proj Nr. */
2191 assert(get_irn_mode(pred) == mode_T);
2192 pred_pred = get_Proj_pred(pred);
2194 if (is_Start(pred_pred)) {
2195 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
2196 tp = get_method_param_type(mtp, get_Proj_proj(n));
2197 } else if (is_Call(pred_pred)) {
2198 ir_type *mtp = get_Call_type(pred_pred);
2199 tp = get_method_res_type(mtp, get_Proj_proj(n));
2202 case iro_Start: break;
2203 case iro_Call: break;
2205 ir_node *a = get_Load_ptr(pred);
2207 tp = get_entity_type(get_Sel_entity(a));
2215 ir_node *get_Proj_pred(const ir_node *node)
2217 assert(is_Proj(node));
2218 return get_irn_n(node, 0);
2221 void set_Proj_pred(ir_node *node, ir_node *pred)
2223 assert(is_Proj(node));
2224 set_irn_n(node, 0, pred);
2227 long get_Proj_proj(const ir_node *node)
2229 #ifdef INTERPROCEDURAL_VIEW
2230 ir_opcode code = get_irn_opcode(node);
2232 if (code == iro_Proj) {
2233 return node->attr.proj;
2236 assert(code == iro_Filter);
2237 return node->attr.filter.proj;
2240 assert(is_Proj(node));
2241 return node->attr.proj;
2242 #endif /* INTERPROCEDURAL_VIEW */
2245 void set_Proj_proj(ir_node *node, long proj)
2247 #ifdef INTERPROCEDURAL_VIEW
2248 ir_opcode code = get_irn_opcode(node);
2250 if (code == iro_Proj) {
2251 node->attr.proj = proj;
2254 assert(code == iro_Filter);
2255 node->attr.filter.proj = proj;
2258 assert(is_Proj(node));
2259 node->attr.proj = proj;
2260 #endif /* INTERPROCEDURAL_VIEW */
2263 /* Returns non-zero if a node is a routine parameter. */
2264 int (is_arg_Proj)(const ir_node *node)
2266 return _is_arg_Proj(node);
2269 ir_node **get_Tuple_preds_arr(ir_node *node)
2271 assert(is_Tuple(node));
2272 return (ir_node **)&(get_irn_in(node)[1]);
2275 int get_Tuple_n_preds(const ir_node *node)
2277 assert(is_Tuple(node));
2278 return get_irn_arity(node);
2282 void set_Tuple_n_preds(ir_node *node, int n_preds)
2284 assert(is_Tuple(node));
2288 ir_node *get_Tuple_pred(const ir_node *node, int pos)
2290 assert(is_Tuple(node));
2291 return get_irn_n(node, pos);
2294 void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
2296 assert(is_Tuple(node));
2297 set_irn_n(node, pos, pred);
2300 ir_node *get_Id_pred(const ir_node *node)
2302 assert(is_Id(node));
2303 return get_irn_n(node, 0);
2306 void set_Id_pred(ir_node *node, ir_node *pred)
2308 assert(is_Id(node));
2309 set_irn_n(node, 0, pred);
2312 ir_node *get_Confirm_value(const ir_node *node)
2314 assert(is_Confirm(node));
2315 return get_irn_n(node, 0);
2318 void set_Confirm_value(ir_node *node, ir_node *value)
2320 assert(is_Confirm(node));
2321 set_irn_n(node, 0, value);
2324 ir_node *get_Confirm_bound(const ir_node *node)
2326 assert(is_Confirm(node));
2327 return get_irn_n(node, 1);
2330 void set_Confirm_bound(ir_node *node, ir_node *bound)
2332 assert(is_Confirm(node));
2333 set_irn_n(node, 0, bound);
2336 pn_Cmp get_Confirm_cmp(const ir_node *node)
2338 assert(is_Confirm(node));
2339 return node->attr.confirm.cmp;
2342 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp)
2344 assert(is_Confirm(node));
2345 node->attr.confirm.cmp = cmp;
2348 ir_node *get_Filter_pred(ir_node *node)
2350 assert(is_Filter(node));
2354 void set_Filter_pred(ir_node *node, ir_node *pred)
2356 assert(is_Filter(node));
2360 long get_Filter_proj(ir_node *node)
2362 assert(is_Filter(node));
2363 return node->attr.filter.proj;
2366 void set_Filter_proj(ir_node *node, long proj)
2368 assert(is_Filter(node));
2369 node->attr.filter.proj = proj;
2372 /* Don't use get_irn_arity, get_irn_n in implementation as access
2373 shall work independent of view!!! */
2374 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in)
2376 assert(is_Filter(node));
2377 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2378 ir_graph *irg = get_irn_irg(node);
2379 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2380 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
2381 node->attr.filter.in_cg[0] = node->in[0];
2383 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2386 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred)
2388 assert(is_Filter(node) && node->attr.filter.in_cg &&
2389 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2390 node->attr.filter.in_cg[pos + 1] = pred;
2393 int get_Filter_n_cg_preds(ir_node *node)
2395 assert(is_Filter(node) && node->attr.filter.in_cg);
2396 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2399 ir_node *get_Filter_cg_pred(ir_node *node, int pos)
2402 assert(is_Filter(node) && node->attr.filter.in_cg &&
2404 arity = ARR_LEN(node->attr.filter.in_cg);
2405 assert(pos < arity - 1);
2406 return node->attr.filter.in_cg[pos + 1];
2410 ir_node *get_Mux_sel(const ir_node *node)
2412 assert(is_Mux(node));
2416 void set_Mux_sel(ir_node *node, ir_node *sel)
2418 assert(is_Mux(node));
2422 ir_node *get_Mux_false(const ir_node *node)
2424 assert(is_Mux(node));
2428 void set_Mux_false(ir_node *node, ir_node *ir_false)
2430 assert(is_Mux(node));
2431 node->in[2] = ir_false;
2434 ir_node *get_Mux_true(const ir_node *node)
2436 assert(is_Mux(node));
2440 void set_Mux_true(ir_node *node, ir_node *ir_true)
2442 assert(is_Mux(node));
2443 node->in[3] = ir_true;
2447 ir_node *get_CopyB_mem(const ir_node *node)
2449 assert(is_CopyB(node));
2450 return get_irn_n(node, 0);
2453 void set_CopyB_mem(ir_node *node, ir_node *mem)
2455 assert(node->op == op_CopyB);
2456 set_irn_n(node, 0, mem);
2459 ir_node *get_CopyB_dst(const ir_node *node)
2461 assert(is_CopyB(node));
2462 return get_irn_n(node, 1);
2465 void set_CopyB_dst(ir_node *node, ir_node *dst)
2467 assert(is_CopyB(node));
2468 set_irn_n(node, 1, dst);
2471 ir_node *get_CopyB_src(const ir_node *node)
2473 assert(is_CopyB(node));
2474 return get_irn_n(node, 2);
2477 void set_CopyB_src(ir_node *node, ir_node *src)
2479 assert(is_CopyB(node));
2480 set_irn_n(node, 2, src);
2483 ir_type *get_CopyB_type(ir_node *node)
2485 assert(is_CopyB(node));
2486 return node->attr.copyb.type;
2489 void set_CopyB_type(ir_node *node, ir_type *data_type)
2491 assert(is_CopyB(node) && data_type);
2492 node->attr.copyb.type = data_type;
2496 ir_type *get_InstOf_type(ir_node *node)
2498 assert(node->op == op_InstOf);
2499 return node->attr.instof.type;
2502 void set_InstOf_type(ir_node *node, ir_type *type)
2504 assert(node->op == op_InstOf);
2505 node->attr.instof.type = type;
2508 ir_node *get_InstOf_store(const ir_node *node)
2510 assert(node->op == op_InstOf);
2511 return get_irn_n(node, 0);
2514 void set_InstOf_store(ir_node *node, ir_node *obj)
2516 assert(node->op == op_InstOf);
2517 set_irn_n(node, 0, obj);
2520 ir_node *get_InstOf_obj(const ir_node *node)
2522 assert(node->op == op_InstOf);
2523 return get_irn_n(node, 1);
2526 void set_InstOf_obj(ir_node *node, ir_node *obj)
2528 assert(node->op == op_InstOf);
2529 set_irn_n(node, 1, obj);
2532 /* Returns the memory input of a Raise operation. */
2533 ir_node *get_Raise_mem(const ir_node *node)
2535 assert(is_Raise(node));
2536 return get_irn_n(node, 0);
2539 void set_Raise_mem(ir_node *node, ir_node *mem)
2541 assert(is_Raise(node));
2542 set_irn_n(node, 0, mem);
2545 ir_node *get_Raise_exo_ptr(const ir_node *node)
2547 assert(is_Raise(node));
2548 return get_irn_n(node, 1);
2551 void set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr)
2553 assert(is_Raise(node));
2554 set_irn_n(node, 1, exo_ptr);
2559 /* Returns the memory input of a Bound operation. */
2560 ir_node *get_Bound_mem(const ir_node *bound)
2562 assert(is_Bound(bound));
2563 return get_irn_n(bound, 0);
2566 void set_Bound_mem(ir_node *bound, ir_node *mem)
2568 assert(is_Bound(bound));
2569 set_irn_n(bound, 0, mem);
2572 /* Returns the index input of a Bound operation. */
2573 ir_node *get_Bound_index(const ir_node *bound)
2575 assert(is_Bound(bound));
2576 return get_irn_n(bound, 1);
2579 void set_Bound_index(ir_node *bound, ir_node *idx)
2581 assert(is_Bound(bound));
2582 set_irn_n(bound, 1, idx);
2585 /* Returns the lower bound input of a Bound operation. */
2586 ir_node *get_Bound_lower(const ir_node *bound)
2588 assert(is_Bound(bound));
2589 return get_irn_n(bound, 2);
2592 void set_Bound_lower(ir_node *bound, ir_node *lower)
2594 assert(is_Bound(bound));
2595 set_irn_n(bound, 2, lower);
2598 /* Returns the upper bound input of a Bound operation. */
2599 ir_node *get_Bound_upper(const ir_node *bound)
2601 assert(is_Bound(bound));
2602 return get_irn_n(bound, 3);
2605 void set_Bound_upper(ir_node *bound, ir_node *upper)
2607 assert(is_Bound(bound));
2608 set_irn_n(bound, 3, upper);
2611 /* Return the operand of a Pin node. */
2612 ir_node *get_Pin_op(const ir_node *pin)
2614 assert(is_Pin(pin));
2615 return get_irn_n(pin, 0);
2618 void set_Pin_op(ir_node *pin, ir_node *node)
2620 assert(is_Pin(pin));
2621 set_irn_n(pin, 0, node);
2624 /* Return the assembler text of an ASM pseudo node. */
2625 ident *get_ASM_text(const ir_node *node)
2627 assert(is_ASM(node));
2628 return node->attr.assem.asm_text;
2631 /* Return the number of input constraints for an ASM node. */
2632 int get_ASM_n_input_constraints(const ir_node *node)
2634 assert(is_ASM(node));
2635 return ARR_LEN(node->attr.assem.inputs);
2638 /* Return the input constraints for an ASM node. This is a flexible array. */
2639 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node)
2641 assert(is_ASM(node));
2642 return node->attr.assem.inputs;
2645 /* Return the number of output constraints for an ASM node. */
2646 int get_ASM_n_output_constraints(const ir_node *node)
2648 assert(is_ASM(node));
2649 return ARR_LEN(node->attr.assem.outputs);
2652 /* Return the output constraints for an ASM node. */
2653 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node)
2655 assert(is_ASM(node));
2656 return node->attr.assem.outputs;
2659 /* Return the number of clobbered registers for an ASM node. */
2660 int get_ASM_n_clobbers(const ir_node *node)
2662 assert(is_ASM(node));
2663 return ARR_LEN(node->attr.assem.clobber);
2666 /* Return the list of clobbered registers for an ASM node. */
2667 ident **get_ASM_clobbers(const ir_node *node)
2669 assert(is_ASM(node));
2670 return node->attr.assem.clobber;
2673 /* returns the graph of a node */
2674 ir_graph *(get_irn_irg)(const ir_node *node)
2676 return _get_irn_irg(node);
2680 /*----------------------------------------------------------------*/
2681 /* Auxiliary routines */
2682 /*----------------------------------------------------------------*/
2684 ir_node *skip_Proj(ir_node *node)
2686 /* don't assert node !!! */
2691 node = get_Proj_pred(node);
2697 skip_Proj_const(const ir_node *node)
2699 /* don't assert node !!! */
2704 node = get_Proj_pred(node);
2709 ir_node *skip_Tuple(ir_node *node)
2715 if (is_Proj(node)) {
2716 pred = get_Proj_pred(node);
2717 op = get_irn_op(pred);
2720 * Looks strange but calls get_irn_op() only once
2721 * in most often cases.
2723 if (op == op_Proj) { /* nested Tuple ? */
2724 pred = skip_Tuple(pred);
2726 if (is_Tuple(pred)) {
2727 node = get_Tuple_pred(pred, get_Proj_proj(node));
2730 } else if (op == op_Tuple) {
2731 node = get_Tuple_pred(pred, get_Proj_proj(node));
2738 /* returns operand of node if node is a Cast */
2739 ir_node *skip_Cast(ir_node *node)
2742 return get_Cast_op(node);
2746 /* returns operand of node if node is a Cast */
2747 const ir_node *skip_Cast_const(const ir_node *node)
2750 return get_Cast_op(node);
2754 /* returns operand of node if node is a Pin */
2755 ir_node *skip_Pin(ir_node *node)
2758 return get_Pin_op(node);
2762 /* returns operand of node if node is a Confirm */
2763 ir_node *skip_Confirm(ir_node *node)
2765 if (is_Confirm(node))
2766 return get_Confirm_value(node);
2770 /* skip all high-level ops */
2771 ir_node *skip_HighLevel_ops(ir_node *node)
2773 while (is_op_highlevel(get_irn_op(node))) {
2774 node = get_irn_n(node, 0);
2780 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2781 * than any other approach, as Id chains are resolved and all point to the real node, or
2782 * all id's are self loops.
2784 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2785 * a little bit "hand optimized".
2787 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2789 ir_node *skip_Id(ir_node *node)
2792 /* don't assert node !!! */
2794 if (!node || (node->op != op_Id)) return node;
2796 /* Don't use get_Id_pred(): We get into an endless loop for
2797 self-referencing Ids. */
2798 pred = node->in[0+1];
2800 if (pred->op != op_Id) return pred;
2802 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2803 ir_node *rem_pred, *res;
2805 if (pred->op != op_Id) return pred; /* shortcut */
2808 assert(get_irn_arity (node) > 0);
2810 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2811 res = skip_Id(rem_pred);
2812 if (res->op == op_Id) /* self-loop */ return node;
2814 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2821 int (is_strictConv)(const ir_node *node)
2823 return _is_strictConv(node);
2826 int (is_no_Block)(const ir_node *node)
2828 return _is_no_Block(node);
2831 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
2832 int (is_SymConst_addr_ent)(const ir_node *node)
2834 return _is_SymConst_addr_ent(node);
2837 /* Returns true if the operation manipulates control flow. */
2838 int is_cfop(const ir_node *node)
2840 return is_op_cfopcode(get_irn_op(node));
2843 /* Returns true if the operation manipulates interprocedural control flow:
2844 CallBegin, EndReg, EndExcept */
2845 int is_ip_cfop(const ir_node *node)
2847 return is_ip_cfopcode(get_irn_op(node));
2850 /* Returns true if the operation can change the control flow because
2852 int is_fragile_op(const ir_node *node)
2854 return is_op_fragile(get_irn_op(node));
2857 /* Returns the memory operand of fragile operations. */
2858 ir_node *get_fragile_op_mem(ir_node *node)
2860 assert(node && is_fragile_op(node));
2862 switch (get_irn_opcode(node)) {
2873 return get_irn_n(node, pn_Generic_M);
2878 panic("should not be reached");
2882 /* Returns the result mode of a Div operation. */
2883 ir_mode *get_divop_resmod(const ir_node *node)
2885 switch (get_irn_opcode(node)) {
2886 case iro_Quot : return get_Quot_resmode(node);
2887 case iro_DivMod: return get_DivMod_resmode(node);
2888 case iro_Div : return get_Div_resmode(node);
2889 case iro_Mod : return get_Mod_resmode(node);
2891 panic("should not be reached");
2895 /* Returns true if the operation is a forking control flow operation. */
2896 int (is_irn_forking)(const ir_node *node)
2898 return _is_irn_forking(node);
2901 void (copy_node_attr)(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
2903 _copy_node_attr(irg, old_node, new_node);
2906 /* Return the type associated with the value produced by n
2907 * if the node remarks this type as it is the case for
2908 * Cast, Const, SymConst and some Proj nodes. */
2909 ir_type *(get_irn_type)(ir_node *node)
2911 return _get_irn_type(node);
2914 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2916 ir_type *(get_irn_type_attr)(ir_node *node)
2918 return _get_irn_type_attr(node);
2921 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2922 ir_entity *(get_irn_entity_attr)(ir_node *node)
2924 return _get_irn_entity_attr(node);
2927 /* Returns non-zero for constant-like nodes. */
2928 int (is_irn_constlike)(const ir_node *node)
2930 return _is_irn_constlike(node);
2934 * Returns non-zero for nodes that are allowed to have keep-alives and
2935 * are neither Block nor PhiM.
2937 int (is_irn_keep)(const ir_node *node)
2939 return _is_irn_keep(node);
2943 * Returns non-zero for nodes that are always placed in the start block.
2945 int (is_irn_start_block_placed)(const ir_node *node)
2947 return _is_irn_start_block_placed(node);
2950 /* Returns non-zero for nodes that are machine operations. */
2951 int (is_irn_machine_op)(const ir_node *node)
2953 return _is_irn_machine_op(node);
2956 /* Returns non-zero for nodes that are machine operands. */
2957 int (is_irn_machine_operand)(const ir_node *node)
2959 return _is_irn_machine_operand(node);
2962 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2963 int (is_irn_machine_user)(const ir_node *node, unsigned n)
2965 return _is_irn_machine_user(node, n);
2968 /* Returns non-zero for nodes that are CSE neutral to its users. */
2969 int (is_irn_cse_neutral)(const ir_node *node)
2971 return _is_irn_cse_neutral(node);
2974 /* Gets the string representation of the jump prediction .*/
2975 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
2977 #define X(a) case a: return #a;
2979 X(COND_JMP_PRED_NONE);
2980 X(COND_JMP_PRED_TRUE);
2981 X(COND_JMP_PRED_FALSE);
2987 /* Returns the conditional jump prediction of a Cond node. */
2988 cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond)
2990 return _get_Cond_jmp_pred(cond);
2993 /* Sets a new conditional jump prediction. */
2994 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred)
2996 _set_Cond_jmp_pred(cond, pred);
2999 /** the get_type operation must be always implemented and return a firm type */
3000 static ir_type *get_Default_type(ir_node *n)
3003 return get_unknown_type();
3006 /* Sets the get_type operation for an ir_op_ops. */
3007 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops)
3010 case iro_Const: ops->get_type = get_Const_type; break;
3011 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
3012 case iro_Cast: ops->get_type = get_Cast_type; break;
3013 case iro_Proj: ops->get_type = get_Proj_type; break;
3015 /* not allowed to be NULL */
3016 if (! ops->get_type)
3017 ops->get_type = get_Default_type;
3023 /** Return the attribute type of a SymConst node if exists */
3024 static ir_type *get_SymConst_attr_type(ir_node *self)
3026 symconst_kind kind = get_SymConst_kind(self);
3027 if (SYMCONST_HAS_TYPE(kind))
3028 return get_SymConst_type(self);
3032 /** Return the attribute entity of a SymConst node if exists */
3033 static ir_entity *get_SymConst_attr_entity(ir_node *self)
3035 symconst_kind kind = get_SymConst_kind(self);
3036 if (SYMCONST_HAS_ENT(kind))
3037 return get_SymConst_entity(self);
3041 /** the get_type_attr operation must be always implemented */
3042 static ir_type *get_Null_type(ir_node *n)
3045 return firm_unknown_type;
3048 /* Sets the get_type operation for an ir_op_ops. */
3049 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops)
3052 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
3053 case iro_Call: ops->get_type_attr = get_Call_type; break;
3054 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
3055 case iro_Free: ops->get_type_attr = get_Free_type; break;
3056 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
3058 /* not allowed to be NULL */
3059 if (! ops->get_type_attr)
3060 ops->get_type_attr = get_Null_type;
3066 /** the get_entity_attr operation must be always implemented */
3067 static ir_entity *get_Null_ent(ir_node *n)
3073 /* Sets the get_type operation for an ir_op_ops. */
3074 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops)
3077 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
3078 case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
3080 /* not allowed to be NULL */
3081 if (! ops->get_entity_attr)
3082 ops->get_entity_attr = get_Null_ent;
3088 /* Sets the debug information of a node. */
3089 void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
3091 _set_irn_dbg_info(n, db);
3095 * Returns the debug information of an node.
3097 * @param n The node.
3099 dbg_info *(get_irn_dbg_info)(const ir_node *n)
3101 return _get_irn_dbg_info(n);
3104 /* checks whether a node represents a global address */
3105 int is_Global(const ir_node *node)
3107 return is_SymConst_addr_ent(node);
3110 /* returns the entity of a global address */
3111 ir_entity *get_Global_entity(const ir_node *node)
3113 return get_SymConst_entity(node);
3117 * Calculate a hash value of a node.
3119 unsigned firm_default_hash(const ir_node *node)
3124 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
3125 h = irn_arity = get_irn_intra_arity(node);
3127 /* consider all in nodes... except the block if not a control flow. */
3128 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
3129 ir_node *pred = get_irn_intra_n(node, i);
3130 if (is_irn_cse_neutral(pred))
3133 h = 9*h + HASH_PTR(pred);
3137 h = 9*h + HASH_PTR(get_irn_mode(node));
3139 h = 9*h + HASH_PTR(get_irn_op(node));
3142 } /* firm_default_hash */
3144 /* include generated code */
3145 #include "gen_irnode.c.inl"