2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
49 /* some constants fixing the positions of nodes predecessors in their in arrays */
51 #define CALL_PARAM_OFFSET 2
52 #define BUILDIN_PARAM_OFFSET 1
53 #define SEL_INDEX_OFFSET 2
54 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
55 #define END_KEEPALIVE_OFFSET 0
/** Human-readable names for the pn_Cmp relation codes, indexed by code. */
static const char *const pnc_name_arr[] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 *
 * @param pnc  the pn_Cmp code; must be in [0, 15]
 * @return the static name string, or "<invalid pnc>" for an
 *         out-of-range code (instead of undefined behavior when
 *         assertions are compiled out with NDEBUG)
 */
const char *get_pnc_string(int pnc)
{
	assert(pnc >= 0 && pnc <
	       (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
	/* explicit guard: the assert vanishes under NDEBUG, and an
	 * out-of-range index would otherwise read past the array */
	if (pnc < 0 || pnc >= (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])))
		return "<invalid pnc>";
	return pnc_name_arr[pnc];
}
75 * Calculates the negated (Complement(R)) pnc condition.
77 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode)
81 /* do NOT add the Uo bit for non-floating point values */
82 if (! mode_is_float(mode))
88 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
89 pn_Cmp get_inversed_pnc(long pnc)
91 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
92 long lesser = pnc & pn_Cmp_Lt;
93 long greater = pnc & pn_Cmp_Gt;
95 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
101 * Indicates, whether additional data can be registered to ir nodes.
102 * If set to 1, this is not possible anymore.
104 static int forbid_new_data = 0;
107 * The amount of additional space for custom data to be allocated upon
108 * creating a new node.
110 unsigned firm_add_node_size = 0;
113 /* register new space for every node */
114 unsigned firm_register_additional_node_data(unsigned size)
116 assert(!forbid_new_data && "Too late to register additional node data");
121 return firm_add_node_size += size;
125 void init_irnode(void)
127 /* Forbid the addition of new data to an ir node. */
131 struct struct_align {
141 * irnode constructor.
142 * Create a new irnode in irg, with an op, mode, arity and
143 * some incoming irnodes.
144 * If arity is negative, a node with a dynamic array is created.
146 ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
147 ir_mode *mode, int arity, ir_node **in)
150 unsigned align = offsetof(struct struct_align, s) - 1;
151 unsigned add_node_size = (firm_add_node_size + align) & ~align;
152 size_t node_size = offsetof(ir_node, attr) + op->attr_size + add_node_size;
159 p = obstack_alloc(irg->obst, node_size);
160 memset(p, 0, node_size);
161 res = (ir_node *)(p + add_node_size);
163 res->kind = k_ir_node;
167 res->node_idx = irg_register_node_idx(irg, res);
172 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
174 /* not nice but necessary: End and Sync must always have a flexible array */
175 if (op == op_End || op == op_Sync)
176 res->in = NEW_ARR_F(ir_node *, (arity+1));
178 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
179 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
183 set_irn_dbg_info(res, db);
185 res->node_nr = get_irp_new_node_nr();
187 for (i = 0; i < EDGE_KIND_LAST; ++i) {
188 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
189 /* edges will be build immediately */
190 res->edge_info[i].edges_built = 1;
191 res->edge_info[i].out_count = 0;
194 /* don't put this into the for loop, arity is -1 for some nodes! */
195 edges_notify_edge(res, -1, res->in[0], NULL, irg);
196 for (i = 1; i <= arity; ++i)
197 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
199 hook_new_node(irg, res);
200 if (get_irg_phase_state(irg) == phase_backend) {
201 be_info_new_node(res);
207 /*-- getting some parameters from ir_nodes --*/
209 int (is_ir_node)(const void *thing)
211 return _is_ir_node(thing);
214 int (get_irn_intra_arity)(const ir_node *node)
216 return _get_irn_intra_arity(node);
219 int (get_irn_inter_arity)(const ir_node *node)
221 return _get_irn_inter_arity(node);
224 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
226 int (get_irn_arity)(const ir_node *node)
228 return _get_irn_arity(node);
231 /* Returns the array with ins. This array is shifted with respect to the
232 array accessed by get_irn_n: The block operand is at position 0 not -1.
233 (@@@ This should be changed.)
234 The order of the predecessors in this array is not guaranteed, except that
235 lists of operands as predecessors of Block or arguments of a Call are
237 ir_node **get_irn_in(const ir_node *node)
240 #ifdef INTERPROCEDURAL_VIEW
241 if (get_interprocedural_view()) { /* handle Filter and Block specially */
242 if (get_irn_opcode(node) == iro_Filter) {
243 assert(node->attr.filter.in_cg);
244 return node->attr.filter.in_cg;
245 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
246 return node->attr.block.in_cg;
248 /* else fall through */
250 #endif /* INTERPROCEDURAL_VIEW */
254 void set_irn_in(ir_node *node, int arity, ir_node **in)
258 ir_graph *irg = get_irn_irg(node);
261 #ifdef INTERPROCEDURAL_VIEW
262 if (get_interprocedural_view()) { /* handle Filter and Block specially */
263 ir_opcode code = get_irn_opcode(node);
264 if (code == iro_Filter) {
265 assert(node->attr.filter.in_cg);
266 pOld_in = &node->attr.filter.in_cg;
267 } else if (code == iro_Block && node->attr.block.in_cg) {
268 pOld_in = &node->attr.block.in_cg;
273 #endif /* INTERPROCEDURAL_VIEW */
277 for (i = 0; i < arity; i++) {
278 if (i < ARR_LEN(*pOld_in)-1)
279 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
281 edges_notify_edge(node, i, in[i], NULL, irg);
283 for (;i < ARR_LEN(*pOld_in)-1; i++) {
284 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
287 if (arity != ARR_LEN(*pOld_in) - 1) {
288 ir_node * block = (*pOld_in)[0];
289 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
290 (*pOld_in)[0] = block;
292 fix_backedges(irg->obst, node);
294 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
297 ir_node *(get_irn_intra_n)(const ir_node *node, int n)
299 return _get_irn_intra_n(node, n);
302 ir_node *(get_irn_inter_n)(const ir_node *node, int n)
304 return _get_irn_inter_n(node, n);
307 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
309 ir_node *(get_irn_n)(const ir_node *node, int n)
311 return _get_irn_n(node, n);
314 void set_irn_n(ir_node *node, int n, ir_node *in)
316 assert(node && node->kind == k_ir_node);
318 assert(n < get_irn_arity(node));
319 assert(in && in->kind == k_ir_node);
321 #ifdef INTERPROCEDURAL_VIEW
322 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
323 /* Change block pred in both views! */
324 node->in[n + 1] = in;
325 assert(node->attr.filter.in_cg);
326 node->attr.filter.in_cg[n + 1] = in;
329 if (get_interprocedural_view()) { /* handle Filter and Block specially */
330 if (get_irn_opcode(node) == iro_Filter) {
331 assert(node->attr.filter.in_cg);
332 node->attr.filter.in_cg[n + 1] = in;
334 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
335 node->attr.block.in_cg[n + 1] = in;
338 /* else fall through */
340 #endif /* INTERPROCEDURAL_VIEW */
343 hook_set_irn_n(node, n, in, node->in[n + 1]);
345 /* Here, we rely on src and tgt being in the current ir graph */
346 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
348 node->in[n + 1] = in;
351 int add_irn_n(ir_node *node, ir_node *in)
354 ir_graph *irg = get_irn_irg(node);
356 assert(node->op->opar == oparity_dynamic);
357 pos = ARR_LEN(node->in) - 1;
358 ARR_APP1(ir_node *, node->in, in);
359 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
362 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
367 void del_Sync_n(ir_node *n, int i)
369 int arity = get_Sync_n_preds(n);
370 ir_node *last_pred = get_Sync_pred(n, arity - 1);
371 set_Sync_pred(n, i, last_pred);
372 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
373 ARR_SHRINKLEN(get_irn_in(n), arity);
376 int (get_irn_deps)(const ir_node *node)
378 return _get_irn_deps(node);
381 ir_node *(get_irn_dep)(const ir_node *node, int pos)
383 return _get_irn_dep(node, pos);
386 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
388 _set_irn_dep(node, pos, dep);
391 int add_irn_dep(ir_node *node, ir_node *dep)
395 /* DEP edges are only allowed in backend phase */
396 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
397 if (node->deps == NULL) {
398 node->deps = NEW_ARR_F(ir_node *, 1);
404 for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
405 if (node->deps[i] == NULL)
408 if (node->deps[i] == dep)
412 if (first_zero >= 0) {
413 node->deps[first_zero] = dep;
416 ARR_APP1(ir_node *, node->deps, dep);
421 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
426 void add_irn_deps(ir_node *tgt, ir_node *src)
430 for (i = 0, n = get_irn_deps(src); i < n; ++i)
431 add_irn_dep(tgt, get_irn_dep(src, i));
435 ir_mode *(get_irn_mode)(const ir_node *node)
437 return _get_irn_mode(node);
440 void (set_irn_mode)(ir_node *node, ir_mode *mode)
442 _set_irn_mode(node, mode);
445 /** Gets the string representation of the mode .*/
446 const char *get_irn_modename(const ir_node *node)
449 return get_mode_name(node->mode);
452 ident *get_irn_modeident(const ir_node *node)
455 return get_mode_ident(node->mode);
458 ir_op *(get_irn_op)(const ir_node *node)
460 return _get_irn_op(node);
463 /* should be private to the library: */
464 void (set_irn_op)(ir_node *node, ir_op *op)
466 _set_irn_op(node, op);
469 unsigned (get_irn_opcode)(const ir_node *node)
471 return _get_irn_opcode(node);
474 const char *get_irn_opname(const ir_node *node)
477 if (is_Phi0(node)) return "Phi0";
478 return get_id_str(node->op->name);
481 ident *get_irn_opident(const ir_node *node)
484 return node->op->name;
487 ir_visited_t (get_irn_visited)(const ir_node *node)
489 return _get_irn_visited(node);
492 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
494 _set_irn_visited(node, visited);
497 void (mark_irn_visited)(ir_node *node)
499 _mark_irn_visited(node);
502 int (irn_visited)(const ir_node *node)
504 return _irn_visited(node);
507 int (irn_visited_else_mark)(ir_node *node)
509 return _irn_visited_else_mark(node);
512 void (set_irn_link)(ir_node *node, void *link)
514 _set_irn_link(node, link);
517 void *(get_irn_link)(const ir_node *node)
519 return _get_irn_link(node);
522 op_pin_state (get_irn_pinned)(const ir_node *node)
524 return _get_irn_pinned(node);
527 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
529 return _is_irn_pinned_in_irg(node);
532 void set_irn_pinned(ir_node *node, op_pin_state state)
534 /* due to optimization an opt may be turned into a Tuple */
538 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
539 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
541 node->attr.except.pin_state = state;
544 /* Outputs a unique number for this node */
545 long get_irn_node_nr(const ir_node *node)
548 return node->node_nr;
551 const_attr *get_irn_const_attr(ir_node *node)
553 assert(is_Const(node));
554 return &node->attr.con;
557 long get_irn_proj_attr(ir_node *node)
559 /* BEWARE: check for true Proj node here, no Filter */
560 assert(node->op == op_Proj);
561 return node->attr.proj;
564 alloc_attr *get_irn_alloc_attr(ir_node *node)
566 assert(is_Alloc(node));
567 return &node->attr.alloc;
570 free_attr *get_irn_free_attr(ir_node *node)
572 assert(is_Free(node));
573 return &node->attr.free;
576 symconst_attr *get_irn_symconst_attr(ir_node *node)
578 assert(is_SymConst(node));
579 return &node->attr.symc;
582 call_attr *get_irn_call_attr(ir_node *node)
584 assert(is_Call(node));
585 return &node->attr.call;
588 sel_attr *get_irn_sel_attr(ir_node *node)
590 assert(is_Sel(node));
591 return &node->attr.sel;
594 phi_attr *get_irn_phi_attr(ir_node *node)
596 return &node->attr.phi;
599 block_attr *get_irn_block_attr(ir_node *node)
601 assert(is_Block(node));
602 return &node->attr.block;
605 load_attr *get_irn_load_attr(ir_node *node)
607 assert(is_Load(node));
608 return &node->attr.load;
611 store_attr *get_irn_store_attr(ir_node *node)
613 assert(is_Store(node));
614 return &node->attr.store;
617 except_attr *get_irn_except_attr(ir_node *node)
619 assert(node->op == op_Div || node->op == op_Quot ||
620 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
621 return &node->attr.except;
624 divmod_attr *get_irn_divmod_attr(ir_node *node)
626 assert(node->op == op_Div || node->op == op_Quot ||
627 node->op == op_DivMod || node->op == op_Mod);
628 return &node->attr.divmod;
631 builtin_attr *get_irn_builtin_attr(ir_node *node)
633 assert(is_Builtin(node));
634 return &node->attr.builtin;
637 void *(get_irn_generic_attr)(ir_node *node)
639 assert(is_ir_node(node));
640 return _get_irn_generic_attr(node);
643 const void *(get_irn_generic_attr_const)(const ir_node *node)
645 assert(is_ir_node(node));
646 return _get_irn_generic_attr_const(node);
649 unsigned (get_irn_idx)(const ir_node *node)
651 assert(is_ir_node(node));
652 return _get_irn_idx(node);
655 int get_irn_pred_pos(ir_node *node, ir_node *arg)
658 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
659 if (get_irn_n(node, i) == arg)
665 /** manipulate fields of individual nodes **/
667 /* this works for all except Block */
668 ir_node *get_nodes_block(const ir_node *node)
670 assert(node->op != op_Block);
671 return get_irn_n(node, -1);
674 void set_nodes_block(ir_node *node, ir_node *block)
676 assert(node->op != op_Block);
677 set_irn_n(node, -1, block);
680 /* this works for all except Block */
681 ir_node *get_nodes_MacroBlock(const ir_node *node)
683 assert(node->op != op_Block);
684 return get_Block_MacroBlock(get_irn_n(node, -1));
687 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
688 * from Start. If so returns frame type, else Null. */
689 ir_type *is_frame_pointer(const ir_node *n)
691 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
692 ir_node *start = get_Proj_pred(n);
693 if (is_Start(start)) {
694 return get_irg_frame_type(get_irn_irg(start));
700 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
701 * from Start. If so returns tls type, else Null. */
702 ir_type *is_tls_pointer(const ir_node *n)
704 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
705 ir_node *start = get_Proj_pred(n);
706 if (is_Start(start)) {
707 return get_tls_type();
713 ir_node **get_Block_cfgpred_arr(ir_node *node)
715 assert(is_Block(node));
716 return (ir_node **)&(get_irn_in(node)[1]);
719 int (get_Block_n_cfgpreds)(const ir_node *node)
721 return _get_Block_n_cfgpreds(node);
724 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
726 return _get_Block_cfgpred(node, pos);
729 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
731 assert(is_Block(node));
732 set_irn_n(node, pos, pred);
735 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
739 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
740 if (get_Block_cfgpred_block(block, i) == pred)
746 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
748 return _get_Block_cfgpred_block(node, pos);
751 int get_Block_matured(const ir_node *node)
753 assert(is_Block(node));
754 return (int)node->attr.block.is_matured;
757 void set_Block_matured(ir_node *node, int matured)
759 assert(is_Block(node));
760 node->attr.block.is_matured = matured;
763 ir_visited_t (get_Block_block_visited)(const ir_node *node)
765 return _get_Block_block_visited(node);
768 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
770 _set_Block_block_visited(node, visit);
773 /* For this current_ir_graph must be set. */
774 void (mark_Block_block_visited)(ir_node *node)
776 _mark_Block_block_visited(node);
779 int (Block_block_visited)(const ir_node *node)
781 return _Block_block_visited(node);
784 #ifdef INTERPROCEDURAL_VIEW
785 void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[])
787 assert(is_Block(node));
788 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
789 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
790 node->attr.block.in_cg[0] = NULL;
791 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
793 /* Fix backedge array. fix_backedges() operates depending on
794 interprocedural_view. */
795 int ipv = get_interprocedural_view();
796 set_interprocedural_view(1);
797 fix_backedges(current_ir_graph->obst, node);
798 set_interprocedural_view(ipv);
801 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
804 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred)
806 assert(is_Block(node) && node->attr.block.in_cg &&
807 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
808 node->attr.block.in_cg[pos + 1] = pred;
811 ir_node **get_Block_cg_cfgpred_arr(ir_node *node)
813 assert(is_Block(node));
814 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
817 int get_Block_cg_n_cfgpreds(const ir_node *node)
819 assert(is_Block(node));
820 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
823 ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos)
825 assert(is_Block(node) && node->attr.block.in_cg);
826 return node->attr.block.in_cg[pos + 1];
829 void remove_Block_cg_cfgpred_arr(ir_node *node)
831 assert(is_Block(node));
832 node->attr.block.in_cg = NULL;
834 #endif /* INTERPROCEDURAL_VIEW */
836 ir_node *(set_Block_dead)(ir_node *block)
838 return _set_Block_dead(block);
841 int (is_Block_dead)(const ir_node *block)
843 return _is_Block_dead(block);
846 ir_extblk *get_Block_extbb(const ir_node *block)
849 assert(is_Block(block));
850 res = block->attr.block.extblk;
851 assert(res == NULL || is_ir_extbb(res));
855 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
857 assert(is_Block(block));
858 assert(extblk == NULL || is_ir_extbb(extblk));
859 block->attr.block.extblk = extblk;
862 /* Returns the macro block header of a block.*/
863 ir_node *get_Block_MacroBlock(const ir_node *block)
866 assert(is_Block(block));
867 mbh = get_irn_n(block, -1);
868 /* once macro block header is respected by all optimizations,
869 this assert can be removed */
874 /* Sets the macro block header of a block. */
875 void set_Block_MacroBlock(ir_node *block, ir_node *mbh)
877 assert(is_Block(block));
879 assert(is_Block(mbh));
880 set_irn_n(block, -1, mbh);
883 /* returns the macro block header of a node. */
884 ir_node *get_irn_MacroBlock(const ir_node *n)
887 n = get_nodes_block(n);
888 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
892 return get_Block_MacroBlock(n);
895 /* returns the graph of a Block. */
896 ir_graph *(get_Block_irg)(const ir_node *block)
898 return _get_Block_irg(block);
901 ir_entity *create_Block_entity(ir_node *block)
904 assert(is_Block(block));
906 entity = block->attr.block.entity;
907 if (entity == NULL) {
911 glob = get_glob_type();
912 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
913 set_entity_visibility(entity, ir_visibility_local);
914 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
915 nr = get_irp_next_label_nr();
916 set_entity_label(entity, nr);
917 set_entity_compiler_generated(entity, 1);
919 block->attr.block.entity = entity;
924 ir_entity *get_Block_entity(const ir_node *block)
926 assert(is_Block(block));
927 return block->attr.block.entity;
930 void set_Block_entity(ir_node *block, ir_entity *entity)
932 assert(is_Block(block));
933 assert(get_entity_type(entity) == get_code_type());
934 block->attr.block.entity = entity;
937 int has_Block_entity(const ir_node *block)
939 return block->attr.block.entity != NULL;
942 ir_node *(get_Block_phis)(const ir_node *block)
944 return _get_Block_phis(block);
947 void (set_Block_phis)(ir_node *block, ir_node *phi)
949 _set_Block_phis(block, phi);
952 void (add_Block_phi)(ir_node *block, ir_node *phi)
954 _add_Block_phi(block, phi);
957 /* Get the Block mark (single bit). */
958 unsigned (get_Block_mark)(const ir_node *block)
960 return _get_Block_mark(block);
963 /* Set the Block mark (single bit). */
964 void (set_Block_mark)(ir_node *block, unsigned mark)
966 _set_Block_mark(block, mark);
969 int get_End_n_keepalives(const ir_node *end)
972 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
975 ir_node *get_End_keepalive(const ir_node *end, int pos)
978 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
981 void add_End_keepalive(ir_node *end, ir_node *ka)
987 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
990 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
993 /* Set new keep-alives */
994 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
997 ir_graph *irg = get_irn_irg(end);
999 /* notify that edges are deleted */
1000 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
1001 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
1003 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
1005 for (i = 0; i < n; ++i) {
1006 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
1007 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
1011 /* Set new keep-alives from old keep-alives, skipping irn */
1012 void remove_End_keepalive(ir_node *end, ir_node *irn)
1014 int n = get_End_n_keepalives(end);
1019 for (i = n -1; i >= 0; --i) {
1020 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
1023 if (old_ka == irn) {
1030 irg = get_irn_irg(end);
1032 /* remove the edge */
1033 edges_notify_edge(end, idx, NULL, irn, irg);
1036 /* exchange with the last one */
1037 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
1038 edges_notify_edge(end, n - 1, NULL, old, irg);
1039 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
1040 edges_notify_edge(end, idx, old, NULL, irg);
1042 /* now n - 1 keeps, 1 block input */
1043 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
1046 /* remove Bads, NoMems and doublets from the keep-alive set */
1047 void remove_End_Bads_and_doublets(ir_node *end)
1050 int idx, n = get_End_n_keepalives(end);
1056 irg = get_irn_irg(end);
1057 pset_new_init(&keeps);
1059 for (idx = n - 1; idx >= 0; --idx) {
1060 ir_node *ka = get_End_keepalive(end, idx);
1062 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
1063 /* remove the edge */
1064 edges_notify_edge(end, idx, NULL, ka, irg);
1067 /* exchange with the last one */
1068 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
1069 edges_notify_edge(end, n - 1, NULL, old, irg);
1070 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
1071 edges_notify_edge(end, idx, old, NULL, irg);
1075 pset_new_insert(&keeps, ka);
1078 /* n keeps, 1 block input */
1079 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
1081 pset_new_destroy(&keeps);
1084 void free_End(ir_node *end)
1086 assert(is_End(end));
1089 end->in = NULL; /* @@@ make sure we get an error if we use the
1090 in array afterwards ... */
1093 int get_Return_n_ress(const ir_node *node)
1095 assert(is_Return(node));
1096 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1099 ir_node **get_Return_res_arr(ir_node *node)
1101 assert(is_Return(node));
1102 if (get_Return_n_ress(node) > 0)
1103 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1109 void set_Return_n_res(ir_node *node, int results)
1111 assert(is_Return(node));
1115 ir_node *get_Return_res(const ir_node *node, int pos)
1117 assert(is_Return(node));
1118 assert(get_Return_n_ress(node) > pos);
1119 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1122 void set_Return_res(ir_node *node, int pos, ir_node *res)
1124 assert(is_Return(node));
1125 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1128 int (is_Const_null)(const ir_node *node)
1130 return _is_Const_null(node);
1133 int (is_Const_one)(const ir_node *node)
1135 return _is_Const_one(node);
1138 int (is_Const_all_one)(const ir_node *node)
1140 return _is_Const_all_one(node);
1144 /* The source language type. Must be an atomic type. Mode of type must
1145 be mode of node. For tarvals from entities type must be pointer to
1147 ir_type *get_Const_type(const ir_node *node)
1149 assert(is_Const(node));
1150 return node->attr.con.tp;
1153 void set_Const_type(ir_node *node, ir_type *tp)
1155 assert(is_Const(node));
1156 if (tp != firm_unknown_type) {
1157 assert(is_atomic_type(tp));
1158 assert(get_type_mode(tp) == get_irn_mode(node));
1160 node->attr.con.tp = tp;
1164 symconst_kind get_SymConst_kind(const ir_node *node)
1166 assert(is_SymConst(node));
1167 return node->attr.symc.kind;
1170 void set_SymConst_kind(ir_node *node, symconst_kind kind)
1172 assert(is_SymConst(node));
1173 node->attr.symc.kind = kind;
1176 ir_type *get_SymConst_type(const ir_node *node)
1178 /* the cast here is annoying, but we have to compensate for
1180 ir_node *irn = (ir_node *)node;
1181 assert(is_SymConst(node) &&
1182 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1183 return irn->attr.symc.sym.type_p;
1186 void set_SymConst_type(ir_node *node, ir_type *tp)
1188 assert(is_SymConst(node) &&
1189 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1190 node->attr.symc.sym.type_p = tp;
1194 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1195 ir_entity *get_SymConst_entity(const ir_node *node)
1197 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1198 return node->attr.symc.sym.entity_p;
1201 void set_SymConst_entity(ir_node *node, ir_entity *ent)
1203 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1204 node->attr.symc.sym.entity_p = ent;
1207 ir_enum_const *get_SymConst_enum(const ir_node *node)
1209 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1210 return node->attr.symc.sym.enum_p;
1213 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
1215 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1216 node->attr.symc.sym.enum_p = ec;
1219 union symconst_symbol
1220 get_SymConst_symbol(const ir_node *node)
1222 assert(is_SymConst(node));
1223 return node->attr.symc.sym;
1226 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
1228 assert(is_SymConst(node));
1229 node->attr.symc.sym = sym;
1232 ir_type *get_SymConst_value_type(const ir_node *node)
1234 assert(is_SymConst(node));
1235 return node->attr.symc.tp;
1238 void set_SymConst_value_type(ir_node *node, ir_type *tp)
1240 assert(is_SymConst(node));
1241 node->attr.symc.tp = tp;
1244 int get_Sel_n_indexs(const ir_node *node)
1246 assert(is_Sel(node));
1247 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1250 ir_node **get_Sel_index_arr(ir_node *node)
1252 assert(is_Sel(node));
1253 if (get_Sel_n_indexs(node) > 0)
1254 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1259 ir_node *get_Sel_index(const ir_node *node, int pos)
1261 assert(is_Sel(node));
1262 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1265 void set_Sel_index(ir_node *node, int pos, ir_node *index)
1267 assert(is_Sel(node));
1268 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1272 /* For unary and binary arithmetic operations the access to the
1273 operands can be factored out. Left is the first, right the
1274 second arithmetic value as listed in tech report 0999-33.
1275 unops are: Minus, Abs, Not, Conv, Cast
1276 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1277 Shr, Shrs, Rotate, Cmp */
1280 ir_node **get_Call_param_arr(ir_node *node)
1282 assert(is_Call(node));
1283 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1286 int get_Call_n_params(const ir_node *node)
1288 assert(is_Call(node));
1289 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1292 ir_node *get_Call_param(const ir_node *node, int pos)
1294 assert(is_Call(node));
1295 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1298 void set_Call_param(ir_node *node, int pos, ir_node *param)
1300 assert(is_Call(node));
1301 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1304 ir_node **get_Builtin_param_arr(ir_node *node)
1306 assert(is_Builtin(node));
1307 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1310 int get_Builtin_n_params(const ir_node *node)
1312 assert(is_Builtin(node));
1313 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1316 ir_node *get_Builtin_param(const ir_node *node, int pos)
1318 assert(is_Builtin(node));
1319 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1322 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
1324 assert(is_Builtin(node));
1325 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1328 /* Returns a human readable string for the ir_builtin_kind. */
1329 const char *get_builtin_kind_name(ir_builtin_kind kind)
1331 #define X(a) case a: return #a
1334 X(ir_bk_debugbreak);
1335 X(ir_bk_return_address);
1336 X(ir_bk_frame_address);
1346 X(ir_bk_inner_trampoline);
1353 int Call_has_callees(const ir_node *node)
1355 assert(is_Call(node));
1356 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1357 (node->attr.call.callee_arr != NULL));
1360 int get_Call_n_callees(const ir_node *node)
1362 assert(is_Call(node) && node->attr.call.callee_arr);
1363 return ARR_LEN(node->attr.call.callee_arr);
1366 ir_entity *get_Call_callee(const ir_node *node, int pos)
1368 assert(pos >= 0 && pos < get_Call_n_callees(node));
1369 return node->attr.call.callee_arr[pos];
1372 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr)
1374 assert(is_Call(node));
1375 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1376 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1378 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1381 void remove_Call_callee_arr(ir_node *node)
1383 assert(is_Call(node));
1384 node->attr.call.callee_arr = NULL;
1388 * Returns non-zero if a Call is surely a self-recursive Call.
1389 * Beware: if this functions returns 0, the call might be self-recursive!
1391 int is_self_recursive_Call(const ir_node *call)
1393 const ir_node *callee = get_Call_ptr(call);
1395 if (is_SymConst_addr_ent(callee)) {
1396 const ir_entity *ent = get_SymConst_entity(callee);
1397 const ir_graph *irg = get_entity_irg(ent);
1398 if (irg == get_irn_irg(call))
1404 /* Checks for upcast.
1406 * Returns true if the Cast node casts a class type to a super type.
1408 int is_Cast_upcast(ir_node *node)
/* Source type comes from the type-info analysis, which must be up to date. */
1410 ir_type *totype = get_Cast_type(node);
1411 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1413 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
/* Strip matching pointer levels so we compare the pointed-to class types. */
1416 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1417 totype = get_pointer_points_to_type(totype);
1418 fromtype = get_pointer_points_to_type(fromtype);
/* Upcast: the source class must be a subclass of the destination class. */
1423 if (!is_Class_type(totype)) return 0;
1424 return is_SubClass_of(fromtype, totype);
1427 /* Checks for downcast.
1429 * Returns true if the Cast node casts a class type to a sub type.
1431 int is_Cast_downcast(ir_node *node)
1433 ir_type *totype = get_Cast_type(node);
1434 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1436 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
/* Same pointer stripping as in is_Cast_upcast. */
1439 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1440 totype = get_pointer_points_to_type(totype);
1441 fromtype = get_pointer_points_to_type(fromtype)
/* Downcast: the destination class must be a subclass of the source class
 * (note the swapped argument order relative to is_Cast_upcast). */
1446 if (!is_Class_type(totype)) return 0;
1447 return is_SubClass_of(totype, fromtype);
1450 int (is_unop)(const ir_node *node)
1452 return _is_unop(node);
1455 ir_node *get_unop_op(const ir_node *node)
1457 if (node->op->opar == oparity_unary)
1458 return get_irn_n(node, node->op->op_index);
1460 assert(node->op->opar == oparity_unary);
1464 void set_unop_op(ir_node *node, ir_node *op)
1466 if (node->op->opar == oparity_unary)
1467 set_irn_n(node, node->op->op_index, op);
1469 assert(node->op->opar == oparity_unary);
1472 int (is_binop)(const ir_node *node)
1474 return _is_binop(node);
1477 ir_node *get_binop_left(const ir_node *node)
1479 assert(node->op->opar == oparity_binary);
1480 return get_irn_n(node, node->op->op_index);
1483 void set_binop_left(ir_node *node, ir_node *left)
1485 assert(node->op->opar == oparity_binary);
1486 set_irn_n(node, node->op->op_index, left);
1489 ir_node *get_binop_right(const ir_node *node)
1491 assert(node->op->opar == oparity_binary);
1492 return get_irn_n(node, node->op->op_index + 1);
1495 void set_binop_right(ir_node *node, ir_node *right)
1497 assert(node->op->opar == oparity_binary);
1498 set_irn_n(node, node->op->op_index + 1, right);
1501 int is_Phi0(const ir_node *n)
1505 return ((get_irn_op(n) == op_Phi) &&
1506 (get_irn_arity(n) == 0) &&
1507 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1510 ir_node **get_Phi_preds_arr(ir_node *node)
1512 assert(node->op == op_Phi);
1513 return (ir_node **)&(get_irn_in(node)[1]);
1516 int get_Phi_n_preds(const ir_node *node)
1518 assert(is_Phi(node) || is_Phi0(node));
1519 return (get_irn_arity(node));
/* Sets the number of Phi predecessors.
 * NOTE(review): the body of this function is elided in this chunk; only the
 * op_Phi assertion is visible — confirm against the full source. */
1523 void set_Phi_n_preds(ir_node *node, int n_preds)
1525 assert(node->op == op_Phi);
1529 ir_node *get_Phi_pred(const ir_node *node, int pos)
1531 assert(is_Phi(node) || is_Phi0(node));
1532 return get_irn_n(node, pos);
1535 void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1537 assert(is_Phi(node) || is_Phi0(node));
1538 set_irn_n(node, pos, pred);
1541 ir_node *(get_Phi_next)(const ir_node *phi)
1543 return _get_Phi_next(phi);
1546 void (set_Phi_next)(ir_node *phi, ir_node *next)
1548 _set_Phi_next(phi, next);
1551 int is_memop(const ir_node *node)
1553 ir_opcode code = get_irn_opcode(node);
1554 return (code == iro_Load || code == iro_Store);
1557 ir_node *get_memop_mem(const ir_node *node)
1559 assert(is_memop(node));
1560 return get_irn_n(node, 0);
1563 void set_memop_mem(ir_node *node, ir_node *mem)
1565 assert(is_memop(node));
1566 set_irn_n(node, 0, mem);
1569 ir_node *get_memop_ptr(const ir_node *node)
1571 assert(is_memop(node));
1572 return get_irn_n(node, 1);
1575 void set_memop_ptr(ir_node *node, ir_node *ptr)
1577 assert(is_memop(node));
1578 set_irn_n(node, 1, ptr);
1581 ir_volatility get_Load_volatility(const ir_node *node)
1583 assert(is_Load(node));
1584 return node->attr.load.volatility;
1587 void set_Load_volatility(ir_node *node, ir_volatility volatility)
1589 assert(is_Load(node));
1590 node->attr.load.volatility = volatility;
1593 ir_align get_Load_align(const ir_node *node)
1595 assert(is_Load(node));
1596 return node->attr.load.aligned;
1599 void set_Load_align(ir_node *node, ir_align align)
1601 assert(is_Load(node));
1602 node->attr.load.aligned = align;
1606 ir_volatility get_Store_volatility(const ir_node *node)
1608 assert(is_Store(node));
1609 return node->attr.store.volatility;
1612 void set_Store_volatility(ir_node *node, ir_volatility volatility)
1614 assert(is_Store(node));
1615 node->attr.store.volatility = volatility;
1618 ir_align get_Store_align(const ir_node *node)
1620 assert(is_Store(node));
1621 return node->attr.store.aligned;
1624 void set_Store_align(ir_node *node, ir_align align)
1626 assert(is_Store(node));
1627 node->attr.store.aligned = align;
1631 ir_node **get_Sync_preds_arr(ir_node *node)
1633 assert(is_Sync(node));
1634 return (ir_node **)&(get_irn_in(node)[1]);
1637 int get_Sync_n_preds(const ir_node *node)
1639 assert(is_Sync(node));
1640 return (get_irn_arity(node));
/* Sets the number of Sync predecessors.
 * NOTE(review): the body of this function is elided in this chunk; only the
 * is_Sync assertion is visible — confirm against the full source. */
1644 void set_Sync_n_preds(ir_node *node, int n_preds)
1646 assert(is_Sync(node));
1650 ir_node *get_Sync_pred(const ir_node *node, int pos)
1652 assert(is_Sync(node));
1653 return get_irn_n(node, pos);
1656 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
1658 assert(is_Sync(node));
1659 set_irn_n(node, pos, pred);
1662 /* Add a new Sync predecessor */
1663 void add_Sync_pred(ir_node *node, ir_node *pred)
1665 assert(is_Sync(node));
1666 add_irn_n(node, pred);
1669 /* Returns the source language type of a Proj node. */
1670 ir_type *get_Proj_type(const ir_node *n)
/* Default answer when no type can be derived. */
1672 ir_type *tp = firm_unknown_type;
1673 ir_node *pred = get_Proj_pred(n);
/* NOTE(review): the case labels of this switch are elided in this chunk;
 * this first arm presumably handles a Proj of a Proj (of Start or Call). */
1675 switch (get_irn_opcode(pred)) {
1678 /* Deal with Start / Call here: we need to know the Proj Nr. */
1679 assert(get_irn_mode(pred) == mode_T);
1680 pred_pred = get_Proj_pred(pred);
/* Proj(Proj(Start)): the n-th parameter type of the current method. */
1682 if (is_Start(pred_pred)) {
1683 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1684 tp = get_method_param_type(mtp, get_Proj_proj(n));
/* Proj(Proj(Call)): the n-th result type of the called method. */
1685 } else if (is_Call(pred_pred)) {
1686 ir_type *mtp = get_Call_type(pred_pred);
1687 tp = get_method_res_type(mtp, get_Proj_proj(n));
/* Direct Proj of Start/Call carries no value type itself. */
1690 case iro_Start: break;
1691 case iro_Call: break;
/* Presumably the iro_Load case: if the address is a Sel, use its entity's
 * type — TODO confirm against the full source (case label elided). */
1693 ir_node *a = get_Load_ptr(pred);
1695 tp = get_entity_type(get_Sel_entity(a));
1703 long get_Proj_proj(const ir_node *node)
1705 #ifdef INTERPROCEDURAL_VIEW
1706 ir_opcode code = get_irn_opcode(node);
1708 if (code == iro_Proj) {
1709 return node->attr.proj;
1712 assert(code == iro_Filter);
1713 return node->attr.filter.proj;
1716 assert(is_Proj(node));
1717 return node->attr.proj;
1718 #endif /* INTERPROCEDURAL_VIEW */
1721 void set_Proj_proj(ir_node *node, long proj)
1723 #ifdef INTERPROCEDURAL_VIEW
1724 ir_opcode code = get_irn_opcode(node);
1726 if (code == iro_Proj) {
1727 node->attr.proj = proj;
1730 assert(code == iro_Filter);
1731 node->attr.filter.proj = proj;
1734 assert(is_Proj(node));
1735 node->attr.proj = proj;
1736 #endif /* INTERPROCEDURAL_VIEW */
1739 /* Returns non-zero if a node is a routine parameter. */
1740 int (is_arg_Proj)(const ir_node *node)
1742 return _is_arg_Proj(node);
1745 ir_node **get_Tuple_preds_arr(ir_node *node)
1747 assert(is_Tuple(node));
1748 return (ir_node **)&(get_irn_in(node)[1]);
1751 int get_Tuple_n_preds(const ir_node *node)
1753 assert(is_Tuple(node));
1754 return get_irn_arity(node);
/* Sets the number of Tuple predecessors.
 * NOTE(review): the body of this function is elided in this chunk; only the
 * is_Tuple assertion is visible — confirm against the full source. */
1758 void set_Tuple_n_preds(ir_node *node, int n_preds)
1760 assert(is_Tuple(node));
1764 ir_node *get_Tuple_pred(const ir_node *node, int pos)
1766 assert(is_Tuple(node));
1767 return get_irn_n(node, pos);
1770 void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
1772 assert(is_Tuple(node));
1773 set_irn_n(node, pos, pred);
1776 /* Don't use get_irn_arity, get_irn_n in implementation as access
1777 shall work independent of view!!! */
1778 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in)
1780 assert(is_Filter(node));
1781 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1782 ir_graph *irg = get_irn_irg(node);
1783 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1784 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
1785 node->attr.filter.in_cg[0] = node->in[0];
1787 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1790 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred)
1792 assert(is_Filter(node) && node->attr.filter.in_cg &&
1793 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1794 node->attr.filter.in_cg[pos + 1] = pred;
1797 int get_Filter_n_cg_preds(const ir_node *node)
1799 assert(is_Filter(node) && node->attr.filter.in_cg);
1800 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Returns the interprocedural predecessor at position pos (skips the
 * in_cg[0] slot, which mirrors node->in[0]). */
1803 ir_node *get_Filter_cg_pred(const ir_node *node, int pos)
/* NOTE(review): the continuation of this assert and the declaration of
 * `arity` are elided in this chunk. */
1806 assert(is_Filter(node) && node->attr.filter.in_cg &&
1808 arity = ARR_LEN(node->attr.filter.in_cg);
1809 assert(pos < arity - 1);
1810 return node->attr.filter.in_cg[pos + 1];
1813 int get_ASM_n_input_constraints(const ir_node *node)
1815 assert(is_ASM(node));
1816 return ARR_LEN(node->attr.assem.input_constraints);
1819 int get_ASM_n_output_constraints(const ir_node *node)
1821 assert(is_ASM(node));
1822 return ARR_LEN(node->attr.assem.output_constraints);
1825 int get_ASM_n_clobbers(const ir_node *node)
1827 assert(is_ASM(node));
1828 return ARR_LEN(node->attr.assem.clobbers);
1831 /* returns the graph of a node */
1832 ir_graph *(get_irn_irg)(const ir_node *node)
1834 return _get_irn_irg(node);
1838 /*----------------------------------------------------------------*/
1839 /* Auxiliary routines */
1840 /*----------------------------------------------------------------*/
/* Returns the predecessor of a Proj node, the node itself otherwise.
 * NOTE(review): the guard (presumably an is_Proj check) and the return
 * statement are elided in this chunk. */
1842 ir_node *skip_Proj(ir_node *node)
1844 /* don't assert node !!! */
1849 node = get_Proj_pred(node);
/* Const variant of skip_Proj; the return-type line of the signature is
 * elided in this chunk. */
1855 skip_Proj_const(const ir_node *node)
1857 /* don't assert node !!! */
1862 node = get_Proj_pred(node);
/* Resolves Proj-of-Tuple chains: if node is a Proj whose predecessor is a
 * Tuple (possibly behind further nested Proj/Tuple pairs), returns the
 * corresponding Tuple input; otherwise returns node unchanged.
 * NOTE(review): the declarations of `pred`/`op` and the return statements
 * are elided in this chunk. */
1867 ir_node *skip_Tuple(ir_node *node)
1873 if (is_Proj(node)) {
1874 pred = get_Proj_pred(node);
1875 op = get_irn_op(pred);
1878 * Looks strange but calls get_irn_op() only once
1879 * in most often cases.
/* Nested case: the Proj's predecessor is itself a Proj; resolve it first. */
1881 if (op == op_Proj) { /* nested Tuple ? */
1882 pred = skip_Tuple(pred);
1884 if (is_Tuple(pred)) {
1885 node = get_Tuple_pred(pred, get_Proj_proj(node));
/* Direct case: Proj of a Tuple selects the proj-th Tuple input. */
1888 } else if (op == op_Tuple) {
1889 node = get_Tuple_pred(pred, get_Proj_proj(node));
1896 /* returns operand of node if node is a Cast */
1897 ir_node *skip_Cast(ir_node *node)
1900 return get_Cast_op(node);
1904 /* returns operand of node if node is a Cast */
1905 const ir_node *skip_Cast_const(const ir_node *node)
1908 return get_Cast_op(node);
1912 /* returns operand of node if node is a Pin */
1913 ir_node *skip_Pin(ir_node *node)
1916 return get_Pin_op(node);
1920 /* returns operand of node if node is a Confirm */
1921 ir_node *skip_Confirm(ir_node *node)
1923 if (is_Confirm(node))
1924 return get_Confirm_value(node);
1928 /* skip all high-level ops */
1929 ir_node *skip_HighLevel_ops(ir_node *node)
1931 while (is_op_highlevel(get_irn_op(node))) {
1932 node = get_irn_n(node, 0);
1938 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1939 * than any other approach, as Id chains are resolved and all point to the real node, or
1940 * all id's are self loops.
1942 * Note: This function takes 10% of mostly ANY the compiler run, so it's
1943 * a little bit "hand optimized".
1945 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
1947 ir_node *skip_Id(ir_node *node)
1950 /* don't assert node !!! */
/* Fast path: not an Id (or NULL) — nothing to skip. */
1952 if (!node || (node->op != op_Id)) return node;
1954 /* Don't use get_Id_pred(): We get into an endless loop for
1955 self-referencing Ids. */
1956 pred = node->in[0+1];
/* Chain of length one: predecessor is the real node. */
1958 if (pred->op != op_Id) return pred;
1960 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1961 ir_node *rem_pred, *res;
1963 if (pred->op != op_Id) return pred; /* shortcut */
/* NOTE(review): the line saving pred into rem_pred (presumably
 * "rem_pred = pred;") is elided in this chunk. */
1966 assert(get_irn_arity (node) > 0);
1968 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1969 res = skip_Id(rem_pred);
1970 if (res->op == op_Id) /* self-loop */ return node;
1972 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1979 int (is_strictConv)(const ir_node *node)
1981 return _is_strictConv(node);
1984 int (is_no_Block)(const ir_node *node)
1986 return _is_no_Block(node);
1989 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
1990 int (is_SymConst_addr_ent)(const ir_node *node)
1992 return _is_SymConst_addr_ent(node);
1995 /* Returns true if the operation manipulates control flow. */
1996 int is_cfop(const ir_node *node)
1998 return is_op_cfopcode(get_irn_op(node));
2001 /* Returns true if the operation manipulates interprocedural control flow:
2002 CallBegin, EndReg, EndExcept */
2003 int is_ip_cfop(const ir_node *node)
2005 return is_ip_cfopcode(get_irn_op(node));
2008 /* Returns true if the operation can change the control flow because
2010 int is_fragile_op(const ir_node *node)
2012 return is_op_fragile(get_irn_op(node));
2015 /* Returns the memory operand of fragile operations. */
2016 ir_node *get_fragile_op_mem(ir_node *node)
1218 assert(node && is_fragile_op(node));
/* NOTE(review): the case labels of this switch (the fragile opcodes that
 * share the generic memory input position) are elided in this chunk. */
2020 switch (get_irn_opcode(node)) {
2031 return get_irn_n(node, pn_Generic_M);
/* Reached for fragile opcodes not handled above. */
2036 panic("should not be reached");
2040 /* Returns the result mode of a Div operation. */
2041 ir_mode *get_divop_resmod(const ir_node *node)
2043 switch (get_irn_opcode(node)) {
2044 case iro_Quot : return get_Quot_resmode(node);
2045 case iro_DivMod: return get_DivMod_resmode(node);
2046 case iro_Div : return get_Div_resmode(node);
2047 case iro_Mod : return get_Mod_resmode(node);
2049 panic("should not be reached");
2053 /* Returns true if the operation is a forking control flow operation. */
2054 int (is_irn_forking)(const ir_node *node)
2056 return _is_irn_forking(node);
2059 void (copy_node_attr)(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
2061 _copy_node_attr(irg, old_node, new_node);
2064 /* Return the type associated with the value produced by n
2065 * if the node remarks this type as it is the case for
2066 * Cast, Const, SymConst and some Proj nodes. */
2067 ir_type *(get_irn_type)(ir_node *node)
2069 return _get_irn_type(node);
2072 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2074 ir_type *(get_irn_type_attr)(ir_node *node)
2076 return _get_irn_type_attr(node);
2079 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2080 ir_entity *(get_irn_entity_attr)(ir_node *node)
2082 return _get_irn_entity_attr(node);
2085 /* Returns non-zero for constant-like nodes. */
2086 int (is_irn_constlike)(const ir_node *node)
2088 return _is_irn_constlike(node);
2092 * Returns non-zero for nodes that are allowed to have keep-alives and
2093 * are neither Block nor PhiM.
2095 int (is_irn_keep)(const ir_node *node)
2097 return _is_irn_keep(node);
2101 * Returns non-zero for nodes that are always placed in the start block.
2103 int (is_irn_start_block_placed)(const ir_node *node)
2105 return _is_irn_start_block_placed(node);
2108 /* Returns non-zero for nodes that are machine operations. */
2109 int (is_irn_machine_op)(const ir_node *node)
2111 return _is_irn_machine_op(node);
2114 /* Returns non-zero for nodes that are machine operands. */
2115 int (is_irn_machine_operand)(const ir_node *node)
2117 return _is_irn_machine_operand(node);
2120 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2121 int (is_irn_machine_user)(const ir_node *node, unsigned n)
2123 return _is_irn_machine_user(node, n);
2126 /* Returns non-zero for nodes that are CSE neutral to its users. */
2127 int (is_irn_cse_neutral)(const ir_node *node)
2129 return _is_irn_cse_neutral(node);
2132 /* Gets the string representation of the jump prediction .*/
2133 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
/* X(a) expands each enum constant into a switch case returning its name.
 * NOTE(review): the surrounding "switch (pred) {", the closing brace and
 * any fallback return are elided in this chunk. */
2135 #define X(a) case a: return #a
2137 X(COND_JMP_PRED_NONE);
2138 X(COND_JMP_PRED_TRUE);
2139 X(COND_JMP_PRED_FALSE);
2145 /** the get_type operation must be always implemented and return a firm type */
2146 static ir_type *get_Default_type(const ir_node *n)
2149 return get_unknown_type();
2152 /* Sets the get_type operation for an ir_op_ops. */
2153 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops)
/* Opcodes that carry a value type get their specific accessor.
 * NOTE(review): the "switch (code) {" line, the default label and the
 * closing "return ops;" are elided in this chunk. */
2156 case iro_Const: ops->get_type = get_Const_type; break;
2157 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2158 case iro_Cast: ops->get_type = get_Cast_type; break;
2159 case iro_Proj: ops->get_type = get_Proj_type; break;
2161 /* not allowed to be NULL */
2162 if (! ops->get_type)
2163 ops->get_type = get_Default_type;
2169 /** Return the attribute type of a SymConst node if exists */
2170 static ir_type *get_SymConst_attr_type(const ir_node *self)
/* Only some SymConst kinds carry a type attribute.
 * NOTE(review): the fallback return for kinds without a type is elided in
 * this chunk. */
2172 symconst_kind kind = get_SymConst_kind(self);
2173 if (SYMCONST_HAS_TYPE(kind))
2174 return get_SymConst_type(self);
2178 /** Return the attribute entity of a SymConst node if exists */
2179 static ir_entity *get_SymConst_attr_entity(const ir_node *self)
/* Only some SymConst kinds carry an entity attribute.
 * NOTE(review): the fallback return for kinds without an entity is elided
 * in this chunk. */
2181 symconst_kind kind = get_SymConst_kind(self);
2182 if (SYMCONST_HAS_ENT(kind))
2183 return get_SymConst_entity(self);
2187 /** the get_type_attr operation must be always implemented */
2188 static ir_type *get_Null_type(const ir_node *n)
2191 return firm_unknown_type;
2194 /* Sets the get_type operation for an ir_op_ops. */
2195 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops)
/* Opcodes with a type attribute get their specific accessor.
 * NOTE(review): the "switch (code) {" line, the default label and the
 * closing "return ops;" are elided in this chunk. */
2198 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2199 case iro_Call: ops->get_type_attr = get_Call_type; break;
2200 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2201 case iro_Free: ops->get_type_attr = get_Free_type; break;
2202 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2204 /* not allowed to be NULL */
2205 if (! ops->get_type_attr)
2206 ops->get_type_attr = get_Null_type;
2212 /** the get_entity_attr operation must be always implemented */
/* NOTE(review): the body of get_Null_ent is elided in this chunk;
 * presumably it returns NULL for nodes without an entity attribute. */
2213 static ir_entity *get_Null_ent(const ir_node *n)
2219 /* Sets the get_type operation for an ir_op_ops. */
2220 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops)
/* Opcodes with an entity attribute get their specific accessor.
 * NOTE(review): the "switch (code) {" line, the default label and the
 * closing "return ops;" are elided in this chunk. */
2223 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2224 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
2226 /* not allowed to be NULL */
2227 if (! ops->get_entity_attr)
2228 ops->get_entity_attr = get_Null_ent;
2234 /* Sets the debug information of a node. */
2235 void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
2237 _set_irn_dbg_info(n, db);
2241 * Returns the debug information of an node.
2243 * @param n The node.
2245 dbg_info *(get_irn_dbg_info)(const ir_node *n)
2247 return _get_irn_dbg_info(n);
2250 /* checks whether a node represents a global address */
2251 int is_Global(const ir_node *node)
2253 return is_SymConst_addr_ent(node);
2256 /* returns the entity of a global address */
2257 ir_entity *get_Global_entity(const ir_node *node)
2259 return get_SymConst_entity(node);
2263 * Calculate a hash value of a node.
/* NOTE(review): the declarations of h, i and irn_arity are elided in this
 * chunk, as is the final "return h;". */
2265 unsigned firm_default_hash(const ir_node *node)
2270 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
2271 h = irn_arity = get_irn_intra_arity(node);
2273 /* consider all in nodes... except the block if not a control flow. */
2274 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
2275 ir_node *pred = get_irn_intra_n(node, i);
/* CSE-neutral predecessors are skipped so that equivalent nodes hash
 * equally regardless of such inputs.
 * NOTE(review): the "continue;" of this guard is elided in this chunk. */
2276 if (is_irn_cse_neutral(pred))
2279 h = 9*h + HASH_PTR(pred);
/* Mix in mode and opcode so nodes with equal inputs still differ. */
2283 h = 9*h + HASH_PTR(get_irn_mode(node));
2285 h = 9*h + HASH_PTR(get_irn_op(node));
2288 } /* firm_default_hash */
2290 /* include generated code */
2291 #include "gen_irnode.c.inl"