2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
49 /* some constants fixing the positions of nodes predecessors
51 #define CALL_PARAM_OFFSET 2
52 #define BUILDIN_PARAM_OFFSET 1
53 #define SEL_INDEX_OFFSET 2
54 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
55 #define END_KEEPALIVE_OFFSET 0
/** Printable names for the pn_Cmp relation constants, indexed by the
 *  pn_Cmp value itself (bit combination of Lt/Eq/Gt/Uo). */
static const char *pnc_name_arr[] = {
	"pn_Cmp_False", "pn_Cmp_Eq",  "pn_Cmp_Lt",  "pn_Cmp_Le",
	"pn_Cmp_Gt",    "pn_Cmp_Ge",  "pn_Cmp_Lg",  "pn_Cmp_Leg",
	"pn_Cmp_Uo",    "pn_Cmp_Ue",  "pn_Cmp_Ul",  "pn_Cmp_Ule",
	"pn_Cmp_Ug",    "pn_Cmp_Uge", "pn_Cmp_Ne",  "pn_Cmp_True"
};
65 * returns the pnc name from an pnc constant
67 const char *get_pnc_string(int pnc)
69 assert(pnc >= 0 && pnc <
70 (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
71 return pnc_name_arr[pnc];
75 * Calculates the negated (Complement(R)) pnc condition.
77 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode)
81 /* do NOT add the Uo bit for non-floating point values */
82 if (! mode_is_float(mode))
88 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
89 pn_Cmp get_inversed_pnc(long pnc)
91 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
92 long lesser = pnc & pn_Cmp_Lt;
93 long greater = pnc & pn_Cmp_Gt;
95 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
101 * Indicates, whether additional data can be registered to ir nodes.
102 * If set to 1, this is not possible anymore.
104 static int forbid_new_data = 0;
107 * The amount of additional space for custom data to be allocated upon
108 * creating a new node.
110 unsigned firm_add_node_size = 0;
113 /* register new space for every node */
114 unsigned firm_register_additional_node_data(unsigned size)
116 assert(!forbid_new_data && "Too late to register additional node data");
121 return firm_add_node_size += size;
125 void init_irnode(void)
127 /* Forbid the addition of new data to an ir node. */
131 struct struct_align {
141 * irnode constructor.
142 * Create a new irnode in irg, with an op, mode, arity and
143 * some incoming irnodes.
144 * If arity is negative, a node with a dynamic array is created.
146 ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
147 ir_mode *mode, int arity, ir_node **in)
150 unsigned align = offsetof(struct struct_align, s) - 1;
151 unsigned add_node_size = (firm_add_node_size + align) & ~align;
152 size_t node_size = offsetof(ir_node, attr) + op->attr_size + add_node_size;
159 p = obstack_alloc(irg->obst, node_size);
160 memset(p, 0, node_size);
161 res = (ir_node *)(p + add_node_size);
163 res->kind = k_ir_node;
167 res->node_idx = irg_register_node_idx(irg, res);
172 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
174 /* not nice but necessary: End and Sync must always have a flexible array */
175 if (op == op_End || op == op_Sync)
176 res->in = NEW_ARR_F(ir_node *, (arity+1));
178 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
179 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
183 set_irn_dbg_info(res, db);
185 res->node_nr = get_irp_new_node_nr();
187 for (i = 0; i < EDGE_KIND_LAST; ++i) {
188 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
189 /* edges will be build immediately */
190 res->edge_info[i].edges_built = 1;
191 res->edge_info[i].out_count = 0;
194 /* don't put this into the for loop, arity is -1 for some nodes! */
195 edges_notify_edge(res, -1, res->in[0], NULL, irg);
196 for (i = 1; i <= arity; ++i)
197 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
199 hook_new_node(irg, res);
200 if (get_irg_phase_state(irg) == phase_backend) {
201 be_info_new_node(res);
207 /*-- getting some parameters from ir_nodes --*/
209 int (is_ir_node)(const void *thing)
211 return _is_ir_node(thing);
214 int (get_irn_arity)(const ir_node *node)
216 return _get_irn_arity(node);
219 /* Returns the array with ins. This array is shifted with respect to the
220 array accessed by get_irn_n: The block operand is at position 0 not -1.
221 (@@@ This should be changed.)
222 The order of the predecessors in this array is not guaranteed, except that
223 lists of operands as predecessors of Block or arguments of a Call are
225 ir_node **get_irn_in(const ir_node *node)
230 void set_irn_in(ir_node *node, int arity, ir_node **in)
234 ir_graph *irg = get_irn_irg(node);
239 for (i = 0; i < arity; i++) {
240 if (i < ARR_LEN(*pOld_in)-1)
241 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
243 edges_notify_edge(node, i, in[i], NULL, irg);
245 for (;i < ARR_LEN(*pOld_in)-1; i++) {
246 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
249 if (arity != ARR_LEN(*pOld_in) - 1) {
250 ir_node * block = (*pOld_in)[0];
251 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
252 (*pOld_in)[0] = block;
254 fix_backedges(irg->obst, node);
256 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
259 ir_node *(get_irn_n)(const ir_node *node, int n)
261 return _get_irn_n(node, n);
264 void set_irn_n(ir_node *node, int n, ir_node *in)
266 ir_graph *irg = get_irn_irg(node);
267 assert(node && node->kind == k_ir_node);
269 assert(n < get_irn_arity(node));
270 assert(in && in->kind == k_ir_node);
273 hook_set_irn_n(node, n, in, node->in[n + 1]);
275 /* Here, we rely on src and tgt being in the current ir graph */
276 edges_notify_edge(node, n, in, node->in[n + 1], irg);
278 node->in[n + 1] = in;
281 int add_irn_n(ir_node *node, ir_node *in)
284 ir_graph *irg = get_irn_irg(node);
286 assert(node->op->opar == oparity_dynamic);
287 pos = ARR_LEN(node->in) - 1;
288 ARR_APP1(ir_node *, node->in, in);
289 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
292 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
297 void del_Sync_n(ir_node *n, int i)
299 int arity = get_Sync_n_preds(n);
300 ir_node *last_pred = get_Sync_pred(n, arity - 1);
301 set_Sync_pred(n, i, last_pred);
302 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
303 ARR_SHRINKLEN(get_irn_in(n), arity);
306 int (get_irn_deps)(const ir_node *node)
308 return _get_irn_deps(node);
311 ir_node *(get_irn_dep)(const ir_node *node, int pos)
313 return _get_irn_dep(node, pos);
316 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
318 _set_irn_dep(node, pos, dep);
321 int add_irn_dep(ir_node *node, ir_node *dep)
325 /* DEP edges are only allowed in backend phase */
326 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
327 if (node->deps == NULL) {
328 node->deps = NEW_ARR_F(ir_node *, 1);
334 for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
335 if (node->deps[i] == NULL)
338 if (node->deps[i] == dep)
342 if (first_zero >= 0) {
343 node->deps[first_zero] = dep;
346 ARR_APP1(ir_node *, node->deps, dep);
351 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
356 void add_irn_deps(ir_node *tgt, ir_node *src)
360 for (i = 0, n = get_irn_deps(src); i < n; ++i)
361 add_irn_dep(tgt, get_irn_dep(src, i));
365 ir_mode *(get_irn_mode)(const ir_node *node)
367 return _get_irn_mode(node);
370 void (set_irn_mode)(ir_node *node, ir_mode *mode)
372 _set_irn_mode(node, mode);
375 /** Gets the string representation of the mode .*/
376 const char *get_irn_modename(const ir_node *node)
379 return get_mode_name(node->mode);
382 ident *get_irn_modeident(const ir_node *node)
385 return get_mode_ident(node->mode);
388 ir_op *(get_irn_op)(const ir_node *node)
390 return _get_irn_op(node);
393 /* should be private to the library: */
394 void (set_irn_op)(ir_node *node, ir_op *op)
396 _set_irn_op(node, op);
399 unsigned (get_irn_opcode)(const ir_node *node)
401 return _get_irn_opcode(node);
404 const char *get_irn_opname(const ir_node *node)
407 if (is_Phi0(node)) return "Phi0";
408 return get_id_str(node->op->name);
411 ident *get_irn_opident(const ir_node *node)
414 return node->op->name;
417 ir_visited_t (get_irn_visited)(const ir_node *node)
419 return _get_irn_visited(node);
422 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
424 _set_irn_visited(node, visited);
427 void (mark_irn_visited)(ir_node *node)
429 _mark_irn_visited(node);
432 int (irn_visited)(const ir_node *node)
434 return _irn_visited(node);
437 int (irn_visited_else_mark)(ir_node *node)
439 return _irn_visited_else_mark(node);
442 void (set_irn_link)(ir_node *node, void *link)
444 _set_irn_link(node, link);
447 void *(get_irn_link)(const ir_node *node)
449 return _get_irn_link(node);
452 op_pin_state (get_irn_pinned)(const ir_node *node)
454 return _get_irn_pinned(node);
457 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
459 return _is_irn_pinned_in_irg(node);
462 void set_irn_pinned(ir_node *node, op_pin_state state)
464 /* due to optimization an opt may be turned into a Tuple */
468 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
469 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
471 node->attr.except.pin_state = state;
474 /* Outputs a unique number for this node */
475 long get_irn_node_nr(const ir_node *node)
478 return node->node_nr;
481 void *(get_irn_generic_attr)(ir_node *node)
483 assert(is_ir_node(node));
484 return _get_irn_generic_attr(node);
487 const void *(get_irn_generic_attr_const)(const ir_node *node)
489 assert(is_ir_node(node));
490 return _get_irn_generic_attr_const(node);
493 unsigned (get_irn_idx)(const ir_node *node)
495 assert(is_ir_node(node));
496 return _get_irn_idx(node);
499 int get_irn_pred_pos(ir_node *node, ir_node *arg)
502 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
503 if (get_irn_n(node, i) == arg)
509 /** manipulate fields of individual nodes **/
511 ir_node *(get_nodes_block)(const ir_node *node)
513 return _get_nodes_block(node);
516 void set_nodes_block(ir_node *node, ir_node *block)
518 assert(node->op != op_Block);
519 set_irn_n(node, -1, block);
522 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
523 * from Start. If so returns frame type, else Null. */
524 ir_type *is_frame_pointer(const ir_node *n)
526 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
527 ir_node *start = get_Proj_pred(n);
528 if (is_Start(start)) {
529 return get_irg_frame_type(get_irn_irg(start));
535 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
536 * from Start. If so returns tls type, else Null. */
537 ir_type *is_tls_pointer(const ir_node *n)
539 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
540 ir_node *start = get_Proj_pred(n);
541 if (is_Start(start)) {
542 return get_tls_type();
548 ir_node **get_Block_cfgpred_arr(ir_node *node)
550 assert(is_Block(node));
551 return (ir_node **)&(get_irn_in(node)[1]);
554 int (get_Block_n_cfgpreds)(const ir_node *node)
556 return _get_Block_n_cfgpreds(node);
559 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
561 return _get_Block_cfgpred(node, pos);
564 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
566 assert(is_Block(node));
567 set_irn_n(node, pos, pred);
570 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
574 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
575 if (get_Block_cfgpred_block(block, i) == pred)
581 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
583 return _get_Block_cfgpred_block(node, pos);
586 int get_Block_matured(const ir_node *node)
588 assert(is_Block(node));
589 return (int)node->attr.block.is_matured;
592 void set_Block_matured(ir_node *node, int matured)
594 assert(is_Block(node));
595 node->attr.block.is_matured = matured;
598 ir_visited_t (get_Block_block_visited)(const ir_node *node)
600 return _get_Block_block_visited(node);
603 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
605 _set_Block_block_visited(node, visit);
608 void (mark_Block_block_visited)(ir_node *node)
610 _mark_Block_block_visited(node);
613 int (Block_block_visited)(const ir_node *node)
615 return _Block_block_visited(node);
618 ir_node *(set_Block_dead)(ir_node *block)
620 return _set_Block_dead(block);
623 int (is_Block_dead)(const ir_node *block)
625 return _is_Block_dead(block);
628 ir_extblk *get_Block_extbb(const ir_node *block)
631 assert(is_Block(block));
632 res = block->attr.block.extblk;
633 assert(res == NULL || is_ir_extbb(res));
637 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
639 assert(is_Block(block));
640 assert(extblk == NULL || is_ir_extbb(extblk));
641 block->attr.block.extblk = extblk;
644 /* returns the graph of a Block. */
645 ir_graph *(get_Block_irg)(const ir_node *block)
647 return _get_Block_irg(block);
650 ir_entity *create_Block_entity(ir_node *block)
653 assert(is_Block(block));
655 entity = block->attr.block.entity;
656 if (entity == NULL) {
660 glob = get_glob_type();
661 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
662 set_entity_visibility(entity, ir_visibility_local);
663 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
664 nr = get_irp_next_label_nr();
665 set_entity_label(entity, nr);
666 set_entity_compiler_generated(entity, 1);
668 block->attr.block.entity = entity;
673 ir_entity *get_Block_entity(const ir_node *block)
675 assert(is_Block(block));
676 return block->attr.block.entity;
679 void set_Block_entity(ir_node *block, ir_entity *entity)
681 assert(is_Block(block));
682 assert(get_entity_type(entity) == get_code_type());
683 block->attr.block.entity = entity;
686 int has_Block_entity(const ir_node *block)
688 return block->attr.block.entity != NULL;
691 ir_node *(get_Block_phis)(const ir_node *block)
693 return _get_Block_phis(block);
696 void (set_Block_phis)(ir_node *block, ir_node *phi)
698 _set_Block_phis(block, phi);
701 void (add_Block_phi)(ir_node *block, ir_node *phi)
703 _add_Block_phi(block, phi);
706 /* Get the Block mark (single bit). */
707 unsigned (get_Block_mark)(const ir_node *block)
709 return _get_Block_mark(block);
712 /* Set the Block mark (single bit). */
713 void (set_Block_mark)(ir_node *block, unsigned mark)
715 _set_Block_mark(block, mark);
718 int get_End_n_keepalives(const ir_node *end)
721 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
724 ir_node *get_End_keepalive(const ir_node *end, int pos)
727 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
730 void add_End_keepalive(ir_node *end, ir_node *ka)
736 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
739 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
742 /* Set new keep-alives */
743 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
746 ir_graph *irg = get_irn_irg(end);
748 /* notify that edges are deleted */
749 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
750 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
752 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
754 for (i = 0; i < n; ++i) {
755 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
756 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
760 /* Set new keep-alives from old keep-alives, skipping irn */
761 void remove_End_keepalive(ir_node *end, ir_node *irn)
763 int n = get_End_n_keepalives(end);
768 for (i = n -1; i >= 0; --i) {
769 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
779 irg = get_irn_irg(end);
781 /* remove the edge */
782 edges_notify_edge(end, idx, NULL, irn, irg);
785 /* exchange with the last one */
786 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
787 edges_notify_edge(end, n - 1, NULL, old, irg);
788 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
789 edges_notify_edge(end, idx, old, NULL, irg);
791 /* now n - 1 keeps, 1 block input */
792 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
795 /* remove Bads, NoMems and doublets from the keep-alive set */
796 void remove_End_Bads_and_doublets(ir_node *end)
799 int idx, n = get_End_n_keepalives(end);
805 irg = get_irn_irg(end);
806 pset_new_init(&keeps);
808 for (idx = n - 1; idx >= 0; --idx) {
809 ir_node *ka = get_End_keepalive(end, idx);
811 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
812 /* remove the edge */
813 edges_notify_edge(end, idx, NULL, ka, irg);
816 /* exchange with the last one */
817 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
818 edges_notify_edge(end, n - 1, NULL, old, irg);
819 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
820 edges_notify_edge(end, idx, old, NULL, irg);
824 pset_new_insert(&keeps, ka);
827 /* n keeps, 1 block input */
828 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
830 pset_new_destroy(&keeps);
833 void free_End(ir_node *end)
838 end->in = NULL; /* @@@ make sure we get an error if we use the
839 in array afterwards ... */
842 int get_Return_n_ress(const ir_node *node)
844 assert(is_Return(node));
845 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
848 ir_node **get_Return_res_arr(ir_node *node)
850 assert(is_Return(node));
851 if (get_Return_n_ress(node) > 0)
852 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
858 void set_Return_n_res(ir_node *node, int results)
860 assert(is_Return(node));
864 ir_node *get_Return_res(const ir_node *node, int pos)
866 assert(is_Return(node));
867 assert(get_Return_n_ress(node) > pos);
868 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
871 void set_Return_res(ir_node *node, int pos, ir_node *res)
873 assert(is_Return(node));
874 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
877 int (is_Const_null)(const ir_node *node)
879 return _is_Const_null(node);
882 int (is_Const_one)(const ir_node *node)
884 return _is_Const_one(node);
887 int (is_Const_all_one)(const ir_node *node)
889 return _is_Const_all_one(node);
893 /* The source language type. Must be an atomic type. Mode of type must
894 be mode of node. For tarvals from entities type must be pointer to
896 ir_type *get_Const_type(const ir_node *node)
898 assert(is_Const(node));
899 return node->attr.con.tp;
902 void set_Const_type(ir_node *node, ir_type *tp)
904 assert(is_Const(node));
905 if (tp != firm_unknown_type) {
906 assert(is_atomic_type(tp));
907 assert(get_type_mode(tp) == get_irn_mode(node));
909 node->attr.con.tp = tp;
913 symconst_kind get_SymConst_kind(const ir_node *node)
915 assert(is_SymConst(node));
916 return node->attr.symc.kind;
919 void set_SymConst_kind(ir_node *node, symconst_kind kind)
921 assert(is_SymConst(node));
922 node->attr.symc.kind = kind;
925 ir_type *get_SymConst_type(const ir_node *node)
927 /* the cast here is annoying, but we have to compensate for
929 ir_node *irn = (ir_node *)node;
930 assert(is_SymConst(node) &&
931 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
932 return irn->attr.symc.sym.type_p;
935 void set_SymConst_type(ir_node *node, ir_type *tp)
937 assert(is_SymConst(node) &&
938 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
939 node->attr.symc.sym.type_p = tp;
943 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
944 ir_entity *get_SymConst_entity(const ir_node *node)
946 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
947 return node->attr.symc.sym.entity_p;
950 void set_SymConst_entity(ir_node *node, ir_entity *ent)
952 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
953 node->attr.symc.sym.entity_p = ent;
956 ir_enum_const *get_SymConst_enum(const ir_node *node)
958 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
959 return node->attr.symc.sym.enum_p;
962 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
964 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
965 node->attr.symc.sym.enum_p = ec;
968 union symconst_symbol
969 get_SymConst_symbol(const ir_node *node)
971 assert(is_SymConst(node));
972 return node->attr.symc.sym;
975 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
977 assert(is_SymConst(node));
978 node->attr.symc.sym = sym;
981 ir_type *get_SymConst_value_type(const ir_node *node)
983 assert(is_SymConst(node));
984 return node->attr.symc.tp;
987 void set_SymConst_value_type(ir_node *node, ir_type *tp)
989 assert(is_SymConst(node));
990 node->attr.symc.tp = tp;
993 int get_Sel_n_indexs(const ir_node *node)
995 assert(is_Sel(node));
996 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
999 ir_node **get_Sel_index_arr(ir_node *node)
1001 assert(is_Sel(node));
1002 if (get_Sel_n_indexs(node) > 0)
1003 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1008 ir_node *get_Sel_index(const ir_node *node, int pos)
1010 assert(is_Sel(node));
1011 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1014 void set_Sel_index(ir_node *node, int pos, ir_node *index)
1016 assert(is_Sel(node));
1017 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1021 /* For unary and binary arithmetic operations the access to the
1022 operands can be factored out. Left is the first, right the
1023 second arithmetic value as listed in tech report 0999-33.
1024 unops are: Minus, Abs, Not, Conv, Cast
1025 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1026 Shr, Shrs, Rotate, Cmp */
1029 ir_node **get_Call_param_arr(ir_node *node)
1031 assert(is_Call(node));
1032 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1035 int get_Call_n_params(const ir_node *node)
1037 assert(is_Call(node));
1038 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1041 ir_node *get_Call_param(const ir_node *node, int pos)
1043 assert(is_Call(node));
1044 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1047 void set_Call_param(ir_node *node, int pos, ir_node *param)
1049 assert(is_Call(node));
1050 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1053 ir_node **get_Builtin_param_arr(ir_node *node)
1055 assert(is_Builtin(node));
1056 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1059 int get_Builtin_n_params(const ir_node *node)
1061 assert(is_Builtin(node));
1062 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1065 ir_node *get_Builtin_param(const ir_node *node, int pos)
1067 assert(is_Builtin(node));
1068 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1071 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
1073 assert(is_Builtin(node));
1074 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1077 /* Returns a human readable string for the ir_builtin_kind. */
1078 const char *get_builtin_kind_name(ir_builtin_kind kind)
1080 #define X(a) case a: return #a
1083 X(ir_bk_debugbreak);
1084 X(ir_bk_return_address);
1085 X(ir_bk_frame_address);
1095 X(ir_bk_inner_trampoline);
1102 int Call_has_callees(const ir_node *node)
1104 assert(is_Call(node));
1105 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1106 (node->attr.call.callee_arr != NULL));
1109 int get_Call_n_callees(const ir_node *node)
1111 assert(is_Call(node) && node->attr.call.callee_arr);
1112 return ARR_LEN(node->attr.call.callee_arr);
1115 ir_entity *get_Call_callee(const ir_node *node, int pos)
1117 assert(pos >= 0 && pos < get_Call_n_callees(node));
1118 return node->attr.call.callee_arr[pos];
1121 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr)
1123 ir_graph *irg = get_irn_irg(node);
1125 assert(is_Call(node));
1126 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1127 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, irg->obst, n);
1129 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1132 void remove_Call_callee_arr(ir_node *node)
1134 assert(is_Call(node));
1135 node->attr.call.callee_arr = NULL;
1139 * Returns non-zero if a Call is surely a self-recursive Call.
1140 * Beware: if this functions returns 0, the call might be self-recursive!
1142 int is_self_recursive_Call(const ir_node *call)
1144 const ir_node *callee = get_Call_ptr(call);
1146 if (is_SymConst_addr_ent(callee)) {
1147 const ir_entity *ent = get_SymConst_entity(callee);
1148 const ir_graph *irg = get_entity_irg(ent);
1149 if (irg == get_irn_irg(call))
1155 /* Checks for upcast.
1157 * Returns true if the Cast node casts a class type to a super type.
1159 int is_Cast_upcast(ir_node *node)
1161 ir_type *totype = get_Cast_type(node);
1162 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1164 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1167 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1168 totype = get_pointer_points_to_type(totype);
1169 fromtype = get_pointer_points_to_type(fromtype);
1174 if (!is_Class_type(totype)) return 0;
1175 return is_SubClass_of(fromtype, totype);
1178 /* Checks for downcast.
1180 * Returns true if the Cast node casts a class type to a sub type.
1182 int is_Cast_downcast(ir_node *node)
1184 ir_type *totype = get_Cast_type(node);
1185 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1187 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1190 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1191 totype = get_pointer_points_to_type(totype);
1192 fromtype = get_pointer_points_to_type(fromtype);
1197 if (!is_Class_type(totype)) return 0;
1198 return is_SubClass_of(totype, fromtype);
1201 int (is_unop)(const ir_node *node)
1203 return _is_unop(node);
1206 ir_node *get_unop_op(const ir_node *node)
1208 if (node->op->opar == oparity_unary)
1209 return get_irn_n(node, node->op->op_index);
1211 assert(node->op->opar == oparity_unary);
1215 void set_unop_op(ir_node *node, ir_node *op)
1217 if (node->op->opar == oparity_unary)
1218 set_irn_n(node, node->op->op_index, op);
1220 assert(node->op->opar == oparity_unary);
1223 int (is_binop)(const ir_node *node)
1225 return _is_binop(node);
1228 ir_node *get_binop_left(const ir_node *node)
1230 assert(node->op->opar == oparity_binary);
1231 return get_irn_n(node, node->op->op_index);
1234 void set_binop_left(ir_node *node, ir_node *left)
1236 assert(node->op->opar == oparity_binary);
1237 set_irn_n(node, node->op->op_index, left);
1240 ir_node *get_binop_right(const ir_node *node)
1242 assert(node->op->opar == oparity_binary);
1243 return get_irn_n(node, node->op->op_index + 1);
1246 void set_binop_right(ir_node *node, ir_node *right)
1248 assert(node->op->opar == oparity_binary);
1249 set_irn_n(node, node->op->op_index + 1, right);
1252 int is_Phi0(const ir_node *n)
1256 return ((get_irn_op(n) == op_Phi) &&
1257 (get_irn_arity(n) == 0) &&
1258 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1261 ir_node **get_Phi_preds_arr(ir_node *node)
1263 assert(is_Phi(node));
1264 return (ir_node **)&(get_irn_in(node)[1]);
1267 int get_Phi_n_preds(const ir_node *node)
1269 assert(is_Phi(node) || is_Phi0(node));
1270 return (get_irn_arity(node));
1273 ir_node *get_Phi_pred(const ir_node *node, int pos)
1275 assert(is_Phi(node) || is_Phi0(node));
1276 return get_irn_n(node, pos);
1279 void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1281 assert(is_Phi(node) || is_Phi0(node));
1282 set_irn_n(node, pos, pred);
1285 ir_node *(get_Phi_next)(const ir_node *phi)
1287 return _get_Phi_next(phi);
1290 void (set_Phi_next)(ir_node *phi, ir_node *next)
1292 _set_Phi_next(phi, next);
1295 int is_memop(const ir_node *node)
1297 ir_opcode code = get_irn_opcode(node);
1298 return (code == iro_Load || code == iro_Store);
1301 ir_node *get_memop_mem(const ir_node *node)
1303 assert(is_memop(node));
1304 return get_irn_n(node, 0);
1307 void set_memop_mem(ir_node *node, ir_node *mem)
1309 assert(is_memop(node));
1310 set_irn_n(node, 0, mem);
1313 ir_node *get_memop_ptr(const ir_node *node)
1315 assert(is_memop(node));
1316 return get_irn_n(node, 1);
1319 void set_memop_ptr(ir_node *node, ir_node *ptr)
1321 assert(is_memop(node));
1322 set_irn_n(node, 1, ptr);
1325 ir_volatility get_Load_volatility(const ir_node *node)
1327 assert(is_Load(node));
1328 return node->attr.load.volatility;
1331 void set_Load_volatility(ir_node *node, ir_volatility volatility)
1333 assert(is_Load(node));
1334 node->attr.load.volatility = volatility;
1337 ir_align get_Load_align(const ir_node *node)
1339 assert(is_Load(node));
1340 return node->attr.load.aligned;
1343 void set_Load_align(ir_node *node, ir_align align)
1345 assert(is_Load(node));
1346 node->attr.load.aligned = align;
1350 ir_volatility get_Store_volatility(const ir_node *node)
1352 assert(is_Store(node));
1353 return node->attr.store.volatility;
1356 void set_Store_volatility(ir_node *node, ir_volatility volatility)
1358 assert(is_Store(node));
1359 node->attr.store.volatility = volatility;
1362 ir_align get_Store_align(const ir_node *node)
1364 assert(is_Store(node));
1365 return node->attr.store.aligned;
1368 void set_Store_align(ir_node *node, ir_align align)
1370 assert(is_Store(node));
1371 node->attr.store.aligned = align;
1375 ir_node **get_Sync_preds_arr(ir_node *node)
1377 assert(is_Sync(node));
1378 return (ir_node **)&(get_irn_in(node)[1]);
1381 int get_Sync_n_preds(const ir_node *node)
1383 assert(is_Sync(node));
1384 return (get_irn_arity(node));
1388 void set_Sync_n_preds(ir_node *node, int n_preds)
1390 assert(is_Sync(node));
1394 ir_node *get_Sync_pred(const ir_node *node, int pos)
1396 assert(is_Sync(node));
1397 return get_irn_n(node, pos);
1400 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
1402 assert(is_Sync(node));
1403 set_irn_n(node, pos, pred);
1406 /* Add a new Sync predecessor */
1407 void add_Sync_pred(ir_node *node, ir_node *pred)
1409 assert(is_Sync(node));
1410 add_irn_n(node, pred);
1413 /* Returns the source language type of a Proj node. */
1414 ir_type *get_Proj_type(const ir_node *n)
1416 ir_type *tp = firm_unknown_type;
1417 ir_node *pred = get_Proj_pred(n);
1419 switch (get_irn_opcode(pred)) {
1422 /* Deal with Start / Call here: we need to know the Proj Nr. */
1423 assert(get_irn_mode(pred) == mode_T);
1424 pred_pred = get_Proj_pred(pred);
1426 if (is_Start(pred_pred)) {
1427 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1428 tp = get_method_param_type(mtp, get_Proj_proj(n));
1429 } else if (is_Call(pred_pred)) {
1430 ir_type *mtp = get_Call_type(pred_pred);
1431 tp = get_method_res_type(mtp, get_Proj_proj(n));
1434 case iro_Start: break;
1435 case iro_Call: break;
1437 ir_node *a = get_Load_ptr(pred);
1439 tp = get_entity_type(get_Sel_entity(a));
1447 long get_Proj_proj(const ir_node *node)
1449 assert(is_Proj(node));
1450 return node->attr.proj;
1453 void set_Proj_proj(ir_node *node, long proj)
1455 assert(is_Proj(node));
1456 node->attr.proj = proj;
1459 int (is_arg_Proj)(const ir_node *node)
1461 return _is_arg_Proj(node);
1464 ir_node **get_Tuple_preds_arr(ir_node *node)
1466 assert(is_Tuple(node));
1467 return (ir_node **)&(get_irn_in(node)[1]);
1470 int get_Tuple_n_preds(const ir_node *node)
1472 assert(is_Tuple(node));
1473 return get_irn_arity(node);
1476 ir_node *get_Tuple_pred(const ir_node *node, int pos)
1478 assert(is_Tuple(node));
1479 return get_irn_n(node, pos);
1482 void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
1484 assert(is_Tuple(node));
1485 set_irn_n(node, pos, pred);
1488 int get_ASM_n_input_constraints(const ir_node *node)
1490 assert(is_ASM(node));
1491 return ARR_LEN(node->attr.assem.input_constraints);
1494 int get_ASM_n_output_constraints(const ir_node *node)
1496 assert(is_ASM(node));
1497 return ARR_LEN(node->attr.assem.output_constraints);
1500 int get_ASM_n_clobbers(const ir_node *node)
1502 assert(is_ASM(node));
1503 return ARR_LEN(node->attr.assem.clobbers);
1506 /* returns the graph of a node */
1507 ir_graph *(get_irn_irg)(const ir_node *node)
1509 return _get_irn_irg(node);
1513 /*----------------------------------------------------------------*/
1514 /* Auxiliary routines */
1515 /*----------------------------------------------------------------*/
1517 ir_node *skip_Proj(ir_node *node)
1519 /* don't assert node !!! */
1524 node = get_Proj_pred(node);
1530 skip_Proj_const(const ir_node *node)
1532 /* don't assert node !!! */
1537 node = get_Proj_pred(node);
1542 ir_node *skip_Tuple(ir_node *node)
1547 if (is_Proj(node)) {
1548 pred = get_Proj_pred(node);
1550 if (is_Proj(pred)) { /* nested Tuple ? */
1551 pred = skip_Tuple(pred);
1553 if (is_Tuple(pred)) {
1554 node = get_Tuple_pred(pred, get_Proj_proj(node));
1557 } else if (is_Tuple(pred)) {
1558 node = get_Tuple_pred(pred, get_Proj_proj(node));
1565 /* returns operand of node if node is a Cast */
1566 ir_node *skip_Cast(ir_node *node)
1569 return get_Cast_op(node);
1573 /* returns operand of node if node is a Cast */
1574 const ir_node *skip_Cast_const(const ir_node *node)
1577 return get_Cast_op(node);
1581 /* returns operand of node if node is a Pin */
1582 ir_node *skip_Pin(ir_node *node)
1585 return get_Pin_op(node);
1589 /* returns operand of node if node is a Confirm */
1590 ir_node *skip_Confirm(ir_node *node)
1592 if (is_Confirm(node))
1593 return get_Confirm_value(node);
1597 /* skip all high-level ops */
1598 ir_node *skip_HighLevel_ops(ir_node *node)
1600 while (is_op_highlevel(get_irn_op(node))) {
1601 node = get_irn_n(node, 0);
1607 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1608 * than any other approach, as Id chains are resolved and all point to the real node, or
1609 * all id's are self loops.
1611 * Note: This function takes 10% of mostly ANY the compiler run, so it's
1612 * a little bit "hand optimized".
1614 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
1616 ir_node *skip_Id(ir_node *node)
1619 /* don't assert node !!! */
1621 if (!node || (node->op != op_Id)) return node;
1623 /* Don't use get_Id_pred(): We get into an endless loop for
1624 self-referencing Ids. */
1625 pred = node->in[0+1];
1627 if (pred->op != op_Id) return pred;
1629 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1630 ir_node *rem_pred, *res;
1632 if (pred->op != op_Id) return pred; /* shortcut */
1635 assert(get_irn_arity (node) > 0);
1637 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1638 res = skip_Id(rem_pred);
1639 if (is_Id(res)) /* self-loop */ return node;
1641 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1648 int (is_strictConv)(const ir_node *node)
1650 return _is_strictConv(node);
1653 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
1654 int (is_SymConst_addr_ent)(const ir_node *node)
1656 return _is_SymConst_addr_ent(node);
1659 /* Returns true if the operation manipulates control flow. */
1660 int is_cfop(const ir_node *node)
1662 return is_op_cfopcode(get_irn_op(node));
1665 /* Returns true if the operation can change the control flow because
1667 int is_fragile_op(const ir_node *node)
1669 return is_op_fragile(get_irn_op(node));
1672 /* Returns the memory operand of fragile operations. */
1673 ir_node *get_fragile_op_mem(ir_node *node)
1675 assert(node && is_fragile_op(node));
1677 switch (get_irn_opcode(node)) {
1688 return get_irn_n(node, pn_Generic_M);
1693 panic("should not be reached");
1697 /* Returns the result mode of a Div operation. */
1698 ir_mode *get_divop_resmod(const ir_node *node)
1700 switch (get_irn_opcode(node)) {
1701 case iro_Quot : return get_Quot_resmode(node);
1702 case iro_DivMod: return get_DivMod_resmode(node);
1703 case iro_Div : return get_Div_resmode(node);
1704 case iro_Mod : return get_Mod_resmode(node);
1706 panic("should not be reached");
1710 /* Returns true if the operation is a forking control flow operation. */
1711 int (is_irn_forking)(const ir_node *node)
1713 return _is_irn_forking(node);
1716 void (copy_node_attr)(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
1718 _copy_node_attr(irg, old_node, new_node);
1721 /* Return the type associated with the value produced by n
1722 * if the node remarks this type as it is the case for
1723 * Cast, Const, SymConst and some Proj nodes. */
1724 ir_type *(get_irn_type)(ir_node *node)
1726 return _get_irn_type(node);
1729 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
1731 ir_type *(get_irn_type_attr)(ir_node *node)
1733 return _get_irn_type_attr(node);
1736 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
1737 ir_entity *(get_irn_entity_attr)(ir_node *node)
1739 return _get_irn_entity_attr(node);
1742 /* Returns non-zero for constant-like nodes. */
1743 int (is_irn_constlike)(const ir_node *node)
1745 return _is_irn_constlike(node);
1749 * Returns non-zero for nodes that are allowed to have keep-alives and
1750 * are neither Block nor PhiM.
1752 int (is_irn_keep)(const ir_node *node)
1754 return _is_irn_keep(node);
1758 * Returns non-zero for nodes that are always placed in the start block.
1760 int (is_irn_start_block_placed)(const ir_node *node)
1762 return _is_irn_start_block_placed(node);
1765 /* Returns non-zero for nodes that are machine operations. */
1766 int (is_irn_machine_op)(const ir_node *node)
1768 return _is_irn_machine_op(node);
1771 /* Returns non-zero for nodes that are machine operands. */
1772 int (is_irn_machine_operand)(const ir_node *node)
1774 return _is_irn_machine_operand(node);
1777 /* Returns non-zero for nodes that have the n'th user machine flag set. */
1778 int (is_irn_machine_user)(const ir_node *node, unsigned n)
1780 return _is_irn_machine_user(node, n);
1783 /* Returns non-zero for nodes that are CSE neutral to its users. */
1784 int (is_irn_cse_neutral)(const ir_node *node)
1786 return _is_irn_cse_neutral(node);
1789 /* Gets the string representation of the jump prediction .*/
1790 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
1792 #define X(a) case a: return #a
1794 X(COND_JMP_PRED_NONE);
1795 X(COND_JMP_PRED_TRUE);
1796 X(COND_JMP_PRED_FALSE);
1802 /** the get_type operation must be always implemented and return a firm type */
1803 static ir_type *get_Default_type(const ir_node *n)
1806 return get_unknown_type();
1809 /* Sets the get_type operation for an ir_op_ops. */
1810 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops)
1813 case iro_Const: ops->get_type = get_Const_type; break;
1814 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
1815 case iro_Cast: ops->get_type = get_Cast_type; break;
1816 case iro_Proj: ops->get_type = get_Proj_type; break;
1818 /* not allowed to be NULL */
1819 if (! ops->get_type)
1820 ops->get_type = get_Default_type;
1826 /** Return the attribute type of a SymConst node if exists */
1827 static ir_type *get_SymConst_attr_type(const ir_node *self)
1829 symconst_kind kind = get_SymConst_kind(self);
1830 if (SYMCONST_HAS_TYPE(kind))
1831 return get_SymConst_type(self);
1835 /** Return the attribute entity of a SymConst node if exists */
1836 static ir_entity *get_SymConst_attr_entity(const ir_node *self)
1838 symconst_kind kind = get_SymConst_kind(self);
1839 if (SYMCONST_HAS_ENT(kind))
1840 return get_SymConst_entity(self);
1844 /** the get_type_attr operation must be always implemented */
1845 static ir_type *get_Null_type(const ir_node *n)
1848 return firm_unknown_type;
1851 /* Sets the get_type operation for an ir_op_ops. */
1852 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops)
1855 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
1856 case iro_Call: ops->get_type_attr = get_Call_type; break;
1857 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
1858 case iro_Free: ops->get_type_attr = get_Free_type; break;
1859 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
1861 /* not allowed to be NULL */
1862 if (! ops->get_type_attr)
1863 ops->get_type_attr = get_Null_type;
1869 /** the get_entity_attr operation must be always implemented */
1870 static ir_entity *get_Null_ent(const ir_node *n)
1876 /* Sets the get_type operation for an ir_op_ops. */
1877 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops)
1880 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
1881 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
1883 /* not allowed to be NULL */
1884 if (! ops->get_entity_attr)
1885 ops->get_entity_attr = get_Null_ent;
1891 /* Sets the debug information of a node. */
1892 void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
1894 _set_irn_dbg_info(n, db);
1898 * Returns the debug information of an node.
1900 * @param n The node.
1902 dbg_info *(get_irn_dbg_info)(const ir_node *n)
1904 return _get_irn_dbg_info(n);
1907 /* checks whether a node represents a global address */
1908 int is_Global(const ir_node *node)
1910 return is_SymConst_addr_ent(node);
1913 /* returns the entity of a global address */
1914 ir_entity *get_Global_entity(const ir_node *node)
1916 return get_SymConst_entity(node);
1920 * Calculate a hash value of a node.
1922 unsigned firm_default_hash(const ir_node *node)
1927 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
1928 h = irn_arity = get_irn_arity(node);
1930 /* consider all in nodes... except the block if not a control flow. */
1931 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
1932 ir_node *pred = get_irn_n(node, i);
1933 if (is_irn_cse_neutral(pred))
1936 h = 9*h + HASH_PTR(pred);
1940 h = 9*h + HASH_PTR(get_irn_mode(node));
1942 h = 9*h + HASH_PTR(get_irn_op(node));
1945 } /* firm_default_hash */
1947 /* include generated code */
1948 #include "gen_irnode.c.inl"