2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
/* some constants fixing the positions of nodes predecessors in the in array */
51 #define CALL_PARAM_OFFSET 2
52 #define BUILDIN_PARAM_OFFSET 1
53 #define SEL_INDEX_OFFSET 2
54 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
55 #define END_KEEPALIVE_OFFSET 0
/** Textual names of the pn_Cmp relation constants, indexed by pnc value. */
static const char *pnc_name_arr[] = {
	"pn_Cmp_False", "pn_Cmp_Eq",  "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt",    "pn_Cmp_Ge",  "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo",    "pn_Cmp_Ue",  "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug",    "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the name string of a pnc (comparison relation) constant.
 * The index must lie inside the table above.
 */
const char *get_pnc_string(int pnc)
{
	const int n_names = (int) (sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0]));
	assert(pnc >= 0 && pnc < n_names);
	return pnc_name_arr[pnc];
}
/*
 * Calculates the negated (Complement(R)) pnc condition.
 * NOTE(review): only the head of this function is visible in this excerpt;
 * the rest of the body lies outside the shown lines.
 */
pn_Cmp get_negated_pnc(long pnc, ir_mode *mode)
	/* do NOT add the Uo bit for non-floating point values:
	   "unordered" only exists for floating point compares */
	if (! mode_is_float(mode))
88 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
89 pn_Cmp get_inversed_pnc(long pnc)
91 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
92 long lesser = pnc & pn_Cmp_Lt;
93 long greater = pnc & pn_Cmp_Gt;
95 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/*
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore (the node layout is frozen
 * once the first node has been allocated).
 */
static int forbid_new_data = 0;

/*
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;
/* register new space for every node */
unsigned firm_register_additional_node_data(unsigned size)
	/* may only be called before the first node has been created */
	assert(!forbid_new_data && "Too late to register additional node data");
	/* grow the per-node extra byte count; the new total is returned */
	return firm_add_node_size += size;
/* One-time initialisation of the irnode module. */
void init_irnode(void)
	/* Forbid the addition of new data to an ir node. */
/* Helper type used to compute the worst-case alignment for node attributes.
   NOTE(review): members are outside the visible excerpt. */
struct struct_align {
/*
 * irnode constructor.
 * Create a new irnode in irg, with an op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic array is created.
 * NOTE(review): several lines of this function are outside the visible
 * excerpt (declarations, some control flow); comments below only describe
 * what the shown lines do.
 */
ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
                     ir_mode *mode, int arity, ir_node **in)
	/* round the registered custom-data size up to the strictest alignment */
	unsigned align = offsetof(struct struct_align, s) - 1;
	unsigned add_node_size = (firm_add_node_size + align) & ~align;
	/* total size: node header + op specific attributes + custom data */
	size_t node_size = offsetof(ir_node, attr) + op->attr_size + add_node_size;
	p = obstack_alloc(irg->obst, node_size);
	memset(p, 0, node_size);
	/* the custom data block lives in front of the ir_node itself */
	res = (ir_node *)(p + add_node_size);
	res->kind = k_ir_node;
	/* every node gets a unique index within its graph */
	res->node_idx = irg_register_node_idx(irg, res);
	res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
	/* not nice but necessary: End and Sync must always have a flexible array */
	if (op == op_End || op == op_Sync)
		res->in = NEW_ARR_F(ir_node *, (arity+1));
	res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
	/* in[0] is reserved for the block; predecessors start at in[1] */
	memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
	set_irn_dbg_info(res, db);
	res->node_nr = get_irp_new_node_nr();
	/* initialise the out-edge bookkeeping for every edge kind */
	for (i = 0; i < EDGE_KIND_LAST; ++i) {
		INIT_LIST_HEAD(&res->edge_info[i].outs_head);
		/* edges will be build immediately */
		res->edge_info[i].edges_built = 1;
		res->edge_info[i].out_count = 0;
	/* don't put this into the for loop, arity is -1 for some nodes! */
	edges_notify_edge(res, -1, res->in[0], NULL, irg);
	for (i = 1; i <= arity; ++i)
		edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
	/* notify instrumentation hooks and, in the backend phase, be_info */
	hook_new_node(irg, res);
	if (get_irg_phase_state(irg) == phase_backend) {
		be_info_new_node(res);
207 /*-- getting some parameters from ir_nodes --*/
209 int (is_ir_node)(const void *thing)
211 return _is_ir_node(thing);
214 int (get_irn_arity)(const ir_node *node)
216 return _get_irn_arity(node);
/* Returns the array with ins.  This array is shifted with respect to the
   array accessed by get_irn_n: The block operand is at position 0 not -1.
   (@@@ This should be changed.)
   The order of the predecessors in this array is not guaranteed, except that
   lists of operands as predecessors of Block or arguments of a Call are
   consecutive. */
ir_node **get_irn_in(const ir_node *node)
/* Sets all predecessors of a node at once, resizing the in array if the
   arity changes.  NOTE(review): the declaration of pOld_in and some braces
   are outside the visible excerpt. */
void set_irn_in(ir_node *node, int arity, ir_node **in)
	ir_graph *irg = get_irn_irg(node);
	/* positions present in both old and new array: report edge exchange */
	for (i = 0; i < arity; i++) {
		if (i < ARR_LEN(*pOld_in)-1)
			edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
		edges_notify_edge(node, i, in[i], NULL, irg);
	/* old array was longer: report removal of the surplus edges */
	for (;i < ARR_LEN(*pOld_in)-1; i++) {
		edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
	/* arity changed: reallocate, keeping the block operand in slot 0 */
	if (arity != ARR_LEN(*pOld_in) - 1) {
		ir_node * block = (*pOld_in)[0];
		*pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
		(*pOld_in)[0] = block;
	fix_backedges(irg->obst, node);
	/* finally copy the new predecessors behind the block slot */
	memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
259 ir_node *(get_irn_n)(const ir_node *node, int n)
261 return _get_irn_n(node, n);
/* Sets the n-th predecessor of a node (n may address the block slot;
   in[0] holds the block, hence the +1 shift below). */
void set_irn_n(ir_node *node, int n, ir_node *in)
	ir_graph *irg = get_irn_irg(node);
	assert(node && node->kind == k_ir_node);
	assert(n < get_irn_arity(node));
	assert(in && in->kind == k_ir_node);
	/* inform hooks and edge bookkeeping about the exchanged predecessor */
	hook_set_irn_n(node, n, in, node->in[n + 1]);
	/* Here, we rely on src and tgt being in the current ir graph */
	edges_notify_edge(node, n, in, node->in[n + 1], irg);
	node->in[n + 1] = in;
/* Appends a new predecessor to a dynamic-arity node and returns the
   position it was inserted at. */
int add_irn_n(ir_node *node, ir_node *in)
	ir_graph *irg = get_irn_irg(node);
	/* only nodes with a flexible in array may grow */
	assert(node->op->opar == oparity_dynamic);
	/* ARR_LEN includes the block slot, so the new pred position is len-1 */
	pos = ARR_LEN(node->in) - 1;
	ARR_APP1(ir_node *, node->in, in);
	edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
	/* there is no old predecessor at this position, hence NULL */
	hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
/* Removes predecessor i of a Sync by overwriting it with the last
   predecessor and shrinking the in array by one. */
void del_Sync_n(ir_node *n, int i)
	int arity = get_Sync_n_preds(n);
	ir_node *last_pred = get_Sync_pred(n, arity - 1);
	/* move the last predecessor into the freed slot ... */
	set_Sync_pred(n, i, last_pred);
	/* ... drop the (now duplicate) edge of the last slot ... */
	edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
	/* ... and shrink the array (length includes the block slot) */
	ARR_SHRINKLEN(get_irn_in(n), arity);
306 int (get_irn_deps)(const ir_node *node)
308 return _get_irn_deps(node);
311 ir_node *(get_irn_dep)(const ir_node *node, int pos)
313 return _get_irn_dep(node, pos);
316 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
318 _set_irn_dep(node, pos, dep);
/* Adds a dependency edge from node to dep.
   NOTE(review): declarations of i, n, first_zero, res and several braces /
   break statements are outside the visible excerpt. */
int add_irn_dep(ir_node *node, ir_node *dep)
	/* DEP edges are only allowed in backend phase */
	assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
	if (node->deps == NULL) {
		/* lazily create the flexible deps array */
		node->deps = NEW_ARR_F(ir_node *, 1);
	/* scan for a free (NULL) slot or an already-present dep */
	for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
		if (node->deps[i] == NULL)
		if (node->deps[i] == dep)
	if (first_zero >= 0) {
		/* reuse a previously freed slot */
		node->deps[first_zero] = dep;
	/* otherwise append at the end */
	ARR_APP1(ir_node *, node->deps, dep);
	/* keep the DEP-kind out-edge information up to date */
	edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
/* Copies all dependency edges of src onto tgt. */
void add_irn_deps(ir_node *tgt, ir_node *src)
	for (i = 0, n = get_irn_deps(src); i < n; ++i)
		add_irn_dep(tgt, get_irn_dep(src, i));
365 ir_mode *(get_irn_mode)(const ir_node *node)
367 return _get_irn_mode(node);
370 void (set_irn_mode)(ir_node *node, ir_mode *mode)
372 _set_irn_mode(node, mode);
375 /** Gets the string representation of the mode .*/
376 const char *get_irn_modename(const ir_node *node)
379 return get_mode_name(node->mode);
382 ident *get_irn_modeident(const ir_node *node)
385 return get_mode_ident(node->mode);
388 ir_op *(get_irn_op)(const ir_node *node)
390 return _get_irn_op(node);
393 /* should be private to the library: */
394 void (set_irn_op)(ir_node *node, ir_op *op)
396 _set_irn_op(node, op);
399 unsigned (get_irn_opcode)(const ir_node *node)
401 return _get_irn_opcode(node);
404 const char *get_irn_opname(const ir_node *node)
407 if (is_Phi0(node)) return "Phi0";
408 return get_id_str(node->op->name);
411 ident *get_irn_opident(const ir_node *node)
414 return node->op->name;
417 ir_visited_t (get_irn_visited)(const ir_node *node)
419 return _get_irn_visited(node);
422 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
424 _set_irn_visited(node, visited);
427 void (mark_irn_visited)(ir_node *node)
429 _mark_irn_visited(node);
432 int (irn_visited)(const ir_node *node)
434 return _irn_visited(node);
437 int (irn_visited_else_mark)(ir_node *node)
439 return _irn_visited_else_mark(node);
442 void (set_irn_link)(ir_node *node, void *link)
444 _set_irn_link(node, link);
447 void *(get_irn_link)(const ir_node *node)
449 return _get_irn_link(node);
452 op_pin_state (get_irn_pinned)(const ir_node *node)
454 return _get_irn_pinned(node);
457 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
459 return _is_irn_pinned_in_irg(node);
462 void set_irn_pinned(ir_node *node, op_pin_state state)
464 /* due to optimization an opt may be turned into a Tuple */
468 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
469 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
471 node->attr.except.pin_state = state;
474 /* Outputs a unique number for this node */
475 long get_irn_node_nr(const ir_node *node)
478 return node->node_nr;
481 void *(get_irn_generic_attr)(ir_node *node)
483 assert(is_ir_node(node));
484 return _get_irn_generic_attr(node);
487 const void *(get_irn_generic_attr_const)(const ir_node *node)
489 assert(is_ir_node(node));
490 return _get_irn_generic_attr_const(node);
493 unsigned (get_irn_idx)(const ir_node *node)
495 assert(is_ir_node(node));
496 return _get_irn_idx(node);
499 int get_irn_pred_pos(ir_node *node, ir_node *arg)
502 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
503 if (get_irn_n(node, i) == arg)
509 /** manipulate fields of individual nodes **/
511 ir_node *(get_nodes_block)(const ir_node *node)
513 return _get_nodes_block(node);
516 void set_nodes_block(ir_node *node, ir_node *block)
518 assert(node->op != op_Block);
519 set_irn_n(node, -1, block);
/* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
 * from Start.  If so returns frame type, else Null. */
ir_type *is_frame_pointer(const ir_node *n)
	/* must be the frame-base Proj hanging directly off a Start node */
	if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
		ir_node *start = get_Proj_pred(n);
		if (is_Start(start)) {
			return get_irg_frame_type(get_irn_irg(start));
/* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
 * from Start.  If so returns tls type, else Null. */
ir_type *is_tls_pointer(const ir_node *n)
	/* must be the tls Proj hanging directly off a Start node */
	if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
		ir_node *start = get_Proj_pred(n);
		if (is_Start(start)) {
			return get_tls_type();
548 ir_node **get_Block_cfgpred_arr(ir_node *node)
550 assert(is_Block(node));
551 return (ir_node **)&(get_irn_in(node)[1]);
554 int (get_Block_n_cfgpreds)(const ir_node *node)
556 return _get_Block_n_cfgpreds(node);
559 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
561 return _get_Block_cfgpred(node, pos);
564 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
566 assert(is_Block(node));
567 set_irn_n(node, pos, pred);
570 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
574 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
575 if (get_Block_cfgpred_block(block, i) == pred)
581 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
583 return _get_Block_cfgpred_block(node, pos);
586 int get_Block_matured(const ir_node *node)
588 assert(is_Block(node));
589 return (int)node->attr.block.is_matured;
592 void set_Block_matured(ir_node *node, int matured)
594 assert(is_Block(node));
595 node->attr.block.is_matured = matured;
598 ir_visited_t (get_Block_block_visited)(const ir_node *node)
600 return _get_Block_block_visited(node);
603 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
605 _set_Block_block_visited(node, visit);
608 void (mark_Block_block_visited)(ir_node *node)
610 _mark_Block_block_visited(node);
613 int (Block_block_visited)(const ir_node *node)
615 return _Block_block_visited(node);
618 ir_node *(set_Block_dead)(ir_node *block)
620 return _set_Block_dead(block);
623 int (is_Block_dead)(const ir_node *block)
625 return _is_Block_dead(block);
628 ir_extblk *get_Block_extbb(const ir_node *block)
631 assert(is_Block(block));
632 res = block->attr.block.extblk;
633 assert(res == NULL || is_ir_extbb(res));
637 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
639 assert(is_Block(block));
640 assert(extblk == NULL || is_ir_extbb(extblk));
641 block->attr.block.extblk = extblk;
644 /* returns the graph of a Block. */
645 ir_graph *(get_Block_irg)(const ir_node *block)
647 return _get_Block_irg(block);
/* Returns the entity labelling this block, creating a fresh one on the
   first request.  NOTE(review): declarations of entity, glob, nr are outside
   the visible excerpt. */
ir_entity *create_Block_entity(ir_node *block)
	assert(is_Block(block));
	entity = block->attr.block.entity;
	if (entity == NULL) {
		/* lazily create a compiler-generated, local code entity ... */
		glob = get_glob_type();
		entity = new_entity(glob, id_unique("block_%u"), get_code_type());
		set_entity_visibility(entity, ir_visibility_local);
		set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
		/* ... and attach a fresh label number to it */
		nr = get_irp_next_label_nr();
		set_entity_label(entity, nr);
		set_entity_compiler_generated(entity, 1);
	block->attr.block.entity = entity;
673 ir_entity *get_Block_entity(const ir_node *block)
675 assert(is_Block(block));
676 return block->attr.block.entity;
679 void set_Block_entity(ir_node *block, ir_entity *entity)
681 assert(is_Block(block));
682 assert(get_entity_type(entity) == get_code_type());
683 block->attr.block.entity = entity;
686 int has_Block_entity(const ir_node *block)
688 return block->attr.block.entity != NULL;
691 ir_node *(get_Block_phis)(const ir_node *block)
693 return _get_Block_phis(block);
696 void (set_Block_phis)(ir_node *block, ir_node *phi)
698 _set_Block_phis(block, phi);
701 void (add_Block_phi)(ir_node *block, ir_node *phi)
703 _add_Block_phi(block, phi);
706 /* Get the Block mark (single bit). */
707 unsigned (get_Block_mark)(const ir_node *block)
709 return _get_Block_mark(block);
712 /* Set the Block mark (single bit). */
713 void (set_Block_mark)(ir_node *block, unsigned mark)
715 _set_Block_mark(block, mark);
718 int get_End_n_keepalives(const ir_node *end)
721 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
724 ir_node *get_End_keepalive(const ir_node *end, int pos)
727 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
730 void add_End_keepalive(ir_node *end, ir_node *ka)
736 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
739 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
/* Set new keep-alives */
void set_End_keepalives(ir_node *end, int n, ir_node *in[])
	ir_graph *irg = get_irn_irg(end);
	/* notify that edges are deleted */
	for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
		edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
	/* resize to n keep-alives plus the block slot */
	ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
	/* store the new keep-alives and announce the new edges */
	for (i = 0; i < n; ++i) {
		end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
		edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
/* Set new keep-alives from old keep-alives, skipping irn.
   NOTE(review): the search loop body, the declarations of i/idx/irg and
   some control flow are outside the visible excerpt. */
void remove_End_keepalive(ir_node *end, ir_node *irn)
	int n = get_End_n_keepalives(end);
	/* look for irn among the keep-alives, back to front */
	for (i = n -1; i >= 0; --i) {
		ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
	irg = get_irn_irg(end);
	/* remove the edge */
	edges_notify_edge(end, idx, NULL, irn, irg);
	/* exchange with the last one */
	ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
	edges_notify_edge(end, n - 1, NULL, old, irg);
	end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
	edges_notify_edge(end, idx, old, NULL, irg);
	/* now n - 1 keeps, 1 block input */
	ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
/* remove Bads, NoMems and doublets from the keep-alive set.
   NOTE(review): declarations of irg/keeps and some braces are outside the
   visible excerpt. */
void remove_End_Bads_and_doublets(ir_node *end)
	int idx, n = get_End_n_keepalives(end);
	irg = get_irn_irg(end);
	/* set of keep-alives already seen, used to filter duplicates */
	pset_new_init(&keeps);
	/* iterate backwards so swap-with-last removal keeps indices valid */
	for (idx = n - 1; idx >= 0; --idx) {
		ir_node *ka = get_End_keepalive(end, idx);
		if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
			/* remove the edge */
			edges_notify_edge(end, idx, NULL, ka, irg);
			/* exchange with the last one */
			ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
			edges_notify_edge(end, n - 1, NULL, old, irg);
			end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
			edges_notify_edge(end, idx, old, NULL, irg);
		/* first occurrence of a valid keep-alive: remember it */
		pset_new_insert(&keeps, ka);
	/* n keeps, 1 block input */
	ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
	pset_new_destroy(&keeps);
833 void free_End(ir_node *end)
838 end->in = NULL; /* @@@ make sure we get an error if we use the
839 in array afterwards ... */
842 int get_Return_n_ress(const ir_node *node)
844 assert(is_Return(node));
845 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
848 ir_node **get_Return_res_arr(ir_node *node)
850 assert(is_Return(node));
851 if (get_Return_n_ress(node) > 0)
852 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
858 void set_Return_n_res(ir_node *node, int results)
860 assert(is_Return(node));
864 ir_node *get_Return_res(const ir_node *node, int pos)
866 assert(is_Return(node));
867 assert(get_Return_n_ress(node) > pos);
868 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
871 void set_Return_res(ir_node *node, int pos, ir_node *res)
873 assert(is_Return(node));
874 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
877 int (is_Const_null)(const ir_node *node)
879 return _is_Const_null(node);
882 int (is_Const_one)(const ir_node *node)
884 return _is_Const_one(node);
887 int (is_Const_all_one)(const ir_node *node)
889 return _is_Const_all_one(node);
894 symconst_kind get_SymConst_kind(const ir_node *node)
896 assert(is_SymConst(node));
897 return node->attr.symc.kind;
900 void set_SymConst_kind(ir_node *node, symconst_kind kind)
902 assert(is_SymConst(node));
903 node->attr.symc.kind = kind;
906 ir_type *get_SymConst_type(const ir_node *node)
908 /* the cast here is annoying, but we have to compensate for
910 ir_node *irn = (ir_node *)node;
911 assert(is_SymConst(node) &&
912 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
913 return irn->attr.symc.sym.type_p;
916 void set_SymConst_type(ir_node *node, ir_type *tp)
918 assert(is_SymConst(node) &&
919 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
920 node->attr.symc.sym.type_p = tp;
924 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
925 ir_entity *get_SymConst_entity(const ir_node *node)
927 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
928 return node->attr.symc.sym.entity_p;
931 void set_SymConst_entity(ir_node *node, ir_entity *ent)
933 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
934 node->attr.symc.sym.entity_p = ent;
937 ir_enum_const *get_SymConst_enum(const ir_node *node)
939 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
940 return node->attr.symc.sym.enum_p;
943 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
945 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
946 node->attr.symc.sym.enum_p = ec;
949 union symconst_symbol
950 get_SymConst_symbol(const ir_node *node)
952 assert(is_SymConst(node));
953 return node->attr.symc.sym;
956 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
958 assert(is_SymConst(node));
959 node->attr.symc.sym = sym;
962 int get_Sel_n_indexs(const ir_node *node)
964 assert(is_Sel(node));
965 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
968 ir_node **get_Sel_index_arr(ir_node *node)
970 assert(is_Sel(node));
971 if (get_Sel_n_indexs(node) > 0)
972 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
977 ir_node *get_Sel_index(const ir_node *node, int pos)
979 assert(is_Sel(node));
980 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
983 void set_Sel_index(ir_node *node, int pos, ir_node *index)
985 assert(is_Sel(node));
986 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
990 /* For unary and binary arithmetic operations the access to the
991 operands can be factored out. Left is the first, right the
992 second arithmetic value as listed in tech report 0999-33.
993 unops are: Minus, Abs, Not, Conv, Cast
994 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
995 Shr, Shrs, Rotate, Cmp */
998 ir_node **get_Call_param_arr(ir_node *node)
1000 assert(is_Call(node));
1001 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1004 int get_Call_n_params(const ir_node *node)
1006 assert(is_Call(node));
1007 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1010 ir_node *get_Call_param(const ir_node *node, int pos)
1012 assert(is_Call(node));
1013 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1016 void set_Call_param(ir_node *node, int pos, ir_node *param)
1018 assert(is_Call(node));
1019 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1022 ir_node **get_Builtin_param_arr(ir_node *node)
1024 assert(is_Builtin(node));
1025 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1028 int get_Builtin_n_params(const ir_node *node)
1030 assert(is_Builtin(node));
1031 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1034 ir_node *get_Builtin_param(const ir_node *node, int pos)
1036 assert(is_Builtin(node));
1037 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1040 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
1042 assert(is_Builtin(node));
1043 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1046 /* Returns a human readable string for the ir_builtin_kind. */
1047 const char *get_builtin_kind_name(ir_builtin_kind kind)
1049 #define X(a) case a: return #a
1052 X(ir_bk_debugbreak);
1053 X(ir_bk_return_address);
1054 X(ir_bk_frame_address);
1064 X(ir_bk_inner_trampoline);
1071 int Call_has_callees(const ir_node *node)
1073 assert(is_Call(node));
1074 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1075 (node->attr.call.callee_arr != NULL));
1078 int get_Call_n_callees(const ir_node *node)
1080 assert(is_Call(node) && node->attr.call.callee_arr);
1081 return ARR_LEN(node->attr.call.callee_arr);
1084 ir_entity *get_Call_callee(const ir_node *node, int pos)
1086 assert(pos >= 0 && pos < get_Call_n_callees(node));
1087 return node->attr.call.callee_arr[pos];
1090 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr)
1092 ir_graph *irg = get_irn_irg(node);
1094 assert(is_Call(node));
1095 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1096 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, irg->obst, n);
1098 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1101 void remove_Call_callee_arr(ir_node *node)
1103 assert(is_Call(node));
1104 node->attr.call.callee_arr = NULL;
/*
 * Returns non-zero if a Call is surely a self-recursive Call.
 * Beware: if this functions returns 0, the call might be self-recursive!
 */
int is_self_recursive_Call(const ir_node *call)
	const ir_node *callee = get_Call_ptr(call);
	/* only direct calls through a SymConst entity can be decided here */
	if (is_SymConst_addr_ent(callee)) {
		const ir_entity *ent = get_SymConst_entity(callee);
		const ir_graph *irg = get_entity_irg(ent);
		/* self-recursive iff the callee's graph is the caller's own graph */
		if (irg == get_irn_irg(call))
/* Checks for upcast.
 *
 * Returns true if the Cast node casts a class type to a super type.
 */
int is_Cast_upcast(ir_node *node)
	ir_type *totype = get_Cast_type(node);
	ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
	/* requires consistent type information on all nodes of the graph */
	assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
	/* strip matching pointer indirections from both sides */
	while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
		totype = get_pointer_points_to_type(totype);
		fromtype = get_pointer_points_to_type(fromtype);
	/* upcast: the source class must be a subclass of the target class */
	if (!is_Class_type(totype)) return 0;
	return is_SubClass_of(fromtype, totype);
/* Checks for downcast.
 *
 * Returns true if the Cast node casts a class type to a sub type.
 */
int is_Cast_downcast(ir_node *node)
	ir_type *totype = get_Cast_type(node);
	ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
	/* requires consistent type information on all nodes of the graph */
	assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
	/* strip matching pointer indirections from both sides */
	while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
		totype = get_pointer_points_to_type(totype);
		fromtype = get_pointer_points_to_type(fromtype);
	/* downcast: the target class must be a subclass of the source class */
	if (!is_Class_type(totype)) return 0;
	return is_SubClass_of(totype, fromtype);
1170 int (is_unop)(const ir_node *node)
1172 return _is_unop(node);
1175 ir_node *get_unop_op(const ir_node *node)
1177 if (node->op->opar == oparity_unary)
1178 return get_irn_n(node, node->op->op_index);
1180 assert(node->op->opar == oparity_unary);
1184 void set_unop_op(ir_node *node, ir_node *op)
1186 if (node->op->opar == oparity_unary)
1187 set_irn_n(node, node->op->op_index, op);
1189 assert(node->op->opar == oparity_unary);
1192 int (is_binop)(const ir_node *node)
1194 return _is_binop(node);
1197 ir_node *get_binop_left(const ir_node *node)
1199 assert(node->op->opar == oparity_binary);
1200 return get_irn_n(node, node->op->op_index);
1203 void set_binop_left(ir_node *node, ir_node *left)
1205 assert(node->op->opar == oparity_binary);
1206 set_irn_n(node, node->op->op_index, left);
1209 ir_node *get_binop_right(const ir_node *node)
1211 assert(node->op->opar == oparity_binary);
1212 return get_irn_n(node, node->op->op_index + 1);
1215 void set_binop_right(ir_node *node, ir_node *right)
1217 assert(node->op->opar == oparity_binary);
1218 set_irn_n(node, node->op->op_index + 1, right);
1221 int is_Phi0(const ir_node *n)
1225 return ((get_irn_op(n) == op_Phi) &&
1226 (get_irn_arity(n) == 0) &&
1227 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1230 ir_node **get_Phi_preds_arr(ir_node *node)
1232 assert(is_Phi(node));
1233 return (ir_node **)&(get_irn_in(node)[1]);
1236 int get_Phi_n_preds(const ir_node *node)
1238 assert(is_Phi(node) || is_Phi0(node));
1239 return (get_irn_arity(node));
1242 ir_node *get_Phi_pred(const ir_node *node, int pos)
1244 assert(is_Phi(node) || is_Phi0(node));
1245 return get_irn_n(node, pos);
1248 void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1250 assert(is_Phi(node) || is_Phi0(node));
1251 set_irn_n(node, pos, pred);
1254 ir_node *(get_Phi_next)(const ir_node *phi)
1256 return _get_Phi_next(phi);
1259 void (set_Phi_next)(ir_node *phi, ir_node *next)
1261 _set_Phi_next(phi, next);
1264 int is_memop(const ir_node *node)
1266 ir_opcode code = get_irn_opcode(node);
1267 return (code == iro_Load || code == iro_Store);
1270 ir_node *get_memop_mem(const ir_node *node)
1272 assert(is_memop(node));
1273 return get_irn_n(node, 0);
1276 void set_memop_mem(ir_node *node, ir_node *mem)
1278 assert(is_memop(node));
1279 set_irn_n(node, 0, mem);
1282 ir_node *get_memop_ptr(const ir_node *node)
1284 assert(is_memop(node));
1285 return get_irn_n(node, 1);
1288 void set_memop_ptr(ir_node *node, ir_node *ptr)
1290 assert(is_memop(node));
1291 set_irn_n(node, 1, ptr);
1294 ir_volatility get_Load_volatility(const ir_node *node)
1296 assert(is_Load(node));
1297 return node->attr.load.volatility;
1300 void set_Load_volatility(ir_node *node, ir_volatility volatility)
1302 assert(is_Load(node));
1303 node->attr.load.volatility = volatility;
1306 ir_align get_Load_align(const ir_node *node)
1308 assert(is_Load(node));
1309 return node->attr.load.aligned;
1312 void set_Load_align(ir_node *node, ir_align align)
1314 assert(is_Load(node));
1315 node->attr.load.aligned = align;
1319 ir_volatility get_Store_volatility(const ir_node *node)
1321 assert(is_Store(node));
1322 return node->attr.store.volatility;
1325 void set_Store_volatility(ir_node *node, ir_volatility volatility)
1327 assert(is_Store(node));
1328 node->attr.store.volatility = volatility;
1331 ir_align get_Store_align(const ir_node *node)
1333 assert(is_Store(node));
1334 return node->attr.store.aligned;
1337 void set_Store_align(ir_node *node, ir_align align)
1339 assert(is_Store(node));
1340 node->attr.store.aligned = align;
1344 ir_node **get_Sync_preds_arr(ir_node *node)
1346 assert(is_Sync(node));
1347 return (ir_node **)&(get_irn_in(node)[1]);
1350 int get_Sync_n_preds(const ir_node *node)
1352 assert(is_Sync(node));
1353 return (get_irn_arity(node));
1357 void set_Sync_n_preds(ir_node *node, int n_preds)
1359 assert(is_Sync(node));
1363 ir_node *get_Sync_pred(const ir_node *node, int pos)
1365 assert(is_Sync(node));
1366 return get_irn_n(node, pos);
1369 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
1371 assert(is_Sync(node));
1372 set_irn_n(node, pos, pred);
1375 /* Add a new Sync predecessor */
1376 void add_Sync_pred(ir_node *node, ir_node *pred)
1378 assert(is_Sync(node));
1379 add_irn_n(node, pred);
1382 long get_Proj_proj(const ir_node *node)
1384 assert(is_Proj(node));
1385 return node->attr.proj;
1388 void set_Proj_proj(ir_node *node, long proj)
1390 assert(is_Proj(node));
1391 node->attr.proj = proj;
1394 int (is_arg_Proj)(const ir_node *node)
1396 return _is_arg_Proj(node);
1399 ir_node **get_Tuple_preds_arr(ir_node *node)
1401 assert(is_Tuple(node));
1402 return (ir_node **)&(get_irn_in(node)[1]);
1405 int get_Tuple_n_preds(const ir_node *node)
1407 assert(is_Tuple(node));
1408 return get_irn_arity(node);
1411 ir_node *get_Tuple_pred(const ir_node *node, int pos)
1413 assert(is_Tuple(node));
1414 return get_irn_n(node, pos);
1417 void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
1419 assert(is_Tuple(node));
1420 set_irn_n(node, pos, pred);
1423 int get_ASM_n_input_constraints(const ir_node *node)
1425 assert(is_ASM(node));
1426 return ARR_LEN(node->attr.assem.input_constraints);
1429 int get_ASM_n_output_constraints(const ir_node *node)
1431 assert(is_ASM(node));
1432 return ARR_LEN(node->attr.assem.output_constraints);
1435 int get_ASM_n_clobbers(const ir_node *node)
1437 assert(is_ASM(node));
1438 return ARR_LEN(node->attr.assem.clobbers);
1441 /* returns the graph of a node */
1442 ir_graph *(get_irn_irg)(const ir_node *node)
1444 return _get_irn_irg(node);
1448 /*----------------------------------------------------------------*/
1449 /* Auxiliary routines */
1450 /*----------------------------------------------------------------*/
1452 ir_node *skip_Proj(ir_node *node)
1454 /* don't assert node !!! */
1459 node = get_Proj_pred(node);
1465 skip_Proj_const(const ir_node *node)
1467 /* don't assert node !!! */
1472 node = get_Proj_pred(node);
/* Skips Proj-of-Tuple combinations and returns the node the Tuple entry
   really refers to.  NOTE(review): the declaration of pred and the final
   return are outside the visible excerpt. */
ir_node *skip_Tuple(ir_node *node)
	if (is_Proj(node)) {
		pred = get_Proj_pred(node);
		if (is_Proj(pred)) { /* nested Tuple ? */
			/* resolve the inner Proj/Tuple chain first */
			pred = skip_Tuple(pred);
			if (is_Tuple(pred)) {
				node = get_Tuple_pred(pred, get_Proj_proj(node));
		} else if (is_Tuple(pred)) {
			/* direct Proj of a Tuple: select the referenced operand */
			node = get_Tuple_pred(pred, get_Proj_proj(node));
1500 /* returns operand of node if node is a Cast */
1501 ir_node *skip_Cast(ir_node *node)
1504 return get_Cast_op(node);
1508 /* returns operand of node if node is a Cast */
1509 const ir_node *skip_Cast_const(const ir_node *node)
1512 return get_Cast_op(node);
1516 /* returns operand of node if node is a Pin */
1517 ir_node *skip_Pin(ir_node *node)
1520 return get_Pin_op(node);
1524 /* returns operand of node if node is a Confirm */
1525 ir_node *skip_Confirm(ir_node *node)
1527 if (is_Confirm(node))
1528 return get_Confirm_value(node);
1532 /* skip all high-level ops */
1533 ir_node *skip_HighLevel_ops(ir_node *node)
1535 while (is_op_highlevel(get_irn_op(node))) {
1536 node = get_irn_n(node, 0);
1542 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1543 * than any other approach, as Id chains are resolved and all point to the real node, or
1544 * all id's are self loops.
1546 * Note: This function takes 10% of mostly ANY the compiler run, so it's
1547 * a little bit "hand optimized".
1549 ir_node *skip_Id(ir_node *node)
1552 /* don't assert node !!! */
1554 if (!node || (node->op != op_Id)) return node;
1556 /* Don't use get_Id_pred(): We get into an endless loop for
1557 self-referencing Ids. */
1558 pred = node->in[0+1];
1560 if (pred->op != op_Id) return pred;
1562 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1563 ir_node *rem_pred, *res;
1565 if (pred->op != op_Id) return pred; /* shortcut */
1568 assert(get_irn_arity (node) > 0);
1570 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1571 res = skip_Id(rem_pred);
1572 if (is_Id(res)) /* self-loop */ return node;
1574 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1581 int (is_strictConv)(const ir_node *node)
1583 return _is_strictConv(node);
1586 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
1587 int (is_SymConst_addr_ent)(const ir_node *node)
1589 return _is_SymConst_addr_ent(node);
1592 /* Returns true if the operation manipulates control flow. */
1593 int is_cfop(const ir_node *node)
1595 return is_op_cfopcode(get_irn_op(node));
1598 /* Returns true if the operation can change the control flow because
1600 int is_fragile_op(const ir_node *node)
1602 return is_op_fragile(get_irn_op(node));
1605 /* Returns the memory operand of fragile operations. */
1606 ir_node *get_fragile_op_mem(ir_node *node)
1608 assert(node && is_fragile_op(node));
1610 switch (get_irn_opcode(node)) {
1621 return get_irn_n(node, pn_Generic_M);
1626 panic("should not be reached");
1630 /* Returns the result mode of a Div operation. */
1631 ir_mode *get_divop_resmod(const ir_node *node)
1633 switch (get_irn_opcode(node)) {
1634 case iro_Quot : return get_Quot_resmode(node);
1635 case iro_DivMod: return get_DivMod_resmode(node);
1636 case iro_Div : return get_Div_resmode(node);
1637 case iro_Mod : return get_Mod_resmode(node);
1639 panic("should not be reached");
1643 /* Returns true if the operation is a forking control flow operation. */
1644 int (is_irn_forking)(const ir_node *node)
1646 return _is_irn_forking(node);
1649 void (copy_node_attr)(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
1651 _copy_node_attr(irg, old_node, new_node);
1654 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
1656 ir_type *(get_irn_type_attr)(ir_node *node)
1658 return _get_irn_type_attr(node);
1661 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
1662 ir_entity *(get_irn_entity_attr)(ir_node *node)
1664 return _get_irn_entity_attr(node);
1667 /* Returns non-zero for constant-like nodes. */
1668 int (is_irn_constlike)(const ir_node *node)
1670 return _is_irn_constlike(node);
1674 * Returns non-zero for nodes that are allowed to have keep-alives and
1675 * are neither Block nor PhiM.
1677 int (is_irn_keep)(const ir_node *node)
1679 return _is_irn_keep(node);
1683 * Returns non-zero for nodes that are always placed in the start block.
1685 int (is_irn_start_block_placed)(const ir_node *node)
1687 return _is_irn_start_block_placed(node);
1690 /* Returns non-zero for nodes that are machine operations. */
1691 int (is_irn_machine_op)(const ir_node *node)
1693 return _is_irn_machine_op(node);
1696 /* Returns non-zero for nodes that are machine operands. */
1697 int (is_irn_machine_operand)(const ir_node *node)
1699 return _is_irn_machine_operand(node);
1702 /* Returns non-zero for nodes that have the n'th user machine flag set. */
1703 int (is_irn_machine_user)(const ir_node *node, unsigned n)
1705 return _is_irn_machine_user(node, n);
1708 /* Returns non-zero for nodes that are CSE neutral to its users. */
1709 int (is_irn_cse_neutral)(const ir_node *node)
1711 return _is_irn_cse_neutral(node);
1714 /* Gets the string representation of the jump prediction .*/
1715 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
1717 #define X(a) case a: return #a
1719 X(COND_JMP_PRED_NONE);
1720 X(COND_JMP_PRED_TRUE);
1721 X(COND_JMP_PRED_FALSE);
1727 /** Return the attribute type of a SymConst node if exists */
1728 static ir_type *get_SymConst_attr_type(const ir_node *self)
1730 symconst_kind kind = get_SymConst_kind(self);
1731 if (SYMCONST_HAS_TYPE(kind))
1732 return get_SymConst_type(self);
1736 /** Return the attribute entity of a SymConst node if exists */
1737 static ir_entity *get_SymConst_attr_entity(const ir_node *self)
1739 symconst_kind kind = get_SymConst_kind(self);
1740 if (SYMCONST_HAS_ENT(kind))
1741 return get_SymConst_entity(self);
1745 /** the get_type_attr operation must be always implemented */
1746 static ir_type *get_Null_type(const ir_node *n)
1749 return firm_unknown_type;
1752 /* Sets the get_type operation for an ir_op_ops. */
1753 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops)
1756 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
1757 case iro_Call: ops->get_type_attr = get_Call_type; break;
1758 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
1759 case iro_Free: ops->get_type_attr = get_Free_type; break;
1760 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
1762 /* not allowed to be NULL */
1763 if (! ops->get_type_attr)
1764 ops->get_type_attr = get_Null_type;
1770 /** the get_entity_attr operation must be always implemented */
1771 static ir_entity *get_Null_ent(const ir_node *n)
1777 /* Sets the get_type operation for an ir_op_ops. */
1778 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops)
1781 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
1782 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
1784 /* not allowed to be NULL */
1785 if (! ops->get_entity_attr)
1786 ops->get_entity_attr = get_Null_ent;
1792 /* Sets the debug information of a node. */
1793 void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
1795 _set_irn_dbg_info(n, db);
1799 * Returns the debug information of an node.
1801 * @param n The node.
1803 dbg_info *(get_irn_dbg_info)(const ir_node *n)
1805 return _get_irn_dbg_info(n);
1808 /* checks whether a node represents a global address */
1809 int is_Global(const ir_node *node)
1811 return is_SymConst_addr_ent(node);
1814 /* returns the entity of a global address */
1815 ir_entity *get_Global_entity(const ir_node *node)
1817 return get_SymConst_entity(node);
1821 * Calculate a hash value of a node.
1823 unsigned firm_default_hash(const ir_node *node)
1828 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
1829 h = irn_arity = get_irn_arity(node);
1831 /* consider all in nodes... except the block if not a control flow. */
1832 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
1833 ir_node *pred = get_irn_n(node, i);
1834 if (is_irn_cse_neutral(pred))
1837 h = 9*h + HASH_PTR(pred);
1841 h = 9*h + HASH_PTR(get_irn_mode(node));
1843 h = 9*h + HASH_PTR(get_irn_op(node));
1846 } /* firm_default_hash */
1848 /* include generated code */
1849 #include "gen_irnode.c.inl"