2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
/* some constants fixing the positions of nodes predecessors
   in the in array */
#define CALL_PARAM_OFFSET     2
#define BUILDIN_PARAM_OFFSET  1
#define SEL_INDEX_OFFSET      2
#define RETURN_RESULT_OFFSET  1  /* mem is not a result */
#define END_KEEPALIVE_OFFSET  0
/** Human-readable names for the pn_Cmp relation constants, indexed by value. */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 * The index must lie inside the 16-entry table above (asserted).
 */
const char *get_pnc_string(int pnc)
{
	assert(pnc >= 0 && pnc <
	       (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
	return pnc_name_arr[pnc];
}
75 * Calculates the negated (Complement(R)) pnc condition.
77 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode)
81 /* do NOT add the Uo bit for non-floating point values */
82 if (! mode_is_float(mode))
88 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
89 pn_Cmp get_inversed_pnc(long pnc)
91 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
92 long lesser = pnc & pn_Cmp_Lt;
93 long greater = pnc & pn_Cmp_Gt;
95 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;


/* register new space for every node */
unsigned firm_register_additional_node_data(unsigned size)
{
	assert(!forbid_new_data && "Too late to register additional node data");

	if (forbid_new_data)
		return 0;

	/* returns the total amount of extra space registered so far */
	return firm_add_node_size += size;
}
125 void init_irnode(void)
127 /* Forbid the addition of new data to an ir node. */
131 struct struct_align {
141 * irnode constructor.
142 * Create a new irnode in irg, with an op, mode, arity and
143 * some incoming irnodes.
144 * If arity is negative, a node with a dynamic array is created.
146 ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
147 ir_mode *mode, int arity, ir_node *const *in)
150 unsigned align = offsetof(struct struct_align, s) - 1;
151 unsigned add_node_size = (firm_add_node_size + align) & ~align;
152 size_t node_size = offsetof(ir_node, attr) + op->attr_size + add_node_size;
159 p = (char*)obstack_alloc(irg->obst, node_size);
160 memset(p, 0, node_size);
161 res = (ir_node *)(p + add_node_size);
163 res->kind = k_ir_node;
167 res->node_idx = irg_register_node_idx(irg, res);
172 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
174 /* not nice but necessary: End and Sync must always have a flexible array */
175 if (op == op_End || op == op_Sync)
176 res->in = NEW_ARR_F(ir_node *, (arity+1));
178 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
179 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
183 set_irn_dbg_info(res, db);
185 res->node_nr = get_irp_new_node_nr();
187 for (i = 0; i < EDGE_KIND_LAST; ++i) {
188 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
189 /* edges will be build immediately */
190 res->edge_info[i].edges_built = 1;
191 res->edge_info[i].out_count = 0;
194 /* don't put this into the for loop, arity is -1 for some nodes! */
195 edges_notify_edge(res, -1, res->in[0], NULL, irg);
196 for (i = 1; i <= arity; ++i)
197 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
199 hook_new_node(irg, res);
200 if (get_irg_phase_state(irg) == phase_backend) {
201 be_info_new_node(res);
207 /*-- getting some parameters from ir_nodes --*/
209 int (is_ir_node)(const void *thing)
211 return _is_ir_node(thing);
214 int (get_irn_arity)(const ir_node *node)
216 return _get_irn_arity(node);
219 /* Returns the array with ins. This array is shifted with respect to the
220 array accessed by get_irn_n: The block operand is at position 0 not -1.
221 (@@@ This should be changed.)
222 The order of the predecessors in this array is not guaranteed, except that
223 lists of operands as predecessors of Block or arguments of a Call are
225 ir_node **get_irn_in(const ir_node *node)
230 void set_irn_in(ir_node *node, int arity, ir_node **in)
234 ir_graph *irg = get_irn_irg(node);
239 for (i = 0; i < arity; i++) {
240 if (i < ARR_LEN(*pOld_in)-1)
241 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
243 edges_notify_edge(node, i, in[i], NULL, irg);
245 for (;i < ARR_LEN(*pOld_in)-1; i++) {
246 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
249 if (arity != ARR_LEN(*pOld_in) - 1) {
250 ir_node * block = (*pOld_in)[0];
251 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
252 (*pOld_in)[0] = block;
254 fix_backedges(irg->obst, node);
256 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
259 ir_node *(get_irn_n)(const ir_node *node, int n)
261 return _get_irn_n(node, n);
264 void set_irn_n(ir_node *node, int n, ir_node *in)
266 ir_graph *irg = get_irn_irg(node);
267 assert(node && node->kind == k_ir_node);
269 assert(n < get_irn_arity(node));
270 assert(in && in->kind == k_ir_node);
273 hook_set_irn_n(node, n, in, node->in[n + 1]);
275 /* Here, we rely on src and tgt being in the current ir graph */
276 edges_notify_edge(node, n, in, node->in[n + 1], irg);
278 node->in[n + 1] = in;
281 int add_irn_n(ir_node *node, ir_node *in)
284 ir_graph *irg = get_irn_irg(node);
286 assert(node->op->opar == oparity_dynamic);
287 pos = ARR_LEN(node->in) - 1;
288 ARR_APP1(ir_node *, node->in, in);
289 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
292 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
297 void del_Sync_n(ir_node *n, int i)
299 int arity = get_Sync_n_preds(n);
300 ir_node *last_pred = get_Sync_pred(n, arity - 1);
301 set_Sync_pred(n, i, last_pred);
302 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
303 ARR_SHRINKLEN(get_irn_in(n), arity);
306 int (get_irn_deps)(const ir_node *node)
308 return _get_irn_deps(node);
311 ir_node *(get_irn_dep)(const ir_node *node, int pos)
313 return _get_irn_dep(node, pos);
316 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
318 _set_irn_dep(node, pos, dep);
321 int add_irn_dep(ir_node *node, ir_node *dep)
325 /* DEP edges are only allowed in backend phase */
326 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
327 if (node->deps == NULL) {
328 node->deps = NEW_ARR_F(ir_node *, 1);
334 for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
335 if (node->deps[i] == NULL)
338 if (node->deps[i] == dep)
342 if (first_zero >= 0) {
343 node->deps[first_zero] = dep;
346 ARR_APP1(ir_node *, node->deps, dep);
351 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
356 void add_irn_deps(ir_node *tgt, ir_node *src)
360 for (i = 0, n = get_irn_deps(src); i < n; ++i)
361 add_irn_dep(tgt, get_irn_dep(src, i));
365 ir_mode *(get_irn_mode)(const ir_node *node)
367 return _get_irn_mode(node);
370 void (set_irn_mode)(ir_node *node, ir_mode *mode)
372 _set_irn_mode(node, mode);
375 ir_op *(get_irn_op)(const ir_node *node)
377 return _get_irn_op(node);
380 /* should be private to the library: */
381 void (set_irn_op)(ir_node *node, ir_op *op)
383 _set_irn_op(node, op);
386 unsigned (get_irn_opcode)(const ir_node *node)
388 return _get_irn_opcode(node);
391 const char *get_irn_opname(const ir_node *node)
394 if (is_Phi0(node)) return "Phi0";
395 return get_id_str(node->op->name);
398 ident *get_irn_opident(const ir_node *node)
401 return node->op->name;
404 ir_visited_t (get_irn_visited)(const ir_node *node)
406 return _get_irn_visited(node);
409 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
411 _set_irn_visited(node, visited);
414 void (mark_irn_visited)(ir_node *node)
416 _mark_irn_visited(node);
419 int (irn_visited)(const ir_node *node)
421 return _irn_visited(node);
424 int (irn_visited_else_mark)(ir_node *node)
426 return _irn_visited_else_mark(node);
429 void (set_irn_link)(ir_node *node, void *link)
431 _set_irn_link(node, link);
434 void *(get_irn_link)(const ir_node *node)
436 return _get_irn_link(node);
439 op_pin_state (get_irn_pinned)(const ir_node *node)
441 return _get_irn_pinned(node);
444 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
446 return _is_irn_pinned_in_irg(node);
449 void set_irn_pinned(ir_node *node, op_pin_state state)
451 /* due to optimization an opt may be turned into a Tuple */
455 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
456 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
458 node->attr.except.pin_state = state;
461 /* Outputs a unique number for this node */
462 long get_irn_node_nr(const ir_node *node)
465 return node->node_nr;
468 void *(get_irn_generic_attr)(ir_node *node)
470 assert(is_ir_node(node));
471 return _get_irn_generic_attr(node);
474 const void *(get_irn_generic_attr_const)(const ir_node *node)
476 assert(is_ir_node(node));
477 return _get_irn_generic_attr_const(node);
480 unsigned (get_irn_idx)(const ir_node *node)
482 assert(is_ir_node(node));
483 return _get_irn_idx(node);
486 int get_irn_pred_pos(ir_node *node, ir_node *arg)
489 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
490 if (get_irn_n(node, i) == arg)
496 /** manipulate fields of individual nodes **/
498 ir_node *(get_nodes_block)(const ir_node *node)
500 return _get_nodes_block(node);
503 void set_nodes_block(ir_node *node, ir_node *block)
505 assert(node->op != op_Block);
506 set_irn_n(node, -1, block);
509 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
510 * from Start. If so returns frame type, else Null. */
511 ir_type *is_frame_pointer(const ir_node *n)
513 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
514 ir_node *start = get_Proj_pred(n);
515 if (is_Start(start)) {
516 return get_irg_frame_type(get_irn_irg(start));
522 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
523 * from Start. If so returns tls type, else Null. */
524 ir_type *is_tls_pointer(const ir_node *n)
526 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
527 ir_node *start = get_Proj_pred(n);
528 if (is_Start(start)) {
529 return get_tls_type();
535 ir_node **get_Block_cfgpred_arr(ir_node *node)
537 assert(is_Block(node));
538 return (ir_node **)&(get_irn_in(node)[1]);
541 int (get_Block_n_cfgpreds)(const ir_node *node)
543 return _get_Block_n_cfgpreds(node);
546 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
548 return _get_Block_cfgpred(node, pos);
551 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
553 assert(is_Block(node));
554 set_irn_n(node, pos, pred);
557 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
561 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
562 if (get_Block_cfgpred_block(block, i) == pred)
568 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
570 return _get_Block_cfgpred_block(node, pos);
573 int get_Block_matured(const ir_node *node)
575 assert(is_Block(node));
576 return (int)node->attr.block.is_matured;
579 void set_Block_matured(ir_node *node, int matured)
581 assert(is_Block(node));
582 node->attr.block.is_matured = matured;
585 ir_visited_t (get_Block_block_visited)(const ir_node *node)
587 return _get_Block_block_visited(node);
590 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
592 _set_Block_block_visited(node, visit);
595 void (mark_Block_block_visited)(ir_node *node)
597 _mark_Block_block_visited(node);
600 int (Block_block_visited)(const ir_node *node)
602 return _Block_block_visited(node);
605 ir_node *(set_Block_dead)(ir_node *block)
607 return _set_Block_dead(block);
610 int (is_Block_dead)(const ir_node *block)
612 return _is_Block_dead(block);
615 ir_extblk *get_Block_extbb(const ir_node *block)
618 assert(is_Block(block));
619 res = block->attr.block.extblk;
620 assert(res == NULL || is_ir_extbb(res));
624 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
626 assert(is_Block(block));
627 assert(extblk == NULL || is_ir_extbb(extblk));
628 block->attr.block.extblk = extblk;
631 /* returns the graph of a Block. */
632 ir_graph *(get_Block_irg)(const ir_node *block)
634 return _get_Block_irg(block);
637 ir_entity *create_Block_entity(ir_node *block)
640 assert(is_Block(block));
642 entity = block->attr.block.entity;
643 if (entity == NULL) {
647 glob = get_glob_type();
648 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
649 set_entity_visibility(entity, ir_visibility_local);
650 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
651 nr = get_irp_next_label_nr();
652 set_entity_label(entity, nr);
653 set_entity_compiler_generated(entity, 1);
655 block->attr.block.entity = entity;
660 ir_entity *get_Block_entity(const ir_node *block)
662 assert(is_Block(block));
663 return block->attr.block.entity;
666 void set_Block_entity(ir_node *block, ir_entity *entity)
668 assert(is_Block(block));
669 assert(get_entity_type(entity) == get_code_type());
670 block->attr.block.entity = entity;
673 int has_Block_entity(const ir_node *block)
675 return block->attr.block.entity != NULL;
678 ir_node *(get_Block_phis)(const ir_node *block)
680 return _get_Block_phis(block);
683 void (set_Block_phis)(ir_node *block, ir_node *phi)
685 _set_Block_phis(block, phi);
688 void (add_Block_phi)(ir_node *block, ir_node *phi)
690 _add_Block_phi(block, phi);
693 /* Get the Block mark (single bit). */
694 unsigned (get_Block_mark)(const ir_node *block)
696 return _get_Block_mark(block);
699 /* Set the Block mark (single bit). */
700 void (set_Block_mark)(ir_node *block, unsigned mark)
702 _set_Block_mark(block, mark);
705 int get_End_n_keepalives(const ir_node *end)
708 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
711 ir_node *get_End_keepalive(const ir_node *end, int pos)
714 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
717 void add_End_keepalive(ir_node *end, ir_node *ka)
723 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
726 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
729 /* Set new keep-alives */
730 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
733 ir_graph *irg = get_irn_irg(end);
735 /* notify that edges are deleted */
736 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
737 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
739 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
741 for (i = 0; i < n; ++i) {
742 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
743 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
747 /* Set new keep-alives from old keep-alives, skipping irn */
748 void remove_End_keepalive(ir_node *end, ir_node *irn)
750 int n = get_End_n_keepalives(end);
755 for (i = n -1; i >= 0; --i) {
756 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
766 irg = get_irn_irg(end);
768 /* remove the edge */
769 edges_notify_edge(end, idx, NULL, irn, irg);
772 /* exchange with the last one */
773 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
774 edges_notify_edge(end, n - 1, NULL, old, irg);
775 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
776 edges_notify_edge(end, idx, old, NULL, irg);
778 /* now n - 1 keeps, 1 block input */
779 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
782 /* remove Bads, NoMems and doublets from the keep-alive set */
783 void remove_End_Bads_and_doublets(ir_node *end)
786 int idx, n = get_End_n_keepalives(end);
792 irg = get_irn_irg(end);
793 pset_new_init(&keeps);
795 for (idx = n - 1; idx >= 0; --idx) {
796 ir_node *ka = get_End_keepalive(end, idx);
798 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
799 /* remove the edge */
800 edges_notify_edge(end, idx, NULL, ka, irg);
803 /* exchange with the last one */
804 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
805 edges_notify_edge(end, n - 1, NULL, old, irg);
806 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
807 edges_notify_edge(end, idx, old, NULL, irg);
811 pset_new_insert(&keeps, ka);
814 /* n keeps, 1 block input */
815 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
817 pset_new_destroy(&keeps);
820 void free_End(ir_node *end)
825 end->in = NULL; /* @@@ make sure we get an error if we use the
826 in array afterwards ... */
829 int get_Return_n_ress(const ir_node *node)
831 assert(is_Return(node));
832 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
835 ir_node **get_Return_res_arr(ir_node *node)
837 assert(is_Return(node));
838 if (get_Return_n_ress(node) > 0)
839 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
845 void set_Return_n_res(ir_node *node, int results)
847 assert(is_Return(node));
851 ir_node *get_Return_res(const ir_node *node, int pos)
853 assert(is_Return(node));
854 assert(get_Return_n_ress(node) > pos);
855 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
858 void set_Return_res(ir_node *node, int pos, ir_node *res)
860 assert(is_Return(node));
861 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
864 int (is_Const_null)(const ir_node *node)
866 return _is_Const_null(node);
869 int (is_Const_one)(const ir_node *node)
871 return _is_Const_one(node);
874 int (is_Const_all_one)(const ir_node *node)
876 return _is_Const_all_one(node);
881 symconst_kind get_SymConst_kind(const ir_node *node)
883 assert(is_SymConst(node));
884 return node->attr.symc.kind;
887 void set_SymConst_kind(ir_node *node, symconst_kind kind)
889 assert(is_SymConst(node));
890 node->attr.symc.kind = kind;
893 ir_type *get_SymConst_type(const ir_node *node)
895 /* the cast here is annoying, but we have to compensate for
897 ir_node *irn = (ir_node *)node;
898 assert(is_SymConst(node) &&
899 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
900 return irn->attr.symc.sym.type_p;
903 void set_SymConst_type(ir_node *node, ir_type *tp)
905 assert(is_SymConst(node) &&
906 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
907 node->attr.symc.sym.type_p = tp;
911 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
912 ir_entity *get_SymConst_entity(const ir_node *node)
914 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
915 return node->attr.symc.sym.entity_p;
918 void set_SymConst_entity(ir_node *node, ir_entity *ent)
920 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
921 node->attr.symc.sym.entity_p = ent;
924 ir_enum_const *get_SymConst_enum(const ir_node *node)
926 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
927 return node->attr.symc.sym.enum_p;
930 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
932 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
933 node->attr.symc.sym.enum_p = ec;
936 union symconst_symbol
937 get_SymConst_symbol(const ir_node *node)
939 assert(is_SymConst(node));
940 return node->attr.symc.sym;
943 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
945 assert(is_SymConst(node));
946 node->attr.symc.sym = sym;
949 int get_Sel_n_indexs(const ir_node *node)
951 assert(is_Sel(node));
952 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
955 ir_node **get_Sel_index_arr(ir_node *node)
957 assert(is_Sel(node));
958 if (get_Sel_n_indexs(node) > 0)
959 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
964 ir_node *get_Sel_index(const ir_node *node, int pos)
966 assert(is_Sel(node));
967 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
970 void set_Sel_index(ir_node *node, int pos, ir_node *index)
972 assert(is_Sel(node));
973 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
977 /* For unary and binary arithmetic operations the access to the
978 operands can be factored out. Left is the first, right the
979 second arithmetic value as listed in tech report 0999-33.
980 unops are: Minus, Abs, Not, Conv, Cast
981 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
982 Shr, Shrs, Rotate, Cmp */
985 ir_node **get_Call_param_arr(ir_node *node)
987 assert(is_Call(node));
988 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
991 int get_Call_n_params(const ir_node *node)
993 assert(is_Call(node));
994 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
997 ir_node *get_Call_param(const ir_node *node, int pos)
999 assert(is_Call(node));
1000 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1003 void set_Call_param(ir_node *node, int pos, ir_node *param)
1005 assert(is_Call(node));
1006 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1009 ir_node **get_Builtin_param_arr(ir_node *node)
1011 assert(is_Builtin(node));
1012 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1015 int get_Builtin_n_params(const ir_node *node)
1017 assert(is_Builtin(node));
1018 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1021 ir_node *get_Builtin_param(const ir_node *node, int pos)
1023 assert(is_Builtin(node));
1024 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1027 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
1029 assert(is_Builtin(node));
1030 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1033 /* Returns a human readable string for the ir_builtin_kind. */
1034 const char *get_builtin_kind_name(ir_builtin_kind kind)
1036 #define X(a) case a: return #a
1039 X(ir_bk_debugbreak);
1040 X(ir_bk_return_address);
1041 X(ir_bk_frame_address);
1051 X(ir_bk_inner_trampoline);
1058 int Call_has_callees(const ir_node *node)
1060 assert(is_Call(node));
1061 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1062 (node->attr.call.callee_arr != NULL));
1065 int get_Call_n_callees(const ir_node *node)
1067 assert(is_Call(node) && node->attr.call.callee_arr);
1068 return ARR_LEN(node->attr.call.callee_arr);
1071 ir_entity *get_Call_callee(const ir_node *node, int pos)
1073 assert(pos >= 0 && pos < get_Call_n_callees(node));
1074 return node->attr.call.callee_arr[pos];
1077 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr)
1079 ir_graph *irg = get_irn_irg(node);
1081 assert(is_Call(node));
1082 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1083 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, irg->obst, n);
1085 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1088 void remove_Call_callee_arr(ir_node *node)
1090 assert(is_Call(node));
1091 node->attr.call.callee_arr = NULL;
1095 * Returns non-zero if a Call is surely a self-recursive Call.
1096 * Beware: if this functions returns 0, the call might be self-recursive!
1098 int is_self_recursive_Call(const ir_node *call)
1100 const ir_node *callee = get_Call_ptr(call);
1102 if (is_SymConst_addr_ent(callee)) {
1103 const ir_entity *ent = get_SymConst_entity(callee);
1104 const ir_graph *irg = get_entity_irg(ent);
1105 if (irg == get_irn_irg(call))
1111 /* Checks for upcast.
1113 * Returns true if the Cast node casts a class type to a super type.
1115 int is_Cast_upcast(ir_node *node)
1117 ir_type *totype = get_Cast_type(node);
1118 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1120 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1123 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1124 totype = get_pointer_points_to_type(totype);
1125 fromtype = get_pointer_points_to_type(fromtype);
1130 if (!is_Class_type(totype)) return 0;
1131 return is_SubClass_of(fromtype, totype);
1134 /* Checks for downcast.
1136 * Returns true if the Cast node casts a class type to a sub type.
1138 int is_Cast_downcast(ir_node *node)
1140 ir_type *totype = get_Cast_type(node);
1141 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1143 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1146 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1147 totype = get_pointer_points_to_type(totype);
1148 fromtype = get_pointer_points_to_type(fromtype);
1153 if (!is_Class_type(totype)) return 0;
1154 return is_SubClass_of(totype, fromtype);
1157 int (is_unop)(const ir_node *node)
1159 return _is_unop(node);
1162 ir_node *get_unop_op(const ir_node *node)
1164 if (node->op->opar == oparity_unary)
1165 return get_irn_n(node, node->op->op_index);
1167 assert(node->op->opar == oparity_unary);
1171 void set_unop_op(ir_node *node, ir_node *op)
1173 if (node->op->opar == oparity_unary)
1174 set_irn_n(node, node->op->op_index, op);
1176 assert(node->op->opar == oparity_unary);
1179 int (is_binop)(const ir_node *node)
1181 return _is_binop(node);
1184 ir_node *get_binop_left(const ir_node *node)
1186 assert(node->op->opar == oparity_binary);
1187 return get_irn_n(node, node->op->op_index);
1190 void set_binop_left(ir_node *node, ir_node *left)
1192 assert(node->op->opar == oparity_binary);
1193 set_irn_n(node, node->op->op_index, left);
1196 ir_node *get_binop_right(const ir_node *node)
1198 assert(node->op->opar == oparity_binary);
1199 return get_irn_n(node, node->op->op_index + 1);
1202 void set_binop_right(ir_node *node, ir_node *right)
1204 assert(node->op->opar == oparity_binary);
1205 set_irn_n(node, node->op->op_index + 1, right);
1208 int is_Phi0(const ir_node *n)
1212 return ((get_irn_op(n) == op_Phi) &&
1213 (get_irn_arity(n) == 0) &&
1214 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1217 ir_node **get_Phi_preds_arr(ir_node *node)
1219 assert(is_Phi(node));
1220 return (ir_node **)&(get_irn_in(node)[1]);
1223 int get_Phi_n_preds(const ir_node *node)
1225 assert(is_Phi(node) || is_Phi0(node));
1226 return (get_irn_arity(node));
1229 ir_node *get_Phi_pred(const ir_node *node, int pos)
1231 assert(is_Phi(node) || is_Phi0(node));
1232 return get_irn_n(node, pos);
1235 void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1237 assert(is_Phi(node) || is_Phi0(node));
1238 set_irn_n(node, pos, pred);
1241 ir_node *(get_Phi_next)(const ir_node *phi)
1243 return _get_Phi_next(phi);
1246 void (set_Phi_next)(ir_node *phi, ir_node *next)
1248 _set_Phi_next(phi, next);
1251 int is_memop(const ir_node *node)
1253 unsigned code = get_irn_opcode(node);
1254 return (code == iro_Load || code == iro_Store);
1257 ir_node *get_memop_mem(const ir_node *node)
1259 assert(is_memop(node));
1260 return get_irn_n(node, 0);
1263 void set_memop_mem(ir_node *node, ir_node *mem)
1265 assert(is_memop(node));
1266 set_irn_n(node, 0, mem);
1269 ir_node *get_memop_ptr(const ir_node *node)
1271 assert(is_memop(node));
1272 return get_irn_n(node, 1);
1275 void set_memop_ptr(ir_node *node, ir_node *ptr)
1277 assert(is_memop(node));
1278 set_irn_n(node, 1, ptr);
1281 ir_volatility get_Load_volatility(const ir_node *node)
1283 assert(is_Load(node));
1284 return (ir_volatility)node->attr.load.volatility;
1287 void set_Load_volatility(ir_node *node, ir_volatility volatility)
1289 assert(is_Load(node));
1290 node->attr.load.volatility = volatility;
1293 ir_align get_Load_align(const ir_node *node)
1295 assert(is_Load(node));
1296 return (ir_align)node->attr.load.aligned;
1299 void set_Load_align(ir_node *node, ir_align align)
1301 assert(is_Load(node));
1302 node->attr.load.aligned = align;
1306 ir_volatility get_Store_volatility(const ir_node *node)
1308 assert(is_Store(node));
1309 return (ir_volatility)node->attr.store.volatility;
1312 void set_Store_volatility(ir_node *node, ir_volatility volatility)
1314 assert(is_Store(node));
1315 node->attr.store.volatility = volatility;
1318 ir_align get_Store_align(const ir_node *node)
1320 assert(is_Store(node));
1321 return (ir_align)node->attr.store.aligned;
1324 void set_Store_align(ir_node *node, ir_align align)
1326 assert(is_Store(node));
1327 node->attr.store.aligned = align;
1331 ir_node **get_Sync_preds_arr(ir_node *node)
1333 assert(is_Sync(node));
1334 return (ir_node **)&(get_irn_in(node)[1]);
1337 int get_Sync_n_preds(const ir_node *node)
1339 assert(is_Sync(node));
1340 return (get_irn_arity(node));
1344 void set_Sync_n_preds(ir_node *node, int n_preds)
1346 assert(is_Sync(node));
1350 ir_node *get_Sync_pred(const ir_node *node, int pos)
1352 assert(is_Sync(node));
1353 return get_irn_n(node, pos);
1356 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
1358 assert(is_Sync(node));
1359 set_irn_n(node, pos, pred);
1362 /* Add a new Sync predecessor */
1363 void add_Sync_pred(ir_node *node, ir_node *pred)
1365 assert(is_Sync(node));
1366 add_irn_n(node, pred);
1369 int (is_arg_Proj)(const ir_node *node)
1371 return _is_arg_Proj(node);
1374 pn_Cmp (get_Proj_pn_cmp)(const ir_node *node)
1376 return _get_Proj_pn_cmp(node);
1379 ir_node **get_Tuple_preds_arr(ir_node *node)
1381 assert(is_Tuple(node));
1382 return (ir_node **)&(get_irn_in(node)[1]);
1385 int get_Tuple_n_preds(const ir_node *node)
1387 assert(is_Tuple(node));
1388 return get_irn_arity(node);
1391 ir_node *get_Tuple_pred(const ir_node *node, int pos)
1393 assert(is_Tuple(node));
1394 return get_irn_n(node, pos);
1397 void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
1399 assert(is_Tuple(node));
1400 set_irn_n(node, pos, pred);
1403 int get_ASM_n_input_constraints(const ir_node *node)
1405 assert(is_ASM(node));
1406 return ARR_LEN(node->attr.assem.input_constraints);
1409 int get_ASM_n_output_constraints(const ir_node *node)
1411 assert(is_ASM(node));
1412 return ARR_LEN(node->attr.assem.output_constraints);
1415 int get_ASM_n_clobbers(const ir_node *node)
1417 assert(is_ASM(node));
1418 return ARR_LEN(node->attr.assem.clobbers);
1421 /* returns the graph of a node */
1422 ir_graph *(get_irn_irg)(const ir_node *node)
1424 return _get_irn_irg(node);
1428 /*----------------------------------------------------------------*/
1429 /* Auxiliary routines */
1430 /*----------------------------------------------------------------*/
1432 ir_node *skip_Proj(ir_node *node)
1434 /* don't assert node !!! */
1439 node = get_Proj_pred(node);
1445 skip_Proj_const(const ir_node *node)
1447 /* don't assert node !!! */
1452 node = get_Proj_pred(node);
1457 ir_node *skip_Tuple(ir_node *node)
1462 if (is_Proj(node)) {
1463 pred = get_Proj_pred(node);
1465 if (is_Proj(pred)) { /* nested Tuple ? */
1466 pred = skip_Tuple(pred);
1468 if (is_Tuple(pred)) {
1469 node = get_Tuple_pred(pred, get_Proj_proj(node));
1472 } else if (is_Tuple(pred)) {
1473 node = get_Tuple_pred(pred, get_Proj_proj(node));
1480 /* returns operand of node if node is a Cast */
1481 ir_node *skip_Cast(ir_node *node)
1484 return get_Cast_op(node);
1488 /* returns operand of node if node is a Cast */
1489 const ir_node *skip_Cast_const(const ir_node *node)
1492 return get_Cast_op(node);
1496 /* returns operand of node if node is a Pin */
1497 ir_node *skip_Pin(ir_node *node)
1500 return get_Pin_op(node);
1504 /* returns operand of node if node is a Confirm */
1505 ir_node *skip_Confirm(ir_node *node)
1507 if (is_Confirm(node))
1508 return get_Confirm_value(node);
1512 /* skip all high-level ops */
1513 ir_node *skip_HighLevel_ops(ir_node *node)
1515 while (is_op_highlevel(get_irn_op(node))) {
1516 node = get_irn_n(node, 0);
1522 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1523 * than any other approach, as Id chains are resolved and all point to the real node, or
1524 * all id's are self loops.
 * Note: This function accounts for roughly 10% of the runtime of almost ANY
 * compiler run, so it is a little bit "hand optimized".
1529 ir_node *skip_Id(ir_node *node)
1532 /* don't assert node !!! */
1534 if (!node || (node->op != op_Id)) return node;
1536 /* Don't use get_Id_pred(): We get into an endless loop for
1537 self-referencing Ids. */
1538 pred = node->in[0+1];
1540 if (pred->op != op_Id) return pred;
1542 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1543 ir_node *rem_pred, *res;
1545 if (pred->op != op_Id) return pred; /* shortcut */
1548 assert(get_irn_arity (node) > 0);
1550 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1551 res = skip_Id(rem_pred);
1552 if (is_Id(res)) /* self-loop */ return node;
1554 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1561 int (is_strictConv)(const ir_node *node)
1563 return _is_strictConv(node);
1566 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
1567 int (is_SymConst_addr_ent)(const ir_node *node)
1569 return _is_SymConst_addr_ent(node);
1572 /* Returns true if the operation manipulates control flow. */
1573 int is_cfop(const ir_node *node)
1575 return is_op_cfopcode(get_irn_op(node));
1578 /* Returns true if the operation can change the control flow because
1580 int is_fragile_op(const ir_node *node)
1582 return is_op_fragile(get_irn_op(node));
1585 /* Returns the memory operand of fragile operations. */
1586 ir_node *get_fragile_op_mem(ir_node *node)
1588 assert(node && is_fragile_op(node));
1590 switch (get_irn_opcode(node)) {
1601 return get_irn_n(node, pn_Generic_M);
1606 panic("should not be reached");
1610 /* Returns the result mode of a Div operation. */
1611 ir_mode *get_divop_resmod(const ir_node *node)
1613 switch (get_irn_opcode(node)) {
1614 case iro_Quot : return get_Quot_resmode(node);
1615 case iro_DivMod: return get_DivMod_resmode(node);
1616 case iro_Div : return get_Div_resmode(node);
1617 case iro_Mod : return get_Mod_resmode(node);
1619 panic("should not be reached");
1623 /* Returns true if the operation is a forking control flow operation. */
1624 int (is_irn_forking)(const ir_node *node)
1626 return _is_irn_forking(node);
1629 void (copy_node_attr)(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
1631 _copy_node_attr(irg, old_node, new_node);
1634 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
1636 ir_type *(get_irn_type_attr)(ir_node *node)
1638 return _get_irn_type_attr(node);
1641 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
1642 ir_entity *(get_irn_entity_attr)(ir_node *node)
1644 return _get_irn_entity_attr(node);
1647 /* Returns non-zero for constant-like nodes. */
1648 int (is_irn_constlike)(const ir_node *node)
1650 return _is_irn_constlike(node);
1654 * Returns non-zero for nodes that are allowed to have keep-alives and
1655 * are neither Block nor PhiM.
1657 int (is_irn_keep)(const ir_node *node)
1659 return _is_irn_keep(node);
1663 * Returns non-zero for nodes that are always placed in the start block.
1665 int (is_irn_start_block_placed)(const ir_node *node)
1667 return _is_irn_start_block_placed(node);
1670 /* Returns non-zero for nodes that are machine operations. */
1671 int (is_irn_machine_op)(const ir_node *node)
1673 return _is_irn_machine_op(node);
1676 /* Returns non-zero for nodes that are machine operands. */
1677 int (is_irn_machine_operand)(const ir_node *node)
1679 return _is_irn_machine_operand(node);
1682 /* Returns non-zero for nodes that have the n'th user machine flag set. */
1683 int (is_irn_machine_user)(const ir_node *node, unsigned n)
1685 return _is_irn_machine_user(node, n);
1688 /* Returns non-zero for nodes that are CSE neutral to its users. */
1689 int (is_irn_cse_neutral)(const ir_node *node)
1691 return _is_irn_cse_neutral(node);
1694 /* Gets the string representation of the jump prediction .*/
1695 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
1697 #define X(a) case a: return #a
1699 X(COND_JMP_PRED_NONE);
1700 X(COND_JMP_PRED_TRUE);
1701 X(COND_JMP_PRED_FALSE);
1707 /** Return the attribute type of a SymConst node if exists */
1708 static ir_type *get_SymConst_attr_type(const ir_node *self)
1710 symconst_kind kind = get_SymConst_kind(self);
1711 if (SYMCONST_HAS_TYPE(kind))
1712 return get_SymConst_type(self);
1716 /** Return the attribute entity of a SymConst node if exists */
1717 static ir_entity *get_SymConst_attr_entity(const ir_node *self)
1719 symconst_kind kind = get_SymConst_kind(self);
1720 if (SYMCONST_HAS_ENT(kind))
1721 return get_SymConst_entity(self);
1725 /** the get_type_attr operation must be always implemented */
1726 static ir_type *get_Null_type(const ir_node *n)
1729 return firm_unknown_type;
1732 /* Sets the get_type operation for an ir_op_ops. */
1733 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops)
1736 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
1737 case iro_Call: ops->get_type_attr = get_Call_type; break;
1738 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
1739 case iro_Free: ops->get_type_attr = get_Free_type; break;
1740 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
1742 /* not allowed to be NULL */
1743 if (! ops->get_type_attr)
1744 ops->get_type_attr = get_Null_type;
1750 /** the get_entity_attr operation must be always implemented */
1751 static ir_entity *get_Null_ent(const ir_node *n)
1757 /* Sets the get_type operation for an ir_op_ops. */
1758 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops)
1761 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
1762 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
1764 /* not allowed to be NULL */
1765 if (! ops->get_entity_attr)
1766 ops->get_entity_attr = get_Null_ent;
1772 /* Sets the debug information of a node. */
1773 void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
1775 _set_irn_dbg_info(n, db);
1779 * Returns the debug information of an node.
1781 * @param n The node.
1783 dbg_info *(get_irn_dbg_info)(const ir_node *n)
1785 return _get_irn_dbg_info(n);
1788 /* checks whether a node represents a global address */
1789 int is_Global(const ir_node *node)
1791 return is_SymConst_addr_ent(node);
1794 /* returns the entity of a global address */
1795 ir_entity *get_Global_entity(const ir_node *node)
1797 return get_SymConst_entity(node);
1801 * Calculate a hash value of a node.
1803 unsigned firm_default_hash(const ir_node *node)
1808 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
1809 h = irn_arity = get_irn_arity(node);
1811 /* consider all in nodes... except the block if not a control flow. */
1812 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
1813 ir_node *pred = get_irn_n(node, i);
1814 if (is_irn_cse_neutral(pred))
1817 h = 9*h + HASH_PTR(pred);
1821 h = 9*h + HASH_PTR(get_irn_mode(node));
1823 h = 9*h + HASH_PTR(get_irn_op(node));
1826 } /* firm_default_hash */
1828 /* include generated code */
1829 #include "gen_irnode.c.inl"