2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
50 /* some constants fixing the positions of nodes predecessors
52 #define CALL_PARAM_OFFSET (n_Call_max+1)
53 #define BUILTIN_PARAM_OFFSET (n_Builtin_max+1)
54 #define SEL_INDEX_OFFSET (n_Sel_max+1)
55 #define RETURN_RESULT_OFFSET (n_Return_max+1)
56 #define END_KEEPALIVE_OFFSET 0
58 static const char *relation_names [] = {
70 "unordered_less_equal",
72 "unordered_greater_equal",
77 const char *get_relation_string(ir_relation relation)
79 assert(relation < (ir_relation)ARRAY_SIZE(relation_names));
80 return relation_names[relation];
83 ir_relation get_negated_relation(ir_relation relation)
85 return relation ^ ir_relation_true;
88 ir_relation get_inversed_relation(ir_relation relation)
90 ir_relation code = relation & ~(ir_relation_less|ir_relation_greater);
91 bool less = relation & ir_relation_less;
92 bool greater = relation & ir_relation_greater;
93 code |= (less ? ir_relation_greater : 0) | (greater ? ir_relation_less : 0);
98 * Indicates, whether additional data can be registered to ir nodes.
99 * If set to 1, this is not possible anymore.
101 static int forbid_new_data = 0;
104 * The amount of additional space for custom data to be allocated upon
105 * creating a new node.
107 unsigned firm_add_node_size = 0;
110 /* register new space for every node */
111 unsigned firm_register_additional_node_data(unsigned size)
/* Reserve `size` additional bytes in front of every ir_node allocated
 * from now on.  Must be called before node construction starts;
 * afterwards forbid_new_data is set and this assert fires. */
113 assert(!forbid_new_data && "Too late to register additional node data");
/* Accumulate into the global extra-space counter and return the new
 * total so the caller can derive its own offset.
 * NOTE(review): lines between the assert and the return are not
 * visible in this excerpt -- confirm against the full source. */
118 return firm_add_node_size += size;
122 void init_irnode(void)
124 /* Forbid the addition of new data to an ir node. */
128 struct struct_align {
138 * irnode constructor.
139 * Create a new irnode in irg, with an op, mode, arity and
140 * some incoming irnodes.
141 * If arity is negative, a node with a dynamic array is created.
143 ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
144 ir_mode *mode, int arity, ir_node *const *in)
/* Allocate and initialise a new ir_node of the given op/mode/arity in
 * irg.  Custom data registered via firm_register_additional_node_data
 * is placed in front of the node; the size is rounded up so the node
 * itself stays maximally aligned. */
147 unsigned align = offsetof(struct struct_align, s) - 1;
148 unsigned add_node_size = (firm_add_node_size + align) & ~align;
149 size_t node_size = offsetof(ir_node, attr) + op->attr_size + add_node_size;
/* one obstack allocation covers custom data + node + op attributes */
156 p = (char*)obstack_alloc(irg->obst, node_size);
157 memset(p, 0, node_size);
/* the ir_node proper starts behind the custom data area */
158 res = (ir_node *)(p + add_node_size);
160 res->kind = k_ir_node;
164 res->node_idx = irg_register_node_idx(irg, res);
/* negative arity: dynamic in-array; in[0] is reserved for the block */
169 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
171 /* not nice but necessary: End and Sync must always have a flexible array */
172 if (op == op_End || op == op_Sync)
173 res->in = NEW_ARR_F(ir_node *, (arity+1));
175 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
/* copy the given predecessors behind the block slot */
176 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
180 set_irn_dbg_info(res, db);
182 res->node_nr = get_irp_new_node_nr();
/* initialise the out-edge bookkeeping for every edge kind */
184 for (i = 0; i < EDGE_KIND_LAST; ++i) {
185 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
186 /* edges will be build immediately */
187 res->edge_info[i].edges_built = 1;
188 res->edge_info[i].out_count = 0;
/* notify the edge module: position -1 denotes the block edge */
191 /* don't put this into the for loop, arity is -1 for some nodes! */
192 edges_notify_edge(res, -1, res->in[0], NULL, irg);
193 for (i = 1; i <= arity; ++i)
194 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
196 hook_new_node(irg, res);
/* the backend attaches extra per-node info to every new node */
197 if (get_irg_phase_state(irg) == phase_backend) {
198 be_info_new_node(res);
204 /*-- getting some parameters from ir_nodes --*/
206 int (is_ir_node)(const void *thing)
208 return _is_ir_node(thing);
211 int (get_irn_arity)(const ir_node *node)
213 return _get_irn_arity(node);
216 /* Returns the array with ins. This array is shifted with respect to the
217 array accessed by get_irn_n: The block operand is at position 0 not -1.
218 (@@@ This should be changed.)
219 The order of the predecessors in this array is not guaranteed, except that
220 lists of operands as predecessors of Block or arguments of a Call are
222 ir_node **get_irn_in(const ir_node *node)
227 void set_irn_in(ir_node *node, int arity, ir_node **in)
/* Replace the complete input array of node by the arity nodes in in[],
 * keeping the block input (slot 0).  pOld_in points at the node's
 * current in-array; its declaration is not visible in this excerpt. */
231 ir_graph *irg = get_irn_irg(node);
/* edges: positions present in both old and new array are rerouted,
 * brand-new positions are added */
236 for (i = 0; i < arity; i++) {
237 if (i < (int)ARR_LEN(*pOld_in)-1)
238 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
240 edges_notify_edge(node, i, in[i], NULL, irg);
/* old positions beyond the new arity are removed */
242 for (;i < (int)ARR_LEN(*pOld_in)-1; i++) {
243 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
/* reallocate the in-array if the arity changed, preserving the block */
246 if (arity != (int)ARR_LEN(*pOld_in) - 1) {
247 ir_node * block = (*pOld_in)[0];
248 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
249 (*pOld_in)[0] = block;
/* the back-edge bitset must match the new arity */
251 fix_backedges(irg->obst, node);
253 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
255 /* update irg flags */
256 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
259 ir_node *(get_irn_n)(const ir_node *node, int n)
261 return _get_irn_n(node, n);
264 void set_irn_n(ir_node *node, int n, ir_node *in)
/* Set input n of node to in; n == -1 addresses the block input.
 * Keeps the out-edge information up to date and invalidates the
 * graph's outs/loop info. */
266 ir_graph *irg = get_irn_irg(node);
267 assert(node && node->kind == k_ir_node);
269 assert(n < get_irn_arity(node));
270 assert(in && in->kind == k_ir_node);
/* inform debugger/instrumentation hooks before the change */
273 hook_set_irn_n(node, n, in, node->in[n + 1]);
275 /* Here, we rely on src and tgt being in the current ir graph */
276 edges_notify_edge(node, n, in, node->in[n + 1], irg);
/* in[0] is the block, so real inputs are shifted by one */
278 node->in[n + 1] = in;
280 /* update irg flags */
281 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
284 int add_irn_n(ir_node *node, ir_node *in)
/* Append a new input to a node with dynamic arity (e.g. Sync, End)
 * and return its position.  NOTE(review): the declaration of `pos`
 * and the return statement are not visible in this excerpt. */
287 ir_graph *irg = get_irn_irg(node);
289 assert(node->op->opar == oparity_dynamic);
/* position of the new input == old arity (in[] holds block + inputs) */
290 pos = ARR_LEN(node->in) - 1;
291 ARR_APP1(ir_node *, node->in, in);
292 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
/* hooks are informed after the edge module */
295 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
300 void del_Sync_n(ir_node *n, int i)
302 int arity = get_Sync_n_preds(n);
303 ir_node *last_pred = get_Sync_pred(n, arity - 1);
304 set_Sync_pred(n, i, last_pred);
305 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
306 ARR_SHRINKLEN(get_irn_in(n), arity);
309 int (get_irn_deps)(const ir_node *node)
311 return _get_irn_deps(node);
314 ir_node *(get_irn_dep)(const ir_node *node, int pos)
316 return _get_irn_dep(node, pos);
319 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
321 _set_irn_dep(node, pos, dep);
324 int add_irn_dep(ir_node *node, ir_node *dep)
328 /* DEP edges are only allowed in backend phase */
329 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
330 if (node->deps == NULL) {
331 node->deps = NEW_ARR_F(ir_node *, 1);
337 for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
338 if (node->deps[i] == NULL)
341 if (node->deps[i] == dep)
345 if (first_zero >= 0) {
346 node->deps[first_zero] = dep;
349 ARR_APP1(ir_node *, node->deps, dep);
354 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
359 void add_irn_deps(ir_node *tgt, ir_node *src)
363 for (i = 0, n = get_irn_deps(src); i < n; ++i)
364 add_irn_dep(tgt, get_irn_dep(src, i));
368 ir_mode *(get_irn_mode)(const ir_node *node)
370 return _get_irn_mode(node);
373 void (set_irn_mode)(ir_node *node, ir_mode *mode)
375 _set_irn_mode(node, mode);
378 ir_op *(get_irn_op)(const ir_node *node)
380 return _get_irn_op(node);
383 /* should be private to the library: */
384 void (set_irn_op)(ir_node *node, ir_op *op)
386 _set_irn_op(node, op);
389 unsigned (get_irn_opcode)(const ir_node *node)
391 return _get_irn_opcode(node);
394 const char *get_irn_opname(const ir_node *node)
397 if (is_Phi0(node)) return "Phi0";
398 return get_id_str(node->op->name);
401 ident *get_irn_opident(const ir_node *node)
404 return node->op->name;
407 ir_visited_t (get_irn_visited)(const ir_node *node)
409 return _get_irn_visited(node);
412 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
414 _set_irn_visited(node, visited);
417 void (mark_irn_visited)(ir_node *node)
419 _mark_irn_visited(node);
422 int (irn_visited)(const ir_node *node)
424 return _irn_visited(node);
427 int (irn_visited_else_mark)(ir_node *node)
429 return _irn_visited_else_mark(node);
432 void (set_irn_link)(ir_node *node, void *link)
434 _set_irn_link(node, link);
437 void *(get_irn_link)(const ir_node *node)
439 return _get_irn_link(node);
442 op_pin_state (get_irn_pinned)(const ir_node *node)
444 return _get_irn_pinned(node);
447 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
449 return _is_irn_pinned_in_irg(node);
452 void set_irn_pinned(ir_node *node, op_pin_state state)
454 /* due to optimization an opt may be turned into a Tuple */
458 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
459 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
461 node->attr.except.pin_state = state;
464 /* Outputs a unique number for this node */
465 long get_irn_node_nr(const ir_node *node)
468 return node->node_nr;
471 void *(get_irn_generic_attr)(ir_node *node)
473 assert(is_ir_node(node));
474 return _get_irn_generic_attr(node);
477 const void *(get_irn_generic_attr_const)(const ir_node *node)
479 assert(is_ir_node(node));
480 return _get_irn_generic_attr_const(node);
483 unsigned (get_irn_idx)(const ir_node *node)
485 assert(is_ir_node(node));
486 return _get_irn_idx(node);
489 int get_irn_pred_pos(ir_node *node, ir_node *arg)
492 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
493 if (get_irn_n(node, i) == arg)
499 /** manipulate fields of individual nodes **/
501 ir_node *(get_nodes_block)(const ir_node *node)
503 return _get_nodes_block(node);
506 void set_nodes_block(ir_node *node, ir_node *block)
508 assert(node->op != op_Block);
509 set_irn_n(node, -1, block);
512 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
513 * from Start. If so returns frame type, else Null. */
514 ir_type *is_frame_pointer(const ir_node *n)
516 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
517 ir_node *start = get_Proj_pred(n);
518 if (is_Start(start)) {
519 return get_irg_frame_type(get_irn_irg(start));
525 ir_node **get_Block_cfgpred_arr(ir_node *node)
527 assert(is_Block(node));
528 return (ir_node **)&(get_irn_in(node)[1]);
531 int (get_Block_n_cfgpreds)(const ir_node *node)
533 return _get_Block_n_cfgpreds(node);
536 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
538 return _get_Block_cfgpred(node, pos);
541 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
543 assert(is_Block(node));
544 set_irn_n(node, pos, pred);
547 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
551 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
552 if (get_Block_cfgpred_block(block, i) == pred)
558 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
560 return _get_Block_cfgpred_block(node, pos);
563 int get_Block_matured(const ir_node *node)
565 assert(is_Block(node));
566 return (int)node->attr.block.is_matured;
569 void set_Block_matured(ir_node *node, int matured)
571 assert(is_Block(node));
572 node->attr.block.is_matured = matured;
575 ir_visited_t (get_Block_block_visited)(const ir_node *node)
577 return _get_Block_block_visited(node);
580 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
582 _set_Block_block_visited(node, visit);
585 void (mark_Block_block_visited)(ir_node *node)
587 _mark_Block_block_visited(node);
590 int (Block_block_visited)(const ir_node *node)
592 return _Block_block_visited(node);
595 ir_extblk *get_Block_extbb(const ir_node *block)
598 assert(is_Block(block));
599 res = block->attr.block.extblk;
600 assert(res == NULL || is_ir_extbb(res));
604 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
606 assert(is_Block(block));
607 assert(extblk == NULL || is_ir_extbb(extblk));
608 block->attr.block.extblk = extblk;
611 /* returns the graph of a Block. */
612 ir_graph *(get_Block_irg)(const ir_node *block)
614 return _get_Block_irg(block);
617 ir_entity *create_Block_entity(ir_node *block)
620 assert(is_Block(block));
622 entity = block->attr.block.entity;
623 if (entity == NULL) {
627 glob = get_glob_type();
628 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
629 set_entity_visibility(entity, ir_visibility_local);
630 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
631 nr = get_irp_next_label_nr();
632 set_entity_label(entity, nr);
633 set_entity_compiler_generated(entity, 1);
635 block->attr.block.entity = entity;
640 ir_entity *get_Block_entity(const ir_node *block)
642 assert(is_Block(block));
643 return block->attr.block.entity;
646 void set_Block_entity(ir_node *block, ir_entity *entity)
648 assert(is_Block(block));
649 assert(get_entity_type(entity) == get_code_type());
650 block->attr.block.entity = entity;
653 int has_Block_entity(const ir_node *block)
655 return block->attr.block.entity != NULL;
658 ir_node *(get_Block_phis)(const ir_node *block)
660 return _get_Block_phis(block);
663 void (set_Block_phis)(ir_node *block, ir_node *phi)
665 _set_Block_phis(block, phi);
668 void (add_Block_phi)(ir_node *block, ir_node *phi)
670 _add_Block_phi(block, phi);
673 /* Get the Block mark (single bit). */
674 unsigned (get_Block_mark)(const ir_node *block)
676 return _get_Block_mark(block);
679 /* Set the Block mark (single bit). */
680 void (set_Block_mark)(ir_node *block, unsigned mark)
682 _set_Block_mark(block, mark);
685 int get_End_n_keepalives(const ir_node *end)
688 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
691 ir_node *get_End_keepalive(const ir_node *end, int pos)
694 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
697 void add_End_keepalive(ir_node *end, ir_node *ka)
703 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
706 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
709 /* Set new keep-alives */
710 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
714 ir_graph *irg = get_irn_irg(end);
716 /* notify that edges are deleted */
717 for (e = END_KEEPALIVE_OFFSET; e < ARR_LEN(end->in) - 1; ++e) {
718 edges_notify_edge(end, e, NULL, end->in[e + 1], irg);
720 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
722 for (i = 0; i < n; ++i) {
723 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
724 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
727 /* update irg flags */
728 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
731 /* Set new keep-alives from old keep-alives, skipping irn */
732 void remove_End_keepalive(ir_node *end, ir_node *irn)
734 int n = get_End_n_keepalives(end);
739 for (i = n -1; i >= 0; --i) {
740 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
750 irg = get_irn_irg(end);
752 /* remove the edge */
753 edges_notify_edge(end, idx, NULL, irn, irg);
756 /* exchange with the last one */
757 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
758 edges_notify_edge(end, n - 1, NULL, old, irg);
759 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
760 edges_notify_edge(end, idx, old, NULL, irg);
762 /* now n - 1 keeps, 1 block input */
763 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
765 /* update irg flags */
766 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
769 /* remove Bads, NoMems and doublets from the keep-alive set */
770 void remove_End_Bads_and_doublets(ir_node *end)
/* Remove Bad, NoMem and duplicate entries from End's keep-alive set.
 * A pset records already-seen nodes; a removal swaps the last
 * keep-alive into the freed slot.  NOTE(review): several declarations
 * (keeps, irg) and loop braces are not visible in this excerpt. */
773 int idx, n = get_End_n_keepalives(end);
775 bool changed = false;
780 irg = get_irn_irg(end);
781 pset_new_init(&keeps);
/* iterate backwards so swapping in the last element stays safe */
783 for (idx = n - 1; idx >= 0; --idx) {
784 ir_node *ka = get_End_keepalive(end, idx);
/* drop Bads, NoMems and anything we already keep alive */
786 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
788 /* remove the edge */
789 edges_notify_edge(end, idx, NULL, ka, irg);
792 /* exchange with the last one */
793 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
794 edges_notify_edge(end, n - 1, NULL, old, irg);
795 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
796 edges_notify_edge(end, idx, old, NULL, irg);
/* first occurrence: remember the node */
800 pset_new_insert(&keeps, ka);
803 /* n keeps, 1 block input */
804 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
806 pset_new_destroy(&keeps);
/* outs are invalid after keep-alives were removed */
809 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
813 void free_End(ir_node *end)
818 end->in = NULL; /* @@@ make sure we get an error if we use the
819 in array afterwards ... */
822 size_t get_Return_n_ress(const ir_node *node)
824 assert(is_Return(node));
825 return (size_t)(get_irn_arity(node) - RETURN_RESULT_OFFSET);
828 ir_node **get_Return_res_arr(ir_node *node)
830 assert(is_Return(node));
831 if (get_Return_n_ress(node) > 0)
832 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
837 ir_node *get_Return_res(const ir_node *node, int pos)
839 assert(is_Return(node));
841 assert(get_Return_n_ress(node) > (size_t)pos);
842 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
845 void set_Return_res(ir_node *node, int pos, ir_node *res)
847 assert(is_Return(node));
848 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
851 int (is_Const_null)(const ir_node *node)
853 return _is_Const_null(node);
856 int (is_Const_one)(const ir_node *node)
858 return _is_Const_one(node);
861 int (is_Const_all_one)(const ir_node *node)
863 return _is_Const_all_one(node);
868 symconst_kind get_SymConst_kind(const ir_node *node)
870 assert(is_SymConst(node));
871 return node->attr.symc.kind;
874 void set_SymConst_kind(ir_node *node, symconst_kind kind)
876 assert(is_SymConst(node));
877 node->attr.symc.kind = kind;
880 ir_type *get_SymConst_type(const ir_node *node)
882 /* the cast here is annoying, but we have to compensate for
884 ir_node *irn = (ir_node *)node;
885 assert(is_SymConst(node) &&
886 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
887 return irn->attr.symc.sym.type_p;
890 void set_SymConst_type(ir_node *node, ir_type *tp)
892 assert(is_SymConst(node) &&
893 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
894 node->attr.symc.sym.type_p = tp;
898 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
899 ir_entity *get_SymConst_entity(const ir_node *node)
901 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
902 return node->attr.symc.sym.entity_p;
905 void set_SymConst_entity(ir_node *node, ir_entity *ent)
907 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
908 node->attr.symc.sym.entity_p = ent;
911 ir_enum_const *get_SymConst_enum(const ir_node *node)
913 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
914 return node->attr.symc.sym.enum_p;
917 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
919 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
920 node->attr.symc.sym.enum_p = ec;
923 union symconst_symbol
924 get_SymConst_symbol(const ir_node *node)
926 assert(is_SymConst(node));
927 return node->attr.symc.sym;
930 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
932 assert(is_SymConst(node));
933 node->attr.symc.sym = sym;
936 int get_Sel_n_indexs(const ir_node *node)
938 assert(is_Sel(node));
939 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
942 ir_node **get_Sel_index_arr(ir_node *node)
944 assert(is_Sel(node));
945 if (get_Sel_n_indexs(node) > 0)
946 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
951 ir_node *get_Sel_index(const ir_node *node, int pos)
953 assert(is_Sel(node));
954 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
957 void set_Sel_index(ir_node *node, int pos, ir_node *index)
959 assert(is_Sel(node));
960 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
963 ir_node **get_Call_param_arr(ir_node *node)
965 assert(is_Call(node));
966 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
969 size_t get_Call_n_params(const ir_node *node)
971 assert(is_Call(node));
972 return (size_t) (get_irn_arity(node) - CALL_PARAM_OFFSET);
975 ir_node *get_Call_param(const ir_node *node, int pos)
977 assert(is_Call(node));
978 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
981 void set_Call_param(ir_node *node, int pos, ir_node *param)
983 assert(is_Call(node));
984 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
987 ir_node **get_Builtin_param_arr(ir_node *node)
989 assert(is_Builtin(node));
990 return &get_irn_in(node)[BUILTIN_PARAM_OFFSET + 1];
993 int get_Builtin_n_params(const ir_node *node)
995 assert(is_Builtin(node));
996 return (get_irn_arity(node) - BUILTIN_PARAM_OFFSET);
999 ir_node *get_Builtin_param(const ir_node *node, int pos)
1001 assert(is_Builtin(node));
1002 return get_irn_n(node, pos + BUILTIN_PARAM_OFFSET);
1005 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
1007 assert(is_Builtin(node));
1008 set_irn_n(node, pos + BUILTIN_PARAM_OFFSET, param);
1011 /* Returns a human readable string for the ir_builtin_kind. */
1012 const char *get_builtin_kind_name(ir_builtin_kind kind)
1014 #define X(a) case a: return #a
1017 X(ir_bk_debugbreak);
1018 X(ir_bk_return_address);
1019 X(ir_bk_frame_address);
1029 X(ir_bk_inner_trampoline);
1036 int Call_has_callees(const ir_node *node)
1038 assert(is_Call(node));
1039 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1040 (node->attr.call.callee_arr != NULL));
1043 size_t get_Call_n_callees(const ir_node *node)
1045 assert(is_Call(node) && node->attr.call.callee_arr);
1046 return ARR_LEN(node->attr.call.callee_arr);
1049 ir_entity *get_Call_callee(const ir_node *node, size_t pos)
1051 assert(pos < get_Call_n_callees(node));
1052 return node->attr.call.callee_arr[pos];
1055 void set_Call_callee_arr(ir_node *node, size_t n, ir_entity ** arr)
1057 ir_graph *irg = get_irn_irg(node);
1059 assert(is_Call(node));
1060 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1061 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, irg->obst, n);
1063 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1066 void remove_Call_callee_arr(ir_node *node)
1068 assert(is_Call(node));
1069 node->attr.call.callee_arr = NULL;
1072 /* Checks for upcast.
1074 * Returns true if the Cast node casts a class type to a super type.
1076 int is_Cast_upcast(ir_node *node)
1078 ir_type *totype = get_Cast_type(node);
1079 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1081 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1084 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1085 totype = get_pointer_points_to_type(totype);
1086 fromtype = get_pointer_points_to_type(fromtype);
1091 if (!is_Class_type(totype)) return 0;
1092 return is_SubClass_of(fromtype, totype);
1095 /* Checks for downcast.
1097 * Returns true if the Cast node casts a class type to a sub type.
1099 int is_Cast_downcast(ir_node *node)
1101 ir_type *totype = get_Cast_type(node);
1102 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1104 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1107 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1108 totype = get_pointer_points_to_type(totype);
1109 fromtype = get_pointer_points_to_type(fromtype);
1114 if (!is_Class_type(totype)) return 0;
1115 return is_SubClass_of(totype, fromtype);
1118 int (is_unop)(const ir_node *node)
1120 return _is_unop(node);
1123 ir_node *get_unop_op(const ir_node *node)
1125 if (node->op->opar == oparity_unary)
1126 return get_irn_n(node, node->op->op_index);
1128 assert(node->op->opar == oparity_unary);
1132 void set_unop_op(ir_node *node, ir_node *op)
1134 if (node->op->opar == oparity_unary)
1135 set_irn_n(node, node->op->op_index, op);
1137 assert(node->op->opar == oparity_unary);
1140 int (is_binop)(const ir_node *node)
1142 return _is_binop(node);
1145 ir_node *get_binop_left(const ir_node *node)
1147 assert(node->op->opar == oparity_binary);
1148 return get_irn_n(node, node->op->op_index);
1151 void set_binop_left(ir_node *node, ir_node *left)
1153 assert(node->op->opar == oparity_binary);
1154 set_irn_n(node, node->op->op_index, left);
1157 ir_node *get_binop_right(const ir_node *node)
1159 assert(node->op->opar == oparity_binary);
1160 return get_irn_n(node, node->op->op_index + 1);
1163 void set_binop_right(ir_node *node, ir_node *right)
1165 assert(node->op->opar == oparity_binary);
1166 set_irn_n(node, node->op->op_index + 1, right);
1169 int is_Phi0(const ir_node *n)
1173 return ((get_irn_op(n) == op_Phi) &&
1174 (get_irn_arity(n) == 0) &&
1175 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1178 ir_node **get_Phi_preds_arr(ir_node *node)
1180 assert(is_Phi(node));
1181 return (ir_node **)&(get_irn_in(node)[1]);
1184 int get_Phi_n_preds(const ir_node *node)
1186 assert(is_Phi(node) || is_Phi0(node));
1187 return (get_irn_arity(node));
1190 ir_node *get_Phi_pred(const ir_node *node, int pos)
1192 assert(is_Phi(node) || is_Phi0(node));
1193 return get_irn_n(node, pos);
1196 void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1198 assert(is_Phi(node) || is_Phi0(node));
1199 set_irn_n(node, pos, pred);
1202 ir_node *(get_Phi_next)(const ir_node *phi)
1204 return _get_Phi_next(phi);
1207 void (set_Phi_next)(ir_node *phi, ir_node *next)
1209 _set_Phi_next(phi, next);
1212 int is_memop(const ir_node *node)
1214 unsigned code = get_irn_opcode(node);
1215 return (code == iro_Load || code == iro_Store);
1218 ir_node *get_memop_mem(const ir_node *node)
1220 assert(is_memop(node));
1221 assert(n_Load_mem == 0 && n_Store_mem == 0);
1222 return get_irn_n(node, 0);
1225 void set_memop_mem(ir_node *node, ir_node *mem)
1227 assert(is_memop(node));
1228 assert(n_Load_mem == 0 && n_Store_mem == 0);
1229 set_irn_n(node, 0, mem);
1232 ir_node *get_memop_ptr(const ir_node *node)
1234 assert(is_memop(node));
1235 assert(n_Load_mem == 1 && n_Store_mem == 1);
1236 return get_irn_n(node, 1);
1239 void set_memop_ptr(ir_node *node, ir_node *ptr)
1241 assert(is_memop(node));
1242 assert(n_Load_mem == 1 && n_Store_mem == 1);
1243 set_irn_n(node, 1, ptr);
1247 ir_node **get_Sync_preds_arr(ir_node *node)
1249 assert(is_Sync(node));
1250 return (ir_node **)&(get_irn_in(node)[1]);
1253 int get_Sync_n_preds(const ir_node *node)
1255 assert(is_Sync(node));
1256 return (get_irn_arity(node));
1260 void set_Sync_n_preds(ir_node *node, int n_preds)
1262 assert(is_Sync(node));
1266 ir_node *get_Sync_pred(const ir_node *node, int pos)
1268 assert(is_Sync(node));
1269 return get_irn_n(node, pos);
1272 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
1274 assert(is_Sync(node));
1275 set_irn_n(node, pos, pred);
1278 /* Add a new Sync predecessor */
1279 void add_Sync_pred(ir_node *node, ir_node *pred)
1281 assert(is_Sync(node));
1282 add_irn_n(node, pred);
1285 int (is_arg_Proj)(const ir_node *node)
1287 return _is_arg_Proj(node);
1290 int is_x_except_Proj(const ir_node *node)
1295 pred = get_Proj_pred(node);
1296 if (!is_fragile_op(pred))
1298 return get_Proj_proj(node) == pred->op->pn_x_except;
1301 int is_x_regular_Proj(const ir_node *node)
1306 pred = get_Proj_pred(node);
1307 if (!is_fragile_op(pred))
1309 return get_Proj_proj(node) == pred->op->pn_x_regular;
1312 void ir_set_throws_exception(ir_node *node, int throws_exception)
1314 except_attr *attr = &node->attr.except;
1315 assert(is_fragile_op(node));
1316 attr->throws_exception = throws_exception;
1319 int ir_throws_exception(const ir_node *node)
1321 const except_attr *attr = &node->attr.except;
1322 assert(is_fragile_op(node));
1323 return attr->throws_exception;
1326 ir_node **get_Tuple_preds_arr(ir_node *node)
1328 assert(is_Tuple(node));
1329 return (ir_node **)&(get_irn_in(node)[1]);
1332 int get_Tuple_n_preds(const ir_node *node)
1334 assert(is_Tuple(node));
1335 return get_irn_arity(node);
1338 ir_node *get_Tuple_pred(const ir_node *node, int pos)
1340 assert(is_Tuple(node));
1341 return get_irn_n(node, pos);
1344 void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
1346 assert(is_Tuple(node));
1347 set_irn_n(node, pos, pred);
1350 int get_ASM_n_input_constraints(const ir_node *node)
1352 assert(is_ASM(node));
1353 return ARR_LEN(node->attr.assem.input_constraints);
1356 int get_ASM_n_output_constraints(const ir_node *node)
1358 assert(is_ASM(node));
1359 return ARR_LEN(node->attr.assem.output_constraints);
1362 int get_ASM_n_clobbers(const ir_node *node)
1364 assert(is_ASM(node));
1365 return ARR_LEN(node->attr.assem.clobbers);
1368 /* returns the graph of a node */
1369 ir_graph *(get_irn_irg)(const ir_node *node)
1371 return _get_irn_irg(node);
1375 /*----------------------------------------------------------------*/
1376 /* Auxiliary routines */
1377 /*----------------------------------------------------------------*/
1379 ir_node *skip_Proj(ir_node *node)
1381 /* don't assert node !!! */
1386 node = get_Proj_pred(node);
1392 skip_Proj_const(const ir_node *node)
1394 /* don't assert node !!! */
1399 node = get_Proj_pred(node);
1404 ir_node *skip_Tuple(ir_node *node)
1409 if (is_Proj(node)) {
1410 pred = get_Proj_pred(node);
1412 if (is_Proj(pred)) { /* nested Tuple ? */
1413 pred = skip_Tuple(pred);
1415 if (is_Tuple(pred)) {
1416 node = get_Tuple_pred(pred, get_Proj_proj(node));
1419 } else if (is_Tuple(pred)) {
1420 node = get_Tuple_pred(pred, get_Proj_proj(node));
1427 /* returns operand of node if node is a Cast */
1428 ir_node *skip_Cast(ir_node *node)
1431 return get_Cast_op(node);
1435 /* returns operand of node if node is a Cast */
1436 const ir_node *skip_Cast_const(const ir_node *node)
1439 return get_Cast_op(node);
1443 /* returns operand of node if node is a Pin */
1444 ir_node *skip_Pin(ir_node *node)
1447 return get_Pin_op(node);
1451 /* returns operand of node if node is a Confirm */
1452 ir_node *skip_Confirm(ir_node *node)
1454 if (is_Confirm(node))
1455 return get_Confirm_value(node);
1459 /* skip all high-level ops */
1460 ir_node *skip_HighLevel_ops(ir_node *node)
1462 while (is_op_highlevel(get_irn_op(node))) {
1463 node = get_irn_n(node, 0);
1469 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1470 * than any other approach, as Id chains are resolved and all point to the real node, or
1471 * all id's are self loops.
1473 * Note: This function takes 10% of mostly ANY the compiler run, so it's
1474 * a little bit "hand optimized".
1476 ir_node *skip_Id(ir_node *node)
/* Follow a chain of Id nodes to the real node.  Id cycles are
 * compacted into self-loops; Id chains are shortened so every Id on
 * the path ends up pointing directly at the chain end (path
 * compression).  NOTE(review): the declaration of pred and the
 * assignment of rem_pred (presumably = pred) are not visible in this
 * excerpt -- confirm against the full source. */
1479 /* don't assert node !!! */
1481 if (!node || (node->op != op_Id)) return node;
1483 /* Don't use get_Id_pred(): We get into an endless loop for
1484 self-referencing Ids. */
1485 pred = node->in[0+1];
1487 if (pred->op != op_Id) return pred;
1489 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1490 ir_node *rem_pred, *res;
1492 if (pred->op != op_Id) return pred; /* shortcut */
1495 assert(get_irn_arity (node) > 0);
/* temporarily become a self-loop so the recursion terminates on cycles */
1497 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1498 res = skip_Id(rem_pred);
1499 if (is_Id(res)) /* self-loop */ return node;
1501 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1508 int (is_strictConv)(const ir_node *node)
1510 return _is_strictConv(node);
1513 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
1514 int (is_SymConst_addr_ent)(const ir_node *node)
1516 return _is_SymConst_addr_ent(node);
1519 /* Returns true if the operation manipulates control flow. */
1520 int is_cfop(const ir_node *node)
1522 if (is_fragile_op(node) && ir_throws_exception(node))
1525 return is_op_cfopcode(get_irn_op(node));
1528 int is_unknown_jump(const ir_node *node)
1530 return is_op_unknown_jump(get_irn_op(node));
1533 /* Returns true if the operation can change the control flow because
1535 int is_fragile_op(const ir_node *node)
1537 return is_op_fragile(get_irn_op(node));
1540 /* Returns the memory operand of fragile operations. */
1541 ir_node *get_fragile_op_mem(ir_node *node)
1543 assert(node && is_fragile_op(node));
1544 return get_irn_n(node, node->op->fragile_mem_index);
1547 /* Returns true if the operation is a forking control flow operation. */
1548 int (is_irn_forking)(const ir_node *node)
1550 return _is_irn_forking(node);
1553 void (copy_node_attr)(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
1555 _copy_node_attr(irg, old_node, new_node);
1558 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
1560 ir_type *(get_irn_type_attr)(ir_node *node)
1562 return _get_irn_type_attr(node);
1565 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
1566 ir_entity *(get_irn_entity_attr)(ir_node *node)
1568 return _get_irn_entity_attr(node);
1571 /* Returns non-zero for constant-like nodes. */
1572 int (is_irn_constlike)(const ir_node *node)
1574 return _is_irn_constlike(node);
1578 * Returns non-zero for nodes that are allowed to have keep-alives and
1579 * are neither Block nor PhiM.
1581 int (is_irn_keep)(const ir_node *node)
1583 return _is_irn_keep(node);
1587 * Returns non-zero for nodes that are always placed in the start block.
1589 int (is_irn_start_block_placed)(const ir_node *node)
1591 return _is_irn_start_block_placed(node);
1594 /* Returns non-zero for nodes that are machine operations. */
1595 int (is_irn_machine_op)(const ir_node *node)
1597 return _is_irn_machine_op(node);
1600 /* Returns non-zero for nodes that are machine operands. */
1601 int (is_irn_machine_operand)(const ir_node *node)
1603 return _is_irn_machine_operand(node);
1606 /* Returns non-zero for nodes that have the n'th user machine flag set. */
1607 int (is_irn_machine_user)(const ir_node *node, unsigned n)
1609 return _is_irn_machine_user(node, n);
1612 /* Returns non-zero for nodes that are CSE neutral to its users. */
1613 int (is_irn_cse_neutral)(const ir_node *node)
1615 return _is_irn_cse_neutral(node);
1618 /* Gets the string representation of the jump prediction .*/
1619 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
1621 #define X(a) case a: return #a
1623 X(COND_JMP_PRED_NONE);
1624 X(COND_JMP_PRED_TRUE);
1625 X(COND_JMP_PRED_FALSE);
1631 /** Return the attribute type of a SymConst node if exists */
1632 static ir_type *get_SymConst_attr_type(const ir_node *self)
1634 symconst_kind kind = get_SymConst_kind(self);
1635 if (SYMCONST_HAS_TYPE(kind))
1636 return get_SymConst_type(self);
1640 /** Return the attribute entity of a SymConst node if exists */
1641 static ir_entity *get_SymConst_attr_entity(const ir_node *self)
1643 symconst_kind kind = get_SymConst_kind(self);
1644 if (SYMCONST_HAS_ENT(kind))
1645 return get_SymConst_entity(self);
1649 /** the get_type_attr operation must be always implemented */
1650 static ir_type *get_Null_type(const ir_node *n)
1653 return firm_unknown_type;
1656 /* Sets the get_type operation for an ir_op_ops. */
1657 ir_op_ops *firm_set_default_get_type_attr(unsigned code, ir_op_ops *ops)
1660 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
1661 case iro_Call: ops->get_type_attr = get_Call_type; break;
1662 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
1663 case iro_Free: ops->get_type_attr = get_Free_type; break;
1664 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
1666 /* not allowed to be NULL */
1667 if (! ops->get_type_attr)
1668 ops->get_type_attr = get_Null_type;
1674 /** the get_entity_attr operation must be always implemented */
1675 static ir_entity *get_Null_ent(const ir_node *n)
1681 /* Sets the get_type operation for an ir_op_ops. */
1682 ir_op_ops *firm_set_default_get_entity_attr(unsigned code, ir_op_ops *ops)
1685 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
1686 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
1688 /* not allowed to be NULL */
1689 if (! ops->get_entity_attr)
1690 ops->get_entity_attr = get_Null_ent;
1696 /* Sets the debug information of a node. */
1697 void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
1699 _set_irn_dbg_info(n, db);
1703 * Returns the debug information of an node.
1705 * @param n The node.
1707 dbg_info *(get_irn_dbg_info)(const ir_node *n)
1709 return _get_irn_dbg_info(n);
1713 * Calculate a hash value of a node.
1715 unsigned firm_default_hash(const ir_node *node)
1720 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
1721 h = irn_arity = get_irn_arity(node);
1723 /* consider all in nodes... except the block if not a control flow. */
1724 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
1725 ir_node *pred = get_irn_n(node, i);
1726 if (is_irn_cse_neutral(pred))
1729 h = 9*h + HASH_PTR(pred);
1733 h = 9*h + HASH_PTR(get_irn_mode(node));
1735 h = 9*h + HASH_PTR(get_irn_op(node));
1740 /* include generated code */
1741 #include "gen_irnode.c.inl"