2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
32 #include "irgraph_t.h"
34 #include "irbackedge_t.h"
38 #include "iredgekinds.h"
39 #include "iredges_t.h"
49 /* some constants fixing the positions of nodes predecessors
51 #define CALL_PARAM_OFFSET (n_Call_max+1)
52 #define BUILTIN_PARAM_OFFSET (n_Builtin_max+1)
53 #define SEL_INDEX_OFFSET (n_Sel_max+1)
54 #define RETURN_RESULT_OFFSET (n_Return_max+1)
55 #define END_KEEPALIVE_OFFSET 0
57 static const char *relation_names [] = {
69 "unordered_less_equal",
71 "unordered_greater_equal",
76 const char *get_relation_string(ir_relation relation)
78 assert(relation < (ir_relation)ARRAY_SIZE(relation_names));
79 return relation_names[relation];
82 ir_relation get_negated_relation(ir_relation relation)
84 return relation ^ ir_relation_true;
87 ir_relation get_inversed_relation(ir_relation relation)
89 ir_relation code = relation & ~(ir_relation_less|ir_relation_greater);
90 bool less = relation & ir_relation_less;
91 bool greater = relation & ir_relation_greater;
92 code |= (less ? ir_relation_greater : 0) | (greater ? ir_relation_less : 0);
97 * Indicates, whether additional data can be registered to ir nodes.
98 * If set to 1, this is not possible anymore.
100 static int forbid_new_data = 0;
103 * The amount of additional space for custom data to be allocated upon
104 * creating a new node.
106 unsigned firm_add_node_size = 0;
109 /* register new space for every node */
110 unsigned firm_register_additional_node_data(unsigned size)
112 assert(!forbid_new_data && "Too late to register additional node data");
117 return firm_add_node_size += size;
121 void init_irnode(void)
123 /* Forbid the addition of new data to an ir node. */
127 struct struct_align {
137 * irnode constructor.
138 * Create a new irnode in irg, with an op, mode, arity and
139 * some incoming irnodes.
140 * If arity is negative, a node with a dynamic array is created.
/*
 * irnode constructor: allocate a node of operation @p op with @p arity
 * predecessors on the graph's obstack.  NOTE(review): several lines of the
 * original body (declarations, closing braces, dynamic-arity branch) are
 * elided in this extraction — confirm against the repository before editing.
 */
142 ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
143 ir_mode *mode, int arity, ir_node *const *in)
/* round the custom-data size up to the alignment of struct_align so the
 * ir_node that follows the custom data is properly aligned */
146 unsigned align = offsetof(struct struct_align, s) - 1;
147 unsigned add_node_size = (firm_add_node_size + align) & ~align;
148 size_t node_size = offsetof(ir_node, attr) + op->attr_size + add_node_size;
/* custom data lives BEFORE the node: the node pointer is offset into the
 * zero-initialized allocation */
155 p = (char*)obstack_alloc(irg->obst, node_size);
156 memset(p, 0, node_size);
157 res = (ir_node *)(p + add_node_size);
159 res->kind = k_ir_node;
163 res->node_idx = irg_register_node_idx(irg, res);
/* presumably the arity < 0 (dynamic) case — TODO confirm the elided branch */
168 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
170 /* not nice but necessary: End and Sync must always have a flexible array */
171 if (op == op_End || op == op_Sync)
172 res->in = NEW_ARR_F(ir_node *, (arity+1));
/* fixed arity: in-array on the obstack, slot 0 reserved for the block */
174 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
175 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
179 set_irn_dbg_info(res, db);
181 res->node_nr = get_irp_new_node_nr();
/* initialize the out-edge bookkeeping for every edge kind */
183 for (i = 0; i < EDGE_KIND_LAST; ++i) {
184 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
185 /* edges will be build immediately */
186 res->edge_info[i].edges_built = 1;
187 res->edge_info[i].out_count = 0;
190 /* don't put this into the for loop, arity is -1 for some nodes! */
191 edges_notify_edge(res, -1, res->in[0], NULL, irg);
192 for (i = 1; i <= arity; ++i)
193 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
195 hook_new_node(irg, res);
/* backend nodes carry extra register-allocation info */
196 if (get_irg_phase_state(irg) == phase_backend) {
197 be_info_new_node(res);
203 /*-- getting some parameters from ir_nodes --*/
205 int (is_ir_node)(const void *thing)
207 return is_ir_node_(thing);
210 int (get_irn_arity)(const ir_node *node)
212 return get_irn_arity_(node);
215 /* Returns the array with ins. This array is shifted with respect to the
216 array accessed by get_irn_n: The block operand is at position 0 not -1.
217 (@@@ This should be changed.)
218 The order of the predecessors in this array is not guaranteed, except that
219 lists of operands as predecessors of Block or arguments of a Call are
221 ir_node **get_irn_in(const ir_node *node)
226 void set_irn_in(ir_node *node, int arity, ir_node **in)
230 ir_graph *irg = get_irn_irg(node);
235 assert(node != NULL && node->kind == k_ir_node);
237 for (i = 0; i < arity; ++i) {
238 assert(in[i] != NULL && in[0]->kind == k_ir_node);
242 for (i = 0; i < arity; i++) {
243 if (i < (int)ARR_LEN(*pOld_in)-1)
244 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
246 edges_notify_edge(node, i, in[i], NULL, irg);
248 for (;i < (int)ARR_LEN(*pOld_in)-1; i++) {
249 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
252 if (arity != (int)ARR_LEN(*pOld_in) - 1) {
253 ir_node * block = (*pOld_in)[0];
254 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
255 (*pOld_in)[0] = block;
257 fix_backedges(irg->obst, node);
259 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
261 /* update irg flags */
262 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
265 ir_node *(get_irn_n)(const ir_node *node, int n)
267 return get_irn_n_(node, n);
270 void set_irn_n(ir_node *node, int n, ir_node *in)
272 ir_graph *irg = get_irn_irg(node);
273 assert(node && node->kind == k_ir_node);
275 assert(n < get_irn_arity(node));
276 assert(in && in->kind == k_ir_node);
279 hook_set_irn_n(node, n, in, node->in[n + 1]);
281 /* Here, we rely on src and tgt being in the current ir graph */
282 edges_notify_edge(node, n, in, node->in[n + 1], irg);
284 node->in[n + 1] = in;
286 /* update irg flags */
287 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
290 int add_irn_n(ir_node *node, ir_node *in)
293 ir_graph *irg = get_irn_irg(node);
295 assert(node->op->opar == oparity_dynamic);
296 pos = ARR_LEN(node->in) - 1;
297 ARR_APP1(ir_node *, node->in, in);
298 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
301 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
306 void del_Sync_n(ir_node *n, int i)
308 int arity = get_Sync_n_preds(n);
309 ir_node *last_pred = get_Sync_pred(n, arity - 1);
310 set_Sync_pred(n, i, last_pred);
311 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
312 ARR_SHRINKLEN(get_irn_in(n), arity);
315 int (get_irn_deps)(const ir_node *node)
317 return get_irn_deps_(node);
320 ir_node *(get_irn_dep)(const ir_node *node, int pos)
322 return get_irn_dep_(node, pos);
325 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
327 set_irn_dep_(node, pos, dep);
/*
 * Add a dependency edge from @p node to @p dep (backend phase only).
 * NOTE(review): loop-variable declarations, the `res` computation and the
 * early-exit/`break` lines are elided in this extraction — confirm against
 * the repository.  Presumably returns the dep position — TODO confirm.
 */
330 int add_irn_dep(ir_node *node, ir_node *dep)
334 /* DEP edges are only allowed in backend phase */
335 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
/* lazily create the dep array on first use */
336 if (node->deps == NULL) {
337 node->deps = NEW_ARR_F(ir_node *, 1);
/* scan existing deps: remember the first free (NULL) slot, bail out if
 * dep is already present */
343 for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
344 if (node->deps[i] == NULL)
347 if (node->deps[i] == dep)
/* reuse a free slot if one was found, otherwise append */
351 if (first_zero >= 0) {
352 node->deps[first_zero] = dep;
355 ARR_APP1(ir_node *, node->deps, dep);
/* register the new DEP edge with the out-edge bookkeeping */
360 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
365 void add_irn_deps(ir_node *tgt, ir_node *src)
369 for (i = 0, n = get_irn_deps(src); i < n; ++i)
370 add_irn_dep(tgt, get_irn_dep(src, i));
374 ir_mode *(get_irn_mode)(const ir_node *node)
376 return get_irn_mode_(node);
379 void (set_irn_mode)(ir_node *node, ir_mode *mode)
381 set_irn_mode_(node, mode);
384 ir_op *(get_irn_op)(const ir_node *node)
386 return get_irn_op_(node);
389 /* should be private to the library: */
390 void (set_irn_op)(ir_node *node, ir_op *op)
392 set_irn_op_(node, op);
395 unsigned (get_irn_opcode)(const ir_node *node)
397 return get_irn_opcode_(node);
400 const char *get_irn_opname(const ir_node *node)
403 if (is_Phi0(node)) return "Phi0";
404 return get_id_str(node->op->name);
407 ident *get_irn_opident(const ir_node *node)
410 return node->op->name;
413 ir_visited_t (get_irn_visited)(const ir_node *node)
415 return get_irn_visited_(node);
418 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
420 set_irn_visited_(node, visited);
423 void (mark_irn_visited)(ir_node *node)
425 mark_irn_visited_(node);
428 int (irn_visited)(const ir_node *node)
430 return irn_visited_(node);
433 int (irn_visited_else_mark)(ir_node *node)
435 return irn_visited_else_mark_(node);
438 void (set_irn_link)(ir_node *node, void *link)
440 set_irn_link_(node, link);
443 void *(get_irn_link)(const ir_node *node)
445 return get_irn_link_(node);
448 op_pin_state (get_irn_pinned)(const ir_node *node)
450 return get_irn_pinned_(node);
453 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
455 return is_irn_pinned_in_irg_(node);
458 void set_irn_pinned(ir_node *node, op_pin_state state)
460 /* due to optimization an opt may be turned into a Tuple */
464 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
465 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
467 node->attr.except.pin_state = state;
470 /* Outputs a unique number for this node */
471 long get_irn_node_nr(const ir_node *node)
474 return node->node_nr;
477 void *(get_irn_generic_attr)(ir_node *node)
479 assert(is_ir_node(node));
480 return get_irn_generic_attr_(node);
483 const void *(get_irn_generic_attr_const)(const ir_node *node)
485 assert(is_ir_node(node));
486 return get_irn_generic_attr_const_(node);
489 unsigned (get_irn_idx)(const ir_node *node)
491 assert(is_ir_node(node));
492 return get_irn_idx_(node);
495 int get_irn_pred_pos(ir_node *node, ir_node *arg)
498 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
499 if (get_irn_n(node, i) == arg)
505 /** manipulate fields of individual nodes **/
507 ir_node *(get_nodes_block)(const ir_node *node)
509 return get_nodes_block_(node);
512 void set_nodes_block(ir_node *node, ir_node *block)
514 assert(node->op != op_Block);
515 set_irn_n(node, -1, block);
518 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
519 * from Start. If so returns frame type, else Null. */
520 ir_type *is_frame_pointer(const ir_node *n)
522 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
523 ir_node *start = get_Proj_pred(n);
524 if (is_Start(start)) {
525 return get_irg_frame_type(get_irn_irg(start));
531 ir_node **get_Block_cfgpred_arr(ir_node *node)
533 assert(is_Block(node));
534 return (ir_node **)&(get_irn_in(node)[1]);
537 int (get_Block_n_cfgpreds)(const ir_node *node)
539 return get_Block_n_cfgpreds_(node);
542 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
544 return get_Block_cfgpred_(node, pos);
547 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
549 assert(is_Block(node));
550 set_irn_n(node, pos, pred);
553 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
557 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
558 if (get_Block_cfgpred_block(block, i) == pred)
564 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
566 return get_Block_cfgpred_block_(node, pos);
569 int get_Block_matured(const ir_node *node)
571 assert(is_Block(node));
572 return (int)node->attr.block.is_matured;
575 void set_Block_matured(ir_node *node, int matured)
577 assert(is_Block(node));
578 node->attr.block.is_matured = matured;
581 ir_visited_t (get_Block_block_visited)(const ir_node *node)
583 return get_Block_block_visited_(node);
586 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
588 set_Block_block_visited_(node, visit);
591 void (mark_Block_block_visited)(ir_node *node)
593 mark_Block_block_visited_(node);
596 int (Block_block_visited)(const ir_node *node)
598 return Block_block_visited_(node);
601 ir_extblk *get_Block_extbb(const ir_node *block)
604 assert(is_Block(block));
605 res = block->attr.block.extblk;
606 assert(res == NULL || is_ir_extbb(res));
610 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
612 assert(is_Block(block));
613 assert(extblk == NULL || is_ir_extbb(extblk));
614 block->attr.block.extblk = extblk;
617 /* returns the graph of a Block. */
618 ir_graph *(get_Block_irg)(const ir_node *block)
620 return get_Block_irg_(block);
623 ir_entity *create_Block_entity(ir_node *block)
626 assert(is_Block(block));
628 entity = block->attr.block.entity;
629 if (entity == NULL) {
630 ir_label_t nr = get_irp_next_label_nr();
631 entity = new_label_entity(nr);
632 set_entity_visibility(entity, ir_visibility_local);
633 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
634 set_entity_compiler_generated(entity, 1);
636 block->attr.block.entity = entity;
641 ir_node *(get_Block_phis)(const ir_node *block)
643 return get_Block_phis_(block);
646 void (set_Block_phis)(ir_node *block, ir_node *phi)
648 set_Block_phis_(block, phi);
651 void (add_Block_phi)(ir_node *block, ir_node *phi)
653 add_Block_phi_(block, phi);
656 /* Get the Block mark (single bit). */
657 unsigned (get_Block_mark)(const ir_node *block)
659 return get_Block_mark_(block);
662 /* Set the Block mark (single bit). */
663 void (set_Block_mark)(ir_node *block, unsigned mark)
665 set_Block_mark_(block, mark);
668 int get_End_n_keepalives(const ir_node *end)
671 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
674 ir_node *get_End_keepalive(const ir_node *end, int pos)
677 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
680 void add_End_keepalive(ir_node *end, ir_node *ka)
686 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
689 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
692 /* Set new keep-alives */
693 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
697 ir_graph *irg = get_irn_irg(end);
699 /* notify that edges are deleted */
700 for (e = END_KEEPALIVE_OFFSET; e < ARR_LEN(end->in) - 1; ++e) {
701 edges_notify_edge(end, e, NULL, end->in[e + 1], irg);
703 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
705 for (i = 0; i < n; ++i) {
706 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
707 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
710 /* update irg flags */
711 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
714 /* Set new keep-alives from old keep-alives, skipping irn */
/*
 * Remove @p irn from the keep-alive set of the End node @p end by swapping
 * the last keep-alive into its slot and shrinking the array.
 * NOTE(review): the search-loop body (the match/`idx` assignment, break and
 * early return when irn is absent) is elided in this extraction — confirm
 * against the repository.
 */
715 void remove_End_keepalive(ir_node *end, ir_node *irn)
717 int n = get_End_n_keepalives(end);
/* scan keep-alives from the back looking for irn; presumably sets idx on
 * a match — TODO confirm elided loop body */
722 for (i = n -1; i >= 0; --i) {
723 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
733 irg = get_irn_irg(end);
735 /* remove the edge */
736 edges_notify_edge(end, idx, NULL, irn, irg);
739 /* exchange with the last one */
740 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
741 edges_notify_edge(end, n - 1, NULL, old, irg);
742 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
743 edges_notify_edge(end, idx, old, NULL, irg);
745 /* now n - 1 keeps, 1 block input */
746 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
748 /* update irg flags */
749 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
752 /* remove Bads, NoMems and doublets from the keep-alive set */
/*
 * Remove Bad, NoMem and duplicate entries from the keep-alive set of the
 * End node @p end, using a pset to detect duplicates.
 * NOTE(review): declarations (irg, keeps), the `--n`/`changed = true`
 * bookkeeping and the `if (changed)` guard around the resize are elided in
 * this extraction — confirm against the repository.
 */
753 void remove_End_Bads_and_doublets(ir_node *end)
756 int idx, n = get_End_n_keepalives(end);
758 bool changed = false;
763 irg = get_irn_irg(end);
764 pset_new_init(&keeps);
/* iterate from the back so the swap-with-last removal is safe */
766 for (idx = n - 1; idx >= 0; --idx) {
767 ir_node *ka = get_End_keepalive(end, idx);
/* drop Bads, NoMems and anything already seen */
769 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
771 /* remove the edge */
772 edges_notify_edge(end, idx, NULL, ka, irg);
775 /* exchange with the last one */
776 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
777 edges_notify_edge(end, n - 1, NULL, old, irg);
778 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
779 edges_notify_edge(end, idx, old, NULL, irg);
/* first occurrence: remember it for duplicate detection */
783 pset_new_insert(&keeps, ka);
786 /* n keeps, 1 block input */
787 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
789 pset_new_destroy(&keeps);
792 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
796 void free_End(ir_node *end)
801 end->in = NULL; /* @@@ make sure we get an error if we use the
802 in array afterwards ... */
805 size_t get_Return_n_ress(const ir_node *node)
807 assert(is_Return(node));
808 return (size_t)(get_irn_arity(node) - RETURN_RESULT_OFFSET);
811 ir_node **get_Return_res_arr(ir_node *node)
813 assert(is_Return(node));
814 if (get_Return_n_ress(node) > 0)
815 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
820 ir_node *get_Return_res(const ir_node *node, int pos)
822 assert(is_Return(node));
824 assert(get_Return_n_ress(node) > (size_t)pos);
825 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
828 void set_Return_res(ir_node *node, int pos, ir_node *res)
830 assert(is_Return(node));
831 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
834 int (is_Const_null)(const ir_node *node)
836 return is_Const_null_(node);
839 int (is_Const_one)(const ir_node *node)
841 return is_Const_one_(node);
844 int (is_Const_all_one)(const ir_node *node)
846 return is_Const_all_one_(node);
851 symconst_kind get_SymConst_kind(const ir_node *node)
853 assert(is_SymConst(node));
854 return node->attr.symc.kind;
857 void set_SymConst_kind(ir_node *node, symconst_kind kind)
859 assert(is_SymConst(node));
860 node->attr.symc.kind = kind;
863 ir_type *get_SymConst_type(const ir_node *node)
865 /* the cast here is annoying, but we have to compensate for
867 ir_node *irn = (ir_node *)node;
868 assert(is_SymConst(node) &&
869 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
870 return irn->attr.symc.sym.type_p;
873 void set_SymConst_type(ir_node *node, ir_type *tp)
875 assert(is_SymConst(node) &&
876 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
877 node->attr.symc.sym.type_p = tp;
881 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
882 ir_entity *get_SymConst_entity(const ir_node *node)
884 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
885 return node->attr.symc.sym.entity_p;
888 void set_SymConst_entity(ir_node *node, ir_entity *ent)
890 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
891 node->attr.symc.sym.entity_p = ent;
894 ir_enum_const *get_SymConst_enum(const ir_node *node)
896 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
897 return node->attr.symc.sym.enum_p;
900 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
902 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
903 node->attr.symc.sym.enum_p = ec;
906 union symconst_symbol
907 get_SymConst_symbol(const ir_node *node)
909 assert(is_SymConst(node));
910 return node->attr.symc.sym;
913 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
915 assert(is_SymConst(node));
916 node->attr.symc.sym = sym;
919 int get_Sel_n_indexs(const ir_node *node)
921 assert(is_Sel(node));
922 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
925 ir_node **get_Sel_index_arr(ir_node *node)
927 assert(is_Sel(node));
928 if (get_Sel_n_indexs(node) > 0)
929 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
934 ir_node *get_Sel_index(const ir_node *node, int pos)
936 assert(is_Sel(node));
937 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
940 void set_Sel_index(ir_node *node, int pos, ir_node *index)
942 assert(is_Sel(node));
943 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
946 ir_node **get_Call_param_arr(ir_node *node)
948 assert(is_Call(node));
949 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
952 size_t get_Call_n_params(const ir_node *node)
954 assert(is_Call(node));
955 return (size_t) (get_irn_arity(node) - CALL_PARAM_OFFSET);
958 ir_node *get_Call_param(const ir_node *node, int pos)
960 assert(is_Call(node));
961 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
964 void set_Call_param(ir_node *node, int pos, ir_node *param)
966 assert(is_Call(node));
967 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
970 ir_node **get_Builtin_param_arr(ir_node *node)
972 assert(is_Builtin(node));
973 return &get_irn_in(node)[BUILTIN_PARAM_OFFSET + 1];
976 int get_Builtin_n_params(const ir_node *node)
978 assert(is_Builtin(node));
979 return (get_irn_arity(node) - BUILTIN_PARAM_OFFSET);
982 ir_node *get_Builtin_param(const ir_node *node, int pos)
984 assert(is_Builtin(node));
985 return get_irn_n(node, pos + BUILTIN_PARAM_OFFSET);
988 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
990 assert(is_Builtin(node));
991 set_irn_n(node, pos + BUILTIN_PARAM_OFFSET, param);
994 /* Returns a human readable string for the ir_builtin_kind. */
995 const char *get_builtin_kind_name(ir_builtin_kind kind)
997 #define X(a) case a: return #a
1000 X(ir_bk_debugbreak);
1001 X(ir_bk_return_address);
1002 X(ir_bk_frame_address);
1012 X(ir_bk_inner_trampoline);
1019 int Call_has_callees(const ir_node *node)
1021 assert(is_Call(node));
1022 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1023 (node->attr.call.callee_arr != NULL));
1026 size_t get_Call_n_callees(const ir_node *node)
1028 assert(is_Call(node) && node->attr.call.callee_arr);
1029 return ARR_LEN(node->attr.call.callee_arr);
1032 ir_entity *get_Call_callee(const ir_node *node, size_t pos)
1034 assert(pos < get_Call_n_callees(node));
1035 return node->attr.call.callee_arr[pos];
1038 void set_Call_callee_arr(ir_node *node, size_t n, ir_entity ** arr)
1040 ir_graph *irg = get_irn_irg(node);
1042 assert(is_Call(node));
1043 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1044 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, irg->obst, n);
1046 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1049 void remove_Call_callee_arr(ir_node *node)
1051 assert(is_Call(node));
1052 node->attr.call.callee_arr = NULL;
1055 /* Checks for upcast.
1057 * Returns true if the Cast node casts a class type to a super type.
1059 int is_Cast_upcast(ir_node *node)
1061 ir_type *totype = get_Cast_type(node);
1062 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1064 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1067 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1068 totype = get_pointer_points_to_type(totype);
1069 fromtype = get_pointer_points_to_type(fromtype);
1074 if (!is_Class_type(totype)) return 0;
1075 return is_SubClass_of(fromtype, totype);
1078 /* Checks for downcast.
1080 * Returns true if the Cast node casts a class type to a sub type.
1082 int is_Cast_downcast(ir_node *node)
1084 ir_type *totype = get_Cast_type(node);
1085 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1087 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1090 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1091 totype = get_pointer_points_to_type(totype);
1092 fromtype = get_pointer_points_to_type(fromtype);
1097 if (!is_Class_type(totype)) return 0;
1098 return is_SubClass_of(totype, fromtype);
1101 int (is_unop)(const ir_node *node)
1103 return is_unop_(node);
1106 ir_node *get_unop_op(const ir_node *node)
1108 if (node->op->opar == oparity_unary)
1109 return get_irn_n(node, node->op->op_index);
1111 assert(node->op->opar == oparity_unary);
1115 void set_unop_op(ir_node *node, ir_node *op)
1117 if (node->op->opar == oparity_unary)
1118 set_irn_n(node, node->op->op_index, op);
1120 assert(node->op->opar == oparity_unary);
1123 int (is_binop)(const ir_node *node)
1125 return is_binop_(node);
1128 ir_node *get_binop_left(const ir_node *node)
1130 assert(node->op->opar == oparity_binary);
1131 return get_irn_n(node, node->op->op_index);
1134 void set_binop_left(ir_node *node, ir_node *left)
1136 assert(node->op->opar == oparity_binary);
1137 set_irn_n(node, node->op->op_index, left);
1140 ir_node *get_binop_right(const ir_node *node)
1142 assert(node->op->opar == oparity_binary);
1143 return get_irn_n(node, node->op->op_index + 1);
1146 void set_binop_right(ir_node *node, ir_node *right)
1148 assert(node->op->opar == oparity_binary);
1149 set_irn_n(node, node->op->op_index + 1, right);
1152 int is_Phi0(const ir_node *n)
1156 return ((get_irn_op(n) == op_Phi) &&
1157 (get_irn_arity(n) == 0) &&
1158 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1161 ir_node **get_Phi_preds_arr(ir_node *node)
1163 assert(is_Phi(node));
1164 return (ir_node **)&(get_irn_in(node)[1]);
1167 int get_Phi_n_preds(const ir_node *node)
1169 assert(is_Phi(node) || is_Phi0(node));
1170 return (get_irn_arity(node));
1173 ir_node *get_Phi_pred(const ir_node *node, int pos)
1175 assert(is_Phi(node) || is_Phi0(node));
1176 return get_irn_n(node, pos);
1179 void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1181 assert(is_Phi(node) || is_Phi0(node));
1182 set_irn_n(node, pos, pred);
1185 ir_node *(get_Phi_next)(const ir_node *phi)
1187 return get_Phi_next_(phi);
1190 void (set_Phi_next)(ir_node *phi, ir_node *next)
1192 set_Phi_next_(phi, next);
1195 int is_memop(const ir_node *node)
1197 return is_op_uses_memory(get_irn_op(node));
1200 ir_node *get_memop_mem(const ir_node *node)
1202 const ir_op *op = get_irn_op(node);
1203 assert(is_memop(node));
1204 return get_irn_n(node, op->memory_index);
1207 void set_memop_mem(ir_node *node, ir_node *mem)
1209 const ir_op *op = get_irn_op(node);
1210 assert(is_memop(node));
1211 set_irn_n(node, op->memory_index, mem);
1214 ir_node **get_Sync_preds_arr(ir_node *node)
1216 assert(is_Sync(node));
1217 return (ir_node **)&(get_irn_in(node)[1]);
1220 int get_Sync_n_preds(const ir_node *node)
1222 assert(is_Sync(node));
1223 return (get_irn_arity(node));
1227 void set_Sync_n_preds(ir_node *node, int n_preds)
1229 assert(is_Sync(node));
1233 ir_node *get_Sync_pred(const ir_node *node, int pos)
1235 assert(is_Sync(node));
1236 return get_irn_n(node, pos);
1239 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
1241 assert(is_Sync(node));
1242 set_irn_n(node, pos, pred);
1245 /* Add a new Sync predecessor */
1246 void add_Sync_pred(ir_node *node, ir_node *pred)
1248 assert(is_Sync(node));
1249 add_irn_n(node, pred);
1252 int (is_arg_Proj)(const ir_node *node)
1254 return is_arg_Proj_(node);
1257 int is_x_except_Proj(const ir_node *node)
1262 pred = get_Proj_pred(node);
1263 if (!is_fragile_op(pred))
1265 return get_Proj_proj(node) == pred->op->pn_x_except;
1268 int is_x_regular_Proj(const ir_node *node)
1273 pred = get_Proj_pred(node);
1274 if (!is_fragile_op(pred))
1276 return get_Proj_proj(node) == pred->op->pn_x_regular;
1279 void ir_set_throws_exception(ir_node *node, int throws_exception)
1281 except_attr *attr = &node->attr.except;
1282 assert(is_fragile_op(node));
1283 attr->throws_exception = throws_exception;
1286 int ir_throws_exception(const ir_node *node)
1288 const except_attr *attr = &node->attr.except;
1289 assert(is_fragile_op(node));
1290 return attr->throws_exception;
1293 ir_node **get_Tuple_preds_arr(ir_node *node)
1295 assert(is_Tuple(node));
1296 return (ir_node **)&(get_irn_in(node)[1]);
1299 int get_Tuple_n_preds(const ir_node *node)
1301 assert(is_Tuple(node));
1302 return get_irn_arity(node);
1305 ir_node *get_Tuple_pred(const ir_node *node, int pos)
1307 assert(is_Tuple(node));
1308 return get_irn_n(node, pos);
1311 void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
1313 assert(is_Tuple(node));
1314 set_irn_n(node, pos, pred);
1317 size_t get_ASM_n_input_constraints(const ir_node *node)
1319 assert(is_ASM(node));
1320 return ARR_LEN(node->attr.assem.input_constraints);
1323 size_t get_ASM_n_output_constraints(const ir_node *node)
1325 assert(is_ASM(node));
1326 return ARR_LEN(node->attr.assem.output_constraints);
1329 size_t get_ASM_n_clobbers(const ir_node *node)
1331 assert(is_ASM(node));
1332 return ARR_LEN(node->attr.assem.clobbers);
1335 /* returns the graph of a node */
1336 ir_graph *(get_irn_irg)(const ir_node *node)
1338 return get_irn_irg_(node);
1342 /*----------------------------------------------------------------*/
1343 /* Auxiliary routines */
1344 /*----------------------------------------------------------------*/
1346 ir_node *skip_Proj(ir_node *node)
1348 /* don't assert node !!! */
1353 node = get_Proj_pred(node);
1359 skip_Proj_const(const ir_node *node)
1361 /* don't assert node !!! */
1366 node = get_Proj_pred(node);
1371 ir_node *skip_Tuple(ir_node *node)
1376 if (is_Proj(node)) {
1377 pred = get_Proj_pred(node);
1379 if (is_Proj(pred)) { /* nested Tuple ? */
1380 pred = skip_Tuple(pred);
1382 if (is_Tuple(pred)) {
1383 node = get_Tuple_pred(pred, get_Proj_proj(node));
1386 } else if (is_Tuple(pred)) {
1387 node = get_Tuple_pred(pred, get_Proj_proj(node));
1394 /* returns operand of node if node is a Cast */
1395 ir_node *skip_Cast(ir_node *node)
1398 return get_Cast_op(node);
1402 /* returns operand of node if node is a Cast */
1403 const ir_node *skip_Cast_const(const ir_node *node)
1406 return get_Cast_op(node);
1410 /* returns operand of node if node is a Pin */
1411 ir_node *skip_Pin(ir_node *node)
1414 return get_Pin_op(node);
1418 /* returns operand of node if node is a Confirm */
1419 ir_node *skip_Confirm(ir_node *node)
1421 if (is_Confirm(node))
1422 return get_Confirm_value(node);
1426 /* skip all high-level ops */
1427 ir_node *skip_HighLevel_ops(ir_node *node)
1429 while (is_op_highlevel(get_irn_op(node))) {
1430 node = get_irn_n(node, 0);
1436 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1437 * than any other approach, as Id chains are resolved and all point to the real node, or
1438 * all id's are self loops.
1440 * Note: This function takes 10% of mostly ANY the compiler run, so it's
1441 * a little bit "hand optimized".
/*
 * Resolve a chain of Id nodes to the real node at its end, rewriting the
 * chain so every Id points directly at the end (path compression).  The
 * exact statement order matters: the node is temporarily turned into a
 * self-loop so cyclic Id chains terminate.  NOTE(review): the `pred`
 * declaration and the `rem_pred = pred;` line are elided in this
 * extraction — confirm against the repository.
 */
1443 ir_node *skip_Id(ir_node *node)
1446 /* don't assert node !!! */
1448 if (!node || (node->op != op_Id)) return node;
1450 /* Don't use get_Id_pred(): We get into an endless loop for
1451 self-referencing Ids. */
1452 pred = node->in[0+1];
1454 if (pred->op != op_Id) return pred;
1456 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1457 ir_node *rem_pred, *res;
1459 if (pred->op != op_Id) return pred; /* shortcut */
1462 assert(get_irn_arity (node) > 0);
/* break potential cycles before recursing */
1464 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1465 res = skip_Id(rem_pred);
1466 if (is_Id(res)) /* self-loop */ return node;
1468 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1475 int (is_strictConv)(const ir_node *node)
1477 return is_strictConv_(node);
1480 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
1481 int (is_SymConst_addr_ent)(const ir_node *node)
1483 return is_SymConst_addr_ent_(node);
1486 /* Returns true if the operation manipulates control flow. */
1487 int is_cfop(const ir_node *node)
1489 if (is_fragile_op(node) && ir_throws_exception(node))
1492 return is_op_cfopcode(get_irn_op(node));
1495 int is_unknown_jump(const ir_node *node)
1497 return is_op_unknown_jump(get_irn_op(node));
1500 /* Returns true if the operation can change the control flow because
1502 int is_fragile_op(const ir_node *node)
1504 return is_op_fragile(get_irn_op(node));
1507 /* Returns true if the operation is a forking control flow operation. */
1508 int (is_irn_forking)(const ir_node *node)
1510 return is_irn_forking_(node);
1513 void (copy_node_attr)(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
1515 copy_node_attr_(irg, old_node, new_node);
1518 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
1520 ir_type *(get_irn_type_attr)(ir_node *node)
1522 return get_irn_type_attr_(node);
/* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
/* Out-of-line variant of get_irn_entity_attr_(). */
ir_entity *(get_irn_entity_attr)(ir_node *node)
{
    return get_irn_entity_attr_(node);
}
/* Returns non-zero for constant-like nodes. */
/* Out-of-line variant of is_irn_constlike_(). */
int (is_irn_constlike)(const ir_node *node)
{
    return is_irn_constlike_(node);
}
/*
 * Returns non-zero for nodes that are allowed to have keep-alives and
 * are neither Block nor PhiM.
 * Out-of-line variant of is_irn_keep_().
 */
int (is_irn_keep)(const ir_node *node)
{
    return is_irn_keep_(node);
}
/*
 * Returns non-zero for nodes that are always placed in the start block.
 * Out-of-line variant of is_irn_start_block_placed_().
 */
int (is_irn_start_block_placed)(const ir_node *node)
{
    return is_irn_start_block_placed_(node);
}
/* Returns non-zero for nodes that are CSE neutral to its users. */
/* Out-of-line variant of is_irn_cse_neutral_(). */
int (is_irn_cse_neutral)(const ir_node *node)
{
    return is_irn_cse_neutral_(node);
}
1560 /* Gets the string representation of the jump prediction .*/
1561 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
1563 #define X(a) case a: return #a
1565 X(COND_JMP_PRED_NONE);
1566 X(COND_JMP_PRED_TRUE);
1567 X(COND_JMP_PRED_FALSE);
1573 /** Return the attribute type of a SymConst node if exists */
1574 static ir_type *get_SymConst_attr_type(const ir_node *self)
1576 symconst_kind kind = get_SymConst_kind(self);
1577 if (SYMCONST_HAS_TYPE(kind))
1578 return get_SymConst_type(self);
1582 /** Return the attribute entity of a SymConst node if exists */
1583 static ir_entity *get_SymConst_attr_entity(const ir_node *self)
1585 symconst_kind kind = get_SymConst_kind(self);
1586 if (SYMCONST_HAS_ENT(kind))
1587 return get_SymConst_entity(self);
1591 /** the get_type_attr operation must be always implemented */
1592 static ir_type *get_Null_type(const ir_node *n)
1595 return firm_unknown_type;
1598 /* Sets the get_type operation for an ir_op_ops. */
1599 void firm_set_default_get_type_attr(unsigned code, ir_op_ops *ops)
1602 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
1603 case iro_Builtin: ops->get_type_attr = get_Builtin_type; break;
1604 case iro_Call: ops->get_type_attr = get_Call_type; break;
1605 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
1606 case iro_CopyB: ops->get_type_attr = get_CopyB_type; break;
1607 case iro_Free: ops->get_type_attr = get_Free_type; break;
1608 case iro_InstOf: ops->get_type_attr = get_InstOf_type; break;
1609 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
1611 /* not allowed to be NULL */
1612 if (! ops->get_type_attr)
1613 ops->get_type_attr = get_Null_type;
1618 /** the get_entity_attr operation must be always implemented */
1619 static ir_entity *get_Null_ent(const ir_node *n)
1625 /* Sets the get_type operation for an ir_op_ops. */
1626 void firm_set_default_get_entity_attr(unsigned code, ir_op_ops *ops)
1629 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
1630 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
1631 case iro_Block: ops->get_entity_attr = get_Block_entity; break;
1633 /* not allowed to be NULL */
1634 if (! ops->get_entity_attr)
1635 ops->get_entity_attr = get_Null_ent;
/* Sets the debug information of a node. */
/* Out-of-line variant of set_irn_dbg_info_(). */
void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
{
    set_irn_dbg_info_(n, db);
}
/*
 * Returns the debug information of a node.
 *
 * @param n  The node.
 * Out-of-line variant of get_irn_dbg_info_().
 */
dbg_info *(get_irn_dbg_info)(const ir_node *n)
{
    return get_irn_dbg_info_(n);
}
1656 ir_switch_table *ir_new_switch_table(ir_graph *irg, size_t n_entries)
1658 struct obstack *obst = get_irg_obstack(irg);
1659 ir_switch_table *res = OALLOCFZ(obst, ir_switch_table, entries, n_entries);
1660 res->n_entries = n_entries;
1664 void ir_switch_table_set(ir_switch_table *table, size_t n,
1665 ir_tarval *min, ir_tarval *max, long pn)
1667 ir_switch_table_entry *entry = ir_switch_table_get_entry(table, n);
/* Returns the number of entries in the switch table.
 * Out-of-line variant of ir_switch_table_get_n_entries_(). */
size_t (ir_switch_table_get_n_entries)(const ir_switch_table *table)
{
    return ir_switch_table_get_n_entries_(table);
}
1678 ir_tarval *ir_switch_table_get_max(const ir_switch_table *table, size_t e)
1680 return ir_switch_table_get_entry_const(table, e)->max;
1683 ir_tarval *ir_switch_table_get_min(const ir_switch_table *table, size_t e)
1685 return ir_switch_table_get_entry_const(table, e)->min;
1688 long ir_switch_table_get_pn(const ir_switch_table *table, size_t e)
1690 return ir_switch_table_get_entry_const(table, e)->pn;
1693 ir_switch_table *ir_switch_table_duplicate(ir_graph *irg,
1694 const ir_switch_table *table)
1696 size_t n_entries = ir_switch_table_get_n_entries(table);
1698 ir_switch_table *res = ir_new_switch_table(irg, n_entries);
1699 for (e = 0; e < n_entries; ++e) {
1700 const ir_switch_table_entry *entry
1701 = ir_switch_table_get_entry_const(table, e);
1702 ir_switch_table_entry *new_entry = ir_switch_table_get_entry(res, e);
1703 *new_entry = *entry;
1709 * Calculate a hash value of a node.
1711 unsigned firm_default_hash(const ir_node *node)
1716 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
1717 h = irn_arity = get_irn_arity(node);
1719 /* consider all in nodes... except the block if not a control flow. */
1720 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
1721 ir_node *pred = get_irn_n(node, i);
1722 if (is_irn_cse_neutral(pred))
1725 h = 9*h + HASH_PTR(pred);
1729 h = 9*h + HASH_PTR(get_irn_mode(node));
1731 h = 9*h + HASH_PTR(get_irn_op(node));
1736 /* include generated code */
1737 #include "gen_irnode.c.inl"