2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
32 #include "irgraph_t.h"
34 #include "irbackedge_t.h"
38 #include "iredgekinds.h"
39 #include "iredges_t.h"
/* Some constants fixing the positions of nodes predecessors
   in the in array. */
#define CALL_PARAM_OFFSET     (n_Call_max+1)    /**< first Call parameter input */
#define BUILTIN_PARAM_OFFSET  (n_Builtin_max+1) /**< first Builtin parameter input */
#define ASM_PARAM_OFFSET      (n_ASM_max+1)     /**< first ASM input operand */
#define SEL_INDEX_OFFSET      (n_Sel_max+1)     /**< first Sel index input */
#define RETURN_RESULT_OFFSET  (n_Return_max+1)  /**< first Return result input */
#define END_KEEPALIVE_OFFSET  0                 /**< keep-alives start right after the block */
/** Textual names for the ir_relation bit combinations, indexed by the
 *  relation value itself (bit order: less|equal|greater|unordered).
 *  NOTE(review): interior entries reconstructed from the visible
 *  "unordered_less_equal"/"unordered_greater_equal" anchors — confirm
 *  against upstream libFirm. */
static const char *relation_names [] = {
	"false",
	"equal",
	"less",
	"less_equal",
	"greater",
	"greater_equal",
	"less_greater",
	"less_equal_greater",
	"unordered",
	"unordered_equal",
	"unordered_less",
	"unordered_less_equal",
	"unordered_greater",
	"unordered_greater_equal",
	"not_equal",
	"true"
};
77 const char *get_relation_string(ir_relation relation)
79 assert(relation < (ir_relation)ARRAY_SIZE(relation_names));
80 return relation_names[relation];
83 ir_relation get_negated_relation(ir_relation relation)
85 return relation ^ ir_relation_true;
88 ir_relation get_inversed_relation(ir_relation relation)
90 ir_relation code = relation & ~(ir_relation_less|ir_relation_greater);
91 bool less = relation & ir_relation_less;
92 bool greater = relation & ir_relation_greater;
93 code |= (less ? ir_relation_greater : ir_relation_false)
94 | (greater ? ir_relation_less : ir_relation_false);
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/** The amount of additional space for custom data to be allocated upon
 *  creating a new node. */
unsigned firm_add_node_size = 0;

/** Registers `size` additional bytes per node; returns the new total.
 *  Must be called before any node is created (see init_irnode()). */
unsigned firm_register_additional_node_data(unsigned size)
{
	assert(!forbid_new_data && "Too late to register additional node data");

	/* NOTE(review): release-build guard reconstructed — confirm upstream. */
	if (forbid_new_data)
		return 0;

	return firm_add_node_size += size;
}
118 void init_irnode(void)
120 /* Forbid the addition of new data to an ir node. */
124 struct struct_align {
133 ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
134 ir_mode *mode, int arity, ir_node *const *in)
137 unsigned align = offsetof(struct struct_align, s) - 1;
138 unsigned add_node_size = (firm_add_node_size + align) & ~align;
139 size_t node_size = offsetof(ir_node, attr) + op->attr_size + add_node_size;
146 p = (char*)obstack_alloc(irg->obst, node_size);
147 memset(p, 0, node_size);
148 res = (ir_node *)(p + add_node_size);
150 res->kind = k_ir_node;
154 res->node_idx = irg_register_node_idx(irg, res);
159 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
161 /* not nice but necessary: End and Sync must always have a flexible array */
162 if (op == op_End || op == op_Sync)
163 res->in = NEW_ARR_F(ir_node *, (arity+1));
165 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
166 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
170 set_irn_dbg_info(res, db);
172 res->node_nr = get_irp_new_node_nr();
174 for (i = 0; i < EDGE_KIND_LAST; ++i) {
175 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
176 /* edges will be build immediately */
177 res->edge_info[i].edges_built = 1;
178 res->edge_info[i].out_count = 0;
181 /* don't put this into the for loop, arity is -1 for some nodes! */
182 edges_notify_edge(res, -1, res->in[0], NULL, irg);
183 for (i = 1; i <= arity; ++i)
184 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
186 hook_new_node(irg, res);
187 if (get_irg_phase_state(irg) == phase_backend) {
188 be_info_new_node(irg, res);
194 int (is_ir_node)(const void *thing)
196 return is_ir_node_(thing);
199 int (get_irn_arity)(const ir_node *node)
201 return get_irn_arity_(node);
204 ir_node **get_irn_in(const ir_node *node)
209 void set_irn_in(ir_node *node, int arity, ir_node **in)
213 ir_graph *irg = get_irn_irg(node);
218 assert(node != NULL && node->kind == k_ir_node);
220 for (i = 0; i < arity; ++i) {
221 assert(in[i] != NULL && in[0]->kind == k_ir_node);
225 for (i = 0; i < arity; i++) {
226 if (i < (int)ARR_LEN(*pOld_in)-1)
227 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
229 edges_notify_edge(node, i, in[i], NULL, irg);
231 for (;i < (int)ARR_LEN(*pOld_in)-1; i++) {
232 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
235 if (arity != (int)ARR_LEN(*pOld_in) - 1) {
236 ir_node * block = (*pOld_in)[0];
237 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
238 (*pOld_in)[0] = block;
240 fix_backedges(irg->obst, node);
242 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
244 /* update irg flags */
245 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
248 ir_node *(get_irn_n)(const ir_node *node, int n)
250 return get_irn_n_(node, n);
253 void set_irn_n(ir_node *node, int n, ir_node *in)
255 ir_graph *irg = get_irn_irg(node);
256 assert(node && node->kind == k_ir_node);
258 assert(n < get_irn_arity(node));
259 assert(in && in->kind == k_ir_node);
262 hook_set_irn_n(node, n, in, node->in[n + 1]);
264 /* Here, we rely on src and tgt being in the current ir graph */
265 edges_notify_edge(node, n, in, node->in[n + 1], irg);
267 node->in[n + 1] = in;
269 /* update irg flags */
270 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
273 int add_irn_n(ir_node *node, ir_node *in)
276 ir_graph *irg = get_irn_irg(node);
278 assert(node->op->opar == oparity_dynamic);
279 pos = ARR_LEN(node->in) - 1;
280 ARR_APP1(ir_node *, node->in, in);
281 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
284 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
289 void del_Sync_n(ir_node *n, int i)
291 int arity = get_Sync_n_preds(n);
292 ir_node *last_pred = get_Sync_pred(n, arity - 1);
293 set_Sync_pred(n, i, last_pred);
294 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
295 ARR_SHRINKLEN(get_irn_in(n), arity);
298 int (get_irn_deps)(const ir_node *node)
300 return get_irn_deps_(node);
303 ir_node *(get_irn_dep)(const ir_node *node, int pos)
305 return get_irn_dep_(node, pos);
308 void set_irn_dep(ir_node *node, int pos, ir_node *dep)
313 assert(node->deps && "dependency array node yet allocated. use add_irn_dep()");
314 assert(pos >= 0 && pos < (int)ARR_LEN(node->deps) && "dependency index out of range");
316 old = node->deps[pos];
317 node->deps[pos] = dep;
318 irg = get_irn_irg(node);
319 if (edges_activated_kind(irg, EDGE_KIND_DEP))
320 edges_notify_edge_kind(node, pos, dep, old, EDGE_KIND_DEP, irg);
323 void add_irn_dep(ir_node *node, ir_node *dep)
327 if (node->deps == NULL) {
328 node->deps = NEW_ARR_F(ir_node *, 0);
330 ARR_APP1(ir_node*, node->deps, dep);
331 irg = get_irn_irg(node);
332 if (edges_activated_kind(irg, EDGE_KIND_DEP))
333 edges_notify_edge_kind(node, ARR_LEN(node->deps)-1, dep, NULL, EDGE_KIND_DEP, irg);
336 void delete_irn_dep(ir_node *node, ir_node *dep)
340 if (node->deps == NULL)
343 n_deps = ARR_LEN(node->deps);
344 for (i = 0; i < n_deps; ++i) {
345 if (node->deps[i] == dep) {
346 set_irn_dep(node, i, node->deps[n_deps-1]);
347 edges_notify_edge(node, i, NULL, dep, get_irn_irg(node));
348 ARR_SHRINKLEN(node->deps, n_deps-1);
354 void add_irn_deps(ir_node *tgt, ir_node *src)
358 for (i = 0, n = get_irn_deps(src); i < n; ++i)
359 add_irn_dep(tgt, get_irn_dep(src, i));
363 ir_mode *(get_irn_mode)(const ir_node *node)
365 return get_irn_mode_(node);
368 void (set_irn_mode)(ir_node *node, ir_mode *mode)
370 set_irn_mode_(node, mode);
373 ir_op *(get_irn_op)(const ir_node *node)
375 return get_irn_op_(node);
378 void (set_irn_op)(ir_node *node, ir_op *op)
380 set_irn_op_(node, op);
383 unsigned (get_irn_opcode)(const ir_node *node)
385 return get_irn_opcode_(node);
388 const char *get_irn_opname(const ir_node *node)
390 return get_id_str(node->op->name);
393 ident *get_irn_opident(const ir_node *node)
396 return node->op->name;
399 ir_visited_t (get_irn_visited)(const ir_node *node)
401 return get_irn_visited_(node);
404 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
406 set_irn_visited_(node, visited);
409 void (mark_irn_visited)(ir_node *node)
411 mark_irn_visited_(node);
414 int (irn_visited)(const ir_node *node)
416 return irn_visited_(node);
419 int (irn_visited_else_mark)(ir_node *node)
421 return irn_visited_else_mark_(node);
424 void (set_irn_link)(ir_node *node, void *link)
426 set_irn_link_(node, link);
429 void *(get_irn_link)(const ir_node *node)
431 return get_irn_link_(node);
434 op_pin_state (get_irn_pinned)(const ir_node *node)
436 return get_irn_pinned_(node);
439 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
441 return is_irn_pinned_in_irg_(node);
444 void set_irn_pinned(ir_node *node, op_pin_state state)
446 /* due to optimization an opt may be turned into a Tuple */
450 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
451 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
453 node->attr.except.pin_state = state;
456 long get_irn_node_nr(const ir_node *node)
459 return node->node_nr;
462 void *(get_irn_generic_attr)(ir_node *node)
464 assert(is_ir_node(node));
465 return get_irn_generic_attr_(node);
468 const void *(get_irn_generic_attr_const)(const ir_node *node)
470 assert(is_ir_node(node));
471 return get_irn_generic_attr_const_(node);
474 unsigned (get_irn_idx)(const ir_node *node)
476 assert(is_ir_node(node));
477 return get_irn_idx_(node);
480 int get_irn_pred_pos(ir_node *node, ir_node *arg)
483 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
484 if (get_irn_n(node, i) == arg)
490 ir_node *(get_nodes_block)(const ir_node *node)
492 return get_nodes_block_(node);
495 void set_nodes_block(ir_node *node, ir_node *block)
497 assert(node->op != op_Block);
498 set_irn_n(node, -1, block);
501 ir_type *is_frame_pointer(const ir_node *n)
503 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
504 ir_node *start = get_Proj_pred(n);
505 if (is_Start(start)) {
506 return get_irg_frame_type(get_irn_irg(start));
512 ir_node **get_Block_cfgpred_arr(ir_node *node)
514 assert(is_Block(node));
515 return (ir_node **)&(get_irn_in(node)[1]);
518 int (get_Block_n_cfgpreds)(const ir_node *node)
520 return get_Block_n_cfgpreds_(node);
523 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
525 return get_Block_cfgpred_(node, pos);
528 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
530 assert(is_Block(node));
531 set_irn_n(node, pos, pred);
534 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
538 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
539 if (get_Block_cfgpred_block(block, i) == pred)
545 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
547 return get_Block_cfgpred_block_(node, pos);
550 int get_Block_matured(const ir_node *node)
552 assert(is_Block(node));
553 return (int)node->attr.block.is_matured;
556 void set_Block_matured(ir_node *node, int matured)
558 assert(is_Block(node));
559 node->attr.block.is_matured = matured;
562 ir_visited_t (get_Block_block_visited)(const ir_node *node)
564 return get_Block_block_visited_(node);
567 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
569 set_Block_block_visited_(node, visit);
572 void (mark_Block_block_visited)(ir_node *node)
574 mark_Block_block_visited_(node);
577 int (Block_block_visited)(const ir_node *node)
579 return Block_block_visited_(node);
582 ir_graph *(get_Block_irg)(const ir_node *block)
584 return get_Block_irg_(block);
587 ir_entity *create_Block_entity(ir_node *block)
590 assert(is_Block(block));
592 entity = block->attr.block.entity;
593 if (entity == NULL) {
594 ir_label_t nr = get_irp_next_label_nr();
595 entity = new_label_entity(nr);
596 set_entity_visibility(entity, ir_visibility_local);
597 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
598 set_entity_compiler_generated(entity, 1);
600 block->attr.block.entity = entity;
605 ir_node *(get_Block_phis)(const ir_node *block)
607 return get_Block_phis_(block);
610 void (set_Block_phis)(ir_node *block, ir_node *phi)
612 set_Block_phis_(block, phi);
615 void (add_Block_phi)(ir_node *block, ir_node *phi)
617 add_Block_phi_(block, phi);
620 unsigned (get_Block_mark)(const ir_node *block)
622 return get_Block_mark_(block);
625 void (set_Block_mark)(ir_node *block, unsigned mark)
627 set_Block_mark_(block, mark);
630 int get_End_n_keepalives(const ir_node *end)
633 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
636 ir_node *get_End_keepalive(const ir_node *end, int pos)
639 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
642 void add_End_keepalive(ir_node *end, ir_node *ka)
648 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
651 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
654 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
658 ir_graph *irg = get_irn_irg(end);
660 /* notify that edges are deleted */
661 for (e = END_KEEPALIVE_OFFSET; e < ARR_LEN(end->in) - 1; ++e) {
662 edges_notify_edge(end, e, NULL, end->in[e + 1], irg);
664 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
666 for (i = 0; i < n; ++i) {
667 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
668 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
671 /* update irg flags */
672 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
675 void remove_End_keepalive(ir_node *end, ir_node *irn)
677 int n = get_End_n_keepalives(end);
685 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
693 irg = get_irn_irg(end);
695 /* remove the edge */
696 edges_notify_edge(end, idx, NULL, irn, irg);
699 /* exchange with the last one */
700 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
701 edges_notify_edge(end, n - 1, NULL, old, irg);
702 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
703 edges_notify_edge(end, idx, old, NULL, irg);
705 /* now n - 1 keeps, 1 block input */
706 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
708 /* update irg flags */
709 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
712 void remove_End_Bads_and_doublets(ir_node *end)
715 int idx, n = get_End_n_keepalives(end);
717 bool changed = false;
722 irg = get_irn_irg(end);
723 pset_new_init(&keeps);
725 for (idx = n - 1; idx >= 0; --idx) {
726 ir_node *ka = get_End_keepalive(end, idx);
728 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
730 /* remove the edge */
731 edges_notify_edge(end, idx, NULL, ka, irg);
734 /* exchange with the last one */
735 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
736 edges_notify_edge(end, n - 1, NULL, old, irg);
737 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
738 edges_notify_edge(end, idx, old, NULL, irg);
742 pset_new_insert(&keeps, ka);
745 /* n keeps, 1 block input */
746 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
748 pset_new_destroy(&keeps);
751 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
755 void free_End(ir_node *end)
760 end->in = NULL; /* @@@ make sure we get an error if we use the
761 in array afterwards ... */
764 size_t get_Return_n_ress(const ir_node *node)
766 assert(is_Return(node));
767 return (size_t)(get_irn_arity(node) - RETURN_RESULT_OFFSET);
770 ir_node **get_Return_res_arr(ir_node *node)
772 assert(is_Return(node));
773 if (get_Return_n_ress(node) > 0)
774 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
779 ir_node *get_Return_res(const ir_node *node, int pos)
781 assert(is_Return(node));
783 assert(get_Return_n_ress(node) > (size_t)pos);
784 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
787 void set_Return_res(ir_node *node, int pos, ir_node *res)
789 assert(is_Return(node));
790 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
793 int (is_Const_null)(const ir_node *node)
795 return is_Const_null_(node);
798 int (is_Const_one)(const ir_node *node)
800 return is_Const_one_(node);
803 int (is_Const_all_one)(const ir_node *node)
805 return is_Const_all_one_(node);
810 symconst_kind get_SymConst_kind(const ir_node *node)
812 assert(is_SymConst(node));
813 return node->attr.symc.kind;
816 void set_SymConst_kind(ir_node *node, symconst_kind kind)
818 assert(is_SymConst(node));
819 node->attr.symc.kind = kind;
822 ir_type *get_SymConst_type(const ir_node *node)
824 /* the cast here is annoying, but we have to compensate for
826 ir_node *irn = (ir_node *)node;
827 assert(is_SymConst(node) &&
828 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
829 return irn->attr.symc.sym.type_p;
832 void set_SymConst_type(ir_node *node, ir_type *tp)
834 assert(is_SymConst(node) &&
835 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
836 node->attr.symc.sym.type_p = tp;
839 ir_entity *get_SymConst_entity(const ir_node *node)
841 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
842 return node->attr.symc.sym.entity_p;
845 void set_SymConst_entity(ir_node *node, ir_entity *ent)
847 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
848 node->attr.symc.sym.entity_p = ent;
851 ir_enum_const *get_SymConst_enum(const ir_node *node)
853 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
854 return node->attr.symc.sym.enum_p;
857 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
859 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
860 node->attr.symc.sym.enum_p = ec;
863 union symconst_symbol
864 get_SymConst_symbol(const ir_node *node)
866 assert(is_SymConst(node));
867 return node->attr.symc.sym;
870 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
872 assert(is_SymConst(node));
873 node->attr.symc.sym = sym;
876 int get_Sel_n_indexs(const ir_node *node)
878 assert(is_Sel(node));
879 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
882 ir_node **get_Sel_index_arr(ir_node *node)
884 assert(is_Sel(node));
885 if (get_Sel_n_indexs(node) > 0)
886 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
891 ir_node *get_Sel_index(const ir_node *node, int pos)
893 assert(is_Sel(node));
894 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
897 void set_Sel_index(ir_node *node, int pos, ir_node *index)
899 assert(is_Sel(node));
900 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
903 ir_node **get_Call_param_arr(ir_node *node)
905 assert(is_Call(node));
906 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
909 int get_Call_n_params(const ir_node *node)
911 assert(is_Call(node));
912 return get_irn_arity(node) - CALL_PARAM_OFFSET;
915 ir_node *get_Call_param(const ir_node *node, int pos)
917 assert(is_Call(node));
918 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
921 void set_Call_param(ir_node *node, int pos, ir_node *param)
923 assert(is_Call(node));
924 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
927 ir_node **get_Builtin_param_arr(ir_node *node)
929 assert(is_Builtin(node));
930 return &get_irn_in(node)[BUILTIN_PARAM_OFFSET + 1];
933 int get_Builtin_n_params(const ir_node *node)
935 assert(is_Builtin(node));
936 return (get_irn_arity(node) - BUILTIN_PARAM_OFFSET);
939 ir_node *get_Builtin_param(const ir_node *node, int pos)
941 assert(is_Builtin(node));
942 return get_irn_n(node, pos + BUILTIN_PARAM_OFFSET);
945 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
947 assert(is_Builtin(node));
948 set_irn_n(node, pos + BUILTIN_PARAM_OFFSET, param);
951 const char *get_builtin_kind_name(ir_builtin_kind kind)
953 #define X(a) case a: return #a
957 X(ir_bk_return_address);
958 X(ir_bk_frame_address);
968 X(ir_bk_inner_trampoline);
975 int Call_has_callees(const ir_node *node)
977 assert(is_Call(node));
978 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
979 (node->attr.call.callee_arr != NULL));
982 size_t get_Call_n_callees(const ir_node *node)
984 assert(is_Call(node) && node->attr.call.callee_arr);
985 return ARR_LEN(node->attr.call.callee_arr);
988 ir_entity *get_Call_callee(const ir_node *node, size_t pos)
990 assert(pos < get_Call_n_callees(node));
991 return node->attr.call.callee_arr[pos];
994 void set_Call_callee_arr(ir_node *node, size_t n, ir_entity ** arr)
996 ir_graph *irg = get_irn_irg(node);
998 assert(is_Call(node));
999 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1000 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, irg->obst, n);
1002 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1005 void remove_Call_callee_arr(ir_node *node)
1007 assert(is_Call(node));
1008 node->attr.call.callee_arr = NULL;
1011 int is_Cast_upcast(ir_node *node)
1013 ir_type *totype = get_Cast_type(node);
1014 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1016 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1019 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1020 totype = get_pointer_points_to_type(totype);
1021 fromtype = get_pointer_points_to_type(fromtype);
1026 if (!is_Class_type(totype)) return 0;
1027 return is_SubClass_of(fromtype, totype);
1030 int is_Cast_downcast(ir_node *node)
1032 ir_type *totype = get_Cast_type(node);
1033 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1035 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1038 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1039 totype = get_pointer_points_to_type(totype);
1040 fromtype = get_pointer_points_to_type(fromtype);
1045 if (!is_Class_type(totype)) return 0;
1046 return is_SubClass_of(totype, fromtype);
1049 int (is_unop)(const ir_node *node)
1051 return is_unop_(node);
1054 ir_node *get_unop_op(const ir_node *node)
1056 if (node->op->opar == oparity_unary)
1057 return get_irn_n(node, node->op->op_index);
1059 assert(node->op->opar == oparity_unary);
1063 void set_unop_op(ir_node *node, ir_node *op)
1065 if (node->op->opar == oparity_unary)
1066 set_irn_n(node, node->op->op_index, op);
1068 assert(node->op->opar == oparity_unary);
1071 int (is_binop)(const ir_node *node)
1073 return is_binop_(node);
1076 ir_node *get_binop_left(const ir_node *node)
1078 assert(node->op->opar == oparity_binary);
1079 return get_irn_n(node, node->op->op_index);
1082 void set_binop_left(ir_node *node, ir_node *left)
1084 assert(node->op->opar == oparity_binary);
1085 set_irn_n(node, node->op->op_index, left);
1088 ir_node *get_binop_right(const ir_node *node)
1090 assert(node->op->opar == oparity_binary);
1091 return get_irn_n(node, node->op->op_index + 1);
1094 void set_binop_right(ir_node *node, ir_node *right)
1096 assert(node->op->opar == oparity_binary);
1097 set_irn_n(node, node->op->op_index + 1, right);
1100 ir_node **get_Phi_preds_arr(ir_node *node)
1102 assert(is_Phi(node));
1103 return (ir_node **)&(get_irn_in(node)[1]);
1106 int get_Phi_n_preds(const ir_node *node)
1108 assert(is_Phi(node));
1109 return get_irn_arity(node);
1112 ir_node *get_Phi_pred(const ir_node *node, int pos)
1114 assert(is_Phi(node));
1115 return get_irn_n(node, pos);
1118 void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1120 assert(is_Phi(node));
1121 set_irn_n(node, pos, pred);
1124 ir_node *(get_Phi_next)(const ir_node *phi)
1126 return get_Phi_next_(phi);
1129 void (set_Phi_next)(ir_node *phi, ir_node *next)
1131 set_Phi_next_(phi, next);
1134 int is_memop(const ir_node *node)
1136 return is_op_uses_memory(get_irn_op(node));
1139 ir_node *get_memop_mem(const ir_node *node)
1141 const ir_op *op = get_irn_op(node);
1142 assert(is_memop(node));
1143 return get_irn_n(node, op->memory_index);
1146 void set_memop_mem(ir_node *node, ir_node *mem)
1148 const ir_op *op = get_irn_op(node);
1149 assert(is_memop(node));
1150 set_irn_n(node, op->memory_index, mem);
1153 ir_node **get_Sync_preds_arr(ir_node *node)
1155 assert(is_Sync(node));
1156 return (ir_node **)&(get_irn_in(node)[1]);
1159 int get_Sync_n_preds(const ir_node *node)
1161 assert(is_Sync(node));
1162 return (get_irn_arity(node));
1165 ir_node *get_Sync_pred(const ir_node *node, int pos)
1167 assert(is_Sync(node));
1168 return get_irn_n(node, pos);
1171 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
1173 assert(is_Sync(node));
1174 set_irn_n(node, pos, pred);
1177 void add_Sync_pred(ir_node *node, ir_node *pred)
1179 assert(is_Sync(node));
1180 add_irn_n(node, pred);
1183 int (is_arg_Proj)(const ir_node *node)
1185 return is_arg_Proj_(node);
1188 int is_x_except_Proj(const ir_node *node)
1193 pred = get_Proj_pred(node);
1194 if (!is_fragile_op(pred))
1196 return get_Proj_proj(node) == pred->op->pn_x_except;
1199 int is_x_regular_Proj(const ir_node *node)
1204 pred = get_Proj_pred(node);
1205 if (!is_fragile_op(pred))
1207 return get_Proj_proj(node) == pred->op->pn_x_regular;
1210 void ir_set_throws_exception(ir_node *node, int throws_exception)
1212 except_attr *attr = &node->attr.except;
1213 assert(is_fragile_op(node));
1214 attr->throws_exception = throws_exception;
1217 int ir_throws_exception(const ir_node *node)
1219 const except_attr *attr = &node->attr.except;
1220 assert(is_fragile_op(node));
1221 return attr->throws_exception;
1224 ir_node **get_Tuple_preds_arr(ir_node *node)
1226 assert(is_Tuple(node));
1227 return (ir_node **)&(get_irn_in(node)[1]);
1230 int get_Tuple_n_preds(const ir_node *node)
1232 assert(is_Tuple(node));
1233 return get_irn_arity(node);
1236 ir_node *get_Tuple_pred(const ir_node *node, int pos)
1238 assert(is_Tuple(node));
1239 return get_irn_n(node, pos);
1242 void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
1244 assert(is_Tuple(node));
1245 set_irn_n(node, pos, pred);
1248 int get_ASM_n_inputs(const ir_node *node)
1250 assert(is_ASM(node));
1251 return get_irn_arity(node) - ASM_PARAM_OFFSET;
1254 ir_node *get_ASM_input(const ir_node *node, int pos)
1256 return get_irn_n(node, ASM_PARAM_OFFSET + pos);
1259 size_t get_ASM_n_output_constraints(const ir_node *node)
1261 assert(is_ASM(node));
1262 return ARR_LEN(node->attr.assem.output_constraints);
1265 size_t get_ASM_n_clobbers(const ir_node *node)
1267 assert(is_ASM(node));
1268 return ARR_LEN(node->attr.assem.clobbers);
1271 ir_graph *(get_irn_irg)(const ir_node *node)
1273 return get_irn_irg_(node);
1276 ir_node *skip_Proj(ir_node *node)
1278 /* don't assert node !!! */
1283 node = get_Proj_pred(node);
1289 skip_Proj_const(const ir_node *node)
1291 /* don't assert node !!! */
1296 node = get_Proj_pred(node);
1301 ir_node *skip_Tuple(ir_node *node)
1306 if (is_Proj(node)) {
1307 pred = get_Proj_pred(node);
1309 if (is_Proj(pred)) { /* nested Tuple ? */
1310 pred = skip_Tuple(pred);
1312 if (is_Tuple(pred)) {
1313 node = get_Tuple_pred(pred, get_Proj_proj(node));
1316 } else if (is_Tuple(pred)) {
1317 node = get_Tuple_pred(pred, get_Proj_proj(node));
1324 ir_node *skip_Cast(ir_node *node)
1327 return get_Cast_op(node);
1331 const ir_node *skip_Cast_const(const ir_node *node)
1334 return get_Cast_op(node);
1338 ir_node *skip_Pin(ir_node *node)
1341 return get_Pin_op(node);
1345 ir_node *skip_Confirm(ir_node *node)
1347 if (is_Confirm(node))
1348 return get_Confirm_value(node);
1352 ir_node *skip_HighLevel_ops(ir_node *node)
1354 while (is_op_highlevel(get_irn_op(node))) {
1355 node = get_irn_n(node, 0);
1361 ir_node *skip_Id(ir_node *node)
1363 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1364 * than any other approach, as Id chains are resolved and all point to the real node, or
1365 * all id's are self loops.
1367 * Note: This function takes 10% of mostly ANY the compiler run, so it's
1368 * a little bit "hand optimized".
1371 /* don't assert node !!! */
1373 if (!node || (node->op != op_Id)) return node;
1375 /* Don't use get_Id_pred(): We get into an endless loop for
1376 self-referencing Ids. */
1377 pred = node->in[0+1];
1379 if (pred->op != op_Id) return pred;
1381 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1382 ir_node *rem_pred, *res;
1384 if (pred->op != op_Id) return pred; /* shortcut */
1387 assert(get_irn_arity (node) > 0);
1389 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1390 res = skip_Id(rem_pred);
1391 if (is_Id(res)) /* self-loop */ return node;
1393 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1400 int (is_strictConv)(const ir_node *node)
1402 return is_strictConv_(node);
1405 int (is_SymConst_addr_ent)(const ir_node *node)
1407 return is_SymConst_addr_ent_(node);
1410 int is_cfop(const ir_node *node)
1412 if (is_fragile_op(node) && ir_throws_exception(node))
1415 return is_op_cfopcode(get_irn_op(node));
1418 int is_unknown_jump(const ir_node *node)
1420 return is_op_unknown_jump(get_irn_op(node));
1423 int is_fragile_op(const ir_node *node)
1425 return is_op_fragile(get_irn_op(node));
1428 int (is_irn_forking)(const ir_node *node)
1430 return is_irn_forking_(node);
1433 void (copy_node_attr)(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
1435 copy_node_attr_(irg, old_node, new_node);
1438 ir_type *(get_irn_type_attr)(ir_node *node)
1440 return get_irn_type_attr_(node);
1443 ir_entity *(get_irn_entity_attr)(ir_node *node)
1445 return get_irn_entity_attr_(node);
1448 int (is_irn_constlike)(const ir_node *node)
1450 return is_irn_constlike_(node);
1453 int (is_irn_keep)(const ir_node *node)
1455 return is_irn_keep_(node);
1458 int (is_irn_start_block_placed)(const ir_node *node)
1460 return is_irn_start_block_placed_(node);
1463 int (is_irn_cse_neutral)(const ir_node *node)
1465 return is_irn_cse_neutral_(node);
1468 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
1470 #define X(a) case a: return #a
1472 X(COND_JMP_PRED_NONE);
1473 X(COND_JMP_PRED_TRUE);
1474 X(COND_JMP_PRED_FALSE);
1480 /** Return the attribute type of a SymConst node if exists */
1481 static ir_type *get_SymConst_attr_type(const ir_node *self)
1483 symconst_kind kind = get_SymConst_kind(self);
1484 if (SYMCONST_HAS_TYPE(kind))
1485 return get_SymConst_type(self);
1489 /** Return the attribute entity of a SymConst node if exists */
1490 static ir_entity *get_SymConst_attr_entity(const ir_node *self)
1492 symconst_kind kind = get_SymConst_kind(self);
1493 if (SYMCONST_HAS_ENT(kind))
1494 return get_SymConst_entity(self);
1498 static void register_get_type_func(ir_op *op, get_type_attr_func func)
1500 op->ops.get_type_attr = func;
1503 static void register_get_entity_func(ir_op *op, get_entity_attr_func func)
1505 op->ops.get_entity_attr = func;
1508 void ir_register_getter_ops(void)
1510 register_get_type_func(op_Alloc, get_Alloc_type);
1511 register_get_type_func(op_Builtin, get_Builtin_type);
1512 register_get_type_func(op_Call, get_Call_type);
1513 register_get_type_func(op_Cast, get_Cast_type);
1514 register_get_type_func(op_CopyB, get_CopyB_type);
1515 register_get_type_func(op_Free, get_Free_type);
1516 register_get_type_func(op_InstOf, get_InstOf_type);
1517 register_get_type_func(op_SymConst, get_SymConst_attr_type);
1519 register_get_entity_func(op_SymConst, get_SymConst_attr_entity);
1520 register_get_entity_func(op_Sel, get_Sel_entity);
1521 register_get_entity_func(op_Block, get_Block_entity);
1524 void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
1526 set_irn_dbg_info_(n, db);
1529 dbg_info *(get_irn_dbg_info)(const ir_node *n)
1531 return get_irn_dbg_info_(n);
1534 ir_switch_table *ir_new_switch_table(ir_graph *irg, size_t n_entries)
1536 struct obstack *obst = get_irg_obstack(irg);
1537 ir_switch_table *res = OALLOCFZ(obst, ir_switch_table, entries, n_entries);
1538 res->n_entries = n_entries;
1542 void ir_switch_table_set(ir_switch_table *table, size_t n,
1543 ir_tarval *min, ir_tarval *max, long pn)
1545 ir_switch_table_entry *entry = ir_switch_table_get_entry(table, n);
1551 size_t (ir_switch_table_get_n_entries)(const ir_switch_table *table)
1553 return ir_switch_table_get_n_entries_(table);
1556 ir_tarval *ir_switch_table_get_max(const ir_switch_table *table, size_t e)
1558 return ir_switch_table_get_entry_const(table, e)->max;
1561 ir_tarval *ir_switch_table_get_min(const ir_switch_table *table, size_t e)
1563 return ir_switch_table_get_entry_const(table, e)->min;
1566 long ir_switch_table_get_pn(const ir_switch_table *table, size_t e)
1568 return ir_switch_table_get_entry_const(table, e)->pn;
1571 ir_switch_table *ir_switch_table_duplicate(ir_graph *irg,
1572 const ir_switch_table *table)
1574 size_t n_entries = ir_switch_table_get_n_entries(table);
1576 ir_switch_table *res = ir_new_switch_table(irg, n_entries);
1577 for (e = 0; e < n_entries; ++e) {
1578 const ir_switch_table_entry *entry
1579 = ir_switch_table_get_entry_const(table, e);
1580 ir_switch_table_entry *new_entry = ir_switch_table_get_entry(res, e);
1581 *new_entry = *entry;
1586 /* include generated code */
1587 #include "gen_irnode.c.inl"