2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
32 #include "irgraph_t.h"
34 #include "irbackedge_t.h"
38 #include "iredgekinds.h"
39 #include "iredges_t.h"
49 /* Some constants fixing the positions of a node's predecessors in its in array. */
51 #define CALL_PARAM_OFFSET (n_Call_max+1)
52 #define BUILTIN_PARAM_OFFSET (n_Builtin_max+1)
53 #define SEL_INDEX_OFFSET (n_Sel_max+1)
54 #define RETURN_RESULT_OFFSET (n_Return_max+1)
55 #define END_KEEPALIVE_OFFSET 0
57 static const char *relation_names [] = {
69 "unordered_less_equal",
71 "unordered_greater_equal",
76 const char *get_relation_string(ir_relation relation)
78 assert(relation < (ir_relation)ARRAY_SIZE(relation_names));
79 return relation_names[relation];
82 ir_relation get_negated_relation(ir_relation relation)
84 return relation ^ ir_relation_true;
87 ir_relation get_inversed_relation(ir_relation relation)
89 ir_relation code = relation & ~(ir_relation_less|ir_relation_greater);
90 bool less = relation & ir_relation_less;
91 bool greater = relation & ir_relation_greater;
92 code |= (less ? ir_relation_greater : 0) | (greater ? ir_relation_less : 0);
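/* Worked example (illustrative): negation complements the relation, so the
 * negation of ir_relation_less is "unordered or greater or equal"; inversion
 * merely swaps the operand order, so the inverse of ir_relation_less_equal is
 * ir_relation_greater_equal. */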
97 * Indicates whether additional data can be registered for ir nodes.
98 * If set to 1, this is not possible anymore.
100 static int forbid_new_data = 0;
102 unsigned firm_add_node_size = 0;
105 unsigned firm_register_additional_node_data(unsigned size)
107 assert(!forbid_new_data && "Too late to register additional node data");
112 return firm_add_node_size += size;
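/*
 * Usage sketch (hypothetical client code, the type name is illustrative): a
 * pass that wants a few private bytes in every node registers them exactly
 * once, before the very first node is allocated (afterwards forbid_new_data
 * is set and the assertion above fires):
 *
 *     firm_register_additional_node_data(sizeof(my_pass_data_t));
 *
 * The returned value is the accumulated size of all data registered so far;
 * new_ir_node() reserves this space directly in front of every ir_node.
 */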
116 void init_irnode(void)
118 /* Forbid the addition of new data to an ir node. */
122 struct struct_align {
131 ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
132 ir_mode *mode, int arity, ir_node *const *in)
135 unsigned align = offsetof(struct struct_align, s) - 1;
136 unsigned add_node_size = (firm_add_node_size + align) & ~align;
137 size_t node_size = offsetof(ir_node, attr) + op->attr_size + add_node_size;
144 p = (char*)obstack_alloc(irg->obst, node_size);
145 memset(p, 0, node_size);
146 res = (ir_node *)(p + add_node_size);
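/* Resulting layout of the allocation on the graph's obstack (sketch):
 *
 *     p .. p + add_node_size - 1 : additional data registered by clients
 *     res = p + add_node_size    : the ir_node header itself
 *     res->attr ...              : op->attr_size bytes of opcode specific attributes
 */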
148 res->kind = k_ir_node;
152 res->node_idx = irg_register_node_idx(irg, res);
157 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
159 /* not nice but necessary: End and Sync must always have a flexible array */
160 if (op == op_End || op == op_Sync)
161 res->in = NEW_ARR_F(ir_node *, (arity+1));
163 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
164 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
168 set_irn_dbg_info(res, db);
170 res->node_nr = get_irp_new_node_nr();
172 for (i = 0; i < EDGE_KIND_LAST; ++i) {
173 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
174 /* edges will be built immediately */
175 res->edge_info[i].edges_built = 1;
176 res->edge_info[i].out_count = 0;
179 /* don't put this into the for loop, arity is -1 for some nodes! */
180 edges_notify_edge(res, -1, res->in[0], NULL, irg);
181 for (i = 1; i <= arity; ++i)
182 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
184 hook_new_node(irg, res);
185 if (get_irg_phase_state(irg) == phase_backend) {
186 be_info_new_node(irg, res);
192 int (is_ir_node)(const void *thing)
194 return is_ir_node_(thing);
197 int (get_irn_arity)(const ir_node *node)
199 return get_irn_arity_(node);
202 ir_node **get_irn_in(const ir_node *node)
207 void set_irn_in(ir_node *node, int arity, ir_node **in)
211 ir_graph *irg = get_irn_irg(node);
216 assert(node != NULL && node->kind == k_ir_node);
218 for (i = 0; i < arity; ++i) {
219 assert(in[i] != NULL && in[i]->kind == k_ir_node);
223 for (i = 0; i < arity; i++) {
224 if (i < (int)ARR_LEN(*pOld_in)-1)
225 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
227 edges_notify_edge(node, i, in[i], NULL, irg);
229 for (; i < (int)ARR_LEN(*pOld_in)-1; i++) {
230 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
233 if (arity != (int)ARR_LEN(*pOld_in) - 1) {
234 ir_node * block = (*pOld_in)[0];
235 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
236 (*pOld_in)[0] = block;
238 fix_backedges(irg->obst, node);
240 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
242 /* update irg flags */
243 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
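/*
 * Usage sketch (hypothetical): replacing the complete predecessor list of a
 * node in one go, e.g. after the inputs of a Phi have been permuted:
 *
 *     ir_node *new_preds[] = { pred_b, pred_a };
 *     set_irn_in(phi, 2, new_preds);
 *
 * The block input in[0] is kept, only the regular predecessors are replaced;
 * out edges and loop information become inconsistent and are marked as such.
 */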
246 ir_node *(get_irn_n)(const ir_node *node, int n)
248 return get_irn_n_(node, n);
251 void set_irn_n(ir_node *node, int n, ir_node *in)
253 ir_graph *irg = get_irn_irg(node);
254 assert(node && node->kind == k_ir_node);
256 assert(n < get_irn_arity(node));
257 assert(in && in->kind == k_ir_node);
260 hook_set_irn_n(node, n, in, node->in[n + 1]);
262 /* Here, we rely on src and tgt being in the current ir graph */
263 edges_notify_edge(node, n, in, node->in[n + 1], irg);
265 node->in[n + 1] = in;
267 /* update irg flags */
268 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS | IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
271 int add_irn_n(ir_node *node, ir_node *in)
274 ir_graph *irg = get_irn_irg(node);
276 assert(node->op->opar == oparity_dynamic);
277 pos = ARR_LEN(node->in) - 1;
278 ARR_APP1(ir_node *, node->in, in);
279 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
282 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
287 void del_Sync_n(ir_node *n, int i)
289 int arity = get_Sync_n_preds(n);
290 ir_node *last_pred = get_Sync_pred(n, arity - 1);
291 set_Sync_pred(n, i, last_pred);
292 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
293 ARR_SHRINKLEN(get_irn_in(n), arity);
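/* Removal works by moving the last predecessor into the freed slot and then
 * shrinking the in array by one, so the order of the Sync predecessors is not
 * preserved. Sync merges memory values without imposing an order, hence this
 * is harmless. */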
296 int (get_irn_deps)(const ir_node *node)
298 return get_irn_deps_(node);
301 ir_node *(get_irn_dep)(const ir_node *node, int pos)
303 return get_irn_dep_(node, pos);
306 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
308 set_irn_dep_(node, pos, dep);
311 int add_irn_dep(ir_node *node, ir_node *dep)
315 /* DEP edges are only allowed in the backend phase */
316 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
317 if (node->deps == NULL) {
318 node->deps = NEW_ARR_F(ir_node *, 1);
324 for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
325 if (node->deps[i] == NULL)
328 if (node->deps[i] == dep)
332 if (first_zero >= 0) {
333 node->deps[first_zero] = dep;
336 ARR_APP1(ir_node *, node->deps, dep);
341 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
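/* Note: the search above reuses the first NULL hole left behind by a removed
 * dependency (first_zero); only if no such hole exists is the deps array
 * actually grown. Either way the DEP edge information is kept up to date. */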
346 void add_irn_deps(ir_node *tgt, ir_node *src)
350 for (i = 0, n = get_irn_deps(src); i < n; ++i)
351 add_irn_dep(tgt, get_irn_dep(src, i));
355 ir_mode *(get_irn_mode)(const ir_node *node)
357 return get_irn_mode_(node);
360 void (set_irn_mode)(ir_node *node, ir_mode *mode)
362 set_irn_mode_(node, mode);
365 ir_op *(get_irn_op)(const ir_node *node)
367 return get_irn_op_(node);
370 void (set_irn_op)(ir_node *node, ir_op *op)
372 set_irn_op_(node, op);
375 unsigned (get_irn_opcode)(const ir_node *node)
377 return get_irn_opcode_(node);
380 const char *get_irn_opname(const ir_node *node)
383 if (is_Phi0(node)) return "Phi0";
384 return get_id_str(node->op->name);
387 ident *get_irn_opident(const ir_node *node)
390 return node->op->name;
393 ir_visited_t (get_irn_visited)(const ir_node *node)
395 return get_irn_visited_(node);
398 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
400 set_irn_visited_(node, visited);
403 void (mark_irn_visited)(ir_node *node)
405 mark_irn_visited_(node);
408 int (irn_visited)(const ir_node *node)
410 return irn_visited_(node);
413 int (irn_visited_else_mark)(ir_node *node)
415 return irn_visited_else_mark_(node);
418 void (set_irn_link)(ir_node *node, void *link)
420 set_irn_link_(node, link);
423 void *(get_irn_link)(const ir_node *node)
425 return get_irn_link_(node);
428 op_pin_state (get_irn_pinned)(const ir_node *node)
430 return get_irn_pinned_(node);
433 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
435 return is_irn_pinned_in_irg_(node);
438 void set_irn_pinned(ir_node *node, op_pin_state state)
440 /* due to optimization an op may be turned into a Tuple */
444 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
445 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
447 node->attr.except.pin_state = state;
450 long get_irn_node_nr(const ir_node *node)
453 return node->node_nr;
456 void *(get_irn_generic_attr)(ir_node *node)
458 assert(is_ir_node(node));
459 return get_irn_generic_attr_(node);
462 const void *(get_irn_generic_attr_const)(const ir_node *node)
464 assert(is_ir_node(node));
465 return get_irn_generic_attr_const_(node);
468 unsigned (get_irn_idx)(const ir_node *node)
470 assert(is_ir_node(node));
471 return get_irn_idx_(node);
474 int get_irn_pred_pos(ir_node *node, ir_node *arg)
477 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
478 if (get_irn_n(node, i) == arg)
484 ir_node *(get_nodes_block)(const ir_node *node)
486 return get_nodes_block_(node);
489 void set_nodes_block(ir_node *node, ir_node *block)
491 assert(node->op != op_Block);
492 set_irn_n(node, -1, block);
495 ir_type *is_frame_pointer(const ir_node *n)
497 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
498 ir_node *start = get_Proj_pred(n);
499 if (is_Start(start)) {
500 return get_irg_frame_type(get_irn_irg(start));
506 ir_node **get_Block_cfgpred_arr(ir_node *node)
508 assert(is_Block(node));
509 return (ir_node **)&(get_irn_in(node)[1]);
512 int (get_Block_n_cfgpreds)(const ir_node *node)
514 return get_Block_n_cfgpreds_(node);
517 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
519 return get_Block_cfgpred_(node, pos);
522 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
524 assert(is_Block(node));
525 set_irn_n(node, pos, pred);
528 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
532 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
533 if (get_Block_cfgpred_block(block, i) == pred)
539 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
541 return get_Block_cfgpred_block_(node, pos);
544 int get_Block_matured(const ir_node *node)
546 assert(is_Block(node));
547 return (int)node->attr.block.is_matured;
550 void set_Block_matured(ir_node *node, int matured)
552 assert(is_Block(node));
553 node->attr.block.is_matured = matured;
556 ir_visited_t (get_Block_block_visited)(const ir_node *node)
558 return get_Block_block_visited_(node);
561 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
563 set_Block_block_visited_(node, visit);
566 void (mark_Block_block_visited)(ir_node *node)
568 mark_Block_block_visited_(node);
571 int (Block_block_visited)(const ir_node *node)
573 return Block_block_visited_(node);
576 ir_extblk *get_Block_extbb(const ir_node *block)
579 assert(is_Block(block));
580 res = block->attr.block.extblk;
581 assert(res == NULL || is_ir_extbb(res));
585 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
587 assert(is_Block(block));
588 assert(extblk == NULL || is_ir_extbb(extblk));
589 block->attr.block.extblk = extblk;
592 ir_graph *(get_Block_irg)(const ir_node *block)
594 return get_Block_irg_(block);
597 ir_entity *create_Block_entity(ir_node *block)
600 assert(is_Block(block));
602 entity = block->attr.block.entity;
603 if (entity == NULL) {
604 ir_label_t nr = get_irp_next_label_nr();
605 entity = new_label_entity(nr);
606 set_entity_visibility(entity, ir_visibility_local);
607 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
608 set_entity_compiler_generated(entity, 1);
610 block->attr.block.entity = entity;
615 ir_node *(get_Block_phis)(const ir_node *block)
617 return get_Block_phis_(block);
620 void (set_Block_phis)(ir_node *block, ir_node *phi)
622 set_Block_phis_(block, phi);
625 void (add_Block_phi)(ir_node *block, ir_node *phi)
627 add_Block_phi_(block, phi);
630 unsigned (get_Block_mark)(const ir_node *block)
632 return get_Block_mark_(block);
635 void (set_Block_mark)(ir_node *block, unsigned mark)
637 set_Block_mark_(block, mark);
640 int get_End_n_keepalives(const ir_node *end)
643 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
646 ir_node *get_End_keepalive(const ir_node *end, int pos)
649 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
652 void add_End_keepalive(ir_node *end, ir_node *ka)
658 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
661 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
664 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
668 ir_graph *irg = get_irn_irg(end);
670 /* notify that edges are deleted */
671 for (e = END_KEEPALIVE_OFFSET; e < ARR_LEN(end->in) - 1; ++e) {
672 edges_notify_edge(end, e, NULL, end->in[e + 1], irg);
674 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
676 for (i = 0; i < n; ++i) {
677 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
678 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
681 /* update irg flags */
682 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
685 void remove_End_keepalive(ir_node *end, ir_node *irn)
687 int n = get_End_n_keepalives(end);
692 for (i = n - 1; i >= 0; --i) {
693 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
703 irg = get_irn_irg(end);
705 /* remove the edge */
706 edges_notify_edge(end, idx, NULL, irn, irg);
709 /* exchange with the last one */
710 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
711 edges_notify_edge(end, n - 1, NULL, old, irg);
712 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
713 edges_notify_edge(end, idx, old, NULL, irg);
715 /* now n - 1 keeps, 1 block input */
716 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
718 /* update irg flags */
719 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
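/*
 * Usage sketch (hypothetical): keep-alive edges are the usual way to protect
 * an otherwise unreachable part of the graph, e.g. an endless loop, from
 * being removed as dead code:
 *
 *     ir_node *end = get_irg_end(irg);
 *     add_End_keepalive(end, loop_block);
 *         ... later, once a regular user exists again:
 *     remove_End_keepalive(end, loop_block);
 */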
722 void remove_End_Bads_and_doublets(ir_node *end)
725 int idx, n = get_End_n_keepalives(end);
727 bool changed = false;
732 irg = get_irn_irg(end);
733 pset_new_init(&keeps);
735 for (idx = n - 1; idx >= 0; --idx) {
736 ir_node *ka = get_End_keepalive(end, idx);
738 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
740 /* remove the edge */
741 edges_notify_edge(end, idx, NULL, ka, irg);
744 /* exchange with the last one */
745 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
746 edges_notify_edge(end, n - 1, NULL, old, irg);
747 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
748 edges_notify_edge(end, idx, old, NULL, irg);
752 pset_new_insert(&keeps, ka);
755 /* n keeps, 1 block input */
756 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
758 pset_new_destroy(&keeps);
761 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
765 void free_End(ir_node *end)
770 end->in = NULL; /* @@@ make sure we get an error if we use the
771 in array afterwards ... */
774 size_t get_Return_n_ress(const ir_node *node)
776 assert(is_Return(node));
777 return (size_t)(get_irn_arity(node) - RETURN_RESULT_OFFSET);
780 ir_node **get_Return_res_arr(ir_node *node)
782 assert(is_Return(node));
783 if (get_Return_n_ress(node) > 0)
784 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
789 ir_node *get_Return_res(const ir_node *node, int pos)
791 assert(is_Return(node));
793 assert(get_Return_n_ress(node) > (size_t)pos);
794 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
797 void set_Return_res(ir_node *node, int pos, ir_node *res)
799 assert(is_Return(node));
800 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
803 int (is_Const_null)(const ir_node *node)
805 return is_Const_null_(node);
808 int (is_Const_one)(const ir_node *node)
810 return is_Const_one_(node);
813 int (is_Const_all_one)(const ir_node *node)
815 return is_Const_all_one_(node);
820 symconst_kind get_SymConst_kind(const ir_node *node)
822 assert(is_SymConst(node));
823 return node->attr.symc.kind;
826 void set_SymConst_kind(ir_node *node, symconst_kind kind)
828 assert(is_SymConst(node));
829 node->attr.symc.kind = kind;
832 ir_type *get_SymConst_type(const ir_node *node)
834 /* the cast here is annoying, but we have to compensate for
836 ir_node *irn = (ir_node *)node;
837 assert(is_SymConst(node) &&
838 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
839 return irn->attr.symc.sym.type_p;
842 void set_SymConst_type(ir_node *node, ir_type *tp)
844 assert(is_SymConst(node) &&
845 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
846 node->attr.symc.sym.type_p = tp;
849 ir_entity *get_SymConst_entity(const ir_node *node)
851 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
852 return node->attr.symc.sym.entity_p;
855 void set_SymConst_entity(ir_node *node, ir_entity *ent)
857 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
858 node->attr.symc.sym.entity_p = ent;
861 ir_enum_const *get_SymConst_enum(const ir_node *node)
863 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
864 return node->attr.symc.sym.enum_p;
867 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
869 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
870 node->attr.symc.sym.enum_p = ec;
873 union symconst_symbol
874 get_SymConst_symbol(const ir_node *node)
876 assert(is_SymConst(node));
877 return node->attr.symc.sym;
880 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
882 assert(is_SymConst(node));
883 node->attr.symc.sym = sym;
886 int get_Sel_n_indexs(const ir_node *node)
888 assert(is_Sel(node));
889 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
892 ir_node **get_Sel_index_arr(ir_node *node)
894 assert(is_Sel(node));
895 if (get_Sel_n_indexs(node) > 0)
896 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
901 ir_node *get_Sel_index(const ir_node *node, int pos)
903 assert(is_Sel(node));
904 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
907 void set_Sel_index(ir_node *node, int pos, ir_node *index)
909 assert(is_Sel(node));
910 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
913 ir_node **get_Call_param_arr(ir_node *node)
915 assert(is_Call(node));
916 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
919 size_t get_Call_n_params(const ir_node *node)
921 assert(is_Call(node));
922 return (size_t) (get_irn_arity(node) - CALL_PARAM_OFFSET);
925 ir_node *get_Call_param(const ir_node *node, int pos)
927 assert(is_Call(node));
928 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
931 void set_Call_param(ir_node *node, int pos, ir_node *param)
933 assert(is_Call(node));
934 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
937 ir_node **get_Builtin_param_arr(ir_node *node)
939 assert(is_Builtin(node));
940 return &get_irn_in(node)[BUILTIN_PARAM_OFFSET + 1];
943 int get_Builtin_n_params(const ir_node *node)
945 assert(is_Builtin(node));
946 return (get_irn_arity(node) - BUILTIN_PARAM_OFFSET);
949 ir_node *get_Builtin_param(const ir_node *node, int pos)
951 assert(is_Builtin(node));
952 return get_irn_n(node, pos + BUILTIN_PARAM_OFFSET);
955 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
957 assert(is_Builtin(node));
958 set_irn_n(node, pos + BUILTIN_PARAM_OFFSET, param);
961 const char *get_builtin_kind_name(ir_builtin_kind kind)
963 #define X(a) case a: return #a
967 X(ir_bk_return_address);
968 X(ir_bk_frame_address);
978 X(ir_bk_inner_trampoline);
985 int Call_has_callees(const ir_node *node)
987 assert(is_Call(node));
988 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
989 (node->attr.call.callee_arr != NULL));
992 size_t get_Call_n_callees(const ir_node *node)
994 assert(is_Call(node) && node->attr.call.callee_arr);
995 return ARR_LEN(node->attr.call.callee_arr);
998 ir_entity *get_Call_callee(const ir_node *node, size_t pos)
1000 assert(pos < get_Call_n_callees(node));
1001 return node->attr.call.callee_arr[pos];
1004 void set_Call_callee_arr(ir_node *node, size_t n, ir_entity ** arr)
1006 ir_graph *irg = get_irn_irg(node);
1008 assert(is_Call(node));
1009 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1010 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, irg->obst, n);
1012 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1015 void remove_Call_callee_arr(ir_node *node)
1017 assert(is_Call(node));
1018 node->attr.call.callee_arr = NULL;
1021 int is_Cast_upcast(ir_node *node)
1023 ir_type *totype = get_Cast_type(node);
1024 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1026 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1029 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1030 totype = get_pointer_points_to_type(totype);
1031 fromtype = get_pointer_points_to_type(fromtype);
1036 if (!is_Class_type(totype)) return 0;
1037 return is_SubClass_of(fromtype, totype);
1040 int is_Cast_downcast(ir_node *node)
1042 ir_type *totype = get_Cast_type(node);
1043 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1045 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1048 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1049 totype = get_pointer_points_to_type(totype);
1050 fromtype = get_pointer_points_to_type(fromtype);
1055 if (!is_Class_type(totype)) return 0;
1056 return is_SubClass_of(totype, fromtype);
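/* Illustrative example: with class types Shape and Circle, Circle being a
 * subclass of Shape, a Cast from Circle to Shape is an upcast
 * (is_SubClass_of(fromtype, totype) above), whereas the reverse Cast from
 * Shape to Circle is a downcast (is_SubClass_of(totype, fromtype)). Matching
 * pointer levels are peeled off pairwise before the class check. */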
1059 int (is_unop)(const ir_node *node)
1061 return is_unop_(node);
1064 ir_node *get_unop_op(const ir_node *node)
1066 if (node->op->opar == oparity_unary)
1067 return get_irn_n(node, node->op->op_index);
1069 assert(node->op->opar == oparity_unary);
1073 void set_unop_op(ir_node *node, ir_node *op)
1075 if (node->op->opar == oparity_unary)
1076 set_irn_n(node, node->op->op_index, op);
1078 assert(node->op->opar == oparity_unary);
1081 int (is_binop)(const ir_node *node)
1083 return is_binop_(node);
1086 ir_node *get_binop_left(const ir_node *node)
1088 assert(node->op->opar == oparity_binary);
1089 return get_irn_n(node, node->op->op_index);
1092 void set_binop_left(ir_node *node, ir_node *left)
1094 assert(node->op->opar == oparity_binary);
1095 set_irn_n(node, node->op->op_index, left);
1098 ir_node *get_binop_right(const ir_node *node)
1100 assert(node->op->opar == oparity_binary);
1101 return get_irn_n(node, node->op->op_index + 1);
1104 void set_binop_right(ir_node *node, ir_node *right)
1106 assert(node->op->opar == oparity_binary);
1107 set_irn_n(node, node->op->op_index + 1, right);
1110 int is_Phi0(const ir_node *n)
1114 return ((get_irn_op(n) == op_Phi) &&
1115 (get_irn_arity(n) == 0) &&
1116 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1119 ir_node **get_Phi_preds_arr(ir_node *node)
1121 assert(is_Phi(node));
1122 return (ir_node **)&(get_irn_in(node)[1]);
1125 int get_Phi_n_preds(const ir_node *node)
1127 assert(is_Phi(node) || is_Phi0(node));
1128 return (get_irn_arity(node));
1131 ir_node *get_Phi_pred(const ir_node *node, int pos)
1133 assert(is_Phi(node) || is_Phi0(node));
1134 return get_irn_n(node, pos);
1137 void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1139 assert(is_Phi(node) || is_Phi0(node));
1140 set_irn_n(node, pos, pred);
1143 ir_node *(get_Phi_next)(const ir_node *phi)
1145 return get_Phi_next_(phi);
1148 void (set_Phi_next)(ir_node *phi, ir_node *next)
1150 set_Phi_next_(phi, next);
1153 int is_memop(const ir_node *node)
1155 return is_op_uses_memory(get_irn_op(node));
1158 ir_node *get_memop_mem(const ir_node *node)
1160 const ir_op *op = get_irn_op(node);
1161 assert(is_memop(node));
1162 return get_irn_n(node, op->memory_index);
1165 void set_memop_mem(ir_node *node, ir_node *mem)
1167 const ir_op *op = get_irn_op(node);
1168 assert(is_memop(node));
1169 set_irn_n(node, op->memory_index, mem);
1172 ir_node **get_Sync_preds_arr(ir_node *node)
1174 assert(is_Sync(node));
1175 return (ir_node **)&(get_irn_in(node)[1]);
1178 int get_Sync_n_preds(const ir_node *node)
1180 assert(is_Sync(node));
1181 return (get_irn_arity(node));
1184 ir_node *get_Sync_pred(const ir_node *node, int pos)
1186 assert(is_Sync(node));
1187 return get_irn_n(node, pos);
1190 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
1192 assert(is_Sync(node));
1193 set_irn_n(node, pos, pred);
1196 void add_Sync_pred(ir_node *node, ir_node *pred)
1198 assert(is_Sync(node));
1199 add_irn_n(node, pred);
1202 int (is_arg_Proj)(const ir_node *node)
1204 return is_arg_Proj_(node);
1207 int is_x_except_Proj(const ir_node *node)
1212 pred = get_Proj_pred(node);
1213 if (!is_fragile_op(pred))
1215 return get_Proj_proj(node) == pred->op->pn_x_except;
1218 int is_x_regular_Proj(const ir_node *node)
1223 pred = get_Proj_pred(node);
1224 if (!is_fragile_op(pred))
1226 return get_Proj_proj(node) == pred->op->pn_x_regular;
1229 void ir_set_throws_exception(ir_node *node, int throws_exception)
1231 except_attr *attr = &node->attr.except;
1232 assert(is_fragile_op(node));
1233 attr->throws_exception = throws_exception;
1236 int ir_throws_exception(const ir_node *node)
1238 const except_attr *attr = &node->attr.except;
1239 assert(is_fragile_op(node));
1240 return attr->throws_exception;
1243 ir_node **get_Tuple_preds_arr(ir_node *node)
1245 assert(is_Tuple(node));
1246 return (ir_node **)&(get_irn_in(node)[1]);
1249 int get_Tuple_n_preds(const ir_node *node)
1251 assert(is_Tuple(node));
1252 return get_irn_arity(node);
1255 ir_node *get_Tuple_pred(const ir_node *node, int pos)
1257 assert(is_Tuple(node));
1258 return get_irn_n(node, pos);
1261 void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
1263 assert(is_Tuple(node));
1264 set_irn_n(node, pos, pred);
1267 size_t get_ASM_n_input_constraints(const ir_node *node)
1269 assert(is_ASM(node));
1270 return ARR_LEN(node->attr.assem.input_constraints);
1273 size_t get_ASM_n_output_constraints(const ir_node *node)
1275 assert(is_ASM(node));
1276 return ARR_LEN(node->attr.assem.output_constraints);
1279 size_t get_ASM_n_clobbers(const ir_node *node)
1281 assert(is_ASM(node));
1282 return ARR_LEN(node->attr.assem.clobbers);
1285 ir_graph *(get_irn_irg)(const ir_node *node)
1287 return get_irn_irg_(node);
1290 ir_node *skip_Proj(ir_node *node)
1292 /* don't assert node !!! */
1297 node = get_Proj_pred(node);
1303 skip_Proj_const(const ir_node *node)
1305 /* don't assert node !!! */
1310 node = get_Proj_pred(node);
1315 ir_node *skip_Tuple(ir_node *node)
1320 if (is_Proj(node)) {
1321 pred = get_Proj_pred(node);
1323 if (is_Proj(pred)) { /* nested Tuple ? */
1324 pred = skip_Tuple(pred);
1326 if (is_Tuple(pred)) {
1327 node = get_Tuple_pred(pred, get_Proj_proj(node));
1330 } else if (is_Tuple(pred)) {
1331 node = get_Tuple_pred(pred, get_Proj_proj(node));
1338 ir_node *skip_Cast(ir_node *node)
1341 return get_Cast_op(node);
1345 const ir_node *skip_Cast_const(const ir_node *node)
1348 return get_Cast_op(node);
1352 ir_node *skip_Pin(ir_node *node)
1355 return get_Pin_op(node);
1359 ir_node *skip_Confirm(ir_node *node)
1361 if (is_Confirm(node))
1362 return get_Confirm_value(node);
1366 ir_node *skip_HighLevel_ops(ir_node *node)
1368 while (is_op_highlevel(get_irn_op(node))) {
1369 node = get_irn_n(node, 0);
1375 ir_node *skip_Id(ir_node *node)
1377 /* This should compact Id-cycles to self-cycles. It has the same (or lower?) complexity
1378 * as any other approach, as Id chains are resolved so that everything points to the real node,
1379 * or all Ids are self loops.
1381 * Note: This function accounts for about 10% of almost any compiler run, so it is
1382 * a little bit "hand optimized".
1385 /* don't assert node !!! */
1387 if (!node || (node->op != op_Id)) return node;
1389 /* Don't use get_Id_pred(): We get into an endless loop for
1390 self-referencing Ids. */
1391 pred = node->in[0+1];
1393 if (pred->op != op_Id) return pred;
1395 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1396 ir_node *rem_pred, *res;
1398 if (pred->op != op_Id) return pred; /* shortcut */
1401 assert(get_irn_arity (node) > 0);
1403 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1404 res = skip_Id(rem_pred);
1405 if (is_Id(res)) /* self-loop */ return node;
1407 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
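/* Worked example: for a chain of Id nodes  a -> b -> c -> r  (r not an Id),
 * skip_Id(a) recursively rewrites the Id inputs so that afterwards every Id
 * in the chain points directly at r (path compression) and r is returned;
 * a pure Id cycle is collapsed into a self loop and the queried node itself
 * is returned instead. */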
1414 int (is_strictConv)(const ir_node *node)
1416 return is_strictConv_(node);
1419 int (is_SymConst_addr_ent)(const ir_node *node)
1421 return is_SymConst_addr_ent_(node);
1424 int is_cfop(const ir_node *node)
1426 if (is_fragile_op(node) && ir_throws_exception(node))
1429 return is_op_cfopcode(get_irn_op(node));
1432 int is_unknown_jump(const ir_node *node)
1434 return is_op_unknown_jump(get_irn_op(node));
1437 int is_fragile_op(const ir_node *node)
1439 return is_op_fragile(get_irn_op(node));
1442 int (is_irn_forking)(const ir_node *node)
1444 return is_irn_forking_(node);
1447 void (copy_node_attr)(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
1449 copy_node_attr_(irg, old_node, new_node);
1452 ir_type *(get_irn_type_attr)(ir_node *node)
1454 return get_irn_type_attr_(node);
1457 ir_entity *(get_irn_entity_attr)(ir_node *node)
1459 return get_irn_entity_attr_(node);
1462 int (is_irn_constlike)(const ir_node *node)
1464 return is_irn_constlike_(node);
1467 int (is_irn_keep)(const ir_node *node)
1469 return is_irn_keep_(node);
1472 int (is_irn_start_block_placed)(const ir_node *node)
1474 return is_irn_start_block_placed_(node);
1477 int (is_irn_cse_neutral)(const ir_node *node)
1479 return is_irn_cse_neutral_(node);
1482 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
1484 #define X(a) case a: return #a
1486 X(COND_JMP_PRED_NONE);
1487 X(COND_JMP_PRED_TRUE);
1488 X(COND_JMP_PRED_FALSE);
1494 /** Return the attribute type of a SymConst node if it exists */
1495 static ir_type *get_SymConst_attr_type(const ir_node *self)
1497 symconst_kind kind = get_SymConst_kind(self);
1498 if (SYMCONST_HAS_TYPE(kind))
1499 return get_SymConst_type(self);
1503 /** Return the attribute entity of a SymConst node if it exists */
1504 static ir_entity *get_SymConst_attr_entity(const ir_node *self)
1506 symconst_kind kind = get_SymConst_kind(self);
1507 if (SYMCONST_HAS_ENT(kind))
1508 return get_SymConst_entity(self);
1512 /** The get_type_attr operation must always be implemented */
1513 static ir_type *get_Null_type(const ir_node *n)
1516 return firm_unknown_type;
1519 void firm_set_default_get_type_attr(unsigned code, ir_op_ops *ops)
1522 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
1523 case iro_Builtin: ops->get_type_attr = get_Builtin_type; break;
1524 case iro_Call: ops->get_type_attr = get_Call_type; break;
1525 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
1526 case iro_CopyB: ops->get_type_attr = get_CopyB_type; break;
1527 case iro_Free: ops->get_type_attr = get_Free_type; break;
1528 case iro_InstOf: ops->get_type_attr = get_InstOf_type; break;
1529 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
1531 /* not allowed to be NULL */
1532 if (! ops->get_type_attr)
1533 ops->get_type_attr = get_Null_type;
1538 /** The get_entity_attr operation must always be implemented */
1539 static ir_entity *get_Null_ent(const ir_node *n)
1545 void firm_set_default_get_entity_attr(unsigned code, ir_op_ops *ops)
1548 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
1549 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
1550 case iro_Block: ops->get_entity_attr = get_Block_entity; break;
1552 /* not allowed to be NULL */
1553 if (! ops->get_entity_attr)
1554 ops->get_entity_attr = get_Null_ent;
1559 void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
1561 set_irn_dbg_info_(n, db);
1564 dbg_info *(get_irn_dbg_info)(const ir_node *n)
1566 return get_irn_dbg_info_(n);
1569 ir_switch_table *ir_new_switch_table(ir_graph *irg, size_t n_entries)
1571 struct obstack *obst = get_irg_obstack(irg);
1572 ir_switch_table *res = OALLOCFZ(obst, ir_switch_table, entries, n_entries);
1573 res->n_entries = n_entries;
1577 void ir_switch_table_set(ir_switch_table *table, size_t n,
1578 ir_tarval *min, ir_tarval *max, long pn)
1580 ir_switch_table_entry *entry = ir_switch_table_get_entry(table, n);
1586 size_t (ir_switch_table_get_n_entries)(const ir_switch_table *table)
1588 return ir_switch_table_get_n_entries_(table);
1591 ir_tarval *ir_switch_table_get_max(const ir_switch_table *table, size_t e)
1593 return ir_switch_table_get_entry_const(table, e)->max;
1596 ir_tarval *ir_switch_table_get_min(const ir_switch_table *table, size_t e)
1598 return ir_switch_table_get_entry_const(table, e)->min;
1601 long ir_switch_table_get_pn(const ir_switch_table *table, size_t e)
1603 return ir_switch_table_get_entry_const(table, e)->pn;
1606 ir_switch_table *ir_switch_table_duplicate(ir_graph *irg,
1607 const ir_switch_table *table)
1609 size_t n_entries = ir_switch_table_get_n_entries(table);
1611 ir_switch_table *res = ir_new_switch_table(irg, n_entries);
1612 for (e = 0; e < n_entries; ++e) {
1613 const ir_switch_table_entry *entry
1614 = ir_switch_table_get_entry_const(table, e);
1615 ir_switch_table_entry *new_entry = ir_switch_table_get_entry(res, e);
1616 *new_entry = *entry;
1621 unsigned firm_default_hash(const ir_node *node)
1626 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
1627 h = irn_arity = get_irn_arity(node);
1629 /* consider all in nodes... except the block if the node is not a control flow op. */
1630 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
1631 ir_node *pred = get_irn_n(node, i);
1632 if (is_irn_cse_neutral(pred))
1635 h = 9*h + HASH_PTR(pred);
1639 h = 9*h + HASH_PTR(get_irn_mode(node));
1641 h = 9*h + HASH_PTR(get_irn_op(node));
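/* Predecessors for which is_irn_cse_neutral() holds do not contribute their
 * identity to the hash, so a node hashes to the same value as a copy that
 * differs only in such CSE-neutral inputs; this keeps value numbering from
 * spreading equivalent nodes over different hash buckets. */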
1646 /* include generated code */
1647 #include "gen_irnode.c.inl"