/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 *
 * @brief  Representation of an intermediate operation.
 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
 */
18 #include "irgraph_t.h"
20 #include "irbackedge_t.h"
24 #include "iredgekinds.h"
25 #include "iredges_t.h"
/* some constants fixing the positions of nodes predecessors in different cases */
37 #define END_KEEPALIVE_OFFSET 0
/* Human-readable names for ir_relation values; indexed directly by the
 * relation value (see get_relation_string()). Entries must stay in
 * ir_relation order. */
static const char *relation_names [] = {
	"unordered_less_equal",
	"unordered_greater_equal",
58 const char *get_relation_string(ir_relation relation)
60 assert(relation < (ir_relation)ARRAY_SIZE(relation_names));
61 return relation_names[relation];
64 ir_relation get_negated_relation(ir_relation relation)
66 return relation ^ ir_relation_true;
69 ir_relation get_inversed_relation(ir_relation relation)
71 ir_relation code = relation & ~(ir_relation_less|ir_relation_greater);
72 bool less = relation & ir_relation_less;
73 bool greater = relation & ir_relation_greater;
74 code |= (less ? ir_relation_greater : ir_relation_false)
75 | (greater ? ir_relation_less : ir_relation_false);
/* Creates a new ir node: allocates the node (attributes zero-initialized)
 * on the graph's obstack, registers a node index, sets up the in array
 * (slot 0 is reserved for the block), initializes out-edge bookkeeping
 * and fires the new-node hook. */
ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
                     ir_mode *mode, int arity, ir_node *const *in)
/* node size = common header plus op-specific attribute area */
size_t const node_size = offsetof(ir_node, attr) + op->attr_size;
ir_node *const res = (ir_node*)OALLOCNZ(get_irg_obstack(irg), char, node_size);

res->kind = k_ir_node;
res->node_idx = irg_register_node_idx(irg, res);
/* dynamic arity: the in array grows later */
res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
/* not nice but necessary: End and Sync must always have a flexible array */
if (op == op_End || op == op_Sync)
	res->in = NEW_ARR_F(ir_node *, (arity+1));
/* fixed arity: allocate the in array on the obstack and copy the preds */
res->in = NEW_ARR_D(ir_node*, get_irg_obstack(irg), arity + 1);
memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
set_irn_dbg_info(res, db);
res->node_nr = get_irp_new_node_nr();
for (i = 0; i < EDGE_KIND_LAST; ++i) {
	INIT_LIST_HEAD(&res->edge_info[i].outs_head);
	/* edges will be build immediately */
	res->edge_info[i].edges_built = 1;
	res->edge_info[i].out_count = 0;
/* don't put this into the for loop, arity is -1 for some nodes! */
edges_notify_edge(res, -1, block, NULL, irg);
for (i = 1; i <= arity; ++i)
	edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
hook_new_node(irg, res);
/* backend graphs carry extra per-node info */
if (irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_BACKEND)) {
	be_info_new_node(irg, res);
135 int (is_ir_node)(const void *thing)
137 return is_ir_node_(thing);
140 int (get_irn_arity)(const ir_node *node)
142 return get_irn_arity_(node);
/* Returns the complete in array of a node (slot 0 holds the block).
 * NOTE(review): the function body is not visible in this chunk. */
ir_node **get_irn_in(const ir_node *node)
150 void set_irn_in(ir_node *const node, int const arity, ir_node *const *const in)
154 ir_graph *irg = get_irn_irg(node);
159 assert(node != NULL && node->kind == k_ir_node);
161 for (i = 0; i < arity; ++i) {
162 assert(in[i] != NULL && in[0]->kind == k_ir_node);
166 for (i = 0; i < arity; i++) {
167 if (i < (int)ARR_LEN(*pOld_in)-1)
168 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
170 edges_notify_edge(node, i, in[i], NULL, irg);
172 for (;i < (int)ARR_LEN(*pOld_in)-1; i++) {
173 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
176 if (arity != (int)ARR_LEN(*pOld_in) - 1) {
177 ir_node * block = (*pOld_in)[0];
178 *pOld_in = NEW_ARR_D(ir_node*, get_irg_obstack(irg), arity + 1);
179 (*pOld_in)[0] = block;
181 fix_backedges(get_irg_obstack(irg), node);
183 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
185 /* update irg flags */
186 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
189 ir_node *(get_irn_n)(const ir_node *node, int n)
191 return get_irn_n_(node, n);
194 void set_irn_n(ir_node *node, int n, ir_node *in)
196 ir_graph *irg = get_irn_irg(node);
197 assert(node && node->kind == k_ir_node);
199 assert(n < get_irn_arity(node));
200 assert(in && in->kind == k_ir_node);
203 hook_set_irn_n(node, n, in, node->in[n + 1]);
205 /* Here, we rely on src and tgt being in the current ir graph */
206 edges_notify_edge(node, n, in, node->in[n + 1], irg);
208 node->in[n + 1] = in;
210 /* update irg flags */
211 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
214 int add_irn_n(ir_node *node, ir_node *in)
217 ir_graph *irg = get_irn_irg(node);
219 assert(node->op->opar == oparity_dynamic);
220 pos = ARR_LEN(node->in) - 1;
221 ARR_APP1(ir_node *, node->in, in);
222 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
225 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
227 /* update irg flags */
228 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
/* Removes predecessor n from a dynamic-arity node: informs the edge
 * machinery, moves the last predecessor into the freed slot and shrinks
 * the in array by one.
 * NOTE(review): the guard around the swap (for n being the last slot)
 * and the slot assignment are not visible in this chunk — verify. */
static void del_irn_n(ir_node *node, int n)
ir_graph *irg = get_irn_irg(node);

/* remove the edge */
ir_node *pred = node->in[n+1];
edges_notify_edge(node, n, NULL, pred, irg);

int arity = get_irn_arity(node);

/* exchange with the last one */
ir_node *old = node->in[arity];
edges_notify_edge(node, arity-1, NULL, old, irg);
edges_notify_edge(node, n, old, NULL, irg);
ARR_SHRINKLEN(node->in, arity);

/* update irg flags */
clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
/* Removes input i from a Sync node (delegates to the dynamic-arity
 * machinery; body not visible in this chunk). */
void del_Sync_n(ir_node *n, int i)
260 int (get_irn_deps)(const ir_node *node)
262 return get_irn_deps_(node);
265 ir_node *(get_irn_dep)(const ir_node *node, int pos)
267 return get_irn_dep_(node, pos);
270 void set_irn_dep(ir_node *node, int pos, ir_node *dep)
275 assert(node->deps && "dependency array node yet allocated. use add_irn_dep()");
276 assert(pos >= 0 && pos < (int)ARR_LEN(node->deps) && "dependency index out of range");
278 old = node->deps[pos];
279 node->deps[pos] = dep;
280 irg = get_irn_irg(node);
281 if (edges_activated_kind(irg, EDGE_KIND_DEP))
282 edges_notify_edge_kind(node, pos, dep, old, EDGE_KIND_DEP, irg);
285 void add_irn_dep(ir_node *node, ir_node *dep)
289 if (node->deps == NULL) {
290 node->deps = NEW_ARR_F(ir_node *, 0);
292 ARR_APP1(ir_node*, node->deps, dep);
293 irg = get_irn_irg(node);
294 if (edges_activated_kind(irg, EDGE_KIND_DEP))
295 edges_notify_edge_kind(node, ARR_LEN(node->deps)-1, dep, NULL, EDGE_KIND_DEP, irg);
/* Removes dependency dep from a node's dependency array: the last entry
 * is moved into the freed slot and the array is shrunk by one; the edge
 * machinery is informed about the removed dep edge.
 * NOTE(review): declarations and loop-exit handling (break after the
 * first match) are not visible in this chunk. */
void delete_irn_dep(ir_node *node, ir_node *dep)
	/* nothing to do when no dependencies were ever added */
	if (node->deps == NULL)

	n_deps = ARR_LEN(node->deps);
	for (i = 0; i < n_deps; ++i) {
		if (node->deps[i] == dep) {
			/* overwrite the slot with the last dependency, then shrink */
			set_irn_dep(node, i, node->deps[n_deps-1]);
			edges_notify_edge(node, i, NULL, dep, get_irn_irg(node));
			ARR_SHRINKLEN(node->deps, n_deps-1);
316 void add_irn_deps(ir_node *tgt, ir_node *src)
320 for (i = 0, n = get_irn_deps(src); i < n; ++i)
321 add_irn_dep(tgt, get_irn_dep(src, i));
325 ir_mode *(get_irn_mode)(const ir_node *node)
327 return get_irn_mode_(node);
330 void (set_irn_mode)(ir_node *node, ir_mode *mode)
332 set_irn_mode_(node, mode);
335 ir_op *(get_irn_op)(const ir_node *node)
337 return get_irn_op_(node);
340 void (set_irn_op)(ir_node *node, ir_op *op)
342 set_irn_op_(node, op);
345 unsigned (get_irn_opcode)(const ir_node *node)
347 return get_irn_opcode_(node);
350 const char *get_irn_opname(const ir_node *node)
352 return get_id_str(node->op->name);
355 ident *get_irn_opident(const ir_node *node)
358 return node->op->name;
361 ir_visited_t (get_irn_visited)(const ir_node *node)
363 return get_irn_visited_(node);
366 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
368 set_irn_visited_(node, visited);
371 void (mark_irn_visited)(ir_node *node)
373 mark_irn_visited_(node);
376 int (irn_visited)(const ir_node *node)
378 return irn_visited_(node);
381 int (irn_visited_else_mark)(ir_node *node)
383 return irn_visited_else_mark_(node);
386 void (set_irn_link)(ir_node *node, void *link)
388 set_irn_link_(node, link);
391 void *(get_irn_link)(const ir_node *node)
393 return get_irn_link_(node);
396 op_pin_state (get_irn_pinned)(const ir_node *node)
398 return get_irn_pinned_(node);
401 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
403 return is_irn_pinned_in_irg_(node);
/* Sets the pin state of a node whose op is at least exception-pinned.
 * NOTE(review): the early-out for nodes that optimization turned into a
 * Tuple is not visible in this chunk — confirm before relying on it. */
void set_irn_pinned(ir_node *node, op_pin_state state)
	/* due to optimization an opt may be turned into a Tuple */

	assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
	assert(state == op_pin_state_pinned || state == op_pin_state_floats);

	/* pin state lives in the common exception attribute */
	node->attr.except.pin_state = state;
418 long get_irn_node_nr(const ir_node *node)
421 return node->node_nr;
424 void *(get_irn_generic_attr)(ir_node *node)
426 assert(is_ir_node(node));
427 return get_irn_generic_attr_(node);
430 const void *(get_irn_generic_attr_const)(const ir_node *node)
432 assert(is_ir_node(node));
433 return get_irn_generic_attr_const_(node);
436 unsigned (get_irn_idx)(const ir_node *node)
438 assert(is_ir_node(node));
439 return get_irn_idx_(node);
442 ir_node *(get_nodes_block)(const ir_node *node)
444 return get_nodes_block_(node);
447 void set_nodes_block(ir_node *node, ir_node *block)
449 assert(!is_Block(node));
450 set_irn_n(node, -1, block);
453 ir_type *is_frame_pointer(const ir_node *n)
455 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
456 ir_node *start = get_Proj_pred(n);
457 if (is_Start(start)) {
458 return get_irg_frame_type(get_irn_irg(start));
464 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
468 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
469 if (get_Block_cfgpred_block(block, i) == pred)
475 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
477 return get_Block_cfgpred_block_(node, pos);
480 int get_Block_matured(const ir_node *node)
482 assert(is_Block(node));
483 return (int)node->attr.block.is_matured;
486 void set_Block_matured(ir_node *node, int matured)
488 assert(is_Block(node));
489 node->attr.block.is_matured = matured;
492 ir_visited_t (get_Block_block_visited)(const ir_node *node)
494 return get_Block_block_visited_(node);
497 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
499 set_Block_block_visited_(node, visit);
502 void (mark_Block_block_visited)(ir_node *node)
504 mark_Block_block_visited_(node);
507 int (Block_block_visited)(const ir_node *node)
509 return Block_block_visited_(node);
512 ir_graph *(get_Block_irg)(const ir_node *block)
514 return get_Block_irg_(block);
517 ir_entity *create_Block_entity(ir_node *block)
520 assert(is_Block(block));
522 entity = block->attr.block.entity;
523 if (entity == NULL) {
524 ir_label_t nr = get_irp_next_label_nr();
525 entity = new_label_entity(nr);
526 set_entity_visibility(entity, ir_visibility_local);
527 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
528 set_entity_compiler_generated(entity, 1);
530 block->attr.block.entity = entity;
535 ir_node *(get_Block_phis)(const ir_node *block)
537 return get_Block_phis_(block);
540 void (set_Block_phis)(ir_node *block, ir_node *phi)
542 set_Block_phis_(block, phi);
545 void (add_Block_phi)(ir_node *block, ir_node *phi)
547 add_Block_phi_(block, phi);
550 unsigned (get_Block_mark)(const ir_node *block)
552 return get_Block_mark_(block);
555 void (set_Block_mark)(ir_node *block, unsigned mark)
557 set_Block_mark_(block, mark);
/* Adds keep-alive edge ka to the End node (body not visible here). */
void add_End_keepalive(ir_node *end, ir_node *ka)
566 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
570 ir_graph *irg = get_irn_irg(end);
572 /* notify that edges are deleted */
573 for (e = END_KEEPALIVE_OFFSET; e < ARR_LEN(end->in) - 1; ++e) {
574 edges_notify_edge(end, e, NULL, end->in[e + 1], irg);
576 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
578 for (i = 0; i < n; ++i) {
579 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
580 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
583 /* update irg flags */
584 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
/* Removes keep-alive edge irn from the End node.
 * NOTE(review): the search loop over the keep-alive slots and the
 * actual removal are not visible in this chunk — idx appears to be the
 * in-array position of the matched keep-alive; confirm. */
void remove_End_keepalive(ir_node *end, ir_node *irn)
	int n = get_End_n_keepalives(end);
	/* candidate keep-alive at position i */
	ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
	idx = END_KEEPALIVE_OFFSET + i;
/* Removes Bad, NoMem and duplicate keep-alives from the End node,
 * using a pointer set to detect doublets.
 * NOTE(review): the early-out for n <= 0, the "changed" bookkeeping and
 * the conditional property invalidation are not fully visible here. */
void remove_End_Bads_and_doublets(ir_node *end)
	int idx, n = get_End_n_keepalives(end);
	bool changed = false;

	irg = get_irn_irg(end);
	pset_new_init(&keeps);

	/* iterate backwards so removal by index stays valid */
	for (idx = n - 1; idx >= 0; --idx) {
		ir_node *ka = get_End_keepalive(end, idx);

		/* drop useless or already-seen keep-alives */
		if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
			del_irn_n(end, idx - END_KEEPALIVE_OFFSET);
		/* remember this keep-alive to filter later doublets */
		pset_new_insert(&keeps, ka);

	pset_new_destroy(&keeps);

	clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
/* Frees the data allocated by the End node so the node is inoperable
 * afterwards. NOTE(review): the freeing of the in array itself is not
 * visible in this chunk. */
void free_End(ir_node *end)
	end->in = NULL; /* @@@ make sure we get an error if we use the
	                   in array afterwards ... */
647 int (is_Const_null)(const ir_node *node)
649 return is_Const_null_(node);
652 int (is_Const_one)(const ir_node *node)
654 return is_Const_one_(node);
657 int (is_Const_all_one)(const ir_node *node)
659 return is_Const_all_one_(node);
664 symconst_kind get_SymConst_kind(const ir_node *node)
666 assert(is_SymConst(node));
667 return node->attr.symc.kind;
670 void set_SymConst_kind(ir_node *node, symconst_kind kind)
672 assert(is_SymConst(node));
673 node->attr.symc.kind = kind;
676 ir_type *get_SymConst_type(const ir_node *node)
678 /* the cast here is annoying, but we have to compensate for
680 ir_node *irn = (ir_node *)node;
681 assert(is_SymConst(node) &&
682 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
683 return irn->attr.symc.sym.type_p;
686 void set_SymConst_type(ir_node *node, ir_type *tp)
688 assert(is_SymConst(node) &&
689 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
690 node->attr.symc.sym.type_p = tp;
693 ir_entity *get_SymConst_entity(const ir_node *node)
695 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
696 return node->attr.symc.sym.entity_p;
699 void set_SymConst_entity(ir_node *node, ir_entity *ent)
701 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
702 node->attr.symc.sym.entity_p = ent;
705 ir_enum_const *get_SymConst_enum(const ir_node *node)
707 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
708 return node->attr.symc.sym.enum_p;
711 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
713 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
714 node->attr.symc.sym.enum_p = ec;
717 union symconst_symbol
718 get_SymConst_symbol(const ir_node *node)
720 assert(is_SymConst(node));
721 return node->attr.symc.sym;
724 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
726 assert(is_SymConst(node));
727 node->attr.symc.sym = sym;
/* Maps an ir_builtin_kind value to the name of its enum constant.
 * NOTE(review): the switch statement skeleton, the remaining X()
 * entries and the #undef are not visible in this chunk. */
const char *get_builtin_kind_name(ir_builtin_kind kind)
#define X(a) case a: return #a
	X(ir_bk_return_address);
	X(ir_bk_frame_address);
	X(ir_bk_inner_trampoline);
754 int Call_has_callees(const ir_node *node)
756 assert(is_Call(node));
757 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
758 (node->attr.call.callee_arr != NULL));
761 size_t get_Call_n_callees(const ir_node *node)
763 assert(is_Call(node) && node->attr.call.callee_arr);
764 return ARR_LEN(node->attr.call.callee_arr);
767 ir_entity *get_Call_callee(const ir_node *node, size_t pos)
769 assert(pos < get_Call_n_callees(node));
770 return node->attr.call.callee_arr[pos];
773 void set_Call_callee_arr(ir_node *node, size_t n, ir_entity ** arr)
775 assert(is_Call(node));
776 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
777 ir_graph *const irg = get_irn_irg(node);
778 node->attr.call.callee_arr = NEW_ARR_D(ir_entity*, get_irg_obstack(irg), n);
780 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
783 void remove_Call_callee_arr(ir_node *node)
785 assert(is_Call(node));
786 node->attr.call.callee_arr = NULL;
789 int (is_unop)(const ir_node *node)
791 return is_unop_(node);
794 ir_node *get_unop_op(const ir_node *node)
796 if (node->op->opar == oparity_unary)
797 return get_irn_n(node, node->op->op_index);
799 assert(node->op->opar == oparity_unary);
803 void set_unop_op(ir_node *node, ir_node *op)
805 if (node->op->opar == oparity_unary)
806 set_irn_n(node, node->op->op_index, op);
808 assert(node->op->opar == oparity_unary);
811 int (is_binop)(const ir_node *node)
813 return is_binop_(node);
816 ir_node *get_binop_left(const ir_node *node)
818 assert(node->op->opar == oparity_binary);
819 return get_irn_n(node, node->op->op_index);
822 void set_binop_left(ir_node *node, ir_node *left)
824 assert(node->op->opar == oparity_binary);
825 set_irn_n(node, node->op->op_index, left);
828 ir_node *get_binop_right(const ir_node *node)
830 assert(node->op->opar == oparity_binary);
831 return get_irn_n(node, node->op->op_index + 1);
834 void set_binop_right(ir_node *node, ir_node *right)
836 assert(node->op->opar == oparity_binary);
837 set_irn_n(node, node->op->op_index + 1, right);
840 ir_node *(get_Phi_next)(const ir_node *phi)
842 return get_Phi_next_(phi);
845 void (set_Phi_next)(ir_node *phi, ir_node *next)
847 set_Phi_next_(phi, next);
850 int is_memop(const ir_node *node)
852 return is_op_uses_memory(get_irn_op(node));
855 ir_node *get_memop_mem(const ir_node *node)
857 const ir_op *op = get_irn_op(node);
858 assert(is_memop(node));
859 return get_irn_n(node, op->memory_index);
862 void set_memop_mem(ir_node *node, ir_node *mem)
864 const ir_op *op = get_irn_op(node);
865 assert(is_memop(node));
866 set_irn_n(node, op->memory_index, mem);
869 void add_Sync_pred(ir_node *node, ir_node *pred)
871 assert(is_Sync(node));
872 add_irn_n(node, pred);
875 int (is_arg_Proj)(const ir_node *node)
877 return is_arg_Proj_(node);
880 int is_x_except_Proj(const ir_node *node)
885 pred = get_Proj_pred(node);
886 if (!is_fragile_op(pred))
888 return get_Proj_proj(node) == pred->op->pn_x_except;
891 int is_x_regular_Proj(const ir_node *node)
896 pred = get_Proj_pred(node);
897 if (!is_fragile_op(pred))
899 return get_Proj_proj(node) == pred->op->pn_x_regular;
902 void ir_set_throws_exception(ir_node *node, int throws_exception)
904 except_attr *attr = &node->attr.except;
905 assert(is_fragile_op(node));
906 attr->throws_exception = throws_exception;
909 int ir_throws_exception(const ir_node *node)
911 const except_attr *attr = &node->attr.except;
912 assert(is_fragile_op(node));
913 return attr->throws_exception;
916 size_t get_ASM_n_output_constraints(const ir_node *node)
918 assert(is_ASM(node));
919 return ARR_LEN(node->attr.assem.output_constraints);
922 size_t get_ASM_n_clobbers(const ir_node *node)
924 assert(is_ASM(node));
925 return ARR_LEN(node->attr.assem.clobbers);
928 ir_graph *(get_irn_irg)(const ir_node *node)
930 return get_irn_irg_(node);
/* Skips a Proj node and returns its predecessor.
 * NOTE(review): the is_Proj() guard around the unwrap is not visible
 * in this chunk. */
ir_node *skip_Proj(ir_node *node)
	/* don't assert node !!! */
	node = get_Proj_pred(node);

/* Const variant of skip_Proj(); return type is on an elided line. */
skip_Proj_const(const ir_node *node)
	/* don't assert node !!! */
	node = get_Proj_pred(node);

/* Resolves Proj(Tuple) chains: a Proj whose predecessor is a Tuple is
 * replaced by the selected Tuple operand; nested Tuples are skipped
 * recursively.
 * NOTE(review): the surrounding is_Proj() guard and the final return
 * are not visible in this chunk. */
ir_node *skip_Tuple(ir_node *node)
	pred = get_Proj_pred(node);

	if (is_Proj(pred)) { /* nested Tuple ? */
		pred = skip_Tuple(pred);
		if (is_Tuple(pred)) {
			node = get_Tuple_pred(pred, get_Proj_proj(node));
	} else if (is_Tuple(pred)) {
		node = get_Tuple_pred(pred, get_Proj_proj(node));
981 ir_node *skip_Pin(ir_node *node)
984 return get_Pin_op(node);
988 ir_node *skip_Confirm(ir_node *node)
990 if (is_Confirm(node))
991 return get_Confirm_value(node);
995 ir_node *skip_HighLevel_ops(ir_node *node)
997 while (is_op_highlevel(get_irn_op(node))) {
998 node = get_irn_n(node, 0);
/* Skips Id nodes, compacting Id chains so that all Ids end up pointing
 * at the real node (or form a self-loop). */
ir_node *skip_Id(ir_node *node)
	/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
	 * than any other approach, as Id chains are resolved and all point to the real node, or
	 * all id's are self loops.
	 *
	 * Note: This function takes 10% of mostly ANY the compiler run, so it's
	 * a little bit "hand optimized".
	 */
	/* don't assert node !!! */
	if (!node || (node->op != op_Id)) return node;

	/* Don't use get_Id_pred(): We get into an endless loop for
	   self-referencing Ids. */
	pred = node->in[0+1];

	if (pred->op != op_Id) return pred;

	if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
		ir_node *rem_pred, *res;

		if (pred->op != op_Id) return pred; /* shortcut */
		/* NOTE(review): the assignment of rem_pred is on an elided line */
		assert(get_irn_arity (node) > 0);

		node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
		res = skip_Id(rem_pred);
		if (is_Id(res)) /* self-loop */ return node;

		node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1043 int (is_SymConst_addr_ent)(const ir_node *node)
1045 return is_SymConst_addr_ent_(node);
1048 int is_cfop(const ir_node *node)
1050 if (is_fragile_op(node) && ir_throws_exception(node))
1053 return is_op_cfopcode(get_irn_op(node));
1056 int is_unknown_jump(const ir_node *node)
1058 return is_op_unknown_jump(get_irn_op(node));
1061 int is_fragile_op(const ir_node *node)
1063 return is_op_fragile(get_irn_op(node));
1066 int (is_irn_forking)(const ir_node *node)
1068 return is_irn_forking_(node);
1071 void (copy_node_attr)(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
1073 copy_node_attr_(irg, old_node, new_node);
1076 ir_type *(get_irn_type_attr)(ir_node *node)
1078 return get_irn_type_attr_(node);
1081 ir_entity *(get_irn_entity_attr)(ir_node *node)
1083 return get_irn_entity_attr_(node);
1086 int (is_irn_constlike)(const ir_node *node)
1088 return is_irn_constlike_(node);
1091 int (is_irn_keep)(const ir_node *node)
1093 return is_irn_keep_(node);
1096 int (is_irn_start_block_placed)(const ir_node *node)
1098 return is_irn_start_block_placed_(node);
1101 int (is_irn_cse_neutral)(const ir_node *node)
1103 return is_irn_cse_neutral_(node);
/* Maps a cond_jmp_predicate value to the name of its enum constant.
 * NOTE(review): the switch statement skeleton and the #undef are not
 * visible in this chunk. */
const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
#define X(a) case a: return #a
	X(COND_JMP_PRED_NONE);
	X(COND_JMP_PRED_TRUE);
	X(COND_JMP_PRED_FALSE);
1118 /** Return the attribute type of a SymConst node if exists */
1119 static ir_type *get_SymConst_attr_type(const ir_node *self)
1121 symconst_kind kind = get_SymConst_kind(self);
1122 if (SYMCONST_HAS_TYPE(kind))
1123 return get_SymConst_type(self);
1127 /** Return the attribute entity of a SymConst node if exists */
1128 static ir_entity *get_SymConst_attr_entity(const ir_node *self)
1130 symconst_kind kind = get_SymConst_kind(self);
1131 if (SYMCONST_HAS_ENT(kind))
1132 return get_SymConst_entity(self);
1136 static void register_get_type_func(ir_op *op, get_type_attr_func func)
1138 op->ops.get_type_attr = func;
1141 static void register_get_entity_func(ir_op *op, get_entity_attr_func func)
1143 op->ops.get_entity_attr = func;
1146 void ir_register_getter_ops(void)
1148 register_get_type_func(op_Alloc, get_Alloc_type);
1149 register_get_type_func(op_Builtin, get_Builtin_type);
1150 register_get_type_func(op_Call, get_Call_type);
1151 register_get_type_func(op_CopyB, get_CopyB_type);
1152 register_get_type_func(op_Free, get_Free_type);
1153 register_get_type_func(op_InstOf, get_InstOf_type);
1154 register_get_type_func(op_SymConst, get_SymConst_attr_type);
1156 register_get_entity_func(op_SymConst, get_SymConst_attr_entity);
1157 register_get_entity_func(op_Sel, get_Sel_entity);
1158 register_get_entity_func(op_Block, get_Block_entity);
1161 void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
1163 set_irn_dbg_info_(n, db);
1166 dbg_info *(get_irn_dbg_info)(const ir_node *n)
1168 return get_irn_dbg_info_(n);
1171 ir_switch_table *ir_new_switch_table(ir_graph *irg, size_t n_entries)
1173 struct obstack *obst = get_irg_obstack(irg);
1174 ir_switch_table *res = OALLOCFZ(obst, ir_switch_table, entries, n_entries);
1175 res->n_entries = n_entries;
1179 void ir_switch_table_set(ir_switch_table *table, size_t n,
1180 ir_tarval *min, ir_tarval *max, long pn)
1182 ir_switch_table_entry *entry = ir_switch_table_get_entry(table, n);
1188 size_t (ir_switch_table_get_n_entries)(const ir_switch_table *table)
1190 return ir_switch_table_get_n_entries_(table);
1193 ir_tarval *ir_switch_table_get_max(const ir_switch_table *table, size_t e)
1195 return ir_switch_table_get_entry_const(table, e)->max;
1198 ir_tarval *ir_switch_table_get_min(const ir_switch_table *table, size_t e)
1200 return ir_switch_table_get_entry_const(table, e)->min;
1203 long ir_switch_table_get_pn(const ir_switch_table *table, size_t e)
1205 return ir_switch_table_get_entry_const(table, e)->pn;
1208 ir_switch_table *ir_switch_table_duplicate(ir_graph *irg,
1209 const ir_switch_table *table)
1211 size_t n_entries = ir_switch_table_get_n_entries(table);
1213 ir_switch_table *res = ir_new_switch_table(irg, n_entries);
1214 for (e = 0; e < n_entries; ++e) {
1215 const ir_switch_table_entry *entry
1216 = ir_switch_table_get_entry_const(table, e);
1217 ir_switch_table_entry *new_entry = ir_switch_table_get_entry(res, e);
1218 *new_entry = *entry;
/* Returns whether a node's only users are keep-alive edges (possibly
 * through Proj nodes).
 * NOTE(review): the End-node "continue" case and both return statements
 * are not visible in this chunk. */
bool only_used_by_keepalive(const ir_node *node)
	foreach_out_edge(node, edge) {
		ir_node *succ = get_edge_src_irn(edge);
		/* Projs that themselves are only kept alive do not count */
		if (is_Proj(succ) && only_used_by_keepalive(succ))
		/* found a real user */
1237 /* include generated code */
1238 #include "gen_irnode.c.inl"