2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
/* Some constants fixing the positions of nodes predecessors
   in the in array. */
#define CALL_PARAM_OFFSET     2
#define BUILDIN_PARAM_OFFSET  1   /* note: historic "BUILDIN" spelling kept -- used by other files */
#define SEL_INDEX_OFFSET      2
#define RETURN_RESULT_OFFSET  1   /* mem is not a result */
#define END_KEEPALIVE_OFFSET  0

/** Textual names for the pn_Cmp projection numbers, indexed by value. */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 */
const char *get_pnc_string(int pnc)
{
	assert(pnc >= 0 && pnc <
	       (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
	return pnc_name_arr[pnc];
}
75 * Calculates the negated (Complement(R)) pnc condition.
/* NOTE(review): this listing is incomplete -- the declarations, the bit
 * manipulation that actually negates the relation, and the final return are
 * missing here.  From the visible lines we can only tell that the Uo
 * (unordered) bit is deliberately NOT added for non-float modes; confirm the
 * rest against the complete source before editing. */
77 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode)
81 /* do NOT add the Uo bit for non-floating point values */
82 if (! mode_is_float(mode))
88 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
89 pn_Cmp get_inversed_pnc(long pnc)
91 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
92 long lesser = pnc & pn_Cmp_Lt;
93 long greater = pnc & pn_Cmp_Gt;
95 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;


/* register new space for every node */
unsigned firm_register_additional_node_data(unsigned size)
{
	assert(!forbid_new_data && "Too late to register additional node data");

	/* release builds must not hand out space that new_ir_node() will
	 * no longer reserve */
	if (forbid_new_data)
		return 0;

	/* returns the new total amount of extra space per node */
	return firm_add_node_size += size;
}
125 void init_irnode(void)
127 /* Forbid the addition of new data to an ir node. */
/* NOTE(review): struct used only for its offsetof() below, to compute the
 * strictest alignment requirement; its members are missing from this
 * listing -- confirm against the full source. */
131 struct struct_align {
141 * irnode constructor.
142 * Create a new irnode in irg, with an op, mode, arity and
143 * some incoming irnodes.
144 * If arity is negative, a node with a dynamic array is created.
146 ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
147 ir_mode *mode, int arity, ir_node **in)
/* round the custom-data size up to the alignment derived from struct_align */
150 unsigned align = offsetof(struct struct_align, s) - 1;
151 unsigned add_node_size = (firm_add_node_size + align) & ~align;
152 size_t node_size = offsetof(ir_node, attr) + op->attr_size + add_node_size;
/* custom data lives BEFORE the ir_node: allocate, zero, then offset */
159 p = obstack_alloc(irg->obst, node_size);
160 memset(p, 0, node_size);
161 res = (ir_node *)(p + add_node_size);
163 res->kind = k_ir_node;
167 res->node_idx = irg_register_node_idx(irg, res);
/* dynamic arity: start with only the block slot */
172 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
174 /* not nice but necessary: End and Sync must always have a flexible array */
175 if (op == op_End || op == op_Sync)
176 res->in = NEW_ARR_F(ir_node *, (arity+1));
/* fixed arity: allocate in array on the graph's obstack */
178 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
179 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
183 set_irn_dbg_info(res, db);
185 res->node_nr = get_irp_new_node_nr();
/* initialize the out-edge bookkeeping for every edge kind */
187 for (i = 0; i < EDGE_KIND_LAST; ++i) {
188 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
189 /* edges will be build immediately */
190 res->edge_info[i].edges_built = 1;
191 res->edge_info[i].out_count = 0;
194 /* don't put this into the for loop, arity is -1 for some nodes! */
195 edges_notify_edge(res, -1, res->in[0], NULL, irg);
196 for (i = 1; i <= arity; ++i)
197 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
199 hook_new_node(irg, res);
200 if (get_irg_phase_state(irg) == phase_backend) {
201 be_info_new_node(res);
207 /*-- getting some parameters from ir_nodes --*/
209 int (is_ir_node)(const void *thing)
211 return _is_ir_node(thing);
214 int (get_irn_intra_arity)(const ir_node *node)
216 return _get_irn_intra_arity(node);
219 int (get_irn_inter_arity)(const ir_node *node)
221 return _get_irn_inter_arity(node);
224 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
226 int (get_irn_arity)(const ir_node *node)
228 return _get_irn_arity(node);
231 /* Returns the array with ins. This array is shifted with respect to the
232 array accessed by get_irn_n: The block operand is at position 0 not -1.
233 (@@@ This should be changed.)
234 The order of the predecessors in this array is not guaranteed, except that
235 lists of operands as predecessors of Block or arguments of a Call are
237 ir_node **get_irn_in(const ir_node *node)
240 #ifdef INTERPROCEDURAL_VIEW
241 if (get_interprocedural_view()) { /* handle Filter and Block specially */
242 if (get_irn_opcode(node) == iro_Filter) {
243 assert(node->attr.filter.in_cg);
244 return node->attr.filter.in_cg;
245 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
246 return node->attr.block.in_cg;
248 /* else fall through */
250 #endif /* INTERPROCEDURAL_VIEW */
/* Sets all predecessors of a node at once; resizes the in array if the
 * arity changes and keeps the out-edge bookkeeping consistent.
 * NOTE(review): listing incomplete -- the declarations of i and pOld_in
 * (which defaults to &node->in) and several closing braces are missing. */
254 void set_irn_in(ir_node *node, int arity, ir_node **in)
258 ir_graph *irg = get_irn_irg(node);
261 #ifdef INTERPROCEDURAL_VIEW
262 if (get_interprocedural_view()) { /* handle Filter and Block specially */
263 ir_opcode code = get_irn_opcode(node);
264 if (code == iro_Filter) {
265 assert(node->attr.filter.in_cg);
266 pOld_in = &node->attr.filter.in_cg;
267 } else if (code == iro_Block && node->attr.block.in_cg) {
268 pOld_in = &node->attr.block.in_cg;
273 #endif /* INTERPROCEDURAL_VIEW */
/* notify edge module: positions that exist in both old and new in array
 * are replacements, positions beyond the old arity are pure additions */
277 for (i = 0; i < arity; i++) {
278 if (i < ARR_LEN(*pOld_in)-1)
279 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
281 edges_notify_edge(node, i, in[i], NULL, irg);
/* surplus old predecessors are deletions */
283 for (;i < ARR_LEN(*pOld_in)-1; i++) {
284 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
/* reallocate when the arity changed; slot 0 (the block) is preserved */
287 if (arity != ARR_LEN(*pOld_in) - 1) {
288 ir_node * block = (*pOld_in)[0];
289 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
290 (*pOld_in)[0] = block;
292 fix_backedges(irg->obst, node);
294 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
297 ir_node *(get_irn_intra_n)(const ir_node *node, int n)
299 return _get_irn_intra_n(node, n);
302 ir_node *(get_irn_inter_n)(const ir_node *node, int n)
304 return _get_irn_inter_n(node, n);
307 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
309 ir_node *(get_irn_n)(const ir_node *node, int n)
311 return _get_irn_n(node, n);
/* Sets the n-th predecessor (n == -1 addresses the block input, stored at
 * in[0]).  NOTE(review): listing incomplete -- the early returns that end
 * the Filter/Block special cases inside the #ifdef are missing here. */
314 void set_irn_n(ir_node *node, int n, ir_node *in)
316 assert(node && node->kind == k_ir_node);
318 assert(n < get_irn_arity(node));
319 assert(in && in->kind == k_ir_node);
321 #ifdef INTERPROCEDURAL_VIEW
322 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
323 /* Change block pred in both views! */
324 node->in[n + 1] = in;
325 assert(node->attr.filter.in_cg);
326 node->attr.filter.in_cg[n + 1] = in;
329 if (get_interprocedural_view()) { /* handle Filter and Block specially */
330 if (get_irn_opcode(node) == iro_Filter) {
331 assert(node->attr.filter.in_cg);
332 node->attr.filter.in_cg[n + 1] = in;
334 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
335 node->attr.block.in_cg[n + 1] = in;
338 /* else fall through */
340 #endif /* INTERPROCEDURAL_VIEW */
/* Call the hook */
343 hook_set_irn_n(node, n, in, node->in[n + 1]);
345 /* Here, we rely on src and tgt being in the current ir graph */
346 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
348 node->in[n + 1] = in;
351 int add_irn_n(ir_node *node, ir_node *in)
354 ir_graph *irg = get_irn_irg(node);
356 assert(node->op->opar == oparity_dynamic);
357 pos = ARR_LEN(node->in) - 1;
358 ARR_APP1(ir_node *, node->in, in);
359 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
362 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
367 void del_Sync_n(ir_node *n, int i)
369 int arity = get_Sync_n_preds(n);
370 ir_node *last_pred = get_Sync_pred(n, arity - 1);
371 set_Sync_pred(n, i, last_pred);
372 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
373 ARR_SHRINKLEN(get_irn_in(n), arity);
376 int (get_irn_deps)(const ir_node *node)
378 return _get_irn_deps(node);
381 ir_node *(get_irn_dep)(const ir_node *node, int pos)
383 return _get_irn_dep(node, pos);
386 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
388 _set_irn_dep(node, pos, dep);
/* Adds a dependency (DEP) edge from node to dep and returns its position;
 * reuses a NULL slot if one exists, otherwise appends.
 * NOTE(review): listing incomplete -- the declarations (res, i, n,
 * first_zero), the else branch bodies and the final return are missing. */
391 int add_irn_dep(ir_node *node, ir_node *dep)
395 /* DEP edges are only allowed in backend phase */
396 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend)

;
397 if (node->deps == NULL) {
398 node->deps = NEW_ARR_F(ir_node *, 1);
/* scan for a free (NULL) slot and for an already-present identical dep */
404 for (i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
405 if (node->deps[i] == NULL)
408 if (node->deps[i] == dep)
412 if (first_zero >= 0) {
413 node->deps[first_zero] = dep;
416 ARR_APP1(ir_node *, node->deps, dep);
421 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
426 void add_irn_deps(ir_node *tgt, ir_node *src)
430 for (i = 0, n = get_irn_deps(src); i < n; ++i)
431 add_irn_dep(tgt, get_irn_dep(src, i));
435 ir_mode *(get_irn_mode)(const ir_node *node)
437 return _get_irn_mode(node);
440 void (set_irn_mode)(ir_node *node, ir_mode *mode)
442 _set_irn_mode(node, mode);
445 /** Gets the string representation of the mode .*/
446 const char *get_irn_modename(const ir_node *node)
449 return get_mode_name(node->mode);
452 ident *get_irn_modeident(const ir_node *node)
455 return get_mode_ident(node->mode);
458 ir_op *(get_irn_op)(const ir_node *node)
460 return _get_irn_op(node);
463 /* should be private to the library: */
464 void (set_irn_op)(ir_node *node, ir_op *op)
466 _set_irn_op(node, op);
469 unsigned (get_irn_opcode)(const ir_node *node)
471 return _get_irn_opcode(node);
474 const char *get_irn_opname(const ir_node *node)
477 if (is_Phi0(node)) return "Phi0";
478 return get_id_str(node->op->name);
481 ident *get_irn_opident(const ir_node *node)
484 return node->op->name;
487 ir_visited_t (get_irn_visited)(const ir_node *node)
489 return _get_irn_visited(node);
492 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
494 _set_irn_visited(node, visited);
497 void (mark_irn_visited)(ir_node *node)
499 _mark_irn_visited(node);
502 int (irn_visited)(const ir_node *node)
504 return _irn_visited(node);
507 int (irn_visited_else_mark)(ir_node *node)
509 return _irn_visited_else_mark(node);
512 void (set_irn_link)(ir_node *node, void *link)
514 _set_irn_link(node, link);
517 void *(get_irn_link)(const ir_node *node)
519 return _get_irn_link(node);
522 op_pin_state (get_irn_pinned)(const ir_node *node)
524 return _get_irn_pinned(node);
527 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
529 return _is_irn_pinned_in_irg(node);
532 void set_irn_pinned(ir_node *node, op_pin_state state)
534 /* due to optimization an opt may be turned into a Tuple */
538 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
539 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
541 node->attr.except.pin_state = state;
544 /* Outputs a unique number for this node */
545 long get_irn_node_nr(const ir_node *node)
548 return node->node_nr;
551 void *(get_irn_generic_attr)(ir_node *node)
553 assert(is_ir_node(node));
554 return _get_irn_generic_attr(node);
557 const void *(get_irn_generic_attr_const)(const ir_node *node)
559 assert(is_ir_node(node));
560 return _get_irn_generic_attr_const(node);
563 unsigned (get_irn_idx)(const ir_node *node)
565 assert(is_ir_node(node));
566 return _get_irn_idx(node);
569 int get_irn_pred_pos(ir_node *node, ir_node *arg)
572 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
573 if (get_irn_n(node, i) == arg)
579 /** manipulate fields of individual nodes **/
581 /* this works for all except Block */
582 ir_node *get_nodes_block(const ir_node *node)
584 assert(node->op != op_Block);
585 return get_irn_n(node, -1);
588 void set_nodes_block(ir_node *node, ir_node *block)
590 assert(node->op != op_Block);
591 set_irn_n(node, -1, block);
594 /* this works for all except Block */
595 ir_node *get_nodes_MacroBlock(const ir_node *node)
597 assert(node->op != op_Block);
598 return get_Block_MacroBlock(get_irn_n(node, -1));
601 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
602 * from Start. If so returns frame type, else Null. */
603 ir_type *is_frame_pointer(const ir_node *n)
605 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
606 ir_node *start = get_Proj_pred(n);
607 if (is_Start(start)) {
608 return get_irg_frame_type(get_irn_irg(start));
614 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
615 * from Start. If so returns tls type, else Null. */
616 ir_type *is_tls_pointer(const ir_node *n)
618 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
619 ir_node *start = get_Proj_pred(n);
620 if (is_Start(start)) {
621 return get_tls_type();
627 ir_node **get_Block_cfgpred_arr(ir_node *node)
629 assert(is_Block(node));
630 return (ir_node **)&(get_irn_in(node)[1]);
633 int (get_Block_n_cfgpreds)(const ir_node *node)
635 return _get_Block_n_cfgpreds(node);
638 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
640 return _get_Block_cfgpred(node, pos);
643 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
645 assert(is_Block(node));
646 set_irn_n(node, pos, pred);
649 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
653 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
654 if (get_Block_cfgpred_block(block, i) == pred)
660 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
662 return _get_Block_cfgpred_block(node, pos);
665 int get_Block_matured(const ir_node *node)
667 assert(is_Block(node));
668 return (int)node->attr.block.is_matured;
671 void set_Block_matured(ir_node *node, int matured)
673 assert(is_Block(node));
674 node->attr.block.is_matured = matured;
677 ir_visited_t (get_Block_block_visited)(const ir_node *node)
679 return _get_Block_block_visited(node);
682 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
684 _set_Block_block_visited(node, visit);
687 /* For this current_ir_graph must be set. */
688 void (mark_Block_block_visited)(ir_node *node)
690 _mark_Block_block_visited(node);
693 int (Block_block_visited)(const ir_node *node)
695 return _Block_block_visited(node);
698 #ifdef INTERPROCEDURAL_VIEW
/* Installs a new interprocedural (call-graph) predecessor array on a Block.
 * NOTE(review): listing incomplete -- the else branch, a closing brace and
 * the declarations may be missing here. */
699 void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[])
701 assert(is_Block(node));
702 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
703 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
704 node->attr.block.in_cg[0] = NULL;
705 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
707 /* Fix backedge array.  fix_backedges() operates depending on
708 interprocedural_view. */
709 int ipv = get_interprocedural_view();
710 set_interprocedural_view(1);
711 fix_backedges(current_ir_graph->obst, node);
712 set_interprocedural_view(ipv);
715 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
718 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred)
720 assert(is_Block(node) && node->attr.block.in_cg &&
721 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
722 node->attr.block.in_cg[pos + 1] = pred;
725 ir_node **get_Block_cg_cfgpred_arr(ir_node *node)
727 assert(is_Block(node));
728 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
731 int get_Block_cg_n_cfgpreds(const ir_node *node)
733 assert(is_Block(node));
734 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
737 ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos)
739 assert(is_Block(node) && node->attr.block.in_cg);
740 return node->attr.block.in_cg[pos + 1];
/* Drops the cg predecessor array (memory stays on the obstack). */
743 void remove_Block_cg_cfgpred_arr(ir_node *node)
745 assert(is_Block(node));
746 node->attr.block.in_cg = NULL;
748 #endif /* INTERPROCEDURAL_VIEW */
750 ir_node *(set_Block_dead)(ir_node *block)
752 return _set_Block_dead(block);
755 int (is_Block_dead)(const ir_node *block)
757 return _is_Block_dead(block);
760 ir_extblk *get_Block_extbb(const ir_node *block)
763 assert(is_Block(block));
764 res = block->attr.block.extblk;
765 assert(res == NULL || is_ir_extbb(res));
769 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
771 assert(is_Block(block));
772 assert(extblk == NULL || is_ir_extbb(extblk));
773 block->attr.block.extblk = extblk;
776 /* Returns the macro block header of a block.*/
/* The MacroBlock is stored in the block's in[0] slot (position -1).
 * NOTE(review): listing incomplete -- the declaration of mbh, the assert
 * mentioned below and the return statement are missing. */
777 ir_node *get_Block_MacroBlock(const ir_node *block)
780 assert(is_Block(block));
781 mbh = get_irn_n(block, -1);
782 /* once macro block header is respected by all optimizations,
783 this assert can be removed */
788 /* Sets the macro block header of a block. */
789 void set_Block_MacroBlock(ir_node *block, ir_node *mbh)
791 assert(is_Block(block));
793 assert(is_Block(mbh));
794 set_irn_n(block, -1, mbh);
797 /* returns the macro block header of a node. */
/* For non-Block nodes: first hop to the node's block, then to its MacroBlock.
 * NOTE(review): the Bad-block early-out referenced below is missing from
 * this listing. */
798 ir_node *get_irn_MacroBlock(const ir_node *n)
801 n = get_nodes_block(n);
802 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
806 return get_Block_MacroBlock(n);
809 /* returns the graph of a Block. */
810 ir_graph *(get_Block_irg)(const ir_node *block)
812 return _get_Block_irg(block);
/* Lazily creates (and caches) a compiler-generated, local, constant entity
 * representing this block as a code label.
 * NOTE(review): listing incomplete -- the declarations (entity, glob, nr),
 * the closing brace of the if and the final return are missing. */
815 ir_entity *create_Block_entity(ir_node *block)
818 assert(is_Block(block));
820 entity = block->attr.block.entity;
821 if (entity == NULL) {
825 glob = get_glob_type();
826 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
827 set_entity_visibility(entity, ir_visibility_local);
828 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
829 nr = get_irp_next_label_nr();
830 set_entity_label(entity, nr);
831 set_entity_compiler_generated(entity, 1);
/* cache so repeated calls return the same entity */
833 block->attr.block.entity = entity;
838 ir_entity *get_Block_entity(const ir_node *block)
840 assert(is_Block(block));
841 return block->attr.block.entity;
844 void set_Block_entity(ir_node *block, ir_entity *entity)
846 assert(is_Block(block));
847 assert(get_entity_type(entity) == get_code_type());
848 block->attr.block.entity = entity;
851 int has_Block_entity(const ir_node *block)
853 return block->attr.block.entity != NULL;
856 ir_node *(get_Block_phis)(const ir_node *block)
858 return _get_Block_phis(block);
861 void (set_Block_phis)(ir_node *block, ir_node *phi)
863 _set_Block_phis(block, phi);
866 void (add_Block_phi)(ir_node *block, ir_node *phi)
868 _add_Block_phi(block, phi);
871 /* Get the Block mark (single bit). */
872 unsigned (get_Block_mark)(const ir_node *block)
874 return _get_Block_mark(block);
877 /* Set the Block mark (single bit). */
878 void (set_Block_mark)(ir_node *block, unsigned mark)
880 _set_Block_mark(block, mark);
883 int get_End_n_keepalives(const ir_node *end)
886 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
889 ir_node *get_End_keepalive(const ir_node *end, int pos)
892 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
895 void add_End_keepalive(ir_node *end, ir_node *ka)
901 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
904 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
907 /* Set new keep-alives */
908 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
911 ir_graph *irg = get_irn_irg(end);
913 /* notify that edges are deleted */
914 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
915 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
917 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
919 for (i = 0; i < n; ++i) {
920 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
921 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
925 /* Set new keep-alives from old keep-alives, skipping irn */
/* NOTE(review): listing incomplete -- the search that finds idx (the slot
 * holding irn), the early return if irn is not kept, and several braces
 * are missing here. */
926 void remove_End_keepalive(ir_node *end, ir_node *irn)
928 int n = get_End_n_keepalives(end);
933 for (i = n -1; i >= 0; --i) {
934 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
944 irg = get_irn_irg(end);
946 /* remove the edge */
947 edges_notify_edge(end, idx, NULL, irn, irg);
950 /* exchange with the last one */
951 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
952 edges_notify_edge(end, n - 1, NULL, old, irg);
953 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
954 edges_notify_edge(end, idx, old, NULL, irg);
956 /* now n - 1 keeps, 1 block input */
957 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
960 /* remove Bads, NoMems and doublets from the keep-alive set */
/* Uses a pset to detect duplicates; removal is swap-with-last + shrink.
 * NOTE(review): declarations (irg, keeps) and loop bookkeeping (the n
 * decrement on removal) are missing from this listing. */
961 void remove_End_Bads_and_doublets(ir_node *end)
964 int idx, n = get_End_n_keepalives(end);
970 irg = get_irn_irg(end);
971 pset_new_init(&keeps);
973 for (idx = n - 1; idx >= 0; --idx) {
974 ir_node *ka = get_End_keepalive(end, idx);
976 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
977 /* remove the edge */
978 edges_notify_edge(end, idx, NULL, ka, irg);
981 /* exchange with the last one */
982 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
983 edges_notify_edge(end, n - 1, NULL, old, irg);
984 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
985 edges_notify_edge(end, idx, old, NULL, irg);
989 pset_new_insert(&keeps, ka);
992 /* n keeps, 1 block input */
993 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
995 pset_new_destroy(&keeps);
/* Frees the End node's in array (poisoning it with NULL afterwards). */
998 void free_End(ir_node *end)
1000 assert(is_End(end));
1003 end->in = NULL; /* @@@ make sure we get an error if we use the
1004 in array afterwards ... */
1007 int get_Return_n_ress(const ir_node *node)
1009 assert(is_Return(node));
1010 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1013 ir_node **get_Return_res_arr(ir_node *node)
1015 assert(is_Return(node));
1016 if (get_Return_n_ress(node) > 0)
1017 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1023 void set_Return_n_res(ir_node *node, int results)
1025 assert(is_Return(node));
1029 ir_node *get_Return_res(const ir_node *node, int pos)
1031 assert(is_Return(node));
1032 assert(get_Return_n_ress(node) > pos);
1033 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1036 void set_Return_res(ir_node *node, int pos, ir_node *res)
1038 assert(is_Return(node));
1039 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1042 int (is_Const_null)(const ir_node *node)
1044 return _is_Const_null(node);
1047 int (is_Const_one)(const ir_node *node)
1049 return _is_Const_one(node);
1052 int (is_Const_all_one)(const ir_node *node)
1054 return _is_Const_all_one(node);
1058 /* The source language type. Must be an atomic type. Mode of type must
1059 be mode of node. For tarvals from entities type must be pointer to
1061 ir_type *get_Const_type(const ir_node *node)
1063 assert(is_Const(node));
1064 return node->attr.con.tp;
1067 void set_Const_type(ir_node *node, ir_type *tp)
1069 assert(is_Const(node));
1070 if (tp != firm_unknown_type) {
1071 assert(is_atomic_type(tp));
1072 assert(get_type_mode(tp) == get_irn_mode(node));
1074 node->attr.con.tp = tp;
1078 symconst_kind get_SymConst_kind(const ir_node *node)
1080 assert(is_SymConst(node));
1081 return node->attr.symc.kind;
1084 void set_SymConst_kind(ir_node *node, symconst_kind kind)
1086 assert(is_SymConst(node));
1087 node->attr.symc.kind = kind;
1090 ir_type *get_SymConst_type(const ir_node *node)
1092 /* the cast here is annoying, but we have to compensate for
1094 ir_node *irn = (ir_node *)node;
1095 assert(is_SymConst(node) &&
1096 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1097 return irn->attr.symc.sym.type_p;
1100 void set_SymConst_type(ir_node *node, ir_type *tp)
1102 assert(is_SymConst(node) &&
1103 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1104 node->attr.symc.sym.type_p = tp;
1108 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1109 ir_entity *get_SymConst_entity(const ir_node *node)
1111 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1112 return node->attr.symc.sym.entity_p;
1115 void set_SymConst_entity(ir_node *node, ir_entity *ent)
1117 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1118 node->attr.symc.sym.entity_p = ent;
1121 ir_enum_const *get_SymConst_enum(const ir_node *node)
1123 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1124 return node->attr.symc.sym.enum_p;
1127 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
1129 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1130 node->attr.symc.sym.enum_p = ec;
1133 union symconst_symbol
1134 get_SymConst_symbol(const ir_node *node)
1136 assert(is_SymConst(node));
1137 return node->attr.symc.sym;
1140 void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
1142 assert(is_SymConst(node));
1143 node->attr.symc.sym = sym;
1146 ir_type *get_SymConst_value_type(const ir_node *node)
1148 assert(is_SymConst(node));
1149 return node->attr.symc.tp;
1152 void set_SymConst_value_type(ir_node *node, ir_type *tp)
1154 assert(is_SymConst(node));
1155 node->attr.symc.tp = tp;
1158 int get_Sel_n_indexs(const ir_node *node)
1160 assert(is_Sel(node));
1161 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1164 ir_node **get_Sel_index_arr(ir_node *node)
1166 assert(is_Sel(node));
1167 if (get_Sel_n_indexs(node) > 0)
1168 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1173 ir_node *get_Sel_index(const ir_node *node, int pos)
1175 assert(is_Sel(node));
1176 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1179 void set_Sel_index(ir_node *node, int pos, ir_node *index)
1181 assert(is_Sel(node));
1182 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1186 /* For unary and binary arithmetic operations the access to the
1187 operands can be factored out. Left is the first, right the
1188 second arithmetic value as listed in tech report 0999-33.
1189 unops are: Minus, Abs, Not, Conv, Cast
1190 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1191 Shr, Shrs, Rotate, Cmp */
1194 ir_node **get_Call_param_arr(ir_node *node)
1196 assert(is_Call(node));
1197 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1200 int get_Call_n_params(const ir_node *node)
1202 assert(is_Call(node));
1203 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1206 ir_node *get_Call_param(const ir_node *node, int pos)
1208 assert(is_Call(node));
1209 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1212 void set_Call_param(ir_node *node, int pos, ir_node *param)
1214 assert(is_Call(node));
1215 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1218 ir_node **get_Builtin_param_arr(ir_node *node)
1220 assert(is_Builtin(node));
1221 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1224 int get_Builtin_n_params(const ir_node *node)
1226 assert(is_Builtin(node));
1227 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1230 ir_node *get_Builtin_param(const ir_node *node, int pos)
1232 assert(is_Builtin(node));
1233 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1236 void set_Builtin_param(ir_node *node, int pos, ir_node *param)
1238 assert(is_Builtin(node));
1239 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1242 /* Returns a human readable string for the ir_builtin_kind. */
/* NOTE(review): listing incomplete -- the switch statement, most X(...)
 * cases and the fallback return are missing here. */
1243 const char *get_builtin_kind_name(ir_builtin_kind kind)
/* X stringifies the enum constant into its own case label */
1245 #define X(a) case a: return #a
1248 X(ir_bk_debugbreak);
1249 X(ir_bk_return_address);
1250 X(ir_bk_frame_address);
1260 X(ir_bk_inner_trampoline);
1267 int Call_has_callees(const ir_node *node)
1269 assert(is_Call(node));
1270 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1271 (node->attr.call.callee_arr != NULL));
1274 int get_Call_n_callees(const ir_node *node)
1276 assert(is_Call(node) && node->attr.call.callee_arr);
1277 return ARR_LEN(node->attr.call.callee_arr);
1280 ir_entity *get_Call_callee(const ir_node *node, int pos)
1282 assert(pos >= 0 && pos < get_Call_n_callees(node));
1283 return node->attr.call.callee_arr[pos];
1286 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr)
1288 assert(is_Call(node));
1289 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1290 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1292 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1295 void remove_Call_callee_arr(ir_node *node)
1297 assert(is_Call(node));
1298 node->attr.call.callee_arr = NULL;
1302 * Returns non-zero if a Call is surely a self-recursive Call.
1303 * Beware: if this functions returns 0, the call might be self-recursive!
1305 int is_self_recursive_Call(const ir_node *call)
1307 const ir_node *callee = get_Call_ptr(call);
1309 if (is_SymConst_addr_ent(callee)) {
1310 const ir_entity *ent = get_SymConst_entity(callee);
1311 const ir_graph *irg = get_entity_irg(ent);
1312 if (irg == get_irn_irg(call))
1318 /* Checks for upcast.
1320 * Returns true if the Cast node casts a class type to a super type.
1322 int is_Cast_upcast(ir_node *node)
1324 ir_type *totype = get_Cast_type(node);
1325 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1327 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1330 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1331 totype = get_pointer_points_to_type(totype);
1332 fromtype = get_pointer_points_to_type(fromtype);
1337 if (!is_Class_type(totype)) return 0;
1338 return is_SubClass_of(fromtype, totype);
1341 /* Checks for downcast.
1343 * Returns true if the Cast node casts a class type to a sub type.
1345 int is_Cast_downcast(ir_node *node)
1347 ir_type *totype = get_Cast_type(node);
1348 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1350 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1353 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1354 totype = get_pointer_points_to_type(totype);
1355 fromtype = get_pointer_points_to_type(fromtype);
1360 if (!is_Class_type(totype)) return 0;
1361 return is_SubClass_of(totype, fromtype);
1364 int (is_unop)(const ir_node *node)
1366 return _is_unop(node);
1369 ir_node *get_unop_op(const ir_node *node)
1371 if (node->op->opar == oparity_unary)
1372 return get_irn_n(node, node->op->op_index);
1374 assert(node->op->opar == oparity_unary);
1378 void set_unop_op(ir_node *node, ir_node *op)
1380 if (node->op->opar == oparity_unary)
1381 set_irn_n(node, node->op->op_index, op);
1383 assert(node->op->opar == oparity_unary);
1386 int (is_binop)(const ir_node *node)
1388 return _is_binop(node);
1391 ir_node *get_binop_left(const ir_node *node)
1393 assert(node->op->opar == oparity_binary);
1394 return get_irn_n(node, node->op->op_index);
1397 void set_binop_left(ir_node *node, ir_node *left)
1399 assert(node->op->opar == oparity_binary);
1400 set_irn_n(node, node->op->op_index, left);
1403 ir_node *get_binop_right(const ir_node *node)
1405 assert(node->op->opar == oparity_binary);
1406 return get_irn_n(node, node->op->op_index + 1);
1409 void set_binop_right(ir_node *node, ir_node *right)
1411 assert(node->op->opar == oparity_binary);
1412 set_irn_n(node, node->op->op_index + 1, right);
1415 int is_Phi0(const ir_node *n)
1419 return ((get_irn_op(n) == op_Phi) &&
1420 (get_irn_arity(n) == 0) &&
1421 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1424 ir_node **get_Phi_preds_arr(ir_node *node)
1426 assert(node->op == op_Phi);
1427 return (ir_node **)&(get_irn_in(node)[1]);
1430 int get_Phi_n_preds(const ir_node *node)
1432 assert(is_Phi(node) || is_Phi0(node));
1433 return (get_irn_arity(node));
/* NOTE(review): listing incomplete -- only the signature and the assert of
 * this function are visible; the code that actually changes the Phi's
 * arity is missing. */
1437 void set_Phi_n_preds(ir_node *node, int n_preds)
1439 assert(node->op == op_Phi);
/* Returns the pos-th predecessor of a Phi (or Phi0). */
ir_node *get_Phi_pred(const ir_node *node, int pos)
{
	assert(is_Phi(node) || is_Phi0(node));
	return get_irn_n(node, pos);
}
/* Sets the pos-th predecessor of a Phi (or Phi0). */
void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
{
	assert(is_Phi(node) || is_Phi0(node));
	set_irn_n(node, pos, pred);
}
/* Returns the next element of the Phi list of the Phi's block
 * (out-of-line wrapper around the _get_Phi_next() macro). */
ir_node *(get_Phi_next)(const ir_node *phi)
{
	return _get_Phi_next(phi);
}
/* Links the Phi into the block's Phi list
 * (out-of-line wrapper around the _set_Phi_next() macro). */
void (set_Phi_next)(ir_node *phi, ir_node *next)
{
	_set_Phi_next(phi, next);
}
/* A memop is a Load or a Store: both have the memory at in[0]
 * and the address pointer at in[1]. */
int is_memop(const ir_node *node)
{
	ir_opcode code = get_irn_opcode(node);
	return (code == iro_Load || code == iro_Store);
}
/* Returns the memory input of a Load or Store (in[0]). */
ir_node *get_memop_mem(const ir_node *node)
{
	assert(is_memop(node));
	return get_irn_n(node, 0);
}
/* Sets the memory input of a Load or Store (in[0]). */
void set_memop_mem(ir_node *node, ir_node *mem)
{
	assert(is_memop(node));
	set_irn_n(node, 0, mem);
}
/* Returns the address pointer of a Load or Store (in[1]). */
ir_node *get_memop_ptr(const ir_node *node)
{
	assert(is_memop(node));
	return get_irn_n(node, 1);
}
/* Sets the address pointer of a Load or Store (in[1]). */
void set_memop_ptr(ir_node *node, ir_node *ptr)
{
	assert(is_memop(node));
	set_irn_n(node, 1, ptr);
}
/* Returns the volatility flag of a Load node. */
ir_volatility get_Load_volatility(const ir_node *node)
{
	assert(is_Load(node));
	return node->attr.load.volatility;
}
/* Sets the volatility flag of a Load node. */
void set_Load_volatility(ir_node *node, ir_volatility volatility)
{
	assert(is_Load(node));
	node->attr.load.volatility = volatility;
}
/* Returns the alignment attribute of a Load node. */
ir_align get_Load_align(const ir_node *node)
{
	assert(is_Load(node));
	return node->attr.load.aligned;
}
/* Sets the alignment attribute of a Load node. */
void set_Load_align(ir_node *node, ir_align align)
{
	assert(is_Load(node));
	node->attr.load.aligned = align;
}
/* Returns the volatility flag of a Store node. */
ir_volatility get_Store_volatility(const ir_node *node)
{
	assert(is_Store(node));
	return node->attr.store.volatility;
}
/* Sets the volatility flag of a Store node. */
void set_Store_volatility(ir_node *node, ir_volatility volatility)
{
	assert(is_Store(node));
	node->attr.store.volatility = volatility;
}
/* Returns the alignment attribute of a Store node. */
ir_align get_Store_align(const ir_node *node)
{
	assert(is_Store(node));
	return node->attr.store.aligned;
}
/* Sets the alignment attribute of a Store node. */
void set_Store_align(ir_node *node, ir_align align)
{
	assert(is_Store(node));
	node->attr.store.aligned = align;
}
/* Returns the raw predecessor array of a Sync (in[0] is the block,
 * so the memory predecessors start at index 1). */
ir_node **get_Sync_preds_arr(ir_node *node)
{
	assert(is_Sync(node));
	return (ir_node **)&(get_irn_in(node)[1]);
}
/* Returns the number of memory predecessors of a Sync. */
int get_Sync_n_preds(const ir_node *node)
{
	assert(is_Sync(node));
	return (get_irn_arity(node));
}
1558 void set_Sync_n_preds(ir_node *node, int n_preds)
1560 assert(is_Sync(node));
/* Returns the pos-th memory predecessor of a Sync. */
ir_node *get_Sync_pred(const ir_node *node, int pos)
{
	assert(is_Sync(node));
	return get_irn_n(node, pos);
}
/* Sets the pos-th memory predecessor of a Sync. */
void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
{
	assert(is_Sync(node));
	set_irn_n(node, pos, pred);
}
/* Add a new Sync predecessor (grows the node's in-array by one). */
void add_Sync_pred(ir_node *node, ir_node *pred)
{
	assert(is_Sync(node));
	add_irn_n(node, pred);
}
/* Returns the source language type of a Proj node.
 * Only a few producers carry a recoverable type: Proj-of-Proj chains
 * rooted at Start (method parameter types) or Call (result types), and
 * Loads from a Sel (the entity's type).  Everything else yields
 * firm_unknown_type. */
ir_type *get_Proj_type(const ir_node *n)
{
	ir_type *tp   = firm_unknown_type;
	ir_node *pred = get_Proj_pred(n);

	switch (get_irn_opcode(pred)) {
	case iro_Proj: {
		ir_node *pred_pred;
		/* Deal with Start / Call here: we need to know the Proj Nr. */
		assert(get_irn_mode(pred) == mode_T);
		pred_pred = get_Proj_pred(pred);

		if (is_Start(pred_pred)) {
			ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
			tp = get_method_param_type(mtp, get_Proj_proj(n));
		} else if (is_Call(pred_pred)) {
			ir_type *mtp = get_Call_type(pred_pred);
			tp = get_method_res_type(mtp, get_Proj_proj(n));
		}
	} break;
	case iro_Start: break;
	case iro_Call: break;
	case iro_Load: {
		/* NOTE(review): reconstructed from a garbled extract — assumes the
		 * Load address is only informative when it is a Sel; verify. */
		ir_node *a = get_Load_ptr(pred);
		if (is_Sel(a))
			tp = get_entity_type(get_Sel_entity(a));
	} break;
	default:
		break;
	}
	return tp;
}
/* Returns the projection number of a Proj node.
 * With INTERPROCEDURAL_VIEW enabled, Filter nodes act as Projs and store
 * their projection number in a separate attribute. */
long get_Proj_proj(const ir_node *node)
{
#ifdef INTERPROCEDURAL_VIEW
	ir_opcode code = get_irn_opcode(node);

	if (code == iro_Proj) {
		return node->attr.proj;
	} else {
		assert(code == iro_Filter);
		return node->attr.filter.proj;
	}
#else
	assert(is_Proj(node));
	return node->attr.proj;
#endif /* INTERPROCEDURAL_VIEW */
}
/* Sets the projection number of a Proj node (or, in the
 * interprocedural view, of a Filter node). */
void set_Proj_proj(ir_node *node, long proj)
{
#ifdef INTERPROCEDURAL_VIEW
	ir_opcode code = get_irn_opcode(node);

	if (code == iro_Proj) {
		node->attr.proj = proj;
	} else {
		assert(code == iro_Filter);
		node->attr.filter.proj = proj;
	}
#else
	assert(is_Proj(node));
	node->attr.proj = proj;
#endif /* INTERPROCEDURAL_VIEW */
}
/* Returns non-zero if a node is a routine parameter
 * (out-of-line wrapper around the _is_arg_Proj() macro). */
int (is_arg_Proj)(const ir_node *node)
{
	return _is_arg_Proj(node);
}
/* Returns the raw predecessor array of a Tuple
 * (in[0] is the block, so the components start at index 1). */
ir_node **get_Tuple_preds_arr(ir_node *node)
{
	assert(is_Tuple(node));
	return (ir_node **)&(get_irn_in(node)[1]);
}
/* Returns the number of Tuple components. */
int get_Tuple_n_preds(const ir_node *node)
{
	assert(is_Tuple(node));
	return get_irn_arity(node);
}
1672 void set_Tuple_n_preds(ir_node *node, int n_preds)
1674 assert(is_Tuple(node));
/* Returns the pos-th component of a Tuple. */
ir_node *get_Tuple_pred(const ir_node *node, int pos)
{
	assert(is_Tuple(node));
	return get_irn_n(node, pos);
}
/* Sets the pos-th component of a Tuple. */
void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
{
	assert(is_Tuple(node));
	set_irn_n(node, pos, pred);
}
1690 /* Don't use get_irn_arity, get_irn_n in implementation as access
1691 shall work independent of view!!! */
1692 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in)
1694 assert(is_Filter(node));
1695 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1696 ir_graph *irg = get_irn_irg(node);
1697 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1698 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
1699 node->attr.filter.in_cg[0] = node->in[0];
1701 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets the pos-th interprocedural predecessor of a Filter node
 * (slot 0 of in_cg is reserved for the block, hence pos + 1). */
void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred)
{
	assert(is_Filter(node) && node->attr.filter.in_cg &&
	       0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
	node->attr.filter.in_cg[pos + 1] = pred;
}
/* Returns the number of interprocedural predecessors of a Filter node. */
int get_Filter_n_cg_preds(const ir_node *node)
{
	assert(is_Filter(node) && node->attr.filter.in_cg);
	return (ARR_LEN(node->attr.filter.in_cg) - 1);
}
/* Returns the pos-th interprocedural predecessor of a Filter node. */
ir_node *get_Filter_cg_pred(const ir_node *node, int pos)
{
	int arity;
	/* NOTE(review): the lower bound of the first assert was lost in the
	 * extract; reconstructed as 0 <= pos — verify against upstream. */
	assert(is_Filter(node) && node->attr.filter.in_cg && 0 <= pos);
	arity = ARR_LEN(node->attr.filter.in_cg);
	assert(pos < arity - 1);
	return node->attr.filter.in_cg[pos + 1];
}
/* Returns the number of input constraints of an ASM node. */
int get_ASM_n_input_constraints(const ir_node *node)
{
	assert(is_ASM(node));
	return ARR_LEN(node->attr.assem.input_constraints);
}
/* Returns the number of output constraints of an ASM node. */
int get_ASM_n_output_constraints(const ir_node *node)
{
	assert(is_ASM(node));
	return ARR_LEN(node->attr.assem.output_constraints);
}
/* Returns the number of clobbered registers of an ASM node. */
int get_ASM_n_clobbers(const ir_node *node)
{
	assert(is_ASM(node));
	return ARR_LEN(node->attr.assem.clobbers);
}
/* Returns the graph a node belongs to
 * (out-of-line wrapper around the _get_irn_irg() macro). */
ir_graph *(get_irn_irg)(const ir_node *node)
{
	return _get_irn_irg(node);
}
1752 /*----------------------------------------------------------------*/
1753 /* Auxiliary routines */
1754 /*----------------------------------------------------------------*/
/* Skips a Proj node: returns its predecessor, or the node itself if it
 * is not a Proj.  Deliberately NULL-tolerant (no assert on node). */
ir_node *skip_Proj(ir_node *node)
{
	/* don't assert node !!! */
	if (node == NULL)
		return NULL;

	if (is_Proj(node))
		node = get_Proj_pred(node);

	return node;
}
/* const-qualified variant of skip_Proj().  Deliberately NULL-tolerant. */
const ir_node *
skip_Proj_const(const ir_node *node)
{
	/* don't assert node !!! */
	if (node == NULL)
		return NULL;

	if (is_Proj(node))
		node = get_Proj_pred(node);

	return node;
}
/* Resolves a Proj-of-Tuple (possibly through nested Tuples) to the
 * actual component node; any other node is returned unchanged.
 * NOTE(review): control flow reconstructed from a garbled extract —
 * verify against upstream that each successful Tuple resolution loops
 * back to re-examine the resulting node. */
ir_node *skip_Tuple(ir_node *node)
{
	ir_node *pred;
	ir_op   *op;

restart:
	if (is_Proj(node)) {
		pred = get_Proj_pred(node);
		op   = get_irn_op(pred);

		/*
		 * Looks strange but calls get_irn_op() only once
		 * in most often cases.
		 */
		if (op == op_Proj) { /* nested Tuple ? */
			pred = skip_Tuple(pred);

			if (is_Tuple(pred)) {
				node = get_Tuple_pred(pred, get_Proj_proj(node));
				goto restart;
			}
		} else if (op == op_Tuple) {
			node = get_Tuple_pred(pred, get_Proj_proj(node));
			goto restart;
		}
	}
	return node;
}
/* returns operand of node if node is a Cast, else the node itself */
ir_node *skip_Cast(ir_node *node)
{
	if (is_Cast(node))
		return get_Cast_op(node);
	return node;
}
/* returns operand of node if node is a Cast, else the node itself
 * (const-qualified variant) */
const ir_node *skip_Cast_const(const ir_node *node)
{
	if (is_Cast(node))
		return get_Cast_op(node);
	return node;
}
/* returns operand of node if node is a Pin, else the node itself */
ir_node *skip_Pin(ir_node *node)
{
	if (is_Pin(node))
		return get_Pin_op(node);
	return node;
}
/* returns operand of node if node is a Confirm, else the node itself */
ir_node *skip_Confirm(ir_node *node)
{
	if (is_Confirm(node))
		return get_Confirm_value(node);
	return node;
}
/* Skips all high-level ops by repeatedly following their first
 * (data) operand until a non-high-level node is reached. */
ir_node *skip_HighLevel_ops(ir_node *node)
{
	while (is_op_highlevel(get_irn_op(node))) {
		node = get_irn_n(node, 0);
	}
	return node;
}
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
 * than any other approach, as Id chains are resolved and all point to the real node, or
 * all id's are self loops.
 *
 * Note: This function takes 10% of mostly ANY the compiler run, so it's
 * a little bit "hand optimized".
 *
 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
 */
ir_node *skip_Id(ir_node *node)
{
	ir_node *pred;
	/* don't assert node !!! */

	if (!node || (node->op != op_Id)) return node;

	/* Don't use get_Id_pred(): We get into an endless loop for
	   self-referencing Ids. */
	pred = node->in[0+1];

	if (pred->op != op_Id) return pred;

	if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
		ir_node *rem_pred, *res;

		if (pred->op != op_Id) return pred; /* shortcut */
		rem_pred = pred;

		assert(get_irn_arity (node) > 0);

		node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
		res = skip_Id(rem_pred);
		if (res->op == op_Id) /* self-loop */ return node;

		node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
		return res;
	} else {
		return node;
	}
}
/* Returns non-zero if the node is a strict Conv
 * (out-of-line wrapper around the _is_strictConv() macro). */
int (is_strictConv)(const ir_node *node)
{
	return _is_strictConv(node);
}
/* Returns non-zero if the node is NOT a Block
 * (out-of-line wrapper around the _is_no_Block() macro). */
int (is_no_Block)(const ir_node *node)
{
	return _is_no_Block(node);
}
/* Returns true if node is a SymConst node with kind symconst_addr_ent. */
int (is_SymConst_addr_ent)(const ir_node *node)
{
	return _is_SymConst_addr_ent(node);
}
/* Returns true if the operation manipulates control flow. */
int is_cfop(const ir_node *node)
{
	return is_op_cfopcode(get_irn_op(node));
}
/* Returns true if the operation manipulates interprocedural control flow:
   CallBegin, EndReg, EndExcept */
int is_ip_cfop(const ir_node *node)
{
	return is_ip_cfopcode(get_irn_op(node));
}
/* Returns true if the operation can change the control flow because
   it may raise an exception. */
int is_fragile_op(const ir_node *node)
{
	return is_op_fragile(get_irn_op(node));
}
/* Returns the memory operand of fragile operations.
 * NOTE(review): the case labels were lost in the extract; the list below
 * is reconstructed from upstream libFirm (all fragile ops share the
 * generic memory input pn_Generic_M) — verify against upstream. */
ir_node *get_fragile_op_mem(ir_node *node)
{
	assert(node && is_fragile_op(node));

	switch (get_irn_opcode(node)) {
	case iro_Call  :
	case iro_Quot  :
	case iro_DivMod:
	case iro_Div   :
	case iro_Mod   :
	case iro_Load  :
	case iro_Store :
	case iro_Alloc :
	case iro_Bound :
	case iro_CopyB :
		return get_irn_n(node, pn_Generic_M);
	case iro_Bad   :
	case iro_Unknown:
		return node;
	default:
		panic("should not be reached");
	}
}
/* Returns the result mode of a Div operation (Quot, DivMod, Div, Mod).
 * Panics for any other opcode. */
ir_mode *get_divop_resmod(const ir_node *node)
{
	switch (get_irn_opcode(node)) {
	case iro_Quot  : return get_Quot_resmode(node);
	case iro_DivMod: return get_DivMod_resmode(node);
	case iro_Div   : return get_Div_resmode(node);
	case iro_Mod   : return get_Mod_resmode(node);
	default:
		panic("should not be reached");
	}
}
/* Returns true if the operation is a forking control flow operation. */
int (is_irn_forking)(const ir_node *node)
{
	return _is_irn_forking(node);
}
/* Copies the node attributes from old_node to new_node
 * (out-of-line wrapper around the _copy_node_attr() macro). */
void (copy_node_attr)(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
{
	_copy_node_attr(irg, old_node, new_node);
}
/* Return the type associated with the value produced by n
 * if the node remarks this type as it is the case for
 * Cast, Const, SymConst and some Proj nodes. */
ir_type *(get_irn_type)(ir_node *node)
{
	return _get_irn_type(node);
}
/* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
   Cast) or NULL. */
ir_type *(get_irn_type_attr)(ir_node *node)
{
	return _get_irn_type_attr(node);
}
/* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
ir_entity *(get_irn_entity_attr)(ir_node *node)
{
	return _get_irn_entity_attr(node);
}
/* Returns non-zero for constant-like nodes. */
int (is_irn_constlike)(const ir_node *node)
{
	return _is_irn_constlike(node);
}
/*
 * Returns non-zero for nodes that are allowed to have keep-alives and
 * are neither Block nor PhiM.
 */
int (is_irn_keep)(const ir_node *node)
{
	return _is_irn_keep(node);
}
/*
 * Returns non-zero for nodes that are always placed in the start block.
 */
int (is_irn_start_block_placed)(const ir_node *node)
{
	return _is_irn_start_block_placed(node);
}
/* Returns non-zero for nodes that are machine operations. */
int (is_irn_machine_op)(const ir_node *node)
{
	return _is_irn_machine_op(node);
}
/* Returns non-zero for nodes that are machine operands. */
int (is_irn_machine_operand)(const ir_node *node)
{
	return _is_irn_machine_operand(node);
}
/* Returns non-zero for nodes that have the n'th user machine flag set. */
int (is_irn_machine_user)(const ir_node *node, unsigned n)
{
	return _is_irn_machine_user(node, n);
}
/* Returns non-zero for nodes that are CSE neutral to its users. */
int (is_irn_cse_neutral)(const ir_node *node)
{
	return _is_irn_cse_neutral(node);
}
/* Gets the string representation of the jump prediction. */
const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
{
#define X(a) case a: return #a
	switch (pred) {
		X(COND_JMP_PRED_NONE);
		X(COND_JMP_PRED_TRUE);
		X(COND_JMP_PRED_FALSE);
	}
	/* NOTE(review): fallback reconstructed — verify the exact string
	 * against upstream. */
	return "<unknown>";
#undef X
}
/** the get_type operation must be always implemented and return a firm type */
static ir_type *get_Default_type(const ir_node *n)
{
	(void) n; /* the default implementation ignores the node */
	return get_unknown_type();
}
/* Sets the get_type operation for an ir_op_ops.
 * Only Const, SymConst, Cast and Proj carry a value type; every other
 * opcode falls back to get_Default_type if nothing is installed yet. */
ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops)
{
	switch (code) {
	case iro_Const:    ops->get_type = get_Const_type; break;
	case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
	case iro_Cast:     ops->get_type = get_Cast_type; break;
	case iro_Proj:     ops->get_type = get_Proj_type; break;
	default:
		/* not allowed to be NULL */
		if (! ops->get_type)
			ops->get_type = get_Default_type;
		break;
	}
	return ops;
}
/** Return the attribute type of a SymConst node if exists */
static ir_type *get_SymConst_attr_type(const ir_node *self)
{
	symconst_kind kind = get_SymConst_kind(self);
	if (SYMCONST_HAS_TYPE(kind))
		return get_SymConst_type(self);
	/* NOTE(review): fallback return reconstructed — verify. */
	return NULL;
}
/** Return the attribute entity of a SymConst node if exists */
static ir_entity *get_SymConst_attr_entity(const ir_node *self)
{
	symconst_kind kind = get_SymConst_kind(self);
	if (SYMCONST_HAS_ENT(kind))
		return get_SymConst_entity(self);
	/* NOTE(review): fallback return reconstructed — verify. */
	return NULL;
}
/** the get_type_attr operation must be always implemented */
static ir_type *get_Null_type(const ir_node *n)
{
	(void) n; /* default implementation ignores the node */
	return firm_unknown_type;
}
/* Sets the get_type_attr operation for an ir_op_ops.
 * SymConst, Call, Alloc, Free and Cast carry a type attribute; every
 * other opcode falls back to get_Null_type if nothing is installed. */
ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops)
{
	switch (code) {
	case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
	case iro_Call:     ops->get_type_attr = get_Call_type; break;
	case iro_Alloc:    ops->get_type_attr = get_Alloc_type; break;
	case iro_Free:     ops->get_type_attr = get_Free_type; break;
	case iro_Cast:     ops->get_type_attr = get_Cast_type; break;
	default:
		/* not allowed to be NULL */
		if (! ops->get_type_attr)
			ops->get_type_attr = get_Null_type;
		break;
	}
	return ops;
}
/** the get_entity_attr operation must be always implemented */
static ir_entity *get_Null_ent(const ir_node *n)
{
	(void) n; /* default implementation ignores the node */
	/* NOTE(review): body reconstructed — verify it returns NULL upstream. */
	return NULL;
}
/* Sets the get_entity_attr operation for an ir_op_ops.
 * Only SymConst and Sel carry an entity attribute; every other opcode
 * falls back to get_Null_ent if nothing is installed yet. */
ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops)
{
	switch (code) {
	case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
	case iro_Sel:      ops->get_entity_attr = get_Sel_entity; break;
	default:
		/* not allowed to be NULL */
		if (! ops->get_entity_attr)
			ops->get_entity_attr = get_Null_ent;
		break;
	}
	return ops;
}
/* Sets the debug information of a node
 * (out-of-line wrapper around the _set_irn_dbg_info() macro). */
void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
{
	_set_irn_dbg_info(n, db);
}
/**
 * Returns the debug information of an node.
 *
 * @param n The node.
 */
dbg_info *(get_irn_dbg_info)(const ir_node *n)
{
	return _get_irn_dbg_info(n);
}
/* checks whether a node represents a global address
 * (i.e. a SymConst of kind symconst_addr_ent) */
int is_Global(const ir_node *node)
{
	return is_SymConst_addr_ent(node);
}
/* returns the entity of a global address (must satisfy is_Global()) */
ir_entity *get_Global_entity(const ir_node *node)
{
	return get_SymConst_entity(node);
}
/*
 * Calculate a hash value of a node.
 * Mixes the arity, all (intra-procedural) predecessors, the mode and the
 * opcode; CSE-neutral predecessors contribute only the multiplier so that
 * nodes differing only in such predecessors hash equally.
 */
unsigned firm_default_hash(const ir_node *node)
{
	unsigned h;
	int i, irn_arity;

	/* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
	h = irn_arity = get_irn_intra_arity(node);

	/* consider all in nodes... except the block if not a control flow. */
	for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
		ir_node *pred = get_irn_intra_n(node, i);
		if (is_irn_cse_neutral(pred))
			h *= 9; /* NOTE(review): neutral-pred branch reconstructed — verify */
		else
			h = 9*h + HASH_PTR(pred);
	}

	/* ...mode,... */
	h = 9*h + HASH_PTR(get_irn_mode(node));
	/* ...and code */
	h = 9*h + HASH_PTR(get_irn_op(node));

	return h;
} /* firm_default_hash */
2204 /* include generated code */
2205 #include "gen_irnode.c.inl"