3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
/* Some constants fixing the positions of a node's predecessors: */
37 #define CALL_PARAM_OFFSET 2
38 #define FUNCCALL_PARAM_OFFSET 1
39 #define SEL_INDEX_OFFSET 2
40 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
41 #define END_KEEPALIVE_OFFSET 0
/** Names of the pn_Cmp_* relations, indexed by the pnc value (0..15). */
static const char *pnc_name_arr [] = {
  "pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
  "pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
  "pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
  "pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};
51 * returns the pnc name from an pnc constant
53 const char *get_pnc_string(int pnc) {
54 return pnc_name_arr[pnc];
58 * Calculates the negated (Complement(R)) pnc condition.
60 int get_negated_pnc(int pnc, ir_mode *mode) {
63 /* do NOT add the Uo bit for non-floating point values */
64 if (! mode_is_float(mode))
70 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
72 get_inversed_pnc(int pnc) {
73 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
74 int lesser = pnc & pn_Cmp_Lt;
75 int greater = pnc & pn_Cmp_Gt;
77 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/* Debugging names for the projections of the Start node
   (presumably in pn_Start order -- verify against irnode.h). */
const char *pns_name_arr [] = {
  "initial_exec", "global_store",
  "frame_base", "globals", "args"
};
/* Debugging names for the SymConst kinds (cf. symconst_kind). */
const char *symconst_name_arr [] = {
  "type_tag", "size", "addr_name", "addr_ent"
};
/**
 * Indicates whether additional data can be registered to ir nodes.
 * If set to 1, this is no longer possible.
 */
95 static int forbid_new_data = 0;
/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
101 unsigned firm_add_node_size = 0;
104 /* register new space for every node */
105 unsigned register_additional_node_data(unsigned size) {
106 assert(!forbid_new_data && "Too late to register additional node data");
111 return firm_add_node_size += size;
117 /* Forbid the addition of new data to an ir node. */
/**
 * IR node constructor.
 * Create a new IR node in irg, with an op, mode, arity and
 * some incoming IR nodes.
 * If arity is negative, a node with a dynamic array is created.
 *
 * NOTE(review): this view of the body is fragmentary -- the return type,
 * the declarations of p/res/state/i/is_bl, the arity branch and several
 * braces are not visible here; comments describe only the visible code.
 */
new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
            int arity, ir_node **in)
  /* node header + op-specific attributes + custom data registered
     via register_additional_node_data() */
  size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
  assert(irg && op && mode);
  p = obstack_alloc (irg->obst, node_size);
  memset(p, 0, node_size);
  /* custom data (firm_add_node_size bytes) sits in front of the node */
  res = (ir_node *) (p + firm_add_node_size);
  state = get_op_pinned(op);
  res->kind = k_ir_node;
  /* register the node with the graph and obtain its index */
  res->node_idx = irg_register_node_idx(irg, res);
  res->pinned = state != op_pin_state_floats;
  /* in[0] holds the block; the real predecessors start at in[1] */
  res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
  res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
  set_irn_dbg_info(res, db);
  res->node_nr = get_irp_new_node_nr();
  INIT_LIST_HEAD(&res->edge_info.outs_head);
  is_bl = is_Block(res);
  INIT_LIST_HEAD(&res->attr.block.succ_head);
  /* tell the edge subsystem about every new edge; for non-Block nodes
     this includes the block edge at position -1 (hence i starts at is_bl) */
  for (i = is_bl; i <= arity; ++i)
    edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
  hook_new_node(irg, res);
180 /*-- getting some parameters from ir_nodes --*/
183 (is_ir_node)(const void *thing) {
184 return _is_ir_node(thing);
188 (get_irn_intra_arity)(const ir_node *node) {
189 return _get_irn_intra_arity(node);
193 (get_irn_inter_arity)(const ir_node *node) {
194 return _get_irn_inter_arity(node);
197 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
200 (get_irn_arity)(const ir_node *node) {
201 return _get_irn_arity(node);
/* Returns the array with ins. This array is shifted with respect to the
   array accessed by get_irn_n: The block operand is at position 0 not -1.
   (@@@ This should be changed.)
   The order of the predecessors in this array is not guaranteed, except that
   lists of operands as predecessors of Block or arguments of a Call are
   consecutive.
   NOTE(review): the return type, the intraprocedural return and the closing
   braces of this function are not visible in this view. */
get_irn_in (const ir_node *node) {
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    /* Filter nodes keep their interprocedural ins in attr.filter.in_cg */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      return node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      /* Blocks may carry an extra interprocedural predecessor array */
      return node->attr.block.in_cg;
    /* else fall through */
/* Replaces the whole in-array of a node by the arity nodes in 'in',
   keeping the block operand at array position 0 and informing the edge
   subsystem about every changed edge.
   NOTE(review): this view of the body is fragmentary -- the declarations
   of 'arr' and 'i', the intraprocedural branch selecting &node->in, the
   'else' of the overlap test and several closing braces are not visible. */
set_irn_in (ir_node *node, int arity, ir_node **in) {
  ir_graph *irg = current_ir_graph;
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      arr = &node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      arr = &node->attr.block.in_cg;
  /* edges kept within the overlap of old and new arity are replacements;
     positions beyond the old arity are additions (old target NULL) */
  for (i = 0; i < arity; i++) {
    if (i < ARR_LEN(*arr)-1)
      edges_notify_edge(node, i, in[i], (*arr)[i+1], irg);
    edges_notify_edge(node, i, in[i], NULL, irg);
  /* edges beyond the new arity are deletions (new target NULL) */
  for(;i < ARR_LEN(*arr)-1; i++) {
    edges_notify_edge(node, i, NULL, (*arr)[i+1], irg);
  /* re-allocate only when the arity actually changes, preserving the
     block operand at slot 0 */
  if (arity != ARR_LEN(*arr) - 1) {
    ir_node * block = (*arr)[0];
    *arr = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
  /* backedge info must be rebuilt for the new array */
  fix_backedges(irg->obst, node);
  memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
265 (get_irn_intra_n)(const ir_node *node, int n) {
266 return _get_irn_intra_n (node, n);
270 (get_irn_inter_n)(const ir_node *node, int n) {
271 return _get_irn_inter_n (node, n);
274 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
277 (get_irn_n)(const ir_node *node, int n) {
278 return _get_irn_n(node, n);
/* Sets the n-th predecessor of a node (n == -1 addresses the block).
   NOTE(review): this view of the body is fragmentary -- early returns
   and several closing braces are not visible; comments describe only
   the visible code. */
set_irn_n (ir_node *node, int n, ir_node *in) {
  assert(node && node->kind == k_ir_node);
  /* n may be -1 (the block) but never >= arity */
  assert(n < get_irn_arity(node));
  assert(in && in->kind == k_ir_node);
  if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
    /* Change block pred in both views! */
    node->in[n + 1] = in;
    assert(node->attr.filter.in_cg);
    node->attr.filter.in_cg[n + 1] = in;
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      node->attr.filter.in_cg[n + 1] = in;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      node->attr.block.in_cg[n + 1] = in;
    /* else fall through */
  /* hook and edge subsystem get old target node->in[n + 1] before the store */
  hook_set_irn_n(node, n, in, node->in[n + 1]);
  /* Here, we rely on src and tgt being in the current ir graph */
  edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
  node->in[n + 1] = in;
317 (get_irn_mode)(const ir_node *node) {
318 return _get_irn_mode(node);
322 (set_irn_mode)(ir_node *node, ir_mode *mode)
324 _set_irn_mode(node, mode);
328 get_irn_modecode (const ir_node *node)
331 return node->mode->code;
334 /** Gets the string representation of the mode .*/
336 get_irn_modename (const ir_node *node)
339 return get_mode_name(node->mode);
343 get_irn_modeident (const ir_node *node)
346 return get_mode_ident(node->mode);
350 (get_irn_op)(const ir_node *node) {
351 return _get_irn_op(node);
354 /* should be private to the library: */
356 (set_irn_op)(ir_node *node, ir_op *op) {
357 _set_irn_op(node, op);
361 (get_irn_opcode)(const ir_node *node)
363 return _get_irn_opcode(node);
367 get_irn_opname (const ir_node *node)
370 if ((get_irn_op((ir_node *)node) == op_Phi) &&
371 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
372 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
373 return get_id_str(node->op->name);
377 get_irn_opident (const ir_node *node)
380 return node->op->name;
384 (get_irn_visited)(const ir_node *node)
386 return _get_irn_visited(node);
390 (set_irn_visited)(ir_node *node, unsigned long visited)
392 _set_irn_visited(node, visited);
396 (mark_irn_visited)(ir_node *node) {
397 _mark_irn_visited(node);
401 (irn_not_visited)(const ir_node *node) {
402 return _irn_not_visited(node);
406 (irn_visited)(const ir_node *node) {
407 return _irn_visited(node);
411 (set_irn_link)(ir_node *node, void *link) {
412 _set_irn_link(node, link);
416 (get_irn_link)(const ir_node *node) {
417 return _get_irn_link(node);
421 (get_irn_pinned)(const ir_node *node) {
422 return _get_irn_pinned(node);
426 (is_irn_pinned_in_irg) (const ir_node *node) {
427 return _is_irn_pinned_in_irg(node);
/* Sets the pin state of a node.
   NOTE(review): this view of the body is fragmentary -- the statement
   controlled by the Tuple test and the assert(...) skeleton around the
   two pinnability conditions are not visible here. */
void set_irn_pinned(ir_node *node, op_pin_state state) {
  /* due to optimization an opt may be turned into a Tuple */
  if (get_irn_op(node) == op_Tuple)
  /* the node is exception/memory pinned OR */
  (get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned) ||
  /* a floating point node can be pinned if fp_exceptions are enabled */
  (mode_is_float(get_irn_mode(node)) && get_irg_fp_model(get_irn_irg(node)) & fp_exceptions)
  /* only the two explicit states are legal arguments */
  assert(state == op_pin_state_pinned || state == op_pin_state_floats);
  /* stored as a boolean: pinned unless explicitly floating */
  node->pinned = state != op_pin_state_floats;
447 #ifdef DO_HEAPANALYSIS
448 /* Access the abstract interpretation information of a node.
449 Returns NULL if no such information is available. */
450 struct abstval *get_irn_abst_value(ir_node *n) {
453 /* Set the abstract interpretation information of a node. */
454 void set_irn_abst_value(ir_node *n, struct abstval *os) {
457 struct section *firm_get_irn_section(ir_node *n) {
460 void firm_set_irn_section(ir_node *n, struct section *s) {
464 /* Dummies needed for firmjni. */
465 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
466 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
467 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
468 void firm_set_irn_section(ir_node *n, struct section *s) {}
469 #endif /* DO_HEAPANALYSIS */
472 /* Outputs a unique number for this node */
473 long get_irn_node_nr(const ir_node *node) {
476 return node->node_nr;
478 return (long)PTR_TO_INT(node);
483 get_irn_const_attr (ir_node *node)
485 assert (node->op == op_Const);
486 return node->attr.con;
490 get_irn_proj_attr (ir_node *node)
492 assert (node->op == op_Proj);
493 return node->attr.proj;
497 get_irn_alloc_attr (ir_node *node)
499 assert (node->op == op_Alloc);
500 return node->attr.alloc;
504 get_irn_free_attr (ir_node *node)
506 assert (node->op == op_Free);
507 return node->attr.free;
511 get_irn_symconst_attr (ir_node *node)
513 assert (node->op == op_SymConst);
514 return node->attr.symc;
518 get_irn_call_attr (ir_node *node)
520 assert (node->op == op_Call);
521 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
525 get_irn_sel_attr (ir_node *node)
527 assert (node->op == op_Sel);
528 return node->attr.sel;
532 get_irn_phi_attr (ir_node *node)
534 assert (node->op == op_Phi);
535 return node->attr.phi0_pos;
539 get_irn_block_attr (ir_node *node)
541 assert (node->op == op_Block);
542 return node->attr.block;
546 get_irn_load_attr (ir_node *node)
548 assert (node->op == op_Load);
549 return node->attr.load;
553 get_irn_store_attr (ir_node *node)
555 assert (node->op == op_Store);
556 return node->attr.store;
560 get_irn_except_attr (ir_node *node)
562 assert (node->op == op_Div || node->op == op_Quot ||
563 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
564 return node->attr.except;
568 get_irn_generic_attr (ir_node *node) {
572 unsigned (get_irn_idx)(const ir_node *node) {
573 assert(is_ir_node(node));
574 return _get_irn_idx(node);
577 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
579 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
580 if (get_irn_n(node, i) == arg)
586 /** manipulate fields of individual nodes **/
588 /* this works for all except Block */
590 get_nodes_block (const ir_node *node) {
591 assert (!(node->op == op_Block));
592 assert (is_irn_pinned_in_irg(node) && "block info may be incorrect");
593 return get_irn_n(node, -1);
597 set_nodes_block (ir_node *node, ir_node *block) {
598 assert (!(node->op == op_Block));
599 set_irn_n(node, -1, block);
602 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
603 * from Start. If so returns frame type, else Null. */
604 ir_type *is_frame_pointer(ir_node *n) {
605 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
606 ir_node *start = get_Proj_pred(n);
607 if (get_irn_op(start) == op_Start) {
608 return get_irg_frame_type(get_irn_irg(start));
614 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
615 * from Start. If so returns global type, else Null. */
616 ir_type *is_globals_pointer(ir_node *n) {
617 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
618 ir_node *start = get_Proj_pred(n);
619 if (get_irn_op(start) == op_Start) {
620 return get_glob_type();
626 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
627 * from Start. If so returns tls type, else Null. */
628 ir_type *is_tls_pointer(ir_node *n) {
629 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
630 ir_node *start = get_Proj_pred(n);
631 if (get_irn_op(start) == op_Start) {
632 return get_tls_type();
638 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
639 * from Start. If so returns 1, else 0. */
640 int is_value_arg_pointer(ir_node *n) {
641 if ((get_irn_op(n) == op_Proj) &&
642 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
643 (get_irn_op(get_Proj_pred(n)) == op_Start))
648 /* Returns an array with the predecessors of the Block. Depending on
649 the implementation of the graph data structure this can be a copy of
650 the internal representation of predecessors as well as the internal
651 array itself. Therefore writing to this array might obstruct the ir. */
653 get_Block_cfgpred_arr (ir_node *node)
655 assert ((node->op == op_Block));
656 return (ir_node **)&(get_irn_in(node)[1]);
660 (get_Block_n_cfgpreds)(ir_node *node) {
661 return _get_Block_n_cfgpreds(node);
665 (get_Block_cfgpred)(ir_node *node, int pos) {
666 return _get_Block_cfgpred(node, pos);
670 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
671 assert (node->op == op_Block);
672 set_irn_n(node, pos, pred);
676 (get_Block_cfgpred_block)(ir_node *node, int pos) {
677 return _get_Block_cfgpred_block(node, pos);
681 get_Block_matured (ir_node *node) {
682 assert (node->op == op_Block);
683 return (int)node->attr.block.matured;
687 set_Block_matured (ir_node *node, int matured) {
688 assert (node->op == op_Block);
689 node->attr.block.matured = matured;
693 (get_Block_block_visited)(ir_node *node) {
694 return _get_Block_block_visited(node);
698 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
699 _set_Block_block_visited(node, visit);
702 /* For this current_ir_graph must be set. */
704 (mark_Block_block_visited)(ir_node *node) {
705 _mark_Block_block_visited(node);
709 (Block_not_block_visited)(ir_node *node) {
710 return _Block_not_block_visited(node);
714 get_Block_graph_arr (ir_node *node, int pos) {
715 assert (node->op == op_Block);
716 return node->attr.block.graph_arr[pos+1];
720 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
721 assert (node->op == op_Block);
722 node->attr.block.graph_arr[pos+1] = value;
725 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
726 assert(node->op == op_Block);
727 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
728 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
729 node->attr.block.in_cg[0] = NULL;
730 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
732 /* Fix backedge array. fix_backedges() operates depending on
733 interprocedural_view. */
734 int ipv = get_interprocedural_view();
735 set_interprocedural_view(1);
736 fix_backedges(current_ir_graph->obst, node);
737 set_interprocedural_view(ipv);
740 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
743 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
744 assert(node->op == op_Block &&
745 node->attr.block.in_cg &&
746 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
747 node->attr.block.in_cg[pos + 1] = pred;
750 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
751 assert(node->op == op_Block);
752 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
755 int get_Block_cg_n_cfgpreds(ir_node * node) {
756 assert(node->op == op_Block);
757 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
760 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
761 assert(node->op == op_Block && node->attr.block.in_cg);
762 return node->attr.block.in_cg[pos + 1];
765 void remove_Block_cg_cfgpred_arr(ir_node * node) {
766 assert(node->op == op_Block);
767 node->attr.block.in_cg = NULL;
770 ir_node *(set_Block_dead)(ir_node *block) {
771 return _set_Block_dead(block);
774 int (is_Block_dead)(const ir_node *block) {
775 return _is_Block_dead(block);
778 ir_extblk *get_Block_extbb(const ir_node *block) {
780 assert(is_Block(block));
781 res = block->attr.block.extblk;
782 assert(res == NULL || is_ir_extbb(res));
786 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
787 assert(is_Block(block));
788 assert(extblk == NULL || is_ir_extbb(extblk));
789 block->attr.block.extblk = extblk;
793 get_End_n_keepalives(ir_node *end) {
794 assert (end->op == op_End);
795 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
799 get_End_keepalive(ir_node *end, int pos) {
800 assert (end->op == op_End);
801 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
805 add_End_keepalive (ir_node *end, ir_node *ka) {
807 ir_graph *irg = get_irn_irg(end);
809 assert(end->op == op_End);
810 l = ARR_LEN(end->in);
811 ARR_APP1(ir_node *, end->in, ka);
812 edges_notify_edge(end, l - 1, end->in[l], NULL, irg);
816 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
817 assert (end->op == op_End);
818 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
821 /* Set new keep-alives */
822 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
824 ir_graph *irg = get_irn_irg(end);
826 /* notify that edges are deleted */
827 for (i = 1 + END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in); ++i) {
828 edges_notify_edge(end, i, end->in[i], NULL, irg);
830 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
832 for (i = 0; i < n; ++i) {
833 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
834 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, NULL, end->in[1 + END_KEEPALIVE_OFFSET + i], irg);
839 free_End (ir_node *end) {
840 assert (end->op == op_End);
843 end->in = NULL; /* @@@ make sure we get an error if we use the
844 in array afterwards ... */
847 /* Return the target address of an IJmp */
848 ir_node *get_IJmp_target(ir_node *ijmp) {
849 assert(ijmp->op == op_IJmp);
850 return get_irn_n(ijmp, 0);
853 /** Sets the target address of an IJmp */
854 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
855 assert(ijmp->op == op_IJmp);
856 set_irn_n(ijmp, 0, tgt);
860 > Implementing the case construct (which is where the constant Proj node is
861 > important) involves far more than simply determining the constant values.
862 > We could argue that this is more properly a function of the translator from
863 > Firm to the target machine. That could be done if there was some way of
864 > projecting "default" out of the Cond node.
865 I know it's complicated.
Basically there are two problems:
867 - determining the gaps between the projs
868 - determining the biggest case constant to know the proj number for
870 I see several solutions:
871 1. Introduce a ProjDefault node. Solves both problems.
872 This means to extend all optimizations executed during construction.
873 2. Give the Cond node for switch two flavors:
874 a) there are no gaps in the projs (existing flavor)
875 b) gaps may exist, default proj is still the Proj with the largest
876 projection number. This covers also the gaps.
877 3. Fix the semantic of the Cond to that of 2b)
879 Solution 2 seems to be the best:
880 Computing the gaps in the Firm representation is not too hard, i.e.,
881 libFIRM can implement a routine that transforms between the two
882 flavours. This is also possible for 1) but 2) does not require to
883 change any existing optimization.
884 Further it should be far simpler to determine the biggest constant than
886 I don't want to choose 3) as 2a) seems to have advantages for
887 dataflow analysis and 3) does not allow to convert the representation to
891 get_Cond_selector (ir_node *node) {
892 assert (node->op == op_Cond);
893 return get_irn_n(node, 0);
897 set_Cond_selector (ir_node *node, ir_node *selector) {
898 assert (node->op == op_Cond);
899 set_irn_n(node, 0, selector);
903 get_Cond_kind (ir_node *node) {
904 assert (node->op == op_Cond);
905 return node->attr.cond.kind;
909 set_Cond_kind (ir_node *node, cond_kind kind) {
910 assert (node->op == op_Cond);
911 node->attr.cond.kind = kind;
915 get_Cond_defaultProj (ir_node *node) {
916 assert (node->op == op_Cond);
917 return node->attr.cond.default_proj;
921 get_Return_mem (ir_node *node) {
922 assert (node->op == op_Return);
923 return get_irn_n(node, 0);
927 set_Return_mem (ir_node *node, ir_node *mem) {
928 assert (node->op == op_Return);
929 set_irn_n(node, 0, mem);
933 get_Return_n_ress (ir_node *node) {
934 assert (node->op == op_Return);
935 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
939 get_Return_res_arr (ir_node *node)
941 assert ((node->op == op_Return));
942 if (get_Return_n_ress(node) > 0)
943 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
950 set_Return_n_res (ir_node *node, int results) {
951 assert (node->op == op_Return);
956 get_Return_res (ir_node *node, int pos) {
957 assert (node->op == op_Return);
958 assert (get_Return_n_ress(node) > pos);
959 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
963 set_Return_res (ir_node *node, int pos, ir_node *res){
964 assert (node->op == op_Return);
965 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
968 tarval *(get_Const_tarval)(ir_node *node) {
969 return _get_Const_tarval(node);
973 set_Const_tarval (ir_node *node, tarval *con) {
974 assert (node->op == op_Const);
975 node->attr.con.tv = con;
978 cnst_classify_t (classify_Const)(ir_node *node)
980 return _classify_Const(node);
984 /* The source language type. Must be an atomic type. Mode of type must
985 be mode of node. For tarvals from entities type must be pointer to
988 get_Const_type (ir_node *node) {
989 assert (node->op == op_Const);
990 return node->attr.con.tp;
994 set_Const_type (ir_node *node, ir_type *tp) {
995 assert (node->op == op_Const);
996 if (tp != firm_unknown_type) {
997 assert (is_atomic_type(tp));
998 assert (get_type_mode(tp) == get_irn_mode(node));
1000 node->attr.con.tp = tp;
1005 get_SymConst_kind (const ir_node *node) {
1006 assert (node->op == op_SymConst);
1007 return node->attr.symc.num;
1011 set_SymConst_kind (ir_node *node, symconst_kind num) {
1012 assert (node->op == op_SymConst);
1013 node->attr.symc.num = num;
1017 get_SymConst_type (ir_node *node) {
1018 assert( (node->op == op_SymConst)
1019 && (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1020 return node->attr.symc.sym.type_p = skip_tid(node->attr.symc.sym.type_p);
1024 set_SymConst_type (ir_node *node, ir_type *tp) {
1025 assert( (node->op == op_SymConst)
1026 && (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1027 node->attr.symc.sym.type_p = tp;
1031 get_SymConst_name (ir_node *node) {
1032 assert ( (node->op == op_SymConst)
1033 && (get_SymConst_kind(node) == symconst_addr_name));
1034 return node->attr.symc.sym.ident_p;
1038 set_SymConst_name (ir_node *node, ident *name) {
1039 assert ( (node->op == op_SymConst)
1040 && (get_SymConst_kind(node) == symconst_addr_name));
1041 node->attr.symc.sym.ident_p = name;
1045 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1046 entity *get_SymConst_entity (ir_node *node) {
1047 assert ( (node->op == op_SymConst)
1048 && (get_SymConst_kind (node) == symconst_addr_ent));
1049 return node->attr.symc.sym.entity_p;
1052 void set_SymConst_entity (ir_node *node, entity *ent) {
1053 assert ( (node->op == op_SymConst)
1054 && (get_SymConst_kind(node) == symconst_addr_ent));
1055 node->attr.symc.sym.entity_p = ent;
1058 union symconst_symbol
1059 get_SymConst_symbol (ir_node *node) {
1060 assert (node->op == op_SymConst);
1061 return node->attr.symc.sym;
1065 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1066 assert (node->op == op_SymConst);
1067 node->attr.symc.sym = sym;
1071 get_SymConst_value_type (ir_node *node) {
1072 assert (node->op == op_SymConst);
1073 if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
1074 return node->attr.symc.tp;
1078 set_SymConst_value_type (ir_node *node, ir_type *tp) {
1079 assert (node->op == op_SymConst);
1080 node->attr.symc.tp = tp;
1084 get_Sel_mem (ir_node *node) {
1085 assert (node->op == op_Sel);
1086 return get_irn_n(node, 0);
1090 set_Sel_mem (ir_node *node, ir_node *mem) {
1091 assert (node->op == op_Sel);
1092 set_irn_n(node, 0, mem);
1096 get_Sel_ptr (ir_node *node) {
1097 assert (node->op == op_Sel);
1098 return get_irn_n(node, 1);
1102 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1103 assert (node->op == op_Sel);
1104 set_irn_n(node, 1, ptr);
1108 get_Sel_n_indexs (ir_node *node) {
1109 assert (node->op == op_Sel);
1110 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1114 get_Sel_index_arr (ir_node *node)
1116 assert ((node->op == op_Sel));
1117 if (get_Sel_n_indexs(node) > 0)
1118 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1124 get_Sel_index (ir_node *node, int pos) {
1125 assert (node->op == op_Sel);
1126 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1130 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1131 assert (node->op == op_Sel);
1132 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1136 get_Sel_entity (ir_node *node) {
1137 assert (node->op == op_Sel);
1138 return node->attr.sel.ent;
1142 set_Sel_entity (ir_node *node, entity *ent) {
1143 assert (node->op == op_Sel);
1144 node->attr.sel.ent = ent;
1148 /* For unary and binary arithmetic operations the access to the
1149 operands can be factored out. Left is the first, right the
1150 second arithmetic value as listed in tech report 0999-33.
1151 unops are: Minus, Abs, Not, Conv, Cast
1152 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1153 Shr, Shrs, Rotate, Cmp */
1157 get_Call_mem (ir_node *node) {
1158 assert (node->op == op_Call);
1159 return get_irn_n(node, 0);
1163 set_Call_mem (ir_node *node, ir_node *mem) {
1164 assert (node->op == op_Call);
1165 set_irn_n(node, 0, mem);
1169 get_Call_ptr (ir_node *node) {
1170 assert (node->op == op_Call);
1171 return get_irn_n(node, 1);
1175 set_Call_ptr (ir_node *node, ir_node *ptr) {
1176 assert (node->op == op_Call);
1177 set_irn_n(node, 1, ptr);
1181 get_Call_param_arr (ir_node *node) {
1182 assert (node->op == op_Call);
1183 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1187 get_Call_n_params (ir_node *node) {
1188 assert (node->op == op_Call);
1189 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1193 get_Call_arity (ir_node *node) {
1194 assert (node->op == op_Call);
1195 return get_Call_n_params(node);
1199 set_Call_arity (ir_node *node, ir_node *arity) {
1200 assert (node->op == op_Call);
1205 get_Call_param (ir_node *node, int pos) {
1206 assert (node->op == op_Call);
1207 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1211 set_Call_param (ir_node *node, int pos, ir_node *param) {
1212 assert (node->op == op_Call);
1213 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1217 get_Call_type (ir_node *node) {
1218 assert (node->op == op_Call);
1219 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1223 set_Call_type (ir_node *node, ir_type *tp) {
1224 assert (node->op == op_Call);
1225 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1226 node->attr.call.cld_tp = tp;
1229 int Call_has_callees(ir_node *node) {
1230 assert(node && node->op == op_Call);
1231 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1232 (node->attr.call.callee_arr != NULL));
1235 int get_Call_n_callees(ir_node * node) {
1236 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1237 return ARR_LEN(node->attr.call.callee_arr);
1240 entity * get_Call_callee(ir_node * node, int pos) {
1241 assert(pos >= 0 && pos < get_Call_n_callees(node));
1242 return node->attr.call.callee_arr[pos];
1245 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1246 assert(node->op == op_Call);
1247 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1248 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1250 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1253 void remove_Call_callee_arr(ir_node * node) {
1254 assert(node->op == op_Call);
1255 node->attr.call.callee_arr = NULL;
1258 ir_node * get_CallBegin_ptr (ir_node *node) {
1259 assert(node->op == op_CallBegin);
1260 return get_irn_n(node, 0);
1262 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1263 assert(node->op == op_CallBegin);
1264 set_irn_n(node, 0, ptr);
1266 ir_node * get_CallBegin_call (ir_node *node) {
1267 assert(node->op == op_CallBegin);
1268 return node->attr.callbegin.call;
1270 void set_CallBegin_call (ir_node *node, ir_node *call) {
1271 assert(node->op == op_CallBegin);
1272 node->attr.callbegin.call = call;
1277 ir_node * get_##OP##_left(ir_node *node) { \
1278 assert(node->op == op_##OP); \
1279 return get_irn_n(node, node->op->op_index); \
1281 void set_##OP##_left(ir_node *node, ir_node *left) { \
1282 assert(node->op == op_##OP); \
1283 set_irn_n(node, node->op->op_index, left); \
1285 ir_node *get_##OP##_right(ir_node *node) { \
1286 assert(node->op == op_##OP); \
1287 return get_irn_n(node, node->op->op_index + 1); \
1289 void set_##OP##_right(ir_node *node, ir_node *right) { \
1290 assert(node->op == op_##OP); \
1291 set_irn_n(node, node->op->op_index + 1, right); \
1295 ir_node *get_##OP##_op(ir_node *node) { \
1296 assert(node->op == op_##OP); \
1297 return get_irn_n(node, node->op->op_index); \
1299 void set_##OP##_op (ir_node *node, ir_node *op) { \
1300 assert(node->op == op_##OP); \
1301 set_irn_n(node, node->op->op_index, op); \
1311 get_Quot_mem (ir_node *node) {
1312 assert (node->op == op_Quot);
1313 return get_irn_n(node, 0);
1317 set_Quot_mem (ir_node *node, ir_node *mem) {
1318 assert (node->op == op_Quot);
1319 set_irn_n(node, 0, mem);
1325 get_DivMod_mem (ir_node *node) {
1326 assert (node->op == op_DivMod);
1327 return get_irn_n(node, 0);
1331 set_DivMod_mem (ir_node *node, ir_node *mem) {
1332 assert (node->op == op_DivMod);
1333 set_irn_n(node, 0, mem);
1339 get_Div_mem (ir_node *node) {
1340 assert (node->op == op_Div);
1341 return get_irn_n(node, 0);
1345 set_Div_mem (ir_node *node, ir_node *mem) {
1346 assert (node->op == op_Div);
1347 set_irn_n(node, 0, mem);
1353 get_Mod_mem (ir_node *node) {
1354 assert(node->op == op_Mod);
1355 return get_irn_n(node, 0);
1359 set_Mod_mem (ir_node *node, ir_node *mem) {
1360 assert(node->op == op_Mod);
1361 set_irn_n(node, 0, mem);
1377 int get_Conv_strict(ir_node *node) {
1378 assert(node->op == op_Conv);
1379 return node->attr.conv.strict;
1382 void set_Conv_strict(ir_node *node, int strict_flag) {
1383 assert(node->op == op_Conv);
1384 node->attr.conv.strict = (char)strict_flag;
1388 get_Cast_type (ir_node *node) {
1389 assert(node->op == op_Cast);
1390 return node->attr.cast.totype;
1394 set_Cast_type (ir_node *node, ir_type *to_tp) {
1395 assert(node->op == op_Cast);
1396 node->attr.cast.totype = to_tp;
/* Checks for upcast.
 *
 * Returns true if the Cast node casts a class type to a super type.
 * Also works for pointers to classes: matching pointer levels are
 * stripped from both sides first.  Requires consistent typeinfo on the
 * graph (asserted below).
 * NOTE(review): closing braces and any code between the stripping loop
 * and the class test are not visible in this view. */
int is_Cast_upcast(ir_node *node) {
  ir_type *totype = get_Cast_type(node);
  /* source type comes from the typeinfo of the casted value */
  ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
  ir_graph *myirg = get_irn_irg(node);
  assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
  /* strip matching pointer levels on both sides */
  while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
    totype = get_pointer_points_to_type(totype);
    fromtype = get_pointer_points_to_type(fromtype);
  /* only class types can stand in a subclass relation */
  if (!is_Class_type(totype)) return 0;
  return is_SubClass_of(fromtype, totype);
1423 /* Checks for downcast.
1425 * Returns true if the Cast node casts a class type to a sub type.
1427 int is_Cast_downcast(ir_node *node) {
1428 ir_type *totype = get_Cast_type(node);
1429 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1431 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1434 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1435 totype = get_pointer_points_to_type(totype);
1436 fromtype = get_pointer_points_to_type(fromtype);
1441 if (!is_Class_type(totype)) return 0;
1442 return is_SubClass_of(totype, fromtype);
1446 (is_unop)(const ir_node *node) {
1447 return _is_unop(node);
1451 get_unop_op (ir_node *node) {
1452 if (node->op->opar == oparity_unary)
1453 return get_irn_n(node, node->op->op_index);
1455 assert(node->op->opar == oparity_unary);
1460 set_unop_op (ir_node *node, ir_node *op) {
1461 if (node->op->opar == oparity_unary)
1462 set_irn_n(node, node->op->op_index, op);
1464 assert(node->op->opar == oparity_unary);
1468 (is_binop)(const ir_node *node) {
1469 return _is_binop(node);
1473 get_binop_left (ir_node *node) {
1474 if (node->op->opar == oparity_binary)
1475 return get_irn_n(node, node->op->op_index);
1477 assert(node->op->opar == oparity_binary);
1482 set_binop_left (ir_node *node, ir_node *left) {
1483 if (node->op->opar == oparity_binary)
1484 set_irn_n(node, node->op->op_index, left);
1486 assert (node->op->opar == oparity_binary);
1490 get_binop_right (ir_node *node) {
1491 if (node->op->opar == oparity_binary)
1492 return get_irn_n(node, node->op->op_index + 1);
1494 assert(node->op->opar == oparity_binary);
1499 set_binop_right (ir_node *node, ir_node *right) {
1500 if (node->op->opar == oparity_binary)
1501 set_irn_n(node, node->op->op_index + 1, right);
1503 assert (node->op->opar == oparity_binary);
1506 int is_Phi (const ir_node *n) {
1512 if (op == op_Filter) return get_interprocedural_view();
1515 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1516 (get_irn_arity(n) > 0));
1521 int is_Phi0 (const ir_node *n) {
1524 return ((get_irn_op(n) == op_Phi) &&
1525 (get_irn_arity(n) == 0) &&
1526 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1530 get_Phi_preds_arr (ir_node *node) {
1531 assert (node->op == op_Phi);
1532 return (ir_node **)&(get_irn_in(node)[1]);
1536 get_Phi_n_preds (ir_node *node) {
1537 assert (is_Phi(node) || is_Phi0(node));
1538 return (get_irn_arity(node));
1542 void set_Phi_n_preds (ir_node *node, int n_preds) {
1543 assert (node->op == op_Phi);
1548 get_Phi_pred (ir_node *node, int pos) {
1549 assert (is_Phi(node) || is_Phi0(node));
1550 return get_irn_n(node, pos);
1554 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1555 assert (is_Phi(node) || is_Phi0(node));
1556 set_irn_n(node, pos, pred);
1560 int is_memop(ir_node *node) {
1561 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1564 ir_node *get_memop_mem (ir_node *node) {
1565 assert(is_memop(node));
1566 return get_irn_n(node, 0);
1569 void set_memop_mem (ir_node *node, ir_node *mem) {
1570 assert(is_memop(node));
1571 set_irn_n(node, 0, mem);
1574 ir_node *get_memop_ptr (ir_node *node) {
1575 assert(is_memop(node));
1576 return get_irn_n(node, 1);
1579 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1580 assert(is_memop(node));
1581 set_irn_n(node, 1, ptr);
1585 get_Load_mem (ir_node *node) {
1586 assert (node->op == op_Load);
1587 return get_irn_n(node, 0);
1591 set_Load_mem (ir_node *node, ir_node *mem) {
1592 assert (node->op == op_Load);
1593 set_irn_n(node, 0, mem);
1597 get_Load_ptr (ir_node *node) {
1598 assert (node->op == op_Load);
1599 return get_irn_n(node, 1);
1603 set_Load_ptr (ir_node *node, ir_node *ptr) {
1604 assert (node->op == op_Load);
1605 set_irn_n(node, 1, ptr);
1609 get_Load_mode (ir_node *node) {
1610 assert (node->op == op_Load);
1611 return node->attr.load.load_mode;
1615 set_Load_mode (ir_node *node, ir_mode *mode) {
1616 assert (node->op == op_Load);
1617 node->attr.load.load_mode = mode;
1621 get_Load_volatility (ir_node *node) {
1622 assert (node->op == op_Load);
1623 return node->attr.load.volatility;
1627 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1628 assert (node->op == op_Load);
1629 node->attr.load.volatility = volatility;
1634 get_Store_mem (ir_node *node) {
1635 assert (node->op == op_Store);
1636 return get_irn_n(node, 0);
1640 set_Store_mem (ir_node *node, ir_node *mem) {
1641 assert (node->op == op_Store);
1642 set_irn_n(node, 0, mem);
1646 get_Store_ptr (ir_node *node) {
1647 assert (node->op == op_Store);
1648 return get_irn_n(node, 1);
1652 set_Store_ptr (ir_node *node, ir_node *ptr) {
1653 assert (node->op == op_Store);
1654 set_irn_n(node, 1, ptr);
1658 get_Store_value (ir_node *node) {
1659 assert (node->op == op_Store);
1660 return get_irn_n(node, 2);
1664 set_Store_value (ir_node *node, ir_node *value) {
1665 assert (node->op == op_Store);
1666 set_irn_n(node, 2, value);
1670 get_Store_volatility (ir_node *node) {
1671 assert (node->op == op_Store);
1672 return node->attr.store.volatility;
1676 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1677 assert (node->op == op_Store);
1678 node->attr.store.volatility = volatility;
/* Returns the memory input (in 0) of an Alloc node. */
ir_node *
get_Alloc_mem (ir_node *node) {
  assert (node->op == op_Alloc);
  return get_irn_n(node, 0);
}

/* Sets the memory input (in 0) of an Alloc node. */
void
set_Alloc_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Alloc);
  set_irn_n(node, 0, mem);
}

/* Returns the size input (in 1) of an Alloc node. */
ir_node *
get_Alloc_size (ir_node *node) {
  assert (node->op == op_Alloc);
  return get_irn_n(node, 1);
}

/* Sets the size input (in 1) of an Alloc node. */
void
set_Alloc_size (ir_node *node, ir_node *size) {
  assert (node->op == op_Alloc);
  set_irn_n(node, 1, size);
}

/* Returns the type attribute of an Alloc node.  The attribute is
   normalized on access: skip_tid() strips type-id indirections and
   the normalized type is written back so later reads are cheap. */
ir_type *
get_Alloc_type (ir_node *node) {
  assert (node->op == op_Alloc);
  return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
}

/* Sets the type attribute of an Alloc node. */
void
set_Alloc_type (ir_node *node, ir_type *tp) {
  assert (node->op == op_Alloc);
  node->attr.alloc.type = tp;
}

/* Returns where the memory is allocated (stack or heap). */
where_alloc
get_Alloc_where (ir_node *node) {
  assert (node->op == op_Alloc);
  return node->attr.alloc.where;
}

/* Sets where the memory is allocated (stack or heap). */
void
set_Alloc_where (ir_node *node, where_alloc where) {
  assert (node->op == op_Alloc);
  node->attr.alloc.where = where;
}
/* Returns the memory input (in 0) of a Free node. */
ir_node *
get_Free_mem (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 0);
}

/* Sets the memory input (in 0) of a Free node. */
void
set_Free_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Free);
  set_irn_n(node, 0, mem);
}

/* Returns the pointer input (in 1): the block being freed. */
ir_node *
get_Free_ptr (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 1);
}

/* Sets the pointer input (in 1) of a Free node. */
void
set_Free_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Free);
  set_irn_n(node, 1, ptr);
}

/* Returns the size input (in 2) of a Free node. */
ir_node *
get_Free_size (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 2);
}

/* Sets the size input (in 2) of a Free node. */
void
set_Free_size (ir_node *node, ir_node *size) {
  assert (node->op == op_Free);
  set_irn_n(node, 2, size);
}

/* Returns the type attribute of a Free node.  Normalized on access:
   skip_tid() strips type-id indirections and the result is cached
   back into the attribute (same convention as get_Alloc_type). */
ir_type *
get_Free_type (ir_node *node) {
  assert (node->op == op_Free);
  return node->attr.free.type = skip_tid(node->attr.free.type);
}

/* Sets the type attribute of a Free node. */
void
set_Free_type (ir_node *node, ir_type *tp) {
  assert (node->op == op_Free);
  node->attr.free.type = tp;
}

/* Returns where the freed memory was allocated (stack or heap). */
where_alloc
get_Free_where (ir_node *node) {
  assert (node->op == op_Free);
  return node->attr.free.where;
}

/* Sets where the freed memory was allocated (stack or heap). */
void
set_Free_where (ir_node *node, where_alloc where) {
  assert (node->op == op_Free);
  node->attr.free.where = where;
}
1791 ir_node **get_Sync_preds_arr (ir_node *node) {
1792 assert (node->op == op_Sync);
1793 return (ir_node **)&(get_irn_in(node)[1]);
1796 int get_Sync_n_preds (ir_node *node) {
1797 assert(node->op == op_Sync);
1798 return (get_irn_arity(node));
1802 void set_Sync_n_preds (ir_node *node, int n_preds) {
1803 assert (node->op == op_Sync);
1807 ir_node *get_Sync_pred (ir_node *node, int pos) {
1808 assert(node->op == op_Sync);
1809 return get_irn_n(node, pos);
1812 void set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1813 assert(node->op == op_Sync);
1814 set_irn_n(node, pos, pred);
/* Add a new Sync predecessor.
   Grows the node's in-array in place instead of going through
   set_irn_n(), because the arity of a Sync is dynamic. */
void add_Sync_pred (ir_node *node, ir_node *pred) {
  int l;
  ir_graph *irg = get_irn_irg(node);

  assert(node->op == op_Sync);
  /* Slot 0 of node->in holds the block, so the appended predecessor
     lands at raw index l (the old array length). */
  l = ARR_LEN(node->in);
  ARR_APP1(ir_node *, node->in, pred);
  /* Inform the out-edge bookkeeping: a new edge appeared
     (old target NULL -> new target node->in[l]). */
  edges_notify_edge(node, l, node->in[l], NULL, irg);
}
1828 /* Returns the source language type of a Proj node. */
1829 ir_type *get_Proj_type(ir_node *n)
1831 ir_type *tp = firm_unknown_type;
1832 ir_node *pred = get_Proj_pred(n);
1834 switch (get_irn_opcode(pred)) {
1837 /* Deal with Start / Call here: we need to know the Proj Nr. */
1838 assert(get_irn_mode(pred) == mode_T);
1839 pred_pred = get_Proj_pred(pred);
1840 if (get_irn_op(pred_pred) == op_Start) {
1841 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1842 tp = get_method_param_type(mtp, get_Proj_proj(n));
1843 } else if (get_irn_op(pred_pred) == op_Call) {
1844 ir_type *mtp = get_Call_type(pred_pred);
1845 tp = get_method_res_type(mtp, get_Proj_proj(n));
1848 case iro_Start: break;
1849 case iro_Call: break;
1851 ir_node *a = get_Load_ptr(pred);
1853 tp = get_entity_type(get_Sel_entity(a));
1862 get_Proj_pred (const ir_node *node) {
1863 assert (is_Proj(node));
1864 return get_irn_n(node, 0);
1868 set_Proj_pred (ir_node *node, ir_node *pred) {
1869 assert (is_Proj(node));
1870 set_irn_n(node, 0, pred);
1874 get_Proj_proj (const ir_node *node) {
1875 assert (is_Proj(node));
1876 if (get_irn_opcode(node) == iro_Proj) {
1877 return node->attr.proj;
1879 assert(get_irn_opcode(node) == iro_Filter);
1880 return node->attr.filter.proj;
1885 set_Proj_proj (ir_node *node, long proj) {
1886 assert (node->op == op_Proj);
1887 node->attr.proj = proj;
1891 get_Tuple_preds_arr (ir_node *node) {
1892 assert (node->op == op_Tuple);
1893 return (ir_node **)&(get_irn_in(node)[1]);
1897 get_Tuple_n_preds (ir_node *node) {
1898 assert (node->op == op_Tuple);
1899 return (get_irn_arity(node));
1904 set_Tuple_n_preds (ir_node *node, int n_preds) {
1905 assert (node->op == op_Tuple);
1910 get_Tuple_pred (ir_node *node, int pos) {
1911 assert (node->op == op_Tuple);
1912 return get_irn_n(node, pos);
1916 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1917 assert (node->op == op_Tuple);
1918 set_irn_n(node, pos, pred);
1922 get_Id_pred (ir_node *node) {
1923 assert (node->op == op_Id);
1924 return get_irn_n(node, 0);
1928 set_Id_pred (ir_node *node, ir_node *pred) {
1929 assert (node->op == op_Id);
1930 set_irn_n(node, 0, pred);
1933 ir_node *get_Confirm_value (ir_node *node) {
1934 assert (node->op == op_Confirm);
1935 return get_irn_n(node, 0);
1937 void set_Confirm_value (ir_node *node, ir_node *value) {
1938 assert (node->op == op_Confirm);
1939 set_irn_n(node, 0, value);
1941 ir_node *get_Confirm_bound (ir_node *node) {
1942 assert (node->op == op_Confirm);
1943 return get_irn_n(node, 1);
1945 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1946 assert (node->op == op_Confirm);
1947 set_irn_n(node, 0, bound);
1949 pn_Cmp get_Confirm_cmp (ir_node *node) {
1950 assert (node->op == op_Confirm);
1951 return node->attr.confirm_cmp;
1953 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1954 assert (node->op == op_Confirm);
1955 node->attr.confirm_cmp = cmp;
1960 get_Filter_pred (ir_node *node) {
1961 assert(node->op == op_Filter);
1965 set_Filter_pred (ir_node *node, ir_node *pred) {
1966 assert(node->op == op_Filter);
1970 get_Filter_proj(ir_node *node) {
1971 assert(node->op == op_Filter);
1972 return node->attr.filter.proj;
1975 set_Filter_proj (ir_node *node, long proj) {
1976 assert(node->op == op_Filter);
1977 node->attr.filter.proj = proj;
1980 /* Don't use get_irn_arity, get_irn_n in implementation as access
1981 shall work independent of view!!! */
1982 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1983 assert(node->op == op_Filter);
1984 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1985 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1986 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1987 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1988 node->attr.filter.in_cg[0] = node->in[0];
1990 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1993 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1994 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1995 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1996 node->attr.filter.in_cg[pos + 1] = pred;
1998 int get_Filter_n_cg_preds(ir_node *node) {
1999 assert(node->op == op_Filter && node->attr.filter.in_cg);
2000 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2002 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2004 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2006 arity = ARR_LEN(node->attr.filter.in_cg);
2007 assert(pos < arity - 1);
2008 return node->attr.filter.in_cg[pos + 1];
2012 ir_node *get_Mux_sel (ir_node *node) {
2013 if (node->op == op_Psi) {
2014 assert(get_irn_arity(node) == 3);
2015 return get_Psi_cond(node, 0);
2017 assert(node->op == op_Mux);
2020 void set_Mux_sel (ir_node *node, ir_node *sel) {
2021 if (node->op == op_Psi) {
2022 assert(get_irn_arity(node) == 3);
2023 set_Psi_cond(node, 0, sel);
2026 assert(node->op == op_Mux);
2031 ir_node *get_Mux_false (ir_node *node) {
2032 if (node->op == op_Psi) {
2033 assert(get_irn_arity(node) == 3);
2034 return get_Psi_default(node);
2036 assert(node->op == op_Mux);
2039 void set_Mux_false (ir_node *node, ir_node *ir_false) {
2040 if (node->op == op_Psi) {
2041 assert(get_irn_arity(node) == 3);
2042 set_Psi_default(node, ir_false);
2045 assert(node->op == op_Mux);
2046 node->in[2] = ir_false;
2050 ir_node *get_Mux_true (ir_node *node) {
2051 if (node->op == op_Psi) {
2052 assert(get_irn_arity(node) == 3);
2053 return get_Psi_val(node, 0);
2055 assert(node->op == op_Mux);
2058 void set_Mux_true (ir_node *node, ir_node *ir_true) {
2059 if (node->op == op_Psi) {
2060 assert(get_irn_arity(node) == 3);
2061 set_Psi_val(node, 0, ir_true);
2064 assert(node->op == op_Mux);
2065 node->in[3] = ir_true;
/* Returns the pos-th condition input of a Psi node.
   A Psi's inputs alternate: cond0, val0, cond1, val1, ..., default. */
ir_node *get_Psi_cond (ir_node *node, int pos) {
  int num_conds = get_Psi_n_conds(node);
  assert(node->op == op_Psi);
  assert(pos < num_conds);
  return get_irn_n(node, 2 * pos);
}

/* Sets the pos-th condition input of a Psi node. */
void set_Psi_cond (ir_node *node, int pos, ir_node *cond) {
  int num_conds = get_Psi_n_conds(node);
  assert(node->op == op_Psi);
  assert(pos < num_conds);
  set_irn_n(node, 2 * pos, cond);
}

/* Returns the pos-th value input of a Psi node: the value selected
   when the pos-th condition holds. */
ir_node *get_Psi_val (ir_node *node, int pos) {
  int num_vals = get_Psi_n_conds(node);
  assert(node->op == op_Psi);
  assert(pos < num_vals);
  return get_irn_n(node, 2 * pos + 1);
}

/* Sets the pos-th value input of a Psi node. */
void set_Psi_val (ir_node *node, int pos, ir_node *val) {
  int num_vals = get_Psi_n_conds(node);
  assert(node->op == op_Psi);
  assert(pos < num_vals);
  set_irn_n(node, 2 * pos + 1, val);
}
2098 ir_node *get_Psi_default(ir_node *node) {
2099 int def_pos = get_irn_arity(node) - 1;
2100 assert(node->op == op_Psi);
2101 return get_irn_n(node, def_pos);
2104 void set_Psi_default(ir_node *node, ir_node *val) {
2105 int def_pos = get_irn_arity(node);
2106 assert(node->op == op_Psi);
2107 set_irn_n(node, def_pos, val);
2110 int (get_Psi_n_conds)(ir_node *node) {
2111 return _get_Psi_n_conds(node);
/* Returns the memory input (in 0) of a CopyB node. */
ir_node *get_CopyB_mem (ir_node *node) {
  assert (node->op == op_CopyB);
  return get_irn_n(node, 0);
}

/* Sets the memory input (in 0) of a CopyB node. */
void set_CopyB_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_CopyB);
  set_irn_n(node, 0, mem);
}

/* Returns the destination pointer (in 1) of a CopyB node. */
ir_node *get_CopyB_dst (ir_node *node) {
  assert (node->op == op_CopyB);
  return get_irn_n(node, 1);
}

/* Sets the destination pointer (in 1) of a CopyB node. */
void set_CopyB_dst (ir_node *node, ir_node *dst) {
  assert (node->op == op_CopyB);
  set_irn_n(node, 1, dst);
}

/* Returns the source pointer (in 2) of a CopyB node. */
ir_node *get_CopyB_src (ir_node *node) {
  assert (node->op == op_CopyB);
  return get_irn_n(node, 2);
}

/* Sets the source pointer (in 2) of a CopyB node. */
void set_CopyB_src (ir_node *node, ir_node *src) {
  assert (node->op == op_CopyB);
  set_irn_n(node, 2, src);
}

/* Returns the type of the block being copied. */
ir_type *get_CopyB_type(ir_node *node) {
  assert (node->op == op_CopyB);
  return node->attr.copyb.data_type;
}

/* Sets the type of the block being copied; must not be NULL. */
void set_CopyB_type(ir_node *node, ir_type *data_type) {
  assert (node->op == op_CopyB && data_type);
  node->attr.copyb.data_type = data_type;
}
2157 get_InstOf_type (ir_node *node) {
2158 assert (node->op = op_InstOf);
2159 return node->attr.instof.type;
2163 set_InstOf_type (ir_node *node, ir_type *type) {
2164 assert (node->op = op_InstOf);
2165 node->attr.instof.type = type;
2169 get_InstOf_store (ir_node *node) {
2170 assert (node->op = op_InstOf);
2171 return get_irn_n(node, 0);
2175 set_InstOf_store (ir_node *node, ir_node *obj) {
2176 assert (node->op = op_InstOf);
2177 set_irn_n(node, 0, obj);
2181 get_InstOf_obj (ir_node *node) {
2182 assert (node->op = op_InstOf);
2183 return get_irn_n(node, 1);
2187 set_InstOf_obj (ir_node *node, ir_node *obj) {
2188 assert (node->op = op_InstOf);
2189 set_irn_n(node, 1, obj);
2192 /* Returns the memory input of a Raise operation. */
2194 get_Raise_mem (ir_node *node) {
2195 assert (node->op == op_Raise);
2196 return get_irn_n(node, 0);
2200 set_Raise_mem (ir_node *node, ir_node *mem) {
2201 assert (node->op == op_Raise);
2202 set_irn_n(node, 0, mem);
2206 get_Raise_exo_ptr (ir_node *node) {
2207 assert (node->op == op_Raise);
2208 return get_irn_n(node, 1);
2212 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
2213 assert (node->op == op_Raise);
2214 set_irn_n(node, 1, exo_ptr);
/* Returns the memory input (in 0) of a Bound operation. */
ir_node *get_Bound_mem(ir_node *bound) {
  assert (bound->op == op_Bound);
  return get_irn_n(bound, 0);
}

/* Sets the memory input (in 0) of a Bound operation. */
void set_Bound_mem (ir_node *bound, ir_node *mem) {
  assert (bound->op == op_Bound);
  set_irn_n(bound, 0, mem);
}

/* Returns the index input (in 1) of a Bound operation: the value
   that is checked against the bounds. */
ir_node *get_Bound_index(ir_node *bound) {
  assert (bound->op == op_Bound);
  return get_irn_n(bound, 1);
}

/* Sets the index input (in 1) of a Bound operation. */
void set_Bound_index(ir_node *bound, ir_node *idx) {
  assert (bound->op == op_Bound);
  set_irn_n(bound, 1, idx);
}

/* Returns the lower bound input (in 2) of a Bound operation. */
ir_node *get_Bound_lower(ir_node *bound) {
  assert (bound->op == op_Bound);
  return get_irn_n(bound, 2);
}

/* Sets the lower bound input (in 2) of a Bound operation. */
void set_Bound_lower(ir_node *bound, ir_node *lower) {
  assert (bound->op == op_Bound);
  set_irn_n(bound, 2, lower);
}

/* Returns the upper bound input (in 3) of a Bound operation. */
ir_node *get_Bound_upper(ir_node *bound) {
  assert (bound->op == op_Bound);
  return get_irn_n(bound, 3);
}

/* Sets the upper bound input (in 3) of a Bound operation. */
void set_Bound_upper(ir_node *bound, ir_node *upper) {
  assert (bound->op == op_Bound);
  set_irn_n(bound, 3, upper);
}
/* returns the graph of a node */
ir_graph *
get_irn_irg(const ir_node *node) {
  /*
   * Do not use get_nodes_Block() here, because this
   * will check the pinned state.
   * However even a 'wrong' block is always in the proper
   * irg.
   */
  if (! is_Block(node))
    node = get_irn_n(node, -1);
  /* sometimes bad is predecessor of nodes instead of block:
     in case of optimization */
  if (is_Bad(node))
    node = get_irn_n(node, -1);
  assert(get_irn_op(node) == op_Block);
  /* only Block nodes carry the irg attribute */
  return node->attr.block.irg;
}
2281 /*----------------------------------------------------------------*/
2282 /* Auxiliary routines */
2283 /*----------------------------------------------------------------*/
2286 skip_Proj (ir_node *node) {
2287 /* don't assert node !!! */
2288 if (node && is_Proj(node)) {
2289 return get_Proj_pred(node);
2296 skip_Tuple (ir_node *node) {
2300 if (!get_opt_normalize()) return node;
2303 node = skip_Id(node);
2304 if (get_irn_op(node) == op_Proj) {
2305 pred = skip_Id(get_Proj_pred(node));
2306 op = get_irn_op(pred);
2309 * Looks strange but calls get_irn_op() only once
2310 * in most often cases.
2312 if (op == op_Proj) { /* nested Tuple ? */
2313 pred = skip_Id(skip_Tuple(pred));
2314 op = get_irn_op(pred);
2316 if (op == op_Tuple) {
2317 node = get_Tuple_pred(pred, get_Proj_proj(node));
2321 else if (op == op_Tuple) {
2322 node = get_Tuple_pred(pred, get_Proj_proj(node));
2329 /* returns operand of node if node is a Cast */
2330 ir_node *skip_Cast (ir_node *node) {
2331 if (node && get_irn_op(node) == op_Cast)
2332 return get_Cast_op(node);
2336 /* returns operand of node if node is a Confirm */
2337 ir_node *skip_Confirm (ir_node *node) {
2338 if (node && get_irn_op(node) == op_Confirm)
2339 return get_Confirm_value(node);
2343 /* skip all high-level ops */
2344 ir_node *skip_HighLevel(ir_node *node) {
2345 if (node && is_op_highlevel(get_irn_op(node)))
2346 return get_irn_n(node, 0);
2351 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2352 * than any other approach, as Id chains are resolved and all point to the real node, or
2353 * all id's are self loops.
2355 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2358 skip_Id (ir_node *node) {
2359 /* don't assert node !!! */
2361 /* Don't use get_Id_pred: We get into an endless loop for
2362 self-referencing Ids. */
2363 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2364 ir_node *rem_pred = node->in[0+1];
2367 assert (get_irn_arity (node) > 0);
2369 node->in[0+1] = node;
2370 res = skip_Id(rem_pred);
2371 if (res->op == op_Id) /* self-loop */ return node;
2373 node->in[0+1] = res;
2380 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2381 * than any other approach, as Id chains are resolved and all point to the real node, or
2382 * all id's are self loops.
 * Note: This function takes about 10% of almost any compiler run, so it is
 * a little bit "hand optimized".
2387 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2390 skip_Id (ir_node *node) {
2392 /* don't assert node !!! */
2394 if (!node || (node->op != op_Id)) return node;
2396 /* Don't use get_Id_pred(): We get into an endless loop for
2397 self-referencing Ids. */
2398 pred = node->in[0+1];
2400 if (pred->op != op_Id) return pred;
2402 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2403 ir_node *rem_pred, *res;
2405 if (pred->op != op_Id) return pred; /* shortcut */
2408 assert (get_irn_arity (node) > 0);
2410 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2411 res = skip_Id(rem_pred);
2412 if (res->op == op_Id) /* self-loop */ return node;
2414 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2422 void skip_Id_and_store(ir_node **node) {
2425 if (!n || (n->op != op_Id)) return;
2427 /* Don't use get_Id_pred(): We get into an endless loop for
2428 self-referencing Ids. */
2433 (is_Bad)(const ir_node *node) {
2434 return _is_Bad(node);
2438 (is_Const)(const ir_node *node) {
2439 return _is_Const(node);
2443 (is_no_Block)(const ir_node *node) {
2444 return _is_no_Block(node);
2448 (is_Block)(const ir_node *node) {
2449 return _is_Block(node);
2452 /* returns true if node is an Unknown node. */
2454 (is_Unknown)(const ir_node *node) {
2455 return _is_Unknown(node);
2458 /* returns true if node is a Return node. */
2460 (is_Return)(const ir_node *node) {
2461 return _is_Return(node);
2464 /* returns true if node is a Call node. */
2466 (is_Call)(const ir_node *node) {
2467 return _is_Call(node);
2470 /* returns true if node is a Sel node. */
2472 (is_Sel)(const ir_node *node) {
2473 return _is_Sel(node);
2476 /* returns true if node is a Mux node or a Psi with only one condition. */
2478 (is_Mux)(const ir_node *node) {
2479 return _is_Mux(node);
2482 /* returns true if node is a Load node. */
2484 (is_Load)(const ir_node *node) {
2485 return _is_Load(node);
2488 /* returns true if node is a Sync node. */
2490 (is_Sync)(const ir_node *node) {
2491 return _is_Sync(node);
2494 /* returns true if node is a Confirm node. */
2496 (is_Confirm)(const ir_node *node) {
2497 return _is_Confirm(node);
2501 is_Proj (const ir_node *node) {
2503 return node->op == op_Proj
2504 || (!get_interprocedural_view() && node->op == op_Filter);
2507 /* Returns true if the operation manipulates control flow. */
2509 is_cfop(const ir_node *node) {
2510 return is_cfopcode(get_irn_op(node));
2513 /* Returns true if the operation manipulates interprocedural control flow:
2514 CallBegin, EndReg, EndExcept */
2515 int is_ip_cfop(const ir_node *node) {
2516 return is_ip_cfopcode(get_irn_op(node));
2519 /* Returns true if the operation can change the control flow because
2522 is_fragile_op(const ir_node *node) {
2523 return is_op_fragile(get_irn_op(node));
2526 /* Returns the memory operand of fragile operations. */
2527 ir_node *get_fragile_op_mem(ir_node *node) {
2528 assert(node && is_fragile_op(node));
2530 switch (get_irn_opcode (node)) {
2539 return get_irn_n(node, 0);
2544 assert(0 && "should not be reached");
2549 /* Returns true if the operation is a forking control flow operation. */
2550 int (is_irn_forking)(const ir_node *node) {
2551 return _is_irn_forking(node);
2554 /* Return the type associated with the value produced by n
2555 * if the node remarks this type as it is the case for
2556 * Cast, Const, SymConst and some Proj nodes. */
2557 ir_type *(get_irn_type)(ir_node *node) {
2558 return _get_irn_type(node);
2561 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2563 ir_type *(get_irn_type_attr)(ir_node *node) {
2564 return _get_irn_type_attr(node);
2567 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2568 entity *(get_irn_entity_attr)(ir_node *node) {
2569 return _get_irn_entity_attr(node);
2572 /* Returns non-zero for constant-like nodes. */
2573 int (is_irn_constlike)(const ir_node *node) {
2574 return _is_irn_constlike(node);
2578 * Returns non-zero for nodes that are allowed to have keep-alives and
2579 * are neither Block nor PhiM.
2581 int (is_irn_keep)(const ir_node *node) {
2582 return _is_irn_keep(node);
2585 /* Returns non-zero for nodes that are machine operations. */
2586 int (is_irn_machine_op)(const ir_node *node) {
2587 return _is_irn_machine_op(node);
2590 /* Returns non-zero for nodes that are machine operands. */
2591 int (is_irn_machine_operand)(const ir_node *node) {
2592 return _is_irn_machine_operand(node);
2595 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2596 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2597 return _is_irn_machine_user(node, n);
2601 /* Gets the string representation of the jump prediction .*/
2602 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
2606 case COND_JMP_PRED_NONE: return "no prediction";
2607 case COND_JMP_PRED_TRUE: return "true taken";
2608 case COND_JMP_PRED_FALSE: return "false taken";
2612 /* Returns the conditional jump prediction of a Cond node. */
2613 cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
2614 return _get_Cond_jmp_pred(cond);
2617 /* Sets a new conditional jump prediction. */
2618 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2619 _set_Cond_jmp_pred(cond, pred);
2622 /** the get_type operation must be always implemented and return a firm type */
2623 static ir_type *get_Default_type(ir_node *n) {
2624 return get_unknown_type();
2627 /* Sets the get_type operation for an ir_op_ops. */
2628 ir_op_ops *firm_set_default_get_type(opcode code, ir_op_ops *ops)
2631 case iro_Const: ops->get_type = get_Const_type; break;
2632 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2633 case iro_Cast: ops->get_type = get_Cast_type; break;
2634 case iro_Proj: ops->get_type = get_Proj_type; break;
2636 /* not allowed to be NULL */
2637 if (! ops->get_type)
2638 ops->get_type = get_Default_type;
2644 /** Return the attribute type of a SymConst node if exists */
2645 static ir_type *get_SymConst_attr_type(ir_node *self) {
2646 symconst_kind kind = get_SymConst_kind(self);
2647 if (SYMCONST_HAS_TYPE(kind))
2648 return get_SymConst_type(self);
2652 /** Return the attribute entity of a SymConst node if exists */
2653 static entity *get_SymConst_attr_entity(ir_node *self) {
2654 symconst_kind kind = get_SymConst_kind(self);
2655 if (SYMCONST_HAS_ENT(kind))
2656 return get_SymConst_entity(self);
2660 /** the get_type_attr operation must be always implemented */
2661 static ir_type *get_Null_type(ir_node *n) {
2662 return firm_unknown_type;
2665 /* Sets the get_type operation for an ir_op_ops. */
2666 ir_op_ops *firm_set_default_get_type_attr(opcode code, ir_op_ops *ops)
2669 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2670 case iro_Call: ops->get_type_attr = get_Call_type; break;
2671 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2672 case iro_Free: ops->get_type_attr = get_Free_type; break;
2673 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2675 /* not allowed to be NULL */
2676 if (! ops->get_type_attr)
2677 ops->get_type_attr = get_Null_type;
2683 /** the get_entity_attr operation must be always implemented */
2684 static entity *get_Null_ent(ir_node *n) {
2688 /* Sets the get_type operation for an ir_op_ops. */
2689 ir_op_ops *firm_set_default_get_entity_attr(opcode code, ir_op_ops *ops)
2692 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2693 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
2695 /* not allowed to be NULL */
2696 if (! ops->get_entity_attr)
2697 ops->get_entity_attr = get_Null_ent;
2703 #ifdef DEBUG_libfirm
2704 void dump_irn (ir_node *n) {
2705 int i, arity = get_irn_arity(n);
2706 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2708 ir_node *pred = get_irn_n(n, -1);
2709 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2710 get_irn_node_nr(pred), (void *)pred);
2712 printf(" preds: \n");
2713 for (i = 0; i < arity; ++i) {
2714 ir_node *pred = get_irn_n(n, i);
2715 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2716 get_irn_node_nr(pred), (void *)pred);
2720 #else /* DEBUG_libfirm */
2721 void dump_irn (ir_node *n) {}
2722 #endif /* DEBUG_libfirm */