3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier, Michael Beck
9 * Copyright: (c) 1998-2006 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredgekinds.h"
31 #include "iredges_t.h"
36 /* some constants fixing the positions of nodes predecessors
38 #define CALL_PARAM_OFFSET 2
39 #define FUNCCALL_PARAM_OFFSET 1
40 #define SEL_INDEX_OFFSET 2
41 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
42 #define END_KEEPALIVE_OFFSET 0
/* String names for the 16 pn_Cmp relation constants; the array index
   is the pnc value itself (pn_Cmp_False == 0 ... pn_Cmp_True == 15). */
static const char *pnc_name_arr [] = {
  "pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
  "pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
  "pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
  "pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 *
 * @param pnc  a pn_Cmp relation code; must be in range [0, 15]
 * @return pointer to a static string naming the relation
 */
const char *get_pnc_string(int pnc) {
  /* guard against out-of-range codes indexing past the table */
  assert(0 <= pnc && pnc < (int)(sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
  return pnc_name_arr[pnc];
}
59 * Calculates the negated (Complement(R)) pnc condition.
61 int get_negated_pnc(int pnc, ir_mode *mode) {
  /* NOTE(review): the body that actually flips the relation bits is
     missing from this listing (original lines 62-63, 66-70 dropped);
     only the float/Uo special case below is visible. */
64 /* do NOT add the Uo bit for non-floating point values */
65 if (! mode_is_float(mode))
71 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
73 get_inversed_pnc(int pnc) {
74 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
75 int lesser = pnc & pn_Cmp_Lt;
76 int greater = pnc & pn_Cmp_Gt;
78 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
84 * Indicates, whether additional data can be registered to ir nodes.
85 * If set to 1, this is not possible anymore.
87 static int forbid_new_data = 0;
90 * The amount of additional space for custom data to be allocated upon
91 * creating a new node.
93 unsigned firm_add_node_size = 0;
96 /* register new space for every node */
97 unsigned register_additional_node_data(unsigned size) {
98 assert(!forbid_new_data && "Too late to register additional node data");
103 return firm_add_node_size += size;
109 /* Forbid the addition of new data to an ir node. */
114 * irnode constructor.
115 * Create a new irnode in irg, with an op, mode, arity and
116 * some incoming irnodes.
117 * If arity is negative, a node with a dynamic array is created.
120 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
121 int arity, ir_node **in)
  /* total size: node header + op-specific attributes + user-registered extra data */
124 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
128 assert(irg && op && mode);
  /* allocate on the graph's obstack and zero everything */
129 p = obstack_alloc (irg->obst, node_size);
130 memset(p, 0, node_size);
  /* user data sits BEFORE the node; res points past it at the ir_node proper */
131 res = (ir_node *) (p + firm_add_node_size);
133 res->kind = k_ir_node;
  /* register in the graph's node-index map to obtain a unique index */
137 res->node_idx = irg_register_node_idx(irg, res);
  /* negative arity: flexible (growable) in-array, slot 0 reserved for the block */
142 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
  /* fixed arity: obstack array of arity+1, predecessors copied behind the block slot */
144 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
145 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
149 set_irn_dbg_info(res, db);
153 res->node_nr = get_irp_new_node_nr();
  /* initialize the out-edge list heads for every edge kind */
156 for(i = 0; i < EDGE_KIND_LAST; ++i)
157 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
  /* announce the new edges; Blocks (is_bl == 1) skip the block slot at index -1 */
159 is_bl = is_Block(res);
160 for (i = is_bl; i <= arity; ++i)
161 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
163 hook_new_node(irg, res);
168 /*-- getting some parameters from ir_nodes --*/
171 (is_ir_node)(const void *thing) {
172 return _is_ir_node(thing);
176 (get_irn_intra_arity)(const ir_node *node) {
177 return _get_irn_intra_arity(node);
181 (get_irn_inter_arity)(const ir_node *node) {
182 return _get_irn_inter_arity(node);
185 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
188 (get_irn_arity)(const ir_node *node) {
189 return _get_irn_arity(node);
192 /* Returns the array with ins. This array is shifted with respect to the
193 array accessed by get_irn_n: The block operand is at position 0 not -1.
194 (@@@ This should be changed.)
195 The order of the predecessors in this array is not guaranteed, except that
196 lists of operands as predecessors of Block or arguments of a Call are
199 get_irn_in(const ir_node *node) {
201 if (get_interprocedural_view()) { /* handle Filter and Block specially */
202 if (get_irn_opcode(node) == iro_Filter) {
203 assert(node->attr.filter.in_cg);
204 return node->attr.filter.in_cg;
205 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
206 return node->attr.block.in_cg;
208 /* else fall through */
  /* Replace the whole predecessor array of a node with `in` (length `arity`).
     Notifies the edge module about every changed edge and reallocates the
     in-array when the arity changes. NOTE(review): several interior lines
     (declarations, else branches, closing braces) are missing from this
     listing. */
214 set_irn_in(ir_node *node, int arity, ir_node **in) {
217 ir_graph *irg = current_ir_graph;
  /* in the interprocedural view Filter and Block keep a second in-array (in_cg) */
219 if (get_interprocedural_view()) { /* handle Filter and Block specially */
220 if (get_irn_opcode(node) == iro_Filter) {
221 assert(node->attr.filter.in_cg);
222 arr = &node->attr.filter.in_cg;
223 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
224 arr = &node->attr.block.in_cg;
  /* report edges that are rewired; (*arr)[i+1] because slot 0 holds the block */
232 for (i = 0; i < arity; i++) {
233 if (i < ARR_LEN(*arr)-1)
234 edges_notify_edge(node, i, in[i], (*arr)[i+1], irg);
236 edges_notify_edge(node, i, in[i], NULL, irg);
  /* report edges that vanish when the new arity is smaller */
238 for(;i < ARR_LEN(*arr)-1; i++) {
239 edges_notify_edge(node, i, NULL, (*arr)[i+1], irg);
  /* arity changed: allocate a fresh array, preserving the block in slot 0 */
242 if (arity != ARR_LEN(*arr) - 1) {
243 ir_node * block = (*arr)[0];
244 *arr = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
  /* keep the backedge bitfield consistent with the new arity */
247 fix_backedges(irg->obst, node);
249 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
253 (get_irn_intra_n)(const ir_node *node, int n) {
254 return _get_irn_intra_n (node, n);
258 (get_irn_inter_n)(const ir_node *node, int n) {
259 return _get_irn_inter_n (node, n);
262 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
265 (get_irn_n)(const ir_node *node, int n) {
266 return _get_irn_n(node, n);
270 set_irn_n (ir_node *node, int n, ir_node *in) {
271 assert(node && node->kind == k_ir_node);
273 assert(n < get_irn_arity(node));
274 assert(in && in->kind == k_ir_node);
276 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
277 /* Change block pred in both views! */
278 node->in[n + 1] = in;
279 assert(node->attr.filter.in_cg);
280 node->attr.filter.in_cg[n + 1] = in;
283 if (get_interprocedural_view()) { /* handle Filter and Block specially */
284 if (get_irn_opcode(node) == iro_Filter) {
285 assert(node->attr.filter.in_cg);
286 node->attr.filter.in_cg[n + 1] = in;
288 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
289 node->attr.block.in_cg[n + 1] = in;
292 /* else fall through */
296 hook_set_irn_n(node, n, in, node->in[n + 1]);
298 /* Here, we rely on src and tgt being in the current ir graph */
299 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
301 node->in[n + 1] = in;
305 (get_irn_deps)(const ir_node *node)
307 return _get_irn_deps(node);
311 (get_irn_dep)(const ir_node *node, int pos)
313 return _get_irn_dep(node, pos);
317 (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
319 _set_irn_dep(node, pos, dep);
  /* Add a dependency edge node -> dep. Reuses a NULL hole in the deps
     array if one exists, otherwise appends. Returns the position used
     (via `res`, whose assignment lines are missing from this listing).
     NOTE(review): declarations and several branch/brace lines dropped. */
322 int add_irn_dep(ir_node *node, ir_node *dep)
  /* lazily create the deps array on first use */
326 if (node->deps == NULL) {
327 node->deps = NEW_ARR_F(ir_node *, 1);
  /* scan for an empty slot and for an already-present duplicate */
333 for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
334 if(node->deps[i] == NULL)
337 if(node->deps[i] == dep)
  /* fill the first hole rather than growing the array */
341 if (first_zero >= 0) {
342 node->deps[first_zero] = dep;
345 ARR_APP1(ir_node *, node->deps, dep);
  /* inform the edge module about the new DEP-kind edge */
350 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
355 void add_irn_deps(ir_node *tgt, ir_node *src)
359 for(i = 0, n = get_irn_deps(src); i < n; ++i)
360 add_irn_dep(tgt, get_irn_dep(src, i));
365 (get_irn_mode)(const ir_node *node) {
366 return _get_irn_mode(node);
370 (set_irn_mode)(ir_node *node, ir_mode *mode) {
371 _set_irn_mode(node, mode);
375 get_irn_modecode(const ir_node *node) {
377 return node->mode->code;
380 /** Gets the string representation of the mode .*/
382 get_irn_modename(const ir_node *node) {
384 return get_mode_name(node->mode);
388 get_irn_modeident(const ir_node *node) {
390 return get_mode_ident(node->mode);
394 (get_irn_op)(const ir_node *node) {
395 return _get_irn_op(node);
398 /* should be private to the library: */
400 (set_irn_op)(ir_node *node, ir_op *op) {
401 _set_irn_op(node, op);
405 (get_irn_opcode)(const ir_node *node) {
406 return _get_irn_opcode(node);
410 get_irn_opname(const ir_node *node) {
412 if ((get_irn_op((ir_node *)node) == op_Phi) &&
413 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
414 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
415 return get_id_str(node->op->name);
419 get_irn_opident(const ir_node *node) {
421 return node->op->name;
425 (get_irn_visited)(const ir_node *node) {
426 return _get_irn_visited(node);
430 (set_irn_visited)(ir_node *node, unsigned long visited) {
431 _set_irn_visited(node, visited);
435 (mark_irn_visited)(ir_node *node) {
436 _mark_irn_visited(node);
440 (irn_not_visited)(const ir_node *node) {
441 return _irn_not_visited(node);
445 (irn_visited)(const ir_node *node) {
446 return _irn_visited(node);
450 (set_irn_link)(ir_node *node, void *link) {
451 _set_irn_link(node, link);
455 (get_irn_link)(const ir_node *node) {
456 return _get_irn_link(node);
460 (get_irn_pinned)(const ir_node *node) {
461 return _get_irn_pinned(node);
465 (is_irn_pinned_in_irg) (const ir_node *node) {
466 return _is_irn_pinned_in_irg(node);
  /* Set the pin state of a node whose op allows per-node pinning.
     NOTE(review): the `return;` of the Tuple early-exit and the closing
     brace are missing from this listing. */
469 void set_irn_pinned(ir_node *node, op_pin_state state) {
470 /* due to optimization an opt may be turned into a Tuple */
471 if (get_irn_op(node) == op_Tuple)
  /* only ops with at least exception-pinning carry a per-node pin state */
474 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
475 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
  /* pin state is stored in the common exception attribute */
477 node->attr.except.pin_state = state;
480 #ifdef DO_HEAPANALYSIS
481 /* Access the abstract interpretation information of a node.
482 Returns NULL if no such information is available. */
483 struct abstval *get_irn_abst_value(ir_node *n) {
486 /* Set the abstract interpretation information of a node. */
487 void set_irn_abst_value(ir_node *n, struct abstval *os) {
490 struct section *firm_get_irn_section(ir_node *n) {
493 void firm_set_irn_section(ir_node *n, struct section *s) {
497 /* Dummies needed for firmjni. */
498 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
499 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
500 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
501 void firm_set_irn_section(ir_node *n, struct section *s) {}
502 #endif /* DO_HEAPANALYSIS */
505 /* Outputs a unique number for this node */
506 long get_irn_node_nr(const ir_node *node) {
509 return node->node_nr;
511 return (long)PTR_TO_INT(node);
516 get_irn_const_attr(ir_node *node) {
517 assert(node->op == op_Const);
518 return node->attr.con;
522 get_irn_proj_attr(ir_node *node) {
523 assert(node->op == op_Proj);
524 return node->attr.proj;
528 get_irn_alloc_attr(ir_node *node) {
529 assert(node->op == op_Alloc);
530 return node->attr.alloc;
534 get_irn_free_attr(ir_node *node) {
535 assert(node->op == op_Free);
536 return node->attr.free;
540 get_irn_symconst_attr(ir_node *node) {
541 assert(node->op == op_SymConst);
542 return node->attr.symc;
546 get_irn_call_attr(ir_node *node) {
547 assert(node->op == op_Call);
548 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
552 get_irn_sel_attr(ir_node *node) {
553 assert(node->op == op_Sel);
554 return node->attr.sel;
558 get_irn_phi_attr(ir_node *node) {
559 assert(node->op == op_Phi);
560 return node->attr.phi0_pos;
564 get_irn_block_attr(ir_node *node) {
565 assert(node->op == op_Block);
566 return node->attr.block;
570 get_irn_load_attr(ir_node *node)
572 assert(node->op == op_Load);
573 return node->attr.load;
577 get_irn_store_attr(ir_node *node)
579 assert(node->op == op_Store);
580 return node->attr.store;
584 get_irn_except_attr(ir_node *node) {
585 assert(node->op == op_Div || node->op == op_Quot ||
586 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
587 return node->attr.except;
591 get_irn_generic_attr(ir_node *node) {
595 unsigned (get_irn_idx)(const ir_node *node) {
596 assert(is_ir_node(node));
597 return _get_irn_idx(node);
  /* Find the position of predecessor `arg` among the ins of `node`,
     scanning from the highest index down. NOTE(review): the `return i;`
     on a hit and the final not-found return are missing from this
     listing — presumably returns -1 when arg is no predecessor; confirm
     against the full source. */
600 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
602 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
603 if (get_irn_n(node, i) == arg)
609 /** manipulate fields of individual nodes **/
611 /* this works for all except Block */
613 get_nodes_block(const ir_node *node) {
614 assert(node->op != op_Block);
615 assert(is_irn_pinned_in_irg(node) && "block info may be incorrect");
616 return get_irn_n(node, -1);
620 set_nodes_block(ir_node *node, ir_node *block) {
621 assert(node->op != op_Block);
622 set_irn_n(node, -1, block);
625 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
626 * from Start. If so returns frame type, else Null. */
627 ir_type *is_frame_pointer(ir_node *n) {
  /* must be the frame-base Proj ... */
628 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
629 ir_node *start = get_Proj_pred(n);
  /* ... hanging directly off the Start node */
630 if (get_irn_op(start) == op_Start) {
631 return get_irg_frame_type(get_irn_irg(start));
  /* NOTE(review): closing braces and the `return NULL;` fall-through are
     missing from this listing. */
637 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
638 * from Start. If so returns global type, else Null. */
639 ir_type *is_globals_pointer(ir_node *n) {
  /* the globals pointer is the pn_Start_P_globals Proj of Start */
640 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
641 ir_node *start = get_Proj_pred(n);
642 if (get_irn_op(start) == op_Start) {
643 return get_glob_type();
  /* NOTE(review): closing braces and the `return NULL;` fall-through are
     missing from this listing. */
649 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
650 * from Start. If so returns tls type, else Null. */
651 ir_type *is_tls_pointer(ir_node *n) {
652 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
653 ir_node *start = get_Proj_pred(n);
654 if (get_irn_op(start) == op_Start) {
655 return get_tls_type();
661 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
662 * from Start. If so returns 1, else 0. */
663 int is_value_arg_pointer(ir_node *n) {
664 if ((get_irn_op(n) == op_Proj) &&
665 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
666 (get_irn_op(get_Proj_pred(n)) == op_Start))
671 /* Returns an array with the predecessors of the Block. Depending on
672 the implementation of the graph data structure this can be a copy of
673 the internal representation of predecessors as well as the internal
674 array itself. Therefore writing to this array might obstruct the ir. */
676 get_Block_cfgpred_arr(ir_node *node) {
677 assert((node->op == op_Block));
678 return (ir_node **)&(get_irn_in(node)[1]);
682 (get_Block_n_cfgpreds)(const ir_node *node) {
683 return _get_Block_n_cfgpreds(node);
687 (get_Block_cfgpred)(ir_node *node, int pos) {
688 return _get_Block_cfgpred(node, pos);
692 set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
693 assert(node->op == op_Block);
694 set_irn_n(node, pos, pred);
698 (get_Block_cfgpred_block)(ir_node *node, int pos) {
699 return _get_Block_cfgpred_block(node, pos);
703 get_Block_matured(ir_node *node) {
704 assert(node->op == op_Block);
705 return (int)node->attr.block.matured;
709 set_Block_matured(ir_node *node, int matured) {
710 assert(node->op == op_Block);
711 node->attr.block.matured = matured;
715 (get_Block_block_visited)(ir_node *node) {
716 return _get_Block_block_visited(node);
720 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
721 _set_Block_block_visited(node, visit);
724 /* For this current_ir_graph must be set. */
726 (mark_Block_block_visited)(ir_node *node) {
727 _mark_Block_block_visited(node);
731 (Block_not_block_visited)(ir_node *node) {
732 return _Block_not_block_visited(node);
736 get_Block_graph_arr (ir_node *node, int pos) {
737 assert(node->op == op_Block);
738 return node->attr.block.graph_arr[pos+1];
742 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
743 assert(node->op == op_Block);
744 node->attr.block.graph_arr[pos+1] = value;
  /* Install the interprocedural (call-graph view) predecessor array of a
     Block. Reallocates in_cg and the cg backedge array when the arity
     changes. NOTE(review): the else-branch structure and closing braces
     are missing from this listing. */
747 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
748 assert(node->op == op_Block);
  /* (re)allocate when no in_cg exists yet or the arity changed */
749 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
750 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
  /* slot 0 is the (unused) block slot, as in the normal in-array */
751 node->attr.block.in_cg[0] = NULL;
752 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
754 /* Fix backedge array. fix_backedges() operates depending on
755 interprocedural_view. */
  /* temporarily force the interprocedural view so fix_backedges sizes
     against in_cg, then restore the caller's setting */
756 int ipv = get_interprocedural_view();
757 set_interprocedural_view(1);
758 fix_backedges(current_ir_graph->obst, node);
759 set_interprocedural_view(ipv);
762 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
765 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
766 assert(node->op == op_Block &&
767 node->attr.block.in_cg &&
768 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
769 node->attr.block.in_cg[pos + 1] = pred;
772 ir_node **get_Block_cg_cfgpred_arr(ir_node * node) {
773 assert(node->op == op_Block);
774 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
777 int get_Block_cg_n_cfgpreds(ir_node * node) {
778 assert(node->op == op_Block);
779 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
782 ir_node *get_Block_cg_cfgpred(ir_node * node, int pos) {
783 assert(node->op == op_Block && node->attr.block.in_cg);
784 return node->attr.block.in_cg[pos + 1];
787 void remove_Block_cg_cfgpred_arr(ir_node * node) {
788 assert(node->op == op_Block);
789 node->attr.block.in_cg = NULL;
792 ir_node *(set_Block_dead)(ir_node *block) {
793 return _set_Block_dead(block);
796 int (is_Block_dead)(const ir_node *block) {
797 return _is_Block_dead(block);
800 ir_extblk *get_Block_extbb(const ir_node *block) {
802 assert(is_Block(block));
803 res = block->attr.block.extblk;
804 assert(res == NULL || is_ir_extbb(res));
808 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
809 assert(is_Block(block));
810 assert(extblk == NULL || is_ir_extbb(extblk));
811 block->attr.block.extblk = extblk;
815 get_End_n_keepalives(ir_node *end) {
816 assert(end->op == op_End);
817 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
821 get_End_keepalive(ir_node *end, int pos) {
822 assert(end->op == op_End);
823 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
827 add_End_keepalive (ir_node *end, ir_node *ka) {
829 ir_graph *irg = get_irn_irg(end);
831 assert(end->op == op_End);
832 l = ARR_LEN(end->in);
833 ARR_APP1(ir_node *, end->in, ka);
834 edges_notify_edge(end, l - 1, end->in[l], NULL, irg);
838 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
839 assert(end->op == op_End);
840 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
843 /* Set new keep-alives */
844 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
846 ir_graph *irg = get_irn_irg(end);
848 /* notify that edges are deleted */
  /* start at 1 + offset: slot 0 of end->in is the block */
849 for (i = 1 + END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in); ++i) {
850 edges_notify_edge(end, i, NULL, end->in[i], irg);
  /* shrink/grow the flexible in-array to exactly n keep-alives */
852 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
  /* install the new keep-alives and announce the new edges */
854 for (i = 0; i < n; ++i) {
855 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
856 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
860 /* Set new keep-alives from old keep-alives, skipping irn */
861 void remove_End_keepalive(ir_node *end, ir_node *irn) {
862 int n = get_End_n_keepalives(end);
  /* stack-allocated scratch array for the surviving keep-alives */
866 NEW_ARR_A(ir_node *, in, n);
  /* copy every keep-alive except irn; idx counts the survivors.
     NOTE(review): the filter condition / copy line inside this loop is
     missing from this listing. */
868 for (idx = i = 0; i < n; ++i) {
869 ir_node *old_ka = get_End_keepalive(end, i);
876 /* set new keep-alives */
877 set_End_keepalives(end, idx, in);
881 free_End (ir_node *end) {
882 assert(end->op == op_End);
885 end->in = NULL; /* @@@ make sure we get an error if we use the
886 in array afterwards ... */
889 /* Return the target address of an IJmp */
890 ir_node *get_IJmp_target(ir_node *ijmp) {
891 assert(ijmp->op == op_IJmp);
892 return get_irn_n(ijmp, 0);
895 /** Sets the target address of an IJmp */
896 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
897 assert(ijmp->op == op_IJmp);
898 set_irn_n(ijmp, 0, tgt);
902 > Implementing the case construct (which is where the constant Proj node is
903 > important) involves far more than simply determining the constant values.
904 > We could argue that this is more properly a function of the translator from
905 > Firm to the target machine. That could be done if there was some way of
906 > projecting "default" out of the Cond node.
907 I know it's complicated.
908 Basically there are two problems:
909 - determining the gaps between the projs
910 - determining the biggest case constant to know the proj number for
912 I see several solutions:
913 1. Introduce a ProjDefault node. Solves both problems.
914 This means to extend all optimizations executed during construction.
915 2. Give the Cond node for switch two flavors:
916 a) there are no gaps in the projs (existing flavor)
917 b) gaps may exist, default proj is still the Proj with the largest
918 projection number. This covers also the gaps.
919 3. Fix the semantic of the Cond to that of 2b)
921 Solution 2 seems to be the best:
922 Computing the gaps in the Firm representation is not too hard, i.e.,
923 libFIRM can implement a routine that transforms between the two
924 flavours. This is also possible for 1) but 2) does not require to
925 change any existing optimization.
926 Further it should be far simpler to determine the biggest constant than
928 I don't want to choose 3) as 2a) seems to have advantages for
929 dataflow analysis and 3) does not allow to convert the representation to
933 get_Cond_selector(ir_node *node) {
934 assert(node->op == op_Cond);
935 return get_irn_n(node, 0);
939 set_Cond_selector(ir_node *node, ir_node *selector) {
940 assert(node->op == op_Cond);
941 set_irn_n(node, 0, selector);
945 get_Cond_kind(ir_node *node) {
946 assert(node->op == op_Cond);
947 return node->attr.cond.kind;
951 set_Cond_kind(ir_node *node, cond_kind kind) {
952 assert(node->op == op_Cond);
953 node->attr.cond.kind = kind;
957 get_Cond_defaultProj(ir_node *node) {
958 assert(node->op == op_Cond);
959 return node->attr.cond.default_proj;
963 get_Return_mem(ir_node *node) {
964 assert(node->op == op_Return);
965 return get_irn_n(node, 0);
969 set_Return_mem(ir_node *node, ir_node *mem) {
970 assert(node->op == op_Return);
971 set_irn_n(node, 0, mem);
975 get_Return_n_ress(ir_node *node) {
976 assert(node->op == op_Return);
977 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
981 get_Return_res_arr (ir_node *node) {
982 assert((node->op == op_Return));
983 if (get_Return_n_ress(node) > 0)
984 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
991 set_Return_n_res(ir_node *node, int results) {
992 assert(node->op == op_Return);
997 get_Return_res(ir_node *node, int pos) {
998 assert(node->op == op_Return);
999 assert(get_Return_n_ress(node) > pos);
1000 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1004 set_Return_res(ir_node *node, int pos, ir_node *res){
1005 assert(node->op == op_Return);
1006 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1009 tarval *(get_Const_tarval)(ir_node *node) {
1010 return _get_Const_tarval(node);
1014 set_Const_tarval(ir_node *node, tarval *con) {
1015 assert(node->op == op_Const);
1016 node->attr.con.tv = con;
1019 cnst_classify_t (classify_Const)(ir_node *node) {
1020 return _classify_Const(node);
1024 /* The source language type. Must be an atomic type. Mode of type must
1025 be mode of node. For tarvals from entities type must be pointer to
1028 get_Const_type(ir_node *node) {
1029 assert(node->op == op_Const);
1030 return node->attr.con.tp;
1034 set_Const_type(ir_node *node, ir_type *tp) {
1035 assert(node->op == op_Const);
1036 if (tp != firm_unknown_type) {
1037 assert(is_atomic_type(tp));
1038 assert(get_type_mode(tp) == get_irn_mode(node));
1040 node->attr.con.tp = tp;
1045 get_SymConst_kind(const ir_node *node) {
1046 assert(node->op == op_SymConst);
1047 return node->attr.symc.num;
1051 set_SymConst_kind(ir_node *node, symconst_kind num) {
1052 assert(node->op == op_SymConst);
1053 node->attr.symc.num = num;
1057 get_SymConst_type(ir_node *node) {
1058 assert((node->op == op_SymConst) &&
1059 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1060 return node->attr.symc.sym.type_p = skip_tid(node->attr.symc.sym.type_p);
1064 set_SymConst_type(ir_node *node, ir_type *tp) {
1065 assert((node->op == op_SymConst) &&
1066 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1067 node->attr.symc.sym.type_p = tp;
1071 get_SymConst_name(ir_node *node) {
1072 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1073 return node->attr.symc.sym.ident_p;
1077 set_SymConst_name(ir_node *node, ident *name) {
1078 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1079 node->attr.symc.sym.ident_p = name;
1083 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1084 ir_entity *get_SymConst_entity(ir_node *node) {
1085 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1086 return node->attr.symc.sym.entity_p;
1089 void set_SymConst_entity(ir_node *node, ir_entity *ent) {
1090 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1091 node->attr.symc.sym.entity_p = ent;
1094 ir_enum_const *get_SymConst_enum(ir_node *node) {
1095 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1096 return node->attr.symc.sym.enum_p;
1099 void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
1100 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1101 node->attr.symc.sym.enum_p = ec;
1104 union symconst_symbol
1105 get_SymConst_symbol(ir_node *node) {
1106 assert(node->op == op_SymConst);
1107 return node->attr.symc.sym;
1111 set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
1112 assert(node->op == op_SymConst);
1113 node->attr.symc.sym = sym;
1117 get_SymConst_value_type(ir_node *node) {
1118 assert(node->op == op_SymConst);
1119 if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
1120 return node->attr.symc.tp;
1124 set_SymConst_value_type(ir_node *node, ir_type *tp) {
1125 assert(node->op == op_SymConst);
1126 node->attr.symc.tp = tp;
1130 get_Sel_mem(ir_node *node) {
1131 assert(node->op == op_Sel);
1132 return get_irn_n(node, 0);
1136 set_Sel_mem(ir_node *node, ir_node *mem) {
1137 assert(node->op == op_Sel);
1138 set_irn_n(node, 0, mem);
1142 get_Sel_ptr(ir_node *node) {
1143 assert(node->op == op_Sel);
1144 return get_irn_n(node, 1);
1148 set_Sel_ptr(ir_node *node, ir_node *ptr) {
1149 assert(node->op == op_Sel);
1150 set_irn_n(node, 1, ptr);
1154 get_Sel_n_indexs(ir_node *node) {
1155 assert(node->op == op_Sel);
1156 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1160 get_Sel_index_arr(ir_node *node) {
1161 assert((node->op == op_Sel));
1162 if (get_Sel_n_indexs(node) > 0)
1163 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1169 get_Sel_index(ir_node *node, int pos) {
1170 assert(node->op == op_Sel);
1171 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1175 set_Sel_index(ir_node *node, int pos, ir_node *index) {
1176 assert(node->op == op_Sel);
1177 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1181 get_Sel_entity(ir_node *node) {
1182 assert(node->op == op_Sel);
1183 return node->attr.sel.ent;
1187 set_Sel_entity(ir_node *node, ir_entity *ent) {
1188 assert(node->op == op_Sel);
1189 node->attr.sel.ent = ent;
1193 /* For unary and binary arithmetic operations the access to the
1194 operands can be factored out. Left is the first, right the
1195 second arithmetic value as listed in tech report 0999-33.
1196 unops are: Minus, Abs, Not, Conv, Cast
1197 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1198 Shr, Shrs, Rotate, Cmp */
1202 get_Call_mem(ir_node *node) {
1203 assert(node->op == op_Call);
1204 return get_irn_n(node, 0);
1208 set_Call_mem(ir_node *node, ir_node *mem) {
1209 assert(node->op == op_Call);
1210 set_irn_n(node, 0, mem);
1214 get_Call_ptr(ir_node *node) {
1215 assert(node->op == op_Call);
1216 return get_irn_n(node, 1);
1220 set_Call_ptr(ir_node *node, ir_node *ptr) {
1221 assert(node->op == op_Call);
1222 set_irn_n(node, 1, ptr);
1226 get_Call_param_arr(ir_node *node) {
1227 assert(node->op == op_Call);
1228 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1232 get_Call_n_params(ir_node *node) {
1233 assert(node->op == op_Call);
1234 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1238 get_Call_arity(ir_node *node) {
1239 assert(node->op == op_Call);
1240 return get_Call_n_params(node);
1244 set_Call_arity(ir_node *node, ir_node *arity) {
1245 assert(node->op == op_Call);
1250 get_Call_param(ir_node *node, int pos) {
1251 assert(node->op == op_Call);
1252 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1256 set_Call_param(ir_node *node, int pos, ir_node *param) {
1257 assert(node->op == op_Call);
1258 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1262 get_Call_type(ir_node *node) {
1263 assert(node->op == op_Call);
1264 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1268 set_Call_type(ir_node *node, ir_type *tp) {
1269 assert(node->op == op_Call);
1270 assert((get_unknown_type() == tp) || is_Method_type(tp));
1271 node->attr.call.cld_tp = tp;
1274 int Call_has_callees(ir_node *node) {
1275 assert(node && node->op == op_Call);
1276 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1277 (node->attr.call.callee_arr != NULL));
1280 int get_Call_n_callees(ir_node * node) {
1281 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1282 return ARR_LEN(node->attr.call.callee_arr);
1285 ir_entity * get_Call_callee(ir_node * node, int pos) {
1286 assert(pos >= 0 && pos < get_Call_n_callees(node));
1287 return node->attr.call.callee_arr[pos];
1290 void set_Call_callee_arr(ir_node * node, const int n, ir_entity ** arr) {
1291 assert(node->op == op_Call);
1292 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1293 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1295 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1298 void remove_Call_callee_arr(ir_node * node) {
1299 assert(node->op == op_Call);
1300 node->attr.call.callee_arr = NULL;
1303 ir_node * get_CallBegin_ptr(ir_node *node) {
1304 assert(node->op == op_CallBegin);
1305 return get_irn_n(node, 0);
1308 void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
1309 assert(node->op == op_CallBegin);
1310 set_irn_n(node, 0, ptr);
1313 ir_node * get_CallBegin_call(ir_node *node) {
1314 assert(node->op == op_CallBegin);
1315 return node->attr.callbegin.call;
1318 void set_CallBegin_call(ir_node *node, ir_node *call) {
1319 assert(node->op == op_CallBegin);
1320 node->attr.callbegin.call = call;
1325 ir_node * get_##OP##_left(ir_node *node) { \
1326 assert(node->op == op_##OP); \
1327 return get_irn_n(node, node->op->op_index); \
1329 void set_##OP##_left(ir_node *node, ir_node *left) { \
1330 assert(node->op == op_##OP); \
1331 set_irn_n(node, node->op->op_index, left); \
1333 ir_node *get_##OP##_right(ir_node *node) { \
1334 assert(node->op == op_##OP); \
1335 return get_irn_n(node, node->op->op_index + 1); \
1337 void set_##OP##_right(ir_node *node, ir_node *right) { \
1338 assert(node->op == op_##OP); \
1339 set_irn_n(node, node->op->op_index + 1, right); \
1343 ir_node *get_##OP##_op(ir_node *node) { \
1344 assert(node->op == op_##OP); \
1345 return get_irn_n(node, node->op->op_index); \
1347 void set_##OP##_op (ir_node *node, ir_node *op) { \
1348 assert(node->op == op_##OP); \
1349 set_irn_n(node, node->op->op_index, op); \
1352 #define BINOP_MEM(OP) \
1356 get_##OP##_mem(ir_node *node) { \
1357 assert(node->op == op_##OP); \
1358 return get_irn_n(node, 0); \
1362 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1363 assert(node->op == op_##OP); \
1364 set_irn_n(node, 0, mem); \
/* Conv strict flag: when set, the conversion must not be optimized away. */
1388 int get_Conv_strict(ir_node *node) {
1389 assert(node->op == op_Conv);
1390 return node->attr.conv.strict;
1393 void set_Conv_strict(ir_node *node, int strict_flag) {
1394 assert(node->op == op_Conv);
/* stored as char; any non-zero flag value is narrowed */
1395 node->attr.conv.strict = (char)strict_flag;
/* Cast target type accessors. */
1399 get_Cast_type(ir_node *node) {
1400 assert(node->op == op_Cast);
1401 return node->attr.cast.totype;
1405 set_Cast_type(ir_node *node, ir_type *to_tp) {
1406 assert(node->op == op_Cast);
1407 node->attr.cast.totype = to_tp;
1411 /* Checks for upcast.
1413  * Returns true if the Cast node casts a class type to a super type.
1415 int is_Cast_upcast(ir_node *node) {
1416 ir_type *totype   = get_Cast_type(node);
1417 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1418 ir_graph *myirg = get_irn_irg(node);
/* requires computed type information on the graph */
1420 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
/* strip matching pointer levels so we compare the pointed-to classes */
1423 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1424 totype   = get_pointer_points_to_type(totype);
1425 fromtype = get_pointer_points_to_type(fromtype);
1430 if (!is_Class_type(totype)) return 0;
1431 return is_SubClass_of(fromtype, totype);
1434 /* Checks for downcast.
1436  * Returns true if the Cast node casts a class type to a sub type.
1438 int is_Cast_downcast(ir_node *node) {
1439 ir_type *totype   = get_Cast_type(node);
1440 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1442 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1445 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1446 totype   = get_pointer_points_to_type(totype);
1447 fromtype = get_pointer_points_to_type(fromtype);
/* downcast: the target class must be a subclass of the source class */
1452 if (!is_Class_type(totype)) return 0;
1453 return is_SubClass_of(totype, fromtype);
/* Out-of-line versions of the is_unop/is_binop macros. */
1457 (is_unop)(const ir_node *node) {
1458 return _is_unop(node);
/* Generic operand access for any unary operation, via op_index. */
1462 get_unop_op(ir_node *node) {
1463 if (node->op->opar == oparity_unary)
1464 return get_irn_n(node, node->op->op_index);
/* reaching here means the node is not unary; fail in debug builds */
1466 assert(node->op->opar == oparity_unary);
1471 set_unop_op(ir_node *node, ir_node *op) {
1472 if (node->op->opar == oparity_unary)
1473 set_irn_n(node, node->op->op_index, op);
1475 assert(node->op->opar == oparity_unary);
1479 (is_binop)(const ir_node *node) {
1480 return _is_binop(node);
/* Generic left/right operand access for any binary operation. */
1484 get_binop_left(ir_node *node) {
1485 assert(node->op->opar == oparity_binary);
1486 return get_irn_n(node, node->op->op_index);
1490 set_binop_left(ir_node *node, ir_node *left) {
1491 assert(node->op->opar == oparity_binary);
1492 set_irn_n(node, node->op->op_index, left);
1496 get_binop_right(ir_node *node) {
1497 assert(node->op->opar == oparity_binary);
1498 return get_irn_n(node, node->op->op_index + 1);
1502 set_binop_right(ir_node *node, ir_node *right) {
1503 assert(node->op->opar == oparity_binary);
1504 set_irn_n(node, node->op->op_index + 1, right);
/* A Filter acts as a Phi in the interprocedural view; a Phi0 (arity 0,
   during construction) is deliberately NOT reported as a Phi here. */
1507 int is_Phi(const ir_node *n) {
1513 if (op == op_Filter) return get_interprocedural_view();
1516 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1517 (get_irn_arity(n) > 0));
/* Phi0: placeholder Phi with no predecessors, only while building. */
1522 int is_Phi0(const ir_node *n) {
1525 return ((get_irn_op(n) == op_Phi) &&
1526 (get_irn_arity(n) == 0) &&
1527 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Raw predecessor array; in[0] is the block, so preds start at in[1]. */
1531 get_Phi_preds_arr(ir_node *node) {
1532 assert(node->op == op_Phi);
1533 return (ir_node **)&(get_irn_in(node)[1]);
1537 get_Phi_n_preds(ir_node *node) {
1538 assert(is_Phi(node) || is_Phi0(node));
1539 return (get_irn_arity(node));
1543 void set_Phi_n_preds(ir_node *node, int n_preds) {
1544 assert(node->op == op_Phi);
1549 get_Phi_pred(ir_node *node, int pos) {
1550 assert(is_Phi(node) || is_Phi0(node));
1551 return get_irn_n(node, pos);
1555 set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
1556 assert(is_Phi(node) || is_Phi0(node));
1557 set_irn_n(node, pos, pred);
/* "memop" here means exactly Load or Store; both have mem at 0, ptr at 1. */
1561 int is_memop(ir_node *node) {
1562 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1565 ir_node *get_memop_mem(ir_node *node) {
1566 assert(is_memop(node));
1567 return get_irn_n(node, 0);
1570 void set_memop_mem(ir_node *node, ir_node *mem) {
1571 assert(is_memop(node));
1572 set_irn_n(node, 0, mem);
1575 ir_node *get_memop_ptr(ir_node *node) {
1576 assert(is_memop(node));
1577 return get_irn_n(node, 1);
1580 void set_memop_ptr(ir_node *node, ir_node *ptr) {
1581 assert(is_memop(node));
1582 set_irn_n(node, 1, ptr);
/* Load operands: 0 = memory, 1 = address; attributes: mode, volatility. */
1586 get_Load_mem(ir_node *node) {
1587 assert(node->op == op_Load);
1588 return get_irn_n(node, 0);
1592 set_Load_mem(ir_node *node, ir_node *mem) {
1593 assert(node->op == op_Load);
1594 set_irn_n(node, 0, mem);
1598 get_Load_ptr(ir_node *node) {
1599 assert(node->op == op_Load);
1600 return get_irn_n(node, 1);
1604 set_Load_ptr(ir_node *node, ir_node *ptr) {
1605 assert(node->op == op_Load);
1606 set_irn_n(node, 1, ptr);
/* The mode of the value being loaded (result mode of Proj_res). */
1610 get_Load_mode(ir_node *node) {
1611 assert(node->op == op_Load);
1612 return node->attr.load.load_mode;
1616 set_Load_mode(ir_node *node, ir_mode *mode) {
1617 assert(node->op == op_Load);
1618 node->attr.load.load_mode = mode;
1622 get_Load_volatility(ir_node *node) {
1623 assert(node->op == op_Load);
1624 return node->attr.load.volatility;
1628 set_Load_volatility(ir_node *node, ir_volatility volatility) {
1629 assert(node->op == op_Load);
1630 node->attr.load.volatility = volatility;
/* Store operands: 0 = memory, 1 = address, 2 = value; attribute: volatility. */
1635 get_Store_mem(ir_node *node) {
1636 assert(node->op == op_Store);
1637 return get_irn_n(node, 0);
1641 set_Store_mem(ir_node *node, ir_node *mem) {
1642 assert(node->op == op_Store);
1643 set_irn_n(node, 0, mem);
1647 get_Store_ptr(ir_node *node) {
1648 assert(node->op == op_Store);
1649 return get_irn_n(node, 1);
1653 set_Store_ptr(ir_node *node, ir_node *ptr) {
1654 assert(node->op == op_Store);
1655 set_irn_n(node, 1, ptr);
1659 get_Store_value(ir_node *node) {
1660 assert(node->op == op_Store);
1661 return get_irn_n(node, 2);
1665 set_Store_value(ir_node *node, ir_node *value) {
1666 assert(node->op == op_Store);
1667 set_irn_n(node, 2, value);
1671 get_Store_volatility(ir_node *node) {
1672 assert(node->op == op_Store);
1673 return node->attr.store.volatility;
1677 set_Store_volatility(ir_node *node, ir_volatility volatility) {
1678 assert(node->op == op_Store);
1679 node->attr.store.volatility = volatility;
/* Alloc operands: 0 = memory, 1 = size; attributes: type, where (stack/heap). */
1684 get_Alloc_mem(ir_node *node) {
1685 assert(node->op == op_Alloc);
1686 return get_irn_n(node, 0);
1690 set_Alloc_mem(ir_node *node, ir_node *mem) {
1691 assert(node->op == op_Alloc);
1692 set_irn_n(node, 0, mem);
1696 get_Alloc_size(ir_node *node) {
1697 assert(node->op == op_Alloc);
1698 return get_irn_n(node, 1);
1702 set_Alloc_size(ir_node *node, ir_node *size) {
1703 assert(node->op == op_Alloc);
1704 set_irn_n(node, 1, size);
1708 get_Alloc_type(ir_node *node) {
1709 assert(node->op == op_Alloc);
/* normalize on read: resolve type-id indirections and cache the result */
1710 return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
1714 set_Alloc_type(ir_node *node, ir_type *tp) {
1715 assert(node->op == op_Alloc);
1716 node->attr.alloc.type = tp;
1720 get_Alloc_where(ir_node *node) {
1721 assert(node->op == op_Alloc);
1722 return node->attr.alloc.where;
1726 set_Alloc_where(ir_node *node, where_alloc where) {
1727 assert(node->op == op_Alloc);
1728 node->attr.alloc.where = where;
/* Free operands: 0 = memory, 1 = pointer, 2 = size; attributes: type, where. */
1733 get_Free_mem(ir_node *node) {
1734 assert(node->op == op_Free);
1735 return get_irn_n(node, 0);
1739 set_Free_mem(ir_node *node, ir_node *mem) {
1740 assert(node->op == op_Free);
1741 set_irn_n(node, 0, mem);
1745 get_Free_ptr(ir_node *node) {
1746 assert(node->op == op_Free);
1747 return get_irn_n(node, 1);
1751 set_Free_ptr(ir_node *node, ir_node *ptr) {
1752 assert(node->op == op_Free);
1753 set_irn_n(node, 1, ptr);
1757 get_Free_size(ir_node *node) {
1758 assert(node->op == op_Free);
1759 return get_irn_n(node, 2);
1763 set_Free_size(ir_node *node, ir_node *size) {
1764 assert(node->op == op_Free);
1765 set_irn_n(node, 2, size);
1769 get_Free_type(ir_node *node) {
1770 assert(node->op == op_Free);
/* normalize on read, as in get_Alloc_type() */
1771 return node->attr.free.type = skip_tid(node->attr.free.type);
1775 set_Free_type(ir_node *node, ir_type *tp) {
1776 assert(node->op == op_Free);
1777 node->attr.free.type = tp;
1781 get_Free_where(ir_node *node) {
1782 assert(node->op == op_Free);
1783 return node->attr.free.where;
1787 set_Free_where(ir_node *node, where_alloc where) {
1788 assert(node->op == op_Free);
1789 node->attr.free.where = where;
/* Sync predecessors; in[0] is the block, so the pred array starts at in[1]. */
1792 ir_node **get_Sync_preds_arr(ir_node *node) {
1793 assert(node->op == op_Sync);
1794 return (ir_node **)&(get_irn_in(node)[1]);
1797 int get_Sync_n_preds(ir_node *node) {
1798 assert(node->op == op_Sync);
1799 return (get_irn_arity(node));
1803 void set_Sync_n_preds(ir_node *node, int n_preds) {
1804 assert(node->op == op_Sync);
1808 ir_node *get_Sync_pred(ir_node *node, int pos) {
1809 assert(node->op == op_Sync);
1810 return get_irn_n(node, pos);
1813 void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
1814 assert(node->op == op_Sync);
1815 set_irn_n(node, pos, pred);
1818 /* Add a new Sync predecessor */
1819 void add_Sync_pred(ir_node *node, ir_node *pred) {
1821 ir_graph *irg = get_irn_irg(node);
1823 assert(node->op == op_Sync);
/* append to the dynamic in-array, then tell the edge module about it */
1824 l = ARR_LEN(node->in);
1825 ARR_APP1(ir_node *, node->in, pred);
1826 edges_notify_edge(node, l, node->in[l], NULL, irg);
1829 /* Returns the source language type of a Proj node. */
1830 ir_type *get_Proj_type(ir_node *n) {
1831 ir_type *tp = firm_unknown_type;
1832 ir_node *pred = get_Proj_pred(n);
1834 switch (get_irn_opcode(pred)) {
1837 /* Deal with Start / Call here: we need to know the Proj Nr. */
1838 assert(get_irn_mode(pred) == mode_T);
1839 pred_pred = get_Proj_pred(pred);
/* Proj(Proj(Start)): parameter type from the method type of the graph */
1840 if (get_irn_op(pred_pred) == op_Start) {
1841 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1842 tp = get_method_param_type(mtp, get_Proj_proj(n));
/* Proj(Proj(Call)): result type from the call's method type */
1843 } else if (get_irn_op(pred_pred) == op_Call) {
1844 ir_type *mtp = get_Call_type(pred_pred);
1845 tp = get_method_res_type(mtp, get_Proj_proj(n));
1848 case iro_Start: break;
1849 case iro_Call: break;
/* Proj(Load): if the address is a Sel, use the selected entity's type */
1851 ir_node *a = get_Load_ptr(pred);
1853 tp = get_entity_type(get_Sel_entity(a));
/* Proj: single operand (the multi-value predecessor) plus a projection nr. */
1862 get_Proj_pred(const ir_node *node) {
1863 assert(is_Proj(node));
1864 return get_irn_n(node, 0);
1868 set_Proj_pred(ir_node *node, ir_node *pred) {
1869 assert(is_Proj(node));
1870 set_irn_n(node, 0, pred);
1874 get_Proj_proj(const ir_node *node) {
1875 assert(is_Proj(node));
/* is_Proj() also accepts Filter in the interprocedural view, which keeps
   its projection number in a different attribute */
1876 if (get_irn_opcode(node) == iro_Proj) {
1877 return node->attr.proj;
1879 assert(get_irn_opcode(node) == iro_Filter);
1880 return node->attr.filter.proj;
1885 set_Proj_proj(ir_node *node, long proj) {
1886 assert(node->op == op_Proj);
1887 node->attr.proj = proj;
/* Tuple predecessors; in[0] is the block, so preds start at in[1]. */
1891 get_Tuple_preds_arr(ir_node *node) {
1892 assert(node->op == op_Tuple);
1893 return (ir_node **)&(get_irn_in(node)[1]);
1897 get_Tuple_n_preds(ir_node *node) {
1898 assert(node->op == op_Tuple);
1899 return (get_irn_arity(node));
1904 set_Tuple_n_preds(ir_node *node, int n_preds) {
1905 assert(node->op == op_Tuple);
1910 get_Tuple_pred (ir_node *node, int pos) {
1911 assert(node->op == op_Tuple);
1912 return get_irn_n(node, pos);
1916 set_Tuple_pred(ir_node *node, int pos, ir_node *pred) {
1917 assert(node->op == op_Tuple);
1918 set_irn_n(node, pos, pred);
/* Id: forwarding node with a single predecessor. */
1922 get_Id_pred(ir_node *node) {
1923 assert(node->op == op_Id);
1924 return get_irn_n(node, 0);
1928 set_Id_pred(ir_node *node, ir_node *pred) {
1929 assert(node->op == op_Id);
1930 set_irn_n(node, 0, pred);
/* Confirm operands: 0 = confirmed value, 1 = bound it is compared against. */
1933 ir_node *get_Confirm_value(ir_node *node) {
1934 assert(node->op == op_Confirm);
1935 return get_irn_n(node, 0);
1938 void set_Confirm_value(ir_node *node, ir_node *value) {
1939 assert(node->op == op_Confirm);
1940 set_irn_n(node, 0, value);
1943 ir_node *get_Confirm_bound(ir_node *node) {
1944 assert(node->op == op_Confirm);
1945 return get_irn_n(node, 1);
/* Sets the bound operand of a Confirm node.
   BUG FIX: this wrote operand 0 (the confirmed value) while
   get_Confirm_bound() reads operand 1 -- the bound lives at position 1. */
1948 void set_Confirm_bound(ir_node *node, ir_node *bound) {
1949 assert(node->op == op_Confirm);
1950 set_irn_n(node, 1, bound);
/* The comparison relation (pn_Cmp) asserted by this Confirm. */
1953 pn_Cmp get_Confirm_cmp(ir_node *node) {
1954 assert(node->op == op_Confirm);
1955 return node->attr.confirm_cmp;
1958 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
1959 assert(node->op == op_Confirm);
1960 node->attr.confirm_cmp = cmp;
/* Filter: interprocedural Proj/Phi hybrid. Besides the normal in[] array it
   keeps a separate interprocedural predecessor array attr.filter.in_cg. */
1965 get_Filter_pred(ir_node *node) {
1966 assert(node->op == op_Filter);
1971 set_Filter_pred(ir_node *node, ir_node *pred) {
1972 assert(node->op == op_Filter);
1977 get_Filter_proj(ir_node *node) {
1978 assert(node->op == op_Filter);
1979 return node->attr.filter.proj;
1983 set_Filter_proj(ir_node *node, long proj) {
1984 assert(node->op == op_Filter);
1985 node->attr.filter.proj = proj;
1988 /* Don't use get_irn_arity, get_irn_n in implementation as access
1989    shall work independent of view!!! */
1990 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1991 assert(node->op == op_Filter);
/* (re)allocate in_cg/backedge only when the arity actually changed;
   slot 0 of in_cg mirrors the block pointer from in[0] */
1992 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1993 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1994 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1995 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1996 node->attr.filter.in_cg[0] = node->in[0];
1998 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2001 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2002 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2003 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2004 node->attr.filter.in_cg[pos + 1] = pred;
2007 int get_Filter_n_cg_preds(ir_node *node) {
2008 assert(node->op == op_Filter && node->attr.filter.in_cg);
2009 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2012 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2014 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2016 arity = ARR_LEN(node->attr.filter.in_cg);
2017 assert(pos < arity - 1);
2018 return node->attr.filter.in_cg[pos + 1];
/* Mux accessors; a Psi with exactly one condition (arity 3) is treated as a
   Mux, so every accessor forwards to the Psi equivalent in that case. */
2022 ir_node *get_Mux_sel(ir_node *node) {
2023 if (node->op == op_Psi) {
2024 assert(get_irn_arity(node) == 3);
2025 return get_Psi_cond(node, 0);
2027 assert(node->op == op_Mux);
2031 void set_Mux_sel(ir_node *node, ir_node *sel) {
2032 if (node->op == op_Psi) {
2033 assert(get_irn_arity(node) == 3);
2034 set_Psi_cond(node, 0, sel);
2036 assert(node->op == op_Mux);
2041 ir_node *get_Mux_false(ir_node *node) {
2042 if (node->op == op_Psi) {
2043 assert(get_irn_arity(node) == 3);
2044 return get_Psi_default(node);
2046 assert(node->op == op_Mux);
2050 void set_Mux_false(ir_node *node, ir_node *ir_false) {
2051 if (node->op == op_Psi) {
2052 assert(get_irn_arity(node) == 3);
2053 set_Psi_default(node, ir_false);
2055 assert(node->op == op_Mux);
/* NOTE(review): writes in[] directly, bypassing set_irn_n() and thus the
   edge-notification hooks -- confirm this is intentional */
2056 node->in[2] = ir_false;
2060 ir_node *get_Mux_true(ir_node *node) {
2061 if (node->op == op_Psi) {
2062 assert(get_irn_arity(node) == 3);
2063 return get_Psi_val(node, 0);
2065 assert(node->op == op_Mux);
2069 void set_Mux_true(ir_node *node, ir_node *ir_true) {
2070 if (node->op == op_Psi) {
2071 assert(get_irn_arity(node) == 3);
2072 set_Psi_val(node, 0, ir_true);
2074 assert(node->op == op_Mux);
2075 node->in[3] = ir_true;
/* Psi operand layout: cond_i at 2*i, val_i at 2*i + 1, default value last. */
2080 ir_node *get_Psi_cond(ir_node *node, int pos) {
2081 int num_conds = get_Psi_n_conds(node);
2082 assert(node->op == op_Psi);
2083 assert(pos < num_conds);
2084 return get_irn_n(node, 2 * pos);
2087 void set_Psi_cond(ir_node *node, int pos, ir_node *cond) {
2088 int num_conds = get_Psi_n_conds(node);
2089 assert(node->op == op_Psi);
2090 assert(pos < num_conds);
2091 set_irn_n(node, 2 * pos, cond);
2094 ir_node *get_Psi_val(ir_node *node, int pos) {
2095 int num_vals = get_Psi_n_conds(node);
2096 assert(node->op == op_Psi);
2097 assert(pos < num_vals);
2098 return get_irn_n(node, 2 * pos + 1);
2101 void set_Psi_val(ir_node *node, int pos, ir_node *val) {
2102 int num_vals = get_Psi_n_conds(node);
2103 assert(node->op == op_Psi);
2104 assert(pos < num_vals);
2105 set_irn_n(node, 2 * pos + 1, val);
/* The default value is the last operand. */
2108 ir_node *get_Psi_default(ir_node *node) {
2109 int def_pos = get_irn_arity(node) - 1;
2110 assert(node->op == op_Psi);
2111 return get_irn_n(node, def_pos);
/* Sets the default value (last operand) of a Psi node.
   BUG FIX: def_pos was get_irn_arity(node), i.e. one past the last operand,
   while get_Psi_default() reads position arity - 1. */
2114 void set_Psi_default(ir_node *node, ir_node *val) {
2115 int def_pos = get_irn_arity(node) - 1;
2116 assert(node->op == op_Psi);
2117 set_irn_n(node, def_pos, val);
/* Out-of-line version of the get_Psi_n_conds macro. */
2120 int (get_Psi_n_conds)(ir_node *node) {
2121 return _get_Psi_n_conds(node);
/* CopyB operands: 0 = memory, 1 = destination, 2 = source; attr: data type. */
2125 ir_node *get_CopyB_mem(ir_node *node) {
2126 assert(node->op == op_CopyB);
2127 return get_irn_n(node, 0);
2130 void set_CopyB_mem(ir_node *node, ir_node *mem) {
2131 assert(node->op == op_CopyB);
2132 set_irn_n(node, 0, mem);
2135 ir_node *get_CopyB_dst(ir_node *node) {
2136 assert(node->op == op_CopyB);
2137 return get_irn_n(node, 1);
2140 void set_CopyB_dst(ir_node *node, ir_node *dst) {
2141 assert(node->op == op_CopyB);
2142 set_irn_n(node, 1, dst);
2145 ir_node *get_CopyB_src (ir_node *node) {
2146 assert(node->op == op_CopyB);
2147 return get_irn_n(node, 2);
2150 void set_CopyB_src(ir_node *node, ir_node *src) {
2151 assert(node->op == op_CopyB);
2152 set_irn_n(node, 2, src);
2155 ir_type *get_CopyB_type(ir_node *node) {
2156 assert(node->op == op_CopyB);
2157 return node->attr.copyb.data_type;
2160 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2161 assert(node->op == op_CopyB && data_type);
2162 node->attr.copyb.data_type = data_type;
/* InstOf accessors: type attribute, store (mem) operand 0, object operand 1.
   BUG FIX: every assert below used "=" (assignment) instead of "==" -- the
   check could never fail and, worse, it overwrote node->op with op_InstOf
   in debug builds, silently corrupting any node passed in. */
2167 get_InstOf_type(ir_node *node) {
2168 assert(node->op == op_InstOf);
2169 return node->attr.instof.type;
2173 set_InstOf_type(ir_node *node, ir_type *type) {
2174 assert(node->op == op_InstOf);
2175 node->attr.instof.type = type;
2179 get_InstOf_store(ir_node *node) {
2180 assert(node->op == op_InstOf);
2181 return get_irn_n(node, 0);
2185 set_InstOf_store(ir_node *node, ir_node *obj) {
2186 assert(node->op == op_InstOf);
2187 set_irn_n(node, 0, obj);
2191 get_InstOf_obj(ir_node *node) {
2192 assert(node->op == op_InstOf);
2193 return get_irn_n(node, 1);
2197 set_InstOf_obj(ir_node *node, ir_node *obj) {
2198 assert(node->op == op_InstOf);
2199 set_irn_n(node, 1, obj);
2202 /* Returns the memory input of a Raise operation. */
2204 get_Raise_mem(ir_node *node) {
2205 assert(node->op == op_Raise);
2206 return get_irn_n(node, 0);
2210 set_Raise_mem(ir_node *node, ir_node *mem) {
2211 assert(node->op == op_Raise);
2212 set_irn_n(node, 0, mem);
/* Operand 1 is the pointer to the exception object being raised. */
2216 get_Raise_exo_ptr(ir_node *node) {
2217 assert(node->op == op_Raise);
2218 return get_irn_n(node, 1);
2222 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr) {
2223 assert(node->op == op_Raise);
2224 set_irn_n(node, 1, exo_ptr);
2229 /* Returns the memory input of a Bound operation. */
2230 ir_node *get_Bound_mem(ir_node *bound) {
2231 assert(bound->op == op_Bound);
2232 return get_irn_n(bound, 0);
2235 void set_Bound_mem(ir_node *bound, ir_node *mem) {
2236 assert(bound->op == op_Bound);
2237 set_irn_n(bound, 0, mem);
2240 /* Returns the index input of a Bound operation. */
2241 ir_node *get_Bound_index(ir_node *bound) {
2242 assert(bound->op == op_Bound);
2243 return get_irn_n(bound, 1);
2246 void set_Bound_index(ir_node *bound, ir_node *idx) {
2247 assert(bound->op == op_Bound);
2248 set_irn_n(bound, 1, idx);
2251 /* Returns the lower bound input of a Bound operation. */
2252 ir_node *get_Bound_lower(ir_node *bound) {
2253 assert(bound->op == op_Bound);
2254 return get_irn_n(bound, 2);
2257 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2258 assert(bound->op == op_Bound);
2259 set_irn_n(bound, 2, lower);
2262 /* Returns the upper bound input of a Bound operation. */
2263 ir_node *get_Bound_upper(ir_node *bound) {
2264 assert(bound->op == op_Bound);
2265 return get_irn_n(bound, 3);
2268 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2269 assert(bound->op == op_Bound);
2270 set_irn_n(bound, 3, upper);
2273 /* Return the operand of a Pin node. */
2274 ir_node *get_Pin_op(ir_node *pin) {
2275 assert(pin->op == op_Pin);
2276 return get_irn_n(pin, 0);
2279 void set_Pin_op(ir_node *pin, ir_node *node) {
2280 assert(pin->op == op_Pin);
2281 set_irn_n(pin, 0, node);
2285 /* returns the graph of a node */
2287 get_irn_irg(const ir_node *node) {
2289  * Do not use get_nodes_Block() here, because this
2290  * will check the pinned state.
2291  * However even a 'wrong' block is always in the proper
/* walk to the node's block (pred -1); the irg lives in the block attribute */
2294 if (! is_Block(node))
2295 node = get_irn_n(node, -1);
2296 if (is_Bad(node))  /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2297 node = get_irn_n(node, -1);
2298 assert(get_irn_op(node) == op_Block);
2299 return node->attr.block.irg;
2303 /*----------------------------------------------------------------*/
2304 /* Auxiliary routines                                             */
2305 /*----------------------------------------------------------------*/
/* Skip a Proj node and return its predecessor (NULL-safe by convention). */
2308 skip_Proj(ir_node *node) {
2309 /* don't assert node !!! */
2314 node = get_Proj_pred(node);
2320 skip_Proj_const(const ir_node *node) {
2321 /* don't assert node !!! */
2326 node = get_Proj_pred(node);
/* Resolve Proj(Tuple) chains to the real value; only when normalization
   optimizations are enabled. */
2332 skip_Tuple(ir_node *node) {
2336 if (!get_opt_normalize()) return node;
2339 if (get_irn_op(node) == op_Proj) {
2340 pred = get_Proj_pred(node);
2341 op   = get_irn_op(pred);
2344  * Looks strange but calls get_irn_op() only once
2345  * in most often cases.
2347 if (op == op_Proj) { /* nested Tuple ? */
2348 pred = skip_Tuple(pred);
2349 op   = get_irn_op(pred);
2351 if (op == op_Tuple) {
2352 node = get_Tuple_pred(pred, get_Proj_proj(node));
2355 } else if (op == op_Tuple) {
2356 node = get_Tuple_pred(pred, get_Proj_proj(node));
2363 /* returns operand of node if node is a Cast */
2364 ir_node *skip_Cast(ir_node *node) {
2365 if (get_irn_op(node) == op_Cast)
2366 return get_Cast_op(node);
2370 /* returns operand of node if node is a Confirm */
2371 ir_node *skip_Confirm(ir_node *node) {
2372 if (get_irn_op(node) == op_Confirm)
2373 return get_Confirm_value(node);
2377 /* skip all high-level ops */
2378 ir_node *skip_HighLevel(ir_node *node) {
2379 if (is_op_highlevel(get_irn_op(node)))
2380 return get_irn_n(node, 0);
2385 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2386  * than any other approach, as Id chains are resolved and all point to the real node, or
2387  * all id's are self loops.
2389  * Note: This function takes 10% of mostly ANY the compiler run, so it's
2390  * a little bit "hand optimized".
2392  * Moreover, it CANNOT be switched off using get_opt_normalize() ...
/* CAUTION: order-sensitive path-compression logic below; in[0+1] is the
   first real predecessor (in[0] is the block). Left byte-identical. */
2395 skip_Id(ir_node *node) {
2397 /* don't assert node !!! */
2399 if (!node || (node->op != op_Id)) return node;
2401 /* Don't use get_Id_pred(): We get into an endless loop for
2402    self-referencing Ids. */
2403 pred = node->in[0+1];
2405 if (pred->op != op_Id) return pred;
2407 if (node != pred) {   /* not a self referencing Id. Resolve Id chain. */
2408 ir_node *rem_pred, *res;
2410 if (pred->op != op_Id) return pred; /* shortcut */
2413 assert(get_irn_arity (node) > 0);
2415 node->in[0+1] = node;   /* turn us into a self referencing Id:  shorten Id cycles. */
2416 res = skip_Id(rem_pred);
2417 if (res->op == op_Id) /* self-loop */ return node;
2419 node->in[0+1] = res;    /* Turn Id chain into Ids all referencing the chain end. */
/* Like skip_Id(), but updates the caller's pointer in place. */
2426 void skip_Id_and_store(ir_node **node) {
2429 if (!n || (n->op != op_Id)) return;
2431 /* Don't use get_Id_pred(): We get into an endless loop for
2432    self-referencing Ids. */
/* Out-of-line versions of the inline predicate macros from irnode_t.h.
   The (name)(...) form suppresses macro expansion of the same-named macro. */
2437 (is_Bad)(const ir_node *node) {
2438 return _is_Bad(node);
2442 (is_NoMem)(const ir_node *node) {
2443 return _is_NoMem(node);
2447 (is_Mod)(const ir_node *node) {
2448 return _is_Mod(node);
2452 (is_Div)(const ir_node *node) {
2453 return _is_Div(node);
2457 (is_DivMod)(const ir_node *node) {
2458 return _is_DivMod(node);
2462 (is_Quot)(const ir_node *node) {
2463 return _is_Quot(node);
2467 (is_Start)(const ir_node *node) {
2468 return _is_Start(node);
2472 (is_End)(const ir_node *node) {
2473 return _is_End(node);
2477 (is_Const)(const ir_node *node) {
2478 return _is_Const(node);
2482 (is_no_Block)(const ir_node *node) {
2483 return _is_no_Block(node);
2487 (is_Block)(const ir_node *node) {
2488 return _is_Block(node);
2491 /* returns true if node is an Unknown node. */
2493 (is_Unknown)(const ir_node *node) {
2494 return _is_Unknown(node);
2497 /* returns true if node is a Return node. */
2499 (is_Return)(const ir_node *node) {
2500 return _is_Return(node);
2503 /* returns true if node is a Call node. */
2505 (is_Call)(const ir_node *node) {
2506 return _is_Call(node);
2509 /* returns true if node is a Sel node. */
2511 (is_Sel)(const ir_node *node) {
2512 return _is_Sel(node);
2515 /* returns true if node is a Mux node or a Psi with only one condition. */
2517 (is_Mux)(const ir_node *node) {
2518 return _is_Mux(node);
2521 /* returns true if node is a Load node. */
2523 (is_Load)(const ir_node *node) {
2524 return _is_Load(node);
2527 /* returns true if node is a Store node. */
2529 (is_Store)(const ir_node *node) {
2530 return _is_Store(node);
2533 /* returns true if node is a Sync node. */
2535 (is_Sync)(const ir_node *node) {
2536 return _is_Sync(node);
2539 /* returns true if node is a Confirm node. */
2541 (is_Confirm)(const ir_node *node) {
2542 return _is_Confirm(node);
2545 /* returns true if node is a Pin node. */
2547 (is_Pin)(const ir_node *node) {
2548 return _is_Pin(node);
2551 /* returns true if node is a SymConst node. */
2553 (is_SymConst)(const ir_node *node) {
2554 return _is_SymConst(node);
2557 /* returns true if node is a Cond node. */
2559 (is_Cond)(const ir_node *node) {
2560 return _is_Cond(node);
2564 (is_CopyB)(const ir_node *node) {
2565 return _is_CopyB(node);
2568 /* returns true if node is a Cmp node. */
2570 (is_Cmp)(const ir_node *node) {
2571 return _is_Cmp(node);
2574 /* returns true if node is an Alloc node. */
2576 (is_Alloc)(const ir_node *node) {
2577 return _is_Alloc(node);
2580 /* returns true if a node is a Jmp node. */
2582 (is_Jmp)(const ir_node *node) {
2583 return _is_Jmp(node);
2586 /* returns true if a node is a Raise node. */
2588 (is_Raise)(const ir_node *node) {
2589 return _is_Raise(node);
/* A Filter counts as a Proj outside the interprocedural view. */
2593 is_Proj(const ir_node *node) {
2595 return node->op == op_Proj ||
2596 (!get_interprocedural_view() && node->op == op_Filter);
2599 /* Returns true if the operation manipulates control flow. */
2601 is_cfop(const ir_node *node) {
2602 return is_cfopcode(get_irn_op(node));
2605 /* Returns true if the operation manipulates interprocedural control flow:
2606    CallBegin, EndReg, EndExcept */
2607 int is_ip_cfop(const ir_node *node) {
2608 return is_ip_cfopcode(get_irn_op(node));
2611 /* Returns true if the operation can change the control flow because
2614 is_fragile_op(const ir_node *node) {
2615 return is_op_fragile(get_irn_op(node));
2618 /* Returns the memory operand of fragile operations. */
2619 ir_node *get_fragile_op_mem(ir_node *node) {
2620 assert(node && is_fragile_op(node));
/* fragile ops visible here keep mem at operand 0; other cases are in the
   (not visible) remaining switch labels */
2622 switch (get_irn_opcode (node)) {
2632 return get_irn_n(node, 0);
2637 assert(0 && "should not be reached");
2642 /* Returns true if the operation is a forking control flow operation. */
2643 int (is_irn_forking)(const ir_node *node) {
2644 return _is_irn_forking(node);
2647 /* Return the type associated with the value produced by n
2648  * if the node remarks this type as it is the case for
2649  * Cast, Const, SymConst and some Proj nodes. */
2650 ir_type *(get_irn_type)(ir_node *node) {
2651 return _get_irn_type(node);
2654 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2656 ir_type *(get_irn_type_attr)(ir_node *node) {
2657 return _get_irn_type_attr(node);
2660 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2661 ir_entity *(get_irn_entity_attr)(ir_node *node) {
2662 return _get_irn_entity_attr(node);
2665 /* Returns non-zero for constant-like nodes. */
2666 int (is_irn_constlike)(const ir_node *node) {
2667 return _is_irn_constlike(node);
2671  * Returns non-zero for nodes that are allowed to have keep-alives and
2672  * are neither Block nor PhiM.
2674 int (is_irn_keep)(const ir_node *node) {
2675 return _is_irn_keep(node);
2679  * Returns non-zero for nodes that are always placed in the start block.
2681 int (is_irn_start_block_placed)(const ir_node *node) {
2682 return _is_irn_start_block_placed(node);
2685 /* Returns non-zero for nodes that are machine operations. */
2686 int (is_irn_machine_op)(const ir_node *node) {
2687 return _is_irn_machine_op(node);
2690 /* Returns non-zero for nodes that are machine operands. */
2691 int (is_irn_machine_operand)(const ir_node *node) {
2692 return _is_irn_machine_operand(node);
2695 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2696 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2697 return _is_irn_machine_user(node, n);
2701 /* Gets the string representation of the jump prediction .*/
2702 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
2705 case COND_JMP_PRED_NONE:  return "no prediction";
2706 case COND_JMP_PRED_TRUE:  return "true taken";
2707 case COND_JMP_PRED_FALSE: return "false taken";
2711 /* Returns the conditional jump prediction of a Cond node. */
2712 cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
2713 return _get_Cond_jmp_pred(cond);
2716 /* Sets a new conditional jump prediction. */
2717 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2718 _set_Cond_jmp_pred(cond, pred);
2721 /** the get_type operation must be always implemented and return a firm type */
2722 static ir_type *get_Default_type(ir_node *n) {
2723 return get_unknown_type();
2726 /* Sets the get_type operation for an ir_op_ops. */
2727 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
/* opcodes that carry a value type get their specific accessor ... */
2729 case iro_Const:    ops->get_type = get_Const_type; break;
2730 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2731 case iro_Cast:     ops->get_type = get_Cast_type; break;
2732 case iro_Proj:     ops->get_type = get_Proj_type; break;
2734 /* not allowed to be NULL */
2735 if (! ops->get_type)
2736 ops->get_type = get_Default_type;
2742 /** Return the attribute type of a SymConst node if exists */
2743 static ir_type *get_SymConst_attr_type(ir_node *self) {
2744 symconst_kind kind = get_SymConst_kind(self);
2745 if (SYMCONST_HAS_TYPE(kind))
2746 return get_SymConst_type(self);
2750 /** Return the attribute entity of a SymConst node if exists */
2751 static ir_entity *get_SymConst_attr_entity(ir_node *self) {
2752 symconst_kind kind = get_SymConst_kind(self);
2753 if (SYMCONST_HAS_ENT(kind))
2754 return get_SymConst_entity(self);
2758 /** the get_type_attr operation must be always implemented */
2759 static ir_type *get_Null_type(ir_node *n) {
2760 return firm_unknown_type;
2763 /* Sets the get_type_attr operation for an ir_op_ops. */
2764 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
2766 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2767 case iro_Call:     ops->get_type_attr = get_Call_type; break;
2768 case iro_Alloc:    ops->get_type_attr = get_Alloc_type; break;
2769 case iro_Free:     ops->get_type_attr = get_Free_type; break;
2770 case iro_Cast:     ops->get_type_attr = get_Cast_type; break;
2772 /* not allowed to be NULL */
2773 if (! ops->get_type_attr)
2774 ops->get_type_attr = get_Null_type;
2780 /** the get_entity_attr operation must be always implemented */
2781 static ir_entity *get_Null_ent(ir_node *n) {
2785 /* Sets the get_entity_attr operation for an ir_op_ops. */
2786 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
2788 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2789 case iro_Sel:      ops->get_entity_attr = get_Sel_entity; break;
2791 /* not allowed to be NULL */
2792 if (! ops->get_entity_attr)
2793 ops->get_entity_attr = get_Null_ent;
2799 #ifdef DEBUG_libfirm
/* Debug helper: print a node, its block (pred -1) and all predecessors. */
2800 void dump_irn(ir_node *n) {
2801 int i, arity = get_irn_arity(n);
2802 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2804 ir_node *pred = get_irn_n(n, -1);
2805 printf("  block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2806 get_irn_node_nr(pred), (void *)pred);
2808 printf("  preds: \n");
2809 for (i = 0; i < arity; ++i) {
2810 ir_node *pred = get_irn_n(n, i);
2811 printf("    %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2812 get_irn_node_nr(pred), (void *)pred);
2816 #else  /* DEBUG_libfirm */
/* Release builds: no-op stub so callers need no #ifdef. */
2817 void dump_irn(ir_node *n) {}
2818 #endif /* DEBUG_libfirm */