3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier, Michael Beck
9 * Copyright: (c) 1998-2006 Universität Karlsruhe
10 * Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredgekinds.h"
31 #include "iredges_t.h"
36 /* some constants fixing the positions of nodes' predecessors
38 #define CALL_PARAM_OFFSET 2
39 #define FUNCCALL_PARAM_OFFSET 1
40 #define SEL_INDEX_OFFSET 2
41 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
42 #define END_KEEPALIVE_OFFSET 0
44 static const char *pnc_name_arr [] = {
45 "pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
46 "pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
47 "pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
48 "pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
52 * Returns the pnc name from a pnc constant.
54 const char *get_pnc_string(int pnc) {
55 return pnc_name_arr[pnc];
59 * Calculates the negated (Complement(R)) pnc condition.
61 int get_negated_pnc(int pnc, ir_mode *mode) {
64 /* do NOT add the Uo bit for non-floating point values */
65 if (! mode_is_float(mode))
71 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
73 get_inversed_pnc(int pnc) {
74 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
75 int lesser = pnc & pn_Cmp_Lt;
76 int greater = pnc & pn_Cmp_Gt;
78 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
83 const char *pns_name_arr [] = {
84 "initial_exec", "global_store",
85 "frame_base", "globals", "args"
88 const char *symconst_name_arr [] = {
89 "type_tag", "size", "addr_name", "addr_ent"
93 * Indicates whether additional data can be registered to ir nodes.
94 * If set to 1, this is not possible anymore.
96 static int forbid_new_data = 0;
99 * The amount of additional space for custom data to be allocated upon
100 * creating a new node.
102 unsigned firm_add_node_size = 0;
105 /* register new space for every node */
106 unsigned register_additional_node_data(unsigned size) {
107 assert(!forbid_new_data && "Too late to register additional node data");
112 return firm_add_node_size += size;
118 /* Forbid the addition of new data to an ir node. */
123 * irnode constructor.
124 * Create a new irnode in irg, with an op, mode, arity and
125 * some incoming irnodes.
126 * If arity is negative, a node with a dynamic array is created.
129 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
130 int arity, ir_node **in)
133 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
137 assert(irg && op && mode);
138 p = obstack_alloc (irg->obst, node_size);
139 memset(p, 0, node_size);
140 res = (ir_node *) (p + firm_add_node_size);
142 res->kind = k_ir_node;
146 res->node_idx = irg_register_node_idx(irg, res);
151 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
153 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
154 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
158 set_irn_dbg_info(res, db);
162 res->node_nr = get_irp_new_node_nr();
165 for(i = 0; i < EDGE_KIND_LAST; ++i)
166 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
168 is_bl = is_Block(res);
169 for (i = is_bl; i <= arity; ++i)
170 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
172 hook_new_node(irg, res);
177 /*-- getting some parameters from ir_nodes --*/
180 (is_ir_node)(const void *thing) {
181 return _is_ir_node(thing);
185 (get_irn_intra_arity)(const ir_node *node) {
186 return _get_irn_intra_arity(node);
190 (get_irn_inter_arity)(const ir_node *node) {
191 return _get_irn_inter_arity(node);
194 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
197 (get_irn_arity)(const ir_node *node) {
198 return _get_irn_arity(node);
201 /* Returns the array with ins. This array is shifted with respect to the
202 array accessed by get_irn_n: The block operand is at position 0 not -1.
203 (@@@ This should be changed.)
204 The order of the predecessors in this array is not guaranteed, except that
205 lists of operands as predecessors of Block or arguments of a Call are
208 get_irn_in (const ir_node *node) {
210 if (get_interprocedural_view()) { /* handle Filter and Block specially */
211 if (get_irn_opcode(node) == iro_Filter) {
212 assert(node->attr.filter.in_cg);
213 return node->attr.filter.in_cg;
214 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
215 return node->attr.block.in_cg;
217 /* else fall through */
223 set_irn_in (ir_node *node, int arity, ir_node **in) {
226 ir_graph *irg = current_ir_graph;
228 if (get_interprocedural_view()) { /* handle Filter and Block specially */
229 if (get_irn_opcode(node) == iro_Filter) {
230 assert(node->attr.filter.in_cg);
231 arr = &node->attr.filter.in_cg;
232 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
233 arr = &node->attr.block.in_cg;
241 for (i = 0; i < arity; i++) {
242 if (i < ARR_LEN(*arr)-1)
243 edges_notify_edge(node, i, in[i], (*arr)[i+1], irg);
245 edges_notify_edge(node, i, in[i], NULL, irg);
247 for(;i < ARR_LEN(*arr)-1; i++) {
248 edges_notify_edge(node, i, NULL, (*arr)[i+1], irg);
251 if (arity != ARR_LEN(*arr) - 1) {
252 ir_node * block = (*arr)[0];
253 *arr = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
256 fix_backedges(irg->obst, node);
258 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
262 (get_irn_intra_n)(const ir_node *node, int n) {
263 return _get_irn_intra_n (node, n);
267 (get_irn_inter_n)(const ir_node *node, int n) {
268 return _get_irn_inter_n (node, n);
271 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
274 (get_irn_n)(const ir_node *node, int n) {
275 return _get_irn_n(node, n);
279 set_irn_n (ir_node *node, int n, ir_node *in) {
280 assert(node && node->kind == k_ir_node);
282 assert(n < get_irn_arity(node));
283 assert(in && in->kind == k_ir_node);
285 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
286 /* Change block pred in both views! */
287 node->in[n + 1] = in;
288 assert(node->attr.filter.in_cg);
289 node->attr.filter.in_cg[n + 1] = in;
292 if (get_interprocedural_view()) { /* handle Filter and Block specially */
293 if (get_irn_opcode(node) == iro_Filter) {
294 assert(node->attr.filter.in_cg);
295 node->attr.filter.in_cg[n + 1] = in;
297 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
298 node->attr.block.in_cg[n + 1] = in;
301 /* else fall through */
305 hook_set_irn_n(node, n, in, node->in[n + 1]);
307 /* Here, we rely on src and tgt being in the current ir graph */
308 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
310 node->in[n + 1] = in;
314 (get_irn_deps)(const ir_node *node)
316 return _get_irn_deps(node);
320 (get_irn_dep)(const ir_node *node, int pos)
322 return _get_irn_dep(node, pos);
326 (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
328 _set_irn_dep(node, pos, dep);
331 int add_irn_dep(ir_node *node, ir_node *dep)
335 if (node->deps == NULL) {
336 node->deps = NEW_ARR_F(ir_node *, 1);
343 for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
344 if(node->deps[i] == NULL)
347 if(node->deps[i] == dep)
351 if(first_zero >= 0) {
352 node->deps[first_zero] = dep;
357 ARR_APP1(ir_node *, node->deps, dep);
362 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
367 void add_irn_deps(ir_node *tgt, ir_node *src)
371 for(i = 0, n = get_irn_deps(src); i < n; ++i)
372 add_irn_dep(tgt, get_irn_dep(src, i));
377 (get_irn_mode)(const ir_node *node) {
378 return _get_irn_mode(node);
382 (set_irn_mode)(ir_node *node, ir_mode *mode)
384 _set_irn_mode(node, mode);
388 get_irn_modecode (const ir_node *node)
391 return node->mode->code;
394 /** Gets the string representation of the mode. */
396 get_irn_modename (const ir_node *node)
399 return get_mode_name(node->mode);
403 get_irn_modeident (const ir_node *node)
406 return get_mode_ident(node->mode);
410 (get_irn_op)(const ir_node *node) {
411 return _get_irn_op(node);
414 /* should be private to the library: */
416 (set_irn_op)(ir_node *node, ir_op *op) {
417 _set_irn_op(node, op);
421 (get_irn_opcode)(const ir_node *node)
423 return _get_irn_opcode(node);
427 get_irn_opname (const ir_node *node)
430 if ((get_irn_op((ir_node *)node) == op_Phi) &&
431 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
432 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
433 return get_id_str(node->op->name);
437 get_irn_opident (const ir_node *node)
440 return node->op->name;
444 (get_irn_visited)(const ir_node *node)
446 return _get_irn_visited(node);
450 (set_irn_visited)(ir_node *node, unsigned long visited)
452 _set_irn_visited(node, visited);
456 (mark_irn_visited)(ir_node *node) {
457 _mark_irn_visited(node);
461 (irn_not_visited)(const ir_node *node) {
462 return _irn_not_visited(node);
466 (irn_visited)(const ir_node *node) {
467 return _irn_visited(node);
471 (set_irn_link)(ir_node *node, void *link) {
472 _set_irn_link(node, link);
476 (get_irn_link)(const ir_node *node) {
477 return _get_irn_link(node);
481 (get_irn_pinned)(const ir_node *node) {
482 return _get_irn_pinned(node);
486 (is_irn_pinned_in_irg) (const ir_node *node) {
487 return _is_irn_pinned_in_irg(node);
490 void set_irn_pinned(ir_node *node, op_pin_state state) {
491 /* due to optimization an opt may be turned into a Tuple */
492 if (get_irn_op(node) == op_Tuple)
495 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
496 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
498 node->attr.except.pin_state = state;
501 #ifdef DO_HEAPANALYSIS
502 /* Access the abstract interpretation information of a node.
503 Returns NULL if no such information is available. */
504 struct abstval *get_irn_abst_value(ir_node *n) {
507 /* Set the abstract interpretation information of a node. */
508 void set_irn_abst_value(ir_node *n, struct abstval *os) {
511 struct section *firm_get_irn_section(ir_node *n) {
514 void firm_set_irn_section(ir_node *n, struct section *s) {
518 /* Dummies needed for firmjni. */
519 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }   /* no heap-analysis info compiled in: nothing to return */
520 void set_irn_abst_value(ir_node *n, struct abstval *os) {}        /* no-op stub */
521 struct section *firm_get_irn_section(ir_node *n) { return NULL; } /* no section info without DO_HEAPANALYSIS */
522 void firm_set_irn_section(ir_node *n, struct section *s) {}       /* no-op stub */
523 #endif /* DO_HEAPANALYSIS */
526 /* Outputs a unique number for this node */
527 long get_irn_node_nr(const ir_node *node) {
530 return node->node_nr;
532 return (long)PTR_TO_INT(node);
537 get_irn_const_attr (ir_node *node)
539 assert(node->op == op_Const);
540 return node->attr.con;
544 get_irn_proj_attr (ir_node *node)
546 assert(node->op == op_Proj);
547 return node->attr.proj;
551 get_irn_alloc_attr (ir_node *node)
553 assert(node->op == op_Alloc);
554 return node->attr.alloc;
558 get_irn_free_attr (ir_node *node)
560 assert(node->op == op_Free);
561 return node->attr.free;
565 get_irn_symconst_attr (ir_node *node)
567 assert(node->op == op_SymConst);
568 return node->attr.symc;
572 get_irn_call_attr (ir_node *node)
574 assert(node->op == op_Call);
575 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
579 get_irn_sel_attr (ir_node *node)
581 assert(node->op == op_Sel);
582 return node->attr.sel;
586 get_irn_phi_attr (ir_node *node)
588 assert(node->op == op_Phi);
589 return node->attr.phi0_pos;
593 get_irn_block_attr (ir_node *node)
595 assert(node->op == op_Block);
596 return node->attr.block;
600 get_irn_load_attr (ir_node *node)
602 assert(node->op == op_Load);
603 return node->attr.load;
607 get_irn_store_attr (ir_node *node)
609 assert(node->op == op_Store);
610 return node->attr.store;
614 get_irn_except_attr (ir_node *node)
616 assert(node->op == op_Div || node->op == op_Quot ||
617 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
618 return node->attr.except;
622 get_irn_generic_attr (ir_node *node) {
626 unsigned (get_irn_idx)(const ir_node *node) {
627 assert(is_ir_node(node));
628 return _get_irn_idx(node);
631 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
633 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
634 if (get_irn_n(node, i) == arg)
640 /** manipulate fields of individual nodes **/
642 /* this works for all except Block */
644 get_nodes_block (const ir_node *node) {
645 assert(!(node->op == op_Block));
646 assert(is_irn_pinned_in_irg(node) && "block info may be incorrect");
647 return get_irn_n(node, -1);
651 set_nodes_block (ir_node *node, ir_node *block) {
652 assert(!(node->op == op_Block));
653 set_irn_n(node, -1, block);
656 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
657 * from Start. If so returns frame type, else Null. */
658 ir_type *is_frame_pointer(ir_node *n) {
659 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
660 ir_node *start = get_Proj_pred(n);
661 if (get_irn_op(start) == op_Start) {
662 return get_irg_frame_type(get_irn_irg(start));
668 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
669 * from Start. If so returns global type, else Null. */
670 ir_type *is_globals_pointer(ir_node *n) {
671 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
672 ir_node *start = get_Proj_pred(n);
673 if (get_irn_op(start) == op_Start) {
674 return get_glob_type();
680 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
681 * from Start. If so returns tls type, else Null. */
682 ir_type *is_tls_pointer(ir_node *n) {
683 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
684 ir_node *start = get_Proj_pred(n);
685 if (get_irn_op(start) == op_Start) {
686 return get_tls_type();
692 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
693 * from Start. If so returns 1, else 0. */
694 int is_value_arg_pointer(ir_node *n) {
695 if ((get_irn_op(n) == op_Proj) &&
696 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
697 (get_irn_op(get_Proj_pred(n)) == op_Start))
702 /* Returns an array with the predecessors of the Block. Depending on
703 the implementation of the graph data structure this can be a copy of
704 the internal representation of predecessors as well as the internal
705 array itself. Therefore writing to this array might obstruct the ir. */
707 get_Block_cfgpred_arr (ir_node *node)
709 assert((node->op == op_Block));
710 return (ir_node **)&(get_irn_in(node)[1]);
714 (get_Block_n_cfgpreds)(const ir_node *node) {
715 return _get_Block_n_cfgpreds(node);
719 (get_Block_cfgpred)(ir_node *node, int pos) {
720 return _get_Block_cfgpred(node, pos);
724 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
725 assert(node->op == op_Block);
726 set_irn_n(node, pos, pred);
730 (get_Block_cfgpred_block)(ir_node *node, int pos) {
731 return _get_Block_cfgpred_block(node, pos);
735 get_Block_matured (ir_node *node) {
736 assert(node->op == op_Block);
737 return (int)node->attr.block.matured;
741 set_Block_matured (ir_node *node, int matured) {
742 assert(node->op == op_Block);
743 node->attr.block.matured = matured;
747 (get_Block_block_visited)(ir_node *node) {
748 return _get_Block_block_visited(node);
752 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
753 _set_Block_block_visited(node, visit);
756 /* For this current_ir_graph must be set. */
758 (mark_Block_block_visited)(ir_node *node) {
759 _mark_Block_block_visited(node);
763 (Block_not_block_visited)(ir_node *node) {
764 return _Block_not_block_visited(node);
768 get_Block_graph_arr (ir_node *node, int pos) {
769 assert(node->op == op_Block);
770 return node->attr.block.graph_arr[pos+1];
774 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
775 assert(node->op == op_Block);
776 node->attr.block.graph_arr[pos+1] = value;
779 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
780 assert(node->op == op_Block);
781 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
782 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
783 node->attr.block.in_cg[0] = NULL;
784 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
786 /* Fix backedge array. fix_backedges() operates depending on
787 interprocedural_view. */
788 int ipv = get_interprocedural_view();
789 set_interprocedural_view(1);
790 fix_backedges(current_ir_graph->obst, node);
791 set_interprocedural_view(ipv);
794 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
797 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
798 assert(node->op == op_Block &&
799 node->attr.block.in_cg &&
800 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
801 node->attr.block.in_cg[pos + 1] = pred;
804 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
805 assert(node->op == op_Block);
806 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
809 int get_Block_cg_n_cfgpreds(ir_node * node) {
810 assert(node->op == op_Block);
811 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
814 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
815 assert(node->op == op_Block && node->attr.block.in_cg);
816 return node->attr.block.in_cg[pos + 1];
819 void remove_Block_cg_cfgpred_arr(ir_node * node) {
820 assert(node->op == op_Block);
821 node->attr.block.in_cg = NULL;
824 ir_node *(set_Block_dead)(ir_node *block) {
825 return _set_Block_dead(block);
828 int (is_Block_dead)(const ir_node *block) {
829 return _is_Block_dead(block);
832 ir_extblk *get_Block_extbb(const ir_node *block) {
834 assert(is_Block(block));
835 res = block->attr.block.extblk;
836 assert(res == NULL || is_ir_extbb(res));
840 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
841 assert(is_Block(block));
842 assert(extblk == NULL || is_ir_extbb(extblk));
843 block->attr.block.extblk = extblk;
847 get_End_n_keepalives(ir_node *end) {
848 assert(end->op == op_End);
849 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
853 get_End_keepalive(ir_node *end, int pos) {
854 assert(end->op == op_End);
855 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
859 add_End_keepalive (ir_node *end, ir_node *ka) {
861 ir_graph *irg = get_irn_irg(end);
863 assert(end->op == op_End);
864 l = ARR_LEN(end->in);
865 ARR_APP1(ir_node *, end->in, ka);
866 edges_notify_edge(end, l - 1, end->in[l], NULL, irg);
870 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
871 assert(end->op == op_End);
872 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
875 /* Set new keep-alives */
876 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
878 ir_graph *irg = get_irn_irg(end);
880 /* notify that edges are deleted */
881 for (i = 1 + END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in); ++i) {
882 edges_notify_edge(end, i, end->in[i], NULL, irg);
884 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
886 for (i = 0; i < n; ++i) {
887 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
888 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, NULL, end->in[1 + END_KEEPALIVE_OFFSET + i], irg);
893 free_End (ir_node *end) {
894 assert(end->op == op_End);
897 end->in = NULL; /* @@@ make sure we get an error if we use the
898 in array afterwards ... */
901 /* Return the target address of an IJmp */
902 ir_node *get_IJmp_target(ir_node *ijmp) {
903 assert(ijmp->op == op_IJmp);
904 return get_irn_n(ijmp, 0);
907 /** Sets the target address of an IJmp */
908 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
909 assert(ijmp->op == op_IJmp);
910 set_irn_n(ijmp, 0, tgt);
914 > Implementing the case construct (which is where the constant Proj node is
915 > important) involves far more than simply determining the constant values.
916 > We could argue that this is more properly a function of the translator from
917 > Firm to the target machine. That could be done if there was some way of
918 > projecting "default" out of the Cond node.
919 I know it's complicated.
920 Basically there are two problems:
921 - determining the gaps between the projs
922 - determining the biggest case constant to know the proj number for
924 I see several solutions:
925 1. Introduce a ProjDefault node. Solves both problems.
926 This means to extend all optimizations executed during construction.
927 2. Give the Cond node for switch two flavors:
928 a) there are no gaps in the projs (existing flavor)
929 b) gaps may exist, default proj is still the Proj with the largest
930 projection number. This also covers the gaps.
931 3. Fix the semantic of the Cond to that of 2b)
933 Solution 2 seems to be the best:
934 Computing the gaps in the Firm representation is not too hard, i.e.,
935 libFIRM can implement a routine that transforms between the two
936 flavours. This is also possible for 1) but 2) does not require to
937 change any existing optimization.
938 Further it should be far simpler to determine the biggest constant than
940 I don't want to choose 3) as 2a) seems to have advantages for
941 dataflow analysis and 3) does not allow to convert the representation to
945 get_Cond_selector (ir_node *node) {
946 assert(node->op == op_Cond);
947 return get_irn_n(node, 0);
951 set_Cond_selector (ir_node *node, ir_node *selector) {
952 assert(node->op == op_Cond);
953 set_irn_n(node, 0, selector);
957 get_Cond_kind (ir_node *node) {
958 assert(node->op == op_Cond);
959 return node->attr.cond.kind;
963 set_Cond_kind (ir_node *node, cond_kind kind) {
964 assert(node->op == op_Cond);
965 node->attr.cond.kind = kind;
969 get_Cond_defaultProj (ir_node *node) {
970 assert(node->op == op_Cond);
971 return node->attr.cond.default_proj;
975 get_Return_mem (ir_node *node) {
976 assert(node->op == op_Return);
977 return get_irn_n(node, 0);
981 set_Return_mem (ir_node *node, ir_node *mem) {
982 assert(node->op == op_Return);
983 set_irn_n(node, 0, mem);
987 get_Return_n_ress (ir_node *node) {
988 assert(node->op == op_Return);
989 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
993 get_Return_res_arr (ir_node *node)
995 assert((node->op == op_Return));
996 if (get_Return_n_ress(node) > 0)
997 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1004 set_Return_n_res (ir_node *node, int results) {
1005 assert(node->op == op_Return);
1010 get_Return_res (ir_node *node, int pos) {
1011 assert(node->op == op_Return);
1012 assert(get_Return_n_ress(node) > pos);
1013 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1017 set_Return_res (ir_node *node, int pos, ir_node *res){
1018 assert(node->op == op_Return);
1019 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1022 tarval *(get_Const_tarval)(ir_node *node) {
1023 return _get_Const_tarval(node);
1027 set_Const_tarval (ir_node *node, tarval *con) {
1028 assert(node->op == op_Const);
1029 node->attr.con.tv = con;
1032 cnst_classify_t (classify_Const)(ir_node *node)
1034 return _classify_Const(node);
1038 /* The source language type. Must be an atomic type. Mode of type must
1039 be mode of node. For tarvals from entities type must be pointer to
1042 get_Const_type (ir_node *node) {
1043 assert(node->op == op_Const);
1044 return node->attr.con.tp;
1048 set_Const_type (ir_node *node, ir_type *tp) {
1049 assert(node->op == op_Const);
1050 if (tp != firm_unknown_type) {
1051 assert(is_atomic_type(tp));
1052 assert(get_type_mode(tp) == get_irn_mode(node));
1054 node->attr.con.tp = tp;
1059 get_SymConst_kind (const ir_node *node) {
1060 assert(node->op == op_SymConst);
1061 return node->attr.symc.num;
1065 set_SymConst_kind (ir_node *node, symconst_kind num) {
1066 assert(node->op == op_SymConst);
1067 node->attr.symc.num = num;
1071 get_SymConst_type (ir_node *node) {
1072 assert( (node->op == op_SymConst)
1073 && (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1074 return node->attr.symc.sym.type_p = skip_tid(node->attr.symc.sym.type_p);
1078 set_SymConst_type (ir_node *node, ir_type *tp) {
1079 assert( (node->op == op_SymConst)
1080 && (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1081 node->attr.symc.sym.type_p = tp;
1085 get_SymConst_name (ir_node *node) {
1086 assert( (node->op == op_SymConst)
1087 && (get_SymConst_kind(node) == symconst_addr_name));
1088 return node->attr.symc.sym.ident_p;
1092 set_SymConst_name (ir_node *node, ident *name) {
1093 assert( (node->op == op_SymConst)
1094 && (get_SymConst_kind(node) == symconst_addr_name));
1095 node->attr.symc.sym.ident_p = name;
1099 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1100 entity *get_SymConst_entity (ir_node *node) {
1101 assert( (node->op == op_SymConst)
1102 && (get_SymConst_kind (node) == symconst_addr_ent));
1103 return node->attr.symc.sym.entity_p;
1106 void set_SymConst_entity (ir_node *node, entity *ent) {
1107 assert( (node->op == op_SymConst)
1108 && (get_SymConst_kind(node) == symconst_addr_ent));
1109 node->attr.symc.sym.entity_p = ent;
1112 ir_enum_const *get_SymConst_enum (ir_node *node) {
1113 assert( (node->op == op_SymConst)
1114 && (get_SymConst_kind (node) == symconst_enum_const));
1115 return node->attr.symc.sym.enum_p;
1118 void set_SymConst_enum (ir_node *node, ir_enum_const *ec) {
1119 assert( (node->op == op_SymConst)
1120 && (get_SymConst_kind(node) == symconst_enum_const));
1121 node->attr.symc.sym.enum_p = ec;
1124 union symconst_symbol
1125 get_SymConst_symbol (ir_node *node) {
1126 assert(node->op == op_SymConst);
1127 return node->attr.symc.sym;
1131 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1132 assert(node->op == op_SymConst);
1133 node->attr.symc.sym = sym;
1137 get_SymConst_value_type (ir_node *node) {
1138 assert(node->op == op_SymConst);
1139 if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
1140 return node->attr.symc.tp;
1144 set_SymConst_value_type (ir_node *node, ir_type *tp) {
1145 assert(node->op == op_SymConst);
1146 node->attr.symc.tp = tp;
1150 get_Sel_mem (ir_node *node) {
1151 assert(node->op == op_Sel);
1152 return get_irn_n(node, 0);
1156 set_Sel_mem (ir_node *node, ir_node *mem) {
1157 assert(node->op == op_Sel);
1158 set_irn_n(node, 0, mem);
1162 get_Sel_ptr (ir_node *node) {
1163 assert(node->op == op_Sel);
1164 return get_irn_n(node, 1);
1168 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1169 assert(node->op == op_Sel);
1170 set_irn_n(node, 1, ptr);
1174 get_Sel_n_indexs (ir_node *node) {
1175 assert(node->op == op_Sel);
1176 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1180 get_Sel_index_arr (ir_node *node)
1182 assert((node->op == op_Sel));
1183 if (get_Sel_n_indexs(node) > 0)
1184 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1190 get_Sel_index (ir_node *node, int pos) {
1191 assert(node->op == op_Sel);
1192 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1196 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1197 assert(node->op == op_Sel);
1198 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1202 get_Sel_entity (ir_node *node) {
1203 assert(node->op == op_Sel);
1204 return node->attr.sel.ent;
1208 set_Sel_entity (ir_node *node, entity *ent) {
1209 assert(node->op == op_Sel);
1210 node->attr.sel.ent = ent;
1214 /* For unary and binary arithmetic operations the access to the
1215 operands can be factored out. Left is the first, right the
1216 second arithmetic value as listed in tech report 0999-33.
1217 unops are: Minus, Abs, Not, Conv, Cast
1218 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1219 Shr, Shrs, Rotate, Cmp */
1223 get_Call_mem (ir_node *node) {
1224 assert(node->op == op_Call);
1225 return get_irn_n(node, 0);
1229 set_Call_mem (ir_node *node, ir_node *mem) {
1230 assert(node->op == op_Call);
1231 set_irn_n(node, 0, mem);
1235 get_Call_ptr (ir_node *node) {
1236 assert(node->op == op_Call);
1237 return get_irn_n(node, 1);
1241 set_Call_ptr (ir_node *node, ir_node *ptr) {
1242 assert(node->op == op_Call);
1243 set_irn_n(node, 1, ptr);
1247 get_Call_param_arr (ir_node *node) {
1248 assert(node->op == op_Call);
1249 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1253 get_Call_n_params (ir_node *node) {
1254 assert(node->op == op_Call);
1255 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1259 get_Call_arity (ir_node *node) {
1260 assert(node->op == op_Call);
1261 return get_Call_n_params(node);
1265 set_Call_arity (ir_node *node, ir_node *arity) {
1266 assert(node->op == op_Call);
1271 get_Call_param (ir_node *node, int pos) {
1272 assert(node->op == op_Call);
1273 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1277 set_Call_param (ir_node *node, int pos, ir_node *param) {
1278 assert(node->op == op_Call);
1279 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1283 get_Call_type (ir_node *node) {
1284 assert(node->op == op_Call);
1285 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1289 set_Call_type (ir_node *node, ir_type *tp) {
1290 assert(node->op == op_Call);
1291 assert((get_unknown_type() == tp) || is_Method_type(tp));
1292 node->attr.call.cld_tp = tp;
1295 int Call_has_callees(ir_node *node) {
1296 assert(node && node->op == op_Call);
1297 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1298 (node->attr.call.callee_arr != NULL));
1301 int get_Call_n_callees(ir_node * node) {
1302 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1303 return ARR_LEN(node->attr.call.callee_arr);
1306 entity * get_Call_callee(ir_node * node, int pos) {
1307 assert(pos >= 0 && pos < get_Call_n_callees(node));
1308 return node->attr.call.callee_arr[pos];
1311 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1312 assert(node->op == op_Call);
1313 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1314 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1316 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1319 void remove_Call_callee_arr(ir_node * node) {
1320 assert(node->op == op_Call);
1321 node->attr.call.callee_arr = NULL;
1324 ir_node * get_CallBegin_ptr (ir_node *node) {
1325 assert(node->op == op_CallBegin);
1326 return get_irn_n(node, 0);
1328 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1329 assert(node->op == op_CallBegin);
1330 set_irn_n(node, 0, ptr);
1332 ir_node * get_CallBegin_call (ir_node *node) {
1333 assert(node->op == op_CallBegin);
1334 return node->attr.callbegin.call;
1336 void set_CallBegin_call (ir_node *node, ir_node *call) {
1337 assert(node->op == op_CallBegin);
1338 node->attr.callbegin.call = call;
1343 ir_node * get_##OP##_left(ir_node *node) { \
1344 assert(node->op == op_##OP); \
1345 return get_irn_n(node, node->op->op_index); \
1347 void set_##OP##_left(ir_node *node, ir_node *left) { \
1348 assert(node->op == op_##OP); \
1349 set_irn_n(node, node->op->op_index, left); \
1351 ir_node *get_##OP##_right(ir_node *node) { \
1352 assert(node->op == op_##OP); \
1353 return get_irn_n(node, node->op->op_index + 1); \
1355 void set_##OP##_right(ir_node *node, ir_node *right) { \
1356 assert(node->op == op_##OP); \
1357 set_irn_n(node, node->op->op_index + 1, right); \
1361 ir_node *get_##OP##_op(ir_node *node) { \
1362 assert(node->op == op_##OP); \
1363 return get_irn_n(node, node->op->op_index); \
1365 void set_##OP##_op (ir_node *node, ir_node *op) { \
1366 assert(node->op == op_##OP); \
1367 set_irn_n(node, node->op->op_index, op); \
1377 get_Quot_mem (ir_node *node) {
1378 assert(node->op == op_Quot);
1379 return get_irn_n(node, 0);
1383 set_Quot_mem (ir_node *node, ir_node *mem) {
1384 assert(node->op == op_Quot);
1385 set_irn_n(node, 0, mem);
1391 get_DivMod_mem (ir_node *node) {
1392 assert(node->op == op_DivMod);
1393 return get_irn_n(node, 0);
1397 set_DivMod_mem (ir_node *node, ir_node *mem) {
1398 assert(node->op == op_DivMod);
1399 set_irn_n(node, 0, mem);
1405 get_Div_mem (ir_node *node) {
1406 assert(node->op == op_Div);
1407 return get_irn_n(node, 0);
1411 set_Div_mem (ir_node *node, ir_node *mem) {
1412 assert(node->op == op_Div);
1413 set_irn_n(node, 0, mem);
1419 get_Mod_mem (ir_node *node) {
1420 assert(node->op == op_Mod);
1421 return get_irn_n(node, 0);
1425 set_Mod_mem (ir_node *node, ir_node *mem) {
1426 assert(node->op == op_Mod);
1427 set_irn_n(node, 0, mem);
1443 int get_Conv_strict(ir_node *node) {
1444 assert(node->op == op_Conv);
1445 return node->attr.conv.strict;
1448 void set_Conv_strict(ir_node *node, int strict_flag) {
1449 assert(node->op == op_Conv);
1450 node->attr.conv.strict = (char)strict_flag;
1454 get_Cast_type (ir_node *node) {
1455 assert(node->op == op_Cast);
1456 return node->attr.cast.totype;
1460 set_Cast_type (ir_node *node, ir_type *to_tp) {
1461 assert(node->op == op_Cast);
1462 node->attr.cast.totype = to_tp;
1466 /* Checks for upcast.
1468 * Returns true if the Cast node casts a class type to a super type.
1470 int is_Cast_upcast(ir_node *node) {
1471 ir_type *totype = get_Cast_type(node);
1472 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1473 ir_graph *myirg = get_irn_irg(node);
1475 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1478 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1479 totype = get_pointer_points_to_type(totype);
1480 fromtype = get_pointer_points_to_type(fromtype);
1485 if (!is_Class_type(totype)) return 0;
1486 return is_SubClass_of(fromtype, totype);
1489 /* Checks for downcast.
1491 * Returns true if the Cast node casts a class type to a sub type.
1493 int is_Cast_downcast(ir_node *node) {
1494 ir_type *totype = get_Cast_type(node);
1495 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1497 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1500 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1501 totype = get_pointer_points_to_type(totype);
1502 fromtype = get_pointer_points_to_type(fromtype);
1507 if (!is_Class_type(totype)) return 0;
1508 return is_SubClass_of(totype, fromtype);
1512 (is_unop)(const ir_node *node) {
1513 return _is_unop(node);
1517 get_unop_op (ir_node *node) {
1518 if (node->op->opar == oparity_unary)
1519 return get_irn_n(node, node->op->op_index);
1521 assert(node->op->opar == oparity_unary);
1526 set_unop_op (ir_node *node, ir_node *op) {
1527 if (node->op->opar == oparity_unary)
1528 set_irn_n(node, node->op->op_index, op);
1530 assert(node->op->opar == oparity_unary);
1534 (is_binop)(const ir_node *node) {
1535 return _is_binop(node);
1539 get_binop_left (ir_node *node) {
1540 if (node->op->opar == oparity_binary)
1541 return get_irn_n(node, node->op->op_index);
1543 assert(node->op->opar == oparity_binary);
1548 set_binop_left (ir_node *node, ir_node *left) {
1549 if (node->op->opar == oparity_binary)
1550 set_irn_n(node, node->op->op_index, left);
1552 assert(node->op->opar == oparity_binary);
1556 get_binop_right (ir_node *node) {
1557 if (node->op->opar == oparity_binary)
1558 return get_irn_n(node, node->op->op_index + 1);
1560 assert(node->op->opar == oparity_binary);
1565 set_binop_right (ir_node *node, ir_node *right) {
1566 if (node->op->opar == oparity_binary)
1567 set_irn_n(node, node->op->op_index + 1, right);
1569 assert(node->op->opar == oparity_binary);
1572 int is_Phi (const ir_node *n) {
1578 if (op == op_Filter) return get_interprocedural_view();
1581 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1582 (get_irn_arity(n) > 0));
1587 int is_Phi0 (const ir_node *n) {
1590 return ((get_irn_op(n) == op_Phi) &&
1591 (get_irn_arity(n) == 0) &&
1592 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1596 get_Phi_preds_arr (ir_node *node) {
1597 assert(node->op == op_Phi);
1598 return (ir_node **)&(get_irn_in(node)[1]);
1602 get_Phi_n_preds (ir_node *node) {
1603 assert(is_Phi(node) || is_Phi0(node));
1604 return (get_irn_arity(node));
1608 void set_Phi_n_preds (ir_node *node, int n_preds) {
1609 assert(node->op == op_Phi);
1614 get_Phi_pred (ir_node *node, int pos) {
1615 assert(is_Phi(node) || is_Phi0(node));
1616 return get_irn_n(node, pos);
1620 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1621 assert(is_Phi(node) || is_Phi0(node));
1622 set_irn_n(node, pos, pred);
1626 int is_memop(ir_node *node) {
1627 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1630 ir_node *get_memop_mem (ir_node *node) {
1631 assert(is_memop(node));
1632 return get_irn_n(node, 0);
1635 void set_memop_mem (ir_node *node, ir_node *mem) {
1636 assert(is_memop(node));
1637 set_irn_n(node, 0, mem);
1640 ir_node *get_memop_ptr (ir_node *node) {
1641 assert(is_memop(node));
1642 return get_irn_n(node, 1);
1645 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1646 assert(is_memop(node));
1647 set_irn_n(node, 1, ptr);
1651 get_Load_mem (ir_node *node) {
1652 assert(node->op == op_Load);
1653 return get_irn_n(node, 0);
1657 set_Load_mem (ir_node *node, ir_node *mem) {
1658 assert(node->op == op_Load);
1659 set_irn_n(node, 0, mem);
1663 get_Load_ptr (ir_node *node) {
1664 assert(node->op == op_Load);
1665 return get_irn_n(node, 1);
1669 set_Load_ptr (ir_node *node, ir_node *ptr) {
1670 assert(node->op == op_Load);
1671 set_irn_n(node, 1, ptr);
1675 get_Load_mode (ir_node *node) {
1676 assert(node->op == op_Load);
1677 return node->attr.load.load_mode;
1681 set_Load_mode (ir_node *node, ir_mode *mode) {
1682 assert(node->op == op_Load);
1683 node->attr.load.load_mode = mode;
1687 get_Load_volatility (ir_node *node) {
1688 assert(node->op == op_Load);
1689 return node->attr.load.volatility;
1693 set_Load_volatility (ir_node *node, ir_volatility volatility) {
1694 assert(node->op == op_Load);
1695 node->attr.load.volatility = volatility;
1700 get_Store_mem (ir_node *node) {
1701 assert(node->op == op_Store);
1702 return get_irn_n(node, 0);
1706 set_Store_mem (ir_node *node, ir_node *mem) {
1707 assert(node->op == op_Store);
1708 set_irn_n(node, 0, mem);
1712 get_Store_ptr (ir_node *node) {
1713 assert(node->op == op_Store);
1714 return get_irn_n(node, 1);
1718 set_Store_ptr (ir_node *node, ir_node *ptr) {
1719 assert(node->op == op_Store);
1720 set_irn_n(node, 1, ptr);
1724 get_Store_value (ir_node *node) {
1725 assert(node->op == op_Store);
1726 return get_irn_n(node, 2);
1730 set_Store_value (ir_node *node, ir_node *value) {
1731 assert(node->op == op_Store);
1732 set_irn_n(node, 2, value);
1736 get_Store_volatility (ir_node *node) {
1737 assert(node->op == op_Store);
1738 return node->attr.store.volatility;
1742 set_Store_volatility (ir_node *node, ir_volatility volatility) {
1743 assert(node->op == op_Store);
1744 node->attr.store.volatility = volatility;
1749 get_Alloc_mem (ir_node *node) {
1750 assert(node->op == op_Alloc);
1751 return get_irn_n(node, 0);
1755 set_Alloc_mem (ir_node *node, ir_node *mem) {
1756 assert(node->op == op_Alloc);
1757 set_irn_n(node, 0, mem);
1761 get_Alloc_size (ir_node *node) {
1762 assert(node->op == op_Alloc);
1763 return get_irn_n(node, 1);
1767 set_Alloc_size (ir_node *node, ir_node *size) {
1768 assert(node->op == op_Alloc);
1769 set_irn_n(node, 1, size);
1773 get_Alloc_type (ir_node *node) {
1774 assert(node->op == op_Alloc);
1775 return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
1779 set_Alloc_type (ir_node *node, ir_type *tp) {
1780 assert(node->op == op_Alloc);
1781 node->attr.alloc.type = tp;
1785 get_Alloc_where (ir_node *node) {
1786 assert(node->op == op_Alloc);
1787 return node->attr.alloc.where;
1791 set_Alloc_where (ir_node *node, where_alloc where) {
1792 assert(node->op == op_Alloc);
1793 node->attr.alloc.where = where;
1798 get_Free_mem (ir_node *node) {
1799 assert(node->op == op_Free);
1800 return get_irn_n(node, 0);
1804 set_Free_mem (ir_node *node, ir_node *mem) {
1805 assert(node->op == op_Free);
1806 set_irn_n(node, 0, mem);
1810 get_Free_ptr (ir_node *node) {
1811 assert(node->op == op_Free);
1812 return get_irn_n(node, 1);
1816 set_Free_ptr (ir_node *node, ir_node *ptr) {
1817 assert(node->op == op_Free);
1818 set_irn_n(node, 1, ptr);
1822 get_Free_size (ir_node *node) {
1823 assert(node->op == op_Free);
1824 return get_irn_n(node, 2);
1828 set_Free_size (ir_node *node, ir_node *size) {
1829 assert(node->op == op_Free);
1830 set_irn_n(node, 2, size);
1834 get_Free_type (ir_node *node) {
1835 assert(node->op == op_Free);
1836 return node->attr.free.type = skip_tid(node->attr.free.type);
1840 set_Free_type (ir_node *node, ir_type *tp) {
1841 assert(node->op == op_Free);
1842 node->attr.free.type = tp;
1846 get_Free_where (ir_node *node) {
1847 assert(node->op == op_Free);
1848 return node->attr.free.where;
1852 set_Free_where (ir_node *node, where_alloc where) {
1853 assert(node->op == op_Free);
1854 node->attr.free.where = where;
1857 ir_node **get_Sync_preds_arr (ir_node *node) {
1858 assert(node->op == op_Sync);
1859 return (ir_node **)&(get_irn_in(node)[1]);
1862 int get_Sync_n_preds (ir_node *node) {
1863 assert(node->op == op_Sync);
1864 return (get_irn_arity(node));
1868 void set_Sync_n_preds (ir_node *node, int n_preds) {
1869 assert(node->op == op_Sync);
1873 ir_node *get_Sync_pred (ir_node *node, int pos) {
1874 assert(node->op == op_Sync);
1875 return get_irn_n(node, pos);
1878 void set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1879 assert(node->op == op_Sync);
1880 set_irn_n(node, pos, pred);
1883 /* Add a new Sync predecessor */
1884 void add_Sync_pred (ir_node *node, ir_node *pred) {
1886 ir_graph *irg = get_irn_irg(node);
1888 assert(node->op == op_Sync);
1889 l = ARR_LEN(node->in);
1890 ARR_APP1(ir_node *, node->in, pred);
1891 edges_notify_edge(node, l, node->in[l], NULL, irg);
1894 /* Returns the source language type of a Proj node. */
1895 ir_type *get_Proj_type(ir_node *n)
1897 ir_type *tp = firm_unknown_type;
1898 ir_node *pred = get_Proj_pred(n);
1900 switch (get_irn_opcode(pred)) {
1903 /* Deal with Start / Call here: we need to know the Proj Nr. */
1904 assert(get_irn_mode(pred) == mode_T);
1905 pred_pred = get_Proj_pred(pred);
1906 if (get_irn_op(pred_pred) == op_Start) {
1907 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1908 tp = get_method_param_type(mtp, get_Proj_proj(n));
1909 } else if (get_irn_op(pred_pred) == op_Call) {
1910 ir_type *mtp = get_Call_type(pred_pred);
1911 tp = get_method_res_type(mtp, get_Proj_proj(n));
1914 case iro_Start: break;
1915 case iro_Call: break;
1917 ir_node *a = get_Load_ptr(pred);
1919 tp = get_entity_type(get_Sel_entity(a));
1928 get_Proj_pred (const ir_node *node) {
1929 assert(is_Proj(node));
1930 return get_irn_n(node, 0);
1934 set_Proj_pred (ir_node *node, ir_node *pred) {
1935 assert(is_Proj(node));
1936 set_irn_n(node, 0, pred);
1940 get_Proj_proj (const ir_node *node) {
1941 assert(is_Proj(node));
1942 if (get_irn_opcode(node) == iro_Proj) {
1943 return node->attr.proj;
1945 assert(get_irn_opcode(node) == iro_Filter);
1946 return node->attr.filter.proj;
1951 set_Proj_proj (ir_node *node, long proj) {
1952 assert(node->op == op_Proj);
1953 node->attr.proj = proj;
1957 get_Tuple_preds_arr (ir_node *node) {
1958 assert(node->op == op_Tuple);
1959 return (ir_node **)&(get_irn_in(node)[1]);
1963 get_Tuple_n_preds (ir_node *node) {
1964 assert(node->op == op_Tuple);
1965 return (get_irn_arity(node));
1970 set_Tuple_n_preds (ir_node *node, int n_preds) {
1971 assert(node->op == op_Tuple);
1976 get_Tuple_pred (ir_node *node, int pos) {
1977 assert(node->op == op_Tuple);
1978 return get_irn_n(node, pos);
1982 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1983 assert(node->op == op_Tuple);
1984 set_irn_n(node, pos, pred);
1988 get_Id_pred (ir_node *node) {
1989 assert(node->op == op_Id);
1990 return get_irn_n(node, 0);
1994 set_Id_pred (ir_node *node, ir_node *pred) {
1995 assert(node->op == op_Id);
1996 set_irn_n(node, 0, pred);
1999 ir_node *get_Confirm_value (ir_node *node) {
2000 assert(node->op == op_Confirm);
2001 return get_irn_n(node, 0);
2003 void set_Confirm_value (ir_node *node, ir_node *value) {
2004 assert(node->op == op_Confirm);
2005 set_irn_n(node, 0, value);
2007 ir_node *get_Confirm_bound (ir_node *node) {
2008 assert(node->op == op_Confirm);
2009 return get_irn_n(node, 1);
2011 void set_Confirm_bound (ir_node *node, ir_node *bound) {
2012 assert(node->op == op_Confirm);
2013 set_irn_n(node, 0, bound);
2015 pn_Cmp get_Confirm_cmp (ir_node *node) {
2016 assert(node->op == op_Confirm);
2017 return node->attr.confirm_cmp;
2019 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
2020 assert(node->op == op_Confirm);
2021 node->attr.confirm_cmp = cmp;
2026 get_Filter_pred (ir_node *node) {
2027 assert(node->op == op_Filter);
2031 set_Filter_pred (ir_node *node, ir_node *pred) {
2032 assert(node->op == op_Filter);
2036 get_Filter_proj(ir_node *node) {
2037 assert(node->op == op_Filter);
2038 return node->attr.filter.proj;
2041 set_Filter_proj (ir_node *node, long proj) {
2042 assert(node->op == op_Filter);
2043 node->attr.filter.proj = proj;
2046 /* Don't use get_irn_arity, get_irn_n in implementation as access
2047 shall work independent of view!!! */
2048 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
2049 assert(node->op == op_Filter);
2050 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2051 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2052 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
2053 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
2054 node->attr.filter.in_cg[0] = node->in[0];
2056 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2059 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2060 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2061 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2062 node->attr.filter.in_cg[pos + 1] = pred;
2064 int get_Filter_n_cg_preds(ir_node *node) {
2065 assert(node->op == op_Filter && node->attr.filter.in_cg);
2066 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2068 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2070 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2072 arity = ARR_LEN(node->attr.filter.in_cg);
2073 assert(pos < arity - 1);
2074 return node->attr.filter.in_cg[pos + 1];
2078 ir_node *get_Mux_sel (ir_node *node) {
2079 if (node->op == op_Psi) {
2080 assert(get_irn_arity(node) == 3);
2081 return get_Psi_cond(node, 0);
2083 assert(node->op == op_Mux);
2086 void set_Mux_sel (ir_node *node, ir_node *sel) {
2087 if (node->op == op_Psi) {
2088 assert(get_irn_arity(node) == 3);
2089 set_Psi_cond(node, 0, sel);
2092 assert(node->op == op_Mux);
2097 ir_node *get_Mux_false (ir_node *node) {
2098 if (node->op == op_Psi) {
2099 assert(get_irn_arity(node) == 3);
2100 return get_Psi_default(node);
2102 assert(node->op == op_Mux);
2105 void set_Mux_false (ir_node *node, ir_node *ir_false) {
2106 if (node->op == op_Psi) {
2107 assert(get_irn_arity(node) == 3);
2108 set_Psi_default(node, ir_false);
2111 assert(node->op == op_Mux);
2112 node->in[2] = ir_false;
2116 ir_node *get_Mux_true (ir_node *node) {
2117 if (node->op == op_Psi) {
2118 assert(get_irn_arity(node) == 3);
2119 return get_Psi_val(node, 0);
2121 assert(node->op == op_Mux);
2124 void set_Mux_true (ir_node *node, ir_node *ir_true) {
2125 if (node->op == op_Psi) {
2126 assert(get_irn_arity(node) == 3);
2127 set_Psi_val(node, 0, ir_true);
2130 assert(node->op == op_Mux);
2131 node->in[3] = ir_true;
2136 ir_node *get_Psi_cond (ir_node *node, int pos) {
2137 int num_conds = get_Psi_n_conds(node);
2138 assert(node->op == op_Psi);
2139 assert(pos < num_conds);
2140 return get_irn_n(node, 2 * pos);
2143 void set_Psi_cond (ir_node *node, int pos, ir_node *cond) {
2144 int num_conds = get_Psi_n_conds(node);
2145 assert(node->op == op_Psi);
2146 assert(pos < num_conds);
2147 set_irn_n(node, 2 * pos, cond);
2150 ir_node *get_Psi_val (ir_node *node, int pos) {
2151 int num_vals = get_Psi_n_conds(node);
2152 assert(node->op == op_Psi);
2153 assert(pos < num_vals);
2154 return get_irn_n(node, 2 * pos + 1);
2157 void set_Psi_val (ir_node *node, int pos, ir_node *val) {
2158 int num_vals = get_Psi_n_conds(node);
2159 assert(node->op == op_Psi);
2160 assert(pos < num_vals);
2161 set_irn_n(node, 2 * pos + 1, val);
2164 ir_node *get_Psi_default(ir_node *node) {
2165 int def_pos = get_irn_arity(node) - 1;
2166 assert(node->op == op_Psi);
2167 return get_irn_n(node, def_pos);
2170 void set_Psi_default(ir_node *node, ir_node *val) {
2171 int def_pos = get_irn_arity(node);
2172 assert(node->op == op_Psi);
2173 set_irn_n(node, def_pos, val);
/** Returns the number of conditions of a Psi node (out-of-line version of
 *  the _get_Psi_n_conds macro; the parenthesized name suppresses macro
 *  expansion at the definition). */
int (get_Psi_n_conds)(ir_node *node) {
  return _get_Psi_n_conds(node);
}
2181 ir_node *get_CopyB_mem (ir_node *node) {
2182 assert(node->op == op_CopyB);
2183 return get_irn_n(node, 0);
2186 void set_CopyB_mem (ir_node *node, ir_node *mem) {
2187 assert(node->op == op_CopyB);
2188 set_irn_n(node, 0, mem);
2191 ir_node *get_CopyB_dst (ir_node *node) {
2192 assert(node->op == op_CopyB);
2193 return get_irn_n(node, 1);
2196 void set_CopyB_dst (ir_node *node, ir_node *dst) {
2197 assert(node->op == op_CopyB);
2198 set_irn_n(node, 1, dst);
2201 ir_node *get_CopyB_src (ir_node *node) {
2202 assert(node->op == op_CopyB);
2203 return get_irn_n(node, 2);
2206 void set_CopyB_src (ir_node *node, ir_node *src) {
2207 assert(node->op == op_CopyB);
2208 set_irn_n(node, 2, src);
2211 ir_type *get_CopyB_type(ir_node *node) {
2212 assert(node->op == op_CopyB);
2213 return node->attr.copyb.data_type;
2216 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2217 assert(node->op == op_CopyB && data_type);
2218 node->attr.copyb.data_type = data_type;
2223 get_InstOf_type (ir_node *node) {
2224 assert(node->op = op_InstOf);
2225 return node->attr.instof.type;
2229 set_InstOf_type (ir_node *node, ir_type *type) {
2230 assert(node->op = op_InstOf);
2231 node->attr.instof.type = type;
2235 get_InstOf_store (ir_node *node) {
2236 assert(node->op = op_InstOf);
2237 return get_irn_n(node, 0);
2241 set_InstOf_store (ir_node *node, ir_node *obj) {
2242 assert(node->op = op_InstOf);
2243 set_irn_n(node, 0, obj);
2247 get_InstOf_obj (ir_node *node) {
2248 assert(node->op = op_InstOf);
2249 return get_irn_n(node, 1);
2253 set_InstOf_obj (ir_node *node, ir_node *obj) {
2254 assert(node->op = op_InstOf);
2255 set_irn_n(node, 1, obj);
2258 /* Returns the memory input of a Raise operation. */
2260 get_Raise_mem (ir_node *node) {
2261 assert(node->op == op_Raise);
2262 return get_irn_n(node, 0);
2266 set_Raise_mem (ir_node *node, ir_node *mem) {
2267 assert(node->op == op_Raise);
2268 set_irn_n(node, 0, mem);
2272 get_Raise_exo_ptr (ir_node *node) {
2273 assert(node->op == op_Raise);
2274 return get_irn_n(node, 1);
2278 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
2279 assert(node->op == op_Raise);
2280 set_irn_n(node, 1, exo_ptr);
2285 /* Returns the memory input of a Bound operation. */
2286 ir_node *get_Bound_mem(ir_node *bound) {
2287 assert(bound->op == op_Bound);
2288 return get_irn_n(bound, 0);
2291 void set_Bound_mem (ir_node *bound, ir_node *mem) {
2292 assert(bound->op == op_Bound);
2293 set_irn_n(bound, 0, mem);
2296 /* Returns the index input of a Bound operation. */
2297 ir_node *get_Bound_index(ir_node *bound) {
2298 assert(bound->op == op_Bound);
2299 return get_irn_n(bound, 1);
2302 void set_Bound_index(ir_node *bound, ir_node *idx) {
2303 assert(bound->op == op_Bound);
2304 set_irn_n(bound, 1, idx);
2307 /* Returns the lower bound input of a Bound operation. */
2308 ir_node *get_Bound_lower(ir_node *bound) {
2309 assert(bound->op == op_Bound);
2310 return get_irn_n(bound, 2);
2313 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2314 assert(bound->op == op_Bound);
2315 set_irn_n(bound, 2, lower);
2318 /* Returns the upper bound input of a Bound operation. */
2319 ir_node *get_Bound_upper(ir_node *bound) {
2320 assert(bound->op == op_Bound);
2321 return get_irn_n(bound, 3);
2324 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2325 assert(bound->op == op_Bound);
2326 set_irn_n(bound, 3, upper);
2329 /* Return the operand of a Pin node. */
2330 ir_node *get_Pin_op(ir_node *pin) {
2331 assert(pin->op == op_Pin);
2332 return get_irn_n(pin, 0);
2335 void set_Pin_op(ir_node *pin, ir_node *node) {
2336 assert(pin->op == op_Pin);
2337 set_irn_n(pin, 0, node);
2341 /* returns the graph of a node */
2343 get_irn_irg(const ir_node *node) {
2345 * Do not use get_nodes_Block() here, because this
2346 * will check the pinned state.
2347 * However even a 'wrong' block is always in the proper
2350 if (! is_Block(node))
2351 node = get_irn_n(node, -1);
2352 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2353 node = get_irn_n(node, -1);
2354 assert(get_irn_op(node) == op_Block);
2355 return node->attr.block.irg;
2359 /*----------------------------------------------------------------*/
2360 /* Auxiliary routines */
2361 /*----------------------------------------------------------------*/
2364 skip_Proj (ir_node *node) {
2365 /* don't assert node !!! */
2366 if (node && is_Proj(node)) {
2367 return get_Proj_pred(node);
2374 skip_Tuple (ir_node *node) {
2378 if (!get_opt_normalize()) return node;
2381 node = skip_Id(node);
2382 if (get_irn_op(node) == op_Proj) {
2383 pred = skip_Id(get_Proj_pred(node));
2384 op = get_irn_op(pred);
2387 * Looks strange but calls get_irn_op() only once
2388 * in most often cases.
2390 if (op == op_Proj) { /* nested Tuple ? */
2391 pred = skip_Id(skip_Tuple(pred));
2392 op = get_irn_op(pred);
2394 if (op == op_Tuple) {
2395 node = get_Tuple_pred(pred, get_Proj_proj(node));
2399 else if (op == op_Tuple) {
2400 node = get_Tuple_pred(pred, get_Proj_proj(node));
2407 /* returns operand of node if node is a Cast */
2408 ir_node *skip_Cast (ir_node *node) {
2409 if (node && get_irn_op(node) == op_Cast)
2410 return get_Cast_op(node);
2414 /* returns operand of node if node is a Confirm */
2415 ir_node *skip_Confirm (ir_node *node) {
2416 if (node && get_irn_op(node) == op_Confirm)
2417 return get_Confirm_value(node);
2421 /* skip all high-level ops */
2422 ir_node *skip_HighLevel(ir_node *node) {
2423 if (node && is_op_highlevel(get_irn_op(node)))
2424 return get_irn_n(node, 0);
2429 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2430 * than any other approach, as Id chains are resolved and all point to the real node, or
2431 * all id's are self loops.
2433 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2436 skip_Id (ir_node *node) {
2437 /* don't assert node !!! */
2439 /* Don't use get_Id_pred: We get into an endless loop for
2440 self-referencing Ids. */
2441 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2442 ir_node *rem_pred = node->in[0+1];
2445 assert(get_irn_arity (node) > 0);
2447 node->in[0+1] = node;
2448 res = skip_Id(rem_pred);
2449 if (res->op == op_Id) /* self-loop */ return node;
2451 node->in[0+1] = res;
2458 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2459 * than any other approach, as Id chains are resolved and all point to the real node, or
2460 * all id's are self loops.
2462 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2463 * a little bit "hand optimized".
2465 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2468 skip_Id (ir_node *node) {
2470 /* don't assert node !!! */
2472 if (!node || (node->op != op_Id)) return node;
2474 /* Don't use get_Id_pred(): We get into an endless loop for
2475 self-referencing Ids. */
2476 pred = node->in[0+1];
2478 if (pred->op != op_Id) return pred;
2480 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2481 ir_node *rem_pred, *res;
2483 if (pred->op != op_Id) return pred; /* shortcut */
2486 assert(get_irn_arity (node) > 0);
2488 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2489 res = skip_Id(rem_pred);
2490 if (res->op == op_Id) /* self-loop */ return node;
2492 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2500 void skip_Id_and_store(ir_node **node) {
2503 if (!n || (n->op != op_Id)) return;
2505 /* Don't use get_Id_pred(): We get into an endless loop for
2506 self-referencing Ids. */
/* Out-of-line versions of the _is_X() macros from the _t header.  The
 * parenthesized function names suppress expansion of the same-named macros
 * so a real, linkable function is emitted. */

/* returns true if node is a Bad node. */
int (is_Bad)(const ir_node *node) {
  return _is_Bad(node);
}

/* returns true if node is a Const node. */
int (is_Const)(const ir_node *node) {
  return _is_Const(node);
}

/* returns true if node is NOT a Block node. */
int (is_no_Block)(const ir_node *node) {
  return _is_no_Block(node);
}

/* returns true if node is a Block node. */
int (is_Block)(const ir_node *node) {
  return _is_Block(node);
}

/* returns true if node is an Unknown node. */
int (is_Unknown)(const ir_node *node) {
  return _is_Unknown(node);
}

/* returns true if node is a Return node. */
int (is_Return)(const ir_node *node) {
  return _is_Return(node);
}

/* returns true if node is a Call node. */
int (is_Call)(const ir_node *node) {
  return _is_Call(node);
}

/* returns true if node is a Sel node. */
int (is_Sel)(const ir_node *node) {
  return _is_Sel(node);
}

/* returns true if node is a Mux node or a Psi with only one condition. */
int (is_Mux)(const ir_node *node) {
  return _is_Mux(node);
}

/* returns true if node is a Load node. */
int (is_Load)(const ir_node *node) {
  return _is_Load(node);
}

/* returns true if node is a Sync node. */
int (is_Sync)(const ir_node *node) {
  return _is_Sync(node);
}

/* returns true if node is a Confirm node. */
int (is_Confirm)(const ir_node *node) {
  return _is_Confirm(node);
}

/* returns true if node is a Pin node. */
int (is_Pin)(const ir_node *node) {
  return _is_Pin(node);
}

/* returns true if node is a SymConst node. */
int (is_SymConst)(const ir_node *node) {
  return _is_SymConst(node);
}

/* returns true if node is a Cond node. */
int (is_Cond)(const ir_node *node) {
  return _is_Cond(node);
}

/* returns true if node is a Cmp node. */
int (is_Cmp)(const ir_node *node) {
  return _is_Cmp(node);
}

/* returns true if node is an Alloc node. */
int (is_Alloc)(const ir_node *node) {
  return _is_Alloc(node);
}

/* returns true if a node is a Jmp node. */
int (is_Jmp)(const ir_node *node) {
  return _is_Jmp(node);
}
2615 is_Proj (const ir_node *node) {
2617 return node->op == op_Proj
2618 || (!get_interprocedural_view() && node->op == op_Filter);
2621 /* Returns true if the operation manipulates control flow. */
2623 is_cfop(const ir_node *node) {
2624 return is_cfopcode(get_irn_op(node));
2627 /* Returns true if the operation manipulates interprocedural control flow:
2628 CallBegin, EndReg, EndExcept */
2629 int is_ip_cfop(const ir_node *node) {
2630 return is_ip_cfopcode(get_irn_op(node));
2633 /* Returns true if the operation can change the control flow because
2636 is_fragile_op(const ir_node *node) {
2637 return is_op_fragile(get_irn_op(node));
2640 /* Returns the memory operand of fragile operations. */
2641 ir_node *get_fragile_op_mem(ir_node *node) {
2642 assert(node && is_fragile_op(node));
2644 switch (get_irn_opcode (node)) {
2654 return get_irn_n(node, 0);
2659 assert(0 && "should not be reached");
/* Returns true if the operation is a forking control flow operation. */
int (is_irn_forking)(const ir_node *node) {
  return _is_irn_forking(node);
}

/* Return the type associated with the value produced by n
 * if the node remarks this type as it is the case for
 * Cast, Const, SymConst and some Proj nodes. */
ir_type *(get_irn_type)(ir_node *node) {
  return _get_irn_type(node);
}

/* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
   Cast) or NULL.*/
ir_type *(get_irn_type_attr)(ir_node *node) {
  return _get_irn_type_attr(node);
}

/* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
entity *(get_irn_entity_attr)(ir_node *node) {
  return _get_irn_entity_attr(node);
}

/* Returns non-zero for constant-like nodes. */
int (is_irn_constlike)(const ir_node *node) {
  return _is_irn_constlike(node);
}

/*
 * Returns non-zero for nodes that are allowed to have keep-alives and
 * are neither Block nor PhiM.
 */
int (is_irn_keep)(const ir_node *node) {
  return _is_irn_keep(node);
}

/* Returns non-zero for nodes that are machine operations. */
int (is_irn_machine_op)(const ir_node *node) {
  return _is_irn_machine_op(node);
}

/* Returns non-zero for nodes that are machine operands. */
int (is_irn_machine_operand)(const ir_node *node) {
  return _is_irn_machine_operand(node);
}

/* Returns non-zero for nodes that have the n'th user machine flag set. */
int (is_irn_machine_user)(const ir_node *node, unsigned n) {
  return _is_irn_machine_user(node, n);
}
2716 /* Gets the string representation of the jump prediction .*/
2717 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
2721 case COND_JMP_PRED_NONE: return "no prediction";
2722 case COND_JMP_PRED_TRUE: return "true taken";
2723 case COND_JMP_PRED_FALSE: return "false taken";
/* Returns the conditional jump prediction of a Cond node. */
cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
  return _get_Cond_jmp_pred(cond);
}

/* Sets a new conditional jump prediction. */
void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
  _set_Cond_jmp_pred(cond, pred);
}
2737 /** the get_type operation must be always implemented and return a firm type */
2738 static ir_type *get_Default_type(ir_node *n) {
2739 return get_unknown_type();
2742 /* Sets the get_type operation for an ir_op_ops. */
2743 ir_op_ops *firm_set_default_get_type(opcode code, ir_op_ops *ops)
2746 case iro_Const: ops->get_type = get_Const_type; break;
2747 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2748 case iro_Cast: ops->get_type = get_Cast_type; break;
2749 case iro_Proj: ops->get_type = get_Proj_type; break;
2751 /* not allowed to be NULL */
2752 if (! ops->get_type)
2753 ops->get_type = get_Default_type;
2759 /** Return the attribute type of a SymConst node if exists */
2760 static ir_type *get_SymConst_attr_type(ir_node *self) {
2761 symconst_kind kind = get_SymConst_kind(self);
2762 if (SYMCONST_HAS_TYPE(kind))
2763 return get_SymConst_type(self);
2767 /** Return the attribute entity of a SymConst node if exists */
2768 static entity *get_SymConst_attr_entity(ir_node *self) {
2769 symconst_kind kind = get_SymConst_kind(self);
2770 if (SYMCONST_HAS_ENT(kind))
2771 return get_SymConst_entity(self);
2775 /** the get_type_attr operation must be always implemented */
2776 static ir_type *get_Null_type(ir_node *n) {
2777 return firm_unknown_type;
2780 /* Sets the get_type operation for an ir_op_ops. */
2781 ir_op_ops *firm_set_default_get_type_attr(opcode code, ir_op_ops *ops)
2784 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2785 case iro_Call: ops->get_type_attr = get_Call_type; break;
2786 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2787 case iro_Free: ops->get_type_attr = get_Free_type; break;
2788 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2790 /* not allowed to be NULL */
2791 if (! ops->get_type_attr)
2792 ops->get_type_attr = get_Null_type;
2798 /** the get_entity_attr operation must be always implemented */
2799 static entity *get_Null_ent(ir_node *n) {
2803 /* Sets the get_type operation for an ir_op_ops. */
2804 ir_op_ops *firm_set_default_get_entity_attr(opcode code, ir_op_ops *ops)
2807 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2808 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
2810 /* not allowed to be NULL */
2811 if (! ops->get_entity_attr)
2812 ops->get_entity_attr = get_Null_ent;
2818 #ifdef DEBUG_libfirm
2819 void dump_irn (ir_node *n) {
2820 int i, arity = get_irn_arity(n);
2821 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2823 ir_node *pred = get_irn_n(n, -1);
2824 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2825 get_irn_node_nr(pred), (void *)pred);
2827 printf(" preds: \n");
2828 for (i = 0; i < arity; ++i) {
2829 ir_node *pred = get_irn_n(n, i);
2830 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2831 get_irn_node_nr(pred), (void *)pred);
2835 #else /* DEBUG_libfirm */
2836 void dump_irn (ir_node *n) {}
2837 #endif /* DEBUG_libfirm */