3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier, Michael Beck
9 * Copyright: (c) 1998-2006 Universität Karlsruhe
 * Licence:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredgekinds.h"
31 #include "iredges_t.h"
/* Some constants fixing the positions of nodes predecessors. */
38 #define CALL_PARAM_OFFSET 2
39 #define FUNCCALL_PARAM_OFFSET 1
40 #define SEL_INDEX_OFFSET 2
41 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
42 #define END_KEEPALIVE_OFFSET 0
/** String representation for each pn_Cmp relation, indexed by the pn_Cmp value. */
static const char *pnc_name_arr [] = {
  "pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
  "pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
  "pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
  "pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 * No bounds check: pnc must be a valid pn_Cmp value in [0, 15].
 */
const char *get_pnc_string(int pnc) {
  return pnc_name_arr[pnc];
}
/**
 * Calculates the negated (Complement(R)) pnc condition.
 */
int get_negated_pnc(int pnc, ir_mode *mode) {
  /* NOTE(review): the actual negation statement(s) and the return are not
     visible in this chunk; only the float-mode guard below is. */
  /* do NOT add the Uo bit for non-floating point values */
  if (! mode_is_float(mode))
71 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
73 get_inversed_pnc(int pnc) {
74 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
75 int lesser = pnc & pn_Cmp_Lt;
76 int greater = pnc & pn_Cmp_Gt;
78 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/** Debug names for the projections of the Start node. */
const char *pns_name_arr [] = {
  "initial_exec", "global_store",
  "frame_base", "globals", "args"
};

/** Debug names for the symconst_kind values. */
const char *symconst_name_arr [] = {
  "type_tag", "size", "addr_name", "addr_ent"
};
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;

/* register new space for every node */
unsigned register_additional_node_data(unsigned size) {
  assert(!forbid_new_data && "Too late to register additional node data");
  /* NOTE(review): a release-mode guard and the closing brace of this function
     are not visible in this chunk. */
  return firm_add_node_size += size;
118 /* Forbid the addition of new data to an ir node. */
/**
 * irnode constructor.
 * Create a new irnode in irg, with an op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic array is created.
 *
 * NOTE(review): the return type, local declarations, the dynamic/fixed arity
 * branch and the return statement are not visible in this chunk; the code
 * lines below are kept verbatim.
 */
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
             int arity, ir_node **in)
  /* fixed node header + op attributes + client-registered extra data */
  size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
  assert(irg && op && mode);
  p = obstack_alloc (irg->obst, node_size);
  memset(p, 0, node_size);
  /* the ir_node proper starts behind the custom data area */
  res = (ir_node *) (p + firm_add_node_size);
  res->kind = k_ir_node;
  res->node_idx = irg_register_node_idx(irg, res);
  /* in[0] is reserved for the block predecessor */
  res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
  res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
  set_irn_dbg_info(res, db);
  res->node_nr = get_irp_new_node_nr();
  for(i = 0; i < EDGE_KIND_LAST; ++i)
    INIT_LIST_HEAD(&res->edge_info[i].outs_head);
  /* for Blocks (is_bl == 1) skip slot 0, i.e. the -1 block edge */
  is_bl = is_Block(res);
  for (i = is_bl; i <= arity; ++i)
    edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
  hook_new_node(irg, res);
177 /*-- getting some parameters from ir_nodes --*/
/* NOTE(review): throughout this region return types, closing braces and some
   statements are missing from the chunk; code lines are kept verbatim. */

/* Checks the kind tag to decide whether thing really is an ir node. */
(is_ir_node)(const void *thing) {
  return _is_ir_node(thing);

/* Arity without interprocedural (call graph) predecessors. */
(get_irn_intra_arity)(const ir_node *node) {
  return _get_irn_intra_arity(node);

/* Arity in the interprocedural view. */
(get_irn_inter_arity)(const ir_node *node) {
  return _get_irn_inter_arity(node);

/* Dispatch pointer for get_irn_arity; defaults to the intraprocedural view. */
int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;

(get_irn_arity)(const ir_node *node) {
  return _get_irn_arity(node);

/* Returns the array with ins. This array is shifted with respect to the
   array accessed by get_irn_n: The block operand is at position 0 not -1.
   (@@@ This should be changed.)
   The order of the predecessors in this array is not guaranteed, except that
   lists of operands as predecessors of Block or arguments of a Call are
   kept in order (rest of this comment is truncated in this chunk). */
get_irn_in (const ir_node *node) {
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      return node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      return node->attr.block.in_cg;
    /* else fall through */

set_irn_in (ir_node *node, int arity, ir_node **in) {
  ir_graph *irg = current_ir_graph;
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      arr = &node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      arr = &node->attr.block.in_cg;
  /* notify edge observers: existing positions are replaced, new ones added */
  for (i = 0; i < arity; i++) {
    if (i < ARR_LEN(*arr)-1)
      edges_notify_edge(node, i, in[i], (*arr)[i+1], irg);
      edges_notify_edge(node, i, in[i], NULL, irg);
  /* ... positions beyond the new arity are removed */
  for(;i < ARR_LEN(*arr)-1; i++) {
    edges_notify_edge(node, i, NULL, (*arr)[i+1], irg);
  /* reallocate the in array if the arity changed; slot 0 keeps the block */
  if (arity != ARR_LEN(*arr) - 1) {
    ir_node * block = (*arr)[0];
    *arr = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
  fix_backedges(irg->obst, node);
  memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);

(get_irn_intra_n)(const ir_node *node, int n) {
  return _get_irn_intra_n (node, n);

(get_irn_inter_n)(const ir_node *node, int n) {
  return _get_irn_inter_n (node, n);

/* Dispatch pointer for get_irn_n; defaults to the intraprocedural view. */
ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;

(get_irn_n)(const ir_node *node, int n) {
  return _get_irn_n(node, n);

set_irn_n (ir_node *node, int n, ir_node *in) {
  assert(node && node->kind == k_ir_node);
  assert(n < get_irn_arity(node));
  assert(in && in->kind == k_ir_node);
  if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
    /* Change block pred in both views! */
    node->in[n + 1] = in;
    assert(node->attr.filter.in_cg);
    node->attr.filter.in_cg[n + 1] = in;
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      node->attr.filter.in_cg[n + 1] = in;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      node->attr.block.in_cg[n + 1] = in;
    /* else fall through */
  hook_set_irn_n(node, n, in, node->in[n + 1]);
  /* Here, we rely on src and tgt being in the current ir graph */
  edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
  /* in[] is shifted by one: position -1 (the block) lives at index 0 */
  node->in[n + 1] = in;

(get_irn_deps)(const ir_node *node)
  return _get_irn_deps(node);

(get_irn_dep)(const ir_node *node, int pos)
  return _get_irn_dep(node, pos);

(set_irn_dep)(ir_node *node, int pos, ir_node *dep)
  _set_irn_dep(node, pos, dep);

/* Adds a dependency edge; appears to reuse a NULLed slot when one exists. */
int add_irn_dep(ir_node *node, ir_node *dep)
  if (node->deps == NULL) {
    node->deps = NEW_ARR_F(ir_node *, 1);
  for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
    if(node->deps[i] == NULL)
    if(node->deps[i] == dep)
  if(first_zero >= 0) {
    node->deps[first_zero] = dep;
  ARR_APP1(ir_node *, node->deps, dep);
  edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));

/* Copies all dependency edges of src onto tgt. */
void add_irn_deps(ir_node *tgt, ir_node *src)
  for(i = 0, n = get_irn_deps(src); i < n; ++i)
    add_irn_dep(tgt, get_irn_dep(src, i));
/* NOTE(review): return types, braces and some statements in this region are
   missing from the chunk; code lines are kept verbatim. */

(get_irn_mode)(const ir_node *node) {
  return _get_irn_mode(node);

(set_irn_mode)(ir_node *node, ir_mode *mode)
  _set_irn_mode(node, mode);

get_irn_modecode (const ir_node *node)
  return node->mode->code;

/** Gets the string representation of the mode. */
get_irn_modename (const ir_node *node)
  return get_mode_name(node->mode);

get_irn_modeident (const ir_node *node)
  return get_mode_ident(node->mode);

(get_irn_op)(const ir_node *node) {
  return _get_irn_op(node);

/* should be private to the library: */
(set_irn_op)(ir_node *node, ir_op *op) {
  _set_irn_op(node, op);

(get_irn_opcode)(const ir_node *node)
  return _get_irn_opcode(node);

/* A zero-arity Phi during graph construction is reported as "Phi0". */
get_irn_opname (const ir_node *node)
  if ((get_irn_op((ir_node *)node) == op_Phi) &&
      (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
      (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
  return get_id_str(node->op->name);

get_irn_opident (const ir_node *node)
  return node->op->name;

(get_irn_visited)(const ir_node *node)
  return _get_irn_visited(node);

(set_irn_visited)(ir_node *node, unsigned long visited)
  _set_irn_visited(node, visited);

(mark_irn_visited)(ir_node *node) {
  _mark_irn_visited(node);

(irn_not_visited)(const ir_node *node) {
  return _irn_not_visited(node);

(irn_visited)(const ir_node *node) {
  return _irn_visited(node);

(set_irn_link)(ir_node *node, void *link) {
  _set_irn_link(node, link);

(get_irn_link)(const ir_node *node) {
  return _get_irn_link(node);

(get_irn_pinned)(const ir_node *node) {
  return _get_irn_pinned(node);

(is_irn_pinned_in_irg) (const ir_node *node) {
  return _is_irn_pinned_in_irg(node);

void set_irn_pinned(ir_node *node, op_pin_state state) {
  /* due to optimization an opt may be turned into a Tuple */
  if (get_irn_op(node) == op_Tuple)
  assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
  assert(state == op_pin_state_pinned || state == op_pin_state_floats);
  node->attr.except.pin_state = state;

#ifdef DO_HEAPANALYSIS
/* Access the abstract interpretation information of a node.
   Returns NULL if no such information is available. */
struct abstval *get_irn_abst_value(ir_node *n) {

/* Set the abstract interpretation information of a node. */
void set_irn_abst_value(ir_node *n, struct abstval *os) {

struct section *firm_get_irn_section(ir_node *n) {

void firm_set_irn_section(ir_node *n, struct section *s) {

/* Dummies needed for firmjni. */
struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
void set_irn_abst_value(ir_node *n, struct abstval *os) {}
struct section *firm_get_irn_section(ir_node *n) { return NULL; }
void firm_set_irn_section(ir_node *n, struct section *s) {}
#endif /* DO_HEAPANALYSIS */

/* Outputs a unique number for this node */
long get_irn_node_nr(const ir_node *node) {
  return node->node_nr;
  /* NOTE(review): both return statements are visible because a preprocessor
     guard around them appears to be missing from this chunk. */
  return (long)PTR_TO_INT(node);
/* NOTE(review): return types and closing braces in this region are missing
   from the chunk; code lines are kept verbatim.  Each accessor asserts the
   node's opcode before touching the corresponding attr union member. */

get_irn_const_attr (ir_node *node)
  assert(node->op == op_Const);
  return node->attr.con;

get_irn_proj_attr (ir_node *node)
  assert(node->op == op_Proj);
  return node->attr.proj;

get_irn_alloc_attr (ir_node *node)
  assert(node->op == op_Alloc);
  return node->attr.alloc;

get_irn_free_attr (ir_node *node)
  assert(node->op == op_Free);
  return node->attr.free;

get_irn_symconst_attr (ir_node *node)
  assert(node->op == op_SymConst);
  return node->attr.symc;

/* Skips id types on access and caches the skipped type back in the node. */
get_irn_call_attr (ir_node *node)
  assert(node->op == op_Call);
  return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);

get_irn_sel_attr (ir_node *node)
  assert(node->op == op_Sel);
  return node->attr.sel;

get_irn_phi_attr (ir_node *node)
  assert(node->op == op_Phi);
  return node->attr.phi0_pos;

get_irn_block_attr (ir_node *node)
  assert(node->op == op_Block);
  return node->attr.block;

get_irn_load_attr (ir_node *node)
  assert(node->op == op_Load);
  return node->attr.load;

get_irn_store_attr (ir_node *node)
  assert(node->op == op_Store);
  return node->attr.store;

/* Only ops that can raise exceptions share the except attribute. */
get_irn_except_attr (ir_node *node)
  assert(node->op == op_Div || node->op == op_Quot ||
         node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
  return node->attr.except;

get_irn_generic_attr (ir_node *node) {

unsigned (get_irn_idx)(const ir_node *node) {
  assert(is_ir_node(node));
  return _get_irn_idx(node);

/* Searches node's predecessors (from the back) for arg; the return
   statements are not visible in this chunk. */
int get_irn_pred_pos(ir_node *node, ir_node *arg) {
  for (i = get_irn_arity(node) - 1; i >= 0; i--) {
    if (get_irn_n(node, i) == arg)
640 /** manipulate fields of individual nodes **/
642 /* this works for all except Block */
644 get_nodes_block (const ir_node *node) {
645 assert(!(node->op == op_Block));
646 assert(is_irn_pinned_in_irg(node) && "block info may be incorrect");
647 return get_irn_n(node, -1);
651 set_nodes_block (ir_node *node, ir_node *block) {
652 assert(!(node->op == op_Block));
653 set_irn_n(node, -1, block);
656 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
657 * from Start. If so returns frame type, else Null. */
658 ir_type *is_frame_pointer(ir_node *n) {
659 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
660 ir_node *start = get_Proj_pred(n);
661 if (get_irn_op(start) == op_Start) {
662 return get_irg_frame_type(get_irn_irg(start));
668 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
669 * from Start. If so returns global type, else Null. */
670 ir_type *is_globals_pointer(ir_node *n) {
671 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
672 ir_node *start = get_Proj_pred(n);
673 if (get_irn_op(start) == op_Start) {
674 return get_glob_type();
680 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
681 * from Start. If so returns tls type, else Null. */
682 ir_type *is_tls_pointer(ir_node *n) {
683 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
684 ir_node *start = get_Proj_pred(n);
685 if (get_irn_op(start) == op_Start) {
686 return get_tls_type();
692 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
693 * from Start. If so returns 1, else 0. */
694 int is_value_arg_pointer(ir_node *n) {
695 if ((get_irn_op(n) == op_Proj) &&
696 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
697 (get_irn_op(get_Proj_pred(n)) == op_Start))
702 /* Returns an array with the predecessors of the Block. Depending on
703 the implementation of the graph data structure this can be a copy of
704 the internal representation of predecessors as well as the internal
705 array itself. Therefore writing to this array might obstruct the ir. */
/* NOTE(review): return types, closing braces and some statements in this
   region are missing from the chunk; code lines are kept verbatim. */

get_Block_cfgpred_arr (ir_node *node)
  assert((node->op == op_Block));
  /* in[] is shifted by one: skip the slot reserved for the block */
  return (ir_node **)&(get_irn_in(node)[1]);

(get_Block_n_cfgpreds)(const ir_node *node) {
  return _get_Block_n_cfgpreds(node);

(get_Block_cfgpred)(ir_node *node, int pos) {
  return _get_Block_cfgpred(node, pos);

set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
  assert(node->op == op_Block);
  set_irn_n(node, pos, pred);

(get_Block_cfgpred_block)(ir_node *node, int pos) {
  return _get_Block_cfgpred_block(node, pos);

get_Block_matured (ir_node *node) {
  assert(node->op == op_Block);
  return (int)node->attr.block.matured;

set_Block_matured (ir_node *node, int matured) {
  assert(node->op == op_Block);
  node->attr.block.matured = matured;

(get_Block_block_visited)(ir_node *node) {
  return _get_Block_block_visited(node);

(set_Block_block_visited)(ir_node *node, unsigned long visit) {
  _set_Block_block_visited(node, visit);

/* For this current_ir_graph must be set. */
(mark_Block_block_visited)(ir_node *node) {
  _mark_Block_block_visited(node);

(Block_not_block_visited)(ir_node *node) {
  return _Block_not_block_visited(node);

get_Block_graph_arr (ir_node *node, int pos) {
  assert(node->op == op_Block);
  return node->attr.block.graph_arr[pos+1];

set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
  assert(node->op == op_Block);
  node->attr.block.graph_arr[pos+1] = value;

/* Installs the interprocedural (call graph) predecessor array of a Block;
   slot 0 of in_cg stays NULL (block slot convention). */
void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Block);
  if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
    node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.block.in_cg[0] = NULL;
    node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
    /* Fix backedge array. fix_backedges() operates depending on
       interprocedural_view. */
    int ipv = get_interprocedural_view();
    set_interprocedural_view(1);
    fix_backedges(current_ir_graph->obst, node);
    set_interprocedural_view(ipv);
  memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);

void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
  assert(node->op == op_Block &&
         node->attr.block.in_cg &&
         0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
  node->attr.block.in_cg[pos + 1] = pred;

ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
  assert(node->op == op_Block);
  return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;

int get_Block_cg_n_cfgpreds(ir_node * node) {
  assert(node->op == op_Block);
  return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;

ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
  assert(node->op == op_Block && node->attr.block.in_cg);
  return node->attr.block.in_cg[pos + 1];

void remove_Block_cg_cfgpred_arr(ir_node * node) {
  assert(node->op == op_Block);
  /* only drops the reference; the array itself lives on the obstack */
  node->attr.block.in_cg = NULL;

ir_node *(set_Block_dead)(ir_node *block) {
  return _set_Block_dead(block);

int (is_Block_dead)(const ir_node *block) {
  return _is_Block_dead(block);

ir_extblk *get_Block_extbb(const ir_node *block) {
  assert(is_Block(block));
  res = block->attr.block.extblk;
  assert(res == NULL || is_ir_extbb(res));

void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
  assert(is_Block(block));
  assert(extblk == NULL || is_ir_extbb(extblk));
  block->attr.block.extblk = extblk;

get_End_n_keepalives(ir_node *end) {
  assert(end->op == op_End);
  return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);

get_End_keepalive(ir_node *end, int pos) {
  assert(end->op == op_End);
  return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);

add_End_keepalive (ir_node *end, ir_node *ka) {
  ir_graph *irg = get_irn_irg(end);
  assert(end->op == op_End);
  l = ARR_LEN(end->in);
  ARR_APP1(ir_node *, end->in, ka);
  /* l - 1: edge positions are shifted against the in array */
  edges_notify_edge(end, l - 1, end->in[l], NULL, irg);

set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
  assert(end->op == op_End);
  set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);

/* Set new keep-alives */
void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
  ir_graph *irg = get_irn_irg(end);
  /* notify that edges are deleted */
  for (i = 1 + END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in); ++i) {
    edges_notify_edge(end, i, end->in[i], NULL, irg);
  ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
  for (i = 0; i < n; ++i) {
    end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
    edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, NULL, end->in[1 + END_KEEPALIVE_OFFSET + i], irg);

free_End (ir_node *end) {
  assert(end->op == op_End);
  end->in = NULL;   /* @@@ make sure we get an error if we use the
                       in array afterwards ... */
901 /* Return the target address of an IJmp */
902 ir_node *get_IJmp_target(ir_node *ijmp) {
903 assert(ijmp->op == op_IJmp);
904 return get_irn_n(ijmp, 0);
907 /** Sets the target address of an IJmp */
908 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
909 assert(ijmp->op == op_IJmp);
910 set_irn_n(ijmp, 0, tgt);
914 > Implementing the case construct (which is where the constant Proj node is
915 > important) involves far more than simply determining the constant values.
916 > We could argue that this is more properly a function of the translator from
917 > Firm to the target machine. That could be done if there was some way of
918 > projecting "default" out of the Cond node.
919 I know it's complicated.
Basically there are two problems:
921 - determining the gaps between the projs
922 - determining the biggest case constant to know the proj number for
924 I see several solutions:
925 1. Introduce a ProjDefault node. Solves both problems.
926 This means to extend all optimizations executed during construction.
927 2. Give the Cond node for switch two flavors:
928 a) there are no gaps in the projs (existing flavor)
929 b) gaps may exist, default proj is still the Proj with the largest
930 projection number. This covers also the gaps.
931 3. Fix the semantic of the Cond to that of 2b)
933 Solution 2 seems to be the best:
934 Computing the gaps in the Firm representation is not too hard, i.e.,
935 libFIRM can implement a routine that transforms between the two
936 flavours. This is also possible for 1) but 2) does not require to
937 change any existing optimization.
938 Further it should be far simpler to determine the biggest constant than
940 I don't want to choose 3) as 2a) seems to have advantages for
941 dataflow analysis and 3) does not allow to convert the representation to
/* NOTE(review): return types and closing braces in this region are missing
   from the chunk; code lines are kept verbatim. */

get_Cond_selector (ir_node *node) {
  assert(node->op == op_Cond);
  return get_irn_n(node, 0);

set_Cond_selector (ir_node *node, ir_node *selector) {
  assert(node->op == op_Cond);
  set_irn_n(node, 0, selector);

get_Cond_kind (ir_node *node) {
  assert(node->op == op_Cond);
  return node->attr.cond.kind;

set_Cond_kind (ir_node *node, cond_kind kind) {
  assert(node->op == op_Cond);
  node->attr.cond.kind = kind;

get_Cond_defaultProj (ir_node *node) {
  assert(node->op == op_Cond);
  return node->attr.cond.default_proj;

get_Return_mem (ir_node *node) {
  assert(node->op == op_Return);
  return get_irn_n(node, 0);

set_Return_mem (ir_node *node, ir_node *mem) {
  assert(node->op == op_Return);
  set_irn_n(node, 0, mem);

get_Return_n_ress (ir_node *node) {
  assert(node->op == op_Return);
  return (get_irn_arity(node) - RETURN_RESULT_OFFSET);

get_Return_res_arr (ir_node *node)
  assert((node->op == op_Return));
  if (get_Return_n_ress(node) > 0)
    /* skip the in[] block slot plus the mem operand */
    return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);

set_Return_n_res (ir_node *node, int results) {
  assert(node->op == op_Return);

get_Return_res (ir_node *node, int pos) {
  assert(node->op == op_Return);
  assert(get_Return_n_ress(node) > pos);
  return get_irn_n(node, pos + RETURN_RESULT_OFFSET);

set_Return_res (ir_node *node, int pos, ir_node *res){
  assert(node->op == op_Return);
  set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);

tarval *(get_Const_tarval)(ir_node *node) {
  return _get_Const_tarval(node);

set_Const_tarval (ir_node *node, tarval *con) {
  assert(node->op == op_Const);
  node->attr.con.tv = con;

cnst_classify_t (classify_Const)(ir_node *node)
  return _classify_Const(node);

/* The source language type. Must be an atomic type. Mode of type must
   be mode of node. For tarvals from entities type must be pointer to
   entity type. (closing of this comment was truncated in this chunk) */
get_Const_type (ir_node *node) {
  assert(node->op == op_Const);
  return node->attr.con.tp;

set_Const_type (ir_node *node, ir_type *tp) {
  assert(node->op == op_Const);
  if (tp != firm_unknown_type) {
    /* the type's mode must agree with the node's mode */
    assert(is_atomic_type(tp));
    assert(get_type_mode(tp) == get_irn_mode(node));
  node->attr.con.tp = tp;
/* NOTE(review): return types and closing braces in this region are missing
   from the chunk; code lines are kept verbatim.  The SYMCONST_HAS_* macros
   guard which member of the sym union is valid for the node's kind. */

get_SymConst_kind (const ir_node *node) {
  assert(node->op == op_SymConst);
  return node->attr.symc.num;

set_SymConst_kind (ir_node *node, symconst_kind num) {
  assert(node->op == op_SymConst);
  node->attr.symc.num = num;

/* Skips id types on access and caches the skipped type back in the node. */
get_SymConst_type (ir_node *node) {
  assert(   (node->op == op_SymConst)
         && (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
  return node->attr.symc.sym.type_p = skip_tid(node->attr.symc.sym.type_p);

set_SymConst_type (ir_node *node, ir_type *tp) {
  assert(   (node->op == op_SymConst)
         && (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
  node->attr.symc.sym.type_p = tp;

get_SymConst_name (ir_node *node) {
  assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
  return node->attr.symc.sym.ident_p;

set_SymConst_name (ir_node *node, ident *name) {
  assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
  node->attr.symc.sym.ident_p = name;

/* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
entity *get_SymConst_entity (ir_node *node) {
  assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
  return node->attr.symc.sym.entity_p;

void set_SymConst_entity (ir_node *node, entity *ent) {
  assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
  node->attr.symc.sym.entity_p = ent;

ir_enum_const *get_SymConst_enum (ir_node *node) {
  assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
  return node->attr.symc.sym.enum_p;

void set_SymConst_enum (ir_node *node, ir_enum_const *ec) {
  assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
  node->attr.symc.sym.enum_p = ec;

/* Raw access to the whole symbol union, regardless of the kind. */
union symconst_symbol
get_SymConst_symbol (ir_node *node) {
  assert(node->op == op_SymConst);
  return node->attr.symc.sym;

set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
  assert(node->op == op_SymConst);
  node->attr.symc.sym = sym;

/* Skips id types lazily: only when a value type is present. */
get_SymConst_value_type (ir_node *node) {
  assert(node->op == op_SymConst);
  if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
  return node->attr.symc.tp;

set_SymConst_value_type (ir_node *node, ir_type *tp) {
  assert(node->op == op_SymConst);
  node->attr.symc.tp = tp;
/* NOTE(review): return types and closing braces in this region are missing
   from the chunk; code lines are kept verbatim.
   Sel operands: 0 = mem, 1 = ptr, SEL_INDEX_OFFSET.. = index list. */

get_Sel_mem (ir_node *node) {
  assert(node->op == op_Sel);
  return get_irn_n(node, 0);

set_Sel_mem (ir_node *node, ir_node *mem) {
  assert(node->op == op_Sel);
  set_irn_n(node, 0, mem);

get_Sel_ptr (ir_node *node) {
  assert(node->op == op_Sel);
  return get_irn_n(node, 1);

set_Sel_ptr (ir_node *node, ir_node *ptr) {
  assert(node->op == op_Sel);
  set_irn_n(node, 1, ptr);

get_Sel_n_indexs (ir_node *node) {
  assert(node->op == op_Sel);
  return (get_irn_arity(node) - SEL_INDEX_OFFSET);

get_Sel_index_arr (ir_node *node)
  assert((node->op == op_Sel));
  if (get_Sel_n_indexs(node) > 0)
    /* +1: skip the in[] block slot */
    return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];

get_Sel_index (ir_node *node, int pos) {
  assert(node->op == op_Sel);
  return get_irn_n(node, pos + SEL_INDEX_OFFSET);

set_Sel_index (ir_node *node, int pos, ir_node *index) {
  assert(node->op == op_Sel);
  set_irn_n(node, pos + SEL_INDEX_OFFSET, index);

get_Sel_entity (ir_node *node) {
  assert(node->op == op_Sel);
  return node->attr.sel.ent;

set_Sel_entity (ir_node *node, entity *ent) {
  assert(node->op == op_Sel);
  node->attr.sel.ent = ent;
1208 /* For unary and binary arithmetic operations the access to the
1209 operands can be factored out. Left is the first, right the
1210 second arithmetic value as listed in tech report 0999-33.
1211 unops are: Minus, Abs, Not, Conv, Cast
1212 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1213 Shr, Shrs, Rotate, Cmp */
/* NOTE(review): return types and closing braces in this region are missing
   from the chunk; code lines are kept verbatim.
   Call operands: 0 = mem, 1 = ptr, CALL_PARAM_OFFSET.. = parameters. */

get_Call_mem (ir_node *node) {
  assert(node->op == op_Call);
  return get_irn_n(node, 0);

set_Call_mem (ir_node *node, ir_node *mem) {
  assert(node->op == op_Call);
  set_irn_n(node, 0, mem);

get_Call_ptr (ir_node *node) {
  assert(node->op == op_Call);
  return get_irn_n(node, 1);

set_Call_ptr (ir_node *node, ir_node *ptr) {
  assert(node->op == op_Call);
  set_irn_n(node, 1, ptr);

get_Call_param_arr (ir_node *node) {
  assert(node->op == op_Call);
  /* +1: skip the in[] block slot */
  return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];

get_Call_n_params (ir_node *node) {
  assert(node->op == op_Call);
  return (get_irn_arity(node) - CALL_PARAM_OFFSET);

get_Call_arity (ir_node *node) {
  assert(node->op == op_Call);
  return get_Call_n_params(node);

/* NOTE(review): takes an ir_node* named arity and has no visible body --
   looks like an unimplemented stub; verify against the full source. */
set_Call_arity (ir_node *node, ir_node *arity) {
  assert(node->op == op_Call);

get_Call_param (ir_node *node, int pos) {
  assert(node->op == op_Call);
  return get_irn_n(node, pos + CALL_PARAM_OFFSET);

set_Call_param (ir_node *node, int pos, ir_node *param) {
  assert(node->op == op_Call);
  set_irn_n(node, pos + CALL_PARAM_OFFSET, param);

/* Skips id types on access and caches the skipped type back in the node. */
get_Call_type (ir_node *node) {
  assert(node->op == op_Call);
  return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);

set_Call_type (ir_node *node, ir_type *tp) {
  assert(node->op == op_Call);
  assert((get_unknown_type() == tp) || is_Method_type(tp));
  node->attr.call.cld_tp = tp;

int Call_has_callees(ir_node *node) {
  assert(node && node->op == op_Call);
  return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
          (node->attr.call.callee_arr != NULL));

int get_Call_n_callees(ir_node * node) {
  assert(node && node->op == op_Call && node->attr.call.callee_arr);
  return ARR_LEN(node->attr.call.callee_arr);

entity * get_Call_callee(ir_node * node, int pos) {
  assert(pos >= 0 && pos < get_Call_n_callees(node));
  return node->attr.call.callee_arr[pos];

/* Reallocates the callee array only when the size changed. */
void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
  assert(node->op == op_Call);
  if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
    node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
  memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));

void remove_Call_callee_arr(ir_node * node) {
  assert(node->op == op_Call);
  node->attr.call.callee_arr = NULL;

ir_node * get_CallBegin_ptr (ir_node *node) {
  assert(node->op == op_CallBegin);
  return get_irn_n(node, 0);

void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
  assert(node->op == op_CallBegin);
  set_irn_n(node, 0, ptr);

ir_node * get_CallBegin_call (ir_node *node) {
  assert(node->op == op_CallBegin);
  return node->attr.callbegin.call;

void set_CallBegin_call (ir_node *node, ir_node *call) {
  assert(node->op == op_CallBegin);
  node->attr.callbegin.call = call;
1337 ir_node * get_##OP##_left(ir_node *node) { \
1338 assert(node->op == op_##OP); \
1339 return get_irn_n(node, node->op->op_index); \
1341 void set_##OP##_left(ir_node *node, ir_node *left) { \
1342 assert(node->op == op_##OP); \
1343 set_irn_n(node, node->op->op_index, left); \
1345 ir_node *get_##OP##_right(ir_node *node) { \
1346 assert(node->op == op_##OP); \
1347 return get_irn_n(node, node->op->op_index + 1); \
1349 void set_##OP##_right(ir_node *node, ir_node *right) { \
1350 assert(node->op == op_##OP); \
1351 set_irn_n(node, node->op->op_index + 1, right); \
1355 ir_node *get_##OP##_op(ir_node *node) { \
1356 assert(node->op == op_##OP); \
1357 return get_irn_n(node, node->op->op_index); \
1359 void set_##OP##_op (ir_node *node, ir_node *op) { \
1360 assert(node->op == op_##OP); \
1361 set_irn_n(node, node->op->op_index, op); \
/* NOTE(review): return types and closing braces in this region are missing
   from the chunk; code lines are kept verbatim.
   For Quot/DivMod/Div/Mod, operand 0 is the memory operand. */

get_Quot_mem (ir_node *node) {
  assert(node->op == op_Quot);
  return get_irn_n(node, 0);

set_Quot_mem (ir_node *node, ir_node *mem) {
  assert(node->op == op_Quot);
  set_irn_n(node, 0, mem);

get_DivMod_mem (ir_node *node) {
  assert(node->op == op_DivMod);
  return get_irn_n(node, 0);

set_DivMod_mem (ir_node *node, ir_node *mem) {
  assert(node->op == op_DivMod);
  set_irn_n(node, 0, mem);

get_Div_mem (ir_node *node) {
  assert(node->op == op_Div);
  return get_irn_n(node, 0);

set_Div_mem (ir_node *node, ir_node *mem) {
  assert(node->op == op_Div);
  set_irn_n(node, 0, mem);

get_Mod_mem (ir_node *node) {
  assert(node->op == op_Mod);
  return get_irn_n(node, 0);

set_Mod_mem (ir_node *node, ir_node *mem) {
  assert(node->op == op_Mod);
  set_irn_n(node, 0, mem);

int get_Conv_strict(ir_node *node) {
  assert(node->op == op_Conv);
  return node->attr.conv.strict;

void set_Conv_strict(ir_node *node, int strict_flag) {
  assert(node->op == op_Conv);
  /* stored narrowed to char inside the attribute */
  node->attr.conv.strict = (char)strict_flag;

get_Cast_type (ir_node *node) {
  assert(node->op == op_Cast);
  return node->attr.cast.totype;

set_Cast_type (ir_node *node, ir_type *to_tp) {
  assert(node->op == op_Cast);
  node->attr.cast.totype = to_tp;

/* Checks for upcast.
 * Returns true if the Cast node casts a class type to a super type.
 * Requires consistent typeinfo on the graph. */
int is_Cast_upcast(ir_node *node) {
  ir_type *totype   = get_Cast_type(node);
  ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
  ir_graph *myirg = get_irn_irg(node);
  assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
  /* strip matching pointer levels on both sides */
  while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
    totype   = get_pointer_points_to_type(totype);
    fromtype = get_pointer_points_to_type(fromtype);
  if (!is_Class_type(totype)) return 0;
  return is_SubClass_of(fromtype, totype);

/* Checks for downcast.
 * Returns true if the Cast node casts a class type to a sub type.
 * Requires consistent typeinfo on the graph. */
int is_Cast_downcast(ir_node *node) {
  ir_type *totype   = get_Cast_type(node);
  ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
  assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
  /* strip matching pointer levels on both sides */
  while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
    totype   = get_pointer_points_to_type(totype);
    fromtype = get_pointer_points_to_type(fromtype);
  /* note: subclass test is inverted relative to is_Cast_upcast */
  if (!is_Class_type(totype)) return 0;
  return is_SubClass_of(totype, fromtype);
1506 (is_unop)(const ir_node *node) {
1507 return _is_unop(node);
1511 get_unop_op (ir_node *node) {
1512 if (node->op->opar == oparity_unary)
1513 return get_irn_n(node, node->op->op_index);
1515 assert(node->op->opar == oparity_unary);
1520 set_unop_op (ir_node *node, ir_node *op) {
1521 if (node->op->opar == oparity_unary)
1522 set_irn_n(node, node->op->op_index, op);
1524 assert(node->op->opar == oparity_unary);
1528 (is_binop)(const ir_node *node) {
1529 return _is_binop(node);
1533 get_binop_left (ir_node *node) {
1534 if (node->op->opar == oparity_binary)
1535 return get_irn_n(node, node->op->op_index);
1537 assert(node->op->opar == oparity_binary);
1542 set_binop_left (ir_node *node, ir_node *left) {
1543 if (node->op->opar == oparity_binary)
1544 set_irn_n(node, node->op->op_index, left);
1546 assert(node->op->opar == oparity_binary);
1550 get_binop_right (ir_node *node) {
1551 if (node->op->opar == oparity_binary)
1552 return get_irn_n(node, node->op->op_index + 1);
1554 assert(node->op->opar == oparity_binary);
1559 set_binop_right (ir_node *node, ir_node *right) {
1560 if (node->op->opar == oparity_binary)
1561 set_irn_n(node, node->op->op_index + 1, right);
1563 assert(node->op->opar == oparity_binary);
1566 int is_Phi (const ir_node *n) {
1572 if (op == op_Filter) return get_interprocedural_view();
1575 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1576 (get_irn_arity(n) > 0));
1581 int is_Phi0 (const ir_node *n) {
1584 return ((get_irn_op(n) == op_Phi) &&
1585 (get_irn_arity(n) == 0) &&
1586 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1590 get_Phi_preds_arr (ir_node *node) {
1591 assert(node->op == op_Phi);
1592 return (ir_node **)&(get_irn_in(node)[1]);
1596 get_Phi_n_preds (ir_node *node) {
1597 assert(is_Phi(node) || is_Phi0(node));
1598 return (get_irn_arity(node));
1602 void set_Phi_n_preds (ir_node *node, int n_preds) {
1603 assert(node->op == op_Phi);
1608 get_Phi_pred (ir_node *node, int pos) {
1609 assert(is_Phi(node) || is_Phi0(node));
1610 return get_irn_n(node, pos);
1614 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1615 assert(is_Phi(node) || is_Phi0(node));
1616 set_irn_n(node, pos, pred);
1620 int is_memop(ir_node *node) {
1621 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1624 ir_node *get_memop_mem (ir_node *node) {
1625 assert(is_memop(node));
1626 return get_irn_n(node, 0);
1629 void set_memop_mem (ir_node *node, ir_node *mem) {
1630 assert(is_memop(node));
1631 set_irn_n(node, 0, mem);
1634 ir_node *get_memop_ptr (ir_node *node) {
1635 assert(is_memop(node));
1636 return get_irn_n(node, 1);
1639 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1640 assert(is_memop(node));
1641 set_irn_n(node, 1, ptr);
1645 get_Load_mem (ir_node *node) {
1646 assert(node->op == op_Load);
1647 return get_irn_n(node, 0);
1651 set_Load_mem (ir_node *node, ir_node *mem) {
1652 assert(node->op == op_Load);
1653 set_irn_n(node, 0, mem);
1657 get_Load_ptr (ir_node *node) {
1658 assert(node->op == op_Load);
1659 return get_irn_n(node, 1);
1663 set_Load_ptr (ir_node *node, ir_node *ptr) {
1664 assert(node->op == op_Load);
1665 set_irn_n(node, 1, ptr);
1669 get_Load_mode (ir_node *node) {
1670 assert(node->op == op_Load);
1671 return node->attr.load.load_mode;
1675 set_Load_mode (ir_node *node, ir_mode *mode) {
1676 assert(node->op == op_Load);
1677 node->attr.load.load_mode = mode;
1681 get_Load_volatility (ir_node *node) {
1682 assert(node->op == op_Load);
1683 return node->attr.load.volatility;
1687 set_Load_volatility (ir_node *node, ir_volatility volatility) {
1688 assert(node->op == op_Load);
1689 node->attr.load.volatility = volatility;
1694 get_Store_mem (ir_node *node) {
1695 assert(node->op == op_Store);
1696 return get_irn_n(node, 0);
1700 set_Store_mem (ir_node *node, ir_node *mem) {
1701 assert(node->op == op_Store);
1702 set_irn_n(node, 0, mem);
1706 get_Store_ptr (ir_node *node) {
1707 assert(node->op == op_Store);
1708 return get_irn_n(node, 1);
1712 set_Store_ptr (ir_node *node, ir_node *ptr) {
1713 assert(node->op == op_Store);
1714 set_irn_n(node, 1, ptr);
1718 get_Store_value (ir_node *node) {
1719 assert(node->op == op_Store);
1720 return get_irn_n(node, 2);
1724 set_Store_value (ir_node *node, ir_node *value) {
1725 assert(node->op == op_Store);
1726 set_irn_n(node, 2, value);
1730 get_Store_volatility (ir_node *node) {
1731 assert(node->op == op_Store);
1732 return node->attr.store.volatility;
1736 set_Store_volatility (ir_node *node, ir_volatility volatility) {
1737 assert(node->op == op_Store);
1738 node->attr.store.volatility = volatility;
1743 get_Alloc_mem (ir_node *node) {
1744 assert(node->op == op_Alloc);
1745 return get_irn_n(node, 0);
1749 set_Alloc_mem (ir_node *node, ir_node *mem) {
1750 assert(node->op == op_Alloc);
1751 set_irn_n(node, 0, mem);
1755 get_Alloc_size (ir_node *node) {
1756 assert(node->op == op_Alloc);
1757 return get_irn_n(node, 1);
1761 set_Alloc_size (ir_node *node, ir_node *size) {
1762 assert(node->op == op_Alloc);
1763 set_irn_n(node, 1, size);
1767 get_Alloc_type (ir_node *node) {
1768 assert(node->op == op_Alloc);
1769 return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
1773 set_Alloc_type (ir_node *node, ir_type *tp) {
1774 assert(node->op == op_Alloc);
1775 node->attr.alloc.type = tp;
1779 get_Alloc_where (ir_node *node) {
1780 assert(node->op == op_Alloc);
1781 return node->attr.alloc.where;
1785 set_Alloc_where (ir_node *node, where_alloc where) {
1786 assert(node->op == op_Alloc);
1787 node->attr.alloc.where = where;
1792 get_Free_mem (ir_node *node) {
1793 assert(node->op == op_Free);
1794 return get_irn_n(node, 0);
1798 set_Free_mem (ir_node *node, ir_node *mem) {
1799 assert(node->op == op_Free);
1800 set_irn_n(node, 0, mem);
1804 get_Free_ptr (ir_node *node) {
1805 assert(node->op == op_Free);
1806 return get_irn_n(node, 1);
1810 set_Free_ptr (ir_node *node, ir_node *ptr) {
1811 assert(node->op == op_Free);
1812 set_irn_n(node, 1, ptr);
1816 get_Free_size (ir_node *node) {
1817 assert(node->op == op_Free);
1818 return get_irn_n(node, 2);
1822 set_Free_size (ir_node *node, ir_node *size) {
1823 assert(node->op == op_Free);
1824 set_irn_n(node, 2, size);
1828 get_Free_type (ir_node *node) {
1829 assert(node->op == op_Free);
1830 return node->attr.free.type = skip_tid(node->attr.free.type);
1834 set_Free_type (ir_node *node, ir_type *tp) {
1835 assert(node->op == op_Free);
1836 node->attr.free.type = tp;
1840 get_Free_where (ir_node *node) {
1841 assert(node->op == op_Free);
1842 return node->attr.free.where;
1846 set_Free_where (ir_node *node, where_alloc where) {
1847 assert(node->op == op_Free);
1848 node->attr.free.where = where;
1851 ir_node **get_Sync_preds_arr (ir_node *node) {
1852 assert(node->op == op_Sync);
1853 return (ir_node **)&(get_irn_in(node)[1]);
1856 int get_Sync_n_preds (ir_node *node) {
1857 assert(node->op == op_Sync);
1858 return (get_irn_arity(node));
1862 void set_Sync_n_preds (ir_node *node, int n_preds) {
1863 assert(node->op == op_Sync);
1867 ir_node *get_Sync_pred (ir_node *node, int pos) {
1868 assert(node->op == op_Sync);
1869 return get_irn_n(node, pos);
1872 void set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1873 assert(node->op == op_Sync);
1874 set_irn_n(node, pos, pred);
1877 /* Add a new Sync predecessor */
1878 void add_Sync_pred (ir_node *node, ir_node *pred) {
1880 ir_graph *irg = get_irn_irg(node);
1882 assert(node->op == op_Sync);
1883 l = ARR_LEN(node->in);
1884 ARR_APP1(ir_node *, node->in, pred);
1885 edges_notify_edge(node, l, node->in[l], NULL, irg);
1888 /* Returns the source language type of a Proj node. */
1889 ir_type *get_Proj_type(ir_node *n)
1891 ir_type *tp = firm_unknown_type;
1892 ir_node *pred = get_Proj_pred(n);
1894 switch (get_irn_opcode(pred)) {
1897 /* Deal with Start / Call here: we need to know the Proj Nr. */
1898 assert(get_irn_mode(pred) == mode_T);
1899 pred_pred = get_Proj_pred(pred);
1900 if (get_irn_op(pred_pred) == op_Start) {
1901 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1902 tp = get_method_param_type(mtp, get_Proj_proj(n));
1903 } else if (get_irn_op(pred_pred) == op_Call) {
1904 ir_type *mtp = get_Call_type(pred_pred);
1905 tp = get_method_res_type(mtp, get_Proj_proj(n));
1908 case iro_Start: break;
1909 case iro_Call: break;
1911 ir_node *a = get_Load_ptr(pred);
1913 tp = get_entity_type(get_Sel_entity(a));
1922 get_Proj_pred (const ir_node *node) {
1923 assert(is_Proj(node));
1924 return get_irn_n(node, 0);
1928 set_Proj_pred (ir_node *node, ir_node *pred) {
1929 assert(is_Proj(node));
1930 set_irn_n(node, 0, pred);
1934 get_Proj_proj (const ir_node *node) {
1935 assert(is_Proj(node));
1936 if (get_irn_opcode(node) == iro_Proj) {
1937 return node->attr.proj;
1939 assert(get_irn_opcode(node) == iro_Filter);
1940 return node->attr.filter.proj;
1945 set_Proj_proj (ir_node *node, long proj) {
1946 assert(node->op == op_Proj);
1947 node->attr.proj = proj;
1951 get_Tuple_preds_arr (ir_node *node) {
1952 assert(node->op == op_Tuple);
1953 return (ir_node **)&(get_irn_in(node)[1]);
1957 get_Tuple_n_preds (ir_node *node) {
1958 assert(node->op == op_Tuple);
1959 return (get_irn_arity(node));
1964 set_Tuple_n_preds (ir_node *node, int n_preds) {
1965 assert(node->op == op_Tuple);
1970 get_Tuple_pred (ir_node *node, int pos) {
1971 assert(node->op == op_Tuple);
1972 return get_irn_n(node, pos);
1976 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1977 assert(node->op == op_Tuple);
1978 set_irn_n(node, pos, pred);
1982 get_Id_pred (ir_node *node) {
1983 assert(node->op == op_Id);
1984 return get_irn_n(node, 0);
1988 set_Id_pred (ir_node *node, ir_node *pred) {
1989 assert(node->op == op_Id);
1990 set_irn_n(node, 0, pred);
1993 ir_node *get_Confirm_value (ir_node *node) {
1994 assert(node->op == op_Confirm);
1995 return get_irn_n(node, 0);
1997 void set_Confirm_value (ir_node *node, ir_node *value) {
1998 assert(node->op == op_Confirm);
1999 set_irn_n(node, 0, value);
2001 ir_node *get_Confirm_bound (ir_node *node) {
2002 assert(node->op == op_Confirm);
2003 return get_irn_n(node, 1);
2005 void set_Confirm_bound (ir_node *node, ir_node *bound) {
2006 assert(node->op == op_Confirm);
2007 set_irn_n(node, 0, bound);
2009 pn_Cmp get_Confirm_cmp (ir_node *node) {
2010 assert(node->op == op_Confirm);
2011 return node->attr.confirm_cmp;
2013 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
2014 assert(node->op == op_Confirm);
2015 node->attr.confirm_cmp = cmp;
2020 get_Filter_pred (ir_node *node) {
2021 assert(node->op == op_Filter);
2025 set_Filter_pred (ir_node *node, ir_node *pred) {
2026 assert(node->op == op_Filter);
2030 get_Filter_proj(ir_node *node) {
2031 assert(node->op == op_Filter);
2032 return node->attr.filter.proj;
2035 set_Filter_proj (ir_node *node, long proj) {
2036 assert(node->op == op_Filter);
2037 node->attr.filter.proj = proj;
2040 /* Don't use get_irn_arity, get_irn_n in implementation as access
2041 shall work independent of view!!! */
2042 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
2043 assert(node->op == op_Filter);
2044 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2045 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2046 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
2047 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
2048 node->attr.filter.in_cg[0] = node->in[0];
2050 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2053 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2054 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2055 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2056 node->attr.filter.in_cg[pos + 1] = pred;
2058 int get_Filter_n_cg_preds(ir_node *node) {
2059 assert(node->op == op_Filter && node->attr.filter.in_cg);
2060 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2062 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2064 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2066 arity = ARR_LEN(node->attr.filter.in_cg);
2067 assert(pos < arity - 1);
2068 return node->attr.filter.in_cg[pos + 1];
2072 ir_node *get_Mux_sel (ir_node *node) {
2073 if (node->op == op_Psi) {
2074 assert(get_irn_arity(node) == 3);
2075 return get_Psi_cond(node, 0);
2077 assert(node->op == op_Mux);
2080 void set_Mux_sel (ir_node *node, ir_node *sel) {
2081 if (node->op == op_Psi) {
2082 assert(get_irn_arity(node) == 3);
2083 set_Psi_cond(node, 0, sel);
2086 assert(node->op == op_Mux);
2091 ir_node *get_Mux_false (ir_node *node) {
2092 if (node->op == op_Psi) {
2093 assert(get_irn_arity(node) == 3);
2094 return get_Psi_default(node);
2096 assert(node->op == op_Mux);
2099 void set_Mux_false (ir_node *node, ir_node *ir_false) {
2100 if (node->op == op_Psi) {
2101 assert(get_irn_arity(node) == 3);
2102 set_Psi_default(node, ir_false);
2105 assert(node->op == op_Mux);
2106 node->in[2] = ir_false;
2110 ir_node *get_Mux_true (ir_node *node) {
2111 if (node->op == op_Psi) {
2112 assert(get_irn_arity(node) == 3);
2113 return get_Psi_val(node, 0);
2115 assert(node->op == op_Mux);
2118 void set_Mux_true (ir_node *node, ir_node *ir_true) {
2119 if (node->op == op_Psi) {
2120 assert(get_irn_arity(node) == 3);
2121 set_Psi_val(node, 0, ir_true);
2124 assert(node->op == op_Mux);
2125 node->in[3] = ir_true;
2130 ir_node *get_Psi_cond (ir_node *node, int pos) {
2131 int num_conds = get_Psi_n_conds(node);
2132 assert(node->op == op_Psi);
2133 assert(pos < num_conds);
2134 return get_irn_n(node, 2 * pos);
2137 void set_Psi_cond (ir_node *node, int pos, ir_node *cond) {
2138 int num_conds = get_Psi_n_conds(node);
2139 assert(node->op == op_Psi);
2140 assert(pos < num_conds);
2141 set_irn_n(node, 2 * pos, cond);
2144 ir_node *get_Psi_val (ir_node *node, int pos) {
2145 int num_vals = get_Psi_n_conds(node);
2146 assert(node->op == op_Psi);
2147 assert(pos < num_vals);
2148 return get_irn_n(node, 2 * pos + 1);
2151 void set_Psi_val (ir_node *node, int pos, ir_node *val) {
2152 int num_vals = get_Psi_n_conds(node);
2153 assert(node->op == op_Psi);
2154 assert(pos < num_vals);
2155 set_irn_n(node, 2 * pos + 1, val);
2158 ir_node *get_Psi_default(ir_node *node) {
2159 int def_pos = get_irn_arity(node) - 1;
2160 assert(node->op == op_Psi);
2161 return get_irn_n(node, def_pos);
2164 void set_Psi_default(ir_node *node, ir_node *val) {
2165 int def_pos = get_irn_arity(node);
2166 assert(node->op == op_Psi);
2167 set_irn_n(node, def_pos, val);
2170 int (get_Psi_n_conds)(ir_node *node) {
2171 return _get_Psi_n_conds(node);
2175 ir_node *get_CopyB_mem (ir_node *node) {
2176 assert(node->op == op_CopyB);
2177 return get_irn_n(node, 0);
2180 void set_CopyB_mem (ir_node *node, ir_node *mem) {
2181 assert(node->op == op_CopyB);
2182 set_irn_n(node, 0, mem);
2185 ir_node *get_CopyB_dst (ir_node *node) {
2186 assert(node->op == op_CopyB);
2187 return get_irn_n(node, 1);
2190 void set_CopyB_dst (ir_node *node, ir_node *dst) {
2191 assert(node->op == op_CopyB);
2192 set_irn_n(node, 1, dst);
2195 ir_node *get_CopyB_src (ir_node *node) {
2196 assert(node->op == op_CopyB);
2197 return get_irn_n(node, 2);
2200 void set_CopyB_src (ir_node *node, ir_node *src) {
2201 assert(node->op == op_CopyB);
2202 set_irn_n(node, 2, src);
2205 ir_type *get_CopyB_type(ir_node *node) {
2206 assert(node->op == op_CopyB);
2207 return node->attr.copyb.data_type;
2210 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2211 assert(node->op == op_CopyB && data_type);
2212 node->attr.copyb.data_type = data_type;
2217 get_InstOf_type (ir_node *node) {
2218 assert(node->op = op_InstOf);
2219 return node->attr.instof.type;
2223 set_InstOf_type (ir_node *node, ir_type *type) {
2224 assert(node->op = op_InstOf);
2225 node->attr.instof.type = type;
2229 get_InstOf_store (ir_node *node) {
2230 assert(node->op = op_InstOf);
2231 return get_irn_n(node, 0);
2235 set_InstOf_store (ir_node *node, ir_node *obj) {
2236 assert(node->op = op_InstOf);
2237 set_irn_n(node, 0, obj);
2241 get_InstOf_obj (ir_node *node) {
2242 assert(node->op = op_InstOf);
2243 return get_irn_n(node, 1);
2247 set_InstOf_obj (ir_node *node, ir_node *obj) {
2248 assert(node->op = op_InstOf);
2249 set_irn_n(node, 1, obj);
2252 /* Returns the memory input of a Raise operation. */
2254 get_Raise_mem (ir_node *node) {
2255 assert(node->op == op_Raise);
2256 return get_irn_n(node, 0);
2260 set_Raise_mem (ir_node *node, ir_node *mem) {
2261 assert(node->op == op_Raise);
2262 set_irn_n(node, 0, mem);
2266 get_Raise_exo_ptr (ir_node *node) {
2267 assert(node->op == op_Raise);
2268 return get_irn_n(node, 1);
2272 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
2273 assert(node->op == op_Raise);
2274 set_irn_n(node, 1, exo_ptr);
2279 /* Returns the memory input of a Bound operation. */
2280 ir_node *get_Bound_mem(ir_node *bound) {
2281 assert(bound->op == op_Bound);
2282 return get_irn_n(bound, 0);
2285 void set_Bound_mem (ir_node *bound, ir_node *mem) {
2286 assert(bound->op == op_Bound);
2287 set_irn_n(bound, 0, mem);
2290 /* Returns the index input of a Bound operation. */
2291 ir_node *get_Bound_index(ir_node *bound) {
2292 assert(bound->op == op_Bound);
2293 return get_irn_n(bound, 1);
2296 void set_Bound_index(ir_node *bound, ir_node *idx) {
2297 assert(bound->op == op_Bound);
2298 set_irn_n(bound, 1, idx);
2301 /* Returns the lower bound input of a Bound operation. */
2302 ir_node *get_Bound_lower(ir_node *bound) {
2303 assert(bound->op == op_Bound);
2304 return get_irn_n(bound, 2);
2307 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2308 assert(bound->op == op_Bound);
2309 set_irn_n(bound, 2, lower);
2312 /* Returns the upper bound input of a Bound operation. */
2313 ir_node *get_Bound_upper(ir_node *bound) {
2314 assert(bound->op == op_Bound);
2315 return get_irn_n(bound, 3);
2318 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2319 assert(bound->op == op_Bound);
2320 set_irn_n(bound, 3, upper);
2323 /* Return the operand of a Pin node. */
2324 ir_node *get_Pin_op(ir_node *pin) {
2325 assert(pin->op == op_Pin);
2326 return get_irn_n(pin, 0);
2329 void set_Pin_op(ir_node *pin, ir_node *node) {
2330 assert(pin->op == op_Pin);
2331 set_irn_n(pin, 0, node);
2335 /* returns the graph of a node */
2337 get_irn_irg(const ir_node *node) {
2339 * Do not use get_nodes_Block() here, because this
2340 * will check the pinned state.
2341 * However even a 'wrong' block is always in the proper
2344 if (! is_Block(node))
2345 node = get_irn_n(node, -1);
2346 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2347 node = get_irn_n(node, -1);
2348 assert(get_irn_op(node) == op_Block);
2349 return node->attr.block.irg;
2353 /*----------------------------------------------------------------*/
2354 /* Auxiliary routines */
2355 /*----------------------------------------------------------------*/
2358 skip_Proj (ir_node *node) {
2359 /* don't assert node !!! */
2360 if (node && is_Proj(node)) {
2361 return get_Proj_pred(node);
2368 skip_Tuple (ir_node *node) {
2372 if (!get_opt_normalize()) return node;
2375 node = skip_Id(node);
2376 if (get_irn_op(node) == op_Proj) {
2377 pred = skip_Id(get_Proj_pred(node));
2378 op = get_irn_op(pred);
2381 * Looks strange but calls get_irn_op() only once
2382 * in most often cases.
2384 if (op == op_Proj) { /* nested Tuple ? */
2385 pred = skip_Id(skip_Tuple(pred));
2386 op = get_irn_op(pred);
2388 if (op == op_Tuple) {
2389 node = get_Tuple_pred(pred, get_Proj_proj(node));
2393 else if (op == op_Tuple) {
2394 node = get_Tuple_pred(pred, get_Proj_proj(node));
2401 /* returns operand of node if node is a Cast */
2402 ir_node *skip_Cast (ir_node *node) {
2403 if (node && get_irn_op(node) == op_Cast)
2404 return get_Cast_op(node);
2408 /* returns operand of node if node is a Confirm */
2409 ir_node *skip_Confirm (ir_node *node) {
2410 if (node && get_irn_op(node) == op_Confirm)
2411 return get_Confirm_value(node);
2415 /* skip all high-level ops */
2416 ir_node *skip_HighLevel(ir_node *node) {
2417 if (node && is_op_highlevel(get_irn_op(node)))
2418 return get_irn_n(node, 0);
2423 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2424 * than any other approach, as Id chains are resolved and all point to the real node, or
2425 * all id's are self loops.
2427 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2430 skip_Id (ir_node *node) {
2431 /* don't assert node !!! */
2433 /* Don't use get_Id_pred: We get into an endless loop for
2434 self-referencing Ids. */
2435 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2436 ir_node *rem_pred = node->in[0+1];
2439 assert(get_irn_arity (node) > 0);
2441 node->in[0+1] = node;
2442 res = skip_Id(rem_pred);
2443 if (res->op == op_Id) /* self-loop */ return node;
2445 node->in[0+1] = res;
2452 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2453 * than any other approach, as Id chains are resolved and all point to the real node, or
2454 * all id's are self loops.
2456 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2457 * a little bit "hand optimized".
2459 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2462 skip_Id (ir_node *node) {
2464 /* don't assert node !!! */
2466 if (!node || (node->op != op_Id)) return node;
2468 /* Don't use get_Id_pred(): We get into an endless loop for
2469 self-referencing Ids. */
2470 pred = node->in[0+1];
2472 if (pred->op != op_Id) return pred;
2474 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2475 ir_node *rem_pred, *res;
2477 if (pred->op != op_Id) return pred; /* shortcut */
2480 assert(get_irn_arity (node) > 0);
2482 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2483 res = skip_Id(rem_pred);
2484 if (res->op == op_Id) /* self-loop */ return node;
2486 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2494 void skip_Id_and_store(ir_node **node) {
2497 if (!n || (n->op != op_Id)) return;
2499 /* Don't use get_Id_pred(): We get into an endless loop for
2500 self-referencing Ids. */
2505 (is_Bad)(const ir_node *node) {
2506 return _is_Bad(node);
2510 (is_Start)(const ir_node *node) {
2511 return _is_Start(node);
2515 (is_Const)(const ir_node *node) {
2516 return _is_Const(node);
2520 (is_no_Block)(const ir_node *node) {
2521 return _is_no_Block(node);
2525 (is_Block)(const ir_node *node) {
2526 return _is_Block(node);
2529 /* returns true if node is an Unknown node. */
2531 (is_Unknown)(const ir_node *node) {
2532 return _is_Unknown(node);
2535 /* returns true if node is a Return node. */
2537 (is_Return)(const ir_node *node) {
2538 return _is_Return(node);
2541 /* returns true if node is a Call node. */
2543 (is_Call)(const ir_node *node) {
2544 return _is_Call(node);
2547 /* returns true if node is a Sel node. */
2549 (is_Sel)(const ir_node *node) {
2550 return _is_Sel(node);
2553 /* returns true if node is a Mux node or a Psi with only one condition. */
2555 (is_Mux)(const ir_node *node) {
2556 return _is_Mux(node);
2559 /* returns true if node is a Load node. */
2561 (is_Load)(const ir_node *node) {
2562 return _is_Load(node);
2565 /* returns true if node is a Sync node. */
2567 (is_Sync)(const ir_node *node) {
2568 return _is_Sync(node);
2571 /* returns true if node is a Confirm node. */
2573 (is_Confirm)(const ir_node *node) {
2574 return _is_Confirm(node);
2577 /* returns true if node is a Pin node. */
2579 (is_Pin)(const ir_node *node) {
2580 return _is_Pin(node);
2583 /* returns true if node is a SymConst node. */
2585 (is_SymConst)(const ir_node *node) {
2586 return _is_SymConst(node);
2589 /* returns true if node is a Cond node. */
2591 (is_Cond)(const ir_node *node) {
2592 return _is_Cond(node);
2595 /* returns true if node is a Cmp node. */
2597 (is_Cmp)(const ir_node *node) {
2598 return _is_Cmp(node);
2601 /* returns true if node is an Alloc node. */
2603 (is_Alloc)(const ir_node *node) {
2604 return _is_Alloc(node);
2607 /* returns true if a node is a Jmp node. */
2609 (is_Jmp)(const ir_node *node) {
2610 return _is_Jmp(node);
2614 is_Proj (const ir_node *node) {
2616 return node->op == op_Proj
2617 || (!get_interprocedural_view() && node->op == op_Filter);
2620 /* Returns true if the operation manipulates control flow. */
2622 is_cfop(const ir_node *node) {
2623 return is_cfopcode(get_irn_op(node));
2626 /* Returns true if the operation manipulates interprocedural control flow:
2627 CallBegin, EndReg, EndExcept */
2628 int is_ip_cfop(const ir_node *node) {
2629 return is_ip_cfopcode(get_irn_op(node));
2632 /* Returns true if the operation can change the control flow because
2635 is_fragile_op(const ir_node *node) {
2636 return is_op_fragile(get_irn_op(node));
2639 /* Returns the memory operand of fragile operations. */
2640 ir_node *get_fragile_op_mem(ir_node *node) {
2641 assert(node && is_fragile_op(node));
2643 switch (get_irn_opcode (node)) {
2653 return get_irn_n(node, 0);
2658 assert(0 && "should not be reached");
2663 /* Returns true if the operation is a forking control flow operation. */
2664 int (is_irn_forking)(const ir_node *node) {
2665 return _is_irn_forking(node);
2668 /* Return the type associated with the value produced by n
2669 * if the node remarks this type as it is the case for
2670 * Cast, Const, SymConst and some Proj nodes. */
2671 ir_type *(get_irn_type)(ir_node *node) {
2672 return _get_irn_type(node);
2675 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2677 ir_type *(get_irn_type_attr)(ir_node *node) {
2678 return _get_irn_type_attr(node);
2681 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2682 entity *(get_irn_entity_attr)(ir_node *node) {
2683 return _get_irn_entity_attr(node);
2686 /* Returns non-zero for constant-like nodes. */
2687 int (is_irn_constlike)(const ir_node *node) {
2688 return _is_irn_constlike(node);
2692 * Returns non-zero for nodes that are allowed to have keep-alives and
2693 * are neither Block nor PhiM.
2695 int (is_irn_keep)(const ir_node *node) {
2696 return _is_irn_keep(node);
2699 /* Returns non-zero for nodes that are machine operations. */
2700 int (is_irn_machine_op)(const ir_node *node) {
2701 return _is_irn_machine_op(node);
2704 /* Returns non-zero for nodes that are machine operands. */
2705 int (is_irn_machine_operand)(const ir_node *node) {
2706 return _is_irn_machine_operand(node);
2709 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2710 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2711 return _is_irn_machine_user(node, n);
2715 /* Gets the string representation of the jump prediction .*/
2716 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
2720 case COND_JMP_PRED_NONE: return "no prediction";
2721 case COND_JMP_PRED_TRUE: return "true taken";
2722 case COND_JMP_PRED_FALSE: return "false taken";
2726 /* Returns the conditional jump prediction of a Cond node. */
2727 cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
2728 return _get_Cond_jmp_pred(cond);
2731 /* Sets a new conditional jump prediction. */
2732 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2733 _set_Cond_jmp_pred(cond, pred);
2736 /** the get_type operation must be always implemented and return a firm type */
2737 static ir_type *get_Default_type(ir_node *n) {
2738 return get_unknown_type();
2741 /* Sets the get_type operation for an ir_op_ops. */
2742 ir_op_ops *firm_set_default_get_type(opcode code, ir_op_ops *ops)
2745 case iro_Const: ops->get_type = get_Const_type; break;
2746 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2747 case iro_Cast: ops->get_type = get_Cast_type; break;
2748 case iro_Proj: ops->get_type = get_Proj_type; break;
2750 /* not allowed to be NULL */
2751 if (! ops->get_type)
2752 ops->get_type = get_Default_type;
2758 /** Return the attribute type of a SymConst node if exists */
2759 static ir_type *get_SymConst_attr_type(ir_node *self) {
2760 symconst_kind kind = get_SymConst_kind(self);
2761 if (SYMCONST_HAS_TYPE(kind))
2762 return get_SymConst_type(self);
2766 /** Return the attribute entity of a SymConst node if exists */
2767 static entity *get_SymConst_attr_entity(ir_node *self) {
2768 symconst_kind kind = get_SymConst_kind(self);
2769 if (SYMCONST_HAS_ENT(kind))
2770 return get_SymConst_entity(self);
2774 /** the get_type_attr operation must be always implemented */
2775 static ir_type *get_Null_type(ir_node *n) {
2776 return firm_unknown_type;
2779 /* Sets the get_type operation for an ir_op_ops. */
2780 ir_op_ops *firm_set_default_get_type_attr(opcode code, ir_op_ops *ops)
2783 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2784 case iro_Call: ops->get_type_attr = get_Call_type; break;
2785 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2786 case iro_Free: ops->get_type_attr = get_Free_type; break;
2787 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2789 /* not allowed to be NULL */
2790 if (! ops->get_type_attr)
2791 ops->get_type_attr = get_Null_type;
2797 /** the get_entity_attr operation must be always implemented */
2798 static entity *get_Null_ent(ir_node *n) {
2802 /* Sets the get_type operation for an ir_op_ops. */
2803 ir_op_ops *firm_set_default_get_entity_attr(opcode code, ir_op_ops *ops)
2806 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2807 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
2809 /* not allowed to be NULL */
2810 if (! ops->get_entity_attr)
2811 ops->get_entity_attr = get_Null_ent;
2817 #ifdef DEBUG_libfirm
2818 void dump_irn (ir_node *n) {
2819 int i, arity = get_irn_arity(n);
2820 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2822 ir_node *pred = get_irn_n(n, -1);
2823 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2824 get_irn_node_nr(pred), (void *)pred);
2826 printf(" preds: \n");
2827 for (i = 0; i < arity; ++i) {
2828 ir_node *pred = get_irn_n(n, i);
2829 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2830 get_irn_node_nr(pred), (void *)pred);
2834 #else /* DEBUG_libfirm */
2835 void dump_irn (ir_node *n) {}
2836 #endif /* DEBUG_libfirm */