3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier, Michael Beck
9 * Copyright: (c) 1998-2006 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredgekinds.h"
31 #include "iredges_t.h"
36 /* some constants fixing the positions of nodes predecessors in the in array */
38 #define CALL_PARAM_OFFSET 2
39 #define FUNCCALL_PARAM_OFFSET 1
40 #define SEL_INDEX_OFFSET 2
41 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
42 #define END_KEEPALIVE_OFFSET 0
/** String names for the 16 pn_Cmp relation codes, indexed by pnc value. */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * returns the pnc name from an pnc constant
 *
 * @param pnc  a pn_Cmp relation code, must be in [0, 15]
 * @return the static string name of the relation; do not free
 */
const char *get_pnc_string(int pnc) {
	/* guard against out-of-range codes: the table has exactly 16 entries */
	assert(pnc >= 0 && pnc < (int)(sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0])));
	return pnc_name_arr[pnc];
}
59 * Calculates the negated (Complement(R)) pnc condition.
61 int get_negated_pnc(int pnc, ir_mode *mode) {
64 /* do NOT add the Uo bit for non-floating point values */
65 if (! mode_is_float(mode))
71 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
73 get_inversed_pnc(int pnc) {
74 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
75 int lesser = pnc & pn_Cmp_Lt;
76 int greater = pnc & pn_Cmp_Gt;
78 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/** String names for the projections out of Start. */
const char *pns_name_arr [] = {
	"initial_exec", "global_store",
	"frame_base", "globals", "args"
};

/** String names for the SymConst kinds. */
const char *symconst_name_arr [] = {
	"type_tag", "size", "addr_name", "addr_ent"
};
93 * Indicates, whether additional data can be registered to ir nodes.
94 * If set to 1, this is not possible anymore.
96 static int forbid_new_data = 0;
99 * The amount of additional space for custom data to be allocated upon
100 * creating a new node.
102 unsigned firm_add_node_size = 0;
105 /* register new space for every node */
106 unsigned register_additional_node_data(unsigned size) {
107 assert(!forbid_new_data && "Too late to register additional node data");
112 return firm_add_node_size += size;
118 /* Forbid the addition of new data to an ir node. */
123 * irnode constructor.
124 * Create a new irnode in irg, with an op, mode, arity and
125 * some incoming irnodes.
126 * If arity is negative, a node with a dynamic array is created.
129 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
130 int arity, ir_node **in)
133 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
137 assert(irg && op && mode);
138 p = obstack_alloc (irg->obst, node_size);
139 memset(p, 0, node_size);
140 res = (ir_node *) (p + firm_add_node_size);
142 res->kind = k_ir_node;
146 res->node_idx = irg_register_node_idx(irg, res);
151 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
153 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
154 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
158 set_irn_dbg_info(res, db);
162 res->node_nr = get_irp_new_node_nr();
165 for(i = 0; i < EDGE_KIND_LAST; ++i)
166 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
168 is_bl = is_Block(res);
169 for (i = is_bl; i <= arity; ++i)
170 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
172 hook_new_node(irg, res);
177 /*-- getting some parameters from ir_nodes --*/
180 (is_ir_node)(const void *thing) {
181 return _is_ir_node(thing);
185 (get_irn_intra_arity)(const ir_node *node) {
186 return _get_irn_intra_arity(node);
190 (get_irn_inter_arity)(const ir_node *node) {
191 return _get_irn_inter_arity(node);
194 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
197 (get_irn_arity)(const ir_node *node) {
198 return _get_irn_arity(node);
201 /* Returns the array with ins. This array is shifted with respect to the
202 array accessed by get_irn_n: The block operand is at position 0 not -1.
203 (@@@ This should be changed.)
204 The order of the predecessors in this array is not guaranteed, except that
205 lists of operands as predecessors of Block or arguments of a Call are
208 get_irn_in (const ir_node *node) {
210 if (get_interprocedural_view()) { /* handle Filter and Block specially */
211 if (get_irn_opcode(node) == iro_Filter) {
212 assert(node->attr.filter.in_cg);
213 return node->attr.filter.in_cg;
214 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
215 return node->attr.block.in_cg;
217 /* else fall through */
223 set_irn_in (ir_node *node, int arity, ir_node **in) {
226 ir_graph *irg = current_ir_graph;
228 if (get_interprocedural_view()) { /* handle Filter and Block specially */
229 if (get_irn_opcode(node) == iro_Filter) {
230 assert(node->attr.filter.in_cg);
231 arr = &node->attr.filter.in_cg;
232 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
233 arr = &node->attr.block.in_cg;
241 for (i = 0; i < arity; i++) {
242 if (i < ARR_LEN(*arr)-1)
243 edges_notify_edge(node, i, in[i], (*arr)[i+1], irg);
245 edges_notify_edge(node, i, in[i], NULL, irg);
247 for(;i < ARR_LEN(*arr)-1; i++) {
248 edges_notify_edge(node, i, NULL, (*arr)[i+1], irg);
251 if (arity != ARR_LEN(*arr) - 1) {
252 ir_node * block = (*arr)[0];
253 *arr = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
256 fix_backedges(irg->obst, node);
258 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
262 (get_irn_intra_n)(const ir_node *node, int n) {
263 return _get_irn_intra_n (node, n);
267 (get_irn_inter_n)(const ir_node *node, int n) {
268 return _get_irn_inter_n (node, n);
271 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
274 (get_irn_n)(const ir_node *node, int n) {
275 return _get_irn_n(node, n);
279 set_irn_n (ir_node *node, int n, ir_node *in) {
280 assert(node && node->kind == k_ir_node);
282 assert(n < get_irn_arity(node));
283 assert(in && in->kind == k_ir_node);
285 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
286 /* Change block pred in both views! */
287 node->in[n + 1] = in;
288 assert(node->attr.filter.in_cg);
289 node->attr.filter.in_cg[n + 1] = in;
292 if (get_interprocedural_view()) { /* handle Filter and Block specially */
293 if (get_irn_opcode(node) == iro_Filter) {
294 assert(node->attr.filter.in_cg);
295 node->attr.filter.in_cg[n + 1] = in;
297 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
298 node->attr.block.in_cg[n + 1] = in;
301 /* else fall through */
305 hook_set_irn_n(node, n, in, node->in[n + 1]);
307 /* Here, we rely on src and tgt being in the current ir graph */
308 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
310 node->in[n + 1] = in;
314 (get_irn_deps)(const ir_node *node)
316 return _get_irn_deps(node);
320 (get_irn_dep)(const ir_node *node, int pos)
322 return _get_irn_dep(node, pos);
326 (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
328 _set_irn_dep(node, pos, dep);
331 int add_irn_dep(ir_node *node, ir_node *dep)
335 if (node->deps == NULL) {
336 node->deps = NEW_ARR_F(ir_node *, 1);
343 for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
344 if(node->deps[i] == NULL)
347 if(node->deps[i] == dep)
351 if(first_zero >= 0) {
352 node->deps[first_zero] = dep;
357 ARR_APP1(ir_node *, node->deps, dep);
362 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
367 void add_irn_deps(ir_node *tgt, ir_node *src)
371 for(i = 0, n = get_irn_deps(src); i < n; ++i)
372 add_irn_dep(tgt, get_irn_dep(src, i));
377 (get_irn_mode)(const ir_node *node) {
378 return _get_irn_mode(node);
382 (set_irn_mode)(ir_node *node, ir_mode *mode)
384 _set_irn_mode(node, mode);
388 get_irn_modecode (const ir_node *node)
391 return node->mode->code;
394 /** Gets the string representation of the mode .*/
396 get_irn_modename (const ir_node *node)
399 return get_mode_name(node->mode);
403 get_irn_modeident (const ir_node *node)
406 return get_mode_ident(node->mode);
410 (get_irn_op)(const ir_node *node) {
411 return _get_irn_op(node);
414 /* should be private to the library: */
416 (set_irn_op)(ir_node *node, ir_op *op) {
417 _set_irn_op(node, op);
421 (get_irn_opcode)(const ir_node *node)
423 return _get_irn_opcode(node);
427 get_irn_opname (const ir_node *node)
430 if ((get_irn_op((ir_node *)node) == op_Phi) &&
431 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
432 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
433 return get_id_str(node->op->name);
437 get_irn_opident (const ir_node *node)
440 return node->op->name;
444 (get_irn_visited)(const ir_node *node)
446 return _get_irn_visited(node);
450 (set_irn_visited)(ir_node *node, unsigned long visited)
452 _set_irn_visited(node, visited);
456 (mark_irn_visited)(ir_node *node) {
457 _mark_irn_visited(node);
461 (irn_not_visited)(const ir_node *node) {
462 return _irn_not_visited(node);
466 (irn_visited)(const ir_node *node) {
467 return _irn_visited(node);
471 (set_irn_link)(ir_node *node, void *link) {
472 _set_irn_link(node, link);
476 (get_irn_link)(const ir_node *node) {
477 return _get_irn_link(node);
481 (get_irn_pinned)(const ir_node *node) {
482 return _get_irn_pinned(node);
486 (is_irn_pinned_in_irg) (const ir_node *node) {
487 return _is_irn_pinned_in_irg(node);
490 void set_irn_pinned(ir_node *node, op_pin_state state) {
491 /* due to optimization an opt may be turned into a Tuple */
492 if (get_irn_op(node) == op_Tuple)
495 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
496 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
498 node->attr.except.pin_state = state;
501 #ifdef DO_HEAPANALYSIS
502 /* Access the abstract interpretation information of a node.
503 Returns NULL if no such information is available. */
504 struct abstval *get_irn_abst_value(ir_node *n) {
507 /* Set the abstract interpretation information of a node. */
508 void set_irn_abst_value(ir_node *n, struct abstval *os) {
511 struct section *firm_get_irn_section(ir_node *n) {
514 void firm_set_irn_section(ir_node *n, struct section *s) {
518 /* Dummies needed for firmjni. */
519 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
520 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
521 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
522 void firm_set_irn_section(ir_node *n, struct section *s) {}
523 #endif /* DO_HEAPANALYSIS */
526 /* Outputs a unique number for this node */
527 long get_irn_node_nr(const ir_node *node) {
530 return node->node_nr;
532 return (long)PTR_TO_INT(node);
537 get_irn_const_attr (ir_node *node)
539 assert(node->op == op_Const);
540 return node->attr.con;
544 get_irn_proj_attr (ir_node *node)
546 assert(node->op == op_Proj);
547 return node->attr.proj;
551 get_irn_alloc_attr (ir_node *node)
553 assert(node->op == op_Alloc);
554 return node->attr.alloc;
558 get_irn_free_attr (ir_node *node)
560 assert(node->op == op_Free);
561 return node->attr.free;
565 get_irn_symconst_attr (ir_node *node)
567 assert(node->op == op_SymConst);
568 return node->attr.symc;
572 get_irn_call_attr (ir_node *node)
574 assert(node->op == op_Call);
575 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
579 get_irn_sel_attr (ir_node *node)
581 assert(node->op == op_Sel);
582 return node->attr.sel;
586 get_irn_phi_attr (ir_node *node)
588 assert(node->op == op_Phi);
589 return node->attr.phi0_pos;
593 get_irn_block_attr (ir_node *node)
595 assert(node->op == op_Block);
596 return node->attr.block;
600 get_irn_load_attr (ir_node *node)
602 assert(node->op == op_Load);
603 return node->attr.load;
607 get_irn_store_attr (ir_node *node)
609 assert(node->op == op_Store);
610 return node->attr.store;
614 get_irn_except_attr (ir_node *node)
616 assert(node->op == op_Div || node->op == op_Quot ||
617 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
618 return node->attr.except;
622 get_irn_generic_attr (ir_node *node) {
626 unsigned (get_irn_idx)(const ir_node *node) {
627 assert(is_ir_node(node));
628 return _get_irn_idx(node);
631 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
633 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
634 if (get_irn_n(node, i) == arg)
640 /** manipulate fields of individual nodes **/
642 /* this works for all except Block */
644 get_nodes_block (const ir_node *node) {
645 assert(!(node->op == op_Block));
646 assert(is_irn_pinned_in_irg(node) && "block info may be incorrect");
647 return get_irn_n(node, -1);
651 set_nodes_block (ir_node *node, ir_node *block) {
652 assert(!(node->op == op_Block));
653 set_irn_n(node, -1, block);
656 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
657 * from Start. If so returns frame type, else Null. */
658 ir_type *is_frame_pointer(ir_node *n) {
659 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
660 ir_node *start = get_Proj_pred(n);
661 if (get_irn_op(start) == op_Start) {
662 return get_irg_frame_type(get_irn_irg(start));
668 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
669 * from Start. If so returns global type, else Null. */
670 ir_type *is_globals_pointer(ir_node *n) {
671 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
672 ir_node *start = get_Proj_pred(n);
673 if (get_irn_op(start) == op_Start) {
674 return get_glob_type();
680 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
681 * from Start. If so returns tls type, else Null. */
682 ir_type *is_tls_pointer(ir_node *n) {
683 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
684 ir_node *start = get_Proj_pred(n);
685 if (get_irn_op(start) == op_Start) {
686 return get_tls_type();
692 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
693 * from Start. If so returns 1, else 0. */
694 int is_value_arg_pointer(ir_node *n) {
695 if ((get_irn_op(n) == op_Proj) &&
696 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
697 (get_irn_op(get_Proj_pred(n)) == op_Start))
702 /* Returns an array with the predecessors of the Block. Depending on
703 the implementation of the graph data structure this can be a copy of
704 the internal representation of predecessors as well as the internal
705 array itself. Therefore writing to this array might obstruct the ir. */
707 get_Block_cfgpred_arr (ir_node *node)
709 assert((node->op == op_Block));
710 return (ir_node **)&(get_irn_in(node)[1]);
714 (get_Block_n_cfgpreds)(const ir_node *node) {
715 return _get_Block_n_cfgpreds(node);
719 (get_Block_cfgpred)(ir_node *node, int pos) {
720 return _get_Block_cfgpred(node, pos);
724 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
725 assert(node->op == op_Block);
726 set_irn_n(node, pos, pred);
730 (get_Block_cfgpred_block)(ir_node *node, int pos) {
731 return _get_Block_cfgpred_block(node, pos);
735 get_Block_matured (ir_node *node) {
736 assert(node->op == op_Block);
737 return (int)node->attr.block.matured;
741 set_Block_matured (ir_node *node, int matured) {
742 assert(node->op == op_Block);
743 node->attr.block.matured = matured;
747 (get_Block_block_visited)(ir_node *node) {
748 return _get_Block_block_visited(node);
752 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
753 _set_Block_block_visited(node, visit);
756 /* For this current_ir_graph must be set. */
758 (mark_Block_block_visited)(ir_node *node) {
759 _mark_Block_block_visited(node);
763 (Block_not_block_visited)(ir_node *node) {
764 return _Block_not_block_visited(node);
768 get_Block_graph_arr (ir_node *node, int pos) {
769 assert(node->op == op_Block);
770 return node->attr.block.graph_arr[pos+1];
774 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
775 assert(node->op == op_Block);
776 node->attr.block.graph_arr[pos+1] = value;
779 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
780 assert(node->op == op_Block);
781 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
782 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
783 node->attr.block.in_cg[0] = NULL;
784 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
786 /* Fix backedge array. fix_backedges() operates depending on
787 interprocedural_view. */
788 int ipv = get_interprocedural_view();
789 set_interprocedural_view(1);
790 fix_backedges(current_ir_graph->obst, node);
791 set_interprocedural_view(ipv);
794 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
797 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
798 assert(node->op == op_Block &&
799 node->attr.block.in_cg &&
800 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
801 node->attr.block.in_cg[pos + 1] = pred;
804 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
805 assert(node->op == op_Block);
806 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
809 int get_Block_cg_n_cfgpreds(ir_node * node) {
810 assert(node->op == op_Block);
811 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
814 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
815 assert(node->op == op_Block && node->attr.block.in_cg);
816 return node->attr.block.in_cg[pos + 1];
819 void remove_Block_cg_cfgpred_arr(ir_node * node) {
820 assert(node->op == op_Block);
821 node->attr.block.in_cg = NULL;
824 ir_node *(set_Block_dead)(ir_node *block) {
825 return _set_Block_dead(block);
828 int (is_Block_dead)(const ir_node *block) {
829 return _is_Block_dead(block);
832 ir_extblk *get_Block_extbb(const ir_node *block) {
834 assert(is_Block(block));
835 res = block->attr.block.extblk;
836 assert(res == NULL || is_ir_extbb(res));
840 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
841 assert(is_Block(block));
842 assert(extblk == NULL || is_ir_extbb(extblk));
843 block->attr.block.extblk = extblk;
847 get_End_n_keepalives(ir_node *end) {
848 assert(end->op == op_End);
849 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
853 get_End_keepalive(ir_node *end, int pos) {
854 assert(end->op == op_End);
855 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
859 add_End_keepalive (ir_node *end, ir_node *ka) {
861 ir_graph *irg = get_irn_irg(end);
863 assert(end->op == op_End);
864 l = ARR_LEN(end->in);
865 ARR_APP1(ir_node *, end->in, ka);
866 edges_notify_edge(end, l - 1, end->in[l], NULL, irg);
870 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
871 assert(end->op == op_End);
872 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
875 /* Set new keep-alives */
876 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
878 ir_graph *irg = get_irn_irg(end);
880 /* notify that edges are deleted */
881 for (i = 1 + END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in); ++i) {
882 edges_notify_edge(end, i, end->in[i], NULL, irg);
884 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
886 for (i = 0; i < n; ++i) {
887 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
888 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, NULL, end->in[1 + END_KEEPALIVE_OFFSET + i], irg);
891 /* Set new keep-alives from old keep-alives, skipping irn */
892 void remove_End_keepalive(ir_node *end, ir_node *irn) {
893 int n = get_End_n_keepalives(end);
897 NEW_ARR_A(ir_node *, in, n);
899 for (idx = i = 0; i < n; ++i) {
900 ir_node *old_ka = get_End_keepalive(end, i);
907 /* set new keep-alives */
908 set_End_keepalives(end, idx, in);
912 free_End (ir_node *end) {
913 assert(end->op == op_End);
916 end->in = NULL; /* @@@ make sure we get an error if we use the
917 in array afterwards ... */
920 /* Return the target address of an IJmp */
921 ir_node *get_IJmp_target(ir_node *ijmp) {
922 assert(ijmp->op == op_IJmp);
923 return get_irn_n(ijmp, 0);
926 /** Sets the target address of an IJmp */
927 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
928 assert(ijmp->op == op_IJmp);
929 set_irn_n(ijmp, 0, tgt);
933 > Implementing the case construct (which is where the constant Proj node is
934 > important) involves far more than simply determining the constant values.
935 > We could argue that this is more properly a function of the translator from
936 > Firm to the target machine. That could be done if there was some way of
937 > projecting "default" out of the Cond node.
938 I know it's complicated.
939 Basically there are two problems:
940 - determining the gaps between the projs
941 - determining the biggest case constant to know the proj number for
943 I see several solutions:
944 1. Introduce a ProjDefault node. Solves both problems.
945 This means to extend all optimizations executed during construction.
946 2. Give the Cond node for switch two flavors:
947 a) there are no gaps in the projs (existing flavor)
948 b) gaps may exist, default proj is still the Proj with the largest
949 projection number. This covers also the gaps.
950 3. Fix the semantic of the Cond to that of 2b)
952 Solution 2 seems to be the best:
953 Computing the gaps in the Firm representation is not too hard, i.e.,
954 libFIRM can implement a routine that transforms between the two
955 flavours. This is also possible for 1) but 2) does not require to
956 change any existing optimization.
957 Further it should be far simpler to determine the biggest constant than
959 I don't want to choose 3) as 2a) seems to have advantages for
960 dataflow analysis and 3) does not allow to convert the representation to
964 get_Cond_selector (ir_node *node) {
965 assert(node->op == op_Cond);
966 return get_irn_n(node, 0);
970 set_Cond_selector (ir_node *node, ir_node *selector) {
971 assert(node->op == op_Cond);
972 set_irn_n(node, 0, selector);
976 get_Cond_kind (ir_node *node) {
977 assert(node->op == op_Cond);
978 return node->attr.cond.kind;
982 set_Cond_kind (ir_node *node, cond_kind kind) {
983 assert(node->op == op_Cond);
984 node->attr.cond.kind = kind;
988 get_Cond_defaultProj (ir_node *node) {
989 assert(node->op == op_Cond);
990 return node->attr.cond.default_proj;
994 get_Return_mem (ir_node *node) {
995 assert(node->op == op_Return);
996 return get_irn_n(node, 0);
1000 set_Return_mem (ir_node *node, ir_node *mem) {
1001 assert(node->op == op_Return);
1002 set_irn_n(node, 0, mem);
1006 get_Return_n_ress (ir_node *node) {
1007 assert(node->op == op_Return);
1008 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1012 get_Return_res_arr (ir_node *node)
1014 assert((node->op == op_Return));
1015 if (get_Return_n_ress(node) > 0)
1016 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1023 set_Return_n_res (ir_node *node, int results) {
1024 assert(node->op == op_Return);
1029 get_Return_res (ir_node *node, int pos) {
1030 assert(node->op == op_Return);
1031 assert(get_Return_n_ress(node) > pos);
1032 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1036 set_Return_res (ir_node *node, int pos, ir_node *res){
1037 assert(node->op == op_Return);
1038 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1041 tarval *(get_Const_tarval)(ir_node *node) {
1042 return _get_Const_tarval(node);
1046 set_Const_tarval (ir_node *node, tarval *con) {
1047 assert(node->op == op_Const);
1048 node->attr.con.tv = con;
1051 cnst_classify_t (classify_Const)(ir_node *node)
1053 return _classify_Const(node);
1057 /* The source language type. Must be an atomic type. Mode of type must
1058 be mode of node. For tarvals from entities type must be pointer to
1061 get_Const_type (ir_node *node) {
1062 assert(node->op == op_Const);
1063 return node->attr.con.tp;
1067 set_Const_type (ir_node *node, ir_type *tp) {
1068 assert(node->op == op_Const);
1069 if (tp != firm_unknown_type) {
1070 assert(is_atomic_type(tp));
1071 assert(get_type_mode(tp) == get_irn_mode(node));
1073 node->attr.con.tp = tp;
1078 get_SymConst_kind (const ir_node *node) {
1079 assert(node->op == op_SymConst);
1080 return node->attr.symc.num;
1084 set_SymConst_kind (ir_node *node, symconst_kind num) {
1085 assert(node->op == op_SymConst);
1086 node->attr.symc.num = num;
1090 get_SymConst_type (ir_node *node) {
1091 assert( (node->op == op_SymConst)
1092 && (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1093 return node->attr.symc.sym.type_p = skip_tid(node->attr.symc.sym.type_p);
1097 set_SymConst_type (ir_node *node, ir_type *tp) {
1098 assert( (node->op == op_SymConst)
1099 && (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1100 node->attr.symc.sym.type_p = tp;
1104 get_SymConst_name (ir_node *node) {
1105 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1106 return node->attr.symc.sym.ident_p;
1110 set_SymConst_name (ir_node *node, ident *name) {
1111 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1112 node->attr.symc.sym.ident_p = name;
1116 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1117 entity *get_SymConst_entity (ir_node *node) {
1118 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1119 return node->attr.symc.sym.entity_p;
1122 void set_SymConst_entity (ir_node *node, entity *ent) {
1123 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1124 node->attr.symc.sym.entity_p = ent;
1127 ir_enum_const *get_SymConst_enum (ir_node *node) {
1128 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1129 return node->attr.symc.sym.enum_p;
1132 void set_SymConst_enum (ir_node *node, ir_enum_const *ec) {
1133 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1134 node->attr.symc.sym.enum_p = ec;
1137 union symconst_symbol
1138 get_SymConst_symbol (ir_node *node) {
1139 assert(node->op == op_SymConst);
1140 return node->attr.symc.sym;
1144 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1145 assert(node->op == op_SymConst);
1146 node->attr.symc.sym = sym;
1150 get_SymConst_value_type (ir_node *node) {
1151 assert(node->op == op_SymConst);
1152 if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
1153 return node->attr.symc.tp;
1157 set_SymConst_value_type (ir_node *node, ir_type *tp) {
1158 assert(node->op == op_SymConst);
1159 node->attr.symc.tp = tp;
1163 get_Sel_mem (ir_node *node) {
1164 assert(node->op == op_Sel);
1165 return get_irn_n(node, 0);
1169 set_Sel_mem (ir_node *node, ir_node *mem) {
1170 assert(node->op == op_Sel);
1171 set_irn_n(node, 0, mem);
1175 get_Sel_ptr (ir_node *node) {
1176 assert(node->op == op_Sel);
1177 return get_irn_n(node, 1);
1181 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1182 assert(node->op == op_Sel);
1183 set_irn_n(node, 1, ptr);
1187 get_Sel_n_indexs (ir_node *node) {
1188 assert(node->op == op_Sel);
1189 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1193 get_Sel_index_arr (ir_node *node)
1195 assert((node->op == op_Sel));
1196 if (get_Sel_n_indexs(node) > 0)
1197 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1203 get_Sel_index (ir_node *node, int pos) {
1204 assert(node->op == op_Sel);
1205 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1209 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1210 assert(node->op == op_Sel);
1211 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1215 get_Sel_entity (ir_node *node) {
1216 assert(node->op == op_Sel);
1217 return node->attr.sel.ent;
1221 set_Sel_entity (ir_node *node, entity *ent) {
1222 assert(node->op == op_Sel);
1223 node->attr.sel.ent = ent;
1227 /* For unary and binary arithmetic operations the access to the
1228 operands can be factored out. Left is the first, right the
1229 second arithmetic value as listed in tech report 0999-33.
1230 unops are: Minus, Abs, Not, Conv, Cast
1231 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1232 Shr, Shrs, Rotate, Cmp */
1236 get_Call_mem (ir_node *node) {
1237 assert(node->op == op_Call);
1238 return get_irn_n(node, 0);
1242 set_Call_mem (ir_node *node, ir_node *mem) {
1243 assert(node->op == op_Call);
1244 set_irn_n(node, 0, mem);
1248 get_Call_ptr (ir_node *node) {
1249 assert(node->op == op_Call);
1250 return get_irn_n(node, 1);
1254 set_Call_ptr (ir_node *node, ir_node *ptr) {
1255 assert(node->op == op_Call);
1256 set_irn_n(node, 1, ptr);
1260 get_Call_param_arr (ir_node *node) {
1261 assert(node->op == op_Call);
1262 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1266 get_Call_n_params (ir_node *node) {
1267 assert(node->op == op_Call);
1268 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1272 get_Call_arity (ir_node *node) {
1273 assert(node->op == op_Call);
1274 return get_Call_n_params(node);
1278 set_Call_arity (ir_node *node, ir_node *arity) {
1279 assert(node->op == op_Call);
1284 get_Call_param (ir_node *node, int pos) {
1285 assert(node->op == op_Call);
1286 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1290 set_Call_param (ir_node *node, int pos, ir_node *param) {
1291 assert(node->op == op_Call);
1292 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1296 get_Call_type (ir_node *node) {
1297 assert(node->op == op_Call);
1298 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1302 set_Call_type (ir_node *node, ir_type *tp) {
1303 assert(node->op == op_Call);
1304 assert((get_unknown_type() == tp) || is_Method_type(tp));
1305 node->attr.call.cld_tp = tp;
1308 int Call_has_callees(ir_node *node) {
1309 assert(node && node->op == op_Call);
1310 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1311 (node->attr.call.callee_arr != NULL));
1314 int get_Call_n_callees(ir_node * node) {
1315 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1316 return ARR_LEN(node->attr.call.callee_arr);
1319 entity * get_Call_callee(ir_node * node, int pos) {
1320 assert(pos >= 0 && pos < get_Call_n_callees(node));
1321 return node->attr.call.callee_arr[pos];
1324 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1325 assert(node->op == op_Call);
1326 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1327 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1329 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1332 void remove_Call_callee_arr(ir_node * node) {
1333 assert(node->op == op_Call);
1334 node->attr.call.callee_arr = NULL;
1337 ir_node * get_CallBegin_ptr (ir_node *node) {
1338 assert(node->op == op_CallBegin);
1339 return get_irn_n(node, 0);
1341 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1342 assert(node->op == op_CallBegin);
1343 set_irn_n(node, 0, ptr);
1345 ir_node * get_CallBegin_call (ir_node *node) {
1346 assert(node->op == op_CallBegin);
1347 return node->attr.callbegin.call;
1349 void set_CallBegin_call (ir_node *node, ir_node *call) {
1350 assert(node->op == op_CallBegin);
1351 node->attr.callbegin.call = call;
/* Generate left/right accessors for binary operations; op_index gives the
   position of the first data operand in the in array. */
#define BINOP(OP)                                       \
ir_node * get_##OP##_left(ir_node *node) {              \
	assert(node->op == op_##OP);                        \
	return get_irn_n(node, node->op->op_index);         \
}                                                       \
void set_##OP##_left(ir_node *node, ir_node *left) {    \
	assert(node->op == op_##OP);                        \
	set_irn_n(node, node->op->op_index, left);          \
}                                                       \
ir_node *get_##OP##_right(ir_node *node) {              \
	assert(node->op == op_##OP);                        \
	return get_irn_n(node, node->op->op_index + 1);     \
}                                                       \
void set_##OP##_right(ir_node *node, ir_node *right) {  \
	assert(node->op == op_##OP);                        \
	set_irn_n(node, node->op->op_index + 1, right);     \
}

/* Generate the single-operand accessor for unary operations. */
#define UNOP(OP)                                        \
ir_node *get_##OP##_op(ir_node *node) {                 \
	assert(node->op == op_##OP);                        \
	return get_irn_n(node, node->op->op_index);         \
}                                                       \
void set_##OP##_op (ir_node *node, ir_node *op) {       \
	assert(node->op == op_##OP);                        \
	set_irn_n(node, node->op->op_index, op);            \
}
1390 get_Quot_mem (ir_node *node) {
1391 assert(node->op == op_Quot);
1392 return get_irn_n(node, 0);
1396 set_Quot_mem (ir_node *node, ir_node *mem) {
1397 assert(node->op == op_Quot);
1398 set_irn_n(node, 0, mem);
1404 get_DivMod_mem (ir_node *node) {
1405 assert(node->op == op_DivMod);
1406 return get_irn_n(node, 0);
1410 set_DivMod_mem (ir_node *node, ir_node *mem) {
1411 assert(node->op == op_DivMod);
1412 set_irn_n(node, 0, mem);
1418 get_Div_mem (ir_node *node) {
1419 assert(node->op == op_Div);
1420 return get_irn_n(node, 0);
1424 set_Div_mem (ir_node *node, ir_node *mem) {
1425 assert(node->op == op_Div);
1426 set_irn_n(node, 0, mem);
1432 get_Mod_mem (ir_node *node) {
1433 assert(node->op == op_Mod);
1434 return get_irn_n(node, 0);
1438 set_Mod_mem (ir_node *node, ir_node *mem) {
1439 assert(node->op == op_Mod);
1440 set_irn_n(node, 0, mem);
1456 int get_Conv_strict(ir_node *node) {
1457 assert(node->op == op_Conv);
1458 return node->attr.conv.strict;
1461 void set_Conv_strict(ir_node *node, int strict_flag) {
1462 assert(node->op == op_Conv);
1463 node->attr.conv.strict = (char)strict_flag;
1467 get_Cast_type (ir_node *node) {
1468 assert(node->op == op_Cast);
1469 return node->attr.cast.totype;
1473 set_Cast_type (ir_node *node, ir_type *to_tp) {
1474 assert(node->op == op_Cast);
1475 node->attr.cast.totype = to_tp;
/* Checks for upcast.
 *
 * Returns true if the Cast node casts a class type to a super type.
 * Matching pointer levels on both sides are stripped before the class
 * check, so "Sub**" -> "Super**" also counts as an upcast.
 * Requires consistent type information on the graph.
 */
int is_Cast_upcast(ir_node *node) {
  ir_type *totype   = get_Cast_type(node);
  ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
  ir_graph *myirg = get_irn_irg(node);

  /* typeinfo must have been computed for this graph */
  assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);

  /* strip pointer indirections in parallel on both sides */
  while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
    totype   = get_pointer_points_to_type(totype);
    fromtype = get_pointer_points_to_type(fromtype);
  }

  if (!is_Class_type(totype)) return 0;
  /* upcast: the source class is a subclass of the destination class */
  return is_SubClass_of(fromtype, totype);
}
/* Checks for downcast.
 *
 * Returns true if the Cast node casts a class type to a sub type.
 * Matching pointer levels on both sides are stripped before the class
 * check.  Requires consistent type information on the graph.
 */
int is_Cast_downcast(ir_node *node) {
  ir_type *totype   = get_Cast_type(node);
  ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));

  assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);

  /* strip pointer indirections in parallel on both sides */
  while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
    totype   = get_pointer_points_to_type(totype);
    fromtype = get_pointer_points_to_type(fromtype);
  }

  if (!is_Class_type(totype)) return 0;
  /* downcast: the destination class is a subclass of the source class */
  return is_SubClass_of(totype, fromtype);
}
1525 (is_unop)(const ir_node *node) {
1526 return _is_unop(node);
1530 get_unop_op (ir_node *node) {
1531 if (node->op->opar == oparity_unary)
1532 return get_irn_n(node, node->op->op_index);
1534 assert(node->op->opar == oparity_unary);
1539 set_unop_op (ir_node *node, ir_node *op) {
1540 if (node->op->opar == oparity_unary)
1541 set_irn_n(node, node->op->op_index, op);
1543 assert(node->op->opar == oparity_unary);
1547 (is_binop)(const ir_node *node) {
1548 return _is_binop(node);
1552 get_binop_left (ir_node *node) {
1553 if (node->op->opar == oparity_binary)
1554 return get_irn_n(node, node->op->op_index);
1556 assert(node->op->opar == oparity_binary);
1561 set_binop_left (ir_node *node, ir_node *left) {
1562 if (node->op->opar == oparity_binary)
1563 set_irn_n(node, node->op->op_index, left);
1565 assert(node->op->opar == oparity_binary);
1569 get_binop_right (ir_node *node) {
1570 if (node->op->opar == oparity_binary)
1571 return get_irn_n(node, node->op->op_index + 1);
1573 assert(node->op->opar == oparity_binary);
1578 set_binop_right (ir_node *node, ir_node *right) {
1579 if (node->op->opar == oparity_binary)
1580 set_irn_n(node, node->op->op_index + 1, right);
1582 assert(node->op->opar == oparity_binary);
1585 int is_Phi (const ir_node *n) {
1591 if (op == op_Filter) return get_interprocedural_view();
1594 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1595 (get_irn_arity(n) > 0));
/* Returns true for a Phi node with zero predecessors.  Such Phi0 nodes
 * can only exist while the graph is still under construction. */
int is_Phi0 (const ir_node *n) {
  return ((get_irn_op(n) == op_Phi) &&
          (get_irn_arity(n) == 0) &&
          (get_irg_phase_state(get_irn_irg(n)) == phase_building));
}
1609 get_Phi_preds_arr (ir_node *node) {
1610 assert(node->op == op_Phi);
1611 return (ir_node **)&(get_irn_in(node)[1]);
1615 get_Phi_n_preds (ir_node *node) {
1616 assert(is_Phi(node) || is_Phi0(node));
1617 return (get_irn_arity(node));
1621 void set_Phi_n_preds (ir_node *node, int n_preds) {
1622 assert(node->op == op_Phi);
1627 get_Phi_pred (ir_node *node, int pos) {
1628 assert(is_Phi(node) || is_Phi0(node));
1629 return get_irn_n(node, pos);
1633 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1634 assert(is_Phi(node) || is_Phi0(node));
1635 set_irn_n(node, pos, pred);
1639 int is_memop(ir_node *node) {
1640 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1643 ir_node *get_memop_mem (ir_node *node) {
1644 assert(is_memop(node));
1645 return get_irn_n(node, 0);
1648 void set_memop_mem (ir_node *node, ir_node *mem) {
1649 assert(is_memop(node));
1650 set_irn_n(node, 0, mem);
1653 ir_node *get_memop_ptr (ir_node *node) {
1654 assert(is_memop(node));
1655 return get_irn_n(node, 1);
1658 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1659 assert(is_memop(node));
1660 set_irn_n(node, 1, ptr);
1664 get_Load_mem (ir_node *node) {
1665 assert(node->op == op_Load);
1666 return get_irn_n(node, 0);
1670 set_Load_mem (ir_node *node, ir_node *mem) {
1671 assert(node->op == op_Load);
1672 set_irn_n(node, 0, mem);
1676 get_Load_ptr (ir_node *node) {
1677 assert(node->op == op_Load);
1678 return get_irn_n(node, 1);
1682 set_Load_ptr (ir_node *node, ir_node *ptr) {
1683 assert(node->op == op_Load);
1684 set_irn_n(node, 1, ptr);
1688 get_Load_mode (ir_node *node) {
1689 assert(node->op == op_Load);
1690 return node->attr.load.load_mode;
1694 set_Load_mode (ir_node *node, ir_mode *mode) {
1695 assert(node->op == op_Load);
1696 node->attr.load.load_mode = mode;
1700 get_Load_volatility (ir_node *node) {
1701 assert(node->op == op_Load);
1702 return node->attr.load.volatility;
1706 set_Load_volatility (ir_node *node, ir_volatility volatility) {
1707 assert(node->op == op_Load);
1708 node->attr.load.volatility = volatility;
1713 get_Store_mem (ir_node *node) {
1714 assert(node->op == op_Store);
1715 return get_irn_n(node, 0);
1719 set_Store_mem (ir_node *node, ir_node *mem) {
1720 assert(node->op == op_Store);
1721 set_irn_n(node, 0, mem);
1725 get_Store_ptr (ir_node *node) {
1726 assert(node->op == op_Store);
1727 return get_irn_n(node, 1);
1731 set_Store_ptr (ir_node *node, ir_node *ptr) {
1732 assert(node->op == op_Store);
1733 set_irn_n(node, 1, ptr);
1737 get_Store_value (ir_node *node) {
1738 assert(node->op == op_Store);
1739 return get_irn_n(node, 2);
1743 set_Store_value (ir_node *node, ir_node *value) {
1744 assert(node->op == op_Store);
1745 set_irn_n(node, 2, value);
1749 get_Store_volatility (ir_node *node) {
1750 assert(node->op == op_Store);
1751 return node->attr.store.volatility;
1755 set_Store_volatility (ir_node *node, ir_volatility volatility) {
1756 assert(node->op == op_Store);
1757 node->attr.store.volatility = volatility;
1762 get_Alloc_mem (ir_node *node) {
1763 assert(node->op == op_Alloc);
1764 return get_irn_n(node, 0);
1768 set_Alloc_mem (ir_node *node, ir_node *mem) {
1769 assert(node->op == op_Alloc);
1770 set_irn_n(node, 0, mem);
1774 get_Alloc_size (ir_node *node) {
1775 assert(node->op == op_Alloc);
1776 return get_irn_n(node, 1);
1780 set_Alloc_size (ir_node *node, ir_node *size) {
1781 assert(node->op == op_Alloc);
1782 set_irn_n(node, 1, size);
1786 get_Alloc_type (ir_node *node) {
1787 assert(node->op == op_Alloc);
1788 return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
1792 set_Alloc_type (ir_node *node, ir_type *tp) {
1793 assert(node->op == op_Alloc);
1794 node->attr.alloc.type = tp;
1798 get_Alloc_where (ir_node *node) {
1799 assert(node->op == op_Alloc);
1800 return node->attr.alloc.where;
1804 set_Alloc_where (ir_node *node, where_alloc where) {
1805 assert(node->op == op_Alloc);
1806 node->attr.alloc.where = where;
1811 get_Free_mem (ir_node *node) {
1812 assert(node->op == op_Free);
1813 return get_irn_n(node, 0);
1817 set_Free_mem (ir_node *node, ir_node *mem) {
1818 assert(node->op == op_Free);
1819 set_irn_n(node, 0, mem);
1823 get_Free_ptr (ir_node *node) {
1824 assert(node->op == op_Free);
1825 return get_irn_n(node, 1);
1829 set_Free_ptr (ir_node *node, ir_node *ptr) {
1830 assert(node->op == op_Free);
1831 set_irn_n(node, 1, ptr);
1835 get_Free_size (ir_node *node) {
1836 assert(node->op == op_Free);
1837 return get_irn_n(node, 2);
1841 set_Free_size (ir_node *node, ir_node *size) {
1842 assert(node->op == op_Free);
1843 set_irn_n(node, 2, size);
1847 get_Free_type (ir_node *node) {
1848 assert(node->op == op_Free);
1849 return node->attr.free.type = skip_tid(node->attr.free.type);
1853 set_Free_type (ir_node *node, ir_type *tp) {
1854 assert(node->op == op_Free);
1855 node->attr.free.type = tp;
1859 get_Free_where (ir_node *node) {
1860 assert(node->op == op_Free);
1861 return node->attr.free.where;
1865 set_Free_where (ir_node *node, where_alloc where) {
1866 assert(node->op == op_Free);
1867 node->attr.free.where = where;
1870 ir_node **get_Sync_preds_arr (ir_node *node) {
1871 assert(node->op == op_Sync);
1872 return (ir_node **)&(get_irn_in(node)[1]);
1875 int get_Sync_n_preds (ir_node *node) {
1876 assert(node->op == op_Sync);
1877 return (get_irn_arity(node));
1881 void set_Sync_n_preds (ir_node *node, int n_preds) {
1882 assert(node->op == op_Sync);
1886 ir_node *get_Sync_pred (ir_node *node, int pos) {
1887 assert(node->op == op_Sync);
1888 return get_irn_n(node, pos);
1891 void set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1892 assert(node->op == op_Sync);
1893 set_irn_n(node, pos, pred);
/* Add a new Sync predecessor.
 * Appends 'pred' to the in-array and notifies the edge subsystem so the
 * out-edge information stays consistent with the new input. */
void add_Sync_pred (ir_node * node, ir_node *pred) {
  int l;
  ir_graph *irg = get_irn_irg(node);

  assert(node->op == op_Sync);
  /* remember the old length: the new input lands at index l */
  l = ARR_LEN(node->in);
  ARR_APP1(ir_node *, node->in, pred);
  edges_notify_edge(node, l, node->in[l], NULL, irg);
}
1907 /* Returns the source language type of a Proj node. */
1908 ir_type *get_Proj_type(ir_node *n)
1910 ir_type *tp = firm_unknown_type;
1911 ir_node *pred = get_Proj_pred(n);
1913 switch (get_irn_opcode(pred)) {
1916 /* Deal with Start / Call here: we need to know the Proj Nr. */
1917 assert(get_irn_mode(pred) == mode_T);
1918 pred_pred = get_Proj_pred(pred);
1919 if (get_irn_op(pred_pred) == op_Start) {
1920 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1921 tp = get_method_param_type(mtp, get_Proj_proj(n));
1922 } else if (get_irn_op(pred_pred) == op_Call) {
1923 ir_type *mtp = get_Call_type(pred_pred);
1924 tp = get_method_res_type(mtp, get_Proj_proj(n));
1927 case iro_Start: break;
1928 case iro_Call: break;
1930 ir_node *a = get_Load_ptr(pred);
1932 tp = get_entity_type(get_Sel_entity(a));
1941 get_Proj_pred (const ir_node *node) {
1942 assert(is_Proj(node));
1943 return get_irn_n(node, 0);
1947 set_Proj_pred (ir_node *node, ir_node *pred) {
1948 assert(is_Proj(node));
1949 set_irn_n(node, 0, pred);
1953 get_Proj_proj (const ir_node *node) {
1954 assert(is_Proj(node));
1955 if (get_irn_opcode(node) == iro_Proj) {
1956 return node->attr.proj;
1958 assert(get_irn_opcode(node) == iro_Filter);
1959 return node->attr.filter.proj;
1964 set_Proj_proj (ir_node *node, long proj) {
1965 assert(node->op == op_Proj);
1966 node->attr.proj = proj;
1970 get_Tuple_preds_arr (ir_node *node) {
1971 assert(node->op == op_Tuple);
1972 return (ir_node **)&(get_irn_in(node)[1]);
1976 get_Tuple_n_preds (ir_node *node) {
1977 assert(node->op == op_Tuple);
1978 return (get_irn_arity(node));
1983 set_Tuple_n_preds (ir_node *node, int n_preds) {
1984 assert(node->op == op_Tuple);
1989 get_Tuple_pred (ir_node *node, int pos) {
1990 assert(node->op == op_Tuple);
1991 return get_irn_n(node, pos);
1995 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1996 assert(node->op == op_Tuple);
1997 set_irn_n(node, pos, pred);
2001 get_Id_pred (ir_node *node) {
2002 assert(node->op == op_Id);
2003 return get_irn_n(node, 0);
2007 set_Id_pred (ir_node *node, ir_node *pred) {
2008 assert(node->op == op_Id);
2009 set_irn_n(node, 0, pred);
2012 ir_node *get_Confirm_value (ir_node *node) {
2013 assert(node->op == op_Confirm);
2014 return get_irn_n(node, 0);
2016 void set_Confirm_value (ir_node *node, ir_node *value) {
2017 assert(node->op == op_Confirm);
2018 set_irn_n(node, 0, value);
2020 ir_node *get_Confirm_bound (ir_node *node) {
2021 assert(node->op == op_Confirm);
2022 return get_irn_n(node, 1);
2024 void set_Confirm_bound (ir_node *node, ir_node *bound) {
2025 assert(node->op == op_Confirm);
2026 set_irn_n(node, 0, bound);
2028 pn_Cmp get_Confirm_cmp (ir_node *node) {
2029 assert(node->op == op_Confirm);
2030 return node->attr.confirm_cmp;
2032 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
2033 assert(node->op == op_Confirm);
2034 node->attr.confirm_cmp = cmp;
2039 get_Filter_pred (ir_node *node) {
2040 assert(node->op == op_Filter);
2044 set_Filter_pred (ir_node *node, ir_node *pred) {
2045 assert(node->op == op_Filter);
2049 get_Filter_proj(ir_node *node) {
2050 assert(node->op == op_Filter);
2051 return node->attr.filter.proj;
2054 set_Filter_proj (ir_node *node, long proj) {
2055 assert(node->op == op_Filter);
2056 node->attr.filter.proj = proj;
/* Interprocedural (call-graph view) predecessors of Filter nodes.
   Don't use get_irn_arity, get_irn_n in implementation as access
   shall work independent of view!!!
   The in_cg array mirrors the normal in array: slot 0 holds the block,
   data predecessors start at slot 1. */
void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Filter);
  /* (re)allocate the interprocedural arrays when the arity changes */
  if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
    node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
    memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
    /* slot 0 mirrors the block predecessor of the intraprocedural view */
    node->attr.filter.in_cg[0] = node->in[0];
  }
  memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
}

void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
  assert(node->op == op_Filter && node->attr.filter.in_cg &&
         0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
  node->attr.filter.in_cg[pos + 1] = pred;
}

int get_Filter_n_cg_preds(ir_node *node) {
  assert(node->op == op_Filter && node->attr.filter.in_cg);
  return (ARR_LEN(node->attr.filter.in_cg) - 1);
}

ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
  int arity;  /* NOTE(review): declaration restored from context — verify */
  assert(node->op == op_Filter && node->attr.filter.in_cg &&
         0 <= pos);
  arity = ARR_LEN(node->attr.filter.in_cg);
  assert(pos < arity - 1);
  return node->attr.filter.in_cg[pos + 1];
}
2091 ir_node *get_Mux_sel (ir_node *node) {
2092 if (node->op == op_Psi) {
2093 assert(get_irn_arity(node) == 3);
2094 return get_Psi_cond(node, 0);
2096 assert(node->op == op_Mux);
2099 void set_Mux_sel (ir_node *node, ir_node *sel) {
2100 if (node->op == op_Psi) {
2101 assert(get_irn_arity(node) == 3);
2102 set_Psi_cond(node, 0, sel);
2105 assert(node->op == op_Mux);
2110 ir_node *get_Mux_false (ir_node *node) {
2111 if (node->op == op_Psi) {
2112 assert(get_irn_arity(node) == 3);
2113 return get_Psi_default(node);
2115 assert(node->op == op_Mux);
2118 void set_Mux_false (ir_node *node, ir_node *ir_false) {
2119 if (node->op == op_Psi) {
2120 assert(get_irn_arity(node) == 3);
2121 set_Psi_default(node, ir_false);
2124 assert(node->op == op_Mux);
2125 node->in[2] = ir_false;
2129 ir_node *get_Mux_true (ir_node *node) {
2130 if (node->op == op_Psi) {
2131 assert(get_irn_arity(node) == 3);
2132 return get_Psi_val(node, 0);
2134 assert(node->op == op_Mux);
2137 void set_Mux_true (ir_node *node, ir_node *ir_true) {
2138 if (node->op == op_Psi) {
2139 assert(get_irn_arity(node) == 3);
2140 set_Psi_val(node, 0, ir_true);
2143 assert(node->op == op_Mux);
2144 node->in[3] = ir_true;
2149 ir_node *get_Psi_cond (ir_node *node, int pos) {
2150 int num_conds = get_Psi_n_conds(node);
2151 assert(node->op == op_Psi);
2152 assert(pos < num_conds);
2153 return get_irn_n(node, 2 * pos);
2156 void set_Psi_cond (ir_node *node, int pos, ir_node *cond) {
2157 int num_conds = get_Psi_n_conds(node);
2158 assert(node->op == op_Psi);
2159 assert(pos < num_conds);
2160 set_irn_n(node, 2 * pos, cond);
2163 ir_node *get_Psi_val (ir_node *node, int pos) {
2164 int num_vals = get_Psi_n_conds(node);
2165 assert(node->op == op_Psi);
2166 assert(pos < num_vals);
2167 return get_irn_n(node, 2 * pos + 1);
2170 void set_Psi_val (ir_node *node, int pos, ir_node *val) {
2171 int num_vals = get_Psi_n_conds(node);
2172 assert(node->op == op_Psi);
2173 assert(pos < num_vals);
2174 set_irn_n(node, 2 * pos + 1, val);
2177 ir_node *get_Psi_default(ir_node *node) {
2178 int def_pos = get_irn_arity(node) - 1;
2179 assert(node->op == op_Psi);
2180 return get_irn_n(node, def_pos);
2183 void set_Psi_default(ir_node *node, ir_node *val) {
2184 int def_pos = get_irn_arity(node);
2185 assert(node->op == op_Psi);
2186 set_irn_n(node, def_pos, val);
/* Returns the number of condition/value pairs of a Psi node
 * (out-of-line body for the inlineable _get_Psi_n_conds). */
int (get_Psi_n_conds)(ir_node *node) {
  return _get_Psi_n_conds(node);
}
2194 ir_node *get_CopyB_mem (ir_node *node) {
2195 assert(node->op == op_CopyB);
2196 return get_irn_n(node, 0);
2199 void set_CopyB_mem (ir_node *node, ir_node *mem) {
2200 assert(node->op == op_CopyB);
2201 set_irn_n(node, 0, mem);
2204 ir_node *get_CopyB_dst (ir_node *node) {
2205 assert(node->op == op_CopyB);
2206 return get_irn_n(node, 1);
2209 void set_CopyB_dst (ir_node *node, ir_node *dst) {
2210 assert(node->op == op_CopyB);
2211 set_irn_n(node, 1, dst);
2214 ir_node *get_CopyB_src (ir_node *node) {
2215 assert(node->op == op_CopyB);
2216 return get_irn_n(node, 2);
2219 void set_CopyB_src (ir_node *node, ir_node *src) {
2220 assert(node->op == op_CopyB);
2221 set_irn_n(node, 2, src);
2224 ir_type *get_CopyB_type(ir_node *node) {
2225 assert(node->op == op_CopyB);
2226 return node->attr.copyb.data_type;
2229 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2230 assert(node->op == op_CopyB && data_type);
2231 node->attr.copyb.data_type = data_type;
2236 get_InstOf_type (ir_node *node) {
2237 assert(node->op = op_InstOf);
2238 return node->attr.instof.type;
2242 set_InstOf_type (ir_node *node, ir_type *type) {
2243 assert(node->op = op_InstOf);
2244 node->attr.instof.type = type;
2248 get_InstOf_store (ir_node *node) {
2249 assert(node->op = op_InstOf);
2250 return get_irn_n(node, 0);
2254 set_InstOf_store (ir_node *node, ir_node *obj) {
2255 assert(node->op = op_InstOf);
2256 set_irn_n(node, 0, obj);
2260 get_InstOf_obj (ir_node *node) {
2261 assert(node->op = op_InstOf);
2262 return get_irn_n(node, 1);
2266 set_InstOf_obj (ir_node *node, ir_node *obj) {
2267 assert(node->op = op_InstOf);
2268 set_irn_n(node, 1, obj);
2271 /* Returns the memory input of a Raise operation. */
2273 get_Raise_mem (ir_node *node) {
2274 assert(node->op == op_Raise);
2275 return get_irn_n(node, 0);
2279 set_Raise_mem (ir_node *node, ir_node *mem) {
2280 assert(node->op == op_Raise);
2281 set_irn_n(node, 0, mem);
2285 get_Raise_exo_ptr (ir_node *node) {
2286 assert(node->op == op_Raise);
2287 return get_irn_n(node, 1);
2291 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
2292 assert(node->op == op_Raise);
2293 set_irn_n(node, 1, exo_ptr);
2298 /* Returns the memory input of a Bound operation. */
2299 ir_node *get_Bound_mem(ir_node *bound) {
2300 assert(bound->op == op_Bound);
2301 return get_irn_n(bound, 0);
2304 void set_Bound_mem (ir_node *bound, ir_node *mem) {
2305 assert(bound->op == op_Bound);
2306 set_irn_n(bound, 0, mem);
2309 /* Returns the index input of a Bound operation. */
2310 ir_node *get_Bound_index(ir_node *bound) {
2311 assert(bound->op == op_Bound);
2312 return get_irn_n(bound, 1);
2315 void set_Bound_index(ir_node *bound, ir_node *idx) {
2316 assert(bound->op == op_Bound);
2317 set_irn_n(bound, 1, idx);
2320 /* Returns the lower bound input of a Bound operation. */
2321 ir_node *get_Bound_lower(ir_node *bound) {
2322 assert(bound->op == op_Bound);
2323 return get_irn_n(bound, 2);
2326 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2327 assert(bound->op == op_Bound);
2328 set_irn_n(bound, 2, lower);
2331 /* Returns the upper bound input of a Bound operation. */
2332 ir_node *get_Bound_upper(ir_node *bound) {
2333 assert(bound->op == op_Bound);
2334 return get_irn_n(bound, 3);
2337 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2338 assert(bound->op == op_Bound);
2339 set_irn_n(bound, 3, upper);
2342 /* Return the operand of a Pin node. */
2343 ir_node *get_Pin_op(ir_node *pin) {
2344 assert(pin->op == op_Pin);
2345 return get_irn_n(pin, 0);
2348 void set_Pin_op(ir_node *pin, ir_node *node) {
2349 assert(pin->op == op_Pin);
2350 set_irn_n(pin, 0, node);
/* returns the graph of a node */
ir_graph *
get_irn_irg(const ir_node *node) {
  /*
   * Do not use get_nodes_Block() here, because this
   * will check the pinned state.
   * However even a 'wrong' block is always in the proper
   * irg, so the lookup stays correct.
   */
  if (! is_Block(node))
    node = get_irn_n(node, -1);    /* step to the node's block */
  if (is_Bad(node))  /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
    node = get_irn_n(node, -1);
  assert(get_irn_op(node) == op_Block);
  return node->attr.block.irg;
}
2372 /*----------------------------------------------------------------*/
2373 /* Auxiliary routines */
2374 /*----------------------------------------------------------------*/
2377 skip_Proj (ir_node *node) {
2378 /* don't assert node !!! */
2379 if (node != NULL && is_Proj(node)) {
2380 return get_Proj_pred(node);
2382 return (ir_node*) node;
2387 skip_Tuple (ir_node *node) {
2391 if (!get_opt_normalize()) return node;
2394 node = skip_Id(node);
2395 if (get_irn_op(node) == op_Proj) {
2396 pred = skip_Id(get_Proj_pred(node));
2397 op = get_irn_op(pred);
2400 * Looks strange but calls get_irn_op() only once
2401 * in most often cases.
2403 if (op == op_Proj) { /* nested Tuple ? */
2404 pred = skip_Id(skip_Tuple(pred));
2405 op = get_irn_op(pred);
2407 if (op == op_Tuple) {
2408 node = get_Tuple_pred(pred, get_Proj_proj(node));
2412 else if (op == op_Tuple) {
2413 node = get_Tuple_pred(pred, get_Proj_proj(node));
2420 /* returns operand of node if node is a Cast */
2421 ir_node *skip_Cast (ir_node *node) {
2422 if (node && get_irn_op(node) == op_Cast)
2423 return get_Cast_op(node);
2427 /* returns operand of node if node is a Confirm */
2428 ir_node *skip_Confirm (ir_node *node) {
2429 if (node && get_irn_op(node) == op_Confirm)
2430 return get_Confirm_value(node);
2434 /* skip all high-level ops */
2435 ir_node *skip_HighLevel(ir_node *node) {
2436 if (node && is_op_highlevel(get_irn_op(node)))
2437 return get_irn_n(node, 0);
2442 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2443 * than any other approach, as Id chains are resolved and all point to the real node, or
2444 * all id's are self loops.
2446 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2449 skip_Id (ir_node *node) {
2450 /* don't assert node !!! */
2452 /* Don't use get_Id_pred: We get into an endless loop for
2453 self-referencing Ids. */
2454 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2455 ir_node *rem_pred = node->in[0+1];
2458 assert(get_irn_arity (node) > 0);
2460 node->in[0+1] = node;
2461 res = skip_Id(rem_pred);
2462 if (res->op == op_Id) /* self-loop */ return node;
2464 node->in[0+1] = res;
2471 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2472 * than any other approach, as Id chains are resolved and all point to the real node, or
2473 * all id's are self loops.
2475 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2476 * a little bit "hand optimized".
2478 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2481 skip_Id (ir_node *node) {
2483 /* don't assert node !!! */
2485 if (!node || (node->op != op_Id)) return node;
2487 /* Don't use get_Id_pred(): We get into an endless loop for
2488 self-referencing Ids. */
2489 pred = node->in[0+1];
2491 if (pred->op != op_Id) return pred;
2493 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2494 ir_node *rem_pred, *res;
2496 if (pred->op != op_Id) return pred; /* shortcut */
2499 assert(get_irn_arity (node) > 0);
2501 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2502 res = skip_Id(rem_pred);
2503 if (res->op == op_Id) /* self-loop */ return node;
2505 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2513 void skip_Id_and_store(ir_node **node) {
2516 if (!n || (n->op != op_Id)) return;
2518 /* Don't use get_Id_pred(): We get into an endless loop for
2519 self-referencing Ids. */
/* Out-of-line bodies for the inlineable opcode predicates declared in
 * the header.  The parenthesized names suppress the macro versions. */
int
(is_Bad)(const ir_node *node) {
  return _is_Bad(node);
}

int
(is_NoMem)(const ir_node *node) {
  return _is_NoMem(node);
}

int
(is_Mod)(const ir_node *node) {
  return _is_Mod(node);
}

int
(is_Div)(const ir_node *node) {
  return _is_Div(node);
}

int
(is_DivMod)(const ir_node *node) {
  return _is_DivMod(node);
}

int
(is_Start)(const ir_node *node) {
  return _is_Start(node);
}

int
(is_Const)(const ir_node *node) {
  return _is_Const(node);
}

int
(is_no_Block)(const ir_node *node) {
  return _is_no_Block(node);
}

int
(is_Block)(const ir_node *node) {
  return _is_Block(node);
}

/* returns true if node is an Unknown node. */
int
(is_Unknown)(const ir_node *node) {
  return _is_Unknown(node);
}

/* returns true if node is a Return node. */
int
(is_Return)(const ir_node *node) {
  return _is_Return(node);
}

/* returns true if node is a Call node. */
int
(is_Call)(const ir_node *node) {
  return _is_Call(node);
}

/* returns true if node is a Sel node. */
int
(is_Sel)(const ir_node *node) {
  return _is_Sel(node);
}

/* returns true if node is a Mux node or a Psi with only one condition. */
int
(is_Mux)(const ir_node *node) {
  return _is_Mux(node);
}

/* returns true if node is a Load node. */
int
(is_Load)(const ir_node *node) {
  return _is_Load(node);
}

/* returns true if node is a Sync node. */
int
(is_Sync)(const ir_node *node) {
  return _is_Sync(node);
}

/* returns true if node is a Confirm node. */
int
(is_Confirm)(const ir_node *node) {
  return _is_Confirm(node);
}

/* returns true if node is a Pin node. */
int
(is_Pin)(const ir_node *node) {
  return _is_Pin(node);
}

/* returns true if node is a SymConst node. */
int
(is_SymConst)(const ir_node *node) {
  return _is_SymConst(node);
}

/* returns true if node is a Cond node. */
int
(is_Cond)(const ir_node *node) {
  return _is_Cond(node);
}

/* returns true if node is a Cmp node. */
int
(is_Cmp)(const ir_node *node) {
  return _is_Cmp(node);
}

/* returns true if node is an Alloc node. */
int
(is_Alloc)(const ir_node *node) {
  return _is_Alloc(node);
}

/* returns true if a node is a Jmp node. */
int
(is_Jmp)(const ir_node *node) {
  return _is_Jmp(node);
}

/* returns true if a node is a Raise node. */
int
(is_Raise)(const ir_node *node) {
  return _is_Raise(node);
}
/* Returns true for Proj nodes; in the intraprocedural view a Filter
 * behaves like a Proj, so it counts as well. */
int
is_Proj (const ir_node *node) {
  return node->op == op_Proj
      || (!get_interprocedural_view() && node->op == op_Filter);
}

/* Returns true if the operation manipulates control flow. */
int
is_cfop(const ir_node *node) {
  return is_cfopcode(get_irn_op(node));
}

/* Returns true if the operation manipulates interprocedural control flow:
   CallBegin, EndReg, EndExcept */
int is_ip_cfop(const ir_node *node) {
  return is_ip_cfopcode(get_irn_op(node));
}

/* Returns true if the operation can change the control flow because
   it is fragile, i.e. it may raise an exception. */
int
is_fragile_op(const ir_node *node) {
  return is_op_fragile(get_irn_op(node));
}
2684 /* Returns the memory operand of fragile operations. */
2685 ir_node *get_fragile_op_mem(ir_node *node) {
2686 assert(node && is_fragile_op(node));
2688 switch (get_irn_opcode (node)) {
2698 return get_irn_n(node, 0);
2703 assert(0 && "should not be reached");
/* Returns true if the operation is a forking control flow operation. */
int (is_irn_forking)(const ir_node *node) {
  return _is_irn_forking(node);
}

/* Return the type associated with the value produced by n
 * if the node remarks this type as it is the case for
 * Cast, Const, SymConst and some Proj nodes. */
ir_type *(get_irn_type)(ir_node *node) {
  return _get_irn_type(node);
}

/* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
   Cast) or NULL.*/
ir_type *(get_irn_type_attr)(ir_node *node) {
  return _get_irn_type_attr(node);
}

/* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
entity *(get_irn_entity_attr)(ir_node *node) {
  return _get_irn_entity_attr(node);
}

/* Returns non-zero for constant-like nodes. */
int (is_irn_constlike)(const ir_node *node) {
  return _is_irn_constlike(node);
}

/*
 * Returns non-zero for nodes that are allowed to have keep-alives and
 * are neither Block nor PhiM.
 */
int (is_irn_keep)(const ir_node *node) {
  return _is_irn_keep(node);
}

/*
 * Returns non-zero for nodes that are always placed in the start block.
 */
int (is_irn_start_block_placed)(const ir_node *node) {
  return _is_irn_start_block_placed(node);
}

/* Returns non-zero for nodes that are machine operations. */
int (is_irn_machine_op)(const ir_node *node) {
  return _is_irn_machine_op(node);
}

/* Returns non-zero for nodes that are machine operands. */
int (is_irn_machine_operand)(const ir_node *node) {
  return _is_irn_machine_operand(node);
}

/* Returns non-zero for nodes that have the n'th user machine flag set. */
int (is_irn_machine_user)(const ir_node *node, unsigned n) {
  return _is_irn_machine_user(node, n);
}
2767 /* Gets the string representation of the jump prediction .*/
2768 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
2772 case COND_JMP_PRED_NONE: return "no prediction";
2773 case COND_JMP_PRED_TRUE: return "true taken";
2774 case COND_JMP_PRED_FALSE: return "false taken";
/* Returns the conditional jump prediction of a Cond node
 * (out-of-line body for the inlineable _get_Cond_jmp_pred). */
cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
  return _get_Cond_jmp_pred(cond);
}

/* Sets a new conditional jump prediction. */
void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
  _set_Cond_jmp_pred(cond, pred);
}
/** the get_type operation must be always implemented and return a firm type:
 * this default simply answers the unknown type. */
static ir_type *get_Default_type(ir_node *n) {
  return get_unknown_type();
}
2793 /* Sets the get_type operation for an ir_op_ops. */
2794 ir_op_ops *firm_set_default_get_type(opcode code, ir_op_ops *ops)
2797 case iro_Const: ops->get_type = get_Const_type; break;
2798 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2799 case iro_Cast: ops->get_type = get_Cast_type; break;
2800 case iro_Proj: ops->get_type = get_Proj_type; break;
2802 /* not allowed to be NULL */
2803 if (! ops->get_type)
2804 ops->get_type = get_Default_type;
2810 /** Return the attribute type of a SymConst node if exists */
2811 static ir_type *get_SymConst_attr_type(ir_node *self) {
2812 symconst_kind kind = get_SymConst_kind(self);
2813 if (SYMCONST_HAS_TYPE(kind))
2814 return get_SymConst_type(self);
2818 /** Return the attribute entity of a SymConst node if exists */
2819 static entity *get_SymConst_attr_entity(ir_node *self) {
2820 symconst_kind kind = get_SymConst_kind(self);
2821 if (SYMCONST_HAS_ENT(kind))
2822 return get_SymConst_entity(self);
/** the get_type_attr operation must be always implemented:
 * this fallback answers the unknown type for nodes without one. */
static ir_type *get_Null_type(ir_node *n) {
  return firm_unknown_type;
}
2831 /* Sets the get_type operation for an ir_op_ops. */
2832 ir_op_ops *firm_set_default_get_type_attr(opcode code, ir_op_ops *ops)
2835 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2836 case iro_Call: ops->get_type_attr = get_Call_type; break;
2837 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2838 case iro_Free: ops->get_type_attr = get_Free_type; break;
2839 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2841 /* not allowed to be NULL */
2842 if (! ops->get_type_attr)
2843 ops->get_type_attr = get_Null_type;
2849 /** the get_entity_attr operation must be always implemented */
2850 static entity *get_Null_ent(ir_node *n) {
2854 /* Sets the get_type operation for an ir_op_ops. */
2855 ir_op_ops *firm_set_default_get_entity_attr(opcode code, ir_op_ops *ops)
2858 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2859 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
2861 /* not allowed to be NULL */
2862 if (! ops->get_entity_attr)
2863 ops->get_entity_attr = get_Null_ent;
2869 #ifdef DEBUG_libfirm
2870 void dump_irn (ir_node *n) {
2871 int i, arity = get_irn_arity(n);
2872 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2874 ir_node *pred = get_irn_n(n, -1);
2875 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2876 get_irn_node_nr(pred), (void *)pred);
2878 printf(" preds: \n");
2879 for (i = 0; i < arity; ++i) {
2880 ir_node *pred = get_irn_n(n, i);
2881 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2882 get_irn_node_nr(pred), (void *)pred);
2886 #else /* DEBUG_libfirm */
2887 void dump_irn (ir_node *n) {}
2888 #endif /* DEBUG_libfirm */