3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier, Michael Beck
9 * Copyright: (c) 1998-2006 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredgekinds.h"
31 #include "iredges_t.h"
36 /* some constants fixing the positions of nodes predecessors in the in array */
38 #define CALL_PARAM_OFFSET 2
39 #define FUNCCALL_PARAM_OFFSET 1
40 #define SEL_INDEX_OFFSET 2
41 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
42 #define END_KEEPALIVE_OFFSET 0
44 static const char *pnc_name_arr [] = {
45 "pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
46 "pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
47 "pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
48 "pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
52 * returns the pnc name from an pnc constant
54 const char *get_pnc_string(int pnc) {
55 return pnc_name_arr[pnc];
59 * Calculates the negated (Complement(R)) pnc condition.
61 int get_negated_pnc(int pnc, ir_mode *mode) {
64 /* do NOT add the Uo bit for non-floating point values */
65 if (! mode_is_float(mode))
71 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
73 get_inversed_pnc(int pnc) {
74 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
75 int lesser = pnc & pn_Cmp_Lt;
76 int greater = pnc & pn_Cmp_Gt;
78 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;

/* register new space for every node */
/* Reserve `size` extra bytes of per-node custom data; returns the new
   accumulated total.  Must be called before any node is created.
   NOTE(review): interior source lines are elided in this view. */
unsigned register_additional_node_data(unsigned size) {
	assert(!forbid_new_data && "Too late to register additional node data");
	return firm_add_node_size += size;
109 /* Forbid the addition of new data to an ir node. */
/**
 * irnode constructor.
 * Create a new irnode in irg, with an op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic array is created.
 *
 * NOTE(review): several lines of this function (locals, branch structure,
 * closing braces, the return) are elided in this view; the comments below
 * only describe what the visible statements demonstrate.
 */
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
             int arity, ir_node **in)
	/* node header up to the attr union + op-specific attrs + custom data */
	size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
	assert(irg && op && mode);
	p = obstack_alloc (irg->obst, node_size);
	memset(p, 0, node_size);          /* zero-initialize the whole node */
	/* the registered custom data lives in FRONT of the ir_node proper */
	res = (ir_node *) (p + firm_add_node_size);
	res->kind = k_ir_node;
	res->node_idx = irg_register_node_idx(irg, res);
	/* dynamic-arity case: flexible array, slot 0 reserved for the block */
	res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
	/* fixed-arity case: in[0] is the block, in[1..arity] the predecessors */
	res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
	memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
	set_irn_dbg_info(res, db);
	res->node_nr = get_irp_new_node_nr();
	for(i = 0; i < EDGE_KIND_LAST; ++i)
		INIT_LIST_HEAD(&res->edge_info[i].outs_head);
	// don't put this into the for loop, arity is -1 for some nodes!
	edges_notify_edge(res, -1, res->in[0], NULL, irg);
	for (i = 1; i <= arity; ++i)
		edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
	hook_new_node(irg, res);
169 /*-- getting some parameters from ir_nodes --*/
172 (is_ir_node)(const void *thing) {
173 return _is_ir_node(thing);
177 (get_irn_intra_arity)(const ir_node *node) {
178 return _get_irn_intra_arity(node);
182 (get_irn_inter_arity)(const ir_node *node) {
183 return _get_irn_inter_arity(node);
186 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
189 (get_irn_arity)(const ir_node *node) {
190 return _get_irn_arity(node);
193 /* Returns the array with ins. This array is shifted with respect to the
194 array accessed by get_irn_n: The block operand is at position 0 not -1.
195 (@@@ This should be changed.)
196 The order of the predecessors in this array is not guaranteed, except that
197 lists of operands as predecessors of Block or arguments of a Call are
200 get_irn_in(const ir_node *node) {
202 if (get_interprocedural_view()) { /* handle Filter and Block specially */
203 if (get_irn_opcode(node) == iro_Filter) {
204 assert(node->attr.filter.in_cg);
205 return node->attr.filter.in_cg;
206 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
207 return node->attr.block.in_cg;
209 /* else fall through */
/* Replace ALL predecessors of a node at once, resizing the in array when
   the arity changes and keeping out-edge bookkeeping consistent.
   NOTE(review): declarations (`arr`, `i`), else-branches and closing braces
   are elided in this view. */
set_irn_in(ir_node *node, int arity, ir_node **in) {
	ir_graph *irg = current_ir_graph;
	if (get_interprocedural_view()) { /* handle Filter and Block specially */
		if (get_irn_opcode(node) == iro_Filter) {
			assert(node->attr.filter.in_cg);
			arr = &node->attr.filter.in_cg;   /* edit the interprocedural ins */
		} else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
			arr = &node->attr.block.in_cg;
	/* positions present in both old and new array retarget their edge;
	   positions beyond the old arity are pure additions (old target NULL) */
	for (i = 0; i < arity; i++) {
		if (i < ARR_LEN(*arr)-1)
			edges_notify_edge(node, i, in[i], (*arr)[i+1], irg);
			edges_notify_edge(node, i, in[i], NULL, irg);
	/* surplus old positions are removals (new target NULL) */
	for(;i < ARR_LEN(*arr)-1; i++) {
		edges_notify_edge(node, i, NULL, (*arr)[i+1], irg);
	if (arity != ARR_LEN(*arr) - 1) {
		ir_node * block = (*arr)[0];   /* slot 0 (the block) must survive resize */
		*arr = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
	fix_backedges(irg->obst, node);
	/* copy the new predecessors behind the block slot */
	memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
254 (get_irn_intra_n)(const ir_node *node, int n) {
255 return _get_irn_intra_n (node, n);
259 (get_irn_inter_n)(const ir_node *node, int n) {
260 return _get_irn_inter_n (node, n);
263 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
266 (get_irn_n)(const ir_node *node, int n) {
267 return _get_irn_n(node, n);
/* Set the n-th predecessor of a node; n == -1 addresses the block
   predecessor (the in[] array is shifted by one: block at index 0).
   NOTE(review): early returns and closing braces are elided in this view. */
set_irn_n (ir_node *node, int n, ir_node *in) {
	assert(node && node->kind == k_ir_node);
	assert(n < get_irn_arity(node));
	assert(in && in->kind == k_ir_node);
	if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
		/* Change block pred in both views! */
		node->in[n + 1] = in;
		assert(node->attr.filter.in_cg);
		node->attr.filter.in_cg[n + 1] = in;
	if (get_interprocedural_view()) { /* handle Filter and Block specially */
		if (get_irn_opcode(node) == iro_Filter) {
			assert(node->attr.filter.in_cg);
			node->attr.filter.in_cg[n + 1] = in;
		} else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
			node->attr.block.in_cg[n + 1] = in;
		/* else fall through */
	/* old target is still in node->in[n + 1] at this point */
	hook_set_irn_n(node, n, in, node->in[n + 1]);
	/* Here, we rely on src and tgt being in the current ir graph */
	edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
	node->in[n + 1] = in;
/* Append a predecessor to a node with dynamic arity; the position of the
   new predecessor is derived from the array length before appending.
   NOTE(review): local declarations, the return and closing brace are
   elided in this view. */
int add_irn_n(ir_node *node, ir_node *in)
	ir_graph *irg = get_irn_irg(node);
	/* only ops declared oparity_dynamic may grow their in array */
	assert(node->op->opar == oparity_dynamic);
	pos = ARR_LEN(node->in) - 1;  /* -1: slot 0 holds the block */
	ARR_APP1(ir_node *, node->in, in);
	edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
	hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
322 (get_irn_deps)(const ir_node *node)
324 return _get_irn_deps(node);
328 (get_irn_dep)(const ir_node *node, int pos)
330 return _get_irn_dep(node, pos);
334 (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
336 _set_irn_dep(node, pos, dep);
/* Add a dependency edge node -> dep.  Reuses the first NULL slot in the
   deps array if one exists, otherwise appends; duplicates are skipped.
   NOTE(review): local declarations (`i`, `n`, `first_zero`, `res`),
   early returns and closing braces are elided in this view. */
int add_irn_dep(ir_node *node, ir_node *dep)
	if (node->deps == NULL) {
		/* lazily allocate the dependency array on first use */
		node->deps = NEW_ARR_F(ir_node *, 1);
	for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
		if(node->deps[i] == NULL)    /* remember first free slot */
		if(node->deps[i] == dep)     /* already present: nothing to do */
	if (first_zero >= 0) {
		node->deps[first_zero] = dep;  /* recycle the free slot */
	ARR_APP1(ir_node *, node->deps, dep);
	/* keep the DEP out-edge information in sync */
	edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));

/* Copy all dependency edges of src onto tgt (duplicates filtered by
   add_irn_dep). */
void add_irn_deps(ir_node *tgt, ir_node *src)
	for(i = 0, n = get_irn_deps(src); i < n; ++i)
		add_irn_dep(tgt, get_irn_dep(src, i));
382 (get_irn_mode)(const ir_node *node) {
383 return _get_irn_mode(node);
387 (set_irn_mode)(ir_node *node, ir_mode *mode) {
388 _set_irn_mode(node, mode);
392 get_irn_modecode(const ir_node *node) {
394 return node->mode->code;
397 /** Gets the string representation of the mode. */
399 get_irn_modename(const ir_node *node) {
401 return get_mode_name(node->mode);
405 get_irn_modeident(const ir_node *node) {
407 return get_mode_ident(node->mode);
411 (get_irn_op)(const ir_node *node) {
412 return _get_irn_op(node);
415 /* should be private to the library: */
417 (set_irn_op)(ir_node *node, ir_op *op) {
418 _set_irn_op(node, op);
422 (get_irn_opcode)(const ir_node *node) {
423 return _get_irn_opcode(node);
427 get_irn_opname(const ir_node *node) {
429 if ((get_irn_op((ir_node *)node) == op_Phi) &&
430 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
431 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
432 return get_id_str(node->op->name);
436 get_irn_opident(const ir_node *node) {
438 return node->op->name;
442 (get_irn_visited)(const ir_node *node) {
443 return _get_irn_visited(node);
447 (set_irn_visited)(ir_node *node, unsigned long visited) {
448 _set_irn_visited(node, visited);
452 (mark_irn_visited)(ir_node *node) {
453 _mark_irn_visited(node);
457 (irn_not_visited)(const ir_node *node) {
458 return _irn_not_visited(node);
462 (irn_visited)(const ir_node *node) {
463 return _irn_visited(node);
467 (set_irn_link)(ir_node *node, void *link) {
468 _set_irn_link(node, link);
472 (get_irn_link)(const ir_node *node) {
473 return _get_irn_link(node);
477 (get_irn_pinned)(const ir_node *node) {
478 return _get_irn_pinned(node);
482 (is_irn_pinned_in_irg) (const ir_node *node) {
483 return _is_irn_pinned_in_irg(node);
486 void set_irn_pinned(ir_node *node, op_pin_state state) {
487 /* due to optimization an opt may be turned into a Tuple */
488 if (get_irn_op(node) == op_Tuple)
491 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
492 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
494 node->attr.except.pin_state = state;
497 #ifdef DO_HEAPANALYSIS
498 /* Access the abstract interpretation information of a node.
499 Returns NULL if no such information is available. */
500 struct abstval *get_irn_abst_value(ir_node *n) {
503 /* Set the abstract interpretation information of a node. */
504 void set_irn_abst_value(ir_node *n, struct abstval *os) {
507 struct section *firm_get_irn_section(ir_node *n) {
510 void firm_set_irn_section(ir_node *n, struct section *s) {
514 /* Dummies needed for firmjni. */
515 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
516 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
517 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
518 void firm_set_irn_section(ir_node *n, struct section *s) {}
519 #endif /* DO_HEAPANALYSIS */
522 /* Outputs a unique number for this node */
523 long get_irn_node_nr(const ir_node *node) {
526 return node->node_nr;
528 return (long)PTR_TO_INT(node);
533 get_irn_const_attr(ir_node *node) {
534 assert(node->op == op_Const);
535 return node->attr.con;
539 get_irn_proj_attr(ir_node *node) {
540 assert(node->op == op_Proj);
541 return node->attr.proj;
545 get_irn_alloc_attr(ir_node *node) {
546 assert(node->op == op_Alloc);
547 return node->attr.alloc;
551 get_irn_free_attr(ir_node *node) {
552 assert(node->op == op_Free);
553 return node->attr.free;
557 get_irn_symconst_attr(ir_node *node) {
558 assert(node->op == op_SymConst);
559 return node->attr.symc;
563 get_irn_call_attr(ir_node *node) {
564 assert(node->op == op_Call);
565 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
569 get_irn_sel_attr(ir_node *node) {
570 assert(node->op == op_Sel);
571 return node->attr.sel;
575 get_irn_phi_attr(ir_node *node) {
576 assert(node->op == op_Phi);
577 return node->attr.phi0_pos;
581 get_irn_block_attr(ir_node *node) {
582 assert(node->op == op_Block);
583 return node->attr.block;
587 get_irn_load_attr(ir_node *node)
589 assert(node->op == op_Load);
590 return node->attr.load;
594 get_irn_store_attr(ir_node *node)
596 assert(node->op == op_Store);
597 return node->attr.store;
601 get_irn_except_attr(ir_node *node) {
602 assert(node->op == op_Div || node->op == op_Quot ||
603 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
604 return node->attr.except;
608 get_irn_generic_attr(ir_node *node) {
612 unsigned (get_irn_idx)(const ir_node *node) {
613 assert(is_ir_node(node));
614 return _get_irn_idx(node);
617 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
619 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
620 if (get_irn_n(node, i) == arg)
626 /** manipulate fields of individual nodes **/
628 /* this works for all except Block */
630 get_nodes_block(const ir_node *node) {
631 assert(node->op != op_Block);
632 assert(is_irn_pinned_in_irg(node) && "block info may be incorrect");
633 return get_irn_n(node, -1);
637 set_nodes_block(ir_node *node, ir_node *block) {
638 assert(node->op != op_Block);
639 set_irn_n(node, -1, block);
642 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
643 * from Start. If so returns frame type, else Null. */
644 ir_type *is_frame_pointer(ir_node *n) {
645 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
646 ir_node *start = get_Proj_pred(n);
647 if (get_irn_op(start) == op_Start) {
648 return get_irg_frame_type(get_irn_irg(start));
654 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
655 * from Start. If so returns global type, else Null. */
656 ir_type *is_globals_pointer(ir_node *n) {
657 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
658 ir_node *start = get_Proj_pred(n);
659 if (get_irn_op(start) == op_Start) {
660 return get_glob_type();
666 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
667 * from Start. If so returns tls type, else Null. */
668 ir_type *is_tls_pointer(ir_node *n) {
669 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
670 ir_node *start = get_Proj_pred(n);
671 if (get_irn_op(start) == op_Start) {
672 return get_tls_type();
678 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
679 * from Start. If so returns 1, else 0. */
680 int is_value_arg_pointer(ir_node *n) {
681 if ((get_irn_op(n) == op_Proj) &&
682 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
683 (get_irn_op(get_Proj_pred(n)) == op_Start))
688 /* Returns an array with the predecessors of the Block. Depending on
689 the implementation of the graph data structure this can be a copy of
690 the internal representation of predecessors as well as the internal
691 array itself. Therefore writing to this array might obstruct the ir. */
693 get_Block_cfgpred_arr(ir_node *node) {
694 assert((node->op == op_Block));
695 return (ir_node **)&(get_irn_in(node)[1]);
699 (get_Block_n_cfgpreds)(const ir_node *node) {
700 return _get_Block_n_cfgpreds(node);
704 (get_Block_cfgpred)(ir_node *node, int pos) {
705 return _get_Block_cfgpred(node, pos);
709 set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
710 assert(node->op == op_Block);
711 set_irn_n(node, pos, pred);
715 (get_Block_cfgpred_block)(ir_node *node, int pos) {
716 return _get_Block_cfgpred_block(node, pos);
720 get_Block_matured(ir_node *node) {
721 assert(node->op == op_Block);
722 return (int)node->attr.block.matured;
726 set_Block_matured(ir_node *node, int matured) {
727 assert(node->op == op_Block);
728 node->attr.block.matured = matured;
732 (get_Block_block_visited)(ir_node *node) {
733 return _get_Block_block_visited(node);
737 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
738 _set_Block_block_visited(node, visit);
741 /* For this current_ir_graph must be set. */
743 (mark_Block_block_visited)(ir_node *node) {
744 _mark_Block_block_visited(node);
748 (Block_not_block_visited)(ir_node *node) {
749 return _Block_not_block_visited(node);
753 get_Block_graph_arr (ir_node *node, int pos) {
754 assert(node->op == op_Block);
755 return node->attr.block.graph_arr[pos+1];
759 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
760 assert(node->op == op_Block);
761 node->attr.block.graph_arr[pos+1] = value;
/* Install the interprocedural (call-graph) control-flow predecessors of a
   Block, (re)allocating the in_cg array when the arity changes.
   NOTE(review): closing braces of the if-blocks are elided in this view. */
void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
	assert(node->op == op_Block);
	if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
		/* slot 0 of in_cg is reserved (like in[]); it stays NULL here */
		node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
		node->attr.block.in_cg[0] = NULL;
		node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
	/* Fix backedge array. fix_backedges() operates depending on
	   interprocedural_view. */
	int ipv = get_interprocedural_view();
	set_interprocedural_view(1);  /* temporarily force ip view for the fix */
	fix_backedges(current_ir_graph->obst, node);
	set_interprocedural_view(ipv);  /* restore the caller's view */
	memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
782 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
783 assert(node->op == op_Block &&
784 node->attr.block.in_cg &&
785 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
786 node->attr.block.in_cg[pos + 1] = pred;
789 ir_node **get_Block_cg_cfgpred_arr(ir_node * node) {
790 assert(node->op == op_Block);
791 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
794 int get_Block_cg_n_cfgpreds(ir_node * node) {
795 assert(node->op == op_Block);
796 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
799 ir_node *get_Block_cg_cfgpred(ir_node * node, int pos) {
800 assert(node->op == op_Block && node->attr.block.in_cg);
801 return node->attr.block.in_cg[pos + 1];
804 void remove_Block_cg_cfgpred_arr(ir_node * node) {
805 assert(node->op == op_Block);
806 node->attr.block.in_cg = NULL;
809 ir_node *(set_Block_dead)(ir_node *block) {
810 return _set_Block_dead(block);
813 int (is_Block_dead)(const ir_node *block) {
814 return _is_Block_dead(block);
817 ir_extblk *get_Block_extbb(const ir_node *block) {
819 assert(is_Block(block));
820 res = block->attr.block.extblk;
821 assert(res == NULL || is_ir_extbb(res));
825 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
826 assert(is_Block(block));
827 assert(extblk == NULL || is_ir_extbb(extblk));
828 block->attr.block.extblk = extblk;
832 get_End_n_keepalives(ir_node *end) {
833 assert(end->op == op_End);
834 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
838 get_End_keepalive(ir_node *end, int pos) {
839 assert(end->op == op_End);
840 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
844 add_End_keepalive (ir_node *end, ir_node *ka) {
845 assert(end->op == op_End);
850 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
851 assert(end->op == op_End);
852 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
/* Set new keep-alives */
/* Replaces ALL keep-alive edges of the End node by the n nodes in in[],
   resizing End's in array and updating the edge information.
   NOTE(review): local declarations and closing braces are elided in this
   view. */
void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
	ir_graph *irg = get_irn_irg(end);
	/* notify that edges are deleted */
	for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
		edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
	ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
	/* install the new keep-alives and announce the new edges */
	for (i = 0; i < n; ++i) {
		end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
		edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);

/* Set new keep-alives from old keep-alives, skipping irn */
/* NOTE(review): the copy loop's filter (skipping irn) and the `in`/`idx`
   declarations are elided in this view. */
void remove_End_keepalive(ir_node *end, ir_node *irn) {
	int n = get_End_n_keepalives(end);
	/* stack-allocated scratch array for the surviving keep-alives */
	NEW_ARR_A(ir_node *, in, n);
	for (idx = i = 0; i < n; ++i) {
		ir_node *old_ka = get_End_keepalive(end, i);
	/* set new keep-alives */
	set_End_keepalives(end, idx, in);
893 free_End (ir_node *end) {
894 assert(end->op == op_End);
897 end->in = NULL; /* @@@ make sure we get an error if we use the
898 in array afterwards ... */
901 /* Return the target address of an IJmp */
902 ir_node *get_IJmp_target(ir_node *ijmp) {
903 assert(ijmp->op == op_IJmp);
904 return get_irn_n(ijmp, 0);
907 /** Sets the target address of an IJmp */
908 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
909 assert(ijmp->op == op_IJmp);
910 set_irn_n(ijmp, 0, tgt);
914 > Implementing the case construct (which is where the constant Proj node is
915 > important) involves far more than simply determining the constant values.
916 > We could argue that this is more properly a function of the translator from
917 > Firm to the target machine. That could be done if there was some way of
918 > projecting "default" out of the Cond node.
919 I know it's complicated.
920 Basically there are two problems:
921 - determining the gaps between the projs
922 - determining the biggest case constant to know the proj number for
924 I see several solutions:
925 1. Introduce a ProjDefault node. Solves both problems.
926 This means to extend all optimizations executed during construction.
927 2. Give the Cond node for switch two flavors:
928 a) there are no gaps in the projs (existing flavor)
929 b) gaps may exist, default proj is still the Proj with the largest
930 projection number. This covers also the gaps.
931 3. Fix the semantic of the Cond to that of 2b)
933 Solution 2 seems to be the best:
934 Computing the gaps in the Firm representation is not too hard, i.e.,
935 libFIRM can implement a routine that transforms between the two
936 flavours. This is also possible for 1) but 2) does not require to
937 change any existing optimization.
938 Further it should be far simpler to determine the biggest constant than
940 I don't want to choose 3) as 2a) seems to have advantages for
941 dataflow analysis and 3) does not allow to convert the representation to
945 get_Cond_selector(ir_node *node) {
946 assert(node->op == op_Cond);
947 return get_irn_n(node, 0);
951 set_Cond_selector(ir_node *node, ir_node *selector) {
952 assert(node->op == op_Cond);
953 set_irn_n(node, 0, selector);
957 get_Cond_kind(ir_node *node) {
958 assert(node->op == op_Cond);
959 return node->attr.cond.kind;
963 set_Cond_kind(ir_node *node, cond_kind kind) {
964 assert(node->op == op_Cond);
965 node->attr.cond.kind = kind;
969 get_Cond_defaultProj(ir_node *node) {
970 assert(node->op == op_Cond);
971 return node->attr.cond.default_proj;
975 get_Return_mem(ir_node *node) {
976 assert(node->op == op_Return);
977 return get_irn_n(node, 0);
981 set_Return_mem(ir_node *node, ir_node *mem) {
982 assert(node->op == op_Return);
983 set_irn_n(node, 0, mem);
987 get_Return_n_ress(ir_node *node) {
988 assert(node->op == op_Return);
989 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
993 get_Return_res_arr (ir_node *node) {
994 assert((node->op == op_Return));
995 if (get_Return_n_ress(node) > 0)
996 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1003 set_Return_n_res(ir_node *node, int results) {
1004 assert(node->op == op_Return);
1009 get_Return_res(ir_node *node, int pos) {
1010 assert(node->op == op_Return);
1011 assert(get_Return_n_ress(node) > pos);
1012 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1016 set_Return_res(ir_node *node, int pos, ir_node *res){
1017 assert(node->op == op_Return);
1018 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1021 tarval *(get_Const_tarval)(ir_node *node) {
1022 return _get_Const_tarval(node);
1026 set_Const_tarval(ir_node *node, tarval *con) {
1027 assert(node->op == op_Const);
1028 node->attr.con.tv = con;
1031 cnst_classify_t (classify_Const)(ir_node *node) {
1032 return _classify_Const(node);
1036 /* The source language type. Must be an atomic type. Mode of type must
1037 be mode of node. For tarvals from entities type must be pointer to
1040 get_Const_type(ir_node *node) {
1041 assert(node->op == op_Const);
1042 return node->attr.con.tp;
1046 set_Const_type(ir_node *node, ir_type *tp) {
1047 assert(node->op == op_Const);
1048 if (tp != firm_unknown_type) {
1049 assert(is_atomic_type(tp));
1050 assert(get_type_mode(tp) == get_irn_mode(node));
1052 node->attr.con.tp = tp;
1057 get_SymConst_kind(const ir_node *node) {
1058 assert(node->op == op_SymConst);
1059 return node->attr.symc.num;
1063 set_SymConst_kind(ir_node *node, symconst_kind num) {
1064 assert(node->op == op_SymConst);
1065 node->attr.symc.num = num;
1069 get_SymConst_type(ir_node *node) {
1070 assert((node->op == op_SymConst) &&
1071 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1072 return node->attr.symc.sym.type_p = skip_tid(node->attr.symc.sym.type_p);
1076 set_SymConst_type(ir_node *node, ir_type *tp) {
1077 assert((node->op == op_SymConst) &&
1078 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1079 node->attr.symc.sym.type_p = tp;
1083 get_SymConst_name(ir_node *node) {
1084 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1085 return node->attr.symc.sym.ident_p;
1089 set_SymConst_name(ir_node *node, ident *name) {
1090 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1091 node->attr.symc.sym.ident_p = name;
1095 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1096 ir_entity *get_SymConst_entity(ir_node *node) {
1097 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1098 return node->attr.symc.sym.entity_p;
1101 void set_SymConst_entity(ir_node *node, ir_entity *ent) {
1102 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1103 node->attr.symc.sym.entity_p = ent;
1106 ir_enum_const *get_SymConst_enum(ir_node *node) {
1107 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1108 return node->attr.symc.sym.enum_p;
1111 void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
1112 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1113 node->attr.symc.sym.enum_p = ec;
1116 union symconst_symbol
1117 get_SymConst_symbol(ir_node *node) {
1118 assert(node->op == op_SymConst);
1119 return node->attr.symc.sym;
1123 set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
1124 assert(node->op == op_SymConst);
1125 node->attr.symc.sym = sym;
1129 get_SymConst_value_type(ir_node *node) {
1130 assert(node->op == op_SymConst);
1131 if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
1132 return node->attr.symc.tp;
1136 set_SymConst_value_type(ir_node *node, ir_type *tp) {
1137 assert(node->op == op_SymConst);
1138 node->attr.symc.tp = tp;
1142 get_Sel_mem(ir_node *node) {
1143 assert(node->op == op_Sel);
1144 return get_irn_n(node, 0);
1148 set_Sel_mem(ir_node *node, ir_node *mem) {
1149 assert(node->op == op_Sel);
1150 set_irn_n(node, 0, mem);
1154 get_Sel_ptr(ir_node *node) {
1155 assert(node->op == op_Sel);
1156 return get_irn_n(node, 1);
1160 set_Sel_ptr(ir_node *node, ir_node *ptr) {
1161 assert(node->op == op_Sel);
1162 set_irn_n(node, 1, ptr);
1166 get_Sel_n_indexs(ir_node *node) {
1167 assert(node->op == op_Sel);
1168 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1172 get_Sel_index_arr(ir_node *node) {
1173 assert((node->op == op_Sel));
1174 if (get_Sel_n_indexs(node) > 0)
1175 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1181 get_Sel_index(ir_node *node, int pos) {
1182 assert(node->op == op_Sel);
1183 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1187 set_Sel_index(ir_node *node, int pos, ir_node *index) {
1188 assert(node->op == op_Sel);
1189 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1193 get_Sel_entity(ir_node *node) {
1194 assert(node->op == op_Sel);
1195 return node->attr.sel.ent;
1199 set_Sel_entity(ir_node *node, ir_entity *ent) {
1200 assert(node->op == op_Sel);
1201 node->attr.sel.ent = ent;
1205 /* For unary and binary arithmetic operations the access to the
1206 operands can be factored out. Left is the first, right the
1207 second arithmetic value as listed in tech report 0999-33.
1208 unops are: Minus, Abs, Not, Conv, Cast
1209 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1210 Shr, Shrs, Rotate, Cmp */
1214 get_Call_mem(ir_node *node) {
1215 assert(node->op == op_Call);
1216 return get_irn_n(node, 0);
1220 set_Call_mem(ir_node *node, ir_node *mem) {
1221 assert(node->op == op_Call);
1222 set_irn_n(node, 0, mem);
1226 get_Call_ptr(ir_node *node) {
1227 assert(node->op == op_Call);
1228 return get_irn_n(node, 1);
1232 set_Call_ptr(ir_node *node, ir_node *ptr) {
1233 assert(node->op == op_Call);
1234 set_irn_n(node, 1, ptr);
1238 get_Call_param_arr(ir_node *node) {
1239 assert(node->op == op_Call);
1240 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1244 get_Call_n_params(ir_node *node) {
1245 assert(node->op == op_Call);
1246 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1250 get_Call_arity(ir_node *node) {
1251 assert(node->op == op_Call);
1252 return get_Call_n_params(node);
1256 set_Call_arity(ir_node *node, ir_node *arity) {
1257 assert(node->op == op_Call);
1262 get_Call_param(ir_node *node, int pos) {
1263 assert(node->op == op_Call);
1264 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1268 set_Call_param(ir_node *node, int pos, ir_node *param) {
1269 assert(node->op == op_Call);
1270 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1274 get_Call_type(ir_node *node) {
1275 assert(node->op == op_Call);
1276 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1280 set_Call_type(ir_node *node, ir_type *tp) {
1281 assert(node->op == op_Call);
1282 assert((get_unknown_type() == tp) || is_Method_type(tp));
1283 node->attr.call.cld_tp = tp;
1286 int Call_has_callees(ir_node *node) {
1287 assert(node && node->op == op_Call);
1288 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1289 (node->attr.call.callee_arr != NULL));
1292 int get_Call_n_callees(ir_node * node) {
1293 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1294 return ARR_LEN(node->attr.call.callee_arr);
1297 ir_entity * get_Call_callee(ir_node * node, int pos) {
1298 assert(pos >= 0 && pos < get_Call_n_callees(node));
1299 return node->attr.call.callee_arr[pos];
1302 void set_Call_callee_arr(ir_node * node, const int n, ir_entity ** arr) {
1303 assert(node->op == op_Call);
1304 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1305 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1307 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1310 void remove_Call_callee_arr(ir_node * node) {
1311 assert(node->op == op_Call);
1312 node->attr.call.callee_arr = NULL;
1315 ir_node * get_CallBegin_ptr(ir_node *node) {
1316 assert(node->op == op_CallBegin);
1317 return get_irn_n(node, 0);
1320 void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
1321 assert(node->op == op_CallBegin);
1322 set_irn_n(node, 0, ptr);
1325 ir_node * get_CallBegin_call(ir_node *node) {
1326 assert(node->op == op_CallBegin);
1327 return node->attr.callbegin.call;
1330 void set_CallBegin_call(ir_node *node, ir_node *call) {
1331 assert(node->op == op_CallBegin);
1332 node->attr.callbegin.call = call;
1337 ir_node * get_##OP##_left(ir_node *node) { \
1338 assert(node->op == op_##OP); \
1339 return get_irn_n(node, node->op->op_index); \
1341 void set_##OP##_left(ir_node *node, ir_node *left) { \
1342 assert(node->op == op_##OP); \
1343 set_irn_n(node, node->op->op_index, left); \
1345 ir_node *get_##OP##_right(ir_node *node) { \
1346 assert(node->op == op_##OP); \
1347 return get_irn_n(node, node->op->op_index + 1); \
1349 void set_##OP##_right(ir_node *node, ir_node *right) { \
1350 assert(node->op == op_##OP); \
1351 set_irn_n(node, node->op->op_index + 1, right); \
1355 ir_node *get_##OP##_op(ir_node *node) { \
1356 assert(node->op == op_##OP); \
1357 return get_irn_n(node, node->op->op_index); \
1359 void set_##OP##_op (ir_node *node, ir_node *op) { \
1360 assert(node->op == op_##OP); \
1361 set_irn_n(node, node->op->op_index, op); \
1364 #define BINOP_MEM(OP) \
1368 get_##OP##_mem(ir_node *node) { \
1369 assert(node->op == op_##OP); \
1370 return get_irn_n(node, 0); \
1374 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1375 assert(node->op == op_##OP); \
1376 set_irn_n(node, 0, mem); \
1400 int get_Conv_strict(ir_node *node) {
1401 assert(node->op == op_Conv);
1402 return node->attr.conv.strict;
1405 void set_Conv_strict(ir_node *node, int strict_flag) {
1406 assert(node->op == op_Conv);
1407 node->attr.conv.strict = (char)strict_flag;
1411 get_Cast_type(ir_node *node) {
1412 assert(node->op == op_Cast);
1413 return node->attr.cast.totype;
1417 set_Cast_type(ir_node *node, ir_type *to_tp) {
1418 assert(node->op == op_Cast);
1419 node->attr.cast.totype = to_tp;
1423 /* Checks for upcast.
1425 * Returns true if the Cast node casts a class type to a super type.
1427 int is_Cast_upcast(ir_node *node) {
1428 ir_type *totype = get_Cast_type(node);
1429 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1430 ir_graph *myirg = get_irn_irg(node);
1432 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1435 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1436 totype = get_pointer_points_to_type(totype);
1437 fromtype = get_pointer_points_to_type(fromtype);
1442 if (!is_Class_type(totype)) return 0;
1443 return is_SubClass_of(fromtype, totype);
1446 /* Checks for downcast.
1448 * Returns true if the Cast node casts a class type to a sub type.
1450 int is_Cast_downcast(ir_node *node) {
1451 ir_type *totype = get_Cast_type(node);
1452 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1454 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1457 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1458 totype = get_pointer_points_to_type(totype);
1459 fromtype = get_pointer_points_to_type(fromtype);
1464 if (!is_Class_type(totype)) return 0;
1465 return is_SubClass_of(totype, fromtype);
1469 (is_unop)(const ir_node *node) {
1470 return _is_unop(node);
1474 get_unop_op(ir_node *node) {
1475 if (node->op->opar == oparity_unary)
1476 return get_irn_n(node, node->op->op_index);
1478 assert(node->op->opar == oparity_unary);
1483 set_unop_op(ir_node *node, ir_node *op) {
1484 if (node->op->opar == oparity_unary)
1485 set_irn_n(node, node->op->op_index, op);
1487 assert(node->op->opar == oparity_unary);
1491 (is_binop)(const ir_node *node) {
1492 return _is_binop(node);
1496 get_binop_left(ir_node *node) {
1497 assert(node->op->opar == oparity_binary);
1498 return get_irn_n(node, node->op->op_index);
1502 set_binop_left(ir_node *node, ir_node *left) {
1503 assert(node->op->opar == oparity_binary);
1504 set_irn_n(node, node->op->op_index, left);
1508 get_binop_right(ir_node *node) {
1509 assert(node->op->opar == oparity_binary);
1510 return get_irn_n(node, node->op->op_index + 1);
1514 set_binop_right(ir_node *node, ir_node *right) {
1515 assert(node->op->opar == oparity_binary);
1516 set_irn_n(node, node->op->op_index + 1, right);
1519 int is_Phi(const ir_node *n) {
1525 if (op == op_Filter) return get_interprocedural_view();
1528 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1529 (get_irn_arity(n) > 0));
1534 int is_Phi0(const ir_node *n) {
1537 return ((get_irn_op(n) == op_Phi) &&
1538 (get_irn_arity(n) == 0) &&
1539 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1543 get_Phi_preds_arr(ir_node *node) {
1544 assert(node->op == op_Phi);
1545 return (ir_node **)&(get_irn_in(node)[1]);
1549 get_Phi_n_preds(ir_node *node) {
1550 assert(is_Phi(node) || is_Phi0(node));
1551 return (get_irn_arity(node));
1555 void set_Phi_n_preds(ir_node *node, int n_preds) {
1556 assert(node->op == op_Phi);
1561 get_Phi_pred(ir_node *node, int pos) {
1562 assert(is_Phi(node) || is_Phi0(node));
1563 return get_irn_n(node, pos);
1567 set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
1568 assert(is_Phi(node) || is_Phi0(node));
1569 set_irn_n(node, pos, pred);
1573 int is_memop(ir_node *node) {
1574 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1577 ir_node *get_memop_mem(ir_node *node) {
1578 assert(is_memop(node));
1579 return get_irn_n(node, 0);
1582 void set_memop_mem(ir_node *node, ir_node *mem) {
1583 assert(is_memop(node));
1584 set_irn_n(node, 0, mem);
1587 ir_node *get_memop_ptr(ir_node *node) {
1588 assert(is_memop(node));
1589 return get_irn_n(node, 1);
1592 void set_memop_ptr(ir_node *node, ir_node *ptr) {
1593 assert(is_memop(node));
1594 set_irn_n(node, 1, ptr);
1598 get_Load_mem(ir_node *node) {
1599 assert(node->op == op_Load);
1600 return get_irn_n(node, 0);
1604 set_Load_mem(ir_node *node, ir_node *mem) {
1605 assert(node->op == op_Load);
1606 set_irn_n(node, 0, mem);
1610 get_Load_ptr(ir_node *node) {
1611 assert(node->op == op_Load);
1612 return get_irn_n(node, 1);
1616 set_Load_ptr(ir_node *node, ir_node *ptr) {
1617 assert(node->op == op_Load);
1618 set_irn_n(node, 1, ptr);
1622 get_Load_mode(ir_node *node) {
1623 assert(node->op == op_Load);
1624 return node->attr.load.load_mode;
1628 set_Load_mode(ir_node *node, ir_mode *mode) {
1629 assert(node->op == op_Load);
1630 node->attr.load.load_mode = mode;
1634 get_Load_volatility(ir_node *node) {
1635 assert(node->op == op_Load);
1636 return node->attr.load.volatility;
1640 set_Load_volatility(ir_node *node, ir_volatility volatility) {
1641 assert(node->op == op_Load);
1642 node->attr.load.volatility = volatility;
1647 get_Store_mem(ir_node *node) {
1648 assert(node->op == op_Store);
1649 return get_irn_n(node, 0);
1653 set_Store_mem(ir_node *node, ir_node *mem) {
1654 assert(node->op == op_Store);
1655 set_irn_n(node, 0, mem);
1659 get_Store_ptr(ir_node *node) {
1660 assert(node->op == op_Store);
1661 return get_irn_n(node, 1);
1665 set_Store_ptr(ir_node *node, ir_node *ptr) {
1666 assert(node->op == op_Store);
1667 set_irn_n(node, 1, ptr);
1671 get_Store_value(ir_node *node) {
1672 assert(node->op == op_Store);
1673 return get_irn_n(node, 2);
1677 set_Store_value(ir_node *node, ir_node *value) {
1678 assert(node->op == op_Store);
1679 set_irn_n(node, 2, value);
1683 get_Store_volatility(ir_node *node) {
1684 assert(node->op == op_Store);
1685 return node->attr.store.volatility;
1689 set_Store_volatility(ir_node *node, ir_volatility volatility) {
1690 assert(node->op == op_Store);
1691 node->attr.store.volatility = volatility;
1696 get_Alloc_mem(ir_node *node) {
1697 assert(node->op == op_Alloc);
1698 return get_irn_n(node, 0);
1702 set_Alloc_mem(ir_node *node, ir_node *mem) {
1703 assert(node->op == op_Alloc);
1704 set_irn_n(node, 0, mem);
1708 get_Alloc_size(ir_node *node) {
1709 assert(node->op == op_Alloc);
1710 return get_irn_n(node, 1);
1714 set_Alloc_size(ir_node *node, ir_node *size) {
1715 assert(node->op == op_Alloc);
1716 set_irn_n(node, 1, size);
1720 get_Alloc_type(ir_node *node) {
1721 assert(node->op == op_Alloc);
1722 return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
1726 set_Alloc_type(ir_node *node, ir_type *tp) {
1727 assert(node->op == op_Alloc);
1728 node->attr.alloc.type = tp;
1732 get_Alloc_where(ir_node *node) {
1733 assert(node->op == op_Alloc);
1734 return node->attr.alloc.where;
1738 set_Alloc_where(ir_node *node, where_alloc where) {
1739 assert(node->op == op_Alloc);
1740 node->attr.alloc.where = where;
1745 get_Free_mem(ir_node *node) {
1746 assert(node->op == op_Free);
1747 return get_irn_n(node, 0);
1751 set_Free_mem(ir_node *node, ir_node *mem) {
1752 assert(node->op == op_Free);
1753 set_irn_n(node, 0, mem);
1757 get_Free_ptr(ir_node *node) {
1758 assert(node->op == op_Free);
1759 return get_irn_n(node, 1);
1763 set_Free_ptr(ir_node *node, ir_node *ptr) {
1764 assert(node->op == op_Free);
1765 set_irn_n(node, 1, ptr);
1769 get_Free_size(ir_node *node) {
1770 assert(node->op == op_Free);
1771 return get_irn_n(node, 2);
1775 set_Free_size(ir_node *node, ir_node *size) {
1776 assert(node->op == op_Free);
1777 set_irn_n(node, 2, size);
1781 get_Free_type(ir_node *node) {
1782 assert(node->op == op_Free);
1783 return node->attr.free.type = skip_tid(node->attr.free.type);
1787 set_Free_type(ir_node *node, ir_type *tp) {
1788 assert(node->op == op_Free);
1789 node->attr.free.type = tp;
1793 get_Free_where(ir_node *node) {
1794 assert(node->op == op_Free);
1795 return node->attr.free.where;
1799 set_Free_where(ir_node *node, where_alloc where) {
1800 assert(node->op == op_Free);
1801 node->attr.free.where = where;
1804 ir_node **get_Sync_preds_arr(ir_node *node) {
1805 assert(node->op == op_Sync);
1806 return (ir_node **)&(get_irn_in(node)[1]);
1809 int get_Sync_n_preds(ir_node *node) {
1810 assert(node->op == op_Sync);
1811 return (get_irn_arity(node));
1815 void set_Sync_n_preds(ir_node *node, int n_preds) {
1816 assert(node->op == op_Sync);
1820 ir_node *get_Sync_pred(ir_node *node, int pos) {
1821 assert(node->op == op_Sync);
1822 return get_irn_n(node, pos);
1825 void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
1826 assert(node->op == op_Sync);
1827 set_irn_n(node, pos, pred);
1830 /* Add a new Sync predecessor */
1831 void add_Sync_pred(ir_node *node, ir_node *pred) {
1832 assert(node->op == op_Sync);
1833 add_irn_n(node, pred);
1836 /* Returns the source language type of a Proj node. */
1837 ir_type *get_Proj_type(ir_node *n) {
1838 ir_type *tp = firm_unknown_type;
1839 ir_node *pred = get_Proj_pred(n);
1841 switch (get_irn_opcode(pred)) {
1844 /* Deal with Start / Call here: we need to know the Proj Nr. */
1845 assert(get_irn_mode(pred) == mode_T);
1846 pred_pred = get_Proj_pred(pred);
1847 if (get_irn_op(pred_pred) == op_Start) {
1848 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1849 tp = get_method_param_type(mtp, get_Proj_proj(n));
1850 } else if (get_irn_op(pred_pred) == op_Call) {
1851 ir_type *mtp = get_Call_type(pred_pred);
1852 tp = get_method_res_type(mtp, get_Proj_proj(n));
1855 case iro_Start: break;
1856 case iro_Call: break;
1858 ir_node *a = get_Load_ptr(pred);
1860 tp = get_entity_type(get_Sel_entity(a));
1869 get_Proj_pred(const ir_node *node) {
1870 assert(is_Proj(node));
1871 return get_irn_n(node, 0);
1875 set_Proj_pred(ir_node *node, ir_node *pred) {
1876 assert(is_Proj(node));
1877 set_irn_n(node, 0, pred);
1881 get_Proj_proj(const ir_node *node) {
1882 assert(is_Proj(node));
1883 if (get_irn_opcode(node) == iro_Proj) {
1884 return node->attr.proj;
1886 assert(get_irn_opcode(node) == iro_Filter);
1887 return node->attr.filter.proj;
1892 set_Proj_proj(ir_node *node, long proj) {
1893 assert(node->op == op_Proj);
1894 node->attr.proj = proj;
1898 get_Tuple_preds_arr(ir_node *node) {
1899 assert(node->op == op_Tuple);
1900 return (ir_node **)&(get_irn_in(node)[1]);
1904 get_Tuple_n_preds(ir_node *node) {
1905 assert(node->op == op_Tuple);
1906 return (get_irn_arity(node));
1911 set_Tuple_n_preds(ir_node *node, int n_preds) {
1912 assert(node->op == op_Tuple);
1917 get_Tuple_pred (ir_node *node, int pos) {
1918 assert(node->op == op_Tuple);
1919 return get_irn_n(node, pos);
1923 set_Tuple_pred(ir_node *node, int pos, ir_node *pred) {
1924 assert(node->op == op_Tuple);
1925 set_irn_n(node, pos, pred);
1929 get_Id_pred(ir_node *node) {
1930 assert(node->op == op_Id);
1931 return get_irn_n(node, 0);
1935 set_Id_pred(ir_node *node, ir_node *pred) {
1936 assert(node->op == op_Id);
1937 set_irn_n(node, 0, pred);
1940 ir_node *get_Confirm_value(ir_node *node) {
1941 assert(node->op == op_Confirm);
1942 return get_irn_n(node, 0);
1945 void set_Confirm_value(ir_node *node, ir_node *value) {
1946 assert(node->op == op_Confirm);
1947 set_irn_n(node, 0, value);
1950 ir_node *get_Confirm_bound(ir_node *node) {
1951 assert(node->op == op_Confirm);
1952 return get_irn_n(node, 1);
1955 void set_Confirm_bound(ir_node *node, ir_node *bound) {
1956 assert(node->op == op_Confirm);
1957 set_irn_n(node, 0, bound);
1960 pn_Cmp get_Confirm_cmp(ir_node *node) {
1961 assert(node->op == op_Confirm);
1962 return node->attr.confirm_cmp;
1965 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
1966 assert(node->op == op_Confirm);
1967 node->attr.confirm_cmp = cmp;
1972 get_Filter_pred(ir_node *node) {
1973 assert(node->op == op_Filter);
1978 set_Filter_pred(ir_node *node, ir_node *pred) {
1979 assert(node->op == op_Filter);
1984 get_Filter_proj(ir_node *node) {
1985 assert(node->op == op_Filter);
1986 return node->attr.filter.proj;
1990 set_Filter_proj(ir_node *node, long proj) {
1991 assert(node->op == op_Filter);
1992 node->attr.filter.proj = proj;
1995 /* Don't use get_irn_arity, get_irn_n in implementation as access
1996 shall work independent of view!!! */
1997 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1998 assert(node->op == op_Filter);
1999 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2000 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2001 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
2002 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
2003 node->attr.filter.in_cg[0] = node->in[0];
2005 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2008 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2009 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2010 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2011 node->attr.filter.in_cg[pos + 1] = pred;
2014 int get_Filter_n_cg_preds(ir_node *node) {
2015 assert(node->op == op_Filter && node->attr.filter.in_cg);
2016 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2019 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2021 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2023 arity = ARR_LEN(node->attr.filter.in_cg);
2024 assert(pos < arity - 1);
2025 return node->attr.filter.in_cg[pos + 1];
2029 ir_node *get_Mux_sel(ir_node *node) {
2030 if (node->op == op_Psi) {
2031 assert(get_irn_arity(node) == 3);
2032 return get_Psi_cond(node, 0);
2034 assert(node->op == op_Mux);
2038 void set_Mux_sel(ir_node *node, ir_node *sel) {
2039 if (node->op == op_Psi) {
2040 assert(get_irn_arity(node) == 3);
2041 set_Psi_cond(node, 0, sel);
2043 assert(node->op == op_Mux);
2048 ir_node *get_Mux_false(ir_node *node) {
2049 if (node->op == op_Psi) {
2050 assert(get_irn_arity(node) == 3);
2051 return get_Psi_default(node);
2053 assert(node->op == op_Mux);
2057 void set_Mux_false(ir_node *node, ir_node *ir_false) {
2058 if (node->op == op_Psi) {
2059 assert(get_irn_arity(node) == 3);
2060 set_Psi_default(node, ir_false);
2062 assert(node->op == op_Mux);
2063 node->in[2] = ir_false;
2067 ir_node *get_Mux_true(ir_node *node) {
2068 if (node->op == op_Psi) {
2069 assert(get_irn_arity(node) == 3);
2070 return get_Psi_val(node, 0);
2072 assert(node->op == op_Mux);
2076 void set_Mux_true(ir_node *node, ir_node *ir_true) {
2077 if (node->op == op_Psi) {
2078 assert(get_irn_arity(node) == 3);
2079 set_Psi_val(node, 0, ir_true);
2081 assert(node->op == op_Mux);
2082 node->in[3] = ir_true;
2087 ir_node *get_Psi_cond(ir_node *node, int pos) {
2088 int num_conds = get_Psi_n_conds(node);
2089 assert(node->op == op_Psi);
2090 assert(pos < num_conds);
2091 return get_irn_n(node, 2 * pos);
2094 void set_Psi_cond(ir_node *node, int pos, ir_node *cond) {
2095 int num_conds = get_Psi_n_conds(node);
2096 assert(node->op == op_Psi);
2097 assert(pos < num_conds);
2098 set_irn_n(node, 2 * pos, cond);
2101 ir_node *get_Psi_val(ir_node *node, int pos) {
2102 int num_vals = get_Psi_n_conds(node);
2103 assert(node->op == op_Psi);
2104 assert(pos < num_vals);
2105 return get_irn_n(node, 2 * pos + 1);
2108 void set_Psi_val(ir_node *node, int pos, ir_node *val) {
2109 int num_vals = get_Psi_n_conds(node);
2110 assert(node->op == op_Psi);
2111 assert(pos < num_vals);
2112 set_irn_n(node, 2 * pos + 1, val);
2115 ir_node *get_Psi_default(ir_node *node) {
2116 int def_pos = get_irn_arity(node) - 1;
2117 assert(node->op == op_Psi);
2118 return get_irn_n(node, def_pos);
2121 void set_Psi_default(ir_node *node, ir_node *val) {
2122 int def_pos = get_irn_arity(node);
2123 assert(node->op == op_Psi);
2124 set_irn_n(node, def_pos, val);
2127 int (get_Psi_n_conds)(ir_node *node) {
2128 return _get_Psi_n_conds(node);
2132 ir_node *get_CopyB_mem(ir_node *node) {
2133 assert(node->op == op_CopyB);
2134 return get_irn_n(node, 0);
2137 void set_CopyB_mem(ir_node *node, ir_node *mem) {
2138 assert(node->op == op_CopyB);
2139 set_irn_n(node, 0, mem);
2142 ir_node *get_CopyB_dst(ir_node *node) {
2143 assert(node->op == op_CopyB);
2144 return get_irn_n(node, 1);
2147 void set_CopyB_dst(ir_node *node, ir_node *dst) {
2148 assert(node->op == op_CopyB);
2149 set_irn_n(node, 1, dst);
2152 ir_node *get_CopyB_src (ir_node *node) {
2153 assert(node->op == op_CopyB);
2154 return get_irn_n(node, 2);
2157 void set_CopyB_src(ir_node *node, ir_node *src) {
2158 assert(node->op == op_CopyB);
2159 set_irn_n(node, 2, src);
2162 ir_type *get_CopyB_type(ir_node *node) {
2163 assert(node->op == op_CopyB);
2164 return node->attr.copyb.data_type;
2167 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2168 assert(node->op == op_CopyB && data_type);
2169 node->attr.copyb.data_type = data_type;
2174 get_InstOf_type(ir_node *node) {
2175 assert(node->op = op_InstOf);
2176 return node->attr.instof.type;
2180 set_InstOf_type(ir_node *node, ir_type *type) {
2181 assert(node->op = op_InstOf);
2182 node->attr.instof.type = type;
2186 get_InstOf_store(ir_node *node) {
2187 assert(node->op = op_InstOf);
2188 return get_irn_n(node, 0);
2192 set_InstOf_store(ir_node *node, ir_node *obj) {
2193 assert(node->op = op_InstOf);
2194 set_irn_n(node, 0, obj);
2198 get_InstOf_obj(ir_node *node) {
2199 assert(node->op = op_InstOf);
2200 return get_irn_n(node, 1);
2204 set_InstOf_obj(ir_node *node, ir_node *obj) {
2205 assert(node->op = op_InstOf);
2206 set_irn_n(node, 1, obj);
2209 /* Returns the memory input of a Raise operation. */
2211 get_Raise_mem(ir_node *node) {
2212 assert(node->op == op_Raise);
2213 return get_irn_n(node, 0);
2217 set_Raise_mem(ir_node *node, ir_node *mem) {
2218 assert(node->op == op_Raise);
2219 set_irn_n(node, 0, mem);
2223 get_Raise_exo_ptr(ir_node *node) {
2224 assert(node->op == op_Raise);
2225 return get_irn_n(node, 1);
2229 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr) {
2230 assert(node->op == op_Raise);
2231 set_irn_n(node, 1, exo_ptr);
2236 /* Returns the memory input of a Bound operation. */
2237 ir_node *get_Bound_mem(ir_node *bound) {
2238 assert(bound->op == op_Bound);
2239 return get_irn_n(bound, 0);
2242 void set_Bound_mem(ir_node *bound, ir_node *mem) {
2243 assert(bound->op == op_Bound);
2244 set_irn_n(bound, 0, mem);
2247 /* Returns the index input of a Bound operation. */
2248 ir_node *get_Bound_index(ir_node *bound) {
2249 assert(bound->op == op_Bound);
2250 return get_irn_n(bound, 1);
2253 void set_Bound_index(ir_node *bound, ir_node *idx) {
2254 assert(bound->op == op_Bound);
2255 set_irn_n(bound, 1, idx);
2258 /* Returns the lower bound input of a Bound operation. */
2259 ir_node *get_Bound_lower(ir_node *bound) {
2260 assert(bound->op == op_Bound);
2261 return get_irn_n(bound, 2);
2264 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2265 assert(bound->op == op_Bound);
2266 set_irn_n(bound, 2, lower);
2269 /* Returns the upper bound input of a Bound operation. */
2270 ir_node *get_Bound_upper(ir_node *bound) {
2271 assert(bound->op == op_Bound);
2272 return get_irn_n(bound, 3);
2275 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2276 assert(bound->op == op_Bound);
2277 set_irn_n(bound, 3, upper);
2280 /* Return the operand of a Pin node. */
2281 ir_node *get_Pin_op(ir_node *pin) {
2282 assert(pin->op == op_Pin);
2283 return get_irn_n(pin, 0);
2286 void set_Pin_op(ir_node *pin, ir_node *node) {
2287 assert(pin->op == op_Pin);
2288 set_irn_n(pin, 0, node);
2292 /* returns the graph of a node */
2294 get_irn_irg(const ir_node *node) {
2296 * Do not use get_nodes_Block() here, because this
2297 * will check the pinned state.
2298 * However even a 'wrong' block is always in the proper
2301 if (! is_Block(node))
2302 node = get_irn_n(node, -1);
2303 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2304 node = get_irn_n(node, -1);
2305 assert(get_irn_op(node) == op_Block);
2306 return node->attr.block.irg;
2310 /*----------------------------------------------------------------*/
2311 /* Auxiliary routines */
2312 /*----------------------------------------------------------------*/
2315 skip_Proj(ir_node *node) {
2316 /* don't assert node !!! */
2321 node = get_Proj_pred(node);
2327 skip_Proj_const(const ir_node *node) {
2328 /* don't assert node !!! */
2333 node = get_Proj_pred(node);
2339 skip_Tuple(ir_node *node) {
2343 if (!get_opt_normalize()) return node;
2346 if (get_irn_op(node) == op_Proj) {
2347 pred = get_Proj_pred(node);
2348 op = get_irn_op(pred);
2351 * Looks strange but calls get_irn_op() only once
2352 * in most often cases.
2354 if (op == op_Proj) { /* nested Tuple ? */
2355 pred = skip_Tuple(pred);
2356 op = get_irn_op(pred);
2358 if (op == op_Tuple) {
2359 node = get_Tuple_pred(pred, get_Proj_proj(node));
2362 } else if (op == op_Tuple) {
2363 node = get_Tuple_pred(pred, get_Proj_proj(node));
2370 /* returns operand of node if node is a Cast */
2371 ir_node *skip_Cast(ir_node *node) {
2372 if (get_irn_op(node) == op_Cast)
2373 return get_Cast_op(node);
2377 /* returns operand of node if node is a Confirm */
2378 ir_node *skip_Confirm(ir_node *node) {
2379 if (get_irn_op(node) == op_Confirm)
2380 return get_Confirm_value(node);
2384 /* skip all high-level ops */
2385 ir_node *skip_HighLevel(ir_node *node) {
2386 if (is_op_highlevel(get_irn_op(node)))
2387 return get_irn_n(node, 0);
2392 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2393 * than any other approach, as Id chains are resolved and all point to the real node, or
2394 * all id's are self loops.
2396 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2397 * a little bit "hand optimized".
2399 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2402 skip_Id(ir_node *node) {
2404 /* don't assert node !!! */
2406 if (!node || (node->op != op_Id)) return node;
2408 /* Don't use get_Id_pred(): We get into an endless loop for
2409 self-referencing Ids. */
2410 pred = node->in[0+1];
2412 if (pred->op != op_Id) return pred;
2414 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2415 ir_node *rem_pred, *res;
2417 if (pred->op != op_Id) return pred; /* shortcut */
2420 assert(get_irn_arity (node) > 0);
2422 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2423 res = skip_Id(rem_pred);
2424 if (res->op == op_Id) /* self-loop */ return node;
2426 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2433 void skip_Id_and_store(ir_node **node) {
2436 if (!n || (n->op != op_Id)) return;
2438 /* Don't use get_Id_pred(): We get into an endless loop for
2439 self-referencing Ids. */
2444 (is_Bad)(const ir_node *node) {
2445 return _is_Bad(node);
2449 (is_NoMem)(const ir_node *node) {
2450 return _is_NoMem(node);
2454 (is_Mod)(const ir_node *node) {
2455 return _is_Mod(node);
2459 (is_Div)(const ir_node *node) {
2460 return _is_Div(node);
2464 (is_DivMod)(const ir_node *node) {
2465 return _is_DivMod(node);
2469 (is_Quot)(const ir_node *node) {
2470 return _is_Quot(node);
2474 (is_Start)(const ir_node *node) {
2475 return _is_Start(node);
2479 (is_End)(const ir_node *node) {
2480 return _is_End(node);
2484 (is_Const)(const ir_node *node) {
2485 return _is_Const(node);
2489 (is_no_Block)(const ir_node *node) {
2490 return _is_no_Block(node);
2494 (is_Block)(const ir_node *node) {
2495 return _is_Block(node);
2498 /* returns true if node is an Unknown node. */
2500 (is_Unknown)(const ir_node *node) {
2501 return _is_Unknown(node);
2504 /* returns true if node is a Return node. */
2506 (is_Return)(const ir_node *node) {
2507 return _is_Return(node);
2510 /* returns true if node is a Call node. */
2512 (is_Call)(const ir_node *node) {
2513 return _is_Call(node);
2516 /* returns true if node is a Sel node. */
2518 (is_Sel)(const ir_node *node) {
2519 return _is_Sel(node);
2522 /* returns true if node is a Mux node or a Psi with only one condition. */
2524 (is_Mux)(const ir_node *node) {
2525 return _is_Mux(node);
2528 /* returns true if node is a Load node. */
2530 (is_Load)(const ir_node *node) {
2531 return _is_Load(node);
2534 /* returns true if node is a Load node. */
2536 (is_Store)(const ir_node *node) {
2537 return _is_Store(node);
2540 /* returns true if node is a Sync node. */
2542 (is_Sync)(const ir_node *node) {
2543 return _is_Sync(node);
2546 /* returns true if node is a Confirm node. */
2548 (is_Confirm)(const ir_node *node) {
2549 return _is_Confirm(node);
2552 /* returns true if node is a Pin node. */
2554 (is_Pin)(const ir_node *node) {
2555 return _is_Pin(node);
2558 /* returns true if node is a SymConst node. */
2560 (is_SymConst)(const ir_node *node) {
2561 return _is_SymConst(node);
2564 /* returns true if node is a Cond node. */
2566 (is_Cond)(const ir_node *node) {
2567 return _is_Cond(node);
2571 (is_CopyB)(const ir_node *node) {
2572 return _is_CopyB(node);
2575 /* returns true if node is a Cmp node. */
2577 (is_Cmp)(const ir_node *node) {
2578 return _is_Cmp(node);
2581 /* returns true if node is an Alloc node. */
2583 (is_Alloc)(const ir_node *node) {
2584 return _is_Alloc(node);
2587 /* returns true if a node is a Jmp node. */
2589 (is_Jmp)(const ir_node *node) {
2590 return _is_Jmp(node);
2593 /* returns true if a node is a Raise node. */
2595 (is_Raise)(const ir_node *node) {
2596 return _is_Raise(node);
2600 is_Proj(const ir_node *node) {
2602 return node->op == op_Proj ||
2603 (!get_interprocedural_view() && node->op == op_Filter);
2606 /* Returns true if the operation manipulates control flow. */
2608 is_cfop(const ir_node *node) {
2609 return is_cfopcode(get_irn_op(node));
2612 /* Returns true if the operation manipulates interprocedural control flow:
2613 CallBegin, EndReg, EndExcept */
2614 int is_ip_cfop(const ir_node *node) {
2615 return is_ip_cfopcode(get_irn_op(node));
2618 /* Returns true if the operation can change the control flow because
2621 is_fragile_op(const ir_node *node) {
2622 return is_op_fragile(get_irn_op(node));
2625 /* Returns the memory operand of fragile operations. */
2626 ir_node *get_fragile_op_mem(ir_node *node) {
2627 assert(node && is_fragile_op(node));
2629 switch (get_irn_opcode (node)) {
2639 return get_irn_n(node, 0);
2644 assert(0 && "should not be reached");
2649 /* Returns true if the operation is a forking control flow operation. */
2650 int (is_irn_forking)(const ir_node *node) {
2651 return _is_irn_forking(node);
2654 /* Return the type associated with the value produced by n
2655 * if the node remarks this type as it is the case for
2656 * Cast, Const, SymConst and some Proj nodes. */
2657 ir_type *(get_irn_type)(ir_node *node) {
2658 return _get_irn_type(node);
2661 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2663 ir_type *(get_irn_type_attr)(ir_node *node) {
2664 return _get_irn_type_attr(node);
2667 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2668 ir_entity *(get_irn_entity_attr)(ir_node *node) {
2669 return _get_irn_entity_attr(node);
2672 /* Returns non-zero for constant-like nodes. */
2673 int (is_irn_constlike)(const ir_node *node) {
2674 return _is_irn_constlike(node);
2678 * Returns non-zero for nodes that are allowed to have keep-alives and
2679 * are neither Block nor PhiM.
2681 int (is_irn_keep)(const ir_node *node) {
2682 return _is_irn_keep(node);
2686 * Returns non-zero for nodes that are always placed in the start block.
2688 int (is_irn_start_block_placed)(const ir_node *node) {
2689 return _is_irn_start_block_placed(node);
2692 /* Returns non-zero for nodes that are machine operations. */
2693 int (is_irn_machine_op)(const ir_node *node) {
2694 return _is_irn_machine_op(node);
2697 /* Returns non-zero for nodes that are machine operands. */
2698 int (is_irn_machine_operand)(const ir_node *node) {
2699 return _is_irn_machine_operand(node);
2702 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2703 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2704 return _is_irn_machine_user(node, n);
2708 /* Gets the string representation of the jump prediction .*/
2709 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
2712 case COND_JMP_PRED_NONE: return "no prediction";
2713 case COND_JMP_PRED_TRUE: return "true taken";
2714 case COND_JMP_PRED_FALSE: return "false taken";
2718 /* Returns the conditional jump prediction of a Cond node. */
2719 cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
2720 return _get_Cond_jmp_pred(cond);
2723 /* Sets a new conditional jump prediction. */
2724 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2725 _set_Cond_jmp_pred(cond, pred);
2728 /** the get_type operation must be always implemented and return a firm type */
2729 static ir_type *get_Default_type(ir_node *n) {
2730 return get_unknown_type();
2733 /* Sets the get_type operation for an ir_op_ops. */
2734 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
2736 case iro_Const: ops->get_type = get_Const_type; break;
2737 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2738 case iro_Cast: ops->get_type = get_Cast_type; break;
2739 case iro_Proj: ops->get_type = get_Proj_type; break;
2741 /* not allowed to be NULL */
2742 if (! ops->get_type)
2743 ops->get_type = get_Default_type;
2749 /** Return the attribute type of a SymConst node if exists */
2750 static ir_type *get_SymConst_attr_type(ir_node *self) {
2751 symconst_kind kind = get_SymConst_kind(self);
2752 if (SYMCONST_HAS_TYPE(kind))
2753 return get_SymConst_type(self);
2757 /** Return the attribute entity of a SymConst node if exists */
2758 static ir_entity *get_SymConst_attr_entity(ir_node *self) {
2759 symconst_kind kind = get_SymConst_kind(self);
2760 if (SYMCONST_HAS_ENT(kind))
2761 return get_SymConst_entity(self);
2765 /** the get_type_attr operation must be always implemented */
2766 static ir_type *get_Null_type(ir_node *n) {
2767 return firm_unknown_type;
2770 /* Sets the get_type operation for an ir_op_ops. */
2771 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
2773 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2774 case iro_Call: ops->get_type_attr = get_Call_type; break;
2775 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2776 case iro_Free: ops->get_type_attr = get_Free_type; break;
2777 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2779 /* not allowed to be NULL */
2780 if (! ops->get_type_attr)
2781 ops->get_type_attr = get_Null_type;
2787 /** the get_entity_attr operation must be always implemented */
2788 static ir_entity *get_Null_ent(ir_node *n) {
2792 /* Sets the get_type operation for an ir_op_ops. */
2793 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
2795 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2796 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
2798 /* not allowed to be NULL */
2799 if (! ops->get_entity_attr)
2800 ops->get_entity_attr = get_Null_ent;
2806 #ifdef DEBUG_libfirm
2807 void dump_irn(ir_node *n) {
2808 int i, arity = get_irn_arity(n);
2809 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2811 ir_node *pred = get_irn_n(n, -1);
2812 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2813 get_irn_node_nr(pred), (void *)pred);
2815 printf(" preds: \n");
2816 for (i = 0; i < arity; ++i) {
2817 ir_node *pred = get_irn_n(n, i);
2818 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2819 get_irn_node_nr(pred), (void *)pred);
2823 #else /* DEBUG_libfirm */
2824 void dump_irn(ir_node *n) {}
2825 #endif /* DEBUG_libfirm */