3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier, Michael Beck
9 * Copyright: (c) 1998-2006 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredgekinds.h"
31 #include "iredges_t.h"
36 /* some constants fixing the positions of nodes predecessors
38 #define CALL_PARAM_OFFSET 2
39 #define FUNCCALL_PARAM_OFFSET 1
40 #define SEL_INDEX_OFFSET 2
41 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
42 #define END_KEEPALIVE_OFFSET 0
/**
 * String names for the 16 pn_Cmp relation constants, indexed by the
 * pnc value itself (pn_Cmp_False == 0 ... pn_Cmp_True == 15).
 * NOTE(review): the visible listing was missing the closing "};" — restored.
 */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};
52 * returns the pnc name from a pnc constant
54 const char *get_pnc_string(int pnc) {
55 return pnc_name_arr[pnc];
59 * Calculates the negated (Complement(R)) pnc condition.
61 int get_negated_pnc(int pnc, ir_mode *mode) {
64 /* do NOT add the Uo bit for non-floating point values */
65 if (! mode_is_float(mode))
71 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
73 get_inversed_pnc(int pnc) {
74 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
75 int lesser = pnc & pn_Cmp_Lt;
76 int greater = pnc & pn_Cmp_Gt;
78 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
84 * Indicates, whether additional data can be registered to ir nodes.
85 * If set to 1, this is not possible anymore.
87 static int forbid_new_data = 0;
90 * The amount of additional space for custom data to be allocated upon
91 * creating a new node.
93 unsigned firm_add_node_size = 0;
96 /* register new space for every node */
97 unsigned register_additional_node_data(unsigned size) {
98 assert(!forbid_new_data && "Too late to register additional node data");
103 return firm_add_node_size += size;
109 /* Forbid the addition of new data to an ir node. */
114 * irnode constructor.
115 * Create a new irnode in irg, with an op, mode, arity and
116 * some incoming irnodes.
117 * If arity is negative, a node with a dynamic array is created.
120 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
121 int arity, ir_node **in)
124 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
128 assert(irg && op && mode);
129 p = obstack_alloc (irg->obst, node_size);
130 memset(p, 0, node_size);
131 res = (ir_node *) (p + firm_add_node_size);
133 res->kind = k_ir_node;
137 res->node_idx = irg_register_node_idx(irg, res);
142 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
144 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
145 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
149 set_irn_dbg_info(res, db);
153 res->node_nr = get_irp_new_node_nr();
156 for(i = 0; i < EDGE_KIND_LAST; ++i)
157 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
159 // don't put this into the for loop, arity is -1 for some nodes!
160 edges_notify_edge(res, -1, res->in[0], NULL, irg);
161 for (i = 1; i <= arity; ++i)
162 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
164 hook_new_node(irg, res);
169 /*-- getting some parameters from ir_nodes --*/
172 (is_ir_node)(const void *thing) {
173 return _is_ir_node(thing);
177 (get_irn_intra_arity)(const ir_node *node) {
178 return _get_irn_intra_arity(node);
182 (get_irn_inter_arity)(const ir_node *node) {
183 return _get_irn_inter_arity(node);
186 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
189 (get_irn_arity)(const ir_node *node) {
190 return _get_irn_arity(node);
193 /* Returns the array with ins. This array is shifted with respect to the
194 array accessed by get_irn_n: The block operand is at position 0 not -1.
195 (@@@ This should be changed.)
196 The order of the predecessors in this array is not guaranteed, except that
197 lists of operands as predecessors of Block or arguments of a Call are
200 get_irn_in(const ir_node *node) {
202 if (get_interprocedural_view()) { /* handle Filter and Block specially */
203 if (get_irn_opcode(node) == iro_Filter) {
204 assert(node->attr.filter.in_cg);
205 return node->attr.filter.in_cg;
206 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
207 return node->attr.block.in_cg;
209 /* else fall through */
215 set_irn_in(ir_node *node, int arity, ir_node **in) {
218 ir_graph *irg = current_ir_graph;
220 if (get_interprocedural_view()) { /* handle Filter and Block specially */
221 if (get_irn_opcode(node) == iro_Filter) {
222 assert(node->attr.filter.in_cg);
223 arr = &node->attr.filter.in_cg;
224 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
225 arr = &node->attr.block.in_cg;
233 for (i = 0; i < arity; i++) {
234 if (i < ARR_LEN(*arr)-1)
235 edges_notify_edge(node, i, in[i], (*arr)[i+1], irg);
237 edges_notify_edge(node, i, in[i], NULL, irg);
239 for(;i < ARR_LEN(*arr)-1; i++) {
240 edges_notify_edge(node, i, NULL, (*arr)[i+1], irg);
243 if (arity != ARR_LEN(*arr) - 1) {
244 ir_node * block = (*arr)[0];
245 *arr = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
248 fix_backedges(irg->obst, node);
250 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
254 (get_irn_intra_n)(const ir_node *node, int n) {
255 return _get_irn_intra_n (node, n);
259 (get_irn_inter_n)(const ir_node *node, int n) {
260 return _get_irn_inter_n (node, n);
263 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
266 (get_irn_n)(const ir_node *node, int n) {
267 return _get_irn_n(node, n);
271 set_irn_n (ir_node *node, int n, ir_node *in) {
272 assert(node && node->kind == k_ir_node);
274 assert(n < get_irn_arity(node));
275 assert(in && in->kind == k_ir_node);
277 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
278 /* Change block pred in both views! */
279 node->in[n + 1] = in;
280 assert(node->attr.filter.in_cg);
281 node->attr.filter.in_cg[n + 1] = in;
284 if (get_interprocedural_view()) { /* handle Filter and Block specially */
285 if (get_irn_opcode(node) == iro_Filter) {
286 assert(node->attr.filter.in_cg);
287 node->attr.filter.in_cg[n + 1] = in;
289 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
290 node->attr.block.in_cg[n + 1] = in;
293 /* else fall through */
297 hook_set_irn_n(node, n, in, node->in[n + 1]);
299 /* Here, we rely on src and tgt being in the current ir graph */
300 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
302 node->in[n + 1] = in;
305 int add_irn_n(ir_node *node, ir_node *in)
308 ir_graph *irg = get_irn_irg(node);
310 assert(node->op->opar == oparity_dynamic);
311 pos = ARR_LEN(node->in) - 1;
312 ARR_APP1(ir_node *, node->in, in);
313 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
316 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
322 (get_irn_deps)(const ir_node *node)
324 return _get_irn_deps(node);
328 (get_irn_dep)(const ir_node *node, int pos)
330 return _get_irn_dep(node, pos);
334 (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
336 _set_irn_dep(node, pos, dep);
339 int add_irn_dep(ir_node *node, ir_node *dep)
343 if (node->deps == NULL) {
344 node->deps = NEW_ARR_F(ir_node *, 1);
350 for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
351 if(node->deps[i] == NULL)
354 if(node->deps[i] == dep)
358 if (first_zero >= 0) {
359 node->deps[first_zero] = dep;
362 ARR_APP1(ir_node *, node->deps, dep);
367 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
372 void add_irn_deps(ir_node *tgt, ir_node *src)
376 for(i = 0, n = get_irn_deps(src); i < n; ++i)
377 add_irn_dep(tgt, get_irn_dep(src, i));
382 (get_irn_mode)(const ir_node *node) {
383 return _get_irn_mode(node);
387 (set_irn_mode)(ir_node *node, ir_mode *mode) {
388 _set_irn_mode(node, mode);
392 get_irn_modecode(const ir_node *node) {
394 return node->mode->code;
397 /** Gets the string representation of the mode .*/
399 get_irn_modename(const ir_node *node) {
401 return get_mode_name(node->mode);
405 get_irn_modeident(const ir_node *node) {
407 return get_mode_ident(node->mode);
411 (get_irn_op)(const ir_node *node) {
412 return _get_irn_op(node);
415 /* should be private to the library: */
417 (set_irn_op)(ir_node *node, ir_op *op) {
418 _set_irn_op(node, op);
422 (get_irn_opcode)(const ir_node *node) {
423 return _get_irn_opcode(node);
427 get_irn_opname(const ir_node *node) {
429 if ((get_irn_op((ir_node *)node) == op_Phi) &&
430 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
431 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
432 return get_id_str(node->op->name);
436 get_irn_opident(const ir_node *node) {
438 return node->op->name;
442 (get_irn_visited)(const ir_node *node) {
443 return _get_irn_visited(node);
447 (set_irn_visited)(ir_node *node, unsigned long visited) {
448 _set_irn_visited(node, visited);
452 (mark_irn_visited)(ir_node *node) {
453 _mark_irn_visited(node);
457 (irn_not_visited)(const ir_node *node) {
458 return _irn_not_visited(node);
462 (irn_visited)(const ir_node *node) {
463 return _irn_visited(node);
467 (set_irn_link)(ir_node *node, void *link) {
468 _set_irn_link(node, link);
472 (get_irn_link)(const ir_node *node) {
473 return _get_irn_link(node);
477 (get_irn_pinned)(const ir_node *node) {
478 return _get_irn_pinned(node);
482 (is_irn_pinned_in_irg) (const ir_node *node) {
483 return _is_irn_pinned_in_irg(node);
486 void set_irn_pinned(ir_node *node, op_pin_state state) {
487 /* due to optimization an opt may be turned into a Tuple */
488 if (get_irn_op(node) == op_Tuple)
491 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
492 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
494 node->attr.except.pin_state = state;
497 #ifdef DO_HEAPANALYSIS
498 /* Access the abstract interpretation information of a node.
499 Returns NULL if no such information is available. */
500 struct abstval *get_irn_abst_value(ir_node *n) {
503 /* Set the abstract interpretation information of a node. */
504 void set_irn_abst_value(ir_node *n, struct abstval *os) {
507 struct section *firm_get_irn_section(ir_node *n) {
510 void firm_set_irn_section(ir_node *n, struct section *s) {
514 /* Dummies needed for firmjni. */
515 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
516 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
517 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
518 void firm_set_irn_section(ir_node *n, struct section *s) {}
519 #endif /* DO_HEAPANALYSIS */
522 /* Outputs a unique number for this node */
523 long get_irn_node_nr(const ir_node *node) {
526 return node->node_nr;
528 return (long)PTR_TO_INT(node);
533 get_irn_const_attr(ir_node *node) {
534 assert(node->op == op_Const);
535 return node->attr.con;
539 get_irn_proj_attr(ir_node *node) {
540 assert(node->op == op_Proj);
541 return node->attr.proj;
545 get_irn_alloc_attr(ir_node *node) {
546 assert(node->op == op_Alloc);
547 return node->attr.alloc;
551 get_irn_free_attr(ir_node *node) {
552 assert(node->op == op_Free);
553 return node->attr.free;
557 get_irn_symconst_attr(ir_node *node) {
558 assert(node->op == op_SymConst);
559 return node->attr.symc;
563 get_irn_call_attr(ir_node *node) {
564 assert(node->op == op_Call);
565 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
569 get_irn_sel_attr(ir_node *node) {
570 assert(node->op == op_Sel);
571 return node->attr.sel;
575 get_irn_phi_attr(ir_node *node) {
576 assert(node->op == op_Phi);
577 return node->attr.phi0_pos;
581 get_irn_block_attr(ir_node *node) {
582 assert(node->op == op_Block);
583 return node->attr.block;
587 get_irn_load_attr(ir_node *node)
589 assert(node->op == op_Load);
590 return node->attr.load;
594 get_irn_store_attr(ir_node *node)
596 assert(node->op == op_Store);
597 return node->attr.store;
601 get_irn_except_attr(ir_node *node) {
602 assert(node->op == op_Div || node->op == op_Quot ||
603 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
604 return node->attr.except;
608 get_irn_generic_attr(ir_node *node) {
612 unsigned (get_irn_idx)(const ir_node *node) {
613 assert(is_ir_node(node));
614 return _get_irn_idx(node);
617 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
619 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
620 if (get_irn_n(node, i) == arg)
626 /** manipulate fields of individual nodes **/
628 /* this works for all except Block */
630 get_nodes_block(const ir_node *node) {
631 assert(node->op != op_Block);
632 assert(is_irn_pinned_in_irg(node) && "block info may be incorrect");
633 return get_irn_n(node, -1);
637 set_nodes_block(ir_node *node, ir_node *block) {
638 assert(node->op != op_Block);
639 set_irn_n(node, -1, block);
642 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
643 * from Start. If so returns frame type, else Null. */
644 ir_type *is_frame_pointer(ir_node *n) {
645 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
646 ir_node *start = get_Proj_pred(n);
647 if (get_irn_op(start) == op_Start) {
648 return get_irg_frame_type(get_irn_irg(start));
654 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
655 * from Start. If so returns global type, else Null. */
656 ir_type *is_globals_pointer(ir_node *n) {
657 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
658 ir_node *start = get_Proj_pred(n);
659 if (get_irn_op(start) == op_Start) {
660 return get_glob_type();
666 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
667 * from Start. If so returns tls type, else Null. */
668 ir_type *is_tls_pointer(ir_node *n) {
669 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
670 ir_node *start = get_Proj_pred(n);
671 if (get_irn_op(start) == op_Start) {
672 return get_tls_type();
678 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
679 * from Start. If so returns 1, else 0. */
680 int is_value_arg_pointer(ir_node *n) {
681 if ((get_irn_op(n) == op_Proj) &&
682 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
683 (get_irn_op(get_Proj_pred(n)) == op_Start))
688 /* Returns an array with the predecessors of the Block. Depending on
689 the implementation of the graph data structure this can be a copy of
690 the internal representation of predecessors as well as the internal
691 array itself. Therefore writing to this array might obstruct the ir. */
693 get_Block_cfgpred_arr(ir_node *node) {
694 assert((node->op == op_Block));
695 return (ir_node **)&(get_irn_in(node)[1]);
699 (get_Block_n_cfgpreds)(const ir_node *node) {
700 return _get_Block_n_cfgpreds(node);
704 (get_Block_cfgpred)(ir_node *node, int pos) {
705 return _get_Block_cfgpred(node, pos);
709 set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
710 assert(node->op == op_Block);
711 set_irn_n(node, pos, pred);
715 (get_Block_cfgpred_block)(ir_node *node, int pos) {
716 return _get_Block_cfgpred_block(node, pos);
720 get_Block_matured(ir_node *node) {
721 assert(node->op == op_Block);
722 return (int)node->attr.block.matured;
726 set_Block_matured(ir_node *node, int matured) {
727 assert(node->op == op_Block);
728 node->attr.block.matured = matured;
732 (get_Block_block_visited)(const ir_node *node) {
733 return _get_Block_block_visited(node);
737 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
738 _set_Block_block_visited(node, visit);
741 /* For this current_ir_graph must be set. */
743 (mark_Block_block_visited)(ir_node *node) {
744 _mark_Block_block_visited(node);
748 (Block_not_block_visited)(const ir_node *node) {
749 return _Block_not_block_visited(node);
753 (Block_block_visited)(const ir_node *node) {
754 return _Block_block_visited(node);
758 get_Block_graph_arr (ir_node *node, int pos) {
759 assert(node->op == op_Block);
760 return node->attr.block.graph_arr[pos+1];
764 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
765 assert(node->op == op_Block);
766 node->attr.block.graph_arr[pos+1] = value;
769 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
770 assert(node->op == op_Block);
771 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
772 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
773 node->attr.block.in_cg[0] = NULL;
774 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
776 /* Fix backedge array. fix_backedges() operates depending on
777 interprocedural_view. */
778 int ipv = get_interprocedural_view();
779 set_interprocedural_view(1);
780 fix_backedges(current_ir_graph->obst, node);
781 set_interprocedural_view(ipv);
784 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
787 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
788 assert(node->op == op_Block &&
789 node->attr.block.in_cg &&
790 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
791 node->attr.block.in_cg[pos + 1] = pred;
794 ir_node **get_Block_cg_cfgpred_arr(ir_node * node) {
795 assert(node->op == op_Block);
796 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
799 int get_Block_cg_n_cfgpreds(ir_node * node) {
800 assert(node->op == op_Block);
801 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
804 ir_node *get_Block_cg_cfgpred(ir_node * node, int pos) {
805 assert(node->op == op_Block && node->attr.block.in_cg);
806 return node->attr.block.in_cg[pos + 1];
809 void remove_Block_cg_cfgpred_arr(ir_node * node) {
810 assert(node->op == op_Block);
811 node->attr.block.in_cg = NULL;
814 ir_node *(set_Block_dead)(ir_node *block) {
815 return _set_Block_dead(block);
818 int (is_Block_dead)(const ir_node *block) {
819 return _is_Block_dead(block);
822 ir_extblk *get_Block_extbb(const ir_node *block) {
824 assert(is_Block(block));
825 res = block->attr.block.extblk;
826 assert(res == NULL || is_ir_extbb(res));
830 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
831 assert(is_Block(block));
832 assert(extblk == NULL || is_ir_extbb(extblk));
833 block->attr.block.extblk = extblk;
837 get_End_n_keepalives(ir_node *end) {
838 assert(end->op == op_End);
839 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
843 get_End_keepalive(ir_node *end, int pos) {
844 assert(end->op == op_End);
845 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
849 add_End_keepalive (ir_node *end, ir_node *ka) {
850 assert(end->op == op_End);
855 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
856 assert(end->op == op_End);
857 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
860 /* Set new keep-alives */
861 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
863 ir_graph *irg = get_irn_irg(end);
865 /* notify that edges are deleted */
866 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
867 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
869 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
871 for (i = 0; i < n; ++i) {
872 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
873 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
877 /* Set new keep-alives from old keep-alives, skipping irn */
878 void remove_End_keepalive(ir_node *end, ir_node *irn) {
879 int n = get_End_n_keepalives(end);
883 NEW_ARR_A(ir_node *, in, n);
885 for (idx = i = 0; i < n; ++i) {
886 ir_node *old_ka = get_End_keepalive(end, i);
893 /* set new keep-alives */
894 set_End_keepalives(end, idx, in);
898 free_End (ir_node *end) {
899 assert(end->op == op_End);
902 end->in = NULL; /* @@@ make sure we get an error if we use the
903 in array afterwards ... */
906 /* Return the target address of an IJmp */
907 ir_node *get_IJmp_target(ir_node *ijmp) {
908 assert(ijmp->op == op_IJmp);
909 return get_irn_n(ijmp, 0);
912 /** Sets the target address of an IJmp */
913 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
914 assert(ijmp->op == op_IJmp);
915 set_irn_n(ijmp, 0, tgt);
919 > Implementing the case construct (which is where the constant Proj node is
920 > important) involves far more than simply determining the constant values.
921 > We could argue that this is more properly a function of the translator from
922 > Firm to the target machine. That could be done if there was some way of
923 > projecting "default" out of the Cond node.
924 I know it's complicated.
925 Basically there are two problems:
926 - determining the gaps between the projs
927 - determining the biggest case constant to know the proj number for
929 I see several solutions:
930 1. Introduce a ProjDefault node. Solves both problems.
931 This means to extend all optimizations executed during construction.
932 2. Give the Cond node for switch two flavors:
933 a) there are no gaps in the projs (existing flavor)
934 b) gaps may exist, default proj is still the Proj with the largest
935 projection number. This covers also the gaps.
936 3. Fix the semantic of the Cond to that of 2b)
938 Solution 2 seems to be the best:
939 Computing the gaps in the Firm representation is not too hard, i.e.,
940 libFIRM can implement a routine that transforms between the two
941 flavours. This is also possible for 1) but 2) does not require to
942 change any existing optimization.
943 Further it should be far simpler to determine the biggest constant than
945 I don't want to choose 3) as 2a) seems to have advantages for
946 dataflow analysis and 3) does not allow to convert the representation to
950 get_Cond_selector(ir_node *node) {
951 assert(node->op == op_Cond);
952 return get_irn_n(node, 0);
956 set_Cond_selector(ir_node *node, ir_node *selector) {
957 assert(node->op == op_Cond);
958 set_irn_n(node, 0, selector);
962 get_Cond_kind(ir_node *node) {
963 assert(node->op == op_Cond);
964 return node->attr.cond.kind;
968 set_Cond_kind(ir_node *node, cond_kind kind) {
969 assert(node->op == op_Cond);
970 node->attr.cond.kind = kind;
974 get_Cond_defaultProj(ir_node *node) {
975 assert(node->op == op_Cond);
976 return node->attr.cond.default_proj;
980 get_Return_mem(ir_node *node) {
981 assert(node->op == op_Return);
982 return get_irn_n(node, 0);
986 set_Return_mem(ir_node *node, ir_node *mem) {
987 assert(node->op == op_Return);
988 set_irn_n(node, 0, mem);
992 get_Return_n_ress(ir_node *node) {
993 assert(node->op == op_Return);
994 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
998 get_Return_res_arr (ir_node *node) {
999 assert((node->op == op_Return));
1000 if (get_Return_n_ress(node) > 0)
1001 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1008 set_Return_n_res(ir_node *node, int results) {
1009 assert(node->op == op_Return);
1014 get_Return_res(ir_node *node, int pos) {
1015 assert(node->op == op_Return);
1016 assert(get_Return_n_ress(node) > pos);
1017 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1021 set_Return_res(ir_node *node, int pos, ir_node *res){
1022 assert(node->op == op_Return);
1023 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1026 tarval *(get_Const_tarval)(ir_node *node) {
1027 return _get_Const_tarval(node);
1031 set_Const_tarval(ir_node *node, tarval *con) {
1032 assert(node->op == op_Const);
1033 node->attr.con.tv = con;
1036 cnst_classify_t (classify_Const)(ir_node *node) {
1037 return _classify_Const(node);
1041 /* The source language type. Must be an atomic type. Mode of type must
1042 be mode of node. For tarvals from entities type must be pointer to
1045 get_Const_type(ir_node *node) {
1046 assert(node->op == op_Const);
1047 return node->attr.con.tp;
1051 set_Const_type(ir_node *node, ir_type *tp) {
1052 assert(node->op == op_Const);
1053 if (tp != firm_unknown_type) {
1054 assert(is_atomic_type(tp));
1055 assert(get_type_mode(tp) == get_irn_mode(node));
1057 node->attr.con.tp = tp;
1062 get_SymConst_kind(const ir_node *node) {
1063 assert(node->op == op_SymConst);
1064 return node->attr.symc.num;
1068 set_SymConst_kind(ir_node *node, symconst_kind num) {
1069 assert(node->op == op_SymConst);
1070 node->attr.symc.num = num;
1074 get_SymConst_type(ir_node *node) {
1075 assert((node->op == op_SymConst) &&
1076 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1077 return node->attr.symc.sym.type_p = skip_tid(node->attr.symc.sym.type_p);
1081 set_SymConst_type(ir_node *node, ir_type *tp) {
1082 assert((node->op == op_SymConst) &&
1083 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1084 node->attr.symc.sym.type_p = tp;
1088 get_SymConst_name(ir_node *node) {
1089 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1090 return node->attr.symc.sym.ident_p;
1094 set_SymConst_name(ir_node *node, ident *name) {
1095 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1096 node->attr.symc.sym.ident_p = name;
1100 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1101 ir_entity *get_SymConst_entity(ir_node *node) {
1102 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1103 return node->attr.symc.sym.entity_p;
1106 void set_SymConst_entity(ir_node *node, ir_entity *ent) {
1107 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1108 node->attr.symc.sym.entity_p = ent;
1111 ir_enum_const *get_SymConst_enum(ir_node *node) {
1112 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1113 return node->attr.symc.sym.enum_p;
1116 void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
1117 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1118 node->attr.symc.sym.enum_p = ec;
1121 union symconst_symbol
1122 get_SymConst_symbol(ir_node *node) {
1123 assert(node->op == op_SymConst);
1124 return node->attr.symc.sym;
1128 set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
1129 assert(node->op == op_SymConst);
1130 node->attr.symc.sym = sym;
1134 get_SymConst_value_type(ir_node *node) {
1135 assert(node->op == op_SymConst);
1136 if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
1137 return node->attr.symc.tp;
1141 set_SymConst_value_type(ir_node *node, ir_type *tp) {
1142 assert(node->op == op_SymConst);
1143 node->attr.symc.tp = tp;
1147 get_Sel_mem(ir_node *node) {
1148 assert(node->op == op_Sel);
1149 return get_irn_n(node, 0);
1153 set_Sel_mem(ir_node *node, ir_node *mem) {
1154 assert(node->op == op_Sel);
1155 set_irn_n(node, 0, mem);
1159 get_Sel_ptr(ir_node *node) {
1160 assert(node->op == op_Sel);
1161 return get_irn_n(node, 1);
1165 set_Sel_ptr(ir_node *node, ir_node *ptr) {
1166 assert(node->op == op_Sel);
1167 set_irn_n(node, 1, ptr);
1171 get_Sel_n_indexs(ir_node *node) {
1172 assert(node->op == op_Sel);
1173 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1177 get_Sel_index_arr(ir_node *node) {
1178 assert((node->op == op_Sel));
1179 if (get_Sel_n_indexs(node) > 0)
1180 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1186 get_Sel_index(ir_node *node, int pos) {
1187 assert(node->op == op_Sel);
1188 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1192 set_Sel_index(ir_node *node, int pos, ir_node *index) {
1193 assert(node->op == op_Sel);
1194 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1198 get_Sel_entity(ir_node *node) {
1199 assert(node->op == op_Sel);
1200 return node->attr.sel.ent;
1204 set_Sel_entity(ir_node *node, ir_entity *ent) {
1205 assert(node->op == op_Sel);
1206 node->attr.sel.ent = ent;
1210 /* For unary and binary arithmetic operations the access to the
1211 operands can be factored out. Left is the first, right the
1212 second arithmetic value as listed in tech report 0999-33.
1213 unops are: Minus, Abs, Not, Conv, Cast
1214 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1215 Shr, Shrs, Rotate, Cmp */
1219 get_Call_mem(ir_node *node) {
1220 assert(node->op == op_Call);
1221 return get_irn_n(node, 0);
1225 set_Call_mem(ir_node *node, ir_node *mem) {
1226 assert(node->op == op_Call);
1227 set_irn_n(node, 0, mem);
1231 get_Call_ptr(ir_node *node) {
1232 assert(node->op == op_Call);
1233 return get_irn_n(node, 1);
1237 set_Call_ptr(ir_node *node, ir_node *ptr) {
1238 assert(node->op == op_Call);
1239 set_irn_n(node, 1, ptr);
1243 get_Call_param_arr(ir_node *node) {
1244 assert(node->op == op_Call);
1245 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1249 get_Call_n_params(ir_node *node) {
1250 assert(node->op == op_Call);
1251 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1255 get_Call_arity(ir_node *node) {
1256 assert(node->op == op_Call);
1257 return get_Call_n_params(node);
1261 set_Call_arity(ir_node *node, ir_node *arity) {
1262 assert(node->op == op_Call);
1267 get_Call_param(ir_node *node, int pos) {
1268 assert(node->op == op_Call);
1269 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1273 set_Call_param(ir_node *node, int pos, ir_node *param) {
1274 assert(node->op == op_Call);
1275 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1279 get_Call_type(ir_node *node) {
1280 assert(node->op == op_Call);
1281 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1285 set_Call_type(ir_node *node, ir_type *tp) {
1286 assert(node->op == op_Call);
1287 assert((get_unknown_type() == tp) || is_Method_type(tp));
1288 node->attr.call.cld_tp = tp;
1291 int Call_has_callees(ir_node *node) {
1292 assert(node && node->op == op_Call);
1293 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1294 (node->attr.call.callee_arr != NULL));
1297 int get_Call_n_callees(ir_node * node) {
1298 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1299 return ARR_LEN(node->attr.call.callee_arr);
1302 ir_entity * get_Call_callee(ir_node * node, int pos) {
1303 assert(pos >= 0 && pos < get_Call_n_callees(node));
1304 return node->attr.call.callee_arr[pos];
1307 void set_Call_callee_arr(ir_node * node, const int n, ir_entity ** arr) {
1308 assert(node->op == op_Call);
1309 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1310 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1312 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1315 void remove_Call_callee_arr(ir_node * node) {
1316 assert(node->op == op_Call);
1317 node->attr.call.callee_arr = NULL;
1320 ir_node * get_CallBegin_ptr(ir_node *node) {
1321 assert(node->op == op_CallBegin);
1322 return get_irn_n(node, 0);
1325 void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
1326 assert(node->op == op_CallBegin);
1327 set_irn_n(node, 0, ptr);
1330 ir_node * get_CallBegin_call(ir_node *node) {
1331 assert(node->op == op_CallBegin);
1332 return node->attr.callbegin.call;
1335 void set_CallBegin_call(ir_node *node, ir_node *call) {
1336 assert(node->op == op_CallBegin);
1337 node->attr.callbegin.call = call;
/* NOTE(review): the lines below are backslash-continuation lines of the
 * accessor-generating macros (left/right pair, single op, and BINOP_MEM);
 * their #define headers are not visible in this chunk, so no comment
 * lines may be inserted between the continuations.  Convention visible
 * here: the left/single operand sits at the op's op_index, the right
 * operand at op_index + 1, and the memory input of BINOP_MEM ops is
 * input 0. */
1342 ir_node * get_##OP##_left(ir_node *node) { \
1343 assert(node->op == op_##OP); \
1344 return get_irn_n(node, node->op->op_index); \
1346 void set_##OP##_left(ir_node *node, ir_node *left) { \
1347 assert(node->op == op_##OP); \
1348 set_irn_n(node, node->op->op_index, left); \
1350 ir_node *get_##OP##_right(ir_node *node) { \
1351 assert(node->op == op_##OP); \
1352 return get_irn_n(node, node->op->op_index + 1); \
1354 void set_##OP##_right(ir_node *node, ir_node *right) { \
1355 assert(node->op == op_##OP); \
1356 set_irn_n(node, node->op->op_index + 1, right); \
1360 ir_node *get_##OP##_op(ir_node *node) { \
1361 assert(node->op == op_##OP); \
1362 return get_irn_n(node, node->op->op_index); \
1364 void set_##OP##_op (ir_node *node, ir_node *op) { \
1365 assert(node->op == op_##OP); \
1366 set_irn_n(node, node->op->op_index, op); \
1369 #define BINOP_MEM(OP) \
1373 get_##OP##_mem(ir_node *node) { \
1374 assert(node->op == op_##OP); \
1375 return get_irn_n(node, 0); \
1379 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1380 assert(node->op == op_##OP); \
1381 set_irn_n(node, 0, mem); \
/* Conv strict flag: non-zero if the conversion must be kept as-is. */
1405 int get_Conv_strict(ir_node *node) {
1406 assert(node->op == op_Conv);
1407 return node->attr.conv.strict;
/* The flag is stored as a char attribute, hence the narrowing cast. */
1410 void set_Conv_strict(ir_node *node, int strict_flag) {
1411 assert(node->op == op_Conv);
1412 node->attr.conv.strict = (char)strict_flag;
/* Cast: the target type is kept as an attribute. */
1416 get_Cast_type(ir_node *node) {
1417 assert(node->op == op_Cast);
1418 return node->attr.cast.totype;
1422 set_Cast_type(ir_node *node, ir_type *to_tp) {
1423 assert(node->op == op_Cast);
1424 node->attr.cast.totype = to_tp;
/* NOTE(review): both checks require consistent typeinfo in the graph
 * (asserted below) and first strip matching Pointer indirections from
 * source and target type before comparing class hierarchies. */
1428 /* Checks for upcast.
1430 * Returns true if the Cast node casts a class type to a super type.
1432 int is_Cast_upcast(ir_node *node) {
1433 ir_type *totype = get_Cast_type(node);
1434 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1435 ir_graph *myirg = get_irn_irg(node);
1437 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1440 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1441 totype = get_pointer_points_to_type(totype);
1442 fromtype = get_pointer_points_to_type(fromtype);
1447 if (!is_Class_type(totype)) return 0;
1448 return is_SubClass_of(fromtype, totype);
1451 /* Checks for downcast.
1453 * Returns true if the Cast node casts a class type to a sub type.
1455 int is_Cast_downcast(ir_node *node) {
1456 ir_type *totype = get_Cast_type(node);
1457 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1459 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1462 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1463 totype = get_pointer_points_to_type(totype);
1464 fromtype = get_pointer_points_to_type(fromtype);
1469 if (!is_Class_type(totype)) return 0;
1470 return is_SubClass_of(totype, fromtype);
/* Generic unop/binop access: operands are addressed relative to the
 * op's op_index so that leading non-data inputs (e.g. memory) are
 * skipped uniformly. */
1474 (is_unop)(const ir_node *node) {
1475 return _is_unop(node);
1479 get_unop_op(ir_node *node) {
1480 if (node->op->opar == oparity_unary)
1481 return get_irn_n(node, node->op->op_index);
/* Reaching this point means the node was not unary: fail loudly. */
1483 assert(node->op->opar == oparity_unary);
1488 set_unop_op(ir_node *node, ir_node *op) {
1489 if (node->op->opar == oparity_unary)
1490 set_irn_n(node, node->op->op_index, op);
1492 assert(node->op->opar == oparity_unary);
1496 (is_binop)(const ir_node *node) {
1497 return _is_binop(node);
1501 get_binop_left(ir_node *node) {
1502 assert(node->op->opar == oparity_binary);
1503 return get_irn_n(node, node->op->op_index);
1507 set_binop_left(ir_node *node, ir_node *left) {
1508 assert(node->op->opar == oparity_binary);
1509 set_irn_n(node, node->op->op_index, left);
1513 get_binop_right(ir_node *node) {
1514 assert(node->op->opar == oparity_binary);
1515 return get_irn_n(node, node->op->op_index + 1);
1519 set_binop_right(ir_node *node, ir_node *right) {
1520 assert(node->op->opar == oparity_binary);
1521 set_irn_n(node, node->op->op_index + 1, right);
/* is_Phi: in the interprocedural view Filter nodes act as Phis; while
 * the graph is still being built, zero-arity Phis do not count. */
1524 int is_Phi(const ir_node *n) {
1530 if (op == op_Filter) return get_interprocedural_view();
1533 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1534 (get_irn_arity(n) > 0));
/* Phi0: a zero-arity Phi that only exists during graph construction. */
1539 int is_Phi0(const ir_node *n) {
1542 return ((get_irn_op(n) == op_Phi) &&
1543 (get_irn_arity(n) == 0) &&
1544 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Raw predecessor array; skips the block input stored at in[0]. */
1548 get_Phi_preds_arr(ir_node *node) {
1549 assert(node->op == op_Phi);
1550 return (ir_node **)&(get_irn_in(node)[1]);
1554 get_Phi_n_preds(ir_node *node) {
1555 assert(is_Phi(node) || is_Phi0(node));
1556 return (get_irn_arity(node));
1560 void set_Phi_n_preds(ir_node *node, int n_preds) {
1561 assert(node->op == op_Phi);
1566 get_Phi_pred(ir_node *node, int pos) {
1567 assert(is_Phi(node) || is_Phi0(node));
1568 return get_irn_n(node, pos);
1572 set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
1573 assert(is_Phi(node) || is_Phi0(node));
1574 set_irn_n(node, pos, pred);
/* A "memop" is a Load or a Store; for both, memory is input 0 and the
 * address pointer is input 1, so they can be handled uniformly. */
1578 int is_memop(ir_node *node) {
1579 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1582 ir_node *get_memop_mem(ir_node *node) {
1583 assert(is_memop(node));
1584 return get_irn_n(node, 0);
1587 void set_memop_mem(ir_node *node, ir_node *mem) {
1588 assert(is_memop(node));
1589 set_irn_n(node, 0, mem);
1592 ir_node *get_memop_ptr(ir_node *node) {
1593 assert(is_memop(node));
1594 return get_irn_n(node, 1);
1597 void set_memop_ptr(ir_node *node, ir_node *ptr) {
1598 assert(is_memop(node));
1599 set_irn_n(node, 1, ptr);
/* Load: memory = input 0, address = input 1; the loaded mode and the
 * volatility flag are attributes. */
1603 get_Load_mem(ir_node *node) {
1604 assert(node->op == op_Load);
1605 return get_irn_n(node, 0);
1609 set_Load_mem(ir_node *node, ir_node *mem) {
1610 assert(node->op == op_Load);
1611 set_irn_n(node, 0, mem);
1615 get_Load_ptr(ir_node *node) {
1616 assert(node->op == op_Load);
1617 return get_irn_n(node, 1);
1621 set_Load_ptr(ir_node *node, ir_node *ptr) {
1622 assert(node->op == op_Load);
1623 set_irn_n(node, 1, ptr);
1627 get_Load_mode(ir_node *node) {
1628 assert(node->op == op_Load);
1629 return node->attr.load.load_mode;
1633 set_Load_mode(ir_node *node, ir_mode *mode) {
1634 assert(node->op == op_Load);
1635 node->attr.load.load_mode = mode;
1639 get_Load_volatility(ir_node *node) {
1640 assert(node->op == op_Load);
1641 return node->attr.load.volatility;
1645 set_Load_volatility(ir_node *node, ir_volatility volatility) {
1646 assert(node->op == op_Load);
1647 node->attr.load.volatility = volatility;
/* Store: memory = input 0, address = input 1, stored value = input 2. */
1652 get_Store_mem(ir_node *node) {
1653 assert(node->op == op_Store);
1654 return get_irn_n(node, 0);
1658 set_Store_mem(ir_node *node, ir_node *mem) {
1659 assert(node->op == op_Store);
1660 set_irn_n(node, 0, mem);
1664 get_Store_ptr(ir_node *node) {
1665 assert(node->op == op_Store);
1666 return get_irn_n(node, 1);
1670 set_Store_ptr(ir_node *node, ir_node *ptr) {
1671 assert(node->op == op_Store);
1672 set_irn_n(node, 1, ptr);
1676 get_Store_value(ir_node *node) {
1677 assert(node->op == op_Store);
1678 return get_irn_n(node, 2);
1682 set_Store_value(ir_node *node, ir_node *value) {
1683 assert(node->op == op_Store);
1684 set_irn_n(node, 2, value);
1688 get_Store_volatility(ir_node *node) {
1689 assert(node->op == op_Store);
1690 return node->attr.store.volatility;
1694 set_Store_volatility(ir_node *node, ir_volatility volatility) {
1695 assert(node->op == op_Store);
1696 node->attr.store.volatility = volatility;
/* Alloc: memory = input 0, size = input 1; the allocated type and the
 * allocation place are attributes. */
1701 get_Alloc_mem(ir_node *node) {
1702 assert(node->op == op_Alloc);
1703 return get_irn_n(node, 0);
1707 set_Alloc_mem(ir_node *node, ir_node *mem) {
1708 assert(node->op == op_Alloc);
1709 set_irn_n(node, 0, mem);
1713 get_Alloc_size(ir_node *node) {
1714 assert(node->op == op_Alloc);
1715 return get_irn_n(node, 1);
1719 set_Alloc_size(ir_node *node, ir_node *size) {
1720 assert(node->op == op_Alloc);
1721 set_irn_n(node, 1, size);
/* Getter normalizes the stored type via skip_tid and caches the result
 * back into the attribute. */
1725 get_Alloc_type(ir_node *node) {
1726 assert(node->op == op_Alloc);
1727 return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
1731 set_Alloc_type(ir_node *node, ir_type *tp) {
1732 assert(node->op == op_Alloc);
1733 node->attr.alloc.type = tp;
1737 get_Alloc_where(ir_node *node) {
1738 assert(node->op == op_Alloc);
1739 return node->attr.alloc.where;
1743 set_Alloc_where(ir_node *node, where_alloc where) {
1744 assert(node->op == op_Alloc);
1745 node->attr.alloc.where = where;
/* Free: memory = input 0, pointer = input 1, size = input 2; the freed
 * type is normalized via skip_tid on read, like Alloc. */
1750 get_Free_mem(ir_node *node) {
1751 assert(node->op == op_Free);
1752 return get_irn_n(node, 0);
1756 set_Free_mem(ir_node *node, ir_node *mem) {
1757 assert(node->op == op_Free);
1758 set_irn_n(node, 0, mem);
1762 get_Free_ptr(ir_node *node) {
1763 assert(node->op == op_Free);
1764 return get_irn_n(node, 1);
1768 set_Free_ptr(ir_node *node, ir_node *ptr) {
1769 assert(node->op == op_Free);
1770 set_irn_n(node, 1, ptr);
1774 get_Free_size(ir_node *node) {
1775 assert(node->op == op_Free);
1776 return get_irn_n(node, 2);
1780 set_Free_size(ir_node *node, ir_node *size) {
1781 assert(node->op == op_Free);
1782 set_irn_n(node, 2, size);
1786 get_Free_type(ir_node *node) {
1787 assert(node->op == op_Free);
1788 return node->attr.free.type = skip_tid(node->attr.free.type);
1792 set_Free_type(ir_node *node, ir_type *tp) {
1793 assert(node->op == op_Free);
1794 node->attr.free.type = tp;
1798 get_Free_where(ir_node *node) {
1799 assert(node->op == op_Free);
1800 return node->attr.free.where;
1804 set_Free_where(ir_node *node, where_alloc where) {
1805 assert(node->op == op_Free);
1806 node->attr.free.where = where;
/* Sync: variadic memory join; predecessors start at in[1] (in[0] is the
 * block input, as for all nodes). */
1809 ir_node **get_Sync_preds_arr(ir_node *node) {
1810 assert(node->op == op_Sync);
1811 return (ir_node **)&(get_irn_in(node)[1]);
1814 int get_Sync_n_preds(ir_node *node) {
1815 assert(node->op == op_Sync);
1816 return (get_irn_arity(node));
1820 void set_Sync_n_preds(ir_node *node, int n_preds) {
1821 assert(node->op == op_Sync);
1825 ir_node *get_Sync_pred(ir_node *node, int pos) {
1826 assert(node->op == op_Sync);
1827 return get_irn_n(node, pos);
1830 void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
1831 assert(node->op == op_Sync);
1832 set_irn_n(node, pos, pred);
1835 /* Add a new Sync predecessor */
1836 void add_Sync_pred(ir_node *node, ir_node *pred) {
1837 assert(node->op == op_Sync);
1838 add_irn_n(node, pred);
/* Recover the source-language type of the value a Proj produces:
 * parameters come from the Start node's method type, call results from
 * the Call's method type, and loaded values from the Sel entity behind
 * the Load address.  Falls back to firm_unknown_type.
 * NOTE(review): several case labels of this switch are missing from the
 * extracted chunk. */
1841 /* Returns the source language type of a Proj node. */
1842 ir_type *get_Proj_type(ir_node *n) {
1843 ir_type *tp = firm_unknown_type;
1844 ir_node *pred = get_Proj_pred(n);
1846 switch (get_irn_opcode(pred)) {
1849 /* Deal with Start / Call here: we need to know the Proj Nr. */
1850 assert(get_irn_mode(pred) == mode_T);
1851 pred_pred = get_Proj_pred(pred);
1852 if (get_irn_op(pred_pred) == op_Start) {
1853 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1854 tp = get_method_param_type(mtp, get_Proj_proj(n));
1855 } else if (get_irn_op(pred_pred) == op_Call) {
1856 ir_type *mtp = get_Call_type(pred_pred);
1857 tp = get_method_res_type(mtp, get_Proj_proj(n));
1860 case iro_Start: break;
1861 case iro_Call: break;
1863 ir_node *a = get_Load_ptr(pred);
1865 tp = get_entity_type(get_Sel_entity(a));
/* Proj: the projected node is input 0; the projection number is an
 * attribute.  Filter nodes (interprocedural view) store the number in a
 * separate attribute slot. */
1874 get_Proj_pred(const ir_node *node) {
1875 assert(is_Proj(node));
1876 return get_irn_n(node, 0);
1880 set_Proj_pred(ir_node *node, ir_node *pred) {
1881 assert(is_Proj(node));
1882 set_irn_n(node, 0, pred);
1886 get_Proj_proj(const ir_node *node) {
1887 assert(is_Proj(node));
1888 if (get_irn_opcode(node) == iro_Proj) {
1889 return node->attr.proj;
1891 assert(get_irn_opcode(node) == iro_Filter);
1892 return node->attr.filter.proj;
/* Setter only handles real Proj nodes, not Filters. */
1897 set_Proj_proj(ir_node *node, long proj) {
1898 assert(node->op == op_Proj);
1899 node->attr.proj = proj;
/* Tuple: variadic; predecessors start at in[1]. */
1903 get_Tuple_preds_arr(ir_node *node) {
1904 assert(node->op == op_Tuple);
1905 return (ir_node **)&(get_irn_in(node)[1]);
1909 get_Tuple_n_preds(ir_node *node) {
1910 assert(node->op == op_Tuple);
1911 return (get_irn_arity(node));
1916 set_Tuple_n_preds(ir_node *node, int n_preds) {
1917 assert(node->op == op_Tuple);
1922 get_Tuple_pred (ir_node *node, int pos) {
1923 assert(node->op == op_Tuple);
1924 return get_irn_n(node, pos);
1928 set_Tuple_pred(ir_node *node, int pos, ir_node *pred) {
1929 assert(node->op == op_Tuple);
1930 set_irn_n(node, pos, pred);
/* Id: forwards its single operand (input 0). */
1934 get_Id_pred(ir_node *node) {
1935 assert(node->op == op_Id);
1936 return get_irn_n(node, 0);
1940 set_Id_pred(ir_node *node, ir_node *pred) {
1941 assert(node->op == op_Id);
1942 set_irn_n(node, 0, pred);
/* Confirm: input 0 is the confirmed value, input 1 the bound it is
 * compared against. */
1945 ir_node *get_Confirm_value(ir_node *node) {
1946 assert(node->op == op_Confirm);
1947 return get_irn_n(node, 0);
1950 void set_Confirm_value(ir_node *node, ir_node *value) {
1951 assert(node->op == op_Confirm);
1952 set_irn_n(node, 0, value);
1955 ir_node *get_Confirm_bound(ir_node *node) {
1956 assert(node->op == op_Confirm);
1957 return get_irn_n(node, 1);
/* Set the bound input of a Confirm node.
 * Fixed: the bound is input 1 (input 0 is the confirmed value, see
 * get_Confirm_value/get_Confirm_bound above); the old code wrote
 * input 0 and silently clobbered the value input. */
1960 void set_Confirm_bound(ir_node *node, ir_node *bound) {
1961 assert(node->op == op_Confirm);
1962 set_irn_n(node, 1, bound);
/* The pn_Cmp relation the Confirm asserts between value and bound. */
1965 pn_Cmp get_Confirm_cmp(ir_node *node) {
1966 assert(node->op == op_Confirm);
1967 return node->attr.confirm_cmp;
1970 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
1971 assert(node->op == op_Confirm);
1972 node->attr.confirm_cmp = cmp;
/* Filter accessors.  The interprocedural predecessor array in_cg
 * mirrors in[]: slot 0 holds the block, real predecessors start at
 * index 1.  Access deliberately avoids get_irn_arity/get_irn_n so it
 * works independently of the current (inter/intra-procedural) view. */
1977 get_Filter_pred(ir_node *node) {
1978 assert(node->op == op_Filter);
1983 set_Filter_pred(ir_node *node, ir_node *pred) {
1984 assert(node->op == op_Filter);
1989 get_Filter_proj(ir_node *node) {
1990 assert(node->op == op_Filter);
1991 return node->attr.filter.proj;
1995 set_Filter_proj(ir_node *node, long proj) {
1996 assert(node->op == op_Filter);
1997 node->attr.filter.proj = proj;
2000 /* Don't use get_irn_arity, get_irn_n in implementation as access
2001 shall work independent of view!!! */
2002 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
2003 assert(node->op == op_Filter);
2004 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2005 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2006 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
2007 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
2008 node->attr.filter.in_cg[0] = node->in[0];
2010 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2013 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2014 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2015 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2016 node->attr.filter.in_cg[pos + 1] = pred;
2019 int get_Filter_n_cg_preds(ir_node *node) {
2020 assert(node->op == op_Filter && node->attr.filter.in_cg);
2021 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2024 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2026 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2028 arity = ARR_LEN(node->attr.filter.in_cg);
2029 assert(pos < arity - 1);
2030 return node->attr.filter.in_cg[pos + 1];
/* Mux accessors.  They also accept a Psi with exactly one condition
 * (arity 3, asserted): sel maps to the first Psi condition, true to its
 * first value, false to the Psi default.  For a real Mux the setters
 * write node->in directly (false at in[2], true at in[3]), bypassing
 * set_irn_n — NOTE(review): presumably intentional to skip edge
 * bookkeeping; verify against the edge module. */
2034 ir_node *get_Mux_sel(ir_node *node) {
2035 if (node->op == op_Psi) {
2036 assert(get_irn_arity(node) == 3);
2037 return get_Psi_cond(node, 0);
2039 assert(node->op == op_Mux);
2043 void set_Mux_sel(ir_node *node, ir_node *sel) {
2044 if (node->op == op_Psi) {
2045 assert(get_irn_arity(node) == 3);
2046 set_Psi_cond(node, 0, sel);
2048 assert(node->op == op_Mux);
2053 ir_node *get_Mux_false(ir_node *node) {
2054 if (node->op == op_Psi) {
2055 assert(get_irn_arity(node) == 3);
2056 return get_Psi_default(node);
2058 assert(node->op == op_Mux);
2062 void set_Mux_false(ir_node *node, ir_node *ir_false) {
2063 if (node->op == op_Psi) {
2064 assert(get_irn_arity(node) == 3);
2065 set_Psi_default(node, ir_false);
2067 assert(node->op == op_Mux);
2068 node->in[2] = ir_false;
2072 ir_node *get_Mux_true(ir_node *node) {
2073 if (node->op == op_Psi) {
2074 assert(get_irn_arity(node) == 3);
2075 return get_Psi_val(node, 0);
2077 assert(node->op == op_Mux);
2081 void set_Mux_true(ir_node *node, ir_node *ir_true) {
2082 if (node->op == op_Psi) {
2083 assert(get_irn_arity(node) == 3);
2084 set_Psi_val(node, 0, ir_true);
2086 assert(node->op == op_Mux);
2087 node->in[3] = ir_true;
/* Psi: inputs are interleaved cond0, val0, cond1, val1, ..., default.
 * Condition i sits at index 2*i, value i at 2*i + 1, the default value
 * is the last input. */
2092 ir_node *get_Psi_cond(ir_node *node, int pos) {
2093 int num_conds = get_Psi_n_conds(node);
2094 assert(node->op == op_Psi);
2095 assert(pos < num_conds);
2096 return get_irn_n(node, 2 * pos);
2099 void set_Psi_cond(ir_node *node, int pos, ir_node *cond) {
2100 int num_conds = get_Psi_n_conds(node);
2101 assert(node->op == op_Psi);
2102 assert(pos < num_conds);
2103 set_irn_n(node, 2 * pos, cond);
2106 ir_node *get_Psi_val(ir_node *node, int pos) {
2107 int num_vals = get_Psi_n_conds(node);
2108 assert(node->op == op_Psi);
2109 assert(pos < num_vals);
2110 return get_irn_n(node, 2 * pos + 1);
2113 void set_Psi_val(ir_node *node, int pos, ir_node *val) {
2114 int num_vals = get_Psi_n_conds(node);
2115 assert(node->op == op_Psi);
2116 assert(pos < num_vals);
2117 set_irn_n(node, 2 * pos + 1, val);
/* The default value is the last input (index arity - 1). */
2120 ir_node *get_Psi_default(ir_node *node) {
2121 int def_pos = get_irn_arity(node) - 1;
2122 assert(node->op == op_Psi);
2123 return get_irn_n(node, def_pos);
/* Set the default value of a Psi node (its last data input).
 * Fixed: the default operand lives at index arity - 1, matching
 * get_Psi_default above; using arity as the position was off by one
 * and wrote past the last input. */
2126 void set_Psi_default(ir_node *node, ir_node *val) {
2127 int def_pos = get_irn_arity(node) - 1;
2128 assert(node->op == op_Psi);
2129 set_irn_n(node, def_pos, val);
/* Out-of-line body for the inlined _get_Psi_n_conds. */
2132 int (get_Psi_n_conds)(ir_node *node) {
2133 return _get_Psi_n_conds(node);
/* CopyB (block copy): memory = input 0, destination = input 1,
 * source = input 2; the copied type is an attribute. */
2137 ir_node *get_CopyB_mem(ir_node *node) {
2138 assert(node->op == op_CopyB);
2139 return get_irn_n(node, 0);
2142 void set_CopyB_mem(ir_node *node, ir_node *mem) {
2143 assert(node->op == op_CopyB);
2144 set_irn_n(node, 0, mem);
2147 ir_node *get_CopyB_dst(ir_node *node) {
2148 assert(node->op == op_CopyB);
2149 return get_irn_n(node, 1);
2152 void set_CopyB_dst(ir_node *node, ir_node *dst) {
2153 assert(node->op == op_CopyB);
2154 set_irn_n(node, 1, dst);
2157 ir_node *get_CopyB_src (ir_node *node) {
2158 assert(node->op == op_CopyB);
2159 return get_irn_n(node, 2);
2162 void set_CopyB_src(ir_node *node, ir_node *src) {
2163 assert(node->op == op_CopyB);
2164 set_irn_n(node, 2, src);
2167 ir_type *get_CopyB_type(ir_node *node) {
2168 assert(node->op == op_CopyB);
2169 return node->attr.copyb.data_type;
/* data_type must not be NULL (asserted together with the opcode). */
2172 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2173 assert(node->op == op_CopyB && data_type);
2174 node->attr.copyb.data_type = data_type;
/* InstOf accessors: checked type is an attribute, store (memory) is
 * input 0, the tested object is input 1.
 * Fixed: every assert here used '=' (assignment) instead of '==', so
 * the checks always passed AND silently overwrote node->op with
 * op_InstOf, corrupting the node. */
2179 get_InstOf_type(ir_node *node) {
2180 assert(node->op == op_InstOf);
2181 return node->attr.instof.type;
2185 set_InstOf_type(ir_node *node, ir_type *type) {
2186 assert(node->op == op_InstOf);
2187 node->attr.instof.type = type;
2191 get_InstOf_store(ir_node *node) {
2192 assert(node->op == op_InstOf);
2193 return get_irn_n(node, 0);
2197 set_InstOf_store(ir_node *node, ir_node *obj) {
2198 assert(node->op == op_InstOf);
2199 set_irn_n(node, 0, obj);
2203 get_InstOf_obj(ir_node *node) {
2204 assert(node->op == op_InstOf);
2205 return get_irn_n(node, 1);
2209 set_InstOf_obj(ir_node *node, ir_node *obj) {
2210 assert(node->op == op_InstOf);
2211 set_irn_n(node, 1, obj);
/* Raise: memory = input 0, exception object pointer = input 1. */
2214 /* Returns the memory input of a Raise operation. */
2216 get_Raise_mem(ir_node *node) {
2217 assert(node->op == op_Raise);
2218 return get_irn_n(node, 0);
2222 set_Raise_mem(ir_node *node, ir_node *mem) {
2223 assert(node->op == op_Raise);
2224 set_irn_n(node, 0, mem);
2228 get_Raise_exo_ptr(ir_node *node) {
2229 assert(node->op == op_Raise);
2230 return get_irn_n(node, 1);
2234 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr) {
2235 assert(node->op == op_Raise);
2236 set_irn_n(node, 1, exo_ptr);
/* Bound (array bounds check): memory = input 0, index = input 1,
 * lower bound = input 2, upper bound = input 3. */
2241 /* Returns the memory input of a Bound operation. */
2242 ir_node *get_Bound_mem(ir_node *bound) {
2243 assert(bound->op == op_Bound);
2244 return get_irn_n(bound, 0);
2247 void set_Bound_mem(ir_node *bound, ir_node *mem) {
2248 assert(bound->op == op_Bound);
2249 set_irn_n(bound, 0, mem);
2252 /* Returns the index input of a Bound operation. */
2253 ir_node *get_Bound_index(ir_node *bound) {
2254 assert(bound->op == op_Bound);
2255 return get_irn_n(bound, 1);
2258 void set_Bound_index(ir_node *bound, ir_node *idx) {
2259 assert(bound->op == op_Bound);
2260 set_irn_n(bound, 1, idx);
2263 /* Returns the lower bound input of a Bound operation. */
2264 ir_node *get_Bound_lower(ir_node *bound) {
2265 assert(bound->op == op_Bound);
2266 return get_irn_n(bound, 2);
2269 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2270 assert(bound->op == op_Bound);
2271 set_irn_n(bound, 2, lower);
2274 /* Returns the upper bound input of a Bound operation. */
2275 ir_node *get_Bound_upper(ir_node *bound) {
2276 assert(bound->op == op_Bound);
2277 return get_irn_n(bound, 3);
2280 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2281 assert(bound->op == op_Bound);
2282 set_irn_n(bound, 3, upper);
/* Pin: single operand at input 0. */
2285 /* Return the operand of a Pin node. */
2286 ir_node *get_Pin_op(ir_node *pin) {
2287 assert(pin->op == op_Pin);
2288 return get_irn_n(pin, 0);
2291 void set_Pin_op(ir_node *pin, ir_node *node) {
2292 assert(pin->op == op_Pin);
2293 set_irn_n(pin, 0, node);
/* Look up the graph a node belongs to by walking to its block (the
 * block attribute stores the irg).  A Bad predecessor may stand in for
 * the block after optimization, hence the second hop. */
2297 /* returns the graph of a node */
2299 get_irn_irg(const ir_node *node) {
2301 * Do not use get_nodes_Block() here, because this
2302 * will check the pinned state.
2303 * However even a 'wrong' block is always in the proper
2306 if (! is_Block(node))
2307 node = get_irn_n(node, -1);
2308 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2309 node = get_irn_n(node, -1);
2310 assert(get_irn_op(node) == op_Block);
2311 return node->attr.block.irg;
2315 /*----------------------------------------------------------------*/
2316 /* Auxiliary routines */
2317 /*----------------------------------------------------------------*/
/* Skip a Proj and return its predecessor; tolerates NULL/non-Proj. */
2320 skip_Proj(ir_node *node) {
2321 /* don't assert node !!! */
2326 node = get_Proj_pred(node);
2332 skip_Proj_const(const ir_node *node) {
2333 /* don't assert node !!! */
2338 node = get_Proj_pred(node);
/* Resolve Proj-of-Tuple chains to the real predecessor; disabled when
 * normalization is switched off. */
2344 skip_Tuple(ir_node *node) {
2348 if (!get_opt_normalize()) return node;
2351 if (get_irn_op(node) == op_Proj) {
2352 pred = get_Proj_pred(node);
2353 op = get_irn_op(pred);
2356 * Looks strange but calls get_irn_op() only once
2357 * in most often cases.
2359 if (op == op_Proj) { /* nested Tuple ? */
2360 pred = skip_Tuple(pred);
2361 op = get_irn_op(pred);
2363 if (op == op_Tuple) {
2364 node = get_Tuple_pred(pred, get_Proj_proj(node));
2367 } else if (op == op_Tuple) {
2368 node = get_Tuple_pred(pred, get_Proj_proj(node));
2375 /* returns operand of node if node is a Cast */
2376 ir_node *skip_Cast(ir_node *node) {
2377 if (get_irn_op(node) == op_Cast)
2378 return get_Cast_op(node);
2382 /* returns operand of node if node is a Confirm */
2383 ir_node *skip_Confirm(ir_node *node) {
2384 if (get_irn_op(node) == op_Confirm)
2385 return get_Confirm_value(node);
2389 /* skip all high-level ops */
2390 ir_node *skip_HighLevel(ir_node *node) {
2391 if (is_op_highlevel(get_irn_op(node)))
2392 return get_irn_n(node, 0);
2397 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2398 * than any other approach, as Id chains are resolved and all point to the real node, or
2399 * all id's are self loops.
2401 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2402 * a little bit "hand optimized".
2404 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2407 skip_Id(ir_node *node) {
2409 /* don't assert node !!! */
2411 if (!node || (node->op != op_Id)) return node;
2413 /* Don't use get_Id_pred(): We get into an endless loop for
2414 self-referencing Ids. */
2415 pred = node->in[0+1];
2417 if (pred->op != op_Id) return pred;
2419 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2420 ir_node *rem_pred, *res;
2422 if (pred->op != op_Id) return pred; /* shortcut */
2425 assert(get_irn_arity (node) > 0);
2427 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2428 res = skip_Id(rem_pred);
2429 if (res->op == op_Id) /* self-loop */ return node;
2431 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Like skip_Id, but updates the caller's pointer in place. */
2438 void skip_Id_and_store(ir_node **node) {
2441 if (!n || (n->op != op_Id)) return;
2443 /* Don't use get_Id_pred(): We get into an endless loop for
2444 self-referencing Ids. */
/* Out-of-line bodies for the node-kind predicates; each simply forwards
 * to the corresponding inlined _is_* macro/function (see irnode_t.h).
 * The (name)(...) form suppresses a same-named function-like macro. */
2449 (is_Bad)(const ir_node *node) {
2450 return _is_Bad(node);
2454 (is_NoMem)(const ir_node *node) {
2455 return _is_NoMem(node);
2459 (is_Mod)(const ir_node *node) {
2460 return _is_Mod(node);
2464 (is_Div)(const ir_node *node) {
2465 return _is_Div(node);
2469 (is_DivMod)(const ir_node *node) {
2470 return _is_DivMod(node);
2474 (is_Quot)(const ir_node *node) {
2475 return _is_Quot(node);
2479 (is_Start)(const ir_node *node) {
2480 return _is_Start(node);
2484 (is_End)(const ir_node *node) {
2485 return _is_End(node);
2489 (is_Const)(const ir_node *node) {
2490 return _is_Const(node);
2494 (is_no_Block)(const ir_node *node) {
2495 return _is_no_Block(node);
2499 (is_Block)(const ir_node *node) {
2500 return _is_Block(node);
2503 /* returns true if node is an Unknown node. */
2505 (is_Unknown)(const ir_node *node) {
2506 return _is_Unknown(node);
2509 /* returns true if node is a Return node. */
2511 (is_Return)(const ir_node *node) {
2512 return _is_Return(node);
2515 /* returns true if node is a Call node. */
2517 (is_Call)(const ir_node *node) {
2518 return _is_Call(node);
2521 /* returns true if node is a Sel node. */
2523 (is_Sel)(const ir_node *node) {
2524 return _is_Sel(node);
2527 /* returns true if node is a Mux node or a Psi with only one condition. */
2529 (is_Mux)(const ir_node *node) {
2530 return _is_Mux(node);
2533 /* returns true if node is a Load node. */
2535 (is_Load)(const ir_node *node) {
2536 return _is_Load(node);
2539 /* returns true if node is a Store node. */
2541 (is_Store)(const ir_node *node) {
2542 return _is_Store(node);
2545 /* returns true if node is a Sync node. */
2547 (is_Sync)(const ir_node *node) {
2548 return _is_Sync(node);
2551 /* returns true if node is a Confirm node. */
2553 (is_Confirm)(const ir_node *node) {
2554 return _is_Confirm(node);
2557 /* returns true if node is a Pin node. */
2559 (is_Pin)(const ir_node *node) {
2560 return _is_Pin(node);
2563 /* returns true if node is a SymConst node. */
2565 (is_SymConst)(const ir_node *node) {
2566 return _is_SymConst(node);
2569 /* returns true if node is a Cond node. */
2571 (is_Cond)(const ir_node *node) {
2572 return _is_Cond(node);
2576 (is_CopyB)(const ir_node *node) {
2577 return _is_CopyB(node);
2580 /* returns true if node is a Cmp node. */
2582 (is_Cmp)(const ir_node *node) {
2583 return _is_Cmp(node);
2586 /* returns true if node is an Alloc node. */
2588 (is_Alloc)(const ir_node *node) {
2589 return _is_Alloc(node);
2592 /* returns true if a node is a Jmp node. */
2594 (is_Jmp)(const ir_node *node) {
2595 return _is_Jmp(node);
2598 /* returns true if a node is a Raise node. */
2600 (is_Raise)(const ir_node *node) {
2601 return _is_Raise(node);
/* Proj test is view-dependent: Filters count only intra-procedurally. */
2605 is_Proj(const ir_node *node) {
2607 return node->op == op_Proj ||
2608 (!get_interprocedural_view() && node->op == op_Filter);
2611 /* Returns true if the operation manipulates control flow. */
2613 is_cfop(const ir_node *node) {
2614 return is_cfopcode(get_irn_op(node));
2617 /* Returns true if the operation manipulates interprocedural control flow:
2618 CallBegin, EndReg, EndExcept */
2619 int is_ip_cfop(const ir_node *node) {
2620 return is_ip_cfopcode(get_irn_op(node));
2623 /* Returns true if the operation can change the control flow because
2626 is_fragile_op(const ir_node *node) {
2627 return is_op_fragile(get_irn_op(node));
/* For fragile ops the memory input is input 0 for the opcodes handled
 * here; unknown fragile opcodes hit the assert.
 * NOTE(review): the switch's case labels are missing in this chunk. */
2630 /* Returns the memory operand of fragile operations. */
2631 ir_node *get_fragile_op_mem(ir_node *node) {
2632 assert(node && is_fragile_op(node));
2634 switch (get_irn_opcode (node)) {
2644 return get_irn_n(node, 0);
2649 assert(0 && "should not be reached");
2659 /* Return the type associated with the value produced by n
2660 * if the node remarks this type as it is the case for
2661 * Cast, Const, SymConst and some Proj nodes. */
2662 ir_type *(get_irn_type)(ir_node *node) {
2663 return _get_irn_type(node);
2666 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2668 ir_type *(get_irn_type_attr)(ir_node *node) {
2669 return _get_irn_type_attr(node);
2672 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2673 ir_entity *(get_irn_entity_attr)(ir_node *node) {
2674 return _get_irn_entity_attr(node);
2677 /* Returns non-zero for constant-like nodes. */
2678 int (is_irn_constlike)(const ir_node *node) {
2679 return _is_irn_constlike(node);
2683 * Returns non-zero for nodes that are allowed to have keep-alives and
2684 * are neither Block nor PhiM.
2686 int (is_irn_keep)(const ir_node *node) {
2687 return _is_irn_keep(node);
2691 * Returns non-zero for nodes that are always placed in the start block.
2693 int (is_irn_start_block_placed)(const ir_node *node) {
2694 return _is_irn_start_block_placed(node);
2697 /* Returns non-zero for nodes that are machine operations. */
2698 int (is_irn_machine_op)(const ir_node *node) {
2699 return _is_irn_machine_op(node);
2702 /* Returns non-zero for nodes that are machine operands. */
2703 int (is_irn_machine_operand)(const ir_node *node) {
2704 return _is_irn_machine_operand(node);
2707 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2708 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2709 return _is_irn_machine_user(node, n);
2713 /* Gets the string representation of the jump prediction .*/
2714 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
2717 case COND_JMP_PRED_NONE: return "no prediction";
2718 case COND_JMP_PRED_TRUE: return "true taken";
2719 case COND_JMP_PRED_FALSE: return "false taken";
2723 /* Returns the conditional jump prediction of a Cond node. */
2724 cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
2725 return _get_Cond_jmp_pred(cond);
2728 /* Sets a new conditional jump prediction. */
2729 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2730 _set_Cond_jmp_pred(cond, pred);
2733 /** the get_type operation must be always implemented and return a firm type */
2734 static ir_type *get_Default_type(ir_node *n) {
2735 return get_unknown_type();
/* Install the per-opcode get_type callback; guarantees a non-NULL
 * implementation by falling back to get_Default_type. */
2738 /* Sets the get_type operation for an ir_op_ops. */
2739 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
2741 case iro_Const: ops->get_type = get_Const_type; break;
2742 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2743 case iro_Cast: ops->get_type = get_Cast_type; break;
2744 case iro_Proj: ops->get_type = get_Proj_type; break;
2746 /* not allowed to be NULL */
2747 if (! ops->get_type)
2748 ops->get_type = get_Default_type;
2754 /** Return the attribute type of a SymConst node if exists */
2755 static ir_type *get_SymConst_attr_type(ir_node *self) {
2756 symconst_kind kind = get_SymConst_kind(self);
2757 if (SYMCONST_HAS_TYPE(kind))
2758 return get_SymConst_type(self);
2762 /** Return the attribute entity of a SymConst node if exists */
2763 static ir_entity *get_SymConst_attr_entity(ir_node *self) {
2764 symconst_kind kind = get_SymConst_kind(self);
2765 if (SYMCONST_HAS_ENT(kind))
2766 return get_SymConst_entity(self);
2770 /** the get_type_attr operation must be always implemented */
2771 static ir_type *get_Null_type(ir_node *n) {
2772 return firm_unknown_type;
/* Install the per-opcode get_type_attr callback with a non-NULL default. */
2775 /* Sets the get_type operation for an ir_op_ops. */
2776 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
2778 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2779 case iro_Call: ops->get_type_attr = get_Call_type; break;
2780 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2781 case iro_Free: ops->get_type_attr = get_Free_type; break;
2782 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2784 /* not allowed to be NULL */
2785 if (! ops->get_type_attr)
2786 ops->get_type_attr = get_Null_type;
2792 /** the get_entity_attr operation must be always implemented */
2793 static ir_entity *get_Null_ent(ir_node *n) {
/* Install the per-opcode get_entity_attr callback with a non-NULL default. */
2797 /* Sets the get_type operation for an ir_op_ops. */
2798 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
2800 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2801 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
2803 /* not allowed to be NULL */
2804 if (! ops->get_entity_attr)
2805 ops->get_entity_attr = get_Null_ent;
/* Debug helper: print a node's op, mode, number, address, its block and
 * all predecessors to stdout.  Compiled to a no-op without DEBUG_libfirm. */
2811 #ifdef DEBUG_libfirm
2812 void dump_irn(ir_node *n) {
2813 int i, arity = get_irn_arity(n);
2814 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2816 ir_node *pred = get_irn_n(n, -1);
2817 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2818 get_irn_node_nr(pred), (void *)pred);
2820 printf(" preds: \n");
2821 for (i = 0; i < arity; ++i) {
2822 ir_node *pred = get_irn_n(n, i);
2823 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2824 get_irn_node_nr(pred), (void *)pred);
2828 #else /* DEBUG_libfirm */
2829 void dump_irn(ir_node *n) {}
2830 #endif /* DEBUG_libfirm */