2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
36 #include "irgraph_t.h"
38 #include "irbackedge_t.h"
42 #include "iredgekinds.h"
43 #include "iredges_t.h"
/* some constants fixing the positions of nodes predecessors
   in the in array */
51 #define CALL_PARAM_OFFSET 2
52 #define FUNCCALL_PARAM_OFFSET 1
53 #define SEL_INDEX_OFFSET 2
54 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
55 #define END_KEEPALIVE_OFFSET 0
/** String names for the 16 pn_Cmp relation constants, indexed by pnc value. */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};
/**
 * Returns the pnc name from a pnc constant.
 */
67 const char *get_pnc_string(int pnc) {
68 assert(pnc >= 0 && pnc <
69 (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
70 return pnc_name_arr[pnc];
/**
 * Calculates the negated (Complement(R)) pnc condition.
 */
76 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode) {
79 /* do NOT add the Uo bit for non-floating point values */
80 if (! mode_is_float(mode))
86 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
87 pn_Cmp get_inversed_pnc(long pnc) {
88 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
89 long lesser = pnc & pn_Cmp_Lt;
90 long greater = pnc & pn_Cmp_Gt;
92 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
 * Indicates whether additional data can be registered to ir nodes.
 * If set to 1, this is no longer possible.
 */
101 static int forbid_new_data = 0;
104 * The amount of additional space for custom data to be allocated upon
105 * creating a new node.
107 unsigned firm_add_node_size = 0;
110 /* register new space for every node */
111 unsigned firm_register_additional_node_data(unsigned size) {
112 assert(!forbid_new_data && "Too late to register additional node data");
117 return firm_add_node_size += size;
121 void init_irnode(void) {
122 /* Forbid the addition of new data to an ir node. */
/**
 * irnode constructor.
 * Creates a new irnode in irg, with an op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic array is created.
 */
new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
int arity, ir_node **in)
/* node size = common header up to the attr union, plus the op-specific
   attribute size, plus any space registered via
   firm_register_additional_node_data() */
size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
assert(irg && op && mode);
/* allocate on the graph's obstack and zero the whole node */
p = obstack_alloc(irg->obst, node_size);
memset(p, 0, node_size);
/* user-registered extra data lives BEFORE the ir_node proper */
res = (ir_node *)(p + firm_add_node_size);
res->kind = k_ir_node;
/* register with the graph to obtain a unique node index */
res->node_idx = irg_register_node_idx(irg, res);
/* dynamic arity: flexible array, initially holding only the block slot */
res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
/* fixed arity: in[0] is the block, in[1..arity] are the predecessors */
res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
set_irn_dbg_info(res, db);
/* NOTE(review): node numbering is normally guarded by DEBUG_libfirm -- confirm */
res->node_nr = get_irp_new_node_nr();
/* initialize the out-edge list heads for every edge kind */
for (i = 0; i < EDGE_KIND_LAST; ++i)
INIT_LIST_HEAD(&res->edge_info[i].outs_head);
/* don't put this into the for loop, arity is -1 for some nodes! */
edges_notify_edge(res, -1, res->in[0], NULL, irg);
for (i = 1; i <= arity; ++i)
edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
hook_new_node(irg, res);
182 /*-- getting some parameters from ir_nodes --*/
184 int (is_ir_node)(const void *thing) {
185 return _is_ir_node(thing);
188 int (get_irn_intra_arity)(const ir_node *node) {
189 return _get_irn_intra_arity(node);
192 int (get_irn_inter_arity)(const ir_node *node) {
193 return _get_irn_inter_arity(node);
196 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
198 int (get_irn_arity)(const ir_node *node) {
199 return _get_irn_arity(node);
/* Returns the array with ins. This array is shifted with respect to the
   array accessed by get_irn_n: The block operand is at position 0 not -1.
   (@@@ This should be changed.)
   The order of the predecessors in this array is not guaranteed, except that
   lists of operands as predecessors of Block or arguments of a Call are
   consecutive. */
208 ir_node **get_irn_in(const ir_node *node) {
210 if (get_interprocedural_view()) { /* handle Filter and Block specially */
211 if (get_irn_opcode(node) == iro_Filter) {
212 assert(node->attr.filter.in_cg);
213 return node->attr.filter.in_cg;
214 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
215 return node->attr.block.in_cg;
217 /* else fall through */
void set_irn_in(ir_node *node, int arity, ir_node **in) {
ir_graph *irg = current_ir_graph;
/* in the interprocedural view Filter/Block may carry a separate in array */
if (get_interprocedural_view()) { /* handle Filter and Block specially */
ir_opcode code = get_irn_opcode(node);
if (code == iro_Filter) {
assert(node->attr.filter.in_cg);
pOld_in = &node->attr.filter.in_cg;
} else if (code == iro_Block && node->attr.block.in_cg) {
pOld_in = &node->attr.block.in_cg;
/* notify edge bookkeeping: positions that exist in both old and new array
   are rewired, extra new positions are added */
for (i = 0; i < arity; i++) {
if (i < ARR_LEN(*pOld_in)-1)
edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
edges_notify_edge(node, i, in[i], NULL, irg);
/* positions only present in the old array are removed */
for (;i < ARR_LEN(*pOld_in)-1; i++) {
edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
/* reallocate the in array if the arity changed; slot 0 (the block)
   must be preserved across the reallocation */
if (arity != ARR_LEN(*pOld_in) - 1) {
ir_node * block = (*pOld_in)[0];
*pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
(*pOld_in)[0] = block;
fix_backedges(irg->obst, node);
/* finally copy the new predecessors behind the block slot */
memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
261 ir_node *(get_irn_intra_n)(const ir_node *node, int n) {
262 return _get_irn_intra_n (node, n);
265 ir_node *(get_irn_inter_n)(const ir_node *node, int n) {
266 return _get_irn_inter_n (node, n);
269 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
271 ir_node *(get_irn_n)(const ir_node *node, int n) {
272 return _get_irn_n(node, n);
void set_irn_n(ir_node *node, int n, ir_node *in) {
assert(node && node->kind == k_ir_node);
assert(n < get_irn_arity(node));
assert(in && in->kind == k_ir_node);
/* n == -1 addresses the block predecessor */
if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
/* Change block pred in both views! */
node->in[n + 1] = in;
assert(node->attr.filter.in_cg);
node->attr.filter.in_cg[n + 1] = in;
if (get_interprocedural_view()) { /* handle Filter and Block specially */
if (get_irn_opcode(node) == iro_Filter) {
assert(node->attr.filter.in_cg);
/* interprocedural ins of a Filter live in the in_cg array */
node->attr.filter.in_cg[n + 1] = in;
} else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
node->attr.block.in_cg[n + 1] = in;
/* else fall through */
/* notify observers before the in array is overwritten; node->in[n + 1]
   still holds the old predecessor at this point */
hook_set_irn_n(node, n, in, node->in[n + 1]);
/* Here, we rely on src and tgt being in the current ir graph */
edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
node->in[n + 1] = in;
309 int add_irn_n(ir_node *node, ir_node *in) {
311 ir_graph *irg = get_irn_irg(node);
313 assert(node->op->opar == oparity_dynamic);
314 pos = ARR_LEN(node->in) - 1;
315 ARR_APP1(ir_node *, node->in, in);
316 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
319 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
324 int (get_irn_deps)(const ir_node *node) {
325 return _get_irn_deps(node);
328 ir_node *(get_irn_dep)(const ir_node *node, int pos) {
329 return _get_irn_dep(node, pos);
332 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep) {
333 _set_irn_dep(node, pos, dep);
int add_irn_dep(ir_node *node, ir_node *dep) {
/* lazily allocate the dependency array on first use */
if (node->deps == NULL) {
node->deps = NEW_ARR_F(ir_node *, 1);
/* scan the existing deps: remember a free (NULL) slot for reuse and
   bail out if the dependency is already recorded */
for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
if(node->deps[i] == NULL)
if(node->deps[i] == dep)
/* reuse a free slot when one was found, otherwise append */
if (first_zero >= 0) {
node->deps[first_zero] = dep;
ARR_APP1(ir_node *, node->deps, dep);
/* keep the DEP edge bookkeeping in sync with the new entry */
edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
368 void add_irn_deps(ir_node *tgt, ir_node *src) {
371 for (i = 0, n = get_irn_deps(src); i < n; ++i)
372 add_irn_dep(tgt, get_irn_dep(src, i));
376 ir_mode *(get_irn_mode)(const ir_node *node) {
377 return _get_irn_mode(node);
380 void (set_irn_mode)(ir_node *node, ir_mode *mode) {
381 _set_irn_mode(node, mode);
384 modecode get_irn_modecode(const ir_node *node) {
386 return node->mode->code;
389 /** Gets the string representation of the mode .*/
390 const char *get_irn_modename(const ir_node *node) {
392 return get_mode_name(node->mode);
395 ident *get_irn_modeident(const ir_node *node) {
397 return get_mode_ident(node->mode);
400 ir_op *(get_irn_op)(const ir_node *node) {
401 return _get_irn_op(node);
404 /* should be private to the library: */
405 void (set_irn_op)(ir_node *node, ir_op *op) {
406 _set_irn_op(node, op);
409 unsigned (get_irn_opcode)(const ir_node *node) {
410 return _get_irn_opcode(node);
413 const char *get_irn_opname(const ir_node *node) {
415 if (is_Phi0(node)) return "Phi0";
416 return get_id_str(node->op->name);
419 ident *get_irn_opident(const ir_node *node) {
421 return node->op->name;
424 unsigned long (get_irn_visited)(const ir_node *node) {
425 return _get_irn_visited(node);
428 void (set_irn_visited)(ir_node *node, unsigned long visited) {
429 _set_irn_visited(node, visited);
432 void (mark_irn_visited)(ir_node *node) {
433 _mark_irn_visited(node);
436 int (irn_not_visited)(const ir_node *node) {
437 return _irn_not_visited(node);
440 int (irn_visited)(const ir_node *node) {
441 return _irn_visited(node);
444 void (set_irn_link)(ir_node *node, void *link) {
445 _set_irn_link(node, link);
448 void *(get_irn_link)(const ir_node *node) {
449 return _get_irn_link(node);
452 op_pin_state (get_irn_pinned)(const ir_node *node) {
453 return _get_irn_pinned(node);
456 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node) {
457 return _is_irn_pinned_in_irg(node);
460 void set_irn_pinned(ir_node *node, op_pin_state state) {
461 /* due to optimization an opt may be turned into a Tuple */
462 if (get_irn_op(node) == op_Tuple)
465 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
466 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
468 node->attr.except.pin_state = state;
471 #ifdef DO_HEAPANALYSIS
472 /* Access the abstract interpretation information of a node.
473 Returns NULL if no such information is available. */
474 struct abstval *get_irn_abst_value(ir_node *n) {
477 /* Set the abstract interpretation information of a node. */
478 void set_irn_abst_value(ir_node *n, struct abstval *os) {
481 struct section *firm_get_irn_section(ir_node *n) {
484 void firm_set_irn_section(ir_node *n, struct section *s) {
488 /* Dummies needed for firmjni. */
489 struct abstval *get_irn_abst_value(ir_node *n) {
493 void set_irn_abst_value(ir_node *n, struct abstval *os) {
497 struct section *firm_get_irn_section(ir_node *n) {
501 void firm_set_irn_section(ir_node *n, struct section *s) {
505 #endif /* DO_HEAPANALYSIS */
508 /* Outputs a unique number for this node */
509 long get_irn_node_nr(const ir_node *node) {
512 return node->node_nr;
514 return (long)PTR_TO_INT(node);
518 const_attr *get_irn_const_attr(ir_node *node) {
519 assert(node->op == op_Const);
520 return &node->attr.con;
523 long get_irn_proj_attr(ir_node *node) {
524 assert(node->op == op_Proj);
525 return node->attr.proj;
528 alloc_attr *get_irn_alloc_attr(ir_node *node) {
529 assert(node->op == op_Alloc);
530 return &node->attr.alloc;
533 free_attr *get_irn_free_attr(ir_node *node) {
534 assert(node->op == op_Free);
535 return &node->attr.free;
538 symconst_attr *get_irn_symconst_attr(ir_node *node) {
539 assert(node->op == op_SymConst);
540 return &node->attr.symc;
543 ir_type *get_irn_call_attr(ir_node *node) {
544 assert(node->op == op_Call);
545 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
548 sel_attr *get_irn_sel_attr(ir_node *node) {
549 assert(node->op == op_Sel);
550 return &node->attr.sel;
553 phi_attr *get_irn_phi_attr(ir_node *node) {
554 return &node->attr.phi;
557 block_attr *get_irn_block_attr(ir_node *node) {
558 assert(node->op == op_Block);
559 return &node->attr.block;
562 load_attr *get_irn_load_attr(ir_node *node) {
563 assert(node->op == op_Load);
564 return &node->attr.load;
567 store_attr *get_irn_store_attr(ir_node *node) {
568 assert(node->op == op_Store);
569 return &node->attr.store;
572 except_attr *get_irn_except_attr(ir_node *node) {
573 assert(node->op == op_Div || node->op == op_Quot ||
574 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
575 return &node->attr.except;
578 divmod_attr *get_irn_divmod_attr(ir_node *node) {
579 assert(node->op == op_Div || node->op == op_Quot ||
580 node->op == op_DivMod || node->op == op_Mod);
581 return &node->attr.divmod;
584 void *(get_irn_generic_attr)(ir_node *node) {
585 assert(is_ir_node(node));
586 return _get_irn_generic_attr(node);
589 const void *(get_irn_generic_attr_const)(const ir_node *node) {
590 assert(is_ir_node(node));
591 return _get_irn_generic_attr_const(node);
594 unsigned (get_irn_idx)(const ir_node *node) {
595 assert(is_ir_node(node));
596 return _get_irn_idx(node);
599 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
601 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
602 if (get_irn_n(node, i) == arg)
608 /** manipulate fields of individual nodes **/
610 /* this works for all except Block */
611 ir_node *get_nodes_block(const ir_node *node) {
612 assert(node->op != op_Block);
613 return get_irn_n(node, -1);
616 void set_nodes_block(ir_node *node, ir_node *block) {
617 assert(node->op != op_Block);
618 set_irn_n(node, -1, block);
621 /* this works for all except Block */
622 ir_node *get_nodes_MacroBlock(const ir_node *node) {
623 assert(node->op != op_Block);
624 return get_Block_MacroBlock(get_irn_n(node, -1));
627 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
628 * from Start. If so returns frame type, else Null. */
629 ir_type *is_frame_pointer(const ir_node *n) {
630 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
631 ir_node *start = get_Proj_pred(n);
632 if (is_Start(start)) {
633 return get_irg_frame_type(get_irn_irg(start));
639 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
640 * from Start. If so returns global type, else Null. */
641 ir_type *is_globals_pointer(const ir_node *n) {
642 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
643 ir_node *start = get_Proj_pred(n);
644 if (is_Start(start)) {
645 return get_glob_type();
651 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
652 * from Start. If so returns tls type, else Null. */
653 ir_type *is_tls_pointer(const ir_node *n) {
654 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_globals)) {
655 ir_node *start = get_Proj_pred(n);
656 if (is_Start(start)) {
657 return get_tls_type();
663 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
664 * from Start. If so returns 1, else 0. */
665 int is_value_arg_pointer(const ir_node *n) {
667 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
668 is_Start(get_Proj_pred(n)))
673 /* Returns an array with the predecessors of the Block. Depending on
674 the implementation of the graph data structure this can be a copy of
675 the internal representation of predecessors as well as the internal
676 array itself. Therefore writing to this array might obstruct the ir. */
677 ir_node **get_Block_cfgpred_arr(ir_node *node) {
678 assert((node->op == op_Block));
679 return (ir_node **)&(get_irn_in(node)[1]);
682 int (get_Block_n_cfgpreds)(const ir_node *node) {
683 return _get_Block_n_cfgpreds(node);
686 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos) {
687 return _get_Block_cfgpred(node, pos);
690 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
691 assert(node->op == op_Block);
692 set_irn_n(node, pos, pred);
695 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos) {
696 return _get_Block_cfgpred_block(node, pos);
699 int get_Block_matured(const ir_node *node) {
700 assert(node->op == op_Block);
701 return (int)node->attr.block.is_matured;
704 void set_Block_matured(ir_node *node, int matured) {
705 assert(node->op == op_Block);
706 node->attr.block.is_matured = matured;
709 unsigned long (get_Block_block_visited)(const ir_node *node) {
710 return _get_Block_block_visited(node);
713 void (set_Block_block_visited)(ir_node *node, unsigned long visit) {
714 _set_Block_block_visited(node, visit);
717 /* For this current_ir_graph must be set. */
718 void (mark_Block_block_visited)(ir_node *node) {
719 _mark_Block_block_visited(node);
722 int (Block_not_block_visited)(const ir_node *node) {
723 return _Block_not_block_visited(node);
726 int (Block_block_visited)(const ir_node *node) {
727 return _Block_block_visited(node);
730 ir_node *get_Block_graph_arr(ir_node *node, int pos) {
731 assert(node->op == op_Block);
732 return node->attr.block.graph_arr[pos+1];
735 void set_Block_graph_arr(ir_node *node, int pos, ir_node *value) {
736 assert(node->op == op_Block);
737 node->attr.block.graph_arr[pos+1] = value;
#ifdef INTERPROCEDURAL_VIEW
void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[]) {
assert(node->op == op_Block);
/* (re)allocate the interprocedural in array if absent or of wrong size;
   slot 0 stays NULL (block slot), backedge array is allocated alongside */
if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
node->attr.block.in_cg[0] = NULL;
node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
/* Fix backedge array. fix_backedges() operates depending on
interprocedural_view. */
int ipv = get_interprocedural_view();
set_interprocedural_view(1);
fix_backedges(current_ir_graph->obst, node);
set_interprocedural_view(ipv);
/* copy the new predecessors behind the (NULL) block slot */
memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
759 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred) {
760 assert(node->op == op_Block &&
761 node->attr.block.in_cg &&
762 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
763 node->attr.block.in_cg[pos + 1] = pred;
766 ir_node **get_Block_cg_cfgpred_arr(ir_node *node) {
767 assert(node->op == op_Block);
768 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
771 int get_Block_cg_n_cfgpreds(const ir_node *node) {
772 assert(node->op == op_Block);
773 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
776 ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos) {
777 assert(node->op == op_Block && node->attr.block.in_cg);
778 return node->attr.block.in_cg[pos + 1];
781 void remove_Block_cg_cfgpred_arr(ir_node *node) {
782 assert(node->op == op_Block);
783 node->attr.block.in_cg = NULL;
787 ir_node *(set_Block_dead)(ir_node *block) {
788 return _set_Block_dead(block);
791 int (is_Block_dead)(const ir_node *block) {
792 return _is_Block_dead(block);
795 ir_extblk *get_Block_extbb(const ir_node *block) {
797 assert(is_Block(block));
798 res = block->attr.block.extblk;
799 assert(res == NULL || is_ir_extbb(res));
803 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
804 assert(is_Block(block));
805 assert(extblk == NULL || is_ir_extbb(extblk));
806 block->attr.block.extblk = extblk;
809 /* returns the macro block header of a block. */
810 ir_node *get_Block_MacroBlock(const ir_node *block) {
812 assert(is_Block(block));
813 mbh = get_irn_n(block, -1);
814 /* once macro block header is respected by all optimizations,
815 this assert can be removed */
820 /* returns the macro block header of a node. */
821 ir_node *get_irn_MacroBlock(const ir_node *n) {
823 n = get_nodes_block(n);
824 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
828 return get_Block_MacroBlock(n);
831 /* returns the graph of a Block. */
832 ir_graph *get_Block_irg(const ir_node *block) {
833 assert(is_Block(block));
834 return block->attr.block.irg;
837 int has_Block_label(const ir_node *block) {
838 assert(is_Block(block));
839 return block->attr.block.has_label;
842 ir_label_t get_Block_label(const ir_node *block) {
843 assert(is_Block(block));
844 return block->attr.block.label;
847 void set_Block_label(ir_node *block, ir_label_t label) {
848 assert(is_Block(block));
849 block->attr.block.has_label = 1;
850 block->attr.block.label = label;
853 ir_node *(get_Block_phis)(const ir_node *block) {
854 return _get_Block_phis(block);
857 void (set_Block_phis)(ir_node *block, ir_node *phi) {
858 _set_Block_phis(block, phi);
861 int get_End_n_keepalives(const ir_node *end) {
862 assert(end->op == op_End);
863 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
866 ir_node *get_End_keepalive(const ir_node *end, int pos) {
867 assert(end->op == op_End);
868 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
871 void add_End_keepalive(ir_node *end, ir_node *ka) {
872 assert(end->op == op_End);
873 assert((is_Phi(ka) || is_Proj(ka) || is_Block(ka) || is_irn_keep(ka)) && "Only Phi, Block or Keep nodes can be kept alive!");
877 void set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
878 assert(end->op == op_End);
879 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
882 /* Set new keep-alives */
883 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
885 ir_graph *irg = get_irn_irg(end);
887 /* notify that edges are deleted */
888 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
889 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
891 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
893 for (i = 0; i < n; ++i) {
894 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
895 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
899 /* Set new keep-alives from old keep-alives, skipping irn */
900 void remove_End_keepalive(ir_node *end, ir_node *irn) {
901 int n = get_End_n_keepalives(end);
905 NEW_ARR_A(ir_node *, in, n);
907 for (idx = i = 0; i < n; ++i) {
908 ir_node *old_ka = get_End_keepalive(end, i);
915 /* set new keep-alives */
916 set_End_keepalives(end, idx, in);
920 free_End(ir_node *end) {
921 assert(end->op == op_End);
924 end->in = NULL; /* @@@ make sure we get an error if we use the
925 in array afterwards ... */
928 /* Return the target address of an IJmp */
929 ir_node *get_IJmp_target(const ir_node *ijmp) {
930 assert(ijmp->op == op_IJmp);
931 return get_irn_n(ijmp, 0);
934 /** Sets the target address of an IJmp */
935 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
936 assert(ijmp->op == op_IJmp);
937 set_irn_n(ijmp, 0, tgt);
941 > Implementing the case construct (which is where the constant Proj node is
942 > important) involves far more than simply determining the constant values.
943 > We could argue that this is more properly a function of the translator from
944 > Firm to the target machine. That could be done if there was some way of
945 > projecting "default" out of the Cond node.
946 I know it's complicated.
947 Basically there are two problems:
948 - determining the gaps between the Projs
949 - determining the biggest case constant to know the proj number for
951 I see several solutions:
952 1. Introduce a ProjDefault node. Solves both problems.
953 This means to extend all optimizations executed during construction.
954 2. Give the Cond node for switch two flavors:
955 a) there are no gaps in the Projs (existing flavor)
956 b) gaps may exist, default proj is still the Proj with the largest
957 projection number. This covers also the gaps.
958 3. Fix the semantic of the Cond to that of 2b)
960 Solution 2 seems to be the best:
961 Computing the gaps in the Firm representation is not too hard, i.e.,
962 libFIRM can implement a routine that transforms between the two
963 flavours. This is also possible for 1) but 2) does not require to
964 change any existing optimization.
965 Further it should be far simpler to determine the biggest constant than
967 I don't want to choose 3) as 2a) seems to have advantages for
968 dataflow analysis and 3) does not allow to convert the representation to
972 get_Cond_selector(const ir_node *node) {
973 assert(node->op == op_Cond);
974 return get_irn_n(node, 0);
978 set_Cond_selector(ir_node *node, ir_node *selector) {
979 assert(node->op == op_Cond);
980 set_irn_n(node, 0, selector);
984 get_Cond_kind(const ir_node *node) {
985 assert(node->op == op_Cond);
986 return node->attr.cond.kind;
990 set_Cond_kind(ir_node *node, cond_kind kind) {
991 assert(node->op == op_Cond);
992 node->attr.cond.kind = kind;
996 get_Cond_defaultProj(const ir_node *node) {
997 assert(node->op == op_Cond);
998 return node->attr.cond.default_proj;
1002 get_Return_mem(const ir_node *node) {
1003 assert(node->op == op_Return);
1004 return get_irn_n(node, 0);
1008 set_Return_mem(ir_node *node, ir_node *mem) {
1009 assert(node->op == op_Return);
1010 set_irn_n(node, 0, mem);
1014 get_Return_n_ress(const ir_node *node) {
1015 assert(node->op == op_Return);
1016 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1020 get_Return_res_arr(ir_node *node) {
1021 assert((node->op == op_Return));
1022 if (get_Return_n_ress(node) > 0)
1023 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1030 set_Return_n_res(ir_node *node, int results) {
1031 assert(node->op == op_Return);
1036 get_Return_res(const ir_node *node, int pos) {
1037 assert(node->op == op_Return);
1038 assert(get_Return_n_ress(node) > pos);
1039 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1043 set_Return_res(ir_node *node, int pos, ir_node *res){
1044 assert(node->op == op_Return);
1045 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1048 tarval *(get_Const_tarval)(const ir_node *node) {
1049 return _get_Const_tarval(node);
1053 set_Const_tarval(ir_node *node, tarval *con) {
1054 assert(node->op == op_Const);
1055 node->attr.con.tv = con;
1058 int (is_Const_null)(const ir_node *node) {
1059 return _is_Const_null(node);
1062 int (is_Const_one)(const ir_node *node) {
1063 return _is_Const_one(node);
1066 int (is_Const_all_one)(const ir_node *node) {
1067 return _is_Const_all_one(node);
1071 /* The source language type. Must be an atomic type. Mode of type must
1072 be mode of node. For tarvals from entities type must be pointer to
1075 get_Const_type(ir_node *node) {
1076 assert(node->op == op_Const);
1077 node->attr.con.tp = skip_tid(node->attr.con.tp);
1078 return node->attr.con.tp;
1082 set_Const_type(ir_node *node, ir_type *tp) {
1083 assert(node->op == op_Const);
1084 if (tp != firm_unknown_type) {
1085 assert(is_atomic_type(tp));
1086 assert(get_type_mode(tp) == get_irn_mode(node));
1088 node->attr.con.tp = tp;
1093 get_SymConst_kind(const ir_node *node) {
1094 assert(node->op == op_SymConst);
1095 return node->attr.symc.num;
1099 set_SymConst_kind(ir_node *node, symconst_kind num) {
1100 assert(node->op == op_SymConst);
1101 node->attr.symc.num = num;
1105 get_SymConst_type(ir_node *node) {
1106 assert((node->op == op_SymConst) &&
1107 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1108 return node->attr.symc.sym.type_p = skip_tid(node->attr.symc.sym.type_p);
1112 set_SymConst_type(ir_node *node, ir_type *tp) {
1113 assert((node->op == op_SymConst) &&
1114 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1115 node->attr.symc.sym.type_p = tp;
1119 get_SymConst_name(const ir_node *node) {
1120 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1121 return node->attr.symc.sym.ident_p;
1125 set_SymConst_name(ir_node *node, ident *name) {
1126 assert(node->op == op_SymConst && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1127 node->attr.symc.sym.ident_p = name;
1131 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1132 ir_entity *get_SymConst_entity(const ir_node *node) {
1133 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1134 return node->attr.symc.sym.entity_p;
1137 void set_SymConst_entity(ir_node *node, ir_entity *ent) {
1138 assert(node->op == op_SymConst && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1139 node->attr.symc.sym.entity_p = ent;
1142 ir_enum_const *get_SymConst_enum(const ir_node *node) {
1143 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1144 return node->attr.symc.sym.enum_p;
1147 void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
1148 assert(node->op == op_SymConst && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1149 node->attr.symc.sym.enum_p = ec;
1152 union symconst_symbol
1153 get_SymConst_symbol(const ir_node *node) {
1154 assert(node->op == op_SymConst);
1155 return node->attr.symc.sym;
1159 set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
1160 assert(node->op == op_SymConst);
1161 node->attr.symc.sym = sym;
1164 ir_label_t get_SymConst_label(const ir_node *node) {
1165 assert(node->op == op_SymConst && SYMCONST_HAS_LABEL(get_SymConst_kind(node)));
1166 return node->attr.symc.sym.label;
1169 void set_SymConst_label(ir_node *node, ir_label_t label) {
1170 assert(node->op == op_SymConst && SYMCONST_HAS_LABEL(get_SymConst_kind(node)));
1171 node->attr.symc.sym.label = label;
1175 get_SymConst_value_type(ir_node *node) {
1176 assert(node->op == op_SymConst);
1177 if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
1178 return node->attr.symc.tp;
1182 set_SymConst_value_type(ir_node *node, ir_type *tp) {
1183 assert(node->op == op_SymConst);
1184 node->attr.symc.tp = tp;
1188 get_Sel_mem(const ir_node *node) {
1189 assert(node->op == op_Sel);
1190 return get_irn_n(node, 0);
1194 set_Sel_mem(ir_node *node, ir_node *mem) {
1195 assert(node->op == op_Sel);
1196 set_irn_n(node, 0, mem);
1200 get_Sel_ptr(const ir_node *node) {
1201 assert(node->op == op_Sel);
1202 return get_irn_n(node, 1);
1206 set_Sel_ptr(ir_node *node, ir_node *ptr) {
1207 assert(node->op == op_Sel);
1208 set_irn_n(node, 1, ptr);
1212 get_Sel_n_indexs(const ir_node *node) {
1213 assert(node->op == op_Sel);
1214 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1218 get_Sel_index_arr(ir_node *node) {
1219 assert((node->op == op_Sel));
1220 if (get_Sel_n_indexs(node) > 0)
1221 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1227 get_Sel_index(const ir_node *node, int pos) {
1228 assert(node->op == op_Sel);
1229 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1233 set_Sel_index(ir_node *node, int pos, ir_node *index) {
1234 assert(node->op == op_Sel);
1235 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1239 get_Sel_entity(const ir_node *node) {
1240 assert(node->op == op_Sel);
1241 return node->attr.sel.ent;
1244 ir_entity *_get_Sel_entity(ir_node *node) {
1245 return get_Sel_entity(node);
1249 set_Sel_entity(ir_node *node, ir_entity *ent) {
1250 assert(node->op == op_Sel);
1251 node->attr.sel.ent = ent;
1255 /* For unary and binary arithmetic operations the access to the
1256 operands can be factored out. Left is the first, right the
1257 second arithmetic value as listed in tech report 0999-33.
1258 unops are: Minus, Abs, Not, Conv, Cast
1259 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1260 Shr, Shrs, Rotate, Cmp */
1264 get_Call_mem(const ir_node *node) {
1265 assert(node->op == op_Call);
1266 return get_irn_n(node, 0);
1270 set_Call_mem(ir_node *node, ir_node *mem) {
1271 assert(node->op == op_Call);
1272 set_irn_n(node, 0, mem);
1276 get_Call_ptr(const ir_node *node) {
1277 assert(node->op == op_Call);
1278 return get_irn_n(node, 1);
1282 set_Call_ptr(ir_node *node, ir_node *ptr) {
1283 assert(node->op == op_Call);
1284 set_irn_n(node, 1, ptr);
1288 get_Call_param_arr(ir_node *node) {
1289 assert(node->op == op_Call);
1290 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1294 get_Call_n_params(const ir_node *node) {
1295 assert(node->op == op_Call);
1296 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1300 get_Call_arity(const ir_node *node) {
1301 assert(node->op == op_Call);
1302 return get_Call_n_params(node);
1306 set_Call_arity(ir_node *node, ir_node *arity) {
1307 assert(node->op == op_Call);
1312 get_Call_param(const ir_node *node, int pos) {
1313 assert(node->op == op_Call);
1314 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1318 set_Call_param(ir_node *node, int pos, ir_node *param) {
1319 assert(node->op == op_Call);
1320 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1324 get_Call_type(ir_node *node) {
1325 assert(node->op == op_Call);
1326 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1330 set_Call_type(ir_node *node, ir_type *tp) {
1331 assert(node->op == op_Call);
1332 assert((get_unknown_type() == tp) || is_Method_type(tp));
1333 node->attr.call.cld_tp = tp;
1336 int Call_has_callees(const ir_node *node) {
1337 assert(node && node->op == op_Call);
1338 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1339 (node->attr.call.callee_arr != NULL));
1342 int get_Call_n_callees(const ir_node *node) {
1343 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1344 return ARR_LEN(node->attr.call.callee_arr);
1347 ir_entity *get_Call_callee(const ir_node *node, int pos) {
1348 assert(pos >= 0 && pos < get_Call_n_callees(node));
1349 return node->attr.call.callee_arr[pos];
1352 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr) {
1353 assert(node->op == op_Call);
1354 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1355 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1357 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1360 void remove_Call_callee_arr(ir_node *node) {
1361 assert(node->op == op_Call);
1362 node->attr.call.callee_arr = NULL;
1365 ir_node *get_CallBegin_ptr(const ir_node *node) {
1366 assert(node->op == op_CallBegin);
1367 return get_irn_n(node, 0);
1370 void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
1371 assert(node->op == op_CallBegin);
1372 set_irn_n(node, 0, ptr);
1375 ir_node *get_CallBegin_call(const ir_node *node) {
1376 assert(node->op == op_CallBegin);
1377 return node->attr.callbegin.call;
1380 void set_CallBegin_call(ir_node *node, ir_node *call) {
1381 assert(node->op == op_CallBegin);
1382 node->attr.callbegin.call = call;
1387 ir_node * get_##OP##_left(const ir_node *node) { \
1388 assert(node->op == op_##OP); \
1389 return get_irn_n(node, node->op->op_index); \
1391 void set_##OP##_left(ir_node *node, ir_node *left) { \
1392 assert(node->op == op_##OP); \
1393 set_irn_n(node, node->op->op_index, left); \
1395 ir_node *get_##OP##_right(const ir_node *node) { \
1396 assert(node->op == op_##OP); \
1397 return get_irn_n(node, node->op->op_index + 1); \
1399 void set_##OP##_right(ir_node *node, ir_node *right) { \
1400 assert(node->op == op_##OP); \
1401 set_irn_n(node, node->op->op_index + 1, right); \
1405 ir_node *get_##OP##_op(const ir_node *node) { \
1406 assert(node->op == op_##OP); \
1407 return get_irn_n(node, node->op->op_index); \
1409 void set_##OP##_op(ir_node *node, ir_node *op) { \
1410 assert(node->op == op_##OP); \
1411 set_irn_n(node, node->op->op_index, op); \
1414 #define BINOP_MEM(OP) \
1418 get_##OP##_mem(const ir_node *node) { \
1419 assert(node->op == op_##OP); \
1420 return get_irn_n(node, 0); \
1424 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1425 assert(node->op == op_##OP); \
1426 set_irn_n(node, 0, mem); \
1432 ir_mode *get_##OP##_resmode(const ir_node *node) { \
1433 assert(node->op == op_##OP); \
1434 return node->attr.divmod.res_mode; \
1437 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
1438 assert(node->op == op_##OP); \
1439 node->attr.divmod.res_mode = mode; \
1465 int is_Div_remainderless(const ir_node *node) {
1466 assert(node->op == op_Div);
1467 return node->attr.divmod.no_remainder;
1470 int get_Conv_strict(const ir_node *node) {
1471 assert(node->op == op_Conv);
1472 return node->attr.conv.strict;
1475 void set_Conv_strict(ir_node *node, int strict_flag) {
1476 assert(node->op == op_Conv);
1477 node->attr.conv.strict = (char)strict_flag;
1481 get_Cast_type(ir_node *node) {
1482 assert(node->op == op_Cast);
1483 node->attr.cast.totype = skip_tid(node->attr.cast.totype);
1484 return node->attr.cast.totype;
1488 set_Cast_type(ir_node *node, ir_type *to_tp) {
1489 assert(node->op == op_Cast);
1490 node->attr.cast.totype = to_tp;
1494 /* Checks for upcast.
1496 * Returns true if the Cast node casts a class type to a super type.
1498 int is_Cast_upcast(ir_node *node) {
1499 ir_type *totype = get_Cast_type(node);
1500 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1502 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1505 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1506 totype = get_pointer_points_to_type(totype);
1507 fromtype = get_pointer_points_to_type(fromtype);
1512 if (!is_Class_type(totype)) return 0;
1513 return is_SubClass_of(fromtype, totype);
1516 /* Checks for downcast.
1518 * Returns true if the Cast node casts a class type to a sub type.
1520 int is_Cast_downcast(ir_node *node) {
1521 ir_type *totype = get_Cast_type(node);
1522 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1524 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1527 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1528 totype = get_pointer_points_to_type(totype);
1529 fromtype = get_pointer_points_to_type(fromtype);
1534 if (!is_Class_type(totype)) return 0;
1535 return is_SubClass_of(totype, fromtype);
1539 (is_unop)(const ir_node *node) {
1540 return _is_unop(node);
1544 get_unop_op(const ir_node *node) {
1545 if (node->op->opar == oparity_unary)
1546 return get_irn_n(node, node->op->op_index);
1548 assert(node->op->opar == oparity_unary);
1553 set_unop_op(ir_node *node, ir_node *op) {
1554 if (node->op->opar == oparity_unary)
1555 set_irn_n(node, node->op->op_index, op);
1557 assert(node->op->opar == oparity_unary);
1561 (is_binop)(const ir_node *node) {
1562 return _is_binop(node);
1566 get_binop_left(const ir_node *node) {
1567 assert(node->op->opar == oparity_binary);
1568 return get_irn_n(node, node->op->op_index);
1572 set_binop_left(ir_node *node, ir_node *left) {
1573 assert(node->op->opar == oparity_binary);
1574 set_irn_n(node, node->op->op_index, left);
1578 get_binop_right(const ir_node *node) {
1579 assert(node->op->opar == oparity_binary);
1580 return get_irn_n(node, node->op->op_index + 1);
1584 set_binop_right(ir_node *node, ir_node *right) {
1585 assert(node->op->opar == oparity_binary);
1586 set_irn_n(node, node->op->op_index + 1, right);
1590 (is_Phi)(const ir_node *n) {
1594 int is_Phi0(const ir_node *n) {
1597 return ((get_irn_op(n) == op_Phi) &&
1598 (get_irn_arity(n) == 0) &&
1599 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1603 get_Phi_preds_arr(ir_node *node) {
1604 assert(node->op == op_Phi);
1605 return (ir_node **)&(get_irn_in(node)[1]);
1609 get_Phi_n_preds(const ir_node *node) {
1610 assert(is_Phi(node) || is_Phi0(node));
1611 return (get_irn_arity(node));
1615 void set_Phi_n_preds(ir_node *node, int n_preds) {
1616 assert(node->op == op_Phi);
1621 get_Phi_pred(const ir_node *node, int pos) {
1622 assert(is_Phi(node) || is_Phi0(node));
1623 return get_irn_n(node, pos);
1627 set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
1628 assert(is_Phi(node) || is_Phi0(node));
1629 set_irn_n(node, pos, pred);
1632 ir_node *(get_Phi_next)(const ir_node *phi) {
1633 return _get_Phi_next(phi);
1636 void (set_Phi_next)(ir_node *phi, ir_node *next) {
1637 _set_Phi_next(phi, next);
/* Returns true for nodes that access memory through a (mem, ptr) input pair:
   currently exactly Load and Store. */
1640 int is_memop(const ir_node *node) {
1641 ir_opcode code = get_irn_opcode(node);
1642 return (code == iro_Load || code == iro_Store);
/* Returns the memory input (in 0) of a Load or Store node. */
1645 ir_node *get_memop_mem(const ir_node *node) {
1646 assert(is_memop(node));
1647 return get_irn_n(node, 0);
/* Sets the memory input (in 0) of a Load or Store node. */
1650 void set_memop_mem(ir_node *node, ir_node *mem) {
1651 assert(is_memop(node));
1652 set_irn_n(node, 0, mem);
/* Returns the pointer input (in 1) of a Load or Store node. */
1655 ir_node *get_memop_ptr(const ir_node *node) {
1656 assert(is_memop(node));
1657 return get_irn_n(node, 1);
/* Sets the pointer input (in 1) of a Load or Store node. */
1660 void set_memop_ptr(ir_node *node, ir_node *ptr) {
1661 assert(is_memop(node));
1662 set_irn_n(node, 1, ptr);
1666 get_Load_mem(const ir_node *node) {
1667 assert(node->op == op_Load);
1668 return get_irn_n(node, 0);
1672 set_Load_mem(ir_node *node, ir_node *mem) {
1673 assert(node->op == op_Load);
1674 set_irn_n(node, 0, mem);
1678 get_Load_ptr(const ir_node *node) {
1679 assert(node->op == op_Load);
1680 return get_irn_n(node, 1);
1684 set_Load_ptr(ir_node *node, ir_node *ptr) {
1685 assert(node->op == op_Load);
1686 set_irn_n(node, 1, ptr);
1690 get_Load_mode(const ir_node *node) {
1691 assert(node->op == op_Load);
1692 return node->attr.load.load_mode;
1696 set_Load_mode(ir_node *node, ir_mode *mode) {
1697 assert(node->op == op_Load);
1698 node->attr.load.load_mode = mode;
1702 get_Load_volatility(const ir_node *node) {
1703 assert(node->op == op_Load);
1704 return node->attr.load.volatility;
1708 set_Load_volatility(ir_node *node, ir_volatility volatility) {
1709 assert(node->op == op_Load);
1710 node->attr.load.volatility = volatility;
1714 get_Load_align(const ir_node *node) {
1715 assert(node->op == op_Load);
1716 return node->attr.load.aligned;
1720 set_Load_align(ir_node *node, ir_align align) {
1721 assert(node->op == op_Load);
1722 node->attr.load.aligned = align;
1727 get_Store_mem(const ir_node *node) {
1728 assert(node->op == op_Store);
1729 return get_irn_n(node, 0);
1733 set_Store_mem(ir_node *node, ir_node *mem) {
1734 assert(node->op == op_Store);
1735 set_irn_n(node, 0, mem);
1739 get_Store_ptr(const ir_node *node) {
1740 assert(node->op == op_Store);
1741 return get_irn_n(node, 1);
1745 set_Store_ptr(ir_node *node, ir_node *ptr) {
1746 assert(node->op == op_Store);
1747 set_irn_n(node, 1, ptr);
1751 get_Store_value(const ir_node *node) {
1752 assert(node->op == op_Store);
1753 return get_irn_n(node, 2);
1757 set_Store_value(ir_node *node, ir_node *value) {
1758 assert(node->op == op_Store);
1759 set_irn_n(node, 2, value);
1763 get_Store_volatility(const ir_node *node) {
1764 assert(node->op == op_Store);
1765 return node->attr.store.volatility;
1769 set_Store_volatility(ir_node *node, ir_volatility volatility) {
1770 assert(node->op == op_Store);
1771 node->attr.store.volatility = volatility;
1775 get_Store_align(const ir_node *node) {
1776 assert(node->op == op_Store);
1777 return node->attr.store.aligned;
1781 set_Store_align(ir_node *node, ir_align align) {
1782 assert(node->op == op_Store);
1783 node->attr.store.aligned = align;
1788 get_Alloc_mem(const ir_node *node) {
1789 assert(node->op == op_Alloc);
1790 return get_irn_n(node, 0);
1794 set_Alloc_mem(ir_node *node, ir_node *mem) {
1795 assert(node->op == op_Alloc);
1796 set_irn_n(node, 0, mem);
1800 get_Alloc_size(const ir_node *node) {
1801 assert(node->op == op_Alloc);
1802 return get_irn_n(node, 1);
1806 set_Alloc_size(ir_node *node, ir_node *size) {
1807 assert(node->op == op_Alloc);
1808 set_irn_n(node, 1, size);
1812 get_Alloc_type(ir_node *node) {
1813 assert(node->op == op_Alloc);
1814 return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
1818 set_Alloc_type(ir_node *node, ir_type *tp) {
1819 assert(node->op == op_Alloc);
1820 node->attr.alloc.type = tp;
1824 get_Alloc_where(const ir_node *node) {
1825 assert(node->op == op_Alloc);
1826 return node->attr.alloc.where;
1830 set_Alloc_where(ir_node *node, ir_where_alloc where) {
1831 assert(node->op == op_Alloc);
1832 node->attr.alloc.where = where;
1837 get_Free_mem(const ir_node *node) {
1838 assert(node->op == op_Free);
1839 return get_irn_n(node, 0);
1843 set_Free_mem(ir_node *node, ir_node *mem) {
1844 assert(node->op == op_Free);
1845 set_irn_n(node, 0, mem);
1849 get_Free_ptr(const ir_node *node) {
1850 assert(node->op == op_Free);
1851 return get_irn_n(node, 1);
1855 set_Free_ptr(ir_node *node, ir_node *ptr) {
1856 assert(node->op == op_Free);
1857 set_irn_n(node, 1, ptr);
1861 get_Free_size(const ir_node *node) {
1862 assert(node->op == op_Free);
1863 return get_irn_n(node, 2);
1867 set_Free_size(ir_node *node, ir_node *size) {
1868 assert(node->op == op_Free);
1869 set_irn_n(node, 2, size);
1873 get_Free_type(ir_node *node) {
1874 assert(node->op == op_Free);
1875 return node->attr.free.type = skip_tid(node->attr.free.type);
1879 set_Free_type(ir_node *node, ir_type *tp) {
1880 assert(node->op == op_Free);
1881 node->attr.free.type = tp;
1885 get_Free_where(const ir_node *node) {
1886 assert(node->op == op_Free);
1887 return node->attr.free.where;
1891 set_Free_where(ir_node *node, ir_where_alloc where) {
1892 assert(node->op == op_Free);
1893 node->attr.free.where = where;
/* Returns the raw predecessor array of a Sync node.
   Skips slot 0 of the in-array (the block reference), so the result
   is indexed 0..n_preds-1.  Direct array access; use with care. */
1896 ir_node **get_Sync_preds_arr(ir_node *node) {
1897 assert(node->op == op_Sync);
1898 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of memory predecessors of a Sync node. */
1901 int get_Sync_n_preds(const ir_node *node) {
1902 assert(node->op == op_Sync);
1903 return (get_irn_arity(node));
1907 void set_Sync_n_preds(ir_node *node, int n_preds) {
1908 assert(node->op == op_Sync);
/* Returns the pos'th predecessor of a Sync node. */
1912 ir_node *get_Sync_pred(const ir_node *node, int pos) {
1913 assert(node->op == op_Sync);
1914 return get_irn_n(node, pos);
/* Sets the pos'th predecessor of a Sync node. */
1917 void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
1918 assert(node->op == op_Sync);
1919 set_irn_n(node, pos, pred);
1922 /* Add a new Sync predecessor */
1923 void add_Sync_pred(ir_node *node, ir_node *pred) {
1924 assert(node->op == op_Sync);
1925 add_irn_n(node, pred);
1928 /* Returns the source language type of a Proj node. */
1929 ir_type *get_Proj_type(ir_node *n) {
1930 ir_type *tp = firm_unknown_type;
1931 ir_node *pred = get_Proj_pred(n);
1933 switch (get_irn_opcode(pred)) {
1936 /* Deal with Start / Call here: we need to know the Proj Nr. */
1937 assert(get_irn_mode(pred) == mode_T);
1938 pred_pred = get_Proj_pred(pred);
1939 if (get_irn_op(pred_pred) == op_Start) {
1940 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1941 tp = get_method_param_type(mtp, get_Proj_proj(n));
1942 } else if (get_irn_op(pred_pred) == op_Call) {
1943 ir_type *mtp = get_Call_type(pred_pred);
1944 tp = get_method_res_type(mtp, get_Proj_proj(n));
1947 case iro_Start: break;
1948 case iro_Call: break;
1950 ir_node *a = get_Load_ptr(pred);
1952 tp = get_entity_type(get_Sel_entity(a));
1961 get_Proj_pred(const ir_node *node) {
1962 assert(is_Proj(node));
1963 return get_irn_n(node, 0);
1967 set_Proj_pred(ir_node *node, ir_node *pred) {
1968 assert(is_Proj(node));
1969 set_irn_n(node, 0, pred);
1973 get_Proj_proj(const ir_node *node) {
1974 assert(is_Proj(node));
1975 if (get_irn_opcode(node) == iro_Proj) {
1976 return node->attr.proj;
1978 assert(get_irn_opcode(node) == iro_Filter);
1979 return node->attr.filter.proj;
1984 set_Proj_proj(ir_node *node, long proj) {
1985 assert(node->op == op_Proj);
1986 node->attr.proj = proj;
1990 get_Tuple_preds_arr(ir_node *node) {
1991 assert(node->op == op_Tuple);
1992 return (ir_node **)&(get_irn_in(node)[1]);
1996 get_Tuple_n_preds(const ir_node *node) {
1997 assert(node->op == op_Tuple);
1998 return (get_irn_arity(node));
2003 set_Tuple_n_preds(ir_node *node, int n_preds) {
2004 assert(node->op == op_Tuple);
2009 get_Tuple_pred(const ir_node *node, int pos) {
2010 assert(node->op == op_Tuple);
2011 return get_irn_n(node, pos);
2015 set_Tuple_pred(ir_node *node, int pos, ir_node *pred) {
2016 assert(node->op == op_Tuple);
2017 set_irn_n(node, pos, pred);
2021 get_Id_pred(const ir_node *node) {
2022 assert(node->op == op_Id);
2023 return get_irn_n(node, 0);
2027 set_Id_pred(ir_node *node, ir_node *pred) {
2028 assert(node->op == op_Id);
2029 set_irn_n(node, 0, pred);
2032 ir_node *get_Confirm_value(const ir_node *node) {
2033 assert(node->op == op_Confirm);
2034 return get_irn_n(node, 0);
2037 void set_Confirm_value(ir_node *node, ir_node *value) {
2038 assert(node->op == op_Confirm);
2039 set_irn_n(node, 0, value);
2042 ir_node *get_Confirm_bound(const ir_node *node) {
2043 assert(node->op == op_Confirm);
2044 return get_irn_n(node, 1);
2047 void set_Confirm_bound(ir_node *node, ir_node *bound) {
2048 assert(node->op == op_Confirm);
2049 set_irn_n(node, 0, bound);
2052 pn_Cmp get_Confirm_cmp(const ir_node *node) {
2053 assert(node->op == op_Confirm);
2054 return node->attr.confirm.cmp;
2057 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
2058 assert(node->op == op_Confirm);
2059 node->attr.confirm.cmp = cmp;
2063 get_Filter_pred(ir_node *node) {
2064 assert(node->op == op_Filter);
2069 set_Filter_pred(ir_node *node, ir_node *pred) {
2070 assert(node->op == op_Filter);
2075 get_Filter_proj(ir_node *node) {
2076 assert(node->op == op_Filter);
2077 return node->attr.filter.proj;
2081 set_Filter_proj(ir_node *node, long proj) {
2082 assert(node->op == op_Filter);
2083 node->attr.filter.proj = proj;
2086 /* Don't use get_irn_arity, get_irn_n in implementation as access
2087 shall work independent of view!!! */
2088 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in) {
2089 assert(node->op == op_Filter);
2090 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2091 ir_graph *irg = get_irn_irg(node);
2092 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2093 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
2094 node->attr.filter.in_cg[0] = node->in[0];
2096 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2099 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2100 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2101 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2102 node->attr.filter.in_cg[pos + 1] = pred;
2105 int get_Filter_n_cg_preds(ir_node *node) {
2106 assert(node->op == op_Filter && node->attr.filter.in_cg);
2107 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2110 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2112 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2114 arity = ARR_LEN(node->attr.filter.in_cg);
2115 assert(pos < arity - 1);
2116 return node->attr.filter.in_cg[pos + 1];
2120 ir_node *get_Mux_sel(const ir_node *node) {
2121 if (node->op == op_Psi) {
2122 assert(get_irn_arity(node) == 3);
2123 return get_Psi_cond(node, 0);
2125 assert(node->op == op_Mux);
2129 void set_Mux_sel(ir_node *node, ir_node *sel) {
2130 if (node->op == op_Psi) {
2131 assert(get_irn_arity(node) == 3);
2132 set_Psi_cond(node, 0, sel);
2134 assert(node->op == op_Mux);
2139 ir_node *get_Mux_false(const ir_node *node) {
2140 if (node->op == op_Psi) {
2141 assert(get_irn_arity(node) == 3);
2142 return get_Psi_default(node);
2144 assert(node->op == op_Mux);
2148 void set_Mux_false(ir_node *node, ir_node *ir_false) {
2149 if (node->op == op_Psi) {
2150 assert(get_irn_arity(node) == 3);
2151 set_Psi_default(node, ir_false);
2153 assert(node->op == op_Mux);
2154 node->in[2] = ir_false;
2158 ir_node *get_Mux_true(const ir_node *node) {
2159 if (node->op == op_Psi) {
2160 assert(get_irn_arity(node) == 3);
2161 return get_Psi_val(node, 0);
2163 assert(node->op == op_Mux);
2167 void set_Mux_true(ir_node *node, ir_node *ir_true) {
2168 if (node->op == op_Psi) {
2169 assert(get_irn_arity(node) == 3);
2170 set_Psi_val(node, 0, ir_true);
2172 assert(node->op == op_Mux);
2173 node->in[3] = ir_true;
2178 ir_node *get_Psi_cond(const ir_node *node, int pos) {
2179 assert(node->op == op_Psi);
2180 assert(pos < get_Psi_n_conds(node));
2181 return get_irn_n(node, 2 * pos);
2184 void set_Psi_cond(ir_node *node, int pos, ir_node *cond) {
2185 assert(node->op == op_Psi);
2186 assert(pos < get_Psi_n_conds(node));
2187 set_irn_n(node, 2 * pos, cond);
2190 ir_node *get_Psi_val(const ir_node *node, int pos) {
2191 assert(node->op == op_Psi);
2192 assert(pos < get_Psi_n_conds(node));
2193 return get_irn_n(node, 2 * pos + 1);
2196 void set_Psi_val(ir_node *node, int pos, ir_node *val) {
2197 assert(node->op == op_Psi);
2198 assert(pos < get_Psi_n_conds(node));
2199 set_irn_n(node, 2 * pos + 1, val);
2202 ir_node *get_Psi_default(const ir_node *node) {
2203 int def_pos = get_irn_arity(node) - 1;
2204 assert(node->op == op_Psi);
2205 return get_irn_n(node, def_pos);
2208 void set_Psi_default(ir_node *node, ir_node *val) {
2209 int def_pos = get_irn_arity(node);
2210 assert(node->op == op_Psi);
2211 set_irn_n(node, def_pos, val);
2214 int (get_Psi_n_conds)(const ir_node *node) {
2215 return _get_Psi_n_conds(node);
2219 ir_node *get_CopyB_mem(const ir_node *node) {
2220 assert(node->op == op_CopyB);
2221 return get_irn_n(node, 0);
2224 void set_CopyB_mem(ir_node *node, ir_node *mem) {
2225 assert(node->op == op_CopyB);
2226 set_irn_n(node, 0, mem);
2229 ir_node *get_CopyB_dst(const ir_node *node) {
2230 assert(node->op == op_CopyB);
2231 return get_irn_n(node, 1);
2234 void set_CopyB_dst(ir_node *node, ir_node *dst) {
2235 assert(node->op == op_CopyB);
2236 set_irn_n(node, 1, dst);
2239 ir_node *get_CopyB_src(const ir_node *node) {
2240 assert(node->op == op_CopyB);
2241 return get_irn_n(node, 2);
2244 void set_CopyB_src(ir_node *node, ir_node *src) {
2245 assert(node->op == op_CopyB);
2246 set_irn_n(node, 2, src);
2249 ir_type *get_CopyB_type(ir_node *node) {
2250 assert(node->op == op_CopyB);
2251 return node->attr.copyb.data_type = skip_tid(node->attr.copyb.data_type);
2254 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2255 assert(node->op == op_CopyB && data_type);
2256 node->attr.copyb.data_type = data_type;
2261 get_InstOf_type(ir_node *node) {
2262 assert(node->op == op_InstOf);
2263 return node->attr.instof.type = skip_tid(node->attr.instof.type);
2267 set_InstOf_type(ir_node *node, ir_type *type) {
2268 assert(node->op == op_InstOf);
2269 node->attr.instof.type = type;
2273 get_InstOf_store(const ir_node *node) {
2274 assert(node->op == op_InstOf);
2275 return get_irn_n(node, 0);
2279 set_InstOf_store(ir_node *node, ir_node *obj) {
2280 assert(node->op == op_InstOf);
2281 set_irn_n(node, 0, obj);
2285 get_InstOf_obj(const ir_node *node) {
2286 assert(node->op == op_InstOf);
2287 return get_irn_n(node, 1);
2291 set_InstOf_obj(ir_node *node, ir_node *obj) {
2292 assert(node->op == op_InstOf);
2293 set_irn_n(node, 1, obj);
2296 /* Returns the memory input of a Raise operation. */
2298 get_Raise_mem(const ir_node *node) {
2299 assert(node->op == op_Raise);
2300 return get_irn_n(node, 0);
2304 set_Raise_mem(ir_node *node, ir_node *mem) {
2305 assert(node->op == op_Raise);
2306 set_irn_n(node, 0, mem);
2310 get_Raise_exo_ptr(const ir_node *node) {
2311 assert(node->op == op_Raise);
2312 return get_irn_n(node, 1);
2316 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr) {
2317 assert(node->op == op_Raise);
2318 set_irn_n(node, 1, exo_ptr);
2323 /* Returns the memory input of a Bound operation. */
2324 ir_node *get_Bound_mem(const ir_node *bound) {
2325 assert(bound->op == op_Bound);
2326 return get_irn_n(bound, 0);
2329 void set_Bound_mem(ir_node *bound, ir_node *mem) {
2330 assert(bound->op == op_Bound);
2331 set_irn_n(bound, 0, mem);
2334 /* Returns the index input of a Bound operation. */
2335 ir_node *get_Bound_index(const ir_node *bound) {
2336 assert(bound->op == op_Bound);
2337 return get_irn_n(bound, 1);
2340 void set_Bound_index(ir_node *bound, ir_node *idx) {
2341 assert(bound->op == op_Bound);
2342 set_irn_n(bound, 1, idx);
2345 /* Returns the lower bound input of a Bound operation. */
2346 ir_node *get_Bound_lower(const ir_node *bound) {
2347 assert(bound->op == op_Bound);
2348 return get_irn_n(bound, 2);
2351 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2352 assert(bound->op == op_Bound);
2353 set_irn_n(bound, 2, lower);
2356 /* Returns the upper bound input of a Bound operation. */
2357 ir_node *get_Bound_upper(const ir_node *bound) {
2358 assert(bound->op == op_Bound);
2359 return get_irn_n(bound, 3);
2362 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2363 assert(bound->op == op_Bound);
2364 set_irn_n(bound, 3, upper);
2367 /* Return the operand of a Pin node. */
2368 ir_node *get_Pin_op(const ir_node *pin) {
2369 assert(pin->op == op_Pin);
2370 return get_irn_n(pin, 0);
2373 void set_Pin_op(ir_node *pin, ir_node *node) {
2374 assert(pin->op == op_Pin);
2375 set_irn_n(pin, 0, node);
2378 /* Return the assembler text of an ASM pseudo node. */
2379 ident *get_ASM_text(const ir_node *node) {
2380 assert(node->op == op_ASM);
2381 return node->attr.assem.asm_text;
2384 /* Return the number of input constraints for an ASM node. */
2385 int get_ASM_n_input_constraints(const ir_node *node) {
2386 assert(node->op == op_ASM);
2387 return ARR_LEN(node->attr.assem.inputs);
2390 /* Return the input constraints for an ASM node. This is a flexible array. */
2391 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node) {
2392 assert(node->op == op_ASM);
2393 return node->attr.assem.inputs;
2396 /* Return the number of output constraints for an ASM node. */
2397 int get_ASM_n_output_constraints(const ir_node *node) {
2398 assert(node->op == op_ASM);
2399 return ARR_LEN(node->attr.assem.outputs);
2402 /* Return the output constraints for an ASM node. */
2403 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node) {
2404 assert(node->op == op_ASM);
2405 return node->attr.assem.outputs;
2408 /* Return the number of clobbered registers for an ASM node. */
2409 int get_ASM_n_clobbers(const ir_node *node) {
2410 assert(node->op == op_ASM);
2411 return ARR_LEN(node->attr.assem.clobber);
2414 /* Return the list of clobbered registers for an ASM node. */
2415 ident **get_ASM_clobbers(const ir_node *node) {
2416 assert(node->op == op_ASM);
2417 return node->attr.assem.clobber;
2420 /* returns the graph of a node */
2422 get_irn_irg(const ir_node *node) {
2424 * Do not use get_nodes_Block() here, because this
2425 * will check the pinned state.
2426 * However even a 'wrong' block is always in the proper
2429 if (! is_Block(node))
2430 node = get_irn_n(node, -1);
2431 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2432 node = get_irn_n(node, -1);
2433 assert(get_irn_op(node) == op_Block);
2434 return node->attr.block.irg;
2438 /*----------------------------------------------------------------*/
2439 /* Auxiliary routines */
2440 /*----------------------------------------------------------------*/
2443 skip_Proj(ir_node *node) {
2444 /* don't assert node !!! */
2449 node = get_Proj_pred(node);
2455 skip_Proj_const(const ir_node *node) {
2456 /* don't assert node !!! */
2461 node = get_Proj_pred(node);
2467 skip_Tuple(ir_node *node) {
2471 if (!get_opt_normalize()) return node;
2474 if (get_irn_op(node) == op_Proj) {
2475 pred = get_Proj_pred(node);
2476 op = get_irn_op(pred);
2479 * Looks strange but calls get_irn_op() only once
2480 * in most often cases.
2482 if (op == op_Proj) { /* nested Tuple ? */
2483 pred = skip_Tuple(pred);
2484 op = get_irn_op(pred);
2486 if (op == op_Tuple) {
2487 node = get_Tuple_pred(pred, get_Proj_proj(node));
2490 } else if (op == op_Tuple) {
2491 node = get_Tuple_pred(pred, get_Proj_proj(node));
2498 /* returns operand of node if node is a Cast */
2499 ir_node *skip_Cast(ir_node *node) {
2500 if (get_irn_op(node) == op_Cast)
2501 return get_Cast_op(node);
2505 /* returns operand of node if node is a Confirm */
2506 ir_node *skip_Confirm(ir_node *node) {
2507 if (get_irn_op(node) == op_Confirm)
2508 return get_Confirm_value(node);
2512 /* skip all high-level ops */
2513 ir_node *skip_HighLevel_ops(ir_node *node) {
2514 while (is_op_highlevel(get_irn_op(node))) {
2515 node = get_irn_n(node, 0);
2521 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2522 * than any other approach, as Id chains are resolved and all point to the real node, or
2523 * all id's are self loops.
2525 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2526 * a little bit "hand optimized".
2528 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2531 skip_Id(ir_node *node) {
2533 /* don't assert node !!! */
2535 if (!node || (node->op != op_Id)) return node;
2537 /* Don't use get_Id_pred(): We get into an endless loop for
2538 self-referencing Ids. */
2539 pred = node->in[0+1];
2541 if (pred->op != op_Id) return pred;
2543 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2544 ir_node *rem_pred, *res;
2546 if (pred->op != op_Id) return pred; /* shortcut */
2549 assert(get_irn_arity (node) > 0);
2551 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2552 res = skip_Id(rem_pred);
2553 if (res->op == op_Id) /* self-loop */ return node;
2555 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2562 void skip_Id_and_store(ir_node **node) {
2565 if (!n || (n->op != op_Id)) return;
2567 /* Don't use get_Id_pred(): We get into an endless loop for
2568 self-referencing Ids. */
2573 (is_Bad)(const ir_node *node) {
2574 return _is_Bad(node);
2578 (is_NoMem)(const ir_node *node) {
2579 return _is_NoMem(node);
2583 (is_Minus)(const ir_node *node) {
2584 return _is_Minus(node);
2588 (is_Mod)(const ir_node *node) {
2589 return _is_Mod(node);
2593 (is_Div)(const ir_node *node) {
2594 return _is_Div(node);
2598 (is_DivMod)(const ir_node *node) {
2599 return _is_DivMod(node);
2603 (is_Quot)(const ir_node *node) {
2604 return _is_Quot(node);
2608 (is_Add)(const ir_node *node) {
2609 return _is_Add(node);
2613 (is_And)(const ir_node *node) {
2614 return _is_And(node);
2618 (is_Or)(const ir_node *node) {
2619 return _is_Or(node);
2623 (is_Eor)(const ir_node *node) {
2624 return _is_Eor(node);
2628 (is_Sub)(const ir_node *node) {
2629 return _is_Sub(node);
2633 (is_Shl)(const ir_node *node) {
2634 return _is_Shl(node);
2638 (is_Shr)(const ir_node *node) {
2639 return _is_Shr(node);
2643 (is_Shrs)(const ir_node *node) {
2644 return _is_Shrs(node);
2648 (is_Rot)(const ir_node *node) {
2649 return _is_Rot(node);
2653 (is_Not)(const ir_node *node) {
2654 return _is_Not(node);
2658 (is_Psi)(const ir_node *node) {
2659 return _is_Psi(node);
2663 (is_Tuple)(const ir_node *node) {
2664 return _is_Tuple(node);
2668 (is_Bound)(const ir_node *node) {
2669 return _is_Bound(node);
2673 (is_Start)(const ir_node *node) {
2674 return _is_Start(node);
2678 (is_End)(const ir_node *node) {
2679 return _is_End(node);
2683 (is_Const)(const ir_node *node) {
2684 return _is_Const(node);
2688 (is_Conv)(const ir_node *node) {
2689 return _is_Conv(node);
2693 (is_strictConv)(const ir_node *node) {
2694 return _is_strictConv(node);
2698 (is_Cast)(const ir_node *node) {
2699 return _is_Cast(node);
2703 (is_no_Block)(const ir_node *node) {
2704 return _is_no_Block(node);
2708 (is_Block)(const ir_node *node) {
2709 return _is_Block(node);
2712 /* returns true if node is an Unknown node. */
2714 (is_Unknown)(const ir_node *node) {
2715 return _is_Unknown(node);
2718 /* returns true if node is a Return node. */
2720 (is_Return)(const ir_node *node) {
2721 return _is_Return(node);
2724 /* returns true if node is a Call node. */
2726 (is_Call)(const ir_node *node) {
2727 return _is_Call(node);
2730 /* returns true if node is a Sel node. */
2732 (is_Sel)(const ir_node *node) {
2733 return _is_Sel(node);
2736 /* returns true if node is a Mux node or a Psi with only one condition. */
2738 (is_Mux)(const ir_node *node) {
2739 return _is_Mux(node);
2742 /* returns true if node is a Load node. */
2744 (is_Load)(const ir_node *node) {
2745 return _is_Load(node);
2748 /* returns true if node is a Load node. */
2750 (is_Store)(const ir_node *node) {
2751 return _is_Store(node);
2754 /* returns true if node is a Sync node. */
2756 (is_Sync)(const ir_node *node) {
2757 return _is_Sync(node);
2760 /* Returns true if node is a Confirm node. */
2762 (is_Confirm)(const ir_node *node) {
2763 return _is_Confirm(node);
2766 /* Returns true if node is a Pin node. */
2768 (is_Pin)(const ir_node *node) {
2769 return _is_Pin(node);
2772 /* Returns true if node is a SymConst node. */
2774 (is_SymConst)(const ir_node *node) {
2775 return _is_SymConst(node);
2778 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
2780 (is_SymConst_addr_ent)(const ir_node *node) {
2781 return _is_SymConst_addr_ent(node);
2784 /* Returns true if node is a Cond node. */
2786 (is_Cond)(const ir_node *node) {
2787 return _is_Cond(node);
2791 (is_CopyB)(const ir_node *node) {
2792 return _is_CopyB(node);
2795 /* returns true if node is a Cmp node. */
2797 (is_Cmp)(const ir_node *node) {
2798 return _is_Cmp(node);
2801 /* returns true if node is an Alloc node. */
2803 (is_Alloc)(const ir_node *node) {
2804 return _is_Alloc(node);
2807 /* returns true if a node is a Jmp node. */
2809 (is_Jmp)(const ir_node *node) {
2810 return _is_Jmp(node);
2813 /* returns true if a node is a Raise node. */
2815 (is_Raise)(const ir_node *node) {
2816 return _is_Raise(node);
2819 /* returns true if a node is an ASM node. */
2821 (is_ASM)(const ir_node *node) {
2822 return _is_ASM(node);
2826 (is_Proj)(const ir_node *node) {
2828 return node->op == op_Proj ||
2829 (!get_interprocedural_view() && node->op == op_Filter);
2832 /* Returns true if the operation manipulates control flow. */
2833 int is_cfop(const ir_node *node) {
2834 return is_op_cfopcode(get_irn_op(node));
2837 /* Returns true if the operation manipulates interprocedural control flow:
2838 CallBegin, EndReg, EndExcept */
2839 int is_ip_cfop(const ir_node *node) {
2840 return is_ip_cfopcode(get_irn_op(node));
2843 /* Returns true if the operation can change the control flow because
2846 is_fragile_op(const ir_node *node) {
2847 return is_op_fragile(get_irn_op(node));
2850 /* Returns the memory operand of fragile operations. */
2851 ir_node *get_fragile_op_mem(ir_node *node) {
2852 assert(node && is_fragile_op(node));
2854 switch (get_irn_opcode(node)) {
2865 return get_irn_n(node, pn_Generic_M_regular);
2870 assert(0 && "should not be reached");
2875 /* Returns the result mode of a Div operation. */
2876 ir_mode *get_divop_resmod(const ir_node *node) {
2877 switch (get_irn_opcode(node)) {
2878 case iro_Quot : return get_Quot_resmode(node);
2879 case iro_DivMod: return get_DivMod_resmode(node);
2880 case iro_Div : return get_Div_resmode(node);
2881 case iro_Mod : return get_Mod_resmode(node);
2883 assert(0 && "should not be reached");
2888 /* Returns true if the operation is a forking control flow operation. */
2889 int (is_irn_forking)(const ir_node *node) {
2890 return _is_irn_forking(node);
2893 /* Return the type associated with the value produced by n
2894 * if the node remarks this type as it is the case for
2895 * Cast, Const, SymConst and some Proj nodes. */
2896 ir_type *(get_irn_type)(ir_node *node) {
2897 return _get_irn_type(node);
2900 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2902 ir_type *(get_irn_type_attr)(ir_node *node) {
2903 return _get_irn_type_attr(node);
2906 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2907 ir_entity *(get_irn_entity_attr)(ir_node *node) {
2908 return _get_irn_entity_attr(node);
2911 /* Returns non-zero for constant-like nodes. */
2912 int (is_irn_constlike)(const ir_node *node) {
2913 return _is_irn_constlike(node);
2917 * Returns non-zero for nodes that are allowed to have keep-alives and
2918 * are neither Block nor PhiM.
2920 int (is_irn_keep)(const ir_node *node) {
2921 return _is_irn_keep(node);
2925 * Returns non-zero for nodes that are always placed in the start block.
2927 int (is_irn_start_block_placed)(const ir_node *node) {
2928 return _is_irn_start_block_placed(node);
2931 /* Returns non-zero for nodes that are machine operations. */
2932 int (is_irn_machine_op)(const ir_node *node) {
2933 return _is_irn_machine_op(node);
2936 /* Returns non-zero for nodes that are machine operands. */
2937 int (is_irn_machine_operand)(const ir_node *node) {
2938 return _is_irn_machine_operand(node);
2941 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2942 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2943 return _is_irn_machine_user(node, n);
2947 /* Gets the string representation of the jump prediction .*/
2948 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
2951 case COND_JMP_PRED_NONE: return "no prediction";
2952 case COND_JMP_PRED_TRUE: return "true taken";
2953 case COND_JMP_PRED_FALSE: return "false taken";
2957 /* Returns the conditional jump prediction of a Cond node. */
2958 cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond) {
2959 return _get_Cond_jmp_pred(cond);
2962 /* Sets a new conditional jump prediction. */
2963 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2964 _set_Cond_jmp_pred(cond, pred);
2967 /** the get_type operation must be always implemented and return a firm type */
2968 static ir_type *get_Default_type(ir_node *n) {
2970 return get_unknown_type();
2973 /* Sets the get_type operation for an ir_op_ops. */
2974 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
2976 case iro_Const: ops->get_type = get_Const_type; break;
2977 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2978 case iro_Cast: ops->get_type = get_Cast_type; break;
2979 case iro_Proj: ops->get_type = get_Proj_type; break;
2981 /* not allowed to be NULL */
2982 if (! ops->get_type)
2983 ops->get_type = get_Default_type;
2989 /** Return the attribute type of a SymConst node if exists */
2990 static ir_type *get_SymConst_attr_type(ir_node *self) {
2991 symconst_kind kind = get_SymConst_kind(self);
2992 if (SYMCONST_HAS_TYPE(kind))
2993 return get_SymConst_type(self);
2997 /** Return the attribute entity of a SymConst node if exists */
2998 static ir_entity *get_SymConst_attr_entity(ir_node *self) {
2999 symconst_kind kind = get_SymConst_kind(self);
3000 if (SYMCONST_HAS_ENT(kind))
3001 return get_SymConst_entity(self);
3005 /** the get_type_attr operation must be always implemented */
3006 static ir_type *get_Null_type(ir_node *n) {
3008 return firm_unknown_type;
3011 /* Sets the get_type operation for an ir_op_ops. */
3012 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
3014 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
3015 case iro_Call: ops->get_type_attr = get_Call_type; break;
3016 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
3017 case iro_Free: ops->get_type_attr = get_Free_type; break;
3018 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
3020 /* not allowed to be NULL */
3021 if (! ops->get_type_attr)
3022 ops->get_type_attr = get_Null_type;
3028 /** the get_entity_attr operation must be always implemented */
3029 static ir_entity *get_Null_ent(ir_node *n) {
3034 /* Sets the get_type operation for an ir_op_ops. */
3035 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
3037 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
3038 case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
3040 /* not allowed to be NULL */
3041 if (! ops->get_entity_attr)
3042 ops->get_entity_attr = get_Null_ent;
3048 /* Sets the debug information of a node. */
3049 void (set_irn_dbg_info)(ir_node *n, dbg_info *db) {
3050 _set_irn_dbg_info(n, db);
3054 * Returns the debug information of an node.
3056 * @param n The node.
3058 dbg_info *(get_irn_dbg_info)(const ir_node *n) {
3059 return _get_irn_dbg_info(n);
3064 #ifdef DEBUG_libfirm
3065 void dump_irn(const ir_node *n) {
3066 int i, arity = get_irn_arity(n);
3067 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
3069 ir_node *pred = get_irn_n(n, -1);
3070 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
3071 get_irn_node_nr(pred), (void *)pred);
3073 printf(" preds: \n");
3074 for (i = 0; i < arity; ++i) {
3075 ir_node *pred = get_irn_n(n, i);
3076 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
3077 get_irn_node_nr(pred), (void *)pred);
3081 #else /* DEBUG_libfirm */
3082 void dump_irn(const ir_node *n) { (void) n; }
3083 #endif /* DEBUG_libfirm */