2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
/* some constants fixing the positions of nodes predecessors
   in the in array */
#define CALL_PARAM_OFFSET     2  /* Call: in[0]=mem, in[1]=ptr, params follow */
#define BUILDIN_PARAM_OFFSET  1  /* Builtin: in[0]=mem, params follow */
#define SEL_INDEX_OFFSET      2  /* Sel: in[0]=mem, in[1]=ptr, indices follow */
#define RETURN_RESULT_OFFSET  1  /* mem is not a result */
#define END_KEEPALIVE_OFFSET  0  /* End: keep-alive edges start at input 0 */
/**
 * Names of the 16 pn_Cmp relation constants, indexed by the pn_Cmp value.
 */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 *
 * @param pnc  the pn_Cmp value, must be in [0, 15]
 * @return a pointer to a static name string; caller must not free it
 */
const char *get_pnc_string(int pnc) {
	assert(pnc >= 0 && pnc <
	       (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
	return pnc_name_arr[pnc];
}
/**
 * Calculates the negated (Complement(R)) pnc condition.
 *
 * NOTE(review): the body shown here is truncated -- the relation-flipping
 * statement(s) and the return are not visible in this excerpt; verify
 * against the full source before editing.
 */
pn_Cmp get_negated_pnc(long pnc, ir_mode *mode) {
	/* do NOT add the Uo bit for non-floating point values */
	if (! mode_is_float(mode))
85 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
86 pn_Cmp get_inversed_pnc(long pnc) {
87 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
88 long lesser = pnc & pn_Cmp_Lt;
89 long greater = pnc & pn_Cmp_Gt;
91 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
 * Indicates whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;

/* register new space for every node */
unsigned firm_register_additional_node_data(unsigned size) {
	assert(!forbid_new_data && "Too late to register additional node data");

	/* be robust when asserts are compiled out: ignore late requests */
	if (forbid_new_data)
		return 0;

	return firm_add_node_size += size;
}
120 void init_irnode(void) {
121 /* Forbid the addition of new data to an ir node. */
/**
 * irnode constructor.
 * Create a new irnode in irg, with an op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic array is created.
 *
 * NOTE(review): this excerpt is truncated -- the return-type line, local
 * declarations (res, p, i), several closing braces, the else-branches and
 * the final return are not visible here. Verify against the full source.
 */
new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
            int arity, ir_node **in)
	/* one chunk: registered custom data first, then the ir_node itself */
	size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
	p = obstack_alloc(irg->obst, node_size);
	memset(p, 0, node_size);
	/* the ir_node starts after the custom data area */
	res = (ir_node *)(p + firm_add_node_size);
	res->kind = k_ir_node;
	res->node_idx = irg_register_node_idx(irg, res);
	res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
	/* not nice but necessary: End and Sync must always have a flexible array */
	if (op == op_End || op == op_Sync)
		res->in = NEW_ARR_F(ir_node *, (arity+1));
		res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
	memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
	set_irn_dbg_info(res, db);
	res->node_nr = get_irp_new_node_nr();
	for (i = 0; i < EDGE_KIND_LAST; ++i) {
		INIT_LIST_HEAD(&res->edge_info[i].outs_head);
		/* edges will be built immediately */
		res->edge_info[i].edges_built = 1;
		res->edge_info[i].out_count = 0;
	/* don't put this into the for loop, arity is -1 for some nodes! */
	edges_notify_edge(res, -1, res->in[0], NULL, irg);
	for (i = 1; i <= arity; ++i)
		edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
	hook_new_node(irg, res);
	if (get_irg_phase_state(irg) == phase_backend) {
		be_info_new_node(res);
	/* Init the VRP (value range propagation) structures */
	res->vrp.range_type = VRP_UNDEFINED;
	if(mode_is_int(mode)) {
		/* We are assuming that 0 is always represented as 0x0000 */
		res->vrp.bits_set = new_tarval_from_long(0, mode);
		res->vrp.bits_not_set = new_tarval_from_long(0, mode);
		res->vrp.range_bottom = get_tarval_top();
		res->vrp.range_top = get_tarval_top();
		/* non-integer modes carry no VRP information */
		res->vrp.bits_set = get_tarval_bad();
		res->vrp.bits_not_set = get_tarval_bad();
		res->vrp.range_bottom = get_tarval_bad();
		res->vrp.range_top = get_tarval_bad();
	res->vrp.bits_node = NULL;
	res->vrp.range_node = NULL;
	res->vrp.range_op = VRP_NONE;
/*-- getting some parameters from ir_nodes --*/

/* NOTE(review): function names below are parenthesized to prevent expansion
   of same-named macros. Closing braces of several functions are not visible
   in this excerpt. */

int (is_ir_node)(const void *thing) {
	return _is_ir_node(thing);

int (get_irn_intra_arity)(const ir_node *node) {
	return _get_irn_intra_arity(node);

int (get_irn_inter_arity)(const ir_node *node) {
	return _get_irn_inter_arity(node);

/* dispatch pointer: selects intra- or inter-procedural arity computation */
int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;

int (get_irn_arity)(const ir_node *node) {
	return _get_irn_arity(node);

/* Returns the array with ins. This array is shifted with respect to the
   array accessed by get_irn_n: The block operand is at position 0 not -1.
   (@@@ This should be changed.)
   The order of the predecessors in this array is not guaranteed, except that
   lists of operands as predecessors of Block or arguments of a Call are
   consecutive. */
ir_node **get_irn_in(const ir_node *node) {
#ifdef INTERPROCEDURAL_VIEW
	if (get_interprocedural_view()) { /* handle Filter and Block specially */
		if (get_irn_opcode(node) == iro_Filter) {
			assert(node->attr.filter.in_cg);
			return node->attr.filter.in_cg;
		} else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
			return node->attr.block.in_cg;
		/* else fall through */
#endif /* INTERPROCEDURAL_VIEW */
/* Sets all predecessors of a node at once, keeping the edge information
   consistent.
   NOTE(review): the declaration of pOld_in (and its intra-procedural
   initialization), the loop variable i and several closing braces are not
   visible in this excerpt. */
void set_irn_in(ir_node *node, int arity, ir_node **in) {
	ir_graph *irg = get_irn_irg(node);
#ifdef INTERPROCEDURAL_VIEW
	if (get_interprocedural_view()) { /* handle Filter and Block specially */
		ir_opcode code = get_irn_opcode(node);
		if (code == iro_Filter) {
			assert(node->attr.filter.in_cg);
			pOld_in = &node->attr.filter.in_cg;
		} else if (code == iro_Block && node->attr.block.in_cg) {
			pOld_in = &node->attr.block.in_cg;
#endif /* INTERPROCEDURAL_VIEW */
	/* notify edge module: changed edges for the common prefix, new edges
	   beyond the old arity */
	for (i = 0; i < arity; i++) {
		if (i < ARR_LEN(*pOld_in)-1)
			edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
			edges_notify_edge(node, i, in[i], NULL, irg);
	/* notify about edges removed past the new arity */
	for (;i < ARR_LEN(*pOld_in)-1; i++) {
		edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
	/* reallocate the in array if the arity changed; slot 0 (the block)
	   is preserved */
	if (arity != ARR_LEN(*pOld_in) - 1) {
		ir_node * block = (*pOld_in)[0];
		*pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
		(*pOld_in)[0] = block;
	fix_backedges(irg->obst, node);
	memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);

ir_node *(get_irn_intra_n)(const ir_node *node, int n) {
	return _get_irn_intra_n(node, n);

ir_node *(get_irn_inter_n)(const ir_node *node, int n) {
	return _get_irn_inter_n(node, n);

/* dispatch pointer: selects intra- or inter-procedural predecessor access */
ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;

ir_node *(get_irn_n)(const ir_node *node, int n) {
	return _get_irn_n(node, n);

/* Sets the n-th predecessor of a node; n == -1 addresses the block
   (in[0]), data inputs are shifted by one. */
void set_irn_n(ir_node *node, int n, ir_node *in) {
	assert(node && node->kind == k_ir_node);
	assert(n < get_irn_arity(node));
	assert(in && in->kind == k_ir_node);
#ifdef INTERPROCEDURAL_VIEW
	if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
		/* Change block pred in both views! */
		node->in[n + 1] = in;
		assert(node->attr.filter.in_cg);
		node->attr.filter.in_cg[n + 1] = in;
	if (get_interprocedural_view()) { /* handle Filter and Block specially */
		if (get_irn_opcode(node) == iro_Filter) {
			assert(node->attr.filter.in_cg);
			node->attr.filter.in_cg[n + 1] = in;
		} else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
			node->attr.block.in_cg[n + 1] = in;
		/* else fall through */
#endif /* INTERPROCEDURAL_VIEW */
	hook_set_irn_n(node, n, in, node->in[n + 1]);
	/* Here, we rely on src and tgt being in the current ir graph */
	edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
	node->in[n + 1] = in;
/* Appends a new predecessor to a dynamic-arity node and returns its
   position.
   NOTE(review): local declarations (pos) and several closing braces /
   returns are not visible in this excerpt. */
int add_irn_n(ir_node *node, ir_node *in) {
	ir_graph *irg = get_irn_irg(node);
	assert(node->op->opar == oparity_dynamic);
	pos = ARR_LEN(node->in) - 1;
	ARR_APP1(ir_node *, node->in, in);
	edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
	hook_set_irn_n(node, pos, node->in[pos + 1], NULL);

/* Removes predecessor i of a Sync by moving the last predecessor into its
   slot and shrinking the in array. */
void del_Sync_n(ir_node *n, int i)
	int arity = get_Sync_n_preds(n);
	ir_node *last_pred = get_Sync_pred(n, arity - 1);
	set_Sync_pred(n, i, last_pred);
	edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
	ARR_SHRINKLEN(get_irn_in(n), arity);

int (get_irn_deps)(const ir_node *node) {
	return _get_irn_deps(node);

ir_node *(get_irn_dep)(const ir_node *node, int pos) {
	return _get_irn_dep(node, pos);

void (set_irn_dep)(ir_node *node, int pos, ir_node *dep) {
	_set_irn_dep(node, pos, dep);

/* Adds a dependency edge to a node; reuses a NULL slot if one exists. */
int add_irn_dep(ir_node *node, ir_node *dep) {
	/* DEP edges are only allowed in backend phase */
	assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
	if (node->deps == NULL) {
		node->deps = NEW_ARR_F(ir_node *, 1);
	/* scan for a free (NULL) slot or an already-present entry */
	for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
		if(node->deps[i] == NULL)
		if(node->deps[i] == dep)
	if (first_zero >= 0) {
		node->deps[first_zero] = dep;
	ARR_APP1(ir_node *, node->deps, dep);
	edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));

/* Copies all dependency edges of src to tgt. */
void add_irn_deps(ir_node *tgt, ir_node *src) {
	for (i = 0, n = get_irn_deps(src); i < n; ++i)
		add_irn_dep(tgt, get_irn_dep(src, i));
/* NOTE(review): closing braces of the functions below are not visible in
   this excerpt; parenthesized names prevent macro expansion. */

ir_mode *(get_irn_mode)(const ir_node *node) {
	return _get_irn_mode(node);

void (set_irn_mode)(ir_node *node, ir_mode *mode) {
	_set_irn_mode(node, mode);

/** Gets the string representation of the mode. */
const char *get_irn_modename(const ir_node *node) {
	return get_mode_name(node->mode);

/** Gets the ident of the node's mode. */
ident *get_irn_modeident(const ir_node *node) {
	return get_mode_ident(node->mode);

ir_op *(get_irn_op)(const ir_node *node) {
	return _get_irn_op(node);

/* should be private to the library: */
void (set_irn_op)(ir_node *node, ir_op *op) {
	_set_irn_op(node, op);

unsigned (get_irn_opcode)(const ir_node *node) {
	return _get_irn_opcode(node);

/* Returns the opcode name; Phi nodes with no operands report "Phi0". */
const char *get_irn_opname(const ir_node *node) {
	if (is_Phi0(node)) return "Phi0";
	return get_id_str(node->op->name);

ident *get_irn_opident(const ir_node *node) {
	return node->op->name;

ir_visited_t (get_irn_visited)(const ir_node *node) {
	return _get_irn_visited(node);

void (set_irn_visited)(ir_node *node, ir_visited_t visited) {
	_set_irn_visited(node, visited);

void (mark_irn_visited)(ir_node *node) {
	_mark_irn_visited(node);

int (irn_visited)(const ir_node *node) {
	return _irn_visited(node);

int (irn_visited_else_mark)(ir_node *node) {
	return _irn_visited_else_mark(node);

void (set_irn_link)(ir_node *node, void *link) {
	_set_irn_link(node, link);

void *(get_irn_link)(const ir_node *node) {
	return _get_irn_link(node);

op_pin_state (get_irn_pinned)(const ir_node *node) {
	return _get_irn_pinned(node);

op_pin_state (is_irn_pinned_in_irg) (const ir_node *node) {
	return _is_irn_pinned_in_irg(node);

/* Sets the pin state; only valid for ops with exception pinning or looser. */
void set_irn_pinned(ir_node *node, op_pin_state state) {
	/* due to optimization an opt may be turned into a Tuple */
	assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
	assert(state == op_pin_state_pinned || state == op_pin_state_floats);
	node->attr.except.pin_state = state;

/* Outputs a unique number for this node */
long get_irn_node_nr(const ir_node *node) {
	return node->node_nr;
/* Typed accessors for the per-opcode attribute union; each asserts the
   node's opcode before handing out the matching union member.
   NOTE(review): closing braces / some returns are not visible in this
   excerpt. */

const_attr *get_irn_const_attr(ir_node *node) {
	assert(is_Const(node));
	return &node->attr.con;

long get_irn_proj_attr(ir_node *node) {
	/* BEWARE: check for true Proj node here, no Filter */
	assert(node->op == op_Proj);
	return node->attr.proj;

alloc_attr *get_irn_alloc_attr(ir_node *node) {
	assert(is_Alloc(node));
	return &node->attr.alloc;

free_attr *get_irn_free_attr(ir_node *node) {
	assert(is_Free(node));
	return &node->attr.free;

symconst_attr *get_irn_symconst_attr(ir_node *node) {
	assert(is_SymConst(node));
	return &node->attr.symc;

call_attr *get_irn_call_attr(ir_node *node) {
	assert(is_Call(node));
	return &node->attr.call;

sel_attr *get_irn_sel_attr(ir_node *node) {
	assert(is_Sel(node));
	return &node->attr.sel;

phi_attr *get_irn_phi_attr(ir_node *node) {
	return &node->attr.phi;

block_attr *get_irn_block_attr(ir_node *node) {
	assert(is_Block(node));
	return &node->attr.block;

load_attr *get_irn_load_attr(ir_node *node) {
	assert(is_Load(node));
	return &node->attr.load;

store_attr *get_irn_store_attr(ir_node *node) {
	assert(is_Store(node));
	return &node->attr.store;

/* Exception attribute is shared by all ops that may raise. */
except_attr *get_irn_except_attr(ir_node *node) {
	assert(node->op == op_Div || node->op == op_Quot ||
	       node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
	return &node->attr.except;

divmod_attr *get_irn_divmod_attr(ir_node *node) {
	assert(node->op == op_Div || node->op == op_Quot ||
	       node->op == op_DivMod || node->op == op_Mod);
	return &node->attr.divmod;

builtin_attr *get_irn_builtin_attr(ir_node *node) {
	assert(is_Builtin(node));
	return &node->attr.builtin;

void *(get_irn_generic_attr)(ir_node *node) {
	assert(is_ir_node(node));
	return _get_irn_generic_attr(node);

const void *(get_irn_generic_attr_const)(const ir_node *node) {
	assert(is_ir_node(node));
	return _get_irn_generic_attr_const(node);

unsigned (get_irn_idx)(const ir_node *node) {
	assert(is_ir_node(node));
	return _get_irn_idx(node);

/* Returns the position of arg among node's predecessors (searched from the
   back); the not-found return is not visible in this excerpt. */
int get_irn_pred_pos(ir_node *node, ir_node *arg) {
	for (i = get_irn_arity(node) - 1; i >= 0; i--) {
		if (get_irn_n(node, i) == arg)

/** manipulate fields of individual nodes **/

/* this works for all except Block */
ir_node *get_nodes_block(const ir_node *node) {
	assert(node->op != op_Block);
	return get_irn_n(node, -1);

void set_nodes_block(ir_node *node, ir_node *block) {
	assert(node->op != op_Block);
	set_irn_n(node, -1, block);

/* this works for all except Block */
ir_node *get_nodes_MacroBlock(const ir_node *node) {
	assert(node->op != op_Block);
	return get_Block_MacroBlock(get_irn_n(node, -1));

/* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
 * from Start. If so returns frame type, else Null. */
ir_type *is_frame_pointer(const ir_node *n) {
	if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
		ir_node *start = get_Proj_pred(n);
		if (is_Start(start)) {
			return get_irg_frame_type(get_irn_irg(start));

/* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
 * from Start. If so returns tls type, else Null. */
ir_type *is_tls_pointer(const ir_node *n) {
	if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
		ir_node *start = get_Proj_pred(n);
		if (is_Start(start)) {
			return get_tls_type();
/* Block control-flow predecessor accessors.
   NOTE(review): closing braces / some returns are not visible in this
   excerpt. */

/* Returns the cfg predecessor array of a Block, skipping slot 0. */
ir_node **get_Block_cfgpred_arr(ir_node *node) {
	assert(is_Block(node));
	return (ir_node **)&(get_irn_in(node)[1]);

int (get_Block_n_cfgpreds)(const ir_node *node) {
	return _get_Block_n_cfgpreds(node);

ir_node *(get_Block_cfgpred)(const ir_node *node, int pos) {
	return _get_Block_cfgpred(node, pos);

void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
	assert(is_Block(node));
	set_irn_n(node, pos, pred);

/* Returns the position of pred among block's cfg predecessor blocks,
   searching from the back; not-found return not visible here. */
int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred) {
	for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
		if (get_Block_cfgpred_block(block, i) == pred)

ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos) {
	return _get_Block_cfgpred_block(node, pos);

int get_Block_matured(const ir_node *node) {
	assert(is_Block(node));
	return (int)node->attr.block.is_matured;

void set_Block_matured(ir_node *node, int matured) {
	assert(is_Block(node));
	node->attr.block.is_matured = matured;

ir_visited_t (get_Block_block_visited)(const ir_node *node) {
	return _get_Block_block_visited(node);

void (set_Block_block_visited)(ir_node *node, ir_visited_t visit) {
	_set_Block_block_visited(node, visit);

/* For this current_ir_graph must be set. */
void (mark_Block_block_visited)(ir_node *node) {
	_mark_Block_block_visited(node);

int (Block_block_visited)(const ir_node *node) {
	return _Block_block_visited(node);

ir_node *get_Block_graph_arr(ir_node *node, int pos) {
	assert(is_Block(node));
	return node->attr.block.graph_arr[pos+1];

void set_Block_graph_arr(ir_node *node, int pos, ir_node *value) {
	assert(is_Block(node));
	node->attr.block.graph_arr[pos+1] = value;

#ifdef INTERPROCEDURAL_VIEW
/* Sets the call-graph (inter-procedural) cfg predecessor array of a Block;
   reallocates if the arity changed. */
void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[]) {
	assert(is_Block(node));
	if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
		node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
		node->attr.block.in_cg[0] = NULL;
		node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
		/* Fix backedge array. fix_backedges() operates depending on
		   interprocedural_view. */
		int ipv = get_interprocedural_view();
		set_interprocedural_view(1);
		fix_backedges(current_ir_graph->obst, node);
		set_interprocedural_view(ipv);
	memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);

void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred) {
	assert(is_Block(node) && node->attr.block.in_cg &&
	       0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
	node->attr.block.in_cg[pos + 1] = pred;

ir_node **get_Block_cg_cfgpred_arr(ir_node *node) {
	assert(is_Block(node));
	return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;

int get_Block_cg_n_cfgpreds(const ir_node *node) {
	assert(is_Block(node));
	return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;

ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos) {
	assert(is_Block(node) && node->attr.block.in_cg);
	return node->attr.block.in_cg[pos + 1];

void remove_Block_cg_cfgpred_arr(ir_node *node) {
	assert(is_Block(node));
	node->attr.block.in_cg = NULL;
#endif /* INTERPROCEDURAL_VIEW */
/* Block liveness, extended-basic-block, macro block, entity and mark
   accessors.
   NOTE(review): closing braces / local declarations (res, mbh, entity,
   glob, nr) are not visible in this excerpt. */

ir_node *(set_Block_dead)(ir_node *block) {
	return _set_Block_dead(block);

int (is_Block_dead)(const ir_node *block) {
	return _is_Block_dead(block);

ir_extblk *get_Block_extbb(const ir_node *block) {
	assert(is_Block(block));
	res = block->attr.block.extblk;
	assert(res == NULL || is_ir_extbb(res));

void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
	assert(is_Block(block));
	assert(extblk == NULL || is_ir_extbb(extblk));
	block->attr.block.extblk = extblk;

/* Returns the macro block header of a block. */
ir_node *get_Block_MacroBlock(const ir_node *block) {
	assert(is_Block(block));
	mbh = get_irn_n(block, -1);
	/* once macro block header is respected by all optimizations,
	   this assert can be removed */

/* Sets the macro block header of a block. */
void set_Block_MacroBlock(ir_node *block, ir_node *mbh) {
	assert(is_Block(block));
	assert(is_Block(mbh));
	set_irn_n(block, -1, mbh);

/* returns the macro block header of a node. */
ir_node *get_irn_MacroBlock(const ir_node *n) {
	n = get_nodes_block(n);
	/* if the Block is Bad, do NOT try to get its MB, it will fail. */
	return get_Block_MacroBlock(n);

/* returns the graph of a Block. */
ir_graph *(get_Block_irg)(const ir_node *block) {
	return _get_Block_irg(block);

/* Creates (or returns the already existing) entity addressing this block;
   the entity is local, constant and compiler-generated. */
ir_entity *create_Block_entity(ir_node *block) {
	assert(is_Block(block));
	entity = block->attr.block.entity;
	if (entity == NULL) {
		glob = get_glob_type();
		entity = new_entity(glob, id_unique("block_%u"), get_code_type());
		set_entity_visibility(entity, ir_visibility_local);
		set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
		nr = get_irp_next_label_nr();
		set_entity_label(entity, nr);
		set_entity_compiler_generated(entity, 1);
	block->attr.block.entity = entity;

ir_entity *get_Block_entity(const ir_node *block) {
	assert(is_Block(block));
	return block->attr.block.entity;

void set_Block_entity(ir_node *block, ir_entity *entity)
	assert(is_Block(block));
	assert(get_entity_type(entity) == get_code_type());
	block->attr.block.entity = entity;

int has_Block_entity(const ir_node *block)
	return block->attr.block.entity != NULL;

ir_node *(get_Block_phis)(const ir_node *block) {
	return _get_Block_phis(block);

void (set_Block_phis)(ir_node *block, ir_node *phi) {
	_set_Block_phis(block, phi);

void (add_Block_phi)(ir_node *block, ir_node *phi) {
	_add_Block_phi(block, phi);

/* Get the Block mark (single bit). */
unsigned (get_Block_mark)(const ir_node *block) {
	return _get_Block_mark(block);

/* Set the Block mark (single bit). */
void (set_Block_mark)(ir_node *block, unsigned mark) {
	_set_Block_mark(block, mark);
/* End node keep-alive edge management.
   NOTE(review): loop variables, some closing braces and the body of
   add_End_keepalive are not visible in this excerpt. */

/* Returns the number of keep-alive inputs of an End node. */
int get_End_n_keepalives(const ir_node *end) {
	return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);

ir_node *get_End_keepalive(const ir_node *end, int pos) {
	return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);

void add_End_keepalive(ir_node *end, ir_node *ka) {

void set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
	set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);

/* Set new keep-alives */
void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
	ir_graph *irg = get_irn_irg(end);
	/* notify that edges are deleted */
	for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
		edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
	ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
	/* install the new keep-alives and announce the new edges */
	for (i = 0; i < n; ++i) {
		end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
		edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);

/* Set new keep-alives from old keep-alives, skipping irn */
void remove_End_keepalive(ir_node *end, ir_node *irn) {
	int n = get_End_n_keepalives(end);
	/* search for irn from the back */
	for (i = n -1; i >= 0; --i) {
		ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
	irg = get_irn_irg(end);
	/* remove the edge */
	edges_notify_edge(end, idx, NULL, irn, irg);
	/* exchange with the last one */
	ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
	edges_notify_edge(end, n - 1, NULL, old, irg);
	end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
	edges_notify_edge(end, idx, old, NULL, irg);
	/* now n - 1 keeps, 1 block input */
	ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);

/* remove Bads, NoMems and doublets from the keep-alive set */
void remove_End_Bads_and_doublets(ir_node *end) {
	int idx, n = get_End_n_keepalives(end);
	irg = get_irn_irg(end);
	pset_new_init(&keeps);
	/* walk backwards so the swap-with-last trick keeps indices valid */
	for (idx = n - 1; idx >= 0; --idx) {
		ir_node *ka = get_End_keepalive(end, idx);
		if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
			/* remove the edge */
			edges_notify_edge(end, idx, NULL, ka, irg);
			/* exchange with the last one */
			ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
			edges_notify_edge(end, n - 1, NULL, old, irg);
			end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
			edges_notify_edge(end, idx, old, NULL, irg);
		pset_new_insert(&keeps, ka);
	/* n keeps, 1 block input */
	ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
	pset_new_destroy(&keeps);

void free_End(ir_node *end) {
	end->in = NULL; /* @@@ make sure we get an error if we use the
	                   in array afterwards ... */
/* IJmp, Cond and Return accessors.
   NOTE(review): the return-type lines of several functions below and the
   closing braces are not visible in this excerpt. */

/* Return the target address of an IJmp */
ir_node *get_IJmp_target(const ir_node *ijmp) {
	assert(is_IJmp(ijmp));
	return get_irn_n(ijmp, 0);

/** Sets the target address of an IJmp */
void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
	assert(is_IJmp(ijmp));
	set_irn_n(ijmp, 0, tgt);

get_Cond_selector(const ir_node *node) {
	assert(is_Cond(node));
	return get_irn_n(node, 0);

set_Cond_selector(ir_node *node, ir_node *selector) {
	assert(is_Cond(node));
	set_irn_n(node, 0, selector);

get_Cond_default_proj(const ir_node *node) {
	assert(is_Cond(node));
	return node->attr.cond.default_proj;

void set_Cond_default_proj(ir_node *node, long defproj) {
	assert(is_Cond(node));
	node->attr.cond.default_proj = defproj;

get_Return_mem(const ir_node *node) {
	assert(is_Return(node));
	return get_irn_n(node, 0);

set_Return_mem(ir_node *node, ir_node *mem) {
	assert(is_Return(node));
	set_irn_n(node, 0, mem);

get_Return_n_ress(const ir_node *node) {
	assert(is_Return(node));
	return (get_irn_arity(node) - RETURN_RESULT_OFFSET);

get_Return_res_arr(ir_node *node) {
	assert(is_Return(node));
	if (get_Return_n_ress(node) > 0)
		return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);

set_Return_n_res(ir_node *node, int results) {
	assert(is_Return(node));

get_Return_res(const ir_node *node, int pos) {
	assert(is_Return(node));
	assert(get_Return_n_ress(node) > pos);
	return get_irn_n(node, pos + RETURN_RESULT_OFFSET);

set_Return_res(ir_node *node, int pos, ir_node *res){
	assert(is_Return(node));
	set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
/* Const and SymConst accessors.
   NOTE(review): return-type lines of several functions below and the
   closing braces are not visible in this excerpt. */

tarval *(get_Const_tarval)(const ir_node *node) {
	return _get_Const_tarval(node);

set_Const_tarval(ir_node *node, tarval *con) {
	assert(is_Const(node));
	node->attr.con.tv = con;

int (is_Const_null)(const ir_node *node) {
	return _is_Const_null(node);

int (is_Const_one)(const ir_node *node) {
	return _is_Const_one(node);

int (is_Const_all_one)(const ir_node *node) {
	return _is_Const_all_one(node);

/* The source language type. Must be an atomic type. Mode of type must
   be mode of node. For tarvals from entities type must be pointer to
   entity type. */
get_Const_type(ir_node *node) {
	assert(is_Const(node));
	return node->attr.con.tp;

set_Const_type(ir_node *node, ir_type *tp) {
	assert(is_Const(node));
	if (tp != firm_unknown_type) {
		assert(is_atomic_type(tp));
		assert(get_type_mode(tp) == get_irn_mode(node));
	node->attr.con.tp = tp;

get_SymConst_kind(const ir_node *node) {
	assert(is_SymConst(node));
	return node->attr.symc.kind;

set_SymConst_kind(ir_node *node, symconst_kind kind) {
	assert(is_SymConst(node));
	node->attr.symc.kind = kind;

get_SymConst_type(const ir_node *node) {
	/* the cast here is annoying, but we have to compensate for
	   the const qualifier of the parameter */
	ir_node *irn = (ir_node *)node;
	assert(is_SymConst(node) &&
	       (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
	return irn->attr.symc.sym.type_p;

set_SymConst_type(ir_node *node, ir_type *tp) {
	assert(is_SymConst(node) &&
	       (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
	node->attr.symc.sym.type_p = tp;

get_SymConst_name(const ir_node *node) {
	assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
	return node->attr.symc.sym.ident_p;

set_SymConst_name(ir_node *node, ident *name) {
	assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
	node->attr.symc.sym.ident_p = name;

/* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
ir_entity *get_SymConst_entity(const ir_node *node) {
	assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
	return node->attr.symc.sym.entity_p;

void set_SymConst_entity(ir_node *node, ir_entity *ent) {
	assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
	node->attr.symc.sym.entity_p = ent;

ir_enum_const *get_SymConst_enum(const ir_node *node) {
	assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
	return node->attr.symc.sym.enum_p;

void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
	assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
	node->attr.symc.sym.enum_p = ec;

union symconst_symbol
get_SymConst_symbol(const ir_node *node) {
	assert(is_SymConst(node));
	return node->attr.symc.sym;

set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
	assert(is_SymConst(node));
	node->attr.symc.sym = sym;

get_SymConst_value_type(ir_node *node) {
	assert(is_SymConst(node));
	return node->attr.symc.tp;

set_SymConst_value_type(ir_node *node, ir_type *tp) {
	assert(is_SymConst(node));
	node->attr.symc.tp = tp;
/* Sel accessors: in[0]=mem, in[1]=ptr, indices from SEL_INDEX_OFFSET on.
   NOTE(review): return-type lines and closing braces are not visible in
   this excerpt. */

get_Sel_mem(const ir_node *node) {
	assert(is_Sel(node));
	return get_irn_n(node, 0);

set_Sel_mem(ir_node *node, ir_node *mem) {
	assert(is_Sel(node));
	set_irn_n(node, 0, mem);

get_Sel_ptr(const ir_node *node) {
	assert(is_Sel(node));
	return get_irn_n(node, 1);

set_Sel_ptr(ir_node *node, ir_node *ptr) {
	assert(is_Sel(node));
	set_irn_n(node, 1, ptr);

get_Sel_n_indexs(const ir_node *node) {
	assert(is_Sel(node));
	return (get_irn_arity(node) - SEL_INDEX_OFFSET);

get_Sel_index_arr(ir_node *node) {
	assert(is_Sel(node));
	if (get_Sel_n_indexs(node) > 0)
		return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];

get_Sel_index(const ir_node *node, int pos) {
	assert(is_Sel(node));
	return get_irn_n(node, pos + SEL_INDEX_OFFSET);

set_Sel_index(ir_node *node, int pos, ir_node *index) {
	assert(is_Sel(node));
	set_irn_n(node, pos + SEL_INDEX_OFFSET, index);

get_Sel_entity(const ir_node *node) {
	assert(is_Sel(node));
	return node->attr.sel.entity;

/* need a version without const to prevent warning */
static ir_entity *_get_Sel_entity(ir_node *node) {
	return get_Sel_entity(node);

set_Sel_entity(ir_node *node, ir_entity *ent) {
	assert(is_Sel(node));
	node->attr.sel.entity = ent;

/* For unary and binary arithmetic operations the access to the
   operands can be factored out. Left is the first, right the
   second arithmetic value as listed in tech report 0999-33.
   unops are: Minus, Abs, Not, Conv, Cast
   binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
   Shr, Shrs, Rotate, Cmp */
1296 get_Call_mem(const ir_node *node) {
1297 assert(is_Call(node));
1298 return get_irn_n(node, 0);
1302 set_Call_mem(ir_node *node, ir_node *mem) {
1303 assert(is_Call(node));
1304 set_irn_n(node, 0, mem);
1308 get_Call_ptr(const ir_node *node) {
1309 assert(is_Call(node));
1310 return get_irn_n(node, 1);
1314 set_Call_ptr(ir_node *node, ir_node *ptr) {
1315 assert(is_Call(node));
1316 set_irn_n(node, 1, ptr);
1320 get_Call_param_arr(ir_node *node) {
1321 assert(is_Call(node));
1322 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1326 get_Call_n_params(const ir_node *node) {
1327 assert(is_Call(node));
1328 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1332 get_Call_param(const ir_node *node, int pos) {
1333 assert(is_Call(node));
1334 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1338 set_Call_param(ir_node *node, int pos, ir_node *param) {
1339 assert(is_Call(node));
1340 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1344 get_Call_type(ir_node *node) {
1345 assert(is_Call(node));
1346 return node->attr.call.type;
1350 set_Call_type(ir_node *node, ir_type *tp) {
1351 assert(is_Call(node));
1352 assert((get_unknown_type() == tp) || is_Method_type(tp));
1353 node->attr.call.type = tp;
1357 get_Call_tail_call(const ir_node *node) {
1358 assert(is_Call(node));
1359 return node->attr.call.tail_call;
1363 set_Call_tail_call(ir_node *node, unsigned tail_call) {
1364 assert(is_Call(node));
1365 node->attr.call.tail_call = tail_call != 0;
/* Returns the memory input (in[0]) of a Builtin node. */
1369 get_Builtin_mem(const ir_node *node) {
1370 assert(is_Builtin(node));
1371 return get_irn_n(node, 0);
/* Sets the memory input (in[0]) of a Builtin node.
 * NOTE(review): the name "set_Builin_mem" is misspelled (missing 't',
 * presumably meant set_Builtin_mem). Renaming would break external
 * callers and the public header, so it is only flagged here -- fix the
 * declaration and all call sites together. */
1375 set_Builin_mem(ir_node *node, ir_node *mem) {
1376 assert(is_Builtin(node));
1377 set_irn_n(node, 0, mem);
1381 get_Builtin_kind(const ir_node *node) {
1382 assert(is_Builtin(node));
1383 return node->attr.builtin.kind;
1387 set_Builtin_kind(ir_node *node, ir_builtin_kind kind) {
1388 assert(is_Builtin(node));
1389 node->attr.builtin.kind = kind;
1393 get_Builtin_param_arr(ir_node *node) {
1394 assert(is_Builtin(node));
1395 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1399 get_Builtin_n_params(const ir_node *node) {
1400 assert(is_Builtin(node));
1401 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1405 get_Builtin_param(const ir_node *node, int pos) {
1406 assert(is_Builtin(node));
1407 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1411 set_Builtin_param(ir_node *node, int pos, ir_node *param) {
1412 assert(is_Builtin(node));
1413 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1417 get_Builtin_type(ir_node *node) {
1418 assert(is_Builtin(node));
1419 return node->attr.builtin.type;
1423 set_Builtin_type(ir_node *node, ir_type *tp) {
1424 assert(is_Builtin(node));
1425 assert((get_unknown_type() == tp) || is_Method_type(tp));
1426 node->attr.builtin.type = tp;
1429 /* Returns a human readable string for the ir_builtin_kind. */
1430 const char *get_builtin_kind_name(ir_builtin_kind kind) {
1431 #define X(a) case a: return #a;
1434 X(ir_bk_debugbreak);
1435 X(ir_bk_return_address);
1436 X(ir_bk_frame_address);
1446 X(ir_bk_inner_trampoline);
1453 int Call_has_callees(const ir_node *node) {
1454 assert(is_Call(node));
1455 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1456 (node->attr.call.callee_arr != NULL));
1459 int get_Call_n_callees(const ir_node *node) {
1460 assert(is_Call(node) && node->attr.call.callee_arr);
1461 return ARR_LEN(node->attr.call.callee_arr);
1464 ir_entity *get_Call_callee(const ir_node *node, int pos) {
1465 assert(pos >= 0 && pos < get_Call_n_callees(node));
1466 return node->attr.call.callee_arr[pos];
/* Installs the callee array (result of callee analysis) on a Call node.
 * Copies the n entities from arr; (re)allocates the attribute array when
 * it is absent or has the wrong length. */
1469 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr) {
1470 assert(is_Call(node));
1471 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
/* NOTE(review): allocates on the global current_ir_graph's obstack, not
 * get_irn_irg(node)'s -- presumably they coincide at every call site;
 * confirm, since a mismatch would tie the array to the wrong graph. */
1472 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1474 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1477 void remove_Call_callee_arr(ir_node *node) {
1478 assert(is_Call(node));
1479 node->attr.call.callee_arr = NULL;
1482 ir_node *get_CallBegin_ptr(const ir_node *node) {
1483 assert(is_CallBegin(node));
1484 return get_irn_n(node, 0);
1487 void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
1488 assert(is_CallBegin(node));
1489 set_irn_n(node, 0, ptr);
1492 ir_node *get_CallBegin_call(const ir_node *node) {
1493 assert(is_CallBegin(node));
1494 return node->attr.callbegin.call;
1497 void set_CallBegin_call(ir_node *node, ir_node *call) {
1498 assert(is_CallBegin(node));
1499 node->attr.callbegin.call = call;
1503 * Returns non-zero if a Call is surely a self-recursive Call.
1504 * Beware: if this functions returns 0, the call might be self-recursive!
1506 int is_self_recursive_Call(const ir_node *call) {
1507 const ir_node *callee = get_Call_ptr(call);
1509 if (is_SymConst_addr_ent(callee)) {
1510 const ir_entity *ent = get_SymConst_entity(callee);
1511 const ir_graph *irg = get_entity_irg(ent);
1512 if (irg == get_irn_irg(call))
1519 ir_node * get_##OP##_left(const ir_node *node) { \
1520 assert(is_##OP(node)); \
1521 return get_irn_n(node, node->op->op_index); \
1523 void set_##OP##_left(ir_node *node, ir_node *left) { \
1524 assert(is_##OP(node)); \
1525 set_irn_n(node, node->op->op_index, left); \
1527 ir_node *get_##OP##_right(const ir_node *node) { \
1528 assert(is_##OP(node)); \
1529 return get_irn_n(node, node->op->op_index + 1); \
1531 void set_##OP##_right(ir_node *node, ir_node *right) { \
1532 assert(is_##OP(node)); \
1533 set_irn_n(node, node->op->op_index + 1, right); \
1537 ir_node *get_##OP##_op(const ir_node *node) { \
1538 assert(is_##OP(node)); \
1539 return get_irn_n(node, node->op->op_index); \
1541 void set_##OP##_op(ir_node *node, ir_node *op) { \
1542 assert(is_##OP(node)); \
1543 set_irn_n(node, node->op->op_index, op); \
1546 #define BINOP_MEM(OP) \
1550 get_##OP##_mem(const ir_node *node) { \
1551 assert(is_##OP(node)); \
1552 return get_irn_n(node, 0); \
1556 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1557 assert(is_##OP(node)); \
1558 set_irn_n(node, 0, mem); \
1564 ir_mode *get_##OP##_resmode(const ir_node *node) { \
1565 assert(is_##OP(node)); \
1566 return node->attr.divmod.resmode; \
1569 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
1570 assert(is_##OP(node)); \
1571 node->attr.divmod.resmode = mode; \
1599 int get_Div_no_remainder(const ir_node *node) {
1600 assert(is_Div(node));
1601 return node->attr.divmod.no_remainder;
1604 void set_Div_no_remainder(ir_node *node, int no_remainder) {
1605 assert(is_Div(node));
1606 node->attr.divmod.no_remainder = no_remainder;
1609 int get_Conv_strict(const ir_node *node) {
1610 assert(is_Conv(node));
1611 return node->attr.conv.strict;
1614 void set_Conv_strict(ir_node *node, int strict_flag) {
1615 assert(is_Conv(node));
1616 node->attr.conv.strict = (char)strict_flag;
1620 get_Cast_type(ir_node *node) {
1621 assert(is_Cast(node));
1622 return node->attr.cast.type;
1626 set_Cast_type(ir_node *node, ir_type *to_tp) {
1627 assert(is_Cast(node));
1628 node->attr.cast.type = to_tp;
1632 /* Checks for upcast.
1634 * Returns true if the Cast node casts a class type to a super type.
1636 int is_Cast_upcast(ir_node *node) {
1637 ir_type *totype = get_Cast_type(node);
1638 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1640 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1643 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1644 totype = get_pointer_points_to_type(totype);
1645 fromtype = get_pointer_points_to_type(fromtype);
1650 if (!is_Class_type(totype)) return 0;
1651 return is_SubClass_of(fromtype, totype);
1654 /* Checks for downcast.
1656 * Returns true if the Cast node casts a class type to a sub type.
1658 int is_Cast_downcast(ir_node *node) {
1659 ir_type *totype = get_Cast_type(node);
1660 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1662 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1665 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1666 totype = get_pointer_points_to_type(totype);
1667 fromtype = get_pointer_points_to_type(fromtype);
1672 if (!is_Class_type(totype)) return 0;
1673 return is_SubClass_of(totype, fromtype);
1677 (is_unop)(const ir_node *node) {
1678 return _is_unop(node);
1682 get_unop_op(const ir_node *node) {
1683 if (node->op->opar == oparity_unary)
1684 return get_irn_n(node, node->op->op_index);
1686 assert(node->op->opar == oparity_unary);
1691 set_unop_op(ir_node *node, ir_node *op) {
1692 if (node->op->opar == oparity_unary)
1693 set_irn_n(node, node->op->op_index, op);
1695 assert(node->op->opar == oparity_unary);
1699 (is_binop)(const ir_node *node) {
1700 return _is_binop(node);
1704 get_binop_left(const ir_node *node) {
1705 assert(node->op->opar == oparity_binary);
1706 return get_irn_n(node, node->op->op_index);
1710 set_binop_left(ir_node *node, ir_node *left) {
1711 assert(node->op->opar == oparity_binary);
1712 set_irn_n(node, node->op->op_index, left);
1716 get_binop_right(const ir_node *node) {
1717 assert(node->op->opar == oparity_binary);
1718 return get_irn_n(node, node->op->op_index + 1);
1722 set_binop_right(ir_node *node, ir_node *right) {
1723 assert(node->op->opar == oparity_binary);
1724 set_irn_n(node, node->op->op_index + 1, right);
1727 int is_Phi0(const ir_node *n) {
1730 return ((get_irn_op(n) == op_Phi) &&
1731 (get_irn_arity(n) == 0) &&
1732 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1736 get_Phi_preds_arr(ir_node *node) {
1737 assert(node->op == op_Phi);
1738 return (ir_node **)&(get_irn_in(node)[1]);
1742 get_Phi_n_preds(const ir_node *node) {
1743 assert(is_Phi(node) || is_Phi0(node));
1744 return (get_irn_arity(node));
1748 void set_Phi_n_preds(ir_node *node, int n_preds) {
1749 assert(node->op == op_Phi);
1754 get_Phi_pred(const ir_node *node, int pos) {
1755 assert(is_Phi(node) || is_Phi0(node));
1756 return get_irn_n(node, pos);
1760 set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
1761 assert(is_Phi(node) || is_Phi0(node));
1762 set_irn_n(node, pos, pred);
1765 ir_node *(get_Phi_next)(const ir_node *phi) {
1766 return _get_Phi_next(phi);
1769 void (set_Phi_next)(ir_node *phi, ir_node *next) {
1770 _set_Phi_next(phi, next);
1773 int is_memop(const ir_node *node) {
1774 ir_opcode code = get_irn_opcode(node);
1775 return (code == iro_Load || code == iro_Store);
1778 ir_node *get_memop_mem(const ir_node *node) {
1779 assert(is_memop(node));
1780 return get_irn_n(node, 0);
1783 void set_memop_mem(ir_node *node, ir_node *mem) {
1784 assert(is_memop(node));
1785 set_irn_n(node, 0, mem);
1788 ir_node *get_memop_ptr(const ir_node *node) {
1789 assert(is_memop(node));
1790 return get_irn_n(node, 1);
1793 void set_memop_ptr(ir_node *node, ir_node *ptr) {
1794 assert(is_memop(node));
1795 set_irn_n(node, 1, ptr);
1799 get_Load_mem(const ir_node *node) {
1800 assert(is_Load(node));
1801 return get_irn_n(node, 0);
1805 set_Load_mem(ir_node *node, ir_node *mem) {
1806 assert(is_Load(node));
1807 set_irn_n(node, 0, mem);
1811 get_Load_ptr(const ir_node *node) {
1812 assert(is_Load(node));
1813 return get_irn_n(node, 1);
1817 set_Load_ptr(ir_node *node, ir_node *ptr) {
1818 assert(is_Load(node));
1819 set_irn_n(node, 1, ptr);
1823 get_Load_mode(const ir_node *node) {
1824 assert(is_Load(node));
1825 return node->attr.load.mode;
1829 set_Load_mode(ir_node *node, ir_mode *mode) {
1830 assert(is_Load(node));
1831 node->attr.load.mode = mode;
1835 get_Load_volatility(const ir_node *node) {
1836 assert(is_Load(node));
1837 return node->attr.load.volatility;
1841 set_Load_volatility(ir_node *node, ir_volatility volatility) {
1842 assert(is_Load(node));
1843 node->attr.load.volatility = volatility;
1847 get_Load_align(const ir_node *node) {
1848 assert(is_Load(node));
1849 return node->attr.load.aligned;
1853 set_Load_align(ir_node *node, ir_align align) {
1854 assert(is_Load(node));
1855 node->attr.load.aligned = align;
1860 get_Store_mem(const ir_node *node) {
1861 assert(is_Store(node));
1862 return get_irn_n(node, 0);
1866 set_Store_mem(ir_node *node, ir_node *mem) {
1867 assert(is_Store(node));
1868 set_irn_n(node, 0, mem);
1872 get_Store_ptr(const ir_node *node) {
1873 assert(is_Store(node));
1874 return get_irn_n(node, 1);
1878 set_Store_ptr(ir_node *node, ir_node *ptr) {
1879 assert(is_Store(node));
1880 set_irn_n(node, 1, ptr);
1884 get_Store_value(const ir_node *node) {
1885 assert(is_Store(node));
1886 return get_irn_n(node, 2);
1890 set_Store_value(ir_node *node, ir_node *value) {
1891 assert(is_Store(node));
1892 set_irn_n(node, 2, value);
1896 get_Store_volatility(const ir_node *node) {
1897 assert(is_Store(node));
1898 return node->attr.store.volatility;
1902 set_Store_volatility(ir_node *node, ir_volatility volatility) {
1903 assert(is_Store(node));
1904 node->attr.store.volatility = volatility;
1908 get_Store_align(const ir_node *node) {
1909 assert(is_Store(node));
1910 return node->attr.store.aligned;
1914 set_Store_align(ir_node *node, ir_align align) {
1915 assert(is_Store(node));
1916 node->attr.store.aligned = align;
1921 get_Alloc_mem(const ir_node *node) {
1922 assert(is_Alloc(node));
1923 return get_irn_n(node, 0);
1927 set_Alloc_mem(ir_node *node, ir_node *mem) {
1928 assert(is_Alloc(node));
1929 set_irn_n(node, 0, mem);
1933 get_Alloc_size(const ir_node *node) {
1934 assert(is_Alloc(node));
1935 return get_irn_n(node, 1);
1939 set_Alloc_size(ir_node *node, ir_node *size) {
1940 assert(is_Alloc(node));
1941 set_irn_n(node, 1, size);
1945 get_Alloc_type(ir_node *node) {
1946 assert(is_Alloc(node));
1947 return node->attr.alloc.type;
1951 set_Alloc_type(ir_node *node, ir_type *tp) {
1952 assert(is_Alloc(node));
1953 node->attr.alloc.type = tp;
1957 get_Alloc_where(const ir_node *node) {
1958 assert(is_Alloc(node));
1959 return node->attr.alloc.where;
1963 set_Alloc_where(ir_node *node, ir_where_alloc where) {
1964 assert(is_Alloc(node));
1965 node->attr.alloc.where = where;
1970 get_Free_mem(const ir_node *node) {
1971 assert(is_Free(node));
1972 return get_irn_n(node, 0);
1976 set_Free_mem(ir_node *node, ir_node *mem) {
1977 assert(is_Free(node));
1978 set_irn_n(node, 0, mem);
1982 get_Free_ptr(const ir_node *node) {
1983 assert(is_Free(node));
1984 return get_irn_n(node, 1);
1988 set_Free_ptr(ir_node *node, ir_node *ptr) {
1989 assert(is_Free(node));
1990 set_irn_n(node, 1, ptr);
1994 get_Free_size(const ir_node *node) {
1995 assert(is_Free(node));
1996 return get_irn_n(node, 2);
2000 set_Free_size(ir_node *node, ir_node *size) {
2001 assert(is_Free(node));
2002 set_irn_n(node, 2, size);
2006 get_Free_type(ir_node *node) {
2007 assert(is_Free(node));
2008 return node->attr.free.type;
2012 set_Free_type(ir_node *node, ir_type *tp) {
2013 assert(is_Free(node));
2014 node->attr.free.type = tp;
2018 get_Free_where(const ir_node *node) {
2019 assert(is_Free(node));
2020 return node->attr.free.where;
2024 set_Free_where(ir_node *node, ir_where_alloc where) {
2025 assert(is_Free(node));
2026 node->attr.free.where = where;
2029 ir_node **get_Sync_preds_arr(ir_node *node) {
2030 assert(is_Sync(node));
2031 return (ir_node **)&(get_irn_in(node)[1]);
2034 int get_Sync_n_preds(const ir_node *node) {
2035 assert(is_Sync(node));
2036 return (get_irn_arity(node));
2040 void set_Sync_n_preds(ir_node *node, int n_preds) {
2041 assert(is_Sync(node));
2045 ir_node *get_Sync_pred(const ir_node *node, int pos) {
2046 assert(is_Sync(node));
2047 return get_irn_n(node, pos);
2050 void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
2051 assert(is_Sync(node));
2052 set_irn_n(node, pos, pred);
2055 /* Add a new Sync predecessor */
2056 void add_Sync_pred(ir_node *node, ir_node *pred) {
2057 assert(is_Sync(node));
2058 add_irn_n(node, pred);
2061 /* Returns the source language type of a Proj node. */
2062 ir_type *get_Proj_type(ir_node *n) {
2063 ir_type *tp = firm_unknown_type;
2064 ir_node *pred = get_Proj_pred(n);
2066 switch (get_irn_opcode(pred)) {
2069 /* Deal with Start / Call here: we need to know the Proj Nr. */
2070 assert(get_irn_mode(pred) == mode_T);
2071 pred_pred = get_Proj_pred(pred);
2073 if (is_Start(pred_pred)) {
2074 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
2075 tp = get_method_param_type(mtp, get_Proj_proj(n));
2076 } else if (is_Call(pred_pred)) {
2077 ir_type *mtp = get_Call_type(pred_pred);
2078 tp = get_method_res_type(mtp, get_Proj_proj(n));
2081 case iro_Start: break;
2082 case iro_Call: break;
2084 ir_node *a = get_Load_ptr(pred);
2086 tp = get_entity_type(get_Sel_entity(a));
2095 get_Proj_pred(const ir_node *node) {
2096 assert(is_Proj(node));
2097 return get_irn_n(node, 0);
2101 set_Proj_pred(ir_node *node, ir_node *pred) {
2102 assert(is_Proj(node));
2103 set_irn_n(node, 0, pred);
2107 get_Proj_proj(const ir_node *node) {
2108 #ifdef INTERPROCEDURAL_VIEW
2109 ir_opcode code = get_irn_opcode(node);
2111 if (code == iro_Proj) {
2112 return node->attr.proj;
2115 assert(code == iro_Filter);
2116 return node->attr.filter.proj;
2119 assert(is_Proj(node));
2120 return node->attr.proj;
2121 #endif /* INTERPROCEDURAL_VIEW */
2125 set_Proj_proj(ir_node *node, long proj) {
2126 #ifdef INTERPROCEDURAL_VIEW
2127 ir_opcode code = get_irn_opcode(node);
2129 if (code == iro_Proj) {
2130 node->attr.proj = proj;
2133 assert(code == iro_Filter);
2134 node->attr.filter.proj = proj;
2137 assert(is_Proj(node));
2138 node->attr.proj = proj;
2139 #endif /* INTERPROCEDURAL_VIEW */
2142 /* Returns non-zero if a node is a routine parameter. */
2143 int (is_arg_Proj)(const ir_node *node) {
2144 return _is_arg_Proj(node);
2148 get_Tuple_preds_arr(ir_node *node) {
2149 assert(is_Tuple(node));
2150 return (ir_node **)&(get_irn_in(node)[1]);
2154 get_Tuple_n_preds(const ir_node *node) {
2155 assert(is_Tuple(node));
2156 return get_irn_arity(node);
2161 set_Tuple_n_preds(ir_node *node, int n_preds) {
2162 assert(is_Tuple(node));
2167 get_Tuple_pred(const ir_node *node, int pos) {
2168 assert(is_Tuple(node));
2169 return get_irn_n(node, pos);
2173 set_Tuple_pred(ir_node *node, int pos, ir_node *pred) {
2174 assert(is_Tuple(node));
2175 set_irn_n(node, pos, pred);
2179 get_Id_pred(const ir_node *node) {
2180 assert(is_Id(node));
2181 return get_irn_n(node, 0);
2185 set_Id_pred(ir_node *node, ir_node *pred) {
2186 assert(is_Id(node));
2187 set_irn_n(node, 0, pred);
2190 ir_node *get_Confirm_value(const ir_node *node) {
2191 assert(is_Confirm(node));
2192 return get_irn_n(node, 0);
2195 void set_Confirm_value(ir_node *node, ir_node *value) {
2196 assert(is_Confirm(node));
2197 set_irn_n(node, 0, value);
2200 ir_node *get_Confirm_bound(const ir_node *node) {
2201 assert(is_Confirm(node));
2202 return get_irn_n(node, 1);
2205 void set_Confirm_bound(ir_node *node, ir_node *bound) {
2206 assert(is_Confirm(node));
2207 set_irn_n(node, 0, bound);
2210 pn_Cmp get_Confirm_cmp(const ir_node *node) {
2211 assert(is_Confirm(node));
2212 return node->attr.confirm.cmp;
2215 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
2216 assert(is_Confirm(node));
2217 node->attr.confirm.cmp = cmp;
2221 get_Filter_pred(ir_node *node) {
2222 assert(is_Filter(node));
2227 set_Filter_pred(ir_node *node, ir_node *pred) {
2228 assert(is_Filter(node));
2233 get_Filter_proj(ir_node *node) {
2234 assert(is_Filter(node));
2235 return node->attr.filter.proj;
2239 set_Filter_proj(ir_node *node, long proj) {
2240 assert(is_Filter(node));
2241 node->attr.filter.proj = proj;
2244 /* Don't use get_irn_arity, get_irn_n in implementation as access
2245 shall work independent of view!!! */
2246 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in) {
2247 assert(is_Filter(node));
2248 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2249 ir_graph *irg = get_irn_irg(node);
2250 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2251 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
2252 node->attr.filter.in_cg[0] = node->in[0];
2254 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2257 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2258 assert(is_Filter(node) && node->attr.filter.in_cg &&
2259 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2260 node->attr.filter.in_cg[pos + 1] = pred;
2263 int get_Filter_n_cg_preds(ir_node *node) {
2264 assert(is_Filter(node) && node->attr.filter.in_cg);
2265 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2268 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2270 assert(is_Filter(node) && node->attr.filter.in_cg &&
2272 arity = ARR_LEN(node->attr.filter.in_cg);
2273 assert(pos < arity - 1);
2274 return node->attr.filter.in_cg[pos + 1];
/* Accessors for the Mux node: in[1] = sel, in[2] = false value,
 * in[3] = true value (in[0] is the block, as usual). */
2278 ir_node *get_Mux_sel(const ir_node *node) {
2279 assert(is_Mux(node));
2283 void set_Mux_sel(ir_node *node, ir_node *sel) {
2284 assert(is_Mux(node));
2288 ir_node *get_Mux_false(const ir_node *node) {
2289 assert(is_Mux(node));
/* NOTE(review): the two setters below poke node->in[] directly instead
 * of going through set_irn_n(); this bypasses out-edge maintenance
 * (iredges) and hook notification. Presumably intentional for speed or
 * historical reasons -- confirm before relying on edge info after a
 * set_Mux_false/set_Mux_true call. */
2293 void set_Mux_false(ir_node *node, ir_node *ir_false) {
2294 assert(is_Mux(node));
2295 node->in[2] = ir_false;
2298 ir_node *get_Mux_true(const ir_node *node) {
2299 assert(is_Mux(node));
2303 void set_Mux_true(ir_node *node, ir_node *ir_true) {
2304 assert(is_Mux(node));
2305 node->in[3] = ir_true;
2309 ir_node *get_CopyB_mem(const ir_node *node) {
2310 assert(is_CopyB(node));
2311 return get_irn_n(node, 0);
2314 void set_CopyB_mem(ir_node *node, ir_node *mem) {
2315 assert(node->op == op_CopyB);
2316 set_irn_n(node, 0, mem);
2319 ir_node *get_CopyB_dst(const ir_node *node) {
2320 assert(is_CopyB(node));
2321 return get_irn_n(node, 1);
2324 void set_CopyB_dst(ir_node *node, ir_node *dst) {
2325 assert(is_CopyB(node));
2326 set_irn_n(node, 1, dst);
2329 ir_node *get_CopyB_src(const ir_node *node) {
2330 assert(is_CopyB(node));
2331 return get_irn_n(node, 2);
2334 void set_CopyB_src(ir_node *node, ir_node *src) {
2335 assert(is_CopyB(node));
2336 set_irn_n(node, 2, src);
2339 ir_type *get_CopyB_type(ir_node *node) {
2340 assert(is_CopyB(node));
2341 return node->attr.copyb.type;
2344 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2345 assert(is_CopyB(node) && data_type);
2346 node->attr.copyb.type = data_type;
2351 get_InstOf_type(ir_node *node) {
2352 assert(node->op == op_InstOf);
2353 return node->attr.instof.type;
2357 set_InstOf_type(ir_node *node, ir_type *type) {
2358 assert(node->op == op_InstOf);
2359 node->attr.instof.type = type;
2363 get_InstOf_store(const ir_node *node) {
2364 assert(node->op == op_InstOf);
2365 return get_irn_n(node, 0);
2369 set_InstOf_store(ir_node *node, ir_node *obj) {
2370 assert(node->op == op_InstOf);
2371 set_irn_n(node, 0, obj);
2375 get_InstOf_obj(const ir_node *node) {
2376 assert(node->op == op_InstOf);
2377 return get_irn_n(node, 1);
2381 set_InstOf_obj(ir_node *node, ir_node *obj) {
2382 assert(node->op == op_InstOf);
2383 set_irn_n(node, 1, obj);
2386 /* Returns the memory input of a Raise operation. */
2388 get_Raise_mem(const ir_node *node) {
2389 assert(is_Raise(node));
2390 return get_irn_n(node, 0);
2394 set_Raise_mem(ir_node *node, ir_node *mem) {
2395 assert(is_Raise(node));
2396 set_irn_n(node, 0, mem);
2400 get_Raise_exo_ptr(const ir_node *node) {
2401 assert(is_Raise(node));
2402 return get_irn_n(node, 1);
2406 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr) {
2407 assert(is_Raise(node));
2408 set_irn_n(node, 1, exo_ptr);
2413 /* Returns the memory input of a Bound operation. */
2414 ir_node *get_Bound_mem(const ir_node *bound) {
2415 assert(is_Bound(bound));
2416 return get_irn_n(bound, 0);
2419 void set_Bound_mem(ir_node *bound, ir_node *mem) {
2420 assert(is_Bound(bound));
2421 set_irn_n(bound, 0, mem);
2424 /* Returns the index input of a Bound operation. */
2425 ir_node *get_Bound_index(const ir_node *bound) {
2426 assert(is_Bound(bound));
2427 return get_irn_n(bound, 1);
2430 void set_Bound_index(ir_node *bound, ir_node *idx) {
2431 assert(is_Bound(bound));
2432 set_irn_n(bound, 1, idx);
2435 /* Returns the lower bound input of a Bound operation. */
2436 ir_node *get_Bound_lower(const ir_node *bound) {
2437 assert(is_Bound(bound));
2438 return get_irn_n(bound, 2);
2441 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2442 assert(is_Bound(bound));
2443 set_irn_n(bound, 2, lower);
2446 /* Returns the upper bound input of a Bound operation. */
2447 ir_node *get_Bound_upper(const ir_node *bound) {
2448 assert(is_Bound(bound));
2449 return get_irn_n(bound, 3);
2452 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2453 assert(is_Bound(bound));
2454 set_irn_n(bound, 3, upper);
2457 /* Return the operand of a Pin node. */
2458 ir_node *get_Pin_op(const ir_node *pin) {
2459 assert(is_Pin(pin));
2460 return get_irn_n(pin, 0);
2463 void set_Pin_op(ir_node *pin, ir_node *node) {
2464 assert(is_Pin(pin));
2465 set_irn_n(pin, 0, node);
2468 /* Return the assembler text of an ASM pseudo node. */
2469 ident *get_ASM_text(const ir_node *node) {
2470 assert(is_ASM(node));
2471 return node->attr.assem.asm_text;
2474 /* Return the number of input constraints for an ASM node. */
2475 int get_ASM_n_input_constraints(const ir_node *node) {
2476 assert(is_ASM(node));
2477 return ARR_LEN(node->attr.assem.inputs);
2480 /* Return the input constraints for an ASM node. This is a flexible array. */
2481 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node) {
2482 assert(is_ASM(node));
2483 return node->attr.assem.inputs;
2486 /* Return the number of output constraints for an ASM node. */
2487 int get_ASM_n_output_constraints(const ir_node *node) {
2488 assert(is_ASM(node));
2489 return ARR_LEN(node->attr.assem.outputs);
2492 /* Return the output constraints for an ASM node. */
2493 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node) {
2494 assert(is_ASM(node));
2495 return node->attr.assem.outputs;
2498 /* Return the number of clobbered registers for an ASM node. */
2499 int get_ASM_n_clobbers(const ir_node *node) {
2500 assert(is_ASM(node));
2501 return ARR_LEN(node->attr.assem.clobber);
2504 /* Return the list of clobbered registers for an ASM node. */
2505 ident **get_ASM_clobbers(const ir_node *node) {
2506 assert(is_ASM(node));
2507 return node->attr.assem.clobber;
2510 /* returns the graph of a node */
2512 get_irn_irg(const ir_node *node) {
2514 * Do not use get_nodes_Block() here, because this
2515 * will check the pinned state.
2516 * However even a 'wrong' block is always in the proper
2519 if (! is_Block(node))
2520 node = get_irn_n(node, -1);
2521 /* note that get_Block_irg() can handle Bad nodes */
2522 return get_Block_irg(node);
2526 /*----------------------------------------------------------------*/
2527 /* Auxiliary routines */
2528 /*----------------------------------------------------------------*/
2531 skip_Proj(ir_node *node) {
2532 /* don't assert node !!! */
2537 node = get_Proj_pred(node);
2543 skip_Proj_const(const ir_node *node) {
2544 /* don't assert node !!! */
2549 node = get_Proj_pred(node);
/* If node is Proj(Tuple), replace it by the Tuple operand the Proj
 * selects; recurses through nested Proj/Tuple chains so the result is
 * never a Proj of a Tuple. Non-Proj nodes pass through unchanged. */
2555 skip_Tuple(ir_node *node) {
2560 if (is_Proj(node)) {
2561 pred = get_Proj_pred(node);
2562 op = get_irn_op(pred);
/* Dispatch on the cached op pointer so get_irn_op() is called once
 * in the common case. */
2565 * Looks strange but calls get_irn_op() only once
2566 * in most often cases.
2568 if (op == op_Proj) { /* nested Tuple ? */
/* Resolve the inner Proj first; it may itself sit on a Tuple. */
2569 pred = skip_Tuple(pred);
2571 if (is_Tuple(pred)) {
2572 node = get_Tuple_pred(pred, get_Proj_proj(node));
2575 } else if (op == op_Tuple) {
2576 node = get_Tuple_pred(pred, get_Proj_proj(node));
2583 /* returns operand of node if node is a Cast */
2584 ir_node *skip_Cast(ir_node *node) {
2586 return get_Cast_op(node);
2590 /* returns operand of node if node is a Cast */
2591 const ir_node *skip_Cast_const(const ir_node *node) {
2593 return get_Cast_op(node);
2597 /* returns operand of node if node is a Pin */
2598 ir_node *skip_Pin(ir_node *node) {
2600 return get_Pin_op(node);
2604 /* returns operand of node if node is a Confirm */
2605 ir_node *skip_Confirm(ir_node *node) {
2606 if (is_Confirm(node))
2607 return get_Confirm_value(node);
2611 /* skip all high-level ops */
2612 ir_node *skip_HighLevel_ops(ir_node *node) {
2613 while (is_op_highlevel(get_irn_op(node))) {
2614 node = get_irn_n(node, 0);
2620 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2621 * than any other approach, as Id chains are resolved and all point to the real node, or
2622 * all id's are self loops.
2624 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2625 * a little bit "hand optimized".
2627 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
/* Follows a chain of Id nodes to the real node behind them, compacting
 * the chain as it goes: every Id on the path is redirected straight to
 * the chain's end, and cycles of Ids collapse to self-loops.  Hot path
 * (reported ~10% of compile time), hence the hand-optimized shape and
 * the direct node->in[] access. */
2630 skip_Id(ir_node *node) {
2632 /* don't assert node !!! */
2634 if (!node || (node->op != op_Id)) return node;
2636 /* Don't use get_Id_pred(): We get into an endless loop for
2637 self-referencing Ids. */
2638 pred = node->in[0+1];
2640 if (pred->op != op_Id) return pred;
2642 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2643 ir_node *rem_pred, *res;
2645 if (pred->op != op_Id) return pred; /* shortcut */
/* Temporarily make this node a self-loop so that a cyclic Id chain
 * terminates the recursion instead of looping forever. */
2648 assert(get_irn_arity (node) > 0);
2650 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2651 res = skip_Id(rem_pred);
2652 if (res->op == op_Id) /* self-loop */ return node;
2654 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2661 void skip_Id_and_store(ir_node **node) {
2664 if (!n || (n->op != op_Id)) return;
2666 /* Don't use get_Id_pred(): We get into an endless loop for
2667 self-referencing Ids. */
2672 (is_strictConv)(const ir_node *node) {
2673 return _is_strictConv(node);
2677 (is_no_Block)(const ir_node *node) {
2678 return _is_no_Block(node);
2681 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
2683 (is_SymConst_addr_ent)(const ir_node *node) {
2684 return _is_SymConst_addr_ent(node);
2687 /* Returns true if the operation manipulates control flow. */
2688 int is_cfop(const ir_node *node) {
2689 return is_op_cfopcode(get_irn_op(node));
2692 /* Returns true if the operation manipulates interprocedural control flow:
2693 CallBegin, EndReg, EndExcept */
2694 int is_ip_cfop(const ir_node *node) {
2695 return is_ip_cfopcode(get_irn_op(node));
2698 /* Returns true if the operation can change the control flow because
2701 is_fragile_op(const ir_node *node) {
2702 return is_op_fragile(get_irn_op(node));
2705 /* Returns the memory operand of fragile operations. */
2706 ir_node *get_fragile_op_mem(ir_node *node) {
2707 assert(node && is_fragile_op(node));
2709 switch (get_irn_opcode(node)) {
2720 return get_irn_n(node, pn_Generic_M);
2725 assert(0 && "should not be reached");
2730 /* Returns the result mode of a Div operation. */
2731 ir_mode *get_divop_resmod(const ir_node *node) {
2732 switch (get_irn_opcode(node)) {
2733 case iro_Quot : return get_Quot_resmode(node);
2734 case iro_DivMod: return get_DivMod_resmode(node);
2735 case iro_Div : return get_Div_resmode(node);
2736 case iro_Mod : return get_Mod_resmode(node);
2738 assert(0 && "should not be reached");
2743 /* Returns true if the operation is a forking control flow operation. */
2744 int (is_irn_forking)(const ir_node *node) {
2745 return _is_irn_forking(node);
2748 void (copy_node_attr)(const ir_node *old_node, ir_node *new_node) {
2749 _copy_node_attr(old_node, new_node);
2752 /* Return the type associated with the value produced by n
2753 * if the node remarks this type as it is the case for
2754 * Cast, Const, SymConst and some Proj nodes. */
2755 ir_type *(get_irn_type)(ir_node *node) {
2756 return _get_irn_type(node);
2759 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2761 ir_type *(get_irn_type_attr)(ir_node *node) {
2762 return _get_irn_type_attr(node);
2765 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2766 ir_entity *(get_irn_entity_attr)(ir_node *node) {
2767 return _get_irn_entity_attr(node);
2770 /* Returns non-zero for constant-like nodes. */
2771 int (is_irn_constlike)(const ir_node *node) {
2772 return _is_irn_constlike(node);
2776 * Returns non-zero for nodes that are allowed to have keep-alives and
2777 * are neither Block nor PhiM.
2779 int (is_irn_keep)(const ir_node *node) {
2780 return _is_irn_keep(node);
2784 * Returns non-zero for nodes that are always placed in the start block.
2786 int (is_irn_start_block_placed)(const ir_node *node) {
2787 return _is_irn_start_block_placed(node);
2790 /* Returns non-zero for nodes that are machine operations. */
2791 int (is_irn_machine_op)(const ir_node *node) {
2792 return _is_irn_machine_op(node);
2795 /* Returns non-zero for nodes that are machine operands. */
2796 int (is_irn_machine_operand)(const ir_node *node) {
2797 return _is_irn_machine_operand(node);
2800 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2801 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2802 return _is_irn_machine_user(node, n);
2805 /* Returns non-zero for nodes that are CSE neutral to its users. */
2806 int (is_irn_cse_neutral)(const ir_node *node) {
2807 return _is_irn_cse_neutral(node);
2810 /* Gets the string representation of the jump prediction .*/
2811 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
2812 #define X(a) case a: return #a;
2814 X(COND_JMP_PRED_NONE);
2815 X(COND_JMP_PRED_TRUE);
2816 X(COND_JMP_PRED_FALSE);
2822 /* Returns the conditional jump prediction of a Cond node. */
2823 cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond) {
2824 return _get_Cond_jmp_pred(cond);
2827 /* Sets a new conditional jump prediction. */
2828 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2829 _set_Cond_jmp_pred(cond, pred);
2832 /** the get_type operation must be always implemented and return a firm type */
2833 static ir_type *get_Default_type(ir_node *n) {
2835 return get_unknown_type();
2838 /* Sets the get_type operation for an ir_op_ops. */
2839 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
2841 case iro_Const: ops->get_type = get_Const_type; break;
2842 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2843 case iro_Cast: ops->get_type = get_Cast_type; break;
2844 case iro_Proj: ops->get_type = get_Proj_type; break;
2846 /* not allowed to be NULL */
2847 if (! ops->get_type)
2848 ops->get_type = get_Default_type;
2854 /** Return the attribute type of a SymConst node if exists */
2855 static ir_type *get_SymConst_attr_type(ir_node *self) {
2856 symconst_kind kind = get_SymConst_kind(self);
2857 if (SYMCONST_HAS_TYPE(kind))
2858 return get_SymConst_type(self);
2862 /** Return the attribute entity of a SymConst node if exists */
2863 static ir_entity *get_SymConst_attr_entity(ir_node *self) {
2864 symconst_kind kind = get_SymConst_kind(self);
2865 if (SYMCONST_HAS_ENT(kind))
2866 return get_SymConst_entity(self);
2870 /** the get_type_attr operation must be always implemented */
2871 static ir_type *get_Null_type(ir_node *n) {
2873 return firm_unknown_type;
2876 /* Sets the get_type operation for an ir_op_ops. */
2877 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
2879 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2880 case iro_Call: ops->get_type_attr = get_Call_type; break;
2881 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2882 case iro_Free: ops->get_type_attr = get_Free_type; break;
2883 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2885 /* not allowed to be NULL */
2886 if (! ops->get_type_attr)
2887 ops->get_type_attr = get_Null_type;
2893 /** the get_entity_attr operation must be always implemented */
2894 static ir_entity *get_Null_ent(ir_node *n) {
2899 /* Sets the get_type operation for an ir_op_ops. */
2900 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
2902 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2903 case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
2905 /* not allowed to be NULL */
2906 if (! ops->get_entity_attr)
2907 ops->get_entity_attr = get_Null_ent;
2913 /* Sets the debug information of a node. */
2914 void (set_irn_dbg_info)(ir_node *n, dbg_info *db) {
2915 _set_irn_dbg_info(n, db);
2919 * Returns the debug information of an node.
2921 * @param n The node.
2923 dbg_info *(get_irn_dbg_info)(const ir_node *n) {
2924 return _get_irn_dbg_info(n);
2927 /* checks whether a node represents a global address */
2928 int is_Global(const ir_node *node) {
2929 return is_SymConst_addr_ent(node);
2932 /* returns the entity of a global address */
2933 ir_entity *get_Global_entity(const ir_node *node) {
2934 return get_SymConst_entity(node);
2938 * Calculate a hash value of a node.
2940 unsigned firm_default_hash(const ir_node *node) {
2944 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
2945 h = irn_arity = get_irn_intra_arity(node);
2947 /* consider all in nodes... except the block if not a control flow. */
2948 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
2949 ir_node *pred = get_irn_intra_n(node, i);
2950 if (is_irn_cse_neutral(pred))
2953 h = 9*h + HASH_PTR(pred);
2957 h = 9*h + HASH_PTR(get_irn_mode(node));
2959 h = 9*h + HASH_PTR(get_irn_op(node));
2962 } /* firm_default_hash */
2964 /* include generated code */
2965 #include "gen_irnode.c.inl"