2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
48 /* some constants fixing the positions of nodes predecessors
50 #define CALL_PARAM_OFFSET 2
51 #define BUILDIN_PARAM_OFFSET 1
52 #define SEL_INDEX_OFFSET 2
53 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
54 #define END_KEEPALIVE_OFFSET 0
/** Names of the pn_Cmp_* projection numbers, indexed by pnc value. */
static const char *pnc_name_arr[] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 *
 * @param pnc  the pnc constant; must be in [0, 15]
 * @return the static name string (do not free)
 */
const char *get_pnc_string(int pnc) {
	assert(pnc >= 0 && pnc <
	       (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
	return pnc_name_arr[pnc];
}
73 * Calculates the negated (Complement(R)) pnc condition.
75 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode) {
78 /* do NOT add the Uo bit for non-floating point values */
79 if (! mode_is_float(mode))
85 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
86 pn_Cmp get_inversed_pnc(long pnc) {
87 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
88 long lesser = pnc & pn_Cmp_Lt;
89 long greater = pnc & pn_Cmp_Gt;
91 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;


/* register new space for every node */
unsigned firm_register_additional_node_data(unsigned size) {
	assert(!forbid_new_data && "Too late to register additional node data");

	/* release builds: refuse silently once node creation has started */
	if (forbid_new_data)
		return 0;

	/* returns the new total amount of extra per-node space */
	return firm_add_node_size += size;
}
120 void init_irnode(void) {
121 /* Forbid the addition of new data to an ir node. */
126 * irnode constructor.
127 * Create a new irnode in irg, with an op, mode, arity and
128 * some incoming irnodes.
129 * If arity is negative, a node with a dynamic array is created.
132 new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
133 int arity, ir_node **in)
136 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
143 p = obstack_alloc(irg->obst, node_size);
144 memset(p, 0, node_size);
145 res = (ir_node *)(p + firm_add_node_size);
147 res->kind = k_ir_node;
151 res->node_idx = irg_register_node_idx(irg, res);
156 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
158 /* not nice but necessary: End and Sync must always have a flexible array */
159 if (op == op_End || op == op_Sync)
160 res->in = NEW_ARR_F(ir_node *, (arity+1));
162 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
163 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
167 set_irn_dbg_info(res, db);
169 res->node_nr = get_irp_new_node_nr();
171 for (i = 0; i < EDGE_KIND_LAST; ++i)
172 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
174 /* don't put this into the for loop, arity is -1 for some nodes! */
175 edges_notify_edge(res, -1, res->in[0], NULL, irg);
176 for (i = 1; i <= arity; ++i)
177 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
179 hook_new_node(irg, res);
180 if (get_irg_phase_state(irg) == phase_backend) {
181 be_info_new_node(res);
187 /*-- getting some parameters from ir_nodes --*/
/** Out-of-line version of the _is_ir_node() macro (parenthesized name
 * prevents macro expansion of the definition itself). */
int (is_ir_node)(const void *thing) {
	return _is_ir_node(thing);
}
193 int (get_irn_intra_arity)(const ir_node *node) {
194 return _get_irn_intra_arity(node);
197 int (get_irn_inter_arity)(const ir_node *node) {
198 return _get_irn_inter_arity(node);
201 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
203 int (get_irn_arity)(const ir_node *node) {
204 return _get_irn_arity(node);
207 /* Returns the array with ins. This array is shifted with respect to the
208 array accessed by get_irn_n: The block operand is at position 0 not -1.
209 (@@@ This should be changed.)
210 The order of the predecessors in this array is not guaranteed, except that
211 lists of operands as predecessors of Block or arguments of a Call are
213 ir_node **get_irn_in(const ir_node *node) {
215 #ifdef INTERPROCEDURAL_VIEW
216 if (get_interprocedural_view()) { /* handle Filter and Block specially */
217 if (get_irn_opcode(node) == iro_Filter) {
218 assert(node->attr.filter.in_cg);
219 return node->attr.filter.in_cg;
220 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
221 return node->attr.block.in_cg;
223 /* else fall through */
225 #endif /* INTERPROCEDURAL_VIEW */
229 void set_irn_in(ir_node *node, int arity, ir_node **in) {
232 ir_graph *irg = current_ir_graph;
235 #ifdef INTERPROCEDURAL_VIEW
236 if (get_interprocedural_view()) { /* handle Filter and Block specially */
237 ir_opcode code = get_irn_opcode(node);
238 if (code == iro_Filter) {
239 assert(node->attr.filter.in_cg);
240 pOld_in = &node->attr.filter.in_cg;
241 } else if (code == iro_Block && node->attr.block.in_cg) {
242 pOld_in = &node->attr.block.in_cg;
247 #endif /* INTERPROCEDURAL_VIEW */
251 for (i = 0; i < arity; i++) {
252 if (i < ARR_LEN(*pOld_in)-1)
253 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
255 edges_notify_edge(node, i, in[i], NULL, irg);
257 for (;i < ARR_LEN(*pOld_in)-1; i++) {
258 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
261 if (arity != ARR_LEN(*pOld_in) - 1) {
262 ir_node * block = (*pOld_in)[0];
263 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
264 (*pOld_in)[0] = block;
266 fix_backedges(irg->obst, node);
268 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
271 ir_node *(get_irn_intra_n)(const ir_node *node, int n) {
272 return _get_irn_intra_n(node, n);
275 ir_node *(get_irn_inter_n)(const ir_node *node, int n) {
276 return _get_irn_inter_n(node, n);
279 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
281 ir_node *(get_irn_n)(const ir_node *node, int n) {
282 return _get_irn_n(node, n);
285 void set_irn_n(ir_node *node, int n, ir_node *in) {
286 assert(node && node->kind == k_ir_node);
288 assert(n < get_irn_arity(node));
289 assert(in && in->kind == k_ir_node);
291 #ifdef INTERPROCEDURAL_VIEW
292 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
293 /* Change block pred in both views! */
294 node->in[n + 1] = in;
295 assert(node->attr.filter.in_cg);
296 node->attr.filter.in_cg[n + 1] = in;
299 if (get_interprocedural_view()) { /* handle Filter and Block specially */
300 if (get_irn_opcode(node) == iro_Filter) {
301 assert(node->attr.filter.in_cg);
302 node->attr.filter.in_cg[n + 1] = in;
304 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
305 node->attr.block.in_cg[n + 1] = in;
308 /* else fall through */
310 #endif /* INTERPROCEDURAL_VIEW */
313 hook_set_irn_n(node, n, in, node->in[n + 1]);
315 /* Here, we rely on src and tgt being in the current ir graph */
316 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
318 node->in[n + 1] = in;
321 int add_irn_n(ir_node *node, ir_node *in) {
323 ir_graph *irg = get_irn_irg(node);
325 assert(node->op->opar == oparity_dynamic);
326 pos = ARR_LEN(node->in) - 1;
327 ARR_APP1(ir_node *, node->in, in);
328 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
331 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
336 void del_Sync_n(ir_node *n, int i)
338 int arity = get_Sync_n_preds(n);
339 ir_node *last_pred = get_Sync_pred(n, arity - 1);
340 set_Sync_pred(n, i, last_pred);
341 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
342 ARR_SHRINKLEN(get_irn_in(n), arity);
345 int (get_irn_deps)(const ir_node *node) {
346 return _get_irn_deps(node);
349 ir_node *(get_irn_dep)(const ir_node *node, int pos) {
350 return _get_irn_dep(node, pos);
353 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep) {
354 _set_irn_dep(node, pos, dep);
357 int add_irn_dep(ir_node *node, ir_node *dep) {
360 /* DEP edges are only allowed in backend phase */
361 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
362 if (node->deps == NULL) {
363 node->deps = NEW_ARR_F(ir_node *, 1);
369 for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
370 if(node->deps[i] == NULL)
373 if(node->deps[i] == dep)
377 if (first_zero >= 0) {
378 node->deps[first_zero] = dep;
381 ARR_APP1(ir_node *, node->deps, dep);
386 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
391 void add_irn_deps(ir_node *tgt, ir_node *src) {
394 for (i = 0, n = get_irn_deps(src); i < n; ++i)
395 add_irn_dep(tgt, get_irn_dep(src, i));
399 ir_mode *(get_irn_mode)(const ir_node *node) {
400 return _get_irn_mode(node);
403 void (set_irn_mode)(ir_node *node, ir_mode *mode) {
404 _set_irn_mode(node, mode);
407 /** Gets the string representation of the mode .*/
408 const char *get_irn_modename(const ir_node *node) {
410 return get_mode_name(node->mode);
413 ident *get_irn_modeident(const ir_node *node) {
415 return get_mode_ident(node->mode);
418 ir_op *(get_irn_op)(const ir_node *node) {
419 return _get_irn_op(node);
422 /* should be private to the library: */
423 void (set_irn_op)(ir_node *node, ir_op *op) {
424 _set_irn_op(node, op);
427 unsigned (get_irn_opcode)(const ir_node *node) {
428 return _get_irn_opcode(node);
431 const char *get_irn_opname(const ir_node *node) {
433 if (is_Phi0(node)) return "Phi0";
434 return get_id_str(node->op->name);
437 ident *get_irn_opident(const ir_node *node) {
439 return node->op->name;
442 ir_visited_t (get_irn_visited)(const ir_node *node) {
443 return _get_irn_visited(node);
446 void (set_irn_visited)(ir_node *node, ir_visited_t visited) {
447 _set_irn_visited(node, visited);
450 void (mark_irn_visited)(ir_node *node) {
451 _mark_irn_visited(node);
454 int (irn_visited)(const ir_node *node) {
455 return _irn_visited(node);
458 int (irn_visited_else_mark)(ir_node *node) {
459 return _irn_visited_else_mark(node);
462 void (set_irn_link)(ir_node *node, void *link) {
463 _set_irn_link(node, link);
466 void *(get_irn_link)(const ir_node *node) {
467 return _get_irn_link(node);
470 op_pin_state (get_irn_pinned)(const ir_node *node) {
471 return _get_irn_pinned(node);
474 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node) {
475 return _is_irn_pinned_in_irg(node);
478 void set_irn_pinned(ir_node *node, op_pin_state state) {
479 /* due to optimization an opt may be turned into a Tuple */
483 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
484 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
486 node->attr.except.pin_state = state;
489 /* Outputs a unique number for this node */
490 long get_irn_node_nr(const ir_node *node) {
492 return node->node_nr;
495 const_attr *get_irn_const_attr(ir_node *node) {
496 assert(is_Const(node));
497 return &node->attr.con;
500 long get_irn_proj_attr(ir_node *node) {
501 /* BEWARE: check for true Proj node here, no Filter */
502 assert(node->op == op_Proj);
503 return node->attr.proj;
506 alloc_attr *get_irn_alloc_attr(ir_node *node) {
507 assert(is_Alloc(node));
508 return &node->attr.alloc;
511 free_attr *get_irn_free_attr(ir_node *node) {
512 assert(is_Free(node));
513 return &node->attr.free;
516 symconst_attr *get_irn_symconst_attr(ir_node *node) {
517 assert(is_SymConst(node));
518 return &node->attr.symc;
521 call_attr *get_irn_call_attr(ir_node *node) {
522 assert(is_Call(node));
523 node->attr.call.type = skip_tid(node->attr.call.type);
524 return &node->attr.call;
527 sel_attr *get_irn_sel_attr(ir_node *node) {
528 assert(is_Sel(node));
529 return &node->attr.sel;
532 phi_attr *get_irn_phi_attr(ir_node *node) {
533 return &node->attr.phi;
536 block_attr *get_irn_block_attr(ir_node *node) {
537 assert(is_Block(node));
538 return &node->attr.block;
541 load_attr *get_irn_load_attr(ir_node *node) {
542 assert(is_Load(node));
543 return &node->attr.load;
546 store_attr *get_irn_store_attr(ir_node *node) {
547 assert(is_Store(node));
548 return &node->attr.store;
551 except_attr *get_irn_except_attr(ir_node *node) {
552 assert(node->op == op_Div || node->op == op_Quot ||
553 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
554 return &node->attr.except;
557 divmod_attr *get_irn_divmod_attr(ir_node *node) {
558 assert(node->op == op_Div || node->op == op_Quot ||
559 node->op == op_DivMod || node->op == op_Mod);
560 return &node->attr.divmod;
563 builtin_attr *get_irn_builtin_attr(ir_node *node) {
564 assert(is_Builtin(node));
565 return &node->attr.builtin;
568 void *(get_irn_generic_attr)(ir_node *node) {
569 assert(is_ir_node(node));
570 return _get_irn_generic_attr(node);
573 const void *(get_irn_generic_attr_const)(const ir_node *node) {
574 assert(is_ir_node(node));
575 return _get_irn_generic_attr_const(node);
578 unsigned (get_irn_idx)(const ir_node *node) {
579 assert(is_ir_node(node));
580 return _get_irn_idx(node);
583 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
585 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
586 if (get_irn_n(node, i) == arg)
592 /** manipulate fields of individual nodes **/
594 /* this works for all except Block */
595 ir_node *get_nodes_block(const ir_node *node) {
596 assert(node->op != op_Block);
597 return get_irn_n(node, -1);
600 void set_nodes_block(ir_node *node, ir_node *block) {
601 assert(node->op != op_Block);
602 set_irn_n(node, -1, block);
605 /* this works for all except Block */
606 ir_node *get_nodes_MacroBlock(const ir_node *node) {
607 assert(node->op != op_Block);
608 return get_Block_MacroBlock(get_irn_n(node, -1));
611 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
612 * from Start. If so returns frame type, else Null. */
613 ir_type *is_frame_pointer(const ir_node *n) {
614 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
615 ir_node *start = get_Proj_pred(n);
616 if (is_Start(start)) {
617 return get_irg_frame_type(get_irn_irg(start));
623 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
624 * from Start. If so returns tls type, else Null. */
625 ir_type *is_tls_pointer(const ir_node *n) {
626 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
627 ir_node *start = get_Proj_pred(n);
628 if (is_Start(start)) {
629 return get_tls_type();
635 ir_node **get_Block_cfgpred_arr(ir_node *node) {
636 assert(is_Block(node));
637 return (ir_node **)&(get_irn_in(node)[1]);
640 int (get_Block_n_cfgpreds)(const ir_node *node) {
641 return _get_Block_n_cfgpreds(node);
644 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos) {
645 return _get_Block_cfgpred(node, pos);
648 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
649 assert(is_Block(node));
650 set_irn_n(node, pos, pred);
653 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred) {
656 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
657 if (get_Block_cfgpred_block(block, i) == pred)
663 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos) {
664 return _get_Block_cfgpred_block(node, pos);
667 int get_Block_matured(const ir_node *node) {
668 assert(is_Block(node));
669 return (int)node->attr.block.is_matured;
672 void set_Block_matured(ir_node *node, int matured) {
673 assert(is_Block(node));
674 node->attr.block.is_matured = matured;
677 ir_visited_t (get_Block_block_visited)(const ir_node *node) {
678 return _get_Block_block_visited(node);
681 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit) {
682 _set_Block_block_visited(node, visit);
685 /* For this current_ir_graph must be set. */
686 void (mark_Block_block_visited)(ir_node *node) {
687 _mark_Block_block_visited(node);
690 int (Block_block_visited)(const ir_node *node) {
691 return _Block_block_visited(node);
694 ir_node *get_Block_graph_arr(ir_node *node, int pos) {
695 assert(is_Block(node));
696 return node->attr.block.graph_arr[pos+1];
699 void set_Block_graph_arr(ir_node *node, int pos, ir_node *value) {
700 assert(is_Block(node));
701 node->attr.block.graph_arr[pos+1] = value;
704 #ifdef INTERPROCEDURAL_VIEW
705 void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[]) {
706 assert(is_Block(node));
707 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
708 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
709 node->attr.block.in_cg[0] = NULL;
710 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
712 /* Fix backedge array. fix_backedges() operates depending on
713 interprocedural_view. */
714 int ipv = get_interprocedural_view();
715 set_interprocedural_view(1);
716 fix_backedges(current_ir_graph->obst, node);
717 set_interprocedural_view(ipv);
720 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
723 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred) {
724 assert(is_Block(node) && node->attr.block.in_cg &&
725 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
726 node->attr.block.in_cg[pos + 1] = pred;
729 ir_node **get_Block_cg_cfgpred_arr(ir_node *node) {
730 assert(is_Block(node));
731 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
734 int get_Block_cg_n_cfgpreds(const ir_node *node) {
735 assert(is_Block(node));
736 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
739 ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos) {
740 assert(is_Block(node) && node->attr.block.in_cg);
741 return node->attr.block.in_cg[pos + 1];
744 void remove_Block_cg_cfgpred_arr(ir_node *node) {
745 assert(is_Block(node));
746 node->attr.block.in_cg = NULL;
748 #endif /* INTERPROCEDURAL_VIEW */
750 ir_node *(set_Block_dead)(ir_node *block) {
751 return _set_Block_dead(block);
754 int (is_Block_dead)(const ir_node *block) {
755 return _is_Block_dead(block);
758 ir_extblk *get_Block_extbb(const ir_node *block) {
760 assert(is_Block(block));
761 res = block->attr.block.extblk;
762 assert(res == NULL || is_ir_extbb(res));
766 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
767 assert(is_Block(block));
768 assert(extblk == NULL || is_ir_extbb(extblk));
769 block->attr.block.extblk = extblk;
772 /* Returns the macro block header of a block.*/
773 ir_node *get_Block_MacroBlock(const ir_node *block) {
775 assert(is_Block(block));
776 mbh = get_irn_n(block, -1);
777 /* once macro block header is respected by all optimizations,
778 this assert can be removed */
783 /* Sets the macro block header of a block. */
784 void set_Block_MacroBlock(ir_node *block, ir_node *mbh) {
785 assert(is_Block(block));
787 assert(is_Block(mbh));
788 set_irn_n(block, -1, mbh);
791 /* returns the macro block header of a node. */
792 ir_node *get_irn_MacroBlock(const ir_node *n) {
794 n = get_nodes_block(n);
795 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
799 return get_Block_MacroBlock(n);
802 /* returns the graph of a Block. */
803 ir_graph *(get_Block_irg)(const ir_node *block) {
804 return _get_Block_irg(block);
807 ir_entity *create_Block_entity(ir_node *block) {
809 assert(is_Block(block));
811 entity = block->attr.block.entity;
812 if (entity == NULL) {
816 glob = get_glob_type();
817 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
818 nr = get_irp_next_label_nr();
819 set_entity_label(entity, nr);
820 set_entity_compiler_generated(entity, 1);
821 set_entity_allocation(entity, allocation_static);
823 block->attr.block.entity = entity;
828 ir_entity *get_Block_entity(const ir_node *block) {
829 assert(is_Block(block));
830 return block->attr.block.entity;
833 void set_Block_entity(ir_node *block, ir_entity *entity)
835 assert(is_Block(block));
836 assert(get_entity_type(entity) == get_code_type());
837 block->attr.block.entity = entity;
840 int has_Block_entity(const ir_node *block)
842 return block->attr.block.entity != NULL;
845 ir_node *(get_Block_phis)(const ir_node *block) {
846 return _get_Block_phis(block);
849 void (set_Block_phis)(ir_node *block, ir_node *phi) {
850 _set_Block_phis(block, phi);
853 void (add_Block_phi)(ir_node *block, ir_node *phi) {
854 _add_Block_phi(block, phi);
857 /* Get the Block mark (single bit). */
858 unsigned (get_Block_mark)(const ir_node *block) {
859 return _get_Block_mark(block);
862 /* Set the Block mark (single bit). */
863 void (set_Block_mark)(ir_node *block, unsigned mark) {
864 _set_Block_mark(block, mark);
867 int get_End_n_keepalives(const ir_node *end) {
869 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
872 ir_node *get_End_keepalive(const ir_node *end, int pos) {
874 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
877 void add_End_keepalive(ir_node *end, ir_node *ka) {
882 void set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
884 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
887 /* Set new keep-alives */
888 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
890 ir_graph *irg = get_irn_irg(end);
892 /* notify that edges are deleted */
893 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
894 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
896 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
898 for (i = 0; i < n; ++i) {
899 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
900 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
904 /* Set new keep-alives from old keep-alives, skipping irn */
905 void remove_End_keepalive(ir_node *end, ir_node *irn) {
906 int n = get_End_n_keepalives(end);
911 for (i = n -1; i >= 0; --i) {
912 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
922 irg = get_irn_irg(end);
924 /* remove the edge */
925 edges_notify_edge(end, idx, NULL, irn, irg);
928 /* exchange with the last one */
929 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
930 edges_notify_edge(end, n - 1, NULL, old, irg);
931 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
932 edges_notify_edge(end, idx, old, NULL, irg);
934 /* now n - 1 keeps, 1 block input */
935 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
938 /* remove Bads, NoMems and doublets from the keep-alive set */
939 void remove_End_Bads_and_doublets(ir_node *end) {
941 int idx, n = get_End_n_keepalives(end);
947 irg = get_irn_irg(end);
948 pset_new_init(&keeps);
950 for (idx = n - 1; idx >= 0; --idx) {
951 ir_node *ka = get_End_keepalive(end, idx);
953 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
954 /* remove the edge */
955 edges_notify_edge(end, idx, NULL, ka, irg);
958 /* exchange with the last one */
959 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
960 edges_notify_edge(end, n - 1, NULL, old, irg);
961 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
962 edges_notify_edge(end, idx, old, NULL, irg);
966 pset_new_insert(&keeps, ka);
969 /* n keeps, 1 block input */
970 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
972 pset_new_destroy(&keeps);
975 void free_End(ir_node *end) {
979 end->in = NULL; /* @@@ make sure we get an error if we use the
980 in array afterwards ... */
983 /* Return the target address of an IJmp */
984 ir_node *get_IJmp_target(const ir_node *ijmp) {
985 assert(is_IJmp(ijmp));
986 return get_irn_n(ijmp, 0);
989 /** Sets the target address of an IJmp */
990 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
991 assert(is_IJmp(ijmp));
992 set_irn_n(ijmp, 0, tgt);
996 > Implementing the case construct (which is where the constant Proj node is
997 > important) involves far more than simply determining the constant values.
998 > We could argue that this is more properly a function of the translator from
999 > Firm to the target machine. That could be done if there was some way of
1000 > projecting "default" out of the Cond node.
1001 I know it's complicated.
1002 Basically there are two problems:
1003 - determining the gaps between the Projs
1004 - determining the biggest case constant to know the proj number for
1006 I see several solutions:
1007 1. Introduce a ProjDefault node. Solves both problems.
1008 This means to extend all optimizations executed during construction.
1009 2. Give the Cond node for switch two flavors:
1010 a) there are no gaps in the Projs (existing flavor)
1011 b) gaps may exist, default proj is still the Proj with the largest
1012 projection number. This covers also the gaps.
1013 3. Fix the semantic of the Cond to that of 2b)
1015 Solution 2 seems to be the best:
1016 Computing the gaps in the Firm representation is not too hard, i.e.,
1017 libFIRM can implement a routine that transforms between the two
1018 flavours. This is also possible for 1) but 2) does not require to
1019 change any existing optimization.
1020 Further it should be far simpler to determine the biggest constant than
1021 to compute all gaps.
1022 I don't want to choose 3) as 2a) seems to have advantages for
1023 dataflow analysis and 3) does not allow to convert the representation to
1027 const char *get_cond_kind_name(cond_kind kind)
1029 #define X(a) case a: return #a;
1039 get_Cond_selector(const ir_node *node) {
1040 assert(is_Cond(node));
1041 return get_irn_n(node, 0);
1045 set_Cond_selector(ir_node *node, ir_node *selector) {
1046 assert(is_Cond(node));
1047 set_irn_n(node, 0, selector);
1051 get_Cond_kind(const ir_node *node) {
1052 assert(is_Cond(node));
1053 return node->attr.cond.kind;
1057 set_Cond_kind(ir_node *node, cond_kind kind) {
1058 assert(is_Cond(node));
1059 node->attr.cond.kind = kind;
1063 get_Cond_default_proj(const ir_node *node) {
1064 assert(is_Cond(node));
1065 return node->attr.cond.default_proj;
1068 void set_Cond_default_proj(ir_node *node, long defproj) {
1069 assert(is_Cond(node));
1070 node->attr.cond.default_proj = defproj;
1074 get_Return_mem(const ir_node *node) {
1075 assert(is_Return(node));
1076 return get_irn_n(node, 0);
1080 set_Return_mem(ir_node *node, ir_node *mem) {
1081 assert(is_Return(node));
1082 set_irn_n(node, 0, mem);
1086 get_Return_n_ress(const ir_node *node) {
1087 assert(is_Return(node));
1088 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1092 get_Return_res_arr(ir_node *node) {
1093 assert(is_Return(node));
1094 if (get_Return_n_ress(node) > 0)
1095 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1102 set_Return_n_res(ir_node *node, int results) {
1103 assert(is_Return(node));
1108 get_Return_res(const ir_node *node, int pos) {
1109 assert(is_Return(node));
1110 assert(get_Return_n_ress(node) > pos);
1111 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1115 set_Return_res(ir_node *node, int pos, ir_node *res){
1116 assert(is_Return(node));
1117 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1120 tarval *(get_Const_tarval)(const ir_node *node) {
1121 return _get_Const_tarval(node);
1125 set_Const_tarval(ir_node *node, tarval *con) {
1126 assert(is_Const(node));
1127 node->attr.con.tv = con;
1130 int (is_Const_null)(const ir_node *node) {
1131 return _is_Const_null(node);
1134 int (is_Const_one)(const ir_node *node) {
1135 return _is_Const_one(node);
1138 int (is_Const_all_one)(const ir_node *node) {
1139 return _is_Const_all_one(node);
1143 /* The source language type. Must be an atomic type. Mode of type must
1144 be mode of node. For tarvals from entities type must be pointer to
1147 get_Const_type(ir_node *node) {
1148 assert(is_Const(node));
1149 node->attr.con.tp = skip_tid(node->attr.con.tp);
1150 return node->attr.con.tp;
1154 set_Const_type(ir_node *node, ir_type *tp) {
1155 assert(is_Const(node));
1156 if (tp != firm_unknown_type) {
1157 assert(is_atomic_type(tp));
1158 assert(get_type_mode(tp) == get_irn_mode(node));
1160 node->attr.con.tp = tp;
1165 get_SymConst_kind(const ir_node *node) {
1166 assert(is_SymConst(node));
1167 return node->attr.symc.kind;
1171 set_SymConst_kind(ir_node *node, symconst_kind kind) {
1172 assert(is_SymConst(node));
1173 node->attr.symc.kind = kind;
1177 get_SymConst_type(const ir_node *node) {
1178 /* the cast here is annoying, but we have to compensate for
1180 ir_node *irn = (ir_node *)node;
1181 assert(is_SymConst(node) &&
1182 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1183 return irn->attr.symc.sym.type_p = skip_tid(irn->attr.symc.sym.type_p);
1187 set_SymConst_type(ir_node *node, ir_type *tp) {
1188 assert(is_SymConst(node) &&
1189 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1190 node->attr.symc.sym.type_p = tp;
1194 get_SymConst_name(const ir_node *node) {
1195 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1196 return node->attr.symc.sym.ident_p;
1200 set_SymConst_name(ir_node *node, ident *name) {
1201 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1202 node->attr.symc.sym.ident_p = name;
1206 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1207 ir_entity *get_SymConst_entity(const ir_node *node) {
1208 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1209 return node->attr.symc.sym.entity_p;
1212 void set_SymConst_entity(ir_node *node, ir_entity *ent) {
1213 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1214 node->attr.symc.sym.entity_p = ent;
1217 ir_enum_const *get_SymConst_enum(const ir_node *node) {
1218 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1219 return node->attr.symc.sym.enum_p;
1222 void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
1223 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1224 node->attr.symc.sym.enum_p = ec;
1227 union symconst_symbol
1228 get_SymConst_symbol(const ir_node *node) {
1229 assert(is_SymConst(node));
1230 return node->attr.symc.sym;
1234 set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
1235 assert(is_SymConst(node));
1236 node->attr.symc.sym = sym;
1240 get_SymConst_value_type(ir_node *node) {
1241 assert(is_SymConst(node));
1242 if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
1243 return node->attr.symc.tp;
1247 set_SymConst_value_type(ir_node *node, ir_type *tp) {
1248 assert(is_SymConst(node));
1249 node->attr.symc.tp = tp;
1253 get_Sel_mem(const ir_node *node) {
1254 assert(is_Sel(node));
1255 return get_irn_n(node, 0);
1259 set_Sel_mem(ir_node *node, ir_node *mem) {
1260 assert(is_Sel(node));
1261 set_irn_n(node, 0, mem);
1265 get_Sel_ptr(const ir_node *node) {
1266 assert(is_Sel(node));
1267 return get_irn_n(node, 1);
1271 set_Sel_ptr(ir_node *node, ir_node *ptr) {
1272 assert(is_Sel(node));
1273 set_irn_n(node, 1, ptr);
1277 get_Sel_n_indexs(const ir_node *node) {
1278 assert(is_Sel(node));
1279 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1283 get_Sel_index_arr(ir_node *node) {
1284 assert(is_Sel(node));
1285 if (get_Sel_n_indexs(node) > 0)
1286 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1292 get_Sel_index(const ir_node *node, int pos) {
1293 assert(is_Sel(node));
1294 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1298 set_Sel_index(ir_node *node, int pos, ir_node *index) {
1299 assert(is_Sel(node));
1300 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1304 get_Sel_entity(const ir_node *node) {
1305 assert(is_Sel(node));
1306 return node->attr.sel.entity;
1309 /* need a version without const to prevent warning */
1310 static ir_entity *_get_Sel_entity(ir_node *node) {
1311 return get_Sel_entity(node);
1315 set_Sel_entity(ir_node *node, ir_entity *ent) {
1316 assert(is_Sel(node));
1317 node->attr.sel.entity = ent;
1321 /* For unary and binary arithmetic operations the access to the
1322 operands can be factored out. Left is the first, right the
1323 second arithmetic value as listed in tech report 0999-33.
1324 unops are: Minus, Abs, Not, Conv, Cast
1325 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1326 Shr, Shrs, Rotate, Cmp */
1330 get_Call_mem(const ir_node *node) {
1331 assert(is_Call(node));
1332 return get_irn_n(node, 0);
1336 set_Call_mem(ir_node *node, ir_node *mem) {
1337 assert(is_Call(node));
1338 set_irn_n(node, 0, mem);
1342 get_Call_ptr(const ir_node *node) {
1343 assert(is_Call(node));
1344 return get_irn_n(node, 1);
1348 set_Call_ptr(ir_node *node, ir_node *ptr) {
1349 assert(is_Call(node));
1350 set_irn_n(node, 1, ptr);
1354 get_Call_param_arr(ir_node *node) {
1355 assert(is_Call(node));
1356 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1360 get_Call_n_params(const ir_node *node) {
1361 assert(is_Call(node));
1362 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1366 get_Call_param(const ir_node *node, int pos) {
1367 assert(is_Call(node));
1368 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1372 set_Call_param(ir_node *node, int pos, ir_node *param) {
1373 assert(is_Call(node));
1374 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1378 get_Call_type(ir_node *node) {
1379 assert(is_Call(node));
1380 return node->attr.call.type = skip_tid(node->attr.call.type);
1384 set_Call_type(ir_node *node, ir_type *tp) {
1385 assert(is_Call(node));
1386 assert((get_unknown_type() == tp) || is_Method_type(tp));
1387 node->attr.call.type = tp;
1391 get_Call_tail_call(const ir_node *node) {
1392 assert(is_Call(node));
1393 return node->attr.call.tail_call;
1397 set_Call_tail_call(ir_node *node, unsigned tail_call) {
1398 assert(is_Call(node));
1399 node->attr.call.tail_call = tail_call != 0;
1403 get_Builtin_mem(const ir_node *node) {
1404 assert(is_Builtin(node));
1405 return get_irn_n(node, 0);
1409 set_Builin_mem(ir_node *node, ir_node *mem) {
1410 assert(is_Builtin(node));
1411 set_irn_n(node, 0, mem);
1415 get_Builtin_kind(const ir_node *node) {
1416 assert(is_Builtin(node));
1417 return node->attr.builtin.kind;
1421 set_Builtin_kind(ir_node *node, ir_builtin_kind kind) {
1422 assert(is_Builtin(node));
1423 node->attr.builtin.kind = kind;
1427 get_Builtin_param_arr(ir_node *node) {
1428 assert(is_Builtin(node));
1429 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1433 get_Builtin_n_params(const ir_node *node) {
1434 assert(is_Builtin(node));
1435 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1439 get_Builtin_param(const ir_node *node, int pos) {
1440 assert(is_Builtin(node));
1441 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1445 set_Builtin_param(ir_node *node, int pos, ir_node *param) {
1446 assert(is_Builtin(node));
1447 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1451 get_Builtin_type(ir_node *node) {
1452 assert(is_Builtin(node));
1453 return node->attr.builtin.type = skip_tid(node->attr.builtin.type);
1457 set_Builtin_type(ir_node *node, ir_type *tp) {
1458 assert(is_Builtin(node));
1459 assert((get_unknown_type() == tp) || is_Method_type(tp));
1460 node->attr.builtin.type = tp;
1463 /* Returns a human readable string for the ir_builtin_kind. */
/* Maps an ir_builtin_kind enum value to its identifier string.
 * NOTE(review): this extract is missing the switch header, several X()
 * entries, the #undef and the fallback return — verify against upstream. */
1464 const char *get_builtin_kind_name(ir_builtin_kind kind) {
/* X expands each enum constant into a case returning its stringified name. */
1465 #define X(a) case a: return #a;
1468 X(ir_bk_debugbreak);
1469 X(ir_bk_return_address);
1470 X(ir_bk_frame_address);
1480 X(ir_bk_inner_trampoline);
1487 int Call_has_callees(const ir_node *node) {
1488 assert(is_Call(node));
1489 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1490 (node->attr.call.callee_arr != NULL));
1493 int get_Call_n_callees(const ir_node *node) {
1494 assert(is_Call(node) && node->attr.call.callee_arr);
1495 return ARR_LEN(node->attr.call.callee_arr);
1498 ir_entity *get_Call_callee(const ir_node *node, int pos) {
1499 assert(pos >= 0 && pos < get_Call_n_callees(node));
1500 return node->attr.call.callee_arr[pos];
1503 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr) {
1504 assert(is_Call(node));
1505 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1506 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1508 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1511 void remove_Call_callee_arr(ir_node *node) {
1512 assert(is_Call(node));
1513 node->attr.call.callee_arr = NULL;
1516 ir_node *get_CallBegin_ptr(const ir_node *node) {
1517 assert(is_CallBegin(node));
1518 return get_irn_n(node, 0);
1521 void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
1522 assert(is_CallBegin(node));
1523 set_irn_n(node, 0, ptr);
1526 ir_node *get_CallBegin_call(const ir_node *node) {
1527 assert(is_CallBegin(node));
1528 return node->attr.callbegin.call;
1531 void set_CallBegin_call(ir_node *node, ir_node *call) {
1532 assert(is_CallBegin(node));
1533 node->attr.callbegin.call = call;
1537 * Returns non-zero if a Call is surely a self-recursive Call.
1538 * Beware: if this functions returns 0, the call might be self-recursive!
/* Decides self-recursion only for direct calls: the callee address must be
 * a SymConst entity address whose graph equals the Call's own graph.
 * NOTE(review): the tail of this function (the return statements and the
 * closing braces) is missing from this extract — verify against upstream. */
1540 int is_self_recursive_Call(const ir_node *call) {
1541 const ir_node *callee = get_Call_ptr(call);
/* Indirect calls cannot be decided; see the header comment above ("if this
 * function returns 0, the call might still be self-recursive"). */
1543 if (is_SymConst_addr_ent(callee)) {
1544 const ir_entity *ent = get_SymConst_entity(callee);
1545 const ir_graph *irg = get_entity_irg(ent);
1546 if (irg == get_irn_irg(call))
1553 ir_node * get_##OP##_left(const ir_node *node) { \
1554 assert(is_##OP(node)); \
1555 return get_irn_n(node, node->op->op_index); \
1557 void set_##OP##_left(ir_node *node, ir_node *left) { \
1558 assert(is_##OP(node)); \
1559 set_irn_n(node, node->op->op_index, left); \
1561 ir_node *get_##OP##_right(const ir_node *node) { \
1562 assert(is_##OP(node)); \
1563 return get_irn_n(node, node->op->op_index + 1); \
1565 void set_##OP##_right(ir_node *node, ir_node *right) { \
1566 assert(is_##OP(node)); \
1567 set_irn_n(node, node->op->op_index + 1, right); \
1571 ir_node *get_##OP##_op(const ir_node *node) { \
1572 assert(is_##OP(node)); \
1573 return get_irn_n(node, node->op->op_index); \
1575 void set_##OP##_op(ir_node *node, ir_node *op) { \
1576 assert(is_##OP(node)); \
1577 set_irn_n(node, node->op->op_index, op); \
1580 #define BINOP_MEM(OP) \
1584 get_##OP##_mem(const ir_node *node) { \
1585 assert(is_##OP(node)); \
1586 return get_irn_n(node, 0); \
1590 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1591 assert(is_##OP(node)); \
1592 set_irn_n(node, 0, mem); \
1598 ir_mode *get_##OP##_resmode(const ir_node *node) { \
1599 assert(is_##OP(node)); \
1600 return node->attr.divmod.resmode; \
1603 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
1604 assert(is_##OP(node)); \
1605 node->attr.divmod.resmode = mode; \
1633 int get_Div_no_remainder(const ir_node *node) {
1634 assert(is_Div(node));
1635 return node->attr.divmod.no_remainder;
1638 void set_Div_no_remainder(ir_node *node, int no_remainder) {
1639 assert(is_Div(node));
1640 node->attr.divmod.no_remainder = no_remainder;
1643 int get_Conv_strict(const ir_node *node) {
1644 assert(is_Conv(node));
1645 return node->attr.conv.strict;
1648 void set_Conv_strict(ir_node *node, int strict_flag) {
1649 assert(is_Conv(node));
1650 node->attr.conv.strict = (char)strict_flag;
1654 get_Cast_type(ir_node *node) {
1655 assert(is_Cast(node));
1656 node->attr.cast.type = skip_tid(node->attr.cast.type);
1657 return node->attr.cast.type;
1661 set_Cast_type(ir_node *node, ir_type *to_tp) {
1662 assert(is_Cast(node));
1663 node->attr.cast.type = to_tp;
1667 /* Checks for upcast.
1669 * Returns true if the Cast node casts a class type to a super type.
/* Returns true if the Cast casts a class type to a super type.
 * NOTE(review): some lines (including the closing brace of the while loop)
 * are missing from this extract — verify against upstream. */
1671 int is_Cast_upcast(ir_node *node) {
1672 ir_type *totype = get_Cast_type(node);
1673 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
/* Requires consistent type information on the graph. */
1675 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
/* Strip equal levels of pointer indirection from both sides. */
1678 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1679 totype = get_pointer_points_to_type(totype);
1680 fromtype = get_pointer_points_to_type(fromtype);
/* Upcast iff the source class is a subclass of the destination class. */
1685 if (!is_Class_type(totype)) return 0;
1686 return is_SubClass_of(fromtype, totype);
1689 /* Checks for downcast.
1691 * Returns true if the Cast node casts a class type to a sub type.
/* Returns true if the Cast casts a class type to a sub type.
 * NOTE(review): some lines (including the closing brace of the while loop)
 * are missing from this extract — verify against upstream. */
1693 int is_Cast_downcast(ir_node *node) {
1694 ir_type *totype = get_Cast_type(node);
1695 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
/* Requires consistent type information on the graph. */
1697 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
/* Strip equal levels of pointer indirection from both sides. */
1700 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1701 totype = get_pointer_points_to_type(totype);
1702 fromtype = get_pointer_points_to_type(fromtype);
/* Downcast iff the destination class is a subclass of the source class
 * (note the swapped argument order compared to is_Cast_upcast). */
1707 if (!is_Class_type(totype)) return 0;
1708 return is_SubClass_of(totype, fromtype);
/* Out-of-line version of the _is_unop() macro; the parenthesized name
 * prevents macro expansion of the definition.
 * NOTE(review): the return-type lines and the else/return paths of the
 * functions below are missing from this extract — verify upstream. */
1712 (is_unop)(const ir_node *node) {
1713 return _is_unop(node);
/* Returns the single operand of a unary operation (position op_index). */
1717 get_unop_op(const ir_node *node) {
1718 if (node->op->opar == oparity_unary)
1719 return get_irn_n(node, node->op->op_index);
/* Reached only for non-unary ops: fail in debug builds. */
1721 assert(node->op->opar == oparity_unary);
/* Sets the single operand of a unary operation (position op_index). */
1726 set_unop_op(ir_node *node, ir_node *op) {
1727 if (node->op->opar == oparity_unary)
1728 set_irn_n(node, node->op->op_index, op);
1730 assert(node->op->opar == oparity_unary);
1734 (is_binop)(const ir_node *node) {
1735 return _is_binop(node);
1739 get_binop_left(const ir_node *node) {
1740 assert(node->op->opar == oparity_binary);
1741 return get_irn_n(node, node->op->op_index);
1745 set_binop_left(ir_node *node, ir_node *left) {
1746 assert(node->op->opar == oparity_binary);
1747 set_irn_n(node, node->op->op_index, left);
1751 get_binop_right(const ir_node *node) {
1752 assert(node->op->opar == oparity_binary);
1753 return get_irn_n(node, node->op->op_index + 1);
1757 set_binop_right(ir_node *node, ir_node *right) {
1758 assert(node->op->opar == oparity_binary);
1759 set_irn_n(node, node->op->op_index + 1, right);
/* Returns non-zero for a zero-arity Phi created during graph construction
 * (a "Phi0" placeholder; only exists while the graph is being built). */
1762 int is_Phi0(const ir_node *n) {
1765 return ((get_irn_op(n) == op_Phi) &&
1766 (get_irn_arity(n) == 0) &&
1767 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Returns the predecessor array of a Phi node (skipping the block at in[0]). */
1771 get_Phi_preds_arr(ir_node *node) {
1772 assert(node->op == op_Phi);
1773 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of predecessors of a Phi (or Phi0) node. */
1777 get_Phi_n_preds(const ir_node *node) {
1778 assert(is_Phi(node) || is_Phi0(node));
1779 return (get_irn_arity(node));
/* NOTE(review): the body of set_Phi_n_preds is missing from this extract —
 * verify upstream. */
1783 void set_Phi_n_preds(ir_node *node, int n_preds) {
1784 assert(node->op == op_Phi);
/* Returns the pos'th predecessor of a Phi (or Phi0) node. */
1789 get_Phi_pred(const ir_node *node, int pos) {
1790 assert(is_Phi(node) || is_Phi0(node));
1791 return get_irn_n(node, pos);
/* Sets the pos'th predecessor of a Phi (or Phi0) node. */
1795 set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
1796 assert(is_Phi(node) || is_Phi0(node));
1797 set_irn_n(node, pos, pred);
/* Returns the next Phi in the block's Phi list (macro wrapper). */
1800 ir_node *(get_Phi_next)(const ir_node *phi) {
1801 return _get_Phi_next(phi);
/* Sets the next Phi in the block's Phi list (macro wrapper). */
1804 void (set_Phi_next)(ir_node *phi, ir_node *next) {
1805 _set_Phi_next(phi, next);
1808 int is_memop(const ir_node *node) {
1809 ir_opcode code = get_irn_opcode(node);
1810 return (code == iro_Load || code == iro_Store);
1813 ir_node *get_memop_mem(const ir_node *node) {
1814 assert(is_memop(node));
1815 return get_irn_n(node, 0);
1818 void set_memop_mem(ir_node *node, ir_node *mem) {
1819 assert(is_memop(node));
1820 set_irn_n(node, 0, mem);
1823 ir_node *get_memop_ptr(const ir_node *node) {
1824 assert(is_memop(node));
1825 return get_irn_n(node, 1);
1828 void set_memop_ptr(ir_node *node, ir_node *ptr) {
1829 assert(is_memop(node));
1830 set_irn_n(node, 1, ptr);
1834 get_Load_mem(const ir_node *node) {
1835 assert(is_Load(node));
1836 return get_irn_n(node, 0);
1840 set_Load_mem(ir_node *node, ir_node *mem) {
1841 assert(is_Load(node));
1842 set_irn_n(node, 0, mem);
1846 get_Load_ptr(const ir_node *node) {
1847 assert(is_Load(node));
1848 return get_irn_n(node, 1);
1852 set_Load_ptr(ir_node *node, ir_node *ptr) {
1853 assert(is_Load(node));
1854 set_irn_n(node, 1, ptr);
1858 get_Load_mode(const ir_node *node) {
1859 assert(is_Load(node));
1860 return node->attr.load.mode;
1864 set_Load_mode(ir_node *node, ir_mode *mode) {
1865 assert(is_Load(node));
1866 node->attr.load.mode = mode;
1870 get_Load_volatility(const ir_node *node) {
1871 assert(is_Load(node));
1872 return node->attr.load.volatility;
1876 set_Load_volatility(ir_node *node, ir_volatility volatility) {
1877 assert(is_Load(node));
1878 node->attr.load.volatility = volatility;
1882 get_Load_align(const ir_node *node) {
1883 assert(is_Load(node));
1884 return node->attr.load.aligned;
1888 set_Load_align(ir_node *node, ir_align align) {
1889 assert(is_Load(node));
1890 node->attr.load.aligned = align;
1895 get_Store_mem(const ir_node *node) {
1896 assert(is_Store(node));
1897 return get_irn_n(node, 0);
1901 set_Store_mem(ir_node *node, ir_node *mem) {
1902 assert(is_Store(node));
1903 set_irn_n(node, 0, mem);
1907 get_Store_ptr(const ir_node *node) {
1908 assert(is_Store(node));
1909 return get_irn_n(node, 1);
1913 set_Store_ptr(ir_node *node, ir_node *ptr) {
1914 assert(is_Store(node));
1915 set_irn_n(node, 1, ptr);
1919 get_Store_value(const ir_node *node) {
1920 assert(is_Store(node));
1921 return get_irn_n(node, 2);
1925 set_Store_value(ir_node *node, ir_node *value) {
1926 assert(is_Store(node));
1927 set_irn_n(node, 2, value);
1931 get_Store_volatility(const ir_node *node) {
1932 assert(is_Store(node));
1933 return node->attr.store.volatility;
1937 set_Store_volatility(ir_node *node, ir_volatility volatility) {
1938 assert(is_Store(node));
1939 node->attr.store.volatility = volatility;
1943 get_Store_align(const ir_node *node) {
1944 assert(is_Store(node));
1945 return node->attr.store.aligned;
1949 set_Store_align(ir_node *node, ir_align align) {
1950 assert(is_Store(node));
1951 node->attr.store.aligned = align;
1956 get_Alloc_mem(const ir_node *node) {
1957 assert(is_Alloc(node));
1958 return get_irn_n(node, 0);
1962 set_Alloc_mem(ir_node *node, ir_node *mem) {
1963 assert(is_Alloc(node));
1964 set_irn_n(node, 0, mem);
1968 get_Alloc_size(const ir_node *node) {
1969 assert(is_Alloc(node));
1970 return get_irn_n(node, 1);
1974 set_Alloc_size(ir_node *node, ir_node *size) {
1975 assert(is_Alloc(node));
1976 set_irn_n(node, 1, size);
1980 get_Alloc_type(ir_node *node) {
1981 assert(is_Alloc(node));
1982 return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
1986 set_Alloc_type(ir_node *node, ir_type *tp) {
1987 assert(is_Alloc(node));
1988 node->attr.alloc.type = tp;
1992 get_Alloc_where(const ir_node *node) {
1993 assert(is_Alloc(node));
1994 return node->attr.alloc.where;
1998 set_Alloc_where(ir_node *node, ir_where_alloc where) {
1999 assert(is_Alloc(node));
2000 node->attr.alloc.where = where;
2005 get_Free_mem(const ir_node *node) {
2006 assert(is_Free(node));
2007 return get_irn_n(node, 0);
2011 set_Free_mem(ir_node *node, ir_node *mem) {
2012 assert(is_Free(node));
2013 set_irn_n(node, 0, mem);
2017 get_Free_ptr(const ir_node *node) {
2018 assert(is_Free(node));
2019 return get_irn_n(node, 1);
2023 set_Free_ptr(ir_node *node, ir_node *ptr) {
2024 assert(is_Free(node));
2025 set_irn_n(node, 1, ptr);
2029 get_Free_size(const ir_node *node) {
2030 assert(is_Free(node));
2031 return get_irn_n(node, 2);
2035 set_Free_size(ir_node *node, ir_node *size) {
2036 assert(is_Free(node));
2037 set_irn_n(node, 2, size);
2041 get_Free_type(ir_node *node) {
2042 assert(is_Free(node));
2043 return node->attr.free.type = skip_tid(node->attr.free.type);
2047 set_Free_type(ir_node *node, ir_type *tp) {
2048 assert(is_Free(node));
2049 node->attr.free.type = tp;
2053 get_Free_where(const ir_node *node) {
2054 assert(is_Free(node));
2055 return node->attr.free.where;
2059 set_Free_where(ir_node *node, ir_where_alloc where) {
2060 assert(is_Free(node));
2061 node->attr.free.where = where;
/* Returns the predecessor array of a Sync node (skipping the block at in[0]). */
2064 ir_node **get_Sync_preds_arr(ir_node *node) {
2065 assert(is_Sync(node));
2066 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of predecessors of a Sync node. */
2069 int get_Sync_n_preds(const ir_node *node) {
2070 assert(is_Sync(node));
2071 return (get_irn_arity(node));
/* NOTE(review): the body of set_Sync_n_preds is missing from this extract —
 * verify against upstream. */
2075 void set_Sync_n_preds(ir_node *node, int n_preds) {
2076 assert(is_Sync(node));
/* Returns the pos'th predecessor of a Sync node. */
2080 ir_node *get_Sync_pred(const ir_node *node, int pos) {
2081 assert(is_Sync(node));
2082 return get_irn_n(node, pos);
/* Sets the pos'th predecessor of a Sync node. */
2085 void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
2086 assert(is_Sync(node));
2087 set_irn_n(node, pos, pred);
2090 /* Add a new Sync predecessor */
2091 void add_Sync_pred(ir_node *node, ir_node *pred) {
2092 assert(is_Sync(node));
2093 add_irn_n(node, pred);
2096 /* Returns the source language type of a Proj node. */
/* Returns the source-language type of the value produced by a Proj node,
 * or firm_unknown_type when it cannot be derived.
 * NOTE(review): several lines (case labels, declarations, closing braces)
 * are missing from this extract — verify against upstream. */
2097 ir_type *get_Proj_type(ir_node *n) {
2098 ir_type *tp = firm_unknown_type;
2099 ir_node *pred = get_Proj_pred(n);
2101 switch (get_irn_opcode(pred)) {
2104 /* Deal with Start / Call here: we need to know the Proj Nr. */
2105 assert(get_irn_mode(pred) == mode_T);
2106 pred_pred = get_Proj_pred(pred);
/* Proj of Proj of Start: a routine parameter — look it up in the method type. */
2108 if (is_Start(pred_pred)) {
2109 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
2110 tp = get_method_param_type(mtp, get_Proj_proj(n));
/* Proj of Proj of Call: a call result — look it up in the call's method type. */
2111 } else if (is_Call(pred_pred)) {
2112 ir_type *mtp = get_Call_type(pred_pred);
2113 tp = get_method_res_type(mtp, get_Proj_proj(n));
2116 case iro_Start: break;
2117 case iro_Call: break;
/* Proj of Load: if the address is a Sel, use the selected entity's type. */
2119 ir_node *a = get_Load_ptr(pred);
2121 tp = get_entity_type(get_Sel_entity(a));
2130 get_Proj_pred(const ir_node *node) {
2131 assert(is_Proj(node));
2132 return get_irn_n(node, 0);
2136 set_Proj_pred(ir_node *node, ir_node *pred) {
2137 assert(is_Proj(node));
2138 set_irn_n(node, 0, pred);
/* Returns the projection number of a Proj node; in the interprocedural view
 * Filter nodes are handled too (they store it in a different attribute).
 * NOTE(review): the #else branches / closing braces are missing from this
 * extract — verify against upstream. */
2142 get_Proj_proj(const ir_node *node) {
2143 #ifdef INTERPROCEDURAL_VIEW
2144 ir_opcode code = get_irn_opcode(node);
2146 if (code == iro_Proj) {
2147 return node->attr.proj;
/* Not a Proj: in the interprocedural view only Filter remains legal here. */
2150 assert(code == iro_Filter);
2151 return node->attr.filter.proj;
2154 assert(is_Proj(node));
2155 return node->attr.proj;
2156 #endif /* INTERPROCEDURAL_VIEW */
/* Sets the projection number of a Proj (or, interprocedurally, Filter) node. */
2160 set_Proj_proj(ir_node *node, long proj) {
2161 #ifdef INTERPROCEDURAL_VIEW
2162 ir_opcode code = get_irn_opcode(node);
2164 if (code == iro_Proj) {
2165 node->attr.proj = proj;
2168 assert(code == iro_Filter);
2169 node->attr.filter.proj = proj;
2172 assert(is_Proj(node));
2173 node->attr.proj = proj;
2174 #endif /* INTERPROCEDURAL_VIEW */
2177 /* Returns non-zero if a node is a routine parameter. */
/* Out-of-line version of the _is_arg_Proj() macro (routine-parameter Projs). */
2178 int (is_arg_Proj)(const ir_node *node) {
2179 return _is_arg_Proj(node);
/* Returns the predecessor array of a Tuple node (skipping the block at in[0]).
 * NOTE(review): the return-type lines of these accessors are missing from
 * this extract. */
2183 get_Tuple_preds_arr(ir_node *node) {
2184 assert(is_Tuple(node));
2185 return (ir_node **)&(get_irn_in(node)[1]);
/* Returns the number of predecessors of a Tuple node. */
2189 get_Tuple_n_preds(const ir_node *node) {
2190 assert(is_Tuple(node));
2191 return get_irn_arity(node);
/* NOTE(review): the body of set_Tuple_n_preds is missing from this extract —
 * verify against upstream. */
2196 set_Tuple_n_preds(ir_node *node, int n_preds) {
2197 assert(is_Tuple(node));
/* Returns the pos'th predecessor of a Tuple node. */
2202 get_Tuple_pred(const ir_node *node, int pos) {
2203 assert(is_Tuple(node));
2204 return get_irn_n(node, pos);
/* Sets the pos'th predecessor of a Tuple node. */
2208 set_Tuple_pred(ir_node *node, int pos, ir_node *pred) {
2209 assert(is_Tuple(node));
2210 set_irn_n(node, pos, pred);
2214 get_Id_pred(const ir_node *node) {
2215 assert(is_Id(node));
2216 return get_irn_n(node, 0);
2220 set_Id_pred(ir_node *node, ir_node *pred) {
2221 assert(is_Id(node));
2222 set_irn_n(node, 0, pred);
2225 ir_node *get_Confirm_value(const ir_node *node) {
2226 assert(is_Confirm(node));
2227 return get_irn_n(node, 0);
2230 void set_Confirm_value(ir_node *node, ir_node *value) {
2231 assert(is_Confirm(node));
2232 set_irn_n(node, 0, value);
2235 ir_node *get_Confirm_bound(const ir_node *node) {
2236 assert(is_Confirm(node));
2237 return get_irn_n(node, 1);
2240 void set_Confirm_bound(ir_node *node, ir_node *bound) {
2241 assert(is_Confirm(node));
2242 set_irn_n(node, 0, bound);
2245 pn_Cmp get_Confirm_cmp(const ir_node *node) {
2246 assert(is_Confirm(node));
2247 return node->attr.confirm.cmp;
2250 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
2251 assert(is_Confirm(node));
2252 node->attr.confirm.cmp = cmp;
/* Returns the predecessor of a Filter node.
 * NOTE(review): the return-type lines and the bodies of get/set_Filter_pred
 * are missing from this extract — verify against upstream. */
2256 get_Filter_pred(ir_node *node) {
2257 assert(is_Filter(node));
/* Sets the predecessor of a Filter node. */
2262 set_Filter_pred(ir_node *node, ir_node *pred) {
2263 assert(is_Filter(node));
/* Returns the projection number of a Filter node. */
2268 get_Filter_proj(ir_node *node) {
2269 assert(is_Filter(node));
2270 return node->attr.filter.proj;
/* Sets the projection number of a Filter node. */
2274 set_Filter_proj(ir_node *node, long proj) {
2275 assert(is_Filter(node));
2276 node->attr.filter.proj = proj;
2279 /* Don't use get_irn_arity, get_irn_n in implementation as access
2280 shall work independent of view!!! */
/* Installs the interprocedural (call-graph) predecessor array of a Filter;
 * slot 0 mirrors the node's block, the arity entries follow at offset 1. */
2281 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in) {
2282 assert(is_Filter(node));
2283 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2284 ir_graph *irg = get_irn_irg(node);
2285 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2286 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
2287 node->attr.filter.in_cg[0] = node->in[0];
2289 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets a single interprocedural predecessor of a Filter node. */
2292 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2293 assert(is_Filter(node) && node->attr.filter.in_cg &&
2294 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2295 node->attr.filter.in_cg[pos + 1] = pred;
/* Returns the number of interprocedural predecessors of a Filter node. */
2298 int get_Filter_n_cg_preds(ir_node *node) {
2299 assert(is_Filter(node) && node->attr.filter.in_cg);
2300 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Returns the pos'th interprocedural predecessor of a Filter node. */
2303 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2305 assert(is_Filter(node) && node->attr.filter.in_cg &&
2307 arity = ARR_LEN(node->attr.filter.in_cg);
2308 assert(pos < arity - 1);
2309 return node->attr.filter.in_cg[pos + 1];
/* Returns the selector input of a Mux node.
 * NOTE(review): the return statements of the getters and the bodies of
 * set_Mux_sel are missing from this extract — verify against upstream.
 * The setters below write node->in directly; in[1]=sel, in[2]=false,
 * in[3]=true (in[0] is the block). */
2313 ir_node *get_Mux_sel(const ir_node *node) {
2314 assert(is_Mux(node));
/* Sets the selector input of a Mux node. */
2318 void set_Mux_sel(ir_node *node, ir_node *sel) {
2319 assert(is_Mux(node));
/* Returns the false-case input of a Mux node. */
2323 ir_node *get_Mux_false(const ir_node *node) {
2324 assert(is_Mux(node));
/* Sets the false-case input of a Mux node. */
2328 void set_Mux_false(ir_node *node, ir_node *ir_false) {
2329 assert(is_Mux(node));
2330 node->in[2] = ir_false;
/* Returns the true-case input of a Mux node. */
2333 ir_node *get_Mux_true(const ir_node *node) {
2334 assert(is_Mux(node));
/* Sets the true-case input of a Mux node. */
2338 void set_Mux_true(ir_node *node, ir_node *ir_true) {
2339 assert(is_Mux(node));
2340 node->in[3] = ir_true;
2344 ir_node *get_CopyB_mem(const ir_node *node) {
2345 assert(is_CopyB(node));
2346 return get_irn_n(node, 0);
2349 void set_CopyB_mem(ir_node *node, ir_node *mem) {
2350 assert(node->op == op_CopyB);
2351 set_irn_n(node, 0, mem);
2354 ir_node *get_CopyB_dst(const ir_node *node) {
2355 assert(is_CopyB(node));
2356 return get_irn_n(node, 1);
2359 void set_CopyB_dst(ir_node *node, ir_node *dst) {
2360 assert(is_CopyB(node));
2361 set_irn_n(node, 1, dst);
2364 ir_node *get_CopyB_src(const ir_node *node) {
2365 assert(is_CopyB(node));
2366 return get_irn_n(node, 2);
2369 void set_CopyB_src(ir_node *node, ir_node *src) {
2370 assert(is_CopyB(node));
2371 set_irn_n(node, 2, src);
2374 ir_type *get_CopyB_type(ir_node *node) {
2375 assert(is_CopyB(node));
2376 return node->attr.copyb.type = skip_tid(node->attr.copyb.type);
2379 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2380 assert(is_CopyB(node) && data_type);
2381 node->attr.copyb.type = data_type;
2386 get_InstOf_type(ir_node *node) {
2387 assert(node->op == op_InstOf);
2388 return node->attr.instof.type = skip_tid(node->attr.instof.type);
2392 set_InstOf_type(ir_node *node, ir_type *type) {
2393 assert(node->op == op_InstOf);
2394 node->attr.instof.type = type;
2398 get_InstOf_store(const ir_node *node) {
2399 assert(node->op == op_InstOf);
2400 return get_irn_n(node, 0);
2404 set_InstOf_store(ir_node *node, ir_node *obj) {
2405 assert(node->op == op_InstOf);
2406 set_irn_n(node, 0, obj);
2410 get_InstOf_obj(const ir_node *node) {
2411 assert(node->op == op_InstOf);
2412 return get_irn_n(node, 1);
2416 set_InstOf_obj(ir_node *node, ir_node *obj) {
2417 assert(node->op == op_InstOf);
2418 set_irn_n(node, 1, obj);
2421 /* Returns the memory input of a Raise operation. */
2423 get_Raise_mem(const ir_node *node) {
2424 assert(is_Raise(node));
2425 return get_irn_n(node, 0);
2429 set_Raise_mem(ir_node *node, ir_node *mem) {
2430 assert(is_Raise(node));
2431 set_irn_n(node, 0, mem);
2435 get_Raise_exo_ptr(const ir_node *node) {
2436 assert(is_Raise(node));
2437 return get_irn_n(node, 1);
2441 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr) {
2442 assert(is_Raise(node));
2443 set_irn_n(node, 1, exo_ptr);
2448 /* Returns the memory input of a Bound operation. */
2449 ir_node *get_Bound_mem(const ir_node *bound) {
2450 assert(is_Bound(bound));
2451 return get_irn_n(bound, 0);
2454 void set_Bound_mem(ir_node *bound, ir_node *mem) {
2455 assert(is_Bound(bound));
2456 set_irn_n(bound, 0, mem);
2459 /* Returns the index input of a Bound operation. */
2460 ir_node *get_Bound_index(const ir_node *bound) {
2461 assert(is_Bound(bound));
2462 return get_irn_n(bound, 1);
2465 void set_Bound_index(ir_node *bound, ir_node *idx) {
2466 assert(is_Bound(bound));
2467 set_irn_n(bound, 1, idx);
2470 /* Returns the lower bound input of a Bound operation. */
2471 ir_node *get_Bound_lower(const ir_node *bound) {
2472 assert(is_Bound(bound));
2473 return get_irn_n(bound, 2);
2476 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2477 assert(is_Bound(bound));
2478 set_irn_n(bound, 2, lower);
2481 /* Returns the upper bound input of a Bound operation. */
2482 ir_node *get_Bound_upper(const ir_node *bound) {
2483 assert(is_Bound(bound));
2484 return get_irn_n(bound, 3);
2487 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2488 assert(is_Bound(bound));
2489 set_irn_n(bound, 3, upper);
2492 /* Return the operand of a Pin node. */
2493 ir_node *get_Pin_op(const ir_node *pin) {
2494 assert(is_Pin(pin));
2495 return get_irn_n(pin, 0);
2498 void set_Pin_op(ir_node *pin, ir_node *node) {
2499 assert(is_Pin(pin));
2500 set_irn_n(pin, 0, node);
2503 /* Return the assembler text of an ASM pseudo node. */
2504 ident *get_ASM_text(const ir_node *node) {
2505 assert(is_ASM(node));
2506 return node->attr.assem.asm_text;
2509 /* Return the number of input constraints for an ASM node. */
2510 int get_ASM_n_input_constraints(const ir_node *node) {
2511 assert(is_ASM(node));
2512 return ARR_LEN(node->attr.assem.inputs);
2515 /* Return the input constraints for an ASM node. This is a flexible array. */
2516 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node) {
2517 assert(is_ASM(node));
2518 return node->attr.assem.inputs;
2521 /* Return the number of output constraints for an ASM node. */
2522 int get_ASM_n_output_constraints(const ir_node *node) {
2523 assert(is_ASM(node));
2524 return ARR_LEN(node->attr.assem.outputs);
2527 /* Return the output constraints for an ASM node. */
2528 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node) {
2529 assert(is_ASM(node));
2530 return node->attr.assem.outputs;
2533 /* Return the number of clobbered registers for an ASM node. */
2534 int get_ASM_n_clobbers(const ir_node *node) {
2535 assert(is_ASM(node));
2536 return ARR_LEN(node->attr.assem.clobber);
2539 /* Return the list of clobbered registers for an ASM node. */
2540 ident **get_ASM_clobbers(const ir_node *node) {
2541 assert(is_ASM(node));
2542 return node->attr.assem.clobber;
2545 /* returns the graph of a node */
2547 get_irn_irg(const ir_node *node) {
2549 * Do not use get_nodes_Block() here, because this
2550 * will check the pinned state.
2551 * However even a 'wrong' block is always in the proper
2554 if (! is_Block(node))
2555 node = get_irn_n(node, -1);
2556 /* note that get_Block_irg() can handle Bad nodes */
2557 return get_Block_irg(node);
2561 /*----------------------------------------------------------------*/
2562 /* Auxiliary routines */
2563 /*----------------------------------------------------------------*/
/* NOTE(review): this extract drops many lines of the skip_* family
 * (return-type lines, if-conditions, returns and closing braces); the code
 * below is kept byte-identical. skip_Id in particular is explicitly
 * "hand optimized" and order-sensitive — verify any change upstream. */
2566 skip_Proj(ir_node *node) {
2567 /* don't assert node !!! */
2572 node = get_Proj_pred(node);
2578 skip_Proj_const(const ir_node *node) {
2579 /* don't assert node !!! */
2584 node = get_Proj_pred(node);
2590 skip_Tuple(ir_node *node) {
2595 if (is_Proj(node)) {
2596 pred = get_Proj_pred(node);
2597 op = get_irn_op(pred);
2600 * Looks strange but calls get_irn_op() only once
2601 * in most often cases.
2603 if (op == op_Proj) { /* nested Tuple ? */
2604 pred = skip_Tuple(pred);
2606 if (is_Tuple(pred)) {
2607 node = get_Tuple_pred(pred, get_Proj_proj(node));
2610 } else if (op == op_Tuple) {
2611 node = get_Tuple_pred(pred, get_Proj_proj(node));
2618 /* returns operand of node if node is a Cast */
2619 ir_node *skip_Cast(ir_node *node) {
2621 return get_Cast_op(node);
2625 /* returns operand of node if node is a Cast */
2626 const ir_node *skip_Cast_const(const ir_node *node) {
2628 return get_Cast_op(node);
2632 /* returns operand of node if node is a Pin */
2633 ir_node *skip_Pin(ir_node *node) {
2635 return get_Pin_op(node);
2639 /* returns operand of node if node is a Confirm */
2640 ir_node *skip_Confirm(ir_node *node) {
2641 if (is_Confirm(node))
2642 return get_Confirm_value(node);
2646 /* skip all high-level ops */
2647 ir_node *skip_HighLevel_ops(ir_node *node) {
2648 while (is_op_highlevel(get_irn_op(node))) {
2649 node = get_irn_n(node, 0);
2655 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2656 * than any other approach, as Id chains are resolved and all point to the real node, or
2657 * all id's are self loops.
2659 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2660 * a little bit "hand optimized".
2662 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2665 skip_Id(ir_node *node) {
2667 /* don't assert node !!! */
2669 if (!node || (node->op != op_Id)) return node;
2671 /* Don't use get_Id_pred(): We get into an endless loop for
2672 self-referencing Ids. */
2673 pred = node->in[0+1];
2675 if (pred->op != op_Id) return pred;
2677 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2678 ir_node *rem_pred, *res;
2680 if (pred->op != op_Id) return pred; /* shortcut */
2683 assert(get_irn_arity (node) > 0);
2685 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2686 res = skip_Id(rem_pred);
2687 if (res->op == op_Id) /* self-loop */ return node;
2689 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2696 void skip_Id_and_store(ir_node **node) {
2699 if (!n || (n->op != op_Id)) return;
2701 /* Don't use get_Id_pred(): We get into an endless loop for
2702 self-referencing Ids. */
2707 (is_strictConv)(const ir_node *node) {
2708 return _is_strictConv(node);
2712 (is_no_Block)(const ir_node *node) {
2713 return _is_no_Block(node);
2716 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
2718 (is_SymConst_addr_ent)(const ir_node *node) {
2719 return _is_SymConst_addr_ent(node);
2722 /* Returns true if the operation manipulates control flow. */
2723 int is_cfop(const ir_node *node) {
2724 return is_op_cfopcode(get_irn_op(node));
2727 /* Returns true if the operation manipulates interprocedural control flow:
2728 CallBegin, EndReg, EndExcept */
2729 int is_ip_cfop(const ir_node *node) {
2730 return is_ip_cfopcode(get_irn_op(node));
2733 /* Returns true if the operation can change the control flow because
2736 is_fragile_op(const ir_node *node) {
2737 return is_op_fragile(get_irn_op(node));
2740 /* Returns the memory operand of fragile operations. */
/* Returns the memory operand of a fragile operation.
 * NOTE(review): the case labels of this switch are missing from this
 * extract — verify the handled opcodes against upstream. */
2741 ir_node *get_fragile_op_mem(ir_node *node) {
2742 assert(node && is_fragile_op(node));
2744 switch (get_irn_opcode(node)) {
/* For these opcodes the memory operand sits at the generic position. */
2755 return get_irn_n(node, pn_Generic_M);
/* A fragile op not handled above indicates a programming error. */
2760 assert(0 && "should not be reached");
2765 /* Returns the result mode of a Div operation. */
/* Returns the result mode of a division-like operation
 * (Quot, DivMod, Div, Mod).
 * NOTE(review): the default label and the closing braces of this switch are
 * missing from this extract — verify against upstream. */
2766 ir_mode *get_divop_resmod(const ir_node *node) {
2767 switch (get_irn_opcode(node)) {
2768 case iro_Quot : return get_Quot_resmode(node);
2769 case iro_DivMod: return get_DivMod_resmode(node);
2770 case iro_Div : return get_Div_resmode(node);
2771 case iro_Mod : return get_Mod_resmode(node);
/* Any other opcode indicates a programming error. */
2773 assert(0 && "should not be reached");
2778 /* Returns true if the operation is a forking control flow operation. */
2779 int (is_irn_forking)(const ir_node *node) {
2780 return _is_irn_forking(node);
2783 void (copy_node_attr)(const ir_node *old_node, ir_node *new_node) {
2784 _copy_node_attr(old_node, new_node);
2787 /* Return the type associated with the value produced by n
2788 * if the node remarks this type as it is the case for
2789 * Cast, Const, SymConst and some Proj nodes. */
2790 ir_type *(get_irn_type)(ir_node *node) {
2791 return _get_irn_type(node);
2794 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2796 ir_type *(get_irn_type_attr)(ir_node *node) {
2797 return _get_irn_type_attr(node);
2800 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2801 ir_entity *(get_irn_entity_attr)(ir_node *node) {
2802 return _get_irn_entity_attr(node);
2805 /* Returns non-zero for constant-like nodes. */
2806 int (is_irn_constlike)(const ir_node *node) {
2807 return _is_irn_constlike(node);
2811 * Returns non-zero for nodes that are allowed to have keep-alives and
2812 * are neither Block nor PhiM.
2814 int (is_irn_keep)(const ir_node *node) {
2815 return _is_irn_keep(node);
2819 * Returns non-zero for nodes that are always placed in the start block.
2821 int (is_irn_start_block_placed)(const ir_node *node) {
2822 return _is_irn_start_block_placed(node);
2825 /* Returns non-zero for nodes that are machine operations. */
2826 int (is_irn_machine_op)(const ir_node *node) {
2827 return _is_irn_machine_op(node);
2830 /* Returns non-zero for nodes that are machine operands. */
2831 int (is_irn_machine_operand)(const ir_node *node) {
2832 return _is_irn_machine_operand(node);
2835 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2836 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2837 return _is_irn_machine_user(node, n);
2841 /* Gets the string representation of the jump prediction .*/
2842 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
2843 #define X(a) case a: return #a;
2845 X(COND_JMP_PRED_NONE);
2846 X(COND_JMP_PRED_TRUE);
2847 X(COND_JMP_PRED_FALSE);
2853 /* Returns the conditional jump prediction of a Cond node. */
2854 cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond) {
2855 return _get_Cond_jmp_pred(cond);
2858 /* Sets a new conditional jump prediction. */
2859 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2860 _set_Cond_jmp_pred(cond, pred);
2863 /** the get_type operation must be always implemented and return a firm type */
2864 static ir_type *get_Default_type(ir_node *n) {
2866 return get_unknown_type();
2869 /* Sets the get_type operation for an ir_op_ops. */
2870 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
2872 case iro_Const: ops->get_type = get_Const_type; break;
2873 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2874 case iro_Cast: ops->get_type = get_Cast_type; break;
2875 case iro_Proj: ops->get_type = get_Proj_type; break;
2877 /* not allowed to be NULL */
2878 if (! ops->get_type)
2879 ops->get_type = get_Default_type;
2885 /** Return the attribute type of a SymConst node if exists */
2886 static ir_type *get_SymConst_attr_type(ir_node *self) {
2887 symconst_kind kind = get_SymConst_kind(self);
2888 if (SYMCONST_HAS_TYPE(kind))
2889 return get_SymConst_type(self);
2893 /** Return the attribute entity of a SymConst node if exists */
2894 static ir_entity *get_SymConst_attr_entity(ir_node *self) {
2895 symconst_kind kind = get_SymConst_kind(self);
2896 if (SYMCONST_HAS_ENT(kind))
2897 return get_SymConst_entity(self);
2901 /** the get_type_attr operation must be always implemented */
2902 static ir_type *get_Null_type(ir_node *n) {
2904 return firm_unknown_type;
2907 /* Sets the get_type operation for an ir_op_ops. */
2908 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
2910 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2911 case iro_Call: ops->get_type_attr = get_Call_type; break;
2912 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2913 case iro_Free: ops->get_type_attr = get_Free_type; break;
2914 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2916 /* not allowed to be NULL */
2917 if (! ops->get_type_attr)
2918 ops->get_type_attr = get_Null_type;
2924 /** the get_entity_attr operation must be always implemented */
2925 static ir_entity *get_Null_ent(ir_node *n) {
2930 /* Sets the get_type operation for an ir_op_ops. */
2931 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
2933 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2934 case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
2936 /* not allowed to be NULL */
2937 if (! ops->get_entity_attr)
2938 ops->get_entity_attr = get_Null_ent;
2944 /* Sets the debug information of a node. */
2945 void (set_irn_dbg_info)(ir_node *n, dbg_info *db) {
2946 _set_irn_dbg_info(n, db);
2950 * Returns the debug information of an node.
2952 * @param n The node.
2954 dbg_info *(get_irn_dbg_info)(const ir_node *n) {
2955 return _get_irn_dbg_info(n);
2958 /* checks whether a node represents a global address */
2959 int is_Global(const ir_node *node) {
2960 return is_SymConst_addr_ent(node);
2963 /* returns the entity of a global address */
2964 ir_entity *get_Global_entity(const ir_node *node) {
2965 return get_SymConst_entity(node);
2969 * Calculate a hash value of a node.
2971 unsigned firm_default_hash(const ir_node *node) {
2975 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
2976 h = irn_arity = get_irn_intra_arity(node);
2978 /* consider all in nodes... except the block if not a control flow. */
2979 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
2980 h = 9*h + HASH_PTR(get_irn_intra_n(node, i));
2984 h = 9*h + HASH_PTR(get_irn_mode(node));
2986 h = 9*h + HASH_PTR(get_irn_op(node));
2989 } /* firm_default_hash */
2991 /* include generated code */
2992 #include "gen_irnode.c.inl"