2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
48 /* some constants fixing the positions of nodes predecessors
50 #define CALL_PARAM_OFFSET 2
51 #define BUILDIN_PARAM_OFFSET 1
52 #define SEL_INDEX_OFFSET 2
53 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
54 #define END_KEEPALIVE_OFFSET 0
/* Printable names of the pn_Cmp relation constants, indexed by value. */
static const char *pnc_name_arr [] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the name string of a pnc constant.
 */
const char *get_pnc_string(int pnc) {
	const int n_names = (int) (sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0]));
	assert(0 <= pnc && pnc < n_names);
	return pnc_name_arr[pnc];
}
73 * Calculates the negated (Complement(R)) pnc condition.
75 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode) {
78 /* do NOT add the Uo bit for non-floating point values */
79 if (! mode_is_float(mode))
85 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
86 pn_Cmp get_inversed_pnc(long pnc) {
87 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
88 long lesser = pnc & pn_Cmp_Lt;
89 long greater = pnc & pn_Cmp_Gt;
91 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
97 * Indicates, whether additional data can be registered to ir nodes.
98 * If set to 1, this is not possible anymore.
100 static int forbid_new_data = 0;
103 * The amount of additional space for custom data to be allocated upon
104 * creating a new node.
106 unsigned firm_add_node_size = 0;
109 /* register new space for every node */
110 unsigned firm_register_additional_node_data(unsigned size) {
111 assert(!forbid_new_data && "Too late to register additional node data");
116 return firm_add_node_size += size;
120 void init_irnode(void) {
121 /* Forbid the addition of new data to an ir node. */
126 * irnode constructor.
127 * Create a new irnode in irg, with an op, mode, arity and
128 * some incoming irnodes.
129 * If arity is negative, a node with a dynamic array is created.
132 new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
133 int arity, ir_node **in)
136 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
143 p = obstack_alloc(irg->obst, node_size);
144 memset(p, 0, node_size);
145 res = (ir_node *)(p + firm_add_node_size);
147 res->kind = k_ir_node;
151 res->node_idx = irg_register_node_idx(irg, res);
156 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
158 /* not nice but necessary: End and Sync must always have a flexible array */
159 if (op == op_End || op == op_Sync)
160 res->in = NEW_ARR_F(ir_node *, (arity+1));
162 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
163 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
167 set_irn_dbg_info(res, db);
169 res->node_nr = get_irp_new_node_nr();
171 for (i = 0; i < EDGE_KIND_LAST; ++i) {
172 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
173 /* edges will be build immediately */
174 res->edge_info[i].edges_built = 1;
175 res->edge_info[i].out_count = 0;
178 /* don't put this into the for loop, arity is -1 for some nodes! */
179 edges_notify_edge(res, -1, res->in[0], NULL, irg);
180 for (i = 1; i <= arity; ++i)
181 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
183 hook_new_node(irg, res);
184 if (get_irg_phase_state(irg) == phase_backend) {
185 be_info_new_node(res);
191 /*-- getting some parameters from ir_nodes --*/
193 int (is_ir_node)(const void *thing) {
194 return _is_ir_node(thing);
197 int (get_irn_intra_arity)(const ir_node *node) {
198 return _get_irn_intra_arity(node);
201 int (get_irn_inter_arity)(const ir_node *node) {
202 return _get_irn_inter_arity(node);
205 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
207 int (get_irn_arity)(const ir_node *node) {
208 return _get_irn_arity(node);
211 /* Returns the array with ins. This array is shifted with respect to the
212 array accessed by get_irn_n: The block operand is at position 0 not -1.
213 (@@@ This should be changed.)
214 The order of the predecessors in this array is not guaranteed, except that
215 lists of operands as predecessors of Block or arguments of a Call are
217 ir_node **get_irn_in(const ir_node *node) {
219 #ifdef INTERPROCEDURAL_VIEW
220 if (get_interprocedural_view()) { /* handle Filter and Block specially */
221 if (get_irn_opcode(node) == iro_Filter) {
222 assert(node->attr.filter.in_cg);
223 return node->attr.filter.in_cg;
224 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
225 return node->attr.block.in_cg;
227 /* else fall through */
229 #endif /* INTERPROCEDURAL_VIEW */
233 void set_irn_in(ir_node *node, int arity, ir_node **in) {
236 ir_graph *irg = current_ir_graph;
239 #ifdef INTERPROCEDURAL_VIEW
240 if (get_interprocedural_view()) { /* handle Filter and Block specially */
241 ir_opcode code = get_irn_opcode(node);
242 if (code == iro_Filter) {
243 assert(node->attr.filter.in_cg);
244 pOld_in = &node->attr.filter.in_cg;
245 } else if (code == iro_Block && node->attr.block.in_cg) {
246 pOld_in = &node->attr.block.in_cg;
251 #endif /* INTERPROCEDURAL_VIEW */
255 for (i = 0; i < arity; i++) {
256 if (i < ARR_LEN(*pOld_in)-1)
257 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
259 edges_notify_edge(node, i, in[i], NULL, irg);
261 for (;i < ARR_LEN(*pOld_in)-1; i++) {
262 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
265 if (arity != ARR_LEN(*pOld_in) - 1) {
266 ir_node * block = (*pOld_in)[0];
267 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
268 (*pOld_in)[0] = block;
270 fix_backedges(irg->obst, node);
272 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
275 ir_node *(get_irn_intra_n)(const ir_node *node, int n) {
276 return _get_irn_intra_n(node, n);
279 ir_node *(get_irn_inter_n)(const ir_node *node, int n) {
280 return _get_irn_inter_n(node, n);
283 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
285 ir_node *(get_irn_n)(const ir_node *node, int n) {
286 return _get_irn_n(node, n);
289 void set_irn_n(ir_node *node, int n, ir_node *in) {
290 assert(node && node->kind == k_ir_node);
292 assert(n < get_irn_arity(node));
293 assert(in && in->kind == k_ir_node);
295 #ifdef INTERPROCEDURAL_VIEW
296 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
297 /* Change block pred in both views! */
298 node->in[n + 1] = in;
299 assert(node->attr.filter.in_cg);
300 node->attr.filter.in_cg[n + 1] = in;
303 if (get_interprocedural_view()) { /* handle Filter and Block specially */
304 if (get_irn_opcode(node) == iro_Filter) {
305 assert(node->attr.filter.in_cg);
306 node->attr.filter.in_cg[n + 1] = in;
308 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
309 node->attr.block.in_cg[n + 1] = in;
312 /* else fall through */
314 #endif /* INTERPROCEDURAL_VIEW */
317 hook_set_irn_n(node, n, in, node->in[n + 1]);
319 /* Here, we rely on src and tgt being in the current ir graph */
320 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
322 node->in[n + 1] = in;
325 int add_irn_n(ir_node *node, ir_node *in) {
327 ir_graph *irg = get_irn_irg(node);
329 assert(node->op->opar == oparity_dynamic);
330 pos = ARR_LEN(node->in) - 1;
331 ARR_APP1(ir_node *, node->in, in);
332 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
335 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
340 void del_Sync_n(ir_node *n, int i)
342 int arity = get_Sync_n_preds(n);
343 ir_node *last_pred = get_Sync_pred(n, arity - 1);
344 set_Sync_pred(n, i, last_pred);
345 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
346 ARR_SHRINKLEN(get_irn_in(n), arity);
349 int (get_irn_deps)(const ir_node *node) {
350 return _get_irn_deps(node);
353 ir_node *(get_irn_dep)(const ir_node *node, int pos) {
354 return _get_irn_dep(node, pos);
357 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep) {
358 _set_irn_dep(node, pos, dep);
361 int add_irn_dep(ir_node *node, ir_node *dep) {
364 /* DEP edges are only allowed in backend phase */
365 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
366 if (node->deps == NULL) {
367 node->deps = NEW_ARR_F(ir_node *, 1);
373 for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
374 if(node->deps[i] == NULL)
377 if(node->deps[i] == dep)
381 if (first_zero >= 0) {
382 node->deps[first_zero] = dep;
385 ARR_APP1(ir_node *, node->deps, dep);
390 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
395 void add_irn_deps(ir_node *tgt, ir_node *src) {
398 for (i = 0, n = get_irn_deps(src); i < n; ++i)
399 add_irn_dep(tgt, get_irn_dep(src, i));
403 ir_mode *(get_irn_mode)(const ir_node *node) {
404 return _get_irn_mode(node);
407 void (set_irn_mode)(ir_node *node, ir_mode *mode) {
408 _set_irn_mode(node, mode);
411 /** Gets the string representation of the mode. */
412 const char *get_irn_modename(const ir_node *node) {
414 return get_mode_name(node->mode);
417 ident *get_irn_modeident(const ir_node *node) {
419 return get_mode_ident(node->mode);
422 ir_op *(get_irn_op)(const ir_node *node) {
423 return _get_irn_op(node);
426 /* should be private to the library: */
427 void (set_irn_op)(ir_node *node, ir_op *op) {
428 _set_irn_op(node, op);
431 unsigned (get_irn_opcode)(const ir_node *node) {
432 return _get_irn_opcode(node);
435 const char *get_irn_opname(const ir_node *node) {
437 if (is_Phi0(node)) return "Phi0";
438 return get_id_str(node->op->name);
441 ident *get_irn_opident(const ir_node *node) {
443 return node->op->name;
446 ir_visited_t (get_irn_visited)(const ir_node *node) {
447 return _get_irn_visited(node);
450 void (set_irn_visited)(ir_node *node, ir_visited_t visited) {
451 _set_irn_visited(node, visited);
454 void (mark_irn_visited)(ir_node *node) {
455 _mark_irn_visited(node);
458 int (irn_visited)(const ir_node *node) {
459 return _irn_visited(node);
462 int (irn_visited_else_mark)(ir_node *node) {
463 return _irn_visited_else_mark(node);
466 void (set_irn_link)(ir_node *node, void *link) {
467 _set_irn_link(node, link);
470 void *(get_irn_link)(const ir_node *node) {
471 return _get_irn_link(node);
474 op_pin_state (get_irn_pinned)(const ir_node *node) {
475 return _get_irn_pinned(node);
478 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node) {
479 return _is_irn_pinned_in_irg(node);
482 void set_irn_pinned(ir_node *node, op_pin_state state) {
483 /* due to optimization an opt may be turned into a Tuple */
487 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
488 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
490 node->attr.except.pin_state = state;
493 /* Outputs a unique number for this node */
494 long get_irn_node_nr(const ir_node *node) {
496 return node->node_nr;
499 const_attr *get_irn_const_attr(ir_node *node) {
500 assert(is_Const(node));
501 return &node->attr.con;
504 long get_irn_proj_attr(ir_node *node) {
505 /* BEWARE: check for true Proj node here, no Filter */
506 assert(node->op == op_Proj);
507 return node->attr.proj;
510 alloc_attr *get_irn_alloc_attr(ir_node *node) {
511 assert(is_Alloc(node));
512 return &node->attr.alloc;
515 free_attr *get_irn_free_attr(ir_node *node) {
516 assert(is_Free(node));
517 return &node->attr.free;
520 symconst_attr *get_irn_symconst_attr(ir_node *node) {
521 assert(is_SymConst(node));
522 return &node->attr.symc;
525 call_attr *get_irn_call_attr(ir_node *node) {
526 assert(is_Call(node));
527 return &node->attr.call;
530 sel_attr *get_irn_sel_attr(ir_node *node) {
531 assert(is_Sel(node));
532 return &node->attr.sel;
535 phi_attr *get_irn_phi_attr(ir_node *node) {
536 return &node->attr.phi;
539 block_attr *get_irn_block_attr(ir_node *node) {
540 assert(is_Block(node));
541 return &node->attr.block;
544 load_attr *get_irn_load_attr(ir_node *node) {
545 assert(is_Load(node));
546 return &node->attr.load;
549 store_attr *get_irn_store_attr(ir_node *node) {
550 assert(is_Store(node));
551 return &node->attr.store;
554 except_attr *get_irn_except_attr(ir_node *node) {
555 assert(node->op == op_Div || node->op == op_Quot ||
556 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
557 return &node->attr.except;
560 divmod_attr *get_irn_divmod_attr(ir_node *node) {
561 assert(node->op == op_Div || node->op == op_Quot ||
562 node->op == op_DivMod || node->op == op_Mod);
563 return &node->attr.divmod;
566 builtin_attr *get_irn_builtin_attr(ir_node *node) {
567 assert(is_Builtin(node));
568 return &node->attr.builtin;
571 void *(get_irn_generic_attr)(ir_node *node) {
572 assert(is_ir_node(node));
573 return _get_irn_generic_attr(node);
576 const void *(get_irn_generic_attr_const)(const ir_node *node) {
577 assert(is_ir_node(node));
578 return _get_irn_generic_attr_const(node);
581 unsigned (get_irn_idx)(const ir_node *node) {
582 assert(is_ir_node(node));
583 return _get_irn_idx(node);
586 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
588 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
589 if (get_irn_n(node, i) == arg)
595 /** manipulate fields of individual nodes **/
597 /* this works for all except Block */
598 ir_node *get_nodes_block(const ir_node *node) {
599 assert(node->op != op_Block);
600 return get_irn_n(node, -1);
603 void set_nodes_block(ir_node *node, ir_node *block) {
604 assert(node->op != op_Block);
605 set_irn_n(node, -1, block);
608 /* this works for all except Block */
609 ir_node *get_nodes_MacroBlock(const ir_node *node) {
610 assert(node->op != op_Block);
611 return get_Block_MacroBlock(get_irn_n(node, -1));
614 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
615 * from Start. If so returns frame type, else Null. */
616 ir_type *is_frame_pointer(const ir_node *n) {
617 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
618 ir_node *start = get_Proj_pred(n);
619 if (is_Start(start)) {
620 return get_irg_frame_type(get_irn_irg(start));
626 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
627 * from Start. If so returns tls type, else Null. */
628 ir_type *is_tls_pointer(const ir_node *n) {
629 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
630 ir_node *start = get_Proj_pred(n);
631 if (is_Start(start)) {
632 return get_tls_type();
638 ir_node **get_Block_cfgpred_arr(ir_node *node) {
639 assert(is_Block(node));
640 return (ir_node **)&(get_irn_in(node)[1]);
643 int (get_Block_n_cfgpreds)(const ir_node *node) {
644 return _get_Block_n_cfgpreds(node);
647 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos) {
648 return _get_Block_cfgpred(node, pos);
651 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
652 assert(is_Block(node));
653 set_irn_n(node, pos, pred);
656 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred) {
659 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
660 if (get_Block_cfgpred_block(block, i) == pred)
666 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos) {
667 return _get_Block_cfgpred_block(node, pos);
670 int get_Block_matured(const ir_node *node) {
671 assert(is_Block(node));
672 return (int)node->attr.block.is_matured;
675 void set_Block_matured(ir_node *node, int matured) {
676 assert(is_Block(node));
677 node->attr.block.is_matured = matured;
680 ir_visited_t (get_Block_block_visited)(const ir_node *node) {
681 return _get_Block_block_visited(node);
684 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit) {
685 _set_Block_block_visited(node, visit);
688 /* For this current_ir_graph must be set. */
689 void (mark_Block_block_visited)(ir_node *node) {
690 _mark_Block_block_visited(node);
693 int (Block_block_visited)(const ir_node *node) {
694 return _Block_block_visited(node);
697 ir_node *get_Block_graph_arr(ir_node *node, int pos) {
698 assert(is_Block(node));
699 return node->attr.block.graph_arr[pos+1];
702 void set_Block_graph_arr(ir_node *node, int pos, ir_node *value) {
703 assert(is_Block(node));
704 node->attr.block.graph_arr[pos+1] = value;
707 #ifdef INTERPROCEDURAL_VIEW
708 void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[]) {
709 assert(is_Block(node));
710 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
711 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
712 node->attr.block.in_cg[0] = NULL;
713 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
715 /* Fix backedge array. fix_backedges() operates depending on
716 interprocedural_view. */
717 int ipv = get_interprocedural_view();
718 set_interprocedural_view(1);
719 fix_backedges(current_ir_graph->obst, node);
720 set_interprocedural_view(ipv);
723 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
726 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred) {
727 assert(is_Block(node) && node->attr.block.in_cg &&
728 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
729 node->attr.block.in_cg[pos + 1] = pred;
732 ir_node **get_Block_cg_cfgpred_arr(ir_node *node) {
733 assert(is_Block(node));
734 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
737 int get_Block_cg_n_cfgpreds(const ir_node *node) {
738 assert(is_Block(node));
739 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
742 ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos) {
743 assert(is_Block(node) && node->attr.block.in_cg);
744 return node->attr.block.in_cg[pos + 1];
747 void remove_Block_cg_cfgpred_arr(ir_node *node) {
748 assert(is_Block(node));
749 node->attr.block.in_cg = NULL;
751 #endif /* INTERPROCEDURAL_VIEW */
753 ir_node *(set_Block_dead)(ir_node *block) {
754 return _set_Block_dead(block);
757 int (is_Block_dead)(const ir_node *block) {
758 return _is_Block_dead(block);
761 ir_extblk *get_Block_extbb(const ir_node *block) {
763 assert(is_Block(block));
764 res = block->attr.block.extblk;
765 assert(res == NULL || is_ir_extbb(res));
769 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
770 assert(is_Block(block));
771 assert(extblk == NULL || is_ir_extbb(extblk));
772 block->attr.block.extblk = extblk;
775 /* Returns the macro block header of a block.*/
776 ir_node *get_Block_MacroBlock(const ir_node *block) {
778 assert(is_Block(block));
779 mbh = get_irn_n(block, -1);
780 /* once macro block header is respected by all optimizations,
781 this assert can be removed */
786 /* Sets the macro block header of a block. */
787 void set_Block_MacroBlock(ir_node *block, ir_node *mbh) {
788 assert(is_Block(block));
790 assert(is_Block(mbh));
791 set_irn_n(block, -1, mbh);
794 /* returns the macro block header of a node. */
795 ir_node *get_irn_MacroBlock(const ir_node *n) {
797 n = get_nodes_block(n);
798 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
802 return get_Block_MacroBlock(n);
805 /* returns the graph of a Block. */
806 ir_graph *(get_Block_irg)(const ir_node *block) {
807 return _get_Block_irg(block);
810 ir_entity *create_Block_entity(ir_node *block) {
812 assert(is_Block(block));
814 entity = block->attr.block.entity;
815 if (entity == NULL) {
819 glob = get_glob_type();
820 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
821 nr = get_irp_next_label_nr();
822 set_entity_label(entity, nr);
823 set_entity_compiler_generated(entity, 1);
824 set_entity_allocation(entity, allocation_static);
826 block->attr.block.entity = entity;
831 ir_entity *get_Block_entity(const ir_node *block) {
832 assert(is_Block(block));
833 return block->attr.block.entity;
836 void set_Block_entity(ir_node *block, ir_entity *entity)
838 assert(is_Block(block));
839 assert(get_entity_type(entity) == get_code_type());
840 block->attr.block.entity = entity;
843 int has_Block_entity(const ir_node *block)
845 return block->attr.block.entity != NULL;
848 ir_node *(get_Block_phis)(const ir_node *block) {
849 return _get_Block_phis(block);
852 void (set_Block_phis)(ir_node *block, ir_node *phi) {
853 _set_Block_phis(block, phi);
856 void (add_Block_phi)(ir_node *block, ir_node *phi) {
857 _add_Block_phi(block, phi);
860 /* Get the Block mark (single bit). */
861 unsigned (get_Block_mark)(const ir_node *block) {
862 return _get_Block_mark(block);
865 /* Set the Block mark (single bit). */
866 void (set_Block_mark)(ir_node *block, unsigned mark) {
867 _set_Block_mark(block, mark);
870 int get_End_n_keepalives(const ir_node *end) {
872 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
875 ir_node *get_End_keepalive(const ir_node *end, int pos) {
877 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
880 void add_End_keepalive(ir_node *end, ir_node *ka) {
885 void set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
887 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
890 /* Set new keep-alives */
891 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
893 ir_graph *irg = get_irn_irg(end);
895 /* notify that edges are deleted */
896 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
897 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
899 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
901 for (i = 0; i < n; ++i) {
902 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
903 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
907 /* Set new keep-alives from old keep-alives, skipping irn */
908 void remove_End_keepalive(ir_node *end, ir_node *irn) {
909 int n = get_End_n_keepalives(end);
914 for (i = n -1; i >= 0; --i) {
915 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
925 irg = get_irn_irg(end);
927 /* remove the edge */
928 edges_notify_edge(end, idx, NULL, irn, irg);
931 /* exchange with the last one */
932 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
933 edges_notify_edge(end, n - 1, NULL, old, irg);
934 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
935 edges_notify_edge(end, idx, old, NULL, irg);
937 /* now n - 1 keeps, 1 block input */
938 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
941 /* remove Bads, NoMems and doublets from the keep-alive set */
942 void remove_End_Bads_and_doublets(ir_node *end) {
944 int idx, n = get_End_n_keepalives(end);
950 irg = get_irn_irg(end);
951 pset_new_init(&keeps);
953 for (idx = n - 1; idx >= 0; --idx) {
954 ir_node *ka = get_End_keepalive(end, idx);
956 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
957 /* remove the edge */
958 edges_notify_edge(end, idx, NULL, ka, irg);
961 /* exchange with the last one */
962 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
963 edges_notify_edge(end, n - 1, NULL, old, irg);
964 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
965 edges_notify_edge(end, idx, old, NULL, irg);
969 pset_new_insert(&keeps, ka);
972 /* n keeps, 1 block input */
973 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
975 pset_new_destroy(&keeps);
978 void free_End(ir_node *end) {
982 end->in = NULL; /* @@@ make sure we get an error if we use the
983 in array afterwards ... */
986 /* Return the target address of an IJmp */
987 ir_node *get_IJmp_target(const ir_node *ijmp) {
988 assert(is_IJmp(ijmp));
989 return get_irn_n(ijmp, 0);
992 /** Sets the target address of an IJmp */
993 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
994 assert(is_IJmp(ijmp));
995 set_irn_n(ijmp, 0, tgt);
999 > Implementing the case construct (which is where the constant Proj node is
1000 > important) involves far more than simply determining the constant values.
1001 > We could argue that this is more properly a function of the translator from
1002 > Firm to the target machine. That could be done if there was some way of
1003 > projecting "default" out of the Cond node.
1004 I know it's complicated.
1005 Basically there are two problems:
1006 - determining the gaps between the Projs
1007 - determining the biggest case constant to know the proj number for
1009 I see several solutions:
1010 1. Introduce a ProjDefault node. Solves both problems.
1011 This means to extend all optimizations executed during construction.
1012 2. Give the Cond node for switch two flavors:
1013 a) there are no gaps in the Projs (existing flavor)
1014 b) gaps may exist, default proj is still the Proj with the largest
1015 projection number. This covers also the gaps.
1016 3. Fix the semantic of the Cond to that of 2b)
1018 Solution 2 seems to be the best:
1019 Computing the gaps in the Firm representation is not too hard, i.e.,
1020 libFIRM can implement a routine that transforms between the two
1021 flavours. This is also possible for 1) but 2) does not require to
1022 change any existing optimization.
1023 Further it should be far simpler to determine the biggest constant than
1024 to compute all gaps.
1025 I don't want to choose 3) as 2a) seems to have advantages for
1026 dataflow analysis and 3) does not allow to convert the representation to
1030 const char *get_cond_kind_name(cond_kind kind)
1032 #define X(a) case a: return #a;
1042 get_Cond_selector(const ir_node *node) {
1043 assert(is_Cond(node));
1044 return get_irn_n(node, 0);
1048 set_Cond_selector(ir_node *node, ir_node *selector) {
1049 assert(is_Cond(node));
1050 set_irn_n(node, 0, selector);
1054 get_Cond_kind(const ir_node *node) {
1055 assert(is_Cond(node));
1056 return node->attr.cond.kind;
1060 set_Cond_kind(ir_node *node, cond_kind kind) {
1061 assert(is_Cond(node));
1062 node->attr.cond.kind = kind;
1066 get_Cond_default_proj(const ir_node *node) {
1067 assert(is_Cond(node));
1068 return node->attr.cond.default_proj;
1071 void set_Cond_default_proj(ir_node *node, long defproj) {
1072 assert(is_Cond(node));
1073 node->attr.cond.default_proj = defproj;
1077 get_Return_mem(const ir_node *node) {
1078 assert(is_Return(node));
1079 return get_irn_n(node, 0);
1083 set_Return_mem(ir_node *node, ir_node *mem) {
1084 assert(is_Return(node));
1085 set_irn_n(node, 0, mem);
1089 get_Return_n_ress(const ir_node *node) {
1090 assert(is_Return(node));
1091 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1095 get_Return_res_arr(ir_node *node) {
1096 assert(is_Return(node));
1097 if (get_Return_n_ress(node) > 0)
1098 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1105 set_Return_n_res(ir_node *node, int results) {
1106 assert(is_Return(node));
1111 get_Return_res(const ir_node *node, int pos) {
1112 assert(is_Return(node));
1113 assert(get_Return_n_ress(node) > pos);
1114 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1118 set_Return_res(ir_node *node, int pos, ir_node *res){
1119 assert(is_Return(node));
1120 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1123 tarval *(get_Const_tarval)(const ir_node *node) {
1124 return _get_Const_tarval(node);
1128 set_Const_tarval(ir_node *node, tarval *con) {
1129 assert(is_Const(node));
1130 node->attr.con.tv = con;
1133 int (is_Const_null)(const ir_node *node) {
1134 return _is_Const_null(node);
1137 int (is_Const_one)(const ir_node *node) {
1138 return _is_Const_one(node);
1141 int (is_Const_all_one)(const ir_node *node) {
1142 return _is_Const_all_one(node);
1146 /* The source language type. Must be an atomic type. Mode of type must
1147 be mode of node. For tarvals from entities type must be pointer to
1150 get_Const_type(ir_node *node) {
1151 assert(is_Const(node));
1152 return node->attr.con.tp;
1156 set_Const_type(ir_node *node, ir_type *tp) {
1157 assert(is_Const(node));
1158 if (tp != firm_unknown_type) {
1159 assert(is_atomic_type(tp));
1160 assert(get_type_mode(tp) == get_irn_mode(node));
1162 node->attr.con.tp = tp;
1167 get_SymConst_kind(const ir_node *node) {
1168 assert(is_SymConst(node));
1169 return node->attr.symc.kind;
1173 set_SymConst_kind(ir_node *node, symconst_kind kind) {
1174 assert(is_SymConst(node));
1175 node->attr.symc.kind = kind;
1179 get_SymConst_type(const ir_node *node) {
1180 /* the cast here is annoying, but we have to compensate for
1182 ir_node *irn = (ir_node *)node;
1183 assert(is_SymConst(node) &&
1184 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1185 return irn->attr.symc.sym.type_p;
1189 set_SymConst_type(ir_node *node, ir_type *tp) {
1190 assert(is_SymConst(node) &&
1191 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1192 node->attr.symc.sym.type_p = tp;
1196 get_SymConst_name(const ir_node *node) {
1197 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1198 return node->attr.symc.sym.ident_p;
1202 set_SymConst_name(ir_node *node, ident *name) {
1203 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1204 node->attr.symc.sym.ident_p = name;
1208 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1209 ir_entity *get_SymConst_entity(const ir_node *node) {
1210 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1211 return node->attr.symc.sym.entity_p;
1214 void set_SymConst_entity(ir_node *node, ir_entity *ent) {
1215 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1216 node->attr.symc.sym.entity_p = ent;
/* --- SymConst attribute accessors ---------------------------------- */
/* Return the enum constant of a SymConst; kind must carry an enum symbol. */
1219 ir_enum_const *get_SymConst_enum(const ir_node *node) {
1220 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1221 return node->attr.symc.sym.enum_p;
/* Set the enum constant of a SymConst; kind must carry an enum symbol. */
1224 void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
1225 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1226 node->attr.symc.sym.enum_p = ec;
/* Return the whole symbol union of a SymConst (kind determines the member). */
1229 union symconst_symbol
1230 get_SymConst_symbol(const ir_node *node) {
1231 assert(is_SymConst(node));
1232 return node->attr.symc.sym;
/* Replace the whole symbol union of a SymConst. */
1236 set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
1237 assert(is_SymConst(node));
1238 node->attr.symc.sym = sym;
/* Return the source-language value type attached to a SymConst. */
1242 get_SymConst_value_type(ir_node *node) {
1243 assert(is_SymConst(node));
1244 return node->attr.symc.tp;
/* Attach a source-language value type to a SymConst. */
1248 set_SymConst_value_type(ir_node *node, ir_type *tp) {
1249 assert(is_SymConst(node));
1250 node->attr.symc.tp = tp;
/* --- Sel accessors: memory is pred 0, pointer pred 1, indices follow ---- */
1254 get_Sel_mem(const ir_node *node) {
1255 assert(is_Sel(node));
1256 return get_irn_n(node, 0);
1260 set_Sel_mem(ir_node *node, ir_node *mem) {
1261 assert(is_Sel(node));
1262 set_irn_n(node, 0, mem);
1266 get_Sel_ptr(const ir_node *node) {
1267 assert(is_Sel(node));
1268 return get_irn_n(node, 1);
1272 set_Sel_ptr(ir_node *node, ir_node *ptr) {
1273 assert(is_Sel(node));
1274 set_irn_n(node, 1, ptr);
/* Number of index operands = arity minus the fixed mem/ptr predecessors. */
1278 get_Sel_n_indexs(const ir_node *node) {
1279 assert(is_Sel(node));
1280 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
/* Raw pointer into the in-array at the first index operand.
 * NOTE(review): the non-positive-count branch is not visible here —
 * presumably NULL is returned when there are no indices; verify. */
1284 get_Sel_index_arr(ir_node *node) {
1285 assert(is_Sel(node));
1286 if (get_Sel_n_indexs(node) > 0)
1287 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1293 get_Sel_index(const ir_node *node, int pos) {
1294 assert(is_Sel(node));
1295 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1299 set_Sel_index(ir_node *node, int pos, ir_node *index) {
1300 assert(is_Sel(node));
1301 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
/* Entity selected by this Sel (stored in the node attribute, not an input). */
1305 get_Sel_entity(const ir_node *node) {
1306 assert(is_Sel(node));
1307 return node->attr.sel.entity;
1310 /* need a version without const to prevent warning */
1311 static ir_entity *_get_Sel_entity(ir_node *node) {
1312 return get_Sel_entity(node);
1316 set_Sel_entity(ir_node *node, ir_entity *ent) {
1317 assert(is_Sel(node));
1318 node->attr.sel.entity = ent;
1322 /* For unary and binary arithmetic operations the access to the
1323 operands can be factored out. Left is the first, right the
1324 second arithmetic value as listed in tech report 0999-33.
1325 unops are: Minus, Abs, Not, Conv, Cast
1326 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1327 Shr, Shrs, Rotate, Cmp */
/* --- Call accessors: mem is pred 0, callee ptr pred 1, params follow --- */
1331 get_Call_mem(const ir_node *node) {
1332 assert(is_Call(node));
1333 return get_irn_n(node, 0);
1337 set_Call_mem(ir_node *node, ir_node *mem) {
1338 assert(is_Call(node));
1339 set_irn_n(node, 0, mem);
1343 get_Call_ptr(const ir_node *node) {
1344 assert(is_Call(node));
1345 return get_irn_n(node, 1);
1349 set_Call_ptr(ir_node *node, ir_node *ptr) {
1350 assert(is_Call(node));
1351 set_irn_n(node, 1, ptr);
/* Raw pointer into the in-array at the first parameter. */
1355 get_Call_param_arr(ir_node *node) {
1356 assert(is_Call(node));
1357 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
/* Parameter count = arity minus the fixed mem/ptr predecessors. */
1361 get_Call_n_params(const ir_node *node) {
1362 assert(is_Call(node));
1363 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1367 get_Call_param(const ir_node *node, int pos) {
1368 assert(is_Call(node));
1369 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1373 set_Call_param(ir_node *node, int pos, ir_node *param) {
1374 assert(is_Call(node));
1375 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
/* Method type of the called entity (or unknown_type). */
1379 get_Call_type(ir_node *node) {
1380 assert(is_Call(node));
1381 return node->attr.call.type;
1385 set_Call_type(ir_node *node, ir_type *tp) {
1386 assert(is_Call(node));
1387 assert((get_unknown_type() == tp) || is_Method_type(tp));
1388 node->attr.call.type = tp;
/* Tail-call flag accessors; setter normalizes any non-zero value to 1. */
1392 get_Call_tail_call(const ir_node *node) {
1393 assert(is_Call(node));
1394 return node->attr.call.tail_call;
1398 set_Call_tail_call(ir_node *node, unsigned tail_call) {
1399 assert(is_Call(node));
1400 node->attr.call.tail_call = tail_call != 0;
/* --- Builtin accessors (same layout: mem pred 0, params follow) -------- */
1404 get_Builtin_mem(const ir_node *node) {
1405 assert(is_Builtin(node));
1406 return get_irn_n(node, 0);
/* NOTE(review): "Builin" is misspelled but this is the published API name;
 * renaming it would break external callers, so it is kept as-is. */
1410 set_Builin_mem(ir_node *node, ir_node *mem) {
1411 assert(is_Builtin(node));
1412 set_irn_n(node, 0, mem);
1416 get_Builtin_kind(const ir_node *node) {
1417 assert(is_Builtin(node));
1418 return node->attr.builtin.kind;
1422 set_Builtin_kind(ir_node *node, ir_builtin_kind kind) {
1423 assert(is_Builtin(node));
1424 node->attr.builtin.kind = kind;
1428 get_Builtin_param_arr(ir_node *node) {
1429 assert(is_Builtin(node));
1430 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1434 get_Builtin_n_params(const ir_node *node) {
1435 assert(is_Builtin(node));
1436 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1440 get_Builtin_param(const ir_node *node, int pos) {
1441 assert(is_Builtin(node));
1442 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1446 set_Builtin_param(ir_node *node, int pos, ir_node *param) {
1447 assert(is_Builtin(node));
1448 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1452 get_Builtin_type(ir_node *node) {
1453 assert(is_Builtin(node));
1454 return node->attr.builtin.type;
1458 set_Builtin_type(ir_node *node, ir_type *tp) {
1459 assert(is_Builtin(node));
1460 assert((get_unknown_type() == tp) || is_Method_type(tp));
1461 node->attr.builtin.type = tp;
/* Stringify a builtin kind via the X macro; cases beyond those shown here
 * are not visible in this excerpt. */
1464 /* Returns a human readable string for the ir_builtin_kind. */
1465 const char *get_builtin_kind_name(ir_builtin_kind kind) {
1466 #define X(a) case a: return #a;
1469 X(ir_bk_debugbreak);
1470 X(ir_bk_return_address);
1471 X(ir_bk_frame_address);
1481 X(ir_bk_inner_trampoline);
/* True iff callee information was computed for this graph AND this Call
 * actually carries a callee array. */
1488 int Call_has_callees(const ir_node *node) {
1489 assert(is_Call(node));
1490 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1491 (node->attr.call.callee_arr != NULL));
/* Number of statically known possible callees; requires a callee array. */
1494 int get_Call_n_callees(const ir_node *node) {
1495 assert(is_Call(node) && node->attr.call.callee_arr);
1496 return ARR_LEN(node->attr.call.callee_arr);
1499 ir_entity *get_Call_callee(const ir_node *node, int pos) {
1500 assert(pos >= 0 && pos < get_Call_n_callees(node));
1501 return node->attr.call.callee_arr[pos];
/* Install a callee array of length n, (re)allocating on the graph obstack
 * only when the existing array is missing or has a different length.
 * NOTE(review): allocates on current_ir_graph's obstack, not necessarily
 * the node's own graph — verify that this is intentional. */
1504 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr) {
1505 assert(is_Call(node));
1506 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1507 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1509 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
/* Drop the callee array; the memory stays on the obstack (no free here). */
1512 void remove_Call_callee_arr(ir_node *node) {
1513 assert(is_Call(node));
1514 node->attr.call.callee_arr = NULL;
/* --- CallBegin accessors (interprocedural view) ------------------------ */
1517 ir_node *get_CallBegin_ptr(const ir_node *node) {
1518 assert(is_CallBegin(node));
1519 return get_irn_n(node, 0);
1522 void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
1523 assert(is_CallBegin(node));
1524 set_irn_n(node, 0, ptr);
1527 ir_node *get_CallBegin_call(const ir_node *node) {
1528 assert(is_CallBegin(node));
1529 return node->attr.callbegin.call;
1532 void set_CallBegin_call(ir_node *node, ir_node *call) {
1533 assert(is_CallBegin(node));
1534 node->attr.callbegin.call = call;
/* Conservative test: only detects direct recursion through a SymConst
 * address; an indirect self-call still returns 0. */
1538 * Returns non-zero if a Call is surely a self-recursive Call.
1539 * Beware: if this functions returns 0, the call might be self-recursive!
1541 int is_self_recursive_Call(const ir_node *call) {
1542 const ir_node *callee = get_Call_ptr(call);
1544 if (is_SymConst_addr_ent(callee)) {
1545 const ir_entity *ent = get_SymConst_entity(callee);
1546 const ir_graph *irg = get_entity_irg(ent);
1547 if (irg == get_irn_irg(call))
/* Generator macro bodies: stamp out left/right (binops), op (unops),
 * mem (memory binops) and resmode (division ops) accessors for each
 * opcode OP.  op_index gives the first data operand, so binop operands
 * sit at op_index and op_index + 1.  The surrounding #define heads are
 * not visible in this excerpt; every line below is a macro continuation
 * (trailing backslash), so no comments are inserted between them. */
1554 ir_node * get_##OP##_left(const ir_node *node) { \
1555 assert(is_##OP(node)); \
1556 return get_irn_n(node, node->op->op_index); \
1558 void set_##OP##_left(ir_node *node, ir_node *left) { \
1559 assert(is_##OP(node)); \
1560 set_irn_n(node, node->op->op_index, left); \
1562 ir_node *get_##OP##_right(const ir_node *node) { \
1563 assert(is_##OP(node)); \
1564 return get_irn_n(node, node->op->op_index + 1); \
1566 void set_##OP##_right(ir_node *node, ir_node *right) { \
1567 assert(is_##OP(node)); \
1568 set_irn_n(node, node->op->op_index + 1, right); \
1572 ir_node *get_##OP##_op(const ir_node *node) { \
1573 assert(is_##OP(node)); \
1574 return get_irn_n(node, node->op->op_index); \
1576 void set_##OP##_op(ir_node *node, ir_node *op) { \
1577 assert(is_##OP(node)); \
1578 set_irn_n(node, node->op->op_index, op); \
1581 #define BINOP_MEM(OP) \
1585 get_##OP##_mem(const ir_node *node) { \
1586 assert(is_##OP(node)); \
1587 return get_irn_n(node, 0); \
1591 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1592 assert(is_##OP(node)); \
1593 set_irn_n(node, 0, mem); \
1599 ir_mode *get_##OP##_resmode(const ir_node *node) { \
1600 assert(is_##OP(node)); \
1601 return node->attr.divmod.resmode; \
1604 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
1605 assert(is_##OP(node)); \
1606 node->attr.divmod.resmode = mode; \
/* Div "no remainder" hint: caller guarantees the division is exact. */
1634 int get_Div_no_remainder(const ir_node *node) {
1635 assert(is_Div(node));
1636 return node->attr.divmod.no_remainder;
1639 void set_Div_no_remainder(ir_node *node, int no_remainder) {
1640 assert(is_Div(node));
1641 node->attr.divmod.no_remainder = no_remainder;
/* Strict flag on Conv: conversion must not be optimized away. */
1644 int get_Conv_strict(const ir_node *node) {
1645 assert(is_Conv(node));
1646 return node->attr.conv.strict;
1649 void set_Conv_strict(ir_node *node, int strict_flag) {
1650 assert(is_Conv(node));
1651 node->attr.conv.strict = (char)strict_flag;
/* Target type of a Cast node. */
1655 get_Cast_type(ir_node *node) {
1656 assert(is_Cast(node));
1657 return node->attr.cast.type;
1661 set_Cast_type(ir_node *node, ir_type *to_tp) {
1662 assert(is_Cast(node));
1663 node->attr.cast.type = to_tp;
/* Upcast test: strips pointer levels from both types, then asks whether
 * the source class is a subclass of the target class.  Requires
 * consistent typeinfo on the graph. */
1667 /* Checks for upcast.
1669 * Returns true if the Cast node casts a class type to a super type.
1671 int is_Cast_upcast(ir_node *node) {
1672 ir_type *totype = get_Cast_type(node);
1673 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1675 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1678 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1679 totype = get_pointer_points_to_type(totype);
1680 fromtype = get_pointer_points_to_type(fromtype);
1685 if (!is_Class_type(totype)) return 0;
1686 return is_SubClass_of(fromtype, totype);
/* Downcast test: mirror of the above with the subclass relation reversed. */
1689 /* Checks for downcast.
1691 * Returns true if the Cast node casts a class type to a sub type.
1693 int is_Cast_downcast(ir_node *node) {
1694 ir_type *totype = get_Cast_type(node);
1695 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1697 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1700 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1701 totype = get_pointer_points_to_type(totype);
1702 fromtype = get_pointer_points_to_type(fromtype);
1707 if (!is_Class_type(totype)) return 0;
1708 return is_SubClass_of(totype, fromtype);
/* Out-of-line wrapper around the inline _is_unop (parenthesized name
 * prevents macro expansion of the definition). */
1712 (is_unop)(const ir_node *node) {
1713 return _is_unop(node);
/* Generic unop operand accessors; the if guards the arity before the
 * assert so a release build without asserts still indexes safely. */
1717 get_unop_op(const ir_node *node) {
1718 if (node->op->opar == oparity_unary)
1719 return get_irn_n(node, node->op->op_index);
1721 assert(node->op->opar == oparity_unary);
1726 set_unop_op(ir_node *node, ir_node *op) {
1727 if (node->op->opar == oparity_unary)
1728 set_irn_n(node, node->op->op_index, op);
1730 assert(node->op->opar == oparity_unary);
/* Generic binop accessors: left at op_index, right at op_index + 1. */
1734 (is_binop)(const ir_node *node) {
1735 return _is_binop(node);
1739 get_binop_left(const ir_node *node) {
1740 assert(node->op->opar == oparity_binary);
1741 return get_irn_n(node, node->op->op_index);
1745 set_binop_left(ir_node *node, ir_node *left) {
1746 assert(node->op->opar == oparity_binary);
1747 set_irn_n(node, node->op->op_index, left);
1751 get_binop_right(const ir_node *node) {
1752 assert(node->op->opar == oparity_binary);
1753 return get_irn_n(node, node->op->op_index + 1);
1757 set_binop_right(ir_node *node, ir_node *right) {
1758 assert(node->op->opar == oparity_binary);
1759 set_irn_n(node, node->op->op_index + 1, right);
/* Phi0: a zero-arity Phi placeholder that only exists while the graph is
 * still being constructed (phase_building). */
1762 int is_Phi0(const ir_node *n) {
1765 return ((get_irn_op(n) == op_Phi) &&
1766 (get_irn_arity(n) == 0) &&
1767 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Raw pointer into the in-array at the first predecessor (skips in[0]). */
1771 get_Phi_preds_arr(ir_node *node) {
1772 assert(node->op == op_Phi);
1773 return (ir_node **)&(get_irn_in(node)[1]);
1777 get_Phi_n_preds(const ir_node *node) {
1778 assert(is_Phi(node) || is_Phi0(node));
1779 return (get_irn_arity(node));
/* NOTE(review): body not visible in this excerpt — presumably resizes the
 * predecessor array; verify against the full source. */
1783 void set_Phi_n_preds(ir_node *node, int n_preds) {
1784 assert(node->op == op_Phi);
1789 get_Phi_pred(const ir_node *node, int pos) {
1790 assert(is_Phi(node) || is_Phi0(node));
1791 return get_irn_n(node, pos);
1795 set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
1796 assert(is_Phi(node) || is_Phi0(node));
1797 set_irn_n(node, pos, pred);
/* Intrusive next-link used to chain Phis of a block. */
1800 ir_node *(get_Phi_next)(const ir_node *phi) {
1801 return _get_Phi_next(phi);
1804 void (set_Phi_next)(ir_node *phi, ir_node *next) {
1805 _set_Phi_next(phi, next);
1808 int is_memop(const ir_node *node) {
1809 ir_opcode code = get_irn_opcode(node);
1810 return (code == iro_Load || code == iro_Store);
1813 ir_node *get_memop_mem(const ir_node *node) {
1814 assert(is_memop(node));
1815 return get_irn_n(node, 0);
1818 void set_memop_mem(ir_node *node, ir_node *mem) {
1819 assert(is_memop(node));
1820 set_irn_n(node, 0, mem);
1823 ir_node *get_memop_ptr(const ir_node *node) {
1824 assert(is_memop(node));
1825 return get_irn_n(node, 1);
1828 void set_memop_ptr(ir_node *node, ir_node *ptr) {
1829 assert(is_memop(node));
1830 set_irn_n(node, 1, ptr);
/* --- Load accessors: mem pred 0, address pred 1; mode/volatility/align
 * live in the node attribute. ---------------------------------------- */
1834 get_Load_mem(const ir_node *node) {
1835 assert(is_Load(node));
1836 return get_irn_n(node, 0);
1840 set_Load_mem(ir_node *node, ir_node *mem) {
1841 assert(is_Load(node));
1842 set_irn_n(node, 0, mem);
1846 get_Load_ptr(const ir_node *node) {
1847 assert(is_Load(node));
1848 return get_irn_n(node, 1);
1852 set_Load_ptr(ir_node *node, ir_node *ptr) {
1853 assert(is_Load(node));
1854 set_irn_n(node, 1, ptr);
/* Mode of the value being loaded. */
1858 get_Load_mode(const ir_node *node) {
1859 assert(is_Load(node));
1860 return node->attr.load.mode;
1864 set_Load_mode(ir_node *node, ir_mode *mode) {
1865 assert(is_Load(node));
1866 node->attr.load.mode = mode;
1870 get_Load_volatility(const ir_node *node) {
1871 assert(is_Load(node));
1872 return node->attr.load.volatility;
1876 set_Load_volatility(ir_node *node, ir_volatility volatility) {
1877 assert(is_Load(node));
1878 node->attr.load.volatility = volatility;
1882 get_Load_align(const ir_node *node) {
1883 assert(is_Load(node));
1884 return node->attr.load.aligned;
1888 set_Load_align(ir_node *node, ir_align align) {
1889 assert(is_Load(node));
1890 node->attr.load.aligned = align;
/* --- Store accessors: mem pred 0, address pred 1, value pred 2. -------- */
1895 get_Store_mem(const ir_node *node) {
1896 assert(is_Store(node));
1897 return get_irn_n(node, 0);
1901 set_Store_mem(ir_node *node, ir_node *mem) {
1902 assert(is_Store(node));
1903 set_irn_n(node, 0, mem);
1907 get_Store_ptr(const ir_node *node) {
1908 assert(is_Store(node));
1909 return get_irn_n(node, 1);
1913 set_Store_ptr(ir_node *node, ir_node *ptr) {
1914 assert(is_Store(node));
1915 set_irn_n(node, 1, ptr);
1919 get_Store_value(const ir_node *node) {
1920 assert(is_Store(node));
1921 return get_irn_n(node, 2);
1925 set_Store_value(ir_node *node, ir_node *value) {
1926 assert(is_Store(node));
1927 set_irn_n(node, 2, value);
1931 get_Store_volatility(const ir_node *node) {
1932 assert(is_Store(node));
1933 return node->attr.store.volatility;
1937 set_Store_volatility(ir_node *node, ir_volatility volatility) {
1938 assert(is_Store(node));
1939 node->attr.store.volatility = volatility;
1943 get_Store_align(const ir_node *node) {
1944 assert(is_Store(node));
1945 return node->attr.store.aligned;
1949 set_Store_align(ir_node *node, ir_align align) {
1950 assert(is_Store(node));
1951 node->attr.store.aligned = align;
/* --- Alloc accessors: mem pred 0, size pred 1; type and where (stack vs
 * heap) are node attributes. ------------------------------------------ */
1956 get_Alloc_mem(const ir_node *node) {
1957 assert(is_Alloc(node));
1958 return get_irn_n(node, 0);
1962 set_Alloc_mem(ir_node *node, ir_node *mem) {
1963 assert(is_Alloc(node));
1964 set_irn_n(node, 0, mem);
1968 get_Alloc_size(const ir_node *node) {
1969 assert(is_Alloc(node));
1970 return get_irn_n(node, 1);
1974 set_Alloc_size(ir_node *node, ir_node *size) {
1975 assert(is_Alloc(node));
1976 set_irn_n(node, 1, size);
1980 get_Alloc_type(ir_node *node) {
1981 assert(is_Alloc(node));
1982 return node->attr.alloc.type;
1986 set_Alloc_type(ir_node *node, ir_type *tp) {
1987 assert(is_Alloc(node));
1988 node->attr.alloc.type = tp;
1992 get_Alloc_where(const ir_node *node) {
1993 assert(is_Alloc(node));
1994 return node->attr.alloc.where;
1998 set_Alloc_where(ir_node *node, ir_where_alloc where) {
1999 assert(is_Alloc(node));
2000 node->attr.alloc.where = where;
/* --- Free accessors: mem pred 0, pointer pred 1, size pred 2. ---------- */
2005 get_Free_mem(const ir_node *node) {
2006 assert(is_Free(node));
2007 return get_irn_n(node, 0);
2011 set_Free_mem(ir_node *node, ir_node *mem) {
2012 assert(is_Free(node));
2013 set_irn_n(node, 0, mem);
2017 get_Free_ptr(const ir_node *node) {
2018 assert(is_Free(node));
2019 return get_irn_n(node, 1);
2023 set_Free_ptr(ir_node *node, ir_node *ptr) {
2024 assert(is_Free(node));
2025 set_irn_n(node, 1, ptr);
2029 get_Free_size(const ir_node *node) {
2030 assert(is_Free(node));
2031 return get_irn_n(node, 2);
2035 set_Free_size(ir_node *node, ir_node *size) {
2036 assert(is_Free(node));
2037 set_irn_n(node, 2, size);
2041 get_Free_type(ir_node *node) {
2042 assert(is_Free(node));
2043 return node->attr.free.type;
2047 set_Free_type(ir_node *node, ir_type *tp) {
2048 assert(is_Free(node));
2049 node->attr.free.type = tp;
2053 get_Free_where(const ir_node *node) {
2054 assert(is_Free(node));
2055 return node->attr.free.where;
2059 set_Free_where(ir_node *node, ir_where_alloc where) {
2060 assert(is_Free(node));
2061 node->attr.free.where = where;
/* --- Sync accessors: all predecessors are memory values being merged. -- */
2064 ir_node **get_Sync_preds_arr(ir_node *node) {
2065 assert(is_Sync(node));
2066 return (ir_node **)&(get_irn_in(node)[1]);
2069 int get_Sync_n_preds(const ir_node *node) {
2070 assert(is_Sync(node));
2071 return (get_irn_arity(node));
/* NOTE(review): body not visible in this excerpt. */
2075 void set_Sync_n_preds(ir_node *node, int n_preds) {
2076 assert(is_Sync(node));
2080 ir_node *get_Sync_pred(const ir_node *node, int pos) {
2081 assert(is_Sync(node));
2082 return get_irn_n(node, pos);
2085 void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
2086 assert(is_Sync(node));
2087 set_irn_n(node, pos, pred);
2090 /* Add a new Sync predecessor */
2091 void add_Sync_pred(ir_node *node, ir_node *pred) {
2092 assert(is_Sync(node));
2093 add_irn_n(node, pred);
/* Derive a Proj's source-language type from its producer: method param
 * type for Start projections, method result type for Call projections,
 * selected entity's type for Loads through a Sel.  Falls back to
 * firm_unknown_type.  Several switch cases are not visible here. */
2096 /* Returns the source language type of a Proj node. */
2097 ir_type *get_Proj_type(ir_node *n) {
2098 ir_type *tp = firm_unknown_type;
2099 ir_node *pred = get_Proj_pred(n);
2101 switch (get_irn_opcode(pred)) {
2104 /* Deal with Start / Call here: we need to know the Proj Nr. */
2105 assert(get_irn_mode(pred) == mode_T);
2106 pred_pred = get_Proj_pred(pred);
2108 if (is_Start(pred_pred)) {
2109 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
2110 tp = get_method_param_type(mtp, get_Proj_proj(n));
2111 } else if (is_Call(pred_pred)) {
2112 ir_type *mtp = get_Call_type(pred_pred);
2113 tp = get_method_res_type(mtp, get_Proj_proj(n));
2116 case iro_Start: break;
2117 case iro_Call: break;
2119 ir_node *a = get_Load_ptr(pred);
2121 tp = get_entity_type(get_Sel_entity(a));
/* --- Proj accessors: the projected node is the single predecessor. ----- */
2130 get_Proj_pred(const ir_node *node) {
2131 assert(is_Proj(node));
2132 return get_irn_n(node, 0);
2136 set_Proj_pred(ir_node *node, ir_node *pred) {
2137 assert(is_Proj(node));
2138 set_irn_n(node, 0, pred);
/* Projection number; under INTERPROCEDURAL_VIEW a Filter node stores it
 * in a different attribute member. */
2142 get_Proj_proj(const ir_node *node) {
2143 #ifdef INTERPROCEDURAL_VIEW
2144 ir_opcode code = get_irn_opcode(node);
2146 if (code == iro_Proj) {
2147 return node->attr.proj;
2150 assert(code == iro_Filter);
2151 return node->attr.filter.proj;
2154 assert(is_Proj(node));
2155 return node->attr.proj;
2156 #endif /* INTERPROCEDURAL_VIEW */
2160 set_Proj_proj(ir_node *node, long proj) {
2161 #ifdef INTERPROCEDURAL_VIEW
2162 ir_opcode code = get_irn_opcode(node);
2164 if (code == iro_Proj) {
2165 node->attr.proj = proj;
2168 assert(code == iro_Filter);
2169 node->attr.filter.proj = proj;
2172 assert(is_Proj(node));
2173 node->attr.proj = proj;
2174 #endif /* INTERPROCEDURAL_VIEW */
2177 /* Returns non-zero if a node is a routine parameter. */
2178 int (is_arg_Proj)(const ir_node *node) {
2179 return _is_arg_Proj(node);
/* --- Tuple accessors: all predecessors are the tuple components. ------- */
2183 get_Tuple_preds_arr(ir_node *node) {
2184 assert(is_Tuple(node));
2185 return (ir_node **)&(get_irn_in(node)[1]);
2189 get_Tuple_n_preds(const ir_node *node) {
2190 assert(is_Tuple(node));
2191 return get_irn_arity(node);
/* NOTE(review): body not visible in this excerpt. */
2196 set_Tuple_n_preds(ir_node *node, int n_preds) {
2197 assert(is_Tuple(node));
2202 get_Tuple_pred(const ir_node *node, int pos) {
2203 assert(is_Tuple(node));
2204 return get_irn_n(node, pos);
2208 set_Tuple_pred(ir_node *node, int pos, ir_node *pred) {
2209 assert(is_Tuple(node));
2210 set_irn_n(node, pos, pred);
/* --- Id accessors: single predecessor is the represented node. --------- */
2214 get_Id_pred(const ir_node *node) {
2215 assert(is_Id(node));
2216 return get_irn_n(node, 0);
2220 set_Id_pred(ir_node *node, ir_node *pred) {
2221 assert(is_Id(node));
2222 set_irn_n(node, 0, pred);
2225 ir_node *get_Confirm_value(const ir_node *node) {
2226 assert(is_Confirm(node));
2227 return get_irn_n(node, 0);
2230 void set_Confirm_value(ir_node *node, ir_node *value) {
2231 assert(is_Confirm(node));
2232 set_irn_n(node, 0, value);
2235 ir_node *get_Confirm_bound(const ir_node *node) {
2236 assert(is_Confirm(node));
2237 return get_irn_n(node, 1);
2240 void set_Confirm_bound(ir_node *node, ir_node *bound) {
2241 assert(is_Confirm(node));
2242 set_irn_n(node, 0, bound);
2245 pn_Cmp get_Confirm_cmp(const ir_node *node) {
2246 assert(is_Confirm(node));
2247 return node->attr.confirm.cmp;
2250 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
2251 assert(is_Confirm(node));
2252 node->attr.confirm.cmp = cmp;
/* --- Filter accessors (interprocedural view); bodies of the first two
 * are not visible in this excerpt. ------------------------------------ */
2256 get_Filter_pred(ir_node *node) {
2257 assert(is_Filter(node));
2262 set_Filter_pred(ir_node *node, ir_node *pred) {
2263 assert(is_Filter(node));
2268 get_Filter_proj(ir_node *node) {
2269 assert(is_Filter(node));
2270 return node->attr.filter.proj;
2274 set_Filter_proj(ir_node *node, long proj) {
2275 assert(is_Filter(node));
2276 node->attr.filter.proj = proj;
/* Install the interprocedural predecessor array; slot 0 mirrors in[0]
 * (the block), user predecessors start at slot 1. */
2279 /* Don't use get_irn_arity, get_irn_n in implementation as access
2280 shall work independent of view!!! */
2281 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in) {
2282 assert(is_Filter(node));
2283 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2284 ir_graph *irg = get_irn_irg(node);
2285 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2286 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
2287 node->attr.filter.in_cg[0] = node->in[0];
2289 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2292 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2293 assert(is_Filter(node) && node->attr.filter.in_cg &&
2294 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2295 node->attr.filter.in_cg[pos + 1] = pred;
2298 int get_Filter_n_cg_preds(ir_node *node) {
2299 assert(is_Filter(node) && node->attr.filter.in_cg);
2300 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2303 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2305 assert(is_Filter(node) && node->attr.filter.in_cg &&
2307 arity = ARR_LEN(node->attr.filter.in_cg);
2308 assert(pos < arity - 1);
2309 return node->attr.filter.in_cg[pos + 1];
/* --- Mux accessors: sel pred 0, false pred 1, true pred 2 (in[] is
 * off by one from get_irn_n indices).
 * NOTE(review): the setters poke node->in[] directly, bypassing
 * set_irn_n() and therefore any out-edge bookkeeping — verify that this
 * is intentional before relying on edges after a Mux mutation. -------- */
2313 ir_node *get_Mux_sel(const ir_node *node) {
2314 assert(is_Mux(node));
2318 void set_Mux_sel(ir_node *node, ir_node *sel) {
2319 assert(is_Mux(node));
2323 ir_node *get_Mux_false(const ir_node *node) {
2324 assert(is_Mux(node));
2328 void set_Mux_false(ir_node *node, ir_node *ir_false) {
2329 assert(is_Mux(node));
2330 node->in[2] = ir_false;
2333 ir_node *get_Mux_true(const ir_node *node) {
2334 assert(is_Mux(node));
2338 void set_Mux_true(ir_node *node, ir_node *ir_true) {
2339 assert(is_Mux(node));
2340 node->in[3] = ir_true;
/* --- CopyB accessors: mem pred 0, dst pred 1, src pred 2. -------------- */
2344 ir_node *get_CopyB_mem(const ir_node *node) {
2345 assert(is_CopyB(node));
2346 return get_irn_n(node, 0);
/* NOTE(review): checks node->op == op_CopyB where siblings use
 * is_CopyB() — equivalent intent, inconsistent style. */
2349 void set_CopyB_mem(ir_node *node, ir_node *mem) {
2350 assert(node->op == op_CopyB);
2351 set_irn_n(node, 0, mem);
2354 ir_node *get_CopyB_dst(const ir_node *node) {
2355 assert(is_CopyB(node));
2356 return get_irn_n(node, 1);
2359 void set_CopyB_dst(ir_node *node, ir_node *dst) {
2360 assert(is_CopyB(node));
2361 set_irn_n(node, 1, dst);
2364 ir_node *get_CopyB_src(const ir_node *node) {
2365 assert(is_CopyB(node));
2366 return get_irn_n(node, 2);
2369 void set_CopyB_src(ir_node *node, ir_node *src) {
2370 assert(is_CopyB(node));
2371 set_irn_n(node, 2, src);
2374 ir_type *get_CopyB_type(ir_node *node) {
2375 assert(is_CopyB(node));
2376 return node->attr.copyb.type;
2379 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2380 assert(is_CopyB(node) && data_type);
2381 node->attr.copyb.type = data_type;
/* --- InstOf accessors: store (memory) pred 0, object pred 1; checked
 * type is a node attribute. ------------------------------------------- */
2386 get_InstOf_type(ir_node *node) {
2387 assert(node->op == op_InstOf);
2388 return node->attr.instof.type;
2392 set_InstOf_type(ir_node *node, ir_type *type) {
2393 assert(node->op == op_InstOf);
2394 node->attr.instof.type = type;
2398 get_InstOf_store(const ir_node *node) {
2399 assert(node->op == op_InstOf);
2400 return get_irn_n(node, 0);
2404 set_InstOf_store(ir_node *node, ir_node *obj) {
2405 assert(node->op == op_InstOf);
2406 set_irn_n(node, 0, obj);
2410 get_InstOf_obj(const ir_node *node) {
2411 assert(node->op == op_InstOf);
2412 return get_irn_n(node, 1);
2416 set_InstOf_obj(ir_node *node, ir_node *obj) {
2417 assert(node->op == op_InstOf);
2418 set_irn_n(node, 1, obj);
/* --- Raise accessors: mem pred 0, exception object pointer pred 1. ----- */
2421 /* Returns the memory input of a Raise operation. */
2423 get_Raise_mem(const ir_node *node) {
2424 assert(is_Raise(node));
2425 return get_irn_n(node, 0);
2429 set_Raise_mem(ir_node *node, ir_node *mem) {
2430 assert(is_Raise(node));
2431 set_irn_n(node, 0, mem);
2435 get_Raise_exo_ptr(const ir_node *node) {
2436 assert(is_Raise(node));
2437 return get_irn_n(node, 1);
2441 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr) {
2442 assert(is_Raise(node));
2443 set_irn_n(node, 1, exo_ptr);
2448 /* Returns the memory input of a Bound operation. */
2449 ir_node *get_Bound_mem(const ir_node *bound) {
2450 assert(is_Bound(bound));
2451 return get_irn_n(bound, 0);
2454 void set_Bound_mem(ir_node *bound, ir_node *mem) {
2455 assert(is_Bound(bound));
2456 set_irn_n(bound, 0, mem);
2459 /* Returns the index input of a Bound operation. */
2460 ir_node *get_Bound_index(const ir_node *bound) {
2461 assert(is_Bound(bound));
2462 return get_irn_n(bound, 1);
2465 void set_Bound_index(ir_node *bound, ir_node *idx) {
2466 assert(is_Bound(bound));
2467 set_irn_n(bound, 1, idx);
2470 /* Returns the lower bound input of a Bound operation. */
2471 ir_node *get_Bound_lower(const ir_node *bound) {
2472 assert(is_Bound(bound));
2473 return get_irn_n(bound, 2);
2476 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2477 assert(is_Bound(bound));
2478 set_irn_n(bound, 2, lower);
2481 /* Returns the upper bound input of a Bound operation. */
2482 ir_node *get_Bound_upper(const ir_node *bound) {
2483 assert(is_Bound(bound));
2484 return get_irn_n(bound, 3);
2487 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2488 assert(is_Bound(bound));
2489 set_irn_n(bound, 3, upper);
2492 /* Return the operand of a Pin node. */
2493 ir_node *get_Pin_op(const ir_node *pin) {
2494 assert(is_Pin(pin));
2495 return get_irn_n(pin, 0);
2498 void set_Pin_op(ir_node *pin, ir_node *node) {
2499 assert(is_Pin(pin));
2500 set_irn_n(pin, 0, node);
/* --- ASM node accessors: text, constraints and clobbers are flexible
 * arrays stored in the node attribute. -------------------------------- */
2503 /* Return the assembler text of an ASM pseudo node. */
2504 ident *get_ASM_text(const ir_node *node) {
2505 assert(is_ASM(node));
2506 return node->attr.assem.asm_text;
2509 /* Return the number of input constraints for an ASM node. */
2510 int get_ASM_n_input_constraints(const ir_node *node) {
2511 assert(is_ASM(node));
2512 return ARR_LEN(node->attr.assem.inputs);
2515 /* Return the input constraints for an ASM node. This is a flexible array. */
2516 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node) {
2517 assert(is_ASM(node));
2518 return node->attr.assem.inputs;
2521 /* Return the number of output constraints for an ASM node. */
2522 int get_ASM_n_output_constraints(const ir_node *node) {
2523 assert(is_ASM(node));
2524 return ARR_LEN(node->attr.assem.outputs);
2527 /* Return the output constraints for an ASM node. */
2528 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node) {
2529 assert(is_ASM(node));
2530 return node->attr.assem.outputs;
2533 /* Return the number of clobbered registers for an ASM node. */
2534 int get_ASM_n_clobbers(const ir_node *node) {
2535 assert(is_ASM(node));
2536 return ARR_LEN(node->attr.assem.clobber);
2539 /* Return the list of clobbered registers for an ASM node. */
2540 ident **get_ASM_clobbers(const ir_node *node) {
2541 assert(is_ASM(node));
2542 return node->attr.assem.clobber;
/* Resolve a node to its graph via its block; works even on Bad blocks. */
2545 /* returns the graph of a node */
2547 get_irn_irg(const ir_node *node) {
2549 * Do not use get_nodes_Block() here, because this
2550 * will check the pinned state.
2551 * However even a 'wrong' block is always in the proper
2554 if (! is_Block(node))
2555 node = get_irn_n(node, -1);
2556 /* note that get_Block_irg() can handle Bad nodes */
2557 return get_Block_irg(node);
2561 /*----------------------------------------------------------------*/
2562 /* Auxiliary routines */
2563 /*----------------------------------------------------------------*/
/* Skip a Proj wrapper, returning its predecessor; tolerates NULL/non-Proj
 * input (hence no assert). */
2566 skip_Proj(ir_node *node) {
2567 /* don't assert node !!! */
2572 node = get_Proj_pred(node);
/* const variant of skip_Proj. */
2578 skip_Proj_const(const ir_node *node) {
2579 /* don't assert node !!! */
2584 node = get_Proj_pred(node);
/* Resolve Proj-of-Tuple chains to the actual tuple component, recursing
 * through nested Tuples. */
2590 skip_Tuple(ir_node *node) {
2595 if (is_Proj(node)) {
2596 pred = get_Proj_pred(node);
2597 op = get_irn_op(pred);
2600 * Looks strange but calls get_irn_op() only once
2601 * in most often cases.
2603 if (op == op_Proj) { /* nested Tuple ? */
2604 pred = skip_Tuple(pred);
2606 if (is_Tuple(pred)) {
2607 node = get_Tuple_pred(pred, get_Proj_proj(node));
2610 } else if (op == op_Tuple) {
2611 node = get_Tuple_pred(pred, get_Proj_proj(node));
/* Unwrap one Cast level, if present. */
2618 /* returns operand of node if node is a Cast */
2619 ir_node *skip_Cast(ir_node *node) {
2621 return get_Cast_op(node);
2625 /* returns operand of node if node is a Cast */
2626 const ir_node *skip_Cast_const(const ir_node *node) {
2628 return get_Cast_op(node);
2632 /* returns operand of node if node is a Pin */
2633 ir_node *skip_Pin(ir_node *node) {
2635 return get_Pin_op(node);
2639 /* returns operand of node if node is a Confirm */
2640 ir_node *skip_Confirm(ir_node *node) {
2641 if (is_Confirm(node))
2642 return get_Confirm_value(node);
/* Walk down through all high-level operations to the underlying node. */
2646 /* skip all high-level ops */
2647 ir_node *skip_HighLevel_ops(ir_node *node) {
2648 while (is_op_highlevel(get_irn_op(node))) {
2649 node = get_irn_n(node, 0);
/* Id-chain resolution with path compression: chains collapse so every Id
 * points at the chain end, and cycles degenerate to self-loops. */
2655 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2656 * than any other approach, as Id chains are resolved and all point to the real node, or
2657 * all id's are self loops.
2659 * Note: this function accounts for roughly 10% of almost any compiler run,
2660 * so it is hand-optimized a little bit.
2662 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2665 skip_Id(ir_node *node) {
2667 /* don't assert node !!! */
2669 if (!node || (node->op != op_Id)) return node;
2671 /* Don't use get_Id_pred(): We get into an endless loop for
2672 self-referencing Ids. */
2673 pred = node->in[0+1];
2675 if (pred->op != op_Id) return pred;
2677 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2678 ir_node *rem_pred, *res;
2680 if (pred->op != op_Id) return pred; /* shortcut */
2683 assert(get_irn_arity (node) > 0);
2685 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2686 res = skip_Id(rem_pred);
2687 if (res->op == op_Id) /* self-loop */ return node;
2689 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* In-place variant: replaces *node by its Id-chain resolution. */
2696 void skip_Id_and_store(ir_node **node) {
2699 if (!n || (n->op != op_Id)) return;
2701 /* Don't use get_Id_pred(): We get into an endless loop for
2702 self-referencing Ids. */
/* Out-of-line wrappers around inline predicates; the parenthesized names
 * prevent the function-like macros of the same name from expanding. */
2707 (is_strictConv)(const ir_node *node) {
2708 return _is_strictConv(node);
2712 (is_no_Block)(const ir_node *node) {
2713 return _is_no_Block(node);
2716 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
2718 (is_SymConst_addr_ent)(const ir_node *node) {
2719 return _is_SymConst_addr_ent(node);
2722 /* Returns true if the operation manipulates control flow. */
2723 int is_cfop(const ir_node *node) {
2724 return is_op_cfopcode(get_irn_op(node));
2727 /* Returns true if the operation manipulates interprocedural control flow:
2728 CallBegin, EndReg, EndExcept */
2729 int is_ip_cfop(const ir_node *node) {
2730 return is_ip_cfopcode(get_irn_op(node));
2733 /* Returns true if the operation can change the control flow because
2736 is_fragile_op(const ir_node *node) {
2737 return is_op_fragile(get_irn_op(node));
/* Memory operand of a fragile op; most cases of this switch are not
 * visible in this excerpt. */
2740 /* Returns the memory operand of fragile operations. */
2741 ir_node *get_fragile_op_mem(ir_node *node) {
2742 assert(node && is_fragile_op(node));
2744 switch (get_irn_opcode(node)) {
2755 return get_irn_n(node, pn_Generic_M);
2760 assert(0 && "should not be reached");
/* Dispatch to the per-opcode resmode accessor.
 * NOTE(review): "resmod" in the name looks like a typo for "resmode",
 * but it is the published API name and must not be changed here. */
2765 /* Returns the result mode of a Div operation. */
2766 ir_mode *get_divop_resmod(const ir_node *node) {
2767 switch (get_irn_opcode(node)) {
2768 case iro_Quot : return get_Quot_resmode(node);
2769 case iro_DivMod: return get_DivMod_resmode(node);
2770 case iro_Div : return get_Div_resmode(node);
2771 case iro_Mod : return get_Mod_resmode(node);
2773 assert(0 && "should not be reached");
2778 /* Returns true if the operation is a forking control flow operation. */
2779 int (is_irn_forking)(const ir_node *node) {
2780 return _is_irn_forking(node);
2783 void (copy_node_attr)(const ir_node *old_node, ir_node *new_node) {
2784 _copy_node_attr(old_node, new_node);
2787 /* Return the type associated with the value produced by n
2788 * if the node remarks this type as it is the case for
2789 * Cast, Const, SymConst and some Proj nodes. */
2790 ir_type *(get_irn_type)(ir_node *node) {
2791 return _get_irn_type(node);
2794 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2796 ir_type *(get_irn_type_attr)(ir_node *node) {
2797 return _get_irn_type_attr(node);
2800 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2801 ir_entity *(get_irn_entity_attr)(ir_node *node) {
2802 return _get_irn_entity_attr(node);
2805 /* Returns non-zero for constant-like nodes. */
2806 int (is_irn_constlike)(const ir_node *node) {
2807 return _is_irn_constlike(node);
2811 * Returns non-zero for nodes that are allowed to have keep-alives and
2812 * are neither Block nor PhiM.
2814 int (is_irn_keep)(const ir_node *node) {
2815 return _is_irn_keep(node);
2819 * Returns non-zero for nodes that are always placed in the start block.
2821 int (is_irn_start_block_placed)(const ir_node *node) {
2822 return _is_irn_start_block_placed(node);
2825 /* Returns non-zero for nodes that are machine operations. */
2826 int (is_irn_machine_op)(const ir_node *node) {
2827 return _is_irn_machine_op(node);
2830 /* Returns non-zero for nodes that are machine operands. */
2831 int (is_irn_machine_operand)(const ir_node *node) {
2832 return _is_irn_machine_operand(node);
2835 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2836 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2837 return _is_irn_machine_user(node, n);
/* Stringify a jump prediction via the X macro; the switch head is not
 * visible in this excerpt. */
2841 /* Gets the string representation of the jump prediction .*/
2842 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
2843 #define X(a) case a: return #a;
2845 X(COND_JMP_PRED_NONE);
2846 X(COND_JMP_PRED_TRUE);
2847 X(COND_JMP_PRED_FALSE);
2853 /* Returns the conditional jump prediction of a Cond node. */
2854 cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond) {
2855 return _get_Cond_jmp_pred(cond);
2858 /* Sets a new conditional jump prediction. */
2859 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2860 _set_Cond_jmp_pred(cond, pred);
2863 /** the get_type operation must be always implemented and return a firm type */
2864 static ir_type *get_Default_type(ir_node *n) {
2866 return get_unknown_type();
2869 /* Sets the get_type operation for an ir_op_ops. */
2870 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
2872 case iro_Const: ops->get_type = get_Const_type; break;
2873 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2874 case iro_Cast: ops->get_type = get_Cast_type; break;
2875 case iro_Proj: ops->get_type = get_Proj_type; break;
2877 /* not allowed to be NULL */
2878 if (! ops->get_type)
2879 ops->get_type = get_Default_type;
2885 /** Return the attribute type of a SymConst node if exists */
2886 static ir_type *get_SymConst_attr_type(ir_node *self) {
2887 symconst_kind kind = get_SymConst_kind(self);
2888 if (SYMCONST_HAS_TYPE(kind))
2889 return get_SymConst_type(self);
2893 /** Return the attribute entity of a SymConst node if exists */
2894 static ir_entity *get_SymConst_attr_entity(ir_node *self) {
2895 symconst_kind kind = get_SymConst_kind(self);
2896 if (SYMCONST_HAS_ENT(kind))
2897 return get_SymConst_entity(self);
2901 /** the get_type_attr operation must be always implemented */
2902 static ir_type *get_Null_type(ir_node *n) {
2904 return firm_unknown_type;
2907 /* Sets the get_type operation for an ir_op_ops. */
2908 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
2910 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2911 case iro_Call: ops->get_type_attr = get_Call_type; break;
2912 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2913 case iro_Free: ops->get_type_attr = get_Free_type; break;
2914 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2916 /* not allowed to be NULL */
2917 if (! ops->get_type_attr)
2918 ops->get_type_attr = get_Null_type;
2924 /** the get_entity_attr operation must be always implemented */
2925 static ir_entity *get_Null_ent(ir_node *n) {
2930 /* Sets the get_type operation for an ir_op_ops. */
2931 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
2933 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2934 case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
2936 /* not allowed to be NULL */
2937 if (! ops->get_entity_attr)
2938 ops->get_entity_attr = get_Null_ent;
2944 /* Sets the debug information of a node. */
2945 void (set_irn_dbg_info)(ir_node *n, dbg_info *db) {
2946 _set_irn_dbg_info(n, db);
2950 * Returns the debug information of an node.
2952 * @param n The node.
2954 dbg_info *(get_irn_dbg_info)(const ir_node *n) {
2955 return _get_irn_dbg_info(n);
2958 /* checks whether a node represents a global address */
2959 int is_Global(const ir_node *node) {
2960 return is_SymConst_addr_ent(node);
2963 /* returns the entity of a global address */
2964 ir_entity *get_Global_entity(const ir_node *node) {
2965 return get_SymConst_entity(node);
2969 * Calculate a hash value of a node.
2971 unsigned firm_default_hash(const ir_node *node) {
2975 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
2976 h = irn_arity = get_irn_intra_arity(node);
2978 /* consider all in nodes... except the block if not a control flow. */
2979 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
2980 h = 9*h + HASH_PTR(get_irn_intra_n(node, i));
2984 h = 9*h + HASH_PTR(get_irn_mode(node));
2986 h = 9*h + HASH_PTR(get_irn_op(node));
2989 } /* firm_default_hash */
/* include generated code */
#include "gen_irnode.c.inl"