2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
33 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
39 #include "iredgekinds.h"
40 #include "iredges_t.h"
48 /* some constants fixing the positions of nodes predecessors
50 #define CALL_PARAM_OFFSET 2
51 #define BUILDIN_PARAM_OFFSET 1
52 #define SEL_INDEX_OFFSET 2
53 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
54 #define END_KEEPALIVE_OFFSET 0
/** Printable names of the pn_Cmp constants, indexed by the constant's value. */
static const char *pnc_name_arr[] = {
	"pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
	"pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
	"pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
	"pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 *
 * @param pnc  a pn_Cmp value; must be in [0, 15]
 * @return the printable name of the constant
 */
const char *get_pnc_string(int pnc)
{
	assert(pnc >= 0 && pnc <
	       (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
	return pnc_name_arr[pnc];
}
74 * Calculates the negated (Complement(R)) pnc condition.
76 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode)
80 /* do NOT add the Uo bit for non-floating point values */
81 if (! mode_is_float(mode))
87 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
88 pn_Cmp get_inversed_pnc(long pnc)
90 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
91 long lesser = pnc & pn_Cmp_Lt;
92 long greater = pnc & pn_Cmp_Gt;
94 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;


/* register new space for every node */
unsigned firm_register_additional_node_data(unsigned size)
{
	assert(!forbid_new_data && "Too late to register additional node data");

	/* Release-build guard: once nodes have been created the layout is
	 * fixed, so refuse the registration instead of corrupting it. */
	if (forbid_new_data)
		return 0;

	/* Returns the new total amount of per-node extra space. */
	return firm_add_node_size += size;
}
124 void init_irnode(void)
126 /* Forbid the addition of new data to an ir node. */
131 * irnode constructor.
132 * Create a new irnode in irg, with an op, mode, arity and
133 * some incoming irnodes.
134 * If arity is negative, a node with a dynamic array is created.
/* NOTE(review): many interior lines of this constructor (declarations of
 * res/p/i, the fields initialized between the visible assignments, the
 * arity<0 vs arity>=0 branch structure and the final return) were lost in
 * extraction — reconstruct from upstream before editing. */
137 new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
138 int arity, ir_node **in)
/* Node body plus op-specific attributes plus registered custom data are
 * allocated as one chunk on the graph's obstack; custom data sits in
 * front of the ir_node, so the node pointer is offset by firm_add_node_size. */
141 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
148 p = obstack_alloc(irg->obst, node_size);
149 memset(p, 0, node_size);
150 res = (ir_node *)(p + firm_add_node_size);
152 res->kind = k_ir_node;
156 res->node_idx = irg_register_node_idx(irg, res);
/* Dynamic-arity case: only slot 0 (the block) is reserved up front. */
161 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
163 /* not nice but necessary: End and Sync must always have a flexible array */
164 if (op == op_End || op == op_Sync)
165 res->in = NEW_ARR_F(ir_node *, (arity+1));
167 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
/* in[0] is the block; the explicit predecessors start at in[1]. */
168 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
172 set_irn_dbg_info(res, db);
174 res->node_nr = get_irp_new_node_nr();
/* Initialize out-edge bookkeeping for every edge kind. */
176 for (i = 0; i < EDGE_KIND_LAST; ++i) {
177 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
178 /* edges will be build immediately */
179 res->edge_info[i].edges_built = 1;
180 res->edge_info[i].out_count = 0;
183 /* don't put this into the for loop, arity is -1 for some nodes! */
184 edges_notify_edge(res, -1, res->in[0], NULL, irg);
185 for (i = 1; i <= arity; ++i)
186 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
188 hook_new_node(irg, res);
189 if (get_irg_phase_state(irg) == phase_backend) {
190 be_info_new_node(res);
192 // Init the VRP structures
193 res->vrp.range_type = VRP_UNDEFINED;
/* For integer modes VRP starts with "no bits known" and empty range;
 * for everything else the tarvals are marked bad (unusable). */
195 if(mode_is_int(mode)) {
196 // We are assuming that 0 is always represented as 0x0000
197 res->vrp.bits_set = new_tarval_from_long(0, mode);
198 res->vrp.bits_not_set = new_tarval_from_long(0, mode);
199 res->vrp.range_bottom = get_tarval_top();
200 res->vrp.range_top = get_tarval_top();
202 res->vrp.bits_set = get_tarval_bad();
203 res->vrp.bits_not_set = get_tarval_bad();
204 res->vrp.range_bottom = get_tarval_bad();
205 res->vrp.range_top = get_tarval_bad();
207 res->vrp.bits_node = NULL;
208 res->vrp.range_node = NULL;
209 res->vrp.range_op = VRP_NONE;
215 /*-- getting some parameters from ir_nodes --*/
217 int (is_ir_node)(const void *thing)
219 return _is_ir_node(thing);
222 int (get_irn_intra_arity)(const ir_node *node)
224 return _get_irn_intra_arity(node);
227 int (get_irn_inter_arity)(const ir_node *node)
229 return _get_irn_inter_arity(node);
232 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
234 int (get_irn_arity)(const ir_node *node)
236 return _get_irn_arity(node);
239 /* Returns the array with ins. This array is shifted with respect to the
240 array accessed by get_irn_n: The block operand is at position 0 not -1.
241 (@@@ This should be changed.)
242 The order of the predecessors in this array is not guaranteed, except that
243 lists of operands as predecessors of Block or arguments of a Call are
245 ir_node **get_irn_in(const ir_node *node)
248 #ifdef INTERPROCEDURAL_VIEW
249 if (get_interprocedural_view()) { /* handle Filter and Block specially */
250 if (get_irn_opcode(node) == iro_Filter) {
251 assert(node->attr.filter.in_cg);
252 return node->attr.filter.in_cg;
253 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
254 return node->attr.block.in_cg;
256 /* else fall through */
258 #endif /* INTERPROCEDURAL_VIEW */
/* Replaces the whole predecessor array of a node.
 * NOTE(review): declarations of i and pOld_in, the non-IPV initialization of
 * pOld_in (presumably &node->in), else-branches and closing braces were lost
 * in extraction — consult upstream before modifying. */
262 void set_irn_in(ir_node *node, int arity, ir_node **in)
266 ir_graph *irg = get_irn_irg(node);
269 #ifdef INTERPROCEDURAL_VIEW
270 if (get_interprocedural_view()) { /* handle Filter and Block specially */
271 ir_opcode code = get_irn_opcode(node);
272 if (code == iro_Filter) {
273 assert(node->attr.filter.in_cg);
274 pOld_in = &node->attr.filter.in_cg;
275 } else if (code == iro_Block && node->attr.block.in_cg) {
276 pOld_in = &node->attr.block.in_cg;
281 #endif /* INTERPROCEDURAL_VIEW */
/* First phase: report edge changes. Positions that exist in both old and
 * new arrays are rewires; positions beyond the old length are additions. */
285 for (i = 0; i < arity; i++) {
286 if (i < ARR_LEN(*pOld_in)-1)
287 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
289 edges_notify_edge(node, i, in[i], NULL, irg);
/* Remaining old positions (if the node shrinks) are deletions. */
291 for (;i < ARR_LEN(*pOld_in)-1; i++) {
292 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
/* Arity changed: reallocate the in array, preserving the block at slot 0. */
295 if (arity != ARR_LEN(*pOld_in) - 1) {
296 ir_node * block = (*pOld_in)[0];
297 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
298 (*pOld_in)[0] = block;
300 fix_backedges(irg->obst, node);
/* Second phase: copy the new predecessors behind the block slot. */
302 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
305 ir_node *(get_irn_intra_n)(const ir_node *node, int n)
307 return _get_irn_intra_n(node, n);
310 ir_node *(get_irn_inter_n)(const ir_node *node, int n)
312 return _get_irn_inter_n(node, n);
315 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
317 ir_node *(get_irn_n)(const ir_node *node, int n)
319 return _get_irn_n(node, n);
/* Sets the n-th predecessor of a node (n == -1 addresses the block).
 * NOTE(review): early returns inside the INTERPROCEDURAL_VIEW branches and
 * several closing braces were lost in extraction. */
322 void set_irn_n(ir_node *node, int n, ir_node *in)
324 assert(node && node->kind == k_ir_node);
326 assert(n < get_irn_arity(node));
327 assert(in && in->kind == k_ir_node);
329 #ifdef INTERPROCEDURAL_VIEW
330 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
331 /* Change block pred in both views! */
332 node->in[n + 1] = in;
333 assert(node->attr.filter.in_cg);
334 node->attr.filter.in_cg[n + 1] = in;
337 if (get_interprocedural_view()) { /* handle Filter and Block specially */
338 if (get_irn_opcode(node) == iro_Filter) {
339 assert(node->attr.filter.in_cg);
340 node->attr.filter.in_cg[n + 1] = in;
342 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
343 node->attr.block.in_cg[n + 1] = in;
346 /* else fall through */
348 #endif /* INTERPROCEDURAL_VIEW */
/* Default (intraprocedural) path: notify hooks/edges, then store.
 * in[n + 1] because slot 0 holds the block. */
351 hook_set_irn_n(node, n, in, node->in[n + 1]);
353 /* Here, we rely on src and tgt being in the current ir graph */
354 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
356 node->in[n + 1] = in;
359 int add_irn_n(ir_node *node, ir_node *in)
362 ir_graph *irg = get_irn_irg(node);
364 assert(node->op->opar == oparity_dynamic);
365 pos = ARR_LEN(node->in) - 1;
366 ARR_APP1(ir_node *, node->in, in);
367 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
370 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
375 void del_Sync_n(ir_node *n, int i)
377 int arity = get_Sync_n_preds(n);
378 ir_node *last_pred = get_Sync_pred(n, arity - 1);
379 set_Sync_pred(n, i, last_pred);
380 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
381 ARR_SHRINKLEN(get_irn_in(n), arity);
384 int (get_irn_deps)(const ir_node *node)
386 return _get_irn_deps(node);
389 ir_node *(get_irn_dep)(const ir_node *node, int pos)
391 return _get_irn_dep(node, pos);
394 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
396 _set_irn_dep(node, pos, dep);
/* Adds a dependency (DEP) edge from node to dep, reusing a NULL hole in the
 * deps array if one exists, otherwise appending.
 * NOTE(review): declarations of i/n/first_zero/res, loop-body break/continue
 * logic and the final return were lost in extraction — the visible lines are
 * only the skeleton. */
399 int add_irn_dep(ir_node *node, ir_node *dep)
403 /* DEP edges are only allowed in backend phase */
404 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
405 if (node->deps == NULL) {
406 node->deps = NEW_ARR_F(ir_node *, 1);
/* Scan for the first NULL hole and for an already-present dep. */
412 for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
413 if(node->deps[i] == NULL)
416 if(node->deps[i] == dep)
420 if (first_zero >= 0) {
421 node->deps[first_zero] = dep;
424 ARR_APP1(ir_node *, node->deps, dep);
/* Register the new dependency edge at the chosen position. */
429 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
434 void add_irn_deps(ir_node *tgt, ir_node *src)
438 for (i = 0, n = get_irn_deps(src); i < n; ++i)
439 add_irn_dep(tgt, get_irn_dep(src, i));
443 ir_mode *(get_irn_mode)(const ir_node *node)
445 return _get_irn_mode(node);
448 void (set_irn_mode)(ir_node *node, ir_mode *mode)
450 _set_irn_mode(node, mode);
453 /** Gets the string representation of the mode .*/
454 const char *get_irn_modename(const ir_node *node)
457 return get_mode_name(node->mode);
460 ident *get_irn_modeident(const ir_node *node)
463 return get_mode_ident(node->mode);
466 ir_op *(get_irn_op)(const ir_node *node)
468 return _get_irn_op(node);
471 /* should be private to the library: */
472 void (set_irn_op)(ir_node *node, ir_op *op)
474 _set_irn_op(node, op);
477 unsigned (get_irn_opcode)(const ir_node *node)
479 return _get_irn_opcode(node);
482 const char *get_irn_opname(const ir_node *node)
485 if (is_Phi0(node)) return "Phi0";
486 return get_id_str(node->op->name);
489 ident *get_irn_opident(const ir_node *node)
492 return node->op->name;
495 ir_visited_t (get_irn_visited)(const ir_node *node)
497 return _get_irn_visited(node);
500 void (set_irn_visited)(ir_node *node, ir_visited_t visited)
502 _set_irn_visited(node, visited);
505 void (mark_irn_visited)(ir_node *node)
507 _mark_irn_visited(node);
510 int (irn_visited)(const ir_node *node)
512 return _irn_visited(node);
515 int (irn_visited_else_mark)(ir_node *node)
517 return _irn_visited_else_mark(node);
520 void (set_irn_link)(ir_node *node, void *link)
522 _set_irn_link(node, link);
525 void *(get_irn_link)(const ir_node *node)
527 return _get_irn_link(node);
530 op_pin_state (get_irn_pinned)(const ir_node *node)
532 return _get_irn_pinned(node);
535 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
537 return _is_irn_pinned_in_irg(node);
540 void set_irn_pinned(ir_node *node, op_pin_state state)
542 /* due to optimization an opt may be turned into a Tuple */
546 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
547 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
549 node->attr.except.pin_state = state;
552 /* Outputs a unique number for this node */
553 long get_irn_node_nr(const ir_node *node)
556 return node->node_nr;
559 const_attr *get_irn_const_attr(ir_node *node)
561 assert(is_Const(node));
562 return &node->attr.con;
565 long get_irn_proj_attr(ir_node *node)
567 /* BEWARE: check for true Proj node here, no Filter */
568 assert(node->op == op_Proj);
569 return node->attr.proj;
572 alloc_attr *get_irn_alloc_attr(ir_node *node)
574 assert(is_Alloc(node));
575 return &node->attr.alloc;
578 free_attr *get_irn_free_attr(ir_node *node)
580 assert(is_Free(node));
581 return &node->attr.free;
584 symconst_attr *get_irn_symconst_attr(ir_node *node)
586 assert(is_SymConst(node));
587 return &node->attr.symc;
590 call_attr *get_irn_call_attr(ir_node *node)
592 assert(is_Call(node));
593 return &node->attr.call;
596 sel_attr *get_irn_sel_attr(ir_node *node)
598 assert(is_Sel(node));
599 return &node->attr.sel;
602 phi_attr *get_irn_phi_attr(ir_node *node)
604 return &node->attr.phi;
607 block_attr *get_irn_block_attr(ir_node *node)
609 assert(is_Block(node));
610 return &node->attr.block;
613 load_attr *get_irn_load_attr(ir_node *node)
615 assert(is_Load(node));
616 return &node->attr.load;
619 store_attr *get_irn_store_attr(ir_node *node)
621 assert(is_Store(node));
622 return &node->attr.store;
625 except_attr *get_irn_except_attr(ir_node *node)
627 assert(node->op == op_Div || node->op == op_Quot ||
628 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
629 return &node->attr.except;
632 divmod_attr *get_irn_divmod_attr(ir_node *node)
634 assert(node->op == op_Div || node->op == op_Quot ||
635 node->op == op_DivMod || node->op == op_Mod);
636 return &node->attr.divmod;
639 builtin_attr *get_irn_builtin_attr(ir_node *node)
641 assert(is_Builtin(node));
642 return &node->attr.builtin;
645 void *(get_irn_generic_attr)(ir_node *node)
647 assert(is_ir_node(node));
648 return _get_irn_generic_attr(node);
651 const void *(get_irn_generic_attr_const)(const ir_node *node)
653 assert(is_ir_node(node));
654 return _get_irn_generic_attr_const(node);
657 unsigned (get_irn_idx)(const ir_node *node)
659 assert(is_ir_node(node));
660 return _get_irn_idx(node);
663 int get_irn_pred_pos(ir_node *node, ir_node *arg)
666 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
667 if (get_irn_n(node, i) == arg)
673 /** manipulate fields of individual nodes **/
675 /* this works for all except Block */
676 ir_node *get_nodes_block(const ir_node *node)
678 assert(node->op != op_Block);
679 return get_irn_n(node, -1);
682 void set_nodes_block(ir_node *node, ir_node *block)
684 assert(node->op != op_Block);
685 set_irn_n(node, -1, block);
688 /* this works for all except Block */
689 ir_node *get_nodes_MacroBlock(const ir_node *node)
691 assert(node->op != op_Block);
692 return get_Block_MacroBlock(get_irn_n(node, -1));
695 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
696 * from Start. If so returns frame type, else Null. */
697 ir_type *is_frame_pointer(const ir_node *n)
699 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
700 ir_node *start = get_Proj_pred(n);
701 if (is_Start(start)) {
702 return get_irg_frame_type(get_irn_irg(start));
708 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
709 * from Start. If so returns tls type, else Null. */
710 ir_type *is_tls_pointer(const ir_node *n)
712 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
713 ir_node *start = get_Proj_pred(n);
714 if (is_Start(start)) {
715 return get_tls_type();
721 ir_node **get_Block_cfgpred_arr(ir_node *node)
723 assert(is_Block(node));
724 return (ir_node **)&(get_irn_in(node)[1]);
727 int (get_Block_n_cfgpreds)(const ir_node *node)
729 return _get_Block_n_cfgpreds(node);
732 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
734 return _get_Block_cfgpred(node, pos);
737 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
739 assert(is_Block(node));
740 set_irn_n(node, pos, pred);
743 int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
747 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
748 if (get_Block_cfgpred_block(block, i) == pred)
754 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
756 return _get_Block_cfgpred_block(node, pos);
759 int get_Block_matured(const ir_node *node)
761 assert(is_Block(node));
762 return (int)node->attr.block.is_matured;
765 void set_Block_matured(ir_node *node, int matured)
767 assert(is_Block(node));
768 node->attr.block.is_matured = matured;
771 ir_visited_t (get_Block_block_visited)(const ir_node *node)
773 return _get_Block_block_visited(node);
776 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
778 _set_Block_block_visited(node, visit);
781 /* For this current_ir_graph must be set. */
782 void (mark_Block_block_visited)(ir_node *node)
784 _mark_Block_block_visited(node);
787 int (Block_block_visited)(const ir_node *node)
789 return _Block_block_visited(node);
792 ir_node *get_Block_graph_arr(ir_node *node, int pos)
794 assert(is_Block(node));
795 return node->attr.block.graph_arr[pos+1];
798 void set_Block_graph_arr(ir_node *node, int pos, ir_node *value)
800 assert(is_Block(node));
801 node->attr.block.graph_arr[pos+1] = value;
804 #ifdef INTERPROCEDURAL_VIEW
805 void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[])
807 assert(is_Block(node));
808 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
809 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
810 node->attr.block.in_cg[0] = NULL;
811 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
813 /* Fix backedge array. fix_backedges() operates depending on
814 interprocedural_view. */
815 int ipv = get_interprocedural_view();
816 set_interprocedural_view(1);
817 fix_backedges(current_ir_graph->obst, node);
818 set_interprocedural_view(ipv);
821 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
824 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred)
826 assert(is_Block(node) && node->attr.block.in_cg &&
827 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
828 node->attr.block.in_cg[pos + 1] = pred;
831 ir_node **get_Block_cg_cfgpred_arr(ir_node *node)
833 assert(is_Block(node));
834 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
837 int get_Block_cg_n_cfgpreds(const ir_node *node)
839 assert(is_Block(node));
840 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
843 ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos)
845 assert(is_Block(node) && node->attr.block.in_cg);
846 return node->attr.block.in_cg[pos + 1];
849 void remove_Block_cg_cfgpred_arr(ir_node *node)
851 assert(is_Block(node));
852 node->attr.block.in_cg = NULL;
854 #endif /* INTERPROCEDURAL_VIEW */
856 ir_node *(set_Block_dead)(ir_node *block)
858 return _set_Block_dead(block);
861 int (is_Block_dead)(const ir_node *block)
863 return _is_Block_dead(block);
866 ir_extblk *get_Block_extbb(const ir_node *block)
869 assert(is_Block(block));
870 res = block->attr.block.extblk;
871 assert(res == NULL || is_ir_extbb(res));
875 void set_Block_extbb(ir_node *block, ir_extblk *extblk)
877 assert(is_Block(block));
878 assert(extblk == NULL || is_ir_extbb(extblk));
879 block->attr.block.extblk = extblk;
882 /* Returns the macro block header of a block.*/
883 ir_node *get_Block_MacroBlock(const ir_node *block)
886 assert(is_Block(block));
887 mbh = get_irn_n(block, -1);
888 /* once macro block header is respected by all optimizations,
889 this assert can be removed */
894 /* Sets the macro block header of a block. */
895 void set_Block_MacroBlock(ir_node *block, ir_node *mbh)
897 assert(is_Block(block));
899 assert(is_Block(mbh));
900 set_irn_n(block, -1, mbh);
903 /* returns the macro block header of a node. */
904 ir_node *get_irn_MacroBlock(const ir_node *n)
907 n = get_nodes_block(n);
908 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
912 return get_Block_MacroBlock(n);
915 /* returns the graph of a Block. */
916 ir_graph *(get_Block_irg)(const ir_node *block)
918 return _get_Block_irg(block);
921 ir_entity *create_Block_entity(ir_node *block)
924 assert(is_Block(block));
926 entity = block->attr.block.entity;
927 if (entity == NULL) {
931 glob = get_glob_type();
932 entity = new_entity(glob, id_unique("block_%u"), get_code_type());
933 set_entity_visibility(entity, ir_visibility_local);
934 set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
935 nr = get_irp_next_label_nr();
936 set_entity_label(entity, nr);
937 set_entity_compiler_generated(entity, 1);
939 block->attr.block.entity = entity;
944 ir_entity *get_Block_entity(const ir_node *block)
946 assert(is_Block(block));
947 return block->attr.block.entity;
950 void set_Block_entity(ir_node *block, ir_entity *entity)
952 assert(is_Block(block));
953 assert(get_entity_type(entity) == get_code_type());
954 block->attr.block.entity = entity;
957 int has_Block_entity(const ir_node *block)
959 return block->attr.block.entity != NULL;
962 ir_node *(get_Block_phis)(const ir_node *block)
964 return _get_Block_phis(block);
967 void (set_Block_phis)(ir_node *block, ir_node *phi)
969 _set_Block_phis(block, phi);
972 void (add_Block_phi)(ir_node *block, ir_node *phi)
974 _add_Block_phi(block, phi);
977 /* Get the Block mark (single bit). */
978 unsigned (get_Block_mark)(const ir_node *block)
980 return _get_Block_mark(block);
983 /* Set the Block mark (single bit). */
984 void (set_Block_mark)(ir_node *block, unsigned mark)
986 _set_Block_mark(block, mark);
989 int get_End_n_keepalives(const ir_node *end)
992 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
995 ir_node *get_End_keepalive(const ir_node *end, int pos)
998 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
1001 void add_End_keepalive(ir_node *end, ir_node *ka)
1003 assert(is_End(end));
1007 void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
1009 assert(is_End(end));
1010 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
1013 /* Set new keep-alives */
1014 void set_End_keepalives(ir_node *end, int n, ir_node *in[])
1017 ir_graph *irg = get_irn_irg(end);
1019 /* notify that edges are deleted */
1020 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
1021 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
1023 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
1025 for (i = 0; i < n; ++i) {
1026 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
1027 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
1031 /* Set new keep-alives from old keep-alives, skipping irn */
1032 void remove_End_keepalive(ir_node *end, ir_node *irn)
1034 int n = get_End_n_keepalives(end);
1039 for (i = n -1; i >= 0; --i) {
1040 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
1043 if (old_ka == irn) {
1050 irg = get_irn_irg(end);
1052 /* remove the edge */
1053 edges_notify_edge(end, idx, NULL, irn, irg);
1056 /* exchange with the last one */
1057 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
1058 edges_notify_edge(end, n - 1, NULL, old, irg);
1059 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
1060 edges_notify_edge(end, idx, old, NULL, irg);
1062 /* now n - 1 keeps, 1 block input */
1063 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
1066 /* remove Bads, NoMems and doublets from the keep-alive set */
1067 void remove_End_Bads_and_doublets(ir_node *end)
1070 int idx, n = get_End_n_keepalives(end);
1076 irg = get_irn_irg(end);
1077 pset_new_init(&keeps);
1079 for (idx = n - 1; idx >= 0; --idx) {
1080 ir_node *ka = get_End_keepalive(end, idx);
1082 if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
1083 /* remove the edge */
1084 edges_notify_edge(end, idx, NULL, ka, irg);
1087 /* exchange with the last one */
1088 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
1089 edges_notify_edge(end, n - 1, NULL, old, irg);
1090 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
1091 edges_notify_edge(end, idx, old, NULL, irg);
1095 pset_new_insert(&keeps, ka);
1098 /* n keeps, 1 block input */
1099 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
1101 pset_new_destroy(&keeps);
1104 void free_End(ir_node *end)
1106 assert(is_End(end));
1109 end->in = NULL; /* @@@ make sure we get an error if we use the
1110 in array afterwards ... */
1113 /* Return the target address of an IJmp */
1114 ir_node *get_IJmp_target(const ir_node *ijmp)
1116 assert(is_IJmp(ijmp));
1117 return get_irn_n(ijmp, 0);
1120 /** Sets the target address of an IJmp */
1121 void set_IJmp_target(ir_node *ijmp, ir_node *tgt)
1123 assert(is_IJmp(ijmp));
1124 set_irn_n(ijmp, 0, tgt);
1128 get_Cond_selector(const ir_node *node)
1130 assert(is_Cond(node));
1131 return get_irn_n(node, 0);
1135 set_Cond_selector(ir_node *node, ir_node *selector)
1137 assert(is_Cond(node));
1138 set_irn_n(node, 0, selector);
1142 get_Cond_default_proj(const ir_node *node)
1144 assert(is_Cond(node));
1145 return node->attr.cond.default_proj;
1148 void set_Cond_default_proj(ir_node *node, long defproj)
1150 assert(is_Cond(node));
1151 node->attr.cond.default_proj = defproj;
1155 get_Return_mem(const ir_node *node)
1157 assert(is_Return(node));
1158 return get_irn_n(node, 0);
1162 set_Return_mem(ir_node *node, ir_node *mem)
1164 assert(is_Return(node));
1165 set_irn_n(node, 0, mem);
1169 get_Return_n_ress(const ir_node *node)
1171 assert(is_Return(node));
1172 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1176 get_Return_res_arr(ir_node *node)
1178 assert(is_Return(node));
1179 if (get_Return_n_ress(node) > 0)
1180 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1187 set_Return_n_res(ir_node *node, int results)
1189 assert(is_Return(node));
1194 get_Return_res(const ir_node *node, int pos)
1196 assert(is_Return(node));
1197 assert(get_Return_n_ress(node) > pos);
1198 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1202 set_Return_res(ir_node *node, int pos, ir_node *res)
1204 assert(is_Return(node));
1205 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1208 tarval *(get_Const_tarval)(const ir_node *node)
1210 return _get_Const_tarval(node);
1214 set_Const_tarval(ir_node *node, tarval *con)
1216 assert(is_Const(node));
1217 node->attr.con.tv = con;
1220 int (is_Const_null)(const ir_node *node)
1222 return _is_Const_null(node);
1225 int (is_Const_one)(const ir_node *node)
1227 return _is_Const_one(node);
1230 int (is_Const_all_one)(const ir_node *node)
1232 return _is_Const_all_one(node);
1236 /* The source language type. Must be an atomic type. Mode of type must
1237 be mode of node. For tarvals from entities type must be pointer to
1240 get_Const_type(ir_node *node)
1242 assert(is_Const(node));
1243 return node->attr.con.tp;
1247 set_Const_type(ir_node *node, ir_type *tp)
1249 assert(is_Const(node));
1250 if (tp != firm_unknown_type) {
1251 assert(is_atomic_type(tp));
1252 assert(get_type_mode(tp) == get_irn_mode(node));
1254 node->attr.con.tp = tp;
1259 get_SymConst_kind(const ir_node *node)
1261 assert(is_SymConst(node));
1262 return node->attr.symc.kind;
1266 set_SymConst_kind(ir_node *node, symconst_kind kind)
1268 assert(is_SymConst(node));
1269 node->attr.symc.kind = kind;
1273 get_SymConst_type(const ir_node *node)
1275 /* the cast here is annoying, but we have to compensate for
1277 ir_node *irn = (ir_node *)node;
1278 assert(is_SymConst(node) &&
1279 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1280 return irn->attr.symc.sym.type_p;
1284 set_SymConst_type(ir_node *node, ir_type *tp)
1286 assert(is_SymConst(node) &&
1287 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1288 node->attr.symc.sym.type_p = tp;
1292 get_SymConst_name(const ir_node *node)
1294 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1295 return node->attr.symc.sym.ident_p;
1299 set_SymConst_name(ir_node *node, ident *name)
1301 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1302 node->attr.symc.sym.ident_p = name;
1306 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1307 ir_entity *get_SymConst_entity(const ir_node *node)
1309 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1310 return node->attr.symc.sym.entity_p;
1313 void set_SymConst_entity(ir_node *node, ir_entity *ent)
1315 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1316 node->attr.symc.sym.entity_p = ent;
1319 ir_enum_const *get_SymConst_enum(const ir_node *node)
1321 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1322 return node->attr.symc.sym.enum_p;
1325 void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
1327 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1328 node->attr.symc.sym.enum_p = ec;
1331 union symconst_symbol
1332 get_SymConst_symbol(const ir_node *node)
1334 assert(is_SymConst(node));
1335 return node->attr.symc.sym;
1339 set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
1341 assert(is_SymConst(node));
1342 node->attr.symc.sym = sym;
1346 get_SymConst_value_type(ir_node *node)
1348 assert(is_SymConst(node));
1349 return node->attr.symc.tp;
1353 set_SymConst_value_type(ir_node *node, ir_type *tp)
1355 assert(is_SymConst(node));
1356 node->attr.symc.tp = tp;
1360 get_Sel_mem(const ir_node *node)
1362 assert(is_Sel(node));
1363 return get_irn_n(node, 0);
1367 set_Sel_mem(ir_node *node, ir_node *mem)
1369 assert(is_Sel(node));
1370 set_irn_n(node, 0, mem);
1374 get_Sel_ptr(const ir_node *node)
1376 assert(is_Sel(node));
1377 return get_irn_n(node, 1);
1381 set_Sel_ptr(ir_node *node, ir_node *ptr)
1383 assert(is_Sel(node));
1384 set_irn_n(node, 1, ptr);
1388 get_Sel_n_indexs(const ir_node *node)
1390 assert(is_Sel(node));
1391 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1395 get_Sel_index_arr(ir_node *node)
1397 assert(is_Sel(node));
1398 if (get_Sel_n_indexs(node) > 0)
1399 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1405 get_Sel_index(const ir_node *node, int pos)
1407 assert(is_Sel(node));
1408 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1412 set_Sel_index(ir_node *node, int pos, ir_node *index)
1414 assert(is_Sel(node));
1415 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1419 get_Sel_entity(const ir_node *node)
1421 assert(is_Sel(node));
1422 return node->attr.sel.entity;
1425 /* need a version without const to prevent warning */
1426 static ir_entity *_get_Sel_entity(ir_node *node)
1428 return get_Sel_entity(node);
1432 set_Sel_entity(ir_node *node, ir_entity *ent)
1434 assert(is_Sel(node));
1435 node->attr.sel.entity = ent;
1439 /* For unary and binary arithmetic operations the access to the
1440 operands can be factored out. Left is the first, right the
1441 second arithmetic value as listed in tech report 0999-33.
1442 unops are: Minus, Abs, Not, Conv, Cast
1443 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1444 Shr, Shrs, Rotate, Cmp */
1448 get_Call_mem(const ir_node *node)
1450 assert(is_Call(node));
1451 return get_irn_n(node, 0);
1455 set_Call_mem(ir_node *node, ir_node *mem)
1457 assert(is_Call(node));
1458 set_irn_n(node, 0, mem);
1462 get_Call_ptr(const ir_node *node)
1464 assert(is_Call(node));
1465 return get_irn_n(node, 1);
1469 set_Call_ptr(ir_node *node, ir_node *ptr)
1471 assert(is_Call(node));
1472 set_irn_n(node, 1, ptr);
1476 get_Call_param_arr(ir_node *node)
1478 assert(is_Call(node));
1479 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1483 get_Call_n_params(const ir_node *node)
1485 assert(is_Call(node));
1486 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1490 get_Call_param(const ir_node *node, int pos)
1492 assert(is_Call(node));
1493 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1497 set_Call_param(ir_node *node, int pos, ir_node *param)
1499 assert(is_Call(node));
1500 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1504 get_Call_type(ir_node *node)
1506 assert(is_Call(node));
1507 return node->attr.call.type;
1511 set_Call_type(ir_node *node, ir_type *tp)
1513 assert(is_Call(node));
1514 assert((get_unknown_type() == tp) || is_Method_type(tp));
1515 node->attr.call.type = tp;
1519 get_Call_tail_call(const ir_node *node)
1521 assert(is_Call(node));
1522 return node->attr.call.tail_call;
1526 set_Call_tail_call(ir_node *node, unsigned tail_call)
1528 assert(is_Call(node));
1529 node->attr.call.tail_call = tail_call != 0;
1533 get_Builtin_mem(const ir_node *node)
1535 assert(is_Builtin(node));
1536 return get_irn_n(node, 0);
/* Sets the memory input (input 0) of a Builtin node.
 * NOTE(review): the function name is misspelled — "Builin" instead of
 * "Builtin". It cannot be renamed here without breaking the public API
 * declared in the header; flagging for a coordinated rename. */
set_Builin_mem(ir_node *node, ir_node *mem)
	assert(is_Builtin(node));
	set_irn_n(node, 0, mem);
1547 get_Builtin_kind(const ir_node *node)
1549 assert(is_Builtin(node));
1550 return node->attr.builtin.kind;
1554 set_Builtin_kind(ir_node *node, ir_builtin_kind kind)
1556 assert(is_Builtin(node));
1557 node->attr.builtin.kind = kind;
1561 get_Builtin_param_arr(ir_node *node)
1563 assert(is_Builtin(node));
1564 return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
1568 get_Builtin_n_params(const ir_node *node)
1570 assert(is_Builtin(node));
1571 return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
1575 get_Builtin_param(const ir_node *node, int pos)
1577 assert(is_Builtin(node));
1578 return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
1582 set_Builtin_param(ir_node *node, int pos, ir_node *param)
1584 assert(is_Builtin(node));
1585 set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
1589 get_Builtin_type(ir_node *node)
1591 assert(is_Builtin(node));
1592 return node->attr.builtin.type;
1596 set_Builtin_type(ir_node *node, ir_type *tp)
1598 assert(is_Builtin(node));
1599 assert((get_unknown_type() == tp) || is_Method_type(tp));
1600 node->attr.builtin.type = tp;
1603 /* Returns a human readable string for the ir_builtin_kind. */
1604 const char *get_builtin_kind_name(ir_builtin_kind kind)
1606 #define X(a) case a: return #a;
1609 X(ir_bk_debugbreak);
1610 X(ir_bk_return_address);
1611 X(ir_bk_frame_address);
1621 X(ir_bk_inner_trampoline);
1628 int Call_has_callees(const ir_node *node)
1630 assert(is_Call(node));
1631 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1632 (node->attr.call.callee_arr != NULL));
1635 int get_Call_n_callees(const ir_node *node)
1637 assert(is_Call(node) && node->attr.call.callee_arr);
1638 return ARR_LEN(node->attr.call.callee_arr);
1641 ir_entity *get_Call_callee(const ir_node *node, int pos)
1643 assert(pos >= 0 && pos < get_Call_n_callees(node));
1644 return node->attr.call.callee_arr[pos];
1647 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr)
1649 assert(is_Call(node));
1650 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1651 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1653 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1656 void remove_Call_callee_arr(ir_node *node)
1658 assert(is_Call(node));
1659 node->attr.call.callee_arr = NULL;
1662 ir_node *get_CallBegin_ptr(const ir_node *node)
1664 assert(is_CallBegin(node));
1665 return get_irn_n(node, 0);
1668 void set_CallBegin_ptr(ir_node *node, ir_node *ptr)
1670 assert(is_CallBegin(node));
1671 set_irn_n(node, 0, ptr);
1674 ir_node *get_CallBegin_call(const ir_node *node)
1676 assert(is_CallBegin(node));
1677 return node->attr.callbegin.call;
1680 void set_CallBegin_call(ir_node *node, ir_node *call)
1682 assert(is_CallBegin(node));
1683 node->attr.callbegin.call = call;
1687 * Returns non-zero if a Call is surely a self-recursive Call.
 * Beware: if this function returns 0, the call might still be self-recursive!
1690 int is_self_recursive_Call(const ir_node *call)
1692 const ir_node *callee = get_Call_ptr(call);
1694 if (is_SymConst_addr_ent(callee)) {
1695 const ir_entity *ent = get_SymConst_entity(callee);
1696 const ir_graph *irg = get_entity_irg(ent);
1697 if (irg == get_irn_irg(call))
1704 ir_node * get_##OP##_left(const ir_node *node) { \
1705 assert(is_##OP(node)); \
1706 return get_irn_n(node, node->op->op_index); \
1708 void set_##OP##_left(ir_node *node, ir_node *left) { \
1709 assert(is_##OP(node)); \
1710 set_irn_n(node, node->op->op_index, left); \
1712 ir_node *get_##OP##_right(const ir_node *node) { \
1713 assert(is_##OP(node)); \
1714 return get_irn_n(node, node->op->op_index + 1); \
1716 void set_##OP##_right(ir_node *node, ir_node *right) { \
1717 assert(is_##OP(node)); \
1718 set_irn_n(node, node->op->op_index + 1, right); \
1722 ir_node *get_##OP##_op(const ir_node *node) { \
1723 assert(is_##OP(node)); \
1724 return get_irn_n(node, node->op->op_index); \
1726 void set_##OP##_op(ir_node *node, ir_node *op) { \
1727 assert(is_##OP(node)); \
1728 set_irn_n(node, node->op->op_index, op); \
1731 #define BINOP_MEM(OP) \
1735 get_##OP##_mem(const ir_node *node) { \
1736 assert(is_##OP(node)); \
1737 return get_irn_n(node, 0); \
1741 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1742 assert(is_##OP(node)); \
1743 set_irn_n(node, 0, mem); \
1749 ir_mode *get_##OP##_resmode(const ir_node *node) { \
1750 assert(is_##OP(node)); \
1751 return node->attr.divmod.resmode; \
1754 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
1755 assert(is_##OP(node)); \
1756 node->attr.divmod.resmode = mode; \
1784 int get_Div_no_remainder(const ir_node *node)
1786 assert(is_Div(node));
1787 return node->attr.divmod.no_remainder;
1790 void set_Div_no_remainder(ir_node *node, int no_remainder)
1792 assert(is_Div(node));
1793 node->attr.divmod.no_remainder = no_remainder;
1796 int get_Conv_strict(const ir_node *node)
1798 assert(is_Conv(node));
1799 return node->attr.conv.strict;
1802 void set_Conv_strict(ir_node *node, int strict_flag)
1804 assert(is_Conv(node));
1805 node->attr.conv.strict = (char)strict_flag;
1809 get_Cast_type(ir_node *node)
1811 assert(is_Cast(node));
1812 return node->attr.cast.type;
1816 set_Cast_type(ir_node *node, ir_type *to_tp)
1818 assert(is_Cast(node));
1819 node->attr.cast.type = to_tp;
1823 /* Checks for upcast.
1825 * Returns true if the Cast node casts a class type to a super type.
1827 int is_Cast_upcast(ir_node *node)
1829 ir_type *totype = get_Cast_type(node);
1830 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1832 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1835 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1836 totype = get_pointer_points_to_type(totype);
1837 fromtype = get_pointer_points_to_type(fromtype);
1842 if (!is_Class_type(totype)) return 0;
1843 return is_SubClass_of(fromtype, totype);
1846 /* Checks for downcast.
1848 * Returns true if the Cast node casts a class type to a sub type.
1850 int is_Cast_downcast(ir_node *node)
1852 ir_type *totype = get_Cast_type(node);
1853 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1855 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1858 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1859 totype = get_pointer_points_to_type(totype);
1860 fromtype = get_pointer_points_to_type(fromtype);
1865 if (!is_Class_type(totype)) return 0;
1866 return is_SubClass_of(totype, fromtype);
1870 (is_unop)(const ir_node *node) {
1871 return _is_unop(node);
1875 get_unop_op(const ir_node *node)
1877 if (node->op->opar == oparity_unary)
1878 return get_irn_n(node, node->op->op_index);
1880 assert(node->op->opar == oparity_unary);
1885 set_unop_op(ir_node *node, ir_node *op)
1887 if (node->op->opar == oparity_unary)
1888 set_irn_n(node, node->op->op_index, op);
1890 assert(node->op->opar == oparity_unary);
1894 (is_binop)(const ir_node *node) {
1895 return _is_binop(node);
1899 get_binop_left(const ir_node *node)
1901 assert(node->op->opar == oparity_binary);
1902 return get_irn_n(node, node->op->op_index);
1906 set_binop_left(ir_node *node, ir_node *left)
1908 assert(node->op->opar == oparity_binary);
1909 set_irn_n(node, node->op->op_index, left);
1913 get_binop_right(const ir_node *node)
1915 assert(node->op->opar == oparity_binary);
1916 return get_irn_n(node, node->op->op_index + 1);
1920 set_binop_right(ir_node *node, ir_node *right)
1922 assert(node->op->opar == oparity_binary);
1923 set_irn_n(node, node->op->op_index + 1, right);
1926 int is_Phi0(const ir_node *n)
1930 return ((get_irn_op(n) == op_Phi) &&
1931 (get_irn_arity(n) == 0) &&
1932 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1936 get_Phi_preds_arr(ir_node *node)
1938 assert(node->op == op_Phi);
1939 return (ir_node **)&(get_irn_in(node)[1]);
1943 get_Phi_n_preds(const ir_node *node)
1945 assert(is_Phi(node) || is_Phi0(node));
1946 return (get_irn_arity(node));
1950 void set_Phi_n_preds(ir_node *node, int n_preds)
1952 assert(node->op == op_Phi);
1957 get_Phi_pred(const ir_node *node, int pos)
1959 assert(is_Phi(node) || is_Phi0(node));
1960 return get_irn_n(node, pos);
1964 set_Phi_pred(ir_node *node, int pos, ir_node *pred)
1966 assert(is_Phi(node) || is_Phi0(node));
1967 set_irn_n(node, pos, pred);
1970 ir_node *(get_Phi_next)(const ir_node *phi)
1972 return _get_Phi_next(phi);
1975 void (set_Phi_next)(ir_node *phi, ir_node *next)
1977 _set_Phi_next(phi, next);
1980 int is_memop(const ir_node *node)
1982 ir_opcode code = get_irn_opcode(node);
1983 return (code == iro_Load || code == iro_Store);
1986 ir_node *get_memop_mem(const ir_node *node)
1988 assert(is_memop(node));
1989 return get_irn_n(node, 0);
1992 void set_memop_mem(ir_node *node, ir_node *mem)
1994 assert(is_memop(node));
1995 set_irn_n(node, 0, mem);
1998 ir_node *get_memop_ptr(const ir_node *node)
2000 assert(is_memop(node));
2001 return get_irn_n(node, 1);
2004 void set_memop_ptr(ir_node *node, ir_node *ptr)
2006 assert(is_memop(node));
2007 set_irn_n(node, 1, ptr);
2011 get_Load_mem(const ir_node *node)
2013 assert(is_Load(node));
2014 return get_irn_n(node, 0);
2018 set_Load_mem(ir_node *node, ir_node *mem)
2020 assert(is_Load(node));
2021 set_irn_n(node, 0, mem);
2025 get_Load_ptr(const ir_node *node)
2027 assert(is_Load(node));
2028 return get_irn_n(node, 1);
2032 set_Load_ptr(ir_node *node, ir_node *ptr)
2034 assert(is_Load(node));
2035 set_irn_n(node, 1, ptr);
2039 get_Load_mode(const ir_node *node)
2041 assert(is_Load(node));
2042 return node->attr.load.mode;
2046 set_Load_mode(ir_node *node, ir_mode *mode)
2048 assert(is_Load(node));
2049 node->attr.load.mode = mode;
2053 get_Load_volatility(const ir_node *node)
2055 assert(is_Load(node));
2056 return node->attr.load.volatility;
2060 set_Load_volatility(ir_node *node, ir_volatility volatility)
2062 assert(is_Load(node));
2063 node->attr.load.volatility = volatility;
2067 get_Load_align(const ir_node *node)
2069 assert(is_Load(node));
2070 return node->attr.load.aligned;
2074 set_Load_align(ir_node *node, ir_align align)
2076 assert(is_Load(node));
2077 node->attr.load.aligned = align;
2082 get_Store_mem(const ir_node *node)
2084 assert(is_Store(node));
2085 return get_irn_n(node, 0);
2089 set_Store_mem(ir_node *node, ir_node *mem)
2091 assert(is_Store(node));
2092 set_irn_n(node, 0, mem);
2096 get_Store_ptr(const ir_node *node)
2098 assert(is_Store(node));
2099 return get_irn_n(node, 1);
2103 set_Store_ptr(ir_node *node, ir_node *ptr)
2105 assert(is_Store(node));
2106 set_irn_n(node, 1, ptr);
2110 get_Store_value(const ir_node *node)
2112 assert(is_Store(node));
2113 return get_irn_n(node, 2);
2117 set_Store_value(ir_node *node, ir_node *value)
2119 assert(is_Store(node));
2120 set_irn_n(node, 2, value);
2124 get_Store_volatility(const ir_node *node)
2126 assert(is_Store(node));
2127 return node->attr.store.volatility;
2131 set_Store_volatility(ir_node *node, ir_volatility volatility)
2133 assert(is_Store(node));
2134 node->attr.store.volatility = volatility;
2138 get_Store_align(const ir_node *node)
2140 assert(is_Store(node));
2141 return node->attr.store.aligned;
2145 set_Store_align(ir_node *node, ir_align align)
2147 assert(is_Store(node));
2148 node->attr.store.aligned = align;
2153 get_Alloc_mem(const ir_node *node)
2155 assert(is_Alloc(node));
2156 return get_irn_n(node, 0);
2160 set_Alloc_mem(ir_node *node, ir_node *mem)
2162 assert(is_Alloc(node));
2163 set_irn_n(node, 0, mem);
2167 get_Alloc_size(const ir_node *node)
2169 assert(is_Alloc(node));
2170 return get_irn_n(node, 1);
2174 set_Alloc_size(ir_node *node, ir_node *size)
2176 assert(is_Alloc(node));
2177 set_irn_n(node, 1, size);
2181 get_Alloc_type(ir_node *node)
2183 assert(is_Alloc(node));
2184 return node->attr.alloc.type;
2188 set_Alloc_type(ir_node *node, ir_type *tp)
2190 assert(is_Alloc(node));
2191 node->attr.alloc.type = tp;
2195 get_Alloc_where(const ir_node *node)
2197 assert(is_Alloc(node));
2198 return node->attr.alloc.where;
2202 set_Alloc_where(ir_node *node, ir_where_alloc where)
2204 assert(is_Alloc(node));
2205 node->attr.alloc.where = where;
2210 get_Free_mem(const ir_node *node)
2212 assert(is_Free(node));
2213 return get_irn_n(node, 0);
2217 set_Free_mem(ir_node *node, ir_node *mem)
2219 assert(is_Free(node));
2220 set_irn_n(node, 0, mem);
2224 get_Free_ptr(const ir_node *node)
2226 assert(is_Free(node));
2227 return get_irn_n(node, 1);
2231 set_Free_ptr(ir_node *node, ir_node *ptr)
2233 assert(is_Free(node));
2234 set_irn_n(node, 1, ptr);
2238 get_Free_size(const ir_node *node)
2240 assert(is_Free(node));
2241 return get_irn_n(node, 2);
2245 set_Free_size(ir_node *node, ir_node *size)
2247 assert(is_Free(node));
2248 set_irn_n(node, 2, size);
2252 get_Free_type(ir_node *node)
2254 assert(is_Free(node));
2255 return node->attr.free.type;
2259 set_Free_type(ir_node *node, ir_type *tp)
2261 assert(is_Free(node));
2262 node->attr.free.type = tp;
2266 get_Free_where(const ir_node *node)
2268 assert(is_Free(node));
2269 return node->attr.free.where;
2273 set_Free_where(ir_node *node, ir_where_alloc where)
2275 assert(is_Free(node));
2276 node->attr.free.where = where;
2279 ir_node **get_Sync_preds_arr(ir_node *node)
2281 assert(is_Sync(node));
2282 return (ir_node **)&(get_irn_in(node)[1]);
2285 int get_Sync_n_preds(const ir_node *node)
2287 assert(is_Sync(node));
2288 return (get_irn_arity(node));
2292 void set_Sync_n_preds(ir_node *node, int n_preds)
2294 assert(is_Sync(node));
2298 ir_node *get_Sync_pred(const ir_node *node, int pos)
2300 assert(is_Sync(node));
2301 return get_irn_n(node, pos);
2304 void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
2306 assert(is_Sync(node));
2307 set_irn_n(node, pos, pred);
2310 /* Add a new Sync predecessor */
2311 void add_Sync_pred(ir_node *node, ir_node *pred)
2313 assert(is_Sync(node));
2314 add_irn_n(node, pred);
2317 /* Returns the source language type of a Proj node. */
2318 ir_type *get_Proj_type(ir_node *n)
2320 ir_type *tp = firm_unknown_type;
2321 ir_node *pred = get_Proj_pred(n);
2323 switch (get_irn_opcode(pred)) {
2326 /* Deal with Start / Call here: we need to know the Proj Nr. */
2327 assert(get_irn_mode(pred) == mode_T);
2328 pred_pred = get_Proj_pred(pred);
2330 if (is_Start(pred_pred)) {
2331 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
2332 tp = get_method_param_type(mtp, get_Proj_proj(n));
2333 } else if (is_Call(pred_pred)) {
2334 ir_type *mtp = get_Call_type(pred_pred);
2335 tp = get_method_res_type(mtp, get_Proj_proj(n));
2338 case iro_Start: break;
2339 case iro_Call: break;
2341 ir_node *a = get_Load_ptr(pred);
2343 tp = get_entity_type(get_Sel_entity(a));
2352 get_Proj_pred(const ir_node *node)
2354 assert(is_Proj(node));
2355 return get_irn_n(node, 0);
2359 set_Proj_pred(ir_node *node, ir_node *pred)
2361 assert(is_Proj(node));
2362 set_irn_n(node, 0, pred);
2366 get_Proj_proj(const ir_node *node)
2368 #ifdef INTERPROCEDURAL_VIEW
2369 ir_opcode code = get_irn_opcode(node);
2371 if (code == iro_Proj) {
2372 return node->attr.proj;
2375 assert(code == iro_Filter);
2376 return node->attr.filter.proj;
2379 assert(is_Proj(node));
2380 return node->attr.proj;
2381 #endif /* INTERPROCEDURAL_VIEW */
2385 set_Proj_proj(ir_node *node, long proj)
2387 #ifdef INTERPROCEDURAL_VIEW
2388 ir_opcode code = get_irn_opcode(node);
2390 if (code == iro_Proj) {
2391 node->attr.proj = proj;
2394 assert(code == iro_Filter);
2395 node->attr.filter.proj = proj;
2398 assert(is_Proj(node));
2399 node->attr.proj = proj;
2400 #endif /* INTERPROCEDURAL_VIEW */
2403 /* Returns non-zero if a node is a routine parameter. */
2404 int (is_arg_Proj)(const ir_node *node)
2406 return _is_arg_Proj(node);
2410 get_Tuple_preds_arr(ir_node *node)
2412 assert(is_Tuple(node));
2413 return (ir_node **)&(get_irn_in(node)[1]);
2417 get_Tuple_n_preds(const ir_node *node)
2419 assert(is_Tuple(node));
2420 return get_irn_arity(node);
2425 set_Tuple_n_preds(ir_node *node, int n_preds)
2427 assert(is_Tuple(node));
2432 get_Tuple_pred(const ir_node *node, int pos)
2434 assert(is_Tuple(node));
2435 return get_irn_n(node, pos);
2439 set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
2441 assert(is_Tuple(node));
2442 set_irn_n(node, pos, pred);
2446 get_Id_pred(const ir_node *node)
2448 assert(is_Id(node));
2449 return get_irn_n(node, 0);
2453 set_Id_pred(ir_node *node, ir_node *pred)
2455 assert(is_Id(node));
2456 set_irn_n(node, 0, pred);
2459 ir_node *get_Confirm_value(const ir_node *node)
2461 assert(is_Confirm(node));
2462 return get_irn_n(node, 0);
2465 void set_Confirm_value(ir_node *node, ir_node *value)
2467 assert(is_Confirm(node));
2468 set_irn_n(node, 0, value);
2471 ir_node *get_Confirm_bound(const ir_node *node)
2473 assert(is_Confirm(node));
2474 return get_irn_n(node, 1);
2477 void set_Confirm_bound(ir_node *node, ir_node *bound)
2479 assert(is_Confirm(node));
2480 set_irn_n(node, 0, bound);
2483 pn_Cmp get_Confirm_cmp(const ir_node *node)
2485 assert(is_Confirm(node));
2486 return node->attr.confirm.cmp;
2489 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp)
2491 assert(is_Confirm(node));
2492 node->attr.confirm.cmp = cmp;
2496 get_Filter_pred(ir_node *node)
2498 assert(is_Filter(node));
2503 set_Filter_pred(ir_node *node, ir_node *pred)
2505 assert(is_Filter(node));
2510 get_Filter_proj(ir_node *node)
2512 assert(is_Filter(node));
2513 return node->attr.filter.proj;
2517 set_Filter_proj(ir_node *node, long proj)
2519 assert(is_Filter(node));
2520 node->attr.filter.proj = proj;
2523 /* Don't use get_irn_arity, get_irn_n in implementation as access
2524 shall work independent of view!!! */
2525 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in)
2527 assert(is_Filter(node));
2528 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2529 ir_graph *irg = get_irn_irg(node);
2530 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2531 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
2532 node->attr.filter.in_cg[0] = node->in[0];
2534 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2537 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred)
2539 assert(is_Filter(node) && node->attr.filter.in_cg &&
2540 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2541 node->attr.filter.in_cg[pos + 1] = pred;
2544 int get_Filter_n_cg_preds(ir_node *node)
2546 assert(is_Filter(node) && node->attr.filter.in_cg);
2547 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2550 ir_node *get_Filter_cg_pred(ir_node *node, int pos)
2553 assert(is_Filter(node) && node->attr.filter.in_cg &&
2555 arity = ARR_LEN(node->attr.filter.in_cg);
2556 assert(pos < arity - 1);
2557 return node->attr.filter.in_cg[pos + 1];
2561 ir_node *get_Mux_sel(const ir_node *node)
2563 assert(is_Mux(node));
2567 void set_Mux_sel(ir_node *node, ir_node *sel)
2569 assert(is_Mux(node));
2573 ir_node *get_Mux_false(const ir_node *node)
2575 assert(is_Mux(node));
2579 void set_Mux_false(ir_node *node, ir_node *ir_false)
2581 assert(is_Mux(node));
2582 node->in[2] = ir_false;
2585 ir_node *get_Mux_true(const ir_node *node)
2587 assert(is_Mux(node));
2591 void set_Mux_true(ir_node *node, ir_node *ir_true)
2593 assert(is_Mux(node));
2594 node->in[3] = ir_true;
2598 ir_node *get_CopyB_mem(const ir_node *node)
2600 assert(is_CopyB(node));
2601 return get_irn_n(node, 0);
2604 void set_CopyB_mem(ir_node *node, ir_node *mem)
2606 assert(node->op == op_CopyB);
2607 set_irn_n(node, 0, mem);
2610 ir_node *get_CopyB_dst(const ir_node *node)
2612 assert(is_CopyB(node));
2613 return get_irn_n(node, 1);
2616 void set_CopyB_dst(ir_node *node, ir_node *dst)
2618 assert(is_CopyB(node));
2619 set_irn_n(node, 1, dst);
2622 ir_node *get_CopyB_src(const ir_node *node)
2624 assert(is_CopyB(node));
2625 return get_irn_n(node, 2);
2628 void set_CopyB_src(ir_node *node, ir_node *src)
2630 assert(is_CopyB(node));
2631 set_irn_n(node, 2, src);
2634 ir_type *get_CopyB_type(ir_node *node)
2636 assert(is_CopyB(node));
2637 return node->attr.copyb.type;
2640 void set_CopyB_type(ir_node *node, ir_type *data_type)
2642 assert(is_CopyB(node) && data_type);
2643 node->attr.copyb.type = data_type;
2648 get_InstOf_type(ir_node *node)
2650 assert(node->op == op_InstOf);
2651 return node->attr.instof.type;
2655 set_InstOf_type(ir_node *node, ir_type *type)
2657 assert(node->op == op_InstOf);
2658 node->attr.instof.type = type;
2662 get_InstOf_store(const ir_node *node)
2664 assert(node->op == op_InstOf);
2665 return get_irn_n(node, 0);
2669 set_InstOf_store(ir_node *node, ir_node *obj)
2671 assert(node->op == op_InstOf);
2672 set_irn_n(node, 0, obj);
2676 get_InstOf_obj(const ir_node *node)
2678 assert(node->op == op_InstOf);
2679 return get_irn_n(node, 1);
2683 set_InstOf_obj(ir_node *node, ir_node *obj)
2685 assert(node->op == op_InstOf);
2686 set_irn_n(node, 1, obj);
2689 /* Returns the memory input of a Raise operation. */
2691 get_Raise_mem(const ir_node *node)
2693 assert(is_Raise(node));
2694 return get_irn_n(node, 0);
2698 set_Raise_mem(ir_node *node, ir_node *mem)
2700 assert(is_Raise(node));
2701 set_irn_n(node, 0, mem);
2705 get_Raise_exo_ptr(const ir_node *node)
2707 assert(is_Raise(node));
2708 return get_irn_n(node, 1);
2712 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr)
2714 assert(is_Raise(node));
2715 set_irn_n(node, 1, exo_ptr);
2720 /* Returns the memory input of a Bound operation. */
2721 ir_node *get_Bound_mem(const ir_node *bound)
2723 assert(is_Bound(bound));
2724 return get_irn_n(bound, 0);
2727 void set_Bound_mem(ir_node *bound, ir_node *mem)
2729 assert(is_Bound(bound));
2730 set_irn_n(bound, 0, mem);
2733 /* Returns the index input of a Bound operation. */
2734 ir_node *get_Bound_index(const ir_node *bound)
2736 assert(is_Bound(bound));
2737 return get_irn_n(bound, 1);
2740 void set_Bound_index(ir_node *bound, ir_node *idx)
2742 assert(is_Bound(bound));
2743 set_irn_n(bound, 1, idx);
2746 /* Returns the lower bound input of a Bound operation. */
2747 ir_node *get_Bound_lower(const ir_node *bound)
2749 assert(is_Bound(bound));
2750 return get_irn_n(bound, 2);
2753 void set_Bound_lower(ir_node *bound, ir_node *lower)
2755 assert(is_Bound(bound));
2756 set_irn_n(bound, 2, lower);
2759 /* Returns the upper bound input of a Bound operation. */
2760 ir_node *get_Bound_upper(const ir_node *bound)
2762 assert(is_Bound(bound));
2763 return get_irn_n(bound, 3);
2766 void set_Bound_upper(ir_node *bound, ir_node *upper)
2768 assert(is_Bound(bound));
2769 set_irn_n(bound, 3, upper);
2772 /* Return the operand of a Pin node. */
2773 ir_node *get_Pin_op(const ir_node *pin)
2775 assert(is_Pin(pin));
2776 return get_irn_n(pin, 0);
2779 void set_Pin_op(ir_node *pin, ir_node *node)
2781 assert(is_Pin(pin));
2782 set_irn_n(pin, 0, node);
2785 /* Return the assembler text of an ASM pseudo node. */
2786 ident *get_ASM_text(const ir_node *node)
2788 assert(is_ASM(node));
2789 return node->attr.assem.asm_text;
2792 /* Return the number of input constraints for an ASM node. */
2793 int get_ASM_n_input_constraints(const ir_node *node)
2795 assert(is_ASM(node));
2796 return ARR_LEN(node->attr.assem.inputs);
2799 /* Return the input constraints for an ASM node. This is a flexible array. */
2800 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node)
2802 assert(is_ASM(node));
2803 return node->attr.assem.inputs;
2806 /* Return the number of output constraints for an ASM node. */
2807 int get_ASM_n_output_constraints(const ir_node *node)
2809 assert(is_ASM(node));
2810 return ARR_LEN(node->attr.assem.outputs);
2813 /* Return the output constraints for an ASM node. */
2814 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node)
2816 assert(is_ASM(node));
2817 return node->attr.assem.outputs;
2820 /* Return the number of clobbered registers for an ASM node. */
2821 int get_ASM_n_clobbers(const ir_node *node)
2823 assert(is_ASM(node));
2824 return ARR_LEN(node->attr.assem.clobber);
2827 /* Return the list of clobbered registers for an ASM node. */
2828 ident **get_ASM_clobbers(const ir_node *node)
2830 assert(is_ASM(node));
2831 return node->attr.assem.clobber;
2834 /* returns the graph of a node */
2836 get_irn_irg(const ir_node *node)
2839 * Do not use get_nodes_Block() here, because this
2840 * will check the pinned state.
2841 * However even a 'wrong' block is always in the proper
2844 if (! is_Block(node))
2845 node = get_irn_n(node, -1);
2846 /* note that get_Block_irg() can handle Bad nodes */
2847 return get_Block_irg(node);
2851 /*----------------------------------------------------------------*/
2852 /* Auxiliary routines */
2853 /*----------------------------------------------------------------*/
2856 skip_Proj(ir_node *node)
2858 /* don't assert node !!! */
2863 node = get_Proj_pred(node);
2869 skip_Proj_const(const ir_node *node)
2871 /* don't assert node !!! */
2876 node = get_Proj_pred(node);
2882 skip_Tuple(ir_node *node)
2888 if (is_Proj(node)) {
2889 pred = get_Proj_pred(node);
2890 op = get_irn_op(pred);
2893 * Looks strange but calls get_irn_op() only once
2894 * in most often cases.
2896 if (op == op_Proj) { /* nested Tuple ? */
2897 pred = skip_Tuple(pred);
2899 if (is_Tuple(pred)) {
2900 node = get_Tuple_pred(pred, get_Proj_proj(node));
2903 } else if (op == op_Tuple) {
2904 node = get_Tuple_pred(pred, get_Proj_proj(node));
2911 /* returns operand of node if node is a Cast */
2912 ir_node *skip_Cast(ir_node *node)
2915 return get_Cast_op(node);
2919 /* returns operand of node if node is a Cast */
2920 const ir_node *skip_Cast_const(const ir_node *node)
2923 return get_Cast_op(node);
2927 /* returns operand of node if node is a Pin */
2928 ir_node *skip_Pin(ir_node *node)
2931 return get_Pin_op(node);
2935 /* returns operand of node if node is a Confirm */
2936 ir_node *skip_Confirm(ir_node *node)
2938 if (is_Confirm(node))
2939 return get_Confirm_value(node);
2943 /* skip all high-level ops */
2944 ir_node *skip_HighLevel_ops(ir_node *node)
2946 while (is_op_highlevel(get_irn_op(node))) {
2947 node = get_irn_n(node, 0);
2953 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2954 * than any other approach, as Id chains are resolved and all point to the real node, or
2955 * all id's are self loops.
 * Note: This function accounts for roughly 10% of almost any compiler run, so it's
2958 * a little bit "hand optimized".
2960 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2963 skip_Id(ir_node *node)
2966 /* don't assert node !!! */
2968 if (!node || (node->op != op_Id)) return node;
2970 /* Don't use get_Id_pred(): We get into an endless loop for
2971 self-referencing Ids. */
2972 pred = node->in[0+1];
2974 if (pred->op != op_Id) return pred;
2976 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2977 ir_node *rem_pred, *res;
2979 if (pred->op != op_Id) return pred; /* shortcut */
2982 assert(get_irn_arity (node) > 0);
2984 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2985 res = skip_Id(rem_pred);
2986 if (res->op == op_Id) /* self-loop */ return node;
2988 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2995 void skip_Id_and_store(ir_node **node)
2999 if (!n || (n->op != op_Id)) return;
3001 /* Don't use get_Id_pred(): We get into an endless loop for
3002 self-referencing Ids. */
3007 (is_strictConv)(const ir_node *node) {
3008 return _is_strictConv(node);
3012 (is_no_Block)(const ir_node *node) {
3013 return _is_no_Block(node);
3016 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
3018 (is_SymConst_addr_ent)(const ir_node *node) {
3019 return _is_SymConst_addr_ent(node);
3022 /* Returns true if the operation manipulates control flow. */
3023 int is_cfop(const ir_node *node)
3025 return is_op_cfopcode(get_irn_op(node));
3028 /* Returns true if the operation manipulates interprocedural control flow:
3029 CallBegin, EndReg, EndExcept */
3030 int is_ip_cfop(const ir_node *node)
3032 return is_ip_cfopcode(get_irn_op(node));
3035 /* Returns true if the operation can change the control flow because
3038 is_fragile_op(const ir_node *node)
3040 return is_op_fragile(get_irn_op(node));
3043 /* Returns the memory operand of fragile operations. */
3044 ir_node *get_fragile_op_mem(ir_node *node)
3046 assert(node && is_fragile_op(node));
3048 switch (get_irn_opcode(node)) {
3059 return get_irn_n(node, pn_Generic_M);
3064 assert(0 && "should not be reached");
3069 /* Returns the result mode of a Div operation. */
3070 ir_mode *get_divop_resmod(const ir_node *node)
3072 switch (get_irn_opcode(node)) {
3073 case iro_Quot : return get_Quot_resmode(node);
3074 case iro_DivMod: return get_DivMod_resmode(node);
3075 case iro_Div : return get_Div_resmode(node);
3076 case iro_Mod : return get_Mod_resmode(node);
3078 assert(0 && "should not be reached");
3083 /* Returns true if the operation is a forking control flow operation. */
3084 int (is_irn_forking)(const ir_node *node)
3086 return _is_irn_forking(node);
3089 void (copy_node_attr)(const ir_node *old_node, ir_node *new_node)
3091 _copy_node_attr(old_node, new_node);
3094 /* Return the type associated with the value produced by n
3095 * if the node remarks this type as it is the case for
3096 * Cast, Const, SymConst and some Proj nodes. */
3097 ir_type *(get_irn_type)(ir_node *node)
3099 return _get_irn_type(node);
3102 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
3104 ir_type *(get_irn_type_attr)(ir_node *node)
3106 return _get_irn_type_attr(node);
3109 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
3110 ir_entity *(get_irn_entity_attr)(ir_node *node)
3112 return _get_irn_entity_attr(node);
3115 /* Returns non-zero for constant-like nodes. */
3116 int (is_irn_constlike)(const ir_node *node)
3118 return _is_irn_constlike(node);
3122 * Returns non-zero for nodes that are allowed to have keep-alives and
3123 * are neither Block nor PhiM.
3125 int (is_irn_keep)(const ir_node *node)
3127 return _is_irn_keep(node);
3131 * Returns non-zero for nodes that are always placed in the start block.
3133 int (is_irn_start_block_placed)(const ir_node *node)
3135 return _is_irn_start_block_placed(node);
3138 /* Returns non-zero for nodes that are machine operations. */
3139 int (is_irn_machine_op)(const ir_node *node)
3141 return _is_irn_machine_op(node);
3144 /* Returns non-zero for nodes that are machine operands. */
3145 int (is_irn_machine_operand)(const ir_node *node)
3147 return _is_irn_machine_operand(node);
3150 /* Returns non-zero for nodes that have the n'th user machine flag set. */
3151 int (is_irn_machine_user)(const ir_node *node, unsigned n)
3153 return _is_irn_machine_user(node, n);
3156 /* Returns non-zero for nodes that are CSE neutral to its users. */
3157 int (is_irn_cse_neutral)(const ir_node *node)
3159 return _is_irn_cse_neutral(node);
3162 /* Gets the string representation of the jump prediction .*/
3163 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
3165 #define X(a) case a: return #a;
3167 X(COND_JMP_PRED_NONE);
3168 X(COND_JMP_PRED_TRUE);
3169 X(COND_JMP_PRED_FALSE);
3175 /* Returns the conditional jump prediction of a Cond node. */
3176 cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond)
3178 return _get_Cond_jmp_pred(cond);
/*
 * Sets a new conditional jump prediction on a Cond node.
 * Delegates to the inline implementation _set_Cond_jmp_pred().
 * (The parenthesized name suppresses the same-named function-like macro.)
 */
void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred)
{
	_set_Cond_jmp_pred(cond, pred);
}
3187 /** the get_type operation must be always implemented and return a firm type */
3188 static ir_type *get_Default_type(ir_node *n)
3191 return get_unknown_type();
3194 /* Sets the get_type operation for an ir_op_ops. */
3195 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops)
3198 case iro_Const: ops->get_type = get_Const_type; break;
3199 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
3200 case iro_Cast: ops->get_type = get_Cast_type; break;
3201 case iro_Proj: ops->get_type = get_Proj_type; break;
3203 /* not allowed to be NULL */
3204 if (! ops->get_type)
3205 ops->get_type = get_Default_type;
3211 /** Return the attribute type of a SymConst node if exists */
3212 static ir_type *get_SymConst_attr_type(ir_node *self)
3214 symconst_kind kind = get_SymConst_kind(self);
3215 if (SYMCONST_HAS_TYPE(kind))
3216 return get_SymConst_type(self);
3220 /** Return the attribute entity of a SymConst node if exists */
3221 static ir_entity *get_SymConst_attr_entity(ir_node *self)
3223 symconst_kind kind = get_SymConst_kind(self);
3224 if (SYMCONST_HAS_ENT(kind))
3225 return get_SymConst_entity(self);
3229 /** the get_type_attr operation must be always implemented */
3230 static ir_type *get_Null_type(ir_node *n)
3233 return firm_unknown_type;
3236 /* Sets the get_type operation for an ir_op_ops. */
3237 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops)
3240 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
3241 case iro_Call: ops->get_type_attr = get_Call_type; break;
3242 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
3243 case iro_Free: ops->get_type_attr = get_Free_type; break;
3244 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
3246 /* not allowed to be NULL */
3247 if (! ops->get_type_attr)
3248 ops->get_type_attr = get_Null_type;
3254 /** the get_entity_attr operation must be always implemented */
3255 static ir_entity *get_Null_ent(ir_node *n)
3261 /* Sets the get_type operation for an ir_op_ops. */
3262 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops)
3265 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
3266 case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
3268 /* not allowed to be NULL */
3269 if (! ops->get_entity_attr)
3270 ops->get_entity_attr = get_Null_ent;
/*
 * Sets the debug information of a node.
 * Delegates to the inline implementation _set_irn_dbg_info().
 * (The parenthesized name suppresses the same-named function-like macro.)
 */
void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
{
	_set_irn_dbg_info(n, db);
}
3283 * Returns the debug information of an node.
3285 * @param n The node.
3287 dbg_info *(get_irn_dbg_info)(const ir_node *n)
3289 return _get_irn_dbg_info(n);
3292 /* checks whether a node represents a global address */
3293 int is_Global(const ir_node *node)
3295 return is_SymConst_addr_ent(node);
3298 /* returns the entity of a global address */
3299 ir_entity *get_Global_entity(const ir_node *node)
3301 return get_SymConst_entity(node);
3305 * Calculate a hash value of a node.
3307 unsigned firm_default_hash(const ir_node *node)
3312 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
3313 h = irn_arity = get_irn_intra_arity(node);
3315 /* consider all in nodes... except the block if not a control flow. */
3316 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
3317 ir_node *pred = get_irn_intra_n(node, i);
3318 if (is_irn_cse_neutral(pred))
3321 h = 9*h + HASH_PTR(pred);
3325 h = 9*h + HASH_PTR(get_irn_mode(node));
3327 h = 9*h + HASH_PTR(get_irn_op(node));
3330 } /* firm_default_hash */
3332 /* include generated code */
3333 #include "gen_irnode.c.inl"