2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation of an intermediate operation.
23 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
32 #include "irgraph_t.h"
34 #include "irbackedge_t.h"
38 #include "iredgekinds.h"
39 #include "iredges_t.h"
47 /* some constants fixing the positions of nodes predecessors
49 #define CALL_PARAM_OFFSET 2
50 #define FUNCCALL_PARAM_OFFSET 1
51 #define SEL_INDEX_OFFSET 2
52 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
53 #define END_KEEPALIVE_OFFSET 0
/* Printable names for the pn_Cmp relation constants, indexed directly by
 * the pnc bit mask (combinations of the Lt/Eq/Gt/Uo bits, 16 entries).
 * NOTE(review): the closing "};" of this array (original line 60) is not
 * visible in this line-sampled listing. */
55 static const char *pnc_name_arr [] = {
56 "pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
57 "pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
58 "pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
59 "pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
63 * returns the pnc name from a pnc constant
65 const char *get_pnc_string(int pnc) {
/* Bounds-check against the name table before indexing. */
66 assert(pnc >= 0 && pnc <
67 (int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
68 return pnc_name_arr[pnc];
/* NOTE(review): the closing brace (original line 69) is missing from this
 * sampled fragment. */
72 * Calculates the negated (Complement(R)) pnc condition.
74 pn_Cmp get_negated_pnc(long pnc, ir_mode *mode) {
/* For floating-point modes the complement must also cover the "unordered"
 * (Uo) outcome; integer/pointer modes have no unordered results. */
77 /* do NOT add the Uo bit for non-floating point values */
78 if (! mode_is_float(mode))
/* NOTE(review): the remainder of this function (original lines 75-76 and
 * 79 onwards: the negation mask and return) is not visible in this
 * sampled listing. */
84 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
85 pn_Cmp get_inversed_pnc(long pnc) {
/* Strip the Lt/Gt bits, then re-insert them swapped.  Eq and Uo are
 * symmetric under operand exchange and are left untouched in `code`. */
86 long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
87 long lesser = pnc & pn_Cmp_Lt;
88 long greater = pnc & pn_Cmp_Gt;
90 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/* NOTE(review): the return statement and closing brace (original lines
 * 91+) are missing from this sampled fragment; presumably "return code;"
 * -- confirm against the full source. */
96 * Indicates whether additional data can still be registered to ir nodes.
97 * If set to 1, this is not possible anymore.
99 static int forbid_new_data = 0;
102 * The amount of additional space for custom data to be allocated upon
103 * creating a new node.
105 unsigned firm_add_node_size = 0;
108 /* register new space for every node */
109 unsigned firm_register_additional_node_data(unsigned size) {
/* Registration must happen before the first node is created; afterwards
 * forbid_new_data is set (see init_irnode's comment) and this fires. */
110 assert(!forbid_new_data && "Too late to register additional node data");
/* Accumulate and return the total extra size requested so far. */
115 return firm_add_node_size += size;
/* Library-internal one-time initialization for this module.
 * NOTE(review): the statement setting forbid_new_data (and the closing
 * brace) are not visible in this sampled fragment. */
119 void init_irnode(void) {
120 /* Forbid the addition of new data to an ir node. */
125 * irnode constructor.
126 * Create a new irnode in irg, with an op, mode, arity and
127 * some incoming irnodes.
128 * If arity is negative, a node with a dynamic array is created.
131 new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
132 int arity, ir_node **in)
/* Memory layout: [custom data (firm_add_node_size bytes)][ir_node+attrs];
 * the returned node pointer points past the custom-data prefix. */
135 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
142 p = obstack_alloc(irg->obst, node_size);
143 memset(p, 0, node_size);
144 res = (ir_node *)(p + firm_add_node_size);
146 res->kind = k_ir_node;
150 res->node_idx = irg_register_node_idx(irg, res);
/* in[0] is reserved for the block edge; operands start at in[1]. */
155 res->in = NEW_ARR_F(ir_node *, 1); /* 1: space for block */
157 /* not nice but necessary: End and Sync must always have a flexible array */
158 if (op == op_End || op == op_Sync)
159 res->in = NEW_ARR_F(ir_node *, (arity+1));
161 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
162 memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
166 set_irn_dbg_info(res, db);
170 res->node_nr = get_irp_new_node_nr();
173 for (i = 0; i < EDGE_KIND_LAST; ++i)
174 INIT_LIST_HEAD(&res->edge_info[i].outs_head);
176 /* don't put this into the for loop, arity is -1 for some nodes! */
/* Position -1 is the block edge; operand edge positions are 0-based. */
177 edges_notify_edge(res, -1, res->in[0], NULL, irg);
178 for (i = 1; i <= arity; ++i)
179 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
181 hook_new_node(irg, res);
182 if (get_irg_phase_state(irg) == phase_backend) {
183 be_info_new_node(res);
/* NOTE(review): this constructor is a sampled fragment -- the branch
 * structure selecting flexible vs. obstack in-arrays, the block
 * assignment, and the final return are not visible here. */
189 /*-- getting some parameters from ir_nodes --*/
191 int (is_ir_node)(const void *thing) {
192 return _is_ir_node(thing);
195 int (get_irn_intra_arity)(const ir_node *node) {
196 return _get_irn_intra_arity(node);
199 int (get_irn_inter_arity)(const ir_node *node) {
200 return _get_irn_inter_arity(node);
203 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
205 int (get_irn_arity)(const ir_node *node) {
206 return _get_irn_arity(node);
209 /* Returns the array with ins. This array is shifted with respect to the
210 array accessed by get_irn_n: The block operand is at position 0 not -1.
211 (@@@ This should be changed.)
212 The order of the predecessors in this array is not guaranteed, except that
213 lists of operands as predecessors of Block or arguments of a Call are
215 ir_node **get_irn_in(const ir_node *node) {
217 #ifdef INTERPROCEDURAL_VIEW
/* In interprocedural view, Filter and Block nodes may carry an
 * alternative in-array (in_cg) that crosses graph boundaries. */
218 if (get_interprocedural_view()) { /* handle Filter and Block specially */
219 if (get_irn_opcode(node) == iro_Filter) {
220 assert(node->attr.filter.in_cg);
221 return node->attr.filter.in_cg;
222 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
223 return node->attr.block.in_cg;
225 /* else fall through */
227 #endif /* INTERPROCEDURAL_VIEW */
/* NOTE(review): the default return path (presumably "return node->in;")
 * is not visible in this sampled fragment -- confirm against the full
 * source. */
231 void set_irn_in(ir_node *node, int arity, ir_node **in) {
234 ir_graph *irg = current_ir_graph;
237 #ifdef INTERPROCEDURAL_VIEW
/* Select which in-array to rewrite: the interprocedural in_cg array for
 * Filter/Block when in interprocedural view, node->in otherwise. */
238 if (get_interprocedural_view()) { /* handle Filter and Block specially */
239 ir_opcode code = get_irn_opcode(node);
240 if (code == iro_Filter) {
241 assert(node->attr.filter.in_cg);
242 pOld_in = &node->attr.filter.in_cg;
243 } else if (code == iro_Block && node->attr.block.in_cg) {
244 pOld_in = &node->attr.block.in_cg;
249 #endif /* INTERPROCEDURAL_VIEW */
/* Phase 1: report every edge that changes (old target at i+1; new ones
 * beyond the old length have no previous target). */
253 for (i = 0; i < arity; i++) {
254 if (i < ARR_LEN(*pOld_in)-1)
255 edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
257 edges_notify_edge(node, i, in[i], NULL, irg);
/* Phase 2: edges beyond the new arity are removed. */
259 for (;i < ARR_LEN(*pOld_in)-1; i++) {
260 edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
/* Phase 3: reallocate if the arity changed, preserving slot 0 (block). */
263 if (arity != ARR_LEN(*pOld_in) - 1) {
264 ir_node * block = (*pOld_in)[0];
265 *pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
266 (*pOld_in)[0] = block;
268 fix_backedges(irg->obst, node);
/* Phase 4: copy the new predecessors behind the block slot. */
270 memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
/* NOTE(review): sampled fragment -- declarations of i/pOld_in, several
 * closing braces and the surrounding structure are not visible here. */
273 ir_node *(get_irn_intra_n)(const ir_node *node, int n) {
274 return _get_irn_intra_n (node, n);
277 ir_node *(get_irn_inter_n)(const ir_node *node, int n) {
278 return _get_irn_inter_n (node, n);
281 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
283 ir_node *(get_irn_n)(const ir_node *node, int n) {
284 return _get_irn_n(node, n);
287 void set_irn_n(ir_node *node, int n, ir_node *in) {
288 assert(node && node->kind == k_ir_node);
290 assert(n < get_irn_arity(node));
291 assert(in && in->kind == k_ir_node);
/* n == -1 addresses the block predecessor (stored at in[0]); a Filter's
 * block must be changed in the normal and interprocedural view alike. */
293 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
294 /* Change block pred in both views! */
295 node->in[n + 1] = in;
296 assert(node->attr.filter.in_cg);
297 node->attr.filter.in_cg[n + 1] = in;
300 #ifdef INTERPROCEDURAL_VIEW
301 if (get_interprocedural_view()) { /* handle Filter and Block specially */
302 if (get_irn_opcode(node) == iro_Filter) {
303 assert(node->attr.filter.in_cg);
304 node->attr.filter.in_cg[n + 1] = in;
306 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
307 node->attr.block.in_cg[n + 1] = in;
310 /* else fall through */
312 #endif /* INTERPROCEDURAL_VIEW */
/* Notify hooks/edges with the old target (still node->in[n+1]) before
 * overwriting the slot. */
315 hook_set_irn_n(node, n, in, node->in[n + 1]);
317 /* Here, we rely on src and tgt being in the current ir graph */
318 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
320 node->in[n + 1] = in;
/* NOTE(review): sampled fragment -- asserts around line 289, returns and
 * closing braces are not visible here. */
323 int add_irn_n(ir_node *node, ir_node *in) {
325 ir_graph *irg = get_irn_irg(node);
/* Appending a predecessor is only legal for ops with a dynamic in-array. */
327 assert(node->op->opar == oparity_dynamic);
/* New operand position: current length minus the block slot at in[0]. */
328 pos = ARR_LEN(node->in) - 1;
329 ARR_APP1(ir_node *, node->in, in);
330 edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);
333 hook_set_irn_n(node, pos, node->in[pos + 1], NULL);
/* NOTE(review): the declaration of pos, the "return pos;" and the closing
 * brace are not visible in this sampled fragment. */
/* Removes predecessor i of a Sync node by swap-with-last: slot i is
 * overwritten with the last predecessor, the last slot's edge is
 * retracted, and the in-array is shrunk by one element. */
338 void del_Sync_n(ir_node *n, int i)
340 int arity = get_Sync_n_preds(n);
341 ir_node *last_pred = get_Sync_pred(n, arity - 1);
342 set_Sync_pred(n, i, last_pred);
343 edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
/* Shrink to `arity` array entries == block slot + (arity - 1) preds. */
344 ARR_SHRINKLEN(get_irn_in(n), arity);
347 int (get_irn_deps)(const ir_node *node) {
348 return _get_irn_deps(node);
351 ir_node *(get_irn_dep)(const ir_node *node, int pos) {
352 return _get_irn_dep(node, pos);
355 void (set_irn_dep)(ir_node *node, int pos, ir_node *dep) {
356 _set_irn_dep(node, pos, dep);
359 int add_irn_dep(ir_node *node, ir_node *dep) {
362 /* DEP edges are only allowed in backend phase */
363 assert(get_irg_phase_state(get_irn_irg(node)) == phase_backend);
364 if (node->deps == NULL) {
/* Lazily allocate the dependency array on first use. */
365 node->deps = NEW_ARR_F(ir_node *, 1);
/* Scan for a free (NULL) slot and bail out if dep is already present. */
371 for(i = 0, n = ARR_LEN(node->deps); i < n; ++i) {
372 if(node->deps[i] == NULL)
375 if(node->deps[i] == dep)
379 if (first_zero >= 0) {
/* Reuse a previously freed slot instead of growing the array. */
380 node->deps[first_zero] = dep;
383 ARR_APP1(ir_node *, node->deps, dep);
388 edges_notify_edge_kind(node, res, dep, NULL, EDGE_KIND_DEP, get_irn_irg(node));
/* NOTE(review): sampled fragment -- declarations of i/n/first_zero/res,
 * the early-return on duplicate, and the final return are not visible. */
393 void add_irn_deps(ir_node *tgt, ir_node *src) {
396 for (i = 0, n = get_irn_deps(src); i < n; ++i)
397 add_irn_dep(tgt, get_irn_dep(src, i));
401 ir_mode *(get_irn_mode)(const ir_node *node) {
402 return _get_irn_mode(node);
405 void (set_irn_mode)(ir_node *node, ir_mode *mode) {
406 _set_irn_mode(node, mode);
409 ir_modecode get_irn_modecode(const ir_node *node) {
411 return node->mode->code;
414 /** Gets the string representation of the mode .*/
415 const char *get_irn_modename(const ir_node *node) {
417 return get_mode_name(node->mode);
420 ident *get_irn_modeident(const ir_node *node) {
422 return get_mode_ident(node->mode);
425 ir_op *(get_irn_op)(const ir_node *node) {
426 return _get_irn_op(node);
429 /* should be private to the library: */
430 void (set_irn_op)(ir_node *node, ir_op *op) {
431 _set_irn_op(node, op);
434 unsigned (get_irn_opcode)(const ir_node *node) {
435 return _get_irn_opcode(node);
438 const char *get_irn_opname(const ir_node *node) {
440 if (is_Phi0(node)) return "Phi0";
441 return get_id_str(node->op->name);
444 ident *get_irn_opident(const ir_node *node) {
446 return node->op->name;
449 ir_visited_t (get_irn_visited)(const ir_node *node) {
450 return _get_irn_visited(node);
453 void (set_irn_visited)(ir_node *node, ir_visited_t visited) {
454 _set_irn_visited(node, visited);
457 void (mark_irn_visited)(ir_node *node) {
458 _mark_irn_visited(node);
461 int (irn_visited)(const ir_node *node) {
462 return _irn_visited(node);
465 int (irn_visited_else_mark)(ir_node *node) {
466 return _irn_visited_else_mark(node);
469 void (set_irn_link)(ir_node *node, void *link) {
470 _set_irn_link(node, link);
473 void *(get_irn_link)(const ir_node *node) {
474 return _get_irn_link(node);
477 op_pin_state (get_irn_pinned)(const ir_node *node) {
478 return _get_irn_pinned(node);
481 op_pin_state (is_irn_pinned_in_irg) (const ir_node *node) {
482 return _is_irn_pinned_in_irg(node);
485 void set_irn_pinned(ir_node *node, op_pin_state state) {
486 /* due to optimization an opt may be turned into a Tuple */
490 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
491 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
493 node->attr.except.pin_state = state;
496 /* Outputs a unique number for this node */
497 long get_irn_node_nr(const ir_node *node) {
500 return node->node_nr;
502 return (long)PTR_TO_INT(node);
506 const_attr *get_irn_const_attr(ir_node *node) {
507 assert(is_Const(node));
508 return &node->attr.con;
511 long get_irn_proj_attr(ir_node *node) {
512 /* BEWARE: check for true Proj node here, no Filter */
513 assert(node->op == op_Proj);
514 return node->attr.proj;
517 alloc_attr *get_irn_alloc_attr(ir_node *node) {
518 assert(is_Alloc(node));
519 return &node->attr.alloc;
522 free_attr *get_irn_free_attr(ir_node *node) {
523 assert(is_Free(node));
524 return &node->attr.free;
527 symconst_attr *get_irn_symconst_attr(ir_node *node) {
528 assert(is_SymConst(node));
529 return &node->attr.symc;
532 ir_type *get_irn_call_attr(ir_node *node) {
533 assert(is_Call(node));
534 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
537 sel_attr *get_irn_sel_attr(ir_node *node) {
538 assert(is_Sel(node));
539 return &node->attr.sel;
542 phi_attr *get_irn_phi_attr(ir_node *node) {
543 return &node->attr.phi;
546 block_attr *get_irn_block_attr(ir_node *node) {
547 assert(is_Block(node));
548 return &node->attr.block;
551 load_attr *get_irn_load_attr(ir_node *node) {
552 assert(is_Load(node));
553 return &node->attr.load;
556 store_attr *get_irn_store_attr(ir_node *node) {
557 assert(is_Store(node));
558 return &node->attr.store;
561 except_attr *get_irn_except_attr(ir_node *node) {
562 assert(node->op == op_Div || node->op == op_Quot ||
563 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
564 return &node->attr.except;
567 divmod_attr *get_irn_divmod_attr(ir_node *node) {
568 assert(node->op == op_Div || node->op == op_Quot ||
569 node->op == op_DivMod || node->op == op_Mod);
570 return &node->attr.divmod;
573 void *(get_irn_generic_attr)(ir_node *node) {
574 assert(is_ir_node(node));
575 return _get_irn_generic_attr(node);
578 const void *(get_irn_generic_attr_const)(const ir_node *node) {
579 assert(is_ir_node(node));
580 return _get_irn_generic_attr_const(node);
583 unsigned (get_irn_idx)(const ir_node *node) {
584 assert(is_ir_node(node));
585 return _get_irn_idx(node);
588 int get_irn_pred_pos(ir_node *node, ir_node *arg) {
590 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
591 if (get_irn_n(node, i) == arg)
597 /** manipulate fields of individual nodes **/
599 /* this works for all except Block */
600 ir_node *get_nodes_block(const ir_node *node) {
601 assert(node->op != op_Block);
602 return get_irn_n(node, -1);
605 void set_nodes_block(ir_node *node, ir_node *block) {
606 assert(node->op != op_Block);
607 set_irn_n(node, -1, block);
610 /* this works for all except Block */
611 ir_node *get_nodes_MacroBlock(const ir_node *node) {
612 assert(node->op != op_Block);
613 return get_Block_MacroBlock(get_irn_n(node, -1));
616 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
617 * from Start. If so returns frame type, else Null. */
618 ir_type *is_frame_pointer(const ir_node *n) {
619 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
620 ir_node *start = get_Proj_pred(n);
621 if (is_Start(start)) {
622 return get_irg_frame_type(get_irn_irg(start));
628 /* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
629 * from Start. If so returns tls type, else Null. */
630 ir_type *is_tls_pointer(const ir_node *n) {
631 if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
632 ir_node *start = get_Proj_pred(n);
633 if (is_Start(start)) {
634 return get_tls_type();
640 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
641 * from Start. If so returns 1, else 0. */
642 int is_value_arg_pointer(const ir_node *n) {
644 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
645 is_Start(get_Proj_pred(n)))
650 /* Returns an array with the predecessors of the Block. Depending on
651 the implementation of the graph data structure this can be a copy of
652 the internal representation of predecessors as well as the internal
653 array itself. Therefore writing to this array might obstruct the ir. */
654 ir_node **get_Block_cfgpred_arr(ir_node *node) {
655 assert(is_Block(node));
656 return (ir_node **)&(get_irn_in(node)[1]);
659 int (get_Block_n_cfgpreds)(const ir_node *node) {
660 return _get_Block_n_cfgpreds(node);
663 ir_node *(get_Block_cfgpred)(const ir_node *node, int pos) {
664 return _get_Block_cfgpred(node, pos);
667 void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
668 assert(is_Block(node));
669 set_irn_n(node, pos, pred);
672 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos) {
673 return _get_Block_cfgpred_block(node, pos);
676 int get_Block_matured(const ir_node *node) {
677 assert(is_Block(node));
678 return (int)node->attr.block.is_matured;
681 void set_Block_matured(ir_node *node, int matured) {
682 assert(is_Block(node));
683 node->attr.block.is_matured = matured;
686 ir_visited_t (get_Block_block_visited)(const ir_node *node) {
687 return _get_Block_block_visited(node);
690 void (set_Block_block_visited)(ir_node *node, ir_visited_t visit) {
691 _set_Block_block_visited(node, visit);
694 /* For this current_ir_graph must be set. */
695 void (mark_Block_block_visited)(ir_node *node) {
696 _mark_Block_block_visited(node);
699 int (Block_block_visited)(const ir_node *node) {
700 return _Block_block_visited(node);
703 ir_node *get_Block_graph_arr(ir_node *node, int pos) {
704 assert(is_Block(node));
705 return node->attr.block.graph_arr[pos+1];
708 void set_Block_graph_arr(ir_node *node, int pos, ir_node *value) {
709 assert(is_Block(node));
710 node->attr.block.graph_arr[pos+1] = value;
713 #ifdef INTERPROCEDURAL_VIEW
714 void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[]) {
715 assert(is_Block(node));
716 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
717 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
718 node->attr.block.in_cg[0] = NULL;
719 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
721 /* Fix backedge array. fix_backedges() operates depending on
722 interprocedural_view. */
723 int ipv = get_interprocedural_view();
724 set_interprocedural_view(1);
725 fix_backedges(current_ir_graph->obst, node);
726 set_interprocedural_view(ipv);
729 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
732 void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred) {
733 assert(is_Block(node) && node->attr.block.in_cg &&
734 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
735 node->attr.block.in_cg[pos + 1] = pred;
738 ir_node **get_Block_cg_cfgpred_arr(ir_node *node) {
739 assert(is_Block(node));
740 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
743 int get_Block_cg_n_cfgpreds(const ir_node *node) {
744 assert(is_Block(node));
745 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
748 ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos) {
749 assert(is_Block(node) && node->attr.block.in_cg);
750 return node->attr.block.in_cg[pos + 1];
753 void remove_Block_cg_cfgpred_arr(ir_node *node) {
754 assert(is_Block(node));
755 node->attr.block.in_cg = NULL;
757 #endif /* INTERPROCEDURAL_VIEW */
759 ir_node *(set_Block_dead)(ir_node *block) {
760 return _set_Block_dead(block);
763 int (is_Block_dead)(const ir_node *block) {
764 return _is_Block_dead(block);
767 ir_extblk *get_Block_extbb(const ir_node *block) {
769 assert(is_Block(block));
770 res = block->attr.block.extblk;
771 assert(res == NULL || is_ir_extbb(res));
775 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
776 assert(is_Block(block));
777 assert(extblk == NULL || is_ir_extbb(extblk));
778 block->attr.block.extblk = extblk;
781 /* Returns the macro block header of a block.*/
782 ir_node *get_Block_MacroBlock(const ir_node *block) {
784 assert(is_Block(block));
785 mbh = get_irn_n(block, -1);
786 /* once macro block header is respected by all optimizations,
787 this assert can be removed */
792 /* Sets the macro block header of a block. */
793 void set_Block_MacroBlock(ir_node *block, ir_node *mbh) {
794 assert(is_Block(block));
795 assert(is_Block(mbh));
796 set_irn_n(block, -1, mbh);
799 /* returns the macro block header of a node. */
800 ir_node *get_irn_MacroBlock(const ir_node *n) {
802 n = get_nodes_block(n);
803 /* if the Block is Bad, do NOT try to get it's MB, it will fail. */
807 return get_Block_MacroBlock(n);
810 /* returns the graph of a Block. */
811 ir_graph *get_Block_irg(const ir_node *block) {
812 assert(is_Block(block));
813 return block->attr.block.irg;
816 int has_Block_label(const ir_node *block) {
817 assert(is_Block(block));
818 return block->attr.block.has_label;
821 ir_label_t get_Block_label(const ir_node *block) {
822 assert(is_Block(block));
823 return block->attr.block.label;
826 void set_Block_label(ir_node *block, ir_label_t label) {
827 assert(is_Block(block));
828 block->attr.block.has_label = 1;
829 block->attr.block.label = label;
832 ir_node *(get_Block_phis)(const ir_node *block) {
833 return _get_Block_phis(block);
836 void (set_Block_phis)(ir_node *block, ir_node *phi) {
837 _set_Block_phis(block, phi);
840 void (add_Block_phi)(ir_node *block, ir_node *phi) {
841 _add_Block_phi(block, phi);
844 /* Get the Block mark (single bit). */
845 unsigned (get_Block_mark)(const ir_node *block) {
846 return _get_Block_mark(block);
849 /* Set the Block mark (single bit). */
850 void (set_Block_mark)(ir_node *block, unsigned mark) {
851 _set_Block_mark(block, mark);
854 int get_End_n_keepalives(const ir_node *end) {
856 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
859 ir_node *get_End_keepalive(const ir_node *end, int pos) {
861 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
864 void add_End_keepalive(ir_node *end, ir_node *ka) {
869 void set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
871 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
874 /* Set new keep-alives */
875 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
877 ir_graph *irg = get_irn_irg(end);
879 /* notify that edges are deleted */
880 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in) - 1; ++i) {
881 edges_notify_edge(end, i, NULL, end->in[i + 1], irg);
/* Resize the in-array to hold exactly n keep-alives (+ block slot). */
883 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
/* Install the new keep-alives and announce the new edges. */
885 for (i = 0; i < n; ++i) {
886 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
887 edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
/* NOTE(review): sampled fragment -- the declaration of i and several
 * closing braces are not visible here. */
891 /* Set new keep-alives from old keep-alives, skipping irn */
892 void remove_End_keepalive(ir_node *end, ir_node *irn) {
893 int n = get_End_n_keepalives(end);
/* Search backwards for irn among the current keep-alives. */
898 for (i = n -1; i >= 0; --i) {
899 ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];
909 irg = get_irn_irg(end);
911 /* remove the edge */
912 edges_notify_edge(end, idx, NULL, irn, irg);
915 /* exchange with the last one */
/* Swap-with-last removal: move the last keep-alive into the freed slot,
 * retracting its old edge and announcing it at the new position. */
916 ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
917 edges_notify_edge(end, n - 1, NULL, old, irg);
918 end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
919 edges_notify_edge(end, idx, old, NULL, irg);
/* Shrink the in-array by the removed entry. */
921 ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
/* NOTE(review): sampled fragment -- the declarations of i/idx/irg, the
 * match/early-exit logic of the search loop, and closing braces are not
 * visible here. */
925 free_End(ir_node *end) {
929 end->in = NULL; /* @@@ make sure we get an error if we use the
930 in array afterwards ... */
933 /* Return the target address of an IJmp */
934 ir_node *get_IJmp_target(const ir_node *ijmp) {
935 assert(is_IJmp(ijmp));
936 return get_irn_n(ijmp, 0);
939 /** Sets the target address of an IJmp */
940 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
941 assert(is_IJmp(ijmp));
942 set_irn_n(ijmp, 0, tgt);
946 > Implementing the case construct (which is where the constant Proj node is
947 > important) involves far more than simply determining the constant values.
948 > We could argue that this is more properly a function of the translator from
949 > Firm to the target machine. That could be done if there was some way of
950 > projecting "default" out of the Cond node.
951 I know it's complicated.
952 Basically there are two problems:
953 - determining the gaps between the Projs
954 - determining the biggest case constant to know the proj number for
956 I see several solutions:
957 1. Introduce a ProjDefault node. Solves both problems.
958 This means to extend all optimizations executed during construction.
959 2. Give the Cond node for switch two flavors:
960 a) there are no gaps in the Projs (existing flavor)
961 b) gaps may exist, default proj is still the Proj with the largest
962 projection number. This covers also the gaps.
963 3. Fix the semantic of the Cond to that of 2b)
965 Solution 2 seems to be the best:
966 Computing the gaps in the Firm representation is not too hard, i.e.,
967 libFIRM can implement a routine that transforms between the two
968 flavours. This is also possible for 1) but 2) does not require to
969 change any existing optimization.
970 Further it should be far simpler to determine the biggest constant than
972 I don't want to choose 3) as 2a) seems to have advantages for
973 dataflow analysis and 3) does not allow to convert the representation to
977 get_Cond_selector(const ir_node *node) {
978 assert(is_Cond(node));
979 return get_irn_n(node, 0);
983 set_Cond_selector(ir_node *node, ir_node *selector) {
984 assert(is_Cond(node));
985 set_irn_n(node, 0, selector);
989 get_Cond_kind(const ir_node *node) {
990 assert(is_Cond(node));
991 return node->attr.cond.kind;
995 set_Cond_kind(ir_node *node, cond_kind kind) {
996 assert(is_Cond(node));
997 node->attr.cond.kind = kind;
1001 get_Cond_defaultProj(const ir_node *node) {
1002 assert(is_Cond(node));
1003 return node->attr.cond.default_proj;
1007 get_Return_mem(const ir_node *node) {
1008 assert(is_Return(node));
1009 return get_irn_n(node, 0);
1013 set_Return_mem(ir_node *node, ir_node *mem) {
1014 assert(is_Return(node));
1015 set_irn_n(node, 0, mem);
1019 get_Return_n_ress(const ir_node *node) {
1020 assert(is_Return(node));
1021 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
1025 get_Return_res_arr(ir_node *node) {
1026 assert(is_Return(node));
1027 if (get_Return_n_ress(node) > 0)
1028 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
1035 set_Return_n_res(ir_node *node, int results) {
1036 assert(is_Return(node));
1041 get_Return_res(const ir_node *node, int pos) {
1042 assert(is_Return(node));
1043 assert(get_Return_n_ress(node) > pos);
1044 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
1048 set_Return_res(ir_node *node, int pos, ir_node *res){
1049 assert(is_Return(node));
1050 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
1053 tarval *(get_Const_tarval)(const ir_node *node) {
1054 return _get_Const_tarval(node);
1058 set_Const_tarval(ir_node *node, tarval *con) {
1059 assert(is_Const(node));
1060 node->attr.con.tv = con;
1063 int (is_Const_null)(const ir_node *node) {
1064 return _is_Const_null(node);
1067 int (is_Const_one)(const ir_node *node) {
1068 return _is_Const_one(node);
1071 int (is_Const_all_one)(const ir_node *node) {
1072 return _is_Const_all_one(node);
1076 /* The source language type. Must be an atomic type. Mode of type must
1077 be mode of node. For tarvals from entities type must be pointer to
1080 get_Const_type(ir_node *node) {
1081 assert(is_Const(node));
1082 node->attr.con.tp = skip_tid(node->attr.con.tp);
1083 return node->attr.con.tp;
1087 set_Const_type(ir_node *node, ir_type *tp) {
1088 assert(is_Const(node));
1089 if (tp != firm_unknown_type) {
1090 assert(is_atomic_type(tp));
1091 assert(get_type_mode(tp) == get_irn_mode(node));
1093 node->attr.con.tp = tp;
1098 get_SymConst_kind(const ir_node *node) {
1099 assert(is_SymConst(node));
1100 return node->attr.symc.kind;
1104 set_SymConst_kind(ir_node *node, symconst_kind kind) {
1105 assert(is_SymConst(node));
1106 node->attr.symc.kind = kind;
1110 get_SymConst_type(const ir_node *node) {
1111 /* the cast here is annoying, but we have to compensate for
1113 ir_node *irn = (ir_node *)node;
1114 assert(is_SymConst(node) &&
1115 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1116 return irn->attr.symc.sym.type_p = skip_tid(irn->attr.symc.sym.type_p);
1120 set_SymConst_type(ir_node *node, ir_type *tp) {
1121 assert(is_SymConst(node) &&
1122 (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
1123 node->attr.symc.sym.type_p = tp;
1127 get_SymConst_name(const ir_node *node) {
1128 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1129 return node->attr.symc.sym.ident_p;
1133 set_SymConst_name(ir_node *node, ident *name) {
1134 assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
1135 node->attr.symc.sym.ident_p = name;
1139 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1140 ir_entity *get_SymConst_entity(const ir_node *node) {
1141 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1142 return node->attr.symc.sym.entity_p;
1145 void set_SymConst_entity(ir_node *node, ir_entity *ent) {
1146 assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
1147 node->attr.symc.sym.entity_p = ent;
1150 ir_enum_const *get_SymConst_enum(const ir_node *node) {
1151 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1152 return node->attr.symc.sym.enum_p;
1155 void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
1156 assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
1157 node->attr.symc.sym.enum_p = ec;
1160 union symconst_symbol
1161 get_SymConst_symbol(const ir_node *node) {
1162 assert(is_SymConst(node));
1163 return node->attr.symc.sym;
1167 set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
1168 assert(is_SymConst(node));
1169 node->attr.symc.sym = sym;
1172 ir_label_t get_SymConst_label(const ir_node *node) {
1173 assert(is_SymConst(node) && SYMCONST_HAS_LABEL(get_SymConst_kind(node)));
1174 return node->attr.symc.sym.label;
1177 void set_SymConst_label(ir_node *node, ir_label_t label) {
1178 assert(is_SymConst(node) && SYMCONST_HAS_LABEL(get_SymConst_kind(node)));
1179 node->attr.symc.sym.label = label;
1183 get_SymConst_value_type(ir_node *node) {
1184 assert(is_SymConst(node));
1185 if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
1186 return node->attr.symc.tp;
1190 set_SymConst_value_type(ir_node *node, ir_type *tp) {
1191 assert(is_SymConst(node));
1192 node->attr.symc.tp = tp;
1196 get_Sel_mem(const ir_node *node) {
1197 assert(is_Sel(node));
1198 return get_irn_n(node, 0);
1202 set_Sel_mem(ir_node *node, ir_node *mem) {
1203 assert(is_Sel(node));
1204 set_irn_n(node, 0, mem);
1208 get_Sel_ptr(const ir_node *node) {
1209 assert(is_Sel(node));
1210 return get_irn_n(node, 1);
1214 set_Sel_ptr(ir_node *node, ir_node *ptr) {
1215 assert(is_Sel(node));
1216 set_irn_n(node, 1, ptr);
1220 get_Sel_n_indexs(const ir_node *node) {
1221 assert(is_Sel(node));
1222 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1226 get_Sel_index_arr(ir_node *node) {
1227 assert(is_Sel(node));
1228 if (get_Sel_n_indexs(node) > 0)
1229 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1235 get_Sel_index(const ir_node *node, int pos) {
1236 assert(is_Sel(node));
1237 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1241 set_Sel_index(ir_node *node, int pos, ir_node *index) {
1242 assert(is_Sel(node));
1243 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1247 get_Sel_entity(const ir_node *node) {
1248 assert(is_Sel(node));
1249 return node->attr.sel.ent;
1252 /* need a version without const to prevent warning */
1253 static ir_entity *_get_Sel_entity(ir_node *node) {
1254 return get_Sel_entity(node);
1258 set_Sel_entity(ir_node *node, ir_entity *ent) {
1259 assert(is_Sel(node));
1260 node->attr.sel.ent = ent;
1264 /* For unary and binary arithmetic operations the access to the
1265 operands can be factored out. Left is the first, right the
1266 second arithmetic value as listed in tech report 0999-33.
1267 unops are: Minus, Abs, Not, Conv, Cast
1268 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1269 Shr, Shrs, Rotate, Cmp */
1273 get_Call_mem(const ir_node *node) {
1274 assert(is_Call(node));
1275 return get_irn_n(node, 0);
1279 set_Call_mem(ir_node *node, ir_node *mem) {
1280 assert(is_Call(node));
1281 set_irn_n(node, 0, mem);
1285 get_Call_ptr(const ir_node *node) {
1286 assert(is_Call(node));
1287 return get_irn_n(node, 1);
1291 set_Call_ptr(ir_node *node, ir_node *ptr) {
1292 assert(is_Call(node));
1293 set_irn_n(node, 1, ptr);
1297 get_Call_param_arr(ir_node *node) {
1298 assert(is_Call(node));
1299 return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1303 get_Call_n_params(const ir_node *node) {
1304 assert(is_Call(node));
1305 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1309 get_Call_arity(const ir_node *node) {
1310 assert(is_Call(node));
1311 return get_Call_n_params(node);
1315 set_Call_arity(ir_node *node, ir_node *arity) {
1316 assert(is_Call(node));
1321 get_Call_param(const ir_node *node, int pos) {
1322 assert(is_Call(node));
1323 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1327 set_Call_param(ir_node *node, int pos, ir_node *param) {
1328 assert(is_Call(node));
1329 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1333 get_Call_type(ir_node *node) {
1334 assert(is_Call(node));
1335 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1339 set_Call_type(ir_node *node, ir_type *tp) {
1340 assert(is_Call(node));
1341 assert((get_unknown_type() == tp) || is_Method_type(tp));
1342 node->attr.call.cld_tp = tp;
1345 int Call_has_callees(const ir_node *node) {
1346 assert(is_Call(node));
1347 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1348 (node->attr.call.callee_arr != NULL));
1351 int get_Call_n_callees(const ir_node *node) {
1352 assert(is_Call(node) && node->attr.call.callee_arr);
1353 return ARR_LEN(node->attr.call.callee_arr);
1356 ir_entity *get_Call_callee(const ir_node *node, int pos) {
1357 assert(pos >= 0 && pos < get_Call_n_callees(node));
1358 return node->attr.call.callee_arr[pos];
1361 void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr) {
1362 assert(is_Call(node));
1363 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1364 node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
1366 memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
1369 void remove_Call_callee_arr(ir_node *node) {
1370 assert(is_Call(node));
1371 node->attr.call.callee_arr = NULL;
1374 ir_node *get_CallBegin_ptr(const ir_node *node) {
1375 assert(is_CallBegin(node));
1376 return get_irn_n(node, 0);
1379 void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
1380 assert(is_CallBegin(node));
1381 set_irn_n(node, 0, ptr);
1384 ir_node *get_CallBegin_call(const ir_node *node) {
1385 assert(is_CallBegin(node));
1386 return node->attr.callbegin.call;
1389 void set_CallBegin_call(ir_node *node, ir_node *call) {
1390 assert(is_CallBegin(node));
1391 node->attr.callbegin.call = call;
1395 * Returns non-zero if a Call is surely a self-recursive Call.
 * Beware: if this function returns 0, the call might still be self-recursive!
1398 int is_self_recursive_Call(const ir_node *call) {
1399 const ir_node *callee = get_Call_ptr(call);
1401 if (is_SymConst_addr_ent(callee)) {
1402 const ir_entity *ent = get_SymConst_entity(callee);
1403 const ir_graph *irg = get_entity_irg(ent);
1404 if (irg == get_irn_irg(call))
1411 ir_node * get_##OP##_left(const ir_node *node) { \
1412 assert(is_##OP(node)); \
1413 return get_irn_n(node, node->op->op_index); \
1415 void set_##OP##_left(ir_node *node, ir_node *left) { \
1416 assert(is_##OP(node)); \
1417 set_irn_n(node, node->op->op_index, left); \
1419 ir_node *get_##OP##_right(const ir_node *node) { \
1420 assert(is_##OP(node)); \
1421 return get_irn_n(node, node->op->op_index + 1); \
1423 void set_##OP##_right(ir_node *node, ir_node *right) { \
1424 assert(is_##OP(node)); \
1425 set_irn_n(node, node->op->op_index + 1, right); \
1429 ir_node *get_##OP##_op(const ir_node *node) { \
1430 assert(is_##OP(node)); \
1431 return get_irn_n(node, node->op->op_index); \
1433 void set_##OP##_op(ir_node *node, ir_node *op) { \
1434 assert(is_##OP(node)); \
1435 set_irn_n(node, node->op->op_index, op); \
1438 #define BINOP_MEM(OP) \
1442 get_##OP##_mem(const ir_node *node) { \
1443 assert(is_##OP(node)); \
1444 return get_irn_n(node, 0); \
1448 set_##OP##_mem(ir_node *node, ir_node *mem) { \
1449 assert(is_##OP(node)); \
1450 set_irn_n(node, 0, mem); \
1456 ir_mode *get_##OP##_resmode(const ir_node *node) { \
1457 assert(is_##OP(node)); \
1458 return node->attr.divmod.res_mode; \
1461 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
1462 assert(is_##OP(node)); \
1463 node->attr.divmod.res_mode = mode; \
1490 int is_Div_remainderless(const ir_node *node) {
1491 assert(is_Div(node));
1492 return node->attr.divmod.no_remainder;
1495 int get_Conv_strict(const ir_node *node) {
1496 assert(is_Conv(node));
1497 return node->attr.conv.strict;
1500 void set_Conv_strict(ir_node *node, int strict_flag) {
1501 assert(is_Conv(node));
1502 node->attr.conv.strict = (char)strict_flag;
1506 get_Cast_type(ir_node *node) {
1507 assert(is_Cast(node));
1508 node->attr.cast.totype = skip_tid(node->attr.cast.totype);
1509 return node->attr.cast.totype;
1513 set_Cast_type(ir_node *node, ir_type *to_tp) {
1514 assert(is_Cast(node));
1515 node->attr.cast.totype = to_tp;
1519 /* Checks for upcast.
1521 * Returns true if the Cast node casts a class type to a super type.
1523 int is_Cast_upcast(ir_node *node) {
1524 ir_type *totype = get_Cast_type(node);
1525 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1527 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1530 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1531 totype = get_pointer_points_to_type(totype);
1532 fromtype = get_pointer_points_to_type(fromtype);
1537 if (!is_Class_type(totype)) return 0;
1538 return is_SubClass_of(fromtype, totype);
1541 /* Checks for downcast.
1543 * Returns true if the Cast node casts a class type to a sub type.
1545 int is_Cast_downcast(ir_node *node) {
1546 ir_type *totype = get_Cast_type(node);
1547 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1549 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1552 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1553 totype = get_pointer_points_to_type(totype);
1554 fromtype = get_pointer_points_to_type(fromtype);
1559 if (!is_Class_type(totype)) return 0;
1560 return is_SubClass_of(totype, fromtype);
1564 (is_unop)(const ir_node *node) {
1565 return _is_unop(node);
1569 get_unop_op(const ir_node *node) {
1570 if (node->op->opar == oparity_unary)
1571 return get_irn_n(node, node->op->op_index);
1573 assert(node->op->opar == oparity_unary);
1578 set_unop_op(ir_node *node, ir_node *op) {
1579 if (node->op->opar == oparity_unary)
1580 set_irn_n(node, node->op->op_index, op);
1582 assert(node->op->opar == oparity_unary);
1586 (is_binop)(const ir_node *node) {
1587 return _is_binop(node);
1591 get_binop_left(const ir_node *node) {
1592 assert(node->op->opar == oparity_binary);
1593 return get_irn_n(node, node->op->op_index);
1597 set_binop_left(ir_node *node, ir_node *left) {
1598 assert(node->op->opar == oparity_binary);
1599 set_irn_n(node, node->op->op_index, left);
1603 get_binop_right(const ir_node *node) {
1604 assert(node->op->opar == oparity_binary);
1605 return get_irn_n(node, node->op->op_index + 1);
1609 set_binop_right(ir_node *node, ir_node *right) {
1610 assert(node->op->opar == oparity_binary);
1611 set_irn_n(node, node->op->op_index + 1, right);
1615 (is_Phi)(const ir_node *n) {
1619 int is_Phi0(const ir_node *n) {
1622 return ((get_irn_op(n) == op_Phi) &&
1623 (get_irn_arity(n) == 0) &&
1624 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1628 get_Phi_preds_arr(ir_node *node) {
1629 assert(node->op == op_Phi);
1630 return (ir_node **)&(get_irn_in(node)[1]);
1634 get_Phi_n_preds(const ir_node *node) {
1635 assert(is_Phi(node) || is_Phi0(node));
1636 return (get_irn_arity(node));
1640 void set_Phi_n_preds(ir_node *node, int n_preds) {
1641 assert(node->op == op_Phi);
1646 get_Phi_pred(const ir_node *node, int pos) {
1647 assert(is_Phi(node) || is_Phi0(node));
1648 return get_irn_n(node, pos);
1652 set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
1653 assert(is_Phi(node) || is_Phi0(node));
1654 set_irn_n(node, pos, pred);
1657 ir_node *(get_Phi_next)(const ir_node *phi) {
1658 return _get_Phi_next(phi);
1661 void (set_Phi_next)(ir_node *phi, ir_node *next) {
1662 _set_Phi_next(phi, next);
1665 int is_memop(const ir_node *node) {
1666 ir_opcode code = get_irn_opcode(node);
1667 return (code == iro_Load || code == iro_Store);
1670 ir_node *get_memop_mem(const ir_node *node) {
1671 assert(is_memop(node));
1672 return get_irn_n(node, 0);
1675 void set_memop_mem(ir_node *node, ir_node *mem) {
1676 assert(is_memop(node));
1677 set_irn_n(node, 0, mem);
1680 ir_node *get_memop_ptr(const ir_node *node) {
1681 assert(is_memop(node));
1682 return get_irn_n(node, 1);
1685 void set_memop_ptr(ir_node *node, ir_node *ptr) {
1686 assert(is_memop(node));
1687 set_irn_n(node, 1, ptr);
1691 get_Load_mem(const ir_node *node) {
1692 assert(is_Load(node));
1693 return get_irn_n(node, 0);
1697 set_Load_mem(ir_node *node, ir_node *mem) {
1698 assert(is_Load(node));
1699 set_irn_n(node, 0, mem);
1703 get_Load_ptr(const ir_node *node) {
1704 assert(is_Load(node));
1705 return get_irn_n(node, 1);
1709 set_Load_ptr(ir_node *node, ir_node *ptr) {
1710 assert(is_Load(node));
1711 set_irn_n(node, 1, ptr);
1715 get_Load_mode(const ir_node *node) {
1716 assert(is_Load(node));
1717 return node->attr.load.load_mode;
1721 set_Load_mode(ir_node *node, ir_mode *mode) {
1722 assert(is_Load(node));
1723 node->attr.load.load_mode = mode;
1727 get_Load_volatility(const ir_node *node) {
1728 assert(is_Load(node));
1729 return node->attr.load.volatility;
1733 set_Load_volatility(ir_node *node, ir_volatility volatility) {
1734 assert(is_Load(node));
1735 node->attr.load.volatility = volatility;
1739 get_Load_align(const ir_node *node) {
1740 assert(is_Load(node));
1741 return node->attr.load.aligned;
1745 set_Load_align(ir_node *node, ir_align align) {
1746 assert(is_Load(node));
1747 node->attr.load.aligned = align;
1752 get_Store_mem(const ir_node *node) {
1753 assert(is_Store(node));
1754 return get_irn_n(node, 0);
1758 set_Store_mem(ir_node *node, ir_node *mem) {
1759 assert(is_Store(node));
1760 set_irn_n(node, 0, mem);
1764 get_Store_ptr(const ir_node *node) {
1765 assert(is_Store(node));
1766 return get_irn_n(node, 1);
1770 set_Store_ptr(ir_node *node, ir_node *ptr) {
1771 assert(is_Store(node));
1772 set_irn_n(node, 1, ptr);
1776 get_Store_value(const ir_node *node) {
1777 assert(is_Store(node));
1778 return get_irn_n(node, 2);
1782 set_Store_value(ir_node *node, ir_node *value) {
1783 assert(is_Store(node));
1784 set_irn_n(node, 2, value);
1788 get_Store_volatility(const ir_node *node) {
1789 assert(is_Store(node));
1790 return node->attr.store.volatility;
1794 set_Store_volatility(ir_node *node, ir_volatility volatility) {
1795 assert(is_Store(node));
1796 node->attr.store.volatility = volatility;
1800 get_Store_align(const ir_node *node) {
1801 assert(is_Store(node));
1802 return node->attr.store.aligned;
1806 set_Store_align(ir_node *node, ir_align align) {
1807 assert(is_Store(node));
1808 node->attr.store.aligned = align;
1813 get_Alloc_mem(const ir_node *node) {
1814 assert(is_Alloc(node));
1815 return get_irn_n(node, 0);
1819 set_Alloc_mem(ir_node *node, ir_node *mem) {
1820 assert(is_Alloc(node));
1821 set_irn_n(node, 0, mem);
1825 get_Alloc_size(const ir_node *node) {
1826 assert(is_Alloc(node));
1827 return get_irn_n(node, 1);
1831 set_Alloc_size(ir_node *node, ir_node *size) {
1832 assert(is_Alloc(node));
1833 set_irn_n(node, 1, size);
1837 get_Alloc_type(ir_node *node) {
1838 assert(is_Alloc(node));
1839 return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
1843 set_Alloc_type(ir_node *node, ir_type *tp) {
1844 assert(is_Alloc(node));
1845 node->attr.alloc.type = tp;
1849 get_Alloc_where(const ir_node *node) {
1850 assert(is_Alloc(node));
1851 return node->attr.alloc.where;
1855 set_Alloc_where(ir_node *node, ir_where_alloc where) {
1856 assert(is_Alloc(node));
1857 node->attr.alloc.where = where;
1862 get_Free_mem(const ir_node *node) {
1863 assert(is_Free(node));
1864 return get_irn_n(node, 0);
1868 set_Free_mem(ir_node *node, ir_node *mem) {
1869 assert(is_Free(node));
1870 set_irn_n(node, 0, mem);
1874 get_Free_ptr(const ir_node *node) {
1875 assert(is_Free(node));
1876 return get_irn_n(node, 1);
1880 set_Free_ptr(ir_node *node, ir_node *ptr) {
1881 assert(is_Free(node));
1882 set_irn_n(node, 1, ptr);
1886 get_Free_size(const ir_node *node) {
1887 assert(is_Free(node));
1888 return get_irn_n(node, 2);
1892 set_Free_size(ir_node *node, ir_node *size) {
1893 assert(is_Free(node));
1894 set_irn_n(node, 2, size);
1898 get_Free_type(ir_node *node) {
1899 assert(is_Free(node));
1900 return node->attr.free.type = skip_tid(node->attr.free.type);
1904 set_Free_type(ir_node *node, ir_type *tp) {
1905 assert(is_Free(node));
1906 node->attr.free.type = tp;
1910 get_Free_where(const ir_node *node) {
1911 assert(is_Free(node));
1912 return node->attr.free.where;
1916 set_Free_where(ir_node *node, ir_where_alloc where) {
1917 assert(is_Free(node));
1918 node->attr.free.where = where;
1921 ir_node **get_Sync_preds_arr(ir_node *node) {
1922 assert(is_Sync(node));
1923 return (ir_node **)&(get_irn_in(node)[1]);
1926 int get_Sync_n_preds(const ir_node *node) {
1927 assert(is_Sync(node));
1928 return (get_irn_arity(node));
1932 void set_Sync_n_preds(ir_node *node, int n_preds) {
1933 assert(is_Sync(node));
1937 ir_node *get_Sync_pred(const ir_node *node, int pos) {
1938 assert(is_Sync(node));
1939 return get_irn_n(node, pos);
1942 void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
1943 assert(is_Sync(node));
1944 set_irn_n(node, pos, pred);
1947 /* Add a new Sync predecessor */
1948 void add_Sync_pred(ir_node *node, ir_node *pred) {
1949 assert(is_Sync(node));
1950 add_irn_n(node, pred);
1953 /* Returns the source language type of a Proj node. */
1954 ir_type *get_Proj_type(ir_node *n) {
1955 ir_type *tp = firm_unknown_type;
1956 ir_node *pred = get_Proj_pred(n);
1958 switch (get_irn_opcode(pred)) {
1961 /* Deal with Start / Call here: we need to know the Proj Nr. */
1962 assert(get_irn_mode(pred) == mode_T);
1963 pred_pred = get_Proj_pred(pred);
1965 if (is_Start(pred_pred)) {
1966 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1967 tp = get_method_param_type(mtp, get_Proj_proj(n));
1968 } else if (is_Call(pred_pred)) {
1969 ir_type *mtp = get_Call_type(pred_pred);
1970 tp = get_method_res_type(mtp, get_Proj_proj(n));
1973 case iro_Start: break;
1974 case iro_Call: break;
1976 ir_node *a = get_Load_ptr(pred);
1978 tp = get_entity_type(get_Sel_entity(a));
1987 get_Proj_pred(const ir_node *node) {
1988 assert(is_Proj(node));
1989 return get_irn_n(node, 0);
1993 set_Proj_pred(ir_node *node, ir_node *pred) {
1994 assert(is_Proj(node));
1995 set_irn_n(node, 0, pred);
1999 get_Proj_proj(const ir_node *node) {
2000 #ifdef INTERPROCEDURAL_VIEW
2001 ir_opcode code = get_irn_opcode(node);
2003 if (code == iro_Proj) {
2004 return node->attr.proj;
2007 assert(code == iro_Filter);
2008 return node->attr.filter.proj;
2011 assert(is_Proj(node));
2012 return node->attr.proj;
2013 #endif /* INTERPROCEDURAL_VIEW */
2017 set_Proj_proj(ir_node *node, long proj) {
2018 #ifdef INTERPROCEDURAL_VIEW
2019 ir_opcode code = get_irn_opcode(node);
2021 if (code == iro_Proj) {
2022 node->attr.proj = proj;
2025 assert(code == iro_Filter);
2026 node->attr.filter.proj = proj;
2029 assert(is_Proj(node));
2030 node->attr.proj = proj;
2031 #endif /* INTERPROCEDURAL_VIEW */
2034 /* Returns non-zero if a node is a routine parameter. */
2035 int (is_arg_Proj)(const ir_node *node) {
2036 return _is_arg_Proj(node);
2040 get_Tuple_preds_arr(ir_node *node) {
2041 assert(is_Tuple(node));
2042 return (ir_node **)&(get_irn_in(node)[1]);
2046 get_Tuple_n_preds(const ir_node *node) {
2047 assert(is_Tuple(node));
2048 return get_irn_arity(node);
2053 set_Tuple_n_preds(ir_node *node, int n_preds) {
2054 assert(is_Tuple(node));
2059 get_Tuple_pred(const ir_node *node, int pos) {
2060 assert(is_Tuple(node));
2061 return get_irn_n(node, pos);
2065 set_Tuple_pred(ir_node *node, int pos, ir_node *pred) {
2066 assert(is_Tuple(node));
2067 set_irn_n(node, pos, pred);
2071 get_Id_pred(const ir_node *node) {
2072 assert(is_Id(node));
2073 return get_irn_n(node, 0);
2077 set_Id_pred(ir_node *node, ir_node *pred) {
2078 assert(is_Id(node));
2079 set_irn_n(node, 0, pred);
2082 ir_node *get_Confirm_value(const ir_node *node) {
2083 assert(is_Confirm(node));
2084 return get_irn_n(node, 0);
2087 void set_Confirm_value(ir_node *node, ir_node *value) {
2088 assert(is_Confirm(node));
2089 set_irn_n(node, 0, value);
2092 ir_node *get_Confirm_bound(const ir_node *node) {
2093 assert(is_Confirm(node));
2094 return get_irn_n(node, 1);
2097 void set_Confirm_bound(ir_node *node, ir_node *bound) {
2098 assert(is_Confirm(node));
2099 set_irn_n(node, 0, bound);
2102 pn_Cmp get_Confirm_cmp(const ir_node *node) {
2103 assert(is_Confirm(node));
2104 return node->attr.confirm.cmp;
2107 void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
2108 assert(is_Confirm(node));
2109 node->attr.confirm.cmp = cmp;
2113 get_Filter_pred(ir_node *node) {
2114 assert(is_Filter(node));
2119 set_Filter_pred(ir_node *node, ir_node *pred) {
2120 assert(is_Filter(node));
2125 get_Filter_proj(ir_node *node) {
2126 assert(is_Filter(node));
2127 return node->attr.filter.proj;
2131 set_Filter_proj(ir_node *node, long proj) {
2132 assert(is_Filter(node));
2133 node->attr.filter.proj = proj;
2136 /* Don't use get_irn_arity, get_irn_n in implementation as access
2137 shall work independent of view!!! */
2138 void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in) {
2139 assert(is_Filter(node));
2140 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2141 ir_graph *irg = get_irn_irg(node);
2142 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2143 node->attr.filter.backedge = new_backedge_arr(irg->obst, arity);
2144 node->attr.filter.in_cg[0] = node->in[0];
2146 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
2149 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2150 assert(is_Filter(node) && node->attr.filter.in_cg &&
2151 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2152 node->attr.filter.in_cg[pos + 1] = pred;
2155 int get_Filter_n_cg_preds(ir_node *node) {
2156 assert(is_Filter(node) && node->attr.filter.in_cg);
2157 return (ARR_LEN(node->attr.filter.in_cg) - 1);
2160 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2162 assert(is_Filter(node) && node->attr.filter.in_cg &&
2164 arity = ARR_LEN(node->attr.filter.in_cg);
2165 assert(pos < arity - 1);
2166 return node->attr.filter.in_cg[pos + 1];
2170 ir_node *get_Mux_sel(const ir_node *node) {
2171 assert(is_Mux(node));
2175 void set_Mux_sel(ir_node *node, ir_node *sel) {
2176 assert(is_Mux(node));
2180 ir_node *get_Mux_false(const ir_node *node) {
2181 assert(is_Mux(node));
2185 void set_Mux_false(ir_node *node, ir_node *ir_false) {
2186 assert(is_Mux(node));
2187 node->in[2] = ir_false;
2190 ir_node *get_Mux_true(const ir_node *node) {
2191 assert(is_Mux(node));
2195 void set_Mux_true(ir_node *node, ir_node *ir_true) {
2196 assert(is_Mux(node));
2197 node->in[3] = ir_true;
2201 ir_node *get_CopyB_mem(const ir_node *node) {
2202 assert(is_CopyB(node));
2203 return get_irn_n(node, 0);
2206 void set_CopyB_mem(ir_node *node, ir_node *mem) {
2207 assert(node->op == op_CopyB);
2208 set_irn_n(node, 0, mem);
2211 ir_node *get_CopyB_dst(const ir_node *node) {
2212 assert(is_CopyB(node));
2213 return get_irn_n(node, 1);
2216 void set_CopyB_dst(ir_node *node, ir_node *dst) {
2217 assert(is_CopyB(node));
2218 set_irn_n(node, 1, dst);
2221 ir_node *get_CopyB_src(const ir_node *node) {
2222 assert(is_CopyB(node));
2223 return get_irn_n(node, 2);
2226 void set_CopyB_src(ir_node *node, ir_node *src) {
2227 assert(is_CopyB(node));
2228 set_irn_n(node, 2, src);
2231 ir_type *get_CopyB_type(ir_node *node) {
2232 assert(is_CopyB(node));
2233 return node->attr.copyb.data_type = skip_tid(node->attr.copyb.data_type);
2236 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2237 assert(is_CopyB(node) && data_type);
2238 node->attr.copyb.data_type = data_type;
2243 get_InstOf_type(ir_node *node) {
2244 assert(node->op == op_InstOf);
2245 return node->attr.instof.type = skip_tid(node->attr.instof.type);
2249 set_InstOf_type(ir_node *node, ir_type *type) {
2250 assert(node->op == op_InstOf);
2251 node->attr.instof.type = type;
2255 get_InstOf_store(const ir_node *node) {
2256 assert(node->op == op_InstOf);
2257 return get_irn_n(node, 0);
2261 set_InstOf_store(ir_node *node, ir_node *obj) {
2262 assert(node->op == op_InstOf);
2263 set_irn_n(node, 0, obj);
2267 get_InstOf_obj(const ir_node *node) {
2268 assert(node->op == op_InstOf);
2269 return get_irn_n(node, 1);
2273 set_InstOf_obj(ir_node *node, ir_node *obj) {
2274 assert(node->op == op_InstOf);
2275 set_irn_n(node, 1, obj);
2278 /* Returns the memory input of a Raise operation. */
2280 get_Raise_mem(const ir_node *node) {
2281 assert(is_Raise(node));
2282 return get_irn_n(node, 0);
2286 set_Raise_mem(ir_node *node, ir_node *mem) {
2287 assert(is_Raise(node));
2288 set_irn_n(node, 0, mem);
2292 get_Raise_exo_ptr(const ir_node *node) {
2293 assert(is_Raise(node));
2294 return get_irn_n(node, 1);
2298 set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr) {
2299 assert(is_Raise(node));
2300 set_irn_n(node, 1, exo_ptr);
2305 /* Returns the memory input of a Bound operation. */
2306 ir_node *get_Bound_mem(const ir_node *bound) {
2307 assert(is_Bound(bound));
2308 return get_irn_n(bound, 0);
2311 void set_Bound_mem(ir_node *bound, ir_node *mem) {
2312 assert(is_Bound(bound));
2313 set_irn_n(bound, 0, mem);
2316 /* Returns the index input of a Bound operation. */
2317 ir_node *get_Bound_index(const ir_node *bound) {
2318 assert(is_Bound(bound));
2319 return get_irn_n(bound, 1);
2322 void set_Bound_index(ir_node *bound, ir_node *idx) {
2323 assert(is_Bound(bound));
2324 set_irn_n(bound, 1, idx);
2327 /* Returns the lower bound input of a Bound operation. */
2328 ir_node *get_Bound_lower(const ir_node *bound) {
2329 assert(is_Bound(bound));
2330 return get_irn_n(bound, 2);
2333 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2334 assert(is_Bound(bound));
2335 set_irn_n(bound, 2, lower);
2338 /* Returns the upper bound input of a Bound operation. */
2339 ir_node *get_Bound_upper(const ir_node *bound) {
2340 assert(is_Bound(bound));
2341 return get_irn_n(bound, 3);
2344 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2345 assert(is_Bound(bound));
2346 set_irn_n(bound, 3, upper);
2349 /* Return the operand of a Pin node. */
2350 ir_node *get_Pin_op(const ir_node *pin) {
2351 assert(is_Pin(pin));
2352 return get_irn_n(pin, 0);
2355 void set_Pin_op(ir_node *pin, ir_node *node) {
2356 assert(is_Pin(pin));
2357 set_irn_n(pin, 0, node);
2360 /* Return the assembler text of an ASM pseudo node. */
2361 ident *get_ASM_text(const ir_node *node) {
2362 assert(is_ASM(node));
2363 return node->attr.assem.asm_text;
2366 /* Return the number of input constraints for an ASM node. */
2367 int get_ASM_n_input_constraints(const ir_node *node) {
2368 assert(is_ASM(node));
2369 return ARR_LEN(node->attr.assem.inputs);
2372 /* Return the input constraints for an ASM node. This is a flexible array. */
2373 const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node) {
2374 assert(is_ASM(node));
2375 return node->attr.assem.inputs;
2378 /* Return the number of output constraints for an ASM node. */
2379 int get_ASM_n_output_constraints(const ir_node *node) {
2380 assert(is_ASM(node));
2381 return ARR_LEN(node->attr.assem.outputs);
2384 /* Return the output constraints for an ASM node. */
2385 const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node) {
2386 assert(is_ASM(node));
2387 return node->attr.assem.outputs;
2390 /* Return the number of clobbered registers for an ASM node. */
2391 int get_ASM_n_clobbers(const ir_node *node) {
2392 assert(is_ASM(node));
2393 return ARR_LEN(node->attr.assem.clobber);
2396 /* Return the list of clobbered registers for an ASM node. */
2397 ident **get_ASM_clobbers(const ir_node *node) {
2398 assert(is_ASM(node));
2399 return node->attr.assem.clobber;
2402 /* returns the graph of a node */
2404 get_irn_irg(const ir_node *node) {
2406 * Do not use get_nodes_Block() here, because this
2407 * will check the pinned state.
2408 * However even a 'wrong' block is always in the proper
2411 if (! is_Block(node))
2412 node = get_irn_n(node, -1);
2413 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2414 node = get_irn_n(node, -1);
2415 assert(is_Block(node));
2416 return node->attr.block.irg;
2420 /*----------------------------------------------------------------*/
2421 /* Auxiliary routines */
2422 /*----------------------------------------------------------------*/
2425 skip_Proj(ir_node *node) {
2426 /* don't assert node !!! */
2431 node = get_Proj_pred(node);
2437 skip_Proj_const(const ir_node *node) {
2438 /* don't assert node !!! */
2443 node = get_Proj_pred(node);
2449 skip_Tuple(ir_node *node) {
2453 if (!get_opt_normalize()) return node;
2456 if (get_irn_op(node) == op_Proj) {
2457 pred = get_Proj_pred(node);
2458 op = get_irn_op(pred);
2461 * Looks strange but calls get_irn_op() only once
2462 * in most often cases.
2464 if (op == op_Proj) { /* nested Tuple ? */
2465 pred = skip_Tuple(pred);
2466 op = get_irn_op(pred);
2468 if (op == op_Tuple) {
2469 node = get_Tuple_pred(pred, get_Proj_proj(node));
2472 } else if (op == op_Tuple) {
2473 node = get_Tuple_pred(pred, get_Proj_proj(node));
2480 /* returns operand of node if node is a Cast */
2481 ir_node *skip_Cast(ir_node *node) {
2483 return get_Cast_op(node);
2487 /* returns operand of node if node is a Cast */
2488 const ir_node *skip_Cast_const(const ir_node *node) {
2490 return get_Cast_op(node);
2494 /* returns operand of node if node is a Pin */
2495 ir_node *skip_Pin(ir_node *node) {
2497 return get_Pin_op(node);
2501 /* returns operand of node if node is a Confirm */
2502 ir_node *skip_Confirm(ir_node *node) {
2503 if (is_Confirm(node))
2504 return get_Confirm_value(node);
2508 /* skip all high-level ops */
2509 ir_node *skip_HighLevel_ops(ir_node *node) {
2510 while (is_op_highlevel(get_irn_op(node))) {
2511 node = get_irn_n(node, 0);
2517 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2518 * than any other approach, as Id chains are resolved and all point to the real node, or
2519 * all id's are self loops.
 * Note: This function accounts for nearly 10% of almost any compiler run, so it's
2522 * a little bit "hand optimized".
2524 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2527 skip_Id(ir_node *node) {
2529 /* don't assert node !!! */
2531 if (!node || (node->op != op_Id)) return node;
2533 /* Don't use get_Id_pred(): We get into an endless loop for
2534 self-referencing Ids. */
2535 pred = node->in[0+1];
2537 if (pred->op != op_Id) return pred;
2539 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2540 ir_node *rem_pred, *res;
2542 if (pred->op != op_Id) return pred; /* shortcut */
2545 assert(get_irn_arity (node) > 0);
2547 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2548 res = skip_Id(rem_pred);
2549 if (res->op == op_Id) /* self-loop */ return node;
2551 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2558 void skip_Id_and_store(ir_node **node) {
2561 if (!n || (n->op != op_Id)) return;
2563 /* Don't use get_Id_pred(): We get into an endless loop for
2564 self-referencing Ids. */
2569 (is_Bad)(const ir_node *node) {
2570 return _is_Bad(node);
2574 (is_NoMem)(const ir_node *node) {
2575 return _is_NoMem(node);
2579 (is_Minus)(const ir_node *node) {
2580 return _is_Minus(node);
2584 (is_Abs)(const ir_node *node) {
2585 return _is_Abs(node);
2589 (is_Mod)(const ir_node *node) {
2590 return _is_Mod(node);
2594 (is_Div)(const ir_node *node) {
2595 return _is_Div(node);
2599 (is_DivMod)(const ir_node *node) {
2600 return _is_DivMod(node);
2604 (is_Quot)(const ir_node *node) {
2605 return _is_Quot(node);
2609 (is_Add)(const ir_node *node) {
2610 return _is_Add(node);
2614 (is_Carry)(const ir_node *node) {
2615 return _is_Carry(node);
2619 (is_And)(const ir_node *node) {
2620 return _is_And(node);
2624 (is_Or)(const ir_node *node) {
2625 return _is_Or(node);
2629 (is_Eor)(const ir_node *node) {
2630 return _is_Eor(node);
2634 (is_Sub)(const ir_node *node) {
2635 return _is_Sub(node);
2639 (is_Shl)(const ir_node *node) {
2640 return _is_Shl(node);
2644 (is_Shr)(const ir_node *node) {
2645 return _is_Shr(node);
2649 (is_Shrs)(const ir_node *node) {
2650 return _is_Shrs(node);
2654 (is_Rotl)(const ir_node *node) {
2655 return _is_Rotl(node);
2659 (is_Not)(const ir_node *node) {
2660 return _is_Not(node);
2664 (is_Id)(const ir_node *node) {
2665 return _is_Id(node);
2669 (is_Tuple)(const ir_node *node) {
2670 return _is_Tuple(node);
2674 (is_Bound)(const ir_node *node) {
2675 return _is_Bound(node);
2679 (is_Start)(const ir_node *node) {
2680 return _is_Start(node);
2684 (is_End)(const ir_node *node) {
2685 return _is_End(node);
2689 (is_Const)(const ir_node *node) {
2690 return _is_Const(node);
2694 (is_Conv)(const ir_node *node) {
2695 return _is_Conv(node);
2699 (is_strictConv)(const ir_node *node) {
2700 return _is_strictConv(node);
2704 (is_Cast)(const ir_node *node) {
2705 return _is_Cast(node);
2709 (is_no_Block)(const ir_node *node) {
2710 return _is_no_Block(node);
2714 (is_Block)(const ir_node *node) {
2715 return _is_Block(node);
2718 /* returns true if node is an Unknown node. */
2720 (is_Unknown)(const ir_node *node) {
2721 return _is_Unknown(node);
2724 /* returns true if node is a Return node. */
2726 (is_Return)(const ir_node *node) {
2727 return _is_Return(node);
2730 /* returns true if node is a Call node. */
2732 (is_Call)(const ir_node *node) {
2733 return _is_Call(node);
2736 /* returns true if node is a CallBegin node. */
2738 (is_CallBegin)(const ir_node *node) {
2739 return _is_CallBegin(node);
2742 /* returns true if node is a Sel node. */
2744 (is_Sel)(const ir_node *node) {
2745 return _is_Sel(node);
2748 /* returns true if node is a Mux node. */
2750 (is_Mux)(const ir_node *node) {
2751 return _is_Mux(node);
2754 /* returns true if node is a Load node. */
2756 (is_Load)(const ir_node *node) {
2757 return _is_Load(node);
/* returns true if node is a Store node. */
2762 (is_Store)(const ir_node *node) {
2763 return _is_Store(node);
2766 /* returns true if node is a Sync node. */
2768 (is_Sync)(const ir_node *node) {
2769 return _is_Sync(node);
2772 /* Returns true if node is a Confirm node. */
2774 (is_Confirm)(const ir_node *node) {
2775 return _is_Confirm(node);
2778 /* Returns true if node is a Pin node. */
2780 (is_Pin)(const ir_node *node) {
2781 return _is_Pin(node);
2784 /* Returns true if node is a SymConst node. */
2786 (is_SymConst)(const ir_node *node) {
2787 return _is_SymConst(node);
2790 /* Returns true if node is a SymConst node with kind symconst_addr_ent. */
2792 (is_SymConst_addr_ent)(const ir_node *node) {
2793 return _is_SymConst_addr_ent(node);
2796 /* Returns true if node is a Cond node. */
2798 (is_Cond)(const ir_node *node) {
2799 return _is_Cond(node);
2803 (is_CopyB)(const ir_node *node) {
2804 return _is_CopyB(node);
2807 /* returns true if node is a Cmp node. */
2809 (is_Cmp)(const ir_node *node) {
2810 return _is_Cmp(node);
2813 /* returns true if node is an Alloc node. */
2815 (is_Alloc)(const ir_node *node) {
2816 return _is_Alloc(node);
2819 /* returns true if node is a Free node. */
2821 (is_Free)(const ir_node *node) {
2822 return _is_Free(node);
2825 /* returns true if a node is a Jmp node. */
2827 (is_Jmp)(const ir_node *node) {
2828 return _is_Jmp(node);
2831 /* returns true if a node is a IJmp node. */
2833 (is_IJmp)(const ir_node *node) {
2834 return _is_IJmp(node);
2837 /* returns true if a node is a Raise node. */
2839 (is_Raise)(const ir_node *node) {
2840 return _is_Raise(node);
2843 /* returns true if a node is an ASM node. */
2845 (is_ASM)(const ir_node *node) {
2846 return _is_ASM(node);
2850 (is_Proj)(const ir_node *node) {
2851 return _is_Proj(node);
2854 /* Returns true if node is a Filter node. */
2856 (is_Filter)(const ir_node *node) {
2857 return _is_Filter(node);
2860 /* Returns true if the operation manipulates control flow. */
/* Delegates to the opcode's flag; operates on the node's ir_op. */
2861 int is_cfop(const ir_node *node) {
2862 return is_op_cfopcode(get_irn_op(node));
2865 /* Returns true if the operation manipulates interprocedural control flow:
2866 CallBegin, EndReg, EndExcept */
2867 int is_ip_cfop(const ir_node *node) {
2868 return is_ip_cfopcode(get_irn_op(node));
2871 /* Returns true if the operation can change the control flow because
of an exception (fragile op).  NOTE(review): the return type and rest of
this comment are missing from this listing — verify against upstream. */
2874 is_fragile_op(const ir_node *node) {
2875 return is_op_fragile(get_irn_op(node));
2878 /* Returns the memory operand of fragile operations. */
/* NOTE(review): the case labels of this switch (one per fragile opcode,
 * presumably Load/Store/Call/Div/... ) are missing from this listing;
 * only the shared return and the unreachable-default assert survive. */
2879 ir_node *get_fragile_op_mem(ir_node *node) {
2880 assert(node && is_fragile_op(node));
2882 switch (get_irn_opcode(node)) {
/* all fragile ops keep their memory input at pn_Generic_M_regular */
2893 return get_irn_n(node, pn_Generic_M_regular);
/* unhandled opcode: caller violated the is_fragile_op() precondition */
2898 assert(0 && "should not be reached");
2903 /* Returns the result mode of a Div operation. */
/* Dispatches on the opcode of the four division-like nodes; anything
 * else falls through to the assert below (listing is missing the
 * closing brace and the default return — verify against upstream). */
2904 ir_mode *get_divop_resmod(const ir_node *node) {
2905 switch (get_irn_opcode(node)) {
2906 case iro_Quot : return get_Quot_resmode(node);
2907 case iro_DivMod: return get_DivMod_resmode(node);
2908 case iro_Div : return get_Div_resmode(node);
2909 case iro_Mod : return get_Mod_resmode(node);
2911 assert(0 && "should not be reached");
2916 /* Returns true if the operation is a forking control flow operation. */
2917 int (is_irn_forking)(const ir_node *node) {
2918 return _is_irn_forking(node);
2921 /* Return the type associated with the value produced by n
2922 * if the node remarks this type as it is the case for
2923 * Cast, Const, SymConst and some Proj nodes. */
2924 ir_type *(get_irn_type)(ir_node *node) {
2925 return _get_irn_type(node);
2928 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
Cast).  NOTE(review): the tail of this comment is cut off in this listing. */
2930 ir_type *(get_irn_type_attr)(ir_node *node) {
2931 return _get_irn_type_attr(node);
2934 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2935 ir_entity *(get_irn_entity_attr)(ir_node *node) {
2936 return _get_irn_entity_attr(node);
2939 /* Returns non-zero for constant-like nodes. */
2940 int (is_irn_constlike)(const ir_node *node) {
2941 return _is_irn_constlike(node);
/* Thin out-of-line wrappers around the corresponding _is_irn_* inline
 * predicates, which test flags on the node's ir_op. */
2945 * Returns non-zero for nodes that are allowed to have keep-alives and
2946 * are neither Block nor PhiM.
2948 int (is_irn_keep)(const ir_node *node) {
2949 return _is_irn_keep(node);
2953 * Returns non-zero for nodes that are always placed in the start block.
2955 int (is_irn_start_block_placed)(const ir_node *node) {
2956 return _is_irn_start_block_placed(node);
2959 /* Returns non-zero for nodes that are machine operations. */
2960 int (is_irn_machine_op)(const ir_node *node) {
2961 return _is_irn_machine_op(node);
2964 /* Returns non-zero for nodes that are machine operands. */
2965 int (is_irn_machine_operand)(const ir_node *node) {
2966 return _is_irn_machine_operand(node);
2969 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2970 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2971 return _is_irn_machine_user(node, n);
2975 /* Gets the string representation of the jump prediction. */
/* NOTE(review): the switch head, default case and closing brace are
 * missing from this listing — verify against upstream. */
2976 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
2979 case COND_JMP_PRED_NONE: return "no prediction";
2980 case COND_JMP_PRED_TRUE: return "true taken";
2981 case COND_JMP_PRED_FALSE: return "false taken";
2985 /* Returns the conditional jump prediction of a Cond node. */
2986 cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond) {
2987 return _get_Cond_jmp_pred(cond);
2990 /* Sets a new conditional jump prediction. */
2991 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2992 _set_Cond_jmp_pred(cond, pred);
2995 /** the get_type operation must be always implemented and return a firm type */
/* Fallback for opcodes that carry no type: always the unknown type. */
2996 static ir_type *get_Default_type(ir_node *n) {
2998 return get_unknown_type();
3001 /* Sets the get_type operation for an ir_op_ops. */
/* Installs an opcode-specific get_type callback where one exists
 * (Const, SymConst, Cast, Proj); every other opcode falls back to
 * get_Default_type so the callback is never NULL.  NOTE(review): the
 * switch head and return statement are missing from this listing. */
3002 ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
3004 case iro_Const: ops->get_type = get_Const_type; break;
3005 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
3006 case iro_Cast: ops->get_type = get_Cast_type; break;
3007 case iro_Proj: ops->get_type = get_Proj_type; break;
3009 /* not allowed to be NULL */
3010 if (! ops->get_type)
3011 ops->get_type = get_Default_type;
3017 /** Return the attribute type of a SymConst node if exists */
/* Only SymConst kinds flagged by SYMCONST_HAS_TYPE carry a type;
 * the (elided) fall-through presumably returns NULL otherwise. */
3018 static ir_type *get_SymConst_attr_type(ir_node *self) {
3019 symconst_kind kind = get_SymConst_kind(self);
3020 if (SYMCONST_HAS_TYPE(kind))
3021 return get_SymConst_type(self);
3025 /** Return the attribute entity of a SymConst node if exists */
/* Mirror of the above for entity-carrying SymConst kinds. */
3026 static ir_entity *get_SymConst_attr_entity(ir_node *self) {
3027 symconst_kind kind = get_SymConst_kind(self);
3028 if (SYMCONST_HAS_ENT(kind))
3029 return get_SymConst_entity(self);
3033 /** the get_type_attr operation must be always implemented */
/* Fallback type-attribute callback: the unknown type. */
3034 static ir_type *get_Null_type(ir_node *n) {
3036 return firm_unknown_type;
3039 /* Sets the get_type_attr operation for an ir_op_ops. */
/* Same pattern as firm_set_default_get_type: per-opcode callbacks for
 * the type-carrying opcodes, get_Null_type for everything else.
 * NOTE(review): switch head and return are missing from this listing. */
3040 ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
3042 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
3043 case iro_Call: ops->get_type_attr = get_Call_type; break;
3044 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
3045 case iro_Free: ops->get_type_attr = get_Free_type; break;
3046 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
3048 /* not allowed to be NULL */
3049 if (! ops->get_type_attr)
3050 ops->get_type_attr = get_Null_type;
3056 /** the get_entity_attr operation must be always implemented */
/* Fallback entity-attribute callback; its body (presumably returning
 * NULL or the unknown entity) is elided in this listing. */
3057 static ir_entity *get_Null_ent(ir_node *n) {
3062 /* Sets the get_entity_attr operation for an ir_op_ops. */
/* Per-opcode callbacks for SymConst and Sel; get_Null_ent otherwise.
 * NOTE(review): switch head and return are missing from this listing. */
3063 ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
3065 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
3066 case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
3068 /* not allowed to be NULL */
3069 if (! ops->get_entity_attr)
3070 ops->get_entity_attr = get_Null_ent;
3076 /* Sets the debug information of a node. */
3077 void (set_irn_dbg_info)(ir_node *n, dbg_info *db) {
3078 _set_irn_dbg_info(n, db);
3082 * Returns the debug information of a node.
3084 * @param n The node.
* @return The attached dbg_info (may be NULL — not shown by this listing).
3086 dbg_info *(get_irn_dbg_info)(const ir_node *n) {
3087 return _get_irn_dbg_info(n);
/* Two variants of the global-address helpers: the first, inside "#if 0",
 * additionally accepted Sel nodes based on the globals pointer; the
 * active variant (after the elided #else) only accepts SymConst nodes
 * with kind symconst_addr_ent.  NOTE(review): the #else/#endif lines and
 * several statements are missing from this listing. */
3090 #if 0 /* allow the global pointer */
3092 /* checks whether a node represents a global address */
3093 int is_Global(const ir_node *node) {
3096 if (is_SymConst_addr_ent(node))
3101 ptr = get_Sel_ptr(node);
3102 return is_globals_pointer(ptr) != NULL;
3105 /* returns the entity of a global address */
3106 ir_entity *get_Global_entity(const ir_node *node) {
3107 if (is_SymConst(node))
3108 return get_SymConst_entity(node);
3110 return get_Sel_entity(node);
3114 /* checks whether a node represents a global address */
3115 int is_Global(const ir_node *node) {
3116 return is_SymConst_addr_ent(node);
3119 /* returns the entity of a global address */
/* precondition (not shown): node must satisfy is_Global() */
3120 ir_entity *get_Global_entity(const ir_node *node) {
3121 return get_SymConst_entity(node);
3126 * Calculate a hash value of a node.
/* Polynomial (base-9) hash over arity, intra-procedural predecessors,
 * mode pointer and op pointer.  Local declarations of h, irn_arity and i
 * are elided in this listing. */
3128 unsigned firm_default_hash(const ir_node *node) {
3132 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
3133 h = irn_arity = get_irn_intra_arity(node);
3135 /* consider all in nodes... except the block if not a control flow. */
/* index -1 is the block predecessor; included only for control-flow ops */
3136 for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
3137 h = 9*h + HASH_PTR(get_irn_intra_n(node, i));
3141 h = 9*h + HASH_PTR(get_irn_mode(node));
3143 h = 9*h + HASH_PTR(get_irn_op(node));
3146 } /* firm_default_hash */