3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
23 #include "irbackedge_t.h"
30 /* some constants fixing the positions of nodes predecessors
32 #define CALL_PARAM_OFFSET 2
33 #define FUNCCALL_PARAM_OFFSET 1
34 #define SEL_INDEX_OFFSET 2
35 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
36 #define END_KEEPALIVE_OFFSET 0
38 static const char *pnc_name_arr [] = {
39 "False", "Eq", "Lt", "Le",
40 "Gt", "Ge", "Lg", "Leg", "Uo",
41 "Ue", "Ul", "Ule", "Ug", "Uge",
46  * returns the pnc name from a pnc constant
48 const char *get_pnc_string(int pnc) {
49 return pnc_name_arr[pnc];
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
78 const char *pns_name_arr [] = {
79 "initial_exec", "global_store",
80 "frame_base", "globals", "args"
83 const char *symconst_name_arr [] = {
84 "type_tag", "size", "addr_name", "addr_ent"
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
141 /*-- getting some parameters from ir_nodes --*/
144 (is_ir_node)(const void *thing) {
145 return __is_ir_node(thing);
149 (get_irn_intra_arity)(const ir_node *node) {
150 return __get_irn_intra_arity(node);
154 (get_irn_inter_arity)(const ir_node *node) {
155 return __get_irn_inter_arity(node);
159 (get_irn_arity)(const ir_node *node) {
160 return __get_irn_arity(node);
163 /* Returns the array with ins. This array is shifted with respect to the
164 array accessed by get_irn_n: The block operand is at position 0 not -1.
165 (@@@ This should be changed.)
166 The order of the predecessors in this array is not guaranteed, except that
167 lists of operands as predecessors of Block or arguments of a Call are
170 get_irn_in (const ir_node *node) {
172 if (interprocedural_view) { /* handle Filter and Block specially */
173 if (get_irn_opcode(node) == iro_Filter) {
174 assert(node->attr.filter.in_cg);
175 return node->attr.filter.in_cg;
176 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
177 return node->attr.block.in_cg;
179 /* else fall through */
185 set_irn_in (ir_node *node, int arity, ir_node **in) {
188 if (interprocedural_view) { /* handle Filter and Block specially */
189 if (get_irn_opcode(node) == iro_Filter) {
190 assert(node->attr.filter.in_cg);
191 arr = &node->attr.filter.in_cg;
192 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
193 arr = &node->attr.block.in_cg;
200 if (arity != ARR_LEN(*arr) - 1) {
201 ir_node * block = (*arr)[0];
202 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
205 fix_backedges(current_ir_graph->obst, node);
206 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
210 (get_irn_intra_n)(ir_node *node, int n) {
211 return __get_irn_intra_n (node, n);
215 (get_irn_inter_n)(ir_node *node, int n) {
216 return __get_irn_inter_n (node, n);
220 (get_irn_n)(ir_node *node, int n) {
221 return __get_irn_n (node, n);
225 set_irn_n (ir_node *node, int n, ir_node *in) {
226 assert(node && -1 <= n && n < get_irn_arity(node));
227 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
228 /* Change block pred in both views! */
229 node->in[n + 1] = in;
230 assert(node->attr.filter.in_cg);
231 node->attr.filter.in_cg[n + 1] = in;
234 if (interprocedural_view) { /* handle Filter and Block specially */
235 if (get_irn_opcode(node) == iro_Filter) {
236 assert(node->attr.filter.in_cg);
237 node->attr.filter.in_cg[n + 1] = in;
239 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
240 node->attr.block.in_cg[n + 1] = in;
243 /* else fall through */
245 node->in[n + 1] = in;
249 (get_irn_mode)(const ir_node *node) {
250 return __get_irn_mode(node);
254 (set_irn_mode)(ir_node *node, ir_mode *mode)
256 __set_irn_mode(node, mode);
260 get_irn_modecode (const ir_node *node)
263 return node->mode->code;
266 /** Gets the string representation of the mode. */
268 get_irn_modename (const ir_node *node)
271 return get_mode_name(node->mode);
275 get_irn_modeident (const ir_node *node)
278 return get_mode_ident(node->mode);
282 (get_irn_op)(const ir_node *node)
284 return __get_irn_op(node);
287 /* should be private to the library: */
289 set_irn_op (ir_node *node, ir_op *op)
296 (get_irn_opcode)(const ir_node *node)
298 return __get_irn_opcode(node);
302 get_irn_opname (const ir_node *node)
305 if ((get_irn_op((ir_node *)node) == op_Phi) &&
306 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
307 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
308 return get_id_str(node->op->name);
312 get_irn_opident (const ir_node *node)
315 return node->op->name;
319 (get_irn_visited)(const ir_node *node)
321 return __get_irn_visited(node);
325 (set_irn_visited)(ir_node *node, unsigned long visited)
327 __set_irn_visited(node, visited);
331 (mark_irn_visited)(ir_node *node) {
332 __mark_irn_visited(node);
336 (irn_not_visited)(const ir_node *node) {
337 return __irn_not_visited(node);
341 (irn_visited)(const ir_node *node) {
342 return __irn_visited(node);
346 (set_irn_link)(ir_node *node, void *link) {
347 __set_irn_link(node, link);
351 (get_irn_link)(const ir_node *node) {
352 return __get_irn_link(node);
356 #ifdef DO_HEAPANALYSIS
357 /* Access the abstract interpretation information of a node.
358 Returns NULL if no such information is available. */
359 struct abstval *get_irn_abst_value(ir_node *n) {
362 /* Set the abstract interpretation information of a node. */
363 void set_irn_abst_value(ir_node *n, struct abstval *os) {
366 struct section *firm_get_irn_section(ir_node *n) {
369 void firm_set_irn_section(ir_node *n, struct section *s) {
372 #endif /* DO_HEAPANALYSIS */
375 /* Outputs a unique number for this node */
377 get_irn_node_nr(const ir_node *node) {
380 return node->node_nr;
387 get_irn_const_attr (ir_node *node)
389 assert (node->op == op_Const);
390 return node->attr.con;
394 get_irn_proj_attr (ir_node *node)
396 assert (node->op == op_Proj);
397 return node->attr.proj;
401 get_irn_alloc_attr (ir_node *node)
403 assert (node->op == op_Alloc);
408 get_irn_free_attr (ir_node *node)
410 assert (node->op == op_Free);
411 return node->attr.f = skip_tid(node->attr.f);
415 get_irn_symconst_attr (ir_node *node)
417 assert (node->op == op_SymConst);
422 get_irn_call_attr (ir_node *node)
424 assert (node->op == op_Call);
425 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
429 get_irn_funccall_attr (ir_node *node)
431 assert (node->op == op_FuncCall);
432 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
436 get_irn_sel_attr (ir_node *node)
438 assert (node->op == op_Sel);
443 get_irn_phi_attr (ir_node *node)
445 assert (node->op == op_Phi);
446 return node->attr.phi0_pos;
450 get_irn_block_attr (ir_node *node)
452 assert (node->op == op_Block);
453 return node->attr.block;
456 /** manipulate fields of individual nodes **/
458 /* this works for all except Block */
460 get_nodes_Block (ir_node *node) {
461 assert (!(node->op == op_Block));
462 return get_irn_n(node, -1);
466 set_nodes_Block (ir_node *node, ir_node *block) {
467 assert (!(node->op == op_Block));
468 set_irn_n(node, -1, block);
471 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
472 * from Start. If so returns frame type, else Null. */
473 type *is_frame_pointer(ir_node *n) {
474 if ((get_irn_op(n) == op_Proj) &&
475 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
476 ir_node *start = get_Proj_pred(n);
477 if (get_irn_op(start) == op_Start) {
478 return get_irg_frame_type(get_irn_irg(start));
484 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
485 * from Start. If so returns global type, else Null. */
486 type *is_globals_pointer(ir_node *n) {
487 if ((get_irn_op(n) == op_Proj) &&
488 (get_Proj_proj(n) == pn_Start_P_globals)) {
489 ir_node *start = get_Proj_pred(n);
490 if (get_irn_op(start) == op_Start) {
491 return get_glob_type();
497 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
498 * from Start. If so returns 1, else 0. */
499 int is_value_arg_pointer(ir_node *n) {
500 if ((get_irn_op(n) == op_Proj) &&
501 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
502 (get_irn_op(get_Proj_pred(n)) == op_Start))
507 /* Returns an array with the predecessors of the Block. Depending on
508 the implementation of the graph data structure this can be a copy of
509 the internal representation of predecessors as well as the internal
510 array itself. Therefore writing to this array might obstruct the ir. */
512 get_Block_cfgpred_arr (ir_node *node)
514 assert ((node->op == op_Block));
515 return (ir_node **)&(get_irn_in(node)[1]);
520 get_Block_n_cfgpreds (ir_node *node) {
521 assert ((node->op == op_Block));
522 return get_irn_arity(node);
526 get_Block_cfgpred (ir_node *node, int pos) {
528 assert (node->op == op_Block);
529 assert(-1 <= pos && pos < get_irn_arity(node));
530 return get_irn_n(node, pos);
534 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
535 assert (node->op == op_Block);
536 set_irn_n(node, pos, pred);
540 get_Block_matured (ir_node *node) {
541 assert (node->op == op_Block);
542 return node->attr.block.matured;
546 set_Block_matured (ir_node *node, bool matured) {
547 assert (node->op == op_Block);
548 node->attr.block.matured = matured;
551 get_Block_block_visited (ir_node *node) {
552 assert (node->op == op_Block);
553 return node->attr.block.block_visited;
557 set_Block_block_visited (ir_node *node, unsigned long visit) {
558 assert (node->op == op_Block);
559 node->attr.block.block_visited = visit;
562 /* For this current_ir_graph must be set. */
564 mark_Block_block_visited (ir_node *node) {
565 assert (node->op == op_Block);
566 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
570 Block_not_block_visited(ir_node *node) {
571 assert (node->op == op_Block);
572 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
576 get_Block_graph_arr (ir_node *node, int pos) {
577 assert (node->op == op_Block);
578 return node->attr.block.graph_arr[pos+1];
582 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
583 assert (node->op == op_Block);
584 node->attr.block.graph_arr[pos+1] = value;
587 /* handler handling for Blocks * /
589 set_Block_handler (ir_node *block, ir_node *handler) {
590 assert ((block->op == op_Block));
591 assert ((handler->op == op_Block));
592 block->attr.block.handler_entry = handler;
596 get_Block_handler (ir_node *block) {
597 assert ((block->op == op_Block));
598 return (block->attr.block.handler_entry);
601 / * handler handling for Nodes * /
603 set_Node_handler (ir_node *node, ir_node *handler) {
604 set_Block_handler (get_nodes_Block (node), handler);
608 get_Node_handler (ir_node *node) {
609 return (get_Block_handler (get_nodes_Block (node)));
612 / * exc_t handling for Blocks * /
613 void set_Block_exc (ir_node *block, exc_t exc) {
614 assert ((block->op == op_Block));
615 block->attr.block.exc = exc;
618 exc_t get_Block_exc (ir_node *block) {
619 assert ((block->op == op_Block));
620 return (block->attr.block.exc);
623 / * exc_t handling for Nodes * /
624 void set_Node_exc (ir_node *node, exc_t exc) {
625 set_Block_exc (get_nodes_Block (node), exc);
628 exc_t get_Node_exc (ir_node *node) {
629 return (get_Block_exc (get_nodes_Block (node)));
633 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
634 assert(node->op == op_Block);
635 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
636 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
637 node->attr.block.in_cg[0] = NULL;
638 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
640 /* Fix backedge array. fix_backedges operates depending on
641 interprocedural_view. */
642 bool ipv = interprocedural_view;
643 interprocedural_view = true;
644 fix_backedges(current_ir_graph->obst, node);
645 interprocedural_view = ipv;
648 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
651 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
652 assert(node->op == op_Block &&
653 node->attr.block.in_cg &&
654 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
655 node->attr.block.in_cg[pos + 1] = pred;
658 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
659 assert(node->op == op_Block);
660 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
663 int get_Block_cg_n_cfgpreds(ir_node * node) {
664 assert(node->op == op_Block);
665 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
668 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
669 assert(node->op == op_Block && node->attr.block.in_cg);
670 return node->attr.block.in_cg[pos + 1];
673 void remove_Block_cg_cfgpred_arr(ir_node * node) {
674 assert(node->op == op_Block);
675 node->attr.block.in_cg = NULL;
678 /* Start references the irg it is in. */
680 get_Start_irg(ir_node *node) {
681 return get_irn_irg(node);
685 set_Start_irg(ir_node *node, ir_graph *irg) {
686 assert(node->op == op_Start);
687 assert(is_ir_graph(irg));
688 assert(0 && " Why set irg? -- use set_irn_irg");
692 get_End_n_keepalives(ir_node *end) {
693 assert (end->op == op_End);
694 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
698 get_End_keepalive(ir_node *end, int pos) {
699 assert (end->op == op_End);
700 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
704 add_End_keepalive (ir_node *end, ir_node *ka) {
705 assert (end->op == op_End);
706 ARR_APP1 (ir_node *, end->in, ka);
710 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
711 assert (end->op == op_End);
712 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
716 free_End (ir_node *end) {
717 assert (end->op == op_End);
719 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
720 end->in = NULL; /* @@@ make sure we get an error if we use the
721 in array afterwards ... */
724 ir_graph *get_EndReg_irg (ir_node *end) {
725 return get_irn_irg(end);
728 ir_graph *get_EndExcept_irg (ir_node *end) {
729 return get_irn_irg(end);
733 > Implementing the case construct (which is where the constant Proj node is
734 > important) involves far more than simply determining the constant values.
735 > We could argue that this is more properly a function of the translator from
736 > Firm to the target machine. That could be done if there was some way of
737 > projecting "default" out of the Cond node.
738 I know it's complicated.
739 Basically there are two problems:
740 - determining the gaps between the projs
741 - determining the biggest case constant to know the proj number for
743 I see several solutions:
744 1. Introduce a ProjDefault node. Solves both problems.
745 This means to extend all optimizations executed during construction.
746 2. Give the Cond node for switch two flavors:
747 a) there are no gaps in the projs (existing flavor)
748 b) gaps may exist, default proj is still the Proj with the largest
749 projection number. This covers also the gaps.
750 3. Fix the semantic of the Cond to that of 2b)
752 Solution 2 seems to be the best:
753 Computing the gaps in the Firm representation is not too hard, i.e.,
754 libFIRM can implement a routine that transforms between the two
755 flavours. This is also possible for 1) but 2) does not require to
756 change any existing optimization.
757 Further it should be far simpler to determine the biggest constant than
759 I don't want to choose 3) as 2a) seems to have advantages for
760 dataflow analysis and 3) does not allow to convert the representation to
764 get_Cond_selector (ir_node *node) {
765 assert (node->op == op_Cond);
766 return get_irn_n(node, 0);
770 set_Cond_selector (ir_node *node, ir_node *selector) {
771 assert (node->op == op_Cond);
772 set_irn_n(node, 0, selector);
776 get_Cond_kind (ir_node *node) {
777 assert (node->op == op_Cond);
778 return node->attr.c.kind;
782 set_Cond_kind (ir_node *node, cond_kind kind) {
783 assert (node->op == op_Cond);
784 node->attr.c.kind = kind;
788 get_Cond_defaultProj (ir_node *node) {
789 assert (node->op == op_Cond);
790 return node->attr.c.default_proj;
794 get_Return_mem (ir_node *node) {
795 assert (node->op == op_Return);
796 return get_irn_n(node, 0);
800 set_Return_mem (ir_node *node, ir_node *mem) {
801 assert (node->op == op_Return);
802 set_irn_n(node, 0, mem);
806 get_Return_n_ress (ir_node *node) {
807 assert (node->op == op_Return);
808 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
812 get_Return_res_arr (ir_node *node)
814 assert ((node->op == op_Return));
815 if (get_Return_n_ress(node) > 0)
816 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
823 set_Return_n_res (ir_node *node, int results) {
824 assert (node->op == op_Return);
829 get_Return_res (ir_node *node, int pos) {
830 assert (node->op == op_Return);
831 assert (get_Return_n_ress(node) > pos);
832 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
836 set_Return_res (ir_node *node, int pos, ir_node *res){
837 assert (node->op == op_Return);
838 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
842 get_Raise_mem (ir_node *node) {
843 assert (node->op == op_Raise);
844 return get_irn_n(node, 0);
848 set_Raise_mem (ir_node *node, ir_node *mem) {
849 assert (node->op == op_Raise);
850 set_irn_n(node, 0, mem);
854 get_Raise_exo_ptr (ir_node *node) {
855 assert (node->op == op_Raise);
856 return get_irn_n(node, 1);
860 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
861 assert (node->op == op_Raise);
862 set_irn_n(node, 1, exo_ptr);
865 tarval *get_Const_tarval (ir_node *node) {
866 assert (node->op == op_Const);
867 return node->attr.con.tv;
871 set_Const_tarval (ir_node *node, tarval *con) {
872 assert (node->op == op_Const);
873 node->attr.con.tv = con;
877 /* The source language type. Must be an atomic type. Mode of type must
878 be mode of node. For tarvals from entities type must be pointer to
881 get_Const_type (ir_node *node) {
882 assert (node->op == op_Const);
883 return node->attr.con.tp;
887 set_Const_type (ir_node *node, type *tp) {
888 assert (node->op == op_Const);
889 if (tp != unknown_type) {
890 assert (is_atomic_type(tp));
891 assert (get_type_mode(tp) == get_irn_mode(node));
892 assert (!tarval_is_entity(get_Const_tarval(node)) ||
893 (is_pointer_type(tp) &&
894 (get_pointer_points_to_type(tp) ==
895 get_entity_type(get_tarval_entity(get_Const_tarval(node))))));
898 node->attr.con.tp = tp;
903 get_SymConst_kind (const ir_node *node) {
904 assert (node->op == op_SymConst);
905 return node->attr.i.num;
909 set_SymConst_kind (ir_node *node, symconst_kind num) {
910 assert (node->op == op_SymConst);
911 node->attr.i.num = num;
915 get_SymConst_type (ir_node *node) {
916 assert ( (node->op == op_SymConst)
917 && ( get_SymConst_kind(node) == symconst_type_tag
918 || get_SymConst_kind(node) == symconst_size));
919 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
923 set_SymConst_type (ir_node *node, type *tp) {
924 assert ( (node->op == op_SymConst)
925 && ( get_SymConst_kind(node) == symconst_type_tag
926 || get_SymConst_kind(node) == symconst_size));
927 node->attr.i.sym.type_p = tp;
931 get_SymConst_name (ir_node *node) {
932 assert ( (node->op == op_SymConst)
933 && (get_SymConst_kind(node) == symconst_addr_name));
934 return node->attr.i.sym.ident_p;
938 set_SymConst_name (ir_node *node, ident *name) {
939 assert ( (node->op == op_SymConst)
940 && (get_SymConst_kind(node) == symconst_addr_name));
941 node->attr.i.sym.ident_p = name;
945 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
946 entity *get_SymConst_entity (ir_node *node) {
947 assert ( (node->op == op_SymConst)
948 && (get_SymConst_kind (node) == symconst_addr_ent));
949 return node->attr.i.sym.entity_p;
952 void set_SymConst_entity (ir_node *node, entity *ent) {
953 assert ( (node->op == op_SymConst)
954 && (get_SymConst_kind(node) == symconst_addr_ent));
955 node->attr.i.sym.entity_p = ent;
959 union symconst_symbol
960 get_SymConst_symbol (ir_node *node) {
961 assert (node->op == op_SymConst);
962 return node->attr.i.sym;
966 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
967 assert (node->op == op_SymConst);
968 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
969 node->attr.i.sym = sym;
973 get_Sel_mem (ir_node *node) {
974 assert (node->op == op_Sel);
975 return get_irn_n(node, 0);
979 set_Sel_mem (ir_node *node, ir_node *mem) {
980 assert (node->op == op_Sel);
981 set_irn_n(node, 0, mem);
985 get_Sel_ptr (ir_node *node) {
986 assert (node->op == op_Sel);
987 return get_irn_n(node, 1);
991 set_Sel_ptr (ir_node *node, ir_node *ptr) {
992 assert (node->op == op_Sel);
993 set_irn_n(node, 1, ptr);
997 get_Sel_n_indexs (ir_node *node) {
998 assert (node->op == op_Sel);
999 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1003 get_Sel_index_arr (ir_node *node)
1005 assert ((node->op == op_Sel));
1006 if (get_Sel_n_indexs(node) > 0)
1007 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1013 get_Sel_index (ir_node *node, int pos) {
1014 assert (node->op == op_Sel);
1015 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1019 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1020 assert (node->op == op_Sel);
1021 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1025 get_Sel_entity (ir_node *node) {
1026 assert (node->op == op_Sel);
1027 return node->attr.s.ent;
1031 set_Sel_entity (ir_node *node, entity *ent) {
1032 assert (node->op == op_Sel);
1033 node->attr.s.ent = ent;
1037 get_InstOf_ent (ir_node *node) {
1038 assert (node->op = op_InstOf);
1039 return (node->attr.io.ent);
1043 set_InstOf_ent (ir_node *node, type *ent) {
1044 assert (node->op = op_InstOf);
1045 node->attr.io.ent = ent;
1049 get_InstOf_store (ir_node *node) {
1050 assert (node->op = op_InstOf);
1051 return (get_irn_n (node, 0));
1055 set_InstOf_store (ir_node *node, ir_node *obj) {
1056 assert (node->op = op_InstOf);
1057 set_irn_n (node, 0, obj);
1061 get_InstOf_obj (ir_node *node) {
1062 assert (node->op = op_InstOf);
1063 return (get_irn_n (node, 1));
1067 set_InstOf_obj (ir_node *node, ir_node *obj) {
1068 assert (node->op = op_InstOf);
1069 set_irn_n (node, 1, obj);
1073 /* For unary and binary arithmetic operations the access to the
1074 operands can be factored out. Left is the first, right the
1075 second arithmetic value as listed in tech report 0999-33.
1076 unops are: Minus, Abs, Not, Conv, Cast
1077 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1078 Shr, Shrs, Rotate, Cmp */
1082 get_Call_mem (ir_node *node) {
1083 assert (node->op == op_Call);
1084 return get_irn_n(node, 0);
1088 set_Call_mem (ir_node *node, ir_node *mem) {
1089 assert (node->op == op_Call);
1090 set_irn_n(node, 0, mem);
1094 get_Call_ptr (ir_node *node) {
1095 assert (node->op == op_Call);
1096 return get_irn_n(node, 1);
1100 set_Call_ptr (ir_node *node, ir_node *ptr) {
1101 assert (node->op == op_Call);
1102 set_irn_n(node, 1, ptr);
1106 get_Call_param_arr (ir_node *node) {
1107 assert (node->op == op_Call);
1108 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1112 get_Call_n_params (ir_node *node) {
1113 assert (node->op == op_Call);
1114 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1118 get_Call_arity (ir_node *node) {
1119 assert (node->op == op_Call);
1120 return get_Call_n_params(node);
1124 set_Call_arity (ir_node *node, ir_node *arity) {
1125 assert (node->op == op_Call);
1130 get_Call_param (ir_node *node, int pos) {
1131 assert (node->op == op_Call);
1132 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1136 set_Call_param (ir_node *node, int pos, ir_node *param) {
1137 assert (node->op == op_Call);
1138 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1142 get_Call_type (ir_node *node) {
1143 assert (node->op == op_Call);
1144 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1148 set_Call_type (ir_node *node, type *tp) {
1149 assert (node->op == op_Call);
1150 assert (is_method_type(tp));
1151 node->attr.call.cld_tp = tp;
1154 int Call_has_callees(ir_node *node) {
1155 return (node->attr.call.callee_arr != NULL);
1158 int get_Call_n_callees(ir_node * node) {
1159 assert(node->op == op_Call && node->attr.call.callee_arr);
1160 return ARR_LEN(node->attr.call.callee_arr);
1163 entity * get_Call_callee(ir_node * node, int pos) {
1164 assert(node->op == op_Call && node->attr.call.callee_arr);
1165 return node->attr.call.callee_arr[pos];
1168 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1169 assert(node->op == op_Call);
1170 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1171 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1173 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1176 void remove_Call_callee_arr(ir_node * node) {
1177 assert(node->op == op_Call);
1178 node->attr.call.callee_arr = NULL;
1181 ir_node * get_CallBegin_ptr (ir_node *node) {
1182 assert(node->op == op_CallBegin);
1183 return get_irn_n(node, 0);
1185 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1186 assert(node->op == op_CallBegin);
1187 set_irn_n(node, 0, ptr);
1189 ir_graph * get_CallBegin_irg (ir_node *node) {
1190 return get_irn_irg(node);
1192 ir_node * get_CallBegin_call (ir_node *node) {
1193 assert(node->op == op_CallBegin);
1194 return node->attr.callbegin.call;
1196 void set_CallBegin_call (ir_node *node, ir_node *call) {
1197 assert(node->op == op_CallBegin);
1198 node->attr.callbegin.call = call;
1202 get_FuncCall_ptr (ir_node *node) {
1203 assert (node->op == op_FuncCall);
1204 return get_irn_n(node, 0);
1208 set_FuncCall_ptr (ir_node *node, ir_node *ptr) {
1209 assert (node->op == op_FuncCall);
1210 set_irn_n(node, 0, ptr);
1214 get_FuncCall_param_arr (ir_node *node) {
1215 assert (node->op == op_FuncCall);
1216 return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET];
1220 get_FuncCall_n_params (ir_node *node) {
1221 assert (node->op == op_FuncCall);
1222 return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET);
1226 get_FuncCall_arity (ir_node *node) {
1227 assert (node->op == op_FuncCall);
1228 return get_FuncCall_n_params(node);
1232 set_FuncCall_arity (ir_node *node, ir_node *arity) {
1233 assert (node->op == op_FuncCall);
1238 get_FuncCall_param (ir_node *node, int pos) {
1239 assert (node->op == op_FuncCall);
1240 return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET);
1244 set_FuncCall_param (ir_node *node, int pos, ir_node *param) {
1245 assert (node->op == op_FuncCall);
1246 set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param);
1250 get_FuncCall_type (ir_node *node) {
1251 assert (node->op == op_FuncCall);
1252 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1256 set_FuncCall_type (ir_node *node, type *tp) {
1257 assert (node->op == op_FuncCall);
1258 assert (is_method_type(tp));
1259 node->attr.call.cld_tp = tp;
1262 int FuncCall_has_callees(ir_node *node) {
1263 return (node->attr.call.callee_arr != NULL);
1266 int get_FuncCall_n_callees(ir_node * node) {
1267 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1268 return ARR_LEN(node->attr.call.callee_arr);
1271 entity * get_FuncCall_callee(ir_node * node, int pos) {
1272 assert(node->op == op_FuncCall && node->attr.call.callee_arr);
1273 return node->attr.call.callee_arr[pos];
1276 void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) {
1277 assert(node->op == op_FuncCall);
1278 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1279 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1281 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1284 void remove_FuncCall_callee_arr(ir_node * node) {
1285 assert(node->op == op_FuncCall);
1286 node->attr.call.callee_arr = NULL;
1291 ir_node * get_##OP##_left(ir_node *node) { \
1292 assert(node->op == op_##OP); \
1293 return get_irn_n(node, node->op->op_index); \
1295 void set_##OP##_left(ir_node *node, ir_node *left) { \
1296 assert(node->op == op_##OP); \
1297 set_irn_n(node, node->op->op_index, left); \
1299 ir_node *get_##OP##_right(ir_node *node) { \
1300 assert(node->op == op_##OP); \
1301 return get_irn_n(node, node->op->op_index + 1); \
1303 void set_##OP##_right(ir_node *node, ir_node *right) { \
1304 assert(node->op == op_##OP); \
1305 set_irn_n(node, node->op->op_index + 1, right); \
1309 ir_node *get_##OP##_op(ir_node *node) { \
1310 assert(node->op == op_##OP); \
1311 return get_irn_n(node, node->op->op_index); \
1313 void set_##OP##_op (ir_node *node, ir_node *op) { \
1314 assert(node->op == op_##OP); \
1315 set_irn_n(node, node->op->op_index, op); \
1325 get_Quot_mem (ir_node *node) {
1326 assert (node->op == op_Quot);
1327 return get_irn_n(node, 0);
1331 set_Quot_mem (ir_node *node, ir_node *mem) {
1332 assert (node->op == op_Quot);
1333 set_irn_n(node, 0, mem);
1339 get_DivMod_mem (ir_node *node) {
1340 assert (node->op == op_DivMod);
1341 return get_irn_n(node, 0);
1345 set_DivMod_mem (ir_node *node, ir_node *mem) {
1346 assert (node->op == op_DivMod);
1347 set_irn_n(node, 0, mem);
1353 get_Div_mem (ir_node *node) {
1354 assert (node->op == op_Div);
1355 return get_irn_n(node, 0);
1359 set_Div_mem (ir_node *node, ir_node *mem) {
1360 assert (node->op == op_Div);
1361 set_irn_n(node, 0, mem);
1367 get_Mod_mem (ir_node *node) {
1368 assert (node->op == op_Mod);
1369 return get_irn_n(node, 0);
1373 set_Mod_mem (ir_node *node, ir_node *mem) {
1374 assert (node->op == op_Mod);
1375 set_irn_n(node, 0, mem);
1392 get_Cast_type (ir_node *node) {
1393 assert (node->op == op_Cast);
1394 return node->attr.cast.totype;
1398 set_Cast_type (ir_node *node, type *to_tp) {
1399 assert (node->op == op_Cast);
1400 node->attr.cast.totype = to_tp;
1404 is_unop (ir_node *node) {
1405 return (node->op->opar == oparity_unary);
1409 get_unop_op (ir_node *node) {
1410 if (node->op->opar == oparity_unary)
1411 return get_irn_n(node, node->op->op_index);
1413 assert(node->op->opar == oparity_unary);
1418 set_unop_op (ir_node *node, ir_node *op) {
1419 if (node->op->opar == oparity_unary)
1420 set_irn_n(node, node->op->op_index, op);
1422 assert(node->op->opar == oparity_unary);
1426 is_binop (ir_node *node) {
1427 return (node->op->opar == oparity_binary);
1431 get_binop_left (ir_node *node) {
1432 if (node->op->opar == oparity_binary)
1433 return get_irn_n(node, node->op->op_index);
1435 assert(node->op->opar == oparity_binary);
1440 set_binop_left (ir_node *node, ir_node *left) {
1441 if (node->op->opar == oparity_binary)
1442 set_irn_n(node, node->op->op_index, left);
1444 assert (node->op->opar == oparity_binary);
1448 get_binop_right (ir_node *node) {
1449 if (node->op->opar == oparity_binary)
1450 return get_irn_n(node, node->op->op_index + 1);
1452 assert(node->op->opar == oparity_binary);
1457 set_binop_right (ir_node *node, ir_node *right) {
1458 if (node->op->opar == oparity_binary)
1459 set_irn_n(node, node->op->op_index + 1, right);
1461 assert (node->op->opar == oparity_binary);
1464 int is_Phi (ir_node *n) {
1470 if (op == op_Filter) return interprocedural_view;
1473 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1474 (get_irn_arity(n) > 0));
1479 int is_Phi0 (ir_node *n) {
1482 return ((get_irn_op(n) == op_Phi) &&
1483 (get_irn_arity(n) == 0) &&
1484 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1488 get_Phi_preds_arr (ir_node *node) {
1489 assert (node->op == op_Phi);
1490 return (ir_node **)&(get_irn_in(node)[1]);
1494 get_Phi_n_preds (ir_node *node) {
1495 assert (is_Phi(node) || is_Phi0(node));
1496 return (get_irn_arity(node));
1500 void set_Phi_n_preds (ir_node *node, int n_preds) {
1501 assert (node->op == op_Phi);
1506 get_Phi_pred (ir_node *node, int pos) {
1507 assert (is_Phi(node) || is_Phi0(node));
1508 return get_irn_n(node, pos);
1512 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1513 assert (is_Phi(node) || is_Phi0(node));
1514 set_irn_n(node, pos, pred);
1518 get_Load_mem (ir_node *node) {
1519 assert (node->op == op_Load);
1520 return get_irn_n(node, 0);
1524 set_Load_mem (ir_node *node, ir_node *mem) {
1525 assert (node->op == op_Load);
1526 set_irn_n(node, 0, mem);
1530 get_Load_ptr (ir_node *node) {
1531 assert (node->op == op_Load);
1532 return get_irn_n(node, 1);
1536 set_Load_ptr (ir_node *node, ir_node *ptr) {
1537 assert (node->op == op_Load);
1538 set_irn_n(node, 1, ptr);
1543 get_Store_mem (ir_node *node) {
1544 assert (node->op == op_Store);
1545 return get_irn_n(node, 0);
1549 set_Store_mem (ir_node *node, ir_node *mem) {
1550 assert (node->op == op_Store);
1551 set_irn_n(node, 0, mem);
1555 get_Store_ptr (ir_node *node) {
1556 assert (node->op == op_Store);
1557 return get_irn_n(node, 1);
1561 set_Store_ptr (ir_node *node, ir_node *ptr) {
1562 assert (node->op == op_Store);
1563 set_irn_n(node, 1, ptr);
1567 get_Store_value (ir_node *node) {
1568 assert (node->op == op_Store);
1569 return get_irn_n(node, 2);
1573 set_Store_value (ir_node *node, ir_node *value) {
1574 assert (node->op == op_Store);
1575 set_irn_n(node, 2, value);
1579 get_Alloc_mem (ir_node *node) {
1580 assert (node->op == op_Alloc);
1581 return get_irn_n(node, 0);
1585 set_Alloc_mem (ir_node *node, ir_node *mem) {
1586 assert (node->op == op_Alloc);
1587 set_irn_n(node, 0, mem);
1591 get_Alloc_size (ir_node *node) {
1592 assert (node->op == op_Alloc);
1593 return get_irn_n(node, 1);
1597 set_Alloc_size (ir_node *node, ir_node *size) {
1598 assert (node->op == op_Alloc);
1599 set_irn_n(node, 1, size);
1603 get_Alloc_type (ir_node *node) {
1604 assert (node->op == op_Alloc);
1605 return node->attr.a.type = skip_tid(node->attr.a.type);
1609 set_Alloc_type (ir_node *node, type *tp) {
1610 assert (node->op == op_Alloc);
1611 node->attr.a.type = tp;
1615 get_Alloc_where (ir_node *node) {
1616 assert (node->op == op_Alloc);
1617 return node->attr.a.where;
1621 set_Alloc_where (ir_node *node, where_alloc where) {
1622 assert (node->op == op_Alloc);
1623 node->attr.a.where = where;
1628 get_Free_mem (ir_node *node) {
1629 assert (node->op == op_Free);
1630 return get_irn_n(node, 0);
1634 set_Free_mem (ir_node *node, ir_node *mem) {
1635 assert (node->op == op_Free);
1636 set_irn_n(node, 0, mem);
1640 get_Free_ptr (ir_node *node) {
1641 assert (node->op == op_Free);
1642 return get_irn_n(node, 1);
1646 set_Free_ptr (ir_node *node, ir_node *ptr) {
1647 assert (node->op == op_Free);
1648 set_irn_n(node, 1, ptr);
1652 get_Free_size (ir_node *node) {
1653 assert (node->op == op_Free);
1654 return get_irn_n(node, 2);
1658 set_Free_size (ir_node *node, ir_node *size) {
1659 assert (node->op == op_Free);
1660 set_irn_n(node, 2, size);
1664 get_Free_type (ir_node *node) {
1665 assert (node->op == op_Free);
1666 return node->attr.f = skip_tid(node->attr.f);
1670 set_Free_type (ir_node *node, type *tp) {
1671 assert (node->op == op_Free);
1676 get_Sync_preds_arr (ir_node *node) {
1677 assert (node->op == op_Sync);
1678 return (ir_node **)&(get_irn_in(node)[1]);
1682 get_Sync_n_preds (ir_node *node) {
1683 assert (node->op == op_Sync);
1684 return (get_irn_arity(node));
1689 set_Sync_n_preds (ir_node *node, int n_preds) {
1690 assert (node->op == op_Sync);
1695 get_Sync_pred (ir_node *node, int pos) {
1696 assert (node->op == op_Sync);
1697 return get_irn_n(node, pos);
1701 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1702 assert (node->op == op_Sync);
1703 set_irn_n(node, pos, pred);
1707 get_Proj_pred (ir_node *node) {
1708 assert (is_Proj(node));
1709 return get_irn_n(node, 0);
1713 set_Proj_pred (ir_node *node, ir_node *pred) {
1714 assert (is_Proj(node));
1715 set_irn_n(node, 0, pred);
1719 get_Proj_proj (ir_node *node) {
1720 assert (is_Proj(node));
1721 if (get_irn_opcode(node) == iro_Proj) {
1722 return node->attr.proj;
1724 assert(get_irn_opcode(node) == iro_Filter);
1725 return node->attr.filter.proj;
1730 set_Proj_proj (ir_node *node, long proj) {
1731 assert (node->op == op_Proj);
1732 node->attr.proj = proj;
1736 get_Tuple_preds_arr (ir_node *node) {
1737 assert (node->op == op_Tuple);
1738 return (ir_node **)&(get_irn_in(node)[1]);
1742 get_Tuple_n_preds (ir_node *node) {
1743 assert (node->op == op_Tuple);
1744 return (get_irn_arity(node));
1749 set_Tuple_n_preds (ir_node *node, int n_preds) {
1750 assert (node->op == op_Tuple);
1755 get_Tuple_pred (ir_node *node, int pos) {
1756 assert (node->op == op_Tuple);
1757 return get_irn_n(node, pos);
1761 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1762 assert (node->op == op_Tuple);
1763 set_irn_n(node, pos, pred);
1767 get_Id_pred (ir_node *node) {
1768 assert (node->op == op_Id);
1769 return get_irn_n(node, 0);
1773 set_Id_pred (ir_node *node, ir_node *pred) {
1774 assert (node->op == op_Id);
1775 set_irn_n(node, 0, pred);
1778 ir_node *get_Confirm_value (ir_node *node) {
1779 assert (node->op == op_Confirm);
1780 return get_irn_n(node, 0);
1782 void set_Confirm_value (ir_node *node, ir_node *value) {
1783 assert (node->op == op_Confirm);
1784 set_irn_n(node, 0, value);
1786 ir_node *get_Confirm_bound (ir_node *node) {
1787 assert (node->op == op_Confirm);
1788 return get_irn_n(node, 1);
1790 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1791 assert (node->op == op_Confirm);
1792 set_irn_n(node, 0, bound);
1794 pn_Cmp get_Confirm_cmp (ir_node *node) {
1795 assert (node->op == op_Confirm);
1796 return node->attr.confirm_cmp;
1798 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1799 assert (node->op == op_Confirm);
1800 node->attr.confirm_cmp = cmp;
1805 get_Filter_pred (ir_node *node) {
1806 assert(node->op == op_Filter);
1810 set_Filter_pred (ir_node *node, ir_node *pred) {
1811 assert(node->op == op_Filter);
1815 get_Filter_proj(ir_node *node) {
1816 assert(node->op == op_Filter);
1817 return node->attr.filter.proj;
1820 set_Filter_proj (ir_node *node, long proj) {
1821 assert(node->op == op_Filter);
1822 node->attr.filter.proj = proj;
1825 /* Don't use get_irn_arity, get_irn_n in implementation as access
1826 shall work independent of view!!! */
1827 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1828 assert(node->op == op_Filter);
1829 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1830 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1831 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1832 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1833 node->attr.filter.in_cg[0] = node->in[0];
1835 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1838 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1839 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1840 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1841 node->attr.filter.in_cg[pos + 1] = pred;
1843 int get_Filter_n_cg_preds(ir_node *node) {
1844 assert(node->op == op_Filter && node->attr.filter.in_cg);
1845 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1847 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1849 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1851 arity = ARR_LEN(node->attr.filter.in_cg);
1852 assert(pos < arity - 1);
1853 return node->attr.filter.in_cg[pos + 1];
1858 get_irn_irg(ir_node *node) {
1859 if (get_irn_op(node) != op_Block)
1860 node = get_nodes_block(node);
1861 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1862 node = get_nodes_block(node);
1863 assert(get_irn_op(node) == op_Block);
1864 return node->attr.block.irg;
1868 /*----------------------------------------------------------------*/
1869 /* Auxiliary routines */
1870 /*----------------------------------------------------------------*/
1873 skip_Proj (ir_node *node) {
1874 /* don't assert node !!! */
1875 if (node && is_Proj(node)) {
1876 return get_Proj_pred(node);
1883 skip_Tuple (ir_node *node) {
1886 if (!get_opt_normalize()) return node;
1888 node = skip_nop(node);
1889 if (get_irn_op(node) == op_Proj) {
1890 pred = skip_nop(get_Proj_pred(node));
1891 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1892 pred = skip_nop(skip_Tuple(pred));
1893 if (get_irn_op(pred) == op_Tuple)
1894 return get_Tuple_pred(pred, get_Proj_proj(node));
1899 /** returns operand of node if node is a Cast */
1900 ir_node *skip_Cast (ir_node *node) {
1901 if (node && get_irn_op(node) == op_Cast) {
1902 return skip_nop(get_irn_n(node, 0));
1909 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1910 than any other approach, as Id chains are resolved and all point to the real node, or
1911 all id's are self loops. */
1913 skip_nop (ir_node *node) {
1914 /* don't assert node !!! */
1916 if (!get_opt_normalize()) return node;
1918 /* Don't use get_Id_pred: We get into an endless loop for
1919 self-referencing Ids. */
1920 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1921 ir_node *rem_pred = node->in[0+1];
1924 assert (get_irn_arity (node) > 0);
1926 node->in[0+1] = node;
1927 res = skip_nop(rem_pred);
1928 if (res->op == op_Id) /* self-loop */ return node;
1930 node->in[0+1] = res;
1937 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1938 than any other approach, as Id chains are resolved and all point to the real node, or
1939 all id's are self loops. */
1941 skip_nop (ir_node *node) {
1943 /* don't assert node !!! */
1945 if (!node || (node->op != op_Id)) return node;
1947 if (!get_opt_normalize()) return node;
1949 /* Don't use get_Id_pred: We get into an endless loop for
1950 self-referencing Ids. */
1951 pred = node->in[0+1];
1953 if (pred->op != op_Id) return pred;
1955 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1956 ir_node *rem_pred, *res;
1958 if (pred->op != op_Id) return pred; /* shortcut */
1961 assert (get_irn_arity (node) > 0);
1963 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1964 res = skip_nop(rem_pred);
1965 if (res->op == op_Id) /* self-loop */ return node;
1967 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1976 skip_Id (ir_node *node) {
1977 return skip_nop(node);
1981 is_Bad (ir_node *node) {
1983 if ((node) && get_irn_opcode(node) == iro_Bad)
1989 is_no_Block (ir_node *node) {
1991 return (get_irn_opcode(node) != iro_Block);
1995 is_Block (ir_node *node) {
1997 return (get_irn_opcode(node) == iro_Block);
2000 /* returns true if node is a Unknown node. */
2002 is_Unknown (ir_node *node) {
2004 return (get_irn_opcode(node) == iro_Unknown);
2008 is_Proj (const ir_node *node) {
2010 return node->op == op_Proj
2011 || (!interprocedural_view && node->op == op_Filter);
2014 /* Returns true if the operation manipulates control flow. */
2016 is_cfop(ir_node *node) {
2017 return is_cfopcode(get_irn_op(node));
2020 /* Returns true if the operation manipulates interprocedural control flow:
2021 CallBegin, EndReg, EndExcept */
2022 int is_ip_cfop(ir_node *node) {
2023 return is_ip_cfopcode(get_irn_op(node));
2026 ir_graph *get_ip_cfop_irg(ir_node *n) {
2027 return get_irn_irg(n);
2030 /* Returns true if the operation can change the control flow because
2033 is_fragile_op(ir_node *node) {
2034 return is_op_fragile(get_irn_op(node));
2037 /* Returns the memory operand of fragile operations. */
2038 ir_node *get_fragile_op_mem(ir_node *node) {
2039 assert(node && is_fragile_op(node));
2041 switch (get_irn_opcode (node)) {
2050 return get_irn_n(node, 0);
2055 assert(0 && "should not be reached");
2060 #ifdef DEBUG_libfirm
2061 void dump_irn (ir_node *n) {
2062 int i, arity = get_irn_arity(n);
2063 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2065 ir_node *pred = get_irn_n(n, -1);
2066 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2067 get_irn_node_nr(pred), (void *)pred);
2069 printf(" preds: \n");
2070 for (i = 0; i < arity; ++i) {
2071 ir_node *pred = get_irn_n(n, i);
2072 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2073 get_irn_node_nr(pred), (void *)pred);
2077 #else /* DEBUG_libfirm */
2078 void dump_irn (ir_node *n) {}
2079 #endif /* DEBUG_libfirm */