/*
 * File name:   ir/ir/irnode.c
 * Purpose:     Representation of an intermediate operation.
 * Author:      Martin Trapp, Christian Schaefer
 * Modified by: Goetz Lindenmaier
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
 */
#include <assert.h>
#include <stddef.h>
#include <string.h>

#include "irgraph_t.h"
#include "irbackedge_t.h"
/* some constants fixing the positions of nodes predecessors
   in the in array */
#define CALL_PARAM_OFFSET 2
#define FUNCCALL_PARAM_OFFSET 1
#define SEL_INDEX_OFFSET 2
#define RETURN_RESULT_OFFSET 1  /* mem is not a result */
#define END_KEEPALIVE_OFFSET 0
/* Name strings for the 16 pnc (compare) condition codes, indexed by code. */
static const char *pnc_name_arr [] = {
  "False", "Eq", "Lt", "Le",
  "Gt", "Ge", "Lg", "Leg", "Uo",
  "Ue", "Ul", "Ule", "Ug", "Uge",
  "Ne", "True"
};

/**
 * Returns the pnc name from a pnc constant.
 */
const char *get_pnc_string(int pnc) {
  /* guard against out-of-range codes instead of reading past the table */
  assert(pnc >= 0 && pnc < (int)(sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0])));
  return pnc_name_arr[pnc];
}
53 * Calculates the negated pnc condition.
56 get_negated_pnc(int pnc) {
58 case False: return True; break;
59 case Eq: return Ne; break;
60 case Lt: return Uge; break;
61 case Le: return Ug; break;
62 case Gt: return Ule; break;
63 case Ge: return Ul; break;
64 case Lg: return Ue; break;
65 case Leg: return Uo; break;
66 case Uo: return Leg; break;
67 case Ue: return Lg; break;
68 case Ul: return Ge; break;
69 case Ule: return Gt; break;
70 case Ug: return Le; break;
71 case Uge: return Lt; break;
72 case Ne: return Eq; break;
73 case True: return False; break;
75 return 99; /* to shut up gcc */
/* Name strings for the projections out of the Start node. */
const char *pns_name_arr [] = {
  "initial_exec", "global_store",
  "frame_base", "globals", "args"
};

/* Name strings for the symconst_kind variants, indexed by kind. */
const char *symconst_name_arr [] = {
  "type_tag", "size", "addr_name", "addr_ent"
};
94 * Create a new irnode in irg, with an op, mode, arity and
95 * some incoming irnodes.
96 * If arity is negative, a node with a dynamic array is created.
99 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
100 int arity, ir_node **in)
103 int node_size = offsetof (ir_node, attr) + op->attr_size;
105 assert(irg && op && mode);
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
107 memset((void *)res, 0, node_size);
109 res->kind = k_ir_node;
115 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
117 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
118 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
121 set_irn_dbg_info(res, db);
125 res->node_nr = get_irp_new_node_nr();
133 /* Copies all attributes stored in the old node to the new node.
134 Assumes both have the same opcode and sufficient size. */
136 copy_attrs (const ir_node *old_node, ir_node *new_node) {
137 assert(get_irn_op(old_node) == get_irn_op(new_node));
138 memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node)));
139 if (get_irn_op(new_node) == op_Call) remove_Call_callee_arr(new_node);
142 /*-- getting some parameters from ir_nodes --*/
145 (is_ir_node)(const void *thing) {
146 return __is_ir_node(thing);
150 (get_irn_intra_arity)(const ir_node *node) {
151 return __get_irn_intra_arity(node);
155 (get_irn_inter_arity)(const ir_node *node) {
156 return __get_irn_inter_arity(node);
159 int (*__get_irn_arity)(const ir_node *node) = __get_irn_intra_arity;
162 (get_irn_arity)(const ir_node *node) {
163 return __get_irn_arity(node);
166 /* Returns the array with ins. This array is shifted with respect to the
167 array accessed by get_irn_n: The block operand is at position 0 not -1.
168 (@@@ This should be changed.)
169 The order of the predecessors in this array is not guaranteed, except that
170 lists of operands as predecessors of Block or arguments of a Call are
173 get_irn_in (const ir_node *node) {
175 if (get_interprocedural_view()) { /* handle Filter and Block specially */
176 if (get_irn_opcode(node) == iro_Filter) {
177 assert(node->attr.filter.in_cg);
178 return node->attr.filter.in_cg;
179 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
180 return node->attr.block.in_cg;
182 /* else fall through */
188 set_irn_in (ir_node *node, int arity, ir_node **in) {
191 if (get_interprocedural_view()) { /* handle Filter and Block specially */
192 if (get_irn_opcode(node) == iro_Filter) {
193 assert(node->attr.filter.in_cg);
194 arr = &node->attr.filter.in_cg;
195 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
196 arr = &node->attr.block.in_cg;
203 if (arity != ARR_LEN(*arr) - 1) {
204 ir_node * block = (*arr)[0];
205 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
208 fix_backedges(current_ir_graph->obst, node);
209 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
213 (get_irn_intra_n)(ir_node *node, int n) {
214 return __get_irn_intra_n (node, n);
218 (get_irn_inter_n)(ir_node *node, int n) {
219 return __get_irn_inter_n (node, n);
222 ir_node *(*__get_irn_n)(ir_node *node, int n) = __get_irn_intra_n;
225 (get_irn_n)(ir_node *node, int n) {
226 return __get_irn_n(node, n);
230 set_irn_n (ir_node *node, int n, ir_node *in) {
231 assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node));
232 assert(in && in->kind == k_ir_node);
233 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
234 /* Change block pred in both views! */
235 node->in[n + 1] = in;
236 assert(node->attr.filter.in_cg);
237 node->attr.filter.in_cg[n + 1] = in;
240 if (get_interprocedural_view()) { /* handle Filter and Block specially */
241 if (get_irn_opcode(node) == iro_Filter) {
242 assert(node->attr.filter.in_cg);
243 node->attr.filter.in_cg[n + 1] = in;
245 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
246 node->attr.block.in_cg[n + 1] = in;
249 /* else fall through */
251 node->in[n + 1] = in;
255 (get_irn_mode)(const ir_node *node) {
256 return __get_irn_mode(node);
260 (set_irn_mode)(ir_node *node, ir_mode *mode)
262 __set_irn_mode(node, mode);
266 get_irn_modecode (const ir_node *node)
269 return node->mode->code;
272 /** Gets the string representation of the mode .*/
274 get_irn_modename (const ir_node *node)
277 return get_mode_name(node->mode);
281 get_irn_modeident (const ir_node *node)
284 return get_mode_ident(node->mode);
288 (get_irn_op)(const ir_node *node)
290 return __get_irn_op(node);
293 /* should be private to the library: */
295 set_irn_op (ir_node *node, ir_op *op)
302 (get_irn_opcode)(const ir_node *node)
304 return __get_irn_opcode(node);
308 get_irn_opname (const ir_node *node)
311 if ((get_irn_op((ir_node *)node) == op_Phi) &&
312 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
313 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
314 return get_id_str(node->op->name);
318 get_irn_opident (const ir_node *node)
321 return node->op->name;
325 (get_irn_visited)(const ir_node *node)
327 return __get_irn_visited(node);
331 (set_irn_visited)(ir_node *node, unsigned long visited)
333 __set_irn_visited(node, visited);
337 (mark_irn_visited)(ir_node *node) {
338 __mark_irn_visited(node);
342 (irn_not_visited)(const ir_node *node) {
343 return __irn_not_visited(node);
347 (irn_visited)(const ir_node *node) {
348 return __irn_visited(node);
352 (set_irn_link)(ir_node *node, void *link) {
353 __set_irn_link(node, link);
357 (get_irn_link)(const ir_node *node) {
358 return __get_irn_link(node);
362 (get_irn_pinned)(const ir_node *node) {
363 return __get_irn_pinned(node);
366 void set_irn_pinned(ir_node *node, op_pin_state state) {
367 /* due to optimization an opt may be turned into a Tuple */
368 if (get_irn_op(node) == op_Tuple)
371 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
372 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
374 node->attr.except.pin_state = state;
377 #ifdef DO_HEAPANALYSIS
378 /* Access the abstract interpretation information of a node.
379 Returns NULL if no such information is available. */
380 struct abstval *get_irn_abst_value(ir_node *n) {
383 /* Set the abstract interpretation information of a node. */
384 void set_irn_abst_value(ir_node *n, struct abstval *os) {
387 struct section *firm_get_irn_section(ir_node *n) {
390 void firm_set_irn_section(ir_node *n, struct section *s) {
394 /* Dummies needed for firmjni. */
395 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
396 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
397 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
398 void firm_set_irn_section(ir_node *n, struct section *s) {}
399 #endif /* DO_HEAPANALYSIS */
402 /* Outputs a unique number for this node */
404 get_irn_node_nr(const ir_node *node) {
407 return node->node_nr;
414 get_irn_const_attr (ir_node *node)
416 assert (node->op == op_Const);
417 return node->attr.con;
421 get_irn_proj_attr (ir_node *node)
423 assert (node->op == op_Proj);
424 return node->attr.proj;
428 get_irn_alloc_attr (ir_node *node)
430 assert (node->op == op_Alloc);
435 get_irn_free_attr (ir_node *node)
437 assert (node->op == op_Free);
438 return node->attr.f = skip_tid(node->attr.f);
442 get_irn_symconst_attr (ir_node *node)
444 assert (node->op == op_SymConst);
449 get_irn_call_attr (ir_node *node)
451 assert (node->op == op_Call);
452 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
456 get_irn_sel_attr (ir_node *node)
458 assert (node->op == op_Sel);
463 get_irn_phi_attr (ir_node *node)
465 assert (node->op == op_Phi);
466 return node->attr.phi0_pos;
470 get_irn_block_attr (ir_node *node)
472 assert (node->op == op_Block);
473 return node->attr.block;
477 get_irn_load_attr (ir_node *node)
479 assert (node->op == op_Load);
480 return node->attr.load;
484 get_irn_store_attr (ir_node *node)
486 assert (node->op == op_Store);
487 return node->attr.store;
491 get_irn_except_attr (ir_node *node)
493 assert (node->op == op_Div || node->op == op_Quot ||
494 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
495 return node->attr.except;
498 /** manipulate fields of individual nodes **/
500 /* this works for all except Block */
502 get_nodes_block (ir_node *node) {
503 assert (!(node->op == op_Block));
504 return get_irn_n(node, -1);
508 set_nodes_block (ir_node *node, ir_node *block) {
509 assert (!(node->op == op_Block));
510 set_irn_n(node, -1, block);
513 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
514 * from Start. If so returns frame type, else Null. */
515 type *is_frame_pointer(ir_node *n) {
516 if ((get_irn_op(n) == op_Proj) &&
517 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
518 ir_node *start = get_Proj_pred(n);
519 if (get_irn_op(start) == op_Start) {
520 return get_irg_frame_type(get_irn_irg(start));
526 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
527 * from Start. If so returns global type, else Null. */
528 type *is_globals_pointer(ir_node *n) {
529 if ((get_irn_op(n) == op_Proj) &&
530 (get_Proj_proj(n) == pn_Start_P_globals)) {
531 ir_node *start = get_Proj_pred(n);
532 if (get_irn_op(start) == op_Start) {
533 return get_glob_type();
539 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
540 * from Start. If so returns 1, else 0. */
541 int is_value_arg_pointer(ir_node *n) {
542 if ((get_irn_op(n) == op_Proj) &&
543 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
544 (get_irn_op(get_Proj_pred(n)) == op_Start))
549 /* Returns an array with the predecessors of the Block. Depending on
550 the implementation of the graph data structure this can be a copy of
551 the internal representation of predecessors as well as the internal
552 array itself. Therefore writing to this array might obstruct the ir. */
554 get_Block_cfgpred_arr (ir_node *node)
556 assert ((node->op == op_Block));
557 return (ir_node **)&(get_irn_in(node)[1]);
562 get_Block_n_cfgpreds (ir_node *node) {
563 assert ((node->op == op_Block));
564 return get_irn_arity(node);
568 get_Block_cfgpred (ir_node *node, int pos) {
569 assert(-1 <= pos && pos < get_irn_arity(node));
570 assert(node->op == op_Block);
571 return get_irn_n(node, pos);
575 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
576 assert (node->op == op_Block);
577 set_irn_n(node, pos, pred);
581 get_Block_matured (ir_node *node) {
582 assert (node->op == op_Block);
583 return node->attr.block.matured;
587 set_Block_matured (ir_node *node, bool matured) {
588 assert (node->op == op_Block);
589 node->attr.block.matured = matured;
592 get_Block_block_visited (ir_node *node) {
593 assert (node->op == op_Block);
594 return node->attr.block.block_visited;
598 set_Block_block_visited (ir_node *node, unsigned long visit) {
599 assert (node->op == op_Block);
600 node->attr.block.block_visited = visit;
603 /* For this current_ir_graph must be set. */
605 mark_Block_block_visited (ir_node *node) {
606 assert (node->op == op_Block);
607 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
611 Block_not_block_visited(ir_node *node) {
612 assert (node->op == op_Block);
613 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
617 get_Block_graph_arr (ir_node *node, int pos) {
618 assert (node->op == op_Block);
619 return node->attr.block.graph_arr[pos+1];
623 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
624 assert (node->op == op_Block);
625 node->attr.block.graph_arr[pos+1] = value;
628 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
629 assert(node->op == op_Block);
630 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
631 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
632 node->attr.block.in_cg[0] = NULL;
633 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
635 /* Fix backedge array. fix_backedges operates depending on
636 interprocedural_view. */
637 int ipv = get_interprocedural_view();
638 set_interprocedural_view(true);
639 fix_backedges(current_ir_graph->obst, node);
640 set_interprocedural_view(ipv);
643 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
646 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
647 assert(node->op == op_Block &&
648 node->attr.block.in_cg &&
649 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
650 node->attr.block.in_cg[pos + 1] = pred;
653 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
654 assert(node->op == op_Block);
655 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
658 int get_Block_cg_n_cfgpreds(ir_node * node) {
659 assert(node->op == op_Block);
660 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
663 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
664 assert(node->op == op_Block && node->attr.block.in_cg);
665 return node->attr.block.in_cg[pos + 1];
668 void remove_Block_cg_cfgpred_arr(ir_node * node) {
669 assert(node->op == op_Block);
670 node->attr.block.in_cg = NULL;
674 set_Start_irg(ir_node *node, ir_graph *irg) {
675 assert(node->op == op_Start);
676 assert(is_ir_graph(irg));
677 assert(0 && " Why set irg? -- use set_irn_irg");
681 get_End_n_keepalives(ir_node *end) {
682 assert (end->op == op_End);
683 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
687 get_End_keepalive(ir_node *end, int pos) {
688 assert (end->op == op_End);
689 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
693 add_End_keepalive (ir_node *end, ir_node *ka) {
694 assert (end->op == op_End);
695 ARR_APP1 (ir_node *, end->in, ka);
699 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
700 assert (end->op == op_End);
701 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
705 free_End (ir_node *end) {
706 assert (end->op == op_End);
708 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
709 end->in = NULL; /* @@@ make sure we get an error if we use the
710 in array afterwards ... */
/*
 > Implementing the case construct (which is where the constant Proj node is
 > important) involves far more than simply determining the constant values.
 > We could argue that this is more properly a function of the translator from
 > Firm to the target machine.  That could be done if there was some way of
 > projecting "default" out of the Cond node.
 I know it's complicated.
 Basically there are two problems:
  - determining the gaps between the projs
  - determining the biggest case constant to know the proj number for
    the default node.
 I see several solutions:
 1. Introduce a ProjDefault node.  Solves both problems.
    This means to extend all optimizations executed during construction.
 2. Give the Cond node for switch two flavors:
    a) there are no gaps in the projs  (existing flavor)
    b) gaps may exist, default proj is still the Proj with the largest
       projection number.  This covers also the gaps.
 3. Fix the semantic of the Cond to that of 2b).

 Solution 2 seems to be the best:
 Computing the gaps in the Firm representation is not too hard, i.e.,
 libFIRM can implement a routine that transforms between the two
 flavours.  This is also possible for 1) but 2) does not require to
 change any existing optimization.
 Further it should be far simpler to determine the biggest constant than
 to compute all gaps.
 I don't want to choose 3) as 2a) seems to have advantages for
 dataflow analysis and 3) does not allow to convert the representation to 2a).
*/
746 get_Cond_selector (ir_node *node) {
747 assert (node->op == op_Cond);
748 return get_irn_n(node, 0);
752 set_Cond_selector (ir_node *node, ir_node *selector) {
753 assert (node->op == op_Cond);
754 set_irn_n(node, 0, selector);
758 get_Cond_kind (ir_node *node) {
759 assert (node->op == op_Cond);
760 return node->attr.c.kind;
764 set_Cond_kind (ir_node *node, cond_kind kind) {
765 assert (node->op == op_Cond);
766 node->attr.c.kind = kind;
770 get_Cond_defaultProj (ir_node *node) {
771 assert (node->op == op_Cond);
772 return node->attr.c.default_proj;
776 get_Return_mem (ir_node *node) {
777 assert (node->op == op_Return);
778 return get_irn_n(node, 0);
782 set_Return_mem (ir_node *node, ir_node *mem) {
783 assert (node->op == op_Return);
784 set_irn_n(node, 0, mem);
788 get_Return_n_ress (ir_node *node) {
789 assert (node->op == op_Return);
790 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
794 get_Return_res_arr (ir_node *node)
796 assert ((node->op == op_Return));
797 if (get_Return_n_ress(node) > 0)
798 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
805 set_Return_n_res (ir_node *node, int results) {
806 assert (node->op == op_Return);
811 get_Return_res (ir_node *node, int pos) {
812 assert (node->op == op_Return);
813 assert (get_Return_n_ress(node) > pos);
814 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
818 set_Return_res (ir_node *node, int pos, ir_node *res){
819 assert (node->op == op_Return);
820 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
824 get_Raise_mem (ir_node *node) {
825 assert (node->op == op_Raise);
826 return get_irn_n(node, 0);
830 set_Raise_mem (ir_node *node, ir_node *mem) {
831 assert (node->op == op_Raise);
832 set_irn_n(node, 0, mem);
836 get_Raise_exo_ptr (ir_node *node) {
837 assert (node->op == op_Raise);
838 return get_irn_n(node, 1);
842 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
843 assert (node->op == op_Raise);
844 set_irn_n(node, 1, exo_ptr);
847 tarval *get_Const_tarval (ir_node *node) {
848 assert (node->op == op_Const);
849 return node->attr.con.tv;
853 set_Const_tarval (ir_node *node, tarval *con) {
854 assert (node->op == op_Const);
855 node->attr.con.tv = con;
859 /* The source language type. Must be an atomic type. Mode of type must
860 be mode of node. For tarvals from entities type must be pointer to
863 get_Const_type (ir_node *node) {
864 assert (node->op == op_Const);
865 return node->attr.con.tp;
869 set_Const_type (ir_node *node, type *tp) {
870 assert (node->op == op_Const);
871 if (tp != unknown_type) {
872 assert (is_atomic_type(tp));
873 assert (get_type_mode(tp) == get_irn_mode(node));
875 node->attr.con.tp = tp;
880 get_SymConst_kind (const ir_node *node) {
881 assert (node->op == op_SymConst);
882 return node->attr.i.num;
886 set_SymConst_kind (ir_node *node, symconst_kind num) {
887 assert (node->op == op_SymConst);
888 node->attr.i.num = num;
892 get_SymConst_type (ir_node *node) {
893 assert ( (node->op == op_SymConst)
894 && ( get_SymConst_kind(node) == symconst_type_tag
895 || get_SymConst_kind(node) == symconst_size));
896 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
900 set_SymConst_type (ir_node *node, type *tp) {
901 assert ( (node->op == op_SymConst)
902 && ( get_SymConst_kind(node) == symconst_type_tag
903 || get_SymConst_kind(node) == symconst_size));
904 node->attr.i.sym.type_p = tp;
908 get_SymConst_name (ir_node *node) {
909 assert ( (node->op == op_SymConst)
910 && (get_SymConst_kind(node) == symconst_addr_name));
911 return node->attr.i.sym.ident_p;
915 set_SymConst_name (ir_node *node, ident *name) {
916 assert ( (node->op == op_SymConst)
917 && (get_SymConst_kind(node) == symconst_addr_name));
918 node->attr.i.sym.ident_p = name;
922 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
923 entity *get_SymConst_entity (ir_node *node) {
924 assert ( (node->op == op_SymConst)
925 && (get_SymConst_kind (node) == symconst_addr_ent));
926 return node->attr.i.sym.entity_p;
929 void set_SymConst_entity (ir_node *node, entity *ent) {
930 assert ( (node->op == op_SymConst)
931 && (get_SymConst_kind(node) == symconst_addr_ent));
932 node->attr.i.sym.entity_p = ent;
935 union symconst_symbol
936 get_SymConst_symbol (ir_node *node) {
937 assert (node->op == op_SymConst);
938 return node->attr.i.sym;
942 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
943 assert (node->op == op_SymConst);
944 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
945 node->attr.i.sym = sym;
949 get_SymConst_value_type (ir_node *node) {
950 assert (node->op == op_SymConst);
951 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
952 return node->attr.i.tp;
956 set_SymConst_value_type (ir_node *node, type *tp) {
957 assert (node->op == op_SymConst);
958 node->attr.i.tp = tp;
962 get_Sel_mem (ir_node *node) {
963 assert (node->op == op_Sel);
964 return get_irn_n(node, 0);
968 set_Sel_mem (ir_node *node, ir_node *mem) {
969 assert (node->op == op_Sel);
970 set_irn_n(node, 0, mem);
974 get_Sel_ptr (ir_node *node) {
975 assert (node->op == op_Sel);
976 return get_irn_n(node, 1);
980 set_Sel_ptr (ir_node *node, ir_node *ptr) {
981 assert (node->op == op_Sel);
982 set_irn_n(node, 1, ptr);
986 get_Sel_n_indexs (ir_node *node) {
987 assert (node->op == op_Sel);
988 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
992 get_Sel_index_arr (ir_node *node)
994 assert ((node->op == op_Sel));
995 if (get_Sel_n_indexs(node) > 0)
996 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1002 get_Sel_index (ir_node *node, int pos) {
1003 assert (node->op == op_Sel);
1004 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1008 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1009 assert (node->op == op_Sel);
1010 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1014 get_Sel_entity (ir_node *node) {
1015 assert (node->op == op_Sel);
1016 return node->attr.s.ent;
1020 set_Sel_entity (ir_node *node, entity *ent) {
1021 assert (node->op == op_Sel);
1022 node->attr.s.ent = ent;
1026 get_InstOf_ent (ir_node *node) {
1027 assert (node->op = op_InstOf);
1028 return (node->attr.io.ent);
1032 set_InstOf_ent (ir_node *node, type *ent) {
1033 assert (node->op = op_InstOf);
1034 node->attr.io.ent = ent;
1038 get_InstOf_store (ir_node *node) {
1039 assert (node->op = op_InstOf);
1040 return (get_irn_n (node, 0));
1044 set_InstOf_store (ir_node *node, ir_node *obj) {
1045 assert (node->op = op_InstOf);
1046 set_irn_n (node, 0, obj);
1050 get_InstOf_obj (ir_node *node) {
1051 assert (node->op = op_InstOf);
1052 return (get_irn_n (node, 1));
1056 set_InstOf_obj (ir_node *node, ir_node *obj) {
1057 assert (node->op = op_InstOf);
1058 set_irn_n (node, 1, obj);
/* For unary and binary arithmetic operations the access to the
   operands can be factored out.  Left is the first, right the
   second arithmetic value as listed in tech report 0999-33.
   unops are: Minus, Abs, Not, Conv, Cast
   binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
   Shr, Shrs, Rotate, Cmp */
1071 get_Call_mem (ir_node *node) {
1072 assert (node->op == op_Call);
1073 return get_irn_n(node, 0);
1077 set_Call_mem (ir_node *node, ir_node *mem) {
1078 assert (node->op == op_Call);
1079 set_irn_n(node, 0, mem);
1083 get_Call_ptr (ir_node *node) {
1084 assert (node->op == op_Call);
1085 return get_irn_n(node, 1);
1089 set_Call_ptr (ir_node *node, ir_node *ptr) {
1090 assert (node->op == op_Call);
1091 set_irn_n(node, 1, ptr);
1095 get_Call_param_arr (ir_node *node) {
1096 assert (node->op == op_Call);
1097 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1101 get_Call_n_params (ir_node *node) {
1102 assert (node->op == op_Call);
1103 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1107 get_Call_arity (ir_node *node) {
1108 assert (node->op == op_Call);
1109 return get_Call_n_params(node);
1113 set_Call_arity (ir_node *node, ir_node *arity) {
1114 assert (node->op == op_Call);
1119 get_Call_param (ir_node *node, int pos) {
1120 assert (node->op == op_Call);
1121 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1125 set_Call_param (ir_node *node, int pos, ir_node *param) {
1126 assert (node->op == op_Call);
1127 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1131 get_Call_type (ir_node *node) {
1132 assert (node->op == op_Call);
1133 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1137 set_Call_type (ir_node *node, type *tp) {
1138 assert (node->op == op_Call);
1139 assert ((get_unknown_type() == tp) || is_method_type(tp));
1140 node->attr.call.cld_tp = tp;
1143 int Call_has_callees(ir_node *node) {
1144 assert(node && node->op == op_Call);
1145 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1146 (node->attr.call.callee_arr != NULL));
1149 int get_Call_n_callees(ir_node * node) {
1150 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1151 return ARR_LEN(node->attr.call.callee_arr);
1154 entity * get_Call_callee(ir_node * node, int pos) {
1155 assert(pos >= 0 && pos < get_Call_n_callees(node));
1156 return node->attr.call.callee_arr[pos];
1159 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1160 assert(node->op == op_Call);
1161 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1162 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1164 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1167 void remove_Call_callee_arr(ir_node * node) {
1168 assert(node->op == op_Call);
1169 node->attr.call.callee_arr = NULL;
1172 ir_node * get_CallBegin_ptr (ir_node *node) {
1173 assert(node->op == op_CallBegin);
1174 return get_irn_n(node, 0);
1176 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1177 assert(node->op == op_CallBegin);
1178 set_irn_n(node, 0, ptr);
1180 ir_node * get_CallBegin_call (ir_node *node) {
1181 assert(node->op == op_CallBegin);
1182 return node->attr.callbegin.call;
1184 void set_CallBegin_call (ir_node *node, ir_node *call) {
1185 assert(node->op == op_CallBegin);
1186 node->attr.callbegin.call = call;
/* Generates the four standard accessors for a binary operation OP:
   get/set left (op_index) and get/set right (op_index + 1). */
#define BINOP(OP)						\
ir_node * get_##OP##_left(ir_node *node) {			\
  assert(node->op == op_##OP);					\
  return get_irn_n(node, node->op->op_index);			\
}								\
								\
void set_##OP##_left(ir_node *node, ir_node *left) {		\
  assert(node->op == op_##OP);					\
  set_irn_n(node, node->op->op_index, left);			\
}								\
								\
ir_node *get_##OP##_right(ir_node *node) {			\
  assert(node->op == op_##OP);					\
  return get_irn_n(node, node->op->op_index + 1);		\
}								\
								\
void set_##OP##_right(ir_node *node, ir_node *right) {		\
  assert(node->op == op_##OP);					\
  set_irn_n(node, node->op->op_index + 1, right);		\
}

/* Generates the two standard accessors for a unary operation OP. */
#define UNOP(OP)						\
ir_node *get_##OP##_op(ir_node *node) {				\
  assert(node->op == op_##OP);					\
  return get_irn_n(node, node->op->op_index);			\
}								\
								\
void set_##OP##_op (ir_node *node, ir_node *op) {		\
  assert(node->op == op_##OP);					\
  set_irn_n(node, node->op->op_index, op);			\
}
1225 get_Quot_mem (ir_node *node) {
1226 assert (node->op == op_Quot);
1227 return get_irn_n(node, 0);
1231 set_Quot_mem (ir_node *node, ir_node *mem) {
1232 assert (node->op == op_Quot);
1233 set_irn_n(node, 0, mem);
1239 get_DivMod_mem (ir_node *node) {
1240 assert (node->op == op_DivMod);
1241 return get_irn_n(node, 0);
1245 set_DivMod_mem (ir_node *node, ir_node *mem) {
1246 assert (node->op == op_DivMod);
1247 set_irn_n(node, 0, mem);
1253 get_Div_mem (ir_node *node) {
1254 assert (node->op == op_Div);
1255 return get_irn_n(node, 0);
1259 set_Div_mem (ir_node *node, ir_node *mem) {
1260 assert (node->op == op_Div);
1261 set_irn_n(node, 0, mem);
1267 get_Mod_mem (ir_node *node) {
1268 assert (node->op == op_Mod);
1269 return get_irn_n(node, 0);
1273 set_Mod_mem (ir_node *node, ir_node *mem) {
1274 assert (node->op == op_Mod);
1275 set_irn_n(node, 0, mem);
1292 get_Cast_type (ir_node *node) {
1293 assert (node->op == op_Cast);
1294 return node->attr.cast.totype;
1298 set_Cast_type (ir_node *node, type *to_tp) {
1299 assert (node->op == op_Cast);
1300 node->attr.cast.totype = to_tp;
1304 (is_unop)(const ir_node *node) {
1305 return __is_unop(node);
1309 get_unop_op (ir_node *node) {
1310 if (node->op->opar == oparity_unary)
1311 return get_irn_n(node, node->op->op_index);
1313 assert(node->op->opar == oparity_unary);
1318 set_unop_op (ir_node *node, ir_node *op) {
1319 if (node->op->opar == oparity_unary)
1320 set_irn_n(node, node->op->op_index, op);
1322 assert(node->op->opar == oparity_unary);
1326 (is_binop)(const ir_node *node) {
1327 return __is_binop(node);
1331 get_binop_left (ir_node *node) {
1332 if (node->op->opar == oparity_binary)
1333 return get_irn_n(node, node->op->op_index);
1335 assert(node->op->opar == oparity_binary);
1340 set_binop_left (ir_node *node, ir_node *left) {
1341 if (node->op->opar == oparity_binary)
1342 set_irn_n(node, node->op->op_index, left);
1344 assert (node->op->opar == oparity_binary);
1348 get_binop_right (ir_node *node) {
1349 if (node->op->opar == oparity_binary)
1350 return get_irn_n(node, node->op->op_index + 1);
1352 assert(node->op->opar == oparity_binary);
1357 set_binop_right (ir_node *node, ir_node *right) {
1358 if (node->op->opar == oparity_binary)
1359 set_irn_n(node, node->op->op_index + 1, right);
1361 assert (node->op->opar == oparity_binary);
1364 int is_Phi (ir_node *n) {
1370 if (op == op_Filter) return get_interprocedural_view();
1373 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1374 (get_irn_arity(n) > 0));
1379 int is_Phi0 (ir_node *n) {
1382 return ((get_irn_op(n) == op_Phi) &&
1383 (get_irn_arity(n) == 0) &&
1384 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1388 get_Phi_preds_arr (ir_node *node) {
1389 assert (node->op == op_Phi);
1390 return (ir_node **)&(get_irn_in(node)[1]);
1394 get_Phi_n_preds (ir_node *node) {
1395 assert (is_Phi(node) || is_Phi0(node));
1396 return (get_irn_arity(node));
1400 void set_Phi_n_preds (ir_node *node, int n_preds) {
1401 assert (node->op == op_Phi);
1406 get_Phi_pred (ir_node *node, int pos) {
1407 assert (is_Phi(node) || is_Phi0(node));
1408 return get_irn_n(node, pos);
1412 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1413 assert (is_Phi(node) || is_Phi0(node));
1414 set_irn_n(node, pos, pred);
1418 int is_memop(ir_node *node) {
1419 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1422 ir_node *get_memop_mem (ir_node *node) {
1423 assert(is_memop(node));
1424 return get_irn_n(node, 0);
1427 void set_memop_mem (ir_node *node, ir_node *mem) {
1428 assert(is_memop(node));
1429 set_irn_n(node, 0, mem);
1432 ir_node *get_memop_ptr (ir_node *node) {
1433 assert(is_memop(node));
1434 return get_irn_n(node, 1);
1437 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1438 assert(is_memop(node));
1439 set_irn_n(node, 1, ptr);
1443 get_Load_mem (ir_node *node) {
1444 assert (node->op == op_Load);
1445 return get_irn_n(node, 0);
1449 set_Load_mem (ir_node *node, ir_node *mem) {
1450 assert (node->op == op_Load);
1451 set_irn_n(node, 0, mem);
1455 get_Load_ptr (ir_node *node) {
1456 assert (node->op == op_Load);
1457 return get_irn_n(node, 1);
1461 set_Load_ptr (ir_node *node, ir_node *ptr) {
1462 assert (node->op == op_Load);
1463 set_irn_n(node, 1, ptr);
1467 get_Load_mode (ir_node *node) {
1468 assert (node->op == op_Load);
1469 return node->attr.load.load_mode;
1473 set_Load_mode (ir_node *node, ir_mode *mode) {
1474 assert (node->op == op_Load);
1475 node->attr.load.load_mode = mode;
1479 get_Load_volatility (ir_node *node) {
1480 assert (node->op == op_Load);
1481 return node->attr.load.volatility;
1485 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1486 assert (node->op == op_Load);
1487 node->attr.load.volatility = volatility;
1492 get_Store_mem (ir_node *node) {
1493 assert (node->op == op_Store);
1494 return get_irn_n(node, 0);
1498 set_Store_mem (ir_node *node, ir_node *mem) {
1499 assert (node->op == op_Store);
1500 set_irn_n(node, 0, mem);
1504 get_Store_ptr (ir_node *node) {
1505 assert (node->op == op_Store);
1506 return get_irn_n(node, 1);
1510 set_Store_ptr (ir_node *node, ir_node *ptr) {
1511 assert (node->op == op_Store);
1512 set_irn_n(node, 1, ptr);
1516 get_Store_value (ir_node *node) {
1517 assert (node->op == op_Store);
1518 return get_irn_n(node, 2);
1522 set_Store_value (ir_node *node, ir_node *value) {
1523 assert (node->op == op_Store);
1524 set_irn_n(node, 2, value);
1528 get_Store_volatility (ir_node *node) {
1529 assert (node->op == op_Store);
1530 return node->attr.store.volatility;
1534 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1535 assert (node->op == op_Store);
1536 node->attr.store.volatility = volatility;
1541 get_Alloc_mem (ir_node *node) {
1542 assert (node->op == op_Alloc);
1543 return get_irn_n(node, 0);
1547 set_Alloc_mem (ir_node *node, ir_node *mem) {
1548 assert (node->op == op_Alloc);
1549 set_irn_n(node, 0, mem);
1553 get_Alloc_size (ir_node *node) {
1554 assert (node->op == op_Alloc);
1555 return get_irn_n(node, 1);
1559 set_Alloc_size (ir_node *node, ir_node *size) {
1560 assert (node->op == op_Alloc);
1561 set_irn_n(node, 1, size);
1565 get_Alloc_type (ir_node *node) {
1566 assert (node->op == op_Alloc);
1567 return node->attr.a.type = skip_tid(node->attr.a.type);
1571 set_Alloc_type (ir_node *node, type *tp) {
1572 assert (node->op == op_Alloc);
1573 node->attr.a.type = tp;
1577 get_Alloc_where (ir_node *node) {
1578 assert (node->op == op_Alloc);
1579 return node->attr.a.where;
1583 set_Alloc_where (ir_node *node, where_alloc where) {
1584 assert (node->op == op_Alloc);
1585 node->attr.a.where = where;
1590 get_Free_mem (ir_node *node) {
1591 assert (node->op == op_Free);
1592 return get_irn_n(node, 0);
1596 set_Free_mem (ir_node *node, ir_node *mem) {
1597 assert (node->op == op_Free);
1598 set_irn_n(node, 0, mem);
1602 get_Free_ptr (ir_node *node) {
1603 assert (node->op == op_Free);
1604 return get_irn_n(node, 1);
1608 set_Free_ptr (ir_node *node, ir_node *ptr) {
1609 assert (node->op == op_Free);
1610 set_irn_n(node, 1, ptr);
1614 get_Free_size (ir_node *node) {
1615 assert (node->op == op_Free);
1616 return get_irn_n(node, 2);
1620 set_Free_size (ir_node *node, ir_node *size) {
1621 assert (node->op == op_Free);
1622 set_irn_n(node, 2, size);
1626 get_Free_type (ir_node *node) {
1627 assert (node->op == op_Free);
1628 return node->attr.f = skip_tid(node->attr.f);
1632 set_Free_type (ir_node *node, type *tp) {
1633 assert (node->op == op_Free);
1638 get_Sync_preds_arr (ir_node *node) {
1639 assert (node->op == op_Sync);
1640 return (ir_node **)&(get_irn_in(node)[1]);
1644 get_Sync_n_preds (ir_node *node) {
1645 assert (node->op == op_Sync);
1646 return (get_irn_arity(node));
1651 set_Sync_n_preds (ir_node *node, int n_preds) {
1652 assert (node->op == op_Sync);
1657 get_Sync_pred (ir_node *node, int pos) {
1658 assert (node->op == op_Sync);
1659 return get_irn_n(node, pos);
1663 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1664 assert (node->op == op_Sync);
1665 set_irn_n(node, pos, pred);
1669 get_Proj_pred (ir_node *node) {
1670 assert (is_Proj(node));
1671 return get_irn_n(node, 0);
1675 set_Proj_pred (ir_node *node, ir_node *pred) {
1676 assert (is_Proj(node));
1677 set_irn_n(node, 0, pred);
1681 get_Proj_proj (ir_node *node) {
1682 assert (is_Proj(node));
1683 if (get_irn_opcode(node) == iro_Proj) {
1684 return node->attr.proj;
1686 assert(get_irn_opcode(node) == iro_Filter);
1687 return node->attr.filter.proj;
1692 set_Proj_proj (ir_node *node, long proj) {
1693 assert (node->op == op_Proj);
1694 node->attr.proj = proj;
1698 get_Tuple_preds_arr (ir_node *node) {
1699 assert (node->op == op_Tuple);
1700 return (ir_node **)&(get_irn_in(node)[1]);
1704 get_Tuple_n_preds (ir_node *node) {
1705 assert (node->op == op_Tuple);
1706 return (get_irn_arity(node));
1711 set_Tuple_n_preds (ir_node *node, int n_preds) {
1712 assert (node->op == op_Tuple);
1717 get_Tuple_pred (ir_node *node, int pos) {
1718 assert (node->op == op_Tuple);
1719 return get_irn_n(node, pos);
1723 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1724 assert (node->op == op_Tuple);
1725 set_irn_n(node, pos, pred);
1729 get_Id_pred (ir_node *node) {
1730 assert (node->op == op_Id);
1731 return get_irn_n(node, 0);
1735 set_Id_pred (ir_node *node, ir_node *pred) {
1736 assert (node->op == op_Id);
1737 set_irn_n(node, 0, pred);
1740 ir_node *get_Confirm_value (ir_node *node) {
1741 assert (node->op == op_Confirm);
1742 return get_irn_n(node, 0);
1744 void set_Confirm_value (ir_node *node, ir_node *value) {
1745 assert (node->op == op_Confirm);
1746 set_irn_n(node, 0, value);
1748 ir_node *get_Confirm_bound (ir_node *node) {
1749 assert (node->op == op_Confirm);
1750 return get_irn_n(node, 1);
1752 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1753 assert (node->op == op_Confirm);
1754 set_irn_n(node, 0, bound);
1756 pn_Cmp get_Confirm_cmp (ir_node *node) {
1757 assert (node->op == op_Confirm);
1758 return node->attr.confirm_cmp;
1760 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1761 assert (node->op == op_Confirm);
1762 node->attr.confirm_cmp = cmp;
1767 get_Filter_pred (ir_node *node) {
1768 assert(node->op == op_Filter);
1772 set_Filter_pred (ir_node *node, ir_node *pred) {
1773 assert(node->op == op_Filter);
1777 get_Filter_proj(ir_node *node) {
1778 assert(node->op == op_Filter);
1779 return node->attr.filter.proj;
1782 set_Filter_proj (ir_node *node, long proj) {
1783 assert(node->op == op_Filter);
1784 node->attr.filter.proj = proj;
1787 /* Don't use get_irn_arity, get_irn_n in implementation as access
1788 shall work independent of view!!! */
1789 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1790 assert(node->op == op_Filter);
1791 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1792 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1793 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1794 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1795 node->attr.filter.in_cg[0] = node->in[0];
1797 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1800 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1801 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1802 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1803 node->attr.filter.in_cg[pos + 1] = pred;
1805 int get_Filter_n_cg_preds(ir_node *node) {
1806 assert(node->op == op_Filter && node->attr.filter.in_cg);
1807 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1809 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1811 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1813 arity = ARR_LEN(node->attr.filter.in_cg);
1814 assert(pos < arity - 1);
1815 return node->attr.filter.in_cg[pos + 1];
1820 get_irn_irg(ir_node *node) {
1821 if (get_irn_op(node) != op_Block)
1822 node = get_nodes_block(node);
1823 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
1824 node = get_nodes_block(node);
1825 assert(get_irn_op(node) == op_Block);
1826 return node->attr.block.irg;
1830 /*----------------------------------------------------------------*/
1831 /* Auxiliary routines */
1832 /*----------------------------------------------------------------*/
1835 skip_Proj (ir_node *node) {
1836 /* don't assert node !!! */
1837 if (node && is_Proj(node)) {
1838 return get_Proj_pred(node);
1845 skip_Tuple (ir_node *node) {
1848 if (!get_opt_normalize()) return node;
1850 node = skip_Id(node);
1851 if (get_irn_op(node) == op_Proj) {
1852 pred = skip_Id(get_Proj_pred(node));
1853 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
1854 pred = skip_Id(skip_Tuple(pred));
1855 if (get_irn_op(pred) == op_Tuple)
1856 return get_Tuple_pred(pred, get_Proj_proj(node));
1861 /** returns operand of node if node is a Cast */
1862 ir_node *skip_Cast (ir_node *node) {
1863 if (node && get_irn_op(node) == op_Cast) {
1864 return skip_Id(get_irn_n(node, 0));
1871 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1872 than any other approach, as Id chains are resolved and all point to the real node, or
1873 all id's are self loops. */
1875 skip_Id (ir_node *node) {
1876 /* don't assert node !!! */
1878 if (!get_opt_normalize()) return node;
1880 /* Don't use get_Id_pred: We get into an endless loop for
1881 self-referencing Ids. */
1882 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
1883 ir_node *rem_pred = node->in[0+1];
1886 assert (get_irn_arity (node) > 0);
1888 node->in[0+1] = node;
1889 res = skip_Id(rem_pred);
1890 if (res->op == op_Id) /* self-loop */ return node;
1892 node->in[0+1] = res;
1899 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
1900 than any other approach, as Id chains are resolved and all point to the real node, or
1901 all id's are self loops. */
1903 skip_Id (ir_node *node) {
1905 /* don't assert node !!! */
1907 if (!node || (node->op != op_Id)) return node;
1909 if (!get_opt_normalize()) return node;
1911 /* Don't use get_Id_pred: We get into an endless loop for
1912 self-referencing Ids. */
1913 pred = node->in[0+1];
1915 if (pred->op != op_Id) return pred;
1917 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
1918 ir_node *rem_pred, *res;
1920 if (pred->op != op_Id) return pred; /* shortcut */
1923 assert (get_irn_arity (node) > 0);
1925 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
1926 res = skip_Id(rem_pred);
1927 if (res->op == op_Id) /* self-loop */ return node;
1929 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
1938 (is_Bad)(const ir_node *node) {
1939 return __is_Bad(node);
1943 (is_no_Block)(const ir_node *node) {
1944 return __is_no_Block(node);
1948 (is_Block)(const ir_node *node) {
1949 return __is_Block(node);
1952 /* returns true if node is a Unknown node. */
1954 is_Unknown (const ir_node *node) {
1956 return (get_irn_op(node) == op_Unknown);
1960 is_Proj (const ir_node *node) {
1962 return node->op == op_Proj
1963 || (!get_interprocedural_view() && node->op == op_Filter);
1966 /* Returns true if the operation manipulates control flow. */
1968 is_cfop(const ir_node *node) {
1969 return is_cfopcode(get_irn_op(node));
1972 /* Returns true if the operation manipulates interprocedural control flow:
1973 CallBegin, EndReg, EndExcept */
1974 int is_ip_cfop(const ir_node *node) {
1975 return is_ip_cfopcode(get_irn_op(node));
1978 /* Returns true if the operation can change the control flow because
1981 is_fragile_op(const ir_node *node) {
1982 return is_op_fragile(get_irn_op(node));
1985 /* Returns the memory operand of fragile operations. */
1986 ir_node *get_fragile_op_mem(ir_node *node) {
1987 assert(node && is_fragile_op(node));
1989 switch (get_irn_opcode (node)) {
1998 return get_irn_n(node, 0);
2003 assert(0 && "should not be reached");
2008 /* Returns true if the operation is a forking control flow operation. */
2010 is_forking_op(const ir_node *node) {
2011 return is_op_forking(get_irn_op(node));
2014 #ifdef DEBUG_libfirm
2015 void dump_irn (ir_node *n) {
2016 int i, arity = get_irn_arity(n);
2017 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2019 ir_node *pred = get_irn_n(n, -1);
2020 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2021 get_irn_node_nr(pred), (void *)pred);
2023 printf(" preds: \n");
2024 for (i = 0; i < arity; ++i) {
2025 ir_node *pred = get_irn_n(n, i);
2026 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2027 get_irn_node_nr(pred), (void *)pred);
2031 #else /* DEBUG_libfirm */
2032 void dump_irn (ir_node *n) {}
2033 #endif /* DEBUG_libfirm */