1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Authors: Martin Trapp, Christian Schaefer
6 ** ircons.c: basic and more detailed irnode constructors
7 ** store, block and parameter administration.
8 ** Adapted to extended FIRM nodes (exceptions...) and commented
9 ** by Goetz Lindenmaier
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
28 /* memset belongs to string.h */
31 #if USE_EXPICIT_PHI_IN_STACK
32 /* A stack needed for the automatic Phi node construction in constructor
33 Phi_in. Redefinition in irgraph.c!! */
38 typedef struct Phi_in_stack Phi_in_stack;
41 /*** ******************************************** */
42 /** private interfaces, for professional use only */
44 /* Constructs a Block with a fixed number of predecessors.
45    Does not set current_block. Can not be used with automatic
46    Phi node construction. */
48 new_r_Block (ir_graph *irg, int arity, ir_node **in)
/* NOTE(review): excerpt -- local declarations/return elided from this view. */
52 res = new_ir_node (irg, NULL, op_Block, mode_R, arity, in);
/* A block built this way starts out mature and not yet visited. */
53 set_Block_matured(res, 1);
54 set_Block_block_visited(res, 0);
/* Constructs a Start node (mode_T, no predecessors) in 'block' of 'irg'. */
61 new_r_Start (ir_graph *irg, ir_node *block)
65 res = new_ir_node (irg, block, op_Start, mode_T, 0, NULL);
/* Constructs an End node; arity -1 gives it a dynamic in-array (keepalives). */
72 new_r_End (ir_graph *irg, ir_node *block)
76 res = new_ir_node (irg, block, op_End, mode_X, -1, NULL);
82 /* Creates a Phi node with all predecessors. Calling this constructor
83    is only allowed if the corresponding block is mature. */
85 new_r_Phi (ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* The Phi must have exactly one operand per control-flow predecessor. */
89 assert( get_Block_matured(block) );
90 assert( get_irn_arity(block) == arity );
92 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
97 /* Memory Phis in endless loops must be kept alive.
98 As we can't distinguish these easily we keep all of them alive. */
99 if ((res->op == op_Phi) && (mode == mode_M))
100 add_End_keepalive(irg->end, res);
/* Constructs a Const node.
   NOTE(review): the tarval 'con' is attached to the node on lines elided
   from this excerpt. */
105 new_r_Const (ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
108 res = new_ir_node (irg, block, op_Const, mode, 0, NULL);
110 res = optimize (res);
114 res = local_optimize_newby (res);
/* Constructs an Id node that simply forwards 'val'. */
121 new_r_Id (ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
123 ir_node *in[1] = {val};
125 res = new_ir_node (irg, block, op_Id, mode, 1, in);
126 res = optimize (res);
/* Constructs a Proj selecting one result of the (mode_T) node 'arg'. */
132 new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
135 ir_node *in[1] = {arg};
137 res = new_ir_node (irg, block, op_Proj, mode, 1, in);
/* The projection number is stored in the node's attribute. */
138 res->attr.proj = proj;
139 assert(get_Proj_pred(res));
142 assert(get_nodes_Block(get_Proj_pred(res)));
144 res = optimize (res);
/* Constructs the default Proj of a Cond over an integral selector:
   marks the Cond as 'fragmentary' and records max_proj as its default. */
152 new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
156 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
157 arg->attr.c.kind = fragmentary;
158 arg->attr.c.default_proj = max_proj;
159 res = new_r_Proj (irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node converting 'op' to 'mode'. */
164 new_r_Conv (ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
166 ir_node *in[1] = {op};
168 res = new_ir_node (irg, block, op_Conv, mode, 1, in);
169 res = optimize (res);
/* Constructs a Tuple node (mode_T) grouping 'arity' values. */
176 new_r_Tuple (ir_graph *irg, ir_node *block, int arity, ir_node **in)
180 res = new_ir_node (irg, block, op_Tuple, mode_T, arity, in);
181 res = optimize (res);
187 new_r_Add (ir_graph *irg, ir_node *block,
188 ir_node *op1, ir_node *op2, ir_mode *mode)
/* Binary Add node; the result is locally optimized before being returned. */
190 ir_node *in[2] = {op1, op2};
192 res = new_ir_node (irg, block, op_Add, mode, 2, in);
193 res = optimize (res);
199 new_r_Sub (ir_graph *irg, ir_node *block,
200 ir_node *op1, ir_node *op2, ir_mode *mode)
/* Binary Sub node; same build-then-optimize pattern as new_r_Add. */
202 ir_node *in[2] = {op1, op2};
204 res = new_ir_node (irg, block, op_Sub, mode, 2, in);
205 res = optimize (res);
211 new_r_Minus (ir_graph *irg, ir_node *block,
212 ir_node *op, ir_mode *mode)
/* Unary arithmetic negation. */
214 ir_node *in[1] = {op};
216 res = new_ir_node (irg, block, op_Minus, mode, 1, in);
217 res = optimize (res);
223 new_r_Mul (ir_graph *irg, ir_node *block,
224 ir_node *op1, ir_node *op2, ir_mode *mode)
/* Binary Mul node. */
226 ir_node *in[2] = {op1, op2};
228 res = new_ir_node (irg, block, op_Mul, mode, 2, in);
229 res = optimize (res);
235 new_r_Quot (ir_graph *irg, ir_node *block,
236 ir_node *memop, ir_node *op1, ir_node *op2)
/* Division with memory operand: mode_T node (may raise an exception). */
238 ir_node *in[3] = {memop, op1, op2};
240 res = new_ir_node (irg, block, op_Quot, mode_T, 3, in);
241 res = optimize (res);
247 new_r_DivMod (ir_graph *irg, ir_node *block,
248 ir_node *memop, ir_node *op1, ir_node *op2)
/* Combined division/remainder with memory operand; mode_T result tuple. */
250 ir_node *in[3] = {memop, op1, op2};
252 res = new_ir_node (irg, block, op_DivMod, mode_T, 3, in);
253 res = optimize (res);
259 new_r_Div (ir_graph *irg, ir_node *block,
260 ir_node *memop, ir_node *op1, ir_node *op2)
/* Integer division with memory operand; mode_T result tuple. */
262 ir_node *in[3] = {memop, op1, op2};
264 res = new_ir_node (irg, block, op_Div, mode_T, 3, in);
265 res = optimize (res);
271 new_r_Mod (ir_graph *irg, ir_node *block,
272 ir_node *memop, ir_node *op1, ir_node *op2)
/* Remainder with memory operand; mode_T result tuple. */
274 ir_node *in[3] = {memop, op1, op2};
276 res = new_ir_node (irg, block, op_Mod, mode_T, 3, in);
277 res = optimize (res);
283 new_r_And (ir_graph *irg, ir_node *block,
284 ir_node *op1, ir_node *op2, ir_mode *mode)
/* Bitwise And. */
286 ir_node *in[2] = {op1, op2};
288 res = new_ir_node (irg, block, op_And, mode, 2, in);
289 res = optimize (res);
295 new_r_Or (ir_graph *irg, ir_node *block,
296 ir_node *op1, ir_node *op2, ir_mode *mode)
/* Bitwise Or. */
298 ir_node *in[2] = {op1, op2};
300 res = new_ir_node (irg, block, op_Or, mode, 2, in);
301 res = optimize (res);
307 new_r_Eor (ir_graph *irg, ir_node *block,
308 ir_node *op1, ir_node *op2, ir_mode *mode)
/* Bitwise exclusive-or. */
310 ir_node *in[2] = {op1, op2};
312 res = new_ir_node (irg, block, op_Eor, mode, 2, in);
313 res = optimize (res);
319 new_r_Not (ir_graph *irg, ir_node *block,
320 ir_node *op, ir_mode *mode)
/* Bitwise complement. */
322 ir_node *in[1] = {op};
324 res = new_ir_node (irg, block, op_Not, mode, 1, in);
325 res = optimize (res);
331 new_r_Shl (ir_graph *irg, ir_node *block,
332 ir_node *op, ir_node *k, ir_mode *mode)
/* Left shift of 'op' by 'k'. */
334 ir_node *in[2] = {op, k};
336 res = new_ir_node (irg, block, op_Shl, mode, 2, in);
337 res = optimize (res);
343 new_r_Shr (ir_graph *irg, ir_node *block,
344 ir_node *op, ir_node *k, ir_mode *mode)
/* Logical (unsigned) right shift of 'op' by 'k'. */
346 ir_node *in[2] = {op, k};
348 res = new_ir_node (irg, block, op_Shr, mode, 2, in);
349 res = optimize (res);
355 new_r_Shrs (ir_graph *irg, ir_node *block,
356 ir_node *op, ir_node *k, ir_mode *mode)
/* Arithmetic (sign-preserving) right shift of 'op' by 'k'. */
358 ir_node *in[2] = {op, k};
360 res = new_ir_node (irg, block, op_Shrs, mode, 2, in);
361 res = optimize (res);
367 new_r_Rot (ir_graph *irg, ir_node *block,
368 ir_node *op, ir_node *k, ir_mode *mode)
/* Rotate 'op' by 'k' bits. */
370 ir_node *in[2] = {op, k};
372 res = new_ir_node (irg, block, op_Rot, mode, 2, in);
373 res = optimize (res);
379 new_r_Abs (ir_graph *irg, ir_node *block,
380 ir_node *op, ir_mode *mode)
/* Absolute value of 'op'. */
382 ir_node *in[1] = {op};
384 res = new_ir_node (irg, block, op_Abs, mode, 1, in);
385 res = optimize (res);
391 new_r_Cmp (ir_graph *irg, ir_node *block,
392 ir_node *op1, ir_node *op2)
/* Comparison of two operands; mode_T result holds the relation Projs. */
394 ir_node *in[2] = {op1, op2};
396 res = new_ir_node (irg, block, op_Cmp, mode_T, 2, in);
397 res = optimize (res);
/* Unconditional jump out of 'block' (mode_X, no data predecessors).
   NOTE(review): 'in' here is declared on lines elided from this excerpt. */
403 new_r_Jmp (ir_graph *irg, ir_node *block)
407 res = new_ir_node (irg, block, op_Jmp, mode_X, 0, in);
408 res = optimize (res);
/* Conditional branch on selector 'c'; successors are selected via Projs. */
414 new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c)
416 ir_node *in[1] = {c};
418 res = new_ir_node (irg, block, op_Cond, mode_T, 1, in);
/* Default attributes: dense Proj numbering, default Proj 0.
   new_r_defaultProj may later switch the kind to 'fragmentary'. */
419 res->attr.c.kind = dense;
420 res->attr.c.default_proj = 0;
421 res = optimize (res);
427 new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
428 ir_node *callee, int arity, ir_node **in, type *type)
/* The real in-array is [store, callee, arg0..argN-1]; the arguments are
   copied in starting at slot 2. Slots 0/1 are filled on elided lines. */
435 NEW_ARR_A (ir_node *, r_in, r_arity);
438 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
440 res = new_ir_node (irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type. */
442 assert(is_method_type(type));
443 set_Call_type(res, type);
444 res = optimize (res);
450 new_r_Return (ir_graph *irg, ir_node *block,
451 ir_node *store, int arity, ir_node **in)
/* In-array layout: [store, result0..resultN-1]; results copied to slot 1+. */
458 NEW_ARR_A (ir_node *, r_in, r_arity);
460 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
461 res = new_ir_node (irg, block, op_Return, mode_X, r_arity, r_in);
462 res = optimize (res);
/* Constructs a Raise (exception throw) with memory 'store' and object 'obj'. */
468 new_r_Raise (ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
470 ir_node *in[2] = {store, obj};
472 res = new_ir_node (irg, block, op_Raise, mode_T, 2, in);
473 res = optimize (res);
479 new_r_Load (ir_graph *irg, ir_node *block,
480 ir_node *store, ir_node *adr)
/* Memory load from address 'adr'; mode_T result (memory + loaded value). */
482 ir_node *in[2] = {store, adr};
484 res = new_ir_node (irg, block, op_Load, mode_T, 2, in);
486 res = optimize (res);
492 new_r_Store (ir_graph *irg, ir_node *block,
493 ir_node *store, ir_node *adr, ir_node *val)
/* Memory store of 'val' to address 'adr'. */
495 ir_node *in[3] = {store, adr, val};
497 res = new_ir_node (irg, block, op_Store, mode_T, 3, in);
499 res = optimize (res);
505 new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
506 ir_node *size, type *alloc_type, where_alloc where)
/* Allocates 'size' units of 'alloc_type'; 'where' selects stack vs. heap. */
508 ir_node *in[2] = {store, size};
510 res = new_ir_node (irg, block, op_Alloc, mode_T, 2, in);
512 res->attr.a.where = where;
513 res->attr.a.type = alloc_type;
515 res = optimize (res);
521 new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
522 ir_node *ptr, ir_node *size, type *free_type)
/* Frees 'size' units of 'free_type' at 'ptr'; the type lands in attr.f. */
524 ir_node *in[3] = {store, ptr, size};
526 res = new_ir_node (irg, block, op_Free, mode_T, 3, in);
528 res->attr.f = free_type;
530 res = optimize (res);
536 new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
537 int arity, ir_node **in, entity *ent)
/* Selects entity 'ent' from 'objptr'; index operands are copied to slot 2+,
   slots 0/1 (store, objptr) are filled on elided lines. Result is a pointer. */
544 NEW_ARR_A (ir_node *, r_in, r_arity);
547 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
548 res = new_ir_node (irg, block, op_Sel, mode_p, r_arity, r_in);
/* Default linkage is static; the entity is recorded in the attribute. */
550 res->attr.s.ltyp = static_linkage;
551 res->attr.s.ent = ent;
553 res = optimize (res);
/* Constructs a SymConst. 'value' is an ident for linkage_ptr_info,
   otherwise a type (for type_tag / size symconsts). */
559 new_r_SymConst (ir_graph *irg, ir_node *block, type_or_id_p value,
560 symconst_kind symkind)
/* NOTE(review): 'mode' is chosen on lines elided from this excerpt,
   branching on symkind here. */
565 if (symkind == linkage_ptr_info)
569 res = new_ir_node (irg, block, op_SymConst, mode, 0, in);
571 res->attr.i.num = symkind;
/* Store either an ident (pointer info) or a type in the union attribute. */
572 if (symkind == linkage_ptr_info) {
573 res->attr.i.tori.ptrinfo = (ident *)value;
575 assert ( ( (symkind == type_tag)
576 || (symkind == size))
577 && (is_type(value)));
578 res->attr.i.tori.typ = (type *)value;
580 res = optimize (res);
/* Constructs a Sync node merging 'arity' memory values into one (mode_M). */
586 new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in)
590 res = new_ir_node (irg, block, op_Sync, mode_M, arity, in);
592 res = optimize (res);
600 return current_ir_graph->bad;
603 /** ********************/
604 /** public interfaces */
605 /** construction tools */
607 /****f* ircons/new_Start
610 * new_Start -- create a new Start node in the current block
613 * s = new_Start(void);
614 * ir_node* new_Start(void);
617 * s - pointer to the created Start node
626 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
627 op_Start, mode_T, 0, NULL);
629 res = optimize (res);
638 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
639 op_End, mode_X, -1, NULL);
640 res = optimize (res);
646 /* Constructs a Block with a fixed number of predecessors.
647 Does set current_block. Can be used with automatic Phi
648 node construction. */
650 new_Block (int arity, ir_node **in)
654 res = new_r_Block (current_ir_graph, arity, in);
656 /* Create and initialize array for Phi-node construction:
   one slot per local variable (n_loc), all starting out undefined (NULL). */
657 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
658 current_ir_graph->n_loc);
659 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
661 res = optimize (res);
/* The new block becomes the insertion point for subsequent constructors. */
662 current_ir_graph->current_block = res;
669 /* ***********************************************************************/
670 /* Methods necessary for automatic Phi node creation */
672 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
673 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
674 ir_node *new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
675 ir_node *new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
677 Call Graph: ( A ---> B == A "calls" B)
679 get_value mature_block
687 get_r_value_internal |
691 new_r_Phi0 new_r_Phi_in
693 * *************************************************************************** */
695 /* Creates a Phi node with 0 predecessors */
/* Placeholder used during construction of immature blocks; it is completed
   (or turned into an Id) once the block is matured. */
697 new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
700 res = new_ir_node (irg, block, op_Phi, mode, 0, NULL);
705 /* There are two implementations of the Phi node construction. The first
706 is faster, but does not work for blocks with more than 2 predecessors.
707 The second works always but is slower and causes more unnecessary Phi
709 Select the implementations by the following preprocessor flag set in
711 #if USE_FAST_PHI_CONSTRUCTION
713 /* This is a stack used for allocating and deallocating nodes in
714 new_r_Phi_in. The original implementation used the obstack
715 to model this stack, now it is explicit. This reduces side effects.
717 #if USE_EXPICIT_PHI_IN_STACK
722 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
724 res->stack = NEW_ARR_F (ir_node *, 1);
731 free_Phi_in_stack(Phi_in_stack *s) {
/* Returns a Phi node to the per-graph reuse stack instead of freeing it.
   Grows the stack array only when the write position is at its end. */
736 void free_to_Phi_in_stack(ir_node *phi) {
737 assert(get_irn_opcode(phi) == iro_Phi);
739 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
740 current_ir_graph->Phi_in_stack->pos)
741 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
743 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
745 (current_ir_graph->Phi_in_stack->pos)++;
/* Either allocates a fresh Phi node or recycles one from the reuse stack.
   NOTE(review): the branch between the two paths is on elided lines. */
749 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
750 int arity, ir_node **in) {
752 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
753 int pos = current_ir_graph->Phi_in_stack->pos;
757 /* We need to allocate a new node */
758 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
760 /* reuse the old node and initialize it again. */
763 assert (res->kind == k_ir_node);
764 assert (res->op == op_Phi);
/* Reinitialize the in-array: slot 0 is the block, slots 1..arity the ops. */
769 /* ???!!! How to free the old in array?? */
770 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
772 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
/* Pop the recycled node off the stack. */
774 (current_ir_graph->Phi_in_stack->pos)--;
778 #endif /* USE_EXPICIT_PHI_IN_STACK */
780 /* Creates a Phi node with a given, fixed array **in of predecessors.
781 If the Phi node is unnecessary, as the same value reaches the block
782 through all control flow paths, it is eliminated and the value
783 returned directly. This constructor is only intended for use in
784 the automatic Phi node generation triggered by get_value or mature.
785 The implementation is quite tricky and depends on the fact, that
786 the nodes are allocated on a stack:
787 The in array contains predecessors and NULLs. The NULLs appear,
788 if get_r_value_internal, that computed the predecessors, reached
789 the same block on two paths. In this case the same value reaches
790 this block on both paths, there is no definition in between. We need
791 not allocate a Phi where these path's merge, but we have to communicate
792 this fact to the caller. This happens by returning a pointer to the
793 node the caller _will_ allocate. (Yes, we predict the address. We can
794 do so because the nodes are allocated on the obstack.) The caller then
795 finds a pointer to itself and, when this routine is called again,
799 new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
800 ir_node **in, int ins)
803 ir_node *res, *known;
805 /* allocate a new node on the obstack.
806 This can return a node to which some of the pointers in the in-array
808 Attention: the constructor copies the in array, i.e., the later changes
809 to the array in this routine do not affect the constructed node! If
810 the in array contains NULLs, there will be missing predecessors in the
812 Is this a possible internal state of the Phi node generation? */
813 #if USE_EXPICIT_PHI_IN_STACK
814 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
816 res = known = new_ir_node (irg, block, op_Phi, mode, ins, in);
818 /* The in-array can contain NULLs. These were returned by
819 get_r_value_internal if it reached the same block/definition on a
821 The NULLs are replaced by the node itself to simplify the test in the
823 for (i=0; i < ins; ++i)
824 if (in[i] == NULL) in[i] = res;
826 /* This loop checks whether the Phi has more than one predecessor.
827 If so, it is a real Phi node and we break the loop. Else the
828 Phi node merges the same definition on several paths and therefore
830 for (i=0; i < ins; ++i)
832 if (in[i]==res || in[i]==known) continue;
840 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Give the speculative node back: either onto the reuse stack or straight
   back to the obstack (legal because it is the topmost allocation). */
842 #if USE_EXPICIT_PHI_IN_STACK
843 free_to_Phi_in_stack(res);
845 obstack_free (current_ir_graph->obst, res);
849 res = optimize (res);
853 /* return the pointer to the Phi node. This node might be deallocated! */
858 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
860 /** This function computes the predecessors for a real Phi node, and then
861 allocates and returns this node. The routine called to allocate the
862 node might optimize it away and return a real value, or even a pointer
863 to a deallocated Phi node on top of the obstack!
864 This function is called with an in-array of proper size. **/
865 static inline ir_node *
866 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
868 ir_node *prevBlock, *res;
871 /* This loop goes to all predecessor blocks of the block the Phi node is in
872 and there finds the operands of the Phi node by calling
873 get_r_value_internal. */
874 for (i = 1; i <= ins; ++i) {
875 assert (block->in[i]);
876 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
878 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
881 /* After collecting all predecessors into the array nin a new Phi node
882 with these predecessors is created. This constructor contains an
883 optimization: If all predecessors of the Phi node are identical it
884 returns the only operand instead of a new Phi node. If the value
885 passes two different control flow edges without being defined, and
886 this is the second path treated, a pointer to the node that will be
887 allocated for the first path (recursion) is returned. We already
888 know the address of this node, as it is the next node to be allocated
889 and will be placed on top of the obstack. (The obstack is a _stack_!) */
890 res = new_r_Phi_in (current_ir_graph, block, mode, nin, ins);
892 /* Now we know the value for "pos" and can enter it in the array with
893 all known local variables. Attention: this might be a pointer to
894 a node, that later will be allocated!!! See new_r_Phi_in.
895 If this is called in mature, after some set_value in the same block,
896 the proper value must not be overwritten:
898 get_value (makes Phi0, put's it into graph_arr)
899 set_value (overwrites Phi0 in graph_arr)
900 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
903 if (!block->attr.block.graph_arr[pos]) {
904 block->attr.block.graph_arr[pos] = res;
906 /* printf(" value already computed by %s\n",
907 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
913 /* This function returns the last definition of a variable. In case
914 this variable was last defined in a previous block, Phi nodes are
915 inserted. If the part of the firm graph containing the definition
916 is not yet constructed, a dummy Phi node is returned. */
918 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
921 /* There are 4 cases to treat.
923 1. The block is not mature and we visit it the first time. We can not
924 create a proper Phi node, therefore a Phi0, i.e., a Phi without
925 predecessors is returned. This node is added to the linked list (field
926 "link") of the containing block to be completed when this block is
927 matured. (Completion will add a new Phi and turn the Phi0 into an Id
930 2. The value is already known in this block, graph_arr[pos] is set and we
931 visit the block the first time. We can return the value without
932 creating any new nodes.
934 3. The block is mature and we visit it the first time. A Phi node needs
935 to be created (phi_merge). If the Phi is not needed, as all it's
936 operands are the same value reaching the block through different
937 paths, it's optimized away and the value itself is returned.
939 4. The block is mature, and we visit it the second time. Now two
940 subcases are possible:
941 * The value was computed completely the last time we were here. This
942 is the case if there is no loop. We can return the proper value.
943 * The recursion that visited this node and set the flag did not
944 return yet. We are computing a value in a loop and need to
945 break the recursion without knowing the result yet.
946 @@@ strange case. Straight forward we would create a Phi before
947 starting the computation of it's predecessors. In this case we will
948 find a Phi here in any case. The problem is that this implementation
949 only creates a Phi after computing the predecessors, so that it is
950 hard to compute self references of this Phi. @@@
951 There is no simple check for the second subcase. Therefore we check
952 for a second visit and treat all such cases as the second subcase.
953 Anyways, the basic situation is the same: we reached a block
954 on two paths without finding a definition of the value: No Phi
955 nodes are needed on both paths.
956 We return this information "Two paths, no Phi needed" by a very tricky
957 implementation that relies on the fact that an obstack is a stack and
958 will return a node with the same address on different allocations.
959 Look also at phi_merge and new_r_phi_in to understand this.
960 @@@ Unfortunately this does not work, see testprogram
961 three_cfpred_example.
965 /* case 4 -- already visited. */
966 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
968 /* visited the first time */
969 set_irn_visited(block, get_irg_visited(current_ir_graph));
971 /* Get the local valid value */
972 res = block->attr.block.graph_arr[pos];
974 /* case 2 -- If the value is actually computed, return it. */
975 if (res) { return res;};
977 if (block->attr.block.matured) { /* case 3 */
979 /* The Phi has the same amount of ins as the corresponding block. */
980 int ins = get_irn_arity(block);
982 NEW_ARR_A (ir_node *, nin, ins);
984 /* Phi merge collects the predecessors and then creates a node. */
985 res = phi_merge (block, pos, mode, nin, ins);
987 } else { /* case 1 */
988 /* The block is not mature, we don't know how many in's are needed. A Phi
989 with zero predecessors is created. Such a Phi node is called Phi0
990 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
991 to the list of Phi0 nodes in this block to be matured by mature_block
993 The Phi0 has to remember the pos of it's internal value. If the real
994 Phi is computed, pos is used to update the array with the local
997 res = new_r_Phi0 (current_ir_graph, block, mode);
998 res->attr.phi0_pos = pos;
/* Chain the Phi0 into the block's link list for mature_block. */
999 res->link = block->link;
1003 /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): the printf string literal continues on elided lines. */
1006 printf("Error: no value set. Use of undefined variable. Initializing
1008 assert (mode->code >= irm_f && mode->code <= irm_p);
/* Fall back to the mode's null constant so construction can proceed. */
1009 res = new_r_Const (current_ir_graph, block, mode,
1010 tarval_mode_null[mode->code]);
1013 /* The local valid value is available now. */
1014 block->attr.block.graph_arr[pos] = res;
1021 /** This is the simple algorithm. If first generates a Phi0, then
1022 it starts the recursion. This causes an Id at the entry of
1023 every block that has no definition of the value! **/
1025 #if USE_EXPICIT_PHI_IN_STACK
/* The slow algorithm does not use the Phi reuse stack; provide no-op stubs
   so the interface stays the same under either preprocessor configuration. */
1027 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1028 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Slow-algorithm variant: always allocates the Phi, then frees it again
   if at most one distinct (non-Bad) predecessor was found. */
1032 new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1033 ir_node **in, int ins)
1036 ir_node *res, *known;
1038 /* Allocate a new node on the obstack. The allocation copies the in
1040 res = new_ir_node (irg, block, op_Phi, mode, ins, in);
1042 /* This loop checks whether the Phi has more than one predecessor.
1043 If so, it is a real Phi node and we break the loop. Else the
1044 Phi node merges the same definition on several paths and therefore
1045 is not needed. Don't consider Bad nodes! */
1047 for (i=0; i < ins; ++i)
1051 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1059 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Legal because res is still the topmost allocation on the obstack. */
1062 obstack_free (current_ir_graph->obst, res);
1065 /* A undefined value, e.g., in unreachable code. */
1069 res = optimize (res);
1071 /* Memory Phis in endless loops must be kept alive.
1072 As we can't distinguish these easily we keep all of them alive. */
1073 if ((res->op == op_Phi) && (mode == mode_M))
1074 add_End_keepalive(irg->end, res);
1081 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1083 #if PRECISE_EXC_CONTEXT
1084 static inline ir_node *
1085 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Builds the "fragment array" for a fragile op 'n': a snapshot of the
   current block's graph_arr, with slot 0 replaced by n's memory Proj. */
1088 new_frag_arr (ir_node *n) {
1091 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1092 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1093 sizeof(ir_node *)*current_ir_graph->n_loc);
1094 /* turn off optimization before allocating Proj nodes, as res isn't
1096 opt = get_optimize(); set_optimize(0);
1097 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* Calls deliver memory as result 3; all other fragile ops as result 0.
   (This matches the two new_Proj calls below -- TODO confirm for new ops.) */
1098 if (get_irn_op(n) == op_Call)
1099 arr[0] = new_Proj(n, mode_M, 3);
1101 arr[0] = new_Proj(n, mode_M, 0);
/* Remember the most recent fragile op in the reserved last slot. */
1103 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Returns the fragment array of a fragile op; the attribute location
   depends on the opcode (Call and Alloc store it in their own attrs). */
1108 get_frag_arr (ir_node *n) {
1109 if (get_irn_op(n) == op_Call) {
1110 return n->attr.call.frag_arr;
1111 } else if (get_irn_op(n) == op_Alloc) {
1112 return n->attr.a.frag_arr;
1114 return n->attr.frag_arr;
/* Records 'val' at 'pos' in a fragment array if that slot is still empty,
   then recurses through the chain of later fragile ops (last slot links
   to the next fragile op's fragment array). Never overwrites a set slot. */
1119 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1120 if (!frag_arr[pos]) frag_arr[pos] = val;
1121 if (frag_arr[current_ir_graph->n_loc - 1])
1122 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Like get_r_value_internal, but reads the value as it was at the control
   flow op 'cfOp' (precise exception context) via cfOp's fragment array. */
1126 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1131 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1133 frag_arr = get_frag_arr(cfOp);
1134 res = frag_arr[pos];
/* NOTE(review): the guard around this branch is on elided lines. */
1136 if (block->attr.block.graph_arr[pos]) {
1137 /* There was a set_value after the cfOp and no get_value before that
1138 set_value. We must build a Phi node now. */
1139 if (block->attr.block.matured) {
1140 int ins = get_irn_arity(block);
1142 NEW_ARR_A (ir_node *, nin, ins);
1143 res = phi_merge(block, pos, mode, nin, ins);
1145 res = new_r_Phi0 (current_ir_graph, block, mode);
1146 res->attr.phi0_pos = pos;
1147 res->link = block->link;
1151 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1152 but this should be better: (remove comment if this works) */
1153 /* It's a Phi, we can write this into all graph_arrs with NULL */
1154 set_frag_value(block->attr.block.graph_arr, pos, res);
1156 res = get_r_value_internal(block, pos, mode);
1157 set_frag_value(block->attr.block.graph_arr, pos, res);
1164 /** This function allocates a dummy Phi node to break recursions,
1165 computes the predecessors for the real phi node, and then
1166 allocates and returns this node. The routine called to allocate the
1167 node might optimize it away and return a real value.
1168 This function is called with an in-array of proper size. **/
1169 static inline ir_node *
1170 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1172 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1175 /* If this block has no value at pos create a Phi0 and remember it
1176 in graph_arr to break recursions.
1177 Else we may not set graph_arr as there a later value is remembered. */
1179 if (!block->attr.block.graph_arr[pos]) {
1180 /* This is commented out as collapsing to Bads is no good idea.
1181 Either we need an assert here, or we need to call a routine
1182 that deals with this case as appropriate for the given language.
1183 Right now a self referencing Id is created which will crash irg_vrfy().
1185 Even if all variables are defined before use, it can happen that
1186 we get to the start block, if a cond has been replaced by a tuple
1187 (bad, jmp). As the start has a self referencing control flow edge,
1188 we get a self referencing Id, which is hard to optimize away. We avoid
1189 this by defining the value as a Bad node.
1190 Returning a const with tarval_bad is a preliminary solution. In some
1191 situations we might want a Warning or an Error. */
1193 if (block == get_irg_start_block(current_ir_graph)) {
1194 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1195 /* We don't need to care about exception ops in the start block.
1196 There are none by definition. */
1197 return block->attr.block.graph_arr[pos];
/* Normal case: plant a Phi0 so recursive lookups terminate here. */
1199 phi0 = new_r_Phi0(current_ir_graph, block, mode);
1200 block->attr.block.graph_arr[pos] = phi0;
1201 #if PRECISE_EXC_CONTEXT
1202 /* Set graph_arr for fragile ops. Also here we should break recursion.
1203 We could choose a cyclic path through an cfop. But the recursion would
1204 break at some point. */
1205 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1210 /* This loop goes to all predecessor blocks of the block the Phi node
1211 is in and there finds the operands of the Phi node by calling
1212 get_r_value_internal. */
1213 for (i = 1; i <= ins; ++i) {
1214 prevCfOp = skip_Proj(block->in[i]);
1216 if (is_Bad(prevCfOp)) {
1217 /* In case a Cond has been optimized we would get right to the start block
1218 with an invalid definition. */
1219 nin[i-1] = new_Bad();
1222 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1224 if (!is_Bad(prevBlock)) {
1225 #if PRECISE_EXC_CONTEXT
/* Fragile predecessor: read the value as of the control flow op, not
   as of the end of the predecessor block. */
1226 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1227 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1228 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1231 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1233 nin[i-1] = new_Bad();
1237 /* After collecting all predecessors into the array nin a new Phi node
1238 with these predecessors is created. This constructor contains an
1239 optimization: If all predecessors of the Phi node are identical it
1240 returns the only operand instead of a new Phi node. */
1241 res = new_r_Phi_in (current_ir_graph, block, mode, nin, ins);
1243 /* In case we allocated a Phi0 node at the beginning of this procedure,
1244 we need to exchange this Phi0 with the real Phi. */
1246 exchange(phi0, res);
1247 block->attr.block.graph_arr[pos] = res;
1248 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1249 only an optimization. */
1255 /* This function returns the last definition of a variable. In case
1256 this variable was last defined in a previous block, Phi nodes are
1257 inserted. If the part of the firm graph containing the definition
1258 is not yet constructed, a dummy Phi node is returned. */
1260 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1263 /* There are 4 cases to treat.
1265 1. The block is not mature and we visit it the first time. We can not
1266 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1267 predecessors is returned. This node is added to the linked list (field
1268 "link") of the containing block to be completed when this block is
1269 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1272 2. The value is already known in this block, graph_arr[pos] is set and we
1273 visit the block the first time. We can return the value without
1274 creating any new nodes.
1276 3. The block is mature and we visit it the first time. A Phi node needs
1277 to be created (phi_merge). If the Phi is not needed, as all its
1278 operands are the same value reaching the block through different
1279 paths, it's optimized away and the value itself is returned.
1281 4. The block is mature, and we visit it the second time. Now two
1282 subcases are possible:
1283 * The value was computed completely the last time we were here. This
1284 is the case if there is no loop. We can return the proper value.
1285 * The recursion that visited this node and set the flag did not
1286 return yet. We are computing a value in a loop and need to
1287 break the recursion. This case only happens if we visited
1288 the same block with phi_merge before, which inserted a Phi0.
1289 So we return the Phi0.
/* The block's visited counter is compared against the graph's: equal
   means this block was already reached within the current traversal
   (the caller bumps the graph counter via inc_irg_visited). */
1292 /* case 4 -- already visited. */
1293 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1294 /* As phi_merge allocates a Phi0 this value is always defined. Here
1295 is the critical difference of the two algorithms. */
1296 assert(block->attr.block.graph_arr[pos]);
1297 return block->attr.block.graph_arr[pos];
1300 /* visited the first time */
1301 set_irn_visited(block, get_irg_visited(current_ir_graph));
1303 /* Get the local valid value */
1304 res = block->attr.block.graph_arr[pos];
1306 /* case 2 -- If the value is actually computed, return it. */
1307 if (res) { return res; };
1309 if (block->attr.block.matured) { /* case 3 */
1311 /* The Phi has the same amount of ins as the corresponding block. */
1312 int ins = get_irn_arity(block);
1314 NEW_ARR_A (ir_node *, nin, ins);
1316 /* Phi merge collects the predecessors and then creates a node. */
1317 res = phi_merge (block, pos, mode, nin, ins);
1319 } else { /* case 1 */
1320 /* The block is not mature, we don't know how many in's are needed. A Phi
1321 with zero predecessors is created. Such a Phi node is called Phi0
1322 node. The Phi0 is then added to the list of Phi0 nodes in this block
1323 to be matured by mature_block later.
1324 The Phi0 has to remember the pos of its internal value. If the real
1325 Phi is computed, pos is used to update the array with the local
1327 res = new_r_Phi0 (current_ir_graph, block, mode);
1328 res->attr.phi0_pos = pos;
1329 res->link = block->link;
1333 /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): the printf format string below is visibly cut off in
   this listing -- confirm the full message against the complete source. */
1336 printf("Error: no value set. Use of undefined variable. Initializing
1338 assert (mode->code >= irm_f && mode->code <= irm_p);
1339 res = new_r_Const (current_ir_graph, block, mode,
1340 tarval_mode_null[mode->code]);
1343 /* The local valid value is available now. */
1344 block->attr.block.graph_arr[pos] = res;
1349 #endif /* USE_FAST_PHI_CONSTRUCTION */
1351 /* ************************************************************************** */
1353 /** Finalize a Block node, when all control flows are known. */
1354 /** Acceptable parameters are only Block nodes. */
/* Maturing is effectively idempotent: an already matured block is left
   untouched (see the guard below). Each Phi0 queued on the block's
   "link" chain is replaced (exchange) by a real Phi built by phi_merge. */
1356 mature_block (ir_node *block)
1363 assert (get_irn_opcode(block) == iro_Block);
1364 // assert (!get_Block_matured(block) && "Block already matured");
1366 if (!get_Block_matured(block)) {
1368 /* An array for building the Phi nodes. */
/* NOTE(review): ARR_LEN(block->in)-1 -- presumably in[0] is not a
   control-flow predecessor; verify against the in-array layout. */
1369 ins = ARR_LEN (block->in)-1;
1370 NEW_ARR_A (ir_node *, nin, ins);
1371 /* shouldn't we delete this array at the end of the procedure? @@@ memory leak? */
1373 /* Traverse a chain of Phi nodes attached to this block and mature
1375 for (n = block->link; n; n=next) {
1376 inc_irg_visited(current_ir_graph);
1378 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1381 block->attr.block.matured = 1;
1383 /* Now, as the block is a finished firm node, we can optimize it.
1384 Since other nodes have been allocated since the block was created
1385 we can not free the node on the obstack. Therefore we have to call
1387 Unfortunately the optimization does not change a lot, as all allocated
1388 nodes refer to the unoptimized node.
1389 We can call _2, as global cse has no effect on blocks. */
1390 block = optimize_in_place_2(block);
/* Comfortable constructors: thin wrappers around the new_r_* interface
   that implicitly use current_ir_graph and its current_block
   (new_Const places constants in the start_block instead). */
1396 new_Phi (int arity, ir_node **in, ir_mode *mode)
1398 return new_r_Phi (current_ir_graph, current_ir_graph->current_block,
1403 new_Const (ir_mode *mode, tarval *con)
1405 return new_r_Const (current_ir_graph, current_ir_graph->start_block,
1410 new_Id (ir_node *val, ir_mode *mode)
1412 return new_r_Id (current_ir_graph, current_ir_graph->current_block,
1417 new_Proj (ir_node *arg, ir_mode *mode, long proj)
1419 return new_r_Proj (current_ir_graph, current_ir_graph->current_block,
/* Marks a Cond as fragmentary and records max_proj as its default
   projection before building the Proj for it. */
1424 new_defaultProj (ir_node *arg, long max_proj)
1427 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
1428 arg->attr.c.kind = fragmentary;
1429 arg->attr.c.default_proj = max_proj;
1430 res = new_Proj (arg, mode_X, max_proj);
1435 new_Conv (ir_node *op, ir_mode *mode)
1437 return new_r_Conv (current_ir_graph, current_ir_graph->current_block,
1442 new_Tuple (int arity, ir_node **in)
1444 return new_r_Tuple (current_ir_graph, current_ir_graph->current_block,
1449 new_Add (ir_node *op1, ir_node *op2, ir_mode *mode)
1451 return new_r_Add (current_ir_graph, current_ir_graph->current_block,
1456 new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode)
1458 return new_r_Sub (current_ir_graph, current_ir_graph->current_block,
1464 new_Minus (ir_node *op, ir_mode *mode)
1466 return new_r_Minus (current_ir_graph, current_ir_graph->current_block,
1471 new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode)
1473 return new_r_Mul (current_ir_graph, current_ir_graph->current_block,
/* The division-like constructors below attach a fragment array (see
   new_frag_arr) under PRECISE_EXC_CONTEXT while still building the
   graph -- but only if the node survived as its own op and was not
   optimized away into something else. */
1478 new_Quot (ir_node *memop, ir_node *op1, ir_node *op2)
1481 res = new_r_Quot (current_ir_graph, current_ir_graph->current_block,
1483 #if PRECISE_EXC_CONTEXT
1484 if ((current_ir_graph->phase_state == phase_building) &&
1485 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1486 res->attr.frag_arr = new_frag_arr(res);
1493 new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2)
1496 res = new_r_DivMod (current_ir_graph, current_ir_graph->current_block,
1498 #if PRECISE_EXC_CONTEXT
1499 if ((current_ir_graph->phase_state == phase_building) &&
1500 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1501 res->attr.frag_arr = new_frag_arr(res);
1508 new_Div (ir_node *memop, ir_node *op1, ir_node *op2)
1511 res = new_r_Div (current_ir_graph, current_ir_graph->current_block,
1513 #if PRECISE_EXC_CONTEXT
1514 if ((current_ir_graph->phase_state == phase_building) &&
1515 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1516 res->attr.frag_arr = new_frag_arr(res);
1523 new_Mod (ir_node *memop, ir_node *op1, ir_node *op2)
1526 res = new_r_Mod (current_ir_graph, current_ir_graph->current_block,
1528 #if PRECISE_EXC_CONTEXT
1529 if ((current_ir_graph->phase_state == phase_building) &&
1530 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1531 res->attr.frag_arr = new_frag_arr(res);
/* More comfortable constructors: bitwise/shift/compare/control-flow and
   memory operations, all delegating to the new_r_* interface with the
   current graph's current_block supplied implicitly. */
1538 new_And (ir_node *op1, ir_node *op2, ir_mode *mode)
1540 return new_r_And (current_ir_graph, current_ir_graph->current_block,
1545 new_Or (ir_node *op1, ir_node *op2, ir_mode *mode)
1547 return new_r_Or (current_ir_graph, current_ir_graph->current_block,
1552 new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode)
1554 return new_r_Eor (current_ir_graph, current_ir_graph->current_block,
1559 new_Not (ir_node *op, ir_mode *mode)
1561 return new_r_Not (current_ir_graph, current_ir_graph->current_block,
1566 new_Shl (ir_node *op, ir_node *k, ir_mode *mode)
1568 return new_r_Shl (current_ir_graph, current_ir_graph->current_block,
1573 new_Shr (ir_node *op, ir_node *k, ir_mode *mode)
1575 return new_r_Shr (current_ir_graph, current_ir_graph->current_block,
1580 new_Shrs (ir_node *op, ir_node *k, ir_mode *mode)
1582 return new_r_Shrs (current_ir_graph, current_ir_graph->current_block,
1587 new_Rotate (ir_node *op, ir_node *k, ir_mode *mode)
1589 return new_r_Rot (current_ir_graph, current_ir_graph->current_block,
1594 new_Abs (ir_node *op, ir_mode *mode)
1596 return new_r_Abs (current_ir_graph, current_ir_graph->current_block,
1601 new_Cmp (ir_node *op1, ir_node *op2)
1603 return new_r_Cmp (current_ir_graph, current_ir_graph->current_block,
1610 return new_r_Jmp (current_ir_graph, current_ir_graph->current_block);
1614 new_Cond (ir_node *c)
1616 return new_r_Cond (current_ir_graph, current_ir_graph->current_block, c);
/* Exception-capable nodes (Call/Load/Store/Alloc) attach a fragment
   array (new_frag_arr) under PRECISE_EXC_CONTEXT during graph
   construction, unless optimization replaced the node's op. */
1620 new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
1624 res = new_r_Call (current_ir_graph, current_ir_graph->current_block,
1625 store, callee, arity, in, type);
1626 #if PRECISE_EXC_CONTEXT
1627 if ((current_ir_graph->phase_state == phase_building) &&
1628 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
1629 res->attr.call.frag_arr = new_frag_arr(res);
1636 new_Return (ir_node* store, int arity, ir_node **in)
1638 return new_r_Return (current_ir_graph, current_ir_graph->current_block,
1643 new_Raise (ir_node *store, ir_node *obj)
1645 return new_r_Raise (current_ir_graph, current_ir_graph->current_block,
1650 new_Load (ir_node *store, ir_node *addr)
1653 res = new_r_Load (current_ir_graph, current_ir_graph->current_block,
1655 #if PRECISE_EXC_CONTEXT
1656 if ((current_ir_graph->phase_state == phase_building) &&
1657 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
1658 res->attr.frag_arr = new_frag_arr(res);
1665 new_Store (ir_node *store, ir_node *addr, ir_node *val)
1668 res = new_r_Store (current_ir_graph, current_ir_graph->current_block,
1670 #if PRECISE_EXC_CONTEXT
1671 if ((current_ir_graph->phase_state == phase_building) &&
1672 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
1673 res->attr.frag_arr = new_frag_arr(res);
1680 new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
1684 res = new_r_Alloc (current_ir_graph, current_ir_graph->current_block,
1685 store, size, alloc_type, where);
1686 #if PRECISE_EXC_CONTEXT
1687 if ((current_ir_graph->phase_state == phase_building) &&
1688 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
1689 res->attr.a.frag_arr = new_frag_arr(res);
1696 new_Free (ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1698 return new_r_Free (current_ir_graph, current_ir_graph->current_block,
1699 store, ptr, size, free_type);
/* Sel with no array indices: select a compound member directly. */
1703 new_simpleSel (ir_node *store, ir_node *objptr, entity *ent)
1704 /* GL: objptr was called frame before. Frame was a bad choice for the name
1705 as the operand could as well be a pointer to a dynamic object. */
1707 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1708 store, objptr, 0, NULL, ent);
1712 new_Sel (ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
1714 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1715 store, objptr, n_index, index, sel);
1719 new_SymConst (type_or_id_p value, symconst_kind kind)
1721 return new_r_SymConst (current_ir_graph, current_ir_graph->current_block,
1726 new_Sync (int arity, ir_node** in)
1728 return new_r_Sync (current_ir_graph, current_ir_graph->current_block,
/* The Bad node is unique per graph; just hand out the cached one. */
1736 return current_ir_graph->bad;
1739 /* ********************************************************************* */
1740 /* Comfortable interface with automatic Phi node construction. */
1741 /* (Uses also constructors of ?? interface, except new_Block.) */
1742 /* ********************************************************************* */
1744 /** Block construction **/
1745 /* immature Block without predecessors */
/* Allocates an immature Block (its predecessor list stays open, hence
   arity -1) and makes it the graph's current block. One graph_arr slot
   per local value (n_loc) is reserved and zeroed for the automatic
   Phi-node construction. */
1746 ir_node *new_immBlock (void) {
1749 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1750 /* creates a new dynamic in-array as length of in is -1 */
1751 res = new_ir_node (current_ir_graph, NULL, op_Block, mode_R, -1, NULL);
1752 current_ir_graph->current_block = res;
1753 res->attr.block.matured = 0;
1754 set_Block_block_visited(res, 0);
1756 /* Create and initialize array for Phi-node construction. */
1757 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1758 current_ir_graph->n_loc);
1759 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1761 /* Immature block may not be optimized! */
1767 /* add an edge to a jmp/control flow node */
/* Appends jmp as a further control-flow predecessor of an immature
   block; asserts if the block was already matured. */
1769 add_in_edge (ir_node *block, ir_node *jmp)
1771 if (block->attr.block.matured) {
1772 assert(0 && "Error: Block already matured!\n");
1775 assert (jmp != NULL);
1776 ARR_APP1 (ir_node *, block->in, jmp);
1780 /* changing the current block */
1782 switch_block (ir_node *target)
1784 current_ir_graph->current_block = target;
1787 /* ************************ */
1788 /* parameter administration */
1790 /* get a value from the parameter array from the current block by its index */
/* Slot 0 of graph_arr holds the memory state (see set_store below), so
   user values live at pos + 1. */
1792 get_value (int pos, ir_mode *mode)
1794 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1795 inc_irg_visited(current_ir_graph);
1796 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
1800 /* set a value at position pos in the parameter array from the current block */
1802 set_value (int pos, ir_node *value)
1804 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1805 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
1808 /* get the current store */
1812 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1813 /* GL: one could call get_value instead */
1814 inc_irg_visited(current_ir_graph);
1815 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
1818 /* set the current store */
1820 set_store (ir_node *store)
1822 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1823 /* GL: one could call set_value instead */
1824 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Registers ka as a keep-alive edge on the graph's End node so it is
   not removed by dead node elimination. */
1828 keep_alive (ir_node *ka)
1830 add_End_keepalive(current_ir_graph->end, ka);
1833 /** Useful access routines **/
1834 /* Returns the current block of the current graph. To set the current
1835 block use switch_block(). */
/* NOTE(review): "()" declares unspecified parameters in pre-C23 C;
   "(void)" would be the stricter spelling for both routines below. */
1836 ir_node *get_cur_block() {
1837 return get_irg_current_block(current_ir_graph);
1840 /* Returns the frame type of the current graph */
1841 type *get_cur_frame_type() {
1842 return get_irg_frame_type(current_ir_graph);
1846 /* ********************************************************************* */
1849 /* call once for each run of the library */
1855 /* call for each graph */
1857 finalize_cons (ir_graph *irg) {
1858 irg->phase_state = phase_high;