1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Authors: Martin Trapp, Christian Schaefer
6 ** ircons.c: basic and more detailed irnode constructors
7 ** store, block and parameter administration.
8 ** Adapted to extended FIRM nodes (exceptions...) and commented
9 ** by Goetz Lindenmaier
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
28 /* memset belongs to string.h */
31 #if USE_EXPICIT_PHI_IN_STACK
32 /* A stack needed for the automatic Phi node construction in constructor
33 Phi_in. Redefinition in irgraph.c!! */
/* Opaque forward declaration: the struct body lives elsewhere.
   NOTE(review): "EXPICIT" matches the macro spelling used consistently
   throughout this file; keep it until renamed everywhere at once. */
38 typedef struct Phi_in_stack Phi_in_stack;
41 /*** ******************************************** */
42 /** private interfaces, for professional use only */
44 /* Constructs a Block with a fixed number of predecessors.
45 Does not set current_block. Can not be used with automatic
46 Phi node construction. */
/* "Raw" Block constructor: allocates a Block node (mode_R) with the given
   control-flow predecessors, marks it matured and clears its
   block-visited counter. */
48 new_r_Block (ir_graph *irg, int arity, ir_node **in)
52 res = new_ir_node (irg, NULL, op_Block, mode_R, arity, in);
53 set_Block_matured(res, 1);
54 set_Block_block_visited(res, 0);
/* Raw Start constructor: Start yields a tuple (mode_T) and has no
   predecessors. */
61 new_r_Start (ir_graph *irg, ir_node *block)
65 res = new_ir_node (irg, block, op_Start, mode_T, 0, NULL);
/* Raw End constructor.  NOTE(review): arity -1 presumably requests a
   dynamic in-array so keep-alives can be appended later via
   add_End_keepalive -- confirm in new_ir_node. */
72 new_r_End (ir_graph *irg, ir_node *block)
76 res = new_ir_node (irg, block, op_End, mode_X, -1, NULL);
82 /* Creates a Phi node with all predecessors. Calling this constructor
83 is only allowed if the corresponding block is mature. */
85 new_r_Phi (ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* A Phi must have exactly one data predecessor per control-flow
   predecessor of its block. */
89 assert( get_Block_matured(block) );
90 assert( get_irn_arity(block) == arity );
92 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
97 /* Memory Phis in endless loops must be kept alive.
98 As we can't distinguish these easily we keep all of them alive. */
99 if ((res->op == op_Phi) && (mode == mode_M))
100 add_End_keepalive(irg->end, res);
/* Raw Const constructor.  NOTE(review): the tarval 'con' is not stored in
   the lines visible here -- presumably handled in elided code; confirm. */
105 new_r_Const (ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
108 res = new_ir_node (irg, block, op_Const, mode, 0, NULL);
110 res = optimize (res);
114 res = local_optimize_newby (res);
/* Raw Id constructor: an Id node simply forwards 'val'. */
121 new_r_Id (ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
123 ir_node *in[1] = {val};
125 res = new_ir_node (irg, block, op_Id, mode, 1, in);
126 res = optimize (res);
/* Raw Proj constructor: selects one result out of the tuple node 'arg'. */
132 new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
135 ir_node *in[1] = {arg};
137 res = new_ir_node (irg, block, op_Proj, mode, 1, in);
138 res->attr.proj = proj;
141 assert(get_Proj_pred(res));
142 assert(get_nodes_Block(get_Proj_pred(res)));
144 res = optimize (res);
/* Builds the default Proj of a Cond: marks the Cond's projection
   numbering as fragmentary and records 'max_proj' as the default case,
   then emits the corresponding mode_X Proj. */
152 new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
156 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
157 arg->attr.c.kind = fragmentary;
158 arg->attr.c.default_proj = max_proj;
159 res = new_r_Proj (irg, block, arg, mode_X, max_proj);
/* Raw Conv constructor: converts 'op' to the target mode. */
164 new_r_Conv (ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
166 ir_node *in[1] = {op};
168 res = new_ir_node (irg, block, op_Conv, mode, 1, in);
169 res = optimize (res);
/* Raw Tuple constructor: groups 'arity' values into one mode_T node. */
176 new_r_Tuple (ir_graph *irg, ir_node *block, int arity, ir_node **in)
180 res = new_ir_node (irg, block, op_Tuple, mode_T, arity, in);
181 res = optimize (res);
/* The arithmetic raw constructors below all follow one pattern: gather
   the operands into a stack-local in-array, allocate the node with
   new_ir_node, and run the local optimizer on the result.  The division
   family (Quot/DivMod/Div/Mod) additionally takes a memory operand and
   yields a tuple (mode_T). */
187 new_r_Add (ir_graph *irg, ir_node *block,
188 ir_node *op1, ir_node *op2, ir_mode *mode)
190 ir_node *in[2] = {op1, op2};
192 res = new_ir_node (irg, block, op_Add, mode, 2, in);
193 res = optimize (res);
199 new_r_Sub (ir_graph *irg, ir_node *block,
200 ir_node *op1, ir_node *op2, ir_mode *mode)
202 ir_node *in[2] = {op1, op2};
204 res = new_ir_node (irg, block, op_Sub, mode, 2, in);
205 res = optimize (res);
/* Unary negation. */
211 new_r_Minus (ir_graph *irg, ir_node *block,
212 ir_node *op, ir_mode *mode)
214 ir_node *in[1] = {op};
216 res = new_ir_node (irg, block, op_Minus, mode, 1, in);
217 res = optimize (res);
223 new_r_Mul (ir_graph *irg, ir_node *block,
224 ir_node *op1, ir_node *op2, ir_mode *mode)
226 ir_node *in[2] = {op1, op2};
228 res = new_ir_node (irg, block, op_Mul, mode, 2, in);
229 res = optimize (res);
235 new_r_Quot (ir_graph *irg, ir_node *block,
236 ir_node *memop, ir_node *op1, ir_node *op2)
238 ir_node *in[3] = {memop, op1, op2};
240 res = new_ir_node (irg, block, op_Quot, mode_T, 3, in);
241 res = optimize (res);
247 new_r_DivMod (ir_graph *irg, ir_node *block,
248 ir_node *memop, ir_node *op1, ir_node *op2)
250 ir_node *in[3] = {memop, op1, op2};
252 res = new_ir_node (irg, block, op_DivMod, mode_T, 3, in);
253 res = optimize (res);
259 new_r_Div (ir_graph *irg, ir_node *block,
260 ir_node *memop, ir_node *op1, ir_node *op2)
262 ir_node *in[3] = {memop, op1, op2};
264 res = new_ir_node (irg, block, op_Div, mode_T, 3, in);
265 res = optimize (res);
271 new_r_Mod (ir_graph *irg, ir_node *block,
272 ir_node *memop, ir_node *op1, ir_node *op2)
274 ir_node *in[3] = {memop, op1, op2};
276 res = new_ir_node (irg, block, op_Mod, mode_T, 3, in);
277 res = optimize (res);
/* Bitwise and shift/rotate raw constructors -- same collect/allocate/
   optimize pattern as the arithmetic constructors above. */
283 new_r_And (ir_graph *irg, ir_node *block,
284 ir_node *op1, ir_node *op2, ir_mode *mode)
286 ir_node *in[2] = {op1, op2};
288 res = new_ir_node (irg, block, op_And, mode, 2, in);
289 res = optimize (res);
295 new_r_Or (ir_graph *irg, ir_node *block,
296 ir_node *op1, ir_node *op2, ir_mode *mode)
298 ir_node *in[2] = {op1, op2};
300 res = new_ir_node (irg, block, op_Or, mode, 2, in);
301 res = optimize (res);
/* Eor = exclusive or. */
307 new_r_Eor (ir_graph *irg, ir_node *block,
308 ir_node *op1, ir_node *op2, ir_mode *mode)
310 ir_node *in[2] = {op1, op2};
312 res = new_ir_node (irg, block, op_Eor, mode, 2, in);
313 res = optimize (res);
319 new_r_Not (ir_graph *irg, ir_node *block,
320 ir_node *op, ir_mode *mode)
322 ir_node *in[1] = {op};
324 res = new_ir_node (irg, block, op_Not, mode, 1, in);
325 res = optimize (res);
/* Shift constructors: 'k' is the shift amount. */
331 new_r_Shl (ir_graph *irg, ir_node *block,
332 ir_node *op, ir_node *k, ir_mode *mode)
334 ir_node *in[2] = {op, k};
336 res = new_ir_node (irg, block, op_Shl, mode, 2, in);
337 res = optimize (res);
343 new_r_Shr (ir_graph *irg, ir_node *block,
344 ir_node *op, ir_node *k, ir_mode *mode)
346 ir_node *in[2] = {op, k};
348 res = new_ir_node (irg, block, op_Shr, mode, 2, in);
349 res = optimize (res);
/* Shrs: shift right, sign-preserving (arithmetic shift). */
355 new_r_Shrs (ir_graph *irg, ir_node *block,
356 ir_node *op, ir_node *k, ir_mode *mode)
358 ir_node *in[2] = {op, k};
360 res = new_ir_node (irg, block, op_Shrs, mode, 2, in);
361 res = optimize (res);
367 new_r_Rot (ir_graph *irg, ir_node *block,
368 ir_node *op, ir_node *k, ir_mode *mode)
370 ir_node *in[2] = {op, k};
372 res = new_ir_node (irg, block, op_Rot, mode, 2, in);
373 res = optimize (res);
/* Raw Abs constructor (unary absolute value). */
379 new_r_Abs (ir_graph *irg, ir_node *block,
380 ir_node *op, ir_mode *mode)
382 ir_node *in[1] = {op};
384 res = new_ir_node (irg, block, op_Abs, mode, 1, in);
385 res = optimize (res);
/* Raw Cmp constructor: a comparison yields a tuple (mode_T); individual
   relations are selected with Proj nodes. */
391 new_r_Cmp (ir_graph *irg, ir_node *block,
392 ir_node *op1, ir_node *op2)
394 ir_node *in[2] = {op1, op2};
396 res = new_ir_node (irg, block, op_Cmp, mode_T, 2, in);
397 res = optimize (res);
/* Raw Jmp constructor: unconditional control flow (mode_X), no data
   inputs.  NOTE(review): 'in' is not declared in the visible lines;
   presumably declared in elided code -- confirm. */
403 new_r_Jmp (ir_graph *irg, ir_node *block)
407 res = new_ir_node (irg, block, op_Jmp, mode_X, 0, in);
408 res = optimize (res);
/* Raw Cond constructor: starts with a dense projection numbering and
   default_proj 0 (may later be switched to fragmentary by
   new_r_defaultProj). */
414 new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c)
416 ir_node *in[1] = {c};
418 res = new_ir_node (irg, block, op_Cond, mode_T, 1, in);
419 res->attr.c.kind = dense;
420 res->attr.c.default_proj = 0;
421 res = optimize (res);
/* Raw Call constructor.  The real in-array has the layout
   [store, callee, arg0, ...], hence the memcpy into r_in starting at
   index 2; 'type' must be a method type. */
427 new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
428 ir_node *callee, int arity, ir_node **in, type *type)
435 NEW_ARR_A (ir_node *, r_in, r_arity);
438 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
440 res = new_ir_node (irg, block, op_Call, mode_T, r_arity, r_in);
442 assert(is_method_type(type));
443 set_Call_type(res, type);
444 res = optimize (res);
/* Raw Return constructor: in-array layout is [store, result0, ...], so
   the copy starts at index 1. */
450 new_r_Return (ir_graph *irg, ir_node *block,
451 ir_node *store, int arity, ir_node **in)
458 NEW_ARR_A (ir_node *, r_in, r_arity);
460 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
461 res = new_ir_node (irg, block, op_Return, mode_X, r_arity, r_in);
462 res = optimize (res);
/* Raw Raise constructor: raises exception object 'obj', threading the
   memory state 'store'. */
468 new_r_Raise (ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
470 ir_node *in[2] = {store, obj};
472 res = new_ir_node (irg, block, op_Raise, mode_T, 2, in);
/* Debug trace; __PRETTY_FUNCTION__ is a GCC extension. */
475 fprintf (stdout, "%s: res = %p\n", __PRETTY_FUNCTION__, res);
477 res = optimize (res);
/* Raw Load constructor: memory state + address; result is a tuple. */
483 new_r_Load (ir_graph *irg, ir_node *block,
484 ir_node *store, ir_node *adr)
486 ir_node *in[2] = {store, adr};
488 res = new_ir_node (irg, block, op_Load, mode_T, 2, in);
490 res = optimize (res);
/* Raw Store constructor: memory state + address + value to store. */
496 new_r_Store (ir_graph *irg, ir_node *block,
497 ir_node *store, ir_node *adr, ir_node *val)
499 ir_node *in[3] = {store, adr, val};
501 res = new_ir_node (irg, block, op_Store, mode_T, 3, in);
503 res = optimize (res);
/* Raw Alloc constructor: allocates 'size' units of 'alloc_type'; 'where'
   selects the allocation region (stack vs. heap). */
509 new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
510 ir_node *size, type *alloc_type, where_alloc where)
512 ir_node *in[2] = {store, size};
514 res = new_ir_node (irg, block, op_Alloc, mode_T, 2, in);
516 res->attr.a.where = where;
517 res->attr.a.type = alloc_type;
519 res = optimize (res);
/* Raw Free constructor: releases 'size' units of 'free_type' at 'ptr'. */
525 new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
526 ir_node *ptr, ir_node *size, type *free_type)
528 ir_node *in[3] = {store, ptr, size};
530 res = new_ir_node (irg, block, op_Free, mode_T, 3, in);
532 res->attr.f = free_type;
534 res = optimize (res);
/* Raw Sel constructor: computes the address of entity 'ent' relative to
   'objptr'.  In-array layout is [store, objptr, index0, ...], hence the
   copy into r_in starting at index 2; the result is a pointer (mode_p). */
540 new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
541 int arity, ir_node **in, entity *ent)
548 NEW_ARR_A (ir_node *, r_in, r_arity);
551 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
552 res = new_ir_node (irg, block, op_Sel, mode_p, r_arity, r_in);
554 res->attr.s.ltyp = static_linkage;
555 res->attr.s.ent = ent;
557 res = optimize (res);
/* Raw SymConst constructor: a symbolic constant whose payload is either
   an ident (linkage_ptr_info) or a type (type_tag / size).
   NOTE(review): 'mode' and 'in' are set in elided lines -- confirm how
   'mode' depends on symkind. */
563 new_r_SymConst (ir_graph *irg, ir_node *block, type_or_id_p value,
564 symconst_kind symkind)
569 if (symkind == linkage_ptr_info)
573 res = new_ir_node (irg, block, op_SymConst, mode, 0, in);
575 res->attr.i.num = symkind;
/* Store the payload in the union member matching the kind. */
576 if (symkind == linkage_ptr_info) {
577 res->attr.i.tori.ptrinfo = (ident *)value;
579 assert ( ( (symkind == type_tag)
580 || (symkind == size))
581 && (is_type(value)));
582 res->attr.i.tori.typ = (type *)value;
584 res = optimize (res);
/* Raw Sync constructor: joins several memory states into one (mode_M). */
590 new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in)
594 res = new_ir_node (irg, block, op_Sync, mode_M, arity, in);
596 res = optimize (res);
/* NOTE(review): this return belongs to a constructor whose header is not
   visible here; it hands out the graph's cached 'bad' node. */
604 return current_ir_graph->bad;
607 /** ********************/
608 /** public interfaces */
609 /** construction tools */
611 /****f* ircons/new_Start
614 * new_Start -- create a new Start node in the current block
617 * s = new_Start(void);
618 * ir_node* new_Start(void);
621 * s - pointer to the created Start node
/* Body of new_Start (header elided): builds a Start node in the current
   block of the current graph. */
630 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
631 op_Start, mode_T, 0, NULL);
633 res = optimize (res);
/* Body of new_End (header elided): mode_X, dynamic in-array (arity -1). */
642 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
643 op_End, mode_X, -1, NULL);
644 res = optimize (res);
650 /* Constructs a Block with a fixed number of predecessors.
651 Does set current_block. Can be used with automatic Phi
652 node construction. */
654 new_Block (int arity, ir_node **in)
658 res = new_r_Block (current_ir_graph, arity, in);
660 /* Create and initialize array for Phi-node construction. */
/* graph_arr caches, per local-variable position, the last value known in
   this block; n_loc entries, all initially NULL. */
661 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
662 current_ir_graph->n_loc);
663 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
665 res = optimize (res);
666 current_ir_graph->current_block = res;
673 /* ***********************************************************************/
674 /* Methods necessary for automatic Phi node creation */
676 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
677 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
678 ir_node *new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
679 ir_node *new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
681 Call Graph: ( A ---> B == A "calls" B)
683 get_value mature_block
691 get_r_value_internal |
695 new_r_Phi0 new_r_Phi_in
697 * *************************************************************************** */
699 /* Creates a Phi node with 0 predecessors */
/* Placeholder Phi for not-yet-matured blocks; completed later when the
   block's predecessor count is known. */
701 new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
704 res = new_ir_node (irg, block, op_Phi, mode, 0, NULL);
709 /* There are two implementations of the Phi node construction. The first
710 is faster, but does not work for blocks with more than 2 predecessors.
711 The second works always but is slower and causes more unnecessary Phi
713 Select the implementations by the following preprocessor flag set in
715 #if USE_FAST_PHI_CONSTRUCTION
717 /* This is a stack used for allocating and deallocating nodes in
718 new_r_Phi_in. The original implementation used the obstack
719 to model this stack, now it is explicit. This reduces side effects.
721 #if USE_EXPICIT_PHI_IN_STACK
/* Allocator for the Phi reuse stack (function header elided): malloc the
   struct and create its growable node array. */
726 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
728 res->stack = NEW_ARR_F (ir_node *, 1);
/* Releases a Phi_in_stack (body elided from this view). */
735 free_Phi_in_stack(Phi_in_stack *s) {
/* Pushes an obsolete Phi node onto the reuse stack: grows the array only
   when the stack is full, otherwise overwrites the free slot at 'pos'. */
740 void free_to_Phi_in_stack(ir_node *phi) {
741 assert(get_irn_opcode(phi) == iro_Phi);
743 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
744 current_ir_graph->Phi_in_stack->pos)
745 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
747 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
749 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node: freshly allocated when the reuse stack is empty,
   otherwise a recycled node from the stack whose in-array is rebuilt. */
753 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
754 int arity, ir_node **in) {
756 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
757 int pos = current_ir_graph->Phi_in_stack->pos;
761 /* We need to allocate a new node */
762 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
764 /* reuse the old node and initialize it again. */
767 assert (res->kind == k_ir_node);
768 assert (res->op == op_Phi);
773 /* ???!!! How to free the old in array?? */
/* Data predecessors are copied starting at index 1; slot 0 presumably
   holds the block pointer -- confirm against new_ir_node. */
774 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
776 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
778 (current_ir_graph->Phi_in_stack->pos)--;
782 #endif /* USE_EXPICIT_PHI_IN_STACK */
784 /* Creates a Phi node with a given, fixed array **in of predecessors.
785 If the Phi node is unnecessary, as the same value reaches the block
786 through all control flow paths, it is eliminated and the value
787 returned directly. This constructor is only intended for use in
788 the automatic Phi node generation triggered by get_value or mature.
789 The implementation is quite tricky and depends on the fact, that
790 the nodes are allocated on a stack:
791 The in array contains predecessors and NULLs. The NULLs appear,
792 if get_r_value_internal, that computed the predecessors, reached
793 the same block on two paths. In this case the same value reaches
794 this block on both paths, there is no definition in between. We need
795 not allocate a Phi where these paths merge, but we have to communicate
796 this fact to the caller. This happens by returning a pointer to the
797 node the caller _will_ allocate. (Yes, we predict the address. We can
798 do so because the nodes are allocated on the obstack.) The caller then
799 finds a pointer to itself and, when this routine is called again,
/* Fast-algorithm Phi-in constructor: allocates a candidate Phi, replaces
   NULL predecessors by the node itself, and frees the node again if it
   turns out to merge only a single distinct value. */
803 new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
804 ir_node **in, int ins)
807 ir_node *res, *known;
809 /* allocate a new node on the obstack.
810 This can return a node to which some of the pointers in the in-array
812 Attention: the constructor copies the in array, i.e., the later changes
813 to the array in this routine do not affect the constructed node! If
814 the in array contains NULLs, there will be missing predecessors in the
816 Is this a possible internal state of the Phi node generation? */
817 #if USE_EXPICIT_PHI_IN_STACK
818 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
820 res = known = new_ir_node (irg, block, op_Phi, mode, ins, in);
822 /* The in-array can contain NULLs. These were returned by
823 get_r_value_internal if it reached the same block/definition on a
825 The NULLs are replaced by the node itself to simplify the test in the
827 for (i=0; i < ins; ++i)
828 if (in[i] == NULL) in[i] = res;
830 /* This loop checks whether the Phi has more than one predecessor.
831 If so, it is a real Phi node and we break the loop. Else the
832 Phi node merges the same definition on several paths and therefore
834 for (i=0; i < ins; ++i)
836 if (in[i]==res || in[i]==known) continue;
844 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Give the speculative allocation back -- either to the reuse stack or
   directly to the obstack. */
846 #if USE_EXPICIT_PHI_IN_STACK
847 free_to_Phi_in_stack(res);
849 obstack_free (current_ir_graph->obst, res);
853 res = optimize (res);
857 /* return the pointer to the Phi node. This node might be deallocated! */
862 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
864 /** This function computes the predecessors for a real Phi node, and then
865 allocates and returns this node. The routine called to allocate the
866 node might optimize it away and return a real value, or even a pointer
867 to a deallocated Phi node on top of the obstack!
868 This function is called with an in-array of proper size. **/
869 static inline ir_node *
870 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
872 ir_node *prevBlock, *res;
875 /* This loop goes to all predecessor blocks of the block the Phi node is in
876 and there finds the operands of the Phi node by calling
877 get_r_value_internal. */
878 for (i = 1; i <= ins; ++i) {
879 assert (block->in[i]);
880 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
882 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
885 /* After collecting all predecessors into the array nin a new Phi node
886 with these predecessors is created. This constructor contains an
887 optimization: If all predecessors of the Phi node are identical it
888 returns the only operand instead of a new Phi node. If the value
889 passes two different control flow edges without being defined, and
890 this is the second path treated, a pointer to the node that will be
891 allocated for the first path (recursion) is returned. We already
892 know the address of this node, as it is the next node to be allocated
893 and will be placed on top of the obstack. (The obstack is a _stack_!) */
894 res = new_r_Phi_in (current_ir_graph, block, mode, nin, ins);
896 /* Now we know the value for "pos" and can enter it in the array with
897 all known local variables. Attention: this might be a pointer to
898 a node, that later will be allocated!!! See new_r_Phi_in.
899 If this is called in mature, after some set_value in the same block,
900 the proper value must not be overwritten:
902 get_value (makes Phi0, put's it into graph_arr)
903 set_value (overwrites Phi0 in graph_arr)
904 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
907 if (!block->attr.block.graph_arr[pos]) {
908 block->attr.block.graph_arr[pos] = res;
910 /* printf(" value already computed by %s\n",
911 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
917 /* This function returns the last definition of a variable. In case
918 this variable was last defined in a previous block, Phi nodes are
919 inserted. If the part of the firm graph containing the definition
920 is not yet constructed, a dummy Phi node is returned. */
922 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
925 /* There are 4 cases to treat.
927 1. The block is not mature and we visit it the first time. We can not
928 create a proper Phi node, therefore a Phi0, i.e., a Phi without
929 predecessors is returned. This node is added to the linked list (field
930 "link") of the containing block to be completed when this block is
931 matured. (Completion will add a new Phi and turn the Phi0 into an Id
934 2. The value is already known in this block, graph_arr[pos] is set and we
935 visit the block the first time. We can return the value without
936 creating any new nodes.
938 3. The block is mature and we visit it the first time. A Phi node needs
939 to be created (phi_merge). If the Phi is not needed, as all it's
940 operands are the same value reaching the block through different
941 paths, it's optimized away and the value itself is returned.
943 4. The block is mature, and we visit it the second time. Now two
944 subcases are possible:
945 * The value was computed completely the last time we were here. This
946 is the case if there is no loop. We can return the proper value.
947 * The recursion that visited this node and set the flag did not
948 return yet. We are computing a value in a loop and need to
949 break the recursion without knowing the result yet.
950 @@@ strange case. Straight forward we would create a Phi before
951 starting the computation of it's predecessors. In this case we will
952 find a Phi here in any case. The problem is that this implementation
953 only creates a Phi after computing the predecessors, so that it is
954 hard to compute self references of this Phi. @@@
955 There is no simple check for the second subcase. Therefore we check
956 for a second visit and treat all such cases as the second subcase.
957 Anyways, the basic situation is the same: we reached a block
958 on two paths without finding a definition of the value: No Phi
959 nodes are needed on both paths.
960 We return this information "Two paths, no Phi needed" by a very tricky
961 implementation that relies on the fact that an obstack is a stack and
962 will return a node with the same address on different allocations.
963 Look also at phi_merge and new_r_phi_in to understand this.
964 @@@ Unfortunately this does not work, see testprogram
965 three_cfpred_example.
969 /* case 4 -- already visited. */
970 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
972 /* visited the first time */
973 set_irn_visited(block, get_irg_visited(current_ir_graph));
975 /* Get the local valid value */
976 res = block->attr.block.graph_arr[pos];
978 /* case 2 -- If the value is actually computed, return it. */
979 if (res) { return res;};
981 if (block->attr.block.matured) { /* case 3 */
983 /* The Phi has the same amount of ins as the corresponding block. */
984 int ins = get_irn_arity(block);
986 NEW_ARR_A (ir_node *, nin, ins);
988 /* Phi merge collects the predecessors and then creates a node. */
989 res = phi_merge (block, pos, mode, nin, ins);
991 } else { /* case 1 */
992 /* The block is not mature, we don't know how many in's are needed. A Phi
993 with zero predecessors is created. Such a Phi node is called Phi0
994 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
995 to the list of Phi0 nodes in this block to be matured by mature_block
997 The Phi0 has to remember the pos of it's internal value. If the real
998 Phi is computed, pos is used to update the array with the local
1001 res = new_r_Phi0 (current_ir_graph, block, mode);
1002 res->attr.phi0_pos = pos;
1003 res->link = block->link;
1007 /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): the string literal below is cut off in this view; its
   continuation is on an elided line. */
1010 printf("Error: no value set. Use of undefined variable. Initializing
1012 assert (mode->code >= irm_f && mode->code <= irm_p);
/* Recovery: substitute the mode's null constant for the missing value. */
1013 res = new_r_Const (current_ir_graph, block, mode,
1014 tarval_mode_null[mode->code]);
1017 /* The local valid value is available now. */
1018 block->attr.block.graph_arr[pos] = res;
1025 /** This is the simple algorithm. It first generates a Phi0, then
1026 it starts the recursion. This causes an Id at the entry of
1027 every block that has no definition of the value! **/
1029 #if USE_EXPICIT_PHI_IN_STACK
/* The simple algorithm needs no explicit Phi reuse stack; provide no-op
   stand-ins so callers compile unchanged. */
1031 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1032 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Simple-algorithm Phi-in constructor: allocates the Phi, then removes it
   again (freeing to the obstack) if it merges only one distinct non-Bad
   value; memory Phis are kept alive via the End node. */
1036 new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1037 ir_node **in, int ins)
1040 ir_node *res, *known;
1042 /* Allocate a new node on the obstack. The allocation copies the in
1044 res = new_ir_node (irg, block, op_Phi, mode, ins, in);
1046 /* This loop checks whether the Phi has more than one predecessor.
1047 If so, it is a real Phi node and we break the loop. Else the
1048 Phi node merges the same definition on several paths and therefore
1049 is not needed. Don't consider Bad nodes! */
1051 for (i=0; i < ins; ++i)
1055 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1063 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1066 obstack_free (current_ir_graph->obst, res);
1069 /* A undefined value, e.g., in unreachable code. */
1073 res = optimize (res);
1075 /* Memory Phis in endless loops must be kept alive.
1076 As we can't distinguish these easily we keep all of them alive. */
1077 if ((res->op == op_Phi) && (mode == mode_M))
1078 add_End_keepalive(irg->end, res);
1085 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1087 #if PRECISE_EXC_CONTEXT
1088 static inline ir_node *
1089 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Builds the "fragment array" for a fragile op n: a snapshot of the
   current block's graph_arr in which slot 0 is replaced by the op's
   memory (mode_M) Proj, so exception paths see the right memory state. */
1092 new_frag_arr (ir_node *n) {
1095 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1096 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1097 sizeof(ir_node *)*current_ir_graph->n_loc);
1098 /* turn off optimization before allocating Proj nodes, as res isn't
1100 opt = get_optimize(); set_optimize(0);
1101 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* Calls deliver memory as Proj number 3, all other fragile ops as 0. */
1102 if (get_irn_op(n) == op_Call)
1103 arr[0] = new_Proj(n, mode_M, 3);
1105 arr[0] = new_Proj(n, mode_M, 0);
/* The sentinel slot n_loc-1 links this block's graph_arr to the op whose
   fragment array continues the chain (see set_frag_value). */
1107 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Fetches the fragment array stored in a fragile op's attributes; the
   attribute location depends on the opcode. */
1112 get_frag_arr (ir_node *n) {
1113 if (get_irn_op(n) == op_Call) {
1114 return n->attr.call.frag_arr;
1115 } else if (get_irn_op(n) == op_Alloc) {
1116 return n->attr.a.frag_arr;
1118 return n->attr.frag_arr;
/* Records val at pos in frag_arr only if that slot is still empty, then
   follows the chain of fragment arrays linked through the sentinel slot
   n_loc-1 and repeats. */
1123 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1124 if (!frag_arr[pos]) frag_arr[pos] = val;
1125 if (frag_arr[current_ir_graph->n_loc - 1])
1126 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Looks up the value at 'pos' as seen on the exception path of the
   fragile op 'cfOp': prefers the op's fragment array; if the block has a
   later local value a Phi must be built instead; otherwise falls back to
   the normal lookup.  Results are propagated into the fragment chain. */
1130 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1135 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1137 frag_arr = get_frag_arr(cfOp);
1138 res = frag_arr[pos];
1140 if (block->attr.block.graph_arr[pos]) {
1141 /* There was a set_value after the cfOp and no get_value before that
1142 set_value. We must build a Phi node now. */
1143 if (block->attr.block.matured) {
1144 int ins = get_irn_arity(block);
1146 NEW_ARR_A (ir_node *, nin, ins);
1147 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 placeholder, chained onto the
   block's list of pending Phi0s via 'link'. */
1149 res = new_r_Phi0 (current_ir_graph, block, mode);
1150 res->attr.phi0_pos = pos;
1151 res->link = block->link;
1155 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1156 but this should be better: (remove comment if this works) */
1157 /* It's a Phi, we can write this into all graph_arrs with NULL */
1158 set_frag_value(block->attr.block.graph_arr, pos, res);
1160 res = get_r_value_internal(block, pos, mode);
1161 set_frag_value(block->attr.block.graph_arr, pos, res);
1168 /** This function allocates a dummy Phi node to break recursions,
1169 computes the predecessors for the real phi node, and then
1170 allocates and returns this node. The routine called to allocate the
1171 node might optimize it away and return a real value.
1172 This function is called with an in-array of proper size. **/
1173 static inline ir_node *
1174 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1176 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1179 /* If this block has no value at pos create a Phi0 and remember it
1180 in graph_arr to break recursions.
1181 Else we may not set graph_arr as there a later value is remembered. */
1183 if (!block->attr.block.graph_arr[pos]) {
1184 /* This is commented out as collapsing to Bads is no good idea.
1185 Either we need an assert here, or we need to call a routine
1186 that deals with this case as appropriate for the given language.
1187 Right now a self referencing Id is created which will crash irg_vrfy().
1189 Even if all variables are defined before use, it can happen that
1190 we get to the start block, if a cond has been replaced by a tuple
1191 (bad, jmp). As the start has a self referencing control flow edge,
1192 we get a self referencing Id, which is hard to optimize away. We avoid
1193 this by defining the value as a Bad node.
1194 Returning a const with tarval_bad is a preliminary solution. In some
1195 situations we might want a Warning or an Error. */
1197 if (block == get_irg_start_block(current_ir_graph)) {
1198 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1199 /* We don't need to care about exception ops in the start block.
1200 There are none by definition. */
1201 return block->attr.block.graph_arr[pos];
/* Normal case: plant a Phi0 placeholder so recursive lookups through
   this block terminate. */
1203 phi0 = new_r_Phi0(current_ir_graph, block, mode);
1204 block->attr.block.graph_arr[pos] = phi0;
1205 #if PRECISE_EXC_CONTEXT
1206 /* Set graph_arr for fragile ops. Also here we should break recursion.
1207 We could choose a cyclic path through an cfop. But the recursion would
1208 break at some point. */
1209 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1214 /* This loop goes to all predecessor blocks of the block the Phi node
1215 is in and there finds the operands of the Phi node by calling
1216 get_r_value_internal. */
1217 for (i = 1; i <= ins; ++i) {
1218 prevCfOp = skip_Proj(block->in[i]);
1220 if (is_Bad(prevCfOp)) {
1221 /* In case a Cond has been optimized we would get right to the start block
1222 with an invalid definition. */
1223 nin[i-1] = new_Bad();
1226 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1228 if (!is_Bad(prevBlock)) {
1229 #if PRECISE_EXC_CONTEXT
/* Fragile predecessor op: the value must come from its exception-
   context fragment array, not the plain block lookup. */
1230 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1231 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1232 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1235 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1237 nin[i-1] = new_Bad();
1241 /* After collecting all predecessors into the array nin a new Phi node
1242 with these predecessors is created. This constructor contains an
1243 optimization: If all predecessors of the Phi node are identical it
1244 returns the only operand instead of a new Phi node. */
1245 res = new_r_Phi_in (current_ir_graph, block, mode, nin, ins);
1247 /* In case we allocated a Phi0 node at the beginning of this procedure,
1248 we need to exchange this Phi0 with the real Phi. */
1250 exchange(phi0, res);
1251 block->attr.block.graph_arr[pos] = res;
1252 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1253 only an optimization. */
1259 /* This function returns the last definition of a variable. In case
1260 this variable was last defined in a previous block, Phi nodes are
1261 inserted. If the part of the firm graph containing the definition
1262 is not yet constructed, a dummy Phi node is returned. */
/* block: block to search the value in; pos: slot index into the block's
   graph_arr value array (NOTE(review): slot 0 appears reserved for the
   store -- see get_store()/set_store() below -- confirm); mode: mode of
   the requested value. */
1264 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1267 /* There are 4 cases to treat.
1269 1. The block is not mature and we visit it the first time. We can not
1270 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1271 predecessors is returned. This node is added to the linked list (field
1272 "link") of the containing block to be completed when this block is
1273 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1276 2. The value is already known in this block, graph_arr[pos] is set and we
1277 visit the block the first time. We can return the value without
1278 creating any new nodes.
1280 3. The block is mature and we visit it the first time. A Phi node needs
1281 to be created (phi_merge). If the Phi is not needed, as all its
1282 operands are the same value reaching the block through different
1283 paths, it's optimized away and the value itself is returned.
1285 4. The block is mature, and we visit it the second time. Now two
1286 subcases are possible:
1287 * The value was computed completely the last time we were here. This
1288 is the case if there is no loop. We can return the proper value.
1289 * The recursion that visited this node and set the flag did not
1290 return yet. We are computing a value in a loop and need to
1291 break the recursion. This case only happens if we visited
1292 the same block with phi_merge before, which inserted a Phi0.
1293 So we return the Phi0.
1296 /* case 4 -- already visited. */
1297 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1298 /* As phi_merge allocates a Phi0 this value is always defined. Here
1299 is the critical difference of the two algorithms. */
1300 assert(block->attr.block.graph_arr[pos]);
1301 return block->attr.block.graph_arr[pos];
1304 /* visited the first time */
1305 set_irn_visited(block, get_irg_visited(current_ir_graph));
1307 /* Get the local valid value */
1308 res = block->attr.block.graph_arr[pos];
1310 /* case 2 -- If the value is actually computed, return it. */
1311 if (res) { return res; };
1313 if (block->attr.block.matured) { /* case 3 */
1315 /* The Phi has the same amount of ins as the corresponding block. */
1316 int ins = get_irn_arity(block);
1318 NEW_ARR_A (ir_node *, nin, ins);
1320 /* Phi merge collects the predecessors and then creates a node. */
1321 res = phi_merge (block, pos, mode, nin, ins);
1323 } else { /* case 1 */
1324 /* The block is not mature, we don't know how many in's are needed. A Phi
1325 with zero predecessors is created. Such a Phi node is called Phi0
1326 node. The Phi0 is then added to the list of Phi0 nodes in this block
1327 to be matured by mature_block later.
1328 The Phi0 has to remember the pos of its internal value. If the real
1329 Phi is computed, pos is used to update the array with the local
/* Chain the Phi0 into the block's "link" list; mature_block walks this
   list to replace each Phi0 with a real Phi. */
1331 res = new_r_Phi0 (current_ir_graph, block, mode);
1332 res->attr.phi0_pos = pos;
1333 res->link = block->link;
1337 /* If we get here, the frontend missed a use-before-definition error */
/* Recover by materializing a zero constant of the requested mode so
   construction can proceed despite the frontend bug. */
1340 printf("Error: no value set. Use of undefined variable. Initializing
1342 assert (mode->code >= irm_f && mode->code <= irm_p);
1343 res = new_r_Const (current_ir_graph, block, mode,
1344 tarval_mode_null[mode->code]);
1347 /* The local valid value is available now. Cache it so later lookups
   in this block hit case 2 or case 4 directly. */
1348 block->attr.block.graph_arr[pos] = res;
1353 #endif /* USE_FAST_PHI_CONSTRUCTION */
1355 /* ************************************************************************** */
1357 /** Finalize a Block node, when all control flows are known. */
1358 /** Acceptable parameters are only Block nodes. */
1360 mature_block (ir_node *block)
1367 assert (get_irn_opcode(block) == iro_Block);
1368 // assert (!get_Block_matured(block) && "Block already matured");
/* Maturing twice is a no-op: all work is guarded by the matured flag. */
1370 if (!get_Block_matured(block)) {
1372 /* An array for building the Phi nodes. */
1373 ins = ARR_LEN (block->in)-1;
1374 NEW_ARR_A (ir_node *, nin, ins);
1375 /* shouldn't we delete this array at the end of the procedure? @@@ memory leak? */
1377 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each Phi0 queued on block->link (by get_r_value_internal) is replaced
   by a proper Phi via phi_merge/exchange.
   NOTE(review): "next" must be captured before exchange() on a line not
   visible here -- confirm against the full source. */
1379 for (n = block->link; n; n=next) {
1380 inc_irg_visited(current_ir_graph);
1382 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1385 block->attr.block.matured = 1;
1387 /* Now, as the block is a finished firm node, we can optimize it.
1388 Since other nodes have been allocated since the block was created
1389 we can not free the node on the obstack. Therefore we have to call
1391 Unfortunately the optimization does not change a lot, as all allocated
1392 nodes refer to the unoptimized node.
1393 We can call _2, as global cse has no effect on blocks. */
1394 block = optimize_in_place_2(block);
/* Comfortable constructors: build a node in the current block of the
   current graph by delegating to the corresponding new_r_* constructor. */
1400 new_Phi (int arity, ir_node **in, ir_mode *mode)
1402 return new_r_Phi (current_ir_graph, current_ir_graph->current_block,
/* Note: constants are placed in the start_block, not the current block. */
1407 new_Const (ir_mode *mode, tarval *con)
1409 return new_r_Const (current_ir_graph, current_ir_graph->start_block,
1414 new_Id (ir_node *val, ir_mode *mode)
1416 return new_r_Id (current_ir_graph, current_ir_graph->current_block,
1421 new_Proj (ir_node *arg, ir_mode *mode, long proj)
1423 return new_r_Proj (current_ir_graph, current_ir_graph->current_block,
/* Default Proj for a Cond: marks the Cond as fragmentary, records the
   default projection number, then builds an X-mode Proj for it. */
1428 new_defaultProj (ir_node *arg, long max_proj)
1431 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
1432 arg->attr.c.kind = fragmentary;
1433 arg->attr.c.default_proj = max_proj;
1434 res = new_Proj (arg, mode_X, max_proj);
/* Unary / binary operation wrappers: each builds its node in the
   current block of the current graph. */
1439 new_Conv (ir_node *op, ir_mode *mode)
1441 return new_r_Conv (current_ir_graph, current_ir_graph->current_block,
1446 new_Tuple (int arity, ir_node **in)
1448 return new_r_Tuple (current_ir_graph, current_ir_graph->current_block,
1453 new_Add (ir_node *op1, ir_node *op2, ir_mode *mode)
1455 return new_r_Add (current_ir_graph, current_ir_graph->current_block,
1460 new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode)
1462 return new_r_Sub (current_ir_graph, current_ir_graph->current_block,
1468 new_Minus (ir_node *op, ir_mode *mode)
1470 return new_r_Minus (current_ir_graph, current_ir_graph->current_block,
1475 new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode)
1477 return new_r_Mul (current_ir_graph, current_ir_graph->current_block,
/* Division-family constructors. Each takes an explicit memory operand
   (these operations can raise exceptions). With PRECISE_EXC_CONTEXT,
   a frag_arr attribute is allocated during graph construction -- but
   only if the node survived as its own op (it could have been
   optimized away by the new_r_* constructor). */
1482 new_Quot (ir_node *memop, ir_node *op1, ir_node *op2)
1485 res = new_r_Quot (current_ir_graph, current_ir_graph->current_block,
1487 #if PRECISE_EXC_CONTEXT
1488 if ((current_ir_graph->phase_state == phase_building) &&
1489 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1490 res->attr.frag_arr = new_frag_arr(res);
1497 new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2)
1500 res = new_r_DivMod (current_ir_graph, current_ir_graph->current_block,
1502 #if PRECISE_EXC_CONTEXT
1503 if ((current_ir_graph->phase_state == phase_building) &&
1504 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1505 res->attr.frag_arr = new_frag_arr(res);
1512 new_Div (ir_node *memop, ir_node *op1, ir_node *op2)
1515 res = new_r_Div (current_ir_graph, current_ir_graph->current_block,
1517 #if PRECISE_EXC_CONTEXT
1518 if ((current_ir_graph->phase_state == phase_building) &&
1519 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1520 res->attr.frag_arr = new_frag_arr(res);
1527 new_Mod (ir_node *memop, ir_node *op1, ir_node *op2)
1530 res = new_r_Mod (current_ir_graph, current_ir_graph->current_block,
1532 #if PRECISE_EXC_CONTEXT
1533 if ((current_ir_graph->phase_state == phase_building) &&
1534 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1535 res->attr.frag_arr = new_frag_arr(res);
/* Bitwise, shift and comparison wrappers: each builds its node in the
   current block of the current graph. */
1542 new_And (ir_node *op1, ir_node *op2, ir_mode *mode)
1544 return new_r_And (current_ir_graph, current_ir_graph->current_block,
1549 new_Or (ir_node *op1, ir_node *op2, ir_mode *mode)
1551 return new_r_Or (current_ir_graph, current_ir_graph->current_block,
1556 new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode)
1558 return new_r_Eor (current_ir_graph, current_ir_graph->current_block,
1563 new_Not (ir_node *op, ir_mode *mode)
1565 return new_r_Not (current_ir_graph, current_ir_graph->current_block,
1570 new_Shl (ir_node *op, ir_node *k, ir_mode *mode)
1572 return new_r_Shl (current_ir_graph, current_ir_graph->current_block,
1577 new_Shr (ir_node *op, ir_node *k, ir_mode *mode)
1579 return new_r_Shr (current_ir_graph, current_ir_graph->current_block,
1584 new_Shrs (ir_node *op, ir_node *k, ir_mode *mode)
1586 return new_r_Shrs (current_ir_graph, current_ir_graph->current_block,
/* Note the naming mismatch: new_Rotate delegates to new_r_Rot. */
1591 new_Rotate (ir_node *op, ir_node *k, ir_mode *mode)
1593 return new_r_Rot (current_ir_graph, current_ir_graph->current_block,
1598 new_Abs (ir_node *op, ir_mode *mode)
1600 return new_r_Abs (current_ir_graph, current_ir_graph->current_block,
1605 new_Cmp (ir_node *op1, ir_node *op2)
1607 return new_r_Cmp (current_ir_graph, current_ir_graph->current_block,
/* Control-flow constructors in the current block. */
1614 return new_r_Jmp (current_ir_graph, current_ir_graph->current_block);
1618 new_Cond (ir_node *c)
1620 return new_r_Cond (current_ir_graph, current_ir_graph->current_block, c);
/* Call can raise exceptions: with PRECISE_EXC_CONTEXT a frag_arr is
   allocated while building, unless the node was optimized away. */
1624 new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
1628 res = new_r_Call (current_ir_graph, current_ir_graph->current_block,
1629 store, callee, arity, in, type);
1630 #if PRECISE_EXC_CONTEXT
1631 if ((current_ir_graph->phase_state == phase_building) &&
1632 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
1633 res->attr.call.frag_arr = new_frag_arr(res);
1640 new_Return (ir_node* store, int arity, ir_node **in)
1642 return new_r_Return (current_ir_graph, current_ir_graph->current_block,
1647 new_Raise (ir_node *store, ir_node *obj)
1649 return new_r_Raise (current_ir_graph, current_ir_graph->current_block,
/* Memory operation constructors (Load/Store/Alloc). All can raise
   exceptions, hence the same PRECISE_EXC_CONTEXT frag_arr handling as
   for the division family above. */
1654 new_Load (ir_node *store, ir_node *addr)
1657 res = new_r_Load (current_ir_graph, current_ir_graph->current_block,
1659 #if PRECISE_EXC_CONTEXT
1660 if ((current_ir_graph->phase_state == phase_building) &&
1661 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
1662 res->attr.frag_arr = new_frag_arr(res);
1669 new_Store (ir_node *store, ir_node *addr, ir_node *val)
1672 res = new_r_Store (current_ir_graph, current_ir_graph->current_block,
1674 #if PRECISE_EXC_CONTEXT
1675 if ((current_ir_graph->phase_state == phase_building) &&
1676 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
1677 res->attr.frag_arr = new_frag_arr(res);
1684 new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
1688 res = new_r_Alloc (current_ir_graph, current_ir_graph->current_block,
1689 store, size, alloc_type, where);
1690 #if PRECISE_EXC_CONTEXT
1691 if ((current_ir_graph->phase_state == phase_building) &&
1692 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
1693 res->attr.a.frag_arr = new_frag_arr(res);
/* Remaining node constructors in the current block. */
1700 new_Free (ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1702 return new_r_Free (current_ir_graph, current_ir_graph->current_block,
1703 store, ptr, size, free_type);
/* Sel without array indices (n_index = 0, index = NULL). */
1707 new_simpleSel (ir_node *store, ir_node *objptr, entity *ent)
1708 /* GL: objptr was called frame before. Frame was a bad choice for the name
1709 as the operand could as well be a pointer to a dynamic object. */
1711 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1712 store, objptr, 0, NULL, ent);
1716 new_Sel (ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
1718 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1719 store, objptr, n_index, index, sel);
1723 new_SymConst (type_or_id_p value, symconst_kind kind)
1725 return new_r_SymConst (current_ir_graph, current_ir_graph->current_block,
1730 new_Sync (int arity, ir_node** in)
1732 return new_r_Sync (current_ir_graph, current_ir_graph->current_block,
/* The Bad node is unique per graph; just hand out the cached one. */
1740 return current_ir_graph->bad;
1743 /* ********************************************************************* */
1744 /* Comfortable interface with automatic Phi node construction. */
1745 /* (Uses also constructors of ?? interface, except new_Block. */
1746 /* ********************************************************************* */
1748 /** Block construction **/
1749 /* immature Block without predecessors */
/* Allocates an immature block (predecessors added later via
   add_in_edge) and makes it the current block of the current graph. */
1750 ir_node *new_immBlock (void) {
1753 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1754 /* creates a new dynamic in-array as length of in is -1 */
1755 res = new_ir_node (current_ir_graph, NULL, op_Block, mode_R, -1, NULL);
1756 current_ir_graph->current_block = res;
1757 res->attr.block.matured = 0;
1758 set_Block_block_visited(res, 0);
1760 /* Create and initialize array for Phi-node construction. */
/* One slot per local variable (n_loc), zeroed so every value starts
   out undefined. */
1761 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1762 current_ir_graph->n_loc);
1763 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1765 /* Immature block may not be optimized! */
1771 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new control-flow predecessor of an (immature) block. */
1773 add_in_edge (ir_node *block, ir_node *jmp)
1775 if (block->attr.block.matured) {
1776 assert(0 && "Error: Block already matured!\n");
1779 assert (jmp != NULL);
1780 ARR_APP1 (ir_node *, block->in, jmp);
1784 /* changing the current block */
/* Makes target the current block; subsequent new_* constructors place
   their nodes there. */
1786 switch_block (ir_node *target)
1788 current_ir_graph->current_block = target;
1791 /* ************************ */
1792 /* parameter administration */
1794 /* get a value from the parameter array from the current block by its index */
1796 get_value (int pos, ir_mode *mode)
1798 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1799 inc_irg_visited(current_ir_graph);
/* pos is shifted by one: slot 0 of graph_arr holds the store (see
   get_store/set_store below). */
1800 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
1804 /* set a value at position pos in the parameter array from the current block */
1806 set_value (int pos, ir_node *value)
1808 assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* Same +1 shift as get_value: slot 0 is reserved for the store. */
1809 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
1812 /* get the current store */
/* The store lives in slot 0 of the block's value array, mode_M. */
1816 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1817 /* GL: one could call get_value instead */
1818 inc_irg_visited(current_ir_graph);
1819 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
1822 /* set the current store */
1824 set_store (ir_node *store)
1826 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1827 /* GL: one could call set_value instead */
1828 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Prevents ka from being removed as dead code by attaching it as a
   keep-alive edge to the graph's End node. */
1832 keep_alive (ir_node *ka)
1834 add_End_keepalive(current_ir_graph->end, ka);
1837 /** Useful access routines **/
1838 /* Returns the current block of the current graph. To set the current
1839 block use switch_block(). */
1840 ir_node *get_cur_block() {
1841 return get_irg_current_block(current_ir_graph);
1844 /* Returns the frame type of the current graph */
1845 type *get_cur_frame_type() {
1846 return get_irg_frame_type(current_ir_graph);
1850 /* ********************************************************************* */
1853 /* call once for each run of the library */
1859 /* call for each graph */
1861 finalize_cons (ir_graph *irg) {
1862 irg->phase_state = phase_high;