1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Authors: Martin Trapp, Christian Schaefer
6 ** ircons.c: basic and more detailed irnode constructors
7 ** store, block and parameter administration.
8 ** Adapted to extended FIRM nodes (exceptions...) and commented
9 ** by Goetz Lindenmaier
# include "irgraph_t.h"
# include "irnode_t.h"
# include "irmode_t.h"
# include "common_t.h"

/* memset/memcpy belong to string.h */
# include <string.h>
# include <stdlib.h>   /* malloc, free */
# include <stdio.h>    /* printf (use-before-def diagnostics) */

/* # include "exc.h" */
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
/* *********************************************** */
/** private interfaces, for professional use only **/
46 /* Constructs a Block with a fixed number of predecessors.
47 Does not set current_block. Can not be used with automatic
48 Phi node construction. */
50 new_r_Block (ir_graph *irg, int arity, ir_node **in)
54 res = new_ir_node (irg, NULL, op_Block, mode_R, arity, in);
55 set_Block_matured(res, 1);
56 set_Block_block_visited(res, 0);
58 res->attr.block.exc = exc_normal;
65 new_r_Start (ir_graph *irg, ir_node *block)
69 res = new_ir_node (irg, block, op_Start, mode_T, 0, NULL);
76 new_r_End (ir_graph *irg, ir_node *block)
80 res = new_ir_node (irg, block, op_End, mode_X, -1, NULL);
86 /* Creates a Phi node with all predecessors. Calling this constructor
87 is only allowed if the corresponding block is mature. */
89 new_r_Phi (ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
93 assert( get_Block_matured(block) );
94 assert( get_irn_arity(block) == arity );
96 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
101 /* Memory Phis in endless loops must be kept alive.
102 As we can't distinguish these easily we keep all of them alive. */
103 if ((res->op == op_Phi) && (mode == mode_M))
104 add_End_keepalive(irg->end, res);
109 new_r_Const (ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
112 res = new_ir_node (irg, block, op_Const, mode, 0, NULL);
114 res = optimize (res);
118 res = local_optimize_newby (res);
125 new_r_Id (ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
127 ir_node *in[1] = {val};
129 res = new_ir_node (irg, block, op_Id, mode, 1, in);
130 res = optimize (res);
136 new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
139 ir_node *in[1] = {arg};
141 res = new_ir_node (irg, block, op_Proj, mode, 1, in);
142 res->attr.proj = proj;
145 assert(get_Proj_pred(res));
146 assert(get_nodes_Block(get_Proj_pred(res)));
148 res = optimize (res);
156 new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
160 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
161 arg->attr.c.kind = fragmentary;
162 arg->attr.c.default_proj = max_proj;
163 res = new_r_Proj (irg, block, arg, mode_X, max_proj);
168 new_r_Conv (ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
170 ir_node *in[1] = {op};
172 res = new_ir_node (irg, block, op_Conv, mode, 1, in);
173 res = optimize (res);
180 new_r_Tuple (ir_graph *irg, ir_node *block, int arity, ir_node **in)
184 res = new_ir_node (irg, block, op_Tuple, mode_T, arity, in);
185 res = optimize (res);
191 new_r_Add (ir_graph *irg, ir_node *block,
192 ir_node *op1, ir_node *op2, ir_mode *mode)
194 ir_node *in[2] = {op1, op2};
196 res = new_ir_node (irg, block, op_Add, mode, 2, in);
197 res = optimize (res);
203 new_r_Sub (ir_graph *irg, ir_node *block,
204 ir_node *op1, ir_node *op2, ir_mode *mode)
206 ir_node *in[2] = {op1, op2};
208 res = new_ir_node (irg, block, op_Sub, mode, 2, in);
209 res = optimize (res);
215 new_r_Minus (ir_graph *irg, ir_node *block,
216 ir_node *op, ir_mode *mode)
218 ir_node *in[1] = {op};
220 res = new_ir_node (irg, block, op_Minus, mode, 1, in);
221 res = optimize (res);
227 new_r_Mul (ir_graph *irg, ir_node *block,
228 ir_node *op1, ir_node *op2, ir_mode *mode)
230 ir_node *in[2] = {op1, op2};
232 res = new_ir_node (irg, block, op_Mul, mode, 2, in);
233 res = optimize (res);
239 new_r_Quot (ir_graph *irg, ir_node *block,
240 ir_node *memop, ir_node *op1, ir_node *op2)
242 ir_node *in[3] = {memop, op1, op2};
244 res = new_ir_node (irg, block, op_Quot, mode_T, 3, in);
245 res = optimize (res);
251 new_r_DivMod (ir_graph *irg, ir_node *block,
252 ir_node *memop, ir_node *op1, ir_node *op2)
254 ir_node *in[3] = {memop, op1, op2};
256 res = new_ir_node (irg, block, op_DivMod, mode_T, 3, in);
257 res = optimize (res);
263 new_r_Div (ir_graph *irg, ir_node *block,
264 ir_node *memop, ir_node *op1, ir_node *op2)
266 ir_node *in[3] = {memop, op1, op2};
268 res = new_ir_node (irg, block, op_Div, mode_T, 3, in);
269 res = optimize (res);
275 new_r_Mod (ir_graph *irg, ir_node *block,
276 ir_node *memop, ir_node *op1, ir_node *op2)
278 ir_node *in[3] = {memop, op1, op2};
280 res = new_ir_node (irg, block, op_Mod, mode_T, 3, in);
281 res = optimize (res);
287 new_r_And (ir_graph *irg, ir_node *block,
288 ir_node *op1, ir_node *op2, ir_mode *mode)
290 ir_node *in[2] = {op1, op2};
292 res = new_ir_node (irg, block, op_And, mode, 2, in);
293 res = optimize (res);
299 new_r_Or (ir_graph *irg, ir_node *block,
300 ir_node *op1, ir_node *op2, ir_mode *mode)
302 ir_node *in[2] = {op1, op2};
304 res = new_ir_node (irg, block, op_Or, mode, 2, in);
305 res = optimize (res);
311 new_r_Eor (ir_graph *irg, ir_node *block,
312 ir_node *op1, ir_node *op2, ir_mode *mode)
314 ir_node *in[2] = {op1, op2};
316 res = new_ir_node (irg, block, op_Eor, mode, 2, in);
317 res = optimize (res);
323 new_r_Not (ir_graph *irg, ir_node *block,
324 ir_node *op, ir_mode *mode)
326 ir_node *in[1] = {op};
328 res = new_ir_node (irg, block, op_Not, mode, 1, in);
329 res = optimize (res);
335 new_r_Shl (ir_graph *irg, ir_node *block,
336 ir_node *op, ir_node *k, ir_mode *mode)
338 ir_node *in[2] = {op, k};
340 res = new_ir_node (irg, block, op_Shl, mode, 2, in);
341 res = optimize (res);
347 new_r_Shr (ir_graph *irg, ir_node *block,
348 ir_node *op, ir_node *k, ir_mode *mode)
350 ir_node *in[2] = {op, k};
352 res = new_ir_node (irg, block, op_Shr, mode, 2, in);
353 res = optimize (res);
359 new_r_Shrs (ir_graph *irg, ir_node *block,
360 ir_node *op, ir_node *k, ir_mode *mode)
362 ir_node *in[2] = {op, k};
364 res = new_ir_node (irg, block, op_Shrs, mode, 2, in);
365 res = optimize (res);
371 new_r_Rot (ir_graph *irg, ir_node *block,
372 ir_node *op, ir_node *k, ir_mode *mode)
374 ir_node *in[2] = {op, k};
376 res = new_ir_node (irg, block, op_Rot, mode, 2, in);
377 res = optimize (res);
383 new_r_Abs (ir_graph *irg, ir_node *block,
384 ir_node *op, ir_mode *mode)
386 ir_node *in[1] = {op};
388 res = new_ir_node (irg, block, op_Abs, mode, 1, in);
389 res = optimize (res);
395 new_r_Cmp (ir_graph *irg, ir_node *block,
396 ir_node *op1, ir_node *op2)
398 ir_node *in[2] = {op1, op2};
400 res = new_ir_node (irg, block, op_Cmp, mode_T, 2, in);
401 res = optimize (res);
407 new_r_Jmp (ir_graph *irg, ir_node *block)
411 res = new_ir_node (irg, block, op_Jmp, mode_X, 0, in);
412 res = optimize (res);
418 new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c)
420 ir_node *in[1] = {c};
422 res = new_ir_node (irg, block, op_Cond, mode_T, 1, in);
423 res->attr.c.kind = dense;
424 res->attr.c.default_proj = 0;
425 res = optimize (res);
431 new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
432 ir_node *callee, int arity, ir_node **in, type *type)
439 NEW_ARR_A (ir_node *, r_in, r_arity);
442 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
444 res = new_ir_node (irg, block, op_Call, mode_T, r_arity, r_in);
446 assert(is_method_type(type));
447 set_Call_type(res, type);
448 res = optimize (res);
454 new_r_Return (ir_graph *irg, ir_node *block,
455 ir_node *store, int arity, ir_node **in)
462 NEW_ARR_A (ir_node *, r_in, r_arity);
464 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
465 res = new_ir_node (irg, block, op_Return, mode_X, r_arity, r_in);
466 res = optimize (res);
472 new_r_Raise (ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
474 ir_node *in[2] = {store, obj};
476 res = new_ir_node (irg, block, op_Raise, mode_T, 2, in);
477 res = optimize (res);
483 new_r_Load (ir_graph *irg, ir_node *block,
484 ir_node *store, ir_node *adr)
486 ir_node *in[2] = {store, adr};
488 res = new_ir_node (irg, block, op_Load, mode_T, 2, in);
490 res = optimize (res);
496 new_r_Store (ir_graph *irg, ir_node *block,
497 ir_node *store, ir_node *adr, ir_node *val)
499 ir_node *in[3] = {store, adr, val};
501 res = new_ir_node (irg, block, op_Store, mode_T, 3, in);
503 res = optimize (res);
510 new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
511 ir_node *size, type *alloc_type, where_alloc where)
513 ir_node *in[2] = {store, size};
515 res = new_ir_node (irg, block, op_Alloc, mode_T, 2, in);
517 res->attr.a.where = where;
518 res->attr.a.type = alloc_type;
520 res = optimize (res);
526 new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
527 ir_node *ptr, ir_node *size, type *free_type)
529 ir_node *in[3] = {store, ptr, size};
531 res = new_ir_node (irg, block, op_Free, mode_T, 3, in);
533 res->attr.f = free_type;
535 res = optimize (res);
541 new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
542 int arity, ir_node **in, entity *ent)
549 NEW_ARR_A (ir_node *, r_in, r_arity);
552 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
553 res = new_ir_node (irg, block, op_Sel, mode_p, r_arity, r_in);
555 res->attr.s.ltyp = static_linkage;
556 res->attr.s.ent = ent;
558 res = optimize (res);
564 new_r_SymConst (ir_graph *irg, ir_node *block, type_or_id_p value,
565 symconst_kind symkind)
570 if (symkind == linkage_ptr_info)
574 res = new_ir_node (irg, block, op_SymConst, mode, 0, in);
576 res->attr.i.num = symkind;
577 if (symkind == linkage_ptr_info) {
578 res->attr.i.tori.ptrinfo = (ident *)value;
580 assert ( ( (symkind == type_tag)
581 || (symkind == size))
582 && (is_type(value)));
583 res->attr.i.tori.typ = (type *)value;
585 res = optimize (res);
591 new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in)
595 res = new_ir_node (irg, block, op_Sync, mode_M, arity, in);
597 res = optimize (res);
605 return current_ir_graph->bad;
608 /** ********************/
609 /** public interfaces */
610 /** construction tools */
612 /****f* ircons/new_Start
615 * new_Start -- create a new Start node in the current block
618 * s = new_Start(void);
619 * ir_node* new_Start(void);
622 * s - pointer to the created Start node
631 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
632 op_Start, mode_T, 0, NULL);
634 res = optimize (res);
643 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
644 op_End, mode_X, -1, NULL);
645 res = optimize (res);
651 /* Constructs a Block with a fixed number of predecessors.
652 Does set current_block. Can be used with automatic Phi
653 node construction. */
655 new_Block (int arity, ir_node **in)
659 res = new_r_Block (current_ir_graph, arity, in);
661 /* Create and initialize array for Phi-node construction. */
662 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
663 current_ir_graph->n_loc);
664 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
666 res = optimize (res);
667 current_ir_graph->current_block = res;
674 /* ***********************************************************************/
675 /* Methods necessary for automatic Phi node creation */
677 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
678 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
679 ir_node *new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
680 ir_node *new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
682 Call Graph: ( A ---> B == A "calls" B)
684 get_value mature_block
692 get_r_value_internal |
696 new_r_Phi0 new_r_Phi_in
698 * *************************************************************************** */
700 /* Creates a Phi node with 0 predecessors */
702 new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
705 res = new_ir_node (irg, block, op_Phi, mode, 0, NULL);
710 /* There are two implementations of the Phi node construction. The first
711 is faster, but does not work for blocks with more than 2 predecessors.
712 The second works always but is slower and causes more unnecessary Phi
714 Select the implementations by the following preprocessor flag set in
716 #if USE_FAST_PHI_CONSTRUCTION
718 /* This is a stack used for allocating and deallocating nodes in
719 new_r_Phi_in. The original implementation used the obstack
720 to model this stack, now it is explicit. This reduces side effects.
722 #if USE_EXPLICIT_PHI_IN_STACK
727 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
729 res->stack = NEW_ARR_F (ir_node *, 1);
736 free_Phi_in_stack(Phi_in_stack *s) {
741 void free_to_Phi_in_stack(ir_node *phi) {
742 assert(get_irn_opcode(phi) == iro_Phi);
744 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
745 current_ir_graph->Phi_in_stack->pos)
746 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
748 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
750 (current_ir_graph->Phi_in_stack->pos)++;
754 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
755 int arity, ir_node **in) {
757 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
758 int pos = current_ir_graph->Phi_in_stack->pos;
762 /* We need to allocate a new node */
763 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
765 /* reuse the old node and initialize it again. */
768 assert (res->kind == k_ir_node);
769 assert (res->op == op_Phi);
774 /* ???!!! How to free the old in array?? */
775 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
777 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
779 (current_ir_graph->Phi_in_stack->pos)--;
783 #endif /* USE_EXPLICIT_PHI_IN_STACK */
785 /* Creates a Phi node with a given, fixed array **in of predecessors.
786 If the Phi node is unnecessary, as the same value reaches the block
787 through all control flow paths, it is eliminated and the value
788 returned directly. This constructor is only intended for use in
789 the automatic Phi node generation triggered by get_value or mature.
790 The implementation is quite tricky and depends on the fact, that
791 the nodes are allocated on a stack:
792 The in array contains predecessors and NULLs. The NULLs appear,
793 if get_r_value_internal, that computed the predecessors, reached
794 the same block on two paths. In this case the same value reaches
795 this block on both paths, there is no definition in between. We need
796 not allocate a Phi where these path's merge, but we have to communicate
797 this fact to the caller. This happens by returning a pointer to the
798 node the caller _will_ allocate. (Yes, we predict the address. We can
799 do so because the nodes are allocated on the obstack.) The caller then
800 finds a pointer to itself and, when this routine is called again,
804 new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
805 ir_node **in, int ins)
808 ir_node *res, *known;
810 /* allocate a new node on the obstack.
811 This can return a node to which some of the pointers in the in-array
813 Attention: the constructor copies the in array, i.e., the later changes
814 to the array in this routine do not affect the constructed node! If
815 the in array contains NULLs, there will be missing predecessors in the
817 Is this a possible internal state of the Phi node generation? */
818 #if USE_EXPLICIT_PHI_IN_STACK
819 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
821 res = known = new_ir_node (irg, block, op_Phi, mode, ins, in);
823 /* The in-array can contain NULLs. These were returned by
824 get_r_value_internal if it reached the same block/definition on a
826 The NULLs are replaced by the node itself to simplify the test in the
828 for (i=0; i < ins; ++i)
829 if (in[i] == NULL) in[i] = res;
831 /* This loop checks whether the Phi has more than one predecessor.
832 If so, it is a real Phi node and we break the loop. Else the
833 Phi node merges the same definition on several paths and therefore
835 for (i=0; i < ins; ++i)
837 if (in[i]==res || in[i]==known) continue;
845 /* i==ins: there is at most one predecessor, we don't need a phi node. */
847 #if USE_EXPLICIT_PHI_IN_STACK
848 free_to_Phi_in_stack(res);
850 obstack_free (current_ir_graph->obst, res);
854 res = optimize (res);
858 /* return the pointer to the Phi node. This node might be deallocated! */
863 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
865 /** This function computes the predecessors for a real Phi node, and then
866 allocates and returns this node. The routine called to allocate the
867 node might optimize it away and return a real value, or even a pointer
868 to a deallocated Phi node on top of the obstack!
869 This function is called with an in-array of proper size. **/
870 static inline ir_node *
871 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
873 ir_node *prevBlock, *res;
876 /* This loop goes to all predecessor blocks of the block the Phi node is in
877 and there finds the operands of the Phi node by calling
878 get_r_value_internal. */
879 for (i = 1; i <= ins; ++i) {
880 assert (block->in[i]);
881 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
883 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
886 /* After collecting all predecessors into the array nin a new Phi node
887 with these predecessors is created. This constructor contains an
888 optimization: If all predecessors of the Phi node are identical it
889 returns the only operand instead of a new Phi node. If the value
890 passes two different control flow edges without being defined, and
891 this is the second path treated, a pointer to the node that will be
892 allocated for the first path (recursion) is returned. We already
893 know the address of this node, as it is the next node to be allocated
894 and will be placed on top of the obstack. (The obstack is a _stack_!) */
895 res = new_r_Phi_in (current_ir_graph, block, mode, nin, ins);
897 /* Now we now the value for "pos" and can enter it in the array with
898 all known local variables. Attention: this might be a pointer to
899 a node, that later will be allocated!!! See new_r_Phi_in.
900 If this is called in mature, after some set_value in the same block,
901 the proper value must not be overwritten:
903 get_value (makes Phi0, put's it into graph_arr)
904 set_value (overwrites Phi0 in graph_arr)
905 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
908 if (!block->attr.block.graph_arr[pos]) {
909 block->attr.block.graph_arr[pos] = res;
911 /* printf(" value already computed by %s\n",
912 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
918 /* This function returns the last definition of a variable. In case
919 this variable was last defined in a previous block, Phi nodes are
920 inserted. If the part of the firm graph containing the definition
921 is not yet constructed, a dummy Phi node is returned. */
923 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
926 /* There are 4 cases to treat.
928 1. The block is not mature and we visit it the first time. We can not
929 create a proper Phi node, therefore a Phi0, i.e., a Phi without
930 predecessors is returned. This node is added to the linked list (field
931 "link") of the containing block to be completed when this block is
932 matured. (Completion will add a new Phi and turn the Phi0 into an Id
935 2. The value is already known in this block, graph_arr[pos] is set and we
936 visit the block the first time. We can return the value without
937 creating any new nodes.
939 3. The block is mature and we visit it the first time. A Phi node needs
940 to be created (phi_merge). If the Phi is not needed, as all it's
941 operands are the same value reaching the block through different
942 paths, it's optimized away and the value itself is returned.
944 4. The block is mature, and we visit it the second time. Now two
945 subcases are possible:
946 * The value was computed completely the last time we were here. This
947 is the case if there is no loop. We can return the proper value.
948 * The recursion that visited this node and set the flag did not
949 return yet. We are computing a value in a loop and need to
950 break the recursion without knowing the result yet.
951 @@@ strange case. Straight forward we would create a Phi before
952 starting the computation of it's predecessors. In this case we will
953 find a Phi here in any case. The problem is that this implementation
954 only creates a Phi after computing the predecessors, so that it is
955 hard to compute self references of this Phi. @@@
956 There is no simple check for the second subcase. Therefore we check
957 for a second visit and treat all such cases as the second subcase.
958 Anyways, the basic situation is the same: we reached a block
959 on two paths without finding a definition of the value: No Phi
960 nodes are needed on both paths.
961 We return this information "Two paths, no Phi needed" by a very tricky
962 implementation that relies on the fact that an obstack is a stack and
963 will return a node with the same address on different allocations.
964 Look also at phi_merge and new_r_phi_in to understand this.
965 @@@ Unfortunately this does not work, see testprogram
966 three_cfpred_example.
970 /* case 4 -- already visited. */
971 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
973 /* visited the first time */
974 set_irn_visited(block, get_irg_visited(current_ir_graph));
976 /* Get the local valid value */
977 res = block->attr.block.graph_arr[pos];
979 /* case 2 -- If the value is actually computed, return it. */
980 if (res) { return res;};
982 if (block->attr.block.matured) { /* case 3 */
984 /* The Phi has the same amount of ins as the corresponding block. */
985 int ins = get_irn_arity(block);
987 NEW_ARR_A (ir_node *, nin, ins);
989 /* Phi merge collects the predecessors and then creates a node. */
990 res = phi_merge (block, pos, mode, nin, ins);
992 } else { /* case 1 */
993 /* The block is not mature, we don't know how many in's are needed. A Phi
994 with zero predecessors is created. Such a Phi node is called Phi0
995 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
996 to the list of Phi0 nodes in this block to be matured by mature_block
998 The Phi0 has to remember the pos of it's internal value. If the real
999 Phi is computed, pos is used to update the array with the local
1002 res = new_r_Phi0 (current_ir_graph, block, mode);
1003 res->attr.phi0_pos = pos;
1004 res->link = block->link;
1008 /* If we get here, the frontend missed a use-before-definition error */
1011 printf("Error: no value set. Use of undefined variable. Initializing
1013 assert (mode->code >= irm_f && mode->code <= irm_p);
1014 res = new_r_Const (current_ir_graph, block, mode,
1015 tarval_mode_null[mode->code]);
1018 /* The local valid value is available now. */
1019 block->attr.block.graph_arr[pos] = res;
1026 /** This is the simple algorithm. If first generates a Phi0, then
1027 it starts the recursion. This causes an Id at the entry of
1028 every block that has no definition of the value! **/
1030 #if USE_EXPLICIT_PHI_IN_STACK
1032 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1033 void free_Phi_in_stack(Phi_in_stack *s) { }
1037 new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1038 ir_node **in, int ins)
1041 ir_node *res, *known;
1043 /* Allocate a new node on the obstack. The allocation copies the in
1045 res = new_ir_node (irg, block, op_Phi, mode, ins, in);
1047 /* This loop checks whether the Phi has more than one predecessor.
1048 If so, it is a real Phi node and we break the loop. Else the
1049 Phi node merges the same definition on several paths and therefore
1050 is not needed. Don't consider Bad nodes! */
1052 for (i=0; i < ins; ++i)
1056 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1064 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1067 obstack_free (current_ir_graph->obst, res);
1070 /* A undefined value, e.g., in unreachable code. */
1074 res = optimize (res);
1076 /* Memory Phis in endless loops must be kept alive.
1077 As we can't distinguish these easily we keep all of the alive. */
1078 if ((res->op == op_Phi) && (mode == mode_M))
1079 add_End_keepalive(irg->end, res);
1086 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1088 #if PRECISE_EXC_CONTEXT
1089 static inline ir_node *
1090 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1093 new_frag_arr (ir_node *n) {
1096 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1097 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1098 sizeof(ir_node *)*current_ir_graph->n_loc);
1099 /* turn off optimization before allocating Proj nodes, as res isn't
1101 opt = get_optimize(); set_optimize(0);
1102 /* Here we rely on the fact that all frag ops have Memory as first result! */
1103 if (get_irn_op(n) == op_Call)
1104 arr[0] = new_Proj(n, mode_M, 3);
1106 arr[0] = new_Proj(n, mode_M, 0);
1108 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1113 get_frag_arr (ir_node *n) {
1114 if (get_irn_op(n) == op_Call) {
1115 return n->attr.call.frag_arr;
1116 } else if (get_irn_op(n) == op_Alloc) {
1117 return n->attr.a.frag_arr;
1119 return n->attr.frag_arr;
1124 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1125 if (!frag_arr[pos]) frag_arr[pos] = val;
1126 if (frag_arr[current_ir_graph->n_loc - 1])
1127 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
1131 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1136 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1138 frag_arr = get_frag_arr(cfOp);
1139 res = frag_arr[pos];
1141 if (block->attr.block.graph_arr[pos]) {
1142 /* There was a set_value after the cfOp and no get_value before that
1143 set_value. We must build a Phi node now. */
1144 if (block->attr.block.matured) {
1145 int ins = get_irn_arity(block);
1147 NEW_ARR_A (ir_node *, nin, ins);
1148 res = phi_merge(block, pos, mode, nin, ins);
1150 res = new_r_Phi0 (current_ir_graph, block, mode);
1151 res->attr.phi0_pos = pos;
1152 res->link = block->link;
1156 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1157 but this should be better: (remove comment if this works) */
1158 /* It's a Phi, we can write this into all graph_arrs with NULL */
1159 set_frag_value(block->attr.block.graph_arr, pos, res);
1161 res = get_r_value_internal(block, pos, mode);
1162 set_frag_value(block->attr.block.graph_arr, pos, res);
1169 /** This function allocates a dummy Phi node to break recursions,
1170 computes the predecessors for the real phi node, and then
1171 allocates and returns this node. The routine called to allocate the
1172 node might optimize it away and return a real value.
1173 This function is called with an in-array of proper size. **/
1174 static inline ir_node *
1175 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1177 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1180 /* If this block has no value at pos create a Phi0 and remember it
1181 in graph_arr to break recursions.
1182 Else we may not set graph_arr as there a later value is remembered. */
1184 if (!block->attr.block.graph_arr[pos]) {
1185 /* This is commented out as collapsing to Bads is no good idea.
1186 Either we need an assert here, or we need to call a routine
1187 that deals with this case as appropriate for the given language.
1188 Right now a self referencing Id is created which will crash irg_vrfy().
1190 Even if all variables are defined before use, it can happen that
1191 we get to the start block, if a cond has been replaced by a tuple
1192 (bad, jmp). As the start has a self referencing control flow edge,
1193 we get a self referencing Id, which is hard to optimize away. We avoid
1194 this by defining the value as a Bad node.
1195 Returning a const with tarval_bad is a preliminary solution. In some
1196 situations we might want a Warning or an Error. */
1198 if (block == get_irg_start_block(current_ir_graph)) {
1199 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1200 /* We don't need to care about exception ops in the start block.
1201 There are none by definition. */
1202 return block->attr.block.graph_arr[pos];
1204 phi0 = new_r_Phi0(current_ir_graph, block, mode);
1205 block->attr.block.graph_arr[pos] = phi0;
1206 #if PRECISE_EXC_CONTEXT
1207 /* Set graph_arr for fragile ops. Also here we should break recursion.
1208 We could choose a cyclic path through an cfop. But the recursion would
1209 break at some point. */
1210 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1215 /* This loop goes to all predecessor blocks of the block the Phi node
1216 is in and there finds the operands of the Phi node by calling
1217 get_r_value_internal. */
1218 for (i = 1; i <= ins; ++i) {
1219 prevCfOp = skip_Proj(block->in[i]);
1221 if (is_Bad(prevCfOp)) {
1222 /* In case a Cond has been optimized we would get right to the start block
1223 with an invalid definition. */
1224 nin[i-1] = new_Bad();
1227 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1229 if (!is_Bad(prevBlock)) {
1230 #if PRECISE_EXC_CONTEXT
1231 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1232 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1233 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1236 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1238 nin[i-1] = new_Bad();
1242 /* After collecting all predecessors into the array nin a new Phi node
1243 with these predecessors is created. This constructor contains an
1244 optimization: If all predecessors of the Phi node are identical it
1245 returns the only operand instead of a new Phi node. */
1246 res = new_r_Phi_in (current_ir_graph, block, mode, nin, ins);
1248 /* In case we allocated a Phi0 node at the beginning of this procedure,
1249 we need to exchange this Phi0 with the real Phi. */
1251 exchange(phi0, res);
1252 block->attr.block.graph_arr[pos] = res;
1253 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1254 only an optimization. */
1260 /* This function returns the last definition of a variable. In case
1261 this variable was last defined in a previous block, Phi nodes are
1262 inserted. If the part of the firm graph containing the definition
1263 is not yet constructed, a dummy Phi node is returned. */
/* block: block to search the value in.
   pos:   slot in the block's graph_arr (slot 0 is the store/memory value,
          user values start at 1 -- see get_value()/get_store() below).
   mode:  mode of the value looked up; used when a Phi/Phi0/Const must be
          created. */
1265 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1268 /* There are 4 cases to treat.
1270 1. The block is not mature and we visit it the first time. We can not
1271 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1272 predecessors is returned. This node is added to the linked list (field
1273 "link") of the containing block to be completed when this block is
1274 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1277 2. The value is already known in this block, graph_arr[pos] is set and we
1278 visit the block the first time. We can return the value without
1279 creating any new nodes.
1281 3. The block is mature and we visit it the first time. A Phi node needs
1282 to be created (phi_merge). If the Phi is not needed, as all it's
1283 operands are the same value reaching the block through different
1284 paths, it's optimized away and the value itself is returned.
1286 4. The block is mature, and we visit it the second time. Now two
1287 subcases are possible:
1288 * The value was computed completely the last time we were here. This
1289 is the case if there is no loop. We can return the proper value.
1290 * The recursion that visited this node and set the flag did not
1291 return yet. We are computing a value in a loop and need to
1292 break the recursion. This case only happens if we visited
1293 the same block with phi_merge before, which inserted a Phi0.
1294 So we return the Phi0.
1297 /* case 4 -- already visited. */
1298 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1299 /* As phi_merge allocates a Phi0 this value is always defined. Here
1300 is the critical difference of the two algorithms. */
1301 assert(block->attr.block.graph_arr[pos]);
1302 return block->attr.block.graph_arr[pos];
1305 /* visited the first time */
1306 set_irn_visited(block, get_irg_visited(current_ir_graph));
1308 /* Get the local valid value */
1309 res = block->attr.block.graph_arr[pos];
1311 /* case 2 -- If the value is actually computed, return it. */
1312 if (res) { return res; };
1314 if (block->attr.block.matured) { /* case 3 */
1316 /* The Phi has the same amount of ins as the corresponding block. */
1317 int ins = get_irn_arity(block);
1319 NEW_ARR_A (ir_node *, nin, ins);
1321 /* Phi merge collects the predecessors and then creates a node. */
1322 res = phi_merge (block, pos, mode, nin, ins);
1324 } else { /* case 1 */
1325 /* The block is not mature, we don't know how many in's are needed. A Phi
1326 with zero predecessors is created. Such a Phi node is called Phi0
1327 node. The Phi0 is then added to the list of Phi0 nodes in this block
1328 to be matured by mature_block later.
1329 The Phi0 has to remember the pos of it's internal value. If the real
1330 Phi is computed, pos is used to update the array with the local
1332 res = new_r_Phi0 (current_ir_graph, block, mode);
1333 res->attr.phi0_pos = pos;
/* Chain the Phi0 into the block's list of pending Phi0s (field "link");
   mature_block walks this list. */
1334 res->link = block->link;
1338 /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): the printf format string below appears truncated in this
   listing (no closing quote/paren visible); verify the full message against
   the original source. */
1341 printf("Error: no value set. Use of undefined variable. Initializing
/* Fall back to a zero constant of the requested mode so construction can
   continue despite the frontend error. */
1343 assert (mode->code >= irm_f && mode->code <= irm_p);
1344 res = new_r_Const (current_ir_graph, block, mode,
1345 tarval_mode_null[mode->code]);
1348 /* The local valid value is available now. */
1349 block->attr.block.graph_arr[pos] = res;
1354 #endif /* USE_FAST_PHI_CONSTRUCTION */
1356 /* ************************************************************************** */
1358 /** Finalize a Block node, when all control flows are known. */
1359 /** Acceptable parameters are only Block nodes. */
/* Completes all pending Phi0 nodes attached to the block (via the "link"
   chain) by replacing each with a real Phi built by phi_merge, then marks
   the block as matured and optimizes it in place. */
1361 mature_block (ir_node *block)
1368 assert (get_irn_opcode(block) == iro_Block);
1369 // assert (!get_Block_matured(block) && "Block already matured");
/* Maturing twice is tolerated: a second call is a no-op. */
1371 if (!get_Block_matured(block)) {
1373 /* An array for building the Phi nodes. */
/* The in-array's slot 0 is the block itself, hence the -1. */
1374 ins = ARR_LEN (block->in)-1;
1375 NEW_ARR_A (ir_node *, nin, ins);
1376 /* shouldn't we delete this array at the end of the procedure? @@@ memory leak? */
1378 /* Traverse a chain of Phi nodes attached to this block and mature
1380 for (n = block->link; n; n=next) {
1381 inc_irg_visited(current_ir_graph);
/* Replace the placeholder Phi0 by the real Phi for its recorded slot. */
1383 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1386 block->attr.block.matured = 1;
1388 /* Now, as the block is a finished firm node, we can optimize it.
1389 Since other nodes have been allocated since the block was created
1390 we can not free the node on the obstack. Therefore we have to call
1392 Unfortunately the optimization does not change a lot, as all allocated
1393 nodes refer to the unoptimized node.
1394 We can call _2, as global cse has no effect on blocks. */
1395 block = optimize_in_place_2(block);
/* Constructors of the "comfortable" interface: each delegates to the
   corresponding new_r_* constructor, allocating the node in
   current_ir_graph's current block.
   NOTE(review): this listing has elided lines (return types, argument
   tails, closing braces); the code lines below are kept untouched. */
1401 new_Phi (int arity, ir_node **in, ir_mode *mode)
1403 return new_r_Phi (current_ir_graph, current_ir_graph->current_block,
/* Note: Const nodes are placed in the start block, not the current block. */
1408 new_Const (ir_mode *mode, tarval *con)
1410 return new_r_Const (current_ir_graph, current_ir_graph->start_block,
1415 new_Id (ir_node *val, ir_mode *mode)
1417 return new_r_Id (current_ir_graph, current_ir_graph->current_block,
1422 new_Proj (ir_node *arg, ir_mode *mode, long proj)
1424 return new_r_Proj (current_ir_graph, current_ir_graph->current_block,
/* Marks the Cond node 'arg' as fragmentary, records max_proj as its default
   projection number, and creates the corresponding mode_X Proj.  Only valid
   for a Cond whose selector has mode_I (enforced by the assert). */
1429 new_defaultProj (ir_node *arg, long max_proj)
1432 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
1433 arg->attr.c.kind = fragmentary;
1434 arg->attr.c.default_proj = max_proj;
1435 res = new_Proj (arg, mode_X, max_proj);
1440 new_Conv (ir_node *op, ir_mode *mode)
1442 return new_r_Conv (current_ir_graph, current_ir_graph->current_block,
1447 new_Tuple (int arity, ir_node **in)
1449 return new_r_Tuple (current_ir_graph, current_ir_graph->current_block,
1454 new_Add (ir_node *op1, ir_node *op2, ir_mode *mode)
1456 return new_r_Add (current_ir_graph, current_ir_graph->current_block,
1461 new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode)
1463 return new_r_Sub (current_ir_graph, current_ir_graph->current_block,
1469 new_Minus (ir_node *op, ir_mode *mode)
1471 return new_r_Minus (current_ir_graph, current_ir_graph->current_block,
1476 new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode)
1478 return new_r_Mul (current_ir_graph, current_ir_graph->current_block,
/* Division-like constructors.  Under PRECISE_EXC_CONTEXT, each node that can
   raise an exception gets a fragment array (frag_arr) while the graph is
   still in phase_building -- unless the constructor's result was optimized
   into a different op, which the get_irn_op() check guards against. */
1483 new_Quot (ir_node *memop, ir_node *op1, ir_node *op2)
1486 res = new_r_Quot (current_ir_graph, current_ir_graph->current_block,
1488 #if PRECISE_EXC_CONTEXT
1489 if ((current_ir_graph->phase_state == phase_building) &&
1490 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1491 res->attr.frag_arr = new_frag_arr(res);
1498 new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2)
1501 res = new_r_DivMod (current_ir_graph, current_ir_graph->current_block,
1503 #if PRECISE_EXC_CONTEXT
1504 if ((current_ir_graph->phase_state == phase_building) &&
1505 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1506 res->attr.frag_arr = new_frag_arr(res);
1513 new_Div (ir_node *memop, ir_node *op1, ir_node *op2)
1516 res = new_r_Div (current_ir_graph, current_ir_graph->current_block,
1518 #if PRECISE_EXC_CONTEXT
1519 if ((current_ir_graph->phase_state == phase_building) &&
1520 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1521 res->attr.frag_arr = new_frag_arr(res);
1528 new_Mod (ir_node *memop, ir_node *op1, ir_node *op2)
1531 res = new_r_Mod (current_ir_graph, current_ir_graph->current_block,
1533 #if PRECISE_EXC_CONTEXT
1534 if ((current_ir_graph->phase_state == phase_building) &&
1535 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1536 res->attr.frag_arr = new_frag_arr(res);
/* Bitwise, shift, and comparison constructors: thin wrappers over the
   new_r_* constructors, all allocating in the current block. */
1543 new_And (ir_node *op1, ir_node *op2, ir_mode *mode)
1545 return new_r_And (current_ir_graph, current_ir_graph->current_block,
1550 new_Or (ir_node *op1, ir_node *op2, ir_mode *mode)
1552 return new_r_Or (current_ir_graph, current_ir_graph->current_block,
1557 new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode)
1559 return new_r_Eor (current_ir_graph, current_ir_graph->current_block,
1564 new_Not (ir_node *op, ir_mode *mode)
1566 return new_r_Not (current_ir_graph, current_ir_graph->current_block,
1571 new_Shl (ir_node *op, ir_node *k, ir_mode *mode)
1573 return new_r_Shl (current_ir_graph, current_ir_graph->current_block,
1578 new_Shr (ir_node *op, ir_node *k, ir_mode *mode)
1580 return new_r_Shr (current_ir_graph, current_ir_graph->current_block,
1585 new_Shrs (ir_node *op, ir_node *k, ir_mode *mode)
1587 return new_r_Shrs (current_ir_graph, current_ir_graph->current_block,
/* Note the naming mismatch: new_Rotate delegates to new_r_Rot. */
1592 new_Rotate (ir_node *op, ir_node *k, ir_mode *mode)
1594 return new_r_Rot (current_ir_graph, current_ir_graph->current_block,
1599 new_Abs (ir_node *op, ir_mode *mode)
1601 return new_r_Abs (current_ir_graph, current_ir_graph->current_block,
1606 new_Cmp (ir_node *op1, ir_node *op2)
1608 return new_r_Cmp (current_ir_graph, current_ir_graph->current_block,
/* NOTE(review): the header of new_Jmp is elided in this listing; only its
   return statement is visible below. */
1615 return new_r_Jmp (current_ir_graph, current_ir_graph->current_block);
1619 new_Cond (ir_node *c)
1621 return new_r_Cond (current_ir_graph, current_ir_graph->current_block, c);
/* Call constructor; under PRECISE_EXC_CONTEXT a fragment array is attached
   (in attr.call.frag_arr) while the graph is in phase_building, unless the
   node was optimized into a different op. */
1625 new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
1629 res = new_r_Call (current_ir_graph, current_ir_graph->current_block,
1630 store, callee, arity, in, type);
1631 #if PRECISE_EXC_CONTEXT
1632 if ((current_ir_graph->phase_state == phase_building) &&
1633 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
1634 res->attr.call.frag_arr = new_frag_arr(res);
/* Control-flow and memory-operation constructors.  Load, Store and Alloc
   can raise exceptions; under PRECISE_EXC_CONTEXT they get a fragment array
   during phase_building (Alloc stores it in attr.a.frag_arr). */
1641 new_Return (ir_node* store, int arity, ir_node **in)
1643 return new_r_Return (current_ir_graph, current_ir_graph->current_block,
1648 new_Raise (ir_node *store, ir_node *obj)
1650 return new_r_Raise (current_ir_graph, current_ir_graph->current_block,
1655 new_Load (ir_node *store, ir_node *addr)
1658 res = new_r_Load (current_ir_graph, current_ir_graph->current_block,
1660 #if PRECISE_EXC_CONTEXT
1661 if ((current_ir_graph->phase_state == phase_building) &&
1662 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
1663 res->attr.frag_arr = new_frag_arr(res);
1670 new_Store (ir_node *store, ir_node *addr, ir_node *val)
1673 res = new_r_Store (current_ir_graph, current_ir_graph->current_block,
1675 #if PRECISE_EXC_CONTEXT
1676 if ((current_ir_graph->phase_state == phase_building) &&
1677 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
1678 res->attr.frag_arr = new_frag_arr(res);
1685 new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
1689 res = new_r_Alloc (current_ir_graph, current_ir_graph->current_block,
1690 store, size, alloc_type, where);
1691 #if PRECISE_EXC_CONTEXT
1692 if ((current_ir_graph->phase_state == phase_building) &&
1693 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
1694 res->attr.a.frag_arr = new_frag_arr(res);
1701 new_Free (ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1703 return new_r_Free (current_ir_graph, current_ir_graph->current_block,
1704 store, ptr, size, free_type);
/* Entity-selection and miscellaneous constructors. */
/* new_simpleSel: Sel without index operands (n_index = 0, index = NULL);
   selects entity 'ent' relative to 'objptr'. */
1708 new_simpleSel (ir_node *store, ir_node *objptr, entity *ent)
1709 /* GL: objptr was called frame before. Frame was a bad choice for the name
1710 as the operand could as well be a pointer to a dynamic object. */
1712 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1713 store, objptr, 0, NULL, ent);
1717 new_Sel (ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
1719 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1720 store, objptr, n_index, index, sel);
1724 new_SymConst (type_or_id_p value, symconst_kind kind)
1726 return new_r_SymConst (current_ir_graph, current_ir_graph->current_block,
1731 new_Sync (int arity, ir_node** in)
1733 return new_r_Sync (current_ir_graph, current_ir_graph->current_block,
/* NOTE(review): the header of this last function (presumably new_Bad) is
   elided in this listing; it returns the graph's singleton Bad node. */
1741 return current_ir_graph->bad;
1744 /* ********************************************************************* */
1745 /* Comfortable interface with automatic Phi node construction. */
1746 /* (Uses also constructors of ?? interface, except new_Block.) */
1747 /* ********************************************************************* */
1749 /** Block construction **/
1750 /* immature Block without predecessors */
/* Creates a new immature block, makes it the current block, and allocates
   its graph_arr (one slot per local, n_loc entries) zero-initialized for
   the automatic Phi-node construction.  Predecessors are added later with
   add_in_edge(); the block is completed by mature_block(). */
1751 ir_node *new_immBlock (void) {
1754 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1755 /* creates a new dynamic in-array as length of in is -1 */
1756 res = new_ir_node (current_ir_graph, NULL, op_Block, mode_R, -1, NULL);
1757 current_ir_graph->current_block = res;
1758 res->attr.block.matured = 0;
1759 res->attr.block.exc = exc_normal;
1760 set_Block_block_visited(res, 0);
1762 /* Create and initialize array for Phi-node construction. */
1763 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1764 current_ir_graph->n_loc);
1765 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1767 /* Immature block may not be optimized! */
1773 /* add an edge to a jmp/control flow node */
/* Appends the control-flow predecessor 'jmp' to 'block's in-array.  Only
   legal while the block is still immature; adding to a matured block is a
   hard error (assert). */
1775 add_in_edge (ir_node *block, ir_node *jmp)
1777 if (block->attr.block.matured) {
1778 assert(0 && "Error: Block already matured!\n");
1781 assert (jmp != NULL);
1782 ARR_APP1 (ir_node *, block->in, jmp);
1786 /* changing the current block */
/* Makes 'target' the current block; subsequent new_* constructors allocate
   their nodes there. */
1788 switch_block (ir_node *target)
1790 current_ir_graph->current_block = target;
1793 /* ************************ */
1794 /* parameter administration */
/* The current block's graph_arr holds the locally valid values: slot 0 is
   the store (memory) value, user values occupy slots 1 .. n_loc-1 -- hence
   the "pos + 1" offset below. */
1796 /* get a value from the parameter array from the current block by its index */
1798 get_value (int pos, ir_mode *mode)
1800 assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* A fresh visited counter starts a new traversal for the Phi search. */
1801 inc_irg_visited(current_ir_graph);
1803 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
1807 /* set a value at position pos in the parameter array from the current block */
1809 set_value (int pos, ir_node *value)
1811 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1812 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
1815 /* get the current store */
1819 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1820 /* GL: one could call get_value instead */
1821 inc_irg_visited(current_ir_graph);
/* The store lives in slot 0 of graph_arr and always has mode_M. */
1822 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
1825 /* set the current store */
1827 set_store (ir_node *store)
1829 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1830 /* GL: one could call set_value instead */
1831 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Registers 'ka' as a keep-alive edge at the End node so it survives
   dead-node elimination. */
1835 keep_alive (ir_node *ka)
1837 add_End_keepalive(current_ir_graph->end, ka);
1840 /** Useful access routines **/
1841 /* Returns the current block of the current graph. To set the current
1842 block use switch_block(). */
1843 ir_node *get_cur_block() {
1844 return get_irg_current_block(current_ir_graph);
1847 /* Returns the frame type of the current graph */
1848 type *get_cur_frame_type() {
1849 return get_irg_frame_type(current_ir_graph);
1853 /* ********************************************************************* */
1856 /* call once for each run of the library */
1862 /* call for each graph */
1864 finalize_cons (ir_graph *irg) {
1865 irg->phase_state = phase_high;