1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Authors: Martin Trapp, Christian Schaefer
6 ** ircons.c: basic and more detailed irnode constructors
7 ** store, block and parameter administration.
8 ** Adapted to extended FIRM nodes (exceptions...) and commented
9 ** by Goetz Lindenmaier
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
28 /* memset belongs to string.h */
31 #if USE_EXPICIT_PHI_IN_STACK
/* NOTE(review): the macro is spelled "EXPICIT" (sic) consistently in this
   file; it must match the definition elsewhere, so it is left unchanged. */
32 /* A stack needed for the automatic Phi node construction in constructor
33 Phi_in. Redefinition in irgraph.c!! */
/* Opaque here: the struct's members live in irgraph.c. */
38 typedef struct Phi_in_stack Phi_in_stack;
41 /*** ******************************************** */
42 /** private interfaces, for professional use only */
44 /* Constructs a Block with a fixed number of predecessors.
45 Does not set current_block. Can not be used with automatic
46 Phi node construction. */
48 new_r_Block (ir_graph *irg, int arity, ir_node **in)
/* Allocate the Block node (mode_R) with all control-flow predecessors
   given up front, then mark it mature and reset its visited counter. */
52 res = new_ir_node (irg, NULL, op_Block, mode_R, arity, in);
53 set_Block_matured(res, 1);
54 set_Block_block_visited(res, 0);
/* Start node: mode_T (tuple of initial memory, frame, arguments, ...),
   no predecessors. */
61 new_r_Start (ir_graph *irg, ir_node *block)
65 res = new_ir_node (irg, block, op_Start, mode_T, 0, NULL);
/* End node: mode_X; arity -1 requests a dynamic in-array — presumably so
   keep-alive edges can be appended later (see add_End_keepalive below). */
72 new_r_End (ir_graph *irg, ir_node *block)
76 res = new_ir_node (irg, block, op_End, mode_X, -1, NULL);
82 /* Creates a Phi node with all predecessors. Calling this constructor
83 is only allowed if the corresponding block is mature. */
85 new_r_Phi (ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* A Phi must have exactly one data predecessor per control predecessor
   of its block. */
89 assert( get_Block_matured(block) );
90 assert( get_irn_arity(block) == arity );
92 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
97 /* Memory Phis in endless loops must be kept alive.
98 As we can't distinguish these easily we keep all of them alive. */
/* The res->op check guards the case where the node was replaced on elided
   lines (presumably by an optimize() call) — TODO confirm. */
99 if ((res->op == op_Phi) && (mode == mode_M))
100 add_End_keepalive(irg->end, res);
/* Constant node: carries tarval `con` (stored on elided lines); zero
   predecessors. Runs the local optimizer before returning. */
105 new_r_Const (ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
108 res = new_ir_node (irg, block, op_Const, mode, 0, NULL);
110 res = optimize (res);
114 res = local_optimize_newby (res);
/* Id node: a transparent alias for `val`, used as a placeholder that the
   optimizer can remove. */
121 new_r_Id (ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
123 ir_node *in[1] = {val};
125 res = new_ir_node (irg, block, op_Id, mode, 1, in);
126 res = optimize (res);
/* Proj node: selects one result of the tuple node `arg`; the projection
   number is stored in attr.proj. */
132 new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
135 ir_node *in[1] = {arg};
137 res = new_ir_node (irg, block, op_Proj, mode, 1, in);
138 res->attr.proj = proj;
141 assert(get_Proj_pred(res));
142 assert(get_nodes_Block(get_Proj_pred(res)));
144 res = optimize (res);
/* Default Proj for a Cond with an integer selector: marks the Cond as
   fragmentary and records max_proj as the default projection, then builds
   an ordinary mode_X Proj with number max_proj. */
152 new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
156 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
157 arg->attr.c.kind = fragmentary;
158 arg->attr.c.default_proj = max_proj;
159 res = new_r_Proj (irg, block, arg, mode_X, max_proj);
/* Conv node: converts `op` to `mode`. */
164 new_r_Conv (ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
166 ir_node *in[1] = {op};
168 res = new_ir_node (irg, block, op_Conv, mode, 1, in);
169 res = optimize (res);
/* Tuple node: groups `arity` values into one mode_T node (accessed via
   Proj). */
176 new_r_Tuple (ir_graph *irg, ir_node *block, int arity, ir_node **in)
180 res = new_ir_node (irg, block, op_Tuple, mode_T, arity, in);
181 res = optimize (res);
/* --- Arithmetic / logic constructors ---------------------------------
   All follow the same pattern: build the in-array, allocate the node via
   new_ir_node, run the local optimizer, (return res on elided lines).
   Quot/DivMod/Div/Mod additionally take a memory operand and produce a
   mode_T tuple (memory result + value result(s), selected via Proj). */
187 new_r_Add (ir_graph *irg, ir_node *block,
188 ir_node *op1, ir_node *op2, ir_mode *mode)
190 ir_node *in[2] = {op1, op2};
192 res = new_ir_node (irg, block, op_Add, mode, 2, in);
193 res = optimize (res);
199 new_r_Sub (ir_graph *irg, ir_node *block,
200 ir_node *op1, ir_node *op2, ir_mode *mode)
202 ir_node *in[2] = {op1, op2};
204 res = new_ir_node (irg, block, op_Sub, mode, 2, in);
205 res = optimize (res);
/* Unary negation. */
211 new_r_Minus (ir_graph *irg, ir_node *block,
212 ir_node *op, ir_mode *mode)
214 ir_node *in[1] = {op};
216 res = new_ir_node (irg, block, op_Minus, mode, 1, in);
217 res = optimize (res);
223 new_r_Mul (ir_graph *irg, ir_node *block,
224 ir_node *op1, ir_node *op2, ir_mode *mode)
226 ir_node *in[2] = {op1, op2};
228 res = new_ir_node (irg, block, op_Mul, mode, 2, in);
229 res = optimize (res);
/* Division ops carry memory (`memop`) because they may raise exceptions. */
235 new_r_Quot (ir_graph *irg, ir_node *block,
236 ir_node *memop, ir_node *op1, ir_node *op2)
238 ir_node *in[3] = {memop, op1, op2};
240 res = new_ir_node (irg, block, op_Quot, mode_T, 3, in);
241 res = optimize (res);
247 new_r_DivMod (ir_graph *irg, ir_node *block,
248 ir_node *memop, ir_node *op1, ir_node *op2)
250 ir_node *in[3] = {memop, op1, op2};
252 res = new_ir_node (irg, block, op_DivMod, mode_T, 3, in);
253 res = optimize (res);
259 new_r_Div (ir_graph *irg, ir_node *block,
260 ir_node *memop, ir_node *op1, ir_node *op2)
262 ir_node *in[3] = {memop, op1, op2};
264 res = new_ir_node (irg, block, op_Div, mode_T, 3, in);
265 res = optimize (res);
271 new_r_Mod (ir_graph *irg, ir_node *block,
272 ir_node *memop, ir_node *op1, ir_node *op2)
274 ir_node *in[3] = {memop, op1, op2};
276 res = new_ir_node (irg, block, op_Mod, mode_T, 3, in);
277 res = optimize (res);
/* Bitwise ops. */
283 new_r_And (ir_graph *irg, ir_node *block,
284 ir_node *op1, ir_node *op2, ir_mode *mode)
286 ir_node *in[2] = {op1, op2};
288 res = new_ir_node (irg, block, op_And, mode, 2, in);
289 res = optimize (res);
295 new_r_Or (ir_graph *irg, ir_node *block,
296 ir_node *op1, ir_node *op2, ir_mode *mode)
298 ir_node *in[2] = {op1, op2};
300 res = new_ir_node (irg, block, op_Or, mode, 2, in);
301 res = optimize (res);
307 new_r_Eor (ir_graph *irg, ir_node *block,
308 ir_node *op1, ir_node *op2, ir_mode *mode)
310 ir_node *in[2] = {op1, op2};
312 res = new_ir_node (irg, block, op_Eor, mode, 2, in);
313 res = optimize (res);
319 new_r_Not (ir_graph *irg, ir_node *block,
320 ir_node *op, ir_mode *mode)
322 ir_node *in[1] = {op};
324 res = new_ir_node (irg, block, op_Not, mode, 1, in);
325 res = optimize (res);
/* Shift / rotate ops: `k` is the shift amount. */
331 new_r_Shl (ir_graph *irg, ir_node *block,
332 ir_node *op, ir_node *k, ir_mode *mode)
334 ir_node *in[2] = {op, k};
336 res = new_ir_node (irg, block, op_Shl, mode, 2, in);
337 res = optimize (res);
343 new_r_Shr (ir_graph *irg, ir_node *block,
344 ir_node *op, ir_node *k, ir_mode *mode)
346 ir_node *in[2] = {op, k};
348 res = new_ir_node (irg, block, op_Shr, mode, 2, in);
349 res = optimize (res);
/* Arithmetic (sign-preserving) right shift. */
355 new_r_Shrs (ir_graph *irg, ir_node *block,
356 ir_node *op, ir_node *k, ir_mode *mode)
358 ir_node *in[2] = {op, k};
360 res = new_ir_node (irg, block, op_Shrs, mode, 2, in);
361 res = optimize (res);
367 new_r_Rot (ir_graph *irg, ir_node *block,
368 ir_node *op, ir_node *k, ir_mode *mode)
370 ir_node *in[2] = {op, k};
372 res = new_ir_node (irg, block, op_Rot, mode, 2, in);
373 res = optimize (res);
379 new_r_Abs (ir_graph *irg, ir_node *block,
380 ir_node *op, ir_mode *mode)
382 ir_node *in[1] = {op};
384 res = new_ir_node (irg, block, op_Abs, mode, 1, in);
385 res = optimize (res);
/* Cmp yields a mode_T node; individual relations are selected via Proj. */
391 new_r_Cmp (ir_graph *irg, ir_node *block,
392 ir_node *op1, ir_node *op2)
394 ir_node *in[2] = {op1, op2};
396 res = new_ir_node (irg, block, op_Cmp, mode_T, 2, in);
397 res = optimize (res);
/* Unconditional jump: mode_X, no data predecessors.
   NOTE(review): `in` at the call below is not declared on any visible line
   (elided) — presumably NULL/empty; TODO confirm against the full source. */
403 new_r_Jmp (ir_graph *irg, ir_node *block)
407 res = new_ir_node (irg, block, op_Jmp, mode_X, 0, in);
408 res = optimize (res);
/* Conditional branch on selector `c`; starts as a dense Cond (projections
   0..n without gaps) with default projection 0. Successors are Projs. */
414 new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c)
416 ir_node *in[1] = {c};
418 res = new_ir_node (irg, block, op_Cond, mode_T, 1, in);
419 res->attr.c.kind = dense;
420 res->attr.c.default_proj = 0;
421 res = optimize (res);
/* Call node: in-array layout is [store, callee, param0..paramN-1]; the
   parameters are copied in starting at index 2. `type` must be a method
   type and is stored as the call's signature. */
427 new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
428 ir_node *callee, int arity, ir_node **in, type *type)
/* r_arity (set on elided lines) is presumably arity + 2 — TODO confirm. */
435 NEW_ARR_A (ir_node *, r_in, r_arity);
438 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
440 res = new_ir_node (irg, block, op_Call, mode_T, r_arity, r_in);
442 assert(is_method_type(type));
443 set_Call_type(res, type);
444 res = optimize (res);
/* Return node: in-array layout is [store, res0..resN-1]; results are
   copied in starting at index 1. Mode is X (control flow to End). */
450 new_r_Return (ir_graph *irg, ir_node *block,
451 ir_node *store, int arity, ir_node **in)
458 NEW_ARR_A (ir_node *, r_in, r_arity);
460 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
461 res = new_ir_node (irg, block, op_Return, mode_X, r_arity, r_in);
462 res = optimize (res);
/* Raise node: throws exception object `obj`; takes and produces memory. */
468 new_r_Raise (ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
470 ir_node *in[2] = {store, obj};
472 res = new_ir_node (irg, block, op_Raise, mode_T, 2, in);
474 res = optimize (res);
/* Load from address `adr`; mode_T tuple (memory + loaded value, via Proj). */
480 new_r_Load (ir_graph *irg, ir_node *block,
481 ir_node *store, ir_node *adr)
483 ir_node *in[2] = {store, adr};
485 res = new_ir_node (irg, block, op_Load, mode_T, 2, in);
487 res = optimize (res);
/* Store `val` to address `adr`; mode_T tuple (new memory via Proj). */
493 new_r_Store (ir_graph *irg, ir_node *block,
494 ir_node *store, ir_node *adr, ir_node *val)
496 ir_node *in[3] = {store, adr, val};
498 res = new_ir_node (irg, block, op_Store, mode_T, 3, in);
500 res = optimize (res);
/* Alloc node: allocates `size` bytes of type `alloc_type`; `where` selects
   stack vs. heap allocation. Produces a mode_T tuple. */
506 new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
507 ir_node *size, type *alloc_type, where_alloc where)
509 ir_node *in[2] = {store, size};
511 res = new_ir_node (irg, block, op_Alloc, mode_T, 2, in);
513 res->attr.a.where = where;
514 res->attr.a.type = alloc_type;
516 res = optimize (res);
/* Free node: releases `size` bytes at `ptr`; the freed type is recorded in
   attr.f. */
522 new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
523 ir_node *ptr, ir_node *size, type *free_type)
525 ir_node *in[3] = {store, ptr, size};
527 res = new_ir_node (irg, block, op_Free, mode_T, 3, in);
529 res->attr.f = free_type;
531 res = optimize (res);
/* Sel node: selects entity `ent` relative to `objptr` (array/struct
   member access). Index operands are copied in starting at r_in[2];
   result mode is pointer (mode_p). Linkage defaults to static. */
537 new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
538 int arity, ir_node **in, entity *ent)
/* r_arity (set on elided lines) is presumably arity + 2 — TODO confirm. */
545 NEW_ARR_A (ir_node *, r_in, r_arity);
548 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
549 res = new_ir_node (irg, block, op_Sel, mode_p, r_arity, r_in);
551 res->attr.s.ltyp = static_linkage;
552 res->attr.s.ent = ent;
554 res = optimize (res);
/* SymConst node: a symbolic constant — either linkage (pointer) info, a
   type tag, or a type size, selected by `symkind`. The union attr.i.tori
   holds an ident for linkage info, a type otherwise. */
560 new_r_SymConst (ir_graph *irg, ir_node *block, type_or_id_p value,
561 symconst_kind symkind)
/* `mode` used below is chosen on elided lines depending on symkind —
   TODO confirm against the full source. */
566 if (symkind == linkage_ptr_info)
570 res = new_ir_node (irg, block, op_SymConst, mode, 0, in);
572 res->attr.i.num = symkind;
573 if (symkind == linkage_ptr_info) {
574 res->attr.i.tori.ptrinfo = (ident *)value;
576 assert ( ( (symkind == type_tag)
577 || (symkind == size))
578 && (is_type(value)));
579 res->attr.i.tori.typ = (type *)value;
581 res = optimize (res);
/* Sync node: merges several memory values into one (mode_M). */
587 new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in)
591 res = new_ir_node (irg, block, op_Sync, mode_M, arity, in);
593 res = optimize (res);
/* NOTE(review): fragment — returns the graph's single shared Bad node;
   the enclosing function header is on elided lines (presumably new_Bad). */
601 return current_ir_graph->bad;
604 /** ********************/
605 /** public interfaces */
606 /** construction tools */
608 /****f* ircons/new_Start
611 * new_Start -- create a new Start node in the current block
614 * s = new_Start(void);
615 * ir_node* new_Start(void);
618 * s - pointer to the created Start node
/* Public new_Start: like new_r_Start but implicitly uses current_ir_graph
   and its current_block. */
627 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
628 op_Start, mode_T, 0, NULL);
630 res = optimize (res);
/* Public new_End: mode_X, dynamic arity (-1) for later keep-alive edges. */
639 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
640 op_End, mode_X, -1, NULL);
641 res = optimize (res);
647 /* Constructs a Block with a fixed number of predecessors.
648 Does set current_block. Can be used with automatic Phi
649 node construction. */
651 new_Block (int arity, ir_node **in)
655 res = new_r_Block (current_ir_graph, arity, in);
657 /* Create and initialize array for Phi-node construction. */
/* graph_arr[pos] caches the current value of local variable `pos` inside
   this block; one slot per local (n_loc), zeroed = "no value yet". */
658 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
659 current_ir_graph->n_loc);
660 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
662 res = optimize (res);
/* All following constructions happen in this block until the next
   new_Block / explicit switch. */
663 current_ir_graph->current_block = res;
670 /* ***********************************************************************/
671 /* Methods necessary for automatic Phi node creation */
673 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
674 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
675 ir_node *new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
676 ir_node *new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
678 Call Graph: ( A ---> B == A "calls" B)
680 get_value mature_block
688 get_r_value_internal |
692 new_r_Phi0 new_r_Phi_in
694 * *************************************************************************** */
696 /* Creates a Phi node with 0 predecessors */
/* Placeholder Phi used while a block is immature; it is completed (or
   turned into an Id) when the block is matured. */
698 new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
701 res = new_ir_node (irg, block, op_Phi, mode, 0, NULL);
706 /* There are two implementations of the Phi node construction. The first
707 is faster, but does not work for blocks with more than 2 predecessors.
708 The second works always but is slower and causes more unnecessary Phi
710 Select the implementations by the following preprocessor flag set in
712 #if USE_FAST_PHI_CONSTRUCTION
714 /* This is a stack used for allocating and deallocating nodes in
715 new_r_Phi_in. The original implementation used the obstack
716 to model this stack, now it is explicit. This reduces side effects.
718 #if USE_EXPICIT_PHI_IN_STACK
/* new_Phi_in_stack: heap-allocate the stack head; the backing array is a
   flexible libfirm array (NEW_ARR_F). */
723 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
725 res->stack = NEW_ARR_F (ir_node *, 1);
/* Releases the stack (body on elided lines). */
732 free_Phi_in_stack(Phi_in_stack *s) {
/* Push a discarded Phi onto the free-list stack so its storage can be
   reused by alloc_or_pop_from_Phi_in_stack. */
737 void free_to_Phi_in_stack(ir_node *phi) {
738 assert(get_irn_opcode(phi) == iro_Phi);
/* Grow the array only when pos has reached its end; otherwise overwrite
   the slot at pos. */
740 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
741 current_ir_graph->Phi_in_stack->pos)
742 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
744 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
746 (current_ir_graph->Phi_in_stack->pos)++;
/* Either pop a recycled Phi from the stack and re-initialize it, or
   allocate a fresh Phi node if the stack is empty. */
750 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
751 int arity, ir_node **in) {
753 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
754 int pos = current_ir_graph->Phi_in_stack->pos;
758 /* We need to allocate a new node */
759 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
761 /* reuse the old node and initialize it again. */
764 assert (res->kind == k_ir_node);
765 assert (res->op == op_Phi);
770 /* ???!!! How to free the old in array?? */
/* in[0] is reserved for the block edge, hence arity+1 slots and the copy
   starting at &res->in[1]. */
771 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
773 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
775 (current_ir_graph->Phi_in_stack->pos)--;
779 #endif /* USE_EXPICIT_PHI_IN_STACK */
781 /* Creates a Phi node with a given, fixed array **in of predecessors.
782 If the Phi node is unnecessary, as the same value reaches the block
783 through all control flow paths, it is eliminated and the value
784 returned directly. This constructor is only intended for use in
785 the automatic Phi node generation triggered by get_value or mature.
786 The implementation is quite tricky and depends on the fact, that
787 the nodes are allocated on a stack:
788 The in array contains predecessors and NULLs. The NULLs appear,
789 if get_r_value_internal, that computed the predecessors, reached
790 the same block on two paths. In this case the same value reaches
791 this block on both paths, there is no definition in between. We need
792 not allocate a Phi where these paths merge, but we have to communicate
793 this fact to the caller. This happens by returning a pointer to the
794 node the caller _will_ allocate. (Yes, we predict the address. We can
795 do so because the nodes are allocated on the obstack.) The caller then
796 finds a pointer to itself and, when this routine is called again,
/* Fast-construction variant of new_r_Phi_in: may return a predicted node
   address instead of a real Phi (see the long comment above). */
800 new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
801 ir_node **in, int ins)
804 ir_node *res, *known;
806 /* allocate a new node on the obstack.
807 This can return a node to which some of the pointers in the in-array
809 Attention: the constructor copies the in array, i.e., the later changes
810 to the array in this routine do not affect the constructed node! If
811 the in array contains NULLs, there will be missing predecessors in the
813 Is this a possible internal state of the Phi node generation? */
814 #if USE_EXPICIT_PHI_IN_STACK
815 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
817 res = known = new_ir_node (irg, block, op_Phi, mode, ins, in);
819 /* The in-array can contain NULLs. These were returned by
820 get_r_value_internal if it reached the same block/definition on a
822 The NULLs are replaced by the node itself to simplify the test in the
824 for (i=0; i < ins; ++i)
825 if (in[i] == NULL) in[i] = res;
827 /* This loop checks whether the Phi has more than one predecessor.
828 If so, it is a real Phi node and we break the loop. Else the
829 Phi node merges the same definition on several paths and therefore
831 for (i=0; i < ins; ++i)
833 if (in[i]==res || in[i]==known) continue;
841 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Give the unused node back: either onto the explicit reuse stack or by
   truncating the obstack (valid because res is the topmost allocation). */
843 #if USE_EXPICIT_PHI_IN_STACK
844 free_to_Phi_in_stack(res);
846 obstack_free (current_ir_graph->obst, res);
850 res = optimize (res);
854 /* return the pointer to the Phi node. This node might be deallocated! */
859 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
861 /** This function computes the predecessors for a real Phi node, and then
862 allocates and returns this node. The routine called to allocate the
863 node might optimize it away and return a real value, or even a pointer
864 to a deallocated Phi node on top of the obstack!
865 This function is called with an in-array of proper size. **/
866 static inline ir_node *
867 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
869 ir_node *prevBlock, *res;
872 /* This loop goes to all predecessor blocks of the block the Phi node is in
873 and there finds the operands of the Phi node by calling
874 get_r_value_internal. */
/* block->in[0] is the block's own slot, so control predecessors are
   indexed 1..ins; nin is 0-based. */
875 for (i = 1; i <= ins; ++i) {
876 assert (block->in[i]);
877 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
879 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
882 /* After collecting all predecessors into the array nin a new Phi node
883 with these predecessors is created. This constructor contains an
884 optimization: If all predecessors of the Phi node are identical it
885 returns the only operand instead of a new Phi node. If the value
886 passes two different control flow edges without being defined, and
887 this is the second path treated, a pointer to the node that will be
888 allocated for the first path (recursion) is returned. We already
889 know the address of this node, as it is the next node to be allocated
890 and will be placed on top of the obstack. (The obstack is a _stack_!) */
891 res = new_r_Phi_in (current_ir_graph, block, mode, nin, ins);
893 /* Now we know the value for "pos" and can enter it in the array with
894 all known local variables. Attention: this might be a pointer to
895 a node, that later will be allocated!!! See new_r_Phi_in.
896 If this is called in mature, after some set_value in the same block,
897 the proper value must not be overwritten:
899 get_value (makes Phi0, put's it into graph_arr)
900 set_value (overwrites Phi0 in graph_arr)
901 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
904 if (!block->attr.block.graph_arr[pos]) {
905 block->attr.block.graph_arr[pos] = res;
907 /* printf(" value already computed by %s\n",
908 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
914 /* This function returns the last definition of a variable. In case
915 this variable was last defined in a previous block, Phi nodes are
916 inserted. If the part of the firm graph containing the definition
917 is not yet constructed, a dummy Phi node is returned. */
/* Fast-construction variant of get_r_value_internal (see the case
   analysis in the comment below). */
919 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
922 /* There are 4 cases to treat.
924 1. The block is not mature and we visit it the first time. We can not
925 create a proper Phi node, therefore a Phi0, i.e., a Phi without
926 predecessors is returned. This node is added to the linked list (field
927 "link") of the containing block to be completed when this block is
928 matured. (Completion will add a new Phi and turn the Phi0 into an Id
931 2. The value is already known in this block, graph_arr[pos] is set and we
932 visit the block the first time. We can return the value without
933 creating any new nodes.
935 3. The block is mature and we visit it the first time. A Phi node needs
936 to be created (phi_merge). If the Phi is not needed, as all its
937 operands are the same value reaching the block through different
938 paths, it's optimized away and the value itself is returned.
940 4. The block is mature, and we visit it the second time. Now two
941 subcases are possible:
942 * The value was computed completely the last time we were here. This
943 is the case if there is no loop. We can return the proper value.
944 * The recursion that visited this node and set the flag did not
945 return yet. We are computing a value in a loop and need to
946 break the recursion without knowing the result yet.
947 @@@ strange case. Straight forward we would create a Phi before
948 starting the computation of its predecessors. In this case we will
949 find a Phi here in any case. The problem is that this implementation
950 only creates a Phi after computing the predecessors, so that it is
951 hard to compute self references of this Phi. @@@
952 There is no simple check for the second subcase. Therefore we check
953 for a second visit and treat all such cases as the second subcase.
954 Anyways, the basic situation is the same: we reached a block
955 on two paths without finding a definition of the value: No Phi
956 nodes are needed on both paths.
957 We return this information "Two paths, no Phi needed" by a very tricky
958 implementation that relies on the fact that an obstack is a stack and
959 will return a node with the same address on different allocations.
960 Look also at phi_merge and new_r_phi_in to understand this.
961 @@@ Unfortunately this does not work, see testprogram
962 three_cfpred_example.
966 /* case 4 -- already visited. */
967 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
969 /* visited the first time */
970 set_irn_visited(block, get_irg_visited(current_ir_graph));
972 /* Get the local valid value */
973 res = block->attr.block.graph_arr[pos];
975 /* case 2 -- If the value is actually computed, return it. */
976 if (res) { return res;};
978 if (block->attr.block.matured) { /* case 3 */
980 /* The Phi has the same amount of ins as the corresponding block. */
981 int ins = get_irn_arity(block);
983 NEW_ARR_A (ir_node *, nin, ins);
985 /* Phi merge collects the predecessors and then creates a node. */
986 res = phi_merge (block, pos, mode, nin, ins);
988 } else { /* case 1 */
989 /* The block is not mature, we don't know how many in's are needed. A Phi
990 with zero predecessors is created. Such a Phi node is called Phi0
991 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
992 to the list of Phi0 nodes in this block to be matured by mature_block
994 The Phi0 has to remember the pos of it's internal value. If the real
995 Phi is computed, pos is used to update the array with the local
998 res = new_r_Phi0 (current_ir_graph, block, mode);
999 res->attr.phi0_pos = pos;
1000 res->link = block->link;
1004 /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): the string literal below continues on elided lines. */
1007 printf("Error: no value set. Use of undefined variable. Initializing
1009 assert (mode->code >= irm_f && mode->code <= irm_p);
1010 res = new_r_Const (current_ir_graph, block, mode,
1011 tarval_mode_null[mode->code]);
1014 /* The local valid value is available now. */
1015 block->attr.block.graph_arr[pos] = res;
1022 /** This is the simple algorithm. If first generates a Phi0, then
1023 it starts the recursion. This causes an Id at the entry of
1024 every block that has no definition of the value! **/
1026 #if USE_EXPICIT_PHI_IN_STACK
/* The simple algorithm never reuses Phi storage, so the explicit stack
   degenerates to no-op stubs. */
1028 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1029 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Simple variant of new_r_Phi_in: allocate, collapse to the single known
   predecessor when possible (ignoring Bads), otherwise keep the Phi. */
1033 new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1034 ir_node **in, int ins)
1037 ir_node *res, *known;
1039 /* Allocate a new node on the obstack. The allocation copies the in
1041 res = new_ir_node (irg, block, op_Phi, mode, ins, in);
1043 /* This loop checks whether the Phi has more than one predecessor.
1044 If so, it is a real Phi node and we break the loop. Else the
1045 Phi node merges the same definition on several paths and therefore
1046 is not needed. Don't consider Bad nodes! */
1048 for (i=0; i < ins; ++i)
1050 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1058 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* res is the topmost obstack allocation, so freeing it truncates the
   obstack back to before the Phi. */
1061 obstack_free (current_ir_graph->obst, res);
1064 /* A undefined value, e.g., in unreachable code. */
1068 res = optimize (res);
1070 /* Memory Phis in endless loops must be kept alive.
1071 As we can't distinguish these easily we keep all of them alive. */
1072 if ((res->op == op_Phi) && (mode == mode_M))
1073 add_End_keepalive(irg->end, res);
1080 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1082 #if PRECISE_EXC_CONTEXT
1083 static inline ir_node *
1084 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Allocate the "fragment array" for fragile op n: a snapshot of the
   current block's graph_arr, used to track values valid on the exception
   path (PRECISE_EXC_CONTEXT). */
1087 new_frag_arr (ir_node *n) {
1089 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1090 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1091 sizeof(ir_node *)*current_ir_graph->n_loc);
1092 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* Calls project their memory result at position 3; other fragile ops at 0. */
1093 if (get_irn_op(n) == op_Call)
1094 arr[0] = new_Proj(n, mode_M, 3);
1096 arr[0] = new_Proj(n, mode_M, 0);
/* The last graph_arr slot links to the most recent fragile op in the block
   (used by set_frag_value to chain updates). */
1097 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Returns the fragment array stored in n's op-specific attribute. */
1102 get_frag_arr (ir_node *n) {
1103 if (get_irn_op(n) == op_Call) {
1104 return n->attr.call.frag_arr;
1105 } else if (get_irn_op(n) == op_Alloc) {
1106 return n->attr.a.frag_arr;
1108 return n->attr.frag_arr;
/* Record val at pos in frag_arr unless already set, then propagate to the
   frag arrays of any later fragile ops chained via the last slot. */
1113 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1114 if (!frag_arr[pos]) frag_arr[pos] = val;
1115 if (frag_arr[current_ir_graph->n_loc - 1])
1116 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Like get_r_value_internal, but looks the value up in the fragment array
   of fragile op cfOp — i.e. the value as seen on the exception edge. */
1120 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1125 assert(is_fragile_op(cfOp));
1127 frag_arr = get_frag_arr(cfOp);
1128 res = frag_arr[pos];
1130 if (block->attr.block.graph_arr[pos]) {
1131 /* There was a set_value after the cfOp and no get_value before that
1132 set_value. We must build a Phi node now. */
1133 if (block->attr.block.matured) {
1134 int ins = get_irn_arity(block);
1136 NEW_ARR_A (ir_node *, nin, ins);
1137 phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 placeholder linked into the
   block's Phi0 list, as in get_r_value_internal case 1. */
1139 res = new_r_Phi0 (current_ir_graph, block, mode);
1140 res->attr.phi0_pos = pos;
1141 res->link = block->link;
1144 set_frag_value(frag_arr, pos, res);
/* No local override: resolve normally. */
1146 res = get_r_value_internal(block, pos, mode);
1153 /** This function allocates a dummy Phi node to break recursions,
1154 computes the predecessors for the real phi node, and then
1155 allocates and returns this node. The routine called to allocate the
1156 node might optimize it away and return a real value.
1157 This function is called with an in-array of proper size. **/
1158 static inline ir_node *
1159 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1161 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1165 /* If this block has no value at pos create a Phi0 and remember it
1166 in graph_arr to break recursions.
1167 Else we may not set graph_arr as there a later value is remembered. */
1169 if (!block->attr.block.graph_arr[pos]) {
1170 /* This is commented out as collapsing to Bads is no good idea.
1171 Either we need an assert here, or we need to call a routine
1172 that deals with this case as appropriate for the given language.
1173 Right now a self referencing Id is created which will crash irg_vrfy().
1175 Even if all variables are defined before use, it can happen that
1176 we get to the start block, if a cond has been replaced by a tuple
1177 (bad, jmp). As the start has a self referencing control flow edge,
1178 we get a self referencing Id, which is hard to optimize away. We avoid
1179 this by defining the value as a Bad node.
1180 Returning a const with tarval_bad is a preliminary solution. In some
1181 situations we might want a Warning or an Error. */
1183 if (block == get_irg_start_block(current_ir_graph)) {
1184 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1185 /* We don't need to care about exception ops in the start block.
1186 There are none by definition. */
1187 return block->attr.block.graph_arr[pos];
/* Break recursion: install a Phi0 placeholder before descending into
   predecessors; it is exchanged with the real Phi at the end. */
1189 phi0 = new_r_Phi0(current_ir_graph, block, mode);
1190 block->attr.block.graph_arr[pos] = phi0;
1191 #if PRECISE_EXC_CONTEXT
1192 /* Set graph_arr for fragile ops. Also here we should break recursion. */
1193 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1198 /* This loop goes to all predecessor blocks of the block the Phi node
1199 is in and there finds the operands of the Phi node by calling
1200 get_r_value_internal. */
1201 for (i = 1; i <= ins; ++i) {
1202 prevCfOp = skip_Proj(block->in[i]);
1204 if (is_Bad(prevCfOp)) {
1205 /* In case a Cond has been optimized we would get right to the start block
1206 with an invalid definition. */
1207 nin[i-1] = new_Bad();
1210 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1212 if (!is_Bad(prevBlock)) {
1213 #if PRECISE_EXC_CONTEXT
/* On an exception edge from a fragile op, the value must come from
   that op's fragment array, not from the block's normal exit state. */
1214 if (is_fragile_op(prevCfOp))
1215 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1218 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1220 nin[i-1] = new_Bad();
1224 /* After collecting all predecessors into the array nin a new Phi node
1225 with these predecessors is created. This constructor contains an
1226 optimization: If all predecessors of the Phi node are identical it
1227 returns the only operand instead of a new Phi node. */
1228 res = new_r_Phi_in (current_ir_graph, block, mode, nin, ins);
1230 /* In case we allocated a Phi0 node at the beginning of this procedure,
1231 we need to exchange this Phi0 with the real Phi. */
1233 exchange(phi0, res);
1234 block->attr.block.graph_arr[pos] = res;
1235 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1236 only an optimization. */
1242 /* This function returns the last definition of a variable. In case
1243 this variable was last defined in a previous block, Phi nodes are
1244 inserted. If the part of the firm graph containing the definition
1245 is not yet constructed, a dummy Phi node is returned. */
1247 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1250 /* There are 4 cases to treat.
1252 1. The block is not mature and we visit it the first time. We can not
1253 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1254 predecessors is returned. This node is added to the linked list (field
1255 "link") of the containing block to be completed when this block is
1256 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1259 2. The value is already known in this block, graph_arr[pos] is set and we
1260 visit the block the first time. We can return the value without
1261 creating any new nodes.
1263 3. The block is mature and we visit it the first time. A Phi node needs
1264 to be created (phi_merge). If the Phi is not needed, as all its
1265 operands are the same value reaching the block through different
1266 paths, it's optimized away and the value itself is returned.
1268 4. The block is mature, and we visit it the second time. Now two
1269 subcases are possible:
1270 * The value was computed completely the last time we were here. This
1271 is the case if there is no loop. We can return the proper value.
1272 * The recursion that visited this node and set the flag did not
1273 return yet. We are computing a value in a loop and need to
1274 break the recursion. This case only happens if we visited
1275 the same block with phi_merge before, which inserted a Phi0.
1276 So we return the Phi0.
1279 /* case 4 -- already visited. */
1280 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1281 /* As phi_merge allocates a Phi0 this value is always defined. Here
1282 is the critical difference of the two algorithms. */
1283 assert(block->attr.block.graph_arr[pos]);
1284 return block->attr.block.graph_arr[pos];
1287 /* visited the first time */
1288 set_irn_visited(block, get_irg_visited(current_ir_graph));
1290 /* Get the local valid value */
1291 res = block->attr.block.graph_arr[pos];
1293 /* case 2 -- If the value is actually computed, return it. */
1294 if (res) { return res; };
1296 if (block->attr.block.matured) { /* case 3 */
1298 /* The Phi has the same amount of ins as the corresponding block. */
1299 int ins = get_irn_arity(block);
1301 NEW_ARR_A (ir_node *, nin, ins);
1303 /* Phi merge collects the predecessors and then creates a node. */
1304 res = phi_merge (block, pos, mode, nin, ins);
1306 } else { /* case 1 */
1307 /* The block is not mature, we don't know how many in's are needed. A Phi
1308 with zero predecessors is created. Such a Phi node is called Phi0
1309 node. The Phi0 is then added to the list of Phi0 nodes in this block
1310 to be matured by mature_block later.
1311 The Phi0 has to remember the pos of it's internal value. If the real
1312 Phi is computed, pos is used to update the array with the local
1314 res = new_r_Phi0 (current_ir_graph, block, mode);
1315 res->attr.phi0_pos = pos;
1316 res->link = block->link;
1320 /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): the string literal below continues on elided lines. */
1323 printf("Error: no value set. Use of undefined variable. Initializing
1325 assert (mode->code >= irm_f && mode->code <= irm_p);
1326 res = new_r_Const (current_ir_graph, block, mode,
1327 tarval_mode_null[mode->code]);
1330 /* The local valid value is available now. */
1331 block->attr.block.graph_arr[pos] = res;
1336 #endif /* USE_FAST_PHI_CONSTRUCTION */
1338 /* ************************************************************************** */
1340 /** Finalize a Block node, when all control flows are known. */
1341 /** Acceptable parameters are only Block nodes. */
1343 mature_block (ir_node *block)
1350 assert (get_irn_opcode(block) == iro_Block);
1352 if (!get_Block_matured(block)) {
1354 /* turn the dynamic in-array into a static one. */
1355 ins = ARR_LEN (block->in)-1;
1356 NEW_ARR_A (ir_node *, nin, ins);
1357 /* @@@ something is strange here... why isn't the array copied? */
1359 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each queued Phi0 (linked through "link", see get_r_value_internal
   case 1) is replaced by a real Phi built by phi_merge.
   NOTE(review): the visited counter is presumably bumped per Phi0 so
   each phi_merge run starts with a fresh visit flag -- confirm. */
1361 for (n = block->link; n; n=next) {
1362 inc_irg_visited(current_ir_graph);
1364 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
/* From here on add_in_edge refuses further predecessors. */
1367 block->attr.block.matured = 1;
1369 /* Now, as the block is a finished firm node, we can optimize it.
1370 Since other nodes have been allocated since the block was created
1371 we can not free the node on the obstack. Therefore we have to call
1373 Unfortunately the optimization does not change a lot, as all allocated
1374 nodes refer to the unoptimized node.
1375 We can call _2, as global cse has no effect on blocks. */
1376 block = optimize_in_place_2(block);
/* Convenience constructors of the comfortable interface: each delegates
   to the corresponding new_r_* constructor, inserting the new node into
   the current block of current_ir_graph. */
1382 new_Phi (int arity, ir_node **in, ir_mode *mode)
1384 return new_r_Phi (current_ir_graph, current_ir_graph->current_block,
/* Note: Const nodes are placed in the START block, not the current
   block, so a constant dominates all of its uses. */
1389 new_Const (ir_mode *mode, tarval *con)
1391 return new_r_Const (current_ir_graph, current_ir_graph->start_block,
1396 new_Id (ir_node *val, ir_mode *mode)
1398 return new_r_Id (current_ir_graph, current_ir_graph->current_block,
1403 new_Proj (ir_node *arg, ir_mode *mode, long proj)
1405 return new_r_Proj (current_ir_graph, current_ir_graph->current_block,
/* Default Proj of a Cond (the "otherwise" case of a switch).
   Marks the Cond as fragmentary and records the default projection
   number before building an X-mode Proj for it. */
1410 new_defaultProj (ir_node *arg, long max_proj)
1413 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
1414 arg->attr.c.kind = fragmentary;
1415 arg->attr.c.default_proj = max_proj;
1416 res = new_Proj (arg, mode_X, max_proj);
1421 new_Conv (ir_node *op, ir_mode *mode)
1423 return new_r_Conv (current_ir_graph, current_ir_graph->current_block,
1428 new_Tuple (int arity, ir_node **in)
1430 return new_r_Tuple (current_ir_graph, current_ir_graph->current_block,
/* Arithmetic constructors for the current block. */
1437 return new_r_Add (current_ir_graph, current_ir_graph->current_block,
1442 new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode)
1444 return new_r_Sub (current_ir_graph, current_ir_graph->current_block,
1450 new_Minus (ir_node *op, ir_mode *mode)
1452 return new_r_Minus (current_ir_graph, current_ir_graph->current_block,
1457 new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode)
1459 return new_r_Mul (current_ir_graph, current_ir_graph->current_block,
/* The division operations take a memory operand (they may raise an
   exception, e.g. division by zero).  With PRECISE_EXC_CONTEXT a
   fragment array is attached to record the memory state at the node. */
1464 new_Quot (ir_node *memop, ir_node *op1, ir_node *op2)
1467 res = new_r_Quot (current_ir_graph, current_ir_graph->current_block,
1469 #if PRECISE_EXC_CONTEXT
1470 res->attr.frag_arr = new_frag_arr(res);
1477 new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2)
1480 res = new_r_DivMod (current_ir_graph, current_ir_graph->current_block,
1482 #if PRECISE_EXC_CONTEXT
1483 res->attr.frag_arr = new_frag_arr(res);
1490 new_Div (ir_node *memop, ir_node *op1, ir_node *op2)
1493 res = new_r_Div (current_ir_graph, current_ir_graph->current_block,
1495 #if PRECISE_EXC_CONTEXT
1496 res->attr.frag_arr = new_frag_arr(res);
1503 new_Mod (ir_node *memop, ir_node *op1, ir_node *op2)
1506 res = new_r_Mod (current_ir_graph, current_ir_graph->current_block,
1508 #if PRECISE_EXC_CONTEXT
1509 res->attr.frag_arr = new_frag_arr(res);
/* Bitwise, shift and comparison constructors for the current block. */
1516 new_And (ir_node *op1, ir_node *op2, ir_mode *mode)
1518 return new_r_And (current_ir_graph, current_ir_graph->current_block,
1523 new_Or (ir_node *op1, ir_node *op2, ir_mode *mode)
1525 return new_r_Or (current_ir_graph, current_ir_graph->current_block,
1530 new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode)
1532 return new_r_Eor (current_ir_graph, current_ir_graph->current_block,
1537 new_Not (ir_node *op, ir_mode *mode)
1539 return new_r_Not (current_ir_graph, current_ir_graph->current_block,
/* Shift constructors: k is the shift amount operand. */
1544 new_Shl (ir_node *op, ir_node *k, ir_mode *mode)
1546 return new_r_Shl (current_ir_graph, current_ir_graph->current_block,
1551 new_Shr (ir_node *op, ir_node *k, ir_mode *mode)
1553 return new_r_Shr (current_ir_graph, current_ir_graph->current_block,
1558 new_Shrs (ir_node *op, ir_node *k, ir_mode *mode)
1560 return new_r_Shrs (current_ir_graph, current_ir_graph->current_block,
1565 new_Rotate (ir_node *op, ir_node *k, ir_mode *mode)
1567 return new_r_Rot (current_ir_graph, current_ir_graph->current_block,
1572 new_Abs (ir_node *op, ir_mode *mode)
1574 return new_r_Abs (current_ir_graph, current_ir_graph->current_block,
1579 new_Cmp (ir_node *op1, ir_node *op2)
1581 return new_r_Cmp (current_ir_graph, current_ir_graph->current_block,
/* Control flow constructors (presumably the body of new_Jmp follows;
   its header line is not visible in this excerpt). */
1588 return new_r_Jmp (current_ir_graph, current_ir_graph->current_block);
1592 new_Cond (ir_node *c)
1594 return new_r_Cond (current_ir_graph, current_ir_graph->current_block, c);
/* A Call may raise an exception; with PRECISE_EXC_CONTEXT a fragment
   array records the memory state at the call. */
1598 new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
1602 res = new_r_Call (current_ir_graph, current_ir_graph->current_block,
1603 store, callee, arity, in, type);
1604 #if PRECISE_EXC_CONTEXT
1605 res->attr.call.frag_arr = new_frag_arr(res);
1612 new_Return (ir_node* store, int arity, ir_node **in)
1614 return new_r_Return (current_ir_graph, current_ir_graph->current_block,
1619 new_Raise (ir_node *store, ir_node *obj)
1621 return new_r_Raise (current_ir_graph, current_ir_graph->current_block,
/* Memory operation constructors.  Load, Store and Alloc may raise an
   exception; with PRECISE_EXC_CONTEXT a fragment array is attached. */
1626 new_Load (ir_node *store, ir_node *addr)
1629 res = new_r_Load (current_ir_graph, current_ir_graph->current_block,
1631 #if PRECISE_EXC_CONTEXT
1632 res->attr.frag_arr = new_frag_arr(res);
1639 new_Store (ir_node *store, ir_node *addr, ir_node *val)
1642 res = new_r_Store (current_ir_graph, current_ir_graph->current_block,
1644 #if PRECISE_EXC_CONTEXT
1645 res->attr.frag_arr = new_frag_arr(res);
1652 new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
1656 res = new_r_Alloc (current_ir_graph, current_ir_graph->current_block,
1657 store, size, alloc_type, where);
1658 #if PRECISE_EXC_CONTEXT
1659 res->attr.a.frag_arr = new_frag_arr(res);
1666 new_Free (ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1668 return new_r_Free (current_ir_graph, current_ir_graph->current_block,
1669 store, ptr, size, free_type);
/* Select an entity from an object without array indices: a Sel node
   with zero index operands. */
1673 new_simpleSel (ir_node *store, ir_node *objptr, entity *ent)
1674 /* GL: objptr was called frame before. Frame was a bad choice for the name
1675 as the operand could as well be a pointer to a dynamic object. */
1677 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1678 store, objptr, 0, NULL, ent);
1682 new_Sel (ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
1684 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1685 store, objptr, n_index, index, sel);
1689 new_SymConst (type_or_id_p value, symconst_kind kind)
1691 return new_r_SymConst (current_ir_graph, current_ir_graph->current_block,
1696 new_Sync (int arity, ir_node** in)
1698 return new_r_Sync (current_ir_graph, current_ir_graph->current_block,
/* Presumably the body of new_Bad: returns the graph's unique Bad node
   (the header line is not visible in this excerpt). */
1706 return current_ir_graph->bad;
1709 /* ********************************************************************* */
1710 /* Comfortable interface with automatic Phi node construction. */
1711 /* (Also uses constructors of the interface above, except new_Block.) */
1712 /* ********************************************************************* */
1714 /** Block construction **/
1715 /* immature Block without predecessors */
1716 ir_node *new_immBlock (void) {
1719 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1720 /* creates a new dynamic in-array as length of in is -1 */
1721 res = new_ir_node (current_ir_graph, NULL, op_Block, mode_R, -1, NULL);
/* The new block becomes the current block of the graph under
   construction.  While matured is 0, add_in_edge may still append
   predecessors; mature_block finishes the block later. */
1722 current_ir_graph->current_block = res;
1723 res->attr.block.matured = 0;
1724 set_Block_block_visited(res, 0);
1726 /* Create and initialize array for Phi-node construction. */
1727 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1728 current_ir_graph->n_loc);
/* All n_loc value slots start out undefined (NULL). */
1729 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1731 /* Immature block may not be optimized! */
1737 /* add an edge to a jmp/control flow node */
/* Appends control flow predecessor jmp to an immature block.
   Maturing the block (mature_block) fixes its predecessor count,
   after which adding further edges is an error. */
1739 add_in_edge (ir_node *block, ir_node *jmp)
1741 if (block->attr.block.matured) {
1742 assert(0 && "Error: Block already matured!\n");
1745 assert (jmp != NULL);
1746 ARR_APP1 (ir_node *, block->in, jmp);
1750 /* changing the current block */
/* Makes target the block that subsequent comfortable-interface
   constructors (new_Add, get_value, ...) insert into. */
1752 switch_block (ir_node *target)
1754 current_ir_graph->current_block = target;
1757 /* ************************ */
1758 /* parameter administration */
1760 /* get a value from the parameter array from the current block by its index */
/* Value slots are shifted by one: index 0 of graph_arr is reserved for
   the store (see get_store/set_store), so local pos maps to pos + 1.
   The visited counter is bumped to start a fresh Phi-construction walk. */
1762 get_value (int pos, ir_mode *mode)
1764 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1765 inc_irg_visited(current_ir_graph);
1766 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
1770 /* set a value at position pos in the parameter array from the current block */
/* Same +1 slot shift as in get_value: slot 0 is reserved for the store. */
1772 set_value (int pos, ir_node *value)
1774 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1775 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
1778 /* get the current store */
/* The store lives in value slot 0; like get_value this starts a fresh
   Phi-construction walk via the visited counter. */
1782 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1783 /* GL: one could call get_value instead */
1784 inc_irg_visited(current_ir_graph);
1785 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
1788 /* set the current store */
/* Writes the memory state into the reserved value slot 0. */
1790 set_store (ir_node *store)
1792 assert(get_irg_phase_state (current_ir_graph) == phase_building);
1793 /* GL: one could call set_value instead */
1794 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keeps node ka alive by adding it to the keep-alive edges of the
   current graph's End node, protecting it from dead node elimination. */
1798 keep_alive (ir_node *ka)
1800 add_End_keepalive(current_ir_graph->end, ka);
1803 /** Useful access routines **/
1804 /* Returns the current block of the current graph. To set the current
1805 block use switch_block(). */
1806 ir_node *get_cur_block() {
1807 return get_irg_current_block(current_ir_graph);
1810 /* Returns the frame type of the current graph */
1811 type *get_cur_frame_type() {
1812 return get_irg_frame_type(current_ir_graph);
1816 /* ********************************************************************* */
1819 /* call once for each run of the library */
1825 /* call for each graph */
1827 finalize_cons (ir_graph *irg) {
1828 irg->phase_state = phase_high;