1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Authors: Martin Trapp, Christian Schaefer
6 ** ircons.c: basic and more detailed irnode constructors
7 ** store, block and parameter administration.
8 ** Adapted to extended FIRM nodes (exceptions...) and commented
9 ** by Goetz Lindenmaier
16 # include "irgraph_t.h"
17 # include "irnode_t.h"
18 # include "irmode_t.h"
26 /* memset belongs to string.h */
29 #if USE_EXPICIT_PHI_IN_STACK
30 /* A stack needed for the automatic Phi node construction in constructor
31 Phi_in. Redefinition in irgraph.c!! */
36 typedef struct Phi_in_stack Phi_in_stack;
39 /*** ******************************************** */
40 /** private interfaces, for professional use only */
42 /* Constructs a Block with a fixed number of predecessors.
43 Does not set current_block. Can not be used with automatic
44 Phi node construction. */
46 new_r_Block (ir_graph *irg, int arity, ir_node **in)
50 res = new_ir_node (irg, NULL, op_Block, mode_R, arity, in);
51 set_Block_matured(res, 1);
52 set_Block_block_visited(res, 0);
59 new_r_Start (ir_graph *irg, ir_node *block)
63 res = new_ir_node (irg, block, op_Start, mode_T, 0, NULL);
70 new_r_End (ir_graph *irg, ir_node *block)
74 res = new_ir_node (irg, block, op_End, mode_X, -1, NULL);
80 /* Creates a Phi node with all predecessors. Calling this constructor
81 is only allowed if the corresponding block is mature. */
83 new_r_Phi (ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
87 assert( get_Block_matured(block) );
88 assert( get_irn_arity(block) == arity );
90 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
98 new_r_Const (ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
101 res = new_ir_node (irg, block, op_Const, mode, 0, NULL);
103 res = optimize (res);
107 res = local_optimize_newby (res);
114 new_r_Id (ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
116 ir_node *in[1] = {val};
118 res = new_ir_node (irg, block, op_Id, mode, 1, in);
119 res = optimize (res);
125 new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
128 ir_node *in[1] = {arg};
130 res = new_ir_node (irg, block, op_Proj, mode, 1, in);
131 res->attr.proj = proj;
134 assert(get_Proj_pred(res));
135 assert(get_nodes_Block(get_Proj_pred(res)));
137 res = optimize (res);
145 new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
149 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
150 arg->attr.c.kind = fragmentary;
151 arg->attr.c.default_proj = max_proj;
152 res = new_r_Proj (irg, block, arg, mode_X, max_proj);
157 new_r_Conv (ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
159 ir_node *in[1] = {op};
161 res = new_ir_node (irg, block, op_Conv, mode, 1, in);
162 res = optimize (res);
169 new_r_Tuple (ir_graph *irg, ir_node *block, int arity, ir_node **in)
173 res = new_ir_node (irg, block, op_Tuple, mode_T, arity, in);
174 res = optimize (res);
180 new_r_Add (ir_graph *irg, ir_node *block,
181 ir_node *op1, ir_node *op2, ir_mode *mode)
183 ir_node *in[2] = {op1, op2};
185 res = new_ir_node (irg, block, op_Add, mode, 2, in);
186 res = optimize (res);
192 new_r_Sub (ir_graph *irg, ir_node *block,
193 ir_node *op1, ir_node *op2, ir_mode *mode)
195 ir_node *in[2] = {op1, op2};
197 res = new_ir_node (irg, block, op_Sub, mode, 2, in);
198 res = optimize (res);
204 new_r_Minus (ir_graph *irg, ir_node *block,
205 ir_node *op, ir_mode *mode)
207 ir_node *in[1] = {op};
209 res = new_ir_node (irg, block, op_Minus, mode, 1, in);
210 res = optimize (res);
216 new_r_Mul (ir_graph *irg, ir_node *block,
217 ir_node *op1, ir_node *op2, ir_mode *mode)
219 ir_node *in[2] = {op1, op2};
221 res = new_ir_node (irg, block, op_Mul, mode, 2, in);
222 res = optimize (res);
228 new_r_Quot (ir_graph *irg, ir_node *block,
229 ir_node *memop, ir_node *op1, ir_node *op2)
231 ir_node *in[3] = {memop, op1, op2};
233 res = new_ir_node (irg, block, op_Quot, mode_T, 3, in);
234 res = optimize (res);
240 new_r_DivMod (ir_graph *irg, ir_node *block,
241 ir_node *memop, ir_node *op1, ir_node *op2)
243 ir_node *in[3] = {memop, op1, op2};
245 res = new_ir_node (irg, block, op_DivMod, mode_T, 3, in);
246 res = optimize (res);
252 new_r_Div (ir_graph *irg, ir_node *block,
253 ir_node *memop, ir_node *op1, ir_node *op2)
255 ir_node *in[3] = {memop, op1, op2};
257 res = new_ir_node (irg, block, op_Div, mode_T, 3, in);
258 res = optimize (res);
264 new_r_Mod (ir_graph *irg, ir_node *block,
265 ir_node *memop, ir_node *op1, ir_node *op2)
267 ir_node *in[3] = {memop, op1, op2};
269 res = new_ir_node (irg, block, op_Mod, mode_T, 3, in);
270 res = optimize (res);
276 new_r_And (ir_graph *irg, ir_node *block,
277 ir_node *op1, ir_node *op2, ir_mode *mode)
279 ir_node *in[2] = {op1, op2};
281 res = new_ir_node (irg, block, op_And, mode, 2, in);
282 res = optimize (res);
288 new_r_Or (ir_graph *irg, ir_node *block,
289 ir_node *op1, ir_node *op2, ir_mode *mode)
291 ir_node *in[2] = {op1, op2};
293 res = new_ir_node (irg, block, op_Or, mode, 2, in);
294 res = optimize (res);
300 new_r_Eor (ir_graph *irg, ir_node *block,
301 ir_node *op1, ir_node *op2, ir_mode *mode)
303 ir_node *in[2] = {op1, op2};
305 res = new_ir_node (irg, block, op_Eor, mode, 2, in);
306 res = optimize (res);
312 new_r_Not (ir_graph *irg, ir_node *block,
313 ir_node *op, ir_mode *mode)
315 ir_node *in[1] = {op};
317 res = new_ir_node (irg, block, op_Not, mode, 1, in);
318 res = optimize (res);
324 new_r_Shl (ir_graph *irg, ir_node *block,
325 ir_node *op, ir_node *k, ir_mode *mode)
327 ir_node *in[2] = {op, k};
329 res = new_ir_node (irg, block, op_Shl, mode, 2, in);
330 res = optimize (res);
336 new_r_Shr (ir_graph *irg, ir_node *block,
337 ir_node *op, ir_node *k, ir_mode *mode)
339 ir_node *in[2] = {op, k};
341 res = new_ir_node (irg, block, op_Shr, mode, 2, in);
342 res = optimize (res);
348 new_r_Shrs (ir_graph *irg, ir_node *block,
349 ir_node *op, ir_node *k, ir_mode *mode)
351 ir_node *in[2] = {op, k};
353 res = new_ir_node (irg, block, op_Shrs, mode, 2, in);
354 res = optimize (res);
360 new_r_Rot (ir_graph *irg, ir_node *block,
361 ir_node *op, ir_node *k, ir_mode *mode)
363 ir_node *in[2] = {op, k};
365 res = new_ir_node (irg, block, op_Rot, mode, 2, in);
366 res = optimize (res);
372 new_r_Abs (ir_graph *irg, ir_node *block,
373 ir_node *op, ir_mode *mode)
375 ir_node *in[1] = {op};
377 res = new_ir_node (irg, block, op_Abs, mode, 1, in);
378 res = optimize (res);
384 new_r_Cmp (ir_graph *irg, ir_node *block,
385 ir_node *op1, ir_node *op2)
387 ir_node *in[2] = {op1, op2};
389 res = new_ir_node (irg, block, op_Cmp, mode_T, 2, in);
390 res = optimize (res);
396 new_r_Jmp (ir_graph *irg, ir_node *block)
400 res = new_ir_node (irg, block, op_Jmp, mode_X, 0, in);
401 res = optimize (res);
407 new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c)
409 ir_node *in[1] = {c};
411 res = new_ir_node (irg, block, op_Cond, mode_T, 1, in);
412 res->attr.c.kind = dense;
413 res->attr.c.default_proj = 0;
414 res = optimize (res);
420 new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
421 ir_node *callee, int arity, ir_node **in, type *type)
428 NEW_ARR_A (ir_node *, r_in, r_arity);
431 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
433 res = new_ir_node (irg, block, op_Call, mode_T, r_arity, r_in);
435 assert(is_method_type(type));
436 set_Call_type(res, type);
437 res = optimize (res);
443 new_r_Return (ir_graph *irg, ir_node *block,
444 ir_node *store, int arity, ir_node **in)
451 NEW_ARR_A (ir_node *, r_in, r_arity);
453 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
454 res = new_ir_node (irg, block, op_Return, mode_X, r_arity, r_in);
455 res = optimize (res);
461 new_r_Raise (ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
463 ir_node *in[2] = {store, obj};
465 res = new_ir_node (irg, block, op_Raise, mode_T, 2, in);
467 res = optimize (res);
473 new_r_Load (ir_graph *irg, ir_node *block,
474 ir_node *store, ir_node *adr)
476 ir_node *in[2] = {store, adr};
478 res = new_ir_node (irg, block, op_Load, mode_T, 2, in);
480 res = optimize (res);
486 new_r_Store (ir_graph *irg, ir_node *block,
487 ir_node *store, ir_node *adr, ir_node *val)
489 ir_node *in[3] = {store, adr, val};
491 res = new_ir_node (irg, block, op_Store, mode_T, 3, in);
493 res = optimize (res);
499 new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
500 ir_node *size, type *alloc_type, where_alloc where)
502 ir_node *in[2] = {store, size};
504 res = new_ir_node (irg, block, op_Alloc, mode_T, 2, in);
506 res->attr.a.where = where;
507 res->attr.a.type = alloc_type;
509 res = optimize (res);
515 new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
516 ir_node *ptr, ir_node *size, type *free_type)
518 ir_node *in[3] = {store, ptr, size};
520 res = new_ir_node (irg, block, op_Free, mode_T, 3, in);
522 res->attr.f = free_type;
524 res = optimize (res);
530 new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
531 int arity, ir_node **in, entity *ent)
538 NEW_ARR_A (ir_node *, r_in, r_arity);
541 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
542 res = new_ir_node (irg, block, op_Sel, mode_p, r_arity, r_in);
544 res->attr.s.ltyp = static_linkage;
545 res->attr.s.ent = ent;
547 res = optimize (res);
553 new_r_SymConst (ir_graph *irg, ir_node *block, type_or_id_p value,
554 symconst_kind symkind)
559 if (symkind == linkage_ptr_info)
563 res = new_ir_node (irg, block, op_SymConst, mode, 0, in);
565 res->attr.i.num = symkind;
566 if (symkind == linkage_ptr_info) {
567 res->attr.i.tori.ptrinfo = (ident *)value;
569 assert ( ( (symkind == type_tag)
570 || (symkind == size))
571 && (is_type(value)));
572 res->attr.i.tori.typ = (type *)value;
574 res = optimize (res);
580 new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in)
584 res = new_ir_node (irg, block, op_Sync, mode_M, arity, in);
586 res = optimize (res);
594 return current_ir_graph->bad;
597 /** ********************/
598 /** public interfaces */
599 /** construction tools */
601 /****f* ircons/new_Start
604 * new_Start -- create a new Start node in the current block
607 * s = new_Start(void);
608 * ir_node* new_Start(void);
611 * s - pointer to the created Start node
620 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
621 op_Start, mode_T, 0, NULL);
623 res = optimize (res);
633 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
634 op_End, mode_X, -1, NULL);
636 res = optimize (res);
642 /* Constructs a Block with a fixed number of predecessors.
643 Does set current_block. Can be used with automatic Phi
644 node construction. */
646 new_Block (int arity, ir_node **in)
650 res = new_r_Block (current_ir_graph, arity, in);
652 /* Create and initialize array for Phi-node construction. */
653 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
654 current_ir_graph->n_loc);
655 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
657 res = optimize (res);
658 current_ir_graph->current_block = res;
665 /* ***********************************************************************/
666 /* Methods necessary for automatic Phi node creation */
668 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
669 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
670 ir_node *new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
671 ir_node *new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
673 Call Graph: ( A ---> B == A "calls" B)
675 get_value mature_block
683 get_r_value_internal |
687 new_r_Phi0 new_r_Phi_in
689 * *************************************************************************** */
691 /* Creates a Phi node with 0 predecessors */
693 new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
696 res = new_ir_node (irg, block, op_Phi, mode, 0, NULL);
701 /* There are two implementations of the Phi node construction. The first
702 is faster, but does not work for blocks with more than 2 predecessors.
703 The second works always but is slower and causes more unnecessary Phi
705 Select the implementations by the following preprocessor flag set in
707 #if USE_FAST_PHI_CONSTRUCTION
709 /* This is a stack used for allocating and deallocating nodes in
710 new_r_Phi_in. The original implementation used the obstack
711 to model this stack, now it is explicit. This reduces side effects.
713 #if USE_EXPICIT_PHI_IN_STACK
718 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
720 res->stack = NEW_ARR_F (ir_node *, 1);
727 free_Phi_in_stack(Phi_in_stack *s) {
732 void free_to_Phi_in_stack(ir_node *phi) {
733 assert(get_irn_opcode(phi) == iro_Phi);
735 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
736 current_ir_graph->Phi_in_stack->pos)
737 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
739 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
741 (current_ir_graph->Phi_in_stack->pos)++;
745 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
746 int arity, ir_node **in) {
748 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
749 int pos = current_ir_graph->Phi_in_stack->pos;
753 /* We need to allocate a new node */
754 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
756 /* reuse the old node and initialize it again. */
759 assert (res->kind == k_ir_node);
760 assert (res->op == op_Phi);
765 /* ???!!! How to free the old in array?? */
766 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
768 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
770 (current_ir_graph->Phi_in_stack->pos)--;
774 #endif /* USE_EXPICIT_PHI_IN_STACK */
776 /* Creates a Phi node with a given, fixed array **in of predecessors.
777 If the Phi node is unnecessary, as the same value reaches the block
778 through all control flow paths, it is eliminated and the value
779 returned directly. This constructor is only intended for use in
780 the automatic Phi node generation triggered by get_value or mature.
781 The implementation is quite tricky and depends on the fact, that
782 the nodes are allocated on a stack:
783 The in array contains predecessors and NULLs. The NULLs appear,
784 if get_r_value_internal, that computed the predecessors, reached
785 the same block on two paths. In this case the same value reaches
786 this block on both paths, there is no definition in between. We need
787 not allocate a Phi where these paths merge, but we have to communicate
788 this fact to the caller. This happens by returning a pointer to the
789 node the caller _will_ allocate. (Yes, we predict the address. We can
790 do so because the nodes are allocated on the obstack.) The caller then
791 finds a pointer to itself and, when this routine is called again,
795 new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
796 ir_node **in, int ins)
799 ir_node *res, *known;
801 /* allocate a new node on the obstack.
802 This can return a node to which some of the pointers in the in-array
804 Attention: the constructor copies the in array, i.e., the later changes
805 to the array in this routine do not affect the constructed node! If
806 the in array contains NULLs, there will be missing predecessors in the
808 Is this a possible internal state of the Phi node generation? */
809 #if USE_EXPICIT_PHI_IN_STACK
810 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
812 res = known = new_ir_node (irg, block, op_Phi, mode, ins, in);
814 /* The in-array can contain NULLs. These were returned by
815 get_r_value_internal if it reached the same block/definition on a
817 The NULLs are replaced by the node itself to simplify the test in the
819 for (i=0; i < ins; ++i)
820 if (in[i] == NULL) in[i] = res;
822 /* This loop checks whether the Phi has more than one predecessor.
823 If so, it is a real Phi node and we break the loop. Else the
824 Phi node merges the same definition on several paths and therefore
826 for (i=0; i < ins; ++i)
828 if (in[i]==res || in[i]==known) continue;
836 /* i==ins: there is at most one predecessor, we don't need a phi node. */
838 #if USE_EXPICIT_PHI_IN_STACK
839 free_to_Phi_in_stack(res);
841 obstack_free (current_ir_graph->obst, res);
845 res = optimize (res);
849 /* return the pointer to the Phi node. This node might be deallocated! */
854 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
856 /** This function computes the predecessors for a real Phi node, and then
857 allocates and returns this node. The routine called to allocate the
858 node might optimize it away and return a real value, or even a pointer
859 to a deallocated Phi node on top of the obstack!
860 This function is called with an in-array of proper size. **/
861 static inline ir_node *
862 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
864 ir_node *prevBlock, *res;
867 /* This loop goes to all predecessor blocks of the block the Phi node is in
868 and there finds the operands of the Phi node by calling
869 get_r_value_internal. */
870 for (i = 1; i <= ins; ++i) {
871 assert (block->in[i]);
872 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
874 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
877 /* After collecting all predecessors into the array nin a new Phi node
878 with these predecessors is created. This constructor contains an
879 optimization: If all predecessors of the Phi node are identical it
880 returns the only operand instead of a new Phi node. If the value
881 passes two different control flow edges without being defined, and
882 this is the second path treated, a pointer to the node that will be
883 allocated for the first path (recursion) is returned. We already
884 know the address of this node, as it is the next node to be allocated
885 and will be placed on top of the obstack. (The obstack is a _stack_!) */
886 res = new_r_Phi_in (current_ir_graph, block, mode, nin, ins);
888 /* Now we know the value for "pos" and can enter it in the array with
889 all known local variables. Attention: this might be a pointer to
890 a node, that later will be allocated!!! See new_r_Phi_in.
891 If this is called in mature, after some set_value in the same block,
892 the proper value must not be overwritten:
894 get_value (makes Phi0, puts it into graph_arr)
895 set_value (overwrites Phi0 in graph_arr)
896 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
899 if (!block->attr.block.graph_arr[pos]) {
900 block->attr.block.graph_arr[pos] = res;
902 /* printf(" value already computed by %s\n",
903 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
909 /* This function returns the last definition of a variable. In case
910 this variable was last defined in a previous block, Phi nodes are
911 inserted. If the part of the firm graph containing the definition
912 is not yet constructed, a dummy Phi node is returned. */
914 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
917 /* There are 4 cases to treat.
919 1. The block is not mature and we visit it the first time. We can not
920 create a proper Phi node, therefore a Phi0, i.e., a Phi without
921 predecessors is returned. This node is added to the linked list (field
922 "link") of the containing block to be completed when this block is
923 matured. (Completion will add a new Phi and turn the Phi0 into an Id
926 2. The value is already known in this block, graph_arr[pos] is set and we
927 visit the block the first time. We can return the value without
928 creating any new nodes.
930 3. The block is mature and we visit it the first time. A Phi node needs
931 to be created (phi_merge). If the Phi is not needed, as all its
932 operands are the same value reaching the block through different
933 paths, it's optimized away and the value itself is returned.
935 4. The block is mature, and we visit it the second time. Now two
936 subcases are possible:
937 * The value was computed completely the last time we were here. This
938 is the case if there is no loop. We can return the proper value.
939 * The recursion that visited this node and set the flag did not
940 return yet. We are computing a value in a loop and need to
941 break the recursion without knowing the result yet.
942 @@@ strange case. Straight forward we would create a Phi before
943 starting the computation of its predecessors. In this case we will
944 find a Phi here in any case. The problem is that this implementation
945 only creates a Phi after computing the predecessors, so that it is
946 hard to compute self references of this Phi. @@@
947 There is no simple check for the second subcase. Therefore we check
948 for a second visit and treat all such cases as the second subcase.
949 Anyways, the basic situation is the same: we reached a block
950 on two paths without finding a definition of the value: No Phi
951 nodes are needed on both paths.
952 We return this information "Two paths, no Phi needed" by a very tricky
953 implementation that relies on the fact that an obstack is a stack and
954 will return a node with the same address on different allocations.
955 Look also at phi_merge and new_r_phi_in to understand this.
956 @@@ Unfortunately this does not work, see testprogram
957 three_cfpred_example.
961 /* case 4 -- already visited. */
962 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
964 /* visited the first time */
965 set_irn_visited(block, get_irg_visited(current_ir_graph));
967 /* Get the local valid value */
968 res = block->attr.block.graph_arr[pos];
970 /* case 2 -- If the value is actually computed, return it. */
971 if (res) { return res;};
973 if (block->attr.block.matured) { /* case 3 */
975 /* The Phi has the same amount of ins as the corresponding block. */
976 int ins = get_irn_arity(block);
978 NEW_ARR_A (ir_node *, nin, ins);
980 /* Phi merge collects the predecessors and then creates a node. */
981 res = phi_merge (block, pos, mode, nin, ins);
983 } else { /* case 1 */
984 /* The block is not mature, we don't know how many in's are needed. A Phi
985 with zero predecessors is created. Such a Phi node is called Phi0
986 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
987 to the list of Phi0 nodes in this block to be matured by mature_block
989 The Phi0 has to remember the pos of its internal value. If the real
990 Phi is computed, pos is used to update the array with the local
993 res = new_r_Phi0 (current_ir_graph, block, mode);
994 res->attr.phi0_pos = pos;
995 res->link = block->link;
999 /* If we get here, the frontend missed a use-before-definition error */
1002 printf("Error: no value set. Use of undefined variable. Initializing
1004 assert (mode->code >= irm_f && mode->code <= irm_p);
1005 res = new_r_Const (current_ir_graph, block, mode,
1006 tarval_mode_null[mode->code]);
1009 /* The local valid value is available now. */
1010 block->attr.block.graph_arr[pos] = res;
1017 /** This is the simple algorithm. It first generates a Phi0, then
1018 it starts the recursion. This causes an Id at the entry of
1019 every block that has no definition of the value! **/
1021 #if USE_EXPICIT_PHI_IN_STACK
/* Dummy constructor: when the explicit Phi_in stack is configured but the
   simple (slow) Phi construction algorithm is in use, no stack is needed,
   so no storage is allocated and NULL is returned. */
1023 Phi_in_stack * new_Phi_in_stack() { return NULL; }
/* Dummy destructor paired with the dummy new_Phi_in_stack: the simple Phi
   construction algorithm allocates no stack, so there is nothing to free.
   (free(NULL) would also be a no-op, but this avoids the call entirely.) */
1024 void free_Phi_in_stack(Phi_in_stack *s) { }
1028 new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1029 ir_node **in, int ins)
1032 ir_node *res, *known;
1034 /* Allocate a new node on the obstack. The allocation copies the in
1036 res = new_ir_node (irg, block, op_Phi, mode, ins, in);
1038 /* This loop checks whether the Phi has more than one predecessor.
1039 If so, it is a real Phi node and we break the loop. Else the
1040 Phi node merges the same definition on several paths and therefore
1041 is not needed. Don't consider Bad nodes! */
1043 for (i=0; i < ins; ++i)
1045 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1053 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1056 obstack_free (current_ir_graph->obst, res);
1059 /* A undefined value, e.g., in unreachable code. */
1063 res = optimize (res);
1071 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1073 #if PRECISE_EXC_CONTEXT
1074 static inline ir_node *
1075 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1078 new_frag_arr (ir_node *n) {
1080 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1081 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1082 sizeof(ir_node *)*current_ir_graph->n_loc);
1083 /* Here we rely on the fact that all frag ops have Memory as first result! */
1084 if (get_irn_op(n) == op_Call)
1085 arr[0] = new_Proj(n, mode_M, 3);
1087 arr[0] = new_Proj(n, mode_M, 0);
1088 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1093 get_frag_arr (ir_node *n) {
1094 if (get_irn_op(n) == op_Call) {
1095 return n->attr.call.frag_arr;
1096 } else if (get_irn_op(n) == op_Alloc) {
1097 return n->attr.a.frag_arr;
1099 return n->attr.frag_arr;
1104 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1105 if (!frag_arr[pos]) frag_arr[pos] = val;
1106 if (frag_arr[current_ir_graph->n_loc - 1])
1107 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
1111 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1117 assert(is_fragile_op(cfOp));
1119 frag_arr = get_frag_arr(cfOp);
1120 res = frag_arr[pos];
1122 if (block->attr.block.graph_arr[pos]) {
1123 /* There was a set_value after the cfOp and no get_value before that
1124 set_value. We must build a Phi node now. */
1125 if (block->attr.block.matured) {
1126 int ins = get_irn_arity(block);
1128 NEW_ARR_A (ir_node *, nin, ins);
1129 phi_merge(block, pos, mode, nin, ins);
1131 res = new_r_Phi0 (current_ir_graph, block, mode);
1132 res->attr.phi0_pos = pos;
1133 res->link = block->link;
1136 set_frag_value(frag_arr, pos, res);
1138 res = get_r_value_internal(block, pos, mode);
1145 /** This function allocates a dummy Phi node to break recursions,
1146 computes the predecessors for the real phi node, and then
1147 allocates and returns this node. The routine called to allocate the
1148 node might optimize it away and return a real value.
1149 This function is called with an in-array of proper size. **/
1150 static inline ir_node *
1151 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1153 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1157 /* If this block has no value at pos create a Phi0 and remember it
1158 in graph_arr to break recursions.
1159 Else we may not set graph_arr as there a later value is remembered. */
1161 if (!block->attr.block.graph_arr[pos]) {
1162 /* This is commented out as collapsing to Bads is no good idea.
1163 Either we need an assert here, or we need to call a routine
1164 that deals with this case as appropriate for the given language.
1165 Right now a self referencing Id is created which will crash irg_vrfy().
1167 Even if all variables are defined before use, it can happen that
1168 we get to the start block, if a cond has been replaced by a tuple
1169 (bad, jmp). As the start has a self referencing control flow edge,
1170 we get a self referencing Id, which is hard to optimize away. We avoid
1171 this by defining the value as a Bad node.
1172 Returning a const with tarval_bad is a preliminary solution. In some
1173 situations we might want a Warning or an Error. */
1175 if (block == get_irg_start_block(current_ir_graph)) {
1176 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1177 /* We don't need to care about exception ops in the start block.
1178 There are none by definition. */
1179 return block->attr.block.graph_arr[pos];
1181 phi0 = new_r_Phi0(current_ir_graph, block, mode);
1182 block->attr.block.graph_arr[pos] = phi0;
1183 #if PRECISE_EXC_CONTEXT
1184 /* Set graph_arr for fragile ops. Also here we should break recursion. */
1185 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1190 /* This loop goes to all predecessor blocks of the block the Phi node
1191 is in and there finds the operands of the Phi node by calling
1192 get_r_value_internal. */
1193 for (i = 1; i <= ins; ++i) {
1194 prevCfOp = skip_Proj(block->in[i]);
1196 if (is_Bad(prevCfOp)) {
1197 /* In case a Cond has been optimized we would get right to the start block
1198 with an invalid definition. */
1199 nin[i-1] = new_Bad();
1202 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1204 if (!is_Bad(prevBlock)) {
1205 #if PRECISE_EXC_CONTEXT
1206 if (is_fragile_op(prevCfOp))
1207 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1210 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1212 nin[i-1] = new_Bad();
1216 /* After collecting all predecessors into the array nin a new Phi node
1217 with these predecessors is created. This constructor contains an
1218 optimization: If all predecessors of the Phi node are identical it
1219 returns the only operand instead of a new Phi node. */
1220 res = new_r_Phi_in (current_ir_graph, block, mode, nin, ins);
1222 /* In case we allocated a Phi0 node at the beginning of this procedure,
1223 we need to exchange this Phi0 with the real Phi. */
1225 exchange(phi0, res);
1226 block->attr.block.graph_arr[pos] = res;
1227 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1228 only an optimization. */
1234 /* This function returns the last definition of a variable. In case
1235 this variable was last defined in a previous block, Phi nodes are
1236 inserted. If the part of the firm graph containing the definition
1237 is not yet constructed, a dummy Phi node is returned. */
1239 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1242 /* There are 4 cases to treat.
1244 1. The block is not mature and we visit it the first time. We can not
1245 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1246 predecessors is returned. This node is added to the linked list (field
1247 "link") of the containing block to be completed when this block is
1248 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1251 2. The value is already known in this block, graph_arr[pos] is set and we
1252 visit the block the first time. We can return the value without
1253 creating any new nodes.
1255 3. The block is mature and we visit it the first time. A Phi node needs
1256 to be created (phi_merge). If the Phi is not needed, as all its
1257 operands are the same value reaching the block through different
1258 paths, it's optimized away and the value itself is returned.
1260 4. The block is mature, and we visit it the second time. Now two
1261 subcases are possible:
1262 * The value was computed completely the last time we were here. This
1263 is the case if there is no loop. We can return the proper value.
1264 * The recursion that visited this node and set the flag did not
1265 return yet. We are computing a value in a loop and need to
1266 break the recursion. This case only happens if we visited
1267 the same block with phi_merge before, which inserted a Phi0.
1268 So we return the Phi0.
/* NOTE(review): tail of get_r_value_internal (fast Phi construction
   variant).  Its signature and several interior lines are elided in this
   chunk; the code below is kept byte-identical, only comments changed. */
  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
    /* As phi_merge allocates a Phi0 this value is always defined. Here
       is the critical difference of the two algorithms. */
    assert(block->attr.block.graph_arr[pos]);
    return block->attr.block.graph_arr[pos];

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) { return res; };

  if (block->attr.block.matured) { /* case 3 */

    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);
    NEW_ARR_A (ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge (block, pos, mode, nin, ins);

  } else { /* case 1 */
    /* The block is not mature, we don't know how many in's are needed.  A Phi
       with zero predecessors is created.  Such a Phi node is called Phi0
       node.  The Phi0 is then added to the list of Phi0 nodes in this block
       to be matured by mature_block later.
       The Phi0 has to remember the pos of its internal value so that the
       real Phi can update the array with the local value when it is built. */
    res = new_r_Phi0 (current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;

  /* If we get here, the frontend missed a use-before-definition error */
  /* NOTE(review): the string literal below is truncated in this chunk --
     it must be re-closed (presumably "... Initializing to zero.\n");). */
  printf("Error: no value set. Use of undefined variable. Initializing
  assert (mode->code >= irm_f && mode->code <= irm_p);
  res = new_r_Const (current_ir_graph, block, mode,
		     tarval_mode_null[mode->code]);

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;
1328 #endif /* USE_FAST_PHI_CONSTRUCTION */
1330 /* ************************************************************************** */
1332 /** Finalize a Block node, when all control flows are known. */
1333 /** Acceptable parameters are only Block nodes. */
/* Finalize a Block node once all of its control-flow predecessors are
   known.  Only Block nodes are acceptable.  NOTE(review): declarations and
   some statements of the body are elided in this chunk; code kept
   byte-identical, only comments changed. */
mature_block (ir_node *block)

  assert (get_irn_opcode(block) == iro_Block);

  if (!get_Block_matured(block)) {

    /* turn the dynamic in-array into a static one. */
    ins = ARR_LEN (block->in)-1;
    NEW_ARR_A (ir_node *, nin, ins);
    /* @@@ something is strange here... why isn't the array copied? */

    /* Traverse a chain of Phi nodes attached to this block and mature
       them: each Phi0 on the block's link list is replaced by a proper
       Phi built via phi_merge. */
    for (n = block->link; n; n=next) {
      inc_irg_visited(current_ir_graph);
      exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));

    block->attr.block.matured = 1;

    /* Now, as the block is a finished firm node, we can optimize it.
       Since other nodes have been allocated since the block was created
       we can not free the node on the obstack.  Therefore we have to call
       the in-place variant of the optimizer.
       Unfortunately the optimization does not change a lot, as all allocated
       nodes refer to the unoptimized node. */
    block = optimize_in_place(block);
1373 new_Phi (int arity, ir_node **in, ir_mode *mode)
1375 return new_r_Phi (current_ir_graph, current_ir_graph->current_block,
1380 new_Const (ir_mode *mode, tarval *con)
1382 return new_r_Const (current_ir_graph, current_ir_graph->start_block,
1387 new_Id (ir_node *val, ir_mode *mode)
1389 return new_r_Id (current_ir_graph, current_ir_graph->current_block,
1394 new_Proj (ir_node *arg, ir_mode *mode, long proj)
1396 return new_r_Proj (current_ir_graph, current_ir_graph->current_block,
1401 new_defaultProj (ir_node *arg, long max_proj)
1404 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
1405 arg->attr.c.kind = fragmentary;
1406 arg->attr.c.default_proj = max_proj;
1407 res = new_Proj (arg, mode_X, max_proj);
1412 new_Conv (ir_node *op, ir_mode *mode)
1414 return new_r_Conv (current_ir_graph, current_ir_graph->current_block,
1419 new_Tuple (int arity, ir_node **in)
1421 return new_r_Tuple (current_ir_graph, current_ir_graph->current_block,
1426 new_Add (ir_node *op1, ir_node *op2, ir_mode *mode)
1428 return new_r_Add (current_ir_graph, current_ir_graph->current_block,
1433 new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode)
1435 return new_r_Sub (current_ir_graph, current_ir_graph->current_block,
1441 new_Minus (ir_node *op, ir_mode *mode)
1443 return new_r_Minus (current_ir_graph, current_ir_graph->current_block,
1448 new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode)
1450 return new_r_Mul (current_ir_graph, current_ir_graph->current_block,
1455 new_Quot (ir_node *memop, ir_node *op1, ir_node *op2)
1458 res = new_r_Quot (current_ir_graph, current_ir_graph->current_block,
1460 #if PRECISE_EXC_CONTEXT
1461 res->attr.frag_arr = new_frag_arr(res);
1468 new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2)
1471 res = new_r_DivMod (current_ir_graph, current_ir_graph->current_block,
1473 #if PRECISE_EXC_CONTEXT
1474 res->attr.frag_arr = new_frag_arr(res);
1481 new_Div (ir_node *memop, ir_node *op1, ir_node *op2)
1484 res = new_r_Div (current_ir_graph, current_ir_graph->current_block,
1486 #if PRECISE_EXC_CONTEXT
1487 res->attr.frag_arr = new_frag_arr(res);
1494 new_Mod (ir_node *memop, ir_node *op1, ir_node *op2)
1497 res = new_r_Mod (current_ir_graph, current_ir_graph->current_block,
1499 #if PRECISE_EXC_CONTEXT
1500 res->attr.frag_arr = new_frag_arr(res);
1507 new_And (ir_node *op1, ir_node *op2, ir_mode *mode)
1509 return new_r_And (current_ir_graph, current_ir_graph->current_block,
1514 new_Or (ir_node *op1, ir_node *op2, ir_mode *mode)
1516 return new_r_Or (current_ir_graph, current_ir_graph->current_block,
1521 new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode)
1523 return new_r_Eor (current_ir_graph, current_ir_graph->current_block,
1528 new_Not (ir_node *op, ir_mode *mode)
1530 return new_r_Not (current_ir_graph, current_ir_graph->current_block,
1535 new_Shl (ir_node *op, ir_node *k, ir_mode *mode)
1537 return new_r_Shl (current_ir_graph, current_ir_graph->current_block,
1542 new_Shr (ir_node *op, ir_node *k, ir_mode *mode)
1544 return new_r_Shr (current_ir_graph, current_ir_graph->current_block,
1549 new_Shrs (ir_node *op, ir_node *k, ir_mode *mode)
1551 return new_r_Shrs (current_ir_graph, current_ir_graph->current_block,
1556 new_Rotate (ir_node *op, ir_node *k, ir_mode *mode)
1558 return new_r_Rot (current_ir_graph, current_ir_graph->current_block,
1563 new_Abs (ir_node *op, ir_mode *mode)
1565 return new_r_Abs (current_ir_graph, current_ir_graph->current_block,
1570 new_Cmp (ir_node *op1, ir_node *op2)
1572 return new_r_Cmp (current_ir_graph, current_ir_graph->current_block,
1579 return new_r_Jmp (current_ir_graph, current_ir_graph->current_block);
1583 new_Cond (ir_node *c)
1585 return new_r_Cond (current_ir_graph, current_ir_graph->current_block, c);
1589 new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
1593 res = new_r_Call (current_ir_graph, current_ir_graph->current_block,
1594 store, callee, arity, in, type);
1595 #if PRECISE_EXC_CONTEXT
1596 res->attr.call.frag_arr = new_frag_arr(res);
1603 new_Return (ir_node* store, int arity, ir_node **in)
1605 return new_r_Return (current_ir_graph, current_ir_graph->current_block,
1610 new_Raise (ir_node *store, ir_node *obj)
1612 return new_r_Raise (current_ir_graph, current_ir_graph->current_block,
1617 new_Load (ir_node *store, ir_node *addr)
1620 res = new_r_Load (current_ir_graph, current_ir_graph->current_block,
1622 #if PRECISE_EXC_CONTEXT
1623 res->attr.frag_arr = new_frag_arr(res);
1630 new_Store (ir_node *store, ir_node *addr, ir_node *val)
1633 res = new_r_Store (current_ir_graph, current_ir_graph->current_block,
1635 #if PRECISE_EXC_CONTEXT
1636 res->attr.frag_arr = new_frag_arr(res);
1643 new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
1647 res = new_r_Alloc (current_ir_graph, current_ir_graph->current_block,
1648 store, size, alloc_type, where);
1649 #if PRECISE_EXC_CONTEXT
1650 res->attr.a.frag_arr = new_frag_arr(res);
1657 new_Free (ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1659 return new_r_Free (current_ir_graph, current_ir_graph->current_block,
1660 store, ptr, size, free_type);
1664 new_simpleSel (ir_node *store, ir_node *objptr, entity *ent)
1665 /* GL: objptr was called frame before. Frame was a bad choice for the name
1666 as the operand could as well be a pointer to a dynamic object. */
1668 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1669 store, objptr, 0, NULL, ent);
1673 new_Sel (ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
1675 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1676 store, objptr, n_index, index, sel);
1680 new_SymConst (type_or_id_p value, symconst_kind kind)
1682 return new_r_SymConst (current_ir_graph, current_ir_graph->current_block,
1687 new_Sync (int arity, ir_node** in)
1689 return new_r_Sync (current_ir_graph, current_ir_graph->current_block,
1697 return current_ir_graph->bad;
1700 /* ********************************************************************* */
1701 /* Comfortable interface with automatic Phi node construction. */
1702 /* (Uses also constructors of ?? interface, except new_Block. */
1703 /* ********************************************************************* */
1705 /** Block construction **/
1706 /* immature Block without predecessors */
1707 ir_node *new_immBlock (void) {
1710 /* creates a new dynamic in-array as length of in is -1 */
1711 res = new_ir_node (current_ir_graph, NULL, op_Block, mode_R, -1, NULL);
1712 current_ir_graph->current_block = res;
1713 res->attr.block.matured = 0;
1714 set_Block_block_visited(res, 0);
1716 /* Create and initialize array for Phi-node construction. */
1717 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1718 current_ir_graph->n_loc);
1719 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1721 /* Immature block may not be optimized! */
/* add an in-edge (a jmp or other control flow predecessor) to a block */
1729 add_in_edge (ir_node *block, ir_node *jmp)
1731 if (block->attr.block.matured) {
1732 assert(0 && "Error: Block already matured!\n");
1735 assert (jmp != NULL);
1736 ARR_APP1 (ir_node *, block->in, jmp);
1740 /* changing the current block */
1742 switch_block (ir_node *target)
1744 current_ir_graph->current_block = target;
1747 /* ************************ */
1748 /* parameter administration */
1750 /* get a value from the parameter array from the current block by its index */
1752 get_value (int pos, ir_mode *mode)
1754 inc_irg_visited(current_ir_graph);
1755 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
1759 /* set a value at position pos in the parameter array from the current block */
1761 set_value (int pos, ir_node *value)
1763 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
1766 /* get the current store */
1770 /* GL: one could call get_value instead */
1771 inc_irg_visited(current_ir_graph);
1772 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
1775 /* set the current store */
1777 set_store (ir_node *store)
1779 /* GL: one could call set_value instead */
1780 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
1783 /* ********************************************************************* */
1786 /* call once for each run of the library */