1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Authors: Martin Trapp, Christian Schaefer
6 ** ircons.c: basic and more detailed irnode constructors
7 ** store, block and parameter administration.
8 ** Adapted to extended FIRM nodes (exceptions...) and commented
9 ** by Goetz Lindenmaier
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
28 /* memset belongs to string.h */
31 #if USE_EXPICIT_PHI_IN_STACK
32 /* A stack needed for the automatic Phi node construction in constructor
33 Phi_in. Redefinition in irgraph.c!! */
38 typedef struct Phi_in_stack Phi_in_stack;
41 /*** ******************************************** */
42 /** private interfaces, for professional use only */
44 /* Constructs a Block with a fixed number of predecessors.
45 Does not set current_block. Can not be used with automatic
46 Phi node construction. */
48 new_r_Block (ir_graph *irg, int arity, ir_node **in)
52 res = new_ir_node (irg, NULL, op_Block, mode_R, arity, in);
53 set_Block_matured(res, 1);
54 set_Block_block_visited(res, 0);
61 new_r_Start (ir_graph *irg, ir_node *block)
65 res = new_ir_node (irg, block, op_Start, mode_T, 0, NULL);
72 new_r_End (ir_graph *irg, ir_node *block)
76 res = new_ir_node (irg, block, op_End, mode_X, -1, NULL);
82 /* Creates a Phi node with all predecessors. Calling this constructor
83 is only allowed if the corresponding block is mature. */
85 new_r_Phi (ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
89 assert( get_Block_matured(block) );
90 assert( get_irn_arity(block) == arity );
92 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
100 new_r_Const (ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
103 res = new_ir_node (irg, block, op_Const, mode, 0, NULL);
105 res = optimize (res);
109 res = local_optimize_newby (res);
116 new_r_Id (ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
118 ir_node *in[1] = {val};
120 res = new_ir_node (irg, block, op_Id, mode, 1, in);
121 res = optimize (res);
127 new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
130 ir_node *in[1] = {arg};
132 res = new_ir_node (irg, block, op_Proj, mode, 1, in);
133 res->attr.proj = proj;
136 assert(get_Proj_pred(res));
137 assert(get_nodes_Block(get_Proj_pred(res)));
139 res = optimize (res);
147 new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
151 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
152 arg->attr.c.kind = fragmentary;
153 arg->attr.c.default_proj = max_proj;
154 res = new_r_Proj (irg, block, arg, mode_X, max_proj);
159 new_r_Conv (ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
161 ir_node *in[1] = {op};
163 res = new_ir_node (irg, block, op_Conv, mode, 1, in);
164 res = optimize (res);
171 new_r_Tuple (ir_graph *irg, ir_node *block, int arity, ir_node **in)
175 res = new_ir_node (irg, block, op_Tuple, mode_T, arity, in);
176 res = optimize (res);
182 new_r_Add (ir_graph *irg, ir_node *block,
183 ir_node *op1, ir_node *op2, ir_mode *mode)
185 ir_node *in[2] = {op1, op2};
187 res = new_ir_node (irg, block, op_Add, mode, 2, in);
188 res = optimize (res);
194 new_r_Sub (ir_graph *irg, ir_node *block,
195 ir_node *op1, ir_node *op2, ir_mode *mode)
197 ir_node *in[2] = {op1, op2};
199 res = new_ir_node (irg, block, op_Sub, mode, 2, in);
200 res = optimize (res);
206 new_r_Minus (ir_graph *irg, ir_node *block,
207 ir_node *op, ir_mode *mode)
209 ir_node *in[1] = {op};
211 res = new_ir_node (irg, block, op_Minus, mode, 1, in);
212 res = optimize (res);
218 new_r_Mul (ir_graph *irg, ir_node *block,
219 ir_node *op1, ir_node *op2, ir_mode *mode)
221 ir_node *in[2] = {op1, op2};
223 res = new_ir_node (irg, block, op_Mul, mode, 2, in);
224 res = optimize (res);
230 new_r_Quot (ir_graph *irg, ir_node *block,
231 ir_node *memop, ir_node *op1, ir_node *op2)
233 ir_node *in[3] = {memop, op1, op2};
235 res = new_ir_node (irg, block, op_Quot, mode_T, 3, in);
236 res = optimize (res);
242 new_r_DivMod (ir_graph *irg, ir_node *block,
243 ir_node *memop, ir_node *op1, ir_node *op2)
245 ir_node *in[3] = {memop, op1, op2};
247 res = new_ir_node (irg, block, op_DivMod, mode_T, 3, in);
248 res = optimize (res);
254 new_r_Div (ir_graph *irg, ir_node *block,
255 ir_node *memop, ir_node *op1, ir_node *op2)
257 ir_node *in[3] = {memop, op1, op2};
259 res = new_ir_node (irg, block, op_Div, mode_T, 3, in);
260 res = optimize (res);
266 new_r_Mod (ir_graph *irg, ir_node *block,
267 ir_node *memop, ir_node *op1, ir_node *op2)
269 ir_node *in[3] = {memop, op1, op2};
271 res = new_ir_node (irg, block, op_Mod, mode_T, 3, in);
272 res = optimize (res);
278 new_r_And (ir_graph *irg, ir_node *block,
279 ir_node *op1, ir_node *op2, ir_mode *mode)
281 ir_node *in[2] = {op1, op2};
283 res = new_ir_node (irg, block, op_And, mode, 2, in);
284 res = optimize (res);
290 new_r_Or (ir_graph *irg, ir_node *block,
291 ir_node *op1, ir_node *op2, ir_mode *mode)
293 ir_node *in[2] = {op1, op2};
295 res = new_ir_node (irg, block, op_Or, mode, 2, in);
296 res = optimize (res);
302 new_r_Eor (ir_graph *irg, ir_node *block,
303 ir_node *op1, ir_node *op2, ir_mode *mode)
305 ir_node *in[2] = {op1, op2};
307 res = new_ir_node (irg, block, op_Eor, mode, 2, in);
308 res = optimize (res);
314 new_r_Not (ir_graph *irg, ir_node *block,
315 ir_node *op, ir_mode *mode)
317 ir_node *in[1] = {op};
319 res = new_ir_node (irg, block, op_Not, mode, 1, in);
320 res = optimize (res);
326 new_r_Shl (ir_graph *irg, ir_node *block,
327 ir_node *op, ir_node *k, ir_mode *mode)
329 ir_node *in[2] = {op, k};
331 res = new_ir_node (irg, block, op_Shl, mode, 2, in);
332 res = optimize (res);
338 new_r_Shr (ir_graph *irg, ir_node *block,
339 ir_node *op, ir_node *k, ir_mode *mode)
341 ir_node *in[2] = {op, k};
343 res = new_ir_node (irg, block, op_Shr, mode, 2, in);
344 res = optimize (res);
350 new_r_Shrs (ir_graph *irg, ir_node *block,
351 ir_node *op, ir_node *k, ir_mode *mode)
353 ir_node *in[2] = {op, k};
355 res = new_ir_node (irg, block, op_Shrs, mode, 2, in);
356 res = optimize (res);
362 new_r_Rot (ir_graph *irg, ir_node *block,
363 ir_node *op, ir_node *k, ir_mode *mode)
365 ir_node *in[2] = {op, k};
367 res = new_ir_node (irg, block, op_Rot, mode, 2, in);
368 res = optimize (res);
374 new_r_Abs (ir_graph *irg, ir_node *block,
375 ir_node *op, ir_mode *mode)
377 ir_node *in[1] = {op};
379 res = new_ir_node (irg, block, op_Abs, mode, 1, in);
380 res = optimize (res);
386 new_r_Cmp (ir_graph *irg, ir_node *block,
387 ir_node *op1, ir_node *op2)
389 ir_node *in[2] = {op1, op2};
391 res = new_ir_node (irg, block, op_Cmp, mode_T, 2, in);
392 res = optimize (res);
398 new_r_Jmp (ir_graph *irg, ir_node *block)
402 res = new_ir_node (irg, block, op_Jmp, mode_X, 0, in);
403 res = optimize (res);
409 new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c)
411 ir_node *in[1] = {c};
413 res = new_ir_node (irg, block, op_Cond, mode_T, 1, in);
414 res->attr.c.kind = dense;
415 res->attr.c.default_proj = 0;
416 res = optimize (res);
422 new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
423 ir_node *callee, int arity, ir_node **in, type *type)
430 NEW_ARR_A (ir_node *, r_in, r_arity);
433 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
435 res = new_ir_node (irg, block, op_Call, mode_T, r_arity, r_in);
437 assert(is_method_type(type));
438 set_Call_type(res, type);
439 res = optimize (res);
445 new_r_Return (ir_graph *irg, ir_node *block,
446 ir_node *store, int arity, ir_node **in)
453 NEW_ARR_A (ir_node *, r_in, r_arity);
455 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
456 res = new_ir_node (irg, block, op_Return, mode_X, r_arity, r_in);
457 res = optimize (res);
463 new_r_Raise (ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
465 ir_node *in[2] = {store, obj};
467 res = new_ir_node (irg, block, op_Raise, mode_T, 2, in);
469 res = optimize (res);
475 new_r_Load (ir_graph *irg, ir_node *block,
476 ir_node *store, ir_node *adr)
478 ir_node *in[2] = {store, adr};
480 res = new_ir_node (irg, block, op_Load, mode_T, 2, in);
482 res = optimize (res);
488 new_r_Store (ir_graph *irg, ir_node *block,
489 ir_node *store, ir_node *adr, ir_node *val)
491 ir_node *in[3] = {store, adr, val};
493 res = new_ir_node (irg, block, op_Store, mode_T, 3, in);
495 res = optimize (res);
501 new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
502 ir_node *size, type *alloc_type, where_alloc where)
504 ir_node *in[2] = {store, size};
506 res = new_ir_node (irg, block, op_Alloc, mode_T, 2, in);
508 res->attr.a.where = where;
509 res->attr.a.type = alloc_type;
511 res = optimize (res);
517 new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
518 ir_node *ptr, ir_node *size, type *free_type)
520 ir_node *in[3] = {store, ptr, size};
522 res = new_ir_node (irg, block, op_Free, mode_T, 3, in);
524 res->attr.f = free_type;
526 res = optimize (res);
532 new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
533 int arity, ir_node **in, entity *ent)
540 NEW_ARR_A (ir_node *, r_in, r_arity);
543 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
544 res = new_ir_node (irg, block, op_Sel, mode_p, r_arity, r_in);
546 res->attr.s.ltyp = static_linkage;
547 res->attr.s.ent = ent;
549 res = optimize (res);
555 new_r_SymConst (ir_graph *irg, ir_node *block, type_or_id_p value,
556 symconst_kind symkind)
561 if (symkind == linkage_ptr_info)
565 res = new_ir_node (irg, block, op_SymConst, mode, 0, in);
567 res->attr.i.num = symkind;
568 if (symkind == linkage_ptr_info) {
569 res->attr.i.tori.ptrinfo = (ident *)value;
571 assert ( ( (symkind == type_tag)
572 || (symkind == size))
573 && (is_type(value)));
574 res->attr.i.tori.typ = (type *)value;
576 res = optimize (res);
582 new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in)
586 res = new_ir_node (irg, block, op_Sync, mode_M, arity, in);
588 res = optimize (res);
596 return current_ir_graph->bad;
599 /** ********************/
600 /** public interfaces */
601 /** construction tools */
603 /****f* ircons/new_Start
606 * new_Start -- create a new Start node in the current block
609 * s = new_Start(void);
610 * ir_node* new_Start(void);
613 * s - pointer to the created Start node
622 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
623 op_Start, mode_T, 0, NULL);
625 res = optimize (res);
635 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
636 op_End, mode_X, -1, NULL);
638 res = optimize (res);
644 /* Constructs a Block with a fixed number of predecessors.
645 Does set current_block. Can be used with automatic Phi
646 node construction. */
648 new_Block (int arity, ir_node **in)
652 res = new_r_Block (current_ir_graph, arity, in);
654 /* Create and initialize array for Phi-node construction. */
655 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
656 current_ir_graph->n_loc);
657 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
659 res = optimize (res);
660 current_ir_graph->current_block = res;
667 /* ***********************************************************************/
668 /* Methods necessary for automatic Phi node creation */
670 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
671 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
672 ir_node *new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
673 ir_node *new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
675 Call Graph: ( A ---> B == A "calls" B)
677 get_value mature_block
685 get_r_value_internal |
689 new_r_Phi0 new_r_Phi_in
691 * *************************************************************************** */
693 /* Creates a Phi node with 0 predecessors */
695 new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
698 res = new_ir_node (irg, block, op_Phi, mode, 0, NULL);
703 /* There are two implementations of the Phi node construction. The first
704 is faster, but does not work for blocks with more than 2 predecessors.
705 The second works always but is slower and causes more unnecessary Phi
707 Select the implementations by the following preprocessor flag set in
709 #if USE_FAST_PHI_CONSTRUCTION
711 /* This is a stack used for allocating and deallocating nodes in
712 new_r_Phi_in. The original implementation used the obstack
713 to model this stack, now it is explicit. This reduces side effects.
715 #if USE_EXPICIT_PHI_IN_STACK
720 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
722 res->stack = NEW_ARR_F (ir_node *, 1);
729 free_Phi_in_stack(Phi_in_stack *s) {
734 void free_to_Phi_in_stack(ir_node *phi) {
735 assert(get_irn_opcode(phi) == iro_Phi);
737 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
738 current_ir_graph->Phi_in_stack->pos)
739 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
741 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
743 (current_ir_graph->Phi_in_stack->pos)++;
747 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
748 int arity, ir_node **in) {
750 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
751 int pos = current_ir_graph->Phi_in_stack->pos;
755 /* We need to allocate a new node */
756 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
758 /* reuse the old node and initialize it again. */
761 assert (res->kind == k_ir_node);
762 assert (res->op == op_Phi);
767 /* ???!!! How to free the old in array?? */
768 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
770 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
772 (current_ir_graph->Phi_in_stack->pos)--;
776 #endif /* USE_EXPICIT_PHI_IN_STACK */
778 /* Creates a Phi node with a given, fixed array **in of predecessors.
779 If the Phi node is unnecessary, as the same value reaches the block
780 through all control flow paths, it is eliminated and the value
781 returned directly. This constructor is only intended for use in
782 the automatic Phi node generation triggered by get_value or mature.
783 The implementation is quite tricky and depends on the fact, that
784 the nodes are allocated on a stack:
785 The in array contains predecessors and NULLs. The NULLs appear,
786 if get_r_value_internal, that computed the predecessors, reached
787 the same block on two paths. In this case the same value reaches
788 this block on both paths, there is no definition in between. We need
789 not allocate a Phi where these path's merge, but we have to communicate
790 this fact to the caller. This happens by returning a pointer to the
791 node the caller _will_ allocate. (Yes, we predict the address. We can
792 do so because the nodes are allocated on the obstack.) The caller then
793 finds a pointer to itself and, when this routine is called again,
797 new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
798 ir_node **in, int ins)
801 ir_node *res, *known;
803 /* allocate a new node on the obstack.
804 This can return a node to which some of the pointers in the in-array
806 Attention: the constructor copies the in array, i.e., the later changes
807 to the array in this routine do not affect the constructed node! If
808 the in array contains NULLs, there will be missing predecessors in the
810 Is this a possible internal state of the Phi node generation? */
811 #if USE_EXPICIT_PHI_IN_STACK
812 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
814 res = known = new_ir_node (irg, block, op_Phi, mode, ins, in);
816 /* The in-array can contain NULLs. These were returned by
817 get_r_value_internal if it reached the same block/definition on a
819 The NULLs are replaced by the node itself to simplify the test in the
821 for (i=0; i < ins; ++i)
822 if (in[i] == NULL) in[i] = res;
824 /* This loop checks whether the Phi has more than one predecessor.
825 If so, it is a real Phi node and we break the loop. Else the
826 Phi node merges the same definition on several paths and therefore
828 for (i=0; i < ins; ++i)
830 if (in[i]==res || in[i]==known) continue;
838 /* i==ins: there is at most one predecessor, we don't need a phi node. */
840 #if USE_EXPICIT_PHI_IN_STACK
841 free_to_Phi_in_stack(res);
843 obstack_free (current_ir_graph->obst, res);
847 res = optimize (res);
851 /* return the pointer to the Phi node. This node might be deallocated! */
856 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
858 /** This function computes the predecessors for a real Phi node, and then
859 allocates and returns this node. The routine called to allocate the
860 node might optimize it away and return a real value, or even a pointer
861 to a deallocated Phi node on top of the obstack!
862 This function is called with an in-array of proper size. **/
863 static inline ir_node *
864 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
866 ir_node *prevBlock, *res;
869 /* This loop goes to all predecessor blocks of the block the Phi node is in
870 and there finds the operands of the Phi node by calling
871 get_r_value_internal. */
872 for (i = 1; i <= ins; ++i) {
873 assert (block->in[i]);
874 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
876 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
879 /* After collecting all predecessors into the array nin a new Phi node
880 with these predecessors is created. This constructor contains an
881 optimization: If all predecessors of the Phi node are identical it
882 returns the only operand instead of a new Phi node. If the value
883 passes two different control flow edges without being defined, and
884 this is the second path treated, a pointer to the node that will be
885 allocated for the first path (recursion) is returned. We already
886 know the address of this node, as it is the next node to be allocated
887 and will be placed on top of the obstack. (The obstack is a _stack_!) */
888 res = new_r_Phi_in (current_ir_graph, block, mode, nin, ins);
890 /* Now we now the value for "pos" and can enter it in the array with
891 all known local variables. Attention: this might be a pointer to
892 a node, that later will be allocated!!! See new_r_Phi_in.
893 If this is called in mature, after some set_value in the same block,
894 the proper value must not be overwritten:
896 get_value (makes Phi0, put's it into graph_arr)
897 set_value (overwrites Phi0 in graph_arr)
898 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
901 if (!block->attr.block.graph_arr[pos]) {
902 block->attr.block.graph_arr[pos] = res;
904 /* printf(" value already computed by %s\n",
905 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
911 /* This function returns the last definition of a variable. In case
912 this variable was last defined in a previous block, Phi nodes are
913 inserted. If the part of the firm graph containing the definition
914 is not yet constructed, a dummy Phi node is returned. */
916 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
919 /* There are 4 cases to treat.
921 1. The block is not mature and we visit it the first time. We can not
922 create a proper Phi node, therefore a Phi0, i.e., a Phi without
923 predecessors is returned. This node is added to the linked list (field
924 "link") of the containing block to be completed when this block is
925 matured. (Completion will add a new Phi and turn the Phi0 into an Id
928 2. The value is already known in this block, graph_arr[pos] is set and we
929 visit the block the first time. We can return the value without
930 creating any new nodes.
932 3. The block is mature and we visit it the first time. A Phi node needs
933 to be created (phi_merge). If the Phi is not needed, as all it's
934 operands are the same value reaching the block through different
935 paths, it's optimized away and the value itself is returned.
937 4. The block is mature, and we visit it the second time. Now two
938 subcases are possible:
939 * The value was computed completely the last time we were here. This
940 is the case if there is no loop. We can return the proper value.
941 * The recursion that visited this node and set the flag did not
942 return yet. We are computing a value in a loop and need to
943 break the recursion without knowing the result yet.
944 @@@ strange case. Straight forward we would create a Phi before
945 starting the computation of it's predecessors. In this case we will
946 find a Phi here in any case. The problem is that this implementation
947 only creates a Phi after computing the predecessors, so that it is
948 hard to compute self references of this Phi. @@@
949 There is no simple check for the second subcase. Therefore we check
950 for a second visit and treat all such cases as the second subcase.
951 Anyways, the basic situation is the same: we reached a block
952 on two paths without finding a definition of the value: No Phi
953 nodes are needed on both paths.
954 We return this information "Two paths, no Phi needed" by a very tricky
955 implementation that relies on the fact that an obstack is a stack and
956 will return a node with the same address on different allocations.
957 Look also at phi_merge and new_r_phi_in to understand this.
958 @@@ Unfortunately this does not work, see testprogram
959 three_cfpred_example.
963 /* case 4 -- already visited. */
964 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
966 /* visited the first time */
967 set_irn_visited(block, get_irg_visited(current_ir_graph));
969 /* Get the local valid value */
970 res = block->attr.block.graph_arr[pos];
972 /* case 2 -- If the value is actually computed, return it. */
973 if (res) { return res;};
975 if (block->attr.block.matured) { /* case 3 */
977 /* The Phi has the same amount of ins as the corresponding block. */
978 int ins = get_irn_arity(block);
980 NEW_ARR_A (ir_node *, nin, ins);
982 /* Phi merge collects the predecessors and then creates a node. */
983 res = phi_merge (block, pos, mode, nin, ins);
985 } else { /* case 1 */
986 /* The block is not mature, we don't know how many in's are needed. A Phi
987 with zero predecessors is created. Such a Phi node is called Phi0
988 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
989 to the list of Phi0 nodes in this block to be matured by mature_block
991 The Phi0 has to remember the pos of it's internal value. If the real
992 Phi is computed, pos is used to update the array with the local
995 res = new_r_Phi0 (current_ir_graph, block, mode);
996 res->attr.phi0_pos = pos;
997 res->link = block->link;
1001 /* If we get here, the frontend missed a use-before-definition error */
1004 printf("Error: no value set. Use of undefined variable. Initializing
1006 assert (mode->code >= irm_f && mode->code <= irm_p);
1007 res = new_r_Const (current_ir_graph, block, mode,
1008 tarval_mode_null[mode->code]);
1011 /* The local valid value is available now. */
1012 block->attr.block.graph_arr[pos] = res;
1019 /** This is the simple algorithm. If first generates a Phi0, then
1020 it starts the recursion. This causes an Id at the entry of
1021 every block that has no definition of the value! **/
1023 #if USE_EXPICIT_PHI_IN_STACK
1025 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1026 void free_Phi_in_stack(Phi_in_stack *s) { }
1030 new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1031 ir_node **in, int ins)
1034 ir_node *res, *known;
1036 /* Allocate a new node on the obstack. The allocation copies the in
1038 res = new_ir_node (irg, block, op_Phi, mode, ins, in);
1040 /* This loop checks whether the Phi has more than one predecessor.
1041 If so, it is a real Phi node and we break the loop. Else the
1042 Phi node merges the same definition on several paths and therefore
1043 is not needed. Don't consider Bad nodes! */
1045 for (i=0; i < ins; ++i)
1047 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1055 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1058 obstack_free (current_ir_graph->obst, res);
1061 /* A undefined value, e.g., in unreachable code. */
1065 res = optimize (res);
1073 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1075 #if PRECISE_EXC_CONTEXT
1076 static inline ir_node *
1077 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1080 new_frag_arr (ir_node *n) {
1082 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1083 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1084 sizeof(ir_node *)*current_ir_graph->n_loc);
1085 /* Here we rely on the fact that all frag ops have Memory as first result! */
1086 if (get_irn_op(n) == op_Call)
1087 arr[0] = new_Proj(n, mode_M, 3);
1089 arr[0] = new_Proj(n, mode_M, 0);
1090 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1095 get_frag_arr (ir_node *n) {
1096 if (get_irn_op(n) == op_Call) {
1097 return n->attr.call.frag_arr;
1098 } else if (get_irn_op(n) == op_Alloc) {
1099 return n->attr.a.frag_arr;
1101 return n->attr.frag_arr;
1106 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1107 if (!frag_arr[pos]) frag_arr[pos] = val;
1108 if (frag_arr[current_ir_graph->n_loc - 1])
1109 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
1113 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1118 assert(is_fragile_op(cfOp));
1120 frag_arr = get_frag_arr(cfOp);
1121 res = frag_arr[pos];
1123 if (block->attr.block.graph_arr[pos]) {
1124 /* There was a set_value after the cfOp and no get_value before that
1125 set_value. We must build a Phi node now. */
1126 if (block->attr.block.matured) {
1127 int ins = get_irn_arity(block);
1129 NEW_ARR_A (ir_node *, nin, ins);
1130 phi_merge(block, pos, mode, nin, ins);
1132 res = new_r_Phi0 (current_ir_graph, block, mode);
1133 res->attr.phi0_pos = pos;
1134 res->link = block->link;
1137 set_frag_value(frag_arr, pos, res);
1139 res = get_r_value_internal(block, pos, mode);
1146 /** This function allocates a dummy Phi node to break recursions,
1147 computes the predecessors for the real phi node, and then
1148 allocates and returns this node. The routine called to allocate the
1149 node might optimize it away and return a real value.
1150 This function is called with an in-array of proper size. **/
1151 static inline ir_node *
1152 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1154 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1158 /* If this block has no value at pos create a Phi0 and remember it
1159 in graph_arr to break recursions.
1160 Else we may not set graph_arr as there a later value is remembered. */
1162 if (!block->attr.block.graph_arr[pos]) {
1163 /* This is commented out as collapsing to Bads is no good idea.
1164 Either we need an assert here, or we need to call a routine
1165 that deals with this case as appropriate for the given language.
1166 Right now a self referencing Id is created which will crash irg_vrfy().
1168 Even if all variables are defined before use, it can happen that
1169 we get to the start block, if a cond has been replaced by a tuple
1170 (bad, jmp). As the start has a self referencing control flow edge,
1171 we get a self referencing Id, which is hard to optimize away. We avoid
1172 this by defining the value as a Bad node.
1173 Returning a const with tarval_bad is a preliminary solution. In some
1174 situations we might want a Warning or an Error. */
1176 if (block == get_irg_start_block(current_ir_graph)) {
1177 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1178 /* We don't need to care about exception ops in the start block.
1179 There are none by definition. */
1180 return block->attr.block.graph_arr[pos];
1182 phi0 = new_r_Phi0(current_ir_graph, block, mode);
1183 block->attr.block.graph_arr[pos] = phi0;
1184 #if PRECISE_EXC_CONTEXT
1185 /* Set graph_arr for fragile ops. Also here we should break recursion. */
1186 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1191 /* This loop goes to all predecessor blocks of the block the Phi node
1192 is in and there finds the operands of the Phi node by calling
1193 get_r_value_internal. */
1194 for (i = 1; i <= ins; ++i) {
1195 prevCfOp = skip_Proj(block->in[i]);
1197 if (is_Bad(prevCfOp)) {
1198 /* In case a Cond has been optimized we would get right to the start block
1199 with an invalid definition. */
1200 nin[i-1] = new_Bad();
1203 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1205 if (!is_Bad(prevBlock)) {
1206 #if PRECISE_EXC_CONTEXT
1207 if (is_fragile_op(prevCfOp))
1208 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1211 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1213 nin[i-1] = new_Bad();
1217 /* After collecting all predecessors into the array nin a new Phi node
1218 with these predecessors is created. This constructor contains an
1219 optimization: If all predecessors of the Phi node are identical it
1220 returns the only operand instead of a new Phi node. */
1221 res = new_r_Phi_in (current_ir_graph, block, mode, nin, ins);
1223 /* In case we allocated a Phi0 node at the beginning of this procedure,
1224 we need to exchange this Phi0 with the real Phi. */
1226 exchange(phi0, res);
1227 block->attr.block.graph_arr[pos] = res;
1228 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1229 only an optimization. */
1235 /* This function returns the last definition of a variable. In case
1236 this variable was last defined in a previous block, Phi nodes are
1237 inserted. If the part of the firm graph containing the definition
1238 is not yet constructed, a dummy Phi node is returned. */
1240 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1243 /* There are 4 cases to treat.
1245 1. The block is not mature and we visit it the first time. We can not
1246 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1247 predecessors is returned. This node is added to the linked list (field
1248 "link") of the containing block to be completed when this block is
1249 matured. (Comlpletion will add a new Phi and turn the Phi0 into an Id
1252 2. The value is already known in this block, graph_arr[pos] is set and we
1253 visit the block the first time. We can return the value without
1254 creating any new nodes.
1256 3. The block is mature and we visit it the first time. A Phi node needs
1257 to be created (phi_merge). If the Phi is not needed, as all it's
1258 operands are the same value reaching the block through different
1259 paths, it's optimized away and the value itself is returned.
1261 4. The block is mature, and we visit it the second time. Now two
1262 subcases are possible:
1263 * The value was computed completely the last time we were here. This
1264 is the case if there is no loop. We can return the proper value.
1265 * The recursion that visited this node and set the flag did not
1266 return yet. We are computing a value in a loop and need to
1267 break the recursion. This case only happens if we visited
1268 the same block with phi_merge before, which inserted a Phi0.
1269 So we return the Phi0.
1272 /* case 4 -- already visited. */
1273 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1274 /* As phi_merge allocates a Phi0 this value is always defined. Here
1275 is the critical difference of the two algorithms. */
1276 assert(block->attr.block.graph_arr[pos]);
1277 return block->attr.block.graph_arr[pos];
1280 /* visited the first time */
1281 set_irn_visited(block, get_irg_visited(current_ir_graph));
1283 /* Get the local valid value */
1284 res = block->attr.block.graph_arr[pos];
1286 /* case 2 -- If the value is actually computed, return it. */
1287 if (res) { return res; };
1289 if (block->attr.block.matured) { /* case 3 */
1291 /* The Phi has the same amount of ins as the corresponding block. */
1292 int ins = get_irn_arity(block);
1294 NEW_ARR_A (ir_node *, nin, ins);
1296 /* Phi merge collects the predecessors and then creates a node. */
1297 res = phi_merge (block, pos, mode, nin, ins);
1299 } else { /* case 1 */
1300 /* The block is not mature, we don't know how many in's are needed. A Phi
1301 with zero predecessors is created. Such a Phi node is called Phi0
1302 node. The Phi0 is then added to the list of Phi0 nodes in this block
1303 to be matured by mature_block later.
1304 The Phi0 has to remember the pos of it's internal value. If the real
1305 Phi is computed, pos is used to update the array with the local
1307 res = new_r_Phi0 (current_ir_graph, block, mode);
1308 res->attr.phi0_pos = pos;
1309 res->link = block->link;
1313 /* If we get here, the frontend missed a use-before-definition error */
1316 printf("Error: no value set. Use of undefined variable. Initializing
1318 assert (mode->code >= irm_f && mode->code <= irm_p);
1319 res = new_r_Const (current_ir_graph, block, mode,
1320 tarval_mode_null[mode->code]);
1323 /* The local valid value is available now. */
1324 block->attr.block.graph_arr[pos] = res;
1329 #endif /* USE_FAST_PHI_CONSTRUCTION */
1331 /* ************************************************************************** */
1333 /** Finalize a Block node, when all control flows are known. */
1334 /** Acceptable parameters are only Block nodes. */
1336 mature_block (ir_node *block)
1343 assert (get_irn_opcode(block) == iro_Block);
1345 if (!get_Block_matured(block)) {
1347 /* turn the dynamic in-array into a static one. */
1348 ins = ARR_LEN (block->in)-1;
1349 NEW_ARR_A (ir_node *, nin, ins);
1350 /* @@@ something is strange here... why isn't the array copied? */
1352 /* Traverse a chain of Phi nodes attached to this block and mature
1354 for (n = block->link; n; n=next) {
1355 inc_irg_visited(current_ir_graph);
1357 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1360 block->attr.block.matured = 1;
1362 /* Now, as the block is a finished firm node, we can optimize it.
1363 Since other nodes have been allocated since the block was created
1364 we can not free the node on the obstack. Therefore we have to call
1366 Unfortunately the optimization does not change a lot, as all allocated
1367 nodes refer to the unoptimized node. */
1368 block = optimize_in_place(block);
1374 new_Phi (int arity, ir_node **in, ir_mode *mode)
1376 return new_r_Phi (current_ir_graph, current_ir_graph->current_block,
1381 new_Const (ir_mode *mode, tarval *con)
1383 return new_r_Const (current_ir_graph, current_ir_graph->start_block,
1388 new_Id (ir_node *val, ir_mode *mode)
1390 return new_r_Id (current_ir_graph, current_ir_graph->current_block,
1395 new_Proj (ir_node *arg, ir_mode *mode, long proj)
1397 return new_r_Proj (current_ir_graph, current_ir_graph->current_block,
1402 new_defaultProj (ir_node *arg, long max_proj)
1405 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
1406 arg->attr.c.kind = fragmentary;
1407 arg->attr.c.default_proj = max_proj;
1408 res = new_Proj (arg, mode_X, max_proj);
1413 new_Conv (ir_node *op, ir_mode *mode)
1415 return new_r_Conv (current_ir_graph, current_ir_graph->current_block,
1420 new_Tuple (int arity, ir_node **in)
1422 return new_r_Tuple (current_ir_graph, current_ir_graph->current_block,
1427 new_Add (ir_node *op1, ir_node *op2, ir_mode *mode)
1429 return new_r_Add (current_ir_graph, current_ir_graph->current_block,
1434 new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode)
1436 return new_r_Sub (current_ir_graph, current_ir_graph->current_block,
1442 new_Minus (ir_node *op, ir_mode *mode)
1444 return new_r_Minus (current_ir_graph, current_ir_graph->current_block,
1449 new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode)
1451 return new_r_Mul (current_ir_graph, current_ir_graph->current_block,
1456 new_Quot (ir_node *memop, ir_node *op1, ir_node *op2)
1459 res = new_r_Quot (current_ir_graph, current_ir_graph->current_block,
1461 #if PRECISE_EXC_CONTEXT
1462 res->attr.frag_arr = new_frag_arr(res);
1469 new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2)
1472 res = new_r_DivMod (current_ir_graph, current_ir_graph->current_block,
1474 #if PRECISE_EXC_CONTEXT
1475 res->attr.frag_arr = new_frag_arr(res);
1482 new_Div (ir_node *memop, ir_node *op1, ir_node *op2)
1485 res = new_r_Div (current_ir_graph, current_ir_graph->current_block,
1487 #if PRECISE_EXC_CONTEXT
1488 res->attr.frag_arr = new_frag_arr(res);
1495 new_Mod (ir_node *memop, ir_node *op1, ir_node *op2)
1498 res = new_r_Mod (current_ir_graph, current_ir_graph->current_block,
1500 #if PRECISE_EXC_CONTEXT
1501 res->attr.frag_arr = new_frag_arr(res);
1508 new_And (ir_node *op1, ir_node *op2, ir_mode *mode)
1510 return new_r_And (current_ir_graph, current_ir_graph->current_block,
1515 new_Or (ir_node *op1, ir_node *op2, ir_mode *mode)
1517 return new_r_Or (current_ir_graph, current_ir_graph->current_block,
1522 new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode)
1524 return new_r_Eor (current_ir_graph, current_ir_graph->current_block,
1529 new_Not (ir_node *op, ir_mode *mode)
1531 return new_r_Not (current_ir_graph, current_ir_graph->current_block,
1536 new_Shl (ir_node *op, ir_node *k, ir_mode *mode)
1538 return new_r_Shl (current_ir_graph, current_ir_graph->current_block,
1543 new_Shr (ir_node *op, ir_node *k, ir_mode *mode)
1545 return new_r_Shr (current_ir_graph, current_ir_graph->current_block,
1550 new_Shrs (ir_node *op, ir_node *k, ir_mode *mode)
1552 return new_r_Shrs (current_ir_graph, current_ir_graph->current_block,
1557 new_Rotate (ir_node *op, ir_node *k, ir_mode *mode)
1559 return new_r_Rot (current_ir_graph, current_ir_graph->current_block,
1564 new_Abs (ir_node *op, ir_mode *mode)
1566 return new_r_Abs (current_ir_graph, current_ir_graph->current_block,
1571 new_Cmp (ir_node *op1, ir_node *op2)
1573 return new_r_Cmp (current_ir_graph, current_ir_graph->current_block,
1580 return new_r_Jmp (current_ir_graph, current_ir_graph->current_block);
1584 new_Cond (ir_node *c)
1586 return new_r_Cond (current_ir_graph, current_ir_graph->current_block, c);
1590 new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
1594 res = new_r_Call (current_ir_graph, current_ir_graph->current_block,
1595 store, callee, arity, in, type);
1596 #if PRECISE_EXC_CONTEXT
1597 res->attr.call.frag_arr = new_frag_arr(res);
1604 new_Return (ir_node* store, int arity, ir_node **in)
1606 return new_r_Return (current_ir_graph, current_ir_graph->current_block,
1611 new_Raise (ir_node *store, ir_node *obj)
1613 return new_r_Raise (current_ir_graph, current_ir_graph->current_block,
1618 new_Load (ir_node *store, ir_node *addr)
1621 res = new_r_Load (current_ir_graph, current_ir_graph->current_block,
1623 #if PRECISE_EXC_CONTEXT
1624 res->attr.frag_arr = new_frag_arr(res);
1631 new_Store (ir_node *store, ir_node *addr, ir_node *val)
1634 res = new_r_Store (current_ir_graph, current_ir_graph->current_block,
1636 #if PRECISE_EXC_CONTEXT
1637 res->attr.frag_arr = new_frag_arr(res);
1644 new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
1648 res = new_r_Alloc (current_ir_graph, current_ir_graph->current_block,
1649 store, size, alloc_type, where);
1650 #if PRECISE_EXC_CONTEXT
1651 res->attr.a.frag_arr = new_frag_arr(res);
1658 new_Free (ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1660 return new_r_Free (current_ir_graph, current_ir_graph->current_block,
1661 store, ptr, size, free_type);
1665 new_simpleSel (ir_node *store, ir_node *objptr, entity *ent)
1666 /* GL: objptr was called frame before. Frame was a bad choice for the name
1667 as the operand could as well be a pointer to a dynamic object. */
1669 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1670 store, objptr, 0, NULL, ent);
1674 new_Sel (ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
1676 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1677 store, objptr, n_index, index, sel);
1681 new_SymConst (type_or_id_p value, symconst_kind kind)
1683 return new_r_SymConst (current_ir_graph, current_ir_graph->current_block,
1688 new_Sync (int arity, ir_node** in)
1690 return new_r_Sync (current_ir_graph, current_ir_graph->current_block,
1698 return current_ir_graph->bad;
1701 /* ********************************************************************* */
1702 /* Comfortable interface with automatic Phi node construction. */
1703 /* (Uses also constructors of the ?? interface, except new_Block.)       */
1704 /* ********************************************************************* */
1706 /** Block construction **/
1707 /* immature Block without predecessors */
1708 ir_node *new_immBlock (void) {
1711 /* creates a new dynamic in-array as length of in is -1 */
1712 res = new_ir_node (current_ir_graph, NULL, op_Block, mode_R, -1, NULL);
1713 current_ir_graph->current_block = res;
1714 res->attr.block.matured = 0;
1715 set_Block_block_visited(res, 0);
1717 /* Create and initialize array for Phi-node construction. */
1718 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1719 current_ir_graph->n_loc);
1720 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1722 /* Immature block may not be optimized! */
1728 /* add an adge to a jmp/control flow node */
1730 add_in_edge (ir_node *block, ir_node *jmp)
1732 if (block->attr.block.matured) {
1733 assert(0 && "Error: Block already matured!\n");
1736 assert (jmp != NULL);
1737 ARR_APP1 (ir_node *, block->in, jmp);
1741 /* changing the current block */
1743 switch_block (ir_node *target)
1745 current_ir_graph->current_block = target;
1748 /* ************************ */
1749 /* parameter administration */
1751 /* get a value from the parameter array from the current block by its index */
1753 get_value (int pos, ir_mode *mode)
1755 inc_irg_visited(current_ir_graph);
1756 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
1760 /* set a value at position pos in the parameter array from the current block */
1762 set_value (int pos, ir_node *value)
1764 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
1767 /* get the current store */
1771 /* GL: one could call get_value instead */
1772 inc_irg_visited(current_ir_graph);
1773 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
1776 /* set the current store */
1778 set_store (ir_node *store)
1780 /* GL: one could call set_value instead */
1781 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
1785 /** Useful access routines **/
1786 /* Returns the current block of the current graph. To set the current
1787 block use switch_block(). */
1788 ir_node *get_cur_block() {
1789 return get_irg_current_block(current_ir_graph);
1792 /* Returns the frame type of the current graph */
1793 type *get_cur_frame_type() {
1794 return get_irg_frame_type(current_ir_graph);
1798 /* ********************************************************************* */
1801 /* call once for each run of the library */