1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Authors: Martin Trapp, Christian Schaefer
6 ** ircons.c: basic and more detailed irnode constructors
7 ** store, block and parameter administration,
8 ** Adapted to extended FIRM nodes (exceptions...) and commented
9 ** by Goetz Lindenmaier
15 /* memset belongs to string.h */
18 /* irnode constructor */
19 /* create a new irnode in irg, with an op, mode, arity and */
20 /* some incoming irnodes */
21 /* this constructor is used in every specified irnode constructor */
/* Base constructor for every ir node: allocates a node of the given op and
   mode on irg's obstack and copies the arity predecessors from in.
   NOTE(review): lines are elided in this chunk; the selection between the
   two res->in assignments below (growable vs. fixed array) is not visible
   here -- presumably chosen by the sign of arity (dynamic when arity < 0).
   Confirm against the full source. */
23 new_ir_node (ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
24 int arity, ir_node **in)
/* node size = common header up to the attr field plus the op-specific
   attribute size, so each node carries exactly the attributes it needs */
27 int node_size = offsetof (ir_node, attr) + op->attr_size;
29 res = (ir_node *) obstack_alloc (irg->obst, node_size);
/* tag the memory so debug checks can recognize it as an ir_node */
31 res->kind = k_ir_node;
/* growable in-array (flexible), for nodes whose arity is not yet known */
37 res->in = NEW_ARR_F (ir_node *, 1);
/* fixed in-array on the graph obstack; slot 0 is reserved (block slot),
   predecessors start at index 1 */
39 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
40 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
49 /*********************************************** */
50 /** private interfaces, for professional use only */
/* Constructs a Block node with arity control-flow predecessors in. */
54 new_r_Block (ir_graph *irg, int arity, ir_node **in)
/* Constructs the Start node: no predecessors; result mode is the tuple
   mode mode_T (results are selected elsewhere via Proj -- not visible here). */
63 new_r_Start (ir_graph *irg, ir_node *block)
67 res = new_ir_node (irg, block, op_Start, mode_T, 0, NULL);
/* Constructs the End node; arity -1 presumably requests the dynamic
   in-array path of new_ir_node so predecessors can be added later --
   TODO confirm against the un-elided new_ir_node. */
75 new_r_End (ir_graph *irg, ir_node *block)
79 res = new_ir_node (irg, block, op_End, mode_X, -1, NULL);
87 /* Creates a Phi node with 0 predecessors */
89 new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
93 res = new_ir_node (irg, block, op_Phi, mode, 0, NULL);
95 /* GL I'm not sure whether we should optimize this guy. *
96 res = optimize (res); ??? */
101 /* Creates a Phi node with all predecessors. Calling this constructor
102 is only allowed if the corresponding block is mature. */
104 new_r_Phi (ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* the block must be mature (no further predecessors will be added) and a
   Phi's arity must equal its block's control-flow arity */
108 assert( get_Block_matured(block) );
109 assert( get_irn_arity(block) == arity );
111 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
113 res = optimize (res);
118 /* This is a stack used for allocating and deallocating nodes in
119 new_r_Phi_in. The original implementation used the obstack
120 to model this stack, now it is explicit. This reduces side effects.
/* NOTE(review): the macro name is misspelled ("EXPICIT" for "EXPLICIT"),
   but it is spelled the same at every use site in this file, so the
   conditional compilation still works; renaming would have to touch all
   sites (and the definition, not visible here) at once. */
122 #if USE_EXPICIT_PHI_IN_STACK
/* allocate the stack object itself; the node slots grow on demand */
127 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
129 res->stack = NEW_ARR_F (ir_node *, 1);
/* Push a discarded Phi node onto the reuse stack instead of freeing it. */
136 void free_to_Phi_in_stack(ir_node *phi) {
137 assert(get_irn_opcode(phi) == iro_Phi);
/* stack full: append a new slot; otherwise (elided else branch,
   presumably) overwrite the slot at pos */
139 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
140 current_ir_graph->Phi_in_stack->pos)
141 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
143 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
145 (current_ir_graph->Phi_in_stack->pos)++;
/* Pop a reusable Phi node from the stack, or allocate a fresh one if the
   stack is empty (the branch structure is partly elided here). */
149 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
150 int arity, ir_node **in) {
152 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
153 int pos = current_ir_graph->Phi_in_stack->pos;
157 /* We need to allocate a new node */
158 res = new_ir_node (irg, block, op_Phi, mode, arity, in);
160 /* reuse the old node and initialize it again. */
163 assert (res->kind == k_ir_node);
164 assert (res->op == op_Phi);
169 /* ???!!! How to free the old in array?? */
/* re-create the in-array on the obstack for the new arity; slot 0 is the
   block slot, predecessors are copied starting at index 1 */
170 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
172 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
174 (current_ir_graph->Phi_in_stack->pos)--;
182 /* Creates a Phi node with a given, fixed array **in of predecessors.
183 If the Phi node is unnecessary, as the same value reaches the block
184 through all control flow paths, it is eliminated and the value
185 returned directly. This constructor is only intended for use in
186 the automatic Phi node generation triggered by get_value or mature.
187 The implementation is quite tricky and depends on the fact, that
188 the nodes are allocated on a stack:
189 The in array contains predecessors and NULLs. The NULLs appear,
190 if get_r_value_internal, that computed the predecessors, reached
191 the same block on two paths. In this case the same value reaches
192 this block on both paths, there is no definition in between. We need
193 not allocate a Phi where these path's merge, but we have to communicate
194 this fact to the caller. This happens by returning a pointer to the
195 node the caller _will_ allocate. (Yes, we predict the address. We can
196 do so because the nodes are allocated on the obstack.) The caller then
197 finds a pointer to itself and, when this routine is called again,
201 new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
202 ir_node **in, int ins)
205 ir_node *res, *known;
207 /* allocate a new node on the obstack.
208 This can return a node to which some of the pointers in the in-array
210 Attention: the constructor copies the in array, i.e., the later changes
211 to the array in this routine do not affect the constructed node! If
212 the in array contains NULLs, there will be missing predecessors in the
214 Is this a possible internal state of the Phi node generation? */
215 #if USE_EXPICIT_PHI_IN_STACK
216 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
218 res = known = new_ir_node (irg, block, op_Phi, mode, ins, in);
220 /* The in-array can contain NULLs. These were returned by get_r_value_internal
221 if it reached the same block/definition on a second path.
222 The NULLs are replaced by the node itself to simplify the test in the
224 for (i=0; i < ins; ++i)
225 if (in[i] == NULL) in[i] = res;
227 /* This loop checks whether the Phi has more than one predecessor.
228 If so, it is a real Phi node and we break the loop. Else the
229 Phi node merges the same definition on several paths and therefore
231 for (i=0; i < ins; ++i)
233 if (in[i]==res || in[i]==known) continue;
241 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* the tentatively allocated node is returned to the reuse stack or given
   back to the obstack -- it was the top allocation, so obstack_free is safe */
243 #if USE_EXPICIT_PHI_IN_STACK
244 free_to_Phi_in_stack(res);
246 obstack_free (current_ir_graph->obst, res);
250 res = optimize (res);
254 /* return the pointer to the Phi node. This node might be deallocated! */
/* Constructs a Const node carrying the target value con; no predecessors. */
259 new_r_Const (ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
262 res = new_ir_node (irg, block, op_Const, mode, 0, NULL);
264 res = optimize (res);
/* NOTE(review): Const additionally runs local_optimize_newby, which the
   other constructors in this chunk do not call -- the reason is not
   visible here; verify before changing. */
268 res = local_optimize_newby (res);
/* Id node: forwards val unchanged (one predecessor). */
275 new_r_Id (ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
277 ir_node *in[1] = {val};
279 res = new_ir_node (irg, block, op_Id, mode, 1, in);
280 res = optimize (res);
/* Proj node: selects result number proj from the tuple node arg. */
286 new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode, long proj)
288 ir_node *in[1] = {arg};
290 res = new_ir_node (irg, block, op_Proj, mode, 1, in);
291 res->attr.proj = proj;
292 res = optimize (res);
/* Conv node: converts op to the given mode. */
299 new_r_Conv (ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
301 ir_node *in[1] = {op};
303 res = new_ir_node (irg, block, op_Conv, mode, 1, in);
304 res = optimize (res);
/* Tuple node: groups arity values into one mode_T node. */
311 new_r_Tuple (ir_graph *irg, ir_node *block, int arity, ir_node **in)
315 res = new_ir_node (irg, block, op_Tuple, mode_T, arity, in);
316 res = optimize (res);
/* Arithmetic constructors: each builds a node with its operands as
   predecessors and runs the local optimizer on the result. */
322 new_r_Add (ir_graph *irg, ir_node *block,
323 ir_node *op1, ir_node *op2, ir_mode *mode)
325 ir_node *in[2] = {op1, op2};
327 res = new_ir_node (irg, block, op_Add, mode, 2, in);
328 res = optimize (res);
334 new_r_Sub (ir_graph *irg, ir_node *block,
335 ir_node *op1, ir_node *op2, ir_mode *mode)
337 ir_node *in[2] = {op1, op2};
339 res = new_ir_node (irg, block, op_Sub, mode, 2, in);
340 res = optimize (res);
/* unary negation */
346 new_r_Minus (ir_graph *irg, ir_node *block,
347 ir_node *op, ir_mode *mode)
349 ir_node *in[1] = {op};
351 res = new_ir_node (irg, block, op_Minus, mode, 1, in);
352 res = optimize (res);
358 new_r_Mul (ir_graph *irg, ir_node *block,
359 ir_node *op1, ir_node *op2, ir_mode *mode)
361 ir_node *in[2] = {op1, op2};
363 res = new_ir_node (irg, block, op_Mul, mode, 2, in);
364 res = optimize (res);
/* Constructs a Quot node (division discarding the remainder).
   Predecessors: memop (memory dependency), op1 (dividend), op2 (divisor).
   The result has tuple mode mode_T; individual results are selected via
   Proj nodes (presumably -- the Proj kinds are not visible in this chunk). */
370 new_r_Quot (ir_graph *irg, ir_node *block,
371 ir_node *memop, ir_node *op1, ir_node *op2)
373 ir_node *in[3] = {memop, op1, op2};
/* BUGFIX: the arity passed was 2, which silently dropped op2 (the divisor)
   from the predecessor array; all three inputs must be wired in. */
375 res = new_ir_node (irg, block, op_Quot, mode_T, 3, in);
376 res = optimize (res);
/* Constructs a DivMod node (combined division and remainder).
   Predecessors: memop (memory dependency), op1 (dividend), op2 (divisor);
   mode_T result, selected elsewhere via Proj. */
382 new_r_DivMod (ir_graph *irg, ir_node *block,
383 ir_node *memop, ir_node *op1, ir_node *op2)
385 ir_node *in[3] = {memop, op1, op2};
/* BUGFIX: arity was 2, dropping op2 from the in-array; DivMod has
   three predecessors. */
387 res = new_ir_node (irg, block, op_DivMod, mode_T, 3, in);
388 res = optimize (res);
/* Constructs a Div node (division, quotient only).
   Predecessors: memop (memory dependency), op1 (dividend), op2 (divisor);
   mode_T result, selected elsewhere via Proj. */
394 new_r_Div (ir_graph *irg, ir_node *block,
395 ir_node *memop, ir_node *op1, ir_node *op2)
397 ir_node *in[3] = {memop, op1, op2};
/* BUGFIX: arity was 2, dropping op2 from the in-array; Div has
   three predecessors. */
399 res = new_ir_node (irg, block, op_Div, mode_T, 3, in);
400 res = optimize (res);
/* Constructs a Mod node (remainder of division).
   Predecessors: memop (memory dependency), op1 (dividend), op2 (divisor);
   mode_T result, selected elsewhere via Proj. */
406 new_r_Mod (ir_graph *irg, ir_node *block,
407 ir_node *memop, ir_node *op1, ir_node *op2)
409 ir_node *in[3] = {memop, op1, op2};
/* BUGFIX: arity was 2, dropping op2 from the in-array; Mod has
   three predecessors. */
411 res = new_ir_node (irg, block, op_Mod, mode_T, 3, in);
412 res = optimize (res);
/* Bitwise, shift, and comparison constructors: each wires its operands
   into a new node and runs the local optimizer. */
418 new_r_And (ir_graph *irg, ir_node *block,
419 ir_node *op1, ir_node *op2, ir_mode *mode)
421 ir_node *in[2] = {op1, op2};
423 res = new_ir_node (irg, block, op_And, mode, 2, in);
424 res = optimize (res);
430 new_r_Or (ir_graph *irg, ir_node *block,
431 ir_node *op1, ir_node *op2, ir_mode *mode)
433 ir_node *in[2] = {op1, op2};
435 res = new_ir_node (irg, block, op_Or, mode, 2, in);
436 res = optimize (res);
/* Eor = exclusive or */
442 new_r_Eor (ir_graph *irg, ir_node *block,
443 ir_node *op1, ir_node *op2, ir_mode *mode)
445 ir_node *in[2] = {op1, op2};
447 res = new_ir_node (irg, block, op_Eor, mode, 2, in);
448 res = optimize (res);
/* bitwise complement */
454 new_r_Not (ir_graph *irg, ir_node *block,
455 ir_node *op, ir_mode *mode)
457 ir_node *in[1] = {op};
459 res = new_ir_node (irg, block, op_Not, mode, 1, in);
460 res = optimize (res);
/* shifts and rotate: op is the value, k the shift count */
466 new_r_Shl (ir_graph *irg, ir_node *block,
467 ir_node *op, ir_node *k, ir_mode *mode)
469 ir_node *in[2] = {op, k};
471 res = new_ir_node (irg, block, op_Shl, mode, 2, in);
472 res = optimize (res);
478 new_r_Shr (ir_graph *irg, ir_node *block,
479 ir_node *op, ir_node *k, ir_mode *mode)
481 ir_node *in[2] = {op, k};
483 res = new_ir_node (irg, block, op_Shr, mode, 2, in);
484 res = optimize (res);
490 new_r_Shrs (ir_graph *irg, ir_node *block,
491 ir_node *op, ir_node *k, ir_mode *mode)
493 ir_node *in[2] = {op, k};
495 res = new_ir_node (irg, block, op_Shrs, mode, 2, in);
496 res = optimize (res);
502 new_r_Rot (ir_graph *irg, ir_node *block,
503 ir_node *op, ir_node *k, ir_mode *mode)
505 ir_node *in[2] = {op, k};
507 res = new_ir_node (irg, block, op_Rot, mode, 2, in);
508 res = optimize (res);
/* absolute value */
514 new_r_Abs (ir_graph *irg, ir_node *block,
515 ir_node *op, ir_mode *mode)
517 ir_node *in[1] = {op};
519 res = new_ir_node (irg, block, op_Abs, mode, 1, in);
520 res = optimize (res);
/* Cmp produces a mode_T node; the individual relations are presumably
   selected via Proj -- not visible in this chunk. */
526 new_r_Cmp (ir_graph *irg, ir_node *block,
527 ir_node *op1, ir_node *op2)
529 ir_node *in[2] = {op1, op2};
531 res = new_ir_node (irg, block, op_Cmp, mode_T, 2, in);
532 res = optimize (res);
/* Jmp: unconditional branch out of block (control-flow mode mode_X). */
538 new_r_Jmp (ir_graph *irg, ir_node *block)
/* NOTE(review): `in` is referenced here but its declaration is not among
   the visible lines (elided); with arity 0 its contents are never read. */
542 res = new_ir_node (irg, block, op_Jmp, mode_X, 0, in);
543 res = optimize (res);
/* Cond: conditional branch on c; mode_T result, the outgoing control-flow
   edges are presumably selected via Proj -- not visible here. */
549 new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c)
551 ir_node *in[1] = {c};
553 res = new_ir_node (irg, block, op_Cond, mode_T, 1, in);
554 res = optimize (res);
/* Call: builds the real in-array r_in with the arity actual arguments
   copied in starting at index 2; slots 0 and 1 presumably hold store and
   callee (those assignments are elided from this chunk -- confirm). */
560 new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
561 ir_node *callee, int arity, ir_node **in, type_method *type)
568 NEW_ARR_A (ir_node *, r_in, r_arity);
571 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
573 res = new_ir_node (irg, block, op_Call, mode_T, r_arity, r_in);
/* remember the method type of the callee in the node's attributes */
575 set_Call_type(res, type);
576 res = optimize (res);
/* Return: control-flow node (mode_X); the arity result values are copied
   into r_in starting at index 1, slot 0 presumably holding store (that
   assignment is elided from this chunk -- confirm). */
582 new_r_Return (ir_graph *irg, ir_node *block,
583 ir_node *store, int arity, ir_node **in)
591 NEW_ARR_A (ir_node *, r_in, r_arity);
595 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
597 res = new_ir_node (irg, block, op_Return, mode_X, r_arity, r_in);
599 res = optimize (res);
/* Raise: raises exception object obj; predecessors are store and obj. */
606 new_r_Raise (ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
608 ir_node *in[2] = {store, obj};
610 res = new_ir_node (irg, block, op_Raise, mode_X, 2, in);
612 res = optimize (res);
/* Load from address adr; predecessors are store (memory) and adr.
   mode_T result -- the loaded value and new memory state are presumably
   selected via Proj. */
618 new_r_Load (ir_graph *irg, ir_node *block,
619 ir_node *store, ir_node *adr)
621 ir_node *in[2] = {store, adr};
623 res = new_ir_node (irg, block, op_Load, mode_T, 2, in);
625 res = optimize (res);
/* Store val to address adr; predecessors are store, adr and val. */
631 new_r_Store (ir_graph *irg, ir_node *block,
632 ir_node *store, ir_node *adr, ir_node *val)
634 ir_node *in[3] = {store, adr, val};
636 res = new_ir_node (irg, block, op_Store, mode_T, 3, in);
638 res = optimize (res);
/* Alloc: allocates size bytes of type alloc_type; `where` records the
   allocation place (its enumerators are not visible in this chunk). */
644 new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
645 ir_node *size, type *alloc_type, where_alloc where)
647 ir_node *in[2] = {store, size};
649 res = new_ir_node (irg, block, op_Alloc, mode_T, 2, in);
/* stash allocation place and type in the node's Alloc attributes */
651 res->attr.a.where = where;
652 res->attr.a.type = alloc_type;
654 res = optimize (res);
/* Free: releases size bytes at ptr; the freed type is recorded in the
   node's attributes. */
660 new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
661 ir_node *ptr, ir_node *size, type *free_type)
663 ir_node *in[3] = {store, ptr, size};
665 res = new_ir_node (irg, block, op_Free, mode_T, 3, in);
667 res->attr.f = free_type;
669 res = optimize (res);
/* Sel: selects entity ent relative to objptr, with arity index expressions
   copied into r_in starting at index 2; slots 0 and 1 presumably hold
   store and objptr (those assignments are elided -- confirm). Result is a
   pointer (mode_p). */
675 new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
676 int arity, ir_node **in, entity *ent)
683 NEW_ARR_A (ir_node *, r_in, r_arity);
686 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
687 res = new_ir_node (irg, block, op_Sel, mode_p, r_arity, r_in);
/* default linkage; the selected entity is kept in the Sel attributes */
689 res->attr.s.ltyp = static_linkage;
690 res->attr.s.ent = ent;
692 res = optimize (res);
/* SymConst: a symbolic constant (address tag, type tag or type size).
   `value` is either an ident or a type, depending on symkind. */
698 new_r_SymConst (ir_graph *irg, ir_node *block, type_or_id *value,
699 symconst_kind symkind)
/* NOTE(review): `in` is referenced but its declaration is elided from this
   chunk; with arity 0 its contents are never read. */
703 res = new_ir_node (irg, block, op_SymConst, mode_I, 0, in);
705 res->attr.i.num = symkind;
/* linkage_ptr_info carries an ident; the other kinds carry a type */
706 if (symkind == linkage_ptr_info) {
707 res->attr.i.tori.ptrinfo = (ident *)value;
709 assert ( ( (symkind == type_tag)
710 || (symkind == size))
711 && (is_type(value)));
712 res->attr.i.tori.typ = (type *)value;
714 res = optimize (res);
/* Sync: joins arity memory values into a single memory state (mode_M). */
720 new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in)
724 res = new_ir_node (irg, block, op_Sync, mode_M, arity, in);
726 res = optimize (res);
/* Bad: returns the graph's single shared Bad node; the block parameter is
   unused in the visible lines. */
733 new_r_Bad (ir_node *block)
735 return current_ir_graph->bad;
738 /***********************/
739 /** public interfaces */
740 /** construction tools */
/* Bodies of the public new_Start / new_End / new_Block constructors; the
   function signatures themselves are elided from this chunk. These operate
   on the global current_ir_graph and its current_block. */
747 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
748 op_Start, mode_T, 0, NULL);
750 res = optimize (res);
/* End: arity -1, presumably requesting the dynamic in-array (see
   new_ir_node) so control-flow predecessors can be appended later. */
761 res = new_ir_node (current_ir_graph, current_ir_graph->current_block,
762 op_End, mode_X, -1, NULL);
764 res = optimize (res);
/* new_Block: the block has no enclosing block, so NULL is passed; it
   becomes the new current block and starts out immature (predecessors
   may still be added). */
775 res = new_ir_node (current_ir_graph, NULL, op_Block, mode_R, -1, NULL);
776 current_ir_graph->current_block = res;
777 res->attr.block.matured = 0;
778 set_Block_block_visit(res, 0);
780 /* forget this optimization. use this only if mature !!!!
781 res = optimize (res); */
784 /** create a new dynamic array, which stores all parameters in irnodes */
785 /** using the same obstack as the whole irgraph */
786 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
787 current_ir_graph->params)
789 /** initialize the parameter array */
790 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->params);
/* Public constructors: thin wrappers that forward to the corresponding
   new_r_* constructor with the global current_ir_graph and its current
   block (the tail lines carrying the remaining arguments are elided). */
797 new_Phi (int arity, ir_node **in, ir_mode *mode)
799 return new_r_Phi (current_ir_graph, current_ir_graph->current_block,
/* NOTE(review): Const alone is placed in the start block rather than the
   current block -- presumably because constants are block-invariant;
   confirm against the full source before changing. */
804 new_Const (ir_mode *mode, tarval *con)
806 return new_r_Const (current_ir_graph, current_ir_graph->start_block,
811 new_Id (ir_node *val, ir_mode *mode)
813 return new_r_Id (current_ir_graph, current_ir_graph->current_block,
818 new_Proj (ir_node *arg, ir_mode *mode, long proj)
820 return new_r_Proj (current_ir_graph, current_ir_graph->current_block,
825 new_Conv (ir_node *op, ir_mode *mode)
827 return new_r_Conv (current_ir_graph, current_ir_graph->current_block,
832 new_Tuple (int arity, ir_node **in)
834 return new_r_Tuple (current_ir_graph, current_ir_graph->current_block,
839 new_Add (ir_node *op1, ir_node *op2, ir_mode *mode)
841 return new_r_Add (current_ir_graph, current_ir_graph->current_block,
846 new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode)
848 return new_r_Sub (current_ir_graph, current_ir_graph->current_block,
854 new_Minus (ir_node *op, ir_mode *mode)
856 return new_r_Minus (current_ir_graph, current_ir_graph->current_block,
861 new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode)
863 return new_r_Mul (current_ir_graph, current_ir_graph->current_block,
868 new_Quot (ir_node *memop, ir_node *op1, ir_node *op2)
870 return new_r_Quot (current_ir_graph, current_ir_graph->current_block,
875 new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2)
877 return new_r_DivMod (current_ir_graph, current_ir_graph->current_block,
882 new_Div (ir_node *memop, ir_node *op1, ir_node *op2)
884 return new_r_Div (current_ir_graph, current_ir_graph->current_block,
889 new_Mod (ir_node *memop, ir_node *op1, ir_node *op2)
891 return new_r_Mod (current_ir_graph, current_ir_graph->current_block,
896 new_And (ir_node *op1, ir_node *op2, ir_mode *mode)
898 return new_r_And (current_ir_graph, current_ir_graph->current_block,
903 new_Or (ir_node *op1, ir_node *op2, ir_mode *mode)
905 return new_r_Or (current_ir_graph, current_ir_graph->current_block,
910 new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode)
912 return new_r_Eor (current_ir_graph, current_ir_graph->current_block,
917 new_Not (ir_node *op, ir_mode *mode)
919 return new_r_Not (current_ir_graph, current_ir_graph->current_block,
924 new_Shl (ir_node *op, ir_node *k, ir_mode *mode)
926 return new_r_Shl (current_ir_graph, current_ir_graph->current_block,
931 new_Shr (ir_node *op, ir_node *k, ir_mode *mode)
933 return new_r_Shr (current_ir_graph, current_ir_graph->current_block,
938 new_Shrs (ir_node *op, ir_node *k, ir_mode *mode)
940 return new_r_Shrs (current_ir_graph, current_ir_graph->current_block,
/* NOTE(review): naming mismatch -- the public wrapper is "new_Rotate" while
   the private constructor is "new_r_Rot"; other wrappers mirror the private
   name exactly. Renaming would break external callers, so only flagging. */
945 new_Rotate (ir_node *op, ir_node *k, ir_mode *mode)
947 return new_r_Rot (current_ir_graph, current_ir_graph->current_block,
952 new_Abs (ir_node *op, ir_mode *mode)
954 return new_r_Abs (current_ir_graph, current_ir_graph->current_block,
959 new_Cmp (ir_node *op1, ir_node *op2)
961 return new_r_Cmp (current_ir_graph, current_ir_graph->current_block,
/* Remaining public wrappers; same pattern: forward to new_r_* with the
   global current_ir_graph and its current block. */
968 return new_r_Jmp (current_ir_graph, current_ir_graph->current_block);
972 new_Cond (ir_node *c)
974 return new_r_Cond (current_ir_graph, current_ir_graph->current_block, c);
978 new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
981 return new_r_Call (current_ir_graph, current_ir_graph->current_block,
982 store, callee, arity, in, type);
985 /* make M parameter in call explicit:
986 new_Return (ir_node* store, int arity, ir_node **in) */
988 new_Return (ir_node* store, int arity, ir_node **in)
990 return new_r_Return (current_ir_graph, current_ir_graph->current_block,
995 new_Raise (ir_node *store, ir_node *obj)
997 return new_r_Raise (current_ir_graph, current_ir_graph->current_block,
1002 new_Load (ir_node *store, ir_node *addr)
1004 return new_r_Load (current_ir_graph, current_ir_graph->current_block,
1009 new_Store (ir_node *store, ir_node *addr, ir_node *val)
1011 return new_r_Store (current_ir_graph, current_ir_graph->current_block,
1016 new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
1019 return new_r_Alloc (current_ir_graph, current_ir_graph->current_block,
1020 store, size, alloc_type, where);
1024 new_Free (ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1026 return new_r_Free (current_ir_graph, current_ir_graph->current_block,
1027 store, ptr, size, free_type);
/* convenience Sel without index expressions (arity 0) */
1031 new_simpleSel (ir_node *store, ir_node *objptr, entity *ent)
1032 /* GL: objptr was called frame before. Frame was a bad choice for the name
1033 as the operand could as well be a pointer to a dynamic object. */
1035 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1036 store, objptr, 0, NULL, ent);
1040 new_Sel (ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
1042 return new_r_Sel (current_ir_graph, current_ir_graph->current_block,
1043 store, objptr, n_index, index, sel);
1047 new_SymConst (type_or_id *value, symconst_kind kind)
1049 return new_r_SymConst (current_ir_graph, current_ir_graph->current_block,
1054 new_Sync (int arity, ir_node** in)
1056 return new_r_Sync (current_ir_graph, current_ir_graph->current_block,
/* body of new_Bad (signature elided): the shared Bad node of the graph */
1064 return current_ir_graph->bad;
1068 /************************/
1069 /* ir block constructor */
1071 /* GL: what's this good for? */
1073 typedef struct ir_block {
1076 /* -1 = error, 0 = OK */
1093 /* call once for each run of the library */