1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Authors: Martin Trapp, Christian Schaefer
6 ** ircons.c: basic and more detailed irnode constructors
7 ** store, block and parameter administration.
8 ** Adapted to extended FIRM nodes (exceptions...) and commented
9 ** by Goetz Lindenmaier
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "common_t.h"
28 /* memset belongs to string.h */
31 /* # include "exc.h" */
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
43 /*** ******************************************** */
44 /** privat interfaces, for professional use only */
46 /* Constructs a Block with a fixed number of predecessors.
47 Does not set current_block. Can not be used with automatic
48 Phi node construction. */
/* The Block node itself lives in no block, hence the NULL block argument. */
50 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
54 res = new_ir_node (db, irg, NULL, op_Block, mode_R, arity, in);
/* A block created with an explicit predecessor array is complete ("mature")
   from the start; no Phi0 placeholders will ever be needed for it. */
55 set_Block_matured(res, 1);
56 set_Block_block_visited(res, 0);
/* Default exception state: a normal block, not an exception handler entry,
   and not yet part of any interprocedural (call graph) view. */
58 res->attr.block.exc = exc_normal;
59 res->attr.block.handler_entry = 0;
60 res->attr.block.in_cg = NULL;
/* Start node: produces the initial execution state (mode_T tuple), no inputs. */
67 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
71 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
/* End node: arity -1 requests a dynamic in-array (keep-alive edges are
   appended later, e.g. by add_End_keepalive). */
78 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
82 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
88 /* Creates a Phi node with all predecessors. Calling this constructor
89 is only allowed if the corresponding block is mature. */
91 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* A Phi must have exactly one data predecessor per control predecessor
   of its block; both invariants are checked here. */
95 assert( get_Block_matured(block) );
96 assert( get_irn_arity(block) == arity );
98 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
/* optimize() may fold the Phi away and return an equivalent node. */
100 res = optimize (res);
103 /* Memory Phis in endless loops must be kept alive.
104 As we can't distinguish these easily we keep all of them alive. */
105 if ((res->op == op_Phi) && (mode == mode_M))
106 add_End_keepalive(irg->end, res);
/* Const node: a compile-time constant of the given mode and target value. */
111 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
114 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
116 res = optimize (res);
120 res = local_optimize_newby (res);
/* Id node: a transparent alias for val; normally optimized away immediately. */
127 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
129 ir_node *in[1] = {val};
131 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
132 res = optimize (res);
/* Proj node: selects result number 'proj' from a tuple-producing node. */
138 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
141 ir_node *in[1] = {arg};
143 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
144 res->attr.proj = proj;
/* A Proj must have a valid predecessor that itself sits in a block. */
147 assert(get_Proj_pred(res));
148 assert(get_nodes_Block(get_Proj_pred(res)));
150 res = optimize (res);
/* Default Proj for a Cond: the fall-through case of a switch.  Marks the
   Cond as 'fragmentary' (sparse case values) and records the default. */
158 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
162 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
163 arg->attr.c.kind = fragmentary;
164 arg->attr.c.default_proj = max_proj;
165 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Conv node: converts op to the given target mode. */
170 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
172 ir_node *in[1] = {op};
174 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
175 res = optimize (res);
/* Tuple node: bundles arity values into one mode_T result (accessed via Proj). */
182 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
186 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
187 res = optimize (res);
/* Arithmetic constructors.  The plain binary/unary operations (Add, Sub,
   Minus, Mul) carry the result mode directly; the division family (Quot,
   DivMod, Div, Mod) additionally threads the memory state 'memop' and
   therefore produces a mode_T tuple (memory + result(s) via Proj). */
193 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
194 ir_node *op1, ir_node *op2, ir_mode *mode)
196 ir_node *in[2] = {op1, op2};
198 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
199 res = optimize (res);
205 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
206 ir_node *op1, ir_node *op2, ir_mode *mode)
208 ir_node *in[2] = {op1, op2};
210 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
211 res = optimize (res);
217 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
218 ir_node *op, ir_mode *mode)
220 ir_node *in[1] = {op};
222 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
223 res = optimize (res);
229 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
230 ir_node *op1, ir_node *op2, ir_mode *mode)
232 ir_node *in[2] = {op1, op2};
234 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
235 res = optimize (res);
241 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
242 ir_node *memop, ir_node *op1, ir_node *op2)
244 ir_node *in[3] = {memop, op1, op2};
246 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
247 res = optimize (res);
253 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
254 ir_node *memop, ir_node *op1, ir_node *op2)
256 ir_node *in[3] = {memop, op1, op2};
258 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
259 res = optimize (res);
265 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
266 ir_node *memop, ir_node *op1, ir_node *op2)
268 ir_node *in[3] = {memop, op1, op2};
270 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
271 res = optimize (res);
277 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
278 ir_node *memop, ir_node *op1, ir_node *op2)
280 ir_node *in[3] = {memop, op1, op2};
282 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
283 res = optimize (res);
/* Bitwise and shift constructors, plus Abs and Cmp.  All follow the same
   pattern: build the in-array on the stack, allocate the node, then let
   optimize() perform constant folding / CSE.  Cmp yields mode_T: the
   individual relations are selected with Proj nodes. */
289 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
290 ir_node *op1, ir_node *op2, ir_mode *mode)
292 ir_node *in[2] = {op1, op2};
294 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
295 res = optimize (res);
301 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
302 ir_node *op1, ir_node *op2, ir_mode *mode)
304 ir_node *in[2] = {op1, op2};
306 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
307 res = optimize (res);
313 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
314 ir_node *op1, ir_node *op2, ir_mode *mode)
316 ir_node *in[2] = {op1, op2};
318 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
319 res = optimize (res);
325 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
326 ir_node *op, ir_mode *mode)
328 ir_node *in[1] = {op};
330 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
331 res = optimize (res);
337 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
338 ir_node *op, ir_node *k, ir_mode *mode)
340 ir_node *in[2] = {op, k};
342 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
343 res = optimize (res);
349 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
350 ir_node *op, ir_node *k, ir_mode *mode)
352 ir_node *in[2] = {op, k};
354 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
355 res = optimize (res);
361 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
362 ir_node *op, ir_node *k, ir_mode *mode)
364 ir_node *in[2] = {op, k};
366 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
367 res = optimize (res);
373 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
374 ir_node *op, ir_node *k, ir_mode *mode)
376 ir_node *in[2] = {op, k};
378 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
379 res = optimize (res);
385 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
386 ir_node *op, ir_mode *mode)
388 ir_node *in[1] = {op};
390 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
391 res = optimize (res);
397 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
398 ir_node *op1, ir_node *op2)
400 ir_node *in[2] = {op1, op2};
402 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
403 res = optimize (res);
/* Jmp node: unconditional branch out of the block (mode_X control flow). */
409 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
413 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, in);
414 res = optimize (res);
/* Cond node: conditional branch / switch on c; targets are Projs of the
   resulting tuple.  Starts as a 'dense' jump table with default case 0. */
420 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
422 ir_node *in[1] = {c};
424 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
425 res->attr.c.kind = dense;
426 res->attr.c.default_proj = 0;
427 res = optimize (res);
/* Call node: in-array layout is [store, callee, arg0..argN-1]; the actual
   arguments are copied behind the two fixed slots. */
433 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
434 ir_node *callee, int arity, ir_node **in, type *type)
441 NEW_ARR_A (ir_node *, r_in, r_arity);
444 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
446 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type; it defines parameter/result layout. */
448 assert(is_method_type(type));
449 set_Call_type(res, type);
450 res->attr.call.callee_arr = NULL;
451 res = optimize (res);
/* Return node: in-array layout is [store, res0..resN-1]. */
457 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
458 ir_node *store, int arity, ir_node **in)
465 NEW_ARR_A (ir_node *, r_in, r_arity);
467 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
468 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
469 res = optimize (res);
/* Raise node: throws exception object obj; threads the memory state. */
475 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
477 ir_node *in[2] = {store, obj};
479 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
480 res = optimize (res);
/* Load node: reads from address adr under memory state store; mode_T result
   (memory, loaded value, exception control flow are selected via Proj). */
486 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
487 ir_node *store, ir_node *adr)
489 ir_node *in[2] = {store, adr};
491 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
493 res = optimize (res);
/* Store node: writes val to address adr under memory state store. */
499 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
500 ir_node *store, ir_node *adr, ir_node *val)
502 ir_node *in[3] = {store, adr, val};
504 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
506 res = optimize (res);
/* Alloc node: allocates 'size' bytes of alloc_type; 'where' selects
   stack vs. heap allocation. */
513 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
514 ir_node *size, type *alloc_type, where_alloc where)
516 ir_node *in[2] = {store, size};
518 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
520 res->attr.a.where = where;
521 res->attr.a.type = alloc_type;
523 res = optimize (res);
/* Free node: releases 'size' bytes at ptr of free_type. */
529 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
530 ir_node *ptr, ir_node *size, type *free_type)
532 ir_node *in[3] = {store, ptr, size};
534 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
536 res->attr.f = free_type;
538 res = optimize (res);
/* Sel node: selects entity ent relative to objptr; in-array layout is
   [store, objptr, index0..indexN-1], result is a pointer (mode_p). */
544 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
545 int arity, ir_node **in, entity *ent)
552 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
555 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
556 res = new_ir_node (db, irg, block, op_Sel, mode_p, r_arity, r_in);
558 res->attr.s.ltyp = static_linkage;
559 res->attr.s.ent = ent;
561 res = optimize (res);
/* InstOf node: dynamic type test of objptr against ent. */
567 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
568 ir_node *objptr, type *ent)
575 NEW_ARR_A (ir_node *, r_in, r_arity);
/* NOTE(review): this allocates an op_Sel node although the attribute set
   below is the InstOf one (attr.io).  Looks like a copy-paste from
   new_rd_Sel -- confirm whether op_InstOf was intended here. */
579 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
581 res->attr.io.ent = ent;
583 /* res = optimize (res);
584 ** irn_vrfy (res); */
/* SymConst node: a symbolic constant -- either linkage/address information
   (an ident) or type-derived information (type tag or size). */
589 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
590 symconst_kind symkind)
595 if (symkind == linkage_ptr_info)
599 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, in);
601 res->attr.i.num = symkind;
/* The union member written depends on symkind: ident for linkage info,
   type pointer for type_tag / size. */
602 if (symkind == linkage_ptr_info) {
603 res->attr.i.tori.ptrinfo = (ident *)value;
605 assert ( ( (symkind == type_tag)
606 || (symkind == size))
607 && (is_type(value)));
608 res->attr.i.tori.typ = (type *)value;
610 res = optimize (res);
/* Sync node: merges several memory states into one (mode_M). */
616 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
620 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
622 res = optimize (res);
/* Bad and Unknown are singletons cached per graph; just return them. */
630 return current_ir_graph->bad;
636 return current_ir_graph->unknown;
/* Interprocedural view nodes.  CallBegin marks the control transfer into a
   callee; it remembers the originating Call and graph. */
640 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
642 ir_node *in[1] = { get_Call_ptr(call) };
644 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
645 res->attr.callbegin.irg = irg;
646 res->attr.callbegin.call = call;
647 res = optimize (res);
/* EndReg / EndExcept: interprocedural end nodes for regular and exceptional
   control flow; arity -1 gives them a dynamic in-array. */
653 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
657 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
658 res->attr.end.irg = irg;
665 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
669 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
670 res->attr.end.irg = irg;
/* Break: jump out of an interprocedural region (like Jmp, mode_X). */
677 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
681 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, in);
682 res = optimize (res);
/* Filter: interprocedural analogue of Proj -- selects result 'proj' of arg
   and can later carry its own interprocedural in-array (in_cg). */
688 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
691 ir_node *in[1] = {arg};
693 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
694 res->attr.filter.proj = proj;
695 res->attr.filter.in_cg = NULL;
698 assert(get_Proj_pred(res));
699 assert(get_nodes_Block(get_Proj_pred(res)));
701 res = optimize (res);
/* Convenience wrappers: the new_r_* constructors are the debug-info-free
   variants.  Each simply forwards to the corresponding new_rd_* constructor
   with db == NULL; see those for semantics. */
708 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
709 return new_rd_Block(NULL, irg, arity, in);
711 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
712 return new_rd_Start(NULL, irg, block);
714 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
715 return new_rd_End(NULL, irg, block);
717 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
718 return new_rd_Jmp(NULL, irg, block);
720 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
721 return new_rd_Cond(NULL, irg, block, c);
723 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
724 ir_node *store, int arity, ir_node **in) {
725 return new_rd_Return(NULL, irg, block, store, arity, in);
727 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
728 ir_node *store, ir_node *obj) {
729 return new_rd_Raise(NULL, irg, block, store, obj);
731 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
732 ir_mode *mode, tarval *con) {
733 return new_rd_Const(NULL, irg, block, mode, con);
735 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
736 type_or_id_p value, symconst_kind symkind) {
737 return new_rd_SymConst(NULL, irg, block, value, symkind);
739 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
740 ir_node *objptr, int n_index, ir_node **index,
742 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
744 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
746 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
748 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
749 ir_node *callee, int arity, ir_node **in,
751 return new_rd_Call(NULL, irg, block, store, callee, arity, in, type);
753 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
754 ir_node *op1, ir_node *op2, ir_mode *mode) {
755 return new_rd_Add(NULL, irg, block, op1, op2, mode);
757 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
758 ir_node *op1, ir_node *op2, ir_mode *mode) {
759 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
761 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
762 ir_node *op, ir_mode *mode) {
763 return new_rd_Minus(NULL, irg, block, op, mode);
765 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
766 ir_node *op1, ir_node *op2, ir_mode *mode) {
767 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
769 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
770 ir_node *memop, ir_node *op1, ir_node *op2) {
771 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
773 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
774 ir_node *memop, ir_node *op1, ir_node *op2) {
775 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
777 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
778 ir_node *memop, ir_node *op1, ir_node *op2) {
779 return new_rd_Div(NULL, irg, block, memop, op1, op2);
781 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
782 ir_node *memop, ir_node *op1, ir_node *op2) {
783 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
785 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
786 ir_node *op, ir_mode *mode) {
787 return new_rd_Abs(NULL, irg, block, op, mode);
789 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
790 ir_node *op1, ir_node *op2, ir_mode *mode) {
791 return new_rd_And(NULL, irg, block, op1, op2, mode);
793 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
794 ir_node *op1, ir_node *op2, ir_mode *mode) {
795 return new_rd_Or(NULL, irg, block, op1, op2, mode);
797 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
798 ir_node *op1, ir_node *op2, ir_mode *mode) {
799 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
801 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
802 ir_node *op, ir_mode *mode) {
803 return new_rd_Not(NULL, irg, block, op, mode);
805 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
806 ir_node *op1, ir_node *op2) {
807 return new_rd_Cmp(NULL, irg, block, op1, op2);
809 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
810 ir_node *op, ir_node *k, ir_mode *mode) {
811 return new_rd_Shl(NULL, irg, block, op, k, mode);
813 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
814 ir_node *op, ir_node *k, ir_mode *mode) {
815 return new_rd_Shr(NULL, irg, block, op, k, mode);
817 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
818 ir_node *op, ir_node *k, ir_mode *mode) {
819 return new_rd_Shrs(NULL, irg, block, op, k, mode);
821 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
822 ir_node *op, ir_node *k, ir_mode *mode) {
823 return new_rd_Rot(NULL, irg, block, op, k, mode);
825 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
826 ir_node *op, ir_mode *mode) {
827 return new_rd_Conv(NULL, irg, block, op, mode);
829 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
830 ir_node **in, ir_mode *mode) {
831 return new_rd_Phi(NULL, irg, block, arity, in, mode);
833 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
834 ir_node *store, ir_node *adr) {
835 return new_rd_Load(NULL, irg, block, store, adr);
837 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
838 ir_node *store, ir_node *adr, ir_node *val) {
839 return new_rd_Store(NULL, irg, block, store, adr, val);
841 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
842 ir_node *size, type *alloc_type, where_alloc where) {
843 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
845 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
846 ir_node *ptr, ir_node *size, type *free_type) {
847 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
849 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
850 return new_rd_Sync(NULL, irg, block, arity, in);
852 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
853 ir_mode *mode, long proj) {
854 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
856 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
858 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
860 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
861 int arity, ir_node **in) {
862 return new_rd_Tuple(NULL, irg, block, arity, in );
864 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
865 ir_node *val, ir_mode *mode) {
866 return new_rd_Id(NULL, irg, block, val, mode);
868 INLINE ir_node *new_r_Bad () {
871 INLINE ir_node *new_r_Unknown () {
872 return new_rd_Unknown();
874 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
875 return new_rd_CallBegin(NULL, irg, block, callee);
877 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
878 return new_rd_EndReg(NULL, irg, block);
880 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
881 return new_rd_EndExcept(NULL, irg, block);
883 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
884 return new_rd_Break(NULL, irg, block);
886 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
887 ir_mode *mode, long proj) {
888 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
892 /** ********************/
893 /** public interfaces */
894 /** construction tools */
896 /****f* ircons/new_Start
899 * new_Start -- create a new Start node in the current block
902 * s = new_Start(void);
903 * ir_node* new_Start(void);
906 * s - pointer to the created Start node
/* new_d_* constructors: build nodes in the graph/block the construction
   interface currently points at (current_ir_graph / current_block). */
911 new_d_Start (dbg_info* db)
915 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
916 op_Start, mode_T, 0, NULL);
918 res = optimize (res);
924 new_d_End (dbg_info* db)
927 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
928 op_End, mode_X, -1, NULL);
929 res = optimize (res);
935 /* Constructs a Block with a fixed number of predecessors.
936 Does set current_block. Can be used with automatic Phi
937 node construction. */
939 new_d_Block (dbg_info* db, int arity, ir_node **in)
943 res = new_rd_Block (db, current_ir_graph, arity, in);
945 /* Create and initialize array for Phi-node construction. */
946 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
947 current_ir_graph->n_loc)
/* One slot per local variable; NULL means "no value known yet in this
   block" for get_value/set_value. */
948 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
950 res = optimize (res);
/* Subsequent new_d_* constructors place their nodes into this block. */
951 current_ir_graph->current_block = res;
958 /* ***********************************************************************/
959 /* Methods necessary for automatic Phi node creation */
961 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
962 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
963 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
964 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
966 Call Graph: ( A ---> B == A "calls" B)
968 get_value mature_block
976 get_r_value_internal |
980 new_rd_Phi0 new_rd_Phi_in
982 * *************************************************************************** */
984 /* Creates a Phi node with 0 predecessors */
/* Placeholder Phi used while its block is still immature; it is upgraded
   (or turned into an Id) when the block is matured. */
985 static INLINE ir_node *
986 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
989 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
994 /* There are two implementations of the Phi node construction. The first
995 is faster, but does not work for blocks with more than 2 predecessors.
996 The second works always but is slower and causes more unnecessary Phi
998 Select the implementations by the following preprocessor flag set in
1000 #if USE_FAST_PHI_CONSTRUCTION
1002 /* This is a stack used for allocating and deallocating nodes in
1003 new_rd_Phi_in. The original implementation used the obstack
1004 to model this stack, now it is explicit. This reduces side effects.
1006 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocate a fresh, empty Phi-in stack (flexible array of node pointers).
   NOTE(review): the malloc result is used unchecked below -- confirm the
   project's OOM policy before relying on this. */
1007 INLINE Phi_in_stack *
1008 new_Phi_in_stack() {
1011 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1013 res->stack = NEW_ARR_F (ir_node *, 1);
/* Release a Phi-in stack and its flexible array. */
1020 free_Phi_in_stack(Phi_in_stack *s) {
1021 DEL_ARR_F(s->stack);
/* Push a discarded Phi node onto the current graph's Phi-in stack so its
   storage can be recycled by alloc_or_pop_from_Phi_in_stack. */
1025 free_to_Phi_in_stack(ir_node *phi) {
1026 assert(get_irn_opcode(phi) == iro_Phi);
/* Grow the array only when the stack is full; otherwise overwrite the
   slot at the current position. */
1028 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1029 current_ir_graph->Phi_in_stack->pos)
1030 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1032 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1034 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node for the given block/mode/predecessors, either freshly
   allocated or recycled from the per-graph Phi-in stack.  Recycled nodes
   get a new in-array; the old one stays on the obstack (see note below). */
1037 static INLINE ir_node *
1038 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1039 int arity, ir_node **in) {
1041 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1042 int pos = current_ir_graph->Phi_in_stack->pos;
1046 /* We need to allocate a new node */
/* FIX: 'db' is not declared in this function (there is no dbg_info
   parameter); nodes recycled through the Phi-in stack carry no debug
   info, so pass NULL explicitly. */
1047 res = new_ir_node (NULL, irg, block, op_Phi, mode, arity, in);
1049 /* reuse the old node and initialize it again. */
1052 assert (res->kind == k_ir_node);
1053 assert (res->op == op_Phi);
1057 assert (arity >= 0);
1058 /* ???!!! How to free the old in array?? */
/* Slot 0 of the in-array is reserved for the block; the data
   predecessors are copied behind it. */
1059 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1061 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
/* Pop: the recycled node is in use again. */
1063 (current_ir_graph->Phi_in_stack->pos)--;
1067 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1069 /* Creates a Phi node with a given, fixed array **in of predecessors.
1070 If the Phi node is unnecessary, as the same value reaches the block
1071 through all control flow paths, it is eliminated and the value
1072 returned directly. This constructor is only intended for use in
1073 the automatic Phi node generation triggered by get_value or mature.
1074 The implementation is quite tricky and depends on the fact, that
1075 the nodes are allocated on a stack:
1076 The in array contains predecessors and NULLs. The NULLs appear,
1077 if get_r_value_internal, that computed the predecessors, reached
1078 the same block on two paths. In this case the same value reaches
1079 this block on both paths, there is no definition in between. We need
1080 not allocate a Phi where these path's merge, but we have to communicate
1081 this fact to the caller. This happens by returning a pointer to the
1082 node the caller _will_ allocate. (Yes, we predict the address. We can
1083 do so because the nodes are allocated on the obstack.) The caller then
1084 finds a pointer to itself and, when this routine is called again,
/* Builds (or elides) a Phi from a possibly NULL-containing predecessor
   array during automatic Phi construction; see the long comment above for
   the address-prediction trick this relies on. */
1087 static INLINE ir_node *
1088 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1089 ir_node **in, int ins)
1092 ir_node *res, *known;
1094 /* allocate a new node on the obstack.
1095 This can return a node to which some of the pointers in the in-array
1097 Attention: the constructor copies the in array, i.e., the later changes
1098 to the array in this routine do not affect the constructed node! If
1099 the in array contains NULLs, there will be missing predecessors in the
1101 Is this a possible internal state of the Phi node generation? */
1102 #if USE_EXPLICIT_PHI_IN_STACK
1103 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1105 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1107 /* The in-array can contain NULLs. These were returned by
1108 get_r_value_internal if it reached the same block/definition on a
1110 The NULLs are replaced by the node itself to simplify the test in the
1112 for (i=0; i < ins; ++i)
1113 if (in[i] == NULL) in[i] = res;
1115 /* This loop checks whether the Phi has more than one predecessor.
1116 If so, it is a real Phi node and we break the loop. Else the
1117 Phi node merges the same definition on several paths and therefore
1119 for (i=0; i < ins; ++i)
1121 if (in[i]==res || in[i]==known) continue;
1129 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Deallocate the unneeded Phi: either back onto the recycling stack or
   straight off the obstack (obstack frees are LIFO, so this is safe for
   the most recently allocated node). */
1131 #if USE_EXPLICIT_PHI_IN_STACK
1132 free_to_Phi_in_stack(res);
1134 obstack_free (current_ir_graph->obst, res);
1138 res = optimize (res);
1142 /* return the pointer to the Phi node. This node might be deallocated! */
1147 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1149 /** This function computes the predecessors for a real Phi node, and then
1150 allocates and returns this node. The routine called to allocate the
1151 node might optimize it away and return a real value, or even a pointer
1152 to a deallocated Phi node on top of the obstack!
1153 This function is called with an in-array of proper size. **/
1155 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1157 ir_node *prevBlock, *res;
1160 /* This loop goes to all predecessor blocks of the block the Phi node is in
1161 and there finds the operands of the Phi node by calling
1162 get_r_value_internal. */
1163 for (i = 1; i <= ins; ++i) {
1164 assert (block->in[i]);
1165 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1167 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1170 /* After collecting all predecessors into the array nin a new Phi node
1171 with these predecessors is created. This constructor contains an
1172 optimization: If all predecessors of the Phi node are identical it
1173 returns the only operand instead of a new Phi node. If the value
1174 passes two different control flow edges without being defined, and
1175 this is the second path treated, a pointer to the node that will be
1176 allocated for the first path (recursion) is returned. We already
1177 know the address of this node, as it is the next node to be allocated
1178 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1179 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1181 /* Now we know the value for "pos" and can enter it in the array with
1182 all known local variables. Attention: this might be a pointer to
1183 a node, that later will be allocated!!! See new_rd_Phi_in.
1184 If this is called in mature, after some set_value in the same block,
1185 the proper value must not be overwritten:
1187 get_value (makes Phi0, puts it into graph_arr)
1188 set_value (overwrites Phi0 in graph_arr)
1189 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1192 if (!block->attr.block.graph_arr[pos]) {
1193 block->attr.block.graph_arr[pos] = res;
1195 /* printf(" value already computed by %s\n",
1196 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
1202 /* This function returns the last definition of a variable. In case
1203 this variable was last defined in a previous block, Phi nodes are
1204 inserted. If the part of the firm graph containing the definition
1205 is not yet constructed, a dummy Phi node is returned. */
1207 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1210 /* There are 4 cases to treat.
1212 1. The block is not mature and we visit it the first time. We can not
1213 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1214 predecessors is returned. This node is added to the linked list (field
1215 "link") of the containing block to be completed when this block is
1216 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1219 2. The value is already known in this block, graph_arr[pos] is set and we
1220 visit the block the first time. We can return the value without
1221 creating any new nodes.
1223 3. The block is mature and we visit it the first time. A Phi node needs
1224 to be created (phi_merge). If the Phi is not needed, as all it's
1225 operands are the same value reaching the block through different
1226 paths, it's optimized away and the value itself is returned.
1228 4. The block is mature, and we visit it the second time. Now two
1229 subcases are possible:
1230 * The value was computed completely the last time we were here. This
1231 is the case if there is no loop. We can return the proper value.
1232 * The recursion that visited this node and set the flag did not
1233 return yet. We are computing a value in a loop and need to
1234 break the recursion without knowing the result yet.
1235 @@@ strange case. Straight forward we would create a Phi before
1236 starting the computation of it's predecessors. In this case we will
1237 find a Phi here in any case. The problem is that this implementation
1238 only creates a Phi after computing the predecessors, so that it is
1239 hard to compute self references of this Phi. @@@
1240 There is no simple check for the second subcase. Therefore we check
1241 for a second visit and treat all such cases as the second subcase.
1242 Anyways, the basic situation is the same: we reached a block
1243 on two paths without finding a definition of the value: No Phi
1244 nodes are needed on both paths.
1245 We return this information "Two paths, no Phi needed" by a very tricky
1246 implementation that relies on the fact that an obstack is a stack and
1247 will return a node with the same address on different allocations.
1248 Look also at phi_merge and new_rd_phi_in to understand this.
1249 @@@ Unfortunately this does not work, see testprogram
1250 three_cfpred_example.
1254 /* case 4 -- already visited. */
1255 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1257 /* visited the first time */
1258 set_irn_visited(block, get_irg_visited(current_ir_graph));
1260 /* Get the local valid value */
1261 res = block->attr.block.graph_arr[pos];
1263 /* case 2 -- If the value is actually computed, return it. */
1264 if (res) { return res;};
1266 if (block->attr.block.matured) { /* case 3 */
1268 /* The Phi has the same amount of ins as the corresponding block. */
1269 int ins = get_irn_arity(block);
1271 NEW_ARR_A (ir_node *, nin, ins);
1273 /* Phi merge collects the predecessors and then creates a node. */
1274 res = phi_merge (block, pos, mode, nin, ins);
1276 } else { /* case 1 */
1277 /* The block is not mature, we don't know how many in's are needed. A Phi
1278 with zero predecessors is created. Such a Phi node is called Phi0
1279 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1280 to the list of Phi0 nodes in this block to be matured by mature_block
1282 The Phi0 has to remember the pos of it's internal value. If the real
1283 Phi is computed, pos is used to update the array with the local
1286 res = new_rd_Phi0 (current_ir_graph, block, mode);
1287 res->attr.phi0_pos = pos;
/* Thread the Phi0 onto the block's pending list for mature_block. */
1288 res->link = block->link;
1292 /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): the string literal below appears truncated in this copy
   (continuation line missing) -- verify against the original source. */
1295 printf("Error: no value set. Use of undefined variable. Initializing
/* Fall back to a zero constant of the variable's mode. */
1297 assert (mode->code >= irm_f && mode->code <= irm_p);
1298 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1299 tarval_mode_null[mode->code]);
1302 /* The local valid value is available now. */
1303 block->attr.block.graph_arr[pos] = res;
1310 /** This is the simple algorithm.  It first generates a Phi0, then
1311 it starts the recursion. This causes an Id at the entry of
1312 every block that has no definition of the value! **/
1314 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy Phi_in stack used when the explicit stack is compiled out:
   "allocation" yields an empty (NULL) stack and freeing is a no-op.
   Declared with (void): an empty parameter list in C declares an
   unprototyped function, not a zero-argument one. */
1316 static INLINE Phi_in_stack * new_Phi_in_stack(void) { return NULL; }
1317 static INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Allocates a Phi node in irg/block for the ins predecessors given in in[].
   If at most one distinct non-Bad predecessor remains after filtering
   self-references and Bads, the Phi is not needed: the freshly allocated
   node is released from the obstack again and the single known value (or
   an undefined value if none exists) is used instead. */
1320 static INLINE ir_node *
1321 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1322 ir_node **in, int ins)
1325 ir_node *res, *known;
1327 /* Allocate a new node on the obstack. The allocation copies the in
1329 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1331 /* This loop checks whether the Phi has more than one predecessor.
1332 If so, it is a real Phi node and we break the loop. Else the
1333 Phi node merges the same definition on several paths and therefore
1334 is not needed. Don't consider Bad nodes! */
1336 for (i=0; i < ins; ++i)
     /* Skip predecessors that carry no new information: the node itself
        (self-reference through a loop), the one value already seen, or Bad. */
1340 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1348 /* i==ins: there is at most one predecessor, we don't need a phi node. */
     /* The Phi was the most recent obstack allocation, so it can be freed. */
1351 obstack_free (current_ir_graph->obst, res);
1354 /* An undefined value, e.g., in unreachable code. */
1358 res = optimize (res);
1360 /* Memory Phis in endless loops must be kept alive.
1361 As we can't distinguish these easily we keep all of them alive. */
1362 if ((res->op == op_Phi) && (mode == mode_M))
1363 add_End_keepalive(irg->end, res);
1370 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1372 #if PRECISE_EXC_CONTEXT
1374 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Builds the fragment array for fragile op n: a snapshot of the current
   block's graph_arr (one slot per local value) taken at the point n is
   constructed.  Slot 0 (the store) is replaced by a memory Proj on n, and
   the last slot (n_loc-1) records n itself so that set_frag_value can
   follow the chain of fragile ops in this block. */
1376 static INLINE ir_node **
1377 new_frag_arr (ir_node *n) {
1380 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1381 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1382 sizeof(ir_node *)*current_ir_graph->n_loc);
1383 /* turn off optimization before allocating Proj nodes, as res isn't
1385 opt = get_optimize(); set_optimize(0);
1386 /* Here we rely on the fact that all frag ops have Memory as first result! */
     /* NOTE(review): Call projects its memory result at number 3, all other
        fragile ops at 0 -- confirm against the project's Proj numbering. */
1387 if (get_irn_op(n) == op_Call)
1388 arr[0] = new_Proj(n, mode_M, 3);
1390 arr[0] = new_Proj(n, mode_M, 0);
     /* Remember n in the last slot so the frag-array chain can be walked. */
1392 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Returns the fragment array attached to fragile op n.  Call and Alloc
   keep it in their op-specific attribute structs; every other fragile op
   uses the generic attr.frag_arr field. */
1396 static INLINE ir_node **
1397 get_frag_arr (ir_node *n) {
1398 if (get_irn_op(n) == op_Call) {
1399 return n->attr.call.frag_arr;
1400 } else if (get_irn_op(n) == op_Alloc) {
1401 return n->attr.a.frag_arr;
1403 return n->attr.frag_arr;
/* Records val at position pos in frag_arr, but never overwrites an entry
   that is already set.  Then propagates the value along the chain of
   fragile ops: the last slot (n_loc-1) of a frag array holds the next
   fragile op in this block (see new_frag_arr), whose own frag array is
   updated recursively. */
1408 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1409 if (!frag_arr[pos]) frag_arr[pos] = val;
1410 if (frag_arr[current_ir_graph->n_loc - 1])
1411 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Returns the value at position pos as visible right after the fragile
   control-flow op cfOp (i.e. on the exception path out of cfOp), rather
   than at the end of block.  Reads the snapshot from cfOp's fragment
   array; if the block later redefined the value (graph_arr[pos] set), a
   Phi must be built instead so the exception path sees the pre-cfOp value. */
1415 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1419 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1421 frag_arr = get_frag_arr(cfOp);
1422 res = frag_arr[pos];
1424 if (block->attr.block.graph_arr[pos]) {
1425 /* There was a set_value after the cfOp and no get_value before that
1426 set_value. We must build a Phi node now. */
1427 if (block->attr.block.matured) {
1428 int ins = get_irn_arity(block);
1430 NEW_ARR_A (ir_node *, nin, ins);
1431 res = phi_merge(block, pos, mode, nin, ins);
       /* Block not mature yet: create a Phi0 placeholder and queue it on the
          block's link list, as in get_r_value_internal's case 1. */
1433 res = new_rd_Phi0 (current_ir_graph, block, mode);
1434 res->attr.phi0_pos = pos;
1435 res->link = block->link;
1439 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1440 but this should be better: (remove comment if this works) */
1441 /* It's a Phi, we can write this into all graph_arrs with NULL */
1442 set_frag_value(block->attr.block.graph_arr, pos, res);
     /* No later redefinition: fall back to the block-end value and cache it. */
1444 res = get_r_value_internal(block, pos, mode);
1445 set_frag_value(block->attr.block.graph_arr, pos, res);
1452 /** This function allocates a dummy Phi node to break recursions,
1453 computes the predecessors for the real phi node, and then
1454 allocates and returns this node. The routine called to allocate the
1455 node might optimize it away and return a real value.
1456 This function must be called with an in-array of proper size. **/
/* Parameters: block -- the block the Phi belongs to; pos -- index of the
   local value in graph_arr; nin -- caller-provided scratch array of length
   ins for the predecessors; ins -- number of control-flow predecessors. */
1458 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1460 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1463 /* If this block has no value at pos create a Phi0 and remember it
1464 in graph_arr to break recursions.
1465 Else we may not set graph_arr as there a later value is remembered. */
1467 if (!block->attr.block.graph_arr[pos]) {
1468 /* This is commented out as collapsing to Bads is no good idea.
1469 Either we need an assert here, or we need to call a routine
1470 that deals with this case as appropriate for the given language.
1471 Right now a self referencing Id is created which will crash irg_vrfy().
1473 Even if all variables are defined before use, it can happen that
1474 we get to the start block, if a cond has been replaced by a tuple
1475 (bad, jmp). As the start has a self referencing control flow edge,
1476 we get a self referencing Id, which is hard to optimize away. We avoid
1477 this by defining the value as a Bad node.
1478 Returning a const with tarval_bad is a preliminary solution. In some
1479 situations we might want a Warning or an Error. */
1481 if (block == get_irg_start_block(current_ir_graph)) {
1482 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1483 /* We don't need to care about exception ops in the start block.
1484 There are none by definition. */
1485 return block->attr.block.graph_arr[pos];
     /* Placeholder Phi0 stops the recursion below from revisiting us. */
1487 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1488 block->attr.block.graph_arr[pos] = phi0;
1489 #if PRECISE_EXC_CONTEXT
1490 /* Set graph_arr for fragile ops. Also here we should break recursion.
1491 We could choose a cyclic path through an cfop. But the recursion would
1492 break at some point. */
1493 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1498 /* This loop goes to all predecessor blocks of the block the Phi node
1499 is in and there finds the operands of the Phi node by calling
1500 get_r_value_internal. */
/* Note the indexing: block->in[1..ins] are the control-flow predecessors
   while nin is 0-based, hence nin[i-1]. */
1501 for (i = 1; i <= ins; ++i) {
1502 prevCfOp = skip_Proj(block->in[i]);
1504 if (is_Bad(prevCfOp)) {
1505 /* In case a Cond has been optimized we would get right to the start block
1506 with an invalid definition. */
1507 nin[i-1] = new_Bad();
1510 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1512 if (!is_Bad(prevBlock)) {
1513 #if PRECISE_EXC_CONTEXT
       /* Exception edge out of a fragile op: the value visible there is the
          one captured in the op's fragment array, not the block-end value. */
1514 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1515 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1516 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1519 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1521 nin[i-1] = new_Bad();
1525 /* After collecting all predecessors into the array nin a new Phi node
1526 with these predecessors is created. This constructor contains an
1527 optimization: If all predecessors of the Phi node are identical it
1528 returns the only operand instead of a new Phi node. */
1529 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1531 /* In case we allocated a Phi0 node at the beginning of this procedure,
1532 we need to exchange this Phi0 with the real Phi. */
1534 exchange(phi0, res);
1535 block->attr.block.graph_arr[pos] = res;
1536 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1537 only an optimization. */
1543 /* This function returns the last definition of a variable. In case
1544 this variable was last defined in a previous block, Phi nodes are
1545 inserted. If the part of the firm graph containing the definition
1546 is not yet constructed, a dummy Phi node is returned. */
/* Parameters: block -- block to look the value up in; pos -- slot in the
   block's graph_arr (0 is the store, locals start at 1); mode -- mode the
   value must have.  Uses the graph's visited counter for cycle detection. */
1548 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1551 /* There are 4 cases to treat.
1553 1. The block is not mature and we visit it the first time. We can not
1554 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1555 predecessors is returned. This node is added to the linked list (field
1556 "link") of the containing block to be completed when this block is
1557 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1560 2. The value is already known in this block, graph_arr[pos] is set and we
1561 visit the block the first time. We can return the value without
1562 creating any new nodes.
1564 3. The block is mature and we visit it the first time. A Phi node needs
1565 to be created (phi_merge). If the Phi is not needed, as all its
1566 operands are the same value reaching the block through different
1567 paths, it's optimized away and the value itself is returned.
1569 4. The block is mature, and we visit it the second time. Now two
1570 subcases are possible:
1571 * The value was computed completely the last time we were here. This
1572 is the case if there is no loop. We can return the proper value.
1573 * The recursion that visited this node and set the flag did not
1574 return yet. We are computing a value in a loop and need to
1575 break the recursion. This case only happens if we visited
1576 the same block with phi_merge before, which inserted a Phi0.
1577 So we return the Phi0.
1580 /* case 4 -- already visited. */
1581 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1582 /* As phi_merge allocates a Phi0 this value is always defined. Here
1583 is the critical difference of the two algorithms. */
1584 assert(block->attr.block.graph_arr[pos]);
1585 return block->attr.block.graph_arr[pos];
1588 /* visited the first time */
1589 set_irn_visited(block, get_irg_visited(current_ir_graph));
1591 /* Get the local valid value */
1592 res = block->attr.block.graph_arr[pos];
1594 /* case 2 -- If the value is actually computed, return it. */
1595 if (res) { return res; };
1597 if (block->attr.block.matured) { /* case 3 */
1599 /* The Phi has the same amount of ins as the corresponding block. */
1600 int ins = get_irn_arity(block);
1602 NEW_ARR_A (ir_node *, nin, ins);
1604 /* Phi merge collects the predecessors and then creates a node. */
1605 res = phi_merge (block, pos, mode, nin, ins);
1607 } else { /* case 1 */
1608 /* The block is not mature, we don't know how many in's are needed. A Phi
1609 with zero predecessors is created. Such a Phi node is called Phi0
1610 node. The Phi0 is then added to the list of Phi0 nodes in this block
1611 to be matured by mature_block later.
1612 The Phi0 has to remember the pos of its internal value. If the real
1613 Phi is computed, pos is used to update the array with the local
1615 res = new_rd_Phi0 (current_ir_graph, block, mode);
1616 res->attr.phi0_pos = pos;
     /* Prepend to the block's chain of pending Phi0 nodes (field "link"). */
1617 res->link = block->link;
1621 /* If we get here, the frontend missed a use-before-definition error */
1624 printf("Error: no value set. Use of undefined variable. Initializing
/* Fallback: fabricate a null constant of the requested mode so
   construction can continue after the frontend's error. */
1626 assert (mode->code >= irm_f && mode->code <= irm_p);
1627 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1628 tarval_mode_null[mode->code]);
1631 /* The local valid value is available now. */
1632 block->attr.block.graph_arr[pos] = res;
1637 #endif /* USE_FAST_PHI_CONSTRUCTION */
1639 /* ************************************************************************** */
1641 /** Finalize a Block node, when all control flows are known. */
1642 /** Acceptable parameters are only Block nodes. */
/* Matures block: completes every pending Phi0 queued on the block's link
   chain via phi_merge, marks the block mature, and runs in-place
   optimization on it. */
1644 mature_block (ir_node *block)
1651 assert (get_irn_opcode(block) == iro_Block);
1652 /* @@@ should be commented in
1653 assert (!get_Block_matured(block) && "Block already matured"); */
/* Maturing twice is tolerated as a no-op for now (see commented assert). */
1655 if (!get_Block_matured(block)) {
1657 /* An array for building the Phi nodes. */
     /* ARR_LEN(block->in)-1: slot in[0] is not a control-flow predecessor
        (phi_merge iterates in[1..ins]). */
1658 ins = ARR_LEN (block->in)-1;
1659 NEW_ARR_A (ir_node *, nin, ins);
1661 /* Traverse a chain of Phi nodes attached to this block and mature
1663 for (n = block->link; n; n=next) {
       /* Fresh visited counter per Phi0 so phi_merge's cycle detection
          starts clean for each value. */
1664 inc_irg_visited(current_ir_graph);
1666 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1669 block->attr.block.matured = 1;
1671 /* Now, as the block is a finished firm node, we can optimize it.
1672 Since other nodes have been allocated since the block was created
1673 we can not free the node on the obstack. Therefore we have to call
1675 Unfortunately the optimization does not change a lot, as all allocated
1676 nodes refer to the unoptimized node.
1677 We can call _2, as global cse has no effect on blocks. */
1678 block = optimize_in_place_2(block);
/* new_d_* constructors: convenience wrappers around the new_rd_*
   constructors that supply current_ir_graph and its current block.
   Exceptions visible below: new_d_Const allocates in the start block, and
   the division-family constructors attach a fragment array when
   PRECISE_EXC_CONTEXT is enabled and the node was not optimized away. */
1684 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1686 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
/* Constants live in the start block, not the current block. */
1691 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1693 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1698 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1700 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1705 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1707 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
/* Default (fall-through) Proj of a Cond: marks the Cond fragmentary and
   records max_proj as its default projection number. */
1712 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1715 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
1716 arg->attr.c.kind = fragmentary;
1717 arg->attr.c.default_proj = max_proj;
1718 res = new_Proj (arg, mode_X, max_proj);
1723 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1725 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1730 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1732 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1737 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1739 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1744 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1746 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1752 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1754 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1759 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1761 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
/* Quot/DivMod/Div/Mod are fragile (may raise): snapshot the local values
   in a fragment array for precise exception contexts. */
1766 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1769 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1771 #if PRECISE_EXC_CONTEXT
1772 if ((current_ir_graph->phase_state == phase_building) &&
1773 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1774 res->attr.frag_arr = new_frag_arr(res);
1781 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1784 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1786 #if PRECISE_EXC_CONTEXT
1787 if ((current_ir_graph->phase_state == phase_building) &&
1788 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1789 res->attr.frag_arr = new_frag_arr(res);
1796 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1799 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1801 #if PRECISE_EXC_CONTEXT
1802 if ((current_ir_graph->phase_state == phase_building) &&
1803 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1804 res->attr.frag_arr = new_frag_arr(res);
1811 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1814 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1816 #if PRECISE_EXC_CONTEXT
1817 if ((current_ir_graph->phase_state == phase_building) &&
1818 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1819 res->attr.frag_arr = new_frag_arr(res);
/* Bit/shift/compare/control-flow and memory-op wrappers: each forwards to
   its new_rd_* counterpart in the current block of current_ir_graph.
   Call, Load and Store are fragile ops and get a fragment array under
   PRECISE_EXC_CONTEXT (unless optimized away). */
1826 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1828 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1833 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1835 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1840 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1842 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1847 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1849 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1854 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1856 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1861 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1863 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1868 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1870 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
1875 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1877 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
1882 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
1884 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
1889 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
1891 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
1896 new_d_Jmp (dbg_info* db)
1898 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
1902 new_d_Cond (dbg_info* db, ir_node *c)
1904 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
1908 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
1912 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
1913 store, callee, arity, in, type);
1914 #if PRECISE_EXC_CONTEXT
1915 if ((current_ir_graph->phase_state == phase_building) &&
1916 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
1917 res->attr.call.frag_arr = new_frag_arr(res);
1924 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
1926 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
1931 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
1933 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
1938 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
1941 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
1943 #if PRECISE_EXC_CONTEXT
1944 if ((current_ir_graph->phase_state == phase_building) &&
1945 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
1946 res->attr.frag_arr = new_frag_arr(res);
1953 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
1956 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
1958 #if PRECISE_EXC_CONTEXT
1959 if ((current_ir_graph->phase_state == phase_building) &&
1960 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
1961 res->attr.frag_arr = new_frag_arr(res);
/* Remaining new_d_* wrappers: allocation, entity selection, symbolic
   constants, interprocedural-view nodes.  Alloc is fragile and gets a
   fragment array under PRECISE_EXC_CONTEXT; Bad and Unknown are unique
   per graph and simply returned from the graph. */
1968 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
1972 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
1973 store, size, alloc_type, where);
1974 #if PRECISE_EXC_CONTEXT
1975 if ((current_ir_graph->phase_state == phase_building) &&
1976 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
1977 res->attr.a.frag_arr = new_frag_arr(res);
1984 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1986 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
1987 store, ptr, size, free_type);
/* Sel without array indices (0, NULL) -- selects a plain entity. */
1991 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
1992 /* GL: objptr was called frame before. Frame was a bad choice for the name
1993 as the operand could as well be a pointer to a dynamic object. */
1995 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
1996 store, objptr, 0, NULL, ent);
2000 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2002 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2003 store, objptr, n_index, index, sel);
2007 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2009 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2010 store, objptr, ent));
2014 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2016 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->current_block,
2021 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2023 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* The Bad node is unique per graph. */
2031 return current_ir_graph->bad;
/* Likewise, Unknown is unique per graph. */
2035 new_d_Unknown (void)
2037 return current_ir_graph->unknown;
2041 new_d_CallBegin (dbg_info *db, ir_node *call)
2044 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2049 new_d_EndReg (dbg_info *db)
2052 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2057 new_d_EndExcept (dbg_info *db)
2060 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2065 new_d_Break (dbg_info *db)
2067 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2071 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2073 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2077 /* ********************************************************************* */
2078 /* Comfortable interface with automatic Phi node construction. */
2079 /* (Uses also constructors of the ?? interface, except new_Block.) */
2080 /* ********************************************************************* */
2082 /** Block construction **/
2083 /* immature Block without predecessors */
/* Creates an immature (not yet matured) block with no predecessors and
   makes it the current block.  The in-array is dynamic (arity -1) so
   predecessors can be appended later via add_in_edge; graph_arr is
   allocated and zeroed for the automatic Phi construction. */
2084 ir_node *new_d_immBlock (dbg_info* db) {
2087 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2088 /* creates a new dynamic in-array as length of in is -1 */
2089 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_R, -1, NULL);
2090 current_ir_graph->current_block = res;
2091 res->attr.block.matured = 0;
2092 res->attr.block.exc = exc_normal;
2093 res->attr.block.handler_entry = 0;
2094 res->attr.block.in_cg = NULL;
2095 set_Block_block_visited(res, 0);
2097 /* Create and initialize array for Phi-node construction. */
2098 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2099 current_ir_graph->n_loc);
2100 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2102 /* Immature block may not be optimized! */
2110 return new_d_immBlock(NULL);
2113 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new control-flow predecessor of block.  Only legal on
   immature blocks; jmp must be non-NULL. */
2115 add_in_edge (ir_node *block, ir_node *jmp)
2117 if (block->attr.block.matured) {
2118 assert(0 && "Error: Block already matured!\n");
2121 assert (jmp != NULL);
2122 ARR_APP1 (ir_node *, block->in, jmp);
2126 /* changing the current block */
/* Makes target the block into which subsequent new_d_* constructors place
   their nodes. */
2128 switch_block (ir_node *target)
2130 current_ir_graph->current_block = target;
2133 /* ************************ */
2134 /* parameter administration */
/* Local-value administration.  Slot 0 of a block's graph_arr holds the
   current memory store; user-visible variables start at slot 1, hence the
   pos + 1 offsets below.  Only usable while the graph is in
   phase_building. */
2136 /* get a value from the parameter array from the current block by its index */
2138 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2140 assert(get_irg_phase_state (current_ir_graph) == phase_building);
     /* Fresh visited counter for get_r_value_internal's cycle detection. */
2141 inc_irg_visited(current_ir_graph);
2143 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2145 /* get a value from the parameter array from the current block by its index */
2147 get_value (int pos, ir_mode *mode)
2149 return get_d_value(NULL, pos, mode);
2152 /* set a value at position pos in the parameter array from the current block */
2154 set_value (int pos, ir_node *value)
2156 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2157 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2160 /* get the current store */
2164 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2165 /* GL: one could call get_value instead */
2166 inc_irg_visited(current_ir_graph);
2167 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2170 /* set the current store */
2172 set_store (ir_node *store)
2174 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2175 /* GL: one could call set_value instead */
2176 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Registers ka as a keep-alive edge of the graph's End node so dead-node
   elimination cannot remove it. */
2180 keep_alive (ir_node *ka)
2182 add_End_keepalive(current_ir_graph->end, ka);
2185 /** Useful access routines **/
2186 /* Returns the current block of the current graph. To set the current
2187 block use switch_block(). */
2188 ir_node *get_cur_block() {
2189 return get_irg_current_block(current_ir_graph);
2192 /* Returns the frame type of the current graph */
2193 type *get_cur_frame_type() {
2194 return get_irg_frame_type(current_ir_graph);
2198 /* ********************************************************************* */
2201 /* call once for each run of the library */
2207 /* call for each graph */
/* Ends the construction phase of irg: the graph leaves phase_building, so
   the constructors above that assert phase_building (e.g. get_value,
   set_value, new_d_immBlock) may no longer be used on it. */
2209 finalize_cons (ir_graph *irg) {
2210 irg->phase_state = phase_high;
/* Convenience constructors without debug information: each new_X simply
   forwards to new_d_X with a NULL dbg_info. */
2214 ir_node *new_Block(int arity, ir_node **in) {
2215 return new_d_Block(NULL, arity, in);
2217 ir_node *new_Start (void) {
2218 return new_d_Start(NULL);
2220 ir_node *new_End (void) {
2221 return new_d_End(NULL);
2223 ir_node *new_Jmp (void) {
2224 return new_d_Jmp(NULL);
2226 ir_node *new_Cond (ir_node *c) {
2227 return new_d_Cond(NULL, c);
2229 ir_node *new_Return (ir_node *store, int arity, ir_node **in) {
2230 return new_d_Return(NULL, store, arity, in);
2232 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2233 return new_d_Raise(NULL, store, obj);
2235 ir_node *new_Const (ir_mode *mode, tarval *con) {
2236 return new_d_Const(NULL, mode, con);
2238 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2239 return new_d_SymConst(NULL, value, kind);
2241 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2242 return new_d_simpleSel(NULL, store, objptr, ent);
2244 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2246 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2248 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2249 return (new_d_InstOf (NULL, store, objptr, ent));
2251 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2253 return new_d_Call(NULL, store, callee, arity, in, type);
2255 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2256 return new_d_Add(NULL, op1, op2, mode);
2258 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2259 return new_d_Sub(NULL, op1, op2, mode);
2261 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2262 return new_d_Minus(NULL, op, mode);
2264 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2265 return new_d_Mul(NULL, op1, op2, mode);
2267 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2268 return new_d_Quot(NULL, memop, op1, op2);
2270 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2271 return new_d_DivMod(NULL, memop, op1, op2);
2273 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2274 return new_d_Div(NULL, memop, op1, op2);
2276 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2277 return new_d_Mod(NULL, memop, op1, op2);
2279 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2280 return new_d_Abs(NULL, op, mode);
2282 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2283 return new_d_And(NULL, op1, op2, mode);
2285 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2286 return new_d_Or(NULL, op1, op2, mode);
2288 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2289 return new_d_Eor(NULL, op1, op2, mode);
2291 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2292 return new_d_Not(NULL, op, mode);
2294 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2295 return new_d_Shl(NULL, op, k, mode);
2297 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2298 return new_d_Shr(NULL, op, k, mode);
2300 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2301 return new_d_Shrs(NULL, op, k, mode);
/* Alias kept for backward compatibility with the old name. */
2303 #define new_Rotate new_Rot
2304 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2305 return new_d_Rot(NULL, op, k, mode);
2307 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2308 return new_d_Cmp(NULL, op1, op2);
2310 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2311 return new_d_Conv(NULL, op, mode);
2313 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2314 return new_d_Phi(NULL, arity, in, mode);
2316 ir_node *new_Load (ir_node *store, ir_node *addr) {
2317 return new_d_Load(NULL, store, addr);
2319 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2320 return new_d_Store(NULL, store, addr, val);
2322 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2323 where_alloc where) {
2324 return new_d_Alloc(NULL, store, size, alloc_type, where);
2326 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2328 return new_d_Free(NULL, store, ptr, size, free_type);
2330 ir_node *new_Sync (int arity, ir_node **in) {
2331 return new_d_Sync(NULL, arity, in);
2333 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2334 return new_d_Proj(NULL, arg, mode, proj);
2336 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2337 return new_d_defaultProj(NULL, arg, max_proj);
2339 ir_node *new_Tuple (int arity, ir_node **in) {
2340 return new_d_Tuple(NULL, arity, in);
2342 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2343 return new_d_Id(NULL, val, mode);
2345 ir_node *new_Bad (void) {
2348 ir_node *new_Unknown(void) {
2349 return new_d_Unknown();
2351 ir_node *new_CallBegin (ir_node *callee) {
2352 return new_d_CallBegin(NULL, callee);
2354 ir_node *new_EndReg (void) {
2355 return new_d_EndReg(NULL);
2357 ir_node *new_EndExcept (void) {
2358 return new_d_EndExcept(NULL);
2360 ir_node *new_Break (void) {
2361 return new_d_Break(NULL);
2363 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2364 return new_d_Filter(NULL, arg, mode, proj);