1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Authors: Martin Trapp, Christian Schaefer
6 ** ircons.c: basic and more detailed irnode constructors
7 ** store, block and parameter administration.
8 ** Adapted to extended FIRM nodes (exceptions...) and commented
9 ** by Goetz Lindenmaier
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "common_t.h"
28 /* memset belongs to string.h */
31 /* # include "exc.h" */
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
43 /*** ******************************************** */
44 /** privat interfaces, for professional use only */
46 /* Constructs a Block with a fixed number of predecessors.
47 Does not set current_block. Can not be used with automatic
48 Phi node construction. */
/* NOTE(review): source fragment — return type, local declarations and the
   closing brace of this constructor are not visible in this extract. */
50 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* A Block node has no enclosing block, hence the NULL block argument. */
54 res = new_ir_node (db, irg, NULL, op_Block, mode_R, arity, in);
/* The arity is fixed up front, so the block is born mature: no Phi0
   placeholders will ever need patching in this block. */
55 set_Block_matured(res, 1);
56 set_Block_block_visited(res, 0);
/* Default exception attributes: ordinary control flow, not a handler entry. */
58 res->attr.block.exc = exc_normal;
59 res->attr.block.handler_entry = 0;
60 res->attr.block.in_cg = NULL; /* no interprocedural (call-graph) preds yet */
67 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
71 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
78 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
82 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
88 /* Creates a Phi node with all predecessors. Calling this constructor
89 is only allowed if the corresponding block is mature. */
/* NOTE(review): source fragment — local declarations, return and closing
   brace are not visible here. */
91 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* A proper Phi needs exactly one operand per control predecessor, which is
   only known once the block is mature. */
95 assert( get_Block_matured(block) );
96 assert( get_irn_arity(block) == arity );
98 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
/* optimize() may fold the Phi away (e.g. all operands identical). */
100 res = optimize (res);
103 /* Memory Phis in endless loops must be kept alive.
104 As we can't distinguish these easily we keep all of them alive. */
105 if ((res->op == op_Phi) && (mode == mode_M))
106 add_End_keepalive(irg->end, res);
111 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
114 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
116 res = optimize (res);
120 res = local_optimize_newby (res);
127 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
129 ir_node *in[1] = {val};
131 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
132 res = optimize (res);
138 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
141 ir_node *in[1] = {arg};
143 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
144 res->attr.proj = proj;
147 assert(get_Proj_pred(res));
148 assert(get_nodes_Block(get_Proj_pred(res)));
150 res = optimize (res);
/* Builds the default-case Proj of a Cond. Only valid for a Cond whose
   selector input is an integer (switch-like Cond, not a boolean one).
   NOTE(review): fragment — parameter list continuation, locals, return and
   closing brace are not visible in this extract. */
158 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
162 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
/* Mark the Cond as fragmentary: not all case labels are listed explicitly,
   and remember which Proj number is the default target. */
163 arg->attr.c.kind = fragmentary;
164 arg->attr.c.default_proj = max_proj;
/* The default exit is a control-flow Proj (mode X) with number max_proj. */
165 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
170 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
172 ir_node *in[1] = {op};
174 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
175 res = optimize (res);
182 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
186 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
187 res = optimize (res);
193 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
194 ir_node *op1, ir_node *op2, ir_mode *mode)
196 ir_node *in[2] = {op1, op2};
198 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
199 res = optimize (res);
205 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
206 ir_node *op1, ir_node *op2, ir_mode *mode)
208 ir_node *in[2] = {op1, op2};
210 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
211 res = optimize (res);
217 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
218 ir_node *op, ir_mode *mode)
220 ir_node *in[1] = {op};
222 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
223 res = optimize (res);
229 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
230 ir_node *op1, ir_node *op2, ir_mode *mode)
232 ir_node *in[2] = {op1, op2};
234 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
235 res = optimize (res);
241 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
242 ir_node *memop, ir_node *op1, ir_node *op2)
244 ir_node *in[3] = {memop, op1, op2};
246 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
247 res = optimize (res);
253 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
254 ir_node *memop, ir_node *op1, ir_node *op2)
256 ir_node *in[3] = {memop, op1, op2};
258 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
259 res = optimize (res);
265 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
266 ir_node *memop, ir_node *op1, ir_node *op2)
268 ir_node *in[3] = {memop, op1, op2};
270 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
271 res = optimize (res);
277 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
278 ir_node *memop, ir_node *op1, ir_node *op2)
280 ir_node *in[3] = {memop, op1, op2};
282 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
283 res = optimize (res);
289 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
290 ir_node *op1, ir_node *op2, ir_mode *mode)
292 ir_node *in[2] = {op1, op2};
294 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
295 res = optimize (res);
301 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
302 ir_node *op1, ir_node *op2, ir_mode *mode)
304 ir_node *in[2] = {op1, op2};
306 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
307 res = optimize (res);
313 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
314 ir_node *op1, ir_node *op2, ir_mode *mode)
316 ir_node *in[2] = {op1, op2};
318 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
319 res = optimize (res);
325 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
326 ir_node *op, ir_mode *mode)
328 ir_node *in[1] = {op};
330 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
331 res = optimize (res);
337 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
338 ir_node *op, ir_node *k, ir_mode *mode)
340 ir_node *in[2] = {op, k};
342 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
343 res = optimize (res);
349 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
350 ir_node *op, ir_node *k, ir_mode *mode)
352 ir_node *in[2] = {op, k};
354 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
355 res = optimize (res);
361 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
362 ir_node *op, ir_node *k, ir_mode *mode)
364 ir_node *in[2] = {op, k};
366 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
367 res = optimize (res);
373 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
374 ir_node *op, ir_node *k, ir_mode *mode)
376 ir_node *in[2] = {op, k};
378 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
379 res = optimize (res);
385 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
386 ir_node *op, ir_mode *mode)
388 ir_node *in[1] = {op};
390 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
391 res = optimize (res);
397 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
398 ir_node *op1, ir_node *op2)
400 ir_node *in[2] = {op1, op2};
402 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
403 res = optimize (res);
409 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
413 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, in);
414 res = optimize (res);
420 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
422 ir_node *in[1] = {c};
424 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
425 res->attr.c.kind = dense;
426 res->attr.c.default_proj = 0;
427 res = optimize (res);
/* Constructs a Call node: predecessors are memory, callee address, and the
   argument values.
   NOTE(review): fragment — the lines declaring r_arity and storing store and
   callee into r_in[0]/r_in[1] are not visible here; presumably r_arity ==
   arity + 2, confirm against the full source. */
433 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
434 ir_node *callee, int arity, ir_node **in, type *type)
441 NEW_ARR_A (ir_node *, r_in, r_arity);
/* Arguments follow the two fixed slots (memory, callee) in the in-array. */
444 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
446 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must describe a method signature. */
448 assert(is_method_type(type));
449 set_Call_type(res, type);
/* No callee analysis yet: the set of possible callees is unknown. */
450 res->attr.call.callee_arr = NULL;
451 res = optimize (res);
457 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
458 ir_node *store, int arity, ir_node **in)
465 NEW_ARR_A (ir_node *, r_in, r_arity);
467 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
468 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
469 res = optimize (res);
475 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
477 ir_node *in[2] = {store, obj};
479 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
480 res = optimize (res);
486 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
487 ir_node *store, ir_node *adr)
489 ir_node *in[2] = {store, adr};
491 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
493 res = optimize (res);
499 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
500 ir_node *store, ir_node *adr, ir_node *val)
502 ir_node *in[3] = {store, adr, val};
504 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
506 res = optimize (res);
513 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
514 ir_node *size, type *alloc_type, where_alloc where)
516 ir_node *in[2] = {store, size};
518 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
520 res->attr.a.where = where;
521 res->attr.a.type = alloc_type;
523 res = optimize (res);
529 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
530 ir_node *ptr, ir_node *size, type *free_type)
532 ir_node *in[3] = {store, ptr, size};
534 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
536 res->attr.f = free_type;
538 res = optimize (res);
/* Constructs a Sel node: selects entity ent relative to objptr, with
   optional index expressions for array accesses.
   NOTE(review): fragment — the lines declaring r_arity and storing store and
   objptr into r_in[0]/r_in[1] are not visible in this extract. */
544 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
545 int arity, ir_node **in, entity *ent)
552 NEW_ARR_A (ir_node *, r_in, r_arity);
/* Index operands follow the two fixed slots (memory, object pointer). */
555 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
/* A Sel computes an address, hence pointer mode. */
556 res = new_ir_node (db, irg, block, op_Sel, mode_p, r_arity, r_in);
558 res->attr.s.ltyp = static_linkage;
559 res->attr.s.ent = ent;
561 res = optimize (res);
/* Constructs an InstOf (dynamic type test) node.
   NOTE(review): fragment — r_arity/r_in setup lines are not visible. */
567 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr, type *ent)
574 NEW_ARR_A (ir_node *, r_in, r_arity);
/* NOTE(review): the node is created with op_Sel, not op_InstOf — this looks
   like a copy-paste slip from new_rd_Sel; confirm against the op table. */
578 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
580 res->attr.io.ent = ent;
/* Optimization deliberately disabled for this node kind. */
582 // res = optimize (res);
/* Constructs a SymConst: a symbolic constant that is either an address
   (linkage_ptr_info) or a type property (type_tag / size).
   NOTE(review): fragment — the declaration of `mode`/`in` and the branch
   selecting the result mode per symkind are not fully visible here. */
588 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
589 symconst_kind symkind)
594 if (symkind == linkage_ptr_info)
598 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, in);
/* Remember which flavor of SymConst this is. */
600 res->attr.i.num = symkind;
/* The tori union holds either an ident (address info) or a type,
   discriminated by symkind. */
601 if (symkind == linkage_ptr_info) {
602 res->attr.i.tori.ptrinfo = (ident *)value;
604 assert ( ( (symkind == type_tag)
605 || (symkind == size))
606 && (is_type(value)));
607 res->attr.i.tori.typ = (type *)value;
609 res = optimize (res);
615 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
619 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
621 res = optimize (res);
629 return current_ir_graph->bad;
635 return current_ir_graph->unknown;
639 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
641 ir_node *in[1] = { get_Call_ptr(call) };
643 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
644 res->attr.callbegin.irg = irg;
645 res->attr.callbegin.call = call;
646 res = optimize (res);
652 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
656 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
657 res->attr.end.irg = irg;
664 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
668 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
669 res->attr.end.irg = irg;
676 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
680 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, in);
681 res = optimize (res);
687 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
690 ir_node *in[1] = {arg};
692 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
693 res->attr.filter.proj = proj;
694 res->attr.filter.in_cg = NULL;
697 assert(get_Proj_pred(res));
698 assert(get_nodes_Block(get_Proj_pred(res)));
700 res = optimize (res);
707 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
708 return new_rd_Block(NULL, irg, arity, in);
710 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
711 return new_rd_Start(NULL, irg, block);
713 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
714 return new_rd_End(NULL, irg, block);
716 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
717 return new_rd_Jmp(NULL, irg, block);
719 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
720 return new_rd_Cond(NULL, irg, block, c);
722 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
723 ir_node *store, int arity, ir_node **in) {
724 return new_rd_Return(NULL, irg, block, store, arity, in);
726 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
727 ir_node *store, ir_node *obj) {
728 return new_rd_Raise(NULL, irg, block, store, obj);
730 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
731 ir_mode *mode, tarval *con) {
732 return new_rd_Const(NULL, irg, block, mode, con);
734 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
735 type_or_id_p value, symconst_kind symkind) {
736 return new_rd_SymConst(NULL, irg, block, value, symkind);
738 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
739 ir_node *objptr, int n_index, ir_node **index,
741 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
743 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
745 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
747 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
748 ir_node *callee, int arity, ir_node **in,
750 return new_rd_Call(NULL, irg, block, store, callee, arity, in, type);
752 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
753 ir_node *op1, ir_node *op2, ir_mode *mode) {
754 return new_rd_Add(NULL, irg, block, op1, op2, mode);
756 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
757 ir_node *op1, ir_node *op2, ir_mode *mode) {
758 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
760 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
761 ir_node *op, ir_mode *mode) {
762 return new_rd_Minus(NULL, irg, block, op, mode);
764 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
765 ir_node *op1, ir_node *op2, ir_mode *mode) {
766 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
768 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
769 ir_node *memop, ir_node *op1, ir_node *op2) {
770 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
772 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
773 ir_node *memop, ir_node *op1, ir_node *op2) {
774 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
776 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
777 ir_node *memop, ir_node *op1, ir_node *op2) {
778 return new_rd_Div(NULL, irg, block, memop, op1, op2);
780 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
781 ir_node *memop, ir_node *op1, ir_node *op2) {
782 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
784 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
785 ir_node *op, ir_mode *mode) {
786 return new_rd_Abs(NULL, irg, block, op, mode);
788 ir_node *new_r_And (ir_graph *irg, ir_node *block,
789 ir_node *op1, ir_node *op2, ir_mode *mode) {
790 return new_rd_And(NULL, irg, block, op1, op2, mode);
792 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
793 ir_node *op1, ir_node *op2, ir_mode *mode) {
794 return new_rd_Or(NULL, irg, block, op1, op2, mode);
796 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
797 ir_node *op1, ir_node *op2, ir_mode *mode) {
798 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
800 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
801 ir_node *op, ir_mode *mode) {
802 return new_rd_Not(NULL, irg, block, op, mode);
804 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
805 ir_node *op1, ir_node *op2) {
806 return new_rd_Cmp(NULL, irg, block, op1, op2);
808 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
809 ir_node *op, ir_node *k, ir_mode *mode) {
810 return new_rd_Shl(NULL, irg, block, op, k, mode);
812 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
813 ir_node *op, ir_node *k, ir_mode *mode) {
814 return new_rd_Shr(NULL, irg, block, op, k, mode);
816 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
817 ir_node *op, ir_node *k, ir_mode *mode) {
818 return new_rd_Shrs(NULL, irg, block, op, k, mode);
820 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
821 ir_node *op, ir_node *k, ir_mode *mode) {
822 return new_rd_Rot(NULL, irg, block, op, k, mode);
824 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
825 ir_node *op, ir_mode *mode) {
826 return new_rd_Conv(NULL, irg, block, op, mode);
828 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
829 ir_node **in, ir_mode *mode) {
830 return new_rd_Phi(NULL, irg, block, arity, in, mode);
832 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
833 ir_node *store, ir_node *adr) {
834 return new_rd_Load(NULL, irg, block, store, adr);
836 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
837 ir_node *store, ir_node *adr, ir_node *val) {
838 return new_rd_Store(NULL, irg, block, store, adr, val);
840 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
841 ir_node *size, type *alloc_type, where_alloc where) {
842 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
844 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
845 ir_node *ptr, ir_node *size, type *free_type) {
846 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
848 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
849 return new_rd_Sync(NULL, irg, block, arity, in);
851 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
852 ir_mode *mode, long proj) {
853 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
855 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
857 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
859 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
860 int arity, ir_node **in) {
861 return new_rd_Tuple(NULL, irg, block, arity, in );
863 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
864 ir_node *val, ir_mode *mode) {
865 return new_rd_Id(NULL, irg, block, val, mode);
867 ir_node *new_r_Bad () {
870 ir_node *new_r_Unknown () {
871 return new_rd_Unknown();
873 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
874 return new_rd_CallBegin(NULL, irg, block, callee);
876 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
877 return new_rd_EndReg(NULL, irg, block);
879 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
880 return new_rd_EndExcept(NULL, irg, block);
882 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
883 return new_rd_Break(NULL, irg, block);
885 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
886 ir_mode *mode, long proj) {
887 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
891 /** ********************/
892 /** public interfaces */
893 /** construction tools */
895 /****f* ircons/new_Start
898 * new_Start -- create a new Start node in the current block
901 * s = new_Start(void);
902 * ir_node* new_Start(void);
905 * s - pointer to the created Start node
910 new_d_Start (dbg_info* db)
914 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
915 op_Start, mode_T, 0, NULL);
917 res = optimize (res);
923 new_d_End (dbg_info* db)
926 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
927 op_End, mode_X, -1, NULL);
928 res = optimize (res);
934 /* Constructs a Block with a fixed number of predecessors.
935 Does set current_block. Can be used with automatic Phi
936 node construction. */
938 new_d_Block (dbg_info* db, int arity, ir_node **in)
942 res = new_rd_Block (db, current_ir_graph, arity, in);
944 /* Create and initialize array for Phi-node construction. */
945 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
946 current_ir_graph->n_loc);
947 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
949 res = optimize (res);
950 current_ir_graph->current_block = res;
957 /* ***********************************************************************/
958 /* Methods necessary for automatic Phi node creation */
960 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
961 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
962 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
963 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
965 Call Graph: ( A ---> B == A "calls" B)
967 get_value mature_block
975 get_r_value_internal |
979 new_rd_Phi0 new_rd_Phi_in
981 * *************************************************************************** */
983 /* Creates a Phi node with 0 predecessors */
985 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
988 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
993 /* There are two implementations of the Phi node construction. The first
994 is faster, but does not work for blocks with more than 2 predecessors.
995 The second works always but is slower and causes more unnecessary Phi
997 Select the implementations by the following preprocessor flag set in
999 #if USE_FAST_PHI_CONSTRUCTION
1001 /* This is a stack used for allocating and deallocating nodes in
1002 new_rd_Phi_in. The original implementation used the obstack
1003 to model this stack, now it is explicit. This reduces side effects.
1005 #if USE_EXPLICIT_PHI_IN_STACK
1007 new_Phi_in_stack() {
1010 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1012 res->stack = NEW_ARR_F (ir_node *, 1);
1019 free_Phi_in_stack(Phi_in_stack *s) {
1020 DEL_ARR_F(s->stack);
1024 void free_to_Phi_in_stack(ir_node *phi) {
1025 assert(get_irn_opcode(phi) == iro_Phi);
1027 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1028 current_ir_graph->Phi_in_stack->pos)
1029 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1031 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1033 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node for new_rd_Phi_in: either pops a previously freed Phi
   from the per-graph Phi_in_stack and re-initializes it, or allocates a
   fresh node when the stack is empty.
   NOTE(review): fragment — the empty-stack test, the `res` declaration and
   the function tail are not visible in this extract. */
1037 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1038 int arity, ir_node **in) {
1040 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1041 int pos = current_ir_graph->Phi_in_stack->pos;
1045 /* We need to allocate a new node */
/* NOTE(review): `db` is not among this function's visible parameters —
   either a parameter line is missing from this extract or this should be
   NULL; confirm against the full source. */
1046 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1048 /* reuse the old node and initialize it again. */
1051 assert (res->kind == k_ir_node);
1052 assert (res->op == op_Phi);
1056 assert (arity >= 0);
1057 /* ???!!! How to free the old in array?? */
/* Slot 0 of the in-array is reserved (block pointer), operands start at 1. */
1058 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1060 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1062 (current_ir_graph->Phi_in_stack->pos)--;
1066 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1068 /* Creates a Phi node with a given, fixed array **in of predecessors.
1069 If the Phi node is unnecessary, as the same value reaches the block
1070 through all control flow paths, it is eliminated and the value
1071 returned directly. This constructor is only intended for use in
1072 the automatic Phi node generation triggered by get_value or mature.
1073 The implementation is quite tricky and depends on the fact, that
1074 the nodes are allocated on a stack:
1075 The in array contains predecessors and NULLs. The NULLs appear,
1076 if get_r_value_internal, that computed the predecessors, reached
1077 the same block on two paths. In this case the same value reaches
1078 this block on both paths, there is no definition in between. We need
1079 not allocate a Phi where these path's merge, but we have to communicate
1080 this fact to the caller. This happens by returning a pointer to the
1081 node the caller _will_ allocate. (Yes, we predict the address. We can
1082 do so because the nodes are allocated on the obstack.) The caller then
1083 finds a pointer to itself and, when this routine is called again,
/* Fast-construction variant of new_rd_Phi_in (see the long comment above):
   builds a Phi from `in`, eliminating it when at most one distinct
   predecessor value reaches the block. Relies on stack/obstack allocation
   so a deallocated node's address can be predicted by the caller.
   NOTE(review): fragment — several lines (loop declarations for `i`, the
   #else/#endif arms, break statements, return) are not visible here. */
1087 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1088 ir_node **in, int ins)
1091 ir_node *res, *known;
1093 /* allocate a new node on the obstack.
1094 This can return a node to which some of the pointers in the in-array
1096 Attention: the constructor copies the in array, i.e., the later changes
1097 to the array in this routine do not affect the constructed node! If
1098 the in array contains NULLs, there will be missing predecessors in the
1100 Is this a possible internal state of the Phi node generation? */
1101 #if USE_EXPLICIT_PHI_IN_STACK
1102 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1104 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1106 /* The in-array can contain NULLs. These were returned by
1107 get_r_value_internal if it reached the same block/definition on a
1109 The NULLs are replaced by the node itself to simplify the test in the
1111 for (i=0; i < ins; ++i)
1112 if (in[i] == NULL) in[i] = res;
1114 /* This loop checks whether the Phi has more than one predecessor.
1115 If so, it is a real Phi node and we break the loop. Else the
1116 Phi node merges the same definition on several paths and therefore
1118 for (i=0; i < ins; ++i)
/* Self-references and repeats of the single known value don't count. */
1120 if (in[i]==res || in[i]==known) continue;
1128 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Give the speculative node back: to the explicit stack, or to the obstack
   (which, being a stack, will hand out the same address next time). */
1130 #if USE_EXPLICIT_PHI_IN_STACK
1131 free_to_Phi_in_stack(res);
1133 obstack_free (current_ir_graph->obst, res);
1137 res = optimize (res);
1141 /* return the pointer to the Phi node. This node might be deallocated! */
1146 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1148 /** This function computes the predecessors for a real Phi node, and then
1149 allocates and returns this node. The routine called to allocate the
1150 node might optimize it away and return a real value, or even a pointer
1151 to a deallocated Phi node on top of the obstack!
1152 This function is called with an in-array of proper size. **/
1153 static inline ir_node *
1154 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1156 ir_node *prevBlock, *res;
1159 /* This loop goes to all predecessor blocks of the block the Phi node is in
1160 and there finds the operands of the Phi node by calling
1161 get_r_value_internal. */
1162 for (i = 1; i <= ins; ++i) {
1163 assert (block->in[i]);
1164 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1166 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1169 /* After collecting all predecessors into the array nin a new Phi node
1170 with these predecessors is created. This constructor contains an
1171 optimization: If all predecessors of the Phi node are identical it
1172 returns the only operand instead of a new Phi node. If the value
1173 passes two different control flow edges without being defined, and
1174 this is the second path treated, a pointer to the node that will be
1175 allocated for the first path (recursion) is returned. We already
1176 know the address of this node, as it is the next node to be allocated
1177 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1178 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1180 /* Now we now the value for "pos" and can enter it in the array with
1181 all known local variables. Attention: this might be a pointer to
1182 a node, that later will be allocated!!! See new_rd_Phi_in.
1183 If this is called in mature, after some set_value in the same block,
1184 the proper value must not be overwritten:
1186 get_value (makes Phi0, put's it into graph_arr)
1187 set_value (overwrites Phi0 in graph_arr)
1188 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1191 if (!block->attr.block.graph_arr[pos]) {
1192 block->attr.block.graph_arr[pos] = res;
1194 /* printf(" value already computed by %s\n",
1195 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
1201 /* This function returns the last definition of a variable. In case
1202 this variable was last defined in a previous block, Phi nodes are
1203 inserted. If the part of the firm graph containing the definition
1204 is not yet constructed, a dummy Phi node is returned. */
1206 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1209 /* There are 4 cases to treat.
1211 1. The block is not mature and we visit it the first time. We can not
1212 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1213 predecessors is returned. This node is added to the linked list (field
1214 "link") of the containing block to be completed when this block is
1215 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1218 2. The value is already known in this block, graph_arr[pos] is set and we
1219 visit the block the first time. We can return the value without
1220 creating any new nodes.
1222 3. The block is mature and we visit it the first time. A Phi node needs
1223 to be created (phi_merge). If the Phi is not needed, as all it's
1224 operands are the same value reaching the block through different
1225 paths, it's optimized away and the value itself is returned.
1227 4. The block is mature, and we visit it the second time. Now two
1228 subcases are possible:
1229 * The value was computed completely the last time we were here. This
1230 is the case if there is no loop. We can return the proper value.
1231 * The recursion that visited this node and set the flag did not
1232 return yet. We are computing a value in a loop and need to
1233 break the recursion without knowing the result yet.
1234 @@@ strange case. Straight forward we would create a Phi before
1235 starting the computation of it's predecessors. In this case we will
1236 find a Phi here in any case. The problem is that this implementation
1237 only creates a Phi after computing the predecessors, so that it is
1238 hard to compute self references of this Phi. @@@
1239 There is no simple check for the second subcase. Therefore we check
1240 for a second visit and treat all such cases as the second subcase.
1241 Anyways, the basic situation is the same: we reached a block
1242 on two paths without finding a definition of the value: No Phi
1243 nodes are needed on both paths.
1244 We return this information "Two paths, no Phi needed" by a very tricky
1245 implementation that relies on the fact that an obstack is a stack and
1246 will return a node with the same address on different allocations.
1247 Look also at phi_merge and new_rd_phi_in to understand this.
1248 @@@ Unfortunately this does not work, see testprogram
1249 three_cfpred_example.
1253 /* case 4 -- already visited. */
1254 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1256 /* visited the first time */
1257 set_irn_visited(block, get_irg_visited(current_ir_graph));
1259 /* Get the local valid value */
1260 res = block->attr.block.graph_arr[pos];
1262 /* case 2 -- If the value is actually computed, return it. */
1263 if (res) { return res;};
1265 if (block->attr.block.matured) { /* case 3 */
1267 /* The Phi has the same amount of ins as the corresponding block. */
1268 int ins = get_irn_arity(block);
1270 NEW_ARR_A (ir_node *, nin, ins);
1272 /* Phi merge collects the predecessors and then creates a node. */
1273 res = phi_merge (block, pos, mode, nin, ins);
1275 } else { /* case 1 */
1276 /* The block is not mature, we don't know how many in's are needed. A Phi
1277 with zero predecessors is created. Such a Phi node is called Phi0
1278 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1279 to the list of Phi0 nodes in this block to be matured by mature_block
1281 The Phi0 has to remember the pos of it's internal value. If the real
1282 Phi is computed, pos is used to update the array with the local
1285 res = new_rd_Phi0 (current_ir_graph, block, mode);
1286 res->attr.phi0_pos = pos;
1287 res->link = block->link;
1291 /* If we get here, the frontend missed a use-before-definition error */
1294 printf("Error: no value set. Use of undefined variable. Initializing
1296 assert (mode->code >= irm_f && mode->code <= irm_p);
1297 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1298 tarval_mode_null[mode->code]);
1301 /* The local valid value is available now. */
1302 block->attr.block.graph_arr[pos] = res;
1309 /** This is the simple algorithm. It first generates a Phi0, then
1310 it starts the recursion. This causes an Id at the entry of
1311 every block that has no definition of the value! **/
1313 #if USE_EXPLICIT_PHI_IN_STACK
1315 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1316 void free_Phi_in_stack(Phi_in_stack *s) { }
1320 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1321 ir_node **in, int ins)
1324 ir_node *res, *known;
1326 /* Allocate a new node on the obstack. The allocation copies the in
1328 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1330 /* This loop checks whether the Phi has more than one predecessor.
1331 If so, it is a real Phi node and we break the loop. Else the
1332 Phi node merges the same definition on several paths and therefore
1333 is not needed. Don't consider Bad nodes! */
1335 for (i=0; i < ins; ++i)
1339 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1347 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1350 obstack_free (current_ir_graph->obst, res);
1353 /* A undefined value, e.g., in unreachable code. */
1357 res = optimize (res);
1359 /* Memory Phis in endless loops must be kept alive.
1360 As we can't distinguish these easily we keep all of the alive. */
1361 if ((res->op == op_Phi) && (mode == mode_M))
1362 add_End_keepalive(irg->end, res);
1369 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1371 #if PRECISE_EXC_CONTEXT
1372 static inline ir_node *
1373 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1376 new_frag_arr (ir_node *n) {
1379 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1380 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1381 sizeof(ir_node *)*current_ir_graph->n_loc);
1382 /* turn off optimization before allocating Proj nodes, as res isn't
1384 opt = get_optimize(); set_optimize(0);
1385 /* Here we rely on the fact that all frag ops have Memory as first result! */
1386 if (get_irn_op(n) == op_Call)
1387 arr[0] = new_Proj(n, mode_M, 3);
1389 arr[0] = new_Proj(n, mode_M, 0);
1391 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1396 get_frag_arr (ir_node *n) {
1397 if (get_irn_op(n) == op_Call) {
1398 return n->attr.call.frag_arr;
1399 } else if (get_irn_op(n) == op_Alloc) {
1400 return n->attr.a.frag_arr;
1402 return n->attr.frag_arr;
1407 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1408 if (!frag_arr[pos]) frag_arr[pos] = val;
1409 if (frag_arr[current_ir_graph->n_loc - 1])
1410 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
1414 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1418 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1420 frag_arr = get_frag_arr(cfOp);
1421 res = frag_arr[pos];
1423 if (block->attr.block.graph_arr[pos]) {
1424 /* There was a set_value after the cfOp and no get_value before that
1425 set_value. We must build a Phi node now. */
1426 if (block->attr.block.matured) {
1427 int ins = get_irn_arity(block);
1429 NEW_ARR_A (ir_node *, nin, ins);
1430 res = phi_merge(block, pos, mode, nin, ins);
1432 res = new_rd_Phi0 (current_ir_graph, block, mode);
1433 res->attr.phi0_pos = pos;
1434 res->link = block->link;
1438 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1439 but this should be better: (remove comment if this works) */
1440 /* It's a Phi, we can write this into all graph_arrs with NULL */
1441 set_frag_value(block->attr.block.graph_arr, pos, res);
1443 res = get_r_value_internal(block, pos, mode);
1444 set_frag_value(block->attr.block.graph_arr, pos, res);
1451 /** This function allocates a dummy Phi node to break recursions,
1452 computes the predecessors for the real phi node, and then
1453 allocates and returns this node. The routine called to allocate the
1454 node might optimize it away and return a real value.
1455 This function is called with an in-array of proper size. **/
1456 static inline ir_node *
1457 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1459 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1462 /* If this block has no value at pos create a Phi0 and remember it
1463 in graph_arr to break recursions.
1464 Else we may not set graph_arr as there a later value is remembered. */
1466 if (!block->attr.block.graph_arr[pos]) {
1467 /* This is commented out as collapsing to Bads is no good idea.
1468 Either we need an assert here, or we need to call a routine
1469 that deals with this case as appropriate for the given language.
1470 Right now a self referencing Id is created which will crash irg_vrfy().
1472 Even if all variables are defined before use, it can happen that
1473 we get to the start block, if a cond has been replaced by a tuple
1474 (bad, jmp). As the start has a self referencing control flow edge,
1475 we get a self referencing Id, which is hard to optimize away. We avoid
1476 this by defining the value as a Bad node.
1477 Returning a const with tarval_bad is a preliminary solution. In some
1478 situations we might want a Warning or an Error. */
1480 if (block == get_irg_start_block(current_ir_graph)) {
1481 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1482 /* We don't need to care about exception ops in the start block.
1483 There are none by definition. */
1484 return block->attr.block.graph_arr[pos];
1486 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1487 block->attr.block.graph_arr[pos] = phi0;
1488 #if PRECISE_EXC_CONTEXT
1489 /* Set graph_arr for fragile ops. Also here we should break recursion.
1490 We could choose a cyclic path through an cfop. But the recursion would
1491 break at some point. */
1492 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1497 /* This loop goes to all predecessor blocks of the block the Phi node
1498 is in and there finds the operands of the Phi node by calling
1499 get_r_value_internal. */
1500 for (i = 1; i <= ins; ++i) {
1501 prevCfOp = skip_Proj(block->in[i]);
1503 if (is_Bad(prevCfOp)) {
1504 /* In case a Cond has been optimized we would get right to the start block
1505 with an invalid definition. */
1506 nin[i-1] = new_Bad();
1509 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1511 if (!is_Bad(prevBlock)) {
1512 #if PRECISE_EXC_CONTEXT
1513 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1514 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1515 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1518 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1520 nin[i-1] = new_Bad();
1524 /* After collecting all predecessors into the array nin a new Phi node
1525 with these predecessors is created. This constructor contains an
1526 optimization: If all predecessors of the Phi node are identical it
1527 returns the only operand instead of a new Phi node. */
1528 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1530 /* In case we allocated a Phi0 node at the beginning of this procedure,
1531 we need to exchange this Phi0 with the real Phi. */
1533 exchange(phi0, res);
1534 block->attr.block.graph_arr[pos] = res;
1535 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1536 only an optimization. */
1542 /* This function returns the last definition of a variable. In case
1543 this variable was last defined in a previous block, Phi nodes are
1544 inserted. If the part of the firm graph containing the definition
1545 is not yet constructed, a dummy Phi node is returned. */
1547 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1550 /* There are 4 cases to treat.
1552 1. The block is not mature and we visit it the first time. We can not
1553 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1554 predecessors is returned. This node is added to the linked list (field
1555 "link") of the containing block to be completed when this block is
1556 matured. (Comlpletion will add a new Phi and turn the Phi0 into an Id
1559 2. The value is already known in this block, graph_arr[pos] is set and we
1560 visit the block the first time. We can return the value without
1561 creating any new nodes.
1563 3. The block is mature and we visit it the first time. A Phi node needs
1564 to be created (phi_merge). If the Phi is not needed, as all it's
1565 operands are the same value reaching the block through different
1566 paths, it's optimized away and the value itself is returned.
1568 4. The block is mature, and we visit it the second time. Now two
1569 subcases are possible:
1570 * The value was computed completely the last time we were here. This
1571 is the case if there is no loop. We can return the proper value.
1572 * The recursion that visited this node and set the flag did not
1573 return yet. We are computing a value in a loop and need to
1574 break the recursion. This case only happens if we visited
1575 the same block with phi_merge before, which inserted a Phi0.
1576 So we return the Phi0.
1579 /* case 4 -- already visited. */
1580 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1581 /* As phi_merge allocates a Phi0 this value is always defined. Here
1582 is the critical difference of the two algorithms. */
1583 assert(block->attr.block.graph_arr[pos]);
1584 return block->attr.block.graph_arr[pos];
1587 /* visited the first time */
1588 set_irn_visited(block, get_irg_visited(current_ir_graph));
1590 /* Get the local valid value */
1591 res = block->attr.block.graph_arr[pos];
1593 /* case 2 -- If the value is actually computed, return it. */
1594 if (res) { return res; };
1596 if (block->attr.block.matured) { /* case 3 */
1598 /* The Phi has the same amount of ins as the corresponding block. */
1599 int ins = get_irn_arity(block);
1601 NEW_ARR_A (ir_node *, nin, ins);
1603 /* Phi merge collects the predecessors and then creates a node. */
1604 res = phi_merge (block, pos, mode, nin, ins);
1606 } else { /* case 1 */
1607 /* The block is not mature, we don't know how many in's are needed. A Phi
1608 with zero predecessors is created. Such a Phi node is called Phi0
1609 node. The Phi0 is then added to the list of Phi0 nodes in this block
1610 to be matured by mature_block later.
1611 The Phi0 has to remember the pos of it's internal value. If the real
1612 Phi is computed, pos is used to update the array with the local
1614 res = new_rd_Phi0 (current_ir_graph, block, mode);
1615 res->attr.phi0_pos = pos;
1616 res->link = block->link;
1620 /* If we get here, the frontend missed a use-before-definition error */
1623 printf("Error: no value set. Use of undefined variable. Initializing
1625 assert (mode->code >= irm_f && mode->code <= irm_p);
1626 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1627 tarval_mode_null[mode->code]);
1630 /* The local valid value is available now. */
1631 block->attr.block.graph_arr[pos] = res;
1636 #endif /* USE_FAST_PHI_CONSTRUCTION */
1638 /* ************************************************************************** */
1640 /** Finalize a Block node, when all control flows are known. */
1641 /** Acceptable parameters are only Block nodes. */
1643 mature_block (ir_node *block)
1650 assert (get_irn_opcode(block) == iro_Block);
1651 // assert (!get_Block_matured(block) && "Block already matured");
1653 if (!get_Block_matured(block)) {
1655 /* An array for building the Phi nodes. */
1656 ins = ARR_LEN (block->in)-1;
1657 NEW_ARR_A (ir_node *, nin, ins);
1658 /* shouldn't we delete this array at the end of the procedure? @@@ memory leak? */
1660 /* Traverse a chain of Phi nodes attached to this block and mature
1662 for (n = block->link; n; n=next) {
1663 inc_irg_visited(current_ir_graph);
1665 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1668 block->attr.block.matured = 1;
1670 /* Now, as the block is a finished firm node, we can optimize it.
1671 Since other nodes have been allocated since the block was created
1672 we can not free the node on the obstack. Therefore we have to call
1674 Unfortunately the optimization does not change a lot, as all allocated
1675 nodes refer to the unoptimized node.
1676 We can call _2, as global cse has no effect on blocks. */
1677 block = optimize_in_place_2(block);
1683 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1685 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
1690 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1692 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1697 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1699 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1704 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1706 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
1711 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1714 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
1715 arg->attr.c.kind = fragmentary;
1716 arg->attr.c.default_proj = max_proj;
1717 res = new_Proj (arg, mode_X, max_proj);
1722 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1724 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1729 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1731 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1736 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1738 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1743 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1745 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1751 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1753 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1758 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1760 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1765 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1768 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1770 #if PRECISE_EXC_CONTEXT
1771 if ((current_ir_graph->phase_state == phase_building) &&
1772 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1773 res->attr.frag_arr = new_frag_arr(res);
1780 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1783 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1785 #if PRECISE_EXC_CONTEXT
1786 if ((current_ir_graph->phase_state == phase_building) &&
1787 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1788 res->attr.frag_arr = new_frag_arr(res);
1795 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1798 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1800 #if PRECISE_EXC_CONTEXT
1801 if ((current_ir_graph->phase_state == phase_building) &&
1802 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1803 res->attr.frag_arr = new_frag_arr(res);
1810 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1813 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1815 #if PRECISE_EXC_CONTEXT
1816 if ((current_ir_graph->phase_state == phase_building) &&
1817 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1818 res->attr.frag_arr = new_frag_arr(res);
1825 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1827 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1832 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1834 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1839 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1841 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1846 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1848 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1853 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1855 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1860 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1862 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1867 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1869 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
1874 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1876 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
1881 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
1883 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
1888 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
1890 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
1895 new_d_Jmp (dbg_info* db)
1897 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
1901 new_d_Cond (dbg_info* db, ir_node *c)
1903 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
1907 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
1911 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
1912 store, callee, arity, in, type);
1913 #if PRECISE_EXC_CONTEXT
1914 if ((current_ir_graph->phase_state == phase_building) &&
1915 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
1916 res->attr.call.frag_arr = new_frag_arr(res);
1923 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
1925 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
1930 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
1932 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
1937 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
1940 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
1942 #if PRECISE_EXC_CONTEXT
1943 if ((current_ir_graph->phase_state == phase_building) &&
1944 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
1945 res->attr.frag_arr = new_frag_arr(res);
1952 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
1955 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
1957 #if PRECISE_EXC_CONTEXT
1958 if ((current_ir_graph->phase_state == phase_building) &&
1959 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
1960 res->attr.frag_arr = new_frag_arr(res);
1967 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
1971 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
1972 store, size, alloc_type, where);
1973 #if PRECISE_EXC_CONTEXT
1974 if ((current_ir_graph->phase_state == phase_building) &&
1975 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
1976 res->attr.a.frag_arr = new_frag_arr(res);
1983 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1985 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
1986 store, ptr, size, free_type);
1990 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
1991 /* GL: objptr was called frame before. Frame was a bad choice for the name
1992 as the operand could as well be a pointer to a dynamic object. */
1994 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
1995 store, objptr, 0, NULL, ent);
1999 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2001 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2002 store, objptr, n_index, index, sel);
2006 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2008 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2009 store, objptr, ent));
2013 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2015 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->current_block,
2020 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2022 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2030 return current_ir_graph->bad;
2034 new_d_Unknown (void)
2036 return current_ir_graph->unknown;
2040 new_d_CallBegin (dbg_info *db, ir_node *call)
2043 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2048 new_d_EndReg (dbg_info *db)
2051 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2056 new_d_EndExcept (dbg_info *db)
2059 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2064 new_d_Break (dbg_info *db)
2066 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2070 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2072 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2076 /* ********************************************************************* */
2077 /* Comfortable interface with automatic Phi node construction. */
2078 /* (Uses also constructors of ?? interface, except new_Block. */
2079 /* ********************************************************************* */
2081 /** Block construction **/
2082 /* immature Block without predecessors */
2083 ir_node *new_d_immBlock (dbg_info* db) {
2086 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2087 /* creates a new dynamic in-array as length of in is -1 */
2088 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_R, -1, NULL);
2089 current_ir_graph->current_block = res;
2090 res->attr.block.matured = 0;
2091 res->attr.block.exc = exc_normal;
2092 res->attr.block.handler_entry = 0;
2093 res->attr.block.in_cg = NULL;
2094 set_Block_block_visited(res, 0);
2096 /* Create and initialize array for Phi-node construction. */
2097 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2098 current_ir_graph->n_loc);
2099 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2101 /* Immature block may not be optimized! */
2109 return new_d_immBlock(NULL);
2112 /* add an edge to a jmp/control flow node */
2114 add_in_edge (ir_node *block, ir_node *jmp)
2116 if (block->attr.block.matured) {
2117 assert(0 && "Error: Block already matured!\n");
2120 assert (jmp != NULL);
2121 ARR_APP1 (ir_node *, block->in, jmp);
2125 /* changing the current block */
2127 switch_block (ir_node *target)
2129 current_ir_graph->current_block = target;
2132 /* ************************ */
2133 /* parameter administration */
2135 /* get a value from the parameter array from the current block by its index */
2137 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2139 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2140 inc_irg_visited(current_ir_graph);
2142 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2144 /* get a value from the parameter array from the current block by its index */
2146 get_value (int pos, ir_mode *mode)
2148 return get_d_value(NULL, pos, mode);
2151 /* set a value at position pos in the parameter array from the current block */
2153 set_value (int pos, ir_node *value)
2155 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2156 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2159 /* get the current store */
2163 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2164 /* GL: one could call get_value instead */
2165 inc_irg_visited(current_ir_graph);
2166 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2169 /* set the current store */
2171 set_store (ir_node *store)
2173 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2174 /* GL: one could call set_value instead */
2175 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2179 keep_alive (ir_node *ka)
2181 add_End_keepalive(current_ir_graph->end, ka);
2184 /** Useful access routines **/
2185 /* Returns the current block of the current graph. To set the current
2186 block use switch_block(). */
2187 ir_node *get_cur_block() {
2188 return get_irg_current_block(current_ir_graph);
2191 /* Returns the frame type of the current graph */
2192 type *get_cur_frame_type() {
2193 return get_irg_frame_type(current_ir_graph);
2197 /* ********************************************************************* */
2200 /* call once for each run of the library */
2206 /* call for each graph */
2208 finalize_cons (ir_graph *irg) {
2209 irg->phase_state = phase_high;
2213 ir_node *new_Block(int arity, ir_node **in) {
2214 return new_d_Block(NULL, arity, in);
2216 ir_node *new_Start (void) {
2217 return new_d_Start(NULL);
2219 ir_node *new_End (void) {
2220 return new_d_End(NULL);
2222 ir_node *new_Jmp (void) {
2223 return new_d_Jmp(NULL);
2225 ir_node *new_Cond (ir_node *c) {
2226 return new_d_Cond(NULL, c);
2228 ir_node *new_Return (ir_node *store, int arity, ir_node **in) {
2229 return new_d_Return(NULL, store, arity, in);
2231 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2232 return new_d_Raise(NULL, store, obj);
2234 ir_node *new_Const (ir_mode *mode, tarval *con) {
2235 return new_d_Const(NULL, mode, con);
2237 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2238 return new_d_SymConst(NULL, value, kind);
2240 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2241 return new_d_simpleSel(NULL, store, objptr, ent);
2243 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2245 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2247 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2248 return (new_d_InstOf (NULL, store, objptr, ent));
2250 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2252 return new_d_Call(NULL, store, callee, arity, in, type);
2254 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2255 return new_d_Add(NULL, op1, op2, mode);
2257 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2258 return new_d_Sub(NULL, op1, op2, mode);
2260 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2261 return new_d_Minus(NULL, op, mode);
2263 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2264 return new_d_Mul(NULL, op1, op2, mode);
2266 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2267 return new_d_Quot(NULL, memop, op1, op2);
2269 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2270 return new_d_DivMod(NULL, memop, op1, op2);
2272 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2273 return new_d_Div(NULL, memop, op1, op2);
2275 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2276 return new_d_Mod(NULL, memop, op1, op2);
2278 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2279 return new_d_Abs(NULL, op, mode);
2281 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2282 return new_d_And(NULL, op1, op2, mode);
2284 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2285 return new_d_Or(NULL, op1, op2, mode);
2287 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2288 return new_d_Eor(NULL, op1, op2, mode);
2290 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2291 return new_d_Not(NULL, op, mode);
2293 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2294 return new_d_Shl(NULL, op, k, mode);
2296 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2297 return new_d_Shr(NULL, op, k, mode);
2299 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2300 return new_d_Shrs(NULL, op, k, mode);
2302 #define new_Rotate new_Rot
2303 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2304 return new_d_Rot(NULL, op, k, mode);
2306 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2307 return new_d_Cmp(NULL, op1, op2);
2309 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2310 return new_d_Conv(NULL, op, mode);
2312 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2313 return new_d_Phi(NULL, arity, in, mode);
2315 ir_node *new_Load (ir_node *store, ir_node *addr) {
2316 return new_d_Load(NULL, store, addr);
2318 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2319 return new_d_Store(NULL, store, addr, val);
2321 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2322 where_alloc where) {
2323 return new_d_Alloc(NULL, store, size, alloc_type, where);
2325 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2327 return new_d_Free(NULL, store, ptr, size, free_type);
2329 ir_node *new_Sync (int arity, ir_node **in) {
2330 return new_d_Sync(NULL, arity, in);
2332 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2333 return new_d_Proj(NULL, arg, mode, proj);
2335 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2336 return new_d_defaultProj(NULL, arg, max_proj);
2338 ir_node *new_Tuple (int arity, ir_node **in) {
2339 return new_d_Tuple(NULL, arity, in);
2341 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2342 return new_d_Id(NULL, val, mode);
2344 ir_node *new_Bad (void) {
2347 ir_node *new_Unknown(void) {
2348 return new_d_Unknown();
2350 ir_node *new_CallBegin (ir_node *callee) {
2351 return new_d_CallBegin(NULL, callee);
2353 ir_node *new_EndReg (void) {
2354 return new_d_EndReg(NULL);
2356 ir_node *new_EndExcept (void) {
2357 return new_d_EndExcept(NULL);
2359 ir_node *new_Break (void) {
2360 return new_d_Break(NULL);
2362 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2363 return new_d_Filter(NULL, arg, mode, proj);