1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Authors: Martin Trapp, Christian Schaefer
6 ** ircons.c: basic and more detailed irnode constructors
7 ** store, block and parameter administration.
8 ** Adapted to extended FIRM nodes (exceptions...) and commented
9 ** by Goetz Lindenmaier
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "common_t.h"
28 /* memset belongs to string.h */
31 /* # include "exc.h" */
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35    Phi_in. Redefinition in irgraph.c!! */
/* Opaque here: the struct body lives in irgraph.c; this file only passes
   pointers to it around. */
40 typedef struct Phi_in_stack Phi_in_stack;
43 /*** ******************************************** */
44 /** private interfaces, for professional use only */
46 /* Constructs a Block with a fixed number of predecessors.
47 Does not set current_block. Can not be used with automatic
48 Phi node construction. */
/* NOTE(review): this listing is sampled -- the return type, braces, the
   declaration of `res` and the trailing return are not visible in this
   excerpt.  Comments below describe only the visible statements. */
50 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* Block nodes carry no explicit block predecessor, hence NULL; mode_R is
   the mode of basic blocks. */
54   res = new_ir_node (db, irg, NULL, op_Block, mode_R, arity, in);
/* A block built with a fixed predecessor array is complete immediately. */
55   set_Block_matured(res, 1);
56   set_Block_block_visited(res, 0);
/* Default attributes: no exception semantics, no interprocedural inputs. */
58   res->attr.block.exc = exc_normal;
59   res->attr.block.in_cg = NULL;
/* Start node: produces the initial execution tuple (mode_T), no inputs. */
66 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
70   res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
/* End node: arity -1 requests a dynamic in-array so keep-alive edges can be
   appended later (see add_End_keepalive). */
77 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
81   res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
87 /* Creates a Phi node with all predecessors. Calling this constructor
88 is only allowed if the corresponding block is mature. */
90 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* Only legal on mature blocks: the Phi must have exactly one operand per
   control-flow predecessor of its block. */
94   assert( get_Block_matured(block) );
95   assert( get_irn_arity(block) == arity );
97   res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
102   /* Memory Phis in endless loops must be kept alive.
103      As we can't distinguish these easily we keep all of them alive. */
/* NOTE(review): the re-check of res->op suggests an optimize() call was
   sampled out between these lines -- confirm against the full source. */
104   if ((res->op == op_Phi) && (mode == mode_M))
105     add_End_keepalive(irg->end, res);
/* Const: zero-input leaf carrying a target value (tarval). */
110 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
113   res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
115   res = optimize (res);
/* Second optimization pass; presumably CSE/constant folding after the
   attribute was set -- the attribute assignment is not visible here. */
119   res = local_optimize_newby (res);
/* Id: forwards a single value unchanged; typically removed by optimize. */
126 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
128   ir_node *in[1] = {val};
130   res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
131   res = optimize (res);
/* Proj: selects result number `proj` out of a mode_T tuple node `arg`. */
137 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
140   ir_node *in[1] = {arg};
142   res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
143   res->attr.proj = proj;
/* Sanity: a Proj must always hang off a real predecessor in a block. */
146   assert(get_Proj_pred(res));
147   assert(get_nodes_Block(get_Proj_pred(res)));
149   res = optimize (res);
/* defaultProj: the "otherwise" exit of a Cond used as a switch.  Marks the
   Cond as fragmentary and records which Proj number is the default. */
157 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
/* Only meaningful for integer-selector Conds (switch, not boolean). */
161   assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
162   arg->attr.c.kind = fragmentary;
163   arg->attr.c.default_proj = max_proj;
/* Control-flow result, hence mode_X. */
164   res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Conv: converts `op` to the given mode. */
169 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
171   ir_node *in[1] = {op};
173   res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
174   res = optimize (res);
/* Tuple: groups `arity` values into one mode_T node (Proj to unpack). */
181 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
185   res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
186   res = optimize (res);
/* ---------------------------------------------------------------------
   Arithmetic / logic / shift / compare constructors.  All follow the same
   pattern: build an in-array on the stack, allocate the node, run the
   local optimizer.  The division family (Quot, DivMod, Div, Mod) takes an
   additional memory operand and yields a mode_T tuple (memory, result,
   exception projections); Cmp also yields mode_T (one Proj per relation).
   --------------------------------------------------------------------- */
192 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
193 ir_node *op1, ir_node *op2, ir_mode *mode)
195   ir_node *in[2] = {op1, op2};
197   res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
198   res = optimize (res);
204 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
205 ir_node *op1, ir_node *op2, ir_mode *mode)
207   ir_node *in[2] = {op1, op2};
209   res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
210   res = optimize (res);
216 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
217 ir_node *op, ir_mode *mode)
219   ir_node *in[1] = {op};
221   res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
222   res = optimize (res);
228 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
229 ir_node *op1, ir_node *op2, ir_mode *mode)
231   ir_node *in[2] = {op1, op2};
233   res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
234   res = optimize (res);
/* Quot/DivMod/Div/Mod: memop threads the memory state (division may trap). */
240 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
241 ir_node *memop, ir_node *op1, ir_node *op2)
243   ir_node *in[3] = {memop, op1, op2};
245   res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
246   res = optimize (res);
252 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
253 ir_node *memop, ir_node *op1, ir_node *op2)
255   ir_node *in[3] = {memop, op1, op2};
257   res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
258   res = optimize (res);
264 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
265 ir_node *memop, ir_node *op1, ir_node *op2)
267   ir_node *in[3] = {memop, op1, op2};
269   res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
270   res = optimize (res);
276 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
277 ir_node *memop, ir_node *op1, ir_node *op2)
279   ir_node *in[3] = {memop, op1, op2};
281   res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
282   res = optimize (res);
288 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
289 ir_node *op1, ir_node *op2, ir_mode *mode)
291   ir_node *in[2] = {op1, op2};
293   res = new_ir_node (db, irg, block, op_And, mode, 2, in);
294   res = optimize (res);
300 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
301 ir_node *op1, ir_node *op2, ir_mode *mode)
303   ir_node *in[2] = {op1, op2};
305   res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
306   res = optimize (res);
312 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
313 ir_node *op1, ir_node *op2, ir_mode *mode)
315   ir_node *in[2] = {op1, op2};
317   res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
318   res = optimize (res);
324 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
325 ir_node *op, ir_mode *mode)
327   ir_node *in[1] = {op};
329   res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
330   res = optimize (res);
/* Shift family: `k` is the shift/rotate amount. */
336 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
337 ir_node *op, ir_node *k, ir_mode *mode)
339   ir_node *in[2] = {op, k};
341   res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
342   res = optimize (res);
348 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
349 ir_node *op, ir_node *k, ir_mode *mode)
351   ir_node *in[2] = {op, k};
353   res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
354   res = optimize (res);
360 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
361 ir_node *op, ir_node *k, ir_mode *mode)
363   ir_node *in[2] = {op, k};
365   res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
366   res = optimize (res);
372 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
373 ir_node *op, ir_node *k, ir_mode *mode)
375   ir_node *in[2] = {op, k};
377   res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
378   res = optimize (res);
384 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
385 ir_node *op, ir_mode *mode)
387   ir_node *in[1] = {op};
389   res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
390   res = optimize (res);
/* Cmp: mode_T result; individual relations are selected by Proj. */
396 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
397 ir_node *op1, ir_node *op2)
399   ir_node *in[2] = {op1, op2};
401   res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
402   res = optimize (res);
/* Jmp: unconditional control flow out of `block` (mode_X, zero inputs). */
408 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
/* NOTE(review): `in` is not declared in the visible excerpt; with arity 0
   it is never read -- confirm the full source declares it (or passes NULL). */
412   res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, in);
413   res = optimize (res);
/* Cond: conditional branch on selector `c`; starts life as a dense switch
   with default Proj 0.  Targets are selected via Proj nodes. */
419 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
421   ir_node *in[1] = {c};
423   res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
424   res->attr.c.kind = dense;
425   res->attr.c.default_proj = 0;
426   res = optimize (res);
/* Call: in-array layout is [store, callee, arg0..argN-1]; the actual
   arguments are copied in starting at index 2.  The r_arity computation
   (presumably arity + 2) is not visible in this excerpt. */
432 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
433 ir_node *callee, int arity, ir_node **in, type *type)
/* NEW_ARR_A: alloca-style array, valid only for this call frame. */
440   NEW_ARR_A (ir_node *, r_in, r_arity);
443   memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
445   res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The callee's static type must be a method type. */
447   assert(is_method_type(type));
448   set_Call_type(res, type);
/* No precomputed callee set yet (filled in by call-graph analysis). */
449   res->attr.call.callee_arr = NULL;
450   res = optimize (res);
/* Return: in-array layout is [store, res0..resN-1]; results copied in at
   index 1.  r_arity (presumably arity + 1) is computed on a sampled-out line. */
456 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
457 ir_node *store, int arity, ir_node **in)
464   NEW_ARR_A (ir_node *, r_in, r_arity);
466   memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
467   res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
468   res = optimize (res);
/* Raise: throws exception object `obj`; threads the memory state. */
474 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
476   ir_node *in[2] = {store, obj};
478   res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
479   res = optimize (res);
/* Load: reads from address `adr`; mode_T result carries the new memory
   state, the loaded value and exception control flow (via Projs). */
485 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
486 ir_node *store, ir_node *adr)
488   ir_node *in[2] = {store, adr};
490   res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
492   res = optimize (res);
/* Store: writes `val` to address `adr`, producing a new memory state. */
498 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
499 ir_node *store, ir_node *adr, ir_node *val)
501   ir_node *in[3] = {store, adr, val};
503   res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
505   res = optimize (res);
/* Alloc: allocates `size` bytes of type `alloc_type`; `where` selects
   stack vs. heap allocation. */
512 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
513 ir_node *size, type *alloc_type, where_alloc where)
515   ir_node *in[2] = {store, size};
517   res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
519   res->attr.a.where = where;
520   res->attr.a.type = alloc_type;
522   res = optimize (res);
/* Free: releases the object at `ptr` of `free_type`. */
528 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
529 ir_node *ptr, ir_node *size, type *free_type)
531   ir_node *in[3] = {store, ptr, size};
533   res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
535   res->attr.f = free_type;
537   res = optimize (res);
/* Sel: selects entity `ent` relative to `objptr` (field access / array
   indexing).  In-array layout is [store, objptr, index0..]; indices are
   copied in at offset 2.  r_arity is computed on a sampled-out line. */
543 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
544 int arity, ir_node **in, entity *ent)
551   NEW_ARR_A (ir_node *, r_in, r_arity);
554   memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
/* Result is an address, hence pointer mode. */
555   res = new_ir_node (db, irg, block, op_Sel, mode_p, r_arity, r_in);
557   res->attr.s.ltyp = static_linkage;
558   res->attr.s.ent = ent;
560   res = optimize (res);
/* SymConst: symbolic constant -- an address of a linker symbol, a type
   tag, or a type size, depending on `symkind`.  The mode selection
   branch bodies are sampled out of this excerpt. */
566 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
567 symconst_kind symkind)
572   if (symkind == linkage_ptr_info)
576   res = new_ir_node (db, irg, block, op_SymConst, mode, 0, in);
578   res->attr.i.num = symkind;
/* The attribute union holds either an ident (symbol name) or a type. */
579   if (symkind == linkage_ptr_info) {
580     res->attr.i.tori.ptrinfo = (ident *)value;
582     assert ( ( (symkind == type_tag)
583 || (symkind == size))
584 && (is_type(value)));
585     res->attr.i.tori.typ = (type *)value;
587   res = optimize (res);
/* Sync: joins several memory states into one (mode_M). */
593 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
597   res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
599   res = optimize (res);
/* Bad / Unknown: return the per-graph singleton nodes; never allocated anew. */
607   return current_ir_graph->bad;
613   return current_ir_graph->unknown;
/* CallBegin: interprocedural-view counterpart of a Call; its single input
   is the callee address taken from the original Call node. */
617 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
619   ir_node *in[1] = { get_Call_ptr(call) };
621   res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
622   res->attr.callbegin.irg = irg;
623   res->attr.callbegin.call = call;
624   res = optimize (res);
/* EndReg / EndExcept: interprocedural end nodes for regular resp.
   exceptional control flow; arity -1 gives them dynamic in-arrays.
   Note: unlike most constructors, no optimize() call is visible here. */
630 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
634   res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
635   res->attr.end.irg = irg;
642 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
646   res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
647   res->attr.end.irg = irg;
/* Break: jump out of an interprocedurally inlined region (mode_X).
   NOTE(review): as in new_rd_Jmp, `in` is not declared in the visible
   excerpt; harmless with arity 0 but confirm against the full source. */
654 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
658   res = new_ir_node (db, irg, block, op_Break, mode_X, 0, in);
659   res = optimize (res);
/* Filter: interprocedural analog of Proj -- selects result `proj` of `arg`
   and can later be given interprocedural predecessors (in_cg). */
665 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
668   ir_node *in[1] = {arg};
670   res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
671   res->attr.filter.proj = proj;
672   res->attr.filter.in_cg = NULL;
675   assert(get_Proj_pred(res));
676   assert(get_nodes_Block(get_Proj_pred(res)));
678   res = optimize (res);
/* ---------------------------------------------------------------------
   new_r_* convenience wrappers: identical to the corresponding new_rd_*
   constructors, with the debug-info argument fixed to NULL.  Pure
   forwarding -- no additional logic.
   --------------------------------------------------------------------- */
685 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
686   return new_rd_Block(NULL, irg, arity, in);
688 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
689   return new_rd_Start(NULL, irg, block);
691 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
692   return new_rd_End(NULL, irg, block);
694 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
695   return new_rd_Jmp(NULL, irg, block);
697 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
698   return new_rd_Cond(NULL, irg, block, c);
700 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
701 ir_node *store, int arity, ir_node **in) {
702   return new_rd_Return(NULL, irg, block, store, arity, in);
704 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
705 ir_node *store, ir_node *obj) {
706   return new_rd_Raise(NULL, irg, block, store, obj);
708 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
709 ir_mode *mode, tarval *con) {
710   return new_rd_Const(NULL, irg, block, mode, con);
712 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
713 type_or_id_p value, symconst_kind symkind) {
714   return new_rd_SymConst(NULL, irg, block, value, symkind);
716 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
717 ir_node *objptr, int n_index, ir_node **index,
719   return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
721 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
722 ir_node *callee, int arity, ir_node **in,
724   return new_rd_Call(NULL, irg, block, store, callee, arity, in, type);
726 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
727 ir_node *op1, ir_node *op2, ir_mode *mode) {
728   return new_rd_Add(NULL, irg, block, op1, op2, mode);
730 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
731 ir_node *op1, ir_node *op2, ir_mode *mode) {
732   return new_rd_Sub(NULL, irg, block, op1, op2, mode);
734 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
735 ir_node *op, ir_mode *mode) {
736   return new_rd_Minus(NULL, irg, block, op, mode);
738 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
739 ir_node *op1, ir_node *op2, ir_mode *mode) {
740   return new_rd_Mul(NULL, irg, block, op1, op2, mode);
742 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
743 ir_node *memop, ir_node *op1, ir_node *op2) {
744   return new_rd_Quot(NULL, irg, block, memop, op1, op2);
746 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
747 ir_node *memop, ir_node *op1, ir_node *op2) {
748   return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
750 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
751 ir_node *memop, ir_node *op1, ir_node *op2) {
752   return new_rd_Div(NULL, irg, block, memop, op1, op2);
754 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
755 ir_node *memop, ir_node *op1, ir_node *op2) {
756   return new_rd_Mod(NULL, irg, block, memop, op1, op2);
758 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
759 ir_node *op, ir_mode *mode) {
760   return new_rd_Abs(NULL, irg, block, op, mode);
762 ir_node *new_r_And (ir_graph *irg, ir_node *block,
763 ir_node *op1, ir_node *op2, ir_mode *mode) {
764   return new_rd_And(NULL, irg, block, op1, op2, mode);
766 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
767 ir_node *op1, ir_node *op2, ir_mode *mode) {
768   return new_rd_Or(NULL, irg, block, op1, op2, mode);
770 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
771 ir_node *op1, ir_node *op2, ir_mode *mode) {
772   return new_rd_Eor(NULL, irg, block, op1, op2, mode);
774 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
775 ir_node *op, ir_mode *mode) {
776   return new_rd_Not(NULL, irg, block, op, mode);
778 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
779 ir_node *op1, ir_node *op2) {
780   return new_rd_Cmp(NULL, irg, block, op1, op2);
782 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
783 ir_node *op, ir_node *k, ir_mode *mode) {
784   return new_rd_Shl(NULL, irg, block, op, k, mode);
786 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
787 ir_node *op, ir_node *k, ir_mode *mode) {
788   return new_rd_Shr(NULL, irg, block, op, k, mode);
790 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
791 ir_node *op, ir_node *k, ir_mode *mode) {
792   return new_rd_Shrs(NULL, irg, block, op, k, mode);
794 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
795 ir_node *op, ir_node *k, ir_mode *mode) {
796   return new_rd_Rot(NULL, irg, block, op, k, mode);
798 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
799 ir_node *op, ir_mode *mode) {
800   return new_rd_Conv(NULL, irg, block, op, mode);
802 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
803 ir_node **in, ir_mode *mode) {
804   return new_rd_Phi(NULL, irg, block, arity, in, mode);
806 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
807 ir_node *store, ir_node *adr) {
808   return new_rd_Load(NULL, irg, block, store, adr);
810 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
811 ir_node *store, ir_node *adr, ir_node *val) {
812   return new_rd_Store(NULL, irg, block, store, adr, val);
814 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
815 ir_node *size, type *alloc_type, where_alloc where) {
816   return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
818 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
819 ir_node *ptr, ir_node *size, type *free_type) {
820   return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
822 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
823   return new_rd_Sync(NULL, irg, block, arity, in);
825 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
826 ir_mode *mode, long proj) {
827   return new_rd_Proj(NULL, irg, block, arg, mode, proj);
829 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
831   return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
833 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
834 int arity, ir_node **in) {
835   return new_rd_Tuple(NULL, irg, block, arity, in );
837 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
838 ir_node *val, ir_mode *mode) {
839   return new_rd_Id(NULL, irg, block, val, mode);
841 ir_node *new_r_Bad () {
844 ir_node *new_r_Unknown () {
845   return new_rd_Unknown();
847 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
848   return new_rd_CallBegin(NULL, irg, block, callee);
850 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
851   return new_rd_EndReg(NULL, irg, block);
853 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
854   return new_rd_EndExcept(NULL, irg, block);
856 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
857   return new_rd_Break(NULL, irg, block);
859 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
860 ir_mode *mode, long proj) {
861   return new_rd_Filter(NULL, irg, block, arg, mode, proj);
865 /** ********************/
866 /** public interfaces */
867 /** construction tools */
869 /****f* ircons/new_Start
872 * new_Start -- create a new Start node in the current block
875 * s = new_Start(void);
876 * ir_node* new_Start(void);
879 * s - pointer to the created Start node
/* new_d_*: constructors that operate on the globally current graph and its
   current block (the "public" construction interface). */
884 new_d_Start (dbg_info* db)
888   res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
889 op_Start, mode_T, 0, NULL);
891   res = optimize (res);
897 new_d_End (dbg_info* db)
900   res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
901 op_End, mode_X, -1, NULL);
902   res = optimize (res);
908 /* Constructs a Block with a fixed number of predecessors.
909    Does set current_block. Can be used with automatic Phi
910    node construction. */
912 new_d_Block (dbg_info* db, int arity, ir_node **in)
916   res = new_rd_Block (db, current_ir_graph, arity, in);
918   /* Create and initialize array for Phi-node construction. */
/* graph_arr holds the current value of each of the n_loc local variables
   for this block; NULLs mean "not yet computed here". */
919   res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
920 current_ir_graph->n_loc);
921   memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
923   res = optimize (res);
/* Subsequent new_d_* constructions are placed into this block. */
924   current_ir_graph->current_block = res;
931 /* ***********************************************************************/
932 /* Methods necessary for automatic Phi node creation */
934 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
935 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
936 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
937 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
939 Call Graph: ( A ---> B == A "calls" B)
941 get_value mature_block
949 get_r_value_internal |
953 new_rd_Phi0 new_rd_Phi_in
955 * *************************************************************************** */
957 /* Creates a Phi node with 0 predecessors */
/* Phi0: a Phi with zero predecessors, used as a placeholder while its
   block is not yet mature; completed later by mature_block. */
959 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
962   res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
967 /* There are two implementations of the Phi node construction. The first
968 is faster, but does not work for blocks with more than 2 predecessors.
969 The second works always but is slower and causes more unnecessary Phi
971 Select the implementations by the following preprocessor flag set in
973 #if USE_FAST_PHI_CONSTRUCTION
975 /* This is a stack used for allocating and deallocating nodes in
976 new_rd_Phi_in. The original implementation used the obstack
977 to model this stack, now it is explicit. This reduces side effects.
979 #if USE_EXPLICIT_PHI_IN_STACK
/* new_Phi_in_stack (fragment): allocates the stack used to recycle Phi
   nodes during construction.
   NOTE(review): the malloc result is used unchecked in the visible lines --
   confirm the full source handles allocation failure. */
984   res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
986   res->stack = NEW_ARR_F (ir_node *, 1);
993 free_Phi_in_stack(Phi_in_stack *s) {
/* Pushes a discarded Phi onto the recycling stack for later reuse. */
998 void free_to_Phi_in_stack(ir_node *phi) {
999   assert(get_irn_opcode(phi) == iro_Phi);
/* Grow the flexible array only when the slot at `pos` does not exist yet;
   otherwise overwrite the stale slot in place. */
1001   if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1002 current_ir_graph->Phi_in_stack->pos)
1003     ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1005     current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1007   (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node: a fresh one if the stack is empty, otherwise a
   recycled node re-initialized with the new block/mode/in-array. */
1011 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1012 int arity, ir_node **in) {
1014   ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1015   int pos = current_ir_graph->Phi_in_stack->pos;
1019     /* We need to allocate a new node */
/* NOTE(review): `db` is not a parameter of this function -- it should
   presumably be NULL here; verify against the full source. */
1020     res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1022     /* reuse the old node and initialize it again. */
1025     assert (res->kind == k_ir_node);
1026     assert (res->op == op_Phi);
1030     assert (arity >= 0);
1031     /* ???!!! How to free the old in array?? */
/* +1 because in[0] is reserved for the owning block. */
1032     res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1034     memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
/* The recycled slot is consumed: shrink the stack. */
1036     (current_ir_graph->Phi_in_stack->pos)--;
1040 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1042 /* Creates a Phi node with a given, fixed array **in of predecessors.
1043 If the Phi node is unnecessary, as the same value reaches the block
1044 through all control flow paths, it is eliminated and the value
1045 returned directly. This constructor is only intended for use in
1046 the automatic Phi node generation triggered by get_value or mature.
1047 The implementation is quite tricky and depends on the fact, that
1048 the nodes are allocated on a stack:
1049 The in array contains predecessors and NULLs. The NULLs appear,
1050 if get_r_value_internal, that computed the predecessors, reached
1051 the same block on two paths. In this case the same value reaches
1052 this block on both paths, there is no definition in between. We need
1053 not allocate a Phi where these path's merge, but we have to communicate
1054 this fact to the caller. This happens by returning a pointer to the
1055 node the caller _will_ allocate. (Yes, we predict the address. We can
1056 do so because the nodes are allocated on the obstack.) The caller then
1057 finds a pointer to itself and, when this routine is called again,
/* Fast-construction variant of new_rd_Phi_in: builds a Phi from `in`
   (which may contain NULLs, see the comment block above), eliminating it
   when it merges only a single distinct value. */
1061 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1062 ir_node **in, int ins)
1065   ir_node *res, *known;
1067   /* allocate a new node on the obstack.
1068      This can return a node to which some of the pointers in the in-array
1070      Attention: the constructor copies the in array, i.e., the later changes
1071      to the array in this routine do not affect the constructed node!  If
1072      the in array contains NULLs, there will be missing predecessors in the
1074      Is this a possible internal state of the Phi node generation? */
1075 #if USE_EXPLICIT_PHI_IN_STACK
1076   res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1078   res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1080   /* The in-array can contain NULLs.  These were returned by
1081      get_r_value_internal if it reached the same block/definition on a
1083      The NULLs are replaced by the node itself to simplify the test in the
1085   for (i=0; i < ins; ++i)
1086     if (in[i] == NULL) in[i] = res;
1088   /* This loop checks whether the Phi has more than one predecessor.
1089      If so, it is a real Phi node and we break the loop.  Else the
1090      Phi node merges the same definition on several paths and therefore
1092   for (i=0; i < ins; ++i)
/* Self-references and repeats of the one known value don't count. */
1094     if (in[i]==res || in[i]==known) continue;
1102   /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Deallocate the speculative Phi: recycle it or pop it off the obstack.
   This only works because the Phi is the topmost obstack allocation. */
1104 #if USE_EXPLICIT_PHI_IN_STACK
1105     free_to_Phi_in_stack(res);
1107     obstack_free (current_ir_graph->obst, res);
1111     res = optimize (res);
1115   /* return the pointer to the Phi node.  This node might be deallocated! */
1120 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1122 /** This function computes the predecessors for a real Phi node, and then
1123 allocates and returns this node. The routine called to allocate the
1124 node might optimize it away and return a real value, or even a pointer
1125 to a deallocated Phi node on top of the obstack!
1126 This function is called with an in-array of proper size. **/
1127 static inline ir_node *
/* Collects the Phi operand for local variable `pos` from every predecessor
   block, then delegates node creation (and possible elimination) to
   new_rd_Phi_in.  `nin` is a caller-provided scratch array of size `ins`. */
1128 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1130   ir_node *prevBlock, *res;
1133   /* This loop goes to all predecessor blocks of the block the Phi node is in
1134      and there finds the operands of the Phi node by calling
1135      get_r_value_internal. */
/* block->in[0] is the block's own entry, so predecessors start at 1;
   nin is 0-based, hence the i-1 below. */
1136   for (i = 1; i <= ins; ++i) {
1137     assert (block->in[i]);
1138     prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1140     nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1143   /* After collecting all predecessors into the array nin a new Phi node
1144      with these predecessors is created.  This constructor contains an
1145      optimization: If all predecessors of the Phi node are identical it
1146      returns the only operand instead of a new Phi node.  If the value
1147      passes two different control flow edges without being defined, and
1148      this is the second path treated, a pointer to the node that will be
1149      allocated for the first path (recursion) is returned.  We already
1150      know the address of this node, as it is the next node to be allocated
1151      and will be placed on top of the obstack. (The obstack is a _stack_!) */
1152   res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1154   /* Now we now the value for "pos" and can enter it in the array with
1155      all known local variables.  Attention: this might be a pointer to
1156      a node, that later will be allocated!!! See new_rd_Phi_in.
1157      If this is called in mature, after some set_value in the same block,
1158      the proper value must not be overwritten:
1160      get_value    (makes Phi0, put's it into graph_arr)
1161      set_value    (overwrites Phi0 in graph_arr)
1162      mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1165   if (!block->attr.block.graph_arr[pos]) {
1166     block->attr.block.graph_arr[pos] = res;
1168     /* printf(" value already computed by %s\n",
1169        id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
1175 /* This function returns the last definition of a variable. In case
1176 this variable was last defined in a previous block, Phi nodes are
1177 inserted. If the part of the firm graph containing the definition
1178 is not yet constructed, a dummy Phi node is returned. */
/* Returns the last definition of local variable `pos` valid in `block`,
   inserting Phi (or placeholder Phi0) nodes as needed.  See the four-case
   discussion in the comment below. */
1180 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1183   /* There are 4 cases to treat.
1185      1. The block is not mature and we visit it the first time.  We can not
1186      create a proper Phi node, therefore a Phi0, i.e., a Phi without
1187      predecessors is returned.  This node is added to the linked list (field
1188      "link") of the containing block to be completed when this block is
1189      matured. (Completion will add a new Phi and turn the Phi0 into an Id
1192      2. The value is already known in this block, graph_arr[pos] is set and we
1193      visit the block the first time.  We can return the value without
1194      creating any new nodes.
1196      3. The block is mature and we visit it the first time.  A Phi node needs
1197      to be created (phi_merge).  If the Phi is not needed, as all it's
1198      operands are the same value reaching the block through different
1199      paths, it's optimized away and the value itself is returned.
1201      4. The block is mature, and we visit it the second time.  Now two
1202      subcases are possible:
1203      * The value was computed completely the last time we were here.  This
1204      is the case if there is no loop.  We can return the proper value.
1205      * The recursion that visited this node and set the flag did not
1206      return yet.  We are computing a value in a loop and need to
1207      break the recursion without knowing the result yet.
1208      @@@ strange case.  Straight forward we would create a Phi before
1209      starting the computation of it's predecessors.  In this case we will
1210      find a Phi here in any case.  The problem is that this implementation
1211      only creates a Phi after computing the predecessors, so that it is
1212      hard to compute self references of this Phi.  @@@
1213      There is no simple check for the second subcase.  Therefore we check
1214      for a second visit and treat all such cases as the second subcase.
1215      Anyways, the basic situation is the same:  we reached a block
1216      on two paths without finding a definition of the value:  No Phi
1217      nodes are needed on both paths.
1218      We return this information "Two paths, no Phi needed" by a very tricky
1219      implementation that relies on the fact that an obstack is a stack and
1220      will return a node with the same address on different allocations.
1221      Look also at phi_merge and new_rd_phi_in to understand this.
1222      @@@ Unfortunately this does not work, see testprogram
1223      three_cfpred_example.
1227   /* case 4 -- already visited. */
1228   if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1230   /* visited the first time */
1231   set_irn_visited(block, get_irg_visited(current_ir_graph));
1233   /* Get the local valid value */
1234   res = block->attr.block.graph_arr[pos];
1236   /* case 2 -- If the value is actually computed, return it. */
1237   if (res) { return res;};
1239   if (block->attr.block.matured) { /* case 3 */
1241     /* The Phi has the same amount of ins as the corresponding block. */
1242     int ins = get_irn_arity(block);
/* Scratch array for phi_merge, alloca-style (frame lifetime). */
1244     NEW_ARR_A (ir_node *, nin, ins);
1246     /* Phi merge collects the predecessors and then creates a node. */
1247     res = phi_merge (block, pos, mode, nin, ins);
1249   } else {  /* case 1 */
1250     /* The block is not mature, we don't know how many in's are needed.  A Phi
1251        with zero predecessors is created.  Such a Phi node is called Phi0
1252        node.  (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1253        to the list of Phi0 nodes in this block to be matured by mature_block
1255        The Phi0 has to remember the pos of it's internal value.  If the real
1256        Phi is computed, pos is used to update the array with the local
1259     res = new_rd_Phi0 (current_ir_graph, block, mode);
1260     res->attr.phi0_pos = pos;
/* Link into the block's chain of pending Phi0s (completed on maturing). */
1261     res->link = block->link;
1265   /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): this string literal is broken across a source line in the
   listing, which is invalid C as shown -- the full source must close the
   string (or concatenate literals); confirm and repair there. */
1268     printf("Error: no value set.  Use of undefined variable.  Initializing
1270     assert (mode->code >= irm_f && mode->code <= irm_p);
/* Fall back to the mode's null/zero constant so construction can proceed. */
1271     res = new_rd_Const (NULL, current_ir_graph, block, mode,
1272 tarval_mode_null[mode->code]);
1275   /* The local valid value is available now. */
1276   block->attr.block.graph_arr[pos] = res;
1283 /** This is the simple algorithm. It first generates a Phi0, then
1284 it starts the recursion. This causes an Id at the entry of
1285 every block that has no definition of the value! **/
1287 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy stack management: the simple algorithm needs no explicit
   Phi_in stack, so allocation and deallocation are no-ops. */
1289 Phi_in_stack * new_Phi_in_stack() { return NULL; }
1290 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Creates a Phi in `block` from the `ins` predecessor values in `in[]`.
   If at most one real predecessor remains (others are the node itself
   or Bad), the prematurely allocated Phi is freed again and that single
   value is returned instead.
   NOTE(review): this listing is fragmentary -- declarations, parts of
   the loop body and closing braces are on lines missing here; the code
   below is kept byte-identical. */
1294 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1295 ir_node **in, int ins)
1298 ir_node *res, *known;
1300 /* Allocate a new node on the obstack. The allocation copies the in
1302 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1304 /* This loop checks whether the Phi has more than one predecessor.
1305 If so, it is a real Phi node and we break the loop. Else the
1306 Phi node merges the same definition on several paths and therefore
1307 is not needed. Don't consider Bad nodes! */
1309 for (i=0; i < ins; ++i)
1313 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1321 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1324 obstack_free (current_ir_graph->obst, res);
1327 /* A undefined value, e.g., in unreachable code. */
1331 res = optimize (res);
1333 /* Memory Phis in endless loops must be kept alive.
1334 As we can't distinguish these easily we keep all of them alive. */
1335 if ((res->op == op_Phi) && (mode == mode_M))
1336 add_End_keepalive(irg->end, res);
/* Forward declarations; the definitions follow below. */
1343 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1345 #if PRECISE_EXC_CONTEXT
1346 static inline ir_node *
1347 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Allocates a "fragment array" for fragile (exception raising) op n:
   a snapshot of the current block's graph_arr so the values valid right
   after n can be reconstructed later.  Slot 0 is replaced by n's memory
   Proj; the last slot (n_loc-1) of the block's graph_arr is set to n so
   set_frag_value can chase chained fragile ops.
   NOTE(review): several lines (declarations, restore of `opt`, return,
   closing brace) are missing from this listing. */
1350 new_frag_arr (ir_node *n) {
1353 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1354 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1355 sizeof(ir_node *)*current_ir_graph->n_loc);
1356 /* turn off optimization before allocating Proj nodes, as res isn't
1358 opt = get_optimize(); set_optimize(0);
1359 /* Here we rely on the fact that all frag ops have Memory as first result! */
1360 if (get_irn_op(n) == op_Call)
1361 arr[0] = new_Proj(n, mode_M, 3);
1363 arr[0] = new_Proj(n, mode_M, 0);
1365 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Returns the fragment array of fragile op n; which attribute holds it
   depends on the opcode (Call and Alloc keep it in their own attr). */
1370 get_frag_arr (ir_node *n) {
1371 if (get_irn_op(n) == op_Call) {
1372 return n->attr.call.frag_arr;
1373 } else if (get_irn_op(n) == op_Alloc) {
1374 return n->attr.a.frag_arr;
1376 return n->attr.frag_arr;
/* Records val at pos in frag_arr unless a value is already there, then
   recurses into the frag array of the chained fragile op (slot n_loc-1)
   if one is recorded. */
1381 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1382 if (!frag_arr[pos]) frag_arr[pos] = val;
1383 if (frag_arr[current_ir_graph->n_loc - 1])
1384 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Returns the value at pos that was valid directly after fragile op
   cfOp; builds a Phi (mature block) or Phi0 (immature block) when a
   set_value happened in `block` after the cfOp. */
1388 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1392 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1394 frag_arr = get_frag_arr(cfOp);
1395 res = frag_arr[pos];
1397 if (block->attr.block.graph_arr[pos]) {
1398 /* There was a set_value after the cfOp and no get_value before that
1399 set_value. We must build a Phi node now. */
1400 if (block->attr.block.matured) {
1401 int ins = get_irn_arity(block);
1403 NEW_ARR_A (ir_node *, nin, ins);
1404 res = phi_merge(block, pos, mode, nin, ins);
1406 res = new_rd_Phi0 (current_ir_graph, block, mode);
1407 res->attr.phi0_pos = pos;
1408 res->link = block->link;
1412 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1413 but this should be better: (remove comment if this works) */
1414 /* It's a Phi, we can write this into all graph_arrs with NULL */
1415 set_frag_value(block->attr.block.graph_arr, pos, res);
1417 res = get_r_value_internal(block, pos, mode);
1418 set_frag_value(block->attr.block.graph_arr, pos, res);
1425 /** This function allocates a dummy Phi node to break recursions,
1426 computes the predecessors for the real phi node, and then
1427 allocates and returns this node. The routine called to allocate the
1428 node might optimize it away and return a real value.
1429 This function is called with an in-array of proper size. **/
1430 static inline ir_node *
1431 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1433 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1436 /* If this block has no value at pos create a Phi0 and remember it
1437 in graph_arr to break recursions.
1438 Else we may not set graph_arr as there a later value is remembered. */
1440 if (!block->attr.block.graph_arr[pos]) {
1441 /* This is commented out as collapsing to Bads is no good idea.
1442 Either we need an assert here, or we need to call a routine
1443 that deals with this case as appropriate for the given language.
1444 Right now a self referencing Id is created which will crash irg_vrfy().
1446 Even if all variables are defined before use, it can happen that
1447 we get to the start block, if a cond has been replaced by a tuple
1448 (bad, jmp). As the start has a self referencing control flow edge,
1449 we get a self referencing Id, which is hard to optimize away. We avoid
1450 this by defining the value as a Bad node.
1451 Returning a const with tarval_bad is a preliminary solution. In some
1452 situations we might want a Warning or an Error. */
1454 if (block == get_irg_start_block(current_ir_graph)) {
1455 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1456 /* We don't need to care about exception ops in the start block.
1457 There are none by definition. */
1458 return block->attr.block.graph_arr[pos];
/* Not the start block: break the recursion with a Phi0 placeholder. */
1460 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1461 block->attr.block.graph_arr[pos] = phi0;
1462 #if PRECISE_EXC_CONTEXT
1463 /* Set graph_arr for fragile ops. Also here we should break recursion.
1464 We could choose a cyclic path through an cfop. But the recursion would
1465 break at some point. */
1466 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1471 /* This loop goes to all predecessor blocks of the block the Phi node
1472 is in and there finds the operands of the Phi node by calling
1473 get_r_value_internal. */
1474 for (i = 1; i <= ins; ++i) {
1475 prevCfOp = skip_Proj(block->in[i]);
1477 if (is_Bad(prevCfOp)) {
1478 /* In case a Cond has been optimized we would get right to the start block
1479 with an invalid definition. */
1480 nin[i-1] = new_Bad();
1483 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1485 if (!is_Bad(prevBlock)) {
1486 #if PRECISE_EXC_CONTEXT
/* Fragile predecessor op: read the value valid right after it. */
1487 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1488 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1489 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1492 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1494 nin[i-1] = new_Bad();
1498 /* After collecting all predecessors into the array nin a new Phi node
1499 with these predecessors is created. This constructor contains an
1500 optimization: If all predecessors of the Phi node are identical it
1501 returns the only operand instead of a new Phi node. */
1502 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1504 /* In case we allocated a Phi0 node at the beginning of this procedure,
1505 we need to exchange this Phi0 with the real Phi. */
1507 exchange(phi0, res);
1508 block->attr.block.graph_arr[pos] = res;
1509 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1510 only an optimization. */
1516 /* This function returns the last definition of a variable. In case
1517 this variable was last defined in a previous block, Phi nodes are
1518 inserted. If the part of the firm graph containing the definition
1519 is not yet constructed, a dummy Phi node is returned. */
1521 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1524 /* There are 4 cases to treat.
1526 1. The block is not mature and we visit it the first time. We can not
1527 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1528 predecessors is returned. This node is added to the linked list (field
1529 "link") of the containing block to be completed when this block is
1530 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1533 2. The value is already known in this block, graph_arr[pos] is set and we
1534 visit the block the first time. We can return the value without
1535 creating any new nodes.
1537 3. The block is mature and we visit it the first time. A Phi node needs
1538 to be created (phi_merge). If the Phi is not needed, as all it's
1539 operands are the same value reaching the block through different
1540 paths, it's optimized away and the value itself is returned.
1542 4. The block is mature, and we visit it the second time. Now two
1543 subcases are possible:
1544 * The value was computed completely the last time we were here. This
1545 is the case if there is no loop. We can return the proper value.
1546 * The recursion that visited this node and set the flag did not
1547 return yet. We are computing a value in a loop and need to
1548 break the recursion. This case only happens if we visited
1549 the same block with phi_merge before, which inserted a Phi0.
1550 So we return the Phi0.
1553 /* case 4 -- already visited. */
1554 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1555 /* As phi_merge allocates a Phi0 this value is always defined. Here
1556 is the critical difference of the two algorithms. */
1557 assert(block->attr.block.graph_arr[pos]);
1558 return block->attr.block.graph_arr[pos];
1561 /* visited the first time */
1562 set_irn_visited(block, get_irg_visited(current_ir_graph));
1564 /* Get the local valid value */
1565 res = block->attr.block.graph_arr[pos];
1567 /* case 2 -- If the value is actually computed, return it. */
1568 if (res) { return res; };
1570 if (block->attr.block.matured) { /* case 3 */
1572 /* The Phi has the same amount of ins as the corresponding block. */
1573 int ins = get_irn_arity(block);
1575 NEW_ARR_A (ir_node *, nin, ins);
1577 /* Phi merge collects the predecessors and then creates a node. */
1578 res = phi_merge (block, pos, mode, nin, ins);
1580 } else { /* case 1 */
1581 /* The block is not mature, we don't know how many in's are needed. A Phi
1582 with zero predecessors is created. Such a Phi node is called Phi0
1583 node. The Phi0 is then added to the list of Phi0 nodes in this block
1584 to be matured by mature_block later.
1585 The Phi0 has to remember the pos of it's internal value. If the real
1586 Phi is computed, pos is used to update the array with the local
1588 res = new_rd_Phi0 (current_ir_graph, block, mode);
1589 res->attr.phi0_pos = pos;
1590 res->link = block->link;
1594 /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): the string literal below is cut off in this listing --
   the closing quote and the rest of the statement are on missing lines. */
1597 printf("Error: no value set. Use of undefined variable. Initializing
1599 assert (mode->code >= irm_f && mode->code <= irm_p);
1600 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1601 tarval_mode_null[mode->code]);
1604 /* The local valid value is available now. */
1605 block->attr.block.graph_arr[pos] = res;
1610 #endif /* USE_FAST_PHI_CONSTRUCTION */
1612 /* ************************************************************************** */
1614 /** Finalize a Block node, when all control flows are known. */
1615 /** Acceptable parameters are only Block nodes. */
/* Finalizes `block` once all control flow predecessors are known:
   matures each pending Phi0 on the block's link chain into a real Phi
   via phi_merge, marks the block matured and optimizes it in place. */
1617 mature_block (ir_node *block)
1624 assert (get_irn_opcode(block) == iro_Block);
1625 // assert (!get_Block_matured(block) && "Block already matured");
1627 if (!get_Block_matured(block)) {
1629 /* An array for building the Phi nodes. */
1630 ins = ARR_LEN (block->in)-1;
1631 NEW_ARR_A (ir_node *, nin, ins);
1632 /* shouldn't we delete this array at the end of the procedure? @@@ memory leak? */
1634 /* Traverse a chain of Phi nodes attached to this block and mature
1636 for (n = block->link; n; n=next) {
1637 inc_irg_visited(current_ir_graph);
1639 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1642 block->attr.block.matured = 1;
1644 /* Now, as the block is a finished firm node, we can optimize it.
1645 Since other nodes have been allocated since the block was created
1646 we can not free the node on the obstack. Therefore we have to call
1648 Unfortunately the optimization does not change a lot, as all allocated
1649 nodes refer to the unoptimized node.
1650 We can call _2, as global cse has no effect on blocks. */
1651 block = optimize_in_place_2(block);
/* Debug-info (new_d_*) constructors: thin wrappers that forward to the
   new_rd_* constructors with the current graph and its current block.
   NOTE(review): return-type lines, braces and trailing argument lines
   are missing from this listing; code kept byte-identical. */
1657 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1659 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
/* Const is placed in the start block, not the current block. */
1664 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1666 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1671 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1673 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1678 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1680 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
/* Marks the Cond `arg` as fragmentary and builds its default Proj. */
1685 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1688 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_I));
1689 arg->attr.c.kind = fragmentary;
1690 arg->attr.c.default_proj = max_proj;
1691 res = new_Proj (arg, mode_X, max_proj);
1696 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1698 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1703 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1705 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1710 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1712 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1717 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1719 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1725 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1727 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1732 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1734 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
/* Division-family constructors.  These ops are fragile (may raise an
   exception), so under PRECISE_EXC_CONTEXT a fragment array snapshot is
   attached while the graph is still being built -- unless the node was
   optimized away into a different op. */
1739 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1742 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1744 #if PRECISE_EXC_CONTEXT
1745 if ((current_ir_graph->phase_state == phase_building) &&
1746 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1747 res->attr.frag_arr = new_frag_arr(res);
1754 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1757 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1759 #if PRECISE_EXC_CONTEXT
1760 if ((current_ir_graph->phase_state == phase_building) &&
1761 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1762 res->attr.frag_arr = new_frag_arr(res);
1769 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1772 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1774 #if PRECISE_EXC_CONTEXT
1775 if ((current_ir_graph->phase_state == phase_building) &&
1776 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1777 res->attr.frag_arr = new_frag_arr(res);
1784 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1787 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1789 #if PRECISE_EXC_CONTEXT
1790 if ((current_ir_graph->phase_state == phase_building) &&
1791 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1792 res->attr.frag_arr = new_frag_arr(res);
/* Bitwise, shift, comparison and control-flow constructors: plain
   forwarders to the new_rd_* variants in the current block. */
1799 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1801 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1806 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1808 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1813 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1815 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1820 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1822 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1827 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1829 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1834 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1836 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1841 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1843 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
1848 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1850 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
1855 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
1857 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
1862 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
1864 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
1869 new_d_Jmp (dbg_info* db)
1871 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
1875 new_d_Cond (dbg_info* db, ir_node *c)
1877 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* Memory/side-effect constructors.  Call, Load, Store and Alloc are
   fragile ops: under PRECISE_EXC_CONTEXT a fragment array snapshot is
   attached during graph construction, unless optimized away.  Note the
   frag_arr attribute location differs per op (call.frag_arr,
   a.frag_arr, plain frag_arr) -- must match get_frag_arr above. */
1881 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
1885 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
1886 store, callee, arity, in, type);
1887 #if PRECISE_EXC_CONTEXT
1888 if ((current_ir_graph->phase_state == phase_building) &&
1889 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
1890 res->attr.call.frag_arr = new_frag_arr(res);
1897 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
1899 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
1904 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
1906 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
1911 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
1914 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
1916 #if PRECISE_EXC_CONTEXT
1917 if ((current_ir_graph->phase_state == phase_building) &&
1918 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
1919 res->attr.frag_arr = new_frag_arr(res);
1926 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
1929 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
1931 #if PRECISE_EXC_CONTEXT
1932 if ((current_ir_graph->phase_state == phase_building) &&
1933 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
1934 res->attr.frag_arr = new_frag_arr(res);
1941 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
1945 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
1946 store, size, alloc_type, where);
1947 #if PRECISE_EXC_CONTEXT
1948 if ((current_ir_graph->phase_state == phase_building) &&
1949 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
1950 res->attr.a.frag_arr = new_frag_arr(res);
1957 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1959 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
1960 store, ptr, size, free_type);
/* Selection, symbolic constants and interprocedural-view constructors. */
/* simpleSel: a Sel with no index operands (scalar entity selection). */
1964 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
1965 /* GL: objptr was called frame before. Frame was a bad choice for the name
1966 as the operand could as well be a pointer to a dynamic object. */
1968 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
1969 store, objptr, 0, NULL, ent);
1973 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
1975 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
1976 store, objptr, n_index, index, sel);
1980 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
1982 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->current_block,
1987 new_d_Sync (dbg_info* db, int arity, ir_node** in)
1989 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* Bad and Unknown are singletons cached on the graph. */
1997 return current_ir_graph->bad;
2001 new_d_Unknown (void)
2003 return current_ir_graph->unknown;
2007 new_d_CallBegin (dbg_info *db, ir_node *call)
2010 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2015 new_d_EndReg (dbg_info *db)
2018 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2023 new_d_EndExcept (dbg_info *db)
2026 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2031 new_d_Break (dbg_info *db)
2033 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2037 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2039 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2043 /* ********************************************************************* */
2044 /* Comfortable interface with automatic Phi node construction. */
2045 /* (Uses also constructors of ?? interface, except new_Block.) */
2046 /* ********************************************************************* */
2048 /** Block construction **/
2049 /* immature Block without predecessors */
/* Creates an immature Block with no predecessors yet (dynamic in-array),
   makes it the current block and sets up the per-position value array
   (graph_arr) used by the automatic Phi construction.  The block must
   later be finished with mature_block(). */
2050 ir_node *new_d_immBlock (dbg_info* db) {
2053 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2054 /* creates a new dynamic in-array as length of in is -1 */
2055 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_R, -1, NULL);
2056 current_ir_graph->current_block = res;
2057 res->attr.block.matured = 0;
2058 res->attr.block.exc = exc_normal;
2059 res->attr.block.in_cg = NULL;
2060 set_Block_block_visited(res, 0);
2062 /* Create and initialize array for Phi-node construction. */
2063 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2064 current_ir_graph->n_loc);
2065 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2067 /* Immature block may not be optimized! */
/* Body of new_immBlock (its signature line is missing in this listing):
   the debug-info-free convenience wrapper. */
2075 return new_d_immBlock(NULL);
2078 /* add an edge to a jmp/control flow node */
/* Appends control-flow predecessor `jmp` to the (still immature) block. */
2080 add_in_edge (ir_node *block, ir_node *jmp)
2082 if (block->attr.block.matured) {
2083 assert(0 && "Error: Block already matured!\n");
2086 assert (jmp != NULL);
2087 ARR_APP1 (ir_node *, block->in, jmp);
2091 /* changing the current block */
2093 switch_block (ir_node *target)
2095 current_ir_graph->current_block = target;
2098 /* ************************ */
2099 /* parameter administration */
2101 /* get a value from the parameter array from the current block by its index */
/* Note: slot 0 of graph_arr holds the store, so user values live at pos+1. */
2103 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2105 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2106 inc_irg_visited(current_ir_graph);
2108 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2110 /* get a value from the parameter array from the current block by its index */
2112 get_value (int pos, ir_mode *mode)
2114 return get_d_value(NULL, pos, mode);
2117 /* set a value at position pos in the parameter array from the current block */
2119 set_value (int pos, ir_node *value)
2121 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2122 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2125 /* get the current store */
2129 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2130 /* GL: one could call get_value instead */
2131 inc_irg_visited(current_ir_graph);
2132 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2135 /* set the current store */
2137 set_store (ir_node *store)
2139 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2140 /* GL: one could call set_value instead */
2141 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Adds ka to the End node's keepalive list so it survives dead node elimination. */
2145 keep_alive (ir_node *ka)
2147 add_End_keepalive(current_ir_graph->end, ka);
2150 /** Useful access routines **/
2151 /* Returns the current block of the current graph. To set the current
2152 block use switch_block(). */
2153 ir_node *get_cur_block() {
2154 return get_irg_current_block(current_ir_graph);
2157 /* Returns the frame type of the current graph */
2158 type *get_cur_frame_type() {
2159 return get_irg_frame_type(current_ir_graph);
2163 /* ********************************************************************* */
2166 /* call once for each run of the library */
2172 /* call for each graph */
/* Ends the construction phase for irg: from now on the graph is in the
   high-level (constructed) state. */
2174 finalize_cons (ir_graph *irg) {
2175 irg->phase_state = phase_high;
/* Convenience constructors without debug info: each simply forwards to
   its new_d_* counterpart with db == NULL.
   NOTE(review): closing braces of these one-line wrappers are on lines
   missing from this listing; code kept byte-identical. */
2179 ir_node *new_Block(int arity, ir_node **in) {
2180 return new_d_Block(NULL, arity, in);
2182 ir_node *new_Start (void) {
2183 return new_d_Start(NULL);
2185 ir_node *new_End (void) {
2186 return new_d_End(NULL);
2188 ir_node *new_Jmp (void) {
2189 return new_d_Jmp(NULL);
2191 ir_node *new_Cond (ir_node *c) {
2192 return new_d_Cond(NULL, c);
2194 ir_node *new_Return (ir_node *store, int arity, ir_node **in) {
2195 return new_d_Return(NULL, store, arity, in);
2197 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2198 return new_d_Raise(NULL, store, obj);
2200 ir_node *new_Const (ir_mode *mode, tarval *con) {
2201 return new_d_Const(NULL, mode, con);
2203 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2204 return new_d_SymConst(NULL, value, kind);
2206 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2207 return new_d_simpleSel(NULL, store, objptr, ent);
2209 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2211 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2213 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2215 return new_d_Call(NULL, store, callee, arity, in, type);
2217 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2218 return new_d_Add(NULL, op1, op2, mode);
2220 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2221 return new_d_Sub(NULL, op1, op2, mode);
2223 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2224 return new_d_Minus(NULL, op, mode);
2226 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2227 return new_d_Mul(NULL, op1, op2, mode);
2229 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2230 return new_d_Quot(NULL, memop, op1, op2);
2232 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2233 return new_d_DivMod(NULL, memop, op1, op2);
2235 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2236 return new_d_Div(NULL, memop, op1, op2);
2238 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2239 return new_d_Mod(NULL, memop, op1, op2);
2241 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2242 return new_d_Abs(NULL, op, mode);
2244 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2245 return new_d_And(NULL, op1, op2, mode);
2247 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2248 return new_d_Or(NULL, op1, op2, mode);
2250 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2251 return new_d_Eor(NULL, op1, op2, mode);
2253 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2254 return new_d_Not(NULL, op, mode);
2256 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2257 return new_d_Shl(NULL, op, k, mode);
2259 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2260 return new_d_Shr(NULL, op, k, mode);
2262 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2263 return new_d_Shrs(NULL, op, k, mode);
/* Alias for the rotate constructor. */
2265 #define new_Rotate new_Rot
2266 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2267 return new_d_Rot(NULL, op, k, mode);
2269 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2270 return new_d_Cmp(NULL, op1, op2);
2272 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2273 return new_d_Conv(NULL, op, mode);
2275 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2276 return new_d_Phi(NULL, arity, in, mode);
2278 ir_node *new_Load (ir_node *store, ir_node *addr) {
2279 return new_d_Load(NULL, store, addr);
2281 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2282 return new_d_Store(NULL, store, addr, val);
2284 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2285 where_alloc where) {
2286 return new_d_Alloc(NULL, store, size, alloc_type, where);
2288 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2290 return new_d_Free(NULL, store, ptr, size, free_type);
2292 ir_node *new_Sync (int arity, ir_node **in) {
2293 return new_d_Sync(NULL, arity, in);
2295 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2296 return new_d_Proj(NULL, arg, mode, proj);
2298 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2299 return new_d_defaultProj(NULL, arg, max_proj);
2301 ir_node *new_Tuple (int arity, ir_node **in) {
2302 return new_d_Tuple(NULL, arity, in);
2304 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2305 return new_d_Id(NULL, val, mode);
2307 ir_node *new_Bad (void) {
2310 ir_node *new_Unknown(void) {
2311 return new_d_Unknown();
2313 ir_node *new_CallBegin (ir_node *callee) {
2314 return new_d_CallBegin(NULL, callee);
2316 ir_node *new_EndReg (void) {
2317 return new_d_EndReg(NULL);
2319 ir_node *new_EndExcept (void) {
2320 return new_d_EndExcept(NULL);
2322 ir_node *new_Break (void) {
2323 return new_d_Break(NULL);
2325 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2326 return new_d_Filter(NULL, arg, mode, proj);