1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Authors: Martin Trapp, Christian Schaefer
6 ** ircons.c: basic and more detailed irnode constructors
7 ** store, block and parameter administration.
8 ** Adapted to extended FIRM nodes (exceptions...) and commented
9 ** by Goetz Lindenmaier
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
32 #if USE_EXPLICIT_PHI_IN_STACK
33 /* A stack needed for the automatic Phi node construction in constructor
34 Phi_in. Redefinition in irgraph.c!! */
39 typedef struct Phi_in_stack Phi_in_stack;
42 /*** ******************************************** */
43 /** private interfaces, for professional use only */
45 /* Constructs a Block with a fixed number of predecessors.
46 Does not set current_block. Can not be used with automatic
47 Phi node construction. */
49 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
53 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
54 set_Block_matured(res, 1);
55 set_Block_block_visited(res, 0);
57 res->attr.block.exc = exc_normal;
58 res->attr.block.handler_entry = 0;
59 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
60 res->attr.block.in_cg = NULL;
61 res->attr.block.cg_backedge = NULL;
68 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
72 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
79 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
83 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
89 /* Creates a Phi node with all predecessors. Calling this constructor
90 is only allowed if the corresponding block is mature. */
92 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
96 assert( get_Block_matured(block) );
97 assert( get_irn_arity(block) == arity );
99 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
101 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
103 res = optimize (res);
106 /* Memory Phis in endless loops must be kept alive.
107 As we can't distinguish these easily we keep all of them alive. */
108 if ((res->op == op_Phi) && (mode == mode_M))
109 add_End_keepalive(irg->end, res);
114 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
117 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
119 res = optimize (res);
123 res = local_optimize_newby (res);
130 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
132 ir_node *in[1] = {val};
134 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
135 res = optimize (res);
141 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
144 ir_node *in[1] = {arg};
146 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
147 res->attr.proj = proj;
150 assert(get_Proj_pred(res));
151 assert(get_nodes_Block(get_Proj_pred(res)));
153 res = optimize (res);
161 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
165 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
166 arg->attr.c.kind = fragmentary;
167 arg->attr.c.default_proj = max_proj;
168 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
173 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
175 ir_node *in[1] = {op};
177 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
178 res = optimize (res);
185 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
189 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
190 res = optimize (res);
196 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
197 ir_node *op1, ir_node *op2, ir_mode *mode)
199 ir_node *in[2] = {op1, op2};
201 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
202 res = optimize (res);
208 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
209 ir_node *op1, ir_node *op2, ir_mode *mode)
211 ir_node *in[2] = {op1, op2};
213 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
214 res = optimize (res);
220 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
221 ir_node *op, ir_mode *mode)
223 ir_node *in[1] = {op};
225 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
226 res = optimize (res);
232 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
233 ir_node *op1, ir_node *op2, ir_mode *mode)
235 ir_node *in[2] = {op1, op2};
237 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
238 res = optimize (res);
244 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
245 ir_node *memop, ir_node *op1, ir_node *op2)
247 ir_node *in[3] = {memop, op1, op2};
249 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
250 res = optimize (res);
256 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
257 ir_node *memop, ir_node *op1, ir_node *op2)
259 ir_node *in[3] = {memop, op1, op2};
261 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
262 res = optimize (res);
268 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
269 ir_node *memop, ir_node *op1, ir_node *op2)
271 ir_node *in[3] = {memop, op1, op2};
273 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
274 res = optimize (res);
280 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
281 ir_node *memop, ir_node *op1, ir_node *op2)
283 ir_node *in[3] = {memop, op1, op2};
285 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
286 res = optimize (res);
292 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
293 ir_node *op1, ir_node *op2, ir_mode *mode)
295 ir_node *in[2] = {op1, op2};
297 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
298 res = optimize (res);
304 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
305 ir_node *op1, ir_node *op2, ir_mode *mode)
307 ir_node *in[2] = {op1, op2};
309 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
310 res = optimize (res);
316 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
317 ir_node *op1, ir_node *op2, ir_mode *mode)
319 ir_node *in[2] = {op1, op2};
321 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
322 res = optimize (res);
328 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
329 ir_node *op, ir_mode *mode)
331 ir_node *in[1] = {op};
333 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
334 res = optimize (res);
340 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
341 ir_node *op, ir_node *k, ir_mode *mode)
343 ir_node *in[2] = {op, k};
345 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
346 res = optimize (res);
352 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
353 ir_node *op, ir_node *k, ir_mode *mode)
355 ir_node *in[2] = {op, k};
357 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
358 res = optimize (res);
364 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
365 ir_node *op, ir_node *k, ir_mode *mode)
367 ir_node *in[2] = {op, k};
369 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
370 res = optimize (res);
376 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
377 ir_node *op, ir_node *k, ir_mode *mode)
379 ir_node *in[2] = {op, k};
381 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
382 res = optimize (res);
388 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
389 ir_node *op, ir_mode *mode)
391 ir_node *in[1] = {op};
393 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
394 res = optimize (res);
400 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
401 ir_node *op1, ir_node *op2)
403 ir_node *in[2] = {op1, op2};
405 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
406 res = optimize (res);
412 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
416 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, in);
417 res = optimize (res);
423 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
425 ir_node *in[1] = {c};
427 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
428 res->attr.c.kind = dense;
429 res->attr.c.default_proj = 0;
430 res = optimize (res);
436 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
437 ir_node *callee, int arity, ir_node **in, type *tp)
444 NEW_ARR_A (ir_node *, r_in, r_arity);
447 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
449 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
451 assert(is_method_type(tp));
452 set_Call_type(res, tp);
453 res->attr.call.callee_arr = NULL;
454 res = optimize (res);
460 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
461 ir_node *store, int arity, ir_node **in)
468 NEW_ARR_A (ir_node *, r_in, r_arity);
470 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
471 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
472 res = optimize (res);
478 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
480 ir_node *in[2] = {store, obj};
482 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
483 res = optimize (res);
489 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
490 ir_node *store, ir_node *adr)
492 ir_node *in[2] = {store, adr};
494 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
496 res = optimize (res);
502 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
503 ir_node *store, ir_node *adr, ir_node *val)
505 ir_node *in[3] = {store, adr, val};
507 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
509 res = optimize (res);
516 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
517 ir_node *size, type *alloc_type, where_alloc where)
519 ir_node *in[2] = {store, size};
521 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
523 res->attr.a.where = where;
524 res->attr.a.type = alloc_type;
526 res = optimize (res);
532 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
533 ir_node *ptr, ir_node *size, type *free_type)
535 ir_node *in[3] = {store, ptr, size};
537 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
539 res->attr.f = free_type;
541 res = optimize (res);
547 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
548 int arity, ir_node **in, entity *ent)
555 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
558 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
559 res = new_ir_node (db, irg, block, op_Sel, mode_P, r_arity, r_in);
561 res->attr.s.ent = ent;
563 res = optimize (res);
569 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
570 ir_node *objptr, type *ent)
577 NEW_ARR_A (ir_node *, r_in, r_arity);
581 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
583 res->attr.io.ent = ent;
585 /* res = optimize (res);
586 ** irn_vrfy (res); */
591 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
592 symconst_kind symkind)
597 if (symkind == linkage_ptr_info)
601 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, in);
603 res->attr.i.num = symkind;
604 if (symkind == linkage_ptr_info) {
605 res->attr.i.tori.ptrinfo = (ident *)value;
607 assert ( ( (symkind == type_tag)
608 || (symkind == size))
609 && (is_type(value)));
610 res->attr.i.tori.typ = (type *)value;
612 res = optimize (res);
618 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
622 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
624 res = optimize (res);
632 return current_ir_graph->bad;
638 return current_ir_graph->unknown;
642 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
644 ir_node *in[1] = { get_Call_ptr(call) };
646 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
647 res->attr.callbegin.irg = irg;
648 res->attr.callbegin.call = call;
649 res = optimize (res);
655 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
659 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
660 res->attr.end.irg = irg;
667 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
671 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
672 res->attr.end.irg = irg;
679 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
683 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, in);
684 res = optimize (res);
690 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
693 ir_node *in[1] = {arg};
695 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
696 res->attr.filter.proj = proj;
697 res->attr.filter.in_cg = NULL;
698 res->attr.filter.backedge = NULL;
701 assert(get_Proj_pred(res));
702 assert(get_nodes_Block(get_Proj_pred(res)));
704 res = optimize (res);
711 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
712 return new_rd_Block(NULL, irg, arity, in);
714 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
715 return new_rd_Start(NULL, irg, block);
717 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
718 return new_rd_End(NULL, irg, block);
720 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
721 return new_rd_Jmp(NULL, irg, block);
723 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
724 return new_rd_Cond(NULL, irg, block, c);
726 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
727 ir_node *store, int arity, ir_node **in) {
728 return new_rd_Return(NULL, irg, block, store, arity, in);
730 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
731 ir_node *store, ir_node *obj) {
732 return new_rd_Raise(NULL, irg, block, store, obj);
734 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
735 ir_mode *mode, tarval *con) {
736 return new_rd_Const(NULL, irg, block, mode, con);
738 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
739 type_or_id_p value, symconst_kind symkind) {
740 return new_rd_SymConst(NULL, irg, block, value, symkind);
742 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
743 ir_node *objptr, int n_index, ir_node **index,
745 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
747 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
749 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
751 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
752 ir_node *callee, int arity, ir_node **in,
754 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
756 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
757 ir_node *op1, ir_node *op2, ir_mode *mode) {
758 return new_rd_Add(NULL, irg, block, op1, op2, mode);
760 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
761 ir_node *op1, ir_node *op2, ir_mode *mode) {
762 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
764 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
765 ir_node *op, ir_mode *mode) {
766 return new_rd_Minus(NULL, irg, block, op, mode);
768 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
769 ir_node *op1, ir_node *op2, ir_mode *mode) {
770 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
772 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
773 ir_node *memop, ir_node *op1, ir_node *op2) {
774 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
776 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
777 ir_node *memop, ir_node *op1, ir_node *op2) {
778 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
780 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
781 ir_node *memop, ir_node *op1, ir_node *op2) {
782 return new_rd_Div(NULL, irg, block, memop, op1, op2);
784 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
785 ir_node *memop, ir_node *op1, ir_node *op2) {
786 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
788 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
789 ir_node *op, ir_mode *mode) {
790 return new_rd_Abs(NULL, irg, block, op, mode);
792 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
793 ir_node *op1, ir_node *op2, ir_mode *mode) {
794 return new_rd_And(NULL, irg, block, op1, op2, mode);
796 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
797 ir_node *op1, ir_node *op2, ir_mode *mode) {
798 return new_rd_Or(NULL, irg, block, op1, op2, mode);
800 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
801 ir_node *op1, ir_node *op2, ir_mode *mode) {
802 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
804 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
805 ir_node *op, ir_mode *mode) {
806 return new_rd_Not(NULL, irg, block, op, mode);
808 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
809 ir_node *op1, ir_node *op2) {
810 return new_rd_Cmp(NULL, irg, block, op1, op2);
812 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
813 ir_node *op, ir_node *k, ir_mode *mode) {
814 return new_rd_Shl(NULL, irg, block, op, k, mode);
816 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
817 ir_node *op, ir_node *k, ir_mode *mode) {
818 return new_rd_Shr(NULL, irg, block, op, k, mode);
820 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
821 ir_node *op, ir_node *k, ir_mode *mode) {
822 return new_rd_Shrs(NULL, irg, block, op, k, mode);
824 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
825 ir_node *op, ir_node *k, ir_mode *mode) {
826 return new_rd_Rot(NULL, irg, block, op, k, mode);
828 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
829 ir_node *op, ir_mode *mode) {
830 return new_rd_Conv(NULL, irg, block, op, mode);
832 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
833 ir_node **in, ir_mode *mode) {
834 return new_rd_Phi(NULL, irg, block, arity, in, mode);
836 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
837 ir_node *store, ir_node *adr) {
838 return new_rd_Load(NULL, irg, block, store, adr);
840 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
841 ir_node *store, ir_node *adr, ir_node *val) {
842 return new_rd_Store(NULL, irg, block, store, adr, val);
844 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
845 ir_node *size, type *alloc_type, where_alloc where) {
846 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
848 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
849 ir_node *ptr, ir_node *size, type *free_type) {
850 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
852 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
853 return new_rd_Sync(NULL, irg, block, arity, in);
855 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
856 ir_mode *mode, long proj) {
857 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
859 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
861 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
863 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
864 int arity, ir_node **in) {
865 return new_rd_Tuple(NULL, irg, block, arity, in );
867 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
868 ir_node *val, ir_mode *mode) {
869 return new_rd_Id(NULL, irg, block, val, mode);
871 INLINE ir_node *new_r_Bad () {
874 INLINE ir_node *new_r_Unknown () {
875 return new_rd_Unknown();
877 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
878 return new_rd_CallBegin(NULL, irg, block, callee);
880 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
881 return new_rd_EndReg(NULL, irg, block);
883 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
884 return new_rd_EndExcept(NULL, irg, block);
886 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
887 return new_rd_Break(NULL, irg, block);
889 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
890 ir_mode *mode, long proj) {
891 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
895 /** ********************/
896 /** public interfaces */
897 /** construction tools */
899 /****f* ircons/new_Start
902 * new_Start -- create a new Start node in the current block
905 * s = new_Start(void);
906 * ir_node* new_Start(void);
909 * s - pointer to the created Start node
914 new_d_Start (dbg_info* db)
918 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
919 op_Start, mode_T, 0, NULL);
921 res = optimize (res);
927 new_d_End (dbg_info* db)
930 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
931 op_End, mode_X, -1, NULL);
932 res = optimize (res);
938 /* Constructs a Block with a fixed number of predecessors.
939 Does set current_block. Can be used with automatic Phi
940 node construction. */
942 new_d_Block (dbg_info* db, int arity, ir_node **in)
946 res = new_rd_Block (db, current_ir_graph, arity, in);
948 /* Create and initialize array for Phi-node construction. */
949 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
950 current_ir_graph->n_loc);
951 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
953 res = optimize (res);
954 current_ir_graph->current_block = res;
961 /* ***********************************************************************/
962 /* Methods necessary for automatic Phi node creation */
964 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
965 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
966 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
967 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
969 Call Graph: ( A ---> B == A "calls" B)
971 get_value mature_block
979 get_r_value_internal |
983 new_rd_Phi0 new_rd_Phi_in
985 * *************************************************************************** */
987 /* Creates a Phi node with 0 predecessors */
988 static INLINE ir_node *
989 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
992 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
997 /* There are two implementations of the Phi node construction. The first
998 is faster, but does not work for blocks with more than 2 predecessors.
999 The second works always but is slower and causes more unnecessary Phi
1001 Select the implementations by the following preprocessor flag set in
1003 #if USE_FAST_PHI_CONSTRUCTION
1005 /* This is a stack used for allocating and deallocating nodes in
1006 new_rd_Phi_in. The original implementation used the obstack
1007 to model this stack, now it is explicit. This reduces side effects.
1009 #if USE_EXPLICIT_PHI_IN_STACK
1010 INLINE Phi_in_stack *
1011 new_Phi_in_stack() {
1014 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1016 res->stack = NEW_ARR_F (ir_node *, 1);
1023 free_Phi_in_stack(Phi_in_stack *s) {
1024 DEL_ARR_F(s->stack);
1028 free_to_Phi_in_stack(ir_node *phi) {
1029 assert(get_irn_opcode(phi) == iro_Phi);
1031 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1032 current_ir_graph->Phi_in_stack->pos)
1033 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1035 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1037 (current_ir_graph->Phi_in_stack->pos)++;
1040 static INLINE ir_node *
1041 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1042 int arity, ir_node **in) {
1044 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1045 int pos = current_ir_graph->Phi_in_stack->pos;
1049 /* We need to allocate a new node */
1050 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1051 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1053 /* reuse the old node and initialize it again. */
1056 assert (res->kind == k_ir_node);
1057 assert (res->op == op_Phi);
1061 assert (arity >= 0);
1062 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1063 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1065 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1067 (current_ir_graph->Phi_in_stack->pos)--;
1071 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1073 /* Creates a Phi node with a given, fixed array **in of predecessors.
1074 If the Phi node is unnecessary, as the same value reaches the block
1075 through all control flow paths, it is eliminated and the value
1076 returned directly. This constructor is only intended for use in
1077 the automatic Phi node generation triggered by get_value or mature.
1078 The implementation is quite tricky and depends on the fact, that
1079 the nodes are allocated on a stack:
1080 The in array contains predecessors and NULLs. The NULLs appear,
1081 if get_r_value_internal, that computed the predecessors, reached
1082 the same block on two paths. In this case the same value reaches
1083 this block on both paths, there is no definition in between. We need
1084 not allocate a Phi where these path's merge, but we have to communicate
1085 this fact to the caller. This happens by returning a pointer to the
1086 node the caller _will_ allocate. (Yes, we predict the address. We can
1087 do so because the nodes are allocated on the obstack.) The caller then
1088 finds a pointer to itself and, when this routine is called again,
1091 static INLINE ir_node *
1092 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1093 ir_node **in, int ins)
1096 ir_node *res, *known;
1098 /* allocate a new node on the obstack.
1099 This can return a node to which some of the pointers in the in-array
1101 Attention: the constructor copies the in array, i.e., the later changes
1102 to the array in this routine do not affect the constructed node! If
1103 the in array contains NULLs, there will be missing predecessors in the
1105 Is this a possible internal state of the Phi node generation? */
1106 #if USE_EXPLICIT_PHI_IN_STACK
1107 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1109 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1110 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1112 /* The in-array can contain NULLs. These were returned by
1113 get_r_value_internal if it reached the same block/definition on a
1115 The NULLs are replaced by the node itself to simplify the test in the
1117 for (i=0; i < ins; ++i)
1118 if (in[i] == NULL) in[i] = res;
1120 /* This loop checks whether the Phi has more than one predecessor.
1121 If so, it is a real Phi node and we break the loop. Else the
1122 Phi node merges the same definition on several paths and therefore
1124 for (i=0; i < ins; ++i)
1126 if (in[i]==res || in[i]==known) continue;
1134 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1136 #if USE_EXPLICIT_PHI_IN_STACK
1137 free_to_Phi_in_stack(res);
1139 obstack_free (current_ir_graph->obst, res);
1143 res = optimize (res);
1147 /* return the pointer to the Phi node. This node might be deallocated! */
1152 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1154 /** This function computes the predecessors for a real Phi node, and then
1155 allocates and returns this node. The routine called to allocate the
1156 node might optimize it away and return a real value, or even a pointer
1157 to a deallocated Phi node on top of the obstack!
1158 This function is called with an in-array of proper size. **/
1160 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1162 ir_node *prevBlock, *res;
1165 /* This loop goes to all predecessor blocks of the block the Phi node is in
1166 and there finds the operands of the Phi node by calling
1167 get_r_value_internal. */
1168 for (i = 1; i <= ins; ++i) {
1169 assert (block->in[i]);
1170 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1172 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1175 /* After collecting all predecessors into the array nin a new Phi node
1176 with these predecessors is created. This constructor contains an
1177 optimization: If all predecessors of the Phi node are identical it
1178 returns the only operand instead of a new Phi node. If the value
1179 passes two different control flow edges without being defined, and
1180 this is the second path treated, a pointer to the node that will be
1181 allocated for the first path (recursion) is returned. We already
1182 know the address of this node, as it is the next node to be allocated
1183 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1184 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1186 /* Now we now the value for "pos" and can enter it in the array with
1187 all known local variables. Attention: this might be a pointer to
1188 a node, that later will be allocated!!! See new_rd_Phi_in.
1189 If this is called in mature, after some set_value in the same block,
1190 the proper value must not be overwritten:
1192 get_value (makes Phi0, put's it into graph_arr)
1193 set_value (overwrites Phi0 in graph_arr)
1194 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1197 if (!block->attr.block.graph_arr[pos]) {
1198 block->attr.block.graph_arr[pos] = res;
1200 /* printf(" value already computed by %s\n",
1201 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
1207 /* This function returns the last definition of a variable. In case
1208 this variable was last defined in a previous block, Phi nodes are
1209 inserted. If the part of the firm graph containing the definition
1210 is not yet constructed, a dummy Phi node is returned. */
1212 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1215 /* There are 4 cases to treat.
1217 1. The block is not mature and we visit it the first time. We can not
1218 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1219 predecessors is returned. This node is added to the linked list (field
1220 "link") of the containing block to be completed when this block is
1221 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1224 2. The value is already known in this block, graph_arr[pos] is set and we
1225 visit the block the first time. We can return the value without
1226 creating any new nodes.
1228 3. The block is mature and we visit it the first time. A Phi node needs
1229 to be created (phi_merge). If the Phi is not needed, as all it's
1230 operands are the same value reaching the block through different
1231 paths, it's optimized away and the value itself is returned.
1233 4. The block is mature, and we visit it the second time. Now two
1234 subcases are possible:
1235 * The value was computed completely the last time we were here. This
1236 is the case if there is no loop. We can return the proper value.
1237 * The recursion that visited this node and set the flag did not
1238 return yet. We are computing a value in a loop and need to
1239 break the recursion without knowing the result yet.
1240 @@@ strange case. Straight forward we would create a Phi before
1241 starting the computation of it's predecessors. In this case we will
1242 find a Phi here in any case. The problem is that this implementation
1243 only creates a Phi after computing the predecessors, so that it is
1244 hard to compute self references of this Phi. @@@
1245 There is no simple check for the second subcase. Therefore we check
1246 for a second visit and treat all such cases as the second subcase.
1247 Anyways, the basic situation is the same: we reached a block
1248 on two paths without finding a definition of the value: No Phi
1249 nodes are needed on both paths.
1250 We return this information "Two paths, no Phi needed" by a very tricky
1251 implementation that relies on the fact that an obstack is a stack and
1252 will return a node with the same address on different allocations.
1253 Look also at phi_merge and new_rd_phi_in to understand this.
1254 @@@ Unfortunately this does not work, see testprogram
1255 three_cfpred_example.
1259 /* case 4 -- already visited. */
1260 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1262 /* visited the first time */
1263 set_irn_visited(block, get_irg_visited(current_ir_graph));
1265 /* Get the local valid value */
1266 res = block->attr.block.graph_arr[pos];
1268 /* case 2 -- If the value is actually computed, return it. */
1269 if (res) { return res;};
1271 if (block->attr.block.matured) { /* case 3 */
1273 /* The Phi has the same amount of ins as the corresponding block. */
1274 int ins = get_irn_arity(block);
1276 NEW_ARR_A (ir_node *, nin, ins);
1278 /* Phi merge collects the predecessors and then creates a node. */
1279 res = phi_merge (block, pos, mode, nin, ins);
1281 } else { /* case 1 */
1282 /* The block is not mature, we don't know how many in's are needed. A Phi
1283 with zero predecessors is created. Such a Phi node is called Phi0
1284 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1285 to the list of Phi0 nodes in this block to be matured by mature_block
1287 The Phi0 has to remember the pos of it's internal value. If the real
1288 Phi is computed, pos is used to update the array with the local
1291 res = new_rd_Phi0 (current_ir_graph, block, mode);
1292 res->attr.phi0_pos = pos;
1293 res->link = block->link;
1297 /* If we get here, the frontend missed a use-before-definition error */
1300 printf("Error: no value set. Use of undefined variable. Initializing
1302 assert (mode->code >= irm_F && mode->code <= irm_P);
1303 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1304 tarval_mode_null[mode->code]);
1307 /* The local valid value is available now. */
1308 block->attr.block.graph_arr[pos] = res;
1315 /** This is the simple algorithm. It first generates a Phi0, then
1316 it starts the recursion. This causes an Id at the entry of
1317 every block that has no definition of the value! **/
1319 #if USE_EXPLICIT_PHI_IN_STACK
1321 static INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1322 static INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Build a Phi node in `block` with the `ins` predecessors given in `in`.
   If the Phi turns out to merge only a single distinct (non-Bad) value it
   is freed again and that value is returned instead of a Phi.
   NOTE(review): several interior lines (the loop body that tracks `known`,
   the early-out return, the declaration of `i`, the closing brace) were
   lost in extraction -- the fragment below is incomplete. */
1325 static INLINE ir_node *
1326 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1327 ir_node **in, int ins)
1330 ir_node *res, *known;
1332 /* Allocate a new node on the obstack. The allocation copies the in
1334 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1335 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1337 /* This loop checks whether the Phi has more than one predecessor.
1338 If so, it is a real Phi node and we break the loop. Else the
1339 Phi node merges the same definition on several paths and therefore
1340 is not needed. Don't consider Bad nodes! */
1342 for (i=0; i < ins; ++i)
1346 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1354 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* the tentative Phi was the most recent obstack allocation, so it can be
   freed again here */
1357 obstack_free (current_ir_graph->obst, res);
1360 /* A undefined value, e.g., in unreachable code. */
1364 res = optimize (res);
1366 /* Memory Phis in endless loops must be kept alive.
1367 As we can't distinguish these easily we keep all of them alive. */
1368 if ((res->op == op_Phi) && (mode == mode_M))
1369 add_End_keepalive(irg->end, res);
1376 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1378 #if PRECISE_EXC_CONTEXT
1380 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Allocate a "fragment array" for the fragile op `n`: a snapshot of the
   current block's graph_arr (one slot per local value) taken when `n` is
   constructed.  Slot 0 is overwritten with the memory Proj of `n`, and the
   last slot (n_loc-1) remembers `n` itself so set_frag_value can follow the
   chain of fragile ops.  NOTE(review): the declarations of `arr`/`opt`, the
   `else`, the restore of the optimize flag and the `return arr;` were lost
   in extraction. */
1382 static INLINE ir_node **
1383 new_frag_arr (ir_node *n) {
1386 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1387 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1388 sizeof(ir_node *)*current_ir_graph->n_loc);
1389 /* turn off optimization before allocating Proj nodes, as res isn't
1391 opt = get_optimize(); set_optimize(0);
1392 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* Calls deliver their memory result as Proj number 3 here; all other
   fragile ops deliver it as Proj 0 */
1393 if (get_irn_op(n) == op_Call)
1394 arr[0] = new_Proj(n, mode_M, 3);
1396 arr[0] = new_Proj(n, mode_M, 0);
/* remember the fragile op itself in the last slot; see set_frag_value */
1398 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Return the fragment array stored in the attributes of the fragile op `n`.
   The attribute union member differs per opcode (call / alloc / generic).
   NOTE(review): the `} else {` before the generic case and the closing
   braces were lost in extraction. */
1402 static INLINE ir_node **
1403 get_frag_arr (ir_node *n) {
1404 if (get_irn_op(n) == op_Call) {
1405 return n->attr.call.frag_arr;
1406 } else if (get_irn_op(n) == op_Alloc) {
1407 return n->attr.a.frag_arr;
1409 return n->attr.frag_arr;
/* Store `val` at `pos` in `frag_arr` unless a value is already recorded
   there, then propagate the same update along the chain of fragile ops
   remembered in the last slot (n_loc-1) of each fragment array.
   NOTE(review): the return-type line of this function was lost in
   extraction. */
1414 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1415 if (!frag_arr[pos]) frag_arr[pos] = val;
/* recurse into the next fragment array if another fragile op follows */
1416 if (frag_arr[current_ir_graph->n_loc - 1])
1417 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Return the value at `pos` as visible after the fragile op `cfOp` in
   `block`, consulting cfOp's fragment array first and falling back to the
   normal Phi construction (phi_merge / get_r_value_internal) when needed.
   NOTE(review): the return-type line, several guards (e.g. the `if (!res)`
   around the fallback) and closing braces were lost in extraction. */
1421 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1425 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1427 frag_arr = get_frag_arr(cfOp);
1428 res = frag_arr[pos];
1430 if (block->attr.block.graph_arr[pos]) {
1431 /* There was a set_value after the cfOp and no get_value before that
1432 set_value. We must build a Phi node now. */
1433 if (block->attr.block.matured) {
1434 int ins = get_irn_arity(block);
1436 NEW_ARR_A (ir_node *, nin, ins);
1437 res = phi_merge(block, pos, mode, nin, ins);
/* block not yet matured: fall back to a Phi0 placeholder, queued on the
   block's link list just like in get_r_value_internal */
1439 res = new_rd_Phi0 (current_ir_graph, block, mode);
1440 res->attr.phi0_pos = pos;
1441 res->link = block->link;
1445 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1446 but this should be better: (remove comment if this works) */
1447 /* It's a Phi, we can write this into all graph_arrs with NULL */
1448 set_frag_value(block->attr.block.graph_arr, pos, res);
1450 res = get_r_value_internal(block, pos, mode);
1451 set_frag_value(block->attr.block.graph_arr, pos, res);
1458 /** This function allocates a dummy Phi node to break recursions,
1459 computes the predecessors for the real phi node, and then
1460 allocates and returns this node. The routine called to allocate the
1461 node might optimize it away and return a real value.
1462 This function must be called with an in-array of proper size. **/
/* NOTE(review): the return-type line, the declaration of `i` and several
   closing braces were lost in extraction -- the fragment below is
   incomplete. */
1464 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1466 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1469 /* If this block has no value at pos create a Phi0 and remember it
1470 in graph_arr to break recursions.
1471 Else we may not set graph_arr as there a later value is remembered. */
1473 if (!block->attr.block.graph_arr[pos]) {
1474 /* This is commented out as collapsing to Bads is no good idea.
1475 Either we need an assert here, or we need to call a routine
1476 that deals with this case as appropriate for the given language.
1477 Right now a self referencing Id is created which will crash irg_vrfy().
1479 Even if all variables are defined before use, it can happen that
1480 we get to the start block, if a cond has been replaced by a tuple
1481 (bad, jmp). As the start has a self referencing control flow edge,
1482 we get a self referencing Id, which is hard to optimize away. We avoid
1483 this by defining the value as a Bad node.
1484 Returning a const with tarval_bad is a preliminary solution. In some
1485 situations we might want a Warning or an Error. */
1487 if (block == get_irg_start_block(current_ir_graph)) {
1488 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1489 /* We don't need to care about exception ops in the start block.
1490 There are none by definition. */
1491 return block->attr.block.graph_arr[pos];
/* place a Phi0 placeholder to break the recursion below */
1493 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1494 block->attr.block.graph_arr[pos] = phi0;
1495 #if PRECISE_EXC_CONTEXT
1496 /* Set graph_arr for fragile ops. Also here we should break recursion.
1497 We could choose a cyclic path through an cfop. But the recursion would
1498 break at some point. */
1499 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1504 /* This loop goes to all predecessor blocks of the block the Phi node
1505 is in and there finds the operands of the Phi node by calling
1506 get_r_value_internal. */
/* block->in[0] is the block's own entry, so predecessors start at index 1
   while nin is filled from index 0 */
1507 for (i = 1; i <= ins; ++i) {
1508 prevCfOp = skip_Proj(block->in[i]);
1510 if (is_Bad(prevCfOp)) {
1511 /* In case a Cond has been optimized we would get right to the start block
1512 with an invalid definition. */
1513 nin[i-1] = new_Bad();
1516 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1518 if (!is_Bad(prevBlock)) {
1519 #if PRECISE_EXC_CONTEXT
1520 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
/* the assert repeats the call; with NDEBUG only the second call remains */
1521 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1522 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1525 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1527 nin[i-1] = new_Bad();
1531 /* After collecting all predecessors into the array nin a new Phi node
1532 with these predecessors is created. This constructor contains an
1533 optimization: If all predecessors of the Phi node are identical it
1534 returns the only operand instead of a new Phi node. */
1535 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1537 /* In case we allocated a Phi0 node at the beginning of this procedure,
1538 we need to exchange this Phi0 with the real Phi. */
1540 exchange(phi0, res);
1541 block->attr.block.graph_arr[pos] = res;
1542 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1543 only an optimization. */
1549 /* This function returns the last definition of a variable. In case
1550 this variable was last defined in a previous block, Phi nodes are
1551 inserted. If the part of the firm graph containing the definition
1552 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): the return-type line, declarations of `res`/`nin` and
   closing braces were lost in extraction; the printf at 1630 is a string
   literal truncated by extraction and is left untouched here. */
1554 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1557 /* There are 4 cases to treat.
1559 1. The block is not mature and we visit it the first time. We can not
1560 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1561 predecessors is returned. This node is added to the linked list (field
1562 "link") of the containing block to be completed when this block is
1563 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1566 2. The value is already known in this block, graph_arr[pos] is set and we
1567 visit the block the first time. We can return the value without
1568 creating any new nodes.
1570 3. The block is mature and we visit it the first time. A Phi node needs
1571 to be created (phi_merge). If the Phi is not needed, as all its
1572 operands are the same value reaching the block through different
1573 paths, it's optimized away and the value itself is returned.
1575 4. The block is mature, and we visit it the second time. Now two
1576 subcases are possible:
1577 * The value was computed completely the last time we were here. This
1578 is the case if there is no loop. We can return the proper value.
1579 * The recursion that visited this node and set the flag did not
1580 return yet. We are computing a value in a loop and need to
1581 break the recursion. This case only happens if we visited
1582 the same block with phi_merge before, which inserted a Phi0.
1583 So we return the Phi0.
1586 /* case 4 -- already visited. */
1587 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1588 /* As phi_merge allocates a Phi0 this value is always defined. Here
1589 is the critical difference of the two algorithms. */
1590 assert(block->attr.block.graph_arr[pos]);
1591 return block->attr.block.graph_arr[pos];
1594 /* visited the first time */
1595 set_irn_visited(block, get_irg_visited(current_ir_graph));
1597 /* Get the local valid value */
1598 res = block->attr.block.graph_arr[pos];
1600 /* case 2 -- If the value is actually computed, return it. */
1601 if (res) { return res; };
1603 if (block->attr.block.matured) { /* case 3 */
1605 /* The Phi has the same amount of ins as the corresponding block. */
1606 int ins = get_irn_arity(block);
1608 NEW_ARR_A (ir_node *, nin, ins);
1610 /* Phi merge collects the predecessors and then creates a node. */
1611 res = phi_merge (block, pos, mode, nin, ins);
1613 } else { /* case 1 */
1614 /* The block is not mature, we don't know how many in's are needed. A Phi
1615 with zero predecessors is created. Such a Phi node is called Phi0
1616 node. The Phi0 is then added to the list of Phi0 nodes in this block
1617 to be matured by mature_block later.
1618 The Phi0 has to remember the pos of it's internal value. If the real
1619 Phi is computed, pos is used to update the array with the local
1621 res = new_rd_Phi0 (current_ir_graph, block, mode);
1622 res->attr.phi0_pos = pos;
1623 res->link = block->link;
1627 /* If we get here, the frontend missed a use-before-definition error */
1630 printf("Error: no value set. Use of undefined variable. Initializing
/* fallback: fabricate a zero constant of the requested mode so
   construction can continue after the frontend error */
1632 assert (mode->code >= irm_F && mode->code <= irm_P);
1633 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1634 tarval_mode_null[mode->code]);
1637 /* The local valid value is available now. */
1638 block->attr.block.graph_arr[pos] = res;
1643 #endif /* USE_FAST_PHI_CONSTRUCTION */
1645 /* ************************************************************************** */
1647 /** Finalize a Block node, when all control flows are known. */
1648 /** Acceptable parameters are only Block nodes. */
/* NOTE(review): the return-type line, declarations of ins/nin/n/next and
   the assignment of `next` inside the loop (presumably from the node's
   link field -- confirm against the full source) were lost in
   extraction. */
1650 mature_block (ir_node *block)
1657 assert (get_irn_opcode(block) == iro_Block);
1658 /* @@@ should be commented in
1659 assert (!get_Block_matured(block) && "Block already matured"); */
1661 if (!get_Block_matured(block)) {
/* in[0] holds the block's own entry, hence the -1 for the real arity */
1662 ins = ARR_LEN (block->in)-1;
1663 /* Fix block parameters */
1664 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1666 /* An array for building the Phi nodes. */
1667 NEW_ARR_A (ir_node *, nin, ins);
1669 /* Traverse a chain of Phi nodes attached to this block and mature
1671 for (n = block->link; n; n=next) {
1672 inc_irg_visited(current_ir_graph);
/* replace each queued Phi0 by the real Phi built over all predecessors */
1674 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1677 block->attr.block.matured = 1;
1679 /* Now, as the block is a finished firm node, we can optimize it.
1680 Since other nodes have been allocated since the block was created
1681 we can not free the node on the obstack. Therefore we have to call
1683 Unfortunately the optimization does not change a lot, as all allocated
1684 nodes refer to the unoptimized node.
1685 We can call _2, as global cse has no effect on blocks. */
1686 block = optimize_in_place_2(block);
/* new_d_* constructors: debug-info variants that delegate to the
   corresponding new_rd_* constructor, building the node in the current
   block of current_ir_graph.  NOTE(review): return-type lines, argument
   continuations and closing braces were lost in extraction; the bodies
   below are incomplete fragments. */
1692 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1694 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
/* constants are placed in the start block, not the current block */
1699 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1701 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1706 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1708 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1713 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1715 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
/* marks the Cond as fragmentary and builds the Proj for its default case */
1720 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1723 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1724 arg->attr.c.kind = fragmentary;
1725 arg->attr.c.default_proj = max_proj;
1726 res = new_Proj (arg, mode_X, max_proj);
1731 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1733 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1738 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1740 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
/* Arithmetic/logic new_d_* constructors.  The division-family ops (Quot,
   DivMod, Div, Mod) are fragile: under PRECISE_EXC_CONTEXT they get a
   fragment array recording the local values valid at their construction
   point (see new_frag_arr).  NOTE(review): return-type lines, argument
   continuations and closing braces were lost in extraction. */
1745 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1747 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1752 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1754 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1760 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1762 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1767 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1769 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1774 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1777 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1779 #if PRECISE_EXC_CONTEXT
1780 if ((current_ir_graph->phase_state == phase_building) &&
1781 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1782 res->attr.frag_arr = new_frag_arr(res);
1789 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1792 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1794 #if PRECISE_EXC_CONTEXT
1795 if ((current_ir_graph->phase_state == phase_building) &&
1796 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1797 res->attr.frag_arr = new_frag_arr(res);
1804 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1807 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1809 #if PRECISE_EXC_CONTEXT
1810 if ((current_ir_graph->phase_state == phase_building) &&
1811 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1812 res->attr.frag_arr = new_frag_arr(res);
1819 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1822 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1824 #if PRECISE_EXC_CONTEXT
1825 if ((current_ir_graph->phase_state == phase_building) &&
1826 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1827 res->attr.frag_arr = new_frag_arr(res);
1834 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1836 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1841 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1843 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1848 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1850 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1855 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1857 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1862 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1864 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1869 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1871 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1876 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1878 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
1883 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1885 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
1890 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
1892 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
1897 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
1899 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
/* Control-flow and memory new_d_* constructors.  Call, Load and Store are
   fragile ops and get a fragment array under PRECISE_EXC_CONTEXT (note
   that Call stores it in attr.call.frag_arr, unlike Load/Store).
   NOTE(review): return-type lines and closing braces were lost in
   extraction. */
1904 new_d_Jmp (dbg_info* db)
1906 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
1910 new_d_Cond (dbg_info* db, ir_node *c)
1912 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
1916 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
1920 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
1921 store, callee, arity, in, tp);
1922 #if PRECISE_EXC_CONTEXT
1923 if ((current_ir_graph->phase_state == phase_building) &&
1924 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
1925 res->attr.call.frag_arr = new_frag_arr(res);
1932 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
1934 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
1939 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
1941 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
1946 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
1949 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
1951 #if PRECISE_EXC_CONTEXT
1952 if ((current_ir_graph->phase_state == phase_building) &&
1953 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
1954 res->attr.frag_arr = new_frag_arr(res);
1961 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
1964 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
1966 #if PRECISE_EXC_CONTEXT
1967 if ((current_ir_graph->phase_state == phase_building) &&
1968 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
1969 res->attr.frag_arr = new_frag_arr(res);
/* Remaining new_d_* constructors (Alloc, Free, Sel variants, SymConst,
   Sync, Bad/Unknown singletons, interprocedural-view nodes, Filter).
   Bad and Unknown return the per-graph singleton nodes instead of
   allocating.  NOTE(review): return-type lines, argument continuations and
   closing braces were lost in extraction; the header of new_d_Bad (before
   line 2039) is missing entirely. */
1976 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
1980 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
1981 store, size, alloc_type, where);
1982 #if PRECISE_EXC_CONTEXT
1983 if ((current_ir_graph->phase_state == phase_building) &&
1984 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
1985 res->attr.a.frag_arr = new_frag_arr(res);
1992 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1994 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
1995 store, ptr, size, free_type);
1999 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2000 /* GL: objptr was called frame before. Frame was a bad choice for the name
2001 as the operand could as well be a pointer to a dynamic object. */
/* simpleSel is a Sel with no index operands (scalar entity selection) */
2003 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2004 store, objptr, 0, NULL, ent);
2008 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2010 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2011 store, objptr, n_index, index, sel);
2015 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2017 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2018 store, objptr, ent));
2022 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2024 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->current_block,
2029 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2031 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* body of new_d_Bad: the graph's singleton Bad node */
2039 return current_ir_graph->bad;
2043 new_d_Unknown (void)
2045 return current_ir_graph->unknown;
2049 new_d_CallBegin (dbg_info *db, ir_node *call)
2052 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2057 new_d_EndReg (dbg_info *db)
2060 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2065 new_d_EndExcept (dbg_info *db)
2068 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2073 new_d_Break (dbg_info *db)
2075 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2079 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2081 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2085 /* ********************************************************************* */
2086 /* Comfortable interface with automatic Phi node construction. */
2087 /* (Uses also constructors of ?? interface, except new_Block.) */
2088 /* ********************************************************************* */
2090 /** Block construction **/
2091 /* immature Block without predecessors */
/* Creates an immature block (dynamic in-array, arity -1), makes it the
   current block and allocates its zeroed graph_arr for the automatic Phi
   construction.  NOTE(review): the declaration of `res`, the `return res;`
   and the closing brace, as well as the header of new_immBlock (whose body
   line 2120 appears below), were lost in extraction. */
2092 ir_node *new_d_immBlock (dbg_info* db) {
2095 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2096 /* creates a new dynamic in-array as length of in is -1 */
2097 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2098 current_ir_graph->current_block = res;
2099 res->attr.block.matured = 0;
2100 res->attr.block.exc = exc_normal;
2101 res->attr.block.handler_entry = 0;
2102 res->attr.block.backedge = NULL;
2103 res->attr.block.in_cg = NULL;
2104 res->attr.block.cg_backedge = NULL;
2105 set_Block_block_visited(res, 0);
2107 /* Create and initialize array for Phi-node construction. */
2108 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2109 current_ir_graph->n_loc);
2110 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2112 /* Immature block may not be optimized! */
/* body of new_immBlock: the no-debug-info convenience wrapper */
2120 return new_d_immBlock(NULL);
2123 /* add an edge to a jmp/control flow node */
/* Appends `jmp` as a new control-flow predecessor of the (still immature)
   `block`.  NOTE(review): the return-type line and closing braces were
   lost in extraction. */
2125 add_in_edge (ir_node *block, ir_node *jmp)
2127 if (block->attr.block.matured) {
2128 assert(0 && "Error: Block already matured!\n");
2131 assert (jmp != NULL);
2132 ARR_APP1 (ir_node *, block->in, jmp);
2136 /* changing the current block */
/* Makes `target` the current block of current_ir_graph; subsequent new_d_*
   constructors place their nodes there.  NOTE(review): the return-type
   line and closing brace were lost in extraction. */
2138 switch_block (ir_node *target)
2140 current_ir_graph->current_block = target;
2143 /* ************************ */
2144 /* parameter administration */
/* Local-variable access during construction: slot 0 of graph_arr holds the
   current store (memory state), user variables live at pos+1.  get_d_value
   and get_store bump the visited counter before walking blocks so the
   Phi-construction recursion in get_r_value_internal starts fresh.
   NOTE(review): return-type lines, the header of get_store and closing
   braces were lost in extraction. */
2146 /* get a value from the parameter array from the current block by its index */
2148 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2150 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2151 inc_irg_visited(current_ir_graph);
/* +1: slot 0 is reserved for the store */
2153 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2155 /* get a value from the parameter array from the current block by its index */
2157 get_value (int pos, ir_mode *mode)
2159 return get_d_value(NULL, pos, mode);
2162 /* set a value at position pos in the parameter array from the current block */
2164 set_value (int pos, ir_node *value)
2166 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2167 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2170 /* get the current store */
2174 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2175 /* GL: one could call get_value instead */
2176 inc_irg_visited(current_ir_graph);
2177 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2180 /* set the current store */
2182 set_store (ir_node *store)
2184 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2185 /* GL: one could call set_value instead */
2186 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* keep_alive: adds ka to the End node's keep-alive list so it survives
   dead-node elimination */
2190 keep_alive (ir_node *ka)
2192 add_End_keepalive(current_ir_graph->end, ka);
2195 /** Useful access routines **/
/* Thin accessors over current_ir_graph.  NOTE(review): closing braces were
   lost in extraction. */
2196 /* Returns the current block of the current graph. To set the current
2197 block use switch_block(). */
2198 ir_node *get_cur_block() {
2199 return get_irg_current_block(current_ir_graph);
2202 /* Returns the frame type of the current graph */
2203 type *get_cur_frame_type() {
2204 return get_irg_frame_type(current_ir_graph);
2208 /* ********************************************************************* */
2211 /* call once for each run of the library */
/* Ends the construction phase of `irg`: after this, the phase-building
   asserts in the routines above reject further use of the comfortable
   interface.  NOTE(review): the return-type line and closing brace were
   lost in extraction. */
2217 /* call for each graph */
2219 finalize_cons (ir_graph *irg) {
2220 irg->phase_state = phase_high;
/* Convenience constructors without debug info: each new_X simply forwards
   to new_d_X with a NULL dbg_info.  NOTE(review): closing braces and some
   parameter continuation lines were lost in extraction. */
2224 ir_node *new_Block(int arity, ir_node **in) {
2225 return new_d_Block(NULL, arity, in);
2227 ir_node *new_Start (void) {
2228 return new_d_Start(NULL);
2230 ir_node *new_End (void) {
2231 return new_d_End(NULL);
2233 ir_node *new_Jmp (void) {
2234 return new_d_Jmp(NULL);
2236 ir_node *new_Cond (ir_node *c) {
2237 return new_d_Cond(NULL, c);
2239 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2240 return new_d_Return(NULL, store, arity, in);
2242 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2243 return new_d_Raise(NULL, store, obj);
2245 ir_node *new_Const (ir_mode *mode, tarval *con) {
2246 return new_d_Const(NULL, mode, con);
2248 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2249 return new_d_SymConst(NULL, value, kind);
2251 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2252 return new_d_simpleSel(NULL, store, objptr, ent);
2254 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2256 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2258 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2259 return (new_d_InstOf (NULL, store, objptr, ent));
2261 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2263 return new_d_Call(NULL, store, callee, arity, in, tp);
2265 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2266 return new_d_Add(NULL, op1, op2, mode);
2268 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2269 return new_d_Sub(NULL, op1, op2, mode);
2271 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2272 return new_d_Minus(NULL, op, mode);
2274 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2275 return new_d_Mul(NULL, op1, op2, mode);
/* Convenience constructors without debug info, continued (arithmetic,
   bitwise, shift, compare, conversion and Phi).  new_Rotate is a plain
   alias for new_Rot.  NOTE(review): closing braces were lost in
   extraction. */
2277 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2278 return new_d_Quot(NULL, memop, op1, op2);
2280 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2281 return new_d_DivMod(NULL, memop, op1, op2);
2283 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2284 return new_d_Div(NULL, memop, op1, op2);
2286 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2287 return new_d_Mod(NULL, memop, op1, op2);
2289 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2290 return new_d_Abs(NULL, op, mode);
2292 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2293 return new_d_And(NULL, op1, op2, mode);
2295 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2296 return new_d_Or(NULL, op1, op2, mode);
2298 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2299 return new_d_Eor(NULL, op1, op2, mode);
2301 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2302 return new_d_Not(NULL, op, mode);
2304 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2305 return new_d_Shl(NULL, op, k, mode);
2307 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2308 return new_d_Shr(NULL, op, k, mode);
2310 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2311 return new_d_Shrs(NULL, op, k, mode);
2313 #define new_Rotate new_Rot
2314 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2315 return new_d_Rot(NULL, op, k, mode);
2317 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2318 return new_d_Cmp(NULL, op1, op2);
2320 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2321 return new_d_Conv(NULL, op, mode);
2323 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2324 return new_d_Phi(NULL, arity, in, mode);
/* Convenience constructors without debug info, continued (memory ops,
   Proj/Tuple/Id, singletons and interprocedural-view nodes).
   NOTE(review): closing braces, the body of new_Bad and some parameter
   continuation lines were lost in extraction. */
2326 ir_node *new_Load (ir_node *store, ir_node *addr) {
2327 return new_d_Load(NULL, store, addr);
2329 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2330 return new_d_Store(NULL, store, addr, val);
2332 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2333 where_alloc where) {
2334 return new_d_Alloc(NULL, store, size, alloc_type, where);
2336 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2338 return new_d_Free(NULL, store, ptr, size, free_type);
2340 ir_node *new_Sync (int arity, ir_node **in) {
2341 return new_d_Sync(NULL, arity, in);
2343 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2344 return new_d_Proj(NULL, arg, mode, proj);
2346 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2347 return new_d_defaultProj(NULL, arg, max_proj);
2349 ir_node *new_Tuple (int arity, ir_node **in) {
2350 return new_d_Tuple(NULL, arity, in);
2352 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2353 return new_d_Id(NULL, val, mode);
2355 ir_node *new_Bad (void) {
2358 ir_node *new_Unknown(void) {
2359 return new_d_Unknown();
2361 ir_node *new_CallBegin (ir_node *callee) {
2362 return new_d_CallBegin(NULL, callee);
2364 ir_node *new_EndReg (void) {
2365 return new_d_EndReg(NULL);
2367 ir_node *new_EndExcept (void) {
2368 return new_d_EndExcept(NULL);
2370 ir_node *new_Break (void) {
2371 return new_d_Break(NULL);
2373 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2374 return new_d_Filter(NULL, arg, mode, proj);