1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
4 * Authors: Martin Trapp, Christian Schaefer
6 * ircons.c: basic and more detailed irnode constructors
7 * store, block and parameter administration.
8 * Adapted to extended FIRM nodes (exceptions...) and commented
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
32 #if USE_EXPLICIT_PHI_IN_STACK
33 /* A stack needed for the automatic Phi node construction in constructor
34 Phi_in. Redefinition in irgraph.c!! */
/* Opaque here: only the typedef is visible; the struct layout is owned by
   irgraph.c, so all access in this file goes through pointers. */
39 typedef struct Phi_in_stack Phi_in_stack;
42 /*** ******************************************** */
43 /** privat interfaces, for professional use only */
45 /* Constructs a Block with a fixed number of predecessors.
46 Does not set current_block. Can not be used with automatic
47 Phi node construction. */
49 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* A Block is its own "block": the block argument of new_ir_node is NULL. */
53 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* Fixed-arity blocks are born mature (all control predecessors known)
   and not yet visited by any traversal. */
54 set_Block_matured(res, 1);
55 set_Block_block_visited(res, 0);
/* Exception handling and interprocedural-view attributes start neutral. */
57 res->attr.block.exc = exc_normal;
58 res->attr.block.handler_entry = 0;
59 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
60 res->attr.block.in_cg = NULL;
61 res->attr.block.cg_backedge = NULL;
/* Start node: no predecessors, produces a tuple (mode_T) of initial values. */
68 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
72 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
/* End node: dynamic arity (-1 requests a flexible in-array) so keep-alive
   edges can be appended later. */
79 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
83 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
89 /* Creates a Phi node with all predecessors. Calling this constructor
90 is only allowed if the corresponding block is mature. */
92 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* A Phi must have exactly one data predecessor per control predecessor
   of its block; both invariants are checked here. */
96 assert( get_Block_matured(block) );
97 assert( get_irn_arity(block) == arity );
99 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
101 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* optimize() may fold the Phi away and return an equivalent node. */
103 res = optimize (res);
106 /* Memory Phis in endless loops must be kept alive.
107 As we can't distinguish these easily we keep all of them alive. */
108 if ((res->op == op_Phi) && (mode == mode_M))
109 add_End_keepalive(irg->end, res);
/* Const: zero-ary node carrying a target value (tarval). */
114 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
117 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
119 res = optimize (res);
123 res = local_optimize_newby (res);
/* Id: identity wrapper around val; normally optimized away immediately. */
130 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
132 ir_node *in[1] = {val};
134 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
135 res = optimize (res);
/* Proj: selects result number 'proj' from the mode_T tuple node 'arg'. */
141 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
144 ir_node *in[1] = {arg};
146 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
147 res->attr.proj = proj;
/* Sanity: the projected node and its block must exist. */
150 assert(get_Proj_pred(res));
151 assert(get_nodes_Block(get_Proj_pred(res)));
153 res = optimize (res);
/* defaultProj: the "otherwise" exit of a Cond switching on an unsigned
   selector; marks the Cond as fragmentary and records the default number. */
161 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
165 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
166 arg->attr.c.kind = fragmentary;
167 arg->attr.c.default_proj = max_proj;
168 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Conv: converts op to the given mode. */
173 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
175 ir_node *in[1] = {op};
177 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
178 res = optimize (res);
/* Tuple: groups arbitrary nodes into a mode_T value, accessed via Proj. */
185 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
189 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
190 res = optimize (res);
/* The arithmetic/logic constructors below all follow the same pattern:
   build a small in-array on the C stack, allocate the node with the
   matching opcode, and run optimize() which may return a simplified
   equivalent node instead.  Div/Mod/DivMod/Quot additionally take a
   memory operand and yield a mode_T tuple (result + memory + exception). */
196 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
197 ir_node *op1, ir_node *op2, ir_mode *mode)
199 ir_node *in[2] = {op1, op2};
201 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
202 res = optimize (res);
208 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
209 ir_node *op1, ir_node *op2, ir_mode *mode)
211 ir_node *in[2] = {op1, op2};
213 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
214 res = optimize (res);
220 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
221 ir_node *op, ir_mode *mode)
223 ir_node *in[1] = {op};
225 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
226 res = optimize (res);
232 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
233 ir_node *op1, ir_node *op2, ir_mode *mode)
235 ir_node *in[2] = {op1, op2};
237 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
238 res = optimize (res);
/* Quot/DivMod/Div/Mod: memory-dependent, may raise division exceptions. */
244 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
245 ir_node *memop, ir_node *op1, ir_node *op2)
247 ir_node *in[3] = {memop, op1, op2};
249 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
250 res = optimize (res);
256 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
257 ir_node *memop, ir_node *op1, ir_node *op2)
259 ir_node *in[3] = {memop, op1, op2};
261 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
262 res = optimize (res);
268 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
269 ir_node *memop, ir_node *op1, ir_node *op2)
271 ir_node *in[3] = {memop, op1, op2};
273 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
274 res = optimize (res);
280 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
281 ir_node *memop, ir_node *op1, ir_node *op2)
283 ir_node *in[3] = {memop, op1, op2};
285 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
286 res = optimize (res);
292 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
293 ir_node *op1, ir_node *op2, ir_mode *mode)
295 ir_node *in[2] = {op1, op2};
297 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
298 res = optimize (res);
304 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
305 ir_node *op1, ir_node *op2, ir_mode *mode)
307 ir_node *in[2] = {op1, op2};
309 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
310 res = optimize (res);
316 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
317 ir_node *op1, ir_node *op2, ir_mode *mode)
319 ir_node *in[2] = {op1, op2};
321 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
322 res = optimize (res);
328 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
329 ir_node *op, ir_mode *mode)
331 ir_node *in[1] = {op};
333 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
334 res = optimize (res);
/* Shift family: second operand k is the shift/rotate amount. */
340 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
341 ir_node *op, ir_node *k, ir_mode *mode)
343 ir_node *in[2] = {op, k};
345 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
346 res = optimize (res);
352 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
353 ir_node *op, ir_node *k, ir_mode *mode)
355 ir_node *in[2] = {op, k};
357 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
358 res = optimize (res);
364 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
365 ir_node *op, ir_node *k, ir_mode *mode)
367 ir_node *in[2] = {op, k};
369 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
370 res = optimize (res);
376 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
377 ir_node *op, ir_node *k, ir_mode *mode)
379 ir_node *in[2] = {op, k};
381 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
382 res = optimize (res);
388 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
389 ir_node *op, ir_mode *mode)
391 ir_node *in[1] = {op};
393 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
394 res = optimize (res);
/* Cmp: yields a mode_T tuple of relation results, projected per relation. */
400 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
401 ir_node *op1, ir_node *op2)
403 ir_node *in[2] = {op1, op2};
405 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
406 res = optimize (res);
/* Jmp: unconditional control flow, no data predecessors. */
412 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
416 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, in);
417 res = optimize (res);
/* Cond: branches on c; starts as a dense switch with default projection 0. */
423 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
425 ir_node *in[1] = {c};
427 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
428 res->attr.c.kind = dense;
429 res->attr.c.default_proj = 0;
430 res = optimize (res);
/* Call: in-array layout is [store, callee, arg0..argN-1]; the extra two
   slots are prepended before copying the caller-supplied arguments. */
436 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
437 ir_node *callee, int arity, ir_node **in, type *tp)
444 NEW_ARR_A (ir_node *, r_in, r_arity);
447 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
449 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type; callee_arr (possible callees for
   interprocedural analysis) starts unknown. */
451 assert(is_method_type(tp));
452 set_Call_type(res, tp);
453 res->attr.call.callee_arr = NULL;
454 res = optimize (res);
/* Return: in-array layout is [store, res0..resN-1]. */
460 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
461 ir_node *store, int arity, ir_node **in)
468 NEW_ARR_A (ir_node *, r_in, r_arity);
470 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
471 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
472 res = optimize (res);
/* Raise: throws obj, threading the memory state through. */
478 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
480 ir_node *in[2] = {store, obj};
482 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
483 res = optimize (res);
/* Load: reads from adr under memory state store; mode_T tuple result. */
489 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
490 ir_node *store, ir_node *adr)
492 ir_node *in[2] = {store, adr};
494 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
496 res = optimize (res);
/* Store: writes val to adr under memory state store. */
502 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
503 ir_node *store, ir_node *adr, ir_node *val)
505 ir_node *in[3] = {store, adr, val};
507 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
509 res = optimize (res);
/* Alloc: allocates 'size' units of alloc_type; 'where' picks stack vs heap. */
516 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
517 ir_node *size, type *alloc_type, where_alloc where)
519 ir_node *in[2] = {store, size};
521 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
523 res->attr.a.where = where;
524 res->attr.a.type = alloc_type;
526 res = optimize (res);
/* Free: releases ptr of free_type; size is carried along as an operand. */
532 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
533 ir_node *ptr, ir_node *size, type *free_type)
535 ir_node *in[3] = {store, ptr, size};
537 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
539 res->attr.f = free_type;
541 res = optimize (res);
/* Sel: address computation for entity 'ent' relative to objptr, with
   'arity' index operands; in-array layout is [store, objptr, idx0..]. */
547 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
548 int arity, ir_node **in, entity *ent)
555 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
558 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
559 res = new_ir_node (db, irg, block, op_Sel, mode_P, r_arity, r_in);
561 res->attr.s.ent = ent;
563 res = optimize (res);
/* InstOf: dynamic type test of objptr against 'ent'. */
569 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
570 ir_node *objptr, type *ent)
577 NEW_ARR_A (ir_node *, r_in, r_arity);
/* NOTE(review): this builds the InstOf with op_Sel, not a dedicated
   InstOf opcode — presumably intentional reuse at this stage; confirm. */
581 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
583 res->attr.io.ent = ent;
585 /* res = optimize (res);
/* SymConst: symbolic constant.  For linkage_ptr_info 'value' is an ident
   (a linker symbol); for type_tag/size it is a type whose tag/size is
   resolved later. */
591 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
592 symconst_kind symkind)
597 if (symkind == linkage_ptr_info)
601 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, in);
603 res->attr.i.num = symkind;
604 if (symkind == linkage_ptr_info) {
605 res->attr.i.tori.ptrinfo = (ident *)value;
607 assert ( ( (symkind == type_tag)
608 || (symkind == size))
609 && (is_type(value)));
610 res->attr.i.tori.typ = (type *)value;
612 res = optimize (res);
/* Sync: joins several memory states into one (mode_M). */
618 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
622 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
624 res = optimize (res);
/* Bad and Unknown are singletons cached on the current graph. */
632 return current_ir_graph->bad;
638 return current_ir_graph->unknown;
/* CallBegin: interprocedural-view marker for a call site; its single
   predecessor is the callee address taken from the Call node. */
642 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
644 ir_node *in[1] = { get_Call_ptr(call) };
646 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
647 res->attr.callbegin.irg = irg;
648 res->attr.callbegin.call = call;
649 res = optimize (res);
/* EndReg / EndExcept: interprocedural end nodes for regular and
   exceptional control flow; dynamic arity (-1) like End. */
655 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
659 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
660 res->attr.end.irg = irg;
667 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
671 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
672 res->attr.end.irg = irg;
/* Break: jump out of an interprocedurally inlined region. */
679 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
683 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, in);
684 res = optimize (res);
/* Filter: interprocedural analogue of Proj; also carries call-graph
   in-array and backedge info, initialized empty. */
690 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
693 ir_node *in[1] = {arg};
695 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
696 res->attr.filter.proj = proj;
697 res->attr.filter.in_cg = NULL;
698 res->attr.filter.backedge = NULL;
701 assert(get_Proj_pred(res));
702 assert(get_nodes_Block(get_Proj_pred(res)));
704 res = optimize (res);
/* --- new_r_* convenience wrappers ---------------------------------------
   Each simply forwards to the corresponding new_rd_* constructor with a
   NULL dbg_info.  They contain no logic of their own. */
711 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
712 return new_rd_Block(NULL, irg, arity, in);
714 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
715 return new_rd_Start(NULL, irg, block);
717 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
718 return new_rd_End(NULL, irg, block);
720 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
721 return new_rd_Jmp(NULL, irg, block);
723 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
724 return new_rd_Cond(NULL, irg, block, c);
726 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
727 ir_node *store, int arity, ir_node **in) {
728 return new_rd_Return(NULL, irg, block, store, arity, in);
730 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
731 ir_node *store, ir_node *obj) {
732 return new_rd_Raise(NULL, irg, block, store, obj);
734 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
735 ir_mode *mode, tarval *con) {
736 return new_rd_Const(NULL, irg, block, mode, con);
738 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
739 type_or_id_p value, symconst_kind symkind) {
740 return new_rd_SymConst(NULL, irg, block, value, symkind);
742 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
743 ir_node *objptr, int n_index, ir_node **index,
745 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
747 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
749 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
751 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
752 ir_node *callee, int arity, ir_node **in,
754 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
756 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
757 ir_node *op1, ir_node *op2, ir_mode *mode) {
758 return new_rd_Add(NULL, irg, block, op1, op2, mode);
760 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
761 ir_node *op1, ir_node *op2, ir_mode *mode) {
762 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
764 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
765 ir_node *op, ir_mode *mode) {
766 return new_rd_Minus(NULL, irg, block, op, mode);
768 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
769 ir_node *op1, ir_node *op2, ir_mode *mode) {
770 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
772 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
773 ir_node *memop, ir_node *op1, ir_node *op2) {
774 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
776 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
777 ir_node *memop, ir_node *op1, ir_node *op2) {
778 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
780 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
781 ir_node *memop, ir_node *op1, ir_node *op2) {
782 return new_rd_Div(NULL, irg, block, memop, op1, op2);
784 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
785 ir_node *memop, ir_node *op1, ir_node *op2) {
786 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
788 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
789 ir_node *op, ir_mode *mode) {
790 return new_rd_Abs(NULL, irg, block, op, mode);
792 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
793 ir_node *op1, ir_node *op2, ir_mode *mode) {
794 return new_rd_And(NULL, irg, block, op1, op2, mode);
796 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
797 ir_node *op1, ir_node *op2, ir_mode *mode) {
798 return new_rd_Or(NULL, irg, block, op1, op2, mode);
800 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
801 ir_node *op1, ir_node *op2, ir_mode *mode) {
802 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
804 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
805 ir_node *op, ir_mode *mode) {
806 return new_rd_Not(NULL, irg, block, op, mode);
808 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
809 ir_node *op1, ir_node *op2) {
810 return new_rd_Cmp(NULL, irg, block, op1, op2);
812 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
813 ir_node *op, ir_node *k, ir_mode *mode) {
814 return new_rd_Shl(NULL, irg, block, op, k, mode);
816 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
817 ir_node *op, ir_node *k, ir_mode *mode) {
818 return new_rd_Shr(NULL, irg, block, op, k, mode);
820 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
821 ir_node *op, ir_node *k, ir_mode *mode) {
822 return new_rd_Shrs(NULL, irg, block, op, k, mode);
824 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
825 ir_node *op, ir_node *k, ir_mode *mode) {
826 return new_rd_Rot(NULL, irg, block, op, k, mode);
828 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
829 ir_node *op, ir_mode *mode) {
830 return new_rd_Conv(NULL, irg, block, op, mode);
832 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
833 ir_node **in, ir_mode *mode) {
834 return new_rd_Phi(NULL, irg, block, arity, in, mode);
836 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
837 ir_node *store, ir_node *adr) {
838 return new_rd_Load(NULL, irg, block, store, adr);
840 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
841 ir_node *store, ir_node *adr, ir_node *val) {
842 return new_rd_Store(NULL, irg, block, store, adr, val);
844 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
845 ir_node *size, type *alloc_type, where_alloc where) {
846 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
848 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
849 ir_node *ptr, ir_node *size, type *free_type) {
850 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
852 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
853 return new_rd_Sync(NULL, irg, block, arity, in);
855 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
856 ir_mode *mode, long proj) {
857 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
859 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
861 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
863 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
864 int arity, ir_node **in) {
865 return new_rd_Tuple(NULL, irg, block, arity, in );
867 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
868 ir_node *val, ir_mode *mode) {
869 return new_rd_Id(NULL, irg, block, val, mode);
871 INLINE ir_node *new_r_Bad () {
874 INLINE ir_node *new_r_Unknown () {
875 return new_rd_Unknown();
877 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
878 return new_rd_CallBegin(NULL, irg, block, callee);
880 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
881 return new_rd_EndReg(NULL, irg, block);
883 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
884 return new_rd_EndExcept(NULL, irg, block);
886 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
887 return new_rd_Break(NULL, irg, block);
889 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
890 ir_mode *mode, long proj) {
891 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
895 /** ********************/
896 /** public interfaces */
897 /** construction tools */
901 * - create a new Start node in the current block
903 * @return s - pointer to the created Start node
/* new_d_Start / new_d_End build on the implicit current block of
   current_ir_graph instead of taking an explicit block argument. */
908 new_d_Start (dbg_info* db)
912 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
913 op_Start, mode_T, 0, NULL);
915 res = optimize (res);
921 new_d_End (dbg_info* db)
924 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
925 op_End, mode_X, -1, NULL);
926 res = optimize (res);
932 /* Constructs a Block with a fixed number of predecessors.
933 Does set current_block. Can be used with automatic Phi
934 node construction. */
936 new_d_Block (dbg_info* db, int arity, ir_node **in)
940 res = new_rd_Block (db, current_ir_graph, arity, in);
942 /* Create and initialize array for Phi-node construction. */
/* graph_arr holds one slot per local variable (n_loc); NULL means
   "no definition of this value known in this block yet". */
943 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
944 current_ir_graph->n_loc);
945 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
947 res = optimize (res);
948 current_ir_graph->current_block = res;
955 /* ***********************************************************************/
956 /* Methods necessary for automatic Phi node creation */
958 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
959 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
960 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
961 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
963 Call Graph: ( A ---> B == A "calls" B)
965 get_value mature_block
973 get_r_value_internal |
977 new_rd_Phi0 new_rd_Phi_in
979 * *************************************************************************** */
981 /* Creates a Phi node with 0 predecessors */
/* Placeholder Phi for values in not-yet-mature blocks; completed (or
   turned into an Id) when the block is matured. */
982 static INLINE ir_node *
983 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
986 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
991 /* There are two implementations of the Phi node construction. The first
992 is faster, but does not work for blocks with more than 2 predecessors.
993 The second works always but is slower and causes more unnecessary Phi
995 Select the implementations by the following preprocessor flag set in
997 #if USE_FAST_PHI_CONSTRUCTION
999 /* This is a stack used for allocating and deallocating nodes in
1000 new_rd_Phi_in. The original implementation used the obstack
1001 to model this stack, now it is explicit. This reduces side effects.
1003 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocate an empty, growable stack of recyclable Phi nodes. */
1004 INLINE Phi_in_stack *
1005 new_Phi_in_stack() {
1008 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1010 res->stack = NEW_ARR_F (ir_node *, 1);
/* Release the stack's flexible array (nodes themselves live on the obstack). */
1017 free_Phi_in_stack(Phi_in_stack *s) {
1018 DEL_ARR_F(s->stack);
/* Push a discarded Phi for later reuse; grow the array only when full. */
1022 free_to_Phi_in_stack(ir_node *phi) {
1023 assert(get_irn_opcode(phi) == iro_Phi);
1025 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1026 current_ir_graph->Phi_in_stack->pos)
1027 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1029 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1031 (current_ir_graph->Phi_in_stack->pos)++;
/* Pop a recycled Phi and re-initialize it, or allocate a fresh node if
   the stack is empty. */
1034 static INLINE ir_node *
1035 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1036 int arity, ir_node **in) {
1038 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1039 int pos = current_ir_graph->Phi_in_stack->pos;
1043 /* We need to allocate a new node */
/* NOTE(review): 'db' is used here but is not among this function's
   visible parameters — verify it is in scope or should be NULL. */
1044 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1045 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1047 /* reuse the old node and initialize it again. */
1050 assert (res->kind == k_ir_node);
1051 assert (res->op == op_Phi);
1055 assert (arity >= 0);
1056 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] stays reserved for the block edge; only data preds are copied. */
1057 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1059 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1061 (current_ir_graph->Phi_in_stack->pos)--;
1065 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1067 /* Creates a Phi node with a given, fixed array **in of predecessors.
1068 If the Phi node is unnecessary, as the same value reaches the block
1069 through all control flow paths, it is eliminated and the value
1070 returned directly. This constructor is only intended for use in
1071 the automatic Phi node generation triggered by get_value or mature.
1072 The implementation is quite tricky and depends on the fact, that
1073 the nodes are allocated on a stack:
1074 The in array contains predecessors and NULLs. The NULLs appear,
1075 if get_r_value_internal, that computed the predecessors, reached
1076 the same block on two paths. In this case the same value reaches
1077 this block on both paths, there is no definition in between. We need
1078 not allocate a Phi where these path's merge, but we have to communicate
1079 this fact to the caller. This happens by returning a pointer to the
1080 node the caller _will_ allocate. (Yes, we predict the address. We can
1081 do so because the nodes are allocated on the obstack.) The caller then
1082 finds a pointer to itself and, when this routine is called again,
1085 static INLINE ir_node *
1086 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1087 ir_node **in, int ins)
1090 ir_node *res, *known;
1092 /* allocate a new node on the obstack.
1093 This can return a node to which some of the pointers in the in-array
1095 Attention: the constructor copies the in array, i.e., the later changes
1096 to the array in this routine do not affect the constructed node! If
1097 the in array contains NULLs, there will be missing predecessors in the
1099 Is this a possible internal state of the Phi node generation? */
1100 #if USE_EXPLICIT_PHI_IN_STACK
1101 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1103 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1104 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1106 /* The in-array can contain NULLs. These were returned by
1107 get_r_value_internal if it reached the same block/definition on a
1109 The NULLs are replaced by the node itself to simplify the test in the
1111 for (i=0; i < ins; ++i)
1112 if (in[i] == NULL) in[i] = res;
1114 /* This loop checks whether the Phi has more than one predecessor.
1115 If so, it is a real Phi node and we break the loop. Else the
1116 Phi node merges the same definition on several paths and therefore
1118 for (i=0; i < ins; ++i)
1120 if (in[i]==res || in[i]==known) continue;
1128 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Deallocate the speculative Phi: either recycle via the explicit stack
   or pop it off the obstack (valid because it was the last allocation). */
1130 #if USE_EXPLICIT_PHI_IN_STACK
1131 free_to_Phi_in_stack(res);
1133 obstack_free (current_ir_graph->obst, res);
1137 res = optimize (res);
1141 /* return the pointer to the Phi node. This node might be deallocated! */
1146 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1149 allocates and returns this node. The routine called to allocate the
1150 node might optimize it away and return a real value, or even a pointer
1151 to a deallocated Phi node on top of the obstack!
1152 This function is called with an in-array of proper size. **/
/* Collects the value at variable slot 'pos' from every control
   predecessor of 'block' into nin, then builds (or elides) a Phi. */
1154 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1156 ir_node *prevBlock, *res;
1159 /* This loop goes to all predecessor blocks of the block the Phi node is in
1160 and there finds the operands of the Phi node by calling
1161 get_r_value_internal. */
/* block->in[0] is the block's own slot; control predecessors start at 1. */
1162 for (i = 1; i <= ins; ++i) {
1163 assert (block->in[i]);
1164 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1166 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1169 /* After collecting all predecessors into the array nin a new Phi node
1170 with these predecessors is created. This constructor contains an
1171 optimization: If all predecessors of the Phi node are identical it
1172 returns the only operand instead of a new Phi node. If the value
1173 passes two different control flow edges without being defined, and
1174 this is the second path treated, a pointer to the node that will be
1175 allocated for the first path (recursion) is returned. We already
1176 know the address of this node, as it is the next node to be allocated
1177 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1178 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1180 /* Now we now the value for "pos" and can enter it in the array with
1181 all known local variables. Attention: this might be a pointer to
1182 a node, that later will be allocated!!! See new_rd_Phi_in.
1183 If this is called in mature, after some set_value in the same block,
1184 the proper value must not be overwritten:
1186 get_value (makes Phi0, put's it into graph_arr)
1187 set_value (overwrites Phi0 in graph_arr)
1188 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1191 if (!block->attr.block.graph_arr[pos]) {
1192 block->attr.block.graph_arr[pos] = res;
1194 /* printf(" value already computed by %s\n",
1195 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
1201 /* This function returns the last definition of a variable. In case
1202 this variable was last defined in a previous block, Phi nodes are
1203 inserted. If the part of the firm graph containing the definition
1204 is not yet constructed, a dummy Phi node is returned. */
1206 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1209 /* There are 4 cases to treat.
1211 1. The block is not mature and we visit it the first time. We can not
1212 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1213 predecessors is returned. This node is added to the linked list (field
1214 "link") of the containing block to be completed when this block is
1215 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1218 2. The value is already known in this block, graph_arr[pos] is set and we
1219 visit the block the first time. We can return the value without
1220 creating any new nodes.
1222 3. The block is mature and we visit it the first time. A Phi node needs
1223 to be created (phi_merge). If the Phi is not needed, as all it's
1224 operands are the same value reaching the block through different
1225 paths, it's optimized away and the value itself is returned.
1227 4. The block is mature, and we visit it the second time. Now two
1228 subcases are possible:
1229 * The value was computed completely the last time we were here. This
1230 is the case if there is no loop. We can return the proper value.
1231 * The recursion that visited this node and set the flag did not
1232 return yet. We are computing a value in a loop and need to
1233 break the recursion without knowing the result yet.
1234 @@@ strange case. Straight forward we would create a Phi before
1235 starting the computation of it's predecessors. In this case we will
1236 find a Phi here in any case. The problem is that this implementation
1237 only creates a Phi after computing the predecessors, so that it is
1238 hard to compute self references of this Phi. @@@
1239 There is no simple check for the second subcase. Therefore we check
1240 for a second visit and treat all such cases as the second subcase.
1241 Anyways, the basic situation is the same: we reached a block
1242 on two paths without finding a definition of the value: No Phi
1243 nodes are needed on both paths.
1244 We return this information "Two paths, no Phi needed" by a very tricky
1245 implementation that relies on the fact that an obstack is a stack and
1246 will return a node with the same address on different allocations.
1247 Look also at phi_merge and new_rd_phi_in to understand this.
1248 @@@ Unfortunately this does not work, see testprogram
1249 three_cfpred_example.
1253 /* case 4 -- already visited. */
1254 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1256 /* visited the first time */
1257 set_irn_visited(block, get_irg_visited(current_ir_graph));
1259 /* Get the local valid value */
1260 res = block->attr.block.graph_arr[pos];
1262 /* case 2 -- If the value is actually computed, return it. */
1263 if (res) { return res;};
1265 if (block->attr.block.matured) { /* case 3 */
1267 /* The Phi has the same amount of ins as the corresponding block. */
1268 int ins = get_irn_arity(block);
1270 NEW_ARR_A (ir_node *, nin, ins);
1272 /* Phi merge collects the predecessors and then creates a node. */
1273 res = phi_merge (block, pos, mode, nin, ins);
1275 } else { /* case 1 */
1276 /* The block is not mature, we don't know how many in's are needed. A Phi
1277 with zero predecessors is created. Such a Phi node is called Phi0
1278 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1279 to the list of Phi0 nodes in this block to be matured by mature_block
1281 The Phi0 has to remember the pos of it's internal value. If the real
1282 Phi is computed, pos is used to update the array with the local
1285 res = new_rd_Phi0 (current_ir_graph, block, mode);
1286 res->attr.phi0_pos = pos;
1287 res->link = block->link;
1291 /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): this string literal appears to continue onto another
   line — multi-line string literals are non-standard C; verify the
   original uses a continuation or concatenated literals. */
1294 printf("Error: no value set. Use of undefined variable. Initializing
1296 assert (mode->code >= irm_F && mode->code <= irm_P);
1297 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1298 tarval_mode_null[mode->code]);
1301 /* The local valid value is available now. */
1302 block->attr.block.graph_arr[pos] = res;
1310 it starts the recursion. This causes an Id at the entry of
1311 every block that has no definition of the value! **/
1313 #if USE_EXPLICIT_PHI_IN_STACK
1315 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1316 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
1319 static INLINE ir_node *
1320 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1321 ir_node **in, int ins)
1324 ir_node *res, *known;
1326 /* Allocate a new node on the obstack. The allocation copies the in
1328 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1329 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1331 /* This loop checks whether the Phi has more than one predecessor.
1332 If so, it is a real Phi node and we break the loop. Else the
1333 Phi node merges the same definition on several paths and therefore
1334 is not needed. Don't consider Bad nodes! */
1336 for (i=0; i < ins; ++i)
1340 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1348 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1351 obstack_free (current_ir_graph->obst, res);
1354 /* A undefined value, e.g., in unreachable code. */
1358 res = optimize (res);
1360 /* Memory Phis in endless loops must be kept alive.
1361 As we can't distinguish these easily we keep all of the alive. */
1362 if ((res->op == op_Phi) && (mode == mode_M))
1363 add_End_keepalive(irg->end, res);
1370 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1372 #if PRECISE_EXC_CONTEXT
1374 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1376 static INLINE ir_node **
1377 new_frag_arr (ir_node *n) {
1380 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1381 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1382 sizeof(ir_node *)*current_ir_graph->n_loc);
1383 /* turn off optimization before allocating Proj nodes, as res isn't
1385 opt = get_optimize(); set_optimize(0);
1386 /* Here we rely on the fact that all frag ops have Memory as first result! */
1387 if (get_irn_op(n) == op_Call)
1388 arr[0] = new_Proj(n, mode_M, 3);
1390 arr[0] = new_Proj(n, mode_M, 0);
1392 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1396 static INLINE ir_node **
1397 get_frag_arr (ir_node *n) {
1398 if (get_irn_op(n) == op_Call) {
1399 return n->attr.call.frag_arr;
1400 } else if (get_irn_op(n) == op_Alloc) {
1401 return n->attr.a.frag_arr;
1403 return n->attr.frag_arr;
1408 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1409 if (!frag_arr[pos]) frag_arr[pos] = val;
1410 if (frag_arr[current_ir_graph->n_loc - 1])
1411 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
1415 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1419 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1421 frag_arr = get_frag_arr(cfOp);
1422 res = frag_arr[pos];
1424 if (block->attr.block.graph_arr[pos]) {
1425 /* There was a set_value after the cfOp and no get_value before that
1426 set_value. We must build a Phi node now. */
1427 if (block->attr.block.matured) {
1428 int ins = get_irn_arity(block);
1430 NEW_ARR_A (ir_node *, nin, ins);
1431 res = phi_merge(block, pos, mode, nin, ins);
1433 res = new_rd_Phi0 (current_ir_graph, block, mode);
1434 res->attr.phi0_pos = pos;
1435 res->link = block->link;
1439 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1440 but this should be better: (remove comment if this works) */
1441 /* It's a Phi, we can write this into all graph_arrs with NULL */
1442 set_frag_value(block->attr.block.graph_arr, pos, res);
1444 res = get_r_value_internal(block, pos, mode);
1445 set_frag_value(block->attr.block.graph_arr, pos, res);
1453 computes the predecessors for the real phi node, and then
1454 allocates and returns this node. The routine called to allocate the
1455 node might optimize it away and return a real value.
1456 This function must be called with an in-array of proper size. **/
1458 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1460 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1463 /* If this block has no value at pos create a Phi0 and remember it
1464 in graph_arr to break recursions.
1465 Else we may not set graph_arr as there a later value is remembered. */
1467 if (!block->attr.block.graph_arr[pos]) {
1468 /* This is commented out as collapsing to Bads is no good idea.
1469 Either we need an assert here, or we need to call a routine
1470 that deals with this case as appropriate for the given language.
1471 Right now a self referencing Id is created which will crash irg_vrfy().
1473 Even if all variables are defined before use, it can happen that
1474 we get to the start block, if a cond has been replaced by a tuple
1475 (bad, jmp). As the start has a self referencing control flow edge,
1476 we get a self referencing Id, which is hard to optimize away. We avoid
1477 this by defining the value as a Bad node.
1478 Returning a const with tarval_bad is a preliminary solution. In some
1479 situations we might want a Warning or an Error. */
1481 if (block == get_irg_start_block(current_ir_graph)) {
1482 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1483 /* We don't need to care about exception ops in the start block.
1484 There are none by definition. */
1485 return block->attr.block.graph_arr[pos];
1487 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1488 block->attr.block.graph_arr[pos] = phi0;
1489 #if PRECISE_EXC_CONTEXT
1490 /* Set graph_arr for fragile ops. Also here we should break recursion.
1491 We could choose a cyclic path through an cfop. But the recursion would
1492 break at some point. */
1493 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1498 /* This loop goes to all predecessor blocks of the block the Phi node
1499 is in and there finds the operands of the Phi node by calling
1500 get_r_value_internal. */
1501 for (i = 1; i <= ins; ++i) {
1502 prevCfOp = skip_Proj(block->in[i]);
1504 if (is_Bad(prevCfOp)) {
1505 /* In case a Cond has been optimized we would get right to the start block
1506 with an invalid definition. */
1507 nin[i-1] = new_Bad();
1510 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1512 if (!is_Bad(prevBlock)) {
1513 #if PRECISE_EXC_CONTEXT
1514 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1515 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1516 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1519 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1521 nin[i-1] = new_Bad();
1525 /* After collecting all predecessors into the array nin a new Phi node
1526 with these predecessors is created. This constructor contains an
1527 optimization: If all predecessors of the Phi node are identical it
1528 returns the only operand instead of a new Phi node. */
1529 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1531 /* In case we allocated a Phi0 node at the beginning of this procedure,
1532 we need to exchange this Phi0 with the real Phi. */
1534 exchange(phi0, res);
1535 block->attr.block.graph_arr[pos] = res;
1536 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1537 only an optimization. */
1543 /* This function returns the last definition of a variable. In case
1544 this variable was last defined in a previous block, Phi nodes are
1545 inserted. If the part of the firm graph containing the definition
1546 is not yet constructed, a dummy Phi node is returned. */
1548 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1551 /* There are 4 cases to treat.
1553 1. The block is not mature and we visit it the first time. We can not
1554 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1555 predecessors is returned. This node is added to the linked list (field
1556 "link") of the containing block to be completed when this block is
1557 matured. (Comlpletion will add a new Phi and turn the Phi0 into an Id
1560 2. The value is already known in this block, graph_arr[pos] is set and we
1561 visit the block the first time. We can return the value without
1562 creating any new nodes.
1564 3. The block is mature and we visit it the first time. A Phi node needs
1565 to be created (phi_merge). If the Phi is not needed, as all it's
1566 operands are the same value reaching the block through different
1567 paths, it's optimized away and the value itself is returned.
1569 4. The block is mature, and we visit it the second time. Now two
1570 subcases are possible:
1571 * The value was computed completely the last time we were here. This
1572 is the case if there is no loop. We can return the proper value.
1573 * The recursion that visited this node and set the flag did not
1574 return yet. We are computing a value in a loop and need to
1575 break the recursion. This case only happens if we visited
1576 the same block with phi_merge before, which inserted a Phi0.
1577 So we return the Phi0.
1580 /* case 4 -- already visited. */
1581 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1582 /* As phi_merge allocates a Phi0 this value is always defined. Here
1583 is the critical difference of the two algorithms. */
1584 assert(block->attr.block.graph_arr[pos]);
1585 return block->attr.block.graph_arr[pos];
1588 /* visited the first time */
1589 set_irn_visited(block, get_irg_visited(current_ir_graph));
1591 /* Get the local valid value */
1592 res = block->attr.block.graph_arr[pos];
1594 /* case 2 -- If the value is actually computed, return it. */
1595 if (res) { return res; };
1597 if (block->attr.block.matured) { /* case 3 */
1599 /* The Phi has the same amount of ins as the corresponding block. */
1600 int ins = get_irn_arity(block);
1602 NEW_ARR_A (ir_node *, nin, ins);
1604 /* Phi merge collects the predecessors and then creates a node. */
1605 res = phi_merge (block, pos, mode, nin, ins);
1607 } else { /* case 1 */
1608 /* The block is not mature, we don't know how many in's are needed. A Phi
1609 with zero predecessors is created. Such a Phi node is called Phi0
1610 node. The Phi0 is then added to the list of Phi0 nodes in this block
1611 to be matured by mature_block later.
1612 The Phi0 has to remember the pos of it's internal value. If the real
1613 Phi is computed, pos is used to update the array with the local
1615 res = new_rd_Phi0 (current_ir_graph, block, mode);
1616 res->attr.phi0_pos = pos;
1617 res->link = block->link;
1621 /* If we get here, the frontend missed a use-before-definition error */
1624 printf("Error: no value set. Use of undefined variable. Initializing
1626 assert (mode->code >= irm_F && mode->code <= irm_P);
1627 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1628 tarval_mode_null[mode->code]);
1631 /* The local valid value is available now. */
1632 block->attr.block.graph_arr[pos] = res;
1637 #endif /* USE_FAST_PHI_CONSTRUCTION */
1639 /* ************************************************************************** */
1641 /** Finalize a Block node, when all control flows are known. */
1642 /** Acceptable parameters are only Block nodes. */
1644 mature_block (ir_node *block)
1651 assert (get_irn_opcode(block) == iro_Block);
1652 /* @@@ should be commented in
1653 assert (!get_Block_matured(block) && "Block already matured"); */
1655 if (!get_Block_matured(block)) {
1656 ins = ARR_LEN (block->in)-1;
1657 /* Fix block parameters */
1658 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1660 /* An array for building the Phi nodes. */
1661 NEW_ARR_A (ir_node *, nin, ins);
1663 /* Traverse a chain of Phi nodes attached to this block and mature
1665 for (n = block->link; n; n=next) {
1666 inc_irg_visited(current_ir_graph);
1668 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1671 block->attr.block.matured = 1;
1673 /* Now, as the block is a finished firm node, we can optimize it.
1674 Since other nodes have been allocated since the block was created
1675 we can not free the node on the obstack. Therefore we have to call
1677 Unfortunately the optimization does not change a lot, as all allocated
1678 nodes refer to the unoptimized node.
1679 We can call _2, as global cse has no effect on blocks. */
1680 block = optimize_in_place_2(block);
1686 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1688 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
1693 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1695 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1700 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1702 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1707 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1709 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
1714 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1717 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1718 arg->attr.c.kind = fragmentary;
1719 arg->attr.c.default_proj = max_proj;
1720 res = new_Proj (arg, mode_X, max_proj);
1725 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1727 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1732 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1734 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1739 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1741 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1746 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1748 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1754 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1756 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1761 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1763 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1768 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1771 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1773 #if PRECISE_EXC_CONTEXT
1774 if ((current_ir_graph->phase_state == phase_building) &&
1775 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1776 res->attr.frag_arr = new_frag_arr(res);
1783 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1786 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1788 #if PRECISE_EXC_CONTEXT
1789 if ((current_ir_graph->phase_state == phase_building) &&
1790 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1791 res->attr.frag_arr = new_frag_arr(res);
1798 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1801 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1803 #if PRECISE_EXC_CONTEXT
1804 if ((current_ir_graph->phase_state == phase_building) &&
1805 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1806 res->attr.frag_arr = new_frag_arr(res);
1813 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1816 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1818 #if PRECISE_EXC_CONTEXT
1819 if ((current_ir_graph->phase_state == phase_building) &&
1820 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1821 res->attr.frag_arr = new_frag_arr(res);
1828 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1830 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1835 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1837 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1842 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1844 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1849 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1851 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1856 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1858 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1863 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1865 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1870 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1872 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
1877 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1879 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
1884 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
1886 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
1891 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
1893 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
1898 new_d_Jmp (dbg_info* db)
1900 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
1904 new_d_Cond (dbg_info* db, ir_node *c)
1906 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
1910 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
1914 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
1915 store, callee, arity, in, tp);
1916 #if PRECISE_EXC_CONTEXT
1917 if ((current_ir_graph->phase_state == phase_building) &&
1918 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
1919 res->attr.call.frag_arr = new_frag_arr(res);
1926 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
1928 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
1933 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
1935 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
1940 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
1943 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
1945 #if PRECISE_EXC_CONTEXT
1946 if ((current_ir_graph->phase_state == phase_building) &&
1947 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
1948 res->attr.frag_arr = new_frag_arr(res);
1955 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
1958 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
1960 #if PRECISE_EXC_CONTEXT
1961 if ((current_ir_graph->phase_state == phase_building) &&
1962 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
1963 res->attr.frag_arr = new_frag_arr(res);
1970 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
1974 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
1975 store, size, alloc_type, where);
1976 #if PRECISE_EXC_CONTEXT
1977 if ((current_ir_graph->phase_state == phase_building) &&
1978 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
1979 res->attr.a.frag_arr = new_frag_arr(res);
1986 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1988 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
1989 store, ptr, size, free_type);
1993 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
1994 /* GL: objptr was called frame before. Frame was a bad choice for the name
1995 as the operand could as well be a pointer to a dynamic object. */
1997 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
1998 store, objptr, 0, NULL, ent);
2002 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2004 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2005 store, objptr, n_index, index, sel);
2009 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2011 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2012 store, objptr, ent));
2016 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2018 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->current_block,
2023 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2025 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2033 return current_ir_graph->bad;
2037 new_d_Unknown (void)
2039 return current_ir_graph->unknown;
2043 new_d_CallBegin (dbg_info *db, ir_node *call)
2046 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2051 new_d_EndReg (dbg_info *db)
2054 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2059 new_d_EndExcept (dbg_info *db)
2062 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2067 new_d_Break (dbg_info *db)
2069 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2073 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2075 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2079 /* ********************************************************************* */
2080 /* Comfortable interface with automatic Phi node construction. */
2081 /* (Uses also constructors of ?? interface, except new_Block. */
2082 /* ********************************************************************* */
2084 /** Block construction **/
2085 /* immature Block without predecessors */
2086 ir_node *new_d_immBlock (dbg_info* db) {
2089 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2090 /* creates a new dynamic in-array as length of in is -1 */
2091 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2092 current_ir_graph->current_block = res;
2093 res->attr.block.matured = 0;
2094 res->attr.block.exc = exc_normal;
2095 res->attr.block.handler_entry = 0;
2096 res->attr.block.backedge = NULL;
2097 res->attr.block.in_cg = NULL;
2098 res->attr.block.cg_backedge = NULL;
2099 set_Block_block_visited(res, 0);
2101 /* Create and initialize array for Phi-node construction. */
2102 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2103 current_ir_graph->n_loc);
2104 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2106 /* Immature block may not be optimized! */
2114 return new_d_immBlock(NULL);
2117 /* add an adge to a jmp/control flow node */
2119 add_in_edge (ir_node *block, ir_node *jmp)
2121 if (block->attr.block.matured) {
2122 assert(0 && "Error: Block already matured!\n");
2125 assert (jmp != NULL);
2126 ARR_APP1 (ir_node *, block->in, jmp);
2130 /* changing the current block */
2132 switch_block (ir_node *target)
2134 current_ir_graph->current_block = target;
2137 /* ************************ */
2138 /* parameter administration */
2140 /* get a value from the parameter array from the current block by its index */
2142 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2144 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2145 inc_irg_visited(current_ir_graph);
2147 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2149 /* get a value from the parameter array from the current block by its index */
2151 get_value (int pos, ir_mode *mode)
2153 return get_d_value(NULL, pos, mode);
2156 /* set a value at position pos in the parameter array from the current block */
2158 set_value (int pos, ir_node *value)
2160 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2161 assert(pos+1 < current_ir_graph->n_loc);
2162 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2165 /* get the current store */
2169 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2170 /* GL: one could call get_value instead */
2171 inc_irg_visited(current_ir_graph);
2172 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2175 /* set the current store */
2177 set_store (ir_node *store)
2179 /* GL: one could call set_value instead */
2180 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2181 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2185 keep_alive (ir_node *ka)
2187 add_End_keepalive(current_ir_graph->end, ka);
2190 /** Useful access routines **/
2191 /* Returns the current block of the current graph. To set the current
2192 block use switch_block(). */
2193 ir_node *get_cur_block() {
2194 return get_irg_current_block(current_ir_graph);
2197 /* Returns the frame type of the current graph */
2198 type *get_cur_frame_type() {
2199 return get_irg_frame_type(current_ir_graph);
2203 /* ********************************************************************* */
2206 /* call once for each run of the library */
2212 /* call for each graph */
2214 finalize_cons (ir_graph *irg) {
2215 irg->phase_state = phase_high;
2219 ir_node *new_Block(int arity, ir_node **in) {
2220 return new_d_Block(NULL, arity, in);
2222 ir_node *new_Start (void) {
2223 return new_d_Start(NULL);
2225 ir_node *new_End (void) {
2226 return new_d_End(NULL);
2228 ir_node *new_Jmp (void) {
2229 return new_d_Jmp(NULL);
2231 ir_node *new_Cond (ir_node *c) {
2232 return new_d_Cond(NULL, c);
2234 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2235 return new_d_Return(NULL, store, arity, in);
2237 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2238 return new_d_Raise(NULL, store, obj);
2240 ir_node *new_Const (ir_mode *mode, tarval *con) {
2241 return new_d_Const(NULL, mode, con);
2243 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2244 return new_d_SymConst(NULL, value, kind);
2246 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2247 return new_d_simpleSel(NULL, store, objptr, ent);
2249 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2251 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2253 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2254 return (new_d_InstOf (NULL, store, objptr, ent));
2256 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2258 return new_d_Call(NULL, store, callee, arity, in, tp);
2260 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2261 return new_d_Add(NULL, op1, op2, mode);
2263 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2264 return new_d_Sub(NULL, op1, op2, mode);
2266 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2267 return new_d_Minus(NULL, op, mode);
2269 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2270 return new_d_Mul(NULL, op1, op2, mode);
2272 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2273 return new_d_Quot(NULL, memop, op1, op2);
2275 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2276 return new_d_DivMod(NULL, memop, op1, op2);
2278 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2279 return new_d_Div(NULL, memop, op1, op2);
2281 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2282 return new_d_Mod(NULL, memop, op1, op2);
2284 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2285 return new_d_Abs(NULL, op, mode);
2287 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2288 return new_d_And(NULL, op1, op2, mode);
2290 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2291 return new_d_Or(NULL, op1, op2, mode);
2293 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2294 return new_d_Eor(NULL, op1, op2, mode);
2296 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2297 return new_d_Not(NULL, op, mode);
2299 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2300 return new_d_Shl(NULL, op, k, mode);
2302 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2303 return new_d_Shr(NULL, op, k, mode);
2305 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2306 return new_d_Shrs(NULL, op, k, mode);
2308 #define new_Rotate new_Rot
2309 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2310 return new_d_Rot(NULL, op, k, mode);
2312 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2313 return new_d_Cmp(NULL, op1, op2);
2315 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2316 return new_d_Conv(NULL, op, mode);
2318 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2319 return new_d_Phi(NULL, arity, in, mode);
2321 ir_node *new_Load (ir_node *store, ir_node *addr) {
2322 return new_d_Load(NULL, store, addr);
2324 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2325 return new_d_Store(NULL, store, addr, val);
2327 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2328 where_alloc where) {
2329 return new_d_Alloc(NULL, store, size, alloc_type, where);
2331 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2333 return new_d_Free(NULL, store, ptr, size, free_type);
2335 ir_node *new_Sync (int arity, ir_node **in) {
2336 return new_d_Sync(NULL, arity, in);
2338 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2339 return new_d_Proj(NULL, arg, mode, proj);
2341 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2342 return new_d_defaultProj(NULL, arg, max_proj);
2344 ir_node *new_Tuple (int arity, ir_node **in) {
2345 return new_d_Tuple(NULL, arity, in);
2347 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2348 return new_d_Id(NULL, val, mode);
2350 ir_node *new_Bad (void) {
2353 ir_node *new_Unknown(void) {
2354 return new_d_Unknown();
2356 ir_node *new_CallBegin (ir_node *callee) {
2357 return new_d_CallBegin(NULL, callee);
2359 ir_node *new_EndReg (void) {
2360 return new_d_EndReg(NULL);
2362 ir_node *new_EndExcept (void) {
2363 return new_d_EndExcept(NULL);
2365 ir_node *new_Break (void) {
2366 return new_d_Break(NULL);
2368 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2369 return new_d_Filter(NULL, arg, mode, proj);