1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
4 * Authors: Martin Trapp, Christian Schaefer
6 * ircons.c: basic and more detailed irnode constructors
7 * store, block and parameter administration.
8 * Adapted to extended FIRM nodes (exceptions...) and commented
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
32 #if USE_EXPLICIT_PHI_IN_STACK
33 /* A stack needed for the automatic Phi node construction in constructor
34 Phi_in. Redefinition in irgraph.c!! */
39 typedef struct Phi_in_stack Phi_in_stack;
42 /*** ******************************************** */
43 /** privat interfaces, for professional use only */
/* NOTE(review): this listing is a sparse excerpt of ircons.c -- interior
   lines of most functions (braces, `ir_node *res;` declarations, returns)
   are not visible.  Comments below describe only what the visible lines
   show; anything else is marked as an assumption. */
45 /* Constructs a Block with a fixed number of predecessors.
46 Does not set current_block. Can not be used with automatic
47 Phi node construction. */
49 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* The block is created already mature and unvisited; loop back-edge
   bookkeeping is allocated on the graph's obstack, sized to arity. */
53 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
54 set_Block_matured(res, 1);
55 set_Block_block_visited(res, 0);
/* Exception handling and interprocedural (call-graph) view fields start
   out in their neutral state. */
57 res->attr.block.exc = exc_normal;
58 res->attr.block.handler_entry = 0;
59 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
60 res->attr.block.in_cg = NULL;
61 res->attr.block.cg_backedge = NULL;
/* Start node: produces a tuple (mode_T) and has no predecessors. */
68 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
72 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
/* End node: arity -1 requests a dynamic predecessor array (keep-alive
   edges are appended later via add_End_keepalive). */
79 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
83 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
89 /* Creates a Phi node with all predecessors. Calling this constructor
90 is only allowed if the corresponding block is mature. */
92 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* A Phi must have exactly one data predecessor per control-flow
   predecessor of its block. */
96 assert( get_Block_matured(block) );
97 assert( get_irn_arity(block) == arity );
99 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
101 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
103 res = optimize_node (res);
106 /* Memory Phis in endless loops must be kept alive.
107 As we can't distinguish these easily we keep all of them alive. */
/* The op_Phi re-check matters: optimize_node may have folded the Phi
   into something else, in which case no keep-alive is needed. */
108 if ((res->op == op_Phi) && (mode == mode_M))
109 add_End_keepalive(irg->end, res);
/* Const: 0-ary node carrying a target value.  Two optimization calls are
   visible (internal lines 119 and 123); presumably alternative variants
   separated by preprocessor lines not shown here -- TODO confirm. */
114 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
117 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
119 res = optimize_node (res);
123 res = local_optimize_newby (res);
/* Id: transparent copy of val; normally folded away by optimize_node. */
130 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
132 ir_node *in[1] = {val};
134 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
135 res = optimize_node (res);
/* Proj: selects result `proj` out of the tuple produced by arg. */
141 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
144 ir_node *in[1] = {arg};
146 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
147 res->attr.proj = proj;
150 assert(get_Proj_pred(res));
151 assert(get_nodes_Block(get_Proj_pred(res)));
153 res = optimize_node (res);
/* defaultProj: the "otherwise" exit of a switch-Cond.  Note this MUTATES
   the Cond argument: it is marked fragmentary and told its default
   projection number before the Proj is built. */
161 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
165 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
166 arg->attr.c.kind = fragmentary;
167 arg->attr.c.default_proj = max_proj;
168 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Conv: value conversion of op to the given mode. */
173 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
175 ir_node *in[1] = {op};
177 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
178 res = optimize_node (res);
/* Tuple: groups arity values into one mode_T node. */
185 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
189 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
190 res = optimize_node (res);
/* --- Arithmetic, logic, shift and compare constructors ----------------
   All follow the same visible pattern: build the operand array on the C
   stack, allocate the node, run local optimization.  The dividing ops
   (Quot/DivMod/Div/Mod) additionally take a memory operand and produce a
   mode_T tuple (memory result + data result(s) + exception X-projs);
   Cmp also produces mode_T (one Proj per relation). --------------------- */
196 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
197 ir_node *op1, ir_node *op2, ir_mode *mode)
199 ir_node *in[2] = {op1, op2};
201 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
202 res = optimize_node (res);
208 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
209 ir_node *op1, ir_node *op2, ir_mode *mode)
211 ir_node *in[2] = {op1, op2};
213 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
214 res = optimize_node (res);
220 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
221 ir_node *op, ir_mode *mode)
223 ir_node *in[1] = {op};
225 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
226 res = optimize_node (res);
232 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
233 ir_node *op1, ir_node *op2, ir_mode *mode)
235 ir_node *in[2] = {op1, op2};
237 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
238 res = optimize_node (res);
244 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
245 ir_node *memop, ir_node *op1, ir_node *op2)
247 ir_node *in[3] = {memop, op1, op2};
249 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
250 res = optimize_node (res);
256 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
257 ir_node *memop, ir_node *op1, ir_node *op2)
259 ir_node *in[3] = {memop, op1, op2};
261 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
262 res = optimize_node (res);
268 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
269 ir_node *memop, ir_node *op1, ir_node *op2)
271 ir_node *in[3] = {memop, op1, op2};
273 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
274 res = optimize_node (res);
280 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
281 ir_node *memop, ir_node *op1, ir_node *op2)
283 ir_node *in[3] = {memop, op1, op2};
285 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
286 res = optimize_node (res);
292 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
293 ir_node *op1, ir_node *op2, ir_mode *mode)
295 ir_node *in[2] = {op1, op2};
297 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
298 res = optimize_node (res);
304 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
305 ir_node *op1, ir_node *op2, ir_mode *mode)
307 ir_node *in[2] = {op1, op2};
309 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
310 res = optimize_node (res);
316 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
317 ir_node *op1, ir_node *op2, ir_mode *mode)
319 ir_node *in[2] = {op1, op2};
321 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
322 res = optimize_node (res);
328 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
329 ir_node *op, ir_mode *mode)
331 ir_node *in[1] = {op};
333 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
334 res = optimize_node (res);
340 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
341 ir_node *op, ir_node *k, ir_mode *mode)
343 ir_node *in[2] = {op, k};
345 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
346 res = optimize_node (res);
352 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
353 ir_node *op, ir_node *k, ir_mode *mode)
355 ir_node *in[2] = {op, k};
357 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
358 res = optimize_node (res);
364 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
365 ir_node *op, ir_node *k, ir_mode *mode)
367 ir_node *in[2] = {op, k};
369 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
370 res = optimize_node (res);
376 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
377 ir_node *op, ir_node *k, ir_mode *mode)
379 ir_node *in[2] = {op, k};
381 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
382 res = optimize_node (res);
388 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
389 ir_node *op, ir_mode *mode)
391 ir_node *in[1] = {op};
393 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
394 res = optimize_node (res);
400 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
401 ir_node *op1, ir_node *op2)
403 ir_node *in[2] = {op1, op2};
405 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
406 res = optimize_node (res);
/* Jmp: unconditional control flow, no predecessors. */
412 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
415 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
416 res = optimize_node (res);
/* Cond: conditional/multiway branch on c; starts out as a dense jump
   table with default projection 0 (may later be marked fragmentary by
   new_rd_defaultProj). */
422 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
424 ir_node *in[1] = {c};
426 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
427 res->attr.c.kind = dense;
428 res->attr.c.default_proj = 0;
429 res = optimize_node (res);
/* Call: the memcpy offset shows the real in-array layout is
   r_in[0..1] = {store, callee} followed by the `arity` parameters
   (r_arity presumably arity+2; the assignment line is not visible --
   TODO confirm). */
435 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
436 ir_node *callee, int arity, ir_node **in, type *tp)
443 NEW_ARR_A (ir_node *, r_in, r_arity);
446 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
448 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The callee type must be a method type; callee_arr (set of possibly
   called methods, for the interprocedural view) starts empty. */
450 assert(is_method_type(tp));
451 set_Call_type(res, tp);
452 res->attr.call.callee_arr = NULL;
453 res = optimize_node (res);
/* Return: r_in[0] = store, followed by the `arity` result values
   (memcpy starts at index 1). */
459 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
460 ir_node *store, int arity, ir_node **in)
467 NEW_ARR_A (ir_node *, r_in, r_arity);
469 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
470 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
471 res = optimize_node (res);
/* Raise: throws obj; produces a tuple (memory + control flow). */
477 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
479 ir_node *in[2] = {store, obj};
481 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
482 res = optimize_node (res);
/* Load/Store: memory operations, both mode_T tuples
   (memory result, data/exception projections). */
488 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
489 ir_node *store, ir_node *adr)
491 ir_node *in[2] = {store, adr};
493 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
495 res = optimize_node (res);
501 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
502 ir_node *store, ir_node *adr, ir_node *val)
504 ir_node *in[3] = {store, adr, val};
506 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
508 res = optimize_node (res);
/* Alloc: allocates `size` bytes of alloc_type on stack or heap (where). */
515 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
516 ir_node *size, type *alloc_type, where_alloc where)
518 ir_node *in[2] = {store, size};
520 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
522 res->attr.a.where = where;
523 res->attr.a.type = alloc_type;
525 res = optimize_node (res);
/* Free: releases `size` bytes of free_type at ptr. */
531 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
532 ir_node *ptr, ir_node *size, type *free_type)
534 ir_node *in[3] = {store, ptr, size};
536 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
538 res->attr.f = free_type;
540 res = optimize_node (res);
/* Sel: selects entity ent relative to objptr; like Call, the memcpy
   offset shows r_in[0..1] = {store, objptr} followed by the `arity`
   index operands (for array entities).  Result is a pointer (mode_P). */
546 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
547 int arity, ir_node **in, entity *ent)
554 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
557 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
558 res = new_ir_node (db, irg, block, op_Sel, mode_P, r_arity, r_in);
560 res->attr.s.ent = ent;
562 res = optimize_node (res);
/* InstOf: dynamic type test of objptr against ent.
   NOTE(review): internal line 580 builds the node with op_Sel, not
   op_InstOf, and stores into attr.io -- this looks like a copy-paste
   from new_rd_Sel; confirm whether op_InstOf was intended.  Also note
   the node is NOT optimized (the optimize call at internal line 584 is
   commented out). */
568 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
569 ir_node *objptr, type *ent)
576 NEW_ARR_A (ir_node *, r_in, r_arity);
580 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
582 res->attr.io.ent = ent;
584 /* res = optimize (res);
/* SymConst: symbolic constant.  The kind decides both the mode
   (branch at internal line 595, body not visible) and which union
   member of attr.i.tori holds the payload: an ident for
   linkage_ptr_info, a type for type_tag/size. */
590 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
591 symconst_kind symkind)
595 if (symkind == linkage_ptr_info)
599 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
601 res->attr.i.num = symkind;
602 if (symkind == linkage_ptr_info) {
603 res->attr.i.tori.ptrinfo = (ident *)value;
605 assert ( ( (symkind == type_tag)
606 || (symkind == size))
607 && (is_type(value)));
608 res->attr.i.tori.typ = (type *)value;
610 res = optimize_node (res);
/* Sync: joins several memory values into one (mode_M). */
616 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
620 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
622 res = optimize_node (res);
/* Bad / Unknown: return the per-graph singleton nodes rather than
   allocating anything. */
630 return current_ir_graph->bad;
636 return current_ir_graph->unknown;
/* CallBegin: interprocedural-view counterpart of a Call; its single
   predecessor is the Call's function pointer, and it remembers both its
   graph and the Call it belongs to. */
640 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
642 ir_node *in[1] = { get_Call_ptr(call) };
644 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
645 res->attr.callbegin.irg = irg;
646 res->attr.callbegin.call = call;
647 res = optimize_node (res);
/* EndReg / EndExcept: interprocedural end nodes for regular resp.
   exceptional control flow; arity -1 = dynamic predecessor array.
   No optimize_node call is visible for either. */
653 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
657 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
658 res->attr.end.irg = irg;
665 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
669 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
670 res->attr.end.irg = irg;
/* Break: jump out of an interprocedural region. */
677 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
680 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
681 res = optimize_node (res);
/* Filter: interprocedural analogue of Proj -- selects result `proj`
   and carries its own call-graph predecessor/backedge bookkeeping. */
687 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
690 ir_node *in[1] = {arg};
692 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
693 res->attr.filter.proj = proj;
694 res->attr.filter.in_cg = NULL;
695 res->attr.filter.backedge = NULL;
698 assert(get_Proj_pred(res));
699 assert(get_nodes_Block(get_Proj_pred(res)));
701 res = optimize_node (res);
/* --- new_r_* convenience wrappers --------------------------------------
   Each simply forwards to the corresponding new_rd_* constructor with a
   NULL dbg_info.  No logic of their own. ------------------------------- */
708 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
709 return new_rd_Block(NULL, irg, arity, in);
711 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
712 return new_rd_Start(NULL, irg, block);
714 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
715 return new_rd_End(NULL, irg, block);
717 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
718 return new_rd_Jmp(NULL, irg, block);
720 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
721 return new_rd_Cond(NULL, irg, block, c);
723 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
724 ir_node *store, int arity, ir_node **in) {
725 return new_rd_Return(NULL, irg, block, store, arity, in);
727 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
728 ir_node *store, ir_node *obj) {
729 return new_rd_Raise(NULL, irg, block, store, obj);
731 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
732 ir_mode *mode, tarval *con) {
733 return new_rd_Const(NULL, irg, block, mode, con);
735 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
736 type_or_id_p value, symconst_kind symkind) {
737 return new_rd_SymConst(NULL, irg, block, value, symkind);
739 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
740 ir_node *objptr, int n_index, ir_node **index,
742 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
744 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
746 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
748 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
749 ir_node *callee, int arity, ir_node **in,
751 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
753 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
754 ir_node *op1, ir_node *op2, ir_mode *mode) {
755 return new_rd_Add(NULL, irg, block, op1, op2, mode);
757 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
758 ir_node *op1, ir_node *op2, ir_mode *mode) {
759 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
761 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
762 ir_node *op, ir_mode *mode) {
763 return new_rd_Minus(NULL, irg, block, op, mode);
765 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
766 ir_node *op1, ir_node *op2, ir_mode *mode) {
767 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
769 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
770 ir_node *memop, ir_node *op1, ir_node *op2) {
771 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
773 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
774 ir_node *memop, ir_node *op1, ir_node *op2) {
775 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
777 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
778 ir_node *memop, ir_node *op1, ir_node *op2) {
779 return new_rd_Div(NULL, irg, block, memop, op1, op2);
781 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
782 ir_node *memop, ir_node *op1, ir_node *op2) {
783 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
785 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
786 ir_node *op, ir_mode *mode) {
787 return new_rd_Abs(NULL, irg, block, op, mode);
789 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
790 ir_node *op1, ir_node *op2, ir_mode *mode) {
791 return new_rd_And(NULL, irg, block, op1, op2, mode);
793 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
794 ir_node *op1, ir_node *op2, ir_mode *mode) {
795 return new_rd_Or(NULL, irg, block, op1, op2, mode);
797 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
798 ir_node *op1, ir_node *op2, ir_mode *mode) {
799 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
801 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
802 ir_node *op, ir_mode *mode) {
803 return new_rd_Not(NULL, irg, block, op, mode);
805 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
806 ir_node *op1, ir_node *op2) {
807 return new_rd_Cmp(NULL, irg, block, op1, op2);
809 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
810 ir_node *op, ir_node *k, ir_mode *mode) {
811 return new_rd_Shl(NULL, irg, block, op, k, mode);
813 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
814 ir_node *op, ir_node *k, ir_mode *mode) {
815 return new_rd_Shr(NULL, irg, block, op, k, mode);
817 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
818 ir_node *op, ir_node *k, ir_mode *mode) {
819 return new_rd_Shrs(NULL, irg, block, op, k, mode);
821 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
822 ir_node *op, ir_node *k, ir_mode *mode) {
823 return new_rd_Rot(NULL, irg, block, op, k, mode);
825 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
826 ir_node *op, ir_mode *mode) {
827 return new_rd_Conv(NULL, irg, block, op, mode);
829 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
830 ir_node **in, ir_mode *mode) {
831 return new_rd_Phi(NULL, irg, block, arity, in, mode);
833 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
834 ir_node *store, ir_node *adr) {
835 return new_rd_Load(NULL, irg, block, store, adr);
837 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
838 ir_node *store, ir_node *adr, ir_node *val) {
839 return new_rd_Store(NULL, irg, block, store, adr, val);
841 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
842 ir_node *size, type *alloc_type, where_alloc where) {
843 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
845 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
846 ir_node *ptr, ir_node *size, type *free_type) {
847 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
849 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
850 return new_rd_Sync(NULL, irg, block, arity, in);
852 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
853 ir_mode *mode, long proj) {
854 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
856 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
858 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
860 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
861 int arity, ir_node **in) {
862 return new_rd_Tuple(NULL, irg, block, arity, in );
864 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
865 ir_node *val, ir_mode *mode) {
866 return new_rd_Id(NULL, irg, block, val, mode);
868 INLINE ir_node *new_r_Bad () {
871 INLINE ir_node *new_r_Unknown () {
872 return new_rd_Unknown();
874 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
875 return new_rd_CallBegin(NULL, irg, block, callee);
877 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
878 return new_rd_EndReg(NULL, irg, block);
880 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
881 return new_rd_EndExcept(NULL, irg, block);
883 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
884 return new_rd_Break(NULL, irg, block);
886 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
887 ir_mode *mode, long proj) {
888 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
892 /** ********************/
893 /** public interfaces */
894 /** construction tools */
/* --- new_d_* constructors: operate on current_ir_graph and (except for
   new_d_Block itself) on its current_block. ---------------------------- */
898 * - create a new Start node in the current block
900 * @return s - pointer to the created Start node
905 new_d_Start (dbg_info* db)
909 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
910 op_Start, mode_T, 0, NULL);
912 res = optimize_node (res);
918 new_d_End (dbg_info* db)
921 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
922 op_End, mode_X, -1, NULL);
923 res = optimize_node (res);
929 /* Constructs a Block with a fixed number of predecessors.
930 Does set current_block. Can be used with automatic Phi
931 node construction. */
933 new_d_Block (dbg_info* db, int arity, ir_node **in)
937 res = new_rd_Block (db, current_ir_graph, arity, in);
939 /* Create and initialize array for Phi-node construction. */
/* graph_arr holds the current value of each of the n_loc local
   variables inside this block; NULL means "not yet defined here". */
940 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
941 current_ir_graph->n_loc);
942 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
944 res = optimize_node (res);
/* The freshly built block becomes the construction point for
   subsequent new_d_* calls. */
945 current_ir_graph->current_block = res;
952 /* ***********************************************************************/
953 /* Methods necessary for automatic Phi node creation */
955 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
956 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
957 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
958 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
960 Call Graph: ( A ---> B == A "calls" B)
962 get_value mature_block
970 get_r_value_internal |
974 new_rd_Phi0 new_rd_Phi_in
976 * *************************************************************************** */
978 /* Creates a Phi node with 0 predecessors */
/* Phi0 is a placeholder created while its block is still immature; it
   is completed (or turned into an Id) when the block matures. */
979 static INLINE ir_node *
980 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
983 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
988 /* There are two implementations of the Phi node construction. The first
989 is faster, but does not work for blocks with more than 2 predecessors.
990 The second works always but is slower and causes more unnecessary Phi
992 Select the implementations by the following preprocessor flag set in
994 #if USE_FAST_PHI_CONSTRUCTION
996 /* This is a stack used for allocating and deallocating nodes in
997 new_rd_Phi_in. The original implementation used the obstack
998 to model this stack, now it is explicit. This reduces side effects.
1000 #if USE_EXPLICIT_PHI_IN_STACK
/* Stack is a growable array plus a cursor (pos); entries above pos are
   free Phi nodes available for reuse. */
1001 INLINE Phi_in_stack *
1002 new_Phi_in_stack() {
1005 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1007 res->stack = NEW_ARR_F (ir_node *, 1);
1014 free_Phi_in_stack(Phi_in_stack *s) {
1015 DEL_ARR_F(s->stack);
/* Returns a Phi node to the free pool: either append it or overwrite
   the slot at the current cursor, then advance the cursor. */
1019 free_to_Phi_in_stack(ir_node *phi) {
1020 assert(get_irn_opcode(phi) == iro_Phi);
1022 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1023 current_ir_graph->Phi_in_stack->pos)
1024 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1026 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1028 (current_ir_graph->Phi_in_stack->pos)++;
/* Either pops a recycled Phi from the stack or allocates a fresh one.
   NOTE(review): internal line 1041 passes `db`, which is not among the
   visible parameters -- confirm against the full source. */
1031 static INLINE ir_node *
1032 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1033 int arity, ir_node **in) {
1035 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1036 int pos = current_ir_graph->Phi_in_stack->pos;
1040 /* We need to allocate a new node */
1041 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1042 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1044 /* reuse the old node and initialize it again. */
1047 assert (res->kind == k_ir_node);
1048 assert (res->op == op_Phi);
1052 assert (arity >= 0);
1053 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* Recycled node gets a fresh in-array (slot 0 is the block, operands
   start at index 1) and the stack cursor moves back down. */
1054 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1056 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1058 (current_ir_graph->Phi_in_stack->pos)--;
1062 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1064 /* Creates a Phi node with a given, fixed array **in of predecessors.
1065 If the Phi node is unnecessary, as the same value reaches the block
1066 through all control flow paths, it is eliminated and the value
1067 returned directly. This constructor is only intended for use in
1068 the automatic Phi node generation triggered by get_value or mature.
1069 The implementation is quite tricky and depends on the fact, that
1070 the nodes are allocated on a stack:
1071 The in array contains predecessors and NULLs. The NULLs appear,
1072 if get_r_value_internal, that computed the predecessors, reached
1073 the same block on two paths. In this case the same value reaches
1074 this block on both paths, there is no definition in between. We need
1075 not allocate a Phi where these path's merge, but we have to communicate
1076 this fact to the caller. This happens by returning a pointer to the
1077 node the caller _will_ allocate. (Yes, we predict the address. We can
1078 do so because the nodes are allocated on the obstack.) The caller then
1079 finds a pointer to itself and, when this routine is called again,
1082 static INLINE ir_node *
1083 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1084 ir_node **in, int ins)
1087 ir_node *res, *known;
1089 /* allocate a new node on the obstack.
1090 This can return a node to which some of the pointers in the in-array
1092 Attention: the constructor copies the in array, i.e., the later changes
1093 to the array in this routine do not affect the constructed node! If
1094 the in array contains NULLs, there will be missing predecessors in the
1096 Is this a possible internal state of the Phi node generation? */
1097 #if USE_EXPLICIT_PHI_IN_STACK
1098 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1100 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1101 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1103 /* The in-array can contain NULLs. These were returned by
1104 get_r_value_internal if it reached the same block/definition on a
1106 The NULLs are replaced by the node itself to simplify the test in the
1108 for (i=0; i < ins; ++i)
1109 if (in[i] == NULL) in[i] = res;
1111 /* This loop checks whether the Phi has more than one predecessor.
1112 If so, it is a real Phi node and we break the loop. Else the
1113 Phi node merges the same definition on several paths and therefore
1115 for (i=0; i < ins; ++i)
1117 if (in[i]==res || in[i]==known) continue;
1125 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Unneeded Phi: give the node back -- to the explicit stack, or to the
   obstack (which works because it was the most recent allocation). */
1127 #if USE_EXPLICIT_PHI_IN_STACK
1128 free_to_Phi_in_stack(res);
1130 obstack_free (current_ir_graph->obst, res);
1134 res = optimize_node (res);
1138 /* return the pointer to the Phi node. This node might be deallocated! */
1143 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1146 allocates and returns this node. The routine called to allocate the
1147 node might optimize it away and return a real value, or even a pointer
1148 to a deallocated Phi node on top of the obstack!
1149 This function is called with an in-array of proper size. **/
1151 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1153 ir_node *prevBlock, *res;
1156 /* This loop goes to all predecessor blocks of the block the Phi node is in
1157 and there finds the operands of the Phi node by calling
1158 get_r_value_internal. */
/* block->in[0] is the block's own slot; control-flow predecessors start
   at index 1, hence the 1-based loop writing nin[i-1]. */
1159 for (i = 1; i <= ins; ++i) {
1160 assert (block->in[i]);
1161 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1163 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1166 /* After collecting all predecessors into the array nin a new Phi node
1167 with these predecessors is created. This constructor contains an
1168 optimization: If all predecessors of the Phi node are identical it
1169 returns the only operand instead of a new Phi node. If the value
1170 passes two different control flow edges without being defined, and
1171 this is the second path treated, a pointer to the node that will be
1172 allocated for the first path (recursion) is returned. We already
1173 know the address of this node, as it is the next node to be allocated
1174 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1175 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1177 /* Now we now the value for "pos" and can enter it in the array with
1178 all known local variables. Attention: this might be a pointer to
1179 a node, that later will be allocated!!! See new_rd_Phi_in.
1180 If this is called in mature, after some set_value in the same block,
1181 the proper value must not be overwritten:
1183 get_value (makes Phi0, put's it into graph_arr)
1184 set_value (overwrites Phi0 in graph_arr)
1185 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1188 if (!block->attr.block.graph_arr[pos]) {
1189 block->attr.block.graph_arr[pos] = res;
1191 /* printf(" value already computed by %s\n",
1192 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
1198 /* This function returns the last definition of a variable. In case
1199 this variable was last defined in a previous block, Phi nodes are
1200 inserted. If the part of the firm graph containing the definition
1201 is not yet constructed, a dummy Phi node is returned. */
1203 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1206 /* There are 4 cases to treat.
1208 1. The block is not mature and we visit it the first time. We can not
1209 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1210 predecessors is returned. This node is added to the linked list (field
1211 "link") of the containing block to be completed when this block is
1212 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1215 2. The value is already known in this block, graph_arr[pos] is set and we
1216 visit the block the first time. We can return the value without
1217 creating any new nodes.
1219 3. The block is mature and we visit it the first time. A Phi node needs
1220 to be created (phi_merge). If the Phi is not needed, as all it's
1221 operands are the same value reaching the block through different
1222 paths, it's optimized away and the value itself is returned.
1224 4. The block is mature, and we visit it the second time. Now two
1225 subcases are possible:
1226 * The value was computed completely the last time we were here. This
1227 is the case if there is no loop. We can return the proper value.
1228 * The recursion that visited this node and set the flag did not
1229 return yet. We are computing a value in a loop and need to
1230 break the recursion without knowing the result yet.
1231 @@@ strange case. Straight forward we would create a Phi before
1232 starting the computation of it's predecessors. In this case we will
1233 find a Phi here in any case. The problem is that this implementation
1234 only creates a Phi after computing the predecessors, so that it is
1235 hard to compute self references of this Phi. @@@
1236 There is no simple check for the second subcase. Therefore we check
1237 for a second visit and treat all such cases as the second subcase.
1238 Anyways, the basic situation is the same: we reached a block
1239 on two paths without finding a definition of the value: No Phi
1240 nodes are needed on both paths.
1241 We return this information "Two paths, no Phi needed" by a very tricky
1242 implementation that relies on the fact that an obstack is a stack and
1243 will return a node with the same address on different allocations.
1244 Look also at phi_merge and new_rd_phi_in to understand this.
1245 @@@ Unfortunately this does not work, see testprogram
1246 three_cfpred_example.
1250 /* case 4 -- already visited. */
1251 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1253 /* visited the first time */
1254 set_irn_visited(block, get_irg_visited(current_ir_graph));
1256 /* Get the local valid value */
1257 res = block->attr.block.graph_arr[pos];
1259 /* case 2 -- If the value is actually computed, return it. */
1260 if (res) { return res;};
1262 if (block->attr.block.matured) { /* case 3 */
1264 /* The Phi has the same amount of ins as the corresponding block. */
1265 int ins = get_irn_arity(block);
1267 NEW_ARR_A (ir_node *, nin, ins);
1269 /* Phi merge collects the predecessors and then creates a node. */
1270 res = phi_merge (block, pos, mode, nin, ins);
1272 } else { /* case 1 */
1273 /* The block is not mature, we don't know how many in's are needed. A Phi
1274 with zero predecessors is created. Such a Phi node is called Phi0
1275 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1276 to the list of Phi0 nodes in this block to be matured by mature_block
1278 The Phi0 has to remember the pos of it's internal value. If the real
1279 Phi is computed, pos is used to update the array with the local
1282 res = new_rd_Phi0 (current_ir_graph, block, mode);
1283 res->attr.phi0_pos = pos;
1284 res->link = block->link;
1288 /* If we get here, the frontend missed a use-before-definition error */
1291 printf("Error: no value set. Use of undefined variable. Initializing
1293 assert (mode->code >= irm_F && mode->code <= irm_P);
1294 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1295 tarval_mode_null[mode->code]);
1298 /* The local valid value is available now. */
1299 block->attr.block.graph_arr[pos] = res;
1307 it starts the recursion. This causes an Id at the entry of
1308 every block that has no definition of the value! **/
1310 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy Phi_in stack management for this configuration: no explicit
   stack is used here, so allocation yields NULL and freeing is a no-op.
   NOTE(review): the real stack implementation presumably lives in the
   other preprocessor branch / irgraph.c — confirm. */
1312 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1313 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/** Allocates a new Phi node for 'block' with the 'ins' predecessors in
    'in', then optimizes it.  If at most one real predecessor remains
    after ignoring self references and Bad inputs, the freshly allocated
    node is released from the obstack again and the single known value
    (or an undefined value for unreachable code) is used instead.
    Surviving memory Phis are kept alive via the End node so endless
    loops do not lose their memory chain. */
1316 static INLINE ir_node *
1317 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1318 ir_node **in, int ins)
1321 ir_node *res, *known;
1323 /* Allocate a new node on the obstack. The allocation copies the in
1325 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1326 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1328 /* This loop checks whether the Phi has more than one predecessor.
1329 If so, it is a real Phi node and we break the loop. Else the
1330 Phi node merges the same definition on several paths and therefore
1331 is not needed. Don't consider Bad nodes! */
1333 for (i=0; i < ins; ++i)
     /* Skip inputs that cannot make this a real Phi: the node itself,
        the one value already seen ('known'), and Bad inputs. */
1337 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1345 /* i==ins: there is at most one predecessor, we don't need a phi node. */
     /* The speculatively allocated Phi is the newest obstack object,
        so it can simply be freed again. */
1348 obstack_free (current_ir_graph->obst, res);
1351 /* An undefined value, e.g., in unreachable code. */
1355 res = optimize_node (res);
1357 /* Memory Phis in endless loops must be kept alive.
1358 As we can't distinguish these easily we keep all of them alive. */
1359 if ((res->op == op_Phi) && (mode == mode_M))
1360 add_End_keepalive(irg->end, res);
/* Forward declarations: get_r_value_internal, phi_merge and the
   fragment-array helpers below are mutually recursive. */
1367 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1369 #if PRECISE_EXC_CONTEXT
1371 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Creates the "fragment array" for the fragile op n: a snapshot of the
   current block's graph_arr (n_loc entries) taken when n is built, used
   later to reconstruct the precise exception context.
   Slot 0 gets the op's memory result; slot n_loc-1 remembers n itself so
   set_frag_value can follow the chain of fragile ops in this block. */
1373 static INLINE ir_node **
1374 new_frag_arr (ir_node *n) {
1377 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1378 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1379 sizeof(ir_node *)*current_ir_graph->n_loc);
1380 /* turn off optimization before allocating Proj nodes, as res isn't
1382 opt = get_optimize(); set_optimize(0);
1383 /* Here we rely on the fact that all frag ops have Memory as first result! */
1384 if (get_irn_op(n) == op_Call)
      /* Call's memory result is projection number 3 here. */
1385 arr[0] = new_Proj(n, mode_M, 3);
1387 arr[0] = new_Proj(n, mode_M, 0);
1389 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Returns the fragment array of a fragile op.  The array lives in a
   different member of the attribute union depending on the opcode. */
1393 static INLINE ir_node **
1394 get_frag_arr (ir_node *n) {
1395 if (get_irn_op(n) == op_Call) {
1396 return n->attr.call.frag_arr;
1397 } else if (get_irn_op(n) == op_Alloc) {
1398 return n->attr.a.frag_arr;
1400 return n->attr.frag_arr;
/* Records val at position pos in frag_arr, but only if nothing is
   recorded there yet (never overwrites).  Then propagates the value
   along the chain of fragile ops remembered in slot n_loc-1. */
1405 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1406 if (!frag_arr[pos]) frag_arr[pos] = val;
1407 if (frag_arr[current_ir_graph->n_loc - 1])
1408 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Returns the value at pos as seen through the exception edge of the
   fragile op cfOp leaving 'block'.  First consults cfOp's fragment
   array; if the block meanwhile got a newer value at pos (set_value
   after the cfOp), a Phi must merge the old definitions: phi_merge for
   mature blocks, a Phi0 placeholder linked into the block otherwise.
   The result is written back into the fragment arrays. */
1412 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1416 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1418 frag_arr = get_frag_arr(cfOp);
1419 res = frag_arr[pos];
1421 if (block->attr.block.graph_arr[pos]) {
1422 /* There was a set_value after the cfOp and no get_value before that
1423 set_value. We must build a Phi node now. */
1424 if (block->attr.block.matured) {
1425 int ins = get_irn_arity(block);
1427 NEW_ARR_A (ir_node *, nin, ins);
1428 res = phi_merge(block, pos, mode, nin, ins);
       /* Immature block: create a Phi0 placeholder and queue it on the
          block's link list for completion in mature_block. */
1430 res = new_rd_Phi0 (current_ir_graph, block, mode);
1431 res->attr.phi0_pos = pos;
1432 res->link = block->link;
1436 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1437 but this should be better: (remove comment if this works) */
1438 /* It's a Phi, we can write this into all graph_arrs with NULL */
1439 set_frag_value(block->attr.block.graph_arr, pos, res);
1441 res = get_r_value_internal(block, pos, mode);
1442 set_frag_value(block->attr.block.graph_arr, pos, res);
1450 computes the predecessors for the real phi node, and then
1451 allocates and returns this node. The routine called to allocate the
1452 node might optimize it away and return a real value.
1453 This function must be called with an in-array of proper size. **/
/* block/pos identify the value, mode its mode; nin is caller-provided
   scratch of length ins (the block's control flow arity). */
1455 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1457 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1460 /* If this block has no value at pos create a Phi0 and remember it
1461 in graph_arr to break recursions.
1462 Else we may not set graph_arr as there a later value is remembered. */
1464 if (!block->attr.block.graph_arr[pos]) {
1465 /* This is commented out as collapsing to Bads is no good idea.
1466 Either we need an assert here, or we need to call a routine
1467 that deals with this case as appropriate for the given language.
1468 Right now a self referencing Id is created which will crash irg_vrfy().
1470 Even if all variables are defined before use, it can happen that
1471 we get to the start block, if a cond has been replaced by a tuple
1472 (bad, jmp). As the start has a self referencing control flow edge,
1473 we get a self referencing Id, which is hard to optimize away. We avoid
1474 this by defining the value as a Bad node.
1475 Returning a const with tarval_bad is a preliminary solution. In some
1476 situations we might want a Warning or an Error. */
1478 if (block == get_irg_start_block(current_ir_graph)) {
1479 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1480 /* We don't need to care about exception ops in the start block.
1481 There are none by definition. */
1482 return block->attr.block.graph_arr[pos];
    /* Install a Phi0 placeholder so recursive lookups of this value
       terminate here instead of looping. */
1484 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1485 block->attr.block.graph_arr[pos] = phi0;
1486 #if PRECISE_EXC_CONTEXT
1487 /* Set graph_arr for fragile ops. Also here we should break recursion.
1488 We could choose a cyclic path through an cfop. But the recursion would
1489 break at some point. */
1490 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1495 /* This loop goes to all predecessor blocks of the block the Phi node
1496 is in and there finds the operands of the Phi node by calling
1497 get_r_value_internal. */
1498 for (i = 1; i <= ins; ++i) {
1499 prevCfOp = skip_Proj(block->in[i]);
1501 if (is_Bad(prevCfOp)) {
1502 /* In case a Cond has been optimized we would get right to the start block
1503 with an invalid definition. */
1504 nin[i-1] = new_Bad();
1507 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1509 if (!is_Bad(prevBlock)) {
1510 #if PRECISE_EXC_CONTEXT
      /* Values live across a fragile op come from its fragment array,
         not from the predecessor block's graph_arr. */
1511 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1512 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1513 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1516 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1518 nin[i-1] = new_Bad();
1522 /* After collecting all predecessors into the array nin a new Phi node
1523 with these predecessors is created. This constructor contains an
1524 optimization: If all predecessors of the Phi node are identical it
1525 returns the only operand instead of a new Phi node. */
1526 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1528 /* In case we allocated a Phi0 node at the beginning of this procedure,
1529 we need to exchange this Phi0 with the real Phi. */
1531 exchange(phi0, res);
1532 block->attr.block.graph_arr[pos] = res;
1533 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1534 only an optimization. */
1540 /* This function returns the last definition of a variable. In case
1541 this variable was last defined in a previous block, Phi nodes are
1542 inserted. If the part of the firm graph containing the definition
1543 is not yet constructed, a dummy Phi node is returned. */
1545 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1548 /* There are 4 cases to treat.
1550 1. The block is not mature and we visit it the first time. We can not
1551 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1552 predecessors is returned. This node is added to the linked list (field
1553 "link") of the containing block to be completed when this block is
1554 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1557 2. The value is already known in this block, graph_arr[pos] is set and we
1558 visit the block the first time. We can return the value without
1559 creating any new nodes.
1561 3. The block is mature and we visit it the first time. A Phi node needs
1562 to be created (phi_merge). If the Phi is not needed, as all its
1563 operands are the same value reaching the block through different
1564 paths, it's optimized away and the value itself is returned.
1566 4. The block is mature, and we visit it the second time. Now two
1567 subcases are possible:
1568 * The value was computed completely the last time we were here. This
1569 is the case if there is no loop. We can return the proper value.
1570 * The recursion that visited this node and set the flag did not
1571 return yet. We are computing a value in a loop and need to
1572 break the recursion. This case only happens if we visited
1573 the same block with phi_merge before, which inserted a Phi0.
1574 So we return the Phi0.
1577 /* case 4 -- already visited. */
1578 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1579 /* As phi_merge allocates a Phi0 this value is always defined. Here
1580 is the critical difference of the two algorithms. */
1581 assert(block->attr.block.graph_arr[pos]);
1582 return block->attr.block.graph_arr[pos];
1585 /* visited the first time */
1586 set_irn_visited(block, get_irg_visited(current_ir_graph));
1588 /* Get the local valid value */
1589 res = block->attr.block.graph_arr[pos];
1591 /* case 2 -- If the value is actually computed, return it. */
1592 if (res) { return res; };
1594 if (block->attr.block.matured) { /* case 3 */
1596 /* The Phi has the same amount of ins as the corresponding block. */
1597 int ins = get_irn_arity(block);
1599 NEW_ARR_A (ir_node *, nin, ins);
1601 /* Phi merge collects the predecessors and then creates a node. */
1602 res = phi_merge (block, pos, mode, nin, ins);
1604 } else { /* case 1 */
1605 /* The block is not mature, we don't know how many in's are needed. A Phi
1606 with zero predecessors is created. Such a Phi node is called Phi0
1607 node. The Phi0 is then added to the list of Phi0 nodes in this block
1608 to be matured by mature_block later.
1609 The Phi0 has to remember the pos of its internal value. If the real
1610 Phi is computed, pos is used to update the array with the local
1612 res = new_rd_Phi0 (current_ir_graph, block, mode);
1613 res->attr.phi0_pos = pos;
1614 res->link = block->link;
1618 /* If we get here, the frontend missed a use-before-definition error */
    /* Fallback: substitute a null constant of the requested mode so
       construction can continue despite the frontend bug. */
1621 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1622 assert (mode->code >= irm_F && mode->code <= irm_P);
1623 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1624 tarval_mode_null[mode->code]);
1627 /* The local valid value is available now. */
1628 block->attr.block.graph_arr[pos] = res;
1633 #endif /* USE_FAST_PHI_CONSTRUCTION */
1635 /* ************************************************************************** */
1637 /** Finalize a Block node, when all control flows are known. */
1638 /** Acceptable parameters are only Block nodes. */
/* Marks 'block' as mature: all control flow predecessors are now known.
   Completes every Phi0 placeholder queued on the block's link list by
   exchanging it for a real Phi built via phi_merge, then lets the
   optimizer have a look at the block. */
1640 mature_block (ir_node *block)
1647 assert (get_irn_opcode(block) == iro_Block);
1648 /* @@@ should be commented in
1649 assert (!get_Block_matured(block) && "Block already matured"); */
1651 if (!get_Block_matured(block)) {
     /* Control flow predecessors live in in[1..ins]; in[0] is not a
        predecessor, hence the -1. */
1652 ins = ARR_LEN (block->in)-1;
1653 /* Fix block parameters */
1654 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1656 /* An array for building the Phi nodes. */
1657 NEW_ARR_A (ir_node *, nin, ins);
1659 /* Traverse a chain of Phi nodes attached to this block and mature
1661 for (n = block->link; n; n=next) {
      /* Fresh visited mark for each completion — phi_merge /
         get_r_value_internal rely on the visited flag to break cycles. */
1662 inc_irg_visited(current_ir_graph);
1664 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1667 block->attr.block.matured = 1;
1669 /* Now, as the block is a finished firm node, we can optimize it.
1670 Since other nodes have been allocated since the block was created
1671 we can not free the node on the obstack. Therefore we have to call
1673 Unfortunately the optimization does not change a lot, as all allocated
1674 nodes refer to the unoptimized node.
1675 We can call _2, as global cse has no effect on blocks. */
1676 block = optimize_in_place_2(block);
/* ** new_d_* constructors: debug-info variants that forward to the
   corresponding new_rd_* constructor using current_ir_graph and its
   current_block.  For fragile ops (Quot, DivMod, Div, Mod, Call, Load,
   Store, Alloc) a fragment array is attached under PRECISE_EXC_CONTEXT
   while the graph is still in phase_building, so precise exception
   contexts can be reconstructed later. ** */
1682 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1684 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
/* Constants are always placed in the start block, not the current one. */
1689 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1691 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1696 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1698 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1703 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1705 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
/* Default Proj of a Cond: also mutates the Cond itself (marks it
   fragmentary and records the default projection number). */
1710 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1713 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1714 arg->attr.c.kind = fragmentary;
1715 arg->attr.c.default_proj = max_proj;
1716 res = new_Proj (arg, mode_X, max_proj);
1721 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1723 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1728 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1730 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1735 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1737 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1742 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1744 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1750 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1752 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1757 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1759 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1764 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1767 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1769 #if PRECISE_EXC_CONTEXT
1770 if ((current_ir_graph->phase_state == phase_building) &&
1771 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1772 res->attr.frag_arr = new_frag_arr(res);
1779 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1782 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1784 #if PRECISE_EXC_CONTEXT
1785 if ((current_ir_graph->phase_state == phase_building) &&
1786 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1787 res->attr.frag_arr = new_frag_arr(res);
1794 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1797 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1799 #if PRECISE_EXC_CONTEXT
1800 if ((current_ir_graph->phase_state == phase_building) &&
1801 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1802 res->attr.frag_arr = new_frag_arr(res);
1809 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1812 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1814 #if PRECISE_EXC_CONTEXT
1815 if ((current_ir_graph->phase_state == phase_building) &&
1816 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1817 res->attr.frag_arr = new_frag_arr(res);
1824 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1826 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1831 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1833 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1838 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1840 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1845 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1847 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1852 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1854 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1859 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1861 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1866 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1868 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
1873 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1875 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
1880 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
1882 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
1887 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
1889 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
1894 new_d_Jmp (dbg_info* db)
1896 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
1900 new_d_Cond (dbg_info* db, ir_node *c)
1902 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
1906 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
1910 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
1911 store, callee, arity, in, tp);
1912 #if PRECISE_EXC_CONTEXT
1913 if ((current_ir_graph->phase_state == phase_building) &&
1914 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
1915 res->attr.call.frag_arr = new_frag_arr(res);
1922 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
1924 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
1929 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
1931 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
1936 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
1939 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
1941 #if PRECISE_EXC_CONTEXT
1942 if ((current_ir_graph->phase_state == phase_building) &&
1943 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
1944 res->attr.frag_arr = new_frag_arr(res);
1951 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
1954 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
1956 #if PRECISE_EXC_CONTEXT
1957 if ((current_ir_graph->phase_state == phase_building) &&
1958 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
1959 res->attr.frag_arr = new_frag_arr(res);
1966 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
1970 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
1971 store, size, alloc_type, where);
1972 #if PRECISE_EXC_CONTEXT
1973 if ((current_ir_graph->phase_state == phase_building) &&
1974 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
1975 res->attr.a.frag_arr = new_frag_arr(res);
1982 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1984 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
1985 store, ptr, size, free_type);
/* Sel with no index: selects a compound member directly. */
1989 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
1990 /* GL: objptr was called frame before. Frame was a bad choice for the name
1991 as the operand could as well be a pointer to a dynamic object. */
1993 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
1994 store, objptr, 0, NULL, ent);
1998 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2000 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2001 store, objptr, n_index, index, sel);
2005 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2007 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2008 store, objptr, ent));
2012 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2014 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->current_block,
2019 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2021 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* Bad and Unknown are unique per graph; return the cached nodes. */
2029 return current_ir_graph->bad;
2033 new_d_Unknown (void)
2035 return current_ir_graph->unknown;
2039 new_d_CallBegin (dbg_info *db, ir_node *call)
2042 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2047 new_d_EndReg (dbg_info *db)
2050 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2055 new_d_EndExcept (dbg_info *db)
2058 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2063 new_d_Break (dbg_info *db)
2065 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2069 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2071 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2075 /* ********************************************************************* */
2076 /* Comfortable interface with automatic Phi node construction. */
2077 /* (Uses also constructors of ?? interface, except new_Block. */
2078 /* ********************************************************************* */
2080 /** Block construction **/
2081 /* immature Block without predecessors */
/* Creates an immature block (predecessors still unknown, dynamic
   in-array) and makes it the current block.  graph_arr is allocated and
   zeroed for the automatic Phi construction; the block must later be
   finished with mature_block. */
2082 ir_node *new_d_immBlock (dbg_info* db) {
2085 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2086 /* creates a new dynamic in-array as length of in is -1 */
2087 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2088 current_ir_graph->current_block = res;
2089 res->attr.block.matured = 0;
2090 res->attr.block.exc = exc_normal;
2091 res->attr.block.handler_entry = 0;
2092 res->attr.block.backedge = NULL;
2093 res->attr.block.in_cg = NULL;
2094 res->attr.block.cg_backedge = NULL;
2095 set_Block_block_visited(res, 0);
2097 /* Create and initialize array for Phi-node construction. */
2098 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2099 current_ir_graph->n_loc);
2100 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2102 /* Immature block may not be optimized! */
2110 return new_d_immBlock(NULL);
2113 /* add an edge to a jmp/control flow node */
2115 add_in_edge (ir_node *block, ir_node *jmp)
2117 if (block->attr.block.matured) {
     /* Mature blocks have a fixed predecessor count; appending is a bug. */
2118 assert(0 && "Error: Block already matured!\n");
2121 assert (jmp != NULL);
2122 ARR_APP1 (ir_node *, block->in, jmp);
2126 /* changing the current block */
2128 switch_block (ir_node *target)
2130 current_ir_graph->current_block = target;
2133 /* ************************ */
2134 /* parameter administration */
2136 /* get a value from the parameter array from the current block by its index */
2138 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2140 assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* Fresh visited mark so get_r_value_internal's cycle detection works. */
2141 inc_irg_visited(current_ir_graph);
/* pos is shifted by one: graph_arr[0] is reserved for the store. */
2143 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2145 /* get a value from the parameter array from the current block by its index */
2147 get_value (int pos, ir_mode *mode)
2149 return get_d_value(NULL, pos, mode);
2152 /* set a value at position pos in the parameter array from the current block */
2154 set_value (int pos, ir_node *value)
2156 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2157 assert(pos+1 < current_ir_graph->n_loc);
2158 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2161 /* get the current store */
2165 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2166 /* GL: one could call get_value instead */
2167 inc_irg_visited(current_ir_graph);
/* The store lives at position 0 of graph_arr, with mode_M. */
2168 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2171 /* set the current store */
2173 set_store (ir_node *store)
2175 /* GL: one could call set_value instead */
2176 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2177 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keep a node alive even if it seems dead (adds it to End's keep-alives). */
2181 keep_alive (ir_node *ka)
2183 add_End_keepalive(current_ir_graph->end, ka);
2186 /** Useful access routines **/
2187 /* Returns the current block of the current graph. To set the current
2188 block use switch_block(). */
2189 ir_node *get_cur_block() {
2190 return get_irg_current_block(current_ir_graph);
2193 /* Returns the frame type of the current graph */
2194 type *get_cur_frame_type() {
2195 return get_irg_frame_type(current_ir_graph);
2199 /* ********************************************************************* */
2202 /* call once for each run of the library */
2208 /* call for each graph */
/* Ends the construction phase of a graph: from now on the graph is
   considered complete (phase_high), no more nodes may be added with the
   construction interface. */
2210 finalize_cons (ir_graph *irg) {
2211 irg->phase_state = phase_high;
/* ** Convenience constructors without debug info: each new_X simply
   forwards to new_d_X with a NULL dbg_info. ** */
2215 ir_node *new_Block(int arity, ir_node **in) {
2216 return new_d_Block(NULL, arity, in);
2218 ir_node *new_Start (void) {
2219 return new_d_Start(NULL);
2221 ir_node *new_End (void) {
2222 return new_d_End(NULL);
2224 ir_node *new_Jmp (void) {
2225 return new_d_Jmp(NULL);
2227 ir_node *new_Cond (ir_node *c) {
2228 return new_d_Cond(NULL, c);
2230 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2231 return new_d_Return(NULL, store, arity, in);
2233 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2234 return new_d_Raise(NULL, store, obj);
2236 ir_node *new_Const (ir_mode *mode, tarval *con) {
2237 return new_d_Const(NULL, mode, con);
2239 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2240 return new_d_SymConst(NULL, value, kind);
2242 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2243 return new_d_simpleSel(NULL, store, objptr, ent);
2245 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2247 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2249 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2250 return (new_d_InstOf (NULL, store, objptr, ent));
2252 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2254 return new_d_Call(NULL, store, callee, arity, in, tp);
2256 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2257 return new_d_Add(NULL, op1, op2, mode);
2259 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2260 return new_d_Sub(NULL, op1, op2, mode);
2262 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2263 return new_d_Minus(NULL, op, mode);
2265 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2266 return new_d_Mul(NULL, op1, op2, mode);
2268 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2269 return new_d_Quot(NULL, memop, op1, op2);
2271 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2272 return new_d_DivMod(NULL, memop, op1, op2);
2274 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2275 return new_d_Div(NULL, memop, op1, op2);
2277 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2278 return new_d_Mod(NULL, memop, op1, op2);
2280 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2281 return new_d_Abs(NULL, op, mode);
2283 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2284 return new_d_And(NULL, op1, op2, mode);
2286 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2287 return new_d_Or(NULL, op1, op2, mode);
2289 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2290 return new_d_Eor(NULL, op1, op2, mode);
2292 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2293 return new_d_Not(NULL, op, mode);
2295 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2296 return new_d_Shl(NULL, op, k, mode);
2298 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2299 return new_d_Shr(NULL, op, k, mode);
2301 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2302 return new_d_Shrs(NULL, op, k, mode);
/* Alias: some frontends use the name new_Rotate for new_Rot. */
2304 #define new_Rotate new_Rot
2305 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2306 return new_d_Rot(NULL, op, k, mode);
2308 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2309 return new_d_Cmp(NULL, op1, op2);
2311 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2312 return new_d_Conv(NULL, op, mode);
2314 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2315 return new_d_Phi(NULL, arity, in, mode);
2317 ir_node *new_Load (ir_node *store, ir_node *addr) {
2318 return new_d_Load(NULL, store, addr);
2320 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2321 return new_d_Store(NULL, store, addr, val);
2323 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2324 where_alloc where) {
2325 return new_d_Alloc(NULL, store, size, alloc_type, where);
2327 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2329 return new_d_Free(NULL, store, ptr, size, free_type);
2331 ir_node *new_Sync (int arity, ir_node **in) {
2332 return new_d_Sync(NULL, arity, in);
2334 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2335 return new_d_Proj(NULL, arg, mode, proj);
2337 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2338 return new_d_defaultProj(NULL, arg, max_proj);
2340 ir_node *new_Tuple (int arity, ir_node **in) {
2341 return new_d_Tuple(NULL, arity, in);
2343 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2344 return new_d_Id(NULL, val, mode);
2346 ir_node *new_Bad (void) {
2349 ir_node *new_Unknown(void) {
2350 return new_d_Unknown();
2352 ir_node *new_CallBegin (ir_node *callee) {
2353 return new_d_CallBegin(NULL, callee);
2355 ir_node *new_EndReg (void) {
2356 return new_d_EndReg(NULL);
2358 ir_node *new_EndExcept (void) {
2359 return new_d_EndExcept(NULL);
2361 ir_node *new_Break (void) {
2362 return new_d_Break(NULL);
2364 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2365 return new_d_Filter(NULL, arg, mode, proj);