1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
4 * Authors: Martin Trapp, Christian Schaefer
6 * ircons.c: basic and more detailed irnode constructors
7 * store, block and parameter administration.
8 * Adapted to extended FIRM nodes (exceptions...) and commented
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
32 #if USE_EXPLICIT_PHI_IN_STACK
33 /* A stack needed for the automatic Phi node construction in constructor
34 Phi_in. Redefinition in irgraph.c!! */
/* NOTE(review): opaque forward declaration only -- the struct body lives
   elsewhere (irgraph.c per the comment above).  Do not rely on its layout
   from this file. */
39 typedef struct Phi_in_stack Phi_in_stack;
42 /*** ******************************************** */
43 /** private interfaces, for professional use only */
45 /* Constructs a Block with a fixed number of predecessors.
46 Does not set current_block. Can not be used with automatic
47 Phi node construction. */
/* NOTE(review): this excerpt is incomplete -- return type, braces, the
   `ir_node *res;` declaration and the `return res;` appear to be missing
   lines; confirm against the full source. */
49 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* Block nodes have no enclosing block themselves, hence the NULL. */
53 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* A block built with an explicit predecessor array is mature from the start. */
54 set_Block_matured(res, 1);
55 set_Block_block_visited(res, 0);
/* Initialize exception-handling and interprocedural (call-graph) attributes. */
57 res->attr.block.exc = exc_normal;
58 res->attr.block.handler_entry = 0;
59 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
60 res->attr.block.in_cg = NULL;
61 res->attr.block.cg_backedge = NULL;
/* Start node: produces the initial execution state (mode_T tuple).
   NOTE(review): bodies in this excerpt are fragmentary (no braces/returns). */
68 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
72 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
/* End node: arity -1 apparently requests a dynamic in-array (keep-alives
   are added later) -- TODO confirm the new_ir_node contract for arity < 0. */
79 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
83 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
89 /* Creates a Phi node with all predecessors. Calling this constructor
90 is only allowed if the corresponding block is mature. */
92 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* A Phi must have exactly one operand per control-flow predecessor. */
96 assert( get_Block_matured(block) );
97 assert( get_irn_arity(block) == arity );
99 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
101 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* optimize_node may fold the Phi away and return another node. */
103 res = optimize_node (res);
106 /* Memory Phis in endless loops must be kept alive.
107 As we can't distinguish these easily we keep all of them alive. */
108 if ((res->op == op_Phi) && (mode == mode_M))
109 add_End_keepalive(irg->end, res);
/* Const: 0-ary node; the tarval `con` presumably lands in an attribute on a
   missing line of this excerpt -- TODO confirm. */
114 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
117 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
119 res = optimize_node (res);
123 res = local_optimize_newby (res);
/* Id: transparent forwarding node for `val`. */
130 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
132 ir_node *in[1] = {val};
134 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
135 res = optimize_node (res);
/* Proj: selects result `proj` out of the tuple produced by `arg`. */
141 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
144 ir_node *in[1] = {arg};
146 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
147 res->attr.proj = proj;
150 assert(get_Proj_pred(res));
151 assert(get_nodes_Block(get_Proj_pred(res)));
153 res = optimize_node (res);
/* defaultProj: the default (otherwise) exit of a Cond/switch.  Note that it
   MUTATES `arg`: marks the Cond fragmentary and records the default proj. */
161 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
165 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
166 arg->attr.c.kind = fragmentary;
167 arg->attr.c.default_proj = max_proj;
168 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* --- Arithmetic / logic / shift constructors. -----------------------------
   All follow the same template: pack operands into a stack array `in`,
   build the node with new_ir_node, then run optimize_node (which may return
   a different, already-existing or folded node).  Div/Mod/DivMod/Quot take a
   memory operand and yield a mode_T tuple (result + memory + exception).
   NOTE(review): function bodies in this excerpt are fragmentary (missing
   braces, `ir_node *res;` declarations and returns). */
173 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
175 ir_node *in[1] = {op};
177 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
178 res = optimize_node (res);
185 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
189 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
190 res = optimize_node (res);
196 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
197 ir_node *op1, ir_node *op2, ir_mode *mode)
199 ir_node *in[2] = {op1, op2};
201 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
202 res = optimize_node (res);
208 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
209 ir_node *op1, ir_node *op2, ir_mode *mode)
211 ir_node *in[2] = {op1, op2};
213 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
214 res = optimize_node (res);
220 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
221 ir_node *op, ir_mode *mode)
223 ir_node *in[1] = {op};
225 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
226 res = optimize_node (res);
232 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
233 ir_node *op1, ir_node *op2, ir_mode *mode)
235 ir_node *in[2] = {op1, op2};
237 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
238 res = optimize_node (res);
/* Quot/DivMod/Div/Mod: first operand is the memory state (may raise). */
244 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
245 ir_node *memop, ir_node *op1, ir_node *op2)
247 ir_node *in[3] = {memop, op1, op2};
249 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
250 res = optimize_node (res);
256 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
257 ir_node *memop, ir_node *op1, ir_node *op2)
259 ir_node *in[3] = {memop, op1, op2};
261 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
262 res = optimize_node (res);
268 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
269 ir_node *memop, ir_node *op1, ir_node *op2)
271 ir_node *in[3] = {memop, op1, op2};
273 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
274 res = optimize_node (res);
280 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
281 ir_node *memop, ir_node *op1, ir_node *op2)
283 ir_node *in[3] = {memop, op1, op2};
285 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
286 res = optimize_node (res);
292 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
293 ir_node *op1, ir_node *op2, ir_mode *mode)
295 ir_node *in[2] = {op1, op2};
297 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
298 res = optimize_node (res);
304 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
305 ir_node *op1, ir_node *op2, ir_mode *mode)
307 ir_node *in[2] = {op1, op2};
309 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
310 res = optimize_node (res);
316 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
317 ir_node *op1, ir_node *op2, ir_mode *mode)
319 ir_node *in[2] = {op1, op2};
321 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
322 res = optimize_node (res);
328 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
329 ir_node *op, ir_mode *mode)
331 ir_node *in[1] = {op};
333 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
334 res = optimize_node (res);
/* Shift/rotate constructors: second operand `k` is the shift amount. */
340 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
341 ir_node *op, ir_node *k, ir_mode *mode)
343 ir_node *in[2] = {op, k};
345 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
346 res = optimize_node (res);
352 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
353 ir_node *op, ir_node *k, ir_mode *mode)
355 ir_node *in[2] = {op, k};
357 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
358 res = optimize_node (res);
364 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
365 ir_node *op, ir_node *k, ir_mode *mode)
367 ir_node *in[2] = {op, k};
369 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
370 res = optimize_node (res);
376 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
377 ir_node *op, ir_node *k, ir_mode *mode)
379 ir_node *in[2] = {op, k};
381 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
382 res = optimize_node (res);
388 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
389 ir_node *op, ir_mode *mode)
391 ir_node *in[1] = {op};
393 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
394 res = optimize_node (res);
/* Cmp: yields a mode_T tuple of relation results (selected via Proj). */
400 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
401 ir_node *op1, ir_node *op2)
403 ir_node *in[2] = {op1, op2};
405 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
406 res = optimize_node (res);
/* --- Control-flow and call constructors (fragmentary excerpt). ----------- */
/* Jmp: unconditional branch out of `block`. */
412 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
415 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
416 res = optimize_node (res);
/* Cond: conditional branch / switch on `c`; starts out as a dense jump
   table with default projection 0 (see new_rd_defaultProj). */
422 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
424 ir_node *in[1] = {c};
426 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
427 res->attr.c.kind = dense;
428 res->attr.c.default_proj = 0;
429 res = optimize_node (res);
/* Call: r_in presumably holds {store, callee, args...}; the memcpy below
   copies the `arity` arguments behind two fixed slots -- TODO confirm the
   missing lines that set r_arity and fill r_in[0..1]. */
435 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
436 ir_node *callee, int arity, ir_node **in, type *tp)
443 NEW_ARR_A (ir_node *, r_in, r_arity);
446 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
448 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
450 assert(is_method_type(tp));
451 set_Call_type(res, tp);
452 res->attr.call.callee_arr = NULL;
453 res = optimize_node (res);
/* Return: one fixed slot (the store) followed by the result values. */
459 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
460 ir_node *store, int arity, ir_node **in)
467 NEW_ARR_A (ir_node *, r_in, r_arity);
469 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
470 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
471 res = optimize_node (res);
/* Raise: throws `obj` as an exception, threading the store. */
477 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
479 ir_node *in[2] = {store, obj};
481 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
482 res = optimize_node (res);
/* --- Memory and entity-access constructors (fragmentary excerpt). -------- */
/* Load: {store, address} -> mode_T tuple (memory, loaded value, exception). */
488 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
489 ir_node *store, ir_node *adr)
491 ir_node *in[2] = {store, adr};
493 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
495 res = optimize_node (res);
/* Store: {store, address, value} -> mode_T tuple. */
501 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
502 ir_node *store, ir_node *adr, ir_node *val)
504 ir_node *in[3] = {store, adr, val};
506 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
508 res = optimize_node (res);
/* Alloc: allocates `size` bytes of `alloc_type` on stack or heap (`where`). */
515 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
516 ir_node *size, type *alloc_type, where_alloc where)
518 ir_node *in[2] = {store, size};
520 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
522 res->attr.a.where = where;
523 res->attr.a.type = alloc_type;
525 res = optimize_node (res);
/* Free: releases `size` bytes at `ptr` of type `free_type`. */
531 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
532 ir_node *ptr, ir_node *size, type *free_type)
534 ir_node *in[3] = {store, ptr, size};
536 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
538 res->attr.f = free_type;
540 res = optimize_node (res);
/* Sel: selects entity `ent` from the compound/array behind `objptr`;
   r_in holds {store, objptr, indices...} -- the lines setting r_arity and
   the fixed slots appear missing from this excerpt. */
546 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
547 int arity, ir_node **in, entity *ent)
554 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
557 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
558 res = new_ir_node (db, irg, block, op_Sel, mode_P, r_arity, r_in);
560 res->attr.s.ent = ent;
562 res = optimize_node (res);
/* InstOf: runtime type test.
   NOTE(review): this constructor passes op_Sel, not an op_InstOf opcode --
   looks suspicious (copy/paste from new_rd_Sel?); confirm intended opcode. */
568 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
569 ir_node *objptr, type *ent)
576 NEW_ARR_A (ir_node *, r_in, r_arity);
580 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
582 res->attr.io.ent = ent;
584 /* res = optimize (res);
/* --- Symbolic constants, Sync and interprocedural-view constructors. ----- */
/* SymConst: `value` is either an ident (linkage_ptr_info) or a type
   (type_tag / size); the tagged union attr.i.tori stores whichever applies.
   NOTE(review): the line choosing `mode` for the non-pointer case appears
   missing from this excerpt. */
590 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
591 symconst_kind symkind)
595 if (symkind == linkage_ptr_info)
599 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
601 res->attr.i.num = symkind;
602 if (symkind == linkage_ptr_info) {
603 res->attr.i.tori.ptrinfo = (ident *)value;
605 assert ( ( (symkind == type_tag)
606 || (symkind == size))
607 && (is_type(value)));
608 res->attr.i.tori.typ = (type *)value;
610 res = optimize_node (res);
/* Sync: joins several memory states into one (mode_M). */
616 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
620 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
622 res = optimize_node (res);
/* Bad / Unknown: singleton-like helper nodes of the graph (bodies not
   visible in this excerpt). */
628 new_rd_Bad (ir_graph *irg)
634 new_rd_Unknown (ir_graph *irg)
/* CallBegin: interprocedural-view marker for a Call; keeps a back pointer
   to the Call and its graph. */
640 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
642 ir_node *in[1] = { get_Call_ptr(call) };
644 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
645 res->attr.callbegin.irg = irg;
646 res->attr.callbegin.call = call;
647 res = optimize_node (res);
/* EndReg / EndExcept: regular and exceptional procedure ends for the
   interprocedural view; arity -1 as for op_End. */
653 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
657 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
658 res->attr.end.irg = irg;
665 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
669 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
670 res->attr.end.irg = irg;
/* Break: jump used in the interprocedural view. */
677 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
680 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
681 res = optimize_node (res);
/* Filter: the interprocedural counterpart of Proj. */
687 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
690 ir_node *in[1] = {arg};
692 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
693 res->attr.filter.proj = proj;
694 res->attr.filter.in_cg = NULL;
695 res->attr.filter.backedge = NULL;
698 assert(get_Proj_pred(res));
699 assert(get_nodes_Block(get_Proj_pred(res)));
701 res = optimize_node (res);
/* --- Convenience wrappers: new_r_* == new_rd_* with NULL debug info. ------
   Each wrapper simply forwards its arguments; no additional logic.
   NOTE(review): the closing braces of these one-line wrappers are missing
   from this excerpt. */
708 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
709 return new_rd_Block(NULL, irg, arity, in);
711 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
712 return new_rd_Start(NULL, irg, block);
714 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
715 return new_rd_End(NULL, irg, block);
717 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
718 return new_rd_Jmp(NULL, irg, block);
720 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
721 return new_rd_Cond(NULL, irg, block, c);
723 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
724 ir_node *store, int arity, ir_node **in) {
725 return new_rd_Return(NULL, irg, block, store, arity, in);
727 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
728 ir_node *store, ir_node *obj) {
729 return new_rd_Raise(NULL, irg, block, store, obj);
731 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
732 ir_mode *mode, tarval *con) {
733 return new_rd_Const(NULL, irg, block, mode, con);
735 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
736 type_or_id_p value, symconst_kind symkind) {
737 return new_rd_SymConst(NULL, irg, block, value, symkind);
739 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
740 ir_node *objptr, int n_index, ir_node **index,
742 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
744 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
746 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
748 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
749 ir_node *callee, int arity, ir_node **in,
751 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
753 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
754 ir_node *op1, ir_node *op2, ir_mode *mode) {
755 return new_rd_Add(NULL, irg, block, op1, op2, mode);
757 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
758 ir_node *op1, ir_node *op2, ir_mode *mode) {
759 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
761 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
762 ir_node *op, ir_mode *mode) {
763 return new_rd_Minus(NULL, irg, block, op, mode);
765 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
766 ir_node *op1, ir_node *op2, ir_mode *mode) {
767 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
769 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
770 ir_node *memop, ir_node *op1, ir_node *op2) {
771 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
773 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
774 ir_node *memop, ir_node *op1, ir_node *op2) {
775 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
777 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
778 ir_node *memop, ir_node *op1, ir_node *op2) {
779 return new_rd_Div(NULL, irg, block, memop, op1, op2);
781 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
782 ir_node *memop, ir_node *op1, ir_node *op2) {
783 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
785 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
786 ir_node *op, ir_mode *mode) {
787 return new_rd_Abs(NULL, irg, block, op, mode);
789 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
790 ir_node *op1, ir_node *op2, ir_mode *mode) {
791 return new_rd_And(NULL, irg, block, op1, op2, mode);
793 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
794 ir_node *op1, ir_node *op2, ir_mode *mode) {
795 return new_rd_Or(NULL, irg, block, op1, op2, mode);
797 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
798 ir_node *op1, ir_node *op2, ir_mode *mode) {
799 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
801 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
802 ir_node *op, ir_mode *mode) {
803 return new_rd_Not(NULL, irg, block, op, mode);
805 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
806 ir_node *op1, ir_node *op2) {
807 return new_rd_Cmp(NULL, irg, block, op1, op2);
809 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
810 ir_node *op, ir_node *k, ir_mode *mode) {
811 return new_rd_Shl(NULL, irg, block, op, k, mode);
813 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
814 ir_node *op, ir_node *k, ir_mode *mode) {
815 return new_rd_Shr(NULL, irg, block, op, k, mode);
817 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
818 ir_node *op, ir_node *k, ir_mode *mode) {
819 return new_rd_Shrs(NULL, irg, block, op, k, mode);
821 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
822 ir_node *op, ir_node *k, ir_mode *mode) {
823 return new_rd_Rot(NULL, irg, block, op, k, mode);
825 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
826 ir_node *op, ir_mode *mode) {
827 return new_rd_Conv(NULL, irg, block, op, mode);
829 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
830 ir_node **in, ir_mode *mode) {
831 return new_rd_Phi(NULL, irg, block, arity, in, mode);
833 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
834 ir_node *store, ir_node *adr) {
835 return new_rd_Load(NULL, irg, block, store, adr);
837 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
838 ir_node *store, ir_node *adr, ir_node *val) {
839 return new_rd_Store(NULL, irg, block, store, adr, val);
841 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
842 ir_node *size, type *alloc_type, where_alloc where) {
843 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
845 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
846 ir_node *ptr, ir_node *size, type *free_type) {
847 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
849 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
850 return new_rd_Sync(NULL, irg, block, arity, in);
852 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
853 ir_mode *mode, long proj) {
854 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
856 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
858 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
860 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
861 int arity, ir_node **in) {
862 return new_rd_Tuple(NULL, irg, block, arity, in );
864 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
865 ir_node *val, ir_mode *mode) {
866 return new_rd_Id(NULL, irg, block, val, mode);
868 INLINE ir_node *new_r_Bad (ir_graph *irg) {
869 return new_rd_Bad(irg);
871 INLINE ir_node *new_r_Unknown (ir_graph *irg) {
872 return new_rd_Unknown(irg);
874 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
875 return new_rd_CallBegin(NULL, irg, block, callee);
877 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
878 return new_rd_EndReg(NULL, irg, block);
880 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
881 return new_rd_EndExcept(NULL, irg, block);
883 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
884 return new_rd_Break(NULL, irg, block);
886 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
887 ir_mode *mode, long proj) {
888 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
892 /** ********************/
893 /** public interfaces */
894 /** construction tools */
898 * - create a new Start node in the current block
900 * @return s - pointer to the created Start node
/* new_d_Start/new_d_End: build in current_ir_graph's current block. */
905 new_d_Start (dbg_info* db)
909 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
910 op_Start, mode_T, 0, NULL);
912 res = optimize_node (res);
918 new_d_End (dbg_info* db)
921 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
922 op_End, mode_X, -1, NULL);
923 res = optimize_node (res);
929 /* Constructs a Block with a fixed number of predecessors.
930 Does set current_block. Can be used with automatic Phi
931 node construction. */
933 new_d_Block (dbg_info* db, int arity, ir_node **in)
937 res = new_rd_Block (db, current_ir_graph, arity, in);
939 /* Create and initialize array for Phi-node construction. */
/* graph_arr[pos] caches the current value of local variable `pos` in this
   block; zeroed so get_r_value_internal sees "no value yet". */
940 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
941 current_ir_graph->n_loc);
942 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
944 res = optimize_node (res);
/* Subsequent new_d_* constructors are placed into this block. */
945 current_ir_graph->current_block = res;
952 /* ***********************************************************************/
953 /* Methods necessary for automatic Phi node creation */
954 /* (forward declarations / call-graph sketch; lines missing in excerpt) */
955 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
956 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
957 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
958 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
960 Call Graph: ( A ---> B == A "calls" B)
962 get_value mature_block
970 get_r_value_internal |
974 new_rd_Phi0 new_rd_Phi_in
976 * *************************************************************************** */
978 /* Creates a Phi node with 0 predecessors */
979 static INLINE ir_node *
/* Phi0: placeholder in an immature block; completed in mature_block. */
980 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
983 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
988 /* There are two implementations of the Phi node construction. The first
989 is faster, but does not work for blocks with more than 2 predecessors.
990 The second works always but is slower and causes more unnecessary Phi
992 Select the implementations by the following preprocessor flag set in
994 #if USE_FAST_PHI_CONSTRUCTION
996 /* This is a stack used for allocating and deallocating nodes in
997 new_rd_Phi_in. The original implementation used the obstack
998 to model this stack, now it is explicit. This reduces side effects.
1000 #if USE_EXPLICIT_PHI_IN_STACK
1001 INLINE Phi_in_stack *
/* Allocates a fresh, empty Phi-in stack.
   NOTE(review): malloc result is not checked before use -- confirm whether
   the project policy is abort-on-OOM. */
1002 new_Phi_in_stack() {
1005 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1007 res->stack = NEW_ARR_F (ir_node *, 1);
/* Releases the stack's flexible array (the struct free appears to be on a
   line missing from this excerpt). */
1014 free_Phi_in_stack(Phi_in_stack *s) {
1015 DEL_ARR_F(s->stack);
/* Pushes a discarded Phi node so a later allocation can reuse it. */
1019 free_to_Phi_in_stack(ir_node *phi) {
1020 assert(get_irn_opcode(phi) == iro_Phi);
1022 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1023 current_ir_graph->Phi_in_stack->pos)
1024 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1026 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1028 (current_ir_graph->Phi_in_stack->pos)++;
1031 static INLINE ir_node *
/* Reuses a stacked Phi if available, otherwise allocates a new one.
   NOTE(review): the visible new_ir_node call references `db`, which is not a
   parameter of this function -- likely a missing local or a bug; confirm. */
1032 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1033 int arity, ir_node **in) {
1035 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1036 int pos = current_ir_graph->Phi_in_stack->pos;
1040 /* We need to allocate a new node */
1041 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1042 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1044 /* reuse the old node and initialize it again. */
1047 assert (res->kind == k_ir_node);
1048 assert (res->op == op_Phi);
1052 assert (arity >= 0);
1053 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1054 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1056 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1058 (current_ir_graph->Phi_in_stack->pos)--;
1062 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1064 /* Creates a Phi node with a given, fixed array **in of predecessors.
1065 If the Phi node is unnecessary, as the same value reaches the block
1066 through all control flow paths, it is eliminated and the value
1067 returned directly. This constructor is only intended for use in
1068 the automatic Phi node generation triggered by get_value or mature.
1069 The implementation is quite tricky and depends on the fact, that
1070 the nodes are allocated on a stack:
1071 The in array contains predecessors and NULLs. The NULLs appear,
1072 if get_r_value_internal, that computed the predecessors, reached
1073 the same block on two paths. In this case the same value reaches
1074 this block on both paths, there is no definition in between. We need
1075 not allocate a Phi where these paths merge, but we have to communicate
1076 this fact to the caller. This happens by returning a pointer to the
1077 node the caller _will_ allocate. (Yes, we predict the address. We can
1078 do so because the nodes are allocated on the obstack.) The caller then
1079 finds a pointer to itself and, when this routine is called again,
1082 static INLINE ir_node *
1083 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1084 ir_node **in, int ins)
1087 ir_node *res, *known;
1089 /* allocate a new node on the obstack.
1090 This can return a node to which some of the pointers in the in-array
1092 Attention: the constructor copies the in array, i.e., the later changes
1093 to the array in this routine do not affect the constructed node! If
1094 the in array contains NULLs, there will be missing predecessors in the
1096 Is this a possible internal state of the Phi node generation? */
1097 #if USE_EXPLICIT_PHI_IN_STACK
1098 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1100 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1101 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1103 /* The in-array can contain NULLs. These were returned by
1104 get_r_value_internal if it reached the same block/definition on a
1106 The NULLs are replaced by the node itself to simplify the test in the
1108 for (i=0; i < ins; ++i)
1109 if (in[i] == NULL) in[i] = res;
1111 /* This loop checks whether the Phi has more than one predecessor.
1112 If so, it is a real Phi node and we break the loop. Else the
1113 Phi node merges the same definition on several paths and therefore
1115 for (i=0; i < ins; ++i)
1117 if (in[i]==res || in[i]==known) continue;
1125 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1127 #if USE_EXPLICIT_PHI_IN_STACK
1128 free_to_Phi_in_stack(res);
/* Without the explicit stack the speculative node is popped off the
   obstack again -- this is the "predicted address" trick from the header
   comment: the next allocation reuses the same address. */
1130 obstack_free (current_ir_graph->obst, res);
1134 res = optimize_node (res);
1138 /* return the pointer to the Phi node. This node might be deallocated! */
1143 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1146 allocates and returns this node. The routine called to allocate the
1147 node might optimize it away and return a real value, or even a pointer
1148 to a deallocated Phi node on top of the obstack!
1149 This function is called with an in-array of proper size. **/
/* phi_merge: collect the value of local variable `pos` from every
   predecessor block into nin[], then build (or fold) a Phi over them. */
1151 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1153 ir_node *prevBlock, *res;
1156 /* This loop goes to all predecessor blocks of the block the Phi node is in
1157 and there finds the operands of the Phi node by calling
1158 get_r_value_internal. */
/* Note: block->in[0] is the block's own slot, so predecessors are 1..ins. */
1159 for (i = 1; i <= ins; ++i) {
1160 assert (block->in[i]);
1161 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1163 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1166 /* After collecting all predecessors into the array nin a new Phi node
1167 with these predecessors is created. This constructor contains an
1168 optimization: If all predecessors of the Phi node are identical it
1169 returns the only operand instead of a new Phi node. If the value
1170 passes two different control flow edges without being defined, and
1171 this is the second path treated, a pointer to the node that will be
1172 allocated for the first path (recursion) is returned. We already
1173 know the address of this node, as it is the next node to be allocated
1174 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1175 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1177 /* Now we know the value for "pos" and can enter it in the array with
1178 all known local variables. Attention: this might be a pointer to
1179 a node, that later will be allocated!!! See new_rd_Phi_in.
1180 If this is called in mature, after some set_value in the same block,
1181 the proper value must not be overwritten:
1183 get_value (makes Phi0, puts it into graph_arr)
1184 set_value (overwrites Phi0 in graph_arr)
1185 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1188 if (!block->attr.block.graph_arr[pos]) {
1189 block->attr.block.graph_arr[pos] = res;
1191 /* printf(" value already computed by %s\n",
1192 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
1198 /* This function returns the last definition of a variable. In case
1199 this variable was last defined in a previous block, Phi nodes are
1200 inserted. If the part of the firm graph containing the definition
1201 is not yet constructed, a dummy Phi node is returned. */
1203 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1206 /* There are 4 cases to treat.
1208 1. The block is not mature and we visit it the first time. We can not
1209 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1210 predecessors is returned. This node is added to the linked list (field
1211 "link") of the containing block to be completed when this block is
1212 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1215 2. The value is already known in this block, graph_arr[pos] is set and we
1216 visit the block the first time. We can return the value without
1217 creating any new nodes.
1219 3. The block is mature and we visit it the first time. A Phi node needs
1220 to be created (phi_merge). If the Phi is not needed, as all its
1221 operands are the same value reaching the block through different
1222 paths, it's optimized away and the value itself is returned.
1224 4. The block is mature, and we visit it the second time. Now two
1225 subcases are possible:
1226 * The value was computed completely the last time we were here. This
1227 is the case if there is no loop. We can return the proper value.
1228 * The recursion that visited this node and set the flag did not
1229 return yet. We are computing a value in a loop and need to
1230 break the recursion without knowing the result yet.
1231 @@@ strange case. Straight forward we would create a Phi before
1232 starting the computation of its predecessors. In this case we will
1233 find a Phi here in any case. The problem is that this implementation
1234 only creates a Phi after computing the predecessors, so that it is
1235 hard to compute self references of this Phi. @@@
1236 There is no simple check for the second subcase. Therefore we check
1237 for a second visit and treat all such cases as the second subcase.
1238 Anyways, the basic situation is the same: we reached a block
1239 on two paths without finding a definition of the value: No Phi
1240 nodes are needed on both paths.
1241 We return this information "Two paths, no Phi needed" by a very tricky
1242 implementation that relies on the fact that an obstack is a stack and
1243 will return a node with the same address on different allocations.
1244 Look also at phi_merge and new_rd_phi_in to understand this.
1245 @@@ Unfortunately this does not work, see testprogram
1246 three_cfpred_example.
1250 /* case 4 -- already visited. */
1251 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1253 /* visited the first time */
1254 set_irn_visited(block, get_irg_visited(current_ir_graph));
1256 /* Get the local valid value */
1257 res = block->attr.block.graph_arr[pos];
1259 /* case 2 -- If the value is actually computed, return it. */
1260 if (res) { return res;};
1262 if (block->attr.block.matured) { /* case 3 */
1264 /* The Phi has the same amount of ins as the corresponding block. */
1265 int ins = get_irn_arity(block);
1267 NEW_ARR_A (ir_node *, nin, ins);
1269 /* Phi merge collects the predecessors and then creates a node. */
1270 res = phi_merge (block, pos, mode, nin, ins);
1272 } else { /* case 1 */
1273 /* The block is not mature, we don't know how many in's are needed. A Phi
1274 with zero predecessors is created. Such a Phi node is called Phi0
1275 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1276 to the list of Phi0 nodes in this block to be matured by mature_block
1278 The Phi0 has to remember the pos of it's internal value. If the real
1279 Phi is computed, pos is used to update the array with the local
1282 res = new_rd_Phi0 (current_ir_graph, block, mode);
1283 res->attr.phi0_pos = pos;
1284 res->link = block->link;
1288 /* If we get here, the frontend missed a use-before-definition error */
/* Fallback: synthesize a zero constant of the requested mode so graph
   construction can continue despite the frontend bug. */
1291 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1292 assert (mode->code >= irm_F && mode->code <= irm_P);
1293 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1294 tarval_mode_null[mode->code]);
1297 /* The local valid value is available now. */
1298 block->attr.block.graph_arr[pos] = res;
1306 it starts the recursion. This causes an Id at the entry of
1307 every block that has no definition of the value! **/
1309 #if USE_EXPLICIT_PHI_IN_STACK
1311 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1312 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/** Allocates a Phi node in irg for block, merging the ins values in in[],
    then lets the optimizer collapse it to a single operand (or Bad/Unknown)
    when no real Phi is needed.
    NOTE(review): several lines of this routine are missing from this
    extraction (local declarations, the loop body, the return statement);
    the surviving lines are preserved unchanged below. */
1315 static INLINE ir_node *
1316 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1317 ir_node **in, int ins)
1320 ir_node *res, *known;
1322 /* Allocate a new node on the obstack. The allocation copies the in
1324 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1325 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1327 /* This loop checks whether the Phi has more than one predecessor.
1328 If so, it is a real Phi node and we break the loop. Else the
1329 Phi node merges the same definition on several paths and therefore
1330 is not needed. Don't consider Bad nodes! */
1332 for (i=0; i < ins; ++i)
1336 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1344 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* The freshly allocated node is the newest object on the obstack, so it
   can still be released before anything else was allocated. */
1347 obstack_free (current_ir_graph->obst, res);
1350 /* An undefined value, e.g., in unreachable code. */
1354 res = optimize_node (res);
1356 /* Memory Phis in endless loops must be kept alive.
1357 As we can't distinguish these easily we keep all of them alive. */
1358 if ((res->op == op_Phi) && (mode == mode_M))
1359 add_End_keepalive(irg->end, res);
1366 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1368 #if PRECISE_EXC_CONTEXT
1370 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/** Creates the "fragment array" for a fragile op n (an op that may raise
    an exception): a snapshot of the current block's graph_arr so the
    memory state valid after n can be patched in later.
    NOTE(review): declarations of arr/opt and the trailing
    set_optimize(opt)/return lines are missing from this extraction. */
1372 static INLINE ir_node **
1373 new_frag_arr (ir_node *n) {
1376 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1377 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1378 sizeof(ir_node *)*current_ir_graph->n_loc);
1379 /* turn off optimization before allocating Proj nodes, as res isn't
1381 opt = get_optimize(); set_optimize(0);
1382 /* Here we rely on the fact that all frag ops have Memory as first result! */
1383 if (get_irn_op(n) == op_Call)
/* Calls deliver their memory result on a different Proj number than
   the other fragile ops -- presumably projection 3 here; verify against
   the Proj numbering in the node header. */
1384 arr[0] = new_Proj(n, mode_M, 3);
1386 arr[0] = new_Proj(n, mode_M, 0);
/* Chain the cfOp itself into the reserved last slot of graph_arr so
   set_frag_value can follow the chain of fragile ops. */
1388 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/** Returns the fragment array of fragile op n. The field lives in a
    different member of the attribute union depending on the opcode. */
1392 static INLINE ir_node **
1393 get_frag_arr (ir_node *n) {
1394 if (get_irn_op(n) == op_Call) {
1395 return n->attr.call.frag_arr;
1396 } else if (get_irn_op(n) == op_Alloc) {
1397 return n->attr.a.frag_arr;
/* Default for the remaining fragile ops (Quot, DivMod, Div, Mod, Load, Store). */
1399 return n->attr.frag_arr;
/** Records val at position pos in frag_arr -- but never overwrites an
    existing entry -- and then propagates it into the fragment array of
    the next fragile op chained in the reserved last slot.
    NOTE(review): the return-type line of this function is missing from
    this extraction. */
1404 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1405 if (!frag_arr[pos]) frag_arr[pos] = val;
/* Follow the chain of fragile ops stored in the last slot (see new_frag_arr). */
1406 if (frag_arr[current_ir_graph->n_loc - 1])
1407 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/** Returns the value at pos that is valid directly after the fragile op
    cfOp, looking it up in cfOp's fragment array and building a Phi/Phi0
    in block when a later set_value shadowed it.
    NOTE(review): the return-type line, local declarations, and several
    control-flow lines (e.g. the surrounding if/else braces) are missing
    from this extraction; the surviving lines are preserved unchanged. */
1411 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1415 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1417 frag_arr = get_frag_arr(cfOp);
1418 res = frag_arr[pos];
1420 if (block->attr.block.graph_arr[pos]) {
1421 /* There was a set_value after the cfOp and no get_value before that
1422 set_value. We must build a Phi node now. */
1423 if (block->attr.block.matured) {
1424 int ins = get_irn_arity(block);
1426 NEW_ARR_A (ir_node *, nin, ins);
1427 res = phi_merge(block, pos, mode, nin, ins);
/* Block not yet matured: create a Phi0 placeholder and queue it on the
   block's link list, to be completed by mature_block. */
1429 res = new_rd_Phi0 (current_ir_graph, block, mode);
1430 res->attr.phi0_pos = pos;
1431 res->link = block->link;
1435 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1436 but this should be better: (remove comment if this works) */
1437 /* It's a Phi, we can write this into all graph_arrs with NULL */
1438 set_frag_value(block->attr.block.graph_arr, pos, res);
/* No shadowing set_value: fall back to the normal lookup and cache it. */
1440 res = get_r_value_internal(block, pos, mode);
1441 set_frag_value(block->attr.block.graph_arr, pos, res);
/** phi_merge: collects, for value slot pos of block, the reaching
    definitions from all predecessor blocks into nin[0..ins-1] and builds
    the Phi via new_rd_Phi_in (which may fold it to a single operand).
    A Phi0 placeholder is installed first to break recursion cycles.
    NOTE(review): the return-type/signature prefix line and local
    declarations (i, the phi0 = NULL initialization) are missing from
    this extraction; the surviving lines are preserved unchanged. */
1449 computes the predecessors for the real phi node, and then
1450 allocates and returns this node. The routine called to allocate the
1451 node might optimize it away and return a real value.
1452 This function must be called with an in-array of proper size. **/
1454 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1456 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1459 /* If this block has no value at pos create a Phi0 and remember it
1460 in graph_arr to break recursions.
1461 Else we may not set graph_arr as there a later value is remembered. */
1463 if (!block->attr.block.graph_arr[pos]) {
1464 /* This is commented out as collapsing to Bads is no good idea.
1465 Either we need an assert here, or we need to call a routine
1466 that deals with this case as appropriate for the given language.
1467 Right now a self referencing Id is created which will crash irg_vrfy().
1469 Even if all variables are defined before use, it can happen that
1470 we get to the start block, if a cond has been replaced by a tuple
1471 (bad, jmp). As the start has a self referencing control flow edge,
1472 we get a self referencing Id, which is hard to optimize away. We avoid
1473 this by defining the value as a Bad node.
1474 Returning a const with tarval_bad is a preliminary solution. In some
1475 situations we might want a Warning or an Error. */
1477 if (block == get_irg_start_block(current_ir_graph)) {
1478 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1479 /* We don't need to care about exception ops in the start block.
1480 There are none by definition. */
1481 return block->attr.block.graph_arr[pos];
/* Install the Phi0 placeholder so recursive lookups terminate here. */
1483 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1484 block->attr.block.graph_arr[pos] = phi0;
1485 #if PRECISE_EXC_CONTEXT
1486 /* Set graph_arr for fragile ops. Also here we should break recursion.
1487 We could choose a cyclic path through an cfop. But the recursion would
1488 break at some point. */
1489 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1494 /* This loop goes to all predecessor blocks of the block the Phi node
1495 is in and there finds the operands of the Phi node by calling
1496 get_r_value_internal. */
/* Predecessors start at in[1]; in[0] is not a control-flow predecessor. */
1497 for (i = 1; i <= ins; ++i) {
1498 prevCfOp = skip_Proj(block->in[i]);
1500 if (is_Bad(prevCfOp)) {
1501 /* In case a Cond has been optimized we would get right to the start block
1502 with an invalid definition. */
1503 nin[i-1] = new_Bad();
1506 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1508 if (!is_Bad(prevBlock)) {
1509 #if PRECISE_EXC_CONTEXT
1510 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1511 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1512 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1515 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1517 nin[i-1] = new_Bad();
1521 /* After collecting all predecessors into the array nin a new Phi node
1522 with these predecessors is created. This constructor contains an
1523 optimization: If all predecessors of the Phi node are identical it
1524 returns the only operand instead of a new Phi node. */
1525 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1527 /* In case we allocated a Phi0 node at the beginning of this procedure,
1528 we need to exchange this Phi0 with the real Phi. */
1530 exchange(phi0, res);
1531 block->attr.block.graph_arr[pos] = res;
1532 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1533 only an optimization. */
/** get_r_value_internal: returns the last definition of value slot pos
    reaching block, inserting Phi nodes (or Phi0 placeholders for
    immature blocks) as needed. This is the non-Phi_in-stack variant.
    NOTE(review): the return-type line, local declarations and closing
    braces/return are missing from this extraction; the surviving lines
    are preserved unchanged. */
1539 /* This function returns the last definition of a variable. In case
1540 this variable was last defined in a previous block, Phi nodes are
1541 inserted. If the part of the firm graph containing the definition
1542 is not yet constructed, a dummy Phi node is returned. */
1544 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1547 /* There are 4 cases to treat.
1549 1. The block is not mature and we visit it the first time. We can not
1550 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1551 predecessors is returned. This node is added to the linked list (field
1552 "link") of the containing block to be completed when this block is
1553 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1556 2. The value is already known in this block, graph_arr[pos] is set and we
1557 visit the block the first time. We can return the value without
1558 creating any new nodes.
1560 3. The block is mature and we visit it the first time. A Phi node needs
1561 to be created (phi_merge). If the Phi is not needed, as all its
1562 operands are the same value reaching the block through different
1563 paths, it's optimized away and the value itself is returned.
1565 4. The block is mature, and we visit it the second time. Now two
1566 subcases are possible:
1567 * The value was computed completely the last time we were here. This
1568 is the case if there is no loop. We can return the proper value.
1569 * The recursion that visited this node and set the flag did not
1570 return yet. We are computing a value in a loop and need to
1571 break the recursion. This case only happens if we visited
1572 the same block with phi_merge before, which inserted a Phi0.
1573 So we return the Phi0.
1576 /* case 4 -- already visited. */
1577 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1578 /* As phi_merge allocates a Phi0 this value is always defined. Here
1579 is the critical difference of the two algorithms. */
1580 assert(block->attr.block.graph_arr[pos]);
1581 return block->attr.block.graph_arr[pos];
1584 /* visited the first time */
1585 set_irn_visited(block, get_irg_visited(current_ir_graph));
1587 /* Get the local valid value */
1588 res = block->attr.block.graph_arr[pos];
1590 /* case 2 -- If the value is actually computed, return it. */
1591 if (res) { return res; };
1593 if (block->attr.block.matured) { /* case 3 */
1595 /* The Phi has the same amount of ins as the corresponding block. */
1596 int ins = get_irn_arity(block);
1598 NEW_ARR_A (ir_node *, nin, ins);
1600 /* Phi merge collects the predecessors and then creates a node. */
1601 res = phi_merge (block, pos, mode, nin, ins);
1603 } else { /* case 1 */
1604 /* The block is not mature, we don't know how many in's are needed. A Phi
1605 with zero predecessors is created. Such a Phi node is called Phi0
1606 node. The Phi0 is then added to the list of Phi0 nodes in this block
1607 to be matured by mature_block later.
1608 The Phi0 has to remember the pos of its internal value. If the real
1609 Phi is computed, pos is used to update the array with the local
1611 res = new_rd_Phi0 (current_ir_graph, block, mode);
1612 res->attr.phi0_pos = pos;
/* Prepend the Phi0 to the block's linked list of pending Phi0s. */
1613 res->link = block->link;
1617 /* If we get here, the frontend missed a use-before-definition error */
1620 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1621 assert (mode->code >= irm_F && mode->code <= irm_P);
1622 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1623 tarval_mode_null[mode->code]);
1626 /* The local valid value is available now. */
1627 block->attr.block.graph_arr[pos] = res;
1632 #endif /* USE_FAST_PHI_CONSTRUCTION */
1634 /* ************************************************************************** */
/** mature_block: fixes the predecessor count of block, completes all
    queued Phi0 placeholders via phi_merge, marks the block matured and
    optimizes it in place.
    NOTE(review): the return-type line, local declarations (ins, n, next,
    nin) and closing braces are missing from this extraction; in
    particular the expected "next = get_irn_link(n);" inside the loop is
    not visible here -- confirm it exists in the full file, otherwise the
    loop would read a freed link after exchange(). */
1636 /** Finalize a Block node, when all control flows are known. */
1637 /** Acceptable parameters are only Block nodes. */
1639 mature_block (ir_node *block)
1646 assert (get_irn_opcode(block) == iro_Block);
1647 /* @@@ should be commented in
1648 assert (!get_Block_matured(block) && "Block already matured"); */
1650 if (!get_Block_matured(block)) {
/* block->in[0] is not a predecessor, hence the -1. */
1651 ins = ARR_LEN (block->in)-1;
1652 /* Fix block parameters */
1653 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1655 /* An array for building the Phi nodes. */
1656 NEW_ARR_A (ir_node *, nin, ins);
1658 /* Traverse a chain of Phi nodes attached to this block and mature
1660 for (n = block->link; n; n=next) {
1661 inc_irg_visited(current_ir_graph);
1663 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1666 block->attr.block.matured = 1;
1668 /* Now, as the block is a finished firm node, we can optimize it.
1669 Since other nodes have been allocated since the block was created
1670 we can not free the node on the obstack. Therefore we have to call
1672 Unfortunately the optimization does not change a lot, as all allocated
1673 nodes refer to the unoptimized node.
1674 We can call _2, as global cse has no effect on blocks. */
1675 block = optimize_in_place_2(block);
/* new_d_* constructors (Phi..Mod): build a node in the graph's current
   block by forwarding to the matching new_rd_* constructor with the
   given debug info. Under PRECISE_EXC_CONTEXT the fragile ops
   (Quot, DivMod, Div, Mod) additionally get a fragment array attached.
   NOTE(review): return-type lines, opening/closing braces and argument
   continuation lines are missing from this extraction; the surviving
   lines are preserved unchanged. */
1681 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1683 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
1688 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
/* Constants are deliberately placed in the start block, not the current block. */
1690 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1695 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1697 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1702 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1704 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
/* defaultProj: marks a Cond as a fragmentary switch and builds the default Proj. */
1709 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1712 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1713 arg->attr.c.kind = fragmentary;
1714 arg->attr.c.default_proj = max_proj;
1715 res = new_Proj (arg, mode_X, max_proj);
1720 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1722 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1727 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1729 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1734 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1736 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1741 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1743 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1749 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1751 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1756 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1758 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1763 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1766 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1768 #if PRECISE_EXC_CONTEXT
1769 if ((current_ir_graph->phase_state == phase_building) &&
1770 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1771 res->attr.frag_arr = new_frag_arr(res);
1778 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1781 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1783 #if PRECISE_EXC_CONTEXT
1784 if ((current_ir_graph->phase_state == phase_building) &&
1785 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1786 res->attr.frag_arr = new_frag_arr(res);
1793 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1796 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1798 #if PRECISE_EXC_CONTEXT
1799 if ((current_ir_graph->phase_state == phase_building) &&
1800 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1801 res->attr.frag_arr = new_frag_arr(res);
1808 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1811 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1813 #if PRECISE_EXC_CONTEXT
1814 if ((current_ir_graph->phase_state == phase_building) &&
1815 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1816 res->attr.frag_arr = new_frag_arr(res);
/* new_d_* constructors (And..Store): logical/shift/compare operations,
   control flow (Jmp, Cond), and memory operations (Call, Return, Raise,
   Load, Store), all built in the current block via new_rd_*. Fragile
   ops (Call, Load, Store) get a fragment array under PRECISE_EXC_CONTEXT.
   NOTE(review): return-type lines, braces and argument continuation
   lines are missing from this extraction; the surviving lines are
   preserved unchanged. */
1823 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1825 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1830 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1832 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1837 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1839 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1844 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1846 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1851 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1853 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1858 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1860 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1865 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1867 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
1872 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1874 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
1879 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
1881 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
1886 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
1888 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
1893 new_d_Jmp (dbg_info* db)
1895 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
1899 new_d_Cond (dbg_info* db, ir_node *c)
1901 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
1905 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
1909 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
1910 store, callee, arity, in, tp);
1911 #if PRECISE_EXC_CONTEXT
1912 if ((current_ir_graph->phase_state == phase_building) &&
1913 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
1914 res->attr.call.frag_arr = new_frag_arr(res);
1921 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
1923 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
1928 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
1930 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
1935 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
1938 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
1940 #if PRECISE_EXC_CONTEXT
1941 if ((current_ir_graph->phase_state == phase_building) &&
1942 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
1943 res->attr.frag_arr = new_frag_arr(res);
1950 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
1953 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
1955 #if PRECISE_EXC_CONTEXT
1956 if ((current_ir_graph->phase_state == phase_building) &&
1957 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
1958 res->attr.frag_arr = new_frag_arr(res);
/* new_d_* constructors (Alloc..Filter): heap management, entity
   selection, symbolic constants, Sync, and interprocedural-view nodes
   (CallBegin, EndReg, EndExcept, Break, Filter), built in the current
   block via new_rd_*. Alloc is fragile and gets a fragment array under
   PRECISE_EXC_CONTEXT.
   NOTE(review): return-type lines, braces and argument continuation
   lines are missing from this extraction; the surviving lines are
   preserved unchanged. */
1965 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
1969 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
1970 store, size, alloc_type, where);
1971 #if PRECISE_EXC_CONTEXT
1972 if ((current_ir_graph->phase_state == phase_building) &&
1973 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
1974 res->attr.a.frag_arr = new_frag_arr(res);
1981 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
1983 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
1984 store, ptr, size, free_type);
/* simpleSel: a Sel without index operands (scalar entity selection). */
1988 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
1989 /* GL: objptr was called frame before. Frame was a bad choice for the name
1990 as the operand could as well be a pointer to a dynamic object. */
1992 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
1993 store, objptr, 0, NULL, ent);
1997 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
1999 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2000 store, objptr, n_index, index, sel);
2004 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2006 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2007 store, objptr, ent));
2011 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2013 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->current_block,
2018 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2020 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* Bad and Unknown are unique per graph; return the cached singletons. */
2028 return current_ir_graph->bad;
2032 new_d_Unknown (void)
2034 return current_ir_graph->unknown;
2038 new_d_CallBegin (dbg_info *db, ir_node *call)
2041 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2046 new_d_EndReg (dbg_info *db)
2049 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2054 new_d_EndExcept (dbg_info *db)
2057 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2062 new_d_Break (dbg_info *db)
2064 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2068 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2070 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2074 /* ********************************************************************* */
2075 /* Comfortable interface with automatic Phi node construction. */
2076 /* (Uses also constructors of the ?? interface, except new_Block.) */
2077 /* ********************************************************************* */
2079 /** Block construction **/
2080 /* immature Block without predecessors */
/** Creates an immature Block (dynamic, initially empty predecessor list),
    makes it the current block, and sets up the graph_arr used by the
    automatic Phi construction. Predecessors are added later with
    add_in_edge; the block is finished with mature_block.
    NOTE(review): the declaration of res and the trailing "return res;}"
    are missing from this extraction. */
2081 ir_node *new_d_immBlock (dbg_info* db) {
2084 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2085 /* creates a new dynamic in-array as length of in is -1 */
2086 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2087 current_ir_graph->current_block = res;
2088 res->attr.block.matured = 0;
2089 res->attr.block.exc = exc_normal;
2090 res->attr.block.handler_entry = 0;
2091 res->attr.block.backedge = NULL;
2092 res->attr.block.in_cg = NULL;
2093 res->attr.block.cg_backedge = NULL;
2094 set_Block_block_visited(res, 0);
2096 /* Create and initialize array for Phi-node construction. */
2097 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2098 current_ir_graph->n_loc);
2099 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2101 /* Immature block may not be optimized! */
2109 return new_d_immBlock(NULL);
/** Appends the control flow node jmp as a new predecessor of block.
    Only legal while block is still immature. */
2112 /* add an edge to a jmp/control flow node */
2114 add_in_edge (ir_node *block, ir_node *jmp)
2116 if (block->attr.block.matured) {
2117 assert(0 && "Error: Block already matured!\n");
2120 assert (jmp != NULL);
2121 ARR_APP1 (ir_node *, block->in, jmp);
/** Makes target the current block; subsequent new_* constructors and
    get/set_value calls operate on it. */
2125 /* changing the current block */
2127 switch_block (ir_node *target)
2129 current_ir_graph->current_block = target;
2132 /* ************************ */
2133 /* parameter administration */
/* Variable slot 0 of graph_arr is reserved for the memory state (store);
   user variables therefore live at pos + 1. */
2135 /* get a value from the parameter array from the current block by its index */
2137 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2139 assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* Fresh visited mark so get_r_value_internal's case analysis starts clean. */
2140 inc_irg_visited(current_ir_graph);
2142 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2144 /* get a value from the parameter array from the current block by its index */
2146 get_value (int pos, ir_mode *mode)
2148 return get_d_value(NULL, pos, mode);
2151 /* set a value at position pos in the parameter array from the current block */
2153 set_value (int pos, ir_node *value)
2155 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2156 assert(pos+1 < current_ir_graph->n_loc);
2157 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
/* The memory state uses the reserved slot 0 of graph_arr. */
2160 /* get the current store */
2164 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2165 /* GL: one could call get_value instead */
2166 inc_irg_visited(current_ir_graph);
2167 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2170 /* set the current store */
2172 set_store (ir_node *store)
2174 /* GL: one could call set_value instead */
2175 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2176 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/** Marks ka as a keep-alive edge of the End node so dead-node elimination
    does not remove it. */
2180 keep_alive (ir_node *ka)
2182 add_End_keepalive(current_ir_graph->end, ka);
2185 /** Useful access routines **/
2186 /* Returns the current block of the current graph. To set the current
2187 block use switch_block(). */
2188 ir_node *get_cur_block() {
2189 return get_irg_current_block(current_ir_graph);
2192 /* Returns the frame type of the current graph */
2193 type *get_cur_frame_type() {
2194 return get_irg_frame_type(current_ir_graph);
2198 /* ********************************************************************* */
2201 /* call once for each run of the library */
2207 /* call for each graph */
/** Ends the construction phase of irg: after this, the graph is in the
    "high" phase and the building-only constructors must not be used. */
2209 finalize_cons (ir_graph *irg) {
2210 irg->phase_state = phase_high;
/* new_* convenience constructors (Block..Mod): identical to the new_d_*
   variants but without debug information (a NULL dbg_info is passed).
   NOTE(review): the closing braces of these one-statement wrappers are
   missing from this extraction; the surviving lines are preserved
   unchanged. */
2214 ir_node *new_Block(int arity, ir_node **in) {
2215 return new_d_Block(NULL, arity, in);
2217 ir_node *new_Start (void) {
2218 return new_d_Start(NULL);
2220 ir_node *new_End (void) {
2221 return new_d_End(NULL);
2223 ir_node *new_Jmp (void) {
2224 return new_d_Jmp(NULL);
2226 ir_node *new_Cond (ir_node *c) {
2227 return new_d_Cond(NULL, c);
2229 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2230 return new_d_Return(NULL, store, arity, in);
2232 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2233 return new_d_Raise(NULL, store, obj);
2235 ir_node *new_Const (ir_mode *mode, tarval *con) {
2236 return new_d_Const(NULL, mode, con);
2238 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2239 return new_d_SymConst(NULL, value, kind);
2241 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2242 return new_d_simpleSel(NULL, store, objptr, ent);
2244 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2246 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2248 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2249 return (new_d_InstOf (NULL, store, objptr, ent));
2251 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2253 return new_d_Call(NULL, store, callee, arity, in, tp);
2255 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2256 return new_d_Add(NULL, op1, op2, mode);
2258 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2259 return new_d_Sub(NULL, op1, op2, mode);
2261 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2262 return new_d_Minus(NULL, op, mode);
2264 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2265 return new_d_Mul(NULL, op1, op2, mode);
2267 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2268 return new_d_Quot(NULL, memop, op1, op2);
2270 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2271 return new_d_DivMod(NULL, memop, op1, op2);
2273 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2274 return new_d_Div(NULL, memop, op1, op2);
2276 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2277 return new_d_Mod(NULL, memop, op1, op2);
/* new_* convenience constructors (Abs..Filter): identical to the new_d_*
   variants but without debug information (a NULL dbg_info is passed).
   NOTE(review): the closing braces of these one-statement wrappers are
   missing from this extraction; the surviving lines are preserved
   unchanged. */
2279 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2280 return new_d_Abs(NULL, op, mode);
2282 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2283 return new_d_And(NULL, op1, op2, mode);
2285 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2286 return new_d_Or(NULL, op1, op2, mode);
2288 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2289 return new_d_Eor(NULL, op1, op2, mode);
2291 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2292 return new_d_Not(NULL, op, mode);
2294 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2295 return new_d_Shl(NULL, op, k, mode);
2297 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2298 return new_d_Shr(NULL, op, k, mode);
2300 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2301 return new_d_Shrs(NULL, op, k, mode);
/* Alias kept for backward compatibility with callers using new_Rotate. */
2303 #define new_Rotate new_Rot
2304 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2305 return new_d_Rot(NULL, op, k, mode);
2307 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2308 return new_d_Cmp(NULL, op1, op2);
2310 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2311 return new_d_Conv(NULL, op, mode);
2313 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2314 return new_d_Phi(NULL, arity, in, mode);
2316 ir_node *new_Load (ir_node *store, ir_node *addr) {
2317 return new_d_Load(NULL, store, addr);
2319 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2320 return new_d_Store(NULL, store, addr, val);
2322 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2323 where_alloc where) {
2324 return new_d_Alloc(NULL, store, size, alloc_type, where);
2326 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2328 return new_d_Free(NULL, store, ptr, size, free_type);
2330 ir_node *new_Sync (int arity, ir_node **in) {
2331 return new_d_Sync(NULL, arity, in);
2333 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2334 return new_d_Proj(NULL, arg, mode, proj);
2336 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2337 return new_d_defaultProj(NULL, arg, max_proj);
2339 ir_node *new_Tuple (int arity, ir_node **in) {
2340 return new_d_Tuple(NULL, arity, in);
2342 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2343 return new_d_Id(NULL, val, mode);
2345 ir_node *new_Bad (void) {
2348 ir_node *new_Unknown(void) {
2349 return new_d_Unknown();
2351 ir_node *new_CallBegin (ir_node *callee) {
2352 return new_d_CallBegin(NULL, callee);
2354 ir_node *new_EndReg (void) {
2355 return new_d_EndReg(NULL);
2357 ir_node *new_EndExcept (void) {
2358 return new_d_EndExcept(NULL);
2360 ir_node *new_Break (void) {
2361 return new_d_Break(NULL);
2363 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2364 return new_d_Filter(NULL, arg, mode, proj);