1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
4 * Authors: Martin Trapp, Christian Schaefer
6 * ircons.c: basic and more detailed irnode constructors
7 * store, block and parameter administration.
8 * Adapted to extended FIRM nodes (exceptions...) and commented
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
32 #if USE_EXPLICIT_PHI_IN_STACK
33 /* A stack needed for the automatic Phi node construction in constructor
34 Phi_in. Redefinition in irgraph.c!! */
39 typedef struct Phi_in_stack Phi_in_stack;
43 * language dependent initialization variable
45 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
47 /*** ******************************************** */
48 /** private interfaces, for professional use only */
50 /* Constructs a Block with a fixed number of predecessors.
51 Does not set current_block. Can not be used with automatic
52 Phi node construction. */
54 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
58 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
59 set_Block_matured(res, 1);
60 set_Block_block_visited(res, 0);
62 res->attr.block.exc = exc_normal;
63 res->attr.block.handler_entry = 0;
64 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
65 res->attr.block.in_cg = NULL;
66 res->attr.block.cg_backedge = NULL;
68 irn_vrfy_irg (res, irg);
/* Constructs a Start node. Start yields a tuple (mode_T) of the initial
   values (memory, frame, arguments); it has no predecessors. */
73 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
77 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
79 irn_vrfy_irg (res, irg);
/* Constructs an End node. Arity -1 requests a dynamic in-array so that
   keep-alive edges can be appended later (see add_End_keepalive). */
84 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
88 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
90 irn_vrfy_irg (res, irg);
94 /* Creates a Phi node with all predecessors. Calling this constructor
95 is only allowed if the corresponding block is mature. */
97 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
101 bool has_unknown = false;
/* A Phi must have exactly one data predecessor per control predecessor
   of its block, and the block's in-set must be final (mature). */
103 assert( get_Block_matured(block) );
104 assert( get_irn_arity(block) == arity );
106 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
108 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Do not optimize while Unknown operands remain: they may still be
   replaced, so folding now would be premature. */
110 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
111 if (!has_unknown) res = optimize_node (res);
112 irn_vrfy_irg (res, irg);
114 /* Memory Phis in endless loops must be kept alive.
115 As we can't distinguish these easily we keep all of them alive. */
116 if ((res->op == op_Phi) && (mode == mode_M))
117 add_End_keepalive(irg->end, res);
/* Constructs a Const node carrying the target value 'con'.
   Constants have no predecessors. */
122 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
125 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
127 res = optimize_node (res);
128 irn_vrfy_irg (res, irg);
/* NOTE(review): a second optimization pass via local_optimize_newby
   appears here in the listing — confirm it is intentional/guarded. */
131 res = local_optimize_newby (res);
/* Constructs an Id node: a transparent forwarder of 'val' that is
   normally folded away by optimize_node. */
138 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
143 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
144 res = optimize_node (res);
145 irn_vrfy_irg (res, irg);
/* Constructs a Proj node selecting result number 'proj' from the
   tuple-producing node 'arg'. */
150 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
156 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
157 res->attr.proj = proj;
/* Sanity: the projected node and its block must exist before folding. */
160 assert(get_Proj_pred(res));
161 assert(get_nodes_Block(get_Proj_pred(res)));
163 res = optimize_node (res);
165 irn_vrfy_irg (res, irg);
/* Constructs the default-case Proj of a Cond switch. Side effect: marks
   the Cond as 'fragmentary' and records max_proj as its default target. */
171 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
/* Only valid on a Cond whose selector is an unsigned int (mode_Iu). */
175 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
176 arg->attr.c.kind = fragmentary;
177 arg->attr.c.default_proj = max_proj;
178 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node converting 'op' to the given target mode. */
183 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
188 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
189 res = optimize_node (res);
190 irn_vrfy_irg (res, irg);
/* Constructs a Tuple node grouping 'arity' values into one mode_T node. */
196 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
200 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
201 res = optimize_node (res);
202 irn_vrfy_irg (res, irg);
/* ---- Arithmetic, logic, shift and compare constructors. ----
   All follow the same pattern: build the node with its operand array,
   run local optimization (which may return an existing/folded node),
   then verify. The division family (Quot, DivMod, Div, Mod) additionally
   takes a memory operand and yields a tuple (mode_T) because it can
   raise an exception; Cmp also yields mode_T (one Proj per relation). */
207 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
208 ir_node *op1, ir_node *op2, ir_mode *mode)
214 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
215 res = optimize_node (res);
216 irn_vrfy_irg (res, irg);
221 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
222 ir_node *op1, ir_node *op2, ir_mode *mode)
228 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
229 res = optimize_node (res);
230 irn_vrfy_irg (res, irg);
235 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
236 ir_node *op, ir_mode *mode)
241 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
242 res = optimize_node (res);
243 irn_vrfy_irg (res, irg);
248 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
249 ir_node *op1, ir_node *op2, ir_mode *mode)
255 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
256 res = optimize_node (res);
257 irn_vrfy_irg (res, irg);
/* Division family: 3 ins = memory, op1, op2; result is a tuple. */
262 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
263 ir_node *memop, ir_node *op1, ir_node *op2)
270 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
271 res = optimize_node (res);
272 irn_vrfy_irg (res, irg);
277 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
278 ir_node *memop, ir_node *op1, ir_node *op2)
285 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
286 res = optimize_node (res);
287 irn_vrfy_irg (res, irg);
292 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
293 ir_node *memop, ir_node *op1, ir_node *op2)
300 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
301 res = optimize_node (res);
302 irn_vrfy_irg (res, irg);
307 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
308 ir_node *memop, ir_node *op1, ir_node *op2)
315 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
316 res = optimize_node (res);
317 irn_vrfy_irg (res, irg);
/* Bitwise logic. */
322 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
323 ir_node *op1, ir_node *op2, ir_mode *mode)
329 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
330 res = optimize_node (res);
331 irn_vrfy_irg (res, irg);
336 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
337 ir_node *op1, ir_node *op2, ir_mode *mode)
343 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
344 res = optimize_node (res);
345 irn_vrfy_irg (res, irg);
350 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
351 ir_node *op1, ir_node *op2, ir_mode *mode)
357 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
358 res = optimize_node (res);
359 irn_vrfy_irg (res, irg);
364 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
365 ir_node *op, ir_mode *mode)
370 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
371 res = optimize_node (res);
372 irn_vrfy_irg (res, irg);
/* Shifts and rotate: second operand 'k' is the shift amount. */
377 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
378 ir_node *op, ir_node *k, ir_mode *mode)
384 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
385 res = optimize_node (res);
386 irn_vrfy_irg (res, irg);
391 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
392 ir_node *op, ir_node *k, ir_mode *mode)
398 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
399 res = optimize_node (res);
400 irn_vrfy_irg (res, irg);
405 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
406 ir_node *op, ir_node *k, ir_mode *mode)
412 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
413 res = optimize_node (res);
414 irn_vrfy_irg (res, irg);
419 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
420 ir_node *op, ir_node *k, ir_mode *mode)
426 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
427 res = optimize_node (res);
428 irn_vrfy_irg (res, irg);
433 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
434 ir_node *op, ir_mode *mode)
439 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
440 res = optimize_node (res);
441 irn_vrfy_irg (res, irg);
/* Cmp yields a tuple; the relation is selected by a later Proj. */
446 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
447 ir_node *op1, ir_node *op2)
453 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
454 res = optimize_node (res);
455 irn_vrfy_irg (res, irg);
/* Constructs an unconditional Jmp (control flow, mode_X, no operands). */
460 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
463 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
464 res = optimize_node (res);
465 irn_vrfy_irg (res, irg);
/* Constructs a Cond on selector 'c'. The result tuple is consumed by
   Proj nodes, one per branch target; starts as a dense jump table. */
470 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
475 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
476 res->attr.c.kind = dense;
477 res->attr.c.default_proj = 0;
478 res = optimize_node (res);
479 irn_vrfy_irg (res, irg);
/* Constructs a Call. The real in-array r_in is laid out as
   [store, callee, arg0 .. argN-1]; hence arguments are copied to &r_in[2].
   'tp' is the method type of the callee and must be a method type. */
484 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
485 ir_node *callee, int arity, ir_node **in, type *tp)
492 NEW_ARR_A (ir_node *, r_in, r_arity);
495 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
497 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
499 assert(is_method_type(tp));
500 set_Call_type(res, tp);
/* No interprocedural callee information yet. */
501 res->attr.call.callee_arr = NULL;
502 res = optimize_node (res);
503 irn_vrfy_irg (res, irg);
/* Constructs a Return: r_in = [store, result0 .. resultN-1], so the
   result values are copied to &r_in[1]. Control flow node (mode_X). */
508 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
509 ir_node *store, int arity, ir_node **in)
516 NEW_ARR_A (ir_node *, r_in, r_arity);
518 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
519 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
520 res = optimize_node (res);
521 irn_vrfy_irg (res, irg);
/* Constructs a Raise (exception throw): ins are store and the thrown
   object; yields a tuple (memory + control). */
526 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
532 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
533 res = optimize_node (res);
534 irn_vrfy_irg (res, irg);
/* Constructs a Load from address 'adr' with memory 'store'. Yields a
   tuple (memory, loaded value, exception control flow). */
539 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
540 ir_node *store, ir_node *adr)
546 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
548 res = optimize_node (res);
549 irn_vrfy_irg (res, irg);
/* Constructs a Store of 'val' to address 'adr' with memory 'store'. */
554 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
555 ir_node *store, ir_node *adr, ir_node *val)
562 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
564 res = optimize_node (res);
566 irn_vrfy_irg (res, irg);
/* Constructs an Alloc of 'size' bytes/elements of 'alloc_type';
   'where' selects stack vs. heap allocation. */
571 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
572 ir_node *size, type *alloc_type, where_alloc where)
578 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
580 res->attr.a.where = where;
581 res->attr.a.type = alloc_type;
583 res = optimize_node (res);
584 irn_vrfy_irg (res, irg);
/* Constructs a Free of 'size' at 'ptr'; 'free_type' is recorded in the
   node's attribute for type analysis. */
589 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
590 ir_node *ptr, ir_node *size, type *free_type)
597 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
599 res->attr.f = free_type;
601 res = optimize_node (res);
602 irn_vrfy_irg (res, irg);
/* Constructs a Sel: selects entity 'ent' relative to 'objptr', with
   'arity' index expressions for array entities. The real in-array is
   [store, objptr, index0 ..], hence the copy to &r_in[2]. Result is a
   pointer (mode_P). */
607 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
608 int arity, ir_node **in, entity *ent)
615 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
618 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
619 res = new_ir_node (db, irg, block, op_Sel, mode_P, r_arity, r_in);
621 res->attr.s.ent = ent;
623 res = optimize_node (res);
624 irn_vrfy_irg (res, irg);
/* Constructs an InstOf (dynamic type test of 'objptr' against 'ent'). */
629 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
630 ir_node *objptr, type *ent)
637 NEW_ARR_A (ir_node *, r_in, r_arity);
/* NOTE(review): this builds the node with op_Sel although the attribute
   written below is the InstOf one (attr.io) — looks like a copy-paste
   from new_rd_Sel; presumably op_InstOf was intended. Verify. */
641 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
643 res->attr.io.ent = ent;
/* Optimization/verification intentionally disabled here in the original. */
645 /* res = optimize (res);
646 * irn_vrfy_irg (res, irg); */
/* Constructs a SymConst: a symbolic constant that is either a linkage
   pointer (an ident) or a type property (type_tag / size). The union
   attr.i.tori stores whichever variant 'symkind' selects. */
651 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
652 symconst_kind symkind)
/* The result mode depends on the kind (pointer vs. integer). */
656 if (symkind == linkage_ptr_info)
660 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
662 res->attr.i.num = symkind;
663 if (symkind == linkage_ptr_info) {
664 res->attr.i.tori.ptrinfo = (ident *)value;
/* Otherwise 'value' must be a type and the kind a type property. */
666 assert ( ( (symkind == type_tag)
667 || (symkind == size))
668 && (is_type(value)));
669 res->attr.i.tori.typ = (type *)value;
671 res = optimize_node (res);
672 irn_vrfy_irg (res, irg);
/* Constructs a Sync: joins several memory values into one (mode_M). */
677 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
681 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
683 res = optimize_node (res);
684 irn_vrfy_irg (res, irg);
/* Bad and Unknown are unique per graph; these return the per-graph
   singleton (bodies elided in this listing). */
689 new_rd_Bad (ir_graph *irg)
695 new_rd_Unknown (ir_graph *irg)
/* Interprocedural-view nodes follow. CallBegin marks the control flow
   entry of an interprocedurally inlined call; it links back to the
   original Call node. */
701 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
705 in[0] = get_Call_ptr(call);
706 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
707 res->attr.callbegin.irg = irg;
708 res->attr.callbegin.call = call;
709 res = optimize_node (res);
710 irn_vrfy_irg (res, irg);
/* EndReg/EndExcept: interprocedural end nodes for regular and
   exceptional control flow; dynamic in-array (arity -1), not optimized. */
715 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
719 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
720 res->attr.end.irg = irg;
722 irn_vrfy_irg (res, irg);
727 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
731 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
732 res->attr.end.irg = irg;
734 irn_vrfy_irg (res, irg);
/* Break: jump out of an interprocedural subgraph (like Jmp, mode_X). */
739 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
742 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
743 res = optimize_node (res);
744 irn_vrfy_irg (res, irg);
/* Filter: the interprocedural counterpart of Proj; additionally carries
   interprocedural predecessors (in_cg) and backedge info. */
749 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
755 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
756 res->attr.filter.proj = proj;
757 res->attr.filter.in_cg = NULL;
758 res->attr.filter.backedge = NULL;
761 assert(get_Proj_pred(res));
762 assert(get_nodes_Block(get_Proj_pred(res)));
764 res = optimize_node (res);
766 irn_vrfy_irg (res, irg);
/* ---- Convenience wrappers: each new_r_X is new_rd_X with NULL debug
   info. They add no behavior of their own. ---- */
771 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
772 return new_rd_Block(NULL, irg, arity, in);
774 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
775 return new_rd_Start(NULL, irg, block);
777 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
778 return new_rd_End(NULL, irg, block);
780 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
781 return new_rd_Jmp(NULL, irg, block);
783 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
784 return new_rd_Cond(NULL, irg, block, c);
786 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
787 ir_node *store, int arity, ir_node **in) {
788 return new_rd_Return(NULL, irg, block, store, arity, in);
790 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
791 ir_node *store, ir_node *obj) {
792 return new_rd_Raise(NULL, irg, block, store, obj);
794 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
795 ir_mode *mode, tarval *con) {
796 return new_rd_Const(NULL, irg, block, mode, con);
798 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
799 type_or_id_p value, symconst_kind symkind) {
800 return new_rd_SymConst(NULL, irg, block, value, symkind);
802 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
803 ir_node *objptr, int n_index, ir_node **index,
805 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
807 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
809 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
811 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
812 ir_node *callee, int arity, ir_node **in,
814 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
816 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
817 ir_node *op1, ir_node *op2, ir_mode *mode) {
818 return new_rd_Add(NULL, irg, block, op1, op2, mode);
820 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
821 ir_node *op1, ir_node *op2, ir_mode *mode) {
822 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
824 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
825 ir_node *op, ir_mode *mode) {
826 return new_rd_Minus(NULL, irg, block, op, mode);
828 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
829 ir_node *op1, ir_node *op2, ir_mode *mode) {
830 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
832 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
833 ir_node *memop, ir_node *op1, ir_node *op2) {
834 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
836 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
837 ir_node *memop, ir_node *op1, ir_node *op2) {
838 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
840 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
841 ir_node *memop, ir_node *op1, ir_node *op2) {
842 return new_rd_Div(NULL, irg, block, memop, op1, op2);
844 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
845 ir_node *memop, ir_node *op1, ir_node *op2) {
846 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
848 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
849 ir_node *op, ir_mode *mode) {
850 return new_rd_Abs(NULL, irg, block, op, mode);
852 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
853 ir_node *op1, ir_node *op2, ir_mode *mode) {
854 return new_rd_And(NULL, irg, block, op1, op2, mode);
856 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
857 ir_node *op1, ir_node *op2, ir_mode *mode) {
858 return new_rd_Or(NULL, irg, block, op1, op2, mode);
860 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
861 ir_node *op1, ir_node *op2, ir_mode *mode) {
862 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
864 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
865 ir_node *op, ir_mode *mode) {
866 return new_rd_Not(NULL, irg, block, op, mode);
868 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
869 ir_node *op1, ir_node *op2) {
870 return new_rd_Cmp(NULL, irg, block, op1, op2);
872 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
873 ir_node *op, ir_node *k, ir_mode *mode) {
874 return new_rd_Shl(NULL, irg, block, op, k, mode);
876 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
877 ir_node *op, ir_node *k, ir_mode *mode) {
878 return new_rd_Shr(NULL, irg, block, op, k, mode);
880 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
881 ir_node *op, ir_node *k, ir_mode *mode) {
882 return new_rd_Shrs(NULL, irg, block, op, k, mode);
884 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
885 ir_node *op, ir_node *k, ir_mode *mode) {
886 return new_rd_Rot(NULL, irg, block, op, k, mode);
888 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
889 ir_node *op, ir_mode *mode) {
890 return new_rd_Conv(NULL, irg, block, op, mode);
892 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
893 ir_node **in, ir_mode *mode) {
894 return new_rd_Phi(NULL, irg, block, arity, in, mode);
896 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
897 ir_node *store, ir_node *adr) {
898 return new_rd_Load(NULL, irg, block, store, adr);
900 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
901 ir_node *store, ir_node *adr, ir_node *val) {
902 return new_rd_Store(NULL, irg, block, store, adr, val);
904 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
905 ir_node *size, type *alloc_type, where_alloc where) {
906 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
908 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
909 ir_node *ptr, ir_node *size, type *free_type) {
910 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
912 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
913 return new_rd_Sync(NULL, irg, block, arity, in);
915 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
916 ir_mode *mode, long proj) {
917 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
919 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
921 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
923 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
924 int arity, ir_node **in) {
925 return new_rd_Tuple(NULL, irg, block, arity, in );
927 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
928 ir_node *val, ir_mode *mode) {
929 return new_rd_Id(NULL, irg, block, val, mode);
931 INLINE ir_node *new_r_Bad (ir_graph *irg) {
932 return new_rd_Bad(irg);
934 INLINE ir_node *new_r_Unknown (ir_graph *irg) {
935 return new_rd_Unknown(irg);
937 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
938 return new_rd_CallBegin(NULL, irg, block, callee);
940 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
941 return new_rd_EndReg(NULL, irg, block);
943 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
944 return new_rd_EndExcept(NULL, irg, block);
946 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
947 return new_rd_Break(NULL, irg, block);
949 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
950 ir_mode *mode, long proj) {
951 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
955 /** ********************/
956 /** public interfaces */
957 /** construction tools */
961 * - create a new Start node in the current block
963 * @return s - pointer to the created Start node
/* new_d_* constructors operate on the implicit current_ir_graph and its
   current_block instead of taking graph/block parameters. */
968 new_d_Start (dbg_info* db)
972 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
973 op_Start, mode_T, 0, NULL);
975 res = optimize_node (res);
976 irn_vrfy_irg (res, current_ir_graph);
/* Creates the End node of the current graph (dynamic in-array). */
981 new_d_End (dbg_info* db)
984 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
985 op_End, mode_X, -1, NULL);
986 res = optimize_node (res);
987 irn_vrfy_irg (res, current_ir_graph);
992 /* Constructs a Block with a fixed number of predecessors.
993 Does set current_block. Can be used with automatic Phi
994 node construction. */
996 new_d_Block (dbg_info* db, int arity, ir_node **in)
1000 bool has_unknown = false;
1002 res = new_rd_Block (db, current_ir_graph, arity, in);
1004 /* Create and initialize array for Phi-node construction. */
/* graph_arr holds one slot per local variable (n_loc); zeroed so that
   get_r_value_internal can detect "not yet computed". */
1005 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1006 current_ir_graph->n_loc);
1007 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Same rule as new_rd_Phi: defer optimization while Unknown preds exist. */
1009 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1011 if (!has_unknown) res = optimize_node (res);
1012 current_ir_graph->current_block = res;
1014 irn_vrfy_irg (res, current_ir_graph);
1019 /* ***********************************************************************/
1020 /* Methods necessary for automatic Phi node creation */
1022 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1023 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1024 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1025 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1027 Call Graph: ( A ---> B == A "calls" B)
1029 get_value mature_block
1037 get_r_value_internal |
1041 new_rd_Phi0 new_rd_Phi_in
1043 * *************************************************************************** */
1045 /* Creates a Phi node with 0 predecessors */
/* Used as a placeholder in immature blocks; it is upgraded to a real Phi
   (and turned into an Id) when the block matures. Never optimized here. */
1046 static INLINE ir_node *
1047 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1050 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1051 irn_vrfy_irg (res, irg);
1055 /* There are two implementations of the Phi node construction. The first
1056 is faster, but does not work for blocks with more than 2 predecessors.
1057 The second works always but is slower and causes more unnecessary Phi
1059 Select the implementations by the following preprocessor flag set in
1061 #if USE_FAST_PHI_CONSTRUCTION
1063 /* This is a stack used for allocating and deallocating nodes in
1064 new_rd_Phi_in. The original implementation used the obstack
1065 to model this stack, now it is explicit. This reduces side effects.
1067 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh Phi_in_stack (flexible array for recycled Phi nodes).
   NOTE(review): malloc result is used unchecked in this listing. */
1068 INLINE Phi_in_stack *
1069 new_Phi_in_stack() {
1072 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1074 res->stack = NEW_ARR_F (ir_node *, 1);
/* Releases the stack's array storage. */
1081 free_Phi_in_stack(Phi_in_stack *s) {
1082 DEL_ARR_F(s->stack);
/* Pushes an abandoned Phi node onto the recycle stack so its memory can
   be reused by the next allocation. */
1086 free_to_Phi_in_stack(ir_node *phi) {
1087 assert(get_irn_opcode(phi) == iro_Phi);
/* Grow the array only when the cursor is at its end; otherwise overwrite
   the slot at the cursor. */
1089 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1090 current_ir_graph->Phi_in_stack->pos)
1091 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1093 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1095 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node: either a recycled one from the stack (re-initialized
   with a new in-array) or a freshly allocated one if the stack is empty. */
1098 static INLINE ir_node *
1099 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1100 int arity, ir_node **in) {
1102 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1103 int pos = current_ir_graph->Phi_in_stack->pos;
1107 /* We need to allocate a new node */
/* NOTE(review): 'db' is not a parameter or local of this function in the
   visible listing — presumably NULL was intended here. Verify. */
1108 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1109 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1111 /* reuse the old node and initialize it again. */
1114 assert (res->kind == k_ir_node);
1115 assert (res->op == op_Phi);
1119 assert (arity >= 0);
1120 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* Slot 0 of the in-array is the block pointer, hence arity+1 entries
   and the copy into &res->in[1]. */
1121 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1123 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1125 (current_ir_graph->Phi_in_stack->pos)--;
1129 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1131 /* Creates a Phi node with a given, fixed array **in of predecessors.
1132 If the Phi node is unnecessary, as the same value reaches the block
1133 through all control flow paths, it is eliminated and the value
1134 returned directly. This constructor is only intended for use in
1135 the automatic Phi node generation triggered by get_value or mature.
1136 The implementation is quite tricky and depends on the fact, that
1137 the nodes are allocated on a stack:
1138 The in array contains predecessors and NULLs. The NULLs appear,
1139 if get_r_value_internal, that computed the predecessors, reached
1140 the same block on two paths. In this case the same value reaches
1141 this block on both paths, there is no definition in between. We need
1142 not allocate a Phi where these path's merge, but we have to communicate
1143 this fact to the caller. This happens by returning a pointer to the
1144 node the caller _will_ allocate. (Yes, we predict the address. We can
1145 do so because the nodes are allocated on the obstack.) The caller then
1146 finds a pointer to itself and, when this routine is called again,
1149 static INLINE ir_node *
1150 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1151 ir_node **in, int ins)
1154 ir_node *res, *known;
1156 /* allocate a new node on the obstack.
1157 This can return a node to which some of the pointers in the in-array
1159 Attention: the constructor copies the in array, i.e., the later changes
1160 to the array in this routine do not affect the constructed node! If
1161 the in array contains NULLs, there will be missing predecessors in the
1163 Is this a possible internal state of the Phi node generation? */
1164 #if USE_EXPLICIT_PHI_IN_STACK
1165 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1167 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1168 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1170 /* The in-array can contain NULLs. These were returned by
1171 get_r_value_internal if it reached the same block/definition on a
1173 The NULLs are replaced by the node itself to simplify the test in the
1175 for (i=0; i < ins; ++i)
1176 if (in[i] == NULL) in[i] = res;
1178 /* This loop checks whether the Phi has more than one predecessor.
1179 If so, it is a real Phi node and we break the loop. Else the
1180 Phi node merges the same definition on several paths and therefore
1182 for (i=0; i < ins; ++i)
/* Self-references and repeats of the one known value don't count as
   distinct predecessors. */
1184 if (in[i]==res || in[i]==known) continue;
1192 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Give the unused node back: either to the recycle stack or directly to
   the obstack (legal because it is the topmost allocation). */
1194 #if USE_EXPLICIT_PHI_IN_STACK
1195 free_to_Phi_in_stack(res);
1197 obstack_free (current_ir_graph->obst, res);
1201 res = optimize_node (res);
1202 irn_vrfy_irg (res, irg);
1205 /* return the pointer to the Phi node. This node might be deallocated! */
/* Forward declaration: phi_merge and get_r_value_internal recurse
   mutually through the CFG. */
1210 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1213 allocates and returns this node. The routine called to allocate the
1214 node might optimize it away and return a real value, or even a pointer
1215 to a deallocated Phi node on top of the obstack!
1216 This function is called with an in-array of proper size. **/
/* Collects the value of local variable 'pos' from every predecessor block
   and builds (or elides) the Phi node merging them. 'nin' is a caller-
   provided scratch array of 'ins' entries. */
1218 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1220 ir_node *prevBlock, *res;
1223 /* This loop goes to all predecessor blocks of the block the Phi node is in
1224 and there finds the operands of the Phi node by calling
1225 get_r_value_internal. */
/* Note: block->in[0] is the block's own slot, so CFG predecessors are
   indexed 1..ins; nin is filled 0..ins-1. */
1226 for (i = 1; i <= ins; ++i) {
1227 assert (block->in[i]);
1228 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1230 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1233 /* After collecting all predecessors into the array nin a new Phi node
1234 with these predecessors is created. This constructor contains an
1235 optimization: If all predecessors of the Phi node are identical it
1236 returns the only operand instead of a new Phi node. If the value
1237 passes two different control flow edges without being defined, and
1238 this is the second path treated, a pointer to the node that will be
1239 allocated for the first path (recursion) is returned. We already
1240 know the address of this node, as it is the next node to be allocated
1241 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1242 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1244 /* Now we now the value for "pos" and can enter it in the array with
1245 all known local variables. Attention: this might be a pointer to
1246 a node, that later will be allocated!!! See new_rd_Phi_in.
1247 If this is called in mature, after some set_value in the same block,
1248 the proper value must not be overwritten:
1250 get_value (makes Phi0, put's it into graph_arr)
1251 set_value (overwrites Phi0 in graph_arr)
1252 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1255 if (!block->attr.block.graph_arr[pos]) {
1256 block->attr.block.graph_arr[pos] = res;
1258 /* printf(" value already computed by %s\n",
1259 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
1265 /* This function returns the last definition of a variable. In case
1266 this variable was last defined in a previous block, Phi nodes are
1267 inserted. If the part of the firm graph containing the definition
1268 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): this is the first of two variants of get_r_value_internal in
   this file; which one is compiled is selected by the preprocessor (see the
   matching #endif USE_FAST_PHI_CONSTRUCTION further below) -- confirm. */
1270 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1273 /* There are 4 cases to treat.
1275 1. The block is not mature and we visit it the first time. We can not
1276 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1277 predecessors is returned. This node is added to the linked list (field
1278 "link") of the containing block to be completed when this block is
1279 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1282 2. The value is already known in this block, graph_arr[pos] is set and we
1283 visit the block the first time. We can return the value without
1284 creating any new nodes.
1286 3. The block is mature and we visit it the first time. A Phi node needs
1287 to be created (phi_merge). If the Phi is not needed, as all its
1288 operands are the same value reaching the block through different
1289 paths, it's optimized away and the value itself is returned.
1291 4. The block is mature, and we visit it the second time. Now two
1292 subcases are possible:
1293 * The value was computed completely the last time we were here. This
1294 is the case if there is no loop. We can return the proper value.
1295 * The recursion that visited this node and set the flag did not
1296 return yet. We are computing a value in a loop and need to
1297 break the recursion without knowing the result yet.
1298 @@@ strange case. Straight forward we would create a Phi before
1299 starting the computation of its predecessors. In this case we will
1300 find a Phi here in any case. The problem is that this implementation
1301 only creates a Phi after computing the predecessors, so that it is
1302 hard to compute self references of this Phi. @@@
1303 There is no simple check for the second subcase. Therefore we check
1304 for a second visit and treat all such cases as the second subcase.
1305 Anyways, the basic situation is the same: we reached a block
1306 on two paths without finding a definition of the value: No Phi
1307 nodes are needed on both paths.
1308 We return this information "Two paths, no Phi needed" by a very tricky
1309 implementation that relies on the fact that an obstack is a stack and
1310 will return a node with the same address on different allocations.
1311 Look also at phi_merge and new_rd_phi_in to understand this.
1312 @@@ Unfortunately this does not work, see testprogram
1313 three_cfpred_example.
1317 /* case 4 -- already visited. */
/* Returning NULL here signals "reached on a second path, no definition
   found" to the caller; see the long comment above for why. */
1318 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1320 /* visited the first time */
1321 set_irn_visited(block, get_irg_visited(current_ir_graph));
1323 /* Get the local valid value */
1324 res = block->attr.block.graph_arr[pos];
1326 /* case 2 -- If the value is actually computed, return it. */
1327 if (res) { return res;};
1329 if (block->attr.block.matured) { /* case 3 */
1331 /* The Phi has the same amount of ins as the corresponding block. */
1332 int ins = get_irn_arity(block);
1334 NEW_ARR_A (ir_node *, nin, ins);
1336 /* Phi merge collects the predecessors and then creates a node. */
1337 res = phi_merge (block, pos, mode, nin, ins);
1339 } else { /* case 1 */
1340 /* The block is not mature, we don't know how many in's are needed. A Phi
1341 with zero predecessors is created. Such a Phi node is called Phi0
1342 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1343 to the list of Phi0 nodes in this block to be matured by mature_block
1345 The Phi0 has to remember the pos of its internal value. If the real
1346 Phi is computed, pos is used to update the array with the local
1349 res = new_rd_Phi0 (current_ir_graph, block, mode);
1350 res->attr.phi0_pos = pos;
1351 res->link = block->link;
1355 /* If we get here, the frontend missed a use-before-definition error */
1358 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1359 assert (mode->code >= irm_F && mode->code <= irm_P);
1360 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1361 tarval_mode_null[mode->code]);
1364 /* The local valid value is available now. */
1365 block->attr.block.graph_arr[pos] = res;
1373 it starts the recursion. This causes an Id at the entry of
1374 every block that has no definition of the value! **/
1376 #if USE_EXPLICIT_PHI_IN_STACK
/* NOTE(review): these stubs do nothing; the real Phi_in stack implementation
   is redefined in irgraph.c (see the comment at the top of this file). */
1378 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1379 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Allocate a Phi node in 'block' with the given predecessor array.
   If, ignoring Bad nodes and self-references, at most one distinct
   predecessor remains, the Phi is not needed: the freshly allocated node
   is released again via obstack_free and the single known value is used
   instead. */
1382 static INLINE ir_node *
1383 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1384 ir_node **in, int ins)
1387 ir_node *res, *known;
1389 /* Allocate a new node on the obstack. The allocation copies the in
1391 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1392 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1394 /* This loop checks whether the Phi has more than one predecessor.
1395 If so, it is a real Phi node and we break the loop. Else the
1396 Phi node merges the same definition on several paths and therefore
1397 is not needed. Don't consider Bad nodes! */
1399 for (i=0; i < ins; ++i)
1403 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1411 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* NOTE(review): freeing res relies on it being the topmost allocation on
   the obstack -- see the "tricky implementation" comment in
   get_r_value_internal. */
1414 obstack_free (current_ir_graph->obst, res);
1417 /* An undefined value, e.g., in unreachable code. */
1421 res = optimize_node (res);
1422 irn_vrfy_irg (res, irg);
1423 /* Memory Phis in endless loops must be kept alive.
1424 As we can't distinguish these easily we keep all of them alive. */
1425 if ((res->op == op_Phi) && (mode == mode_M))
1426 add_End_keepalive(irg->end, res);
1433 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1435 #if PRECISE_EXC_CONTEXT
1437 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Snapshot the current block's graph_arr for the fragile op n (an op that
   may raise an exception).  The copy records the values valid at the point
   of n; slot 0 is replaced by n's own memory result, and n itself is
   remembered in the last slot (n_loc-1) of the block's graph_arr so later
   definitions can be propagated along the chain (see set_frag_value). */
1439 static INLINE ir_node **
1440 new_frag_arr (ir_node *n) {
1443 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1444 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1445 sizeof(ir_node *)*current_ir_graph->n_loc);
1446 /* turn off optimization before allocating Proj nodes, as res isn't
1448 opt = get_optimize(); set_optimize(0);
1449 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* NOTE(review): the Call memory result is Proj number 3 here -- presumably
   the pn_Call_M position; all other fragile ops use Proj 0.  Confirm
   against the Proj numbering in the headers. */
1450 if (get_irn_op(n) == op_Call)
1451 arr[0] = new_Proj(n, mode_M, 3);
1453 arr[0] = new_Proj(n, mode_M, 0);
1455 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Return the frag_arr stored on a fragile op.  The attribute location
   differs per opcode (Call and Alloc have dedicated attribute structs). */
1459 static INLINE ir_node **
1460 get_frag_arr (ir_node *n) {
1461 if (get_irn_op(n) == op_Call) {
1462 return n->attr.call.frag_arr;
1463 } else if (get_irn_op(n) == op_Alloc) {
1464 return n->attr.a.frag_arr;
1466 return n->attr.frag_arr;
/* Record val at pos in frag_arr unless a value is already present (never
   overwrites), then propagate along the chain of fragile ops linked
   through slot n_loc-1 (set by new_frag_arr). */
1471 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1472 if (!frag_arr[pos]) frag_arr[pos] = val;
1473 if (frag_arr[current_ir_graph->n_loc - 1])
1474 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Like get_r_value_internal, but looks the value up in the graph_arr
   snapshot taken at the fragile op cfOp, so that definitions made after
   cfOp (which may raise) are not visible on the exceptional path. */
1478 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1482 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1484 frag_arr = get_frag_arr(cfOp);
1485 res = frag_arr[pos];
1487 if (block->attr.block.graph_arr[pos]) {
1488 /* There was a set_value after the cfOp and no get_value before that
1489 set_value. We must build a Phi node now. */
1490 if (block->attr.block.matured) {
1491 int ins = get_irn_arity(block);
1493 NEW_ARR_A (ir_node *, nin, ins);
1494 res = phi_merge(block, pos, mode, nin, ins);
/* Block not yet matured: fall back to a Phi0 placeholder, same scheme
   as case 1 of get_r_value_internal. */
1496 res = new_rd_Phi0 (current_ir_graph, block, mode);
1497 res->attr.phi0_pos = pos;
1498 res->link = block->link;
1502 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1503 but this should be better: (remove comment if this works) */
1504 /* It's a Phi, we can write this into all graph_arrs with NULL */
1505 set_frag_value(block->attr.block.graph_arr, pos, res);
1507 res = get_r_value_internal(block, pos, mode);
1508 set_frag_value(block->attr.block.graph_arr, pos, res);
1516 computes the predecessors for the real phi node, and then
1517 allocates and returns this node. The routine called to allocate the
1518 node might optimize it away and return a real value.
1519 This function must be called with an in-array of proper size. **/
1521 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1523 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1526 /* If this block has no value at pos create a Phi0 and remember it
1527 in graph_arr to break recursions.
1528 Else we may not set graph_arr as there a later value is remembered. */
1530 if (!block->attr.block.graph_arr[pos]) {
1531 if (block == get_irg_start_block(current_ir_graph)) {
1532 /* Collapsing to Bad tarvals is no good idea.
1533 So we call a user-supplied routine here that deals with this case as
1534 appropriate for the given language. Sadly the only help we can give
1535 here is the position.
1537 Even if all variables are defined before use, it can happen that
1538 we get to the start block, if a cond has been replaced by a tuple
1539 (bad, jmp). In this case we call the function needlessly, eventually
1540 generating a nonexistent error.
1541 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1544 if (default_initialize_local_variable)
1545 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
1547 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1548 /* We don't need to care about exception ops in the start block.
1549 There are none by definition. */
1550 return block->attr.block.graph_arr[pos];
1552 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1553 block->attr.block.graph_arr[pos] = phi0;
1554 #if PRECISE_EXC_CONTEXT
1555 /* Set graph_arr for fragile ops. Also here we should break recursion.
1556 We could choose a cyclic path through a cfop. But the recursion would
1557 break at some point. */
1558 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1563 /* This loop goes to all predecessor blocks of the block the Phi node
1564 is in and there finds the operands of the Phi node by calling
1565 get_r_value_internal. */
/* Control flow predecessors of a block start at in[1]; nin is 0-based,
   hence the i-1 indexing below. */
1566 for (i = 1; i <= ins; ++i) {
1567 prevCfOp = skip_Proj(block->in[i]);
1569 if (is_Bad(prevCfOp)) {
1570 /* In case a Cond has been optimized we would get right to the start block
1571 with an invalid definition. */
1572 nin[i-1] = new_Bad();
1575 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1577 if (!is_Bad(prevBlock)) {
1578 #if PRECISE_EXC_CONTEXT
1579 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
/* NOTE(review): this assert re-evaluates get_r_frag_value_internal, a
   routine with side effects; under NDEBUG only the call on the next line
   runs, so debug and release builds execute it a different number of
   times -- verify this is harmless. */
1580 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1581 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1584 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1586 nin[i-1] = new_Bad();
1590 /* After collecting all predecessors into the array nin a new Phi node
1591 with these predecessors is created. This constructor contains an
1592 optimization: If all predecessors of the Phi node are identical it
1593 returns the only operand instead of a new Phi node. */
1594 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1596 /* In case we allocated a Phi0 node at the beginning of this procedure,
1597 we need to exchange this Phi0 with the real Phi. */
1599 exchange(phi0, res);
1600 block->attr.block.graph_arr[pos] = res;
1601 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1602 only an optimization. */
1608 /* This function returns the last definition of a variable. In case
1609 this variable was last defined in a previous block, Phi nodes are
1610 inserted. If the part of the firm graph containing the definition
1611 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): second variant of get_r_value_internal; unlike the first
   one it never returns NULL -- phi_merge always leaves a Phi0 in
   graph_arr, so case 4 can return that placeholder instead. */
1613 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1616 /* There are 4 cases to treat.
1618 1. The block is not mature and we visit it the first time. We can not
1619 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1620 predecessors is returned. This node is added to the linked list (field
1621 "link") of the containing block to be completed when this block is
1622 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1625 2. The value is already known in this block, graph_arr[pos] is set and we
1626 visit the block the first time. We can return the value without
1627 creating any new nodes.
1629 3. The block is mature and we visit it the first time. A Phi node needs
1630 to be created (phi_merge). If the Phi is not needed, as all its
1631 operands are the same value reaching the block through different
1632 paths, it's optimized away and the value itself is returned.
1634 4. The block is mature, and we visit it the second time. Now two
1635 subcases are possible:
1636 * The value was computed completely the last time we were here. This
1637 is the case if there is no loop. We can return the proper value.
1638 * The recursion that visited this node and set the flag did not
1639 return yet. We are computing a value in a loop and need to
1640 break the recursion. This case only happens if we visited
1641 the same block with phi_merge before, which inserted a Phi0.
1642 So we return the Phi0.
1645 /* case 4 -- already visited. */
1646 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1647 /* As phi_merge allocates a Phi0 this value is always defined. Here
1648 is the critical difference of the two algorithms. */
1649 assert(block->attr.block.graph_arr[pos]);
1650 return block->attr.block.graph_arr[pos];
1653 /* visited the first time */
1654 set_irn_visited(block, get_irg_visited(current_ir_graph));
1656 /* Get the local valid value */
1657 res = block->attr.block.graph_arr[pos];
1659 /* case 2 -- If the value is actually computed, return it. */
1660 if (res) { return res; };
1662 if (block->attr.block.matured) { /* case 3 */
1664 /* The Phi has the same amount of ins as the corresponding block. */
1665 int ins = get_irn_arity(block);
1667 NEW_ARR_A (ir_node *, nin, ins);
1669 /* Phi merge collects the predecessors and then creates a node. */
1670 res = phi_merge (block, pos, mode, nin, ins);
1672 } else { /* case 1 */
1673 /* The block is not mature, we don't know how many in's are needed. A Phi
1674 with zero predecessors is created. Such a Phi node is called Phi0
1675 node. The Phi0 is then added to the list of Phi0 nodes in this block
1676 to be matured by mature_block later.
1677 The Phi0 has to remember the pos of its internal value. If the real
1678 Phi is computed, pos is used to update the array with the local
1680 res = new_rd_Phi0 (current_ir_graph, block, mode);
1681 res->attr.phi0_pos = pos;
1682 res->link = block->link;
1686 /* If we get here, the frontend missed a use-before-definition error */
1689 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1690 assert (mode->code >= irm_F && mode->code <= irm_P);
1691 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1692 get_mode_null(mode));
1695 /* The local valid value is available now. */
1696 block->attr.block.graph_arr[pos] = res;
1701 #endif /* USE_FAST_PHI_CONSTRUCTION */
1703 /* ************************************************************************** */
1705 /** Finalize a Block node, when all control flows are known. */
1706 /** Acceptable parameters are only Block nodes. */
1708 mature_block (ir_node *block)
1715 assert (get_irn_opcode(block) == iro_Block);
1716 /* @@@ should be commented in
1717 assert (!get_Block_matured(block) && "Block already matured"); */
1719 if (!get_Block_matured(block)) {
/* ARR_LEN(block->in)-1: slot in[0] does not count as a control flow
   predecessor -- TODO confirm the in[0] convention for Block nodes. */
1720 ins = ARR_LEN (block->in)-1;
1721 /* Fix block parameters */
1722 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1724 /* An array for building the Phi nodes. */
1725 NEW_ARR_A (ir_node *, nin, ins);
1727 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each pending Phi0 is completed by phi_merge (now that all predecessors
   are known) and replaced in the graph via exchange(). */
1729 for (n = block->link; n; n=next) {
1730 inc_irg_visited(current_ir_graph);
1732 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1735 block->attr.block.matured = 1;
1737 /* Now, as the block is a finished firm node, we can optimize it.
1738 Since other nodes have been allocated since the block was created
1739 we can not free the node on the obstack. Therefore we have to call
1741 Unfortunately the optimization does not change a lot, as all allocated
1742 nodes refer to the unoptimized node.
1743 We can call _2, as global cse has no effect on blocks. */
1744 block = optimize_in_place_2(block);
1745 irn_vrfy_irg(block, current_ir_graph);
/* Debug-info variants of the node constructors.  Each forwards to the
   corresponding new_rd_* constructor using current_ir_graph and its
   current block.  Fragile ops (Quot, DivMod, Div, Mod, Call, Load, Store,
   Alloc) additionally attach a frag_arr snapshot for precise exception
   contexts while the graph is still in phase_building. */
1750 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1752 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
/* Constants are placed in the start block, not the current block. */
1757 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1759 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1764 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1766 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1771 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1773 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
/* Note: mutates the Cond node arg (marks it fragmentary and records the
   default proj number) before building the Proj. */
1778 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1781 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1782 arg->attr.c.kind = fragmentary;
1783 arg->attr.c.default_proj = max_proj;
1784 res = new_Proj (arg, mode_X, max_proj);
1789 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1791 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1796 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1798 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1803 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1805 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1810 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1812 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1818 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1820 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1825 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1827 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1832 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1835 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
/* The op check below guards against the constructor having folded the
   node away; only a real Quot gets a frag_arr. */
1837 #if PRECISE_EXC_CONTEXT
1838 if ((current_ir_graph->phase_state == phase_building) &&
1839 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1840 res->attr.frag_arr = new_frag_arr(res);
1847 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1850 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1852 #if PRECISE_EXC_CONTEXT
1853 if ((current_ir_graph->phase_state == phase_building) &&
1854 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1855 res->attr.frag_arr = new_frag_arr(res);
1862 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1865 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1867 #if PRECISE_EXC_CONTEXT
1868 if ((current_ir_graph->phase_state == phase_building) &&
1869 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1870 res->attr.frag_arr = new_frag_arr(res);
1877 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1880 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1882 #if PRECISE_EXC_CONTEXT
1883 if ((current_ir_graph->phase_state == phase_building) &&
1884 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1885 res->attr.frag_arr = new_frag_arr(res);
1892 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1894 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1899 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1901 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1906 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1908 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1913 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1915 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1920 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1922 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1927 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1929 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1934 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1936 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
1941 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1943 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
1948 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
1950 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
1955 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
1957 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
1962 new_d_Jmp (dbg_info* db)
1964 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
1968 new_d_Cond (dbg_info* db, ir_node *c)
1970 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
1974 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
1978 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
1979 store, callee, arity, in, tp);
1980 #if PRECISE_EXC_CONTEXT
1981 if ((current_ir_graph->phase_state == phase_building) &&
1982 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
1983 res->attr.call.frag_arr = new_frag_arr(res);
1990 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
1992 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
1997 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
1999 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2004 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2007 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2009 #if PRECISE_EXC_CONTEXT
2010 if ((current_ir_graph->phase_state == phase_building) &&
2011 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
2012 res->attr.frag_arr = new_frag_arr(res);
2019 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2022 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2024 #if PRECISE_EXC_CONTEXT
2025 if ((current_ir_graph->phase_state == phase_building) &&
2026 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
2027 res->attr.frag_arr = new_frag_arr(res);
2034 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2038 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2039 store, size, alloc_type, where);
2040 #if PRECISE_EXC_CONTEXT
2041 if ((current_ir_graph->phase_state == phase_building) &&
2042 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
2043 res->attr.a.frag_arr = new_frag_arr(res);
2050 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2052 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2053 store, ptr, size, free_type);
/* simpleSel: a Sel with no index operands (entity selection only). */
2057 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2058 /* GL: objptr was called frame before. Frame was a bad choice for the name
2059 as the operand could as well be a pointer to a dynamic object. */
2061 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2062 store, objptr, 0, NULL, ent);
2066 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2068 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2069 store, objptr, n_index, index, sel);
2073 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2075 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2076 store, objptr, ent));
2080 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2082 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->current_block,
2087 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2089 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* Bad and Unknown are unique per graph: the cached nodes are returned. */
2097 return current_ir_graph->bad;
2101 new_d_Unknown (void)
2103 return current_ir_graph->unknown;
2107 new_d_CallBegin (dbg_info *db, ir_node *call)
2110 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2115 new_d_EndReg (dbg_info *db)
2118 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2123 new_d_EndExcept (dbg_info *db)
2126 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2131 new_d_Break (dbg_info *db)
2133 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2137 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2139 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2143 /* ********************************************************************* */
2144 /* Comfortable interface with automatic Phi node construction. */
2145 /* (Uses also constructors of ?? interface, except new_Block.) */
2146 /* ********************************************************************* */
2148 /** Block construction **/
2149 /* immature Block without predecessors */
/* Creates a new immature block (arity -1 -> dynamic in-array), makes it
   the current block, and sets up the graph_arr used for automatic Phi
   node construction.  Predecessors are added later via add_in_edge and
   the block is finished with mature_block. */
2150 ir_node *new_d_immBlock (dbg_info* db) {
2153 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2154 /* creates a new dynamic in-array as length of in is -1 */
2155 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2156 current_ir_graph->current_block = res;
2157 res->attr.block.matured = 0;
2158 res->attr.block.exc = exc_normal;
2159 res->attr.block.handler_entry = 0;
2160 res->attr.block.backedge = NULL;
2161 res->attr.block.in_cg = NULL;
2162 res->attr.block.cg_backedge = NULL;
2163 set_Block_block_visited(res, 0);
2165 /* Create and initialize array for Phi-node construction. */
2166 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2167 current_ir_graph->n_loc);
2168 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2170 /* Immature block may not be optimized! */
2171 irn_vrfy_irg (res, current_ir_graph);
2178 return new_d_immBlock(NULL);
2181 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new control flow predecessor of an immature block. */
2183 add_in_edge (ir_node *block, ir_node *jmp)
2185 if (block->attr.block.matured) {
2186 assert(0 && "Error: Block already matured!\n");
2189 assert (jmp != NULL);
2190 ARR_APP1 (ir_node *, block->in, jmp);
2194 /* changing the current block */
2196 switch_block (ir_node *target)
2198 current_ir_graph->current_block = target;
2201 /* ************************ */
2202 /* parameter administration */
/* Convention visible in the code below: slot 0 of a block's graph_arr
   holds the current memory state (the "store"); user-visible local
   values live at index pos+1. */
2204 /* get a value from the parameter array from the current block by its index */
2206 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2208 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2209 inc_irg_visited(current_ir_graph);
2211 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2213 /* get a value from the parameter array from the current block by its index */
2215 get_value (int pos, ir_mode *mode)
2217 return get_d_value(NULL, pos, mode);
2220 /* set a value at position pos in the parameter array from the current block */
2222 set_value (int pos, ir_node *value)
2224 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2225 assert(pos+1 < current_ir_graph->n_loc);
2226 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2229 /* get the current store */
2233 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2234 /* GL: one could call get_value instead */
2235 inc_irg_visited(current_ir_graph);
2236 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2239 /* set the current store */
2241 set_store (ir_node *store)
2243 /* GL: one could call set_value instead */
2244 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2245 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keep ka alive by attaching it to the graph's End node. */
2249 keep_alive (ir_node *ka)
2251 add_End_keepalive(current_ir_graph->end, ka);
2254 /** Useful access routines **/
2255 /* Returns the current block of the current graph. To set the current
2256 block use switch_block(). */
2257 ir_node *get_cur_block() {
2258 return get_irg_current_block(current_ir_graph);
2261 /* Returns the frame type of the current graph */
2262 type *get_cur_frame_type() {
2263 return get_irg_frame_type(current_ir_graph);
2267 /* ********************************************************************* */
2270 /* call once for each run of the library */
/* Registers the language-specific callback used by phi_merge to supply a
   value for uses of undefined local variables. */
2272 init_cons (default_initialize_local_variable_func_t *func)
2274 default_initialize_local_variable = func;
2277 /* call for each graph */
/* Marks graph construction as finished by advancing the phase state. */
2279 finalize_cons (ir_graph *irg) {
2280 irg->phase_state = phase_high;
/* Convenience constructors without debug information: each simply
   forwards to the corresponding new_d_* variant with a NULL dbg_info. */
2284 ir_node *new_Block(int arity, ir_node **in) {
2285 return new_d_Block(NULL, arity, in);
2287 ir_node *new_Start (void) {
2288 return new_d_Start(NULL);
2290 ir_node *new_End (void) {
2291 return new_d_End(NULL);
2293 ir_node *new_Jmp (void) {
2294 return new_d_Jmp(NULL);
2296 ir_node *new_Cond (ir_node *c) {
2297 return new_d_Cond(NULL, c);
2299 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2300 return new_d_Return(NULL, store, arity, in);
2302 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2303 return new_d_Raise(NULL, store, obj);
2305 ir_node *new_Const (ir_mode *mode, tarval *con) {
2306 return new_d_Const(NULL, mode, con);
2308 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2309 return new_d_SymConst(NULL, value, kind);
2311 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2312 return new_d_simpleSel(NULL, store, objptr, ent);
2314 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2316 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2318 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2319 return (new_d_InstOf (NULL, store, objptr, ent));
2321 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2323 return new_d_Call(NULL, store, callee, arity, in, tp);
2325 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2326 return new_d_Add(NULL, op1, op2, mode);
2328 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2329 return new_d_Sub(NULL, op1, op2, mode);
2331 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2332 return new_d_Minus(NULL, op, mode);
2334 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2335 return new_d_Mul(NULL, op1, op2, mode);
2337 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2338 return new_d_Quot(NULL, memop, op1, op2);
2340 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2341 return new_d_DivMod(NULL, memop, op1, op2);
2343 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2344 return new_d_Div(NULL, memop, op1, op2);
2346 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2347 return new_d_Mod(NULL, memop, op1, op2);
2349 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2350 return new_d_Abs(NULL, op, mode);
2352 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2353 return new_d_And(NULL, op1, op2, mode);
2355 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2356 return new_d_Or(NULL, op1, op2, mode);
2358 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2359 return new_d_Eor(NULL, op1, op2, mode);
2361 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2362 return new_d_Not(NULL, op, mode);
2364 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2365 return new_d_Shl(NULL, op, k, mode);
2367 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2368 return new_d_Shr(NULL, op, k, mode);
2370 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2371 return new_d_Shrs(NULL, op, k, mode);
2373 #define new_Rotate new_Rot
2374 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2375 return new_d_Rot(NULL, op, k, mode);
2377 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2378 return new_d_Cmp(NULL, op1, op2);
2380 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2381 return new_d_Conv(NULL, op, mode);
2383 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2384 return new_d_Phi(NULL, arity, in, mode);
2386 ir_node *new_Load (ir_node *store, ir_node *addr) {
2387 return new_d_Load(NULL, store, addr);
2389 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2390 return new_d_Store(NULL, store, addr, val);
2392 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2393 where_alloc where) {
2394 return new_d_Alloc(NULL, store, size, alloc_type, where);
2396 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2398 return new_d_Free(NULL, store, ptr, size, free_type);
2400 ir_node *new_Sync (int arity, ir_node **in) {
2401 return new_d_Sync(NULL, arity, in);
2403 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2404 return new_d_Proj(NULL, arg, mode, proj);
2406 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2407 return new_d_defaultProj(NULL, arg, max_proj);
2409 ir_node *new_Tuple (int arity, ir_node **in) {
2410 return new_d_Tuple(NULL, arity, in);
2412 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2413 return new_d_Id(NULL, val, mode);
2415 ir_node *new_Bad (void) {
2418 ir_node *new_Unknown(void) {
2419 return new_d_Unknown();
2421 ir_node *new_CallBegin (ir_node *callee) {
2422 return new_d_CallBegin(NULL, callee);
2424 ir_node *new_EndReg (void) {
2425 return new_d_EndReg(NULL);
2427 ir_node *new_EndExcept (void) {
2428 return new_d_EndExcept(NULL);
2430 ir_node *new_Break (void) {
2431 return new_d_Break(NULL);
2433 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2434 return new_d_Filter(NULL, arg, mode, proj);