1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
4 * Authors: Martin Trapp, Christian Schaefer
6 * ircons.c: basic and more detailed irnode constructors
7 * store, block and parameter administration.
8 * Adapted to extended FIRM nodes (exceptions...) and commented
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
/* Support for automatic Phi node construction: the explicit Phi stack
   type (compiled in only when USE_EXPLICIT_PHI_IN_STACK is set) and the
   front-end callback that supplies a value for an uninitialized local. */
32 #if USE_EXPLICIT_PHI_IN_STACK
33 /* A stack needed for the automatic Phi node construction in constructor
34 Phi_in. Redefinition in irgraph.c!! */
39 typedef struct Phi_in_stack Phi_in_stack;
43 * language dependent initialization variable
45 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
47 /*** ******************************************** */
48 /** private interfaces, for professional use only */
50 /* Constructs a Block with a fixed number of predecessors.
51 Does not set current_block. Can not be used with automatic
52 Phi node construction. */
/* Constructs a Block node with `arity` fixed control-flow predecessors.
   The block is created already matured and with the visited flag cleared;
   exception state, handler flag and (inter-procedural) backedge info are
   initialized to defaults.  Does not set current_block, so it cannot take
   part in automatic Phi construction (see comment above). */
54 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
58 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
59 set_Block_matured(res, 1);
60 set_Block_block_visited(res, 0);
62 res->attr.block.exc = exc_normal;
63 res->attr.block.handler_entry = 0;
/* backedge flags live on the graph's obstack, one entry per predecessor */
64 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
65 res->attr.block.in_cg = NULL;
66 res->attr.block.cg_backedge = NULL;
68 irn_vrfy_irg (res, irg);
/* Start node: no predecessors; mode_T because its results (frame, args,
   initial memory, ...) are selected via Proj nodes. */
73 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
77 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
79 irn_vrfy_irg (res, irg);
/* End node: created with dynamic arity (-1); predecessors and keep-alive
   edges are attached later (see add_End_keepalive below). */
84 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
88 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
90 irn_vrfy_irg (res, irg);
94 /* Creates a Phi node with all predecessors. Calling this constructor
95 is only allowed if the corresponding block is mature. */
97 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
101 bool has_unknown = false;
/* one data predecessor is required per control-flow predecessor */
103 assert( get_Block_matured(block) );
104 assert( get_irn_arity(block) == arity );
106 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
108 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Do not fold a Phi that still has Unknown inputs: those may be
   replaced by real values later in construction. */
110 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
111 if (!has_unknown) res = optimize_node (res);
112 irn_vrfy_irg (res, irg);
114 /* Memory Phis in endless loops must be kept alive.
115 As we can't distinguish these easily we keep all of them alive. */
116 if ((res->op == op_Phi) && (mode == mode_M))
117 add_End_keepalive(irg->end, res);
/* Const: a constant value `con` of mode `mode`; has no predecessors. */
122 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
125 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
127 res = optimize_node (res);
128 irn_vrfy_irg (res, irg);
/* NOTE(review): res is reassigned after verification here; elided lines
   may explain/guard this call -- confirm local_optimize_newby's contract. */
131 res = local_optimize_newby (res);
/* Id: forwards its single operand `val`; usually folded away. */
138 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
143 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
144 res = optimize_node (res);
145 irn_vrfy_irg (res, irg);
/* Proj: selects result number `proj` from a mode_T predecessor `arg`. */
150 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
156 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
157 res->attr.proj = proj;
160 assert(get_Proj_pred(res));
161 assert(get_nodes_Block(get_Proj_pred(res)));
163 res = optimize_node (res);
165 irn_vrfy_irg (res, irg);
/* defaultProj: marks the Cond `arg` as fragmentary with default target
   `max_proj` and builds the corresponding control-flow Proj (mode_X). */
171 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
175 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
176 arg->attr.c.kind = fragmentary;
177 arg->attr.c.default_proj = max_proj;
178 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Conv: converts `op` to mode `mode`. */
183 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
188 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
189 res = optimize_node (res);
190 irn_vrfy_irg (res, irg);
/* Cast: changes only the high-level type to `to_tp`; the mode is kept. */
195 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
198 res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
199 res->attr.cast.totype = to_tp;
200 res = optimize_node (res);
201 irn_vrfy_irg (res, irg);
/* Tuple: groups `arity` values into one mode_T node (Proj to extract). */
206 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
210 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
211 res = optimize_node (res);
212 irn_vrfy_irg (res, irg);
/* Arithmetic, logic, shift and compare constructors.  All follow the same
   pattern: build the node with the given operands, run optimize_node (which
   may fold it and return a different node), then verify the result.
   The Quot/DivMod/Div/Mod variants additionally take a memory operand and
   return a mode_T node (results are selected via Proj); Cmp returns mode_T
   as well, one Proj per relation. */
217 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
218 ir_node *op1, ir_node *op2, ir_mode *mode)
224 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
225 res = optimize_node (res);
226 irn_vrfy_irg (res, irg);
231 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
232 ir_node *op1, ir_node *op2, ir_mode *mode)
238 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
239 res = optimize_node (res);
240 irn_vrfy_irg (res, irg);
245 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
246 ir_node *op, ir_mode *mode)
251 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
252 res = optimize_node (res);
253 irn_vrfy_irg (res, irg);
258 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
259 ir_node *op1, ir_node *op2, ir_mode *mode)
265 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
266 res = optimize_node (res);
267 irn_vrfy_irg (res, irg);
/* division family: memop serializes against memory (may trap) */
272 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
273 ir_node *memop, ir_node *op1, ir_node *op2)
280 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
281 res = optimize_node (res);
282 irn_vrfy_irg (res, irg);
287 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
288 ir_node *memop, ir_node *op1, ir_node *op2)
295 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
296 res = optimize_node (res);
297 irn_vrfy_irg (res, irg);
302 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
303 ir_node *memop, ir_node *op1, ir_node *op2)
310 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
311 res = optimize_node (res);
312 irn_vrfy_irg (res, irg);
317 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
318 ir_node *memop, ir_node *op1, ir_node *op2)
325 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
326 res = optimize_node (res);
327 irn_vrfy_irg (res, irg);
332 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
333 ir_node *op1, ir_node *op2, ir_mode *mode)
339 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
340 res = optimize_node (res);
341 irn_vrfy_irg (res, irg);
346 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
347 ir_node *op1, ir_node *op2, ir_mode *mode)
353 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
354 res = optimize_node (res);
355 irn_vrfy_irg (res, irg);
360 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
361 ir_node *op1, ir_node *op2, ir_mode *mode)
367 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
368 res = optimize_node (res);
369 irn_vrfy_irg (res, irg);
374 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
375 ir_node *op, ir_mode *mode)
380 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
381 res = optimize_node (res);
382 irn_vrfy_irg (res, irg);
/* shifts and rotate: `k` is the shift amount */
387 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
388 ir_node *op, ir_node *k, ir_mode *mode)
394 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
395 res = optimize_node (res);
396 irn_vrfy_irg (res, irg);
401 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
402 ir_node *op, ir_node *k, ir_mode *mode)
408 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
409 res = optimize_node (res);
410 irn_vrfy_irg (res, irg);
415 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
416 ir_node *op, ir_node *k, ir_mode *mode)
422 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
423 res = optimize_node (res);
424 irn_vrfy_irg (res, irg);
429 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
430 ir_node *op, ir_node *k, ir_mode *mode)
436 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
437 res = optimize_node (res);
438 irn_vrfy_irg (res, irg);
443 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
444 ir_node *op, ir_mode *mode)
449 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
450 res = optimize_node (res);
451 irn_vrfy_irg (res, irg);
456 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
457 ir_node *op1, ir_node *op2)
463 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
464 res = optimize_node (res);
465 irn_vrfy_irg (res, irg);
/* Jmp: unconditional control flow out of `block` (mode_X, no operands). */
470 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
473 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
474 res = optimize_node (res);
475 irn_vrfy_irg (res, irg);
/* Cond: conditional branch on `c`; initialized as a dense jump table with
   default target 0.  Successors are selected via Proj nodes. */
480 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
485 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
486 res->attr.c.kind = dense;
487 res->attr.c.default_proj = 0;
488 res = optimize_node (res);
489 irn_vrfy_irg (res, irg);
/* Call: predecessors are [store, callee, arg0..argN-1]; the method type
   `tp` is recorded on the node. */
494 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
495 ir_node *callee, int arity, ir_node **in, type *tp)
502 NEW_ARR_A (ir_node *, r_in, r_arity);
/* copy the actual arguments behind store and callee */
505 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
507 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
509 assert(is_method_type(tp));
510 set_Call_type(res, tp);
511 res->attr.call.callee_arr = NULL;
512 res = optimize_node (res);
513 irn_vrfy_irg (res, irg);
/* Return: predecessors are [store, res0..resN-1]; mode_X control flow. */
518 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
519 ir_node *store, int arity, ir_node **in)
526 NEW_ARR_A (ir_node *, r_in, r_arity);
528 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
529 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
530 res = optimize_node (res);
531 irn_vrfy_irg (res, irg);
/* Raise: throws exception object `obj`; takes the memory state `store`. */
536 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
542 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
543 res = optimize_node (res);
544 irn_vrfy_irg (res, irg);
/* Load: reads from address `adr` under memory state `store`; mode_T
   result (memory and loaded value are selected via Proj). */
549 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
550 ir_node *store, ir_node *adr)
556 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
558 res = optimize_node (res);
559 irn_vrfy_irg (res, irg);
/* Store: writes `val` to address `adr` under memory state `store`. */
564 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
565 ir_node *store, ir_node *adr, ir_node *val)
572 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
574 res = optimize_node (res);
576 irn_vrfy_irg (res, irg);
/* Alloc: allocates `size` bytes of type `alloc_type`; `where` selects
   stack or heap allocation. */
581 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
582 ir_node *size, type *alloc_type, where_alloc where)
588 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
590 res->attr.a.where = where;
591 res->attr.a.type = alloc_type;
593 res = optimize_node (res);
594 irn_vrfy_irg (res, irg);
/* Free: releases the object at `ptr` of size `size` and type `free_type`. */
599 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
600 ir_node *ptr, ir_node *size, type *free_type)
607 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
609 res->attr.f = free_type;
611 res = optimize_node (res);
612 irn_vrfy_irg (res, irg);
/* Sel: computes the address of entity `ent` within the compound object at
   `objptr`; `in` holds the `arity` array indices.  Predecessor layout is
   [store, objptr, index0..indexN-1]; result mode is machine pointer. */
617 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
618 int arity, ir_node **in, entity *ent)
625 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
628 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
629 res = new_ir_node (db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
631 res->attr.s.ent = ent;
633 res = optimize_node (res);
634 irn_vrfy_irg (res, irg);
/* InstOf: runtime type test of `objptr` against `ent`. */
639 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
640 ir_node *objptr, type *ent)
647 NEW_ARR_A (ir_node *, r_in, r_arity);
/* NOTE(review): the node is constructed with op_Sel, not op_InstOf --
   looks like a copy/paste from new_rd_Sel above; confirm intended opcode. */
651 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
653 res->attr.io.ent = ent;
/* optimization/verification deliberately disabled for this node */
655 /* res = optimize (res);
656 * irn_vrfy_irg (res, irg); */
/* SymConst: a symbolic constant -- either a linkage pointer (ident) or a
   type tag / type size; the union member used depends on `symkind`. */
661 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
662 symconst_kind symkind)
666 if (symkind == linkage_ptr_info)
670 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
672 res->attr.i.num = symkind;
673 if (symkind == linkage_ptr_info) {
674 res->attr.i.tori.ptrinfo = (ident *)value;
676 assert ( ( (symkind == type_tag)
677 || (symkind == size))
678 && (is_type(value)));
679 res->attr.i.tori.typ = (type *)value;
681 res = optimize_node (res);
682 irn_vrfy_irg (res, irg);
/* Sync: merges `arity` memory states into one (mode_M). */
687 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
691 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
693 res = optimize_node (res);
694 irn_vrfy_irg (res, irg);
/* Bad / Unknown: each graph has a single shared instance of these. */
699 new_rd_Bad (ir_graph *irg)
705 new_rd_Unknown (ir_graph *irg)
/* Interprocedural-view nodes: CallBegin/EndReg/EndExcept/Break/Filter. */
/* CallBegin: marks the control transfer into a callee for the given Call;
   its single predecessor is the Call's callee pointer. */
711 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
715 in[0] = get_Call_ptr(call);
716 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
717 res->attr.callbegin.irg = irg;
718 res->attr.callbegin.call = call;
719 res = optimize_node (res);
720 irn_vrfy_irg (res, irg);
/* EndReg: regular end for interprocedural graphs; dynamic arity (-1). */
725 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
729 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
730 res->attr.end.irg = irg;
732 irn_vrfy_irg (res, irg);
/* EndExcept: exceptional end for interprocedural graphs. */
737 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
741 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
742 res->attr.end.irg = irg;
744 irn_vrfy_irg (res, irg);
/* Break: control flow leaving an interprocedural region (mode_X). */
749 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
752 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
753 res = optimize_node (res);
754 irn_vrfy_irg (res, irg);
/* Filter: interprocedural analogue of Proj, selecting result `proj`
   from `arg`; call-graph predecessor and backedge info start empty. */
759 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
765 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
766 res->attr.filter.proj = proj;
767 res->attr.filter.in_cg = NULL;
768 res->attr.filter.backedge = NULL;
771 assert(get_Proj_pred(res));
772 assert(get_nodes_Block(get_Proj_pred(res)));
774 res = optimize_node (res);
776 irn_vrfy_irg (res, irg);
/* Convenience wrappers: each new_r_<X> constructor simply forwards to the
   corresponding new_rd_<X> constructor with a NULL dbg_info. */
781 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
782 return new_rd_Block(NULL, irg, arity, in);
784 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
785 return new_rd_Start(NULL, irg, block);
787 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
788 return new_rd_End(NULL, irg, block);
790 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
791 return new_rd_Jmp(NULL, irg, block);
793 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
794 return new_rd_Cond(NULL, irg, block, c);
796 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
797 ir_node *store, int arity, ir_node **in) {
798 return new_rd_Return(NULL, irg, block, store, arity, in);
800 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
801 ir_node *store, ir_node *obj) {
802 return new_rd_Raise(NULL, irg, block, store, obj);
804 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
805 ir_mode *mode, tarval *con) {
806 return new_rd_Const(NULL, irg, block, mode, con);
808 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
809 type_or_id_p value, symconst_kind symkind) {
810 return new_rd_SymConst(NULL, irg, block, value, symkind);
812 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
813 ir_node *objptr, int n_index, ir_node **index,
815 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
817 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
819 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
821 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
822 ir_node *callee, int arity, ir_node **in,
824 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
826 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
827 ir_node *op1, ir_node *op2, ir_mode *mode) {
828 return new_rd_Add(NULL, irg, block, op1, op2, mode);
830 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
831 ir_node *op1, ir_node *op2, ir_mode *mode) {
832 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
834 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
835 ir_node *op, ir_mode *mode) {
836 return new_rd_Minus(NULL, irg, block, op, mode);
838 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
839 ir_node *op1, ir_node *op2, ir_mode *mode) {
840 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
842 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
843 ir_node *memop, ir_node *op1, ir_node *op2) {
844 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
846 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
847 ir_node *memop, ir_node *op1, ir_node *op2) {
848 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
850 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
851 ir_node *memop, ir_node *op1, ir_node *op2) {
852 return new_rd_Div(NULL, irg, block, memop, op1, op2);
854 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
855 ir_node *memop, ir_node *op1, ir_node *op2) {
856 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
858 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
859 ir_node *op, ir_mode *mode) {
860 return new_rd_Abs(NULL, irg, block, op, mode);
862 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
863 ir_node *op1, ir_node *op2, ir_mode *mode) {
864 return new_rd_And(NULL, irg, block, op1, op2, mode);
866 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
867 ir_node *op1, ir_node *op2, ir_mode *mode) {
868 return new_rd_Or(NULL, irg, block, op1, op2, mode);
870 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
871 ir_node *op1, ir_node *op2, ir_mode *mode) {
872 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
874 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
875 ir_node *op, ir_mode *mode) {
876 return new_rd_Not(NULL, irg, block, op, mode);
878 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
879 ir_node *op1, ir_node *op2) {
880 return new_rd_Cmp(NULL, irg, block, op1, op2);
882 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
883 ir_node *op, ir_node *k, ir_mode *mode) {
884 return new_rd_Shl(NULL, irg, block, op, k, mode);
886 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
887 ir_node *op, ir_node *k, ir_mode *mode) {
888 return new_rd_Shr(NULL, irg, block, op, k, mode);
890 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
891 ir_node *op, ir_node *k, ir_mode *mode) {
892 return new_rd_Shrs(NULL, irg, block, op, k, mode);
894 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
895 ir_node *op, ir_node *k, ir_mode *mode) {
896 return new_rd_Rot(NULL, irg, block, op, k, mode);
898 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
899 ir_node *op, ir_mode *mode) {
900 return new_rd_Conv(NULL, irg, block, op, mode);
902 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
903 return new_rd_Cast(NULL, irg, block, op, to_tp);
905 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
906 ir_node **in, ir_mode *mode) {
907 return new_rd_Phi(NULL, irg, block, arity, in, mode);
909 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
910 ir_node *store, ir_node *adr) {
911 return new_rd_Load(NULL, irg, block, store, adr);
913 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
914 ir_node *store, ir_node *adr, ir_node *val) {
915 return new_rd_Store(NULL, irg, block, store, adr, val);
917 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
918 ir_node *size, type *alloc_type, where_alloc where) {
919 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
921 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
922 ir_node *ptr, ir_node *size, type *free_type) {
923 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
925 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
926 return new_rd_Sync(NULL, irg, block, arity, in);
928 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
929 ir_mode *mode, long proj) {
930 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
932 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
934 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
936 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
937 int arity, ir_node **in) {
938 return new_rd_Tuple(NULL, irg, block, arity, in );
940 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
941 ir_node *val, ir_mode *mode) {
942 return new_rd_Id(NULL, irg, block, val, mode);
944 INLINE ir_node *new_r_Bad (ir_graph *irg) {
945 return new_rd_Bad(irg);
947 INLINE ir_node *new_r_Unknown (ir_graph *irg) {
948 return new_rd_Unknown(irg);
950 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
951 return new_rd_CallBegin(NULL, irg, block, callee);
953 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
954 return new_rd_EndReg(NULL, irg, block);
956 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
957 return new_rd_EndExcept(NULL, irg, block);
959 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
960 return new_rd_Break(NULL, irg, block);
962 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
963 ir_mode *mode, long proj) {
964 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
968 /** ********************/
969 /** public interfaces */
970 /** construction tools */
/* new_d_* constructors operate on the implicit current_ir_graph and its
   current_block instead of taking graph/block parameters. */
974 * - create a new Start node in the current block
976 * @return s - pointer to the created Start node
981 new_d_Start (dbg_info* db)
985 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
986 op_Start, mode_T, 0, NULL);
988 res = optimize_node (res);
989 irn_vrfy_irg (res, current_ir_graph);
/* End node in the current block; dynamic arity (-1). */
994 new_d_End (dbg_info* db)
997 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
998 op_End, mode_X, -1, NULL);
999 res = optimize_node (res);
1000 irn_vrfy_irg (res, current_ir_graph);
1005 /* Constructs a Block with a fixed number of predecessors.
1006 Does set current_block. Can be used with automatic Phi
1007 node construction. */
1009 new_d_Block (dbg_info* db, int arity, ir_node **in)
1013 bool has_unknown = false;
1015 res = new_rd_Block (db, current_ir_graph, arity, in);
1017 /* Create and initialize array for Phi-node construction. */
/* one slot per local variable (n_loc), zeroed = "no value known yet" */
1018 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1019 current_ir_graph->n_loc);
1020 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* blocks with Unknown predecessors must not be folded yet */
1022 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1024 if (!has_unknown) res = optimize_node (res);
1025 current_ir_graph->current_block = res;
1027 irn_vrfy_irg (res, current_ir_graph);
1032 /* ***********************************************************************/
1033 /* Methods necessary for automatic Phi node creation */
1035 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1036 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1037 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1038 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1040 Call Graph: ( A ---> B == A "calls" B)
1042 get_value mature_block
1050 get_r_value_internal |
1054 new_rd_Phi0 new_rd_Phi_in
1056 * *************************************************************************** */
1058 /* Creates a Phi node with 0 predecessors */
/* Placeholder Phi for a value in a not-yet-matured block; completed (or
   turned into an Id) when the block is matured.  Not optimized here. */
1059 static INLINE ir_node *
1060 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1063 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1064 irn_vrfy_irg (res, irg);
1068 /* There are two implementations of the Phi node construction. The first
1069 is faster, but does not work for blocks with more than 2 predecessors.
1070 The second works always but is slower and causes more unnecessary Phi
1072 Select the implementations by the following preprocessor flag set in
1074 #if USE_FAST_PHI_CONSTRUCTION
1076 /* This is a stack used for allocating and deallocating nodes in
1077 new_rd_Phi_in. The original implementation used the obstack
1078 to model this stack, now it is explicit. This reduces side effects.
1080 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh Phi_in_stack; the backing array starts with room for
   one entry and grows on demand (see free_to_Phi_in_stack). */
1081 INLINE Phi_in_stack *
1082 new_Phi_in_stack() {
/* NOTE(review): malloc result appears unchecked here; elided lines may
   contain the check -- verify. */
1085 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1087 res->stack = NEW_ARR_F (ir_node *, 1);
/* Releases the stack's backing array (and, presumably, the struct itself
   on an elided line). */
1094 free_Phi_in_stack(Phi_in_stack *s) {
1095 DEL_ARR_F(s->stack);
/* Pushes a no-longer-needed Phi node onto the current graph's Phi stack
   for later reuse by alloc_or_pop_from_Phi_in_stack. */
1099 free_to_Phi_in_stack(ir_node *phi) {
1100 assert(get_irn_opcode(phi) == iro_Phi);
/* grow the array only when the slot at `pos` does not exist yet */
1102 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1103 current_ir_graph->Phi_in_stack->pos)
1104 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1106 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1108 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node with the given block/mode/arity: either a freshly
   allocated one (stack empty) or a recycled node popped from the graph's
   Phi_in_stack, whose in-array is rebuilt on the obstack. */
1111 static INLINE ir_node *
1112 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1113 int arity, ir_node **in) {
1115 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1116 int pos = current_ir_graph->Phi_in_stack->pos;
1120 /* We need to allocate a new node */
/* NOTE(review): `db` is used here but is not in the visible parameter
   list -- an elided parameter, or should this be NULL? Verify. */
1121 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1122 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1124 /* reuse the old node and initialize it again. */
1127 assert (res->kind == k_ir_node);
1128 assert (res->op == op_Phi);
1132 assert (arity >= 0);
1133 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved for the block pointer; data preds start at in[1] */
1134 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1136 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1138 (current_ir_graph->Phi_in_stack->pos)--;
1142 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1144 /* Creates a Phi node with a given, fixed array **in of predecessors.
1145 If the Phi node is unnecessary, as the same value reaches the block
1146 through all control flow paths, it is eliminated and the value
1147 returned directly. This constructor is only intended for use in
1148 the automatic Phi node generation triggered by get_value or mature.
1149 The implementation is quite tricky and depends on the fact, that
1150 the nodes are allocated on a stack:
1151 The in array contains predecessors and NULLs. The NULLs appear,
1152 if get_r_value_internal, that computed the predecessors, reached
1153 the same block on two paths. In this case the same value reaches
1154 this block on both paths, there is no definition in between. We need
1155 not allocate a Phi where these path's merge, but we have to communicate
1156 this fact to the caller. This happens by returning a pointer to the
1157 node the caller _will_ allocate. (Yes, we predict the address. We can
1158 do so because the nodes are allocated on the obstack.) The caller then
1159 finds a pointer to itself and, when this routine is called again,
/* Builds a Phi from `in[0..ins-1]` during automatic Phi construction (see
   the long comment above): NULL entries mean "same value on this path";
   if at most one distinct predecessor remains, the node is discarded and
   that value (or the predicted address) is returned instead. */
1162 static INLINE ir_node *
1163 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1164 ir_node **in, int ins)
1167 ir_node *res, *known;
1169 /* allocate a new node on the obstack.
1170 This can return a node to which some of the pointers in the in-array
1172 Attention: the constructor copies the in array, i.e., the later changes
1173 to the array in this routine do not affect the constructed node! If
1174 the in array contains NULLs, there will be missing predecessors in the
1176 Is this a possible internal state of the Phi node generation? */
1177 #if USE_EXPLICIT_PHI_IN_STACK
1178 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1180 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1181 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1183 /* The in-array can contain NULLs. These were returned by
1184 get_r_value_internal if it reached the same block/definition on a
1186 The NULLs are replaced by the node itself to simplify the test in the
1188 for (i=0; i < ins; ++i)
1189 if (in[i] == NULL) in[i] = res;
1191 /* This loop checks whether the Phi has more than one predecessor.
1192 If so, it is a real Phi node and we break the loop. Else the
1193 Phi node merges the same definition on several paths and therefore
1195 for (i=0; i < ins; ++i)
1197 if (in[i]==res || in[i]==known) continue;
1205 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* give the unused node back: to the explicit stack, or roll back the
   obstack allocation (res is the top of the obstack) */
1207 #if USE_EXPLICIT_PHI_IN_STACK
1208 free_to_Phi_in_stack(res);
1210 obstack_free (current_ir_graph->obst, res);
1214 res = optimize_node (res);
1215 irn_vrfy_irg (res, irg);
1218 /* return the pointer to the Phi node. This node might be deallocated! */
1223 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1226 allocates and returns this node. The routine called to allocate the
1227 node might optimize it away and return a real value, or even a pointer
1228 to a deallocated Phi node on top of the obstack!
1229 This function is called with an in-array of proper size. **/
/* Collects the value for local variable `pos` from every predecessor block
   into `nin` and builds the merging Phi via new_rd_Phi_in.  The result is
   recorded in the block's graph_arr unless a value was already entered
   (e.g. by a set_value between get_value and mature_block). */
1231 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1233 ir_node *prevBlock, *res;
1236 /* This loop goes to all predecessor blocks of the block the Phi node is in
1237 and there finds the operands of the Phi node by calling
1238 get_r_value_internal. */
1239 for (i = 1; i <= ins; ++i) {
1240 assert (block->in[i]);
1241 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1243 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1246 /* After collecting all predecessors into the array nin a new Phi node
1247 with these predecessors is created. This constructor contains an
1248 optimization: If all predecessors of the Phi node are identical it
1249 returns the only operand instead of a new Phi node. If the value
1250 passes two different control flow edges without being defined, and
1251 this is the second path treated, a pointer to the node that will be
1252 allocated for the first path (recursion) is returned. We already
1253 know the address of this node, as it is the next node to be allocated
1254 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1255 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1257 /* Now we know the value for "pos" and can enter it in the array with
1258 all known local variables. Attention: this might be a pointer to
1259 a node, that later will be allocated!!! See new_rd_Phi_in.
1260 If this is called in mature, after some set_value in the same block,
1261 the proper value must not be overwritten:
1263 get_value (makes Phi0, put's it into graph_arr)
1264 set_value (overwrites Phi0 in graph_arr)
1265 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1268 if (!block->attr.block.graph_arr[pos]) {
1269 block->attr.block.graph_arr[pos] = res;
1271 /* printf(" value already computed by %s\n",
1272 id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
1278 /* This function returns the last definition of a variable. In case
1279 this variable was last defined in a previous block, Phi nodes are
1280 inserted. If the part of the firm graph containing the definition
1281 is not yet constructed, a dummy Phi node is returned. */
1283 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1286 /* There are 4 cases to treat.
1288 1. The block is not mature and we visit it the first time. We can not
1289 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1290 predecessors is returned. This node is added to the linked list (field
1291 "link") of the containing block to be completed when this block is
1292 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1295 2. The value is already known in this block, graph_arr[pos] is set and we
1296 visit the block the first time. We can return the value without
1297 creating any new nodes.
1299 3. The block is mature and we visit it the first time. A Phi node needs
1300 to be created (phi_merge). If the Phi is not needed, as all its
1301 operands are the same value reaching the block through different
1302 paths, it's optimized away and the value itself is returned.
1304 4. The block is mature, and we visit it the second time. Now two
1305 subcases are possible:
1306 * The value was computed completely the last time we were here. This
1307 is the case if there is no loop. We can return the proper value.
1308 * The recursion that visited this node and set the flag did not
1309 return yet. We are computing a value in a loop and need to
1310 break the recursion without knowing the result yet.
1311 @@@ strange case. Straight forward we would create a Phi before
1312 starting the computation of its predecessors. In this case we will
1313 find a Phi here in any case. The problem is that this implementation
1314 only creates a Phi after computing the predecessors, so that it is
1315 hard to compute self references of this Phi. @@@
1316 There is no simple check for the second subcase. Therefore we check
1317 for a second visit and treat all such cases as the second subcase.
1318 Anyway, the basic situation is the same: we reached a block
1319 on two paths without finding a definition of the value: No Phi
1320 nodes are needed on both paths.
1321 We return this information "Two paths, no Phi needed" by a very tricky
1322 implementation that relies on the fact that an obstack is a stack and
1323 will return a node with the same address on different allocations.
1324 Look also at phi_merge and new_rd_phi_in to understand this.
1325 @@@ Unfortunately this does not work, see testprogram
1326 three_cfpred_example.
1330 /* case 4 -- already visited. */
1331 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1333 /* visited the first time */
1334 set_irn_visited(block, get_irg_visited(current_ir_graph));
1336 /* Get the local valid value */
1337 res = block->attr.block.graph_arr[pos];
1339 /* case 2 -- If the value is actually computed, return it. */
1340 if (res) { return res;};
1342 if (block->attr.block.matured) { /* case 3 */
1344 /* The Phi has the same amount of ins as the corresponding block. */
1345 int ins = get_irn_arity(block);
1347 NEW_ARR_A (ir_node *, nin, ins);
1349 /* Phi merge collects the predecessors and then creates a node. */
1350 res = phi_merge (block, pos, mode, nin, ins);
1352 } else { /* case 1 */
1353 /* The block is not mature, we don't know how many in's are needed. A Phi
1354 with zero predecessors is created. Such a Phi node is called Phi0
1355 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1356 to the list of Phi0 nodes in this block to be matured by mature_block
1358 The Phi0 has to remember the pos of its internal value. If the real
1359 Phi is computed, pos is used to update the array with the local
1362 res = new_rd_Phi0 (current_ir_graph, block, mode);
1363 res->attr.phi0_pos = pos;
1364 res->link = block->link;
1368 /* If we get here, the frontend missed a use-before-definition error */
1371 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1372 assert (mode->code >= irm_F && mode->code <= irm_P);
1373 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1374 tarval_mode_null[mode->code]);
1377 /* The local valid value is available now. */
1378 block->attr.block.graph_arr[pos] = res;
1389 #if USE_EXPLICIT_PHI_IN_STACK
1391 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1392 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Creates a Phi node in the given block with the given predecessors, and
   optimizes it away when it is not actually needed (see the loop below). */
1395 static INLINE ir_node *
1396 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1397 ir_node **in, int ins)
1400 ir_node *res, *known;
1402 /* Allocate a new node on the obstack. The allocation copies the in
1404 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1405 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1407 /* This loop checks whether the Phi has more than one predecessor.
1408 If so, it is a real Phi node and we break the loop. Else the
1409 Phi node merges the same definition on several paths and therefore
1410 is not needed. Don't consider Bad nodes! */
1412 for (i=0; i < ins; ++i)
1416 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1424 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1427 obstack_free (current_ir_graph->obst, res);
1430 /* An undefined value, e.g., in unreachable code. */
1434 res = optimize_node (res);
1435 irn_vrfy_irg (res, irg);
1436 /* Memory Phis in endless loops must be kept alive.
1437 As we can't distinguish these easily we keep all of them alive. */
1438 if ((res->op == op_Phi) && (mode == mode_M))
1439 add_End_keepalive(irg->end, res);
1446 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1448 #if PRECISE_EXC_CONTEXT
1450 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Allocates a frag array for the fragile op n: a copy (snapshot) of the
   current block's graph_arr, recording the local values that are valid
   at the point where n may raise an exception. */
1452 static INLINE ir_node **
1453 new_frag_arr (ir_node *n) {
1456 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1457 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1458 sizeof(ir_node *)*current_ir_graph->n_loc);
1459 /* turn off optimization before allocating Proj nodes, as res isn't
1461 opt = get_optimize(); set_optimize(0);
1462 /* Here we rely on the fact that all frag ops have Memory as first result! */
1463 if (get_irn_op(n) == op_Call)
1464 arr[0] = new_Proj(n, mode_M, 3);
1466 arr[0] = new_Proj(n, mode_M, 0);
/* Remember n itself in the last slot so set_frag_value can chain
   through the fragile ops of this block. */
1468 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Returns the frag array stored in the attributes of fragile op n.
   The field location depends on the opcode's attribute layout. */
1472 static INLINE ir_node **
1473 get_frag_arr (ir_node *n) {
1474 if (get_irn_op(n) == op_Call) {
1475 return n->attr.call.frag_arr;
1476 } else if (get_irn_op(n) == op_Alloc) {
1477 return n->attr.a.frag_arr;
1479 return n->attr.frag_arr;
/* Enters val at pos into frag_arr unless a value is already recorded there
   (never overwrites).  The last slot (n_loc - 1) holds the next fragile op
   of the block (see new_frag_arr), so the value is propagated recursively
   into the frag arrays of all following fragile ops. */
1484 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1485 if (!frag_arr[pos]) frag_arr[pos] = val;
1486 if (frag_arr[current_ir_graph->n_loc - 1])
1487 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Returns the value at pos that is valid on the exception edge of the
   fragile op cfOp, consulting cfOp's frag array and building Phi nodes
   where a later set_value in the block changed the local value. */
1491 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1495 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1497 frag_arr = get_frag_arr(cfOp);
1498 res = frag_arr[pos];
1500 if (block->attr.block.graph_arr[pos]) {
1501 /* There was a set_value after the cfOp and no get_value before that
1502 set_value. We must build a Phi node now. */
1503 if (block->attr.block.matured) {
1504 int ins = get_irn_arity(block);
1506 NEW_ARR_A (ir_node *, nin, ins);
1507 res = phi_merge(block, pos, mode, nin, ins);
1509 res = new_rd_Phi0 (current_ir_graph, block, mode);
1510 res->attr.phi0_pos = pos;
1511 res->link = block->link;
1515 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1516 but this should be better: (remove comment if this works) */
1517 /* It's a Phi, we can write this into all graph_arrs with NULL */
1518 set_frag_value(block->attr.block.graph_arr, pos, res);
1520 res = get_r_value_internal(block, pos, mode);
1521 set_frag_value(block->attr.block.graph_arr, pos, res);
1529 computes the predecessors for the real phi node, and then
1530 allocates and returns this node. The routine called to allocate the
1531 node might optimize it away and return a real value.
1532 This function must be called with an in-array of proper size. **/
1534 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1536 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1539 /* If this block has no value at pos create a Phi0 and remember it
1540 in graph_arr to break recursions.
1541 Else we must not set graph_arr, as a later value is remembered there. */
1543 if (!block->attr.block.graph_arr[pos]) {
1544 if (block == get_irg_start_block(current_ir_graph)) {
1545 /* Collapsing to Bad tarvals is no good idea.
1546 So we call a user-supplied routine here that deals with this case as
1547 appropriate for the given language. Unfortunately the only help we can give
1548 here is the position.
1550 Even if all variables are defined before use, it can happen that
1551 we get to the start block, if a cond has been replaced by a tuple
1552 (bad, jmp). In this case we call the function needlessly, eventually
1553 generating a nonexistent error.
1554 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1557 if (default_initialize_local_variable)
1558 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
1560 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1561 /* We don't need to care about exception ops in the start block.
1562 There are none by definition. */
1563 return block->attr.block.graph_arr[pos];
1565 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1566 block->attr.block.graph_arr[pos] = phi0;
1567 #if PRECISE_EXC_CONTEXT
1568 /* Set graph_arr for fragile ops. Also here we should break recursion.
1569 We could choose a cyclic path through a cfop. But the recursion would
1570 break at some point. */
1571 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1576 /* This loop goes to all predecessor blocks of the block the Phi node
1577 is in and there finds the operands of the Phi node by calling
1578 get_r_value_internal. */
1579 for (i = 1; i <= ins; ++i) {
1580 prevCfOp = skip_Proj(block->in[i]);
1582 if (is_Bad(prevCfOp)) {
1583 /* In case a Cond has been optimized we would get right to the start block
1584 with an invalid definition. */
1585 nin[i-1] = new_Bad();
1588 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1590 if (!is_Bad(prevBlock)) {
1591 #if PRECISE_EXC_CONTEXT
1592 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1593 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1594 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1597 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1599 nin[i-1] = new_Bad();
1603 /* After collecting all predecessors into the array nin a new Phi node
1604 with these predecessors is created. This constructor contains an
1605 optimization: If all predecessors of the Phi node are identical it
1606 returns the only operand instead of a new Phi node. */
1607 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1609 /* In case we allocated a Phi0 node at the beginning of this procedure,
1610 we need to exchange this Phi0 with the real Phi. */
1612 exchange(phi0, res);
1613 block->attr.block.graph_arr[pos] = res;
1614 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1615 only an optimization. */
1621 /* This function returns the last definition of a variable. In case
1622 this variable was last defined in a previous block, Phi nodes are
1623 inserted. If the part of the firm graph containing the definition
1624 is not yet constructed, a dummy Phi node is returned. */
1626 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1629 /* There are 4 cases to treat.
1631 1. The block is not mature and we visit it the first time. We can not
1632 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1633 predecessors is returned. This node is added to the linked list (field
1634 "link") of the containing block to be completed when this block is
1635 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1638 2. The value is already known in this block, graph_arr[pos] is set and we
1639 visit the block the first time. We can return the value without
1640 creating any new nodes.
1642 3. The block is mature and we visit it the first time. A Phi node needs
1643 to be created (phi_merge). If the Phi is not needed, as all its
1644 operands are the same value reaching the block through different
1645 paths, it's optimized away and the value itself is returned.
1647 4. The block is mature, and we visit it the second time. Now two
1648 subcases are possible:
1649 * The value was computed completely the last time we were here. This
1650 is the case if there is no loop. We can return the proper value.
1651 * The recursion that visited this node and set the flag did not
1652 return yet. We are computing a value in a loop and need to
1653 break the recursion. This case only happens if we visited
1654 the same block with phi_merge before, which inserted a Phi0.
1655 So we return the Phi0.
1658 /* case 4 -- already visited. */
1659 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1660 /* As phi_merge allocates a Phi0 this value is always defined. Here
1661 is the critical difference of the two algorithms. */
1662 assert(block->attr.block.graph_arr[pos]);
1663 return block->attr.block.graph_arr[pos];
1666 /* visited the first time */
1667 set_irn_visited(block, get_irg_visited(current_ir_graph));
1669 /* Get the local valid value */
1670 res = block->attr.block.graph_arr[pos];
1672 /* case 2 -- If the value is actually computed, return it. */
1673 if (res) { return res; };
1675 if (block->attr.block.matured) { /* case 3 */
1677 /* The Phi has the same amount of ins as the corresponding block. */
1678 int ins = get_irn_arity(block);
1680 NEW_ARR_A (ir_node *, nin, ins);
1682 /* Phi merge collects the predecessors and then creates a node. */
1683 res = phi_merge (block, pos, mode, nin, ins);
1685 } else { /* case 1 */
1686 /* The block is not mature, we don't know how many in's are needed. A Phi
1687 with zero predecessors is created. Such a Phi node is called Phi0
1688 node. The Phi0 is then added to the list of Phi0 nodes in this block
1689 to be matured by mature_block later.
1690 The Phi0 has to remember the pos of its internal value. If the real
1691 Phi is computed, pos is used to update the array with the local
1693 res = new_rd_Phi0 (current_ir_graph, block, mode);
1694 res->attr.phi0_pos = pos;
1695 res->link = block->link;
1699 /* If we get here, the frontend missed a use-before-definition error */
1702 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1703 assert (mode->code >= irm_F && mode->code <= irm_P);
1704 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1705 get_mode_null(mode));
1708 /* The local valid value is available now. */
1709 block->attr.block.graph_arr[pos] = res;
1716 /* ************************************************************************** */
1718 /** Finalize a Block node, when all control flows are known. */
1719 /** Acceptable parameters are only Block nodes. */
1721 mature_block (ir_node *block)
1728 assert (get_irn_opcode(block) == iro_Block);
1729 /* @@@ should be commented in
1730 assert (!get_Block_matured(block) && "Block already matured"); */
1732 if (!get_Block_matured(block)) {
1733 ins = ARR_LEN (block->in)-1;
1734 /* Fix block parameters */
1735 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1737 /* An array for building the Phi nodes. */
1738 NEW_ARR_A (ir_node *, nin, ins);
1740 /* Traverse a chain of Phi nodes attached to this block and mature
1742 for (n = block->link; n; n=next) {
1743 inc_irg_visited(current_ir_graph);
1745 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1748 block->attr.block.matured = 1;
1750 /* Now, as the block is a finished firm node, we can optimize it.
1751 Since other nodes have been allocated since the block was created
1752 we can not free the node on the obstack. Therefore we have to call
1754 Unfortunately the optimization does not change a lot, as all allocated
1755 nodes refer to the unoptimized node.
1756 We can call _2, as global cse has no effect on blocks. */
1757 block = optimize_in_place_2(block);
1758 irn_vrfy_irg(block, current_ir_graph);
/* ********************************************************************** */
/* Constructors with debug information (new_d_X):  each one forwards to   */
/* the corresponding new_rd_X, placing the node in the current block of   */
/* current_ir_graph (Const and SymConst use the start block).  Fragile    */
/* ops additionally get a frag array when PRECISE_EXC_CONTEXT is set.     */
/* ********************************************************************** */
1763 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1765 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
1770 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1772 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1777 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1779 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1784 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1786 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
1791 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1794 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1795 arg->attr.c.kind = fragmentary;
1796 arg->attr.c.default_proj = max_proj;
1797 res = new_Proj (arg, mode_X, max_proj);
1802 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1804 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1809 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1811 return new_rd_Cast (db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1815 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1817 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1822 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1824 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1829 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1831 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1837 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1839 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1844 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1846 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1851 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1854 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1856 #if PRECISE_EXC_CONTEXT
1857 if ((current_ir_graph->phase_state == phase_building) &&
1858 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1859 res->attr.frag_arr = new_frag_arr(res);
1866 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1869 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1871 #if PRECISE_EXC_CONTEXT
1872 if ((current_ir_graph->phase_state == phase_building) &&
1873 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1874 res->attr.frag_arr = new_frag_arr(res);
1881 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1884 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1886 #if PRECISE_EXC_CONTEXT
1887 if ((current_ir_graph->phase_state == phase_building) &&
1888 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1889 res->attr.frag_arr = new_frag_arr(res);
1896 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1899 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1901 #if PRECISE_EXC_CONTEXT
1902 if ((current_ir_graph->phase_state == phase_building) &&
1903 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1904 res->attr.frag_arr = new_frag_arr(res);
1911 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1913 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1918 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1920 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1925 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1927 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1932 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1934 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1939 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1941 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1946 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1948 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1953 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1955 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
1960 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1962 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
1967 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
1969 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
1974 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
1976 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
1981 new_d_Jmp (dbg_info* db)
1983 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
1987 new_d_Cond (dbg_info* db, ir_node *c)
1989 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
1993 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
1997 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
1998 store, callee, arity, in, tp);
1999 #if PRECISE_EXC_CONTEXT
2000 if ((current_ir_graph->phase_state == phase_building) &&
2001 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
2002 res->attr.call.frag_arr = new_frag_arr(res);
2009 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2011 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2016 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2018 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2023 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2026 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2028 #if PRECISE_EXC_CONTEXT
2029 if ((current_ir_graph->phase_state == phase_building) &&
2030 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
2031 res->attr.frag_arr = new_frag_arr(res);
2038 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2041 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2043 #if PRECISE_EXC_CONTEXT
2044 if ((current_ir_graph->phase_state == phase_building) &&
2045 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
2046 res->attr.frag_arr = new_frag_arr(res);
2053 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2057 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2058 store, size, alloc_type, where);
2059 #if PRECISE_EXC_CONTEXT
2060 if ((current_ir_graph->phase_state == phase_building) &&
2061 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
2062 res->attr.a.frag_arr = new_frag_arr(res);
2069 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2071 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2072 store, ptr, size, free_type);
2076 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2077 /* GL: objptr was called frame before. Frame was a bad choice for the name
2078 as the operand could as well be a pointer to a dynamic object. */
2080 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2081 store, objptr, 0, NULL, ent);
2085 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2087 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2088 store, objptr, n_index, index, sel);
2092 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2094 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2095 store, objptr, ent));
2099 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2101 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2106 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2108 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2116 return current_ir_graph->bad;
2120 new_d_Unknown (void)
2122 return current_ir_graph->unknown;
2126 new_d_CallBegin (dbg_info *db, ir_node *call)
2129 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2134 new_d_EndReg (dbg_info *db)
2137 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2142 new_d_EndExcept (dbg_info *db)
2145 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2150 new_d_Break (dbg_info *db)
2152 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2156 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2158 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2162 /* ********************************************************************* */
2163 /* Comfortable interface with automatic Phi node construction. */
2164 /* (Also uses constructors of the ?? interface, except new_Block.)       */
2165 /* ********************************************************************* */
2167 /** Block construction **/
2168 /* immature Block without predecessors */
2169 ir_node *new_d_immBlock (dbg_info* db) {
2172 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2173 /* creates a new dynamic in-array as length of in is -1 */
2174 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2175 current_ir_graph->current_block = res;
2176 res->attr.block.matured = 0;
2177 res->attr.block.exc = exc_normal;
2178 res->attr.block.handler_entry = 0;
2179 res->attr.block.backedge = NULL;
2180 res->attr.block.in_cg = NULL;
2181 res->attr.block.cg_backedge = NULL;
2182 set_Block_block_visited(res, 0);
2184 /* Create and initialize array for Phi-node construction. */
2185 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2186 current_ir_graph->n_loc);
2187 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2189 /* Immature block may not be optimized! */
2190 irn_vrfy_irg (res, current_ir_graph);
2197 return new_d_immBlock(NULL);
2200 /* add an edge to a jmp/control flow node */
2202 add_in_edge (ir_node *block, ir_node *jmp)
2204 if (block->attr.block.matured) {
2205 assert(0 && "Error: Block already matured!\n");
2208 assert (jmp != NULL);
2209 ARR_APP1 (ir_node *, block->in, jmp);
2213 /* changing the current block */
2215 switch_block (ir_node *target)
2217 current_ir_graph->current_block = target;
2220 /* ************************ */
2221 /* parameter administration */
/* Note: slot 0 of graph_arr holds the memory state (the store); local
   variable pos is therefore kept at graph_arr[pos + 1]. */
2223 /* get a value from the parameter array from the current block by its index */
2225 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2227 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2228 inc_irg_visited(current_ir_graph);
2230 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2232 /* get a value from the parameter array from the current block by its index */
2234 get_value (int pos, ir_mode *mode)
2236 return get_d_value(NULL, pos, mode);
2239 /* set a value at position pos in the parameter array from the current block */
2241 set_value (int pos, ir_node *value)
2243 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2244 assert(pos+1 < current_ir_graph->n_loc);
2245 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2248 /* get the current store */
2252 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2253 /* GL: one could call get_value instead */
2254 inc_irg_visited(current_ir_graph);
2255 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2258 /* set the current store */
2260 set_store (ir_node *store)
2262 /* GL: one could call set_value instead */
2263 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2264 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keep node ka alive by adding it to the End node's keepalive edges. */
2268 keep_alive (ir_node *ka)
2270 add_End_keepalive(current_ir_graph->end, ka);
2273 /** Useful access routines **/
2274 /* Returns the current block of the current graph. To set the current
2275 block use switch_block(). */
2276 ir_node *get_cur_block() {
2277 return get_irg_current_block(current_ir_graph);
2280 /* Returns the frame type of the current graph */
2281 type *get_cur_frame_type() {
2282 return get_irg_frame_type(current_ir_graph);
2286 /* ********************************************************************* */
2289 /* call once for each run of the library */
2291 init_cons (default_initialize_local_variable_func_t *func)
2293 default_initialize_local_variable = func;
2296 /* call for each graph */
2298 finalize_cons (ir_graph *irg) {
2299 irg->phase_state = phase_high;
/* ********************************************************************** */
/* Convenience constructors without debug information:  each new_X simply */
/* forwards to the corresponding new_d_X with db == NULL.                 */
/* ********************************************************************** */
2303 ir_node *new_Block(int arity, ir_node **in) {
2304 return new_d_Block(NULL, arity, in);
2306 ir_node *new_Start (void) {
2307 return new_d_Start(NULL);
2309 ir_node *new_End (void) {
2310 return new_d_End(NULL);
2312 ir_node *new_Jmp (void) {
2313 return new_d_Jmp(NULL);
2315 ir_node *new_Cond (ir_node *c) {
2316 return new_d_Cond(NULL, c);
2318 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2319 return new_d_Return(NULL, store, arity, in);
2321 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2322 return new_d_Raise(NULL, store, obj);
2324 ir_node *new_Const (ir_mode *mode, tarval *con) {
2325 return new_d_Const(NULL, mode, con);
2327 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2328 return new_d_SymConst(NULL, value, kind);
2330 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2331 return new_d_simpleSel(NULL, store, objptr, ent);
2333 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2335 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2337 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2338 return (new_d_InstOf (NULL, store, objptr, ent));
2340 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2342 return new_d_Call(NULL, store, callee, arity, in, tp);
2344 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2345 return new_d_Add(NULL, op1, op2, mode);
2347 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2348 return new_d_Sub(NULL, op1, op2, mode);
2350 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2351 return new_d_Minus(NULL, op, mode);
2353 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2354 return new_d_Mul(NULL, op1, op2, mode);
2356 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2357 return new_d_Quot(NULL, memop, op1, op2);
2359 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2360 return new_d_DivMod(NULL, memop, op1, op2);
2362 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2363 return new_d_Div(NULL, memop, op1, op2);
2365 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2366 return new_d_Mod(NULL, memop, op1, op2);
2368 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2369 return new_d_Abs(NULL, op, mode);
2371 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2372 return new_d_And(NULL, op1, op2, mode);
2374 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2375 return new_d_Or(NULL, op1, op2, mode);
2377 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2378 return new_d_Eor(NULL, op1, op2, mode);
2380 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2381 return new_d_Not(NULL, op, mode);
2383 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2384 return new_d_Shl(NULL, op, k, mode);
2386 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2387 return new_d_Shr(NULL, op, k, mode);
2389 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2390 return new_d_Shrs(NULL, op, k, mode);
2392 #define new_Rotate new_Rot
2393 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2394 return new_d_Rot(NULL, op, k, mode);
2396 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2397 return new_d_Cmp(NULL, op1, op2);
2399 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2400 return new_d_Conv(NULL, op, mode);
2402 ir_node *new_Cast (ir_node *op, type *to_tp) {
2403 return new_d_Cast(NULL, op, to_tp);
2405 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2406 return new_d_Phi(NULL, arity, in, mode);
2408 ir_node *new_Load (ir_node *store, ir_node *addr) {
2409 return new_d_Load(NULL, store, addr);
2411 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2412 return new_d_Store(NULL, store, addr, val);
2414 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2415 where_alloc where) {
2416 return new_d_Alloc(NULL, store, size, alloc_type, where);
2418 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2420 return new_d_Free(NULL, store, ptr, size, free_type);
2422 ir_node *new_Sync (int arity, ir_node **in) {
2423 return new_d_Sync(NULL, arity, in);
2425 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2426 return new_d_Proj(NULL, arg, mode, proj);
2428 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2429 return new_d_defaultProj(NULL, arg, max_proj);
2431 ir_node *new_Tuple (int arity, ir_node **in) {
2432 return new_d_Tuple(NULL, arity, in);
2434 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2435 return new_d_Id(NULL, val, mode);
2437 ir_node *new_Bad (void) {
2440 ir_node *new_Unknown(void) {
2441 return new_d_Unknown();
2443 ir_node *new_CallBegin (ir_node *callee) {
2444 return new_d_CallBegin(NULL, callee);
2446 ir_node *new_EndReg (void) {
2447 return new_d_EndReg(NULL);
2449 ir_node *new_EndExcept (void) {
2450 return new_d_EndExcept(NULL);
2452 ir_node *new_Break (void) {
2453 return new_d_Break(NULL);
2455 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2456 return new_d_Filter(NULL, arg, mode, proj);