3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
19 # include "irgraph_t.h"
20 # include "irnode_t.h"
21 # include "irmode_t.h"
23 # include "firm_common_t.h"
29 /* memset belongs to string.h */
31 # include "irbackedge_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
/* Callback supplied by the front end to initialize a local variable that is
   read before it is written during SSA construction; NULL means no
   language-specific initialization is performed. */
44 * language dependent initialization variable
46 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
48 /*** ******************************************** */
49 /** private interfaces, for professional use only */
51 /* Constructs a Block with a fixed number of predecessors.
52 Does not set current_block. Can not be used with automatic
53 Phi node construction. */
55 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* A Block has no block predecessor of its own, hence the NULL block arg. */
59 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* The predecessor array is complete on entry, so the block is born mature. */
60 set_Block_matured(res, 1);
61 set_Block_block_visited(res, 0);
/* Default exception-handling and interprocedural-view attributes. */
63 res->attr.block.exc = exc_normal;
64 res->attr.block.handler_entry = 0;
65 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
66 res->attr.block.in_cg = NULL;
67 res->attr.block.cg_backedge = NULL;
69 irn_vrfy_irg (res, irg);
74 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
/* Start produces a tuple (mode_T) and has no predecessors. */
78 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
79 res->attr.start.irg = irg;
81 irn_vrfy_irg (res, irg);
86 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
/* Arity -1: End gets a dynamic predecessor array so keep-alive edges
   can be appended later (see add_End_keepalive). */
90 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
92 irn_vrfy_irg (res, irg);
96 /* Creates a Phi node with all predecessors. Calling this constructor
97 is only allowed if the corresponding block is mature. */
99 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
103 bool has_unknown = false;
/* A Phi needs exactly one operand per control-flow predecessor of its block. */
105 assert( get_Block_matured(block) );
106 assert( get_irn_arity(block) == arity );
108 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
110 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Skip optimization while Unknown operands are present: they may be
   replaced later, invalidating what the optimizer concluded. */
112 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
113 if (!has_unknown) res = optimize_node (res);
114 irn_vrfy_irg (res, irg);
116 /* Memory Phis in endless loops must be kept alive.
117 As we can't distinguish these easily we keep all of them alive. */
118 if ((res->op == op_Phi) && (mode == mode_M))
119 add_End_keepalive(irg->end, res);
124 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
127 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
128 res->attr.con.tv = con;
129 set_Const_type(res, tp); /* Call method because of complex assertion. */
/* optimize_node may return a CSE'd existing node; its type must still match. */
130 res = optimize_node (res);
131 assert(get_Const_type(res) == tp);
132 irn_vrfy_irg (res, irg);
135 res = local_optimize_newby (res);
142 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
/* Entity constants get a pointer type to the entity's type; everything
   else is typed "unknown". */
144 type *tp = unknown_type;
145 if (tarval_is_entity(con))
146 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
147 return new_rd_Const_type (db, irg, block, mode, con, tp);
151 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
156 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
157 res = optimize_node (res);
158 irn_vrfy_irg (res, irg);
163 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
169 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
170 res->attr.proj = proj;
/* A Proj must select from an existing node that lives in a valid block. */
173 assert(get_Proj_pred(res));
174 assert(get_nodes_Block(get_Proj_pred(res)));
176 res = optimize_node (res);
178 irn_vrfy_irg (res, irg);
184 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
/* Only Cond nodes over an unsigned selector can have a default Proj. */
188 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
/* Mark the Cond as fragmentary: not all cases are listed explicitly. */
189 arg->attr.c.kind = fragmentary;
190 arg->attr.c.default_proj = max_proj;
191 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
196 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
201 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
202 res = optimize_node (res);
203 irn_vrfy_irg (res, irg);
208 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
/* Cast changes only the type attribute, never the mode of the operand. */
211 res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
212 res->attr.cast.totype = to_tp;
213 res = optimize_node (res);
214 irn_vrfy_irg (res, irg);
219 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
223 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
224 res = optimize_node (res);
225 irn_vrfy_irg (res, irg);
/* The arithmetic, logic, shift and compare constructors below all follow
   the same pattern: build the node, run local optimization (which may
   return a different, e.g. CSE'd, node), then verify.  The variants taking
   a memory operand (Quot, DivMod, Div, Mod) and Cmp produce a tuple
   (mode_T) from which results are later selected with Proj nodes. */
230 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
231 ir_node *op1, ir_node *op2, ir_mode *mode)
237 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
238 res = optimize_node (res);
239 irn_vrfy_irg (res, irg);
244 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
245 ir_node *op1, ir_node *op2, ir_mode *mode)
251 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
252 res = optimize_node (res);
253 irn_vrfy_irg (res, irg);
258 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
259 ir_node *op, ir_mode *mode)
264 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
265 res = optimize_node (res);
266 irn_vrfy_irg (res, irg);
271 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
272 ir_node *op1, ir_node *op2, ir_mode *mode)
278 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
279 res = optimize_node (res);
280 irn_vrfy_irg (res, irg);
285 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
286 ir_node *memop, ir_node *op1, ir_node *op2)
293 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
294 res = optimize_node (res);
295 irn_vrfy_irg (res, irg);
300 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
301 ir_node *memop, ir_node *op1, ir_node *op2)
308 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
309 res = optimize_node (res);
310 irn_vrfy_irg (res, irg);
315 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
316 ir_node *memop, ir_node *op1, ir_node *op2)
323 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
324 res = optimize_node (res);
325 irn_vrfy_irg (res, irg);
330 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
331 ir_node *memop, ir_node *op1, ir_node *op2)
338 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
339 res = optimize_node (res);
340 irn_vrfy_irg (res, irg);
345 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
346 ir_node *op1, ir_node *op2, ir_mode *mode)
352 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
353 res = optimize_node (res);
354 irn_vrfy_irg (res, irg);
359 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
360 ir_node *op1, ir_node *op2, ir_mode *mode)
366 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
367 res = optimize_node (res);
368 irn_vrfy_irg (res, irg);
373 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
374 ir_node *op1, ir_node *op2, ir_mode *mode)
380 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
381 res = optimize_node (res);
382 irn_vrfy_irg (res, irg);
387 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
388 ir_node *op, ir_mode *mode)
393 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
394 res = optimize_node (res);
395 irn_vrfy_irg (res, irg);
400 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
401 ir_node *op, ir_node *k, ir_mode *mode)
407 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
408 res = optimize_node (res);
409 irn_vrfy_irg (res, irg);
414 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
415 ir_node *op, ir_node *k, ir_mode *mode)
421 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
422 res = optimize_node (res);
423 irn_vrfy_irg (res, irg);
428 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
429 ir_node *op, ir_node *k, ir_mode *mode)
435 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
436 res = optimize_node (res);
437 irn_vrfy_irg (res, irg);
442 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
443 ir_node *op, ir_node *k, ir_mode *mode)
449 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
450 res = optimize_node (res);
451 irn_vrfy_irg (res, irg);
456 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
457 ir_node *op, ir_mode *mode)
462 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
463 res = optimize_node (res);
464 irn_vrfy_irg (res, irg);
469 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
470 ir_node *op1, ir_node *op2)
476 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
477 res = optimize_node (res);
478 irn_vrfy_irg (res, irg);
483 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
/* Jmp: unconditional control flow (mode_X), no data predecessors. */
486 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
487 res = optimize_node (res);
488 irn_vrfy_irg (res, irg);
493 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
498 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
/* Initially all targets are listed explicitly ("dense"); may later be
   switched to "fragmentary" by new_rd_defaultProj. */
499 res->attr.c.kind = dense;
500 res->attr.c.default_proj = 0;
501 res = optimize_node (res);
502 irn_vrfy_irg (res, irg);
507 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
508 ir_node *callee, int arity, ir_node **in, type *tp)
/* Predecessor layout: r_in[0] = memory, r_in[1] = callee address,
   r_in[2..] = the actual arguments (copied below). */
515 NEW_ARR_A (ir_node *, r_in, r_arity);
518 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
520 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
522 assert(is_method_type(tp));
523 set_Call_type(res, tp);
/* No callee information computed yet; filled in by callgraph analysis. */
524 res->attr.call.callee_arr = NULL;
525 res = optimize_node (res);
526 irn_vrfy_irg (res, irg);
531 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
532 ir_node *store, int arity, ir_node **in)
/* Predecessor layout: r_in[0] = memory, r_in[1..] = result values. */
539 NEW_ARR_A (ir_node *, r_in, r_arity);
541 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
542 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
543 res = optimize_node (res);
544 irn_vrfy_irg (res, irg);
549 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
555 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
556 res = optimize_node (res);
557 irn_vrfy_irg (res, irg);
562 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
563 ir_node *store, ir_node *adr)
/* Load produces a tuple (mode_T): memory, result value, exception X. */
569 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
571 res = optimize_node (res);
572 irn_vrfy_irg (res, irg);
577 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
578 ir_node *store, ir_node *adr, ir_node *val)
585 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
587 res = optimize_node (res);
589 irn_vrfy_irg (res, irg);
594 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
595 ir_node *size, type *alloc_type, where_alloc where)
601 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
/* Remember where (stack/heap) and what type is allocated. */
603 res->attr.a.where = where;
604 res->attr.a.type = alloc_type;
606 res = optimize_node (res);
607 irn_vrfy_irg (res, irg);
612 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
613 ir_node *ptr, ir_node *size, type *free_type)
620 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
622 res->attr.f = free_type;
624 res = optimize_node (res);
625 irn_vrfy_irg (res, irg);
630 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
631 int arity, ir_node **in, entity *ent)
/* Predecessor layout: r_in[0] = memory, r_in[1] = object pointer,
   r_in[2..] = index expressions (for array-element selection). */
638 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
641 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
642 res = new_ir_node (db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
644 res->attr.s.ent = ent;
646 res = optimize_node (res);
647 irn_vrfy_irg (res, irg);
652 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
653 ir_node *objptr, type *ent)
660 NEW_ARR_A (ir_node *, r_in, r_arity);
/* NOTE(review): this constructor is named InstOf but builds an op_Sel
   node -- confirm whether op_InstOf was intended here. */
664 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
666 res->attr.io.ent = ent;
668 /* res = optimize (res);
669 * irn_vrfy_irg (res, irg); */
674 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
675 symconst_kind symkind)
679 if (symkind == linkage_ptr_info)
683 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
685 res->attr.i.num = symkind;
/* linkage_ptr_info carries an ident; type_tag/size carry a type. */
686 if (symkind == linkage_ptr_info) {
687 res->attr.i.tori.ptrinfo = (ident *)value;
689 assert ( ( (symkind == type_tag)
690 || (symkind == size))
691 && (is_type(value)));
692 res->attr.i.tori.typ = (type *)value;
694 res = optimize_node (res);
695 irn_vrfy_irg (res, irg);
700 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
/* Sync joins several memory values into a single one (mode_M). */
704 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
706 res = optimize_node (res);
707 irn_vrfy_irg (res, irg);
712 new_rd_Bad (ir_graph *irg)
718 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
720 ir_node *in[2], *res;
/* Confirm records that "val cmp bound" holds at this point;
   the node inherits val's mode. */
724 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
726 res->attr.confirm_cmp = cmp;
728 res = optimize_node (res);
729 irn_vrfy_irg(res, irg);
734 new_rd_Unknown (ir_graph *irg)
740 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
744 in[0] = get_Call_ptr(call);
745 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
/* Interprocedural view: remember the graph and the corresponding Call. */
746 res->attr.callbegin.irg = irg;
747 res->attr.callbegin.call = call;
748 res = optimize_node (res);
749 irn_vrfy_irg (res, irg);
754 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
/* Arity -1: dynamic predecessor array, like End. */
758 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
759 res->attr.end.irg = irg;
761 irn_vrfy_irg (res, irg);
766 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
770 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
771 res->attr.end.irg = irg;
773 irn_vrfy_irg (res, irg);
778 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
781 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
782 res = optimize_node (res);
783 irn_vrfy_irg (res, irg);
788 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
794 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
795 res->attr.filter.proj = proj;
/* Interprocedural inputs are filled in later by the callgraph phase. */
796 res->attr.filter.in_cg = NULL;
797 res->attr.filter.backedge = NULL;
800 assert(get_Proj_pred(res));
801 assert(get_nodes_Block(get_Proj_pred(res)));
803 res = optimize_node (res);
805 irn_vrfy_irg (res, irg);
/* The new_r_* constructors below are thin convenience wrappers around the
   corresponding new_rd_* constructors, passing NULL debug information. */
810 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
811 return new_rd_Block(NULL, irg, arity, in);
813 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
814 return new_rd_Start(NULL, irg, block);
816 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
817 return new_rd_End(NULL, irg, block);
819 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
820 return new_rd_Jmp(NULL, irg, block);
822 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
823 return new_rd_Cond(NULL, irg, block, c);
825 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
826 ir_node *store, int arity, ir_node **in) {
827 return new_rd_Return(NULL, irg, block, store, arity, in);
829 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
830 ir_node *store, ir_node *obj) {
831 return new_rd_Raise(NULL, irg, block, store, obj);
833 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
834 ir_mode *mode, tarval *con) {
835 return new_rd_Const(NULL, irg, block, mode, con);
837 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
838 type_or_id_p value, symconst_kind symkind) {
839 return new_rd_SymConst(NULL, irg, block, value, symkind);
841 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
842 ir_node *objptr, int n_index, ir_node **index,
844 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
846 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
848 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
850 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
851 ir_node *callee, int arity, ir_node **in,
853 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
855 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
856 ir_node *op1, ir_node *op2, ir_mode *mode) {
857 return new_rd_Add(NULL, irg, block, op1, op2, mode);
859 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
860 ir_node *op1, ir_node *op2, ir_mode *mode) {
861 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
863 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
864 ir_node *op, ir_mode *mode) {
865 return new_rd_Minus(NULL, irg, block, op, mode);
867 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
868 ir_node *op1, ir_node *op2, ir_mode *mode) {
869 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
871 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
872 ir_node *memop, ir_node *op1, ir_node *op2) {
873 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
875 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
876 ir_node *memop, ir_node *op1, ir_node *op2) {
877 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
879 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
880 ir_node *memop, ir_node *op1, ir_node *op2) {
881 return new_rd_Div(NULL, irg, block, memop, op1, op2);
883 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
884 ir_node *memop, ir_node *op1, ir_node *op2) {
885 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
887 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
888 ir_node *op, ir_mode *mode) {
889 return new_rd_Abs(NULL, irg, block, op, mode);
891 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
892 ir_node *op1, ir_node *op2, ir_mode *mode) {
893 return new_rd_And(NULL, irg, block, op1, op2, mode);
895 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
896 ir_node *op1, ir_node *op2, ir_mode *mode) {
897 return new_rd_Or(NULL, irg, block, op1, op2, mode);
899 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
900 ir_node *op1, ir_node *op2, ir_mode *mode) {
901 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
903 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
904 ir_node *op, ir_mode *mode) {
905 return new_rd_Not(NULL, irg, block, op, mode);
907 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
908 ir_node *op1, ir_node *op2) {
909 return new_rd_Cmp(NULL, irg, block, op1, op2);
911 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
912 ir_node *op, ir_node *k, ir_mode *mode) {
913 return new_rd_Shl(NULL, irg, block, op, k, mode);
915 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
916 ir_node *op, ir_node *k, ir_mode *mode) {
917 return new_rd_Shr(NULL, irg, block, op, k, mode);
919 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
920 ir_node *op, ir_node *k, ir_mode *mode) {
921 return new_rd_Shrs(NULL, irg, block, op, k, mode);
923 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
924 ir_node *op, ir_node *k, ir_mode *mode) {
925 return new_rd_Rot(NULL, irg, block, op, k, mode);
927 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
928 ir_node *op, ir_mode *mode) {
929 return new_rd_Conv(NULL, irg, block, op, mode);
931 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
932 return new_rd_Cast(NULL, irg, block, op, to_tp);
934 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
935 ir_node **in, ir_mode *mode) {
936 return new_rd_Phi(NULL, irg, block, arity, in, mode);
938 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
939 ir_node *store, ir_node *adr) {
940 return new_rd_Load(NULL, irg, block, store, adr);
942 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
943 ir_node *store, ir_node *adr, ir_node *val) {
944 return new_rd_Store(NULL, irg, block, store, adr, val);
946 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
947 ir_node *size, type *alloc_type, where_alloc where) {
948 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
950 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
951 ir_node *ptr, ir_node *size, type *free_type) {
952 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
954 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
955 return new_rd_Sync(NULL, irg, block, arity, in);
957 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
958 ir_mode *mode, long proj) {
959 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
961 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
963 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
965 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
966 int arity, ir_node **in) {
967 return new_rd_Tuple(NULL, irg, block, arity, in );
969 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
970 ir_node *val, ir_mode *mode) {
971 return new_rd_Id(NULL, irg, block, val, mode);
973 INLINE ir_node *new_r_Bad (ir_graph *irg) {
974 return new_rd_Bad(irg);
976 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
977 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
979 INLINE ir_node *new_r_Unknown (ir_graph *irg) {
980 return new_rd_Unknown(irg);
982 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
983 return new_rd_CallBegin(NULL, irg, block, callee);
985 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
986 return new_rd_EndReg(NULL, irg, block);
988 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
989 return new_rd_EndExcept(NULL, irg, block);
991 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
992 return new_rd_Break(NULL, irg, block);
994 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
995 ir_mode *mode, long proj) {
996 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1000 /** ********************/
1001 /** public interfaces */
1002 /** construction tools */
1006 * - create a new Start node in the current block
1008 * @return s - pointer to the created Start node
1013 new_d_Start (dbg_info* db)
/* Builds the Start node in the current block of current_ir_graph. */
1017 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1018 op_Start, mode_T, 0, NULL);
1019 res->attr.start.irg = current_ir_graph;
1021 res = optimize_node (res);
1022 irn_vrfy_irg (res, current_ir_graph);
1027 new_d_End (dbg_info* db)
/* Arity -1: dynamic predecessor array for later keep-alive edges. */
1030 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1031 op_End, mode_X, -1, NULL);
1032 res = optimize_node (res);
1033 irn_vrfy_irg (res, current_ir_graph);
1038 /* Constructs a Block with a fixed number of predecessors.
1039 Does set current_block. Can be used with automatic Phi
1040 node construction. */
1042 new_d_Block (dbg_info* db, int arity, ir_node **in)
1046 bool has_unknown = false;
1048 res = new_rd_Block (db, current_ir_graph, arity, in);
1050 /* Create and initialize array for Phi-node construction. */
1051 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1052 current_ir_graph->n_loc);
1053 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Skip optimization while Unknown predecessors are present (see new_rd_Phi). */
1055 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1057 if (!has_unknown) res = optimize_node (res);
/* This block becomes the insertion point for subsequently constructed nodes. */
1058 current_ir_graph->current_block = res;
1060 irn_vrfy_irg (res, current_ir_graph);
1065 /* ***********************************************************************/
1066 /* Methods necessary for automatic Phi node creation */
1068 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1069 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1070 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1071 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1073 Call Graph: ( A ---> B == A "calls" B)
1075 get_value mature_block
1083 get_r_value_internal |
1087 new_rd_Phi0 new_rd_Phi_in
1089 * *************************************************************************** */
1091 /* Creates a Phi node with 0 predecessors */
1092 static INLINE ir_node *
1093 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
/* A Phi0 is a placeholder for a value not yet known; it is completed
   (or turned into an Id) when the enclosing block matures. */
1096 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1097 irn_vrfy_irg (res, irg);
1101 /* There are two implementations of the Phi node construction. The first
1102 is faster, but does not work for blocks with more than 2 predecessors.
1103 The second works always but is slower and causes more unnecessary Phi
1105 Select the implementations by the following preprocessor flag set in
1107 #if USE_FAST_PHI_CONSTRUCTION
1109 /* This is a stack used for allocating and deallocating nodes in
1110 new_rd_Phi_in. The original implementation used the obstack
1111 to model this stack, now it is explicit. This reduces side effects.
1113 #if USE_EXPLICIT_PHI_IN_STACK
1114 INLINE Phi_in_stack *
1115 new_Phi_in_stack() {
1118 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1120 res->stack = NEW_ARR_F (ir_node *, 1);
1127 free_Phi_in_stack(Phi_in_stack *s) {
1128 DEL_ARR_F(s->stack);
/* Return a Phi node to the free stack so its memory can be reused. */
1132 free_to_Phi_in_stack(ir_node *phi) {
1133 assert(get_irn_opcode(phi) == iro_Phi);
/* Grow the stack array only when it is full; otherwise overwrite a slot. */
1135 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1136 current_ir_graph->Phi_in_stack->pos)
1137 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1139 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1141 (current_ir_graph->Phi_in_stack->pos)++;
/* Pop a reusable Phi node from the stack, or allocate a fresh one if the
   stack is empty. */
1144 static INLINE ir_node *
1145 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1146 int arity, ir_node **in) {
1148 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1149 int pos = current_ir_graph->Phi_in_stack->pos;
1153 /* We need to allocate a new node */
/* NOTE(review): 'db' is not a parameter of this function -- presumably
   NULL debug info was intended here; confirm against the full source. */
1154 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1155 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1157 /* reuse the old node and initialize it again. */
1160 assert (res->kind == k_ir_node);
1161 assert (res->op == op_Phi);
1165 assert (arity >= 0);
1166 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1167 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1169 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1171 (current_ir_graph->Phi_in_stack->pos)--;
1175 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1177 /* Creates a Phi node with a given, fixed array **in of predecessors.
1178 If the Phi node is unnecessary, as the same value reaches the block
1179 through all control flow paths, it is eliminated and the value
1180 returned directly. This constructor is only intended for use in
1181 the automatic Phi node generation triggered by get_value or mature.
1182 The implementation is quite tricky and depends on the fact, that
1183 the nodes are allocated on a stack:
1184 The in array contains predecessors and NULLs. The NULLs appear,
1185 if get_r_value_internal, that computed the predecessors, reached
1186 the same block on two paths. In this case the same value reaches
1187 this block on both paths, there is no definition in between. We need
1188 not allocate a Phi where these paths merge, but we have to communicate
1189 this fact to the caller. This happens by returning a pointer to the
1190 node the caller _will_ allocate. (Yes, we predict the address. We can
1191 do so because the nodes are allocated on the obstack.) The caller then
1192 finds a pointer to itself and, when this routine is called again,
1195 static INLINE ir_node *
1196 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1197 ir_node **in, int ins)
1200 ir_node *res, *known;
1202 /* allocate a new node on the obstack.
1203 This can return a node to which some of the pointers in the in-array
1205 Attention: the constructor copies the in array, i.e., the later changes
1206 to the array in this routine do not affect the constructed node! If
1207 the in array contains NULLs, there will be missing predecessors in the
1209 Is this a possible internal state of the Phi node generation? */
1210 #if USE_EXPLICIT_PHI_IN_STACK
1211 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1213 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1214 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1216 /* The in-array can contain NULLs. These were returned by
1217 get_r_value_internal if it reached the same block/definition on a
1219 The NULLs are replaced by the node itself to simplify the test in the
1221 for (i=0; i < ins; ++i)
1222 if (in[i] == NULL) in[i] = res;
1224 /* This loop checks whether the Phi has more than one predecessor.
1225 If so, it is a real Phi node and we break the loop. Else the
1226 Phi node merges the same definition on several paths and therefore
1228 for (i=0; i < ins; ++i)
1230 if (in[i]==res || in[i]==known) continue;
1238 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1240 #if USE_EXPLICIT_PHI_IN_STACK
1241 free_to_Phi_in_stack(res);
/* obstack_free with a non-NULL object frees it and everything allocated
   after it -- valid here because res is the most recent allocation. */
1243 obstack_free (current_ir_graph->obst, res);
1247 res = optimize_node (res);
1248 irn_vrfy_irg (res, irg);
1251 /* return the pointer to the Phi node. This node might be deallocated! */
1256 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1259 allocates and returns this node. The routine called to allocate the
1260 node might optimize it away and return a real value, or even a pointer
1261 to a deallocated Phi node on top of the obstack!
1262 This function is called with an in-array of proper size. **/
1264 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1266 ir_node *prevBlock, *res;
1269 /* This loop goes to all predecessor blocks of the block the Phi node is in
1270 and there finds the operands of the Phi node by calling
1271 get_r_value_internal. */
/* Predecessor index 0 of a node is reserved; block inputs start at 1. */
1272 for (i = 1; i <= ins; ++i) {
1273 assert (block->in[i]);
1274 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1276 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1279 /* After collecting all predecessors into the array nin a new Phi node
1280 with these predecessors is created. This constructor contains an
1281 optimization: If all predecessors of the Phi node are identical it
1282 returns the only operand instead of a new Phi node. If the value
1283 passes two different control flow edges without being defined, and
1284 this is the second path treated, a pointer to the node that will be
1285 allocated for the first path (recursion) is returned. We already
1286 know the address of this node, as it is the next node to be allocated
1287 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1288 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1290 /* Now we know the value for "pos" and can enter it in the array with
1291 all known local variables. Attention: this might be a pointer to
1292 a node, that later will be allocated!!! See new_rd_Phi_in.
1293 If this is called in mature, after some set_value in the same block,
1294 the proper value must not be overwritten:
1296 get_value (makes Phi0, puts it into graph_arr)
1297 set_value (overwrites Phi0 in graph_arr)
1298 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
/* Only record the value if no later definition already occupies the slot. */
1301 if (!block->attr.block.graph_arr[pos]) {
1302 block->attr.block.graph_arr[pos] = res;
1304 /* printf(" value already computed by %s\n",
1305 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1311 /* This function returns the last definition of a variable. In case
1312 this variable was last defined in a previous block, Phi nodes are
1313 inserted. If the part of the firm graph containing the definition
1314 is not yet constructed, a dummy Phi node is returned. */
1316 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1319 /* There are 4 cases to treat.
1321 1. The block is not mature and we visit it the first time. We can not
1322 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1323 predecessors is returned. This node is added to the linked list (field
1324 "link") of the containing block to be completed when this block is
1325 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1328 2. The value is already known in this block, graph_arr[pos] is set and we
1329 visit the block the first time. We can return the value without
1330 creating any new nodes.
1332 3. The block is mature and we visit it the first time. A Phi node needs
1333 to be created (phi_merge). If the Phi is not needed, as all it's
1334 operands are the same value reaching the block through different
1335 paths, it's optimized away and the value itself is returned.
1337 4. The block is mature, and we visit it the second time. Now two
1338 subcases are possible:
1339 * The value was computed completely the last time we were here. This
1340 is the case if there is no loop. We can return the proper value.
1341 * The recursion that visited this node and set the flag did not
1342 return yet. We are computing a value in a loop and need to
1343 break the recursion without knowing the result yet.
1344 @@@ strange case. Straight forward we would create a Phi before
1345 starting the computation of it's predecessors. In this case we will
1346 find a Phi here in any case. The problem is that this implementation
1347 only creates a Phi after computing the predecessors, so that it is
1348 hard to compute self references of this Phi. @@@
1349 There is no simple check for the second subcase. Therefore we check
1350 for a second visit and treat all such cases as the second subcase.
1351 Anyways, the basic situation is the same: we reached a block
1352 on two paths without finding a definition of the value: No Phi
1353 nodes are needed on both paths.
1354 We return this information "Two paths, no Phi needed" by a very tricky
1355 implementation that relies on the fact that an obstack is a stack and
1356 will return a node with the same address on different allocations.
1357 Look also at phi_merge and new_rd_phi_in to understand this.
1358 @@@ Unfortunately this does not work, see testprogram
1359 three_cfpred_example.
1363 /* case 4 -- already visited. */
1364 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1366 /* visited the first time */
1367 set_irn_visited(block, get_irg_visited(current_ir_graph));
1369 /* Get the local valid value */
1370 res = block->attr.block.graph_arr[pos];
1372 /* case 2 -- If the value is actually computed, return it. */
1373 if (res) { return res;};
1375 if (block->attr.block.matured) { /* case 3 */
1377 /* The Phi has the same amount of ins as the corresponding block. */
1378 int ins = get_irn_arity(block);
1380 NEW_ARR_A (ir_node *, nin, ins);
1382 /* Phi merge collects the predecessors and then creates a node. */
1383 res = phi_merge (block, pos, mode, nin, ins);
1385 } else { /* case 1 */
1386 /* The block is not mature, we don't know how many in's are needed. A Phi
1387 with zero predecessors is created. Such a Phi node is called Phi0
1388 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1389 to the list of Phi0 nodes in this block to be matured by mature_block
1391 The Phi0 has to remember the pos of it's internal value. If the real
1392 Phi is computed, pos is used to update the array with the local
1395 res = new_rd_Phi0 (current_ir_graph, block, mode);
1396 res->attr.phi0_pos = pos;
1397 res->link = block->link;
1401 /* If we get here, the frontend missed a use-before-definition error */
1404 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1405 assert (mode->code >= irm_F && mode->code <= irm_P);
1406 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1407 tarval_mode_null[mode->code]);
1410 /* The local valid value is available now. */
1411 block->attr.block.graph_arr[pos] = res;
1419 it starts the recursion. This causes an Id at the entry of
1420 every block that has no definition of the value! **/
1422 #if USE_EXPLICIT_PHI_IN_STACK
1424 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1425 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/** Allocates a Phi node in @p block with the @p ins operands in @p in.
    If the operands merge at most one distinct (non-Bad) definition, the
    freshly allocated node is freed from the obstack again and the single
    known value is returned instead of a Phi.  Memory Phis that survive
    are added to the End keep-alives so endless loops keep them alive. */
static INLINE ir_node *
new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
       ir_node **in, int ins)
  ir_node *res, *known;
  /* Allocate a new node on the obstack. The allocation copies the in
  res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop. Else the
     Phi node merges the same definition on several paths and therefore
     is not needed. Don't consider Bad nodes! */
  for (i=0; i < ins; ++i)
    /* Self references (in[i]==res) and Bad inputs do not count as a
       distinct definition; `known` tracks the one definition seen so far. */
    if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
  /* i==ins: there is at most one predecessor, we don't need a phi node. */
    /* Undo the speculative allocation -- res was the top of the obstack. */
    obstack_free (current_ir_graph->obst, res);
  /* A undefined value, e.g., in unreachable code. */
  res = optimize_node (res);
  irn_vrfy_irg (res, irg);
  /* Memory Phis in endless loops must be kept alive.
     As we can't distinguish these easily we keep all of the alive. */
  if ((res->op == op_Phi) && (mode == mode_M))
    add_End_keepalive(irg->end, res);
1479 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1481 #if PRECISE_EXC_CONTEXT
1483 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/** Creates the "frag array" for fragile op @p n: a snapshot of the current
    block's graph_arr (all known local values) taken at the point of the op,
    so that exception successors can see the state valid at the op.
    Slot 0 (the memory/store slot) is replaced by a fresh Proj on @p n's
    memory result; the last slot (n_loc-1) links to @p n itself so frag
    arrays of successive fragile ops form a chain (see set_frag_value). */
static INLINE ir_node **
new_frag_arr (ir_node *n) {
  arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
  memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
     sizeof(ir_node *)*current_ir_graph->n_loc);
  /* turn off optimization before allocating Proj nodes, as res isn't
  opt = get_optimize(); set_optimize(0);
  /* Here we rely on the fact that all frag ops have Memory as first result! */
  if (get_irn_op(n) == op_Call)
    /* NOTE(review): Calls use a different Proj number for the memory
       result than the other fragile ops -- confirm against the pn_Call
       projection numbering. */
    arr[0] = new_Proj(n, mode_M, 3);
    arr[0] = new_Proj(n, mode_M, 0);
  current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/** Returns the frag array attached to fragile op @p n.  Call and Alloc
    store it in their op-specific attribute structs; all other fragile
    ops use the generic attr.frag_arr slot. */
static INLINE ir_node **
get_frag_arr (ir_node *n) {
  if (get_irn_op(n) == op_Call) {
    return n->attr.call.frag_arr;
  } else if (get_irn_op(n) == op_Alloc) {
    return n->attr.a.frag_arr;
    return n->attr.frag_arr;
/** Enters @p val at slot @p pos of @p frag_arr, but only if the slot is
    still empty (does NOT overwrite).  Then follows the chain of fragile
    ops: slot n_loc-1 holds the next fragile op in the block (set by
    new_frag_arr), whose own frag array is updated recursively. */
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
  if (!frag_arr[pos]) frag_arr[pos] = val;
  if (frag_arr[current_ir_graph->n_loc - 1])
    set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/** Returns the value of slot @p pos valid immediately after fragile op
    @p cfOp, for use on an exception edge out of @p block.  First consults
    cfOp's frag array; where the slot is still open, builds the Phi via
    phi_merge / Phi0 (mirroring get_r_value_internal) and records the
    result back into the frag arrays. */
get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
  assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
  frag_arr = get_frag_arr(cfOp);
  res = frag_arr[pos];
  if (block->attr.block.graph_arr[pos]) {
    /* There was a set_value after the cfOp and no get_value before that
       set_value. We must build a Phi node now. */
    if (block->attr.block.matured) {
      int ins = get_irn_arity(block);
      NEW_ARR_A (ir_node *, nin, ins);
      res = phi_merge(block, pos, mode, nin, ins);
      /* Immature block: fall back to a Phi0 queued on the block's link
         list, completed later by mature_block. */
      res = new_rd_Phi0 (current_ir_graph, block, mode);
      res->attr.phi0_pos = pos;
      res->link = block->link;
    /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
       but this should be better: (remove comment if this works) */
    /* It's a Phi, we can write this into all graph_arrs with NULL */
    set_frag_value(block->attr.block.graph_arr, pos, res);
    res = get_r_value_internal(block, pos, mode);
    set_frag_value(block->attr.block.graph_arr, pos, res);
1562 computes the predecessors for the real phi node, and then
1563 allocates and returns this node. The routine called to allocate the
1564 node might optimize it away and return a real value.
1565 This function must be called with an in-array of proper size. **/
/** Computes the predecessors for the Phi node of value slot @p pos in
    @p block and allocates it (PRECISE_EXC_CONTEXT-aware variant).
    A Phi0 is pre-allocated to break recursion; if a real Phi results,
    the Phi0 is exchanged for it.  Must be called with an in-array
    @p nin of proper size (@p ins entries). */
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
  ir_node *prevBlock, *prevCfOp, *res, *phi0;
  /* If this block has no value at pos create a Phi0 and remember it
     in graph_arr to break recursions.
     Else we may not set graph_arr as there a later value is remembered. */
  if (!block->attr.block.graph_arr[pos]) {
    if (block == get_irg_start_block(current_ir_graph)) {
      /* Collapsing to Bad tarvals is no good idea.
         So we call a user-supplied routine here that deals with this case as
         appropriate for the given language. Sadly the only help we can give
         here is the position.
         Even if all variables are defined before use, it can happen that
         we get to the start block, if a cond has been replaced by a tuple
         (bad, jmp). In this case we call the function needlessly, possibly
         generating a non-existent error.
         However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
      if (default_initialize_local_variable)
        block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
        block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
      /* We don't need to care about exception ops in the start block.
         There are none by definition. */
      return block->attr.block.graph_arr[pos];
      phi0 = new_rd_Phi0(current_ir_graph, block, mode);
      block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
      /* Set graph_arr for fragile ops. Also here we should break recursion.
         We could choose a cyclic path through a cfop. But the recursion would
         break at some point. */
      set_frag_value(block->attr.block.graph_arr, pos, phi0);
  /* This loop goes to all predecessor blocks of the block the Phi node
     is in and there finds the operands of the Phi node by calling
     get_r_value_internal. */
  for (i = 1; i <= ins; ++i) {
    prevCfOp = skip_Proj(block->in[i]);
    if (is_Bad(prevCfOp)) {
      /* In case a Cond has been optimized we would get right to the start block
         with an invalid definition. */
      nin[i-1] = new_Bad();
    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
    if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
      if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
        /* NOTE(review): with asserts enabled this calls
           get_r_frag_value_internal twice; the routine may allocate
           nodes, so the duplicate call is not entirely free. */
        assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
        nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
        nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
      nin[i-1] = new_Bad();
  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created. This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node. */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
  /* In case we allocated a Phi0 node at the beginning of this procedure,
     we need to exchange this Phi0 with the real Phi. */
  exchange(phi0, res);
  block->attr.block.graph_arr[pos] = res;
  /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
     only an optimization. */
1654 /* This function returns the last definition of a variable. In case
1655 this variable was last defined in a previous block, Phi nodes are
1656 inserted. If the part of the firm graph containing the definition
1657 is not yet constructed, a dummy Phi node is returned. */
/** Returns the last definition of variable slot @p pos visible in
    @p block (PRECISE_EXC_CONTEXT-aware variant).  Inserts Phi nodes
    where the value was defined in a predecessor; returns the Phi0
    pre-allocated by phi_merge on a repeated visit inside a loop. */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
  /* There are 4 cases to treat.
     1. The block is not mature and we visit it the first time. We can not
     create a proper Phi node, therefore a Phi0, i.e., a Phi without
     predecessors is returned. This node is added to the linked list (field
     "link") of the containing block to be completed when this block is
     matured. (Completion will add a new Phi and turn the Phi0 into an Id
     2. The value is already known in this block, graph_arr[pos] is set and we
     visit the block the first time. We can return the value without
     creating any new nodes.
     3. The block is mature and we visit it the first time. A Phi node needs
     to be created (phi_merge). If the Phi is not needed, as all it's
     operands are the same value reaching the block through different
     paths, it's optimized away and the value itself is returned.
     4. The block is mature, and we visit it the second time. Now two
     subcases are possible:
     * The value was computed completely the last time we were here. This
     is the case if there is no loop. We can return the proper value.
     * The recursion that visited this node and set the flag did not
     return yet. We are computing a value in a loop and need to
     break the recursion. This case only happens if we visited
     the same block with phi_merge before, which inserted a Phi0.
     So we return the Phi0.
  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
    /* As phi_merge allocates a Phi0 this value is always defined. Here
       is the critical difference of the two algorithms. */
    assert(block->attr.block.graph_arr[pos]);
    return block->attr.block.graph_arr[pos];
  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));
  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];
  /* case 2 -- If the value is actually computed, return it. */
  if (res) { return res; };
  if (block->attr.block.matured) { /* case 3 */
    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);
    NEW_ARR_A (ir_node *, nin, ins);
    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge (block, pos, mode, nin, ins);
  } else { /* case 1 */
    /* The block is not mature, we don't know how many in's are needed. A Phi
       with zero predecessors is created. Such a Phi node is called Phi0
       node. The Phi0 is then added to the list of Phi0 nodes in this block
       to be matured by mature_block later.
       The Phi0 has to remember the pos of it's internal value. If the real
       Phi is computed, pos is used to update the array with the local
    res = new_rd_Phi0 (current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;
  /* If we get here, the frontend missed a use-before-definition error */
  printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
  assert (mode->code >= irm_F && mode->code <= irm_P);
  res = new_rd_Const (NULL, current_ir_graph, block, mode,
              get_mode_null(mode));
  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;
1747 #endif /* USE_FAST_PHI_CONSTRUCTION */
1749 /* ************************************************************************** */
1751 /** Finalize a Block node, when all control flows are known. */
1752 /** Acceptable parameters are only Block nodes. */
/** Finalizes @p block once all its control flow predecessors are known.
    Fixes the backedge array, completes all queued Phi0 nodes (chained
    via the block's link field) through phi_merge, marks the block
    matured and optimizes it in place. */
mature_block (ir_node *block)
  assert (get_irn_opcode(block) == iro_Block);
  /* @@@ should be commented in
     assert (!get_Block_matured(block) && "Block already matured"); */
  if (!get_Block_matured(block)) {
    /* in[0] is the block's own entry, so the predecessor count is len-1. */
    ins = ARR_LEN (block->in)-1;
    /* Fix block parameters */
    block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
    /* An array for building the Phi nodes. */
    NEW_ARR_A (ir_node *, nin, ins);
    /* Traverse a chain of Phi nodes attached to this block and mature
    for (n = block->link; n; n=next) {
      inc_irg_visited(current_ir_graph);
      /* Replace the Phi0 placeholder by the fully merged value. */
      exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
    block->attr.block.matured = 1;
    /* Now, as the block is a finished firm node, we can optimize it.
       Since other nodes have been allocated since the block was created
       we can not free the node on the obstack. Therefore we have to call
       Unfortunately the optimization does not change a lot, as all allocated
       nodes refer to the unoptimized node.
       We can call _2, as global cse has no effect on blocks. */
    block = optimize_in_place_2(block);
    irn_vrfy_irg(block, current_ir_graph);
/* ** Debug-info constructors (new_d_*): thin wrappers forwarding to the
   corresponding new_rd_* constructor with current_ir_graph and its
   current block.  Const-like nodes are placed in the start block. ** */
new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
  return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
  /* Constants live in the start block, not the current block. */
  return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
  return new_rd_Const_type (db, current_ir_graph, current_ir_graph->start_block,
new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
  return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
  return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
  /* Mutates the Cond node @p arg: marks it fragmentary and records the
     default projection number before creating the Proj. */
  assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
  arg->attr.c.kind = fragmentary;
  arg->attr.c.default_proj = max_proj;
  res = new_Proj (arg, mode_X, max_proj);
new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
  return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
  return new_rd_Cast (db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
new_d_Tuple (dbg_info* db, int arity, ir_node **in)
  return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
/* ** Arithmetic / bitwise new_d_* wrappers.  Ops that can raise an
   exception (Quot, DivMod, Div, Mod) additionally attach a frag array
   (PRECISE_EXC_CONTEXT) while the graph is still being built, unless
   the constructor already optimized the node away. ** */
new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
  return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
  res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
  if ((current_ir_graph->phase_state == phase_building) &&
      (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
    res->attr.frag_arr = new_frag_arr(res);
new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
  res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
  if ((current_ir_graph->phase_state == phase_building) &&
      (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
    res->attr.frag_arr = new_frag_arr(res);
new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
  res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
  if ((current_ir_graph->phase_state == phase_building) &&
      (get_irn_op(res) == op_Div)) /* Could be optimized away. */
    res->attr.frag_arr = new_frag_arr(res);
new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
  res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
  if ((current_ir_graph->phase_state == phase_building) &&
      (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
    res->attr.frag_arr = new_frag_arr(res);
new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
  return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
  return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
  return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
  return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
  return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
  return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
  return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
  return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
new_d_Jmp (dbg_info* db)
  return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
/* ** Control flow and memory new_d_* wrappers.  Call, Load, Store and
   Alloc are fragile ops and get a frag array (PRECISE_EXC_CONTEXT)
   during graph construction, stored in their op-specific attribute. ** */
new_d_Cond (dbg_info* db, ir_node *c)
  return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
  res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
             store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
  if ((current_ir_graph->phase_state == phase_building) &&
      (get_irn_op(res) == op_Call)) /* Could be optimized away. */
    res->attr.call.frag_arr = new_frag_arr(res);
new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
  return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
  return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
  res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
  if ((current_ir_graph->phase_state == phase_building) &&
      (get_irn_op(res) == op_Load)) /* Could be optimized away. */
    res->attr.frag_arr = new_frag_arr(res);
new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
  res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
  if ((current_ir_graph->phase_state == phase_building) &&
      (get_irn_op(res) == op_Store)) /* Could be optimized away. */
    res->attr.frag_arr = new_frag_arr(res);
new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
  res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
              store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
  if ((current_ir_graph->phase_state == phase_building) &&
      (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
    res->attr.a.frag_arr = new_frag_arr(res);
new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
  return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
             store, ptr, size, free_type);
/* ** Sel/SymConst and interprocedural-view new_d_* wrappers. ** */
new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
/* GL: objptr was called frame before. Frame was a bad choice for the name
   as the operand could as well be a pointer to a dynamic object. */
  /* Sel without array indices: forwards with an empty index list. */
  return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
             store, objptr, 0, NULL, ent);
new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
  return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
             store, objptr, n_index, index, sel);
new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
  return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
             store, objptr, ent));
new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
  /* SymConsts are placed in the start block, like other constants. */
  return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
new_d_Sync (dbg_info* db, int arity, ir_node** in)
  return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
  /* Body of the Bad constructor: Bad is unique per graph and cached. */
  return current_ir_graph->bad;
new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
  return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
new_d_Unknown (void)
  /* Unknown is likewise a cached per-graph singleton. */
  return current_ir_graph->unknown;
new_d_CallBegin (dbg_info *db, ir_node *call)
  res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
new_d_EndReg (dbg_info *db)
  res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
new_d_EndExcept (dbg_info *db)
  res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
new_d_Break (dbg_info *db)
  return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
  return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2210 /* ********************************************************************* */
2211 /* Comfortable interface with automatic Phi node construction. */
/* (Uses also constructors of the ?? interface, except new_Block.)       */
2213 /* ********************************************************************* */
2215 /** Block construction **/
2216 /* immature Block without predecessors */
/** Creates an immature Block (arity -1, dynamic in-array), makes it the
    current block and allocates its graph_arr for automatic Phi node
    construction.  The block must later be finished with mature_block. */
ir_node *new_d_immBlock (dbg_info* db) {
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* creates a new dynamic in-array as length of in is -1 */
  res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
  current_ir_graph->current_block = res;
  res->attr.block.matured = 0;
  res->attr.block.exc = exc_normal;
  res->attr.block.handler_entry = 0;
  res->attr.block.backedge = NULL;
  res->attr.block.in_cg = NULL;
  res->attr.block.cg_backedge = NULL;
  set_Block_block_visited(res, 0);
  /* Create and initialize array for Phi-node construction. */
  res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
                    current_ir_graph->n_loc);
  memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
  /* Immature block may not be optimized! */
  irn_vrfy_irg (res, current_ir_graph);
  /* Variant without debug info (body of new_immBlock). */
  return new_d_immBlock(NULL);
/* add an edge from a jmp/control flow node to a block */
/** Appends the control flow predecessor @p jmp to @p block's in-array.
    Only legal while the block is still immature. */
add_in_edge (ir_node *block, ir_node *jmp)
  if (block->attr.block.matured) {
    assert(0 && "Error: Block already matured!\n");
  assert (jmp != NULL);
  ARR_APP1 (ir_node *, block->in, jmp);
2261 /* changing the current block */
/** Makes @p target the current block for subsequent constructions. */
switch_block (ir_node *target)
  current_ir_graph->current_block = target;
2268 /* ************************ */
2269 /* parameter administration */
2271 /* get a value from the parameter array from the current block by its index */
/* ** Parameter administration.  graph_arr slot 0 holds the memory/store
   value; user variable @p pos maps to slot pos+1. ** */
/** Reads local variable @p pos from the current block, inserting Phi
    nodes as needed. */
get_d_value (dbg_info* db, int pos, ir_mode *mode)
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  inc_irg_visited(current_ir_graph);
  return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
get_value (int pos, ir_mode *mode)
  return get_d_value(NULL, pos, mode);
/** Defines local variable @p pos in the current block. */
set_value (int pos, ir_node *value)
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  assert(pos+1 < current_ir_graph->n_loc);
  current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
  /* (get_store) Reads the current memory state -- variable slot 0. */
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* GL: one could call get_value instead */
  inc_irg_visited(current_ir_graph);
  return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
/** Sets the current memory state -- variable slot 0. */
set_store (ir_node *store)
  /* GL: one could call set_value instead */
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/** Adds @p ka to the End node's keep-alive list so it is not removed
    as dead code. */
keep_alive (ir_node *ka)
  add_End_keepalive(current_ir_graph->end, ka);
/** Useful access routines **/
/* Returns the current block of the current graph.  To set the current
   block use switch_block(). */
ir_node *get_cur_block() {
  return get_irg_current_block(current_ir_graph);
/* Returns the frame type of the current graph */
type *get_cur_frame_type() {
  return get_irg_frame_type(current_ir_graph);
2334 /* ********************************************************************* */
2337 /* call once for each run of the library */
/** Library-wide initialization: registers the language-dependent callback
    used to initialize a local variable read before its first definition.
    Call once per run of the library. */
init_cons (default_initialize_local_variable_func_t *func)
  default_initialize_local_variable = func;
/** Per-graph finalization: marks construction of @p irg as finished. */
finalize_cons (ir_graph *irg) {
  irg->phase_state = phase_high;
/* ** Convenience constructors without debug info: each forwards to the
   corresponding new_d_* constructor with a NULL dbg_info. ** */
ir_node *new_Block(int arity, ir_node **in) {
  return new_d_Block(NULL, arity, in);
ir_node *new_Start  (void) {
  return new_d_Start(NULL);
ir_node *new_End    (void) {
  return new_d_End(NULL);
ir_node *new_Jmp    (void) {
  return new_d_Jmp(NULL);
ir_node *new_Cond   (ir_node *c) {
  return new_d_Cond(NULL, c);
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
  return new_d_Return(NULL, store, arity, in);
ir_node *new_Raise  (ir_node *store, ir_node *obj) {
  return new_d_Raise(NULL, store, obj);
ir_node *new_Const  (ir_mode *mode, tarval *con) {
  return new_d_Const(NULL, mode, con);
ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
  return new_d_SymConst(NULL, value, kind);
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
  return new_d_simpleSel(NULL, store, objptr, ent);
ir_node *new_Sel    (ir_node *store, ir_node *objptr, int arity, ir_node **in,
  return new_d_Sel(NULL, store, objptr, arity, in, ent);
ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
  return (new_d_InstOf (NULL, store, objptr, ent));
ir_node *new_Call   (ir_node *store, ir_node *callee, int arity, ir_node **in,
  return new_d_Call(NULL, store, callee, arity, in, tp);
ir_node *new_Add    (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Add(NULL, op1, op2, mode);
ir_node *new_Sub    (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Sub(NULL, op1, op2, mode);
ir_node *new_Minus  (ir_node *op,  ir_mode *mode) {
  return new_d_Minus(NULL, op, mode);
ir_node *new_Mul    (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Mul(NULL, op1, op2, mode);
ir_node *new_Quot   (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Quot(NULL, memop, op1, op2);
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_DivMod(NULL, memop, op1, op2);
ir_node *new_Div    (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Div(NULL, memop, op1, op2);
ir_node *new_Mod    (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Mod(NULL, memop, op1, op2);
ir_node *new_Abs    (ir_node *op, ir_mode *mode) {
  return new_d_Abs(NULL, op, mode);
ir_node *new_And    (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_And(NULL, op1, op2, mode);
ir_node *new_Or     (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Or(NULL, op1, op2, mode);
ir_node *new_Eor    (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Eor(NULL, op1, op2, mode);
ir_node *new_Not    (ir_node *op,                ir_mode *mode) {
  return new_d_Not(NULL, op, mode);
ir_node *new_Shl    (ir_node *op,  ir_node *k,   ir_mode *mode) {
  return new_d_Shl(NULL, op, k, mode);
ir_node *new_Shr    (ir_node *op,  ir_node *k,   ir_mode *mode) {
  return new_d_Shr(NULL, op, k, mode);
ir_node *new_Shrs   (ir_node *op,  ir_node *k,   ir_mode *mode) {
  return new_d_Shrs(NULL, op, k, mode);
#define new_Rotate new_Rot
ir_node *new_Rot    (ir_node *op,  ir_node *k,   ir_mode *mode) {
  return new_d_Rot(NULL, op, k, mode);
ir_node *new_Cmp    (ir_node *op1, ir_node *op2) {
  return new_d_Cmp(NULL, op1, op2);
ir_node *new_Conv   (ir_node *op, ir_mode *mode) {
  return new_d_Conv(NULL, op, mode);
ir_node *new_Cast   (ir_node *op, type *to_tp) {
  return new_d_Cast(NULL, op, to_tp);
ir_node *new_Phi    (int arity, ir_node **in, ir_mode *mode) {
  return new_d_Phi(NULL, arity, in, mode);
ir_node *new_Load   (ir_node *store, ir_node *addr) {
  return new_d_Load(NULL, store, addr);
ir_node *new_Store  (ir_node *store, ir_node *addr, ir_node *val) {
  return new_d_Store(NULL, store, addr, val);
ir_node *new_Alloc  (ir_node *store, ir_node *size, type *alloc_type,
                     where_alloc where) {
  return new_d_Alloc(NULL, store, size, alloc_type, where);
ir_node *new_Free   (ir_node *store, ir_node *ptr, ir_node *size,
  return new_d_Free(NULL, store, ptr, size, free_type);
ir_node *new_Sync   (int arity, ir_node **in) {
  return new_d_Sync(NULL, arity, in);
2473 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2474 return new_d_Proj(NULL, arg, mode, proj);
2476 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2477 return new_d_defaultProj(NULL, arg, max_proj);
2479 ir_node *new_Tuple (int arity, ir_node **in) {
2480 return new_d_Tuple(NULL, arity, in);
2482 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2483 return new_d_Id(NULL, val, mode);
2485 ir_node *new_Bad (void) {
2488 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2489 return new_d_Confirm (NULL, val, bound, cmp);
2491 ir_node *new_Unknown(void) {
2492 return new_d_Unknown();
2494 ir_node *new_CallBegin (ir_node *callee) {
2495 return new_d_CallBegin(NULL, callee);
2497 ir_node *new_EndReg (void) {
2498 return new_d_EndReg(NULL);
2500 ir_node *new_EndExcept (void) {
2501 return new_d_EndExcept(NULL);
2503 ir_node *new_Break (void) {
2504 return new_d_Break(NULL);
2506 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2507 return new_d_Filter(NULL, arg, mode, proj);