3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
19 # include "irgraph_t.h"
20 # include "irnode_t.h"
21 # include "irmode_t.h"
23 # include "firm_common_t.h"
29 /* memset belongs to string.h */
31 # include "irbackedge_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
44 * language dependent initialization variable
46 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
48 /*** ******************************************** */
49 /** private interfaces, for professional use only */
51 /* Constructs a Block with a fixed number of predecessors.
52 Does not set current_block. Can not be used with automatic
53 Phi node construction. */
/* NOTE(review): extraction dropped lines here (return type, braces, local
   `res` declaration, return) -- only comments are added below. */
55 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* A Block has no enclosing block, hence the NULL block argument. */
59 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* Fixed-arity blocks are born mature: all predecessors are known. */
60 set_Block_matured(res, 1);
61 set_Block_block_visited(res, 0);
/* Initialize exception handling and interprocedural-view attributes. */
63 res->attr.block.exc = exc_normal;
64 res->attr.block.handler_entry = 0;
65 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
66 res->attr.block.in_cg = NULL;
67 res->attr.block.cg_backedge = NULL;
69 irn_vrfy_irg (res, irg);
/* Constructs a Start node (mode_T tuple of initial memory, frame pointer,
   arguments, ...) in the given block.
   NOTE(review): intermediate lines are missing from this extraction. */
74 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
78 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
/* Remember the owning graph in the node attributes. */
79 res->attr.start.irg = irg;
81 irn_vrfy_irg (res, irg);
/* Constructs the End node of a graph.  Arity -1: predecessors (keep-alives)
   are added dynamically later. */
86 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
90 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
92 irn_vrfy_irg (res, irg);
96 /* Creates a Phi node with all predecessors. Calling this constructor
97 is only allowed if the corresponding block is mature. */
99 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
103 bool has_unknown = false;
/* Precondition: the block must be mature and the Phi arity must match the
   block's control-flow arity (one data pred per control pred). */
105 assert( get_Block_matured(block) );
106 assert( get_irn_arity(block) == arity );
108 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
110 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Skip local optimization while any operand is still Unknown. */
112 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
113 if (!has_unknown) res = optimize_node (res);
114 irn_vrfy_irg (res, irg);
116 /* Memory Phis in endless loops must be kept alive.
117 As we can't distinguish these easily we keep all of them alive. */
118 if ((res->op == op_Phi) && (mode == mode_M))
119 add_End_keepalive(irg->end, res);
/* Constructs a Const node from tarval `con` with an explicit type `tp`.
   Const lives in the given block (normally the Start block). */
124 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
127 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
128 res->attr.con.tv = con;
129 set_Const_type(res, tp); /* Call method because of complex assertion. */
/* optimize_node may CSE this Const with an identical existing one. */
130 res = optimize_node (res);
131 assert(get_Const_type(res) == tp);
132 irn_vrfy_irg (res, irg);
135 res = local_optimize_newby (res);
/* Convenience wrapper: derives the type from the tarval.  Entity tarvals
   get a pointer-to-entity-type; everything else gets unknown_type. */
142 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
144 type *tp = unknown_type;
145 if (tarval_is_entity(con))
146 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
147 return new_rd_Const_type (db, irg, block, mode, con, tp);
/* Constructs an Id node: a transparent forwarder for `val`, normally
   optimized away immediately by optimize_node. */
151 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
156 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
157 res = optimize_node (res);
158 irn_vrfy_irg (res, irg);
/* Constructs a Proj node selecting result `proj` from a mode_T node. */
163 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
169 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
170 res->attr.proj = proj;
173 assert(get_Proj_pred(res));
174 assert(get_nodes_Block(get_Proj_pred(res)));
176 res = optimize_node (res);
178 irn_vrfy_irg (res, irg);
/* Constructs the default (fall-through) Proj of a Cond with an unsigned
   selector, marking the Cond's projection set as fragmentary. */
184 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
188 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
/* Side effect on the Cond node: record that max_proj is the default. */
189 arg->attr.c.kind = fragmentary;
190 arg->attr.c.default_proj = max_proj;
191 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node converting `op` to `mode`. */
196 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
201 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
202 res = optimize_node (res);
203 irn_vrfy_irg (res, irg);
/* Constructs a Cast node: changes the type of `op` to `to_tp` without
   changing its mode (high-level type information only). */
208 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
211 res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
212 res->attr.cast.totype = to_tp;
213 res = optimize_node (res);
214 irn_vrfy_irg (res, irg);
/* Constructs a Tuple node bundling `arity` values into one mode_T node. */
219 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
223 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
224 res = optimize_node (res);
225 irn_vrfy_irg (res, irg);
/* Arithmetic, logic, shift and compare constructors.  All follow the same
   scheme: build the node from its operands, run local optimization
   (constant folding / CSE may replace it), then verify.
   The Quot/DivMod/Div/Mod family additionally takes a memory operand and
   yields a mode_T tuple (memory result + data result(s)).
   NOTE(review): the `in[]` initializations and surrounding braces are
   missing from this extraction; only comments are added here. */
230 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
231 ir_node *op1, ir_node *op2, ir_mode *mode)
237 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
238 res = optimize_node (res);
239 irn_vrfy_irg (res, irg);
244 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
245 ir_node *op1, ir_node *op2, ir_mode *mode)
251 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
252 res = optimize_node (res);
253 irn_vrfy_irg (res, irg);
258 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
259 ir_node *op, ir_mode *mode)
264 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
265 res = optimize_node (res);
266 irn_vrfy_irg (res, irg);
271 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
272 ir_node *op1, ir_node *op2, ir_mode *mode)
278 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
279 res = optimize_node (res);
280 irn_vrfy_irg (res, irg);
285 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
286 ir_node *memop, ir_node *op1, ir_node *op2)
293 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
294 res = optimize_node (res);
295 irn_vrfy_irg (res, irg);
300 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
301 ir_node *memop, ir_node *op1, ir_node *op2)
308 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
309 res = optimize_node (res);
310 irn_vrfy_irg (res, irg);
315 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
316 ir_node *memop, ir_node *op1, ir_node *op2)
323 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
324 res = optimize_node (res);
325 irn_vrfy_irg (res, irg);
330 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
331 ir_node *memop, ir_node *op1, ir_node *op2)
338 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
339 res = optimize_node (res);
340 irn_vrfy_irg (res, irg);
345 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
346 ir_node *op1, ir_node *op2, ir_mode *mode)
352 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
353 res = optimize_node (res);
354 irn_vrfy_irg (res, irg);
359 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
360 ir_node *op1, ir_node *op2, ir_mode *mode)
366 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
367 res = optimize_node (res);
368 irn_vrfy_irg (res, irg);
373 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
374 ir_node *op1, ir_node *op2, ir_mode *mode)
380 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
381 res = optimize_node (res);
382 irn_vrfy_irg (res, irg);
387 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
388 ir_node *op, ir_mode *mode)
393 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
394 res = optimize_node (res);
395 irn_vrfy_irg (res, irg);
400 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
401 ir_node *op, ir_node *k, ir_mode *mode)
407 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
408 res = optimize_node (res);
409 irn_vrfy_irg (res, irg);
414 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
415 ir_node *op, ir_node *k, ir_mode *mode)
421 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
422 res = optimize_node (res);
423 irn_vrfy_irg (res, irg);
428 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
429 ir_node *op, ir_node *k, ir_mode *mode)
435 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
436 res = optimize_node (res);
437 irn_vrfy_irg (res, irg);
442 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
443 ir_node *op, ir_node *k, ir_mode *mode)
449 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
450 res = optimize_node (res);
451 irn_vrfy_irg (res, irg);
456 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
457 ir_node *op, ir_mode *mode)
462 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
463 res = optimize_node (res);
464 irn_vrfy_irg (res, irg);
/* Cmp yields a mode_T tuple; Projs select the individual relations. */
469 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
470 ir_node *op1, ir_node *op2)
476 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
477 res = optimize_node (res);
478 irn_vrfy_irg (res, irg);
/* Constructs an unconditional Jmp (mode_X control flow). */
483 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
486 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
487 res = optimize_node (res);
488 irn_vrfy_irg (res, irg);
/* Constructs a Cond on selector `c`; yields a mode_T tuple of control
   flow Projs.  Starts as a dense projection set with default Proj 0. */
493 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
498 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
499 res->attr.c.kind = dense;
500 res->attr.c.default_proj = 0;
501 res = optimize_node (res);
502 irn_vrfy_irg (res, irg);
/* Constructs a Call: real in-array is [store, callee, args...], hence the
   copy of the user-supplied `in` starting at slot 2.
   NOTE(review): r_arity computation and r_in[0]/r_in[1] assignments are
   missing from this extraction. */
507 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
508 ir_node *callee, int arity, ir_node **in, type *tp)
515 NEW_ARR_A (ir_node *, r_in, r_arity);
518 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
520 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type; stored via accessor. */
522 assert(is_method_type(tp));
523 set_Call_type(res, tp);
524 res->attr.call.callee_arr = NULL;
525 res = optimize_node (res);
526 irn_vrfy_irg (res, irg);
/* Constructs a Return: in-array is [store, results...]. */
531 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
532 ir_node *store, int arity, ir_node **in)
539 NEW_ARR_A (ir_node *, r_in, r_arity);
541 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
542 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
543 res = optimize_node (res);
544 irn_vrfy_irg (res, irg);
/* Constructs a Raise (exception throw) on [store, obj]. */
549 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
555 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
556 res = optimize_node (res);
557 irn_vrfy_irg (res, irg);
/* Memory operation constructors.  Each takes the current memory state
   `store` plus operands and yields a mode_T tuple (new memory + results). */
562 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
563 ir_node *store, ir_node *adr)
569 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
571 res = optimize_node (res);
572 irn_vrfy_irg (res, irg);
577 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
578 ir_node *store, ir_node *adr, ir_node *val)
585 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
587 res = optimize_node (res);
589 irn_vrfy_irg (res, irg);
/* Alloc: allocates `size` bytes of type `alloc_type` on stack or heap
   depending on `where`. */
594 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
595 ir_node *size, type *alloc_type, where_alloc where)
601 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
603 res->attr.a.where = where;
604 res->attr.a.type = alloc_type;
606 res = optimize_node (res);
607 irn_vrfy_irg (res, irg);
/* Free: releases `size` bytes of type `free_type` at `ptr`. */
612 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
613 ir_node *ptr, ir_node *size, type *free_type)
620 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
622 res->attr.f = free_type;
624 res = optimize_node (res);
625 irn_vrfy_irg (res, irg);
/* Constructs a Sel: selects entity `ent` from compound `objptr`, with
   `arity` index operands for array access.  Real in-array is
   [store, objptr, indices...]. */
630 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
631 int arity, ir_node **in, entity *ent)
638 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
641 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
642 res = new_ir_node (db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
644 res->attr.s.ent = ent;
646 res = optimize_node (res);
647 irn_vrfy_irg (res, irg);
/* Constructs an InstOf (runtime type test) on [store, objptr]. */
652 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
653 ir_node *objptr, type *ent)
660 NEW_ARR_A (ir_node *, r_in, r_arity);
/* NOTE(review): op_Sel looks wrong here -- the io attribute below suggests
   op_InstOf was intended.  TODO confirm against full upstream source. */
664 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
666 res->attr.io.ent = ent;
668 /* res = optimize (res);
669 * irn_vrfy_irg (res, irg); */
/* Constructs a SymConst: a symbolic constant that is either a linkage
   pointer (ident) or a type tag / type size, depending on `symkind`.
   NOTE(review): the mode selection and else-branch braces are missing
   from this extraction. */
674 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
675 symconst_kind symkind)
679 if (symkind == linkage_ptr_info)
683 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
685 res->attr.i.num = symkind;
/* The union member written depends on the kind: ident vs. type. */
686 if (symkind == linkage_ptr_info) {
687 res->attr.i.tori.ptrinfo = (ident *)value;
689 assert ( ( (symkind == type_tag)
690 || (symkind == size))
691 && (is_type(value)));
692 res->attr.i.tori.typ = (type *)value;
694 res = optimize_node (res);
695 irn_vrfy_irg (res, irg);
/* Constructs a Sync: joins several memory values into one (mode_M). */
700 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
704 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
706 res = optimize_node (res);
707 irn_vrfy_irg (res, irg);
/* Bad and Unknown are singletons per graph; bodies are missing from this
   extraction (presumably they return the graph's cached nodes -- TODO
   confirm). */
712 new_rd_Bad (ir_graph *irg)
718 new_rd_Unknown (ir_graph *irg)
/* Interprocedural-view constructors: CallBegin marks the control flow
   entering a callee; EndReg/EndExcept are regular/exceptional end nodes
   with dynamically added predecessors (arity -1). */
724 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
728 in[0] = get_Call_ptr(call);
729 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
730 res->attr.callbegin.irg = irg;
731 res->attr.callbegin.call = call;
732 res = optimize_node (res);
733 irn_vrfy_irg (res, irg);
738 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
742 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
743 res->attr.end.irg = irg;
745 irn_vrfy_irg (res, irg);
750 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
754 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
755 res->attr.end.irg = irg;
757 irn_vrfy_irg (res, irg);
/* Break: jump out of an interprocedural region (mode_X). */
762 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
765 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
766 res = optimize_node (res);
767 irn_vrfy_irg (res, irg);
/* Filter: interprocedural analogue of Proj; selects `proj` from `arg`
   and may later receive interprocedural predecessors (in_cg). */
772 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
778 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
779 res->attr.filter.proj = proj;
780 res->attr.filter.in_cg = NULL;
781 res->attr.filter.backedge = NULL;
784 assert(get_Proj_pred(res));
785 assert(get_nodes_Block(get_Proj_pred(res)));
787 res = optimize_node (res);
789 irn_vrfy_irg (res, irg);
/* Convenience wrappers: the new_r_* constructors are the new_rd_* variants
   with a NULL debug-info pointer.  Purely delegating; no logic of their own. */
794 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
795 return new_rd_Block(NULL, irg, arity, in);
797 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
798 return new_rd_Start(NULL, irg, block);
800 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
801 return new_rd_End(NULL, irg, block);
803 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
804 return new_rd_Jmp(NULL, irg, block);
806 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
807 return new_rd_Cond(NULL, irg, block, c);
809 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
810 ir_node *store, int arity, ir_node **in) {
811 return new_rd_Return(NULL, irg, block, store, arity, in);
813 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
814 ir_node *store, ir_node *obj) {
815 return new_rd_Raise(NULL, irg, block, store, obj);
817 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
818 ir_mode *mode, tarval *con) {
819 return new_rd_Const(NULL, irg, block, mode, con);
821 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
822 type_or_id_p value, symconst_kind symkind) {
823 return new_rd_SymConst(NULL, irg, block, value, symkind);
825 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
826 ir_node *objptr, int n_index, ir_node **index,
828 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
830 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
832 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
834 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
835 ir_node *callee, int arity, ir_node **in,
837 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
839 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
840 ir_node *op1, ir_node *op2, ir_mode *mode) {
841 return new_rd_Add(NULL, irg, block, op1, op2, mode);
843 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
844 ir_node *op1, ir_node *op2, ir_mode *mode) {
845 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
847 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
848 ir_node *op, ir_mode *mode) {
849 return new_rd_Minus(NULL, irg, block, op, mode);
851 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
852 ir_node *op1, ir_node *op2, ir_mode *mode) {
853 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
855 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
856 ir_node *memop, ir_node *op1, ir_node *op2) {
857 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
859 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
860 ir_node *memop, ir_node *op1, ir_node *op2) {
861 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
863 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
864 ir_node *memop, ir_node *op1, ir_node *op2) {
865 return new_rd_Div(NULL, irg, block, memop, op1, op2);
867 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
868 ir_node *memop, ir_node *op1, ir_node *op2) {
869 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
871 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
872 ir_node *op, ir_mode *mode) {
873 return new_rd_Abs(NULL, irg, block, op, mode);
875 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
876 ir_node *op1, ir_node *op2, ir_mode *mode) {
877 return new_rd_And(NULL, irg, block, op1, op2, mode);
879 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
880 ir_node *op1, ir_node *op2, ir_mode *mode) {
881 return new_rd_Or(NULL, irg, block, op1, op2, mode);
883 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
884 ir_node *op1, ir_node *op2, ir_mode *mode) {
885 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
887 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
888 ir_node *op, ir_mode *mode) {
889 return new_rd_Not(NULL, irg, block, op, mode);
891 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
892 ir_node *op1, ir_node *op2) {
893 return new_rd_Cmp(NULL, irg, block, op1, op2);
895 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
896 ir_node *op, ir_node *k, ir_mode *mode) {
897 return new_rd_Shl(NULL, irg, block, op, k, mode);
899 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
900 ir_node *op, ir_node *k, ir_mode *mode) {
901 return new_rd_Shr(NULL, irg, block, op, k, mode);
903 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
904 ir_node *op, ir_node *k, ir_mode *mode) {
905 return new_rd_Shrs(NULL, irg, block, op, k, mode);
907 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
908 ir_node *op, ir_node *k, ir_mode *mode) {
909 return new_rd_Rot(NULL, irg, block, op, k, mode);
911 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
912 ir_node *op, ir_mode *mode) {
913 return new_rd_Conv(NULL, irg, block, op, mode);
915 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
916 return new_rd_Cast(NULL, irg, block, op, to_tp);
918 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
919 ir_node **in, ir_mode *mode) {
920 return new_rd_Phi(NULL, irg, block, arity, in, mode);
922 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
923 ir_node *store, ir_node *adr) {
924 return new_rd_Load(NULL, irg, block, store, adr);
926 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
927 ir_node *store, ir_node *adr, ir_node *val) {
928 return new_rd_Store(NULL, irg, block, store, adr, val);
930 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
931 ir_node *size, type *alloc_type, where_alloc where) {
932 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
934 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
935 ir_node *ptr, ir_node *size, type *free_type) {
936 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
938 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
939 return new_rd_Sync(NULL, irg, block, arity, in);
941 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
942 ir_mode *mode, long proj) {
943 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
945 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
947 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
949 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
950 int arity, ir_node **in) {
951 return new_rd_Tuple(NULL, irg, block, arity, in );
953 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
954 ir_node *val, ir_mode *mode) {
955 return new_rd_Id(NULL, irg, block, val, mode);
957 INLINE ir_node *new_r_Bad (ir_graph *irg) {
958 return new_rd_Bad(irg);
960 INLINE ir_node *new_r_Unknown (ir_graph *irg) {
961 return new_rd_Unknown(irg);
963 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
964 return new_rd_CallBegin(NULL, irg, block, callee);
966 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
967 return new_rd_EndReg(NULL, irg, block);
969 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
970 return new_rd_EndExcept(NULL, irg, block);
972 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
973 return new_rd_Break(NULL, irg, block);
975 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
976 ir_mode *mode, long proj) {
977 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
981 /** ********************/
982 /** public interfaces */
983 /** construction tools */
987 * - create a new Start node in the current block
989 * @return s - pointer to the created Start node
/* new_d_* constructors build in current_ir_graph's current block and
   cooperate with automatic Phi construction. */
994 new_d_Start (dbg_info* db)
998 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
999 op_Start, mode_T, 0, NULL);
1000 res->attr.start.irg = current_ir_graph;
1002 res = optimize_node (res);
1003 irn_vrfy_irg (res, current_ir_graph);
/* End node of the current graph; arity -1 for dynamic keep-alives. */
1008 new_d_End (dbg_info* db)
1011 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1012 op_End, mode_X, -1, NULL);
1013 res = optimize_node (res);
1014 irn_vrfy_irg (res, current_ir_graph);
1019 /* Constructs a Block with a fixed number of predecessors.
1020 Does set current_block. Can be used with automatic Phi
1021 node construction. */
1023 new_d_Block (dbg_info* db, int arity, ir_node **in)
1027 bool has_unknown = false;
1029 res = new_rd_Block (db, current_ir_graph, arity, in);
1031 /* Create and initialize array for Phi-node construction. */
/* graph_arr[pos] caches the current value of local variable `pos` in this
   block; zeroed so unset slots read as NULL. */
1032 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1033 current_ir_graph->n_loc);
1034 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Skip local optimization while any predecessor is still Unknown. */
1036 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1038 if (!has_unknown) res = optimize_node (res);
/* Subsequent new_d_* constructions are placed into this block. */
1039 current_ir_graph->current_block = res;
1041 irn_vrfy_irg (res, current_ir_graph);
1046 /* ***********************************************************************/
1047 /* Methods necessary for automatic Phi node creation */
1049 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1050 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1051 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1052 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1054 Call Graph: ( A ---> B == A "calls" B)
1056 get_value mature_block
1064 get_r_value_internal |
1068 new_rd_Phi0 new_rd_Phi_in
1070 * *************************************************************************** */
1072 /* Creates a Phi node with 0 predecessors */
/* Placeholder used while a block is immature; completed (or turned into
   an Id) when the block is matured.  No debug info, no optimization. */
1073 static INLINE ir_node *
1074 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1077 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1078 irn_vrfy_irg (res, irg);
1082 /* There are two implementations of the Phi node construction. The first
1083 is faster, but does not work for blocks with more than 2 predecessors.
1084 The second works always but is slower and causes more unnecessary Phi
1086 Select the implementations by the following preprocessor flag set in
1088 #if USE_FAST_PHI_CONSTRUCTION
1090 /* This is a stack used for allocating and deallocating nodes in
1091 new_rd_Phi_in. The original implementation used the obstack
1092 to model this stack, now it is explicit. This reduces side effects.
1094 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates the explicit stack used to recycle speculatively allocated
   Phi nodes in new_rd_Phi_in.
   NOTE(review): malloc result is used unchecked here -- original may have
   asserted; lines are missing from this extraction. */
1095 INLINE Phi_in_stack *
1096 new_Phi_in_stack() {
1099 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1101 res->stack = NEW_ARR_F (ir_node *, 1);
/* Releases the stack's flexible array (and, presumably, the struct --
   the free() line is missing from this extraction). */
1108 free_Phi_in_stack(Phi_in_stack *s) {
1109 DEL_ARR_F(s->stack);
/* Pushes an unused Phi back onto the recycle stack: append if the stack
   array is full, otherwise overwrite the slot at `pos`. */
1113 free_to_Phi_in_stack(ir_node *phi) {
1114 assert(get_irn_opcode(phi) == iro_Phi);
1116 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1117 current_ir_graph->Phi_in_stack->pos)
1118 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1120 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1122 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node: pops a recycled one from the Phi_in_stack if
   available, otherwise allocates a fresh node.  A recycled node has its
   in-array rebuilt on the obstack (the old one is leaked there -- see the
   original ???-comment below). */
1125 static INLINE ir_node *
1126 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1127 int arity, ir_node **in) {
1129 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1130 int pos = current_ir_graph->Phi_in_stack->pos;
1134 /* We need to allocate a new node */
/* NOTE(review): `db` is not a parameter of this function in the visible
   signature -- presumably NULL in the full source.  TODO confirm. */
1135 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1136 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1138 /* reuse the old node and initialize it again. */
1141 assert (res->kind == k_ir_node);
1142 assert (res->op == op_Phi);
1146 assert (arity >= 0);
1147 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* Slot 0 of `in` holds the block pointer, data preds start at index 1. */
1148 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1150 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1152 (current_ir_graph->Phi_in_stack->pos)--;
1156 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1158 /* Creates a Phi node with a given, fixed array **in of predecessors.
1159 If the Phi node is unnecessary, as the same value reaches the block
1160 through all control flow paths, it is eliminated and the value
1161 returned directly. This constructor is only intended for use in
1162 the automatic Phi node generation triggered by get_value or mature.
1163 The implementation is quite tricky and depends on the fact, that
1164 the nodes are allocated on a stack:
1165 The in array contains predecessors and NULLs. The NULLs appear,
1166 if get_r_value_internal, that computed the predecessors, reached
1167 the same block on two paths. In this case the same value reaches
1168 this block on both paths, there is no definition in between. We need
1169 not allocate a Phi where these paths merge, but we have to communicate
1170 this fact to the caller. This happens by returning a pointer to the
1171 node the caller _will_ allocate. (Yes, we predict the address. We can
1172 do so because the nodes are allocated on the obstack.) The caller then
1173 finds a pointer to itself and, when this routine is called again,
/* Builds a Phi from `in` (which may contain NULLs) or eliminates it when
   all predecessors turn out equal; see the long explanation preceding this
   function.  May return a pointer into deallocated obstack storage -- by
   design (address prediction). */
1176 static INLINE ir_node *
1177 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1178 ir_node **in, int ins)
1181 ir_node *res, *known;
1183 /* allocate a new node on the obstack.
1184 This can return a node to which some of the pointers in the in-array
1186 Attention: the constructor copies the in array, i.e., the later changes
1187 to the array in this routine do not affect the constructed node! If
1188 the in array contains NULLs, there will be missing predecessors in the
1190 Is this a possible internal state of the Phi node generation? */
1191 #if USE_EXPLICIT_PHI_IN_STACK
1192 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1194 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1195 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1197 /* The in-array can contain NULLs. These were returned by
1198 get_r_value_internal if it reached the same block/definition on a
1200 The NULLs are replaced by the node itself to simplify the test in the
1202 for (i=0; i < ins; ++i)
1203 if (in[i] == NULL) in[i] = res;
1205 /* This loop checks whether the Phi has more than one predecessor.
1206 If so, it is a real Phi node and we break the loop. Else the
1207 Phi node merges the same definition on several paths and therefore
1209 for (i=0; i < ins; ++i)
1211 if (in[i]==res || in[i]==known) continue;
1219 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Give the speculatively allocated node back (stack or obstack top). */
1221 #if USE_EXPLICIT_PHI_IN_STACK
1222 free_to_Phi_in_stack(res);
1224 obstack_free (current_ir_graph->obst, res);
1228 res = optimize_node (res);
1229 irn_vrfy_irg (res, irg);
1232 /* return the pointer to the Phi node. This node might be deallocated! */
1237 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1240 allocates and returns this node. The routine called to allocate the
1241 node might optimize it away and return a real value, or even a pointer
1242 to a deallocated Phi node on top of the obstack!
1243 This function is called with an in-array of proper size. **/
/* Computes the Phi operands for variable `pos` by querying every
   predecessor block, then builds (or elides) the Phi via new_rd_Phi_in
   and records the result in the block's graph_arr value cache. */
1245 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1247 ir_node *prevBlock, *res;
1250 /* This loop goes to all predecessor blocks of the block the Phi node is in
1251 and there finds the operands of the Phi node by calling
1252 get_r_value_internal. */
1253 for (i = 1; i <= ins; ++i) {
1254 assert (block->in[i]);
1255 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1257 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1260 /* After collecting all predecessors into the array nin a new Phi node
1261 with these predecessors is created. This constructor contains an
1262 optimization: If all predecessors of the Phi node are identical it
1263 returns the only operand instead of a new Phi node. If the value
1264 passes two different control flow edges without being defined, and
1265 this is the second path treated, a pointer to the node that will be
1266 allocated for the first path (recursion) is returned. We already
1267 know the address of this node, as it is the next node to be allocated
1268 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1269 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1271 /* Now we know the value for "pos" and can enter it in the array with
1272 all known local variables. Attention: this might be a pointer to
1273 a node, that later will be allocated!!! See new_rd_Phi_in.
1274 If this is called in mature, after some set_value in the same block,
1275 the proper value must not be overwritten:
1277 get_value (makes Phi0, put's it into graph_arr)
1278 set_value (overwrites Phi0 in graph_arr)
1279 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
/* Only cache if no later set_value already provided a value for pos. */
1282 if (!block->attr.block.graph_arr[pos]) {
1283 block->attr.block.graph_arr[pos] = res;
1285 /* printf(" value already computed by %s\n",
1286 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1292 /* This function returns the last definition of a variable. In case
1293 this variable was last defined in a previous block, Phi nodes are
1294 inserted. If the part of the firm graph containing the definition
1295 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): gaps in the embedded line numbering show that several
   original lines (return type, braces, local declarations, the final
   return) are missing from this excerpt; the surviving lines below are
   kept byte-identical. */
1297 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1300 /* There are 4 cases to treat.
1302 1. The block is not mature and we visit it the first time. We can not
1303 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1304 predecessors is returned. This node is added to the linked list (field
1305 "link") of the containing block to be completed when this block is
1306 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1309 2. The value is already known in this block, graph_arr[pos] is set and we
1310 visit the block the first time. We can return the value without
1311 creating any new nodes.
1313 3. The block is mature and we visit it the first time. A Phi node needs
1314 to be created (phi_merge). If the Phi is not needed, as all it's
1315 operands are the same value reaching the block through different
1316 paths, it's optimized away and the value itself is returned.
1318 4. The block is mature, and we visit it the second time. Now two
1319 subcases are possible:
1320 * The value was computed completely the last time we were here. This
1321 is the case if there is no loop. We can return the proper value.
1322 * The recursion that visited this node and set the flag did not
1323 return yet. We are computing a value in a loop and need to
1324 break the recursion without knowing the result yet.
1325 @@@ strange case. Straight forward we would create a Phi before
1326 starting the computation of it's predecessors. In this case we will
1327 find a Phi here in any case. The problem is that this implementation
1328 only creates a Phi after computing the predecessors, so that it is
1329 hard to compute self references of this Phi. @@@
1330 There is no simple check for the second subcase. Therefore we check
1331 for a second visit and treat all such cases as the second subcase.
1332 Anyways, the basic situation is the same: we reached a block
1333 on two paths without finding a definition of the value: No Phi
1334 nodes are needed on both paths.
1335 We return this information "Two paths, no Phi needed" by a very tricky
1336 implementation that relies on the fact that an obstack is a stack and
1337 will return a node with the same address on different allocations.
1338 Look also at phi_merge and new_rd_phi_in to understand this.
1339 @@@ Unfortunately this does not work, see testprogram
1340 three_cfpred_example.
1344 /* case 4 -- already visited. */
/* Second visit: return NULL to signal "two paths, no Phi needed"
   (the obstack trick described above). */
1345 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1347 /* visited the first time */
1348 set_irn_visited(block, get_irg_visited(current_ir_graph));
1350 /* Get the local valid value */
1351 res = block->attr.block.graph_arr[pos];
1353 /* case 2 -- If the value is actually computed, return it. */
1354 if (res) { return res;};
1356 if (block->attr.block.matured) { /* case 3 */
1358 /* The Phi has the same amount of ins as the corresponding block. */
1359 int ins = get_irn_arity(block);
1361 NEW_ARR_A (ir_node *, nin, ins);
1363 /* Phi merge collects the predecessors and then creates a node. */
1364 res = phi_merge (block, pos, mode, nin, ins);
1366 } else { /* case 1 */
1367 /* The block is not mature, we don't know how many in's are needed. A Phi
1368 with zero predecessors is created. Such a Phi node is called Phi0
1369 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1370 to the list of Phi0 nodes in this block to be matured by mature_block
1372 The Phi0 has to remember the pos of it's internal value. If the real
1373 Phi is computed, pos is used to update the array with the local
1376 res = new_rd_Phi0 (current_ir_graph, block, mode);
1377 res->attr.phi0_pos = pos;
1378 res->link = block->link;
1382 /* If we get here, the frontend missed a use-before-definition error */
/* Last-resort fallback: report the error and synthesize a zero constant
   of the requested mode so construction can continue. */
1385 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1386 assert (mode->code >= irm_F && mode->code <= irm_P);
1387 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1388 tarval_mode_null[mode->code]);
1391 /* The local valid value is available now. */
1392 block->attr.block.graph_arr[pos] = res;
1403 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy stack interface: with this configuration no explicit Phi-in
   stack is kept, so allocation/free are no-ops.
   NOTE(review): the #if condition looks inverted for dummies — verify
   against the matching #else/#endif, which is not visible here. */
1405 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1406 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Creates a Phi node for "block" with the given predecessors, or
   returns an existing equivalent value if the Phi is not needed. */
1409 static INLINE ir_node *
1410 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1411 ir_node **in, int ins)
1414 ir_node *res, *known;
1416 /* Allocate a new node on the obstack. The allocation copies the in
1418 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1419 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1421 /* This loop checks whether the Phi has more than one predecessor.
1422 If so, it is a real Phi node and we break the loop. Else the
1423 Phi node merges the same definition on several paths and therefore
1424 is not needed. Don't consider Bad nodes! */
/* NOTE(review): the listing omits the loop body lines that initialize
   and update 'known'; code below is reproduced verbatim. */
1426 for (i=0; i < ins; ++i)
1430 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1438 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* The unneeded node was the last allocation, so it can be popped off
   the obstack again. */
1441 obstack_free (current_ir_graph->obst, res);
1444 /* A undefined value, e.g., in unreachable code. */
1448 res = optimize_node (res);
1449 irn_vrfy_irg (res, irg);
1450 /* Memory Phis in endless loops must be kept alive.
1451 As we can't distinguish these easily we keep all of them alive. */
1452 if ((res->op == op_Phi) && (mode == mode_M))
1453 add_End_keepalive(irg->end, res);
/* Forward declarations for the mutually recursive construction routines. */
1460 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1462 #if PRECISE_EXC_CONTEXT
1464 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Allocates a "fragment array": a snapshot of the current block's
   graph_arr taken at a fragile (possibly excepting) operation n, so
   that the value state at the exception edge can be reconstructed. */
1466 static INLINE ir_node **
1467 new_frag_arr (ir_node *n) {
1470 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1471 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1472 sizeof(ir_node *)*current_ir_graph->n_loc);
1473 /* turn off optimization before allocating Proj nodes, as res isn't
1475 opt = get_optimize(); set_optimize(0);
1476 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* NOTE(review): for Call the memory result is projected at number 3,
   not 0 — verify this against the project's pn_Call numbering; it seems
   at odds with the "first result" comment above. */
1477 if (get_irn_op(n) == op_Call)
1478 arr[0] = new_Proj(n, mode_M, 3);
1480 arr[0] = new_Proj(n, mode_M, 0);
/* Remember the fragile op itself in the reserved last slot so that
   set_frag_value can chain through nested fragile ops. */
1482 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Returns the fragment array stored on a fragile op n; the attribute
   holding it depends on the opcode (Call and Alloc keep it inside
   their own attribute structs, all other fragile ops use the generic
   frag_arr field). */
1486 static INLINE ir_node **
1487 get_frag_arr (ir_node *n) {
1488 if (get_irn_op(n) == op_Call) {
1489 return n->attr.call.frag_arr;
1490 } else if (get_irn_op(n) == op_Alloc) {
1491 return n->attr.a.frag_arr;
1493 return n->attr.frag_arr;
/* Writes val into slot pos of a fragment array, but only if that slot
   is still empty (never overwrites an existing value).  If the
   reserved last slot holds a nested fragile op, recurses into that
   op's fragment array as well. */
1498 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1499 if (!frag_arr[pos]) frag_arr[pos] = val;
1500 if (frag_arr[current_ir_graph->n_loc - 1])
1501 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Returns the value at pos as seen along the exception edge of the
   fragile op cfOp in block.  Consults cfOp's fragment array first; if
   the block already has a newer local value a Phi must be built. */
1505 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1509 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1511 frag_arr = get_frag_arr(cfOp);
1512 res = frag_arr[pos];
1514 if (block->attr.block.graph_arr[pos]) {
1515 /* There was a set_value after the cfOp and no get_value before that
1516 set_value. We must build a Phi node now. */
1517 if (block->attr.block.matured) {
1518 int ins = get_irn_arity(block);
1520 NEW_ARR_A (ir_node *, nin, ins);
1521 res = phi_merge(block, pos, mode, nin, ins);
/* Block not yet matured: create a Phi0 placeholder and queue it on
   the block's link list, as in get_r_value_internal case 1. */
1523 res = new_rd_Phi0 (current_ir_graph, block, mode);
1524 res->attr.phi0_pos = pos;
1525 res->link = block->link;
1529 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1530 but this should be better: (remove comment if this works) */
1531 /* It's a Phi, we can write this into all graph_arrs with NULL */
1532 set_frag_value(block->attr.block.graph_arr, pos, res);
/* No newer local value: fall back to the normal lookup and cache the
   result in the fragment chain. */
1534 res = get_r_value_internal(block, pos, mode);
1535 set_frag_value(block->attr.block.graph_arr, pos, res);
1543 computes the predecessors for the real phi node, and then
1544 allocates and returns this node. The routine called to allocate the
1545 node might optimize it away and return a real value.
1546 This function must be called with an in-array of proper size. **/
1548 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1550 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1553 /* If this block has no value at pos create a Phi0 and remember it
1554 in graph_arr to break recursions.
1555 Else we may not set graph_arr as there a later value is remembered. */
1557 if (!block->attr.block.graph_arr[pos]) {
1558 if (block == get_irg_start_block(current_ir_graph)) {
1559 /* Collapsing to Bad tarvals is no good idea.
1560 So we call a user-supplied routine here that deals with this case as
1561 appropriate for the given language. Unfortunately the only help we can give
1562 here is the position.
1564 Even if all variables are defined before use, it can happen that
1565 we get to the start block, if a cond has been replaced by a tuple
1566 (bad, jmp). In this case we call the function needlessly, eventually
1567 generating a non-existent error.
1568 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1571 if (default_initialize_local_variable)
1572 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
1574 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1575 /* We don't need to care about exception ops in the start block.
1576 There are none by definition. */
1577 return block->attr.block.graph_arr[pos];
/* Not the start block: plant a Phi0 placeholder so that the recursive
   lookups below terminate when they reach this block again. */
1579 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1580 block->attr.block.graph_arr[pos] = phi0;
1581 #if PRECISE_EXC_CONTEXT
1582 /* Set graph_arr for fragile ops. Also here we should break recursion.
1583 We could choose a cyclic path through an cfop. But the recursion would
1584 break at some point. */
1585 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1590 /* This loop goes to all predecessor blocks of the block the Phi node
1591 is in and there finds the operands of the Phi node by calling
1592 get_r_value_internal. */
/* Note: block->in is 1-based (in[0] is the block's own entry), hence
   the loop runs 1..ins while nin is filled at i-1. */
1593 for (i = 1; i <= ins; ++i) {
1594 prevCfOp = skip_Proj(block->in[i]);
1596 if (is_Bad(prevCfOp)) {
1597 /* In case a Cond has been optimized we would get right to the start block
1598 with an invalid definition. */
1599 nin[i-1] = new_Bad();
1602 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1604 if (!is_Bad(prevBlock)) {
1605 #if PRECISE_EXC_CONTEXT
/* A fragile predecessor: use the value state captured at its
   exception context instead of the block-end state. */
1606 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1607 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1608 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1611 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1613 nin[i-1] = new_Bad();
1617 /* After collecting all predecessors into the array nin a new Phi node
1618 with these predecessors is created. This constructor contains an
1619 optimization: If all predecessors of the Phi node are identical it
1620 returns the only operand instead of a new Phi node. */
1621 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1623 /* In case we allocated a Phi0 node at the beginning of this procedure,
1624 we need to exchange this Phi0 with the real Phi. */
1626 exchange(phi0, res);
1627 block->attr.block.graph_arr[pos] = res;
1628 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1629 only an optimization. */
1635 /* This function returns the last definition of a variable. In case
1636 this variable was last defined in a previous block, Phi nodes are
1637 inserted. If the part of the firm graph containing the definition
1638 is not yet constructed, a dummy Phi node is returned. */
/* NOTE(review): this is the second (Phi0-based) variant of
   get_r_value_internal; the two variants are selected by the
   USE_FAST_PHI_CONSTRUCTION configuration (see the #endif below). */
1640 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1643 /* There are 4 cases to treat.
1645 1. The block is not mature and we visit it the first time. We can not
1646 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1647 predecessors is returned. This node is added to the linked list (field
1648 "link") of the containing block to be completed when this block is
1649 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1652 2. The value is already known in this block, graph_arr[pos] is set and we
1653 visit the block the first time. We can return the value without
1654 creating any new nodes.
1656 3. The block is mature and we visit it the first time. A Phi node needs
1657 to be created (phi_merge). If the Phi is not needed, as all it's
1658 operands are the same value reaching the block through different
1659 paths, it's optimized away and the value itself is returned.
1661 4. The block is mature, and we visit it the second time. Now two
1662 subcases are possible:
1663 * The value was computed completely the last time we were here. This
1664 is the case if there is no loop. We can return the proper value.
1665 * The recursion that visited this node and set the flag did not
1666 return yet. We are computing a value in a loop and need to
1667 break the recursion. This case only happens if we visited
1668 the same block with phi_merge before, which inserted a Phi0.
1669 So we return the Phi0.
1672 /* case 4 -- already visited. */
1673 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1674 /* As phi_merge allocates a Phi0 this value is always defined. Here
1675 is the critical difference of the two algorithms. */
1676 assert(block->attr.block.graph_arr[pos]);
1677 return block->attr.block.graph_arr[pos];
1680 /* visited the first time */
1681 set_irn_visited(block, get_irg_visited(current_ir_graph));
1683 /* Get the local valid value */
1684 res = block->attr.block.graph_arr[pos];
1686 /* case 2 -- If the value is actually computed, return it. */
1687 if (res) { return res; };
1689 if (block->attr.block.matured) { /* case 3 */
1691 /* The Phi has the same amount of ins as the corresponding block. */
1692 int ins = get_irn_arity(block);
1694 NEW_ARR_A (ir_node *, nin, ins);
1696 /* Phi merge collects the predecessors and then creates a node. */
1697 res = phi_merge (block, pos, mode, nin, ins);
1699 } else { /* case 1 */
1700 /* The block is not mature, we don't know how many in's are needed. A Phi
1701 with zero predecessors is created. Such a Phi node is called Phi0
1702 node. The Phi0 is then added to the list of Phi0 nodes in this block
1703 to be matured by mature_block later.
1704 The Phi0 has to remember the pos of it's internal value. If the real
1705 Phi is computed, pos is used to update the array with the local
1707 res = new_rd_Phi0 (current_ir_graph, block, mode);
1708 res->attr.phi0_pos = pos;
1709 res->link = block->link;
1713 /* If we get here, the frontend missed a use-before-definition error */
/* Last-resort fallback: report the error and synthesize a zero constant
   of the requested mode so construction can continue. */
1716 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1717 assert (mode->code >= irm_F && mode->code <= irm_P);
1718 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1719 get_mode_null(mode));
1722 /* The local valid value is available now. */
1723 block->attr.block.graph_arr[pos] = res;
1728 #endif /* USE_FAST_PHI_CONSTRUCTION */
1730 /* ************************************************************************** */
1732 /** Finalize a Block node, when all control flows are known. */
1733 /** Acceptable parameters are only Block nodes. */
/* Finalizes a Block once all its control-flow predecessors are known:
   fixes the in-array length, matures every queued Phi0 on the block's
   link list via phi_merge, marks the block matured, and optimizes it
   in place. */
1735 mature_block (ir_node *block)
1742 assert (get_irn_opcode(block) == iro_Block);
1743 /* @@@ should be commented in
1744 assert (!get_Block_matured(block) && "Block already matured"); */
1746 if (!get_Block_matured(block)) {
/* ARR_LEN counts the block's own entry at in[0], hence the -1. */
1747 ins = ARR_LEN (block->in)-1;
1748 /* Fix block parameters */
1749 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1751 /* An array for building the Phi nodes. */
1752 NEW_ARR_A (ir_node *, nin, ins);
1754 /* Traverse a chain of Phi nodes attached to this block and mature
1756 for (n = block->link; n; n=next) {
1757 inc_irg_visited(current_ir_graph);
1759 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1762 block->attr.block.matured = 1;
1764 /* Now, as the block is a finished firm node, we can optimize it.
1765 Since other nodes have been allocated since the block was created
1766 we can not free the node on the obstack. Therefore we have to call
1768 Unfortunately the optimization does not change a lot, as all allocated
1769 nodes refer to the unoptimized node.
1770 We can call _2, as global cse has no effect on blocks. */
1771 block = optimize_in_place_2(block);
1772 irn_vrfy_irg(block, current_ir_graph);
/* new_d_* constructors: thin wrappers that build a node with debug
   info db in the graph's current block (Const/Const_type/SymConst use
   the start block) by delegating to the matching new_rd_* routine. */
1777 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1779 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
1784 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
/* Constants are placed in the start block, not the current block. */
1786 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1791 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1793 return new_rd_Const_type (db, current_ir_graph, current_ir_graph->start_block,
1799 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1801 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1806 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1808 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
/* Marks a Cond as fragmentary and projects its default output. */
1813 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1816 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1817 arg->attr.c.kind = fragmentary;
1818 arg->attr.c.default_proj = max_proj;
1819 res = new_Proj (arg, mode_X, max_proj);
1824 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1826 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1831 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1833 return new_rd_Cast (db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1837 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1839 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1844 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1846 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1851 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1853 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1859 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1861 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1866 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1868 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
/* Fragile arithmetic constructors (Quot/DivMod/Div/Mod): besides
   delegating to new_rd_*, they attach a fragment array during graph
   building so the precise exception context can be reconstructed.
   The op check guards against the node having been optimized away. */
1873 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1876 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1878 #if PRECISE_EXC_CONTEXT
1879 if ((current_ir_graph->phase_state == phase_building) &&
1880 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1881 res->attr.frag_arr = new_frag_arr(res);
1888 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1891 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1893 #if PRECISE_EXC_CONTEXT
1894 if ((current_ir_graph->phase_state == phase_building) &&
1895 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1896 res->attr.frag_arr = new_frag_arr(res);
1903 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1906 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1908 #if PRECISE_EXC_CONTEXT
1909 if ((current_ir_graph->phase_state == phase_building) &&
1910 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1911 res->attr.frag_arr = new_frag_arr(res);
1918 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1921 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1923 #if PRECISE_EXC_CONTEXT
1924 if ((current_ir_graph->phase_state == phase_building) &&
1925 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1926 res->attr.frag_arr = new_frag_arr(res);
/* Bitwise, shift, comparison and control-flow constructors: plain
   delegations into the current block. */
1933 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1935 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1940 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1942 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1947 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1949 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1954 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1956 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1961 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1963 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1968 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1970 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1975 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1977 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
1982 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1984 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
1989 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
1991 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
1996 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
1998 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2003 new_d_Jmp (dbg_info* db)
2005 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2009 new_d_Cond (dbg_info* db, ir_node *c)
2011 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* Memory/side-effect constructors. Call, Load, Store and Alloc are
   fragile ops: during graph building they get a fragment array for the
   precise exception context (guarded because optimization may have
   replaced the node). */
2015 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2019 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2020 store, callee, arity, in, tp);
2021 #if PRECISE_EXC_CONTEXT
2022 if ((current_ir_graph->phase_state == phase_building) &&
2023 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
2024 res->attr.call.frag_arr = new_frag_arr(res);
2031 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2033 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2038 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2040 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2045 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2048 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2050 #if PRECISE_EXC_CONTEXT
2051 if ((current_ir_graph->phase_state == phase_building) &&
2052 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
2053 res->attr.frag_arr = new_frag_arr(res);
2060 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2063 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2065 #if PRECISE_EXC_CONTEXT
2066 if ((current_ir_graph->phase_state == phase_building) &&
2067 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
2068 res->attr.frag_arr = new_frag_arr(res);
2075 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2079 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2080 store, size, alloc_type, where);
2081 #if PRECISE_EXC_CONTEXT
2082 if ((current_ir_graph->phase_state == phase_building) &&
2083 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
2084 res->attr.a.frag_arr = new_frag_arr(res);
2091 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2093 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2094 store, ptr, size, free_type);
/* Entity selection, symbolic constants and interprocedural-view
   constructors: plain delegations (simpleSel is a Sel with no index
   array; SymConst is placed in the start block). */
2098 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2099 /* GL: objptr was called frame before. Frame was a bad choice for the name
2100 as the operand could as well be a pointer to a dynamic object. */
2102 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2103 store, objptr, 0, NULL, ent);
2107 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2109 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2110 store, objptr, n_index, index, sel);
2114 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2116 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2117 store, objptr, ent));
2121 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2123 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2128 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2130 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* Bad and Unknown are singletons cached on the graph. */
2138 return current_ir_graph->bad;
2142 new_d_Unknown (void)
2144 return current_ir_graph->unknown;
2148 new_d_CallBegin (dbg_info *db, ir_node *call)
2151 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2156 new_d_EndReg (dbg_info *db)
2159 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2164 new_d_EndExcept (dbg_info *db)
2167 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2172 new_d_Break (dbg_info *db)
2174 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2178 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2180 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2184 /* ********************************************************************* */
2185 /* Comfortable interface with automatic Phi node construction. */
2186 /* (Uses also constructors of ?? interface, except new_Block. */
2187 /* ********************************************************************* */
2189 /** Block construction **/
2190 /* immature Block without predecessors */
/* Creates an immature Block (predecessors still unknown, dynamic
   in-array), makes it the current block and allocates the graph_arr
   used for automatic Phi construction.  Must be matured with
   mature_block once all control-flow edges are added. */
ir_node *new_d_immBlock (dbg_info* db) {
2194 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2195 /* creates a new dynamic in-array as length of in is -1 */
2196 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2197 current_ir_graph->current_block = res;
2198 res->attr.block.matured = 0;
2199 res->attr.block.exc = exc_normal;
2200 res->attr.block.handler_entry = 0;
2201 res->attr.block.backedge = NULL;
2202 res->attr.block.in_cg = NULL;
2203 res->attr.block.cg_backedge = NULL;
2204 set_Block_block_visited(res, 0);
2206 /* Create and initialize array for Phi-node construction. */
2207 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2208 current_ir_graph->n_loc);
2209 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2211 /* Immature block may not be optimized! */
2212 irn_vrfy_irg (res, current_ir_graph);
/* new_immBlock: debug-info-free convenience wrapper. */
2219 return new_d_immBlock(NULL);
2222 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new control-flow predecessor of an immature block. */
2224 add_in_edge (ir_node *block, ir_node *jmp)
2226 if (block->attr.block.matured) {
2227 assert(0 && "Error: Block already matured!\n");
2230 assert (jmp != NULL);
2231 ARR_APP1 (ir_node *, block->in, jmp);
2235 /* changing the current block */
2237 switch_block (ir_node *target)
2239 current_ir_graph->current_block = target;
2242 /* ************************ */
2243 /* parameter administration */
2245 /* get a value from the parameter array from the current block by its index */
2247 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2249 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2250 inc_irg_visited(current_ir_graph);
/* pos + 1 because slot 0 of graph_arr holds the current store
   (see get_store/set_store below). */
2252 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2254 /* get a value from the parameter array from the current block by its index */
2256 get_value (int pos, ir_mode *mode)
2258 return get_d_value(NULL, pos, mode);
2261 /* set a value at position pos in the parameter array from the current block */
2263 set_value (int pos, ir_node *value)
2265 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2266 assert(pos+1 < current_ir_graph->n_loc);
2267 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2270 /* get the current store */
2274 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2275 /* GL: one could call get_value instead */
2276 inc_irg_visited(current_ir_graph);
/* The store lives in the reserved slot 0 with mode_M. */
2277 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2280 /* set the current store */
2282 set_store (ir_node *store)
2284 /* GL: one could call set_value instead */
2285 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2286 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keeps ka alive by attaching it to the End node's keep-alive list. */
2290 keep_alive (ir_node *ka)
2292 add_End_keepalive(current_ir_graph->end, ka);
2295 /** Useful access routines **/
2296 /* Returns the current block of the current graph. To set the current
2297 block use switch_block(). */
ir_node *get_cur_block() {
2299 return get_irg_current_block(current_ir_graph);
2302 /* Returns the frame type of the current graph */
type *get_cur_frame_type() {
2304 return get_irg_frame_type(current_ir_graph);
2308 /* ********************************************************************* */
2311 /* call once for each run of the library */
/* Registers the language-specific handler used by phi_merge when an
   undefined local is read in the start block. */
2313 init_cons (default_initialize_local_variable_func_t *func)
2315 default_initialize_local_variable = func;
2318 /* call for each graph */
/* Marks the graph as fully constructed. */
finalize_cons (ir_graph *irg) {
2321 irg->phase_state = phase_high;
/* Convenience constructors without debug information: each simply
   calls the matching new_d_* constructor with db == NULL. */
ir_node *new_Block(int arity, ir_node **in) {
2326 return new_d_Block(NULL, arity, in);
ir_node *new_Start (void) {
2329 return new_d_Start(NULL);
ir_node *new_End (void) {
2332 return new_d_End(NULL);
ir_node *new_Jmp (void) {
2335 return new_d_Jmp(NULL);
ir_node *new_Cond (ir_node *c) {
2338 return new_d_Cond(NULL, c);
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2341 return new_d_Return(NULL, store, arity, in);
ir_node *new_Raise (ir_node *store, ir_node *obj) {
2344 return new_d_Raise(NULL, store, obj);
ir_node *new_Const (ir_mode *mode, tarval *con) {
2347 return new_d_Const(NULL, mode, con);
ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2350 return new_d_SymConst(NULL, value, kind);
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2353 return new_d_simpleSel(NULL, store, objptr, ent);
ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2357 return new_d_Sel(NULL, store, objptr, arity, in, ent);
ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2360 return (new_d_InstOf (NULL, store, objptr, ent));
ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2364 return new_d_Call(NULL, store, callee, arity, in, tp);
/* Debug-info-free arithmetic/bitwise/shift constructors. */
ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2367 return new_d_Add(NULL, op1, op2, mode);
ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2370 return new_d_Sub(NULL, op1, op2, mode);
ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2373 return new_d_Minus(NULL, op, mode);
ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2376 return new_d_Mul(NULL, op1, op2, mode);
ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2379 return new_d_Quot(NULL, memop, op1, op2);
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2382 return new_d_DivMod(NULL, memop, op1, op2);
ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2385 return new_d_Div(NULL, memop, op1, op2);
ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2388 return new_d_Mod(NULL, memop, op1, op2);
ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2391 return new_d_Abs(NULL, op, mode);
ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2394 return new_d_And(NULL, op1, op2, mode);
ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2397 return new_d_Or(NULL, op1, op2, mode);
ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2400 return new_d_Eor(NULL, op1, op2, mode);
ir_node *new_Not (ir_node *op, ir_mode *mode) {
2403 return new_d_Not(NULL, op, mode);
ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2406 return new_d_Shl(NULL, op, k, mode);
ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2409 return new_d_Shr(NULL, op, k, mode);
ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2412 return new_d_Shrs(NULL, op, k, mode);
/* Alias kept for callers that use the name new_Rotate. */
2414 #define new_Rotate new_Rot
ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2416 return new_d_Rot(NULL, op, k, mode);
2418 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2419 return new_d_Cmp(NULL, op1, op2);
2421 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2422 return new_d_Conv(NULL, op, mode);
2424 ir_node *new_Cast (ir_node *op, type *to_tp) {
2425 return new_d_Cast(NULL, op, to_tp);
2427 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2428 return new_d_Phi(NULL, arity, in, mode);
2430 ir_node *new_Load (ir_node *store, ir_node *addr) {
2431 return new_d_Load(NULL, store, addr);
2433 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2434 return new_d_Store(NULL, store, addr, val);
2436 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2437 where_alloc where) {
2438 return new_d_Alloc(NULL, store, size, alloc_type, where);
2440 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2442 return new_d_Free(NULL, store, ptr, size, free_type);
2444 ir_node *new_Sync (int arity, ir_node **in) {
2445 return new_d_Sync(NULL, arity, in);
2447 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2448 return new_d_Proj(NULL, arg, mode, proj);
2450 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2451 return new_d_defaultProj(NULL, arg, max_proj);
2453 ir_node *new_Tuple (int arity, ir_node **in) {
2454 return new_d_Tuple(NULL, arity, in);
2456 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2457 return new_d_Id(NULL, val, mode);
2459 ir_node *new_Bad (void) {
2462 ir_node *new_Unknown(void) {
2463 return new_d_Unknown();
2465 ir_node *new_CallBegin (ir_node *callee) {
2466 return new_d_CallBegin(NULL, callee);
2468 ir_node *new_EndReg (void) {
2469 return new_d_EndReg(NULL);
2471 ir_node *new_EndExcept (void) {
2472 return new_d_EndExcept(NULL);
2474 ir_node *new_Break (void) {
2475 return new_d_Break(NULL);
2477 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2478 return new_d_Filter(NULL, arg, mode, proj);