3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
19 # include "irgraph_t.h"
20 # include "irnode_t.h"
21 # include "irmode_t.h"
23 # include "firm_common_t.h"
29 /* memset belongs to string.h */
31 # include "irbackedge_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
44 * language dependent initialization variable
46 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
48 /*** ******************************************** */
49 /** private interfaces, for professional use only */
51 /* Constructs a Block with a fixed number of predecessors.
52 Does not set current_block. Can not be used with automatic
53 Phi node construction. */
55 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* A Block is its own "block": the block pointer passed here is NULL. */
59 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
60 set_Block_matured(res, 1);
61 set_Block_block_visited(res, 0);
63 //res->attr.block.exc = exc_normal;
64 //res->attr.block.handler_entry = 0;
65 res->attr.block.irg = irg;
/* One backedge flag per predecessor, allocated on the graph's obstack. */
66 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural (call graph) view not built yet: clear its fields. */
67 res->attr.block.in_cg = NULL;
68 res->attr.block.cg_backedge = NULL;
70 irn_vrfy_irg (res, irg);
75 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
79 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
80 //res->attr.start.irg = irg;
82 irn_vrfy_irg (res, irg);
87 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
91 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
93 irn_vrfy_irg (res, irg);
97 /* Creates a Phi node with all predecessors. Calling this constructor
98 is only allowed if the corresponding block is mature. */
100 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
104 bool has_unknown = false;
/* A Phi needs exactly one operand per control flow edge of its block. */
106 assert( get_Block_matured(block) );
107 assert( get_irn_arity(block) == arity );
109 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
111 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Do not optimize while any operand is still an Unknown placeholder:
   the placeholder may be replaced later and the optimization would be
   based on it. */
113 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
114 if (!has_unknown) res = optimize_node (res);
115 irn_vrfy_irg (res, irg);
117 /* Memory Phis in endless loops must be kept alive.
118 As we can't distinguish these easily we keep all of them alive. */
/* Re-check res->op: optimize_node may have replaced the Phi entirely. */
119 if ((res->op == op_Phi) && (mode == mode_M))
120 add_End_keepalive(irg->end, res);
125 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
/* Const nodes have no predecessors; the value is carried in the attribute. */
128 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
129 res->attr.con.tv = con;
130 set_Const_type(res, tp); /* Call method because of complex assertion. */
131 res = optimize_node (res);
/* optimize_node may CSE to an already existing Const; its type must agree. */
132 assert(get_Const_type(res) == tp);
133 irn_vrfy_irg (res, irg);
136 res = local_optimize_newby (res);
143 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
/* Derive the Const's type from the tarval where possible: entity
   constants get a pointer-to-entity-type, everything else stays unknown. */
145 type *tp = unknown_type;
146 if (tarval_is_entity(con))
147 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
148 return new_rd_Const_type (db, irg, block, mode, con, tp);
152 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
157 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
158 res = optimize_node (res);
159 irn_vrfy_irg (res, irg);
164 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
170 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
171 res->attr.proj = proj;
174 assert(get_Proj_pred(res));
175 assert(get_nodes_Block(get_Proj_pred(res)));
177 res = optimize_node (res);
179 irn_vrfy_irg (res, irg);
185 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
/* NOTE(review): direct field access (arg->op, arg->in[1]) bypasses the
   accessor functions used elsewhere in this file — confirm whether
   get_irn_op / get_Cond_selector should be used here instead. */
189 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
/* Side effect: the Cond node itself is mutated — it is marked as a
   fragmentary switch and remembers which Proj number is the default. */
190 arg->attr.c.kind = fragmentary;
191 arg->attr.c.default_proj = max_proj;
192 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
197 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
202 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
203 res = optimize_node (res);
204 irn_vrfy_irg (res, irg);
209 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
212 res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
213 res->attr.cast.totype = to_tp;
214 res = optimize_node (res);
215 irn_vrfy_irg (res, irg);
220 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
224 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
225 res = optimize_node (res);
226 irn_vrfy_irg (res, irg);
231 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
232 ir_node *op1, ir_node *op2, ir_mode *mode)
238 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
239 res = optimize_node (res);
240 irn_vrfy_irg (res, irg);
245 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
246 ir_node *op1, ir_node *op2, ir_mode *mode)
252 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
253 res = optimize_node (res);
254 irn_vrfy_irg (res, irg);
259 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
260 ir_node *op, ir_mode *mode)
265 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
266 res = optimize_node (res);
267 irn_vrfy_irg (res, irg);
272 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
273 ir_node *op1, ir_node *op2, ir_mode *mode)
279 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
280 res = optimize_node (res);
281 irn_vrfy_irg (res, irg);
286 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
287 ir_node *memop, ir_node *op1, ir_node *op2)
294 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
295 res = optimize_node (res);
296 irn_vrfy_irg (res, irg);
301 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
302 ir_node *memop, ir_node *op1, ir_node *op2)
309 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
310 res = optimize_node (res);
311 irn_vrfy_irg (res, irg);
316 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
317 ir_node *memop, ir_node *op1, ir_node *op2)
324 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
325 res = optimize_node (res);
326 irn_vrfy_irg (res, irg);
331 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
332 ir_node *memop, ir_node *op1, ir_node *op2)
339 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
340 res = optimize_node (res);
341 irn_vrfy_irg (res, irg);
346 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
347 ir_node *op1, ir_node *op2, ir_mode *mode)
353 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
354 res = optimize_node (res);
355 irn_vrfy_irg (res, irg);
360 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
361 ir_node *op1, ir_node *op2, ir_mode *mode)
367 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
368 res = optimize_node (res);
369 irn_vrfy_irg (res, irg);
374 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
375 ir_node *op1, ir_node *op2, ir_mode *mode)
381 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
382 res = optimize_node (res);
383 irn_vrfy_irg (res, irg);
388 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
389 ir_node *op, ir_mode *mode)
394 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
395 res = optimize_node (res);
396 irn_vrfy_irg (res, irg);
401 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
402 ir_node *op, ir_node *k, ir_mode *mode)
408 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
409 res = optimize_node (res);
410 irn_vrfy_irg (res, irg);
415 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
416 ir_node *op, ir_node *k, ir_mode *mode)
422 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
423 res = optimize_node (res);
424 irn_vrfy_irg (res, irg);
429 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
430 ir_node *op, ir_node *k, ir_mode *mode)
436 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
437 res = optimize_node (res);
438 irn_vrfy_irg (res, irg);
443 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
444 ir_node *op, ir_node *k, ir_mode *mode)
450 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
451 res = optimize_node (res);
452 irn_vrfy_irg (res, irg);
457 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
458 ir_node *op, ir_mode *mode)
463 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
464 res = optimize_node (res);
465 irn_vrfy_irg (res, irg);
470 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
471 ir_node *op1, ir_node *op2)
477 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
478 res = optimize_node (res);
479 irn_vrfy_irg (res, irg);
484 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
487 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
488 res = optimize_node (res);
489 irn_vrfy_irg (res, irg);
494 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
499 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
500 res->attr.c.kind = dense;
501 res->attr.c.default_proj = 0;
502 res = optimize_node (res);
503 irn_vrfy_irg (res, irg);
508 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
509 ir_node *callee, int arity, ir_node **in, type *tp)
516 NEW_ARR_A (ir_node *, r_in, r_arity);
519 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
521 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
523 assert(is_method_type(tp));
524 set_Call_type(res, tp);
525 res->attr.call.callee_arr = NULL;
526 res = optimize_node (res);
527 irn_vrfy_irg (res, irg);
532 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
533 ir_node *store, int arity, ir_node **in)
540 NEW_ARR_A (ir_node *, r_in, r_arity);
542 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
543 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
544 res = optimize_node (res);
545 irn_vrfy_irg (res, irg);
550 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
556 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
557 res = optimize_node (res);
558 irn_vrfy_irg (res, irg);
563 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
564 ir_node *store, ir_node *adr)
570 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
572 res = optimize_node (res);
573 irn_vrfy_irg (res, irg);
578 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
579 ir_node *store, ir_node *adr, ir_node *val)
586 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
588 res = optimize_node (res);
590 irn_vrfy_irg (res, irg);
595 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
596 ir_node *size, type *alloc_type, where_alloc where)
602 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
604 res->attr.a.where = where;
605 res->attr.a.type = alloc_type;
607 res = optimize_node (res);
608 irn_vrfy_irg (res, irg);
613 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
614 ir_node *ptr, ir_node *size, type *free_type)
621 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
623 res->attr.f = free_type;
625 res = optimize_node (res);
626 irn_vrfy_irg (res, irg);
631 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
632 int arity, ir_node **in, entity *ent)
639 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
642 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
643 res = new_ir_node (db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
645 res->attr.s.ent = ent;
647 res = optimize_node (res);
648 irn_vrfy_irg (res, irg);
653 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
654 ir_node *objptr, type *ent)
661 NEW_ARR_A (ir_node *, r_in, r_arity);
/* NOTE(review): this is the InstOf constructor, yet the node is built
   with op_Sel — this looks like a copy-paste slip from new_rd_Sel and
   should presumably be op_InstOf; confirm against the opcode table
   before changing. */
665 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
667 res->attr.io.ent = ent;
/* Optimization/verification deliberately left disabled for InstOf. */
669 /* res = optimize (res);
670 * irn_vrfy_irg (res, irg); */
675 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
676 symconst_kind symkind)
680 if (symkind == linkage_ptr_info)
684 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
686 res->attr.i.num = symkind;
/* The attribute union is interpreted according to symkind: an ident for
   linkage pointer info, a type for type_tag / size constants. */
687 if (symkind == linkage_ptr_info) {
688 res->attr.i.tori.ptrinfo = (ident *)value;
690 assert ( ( (symkind == type_tag)
691 || (symkind == size))
692 && (is_type(value)));
693 res->attr.i.tori.typ = (type *)value;
695 res = optimize_node (res);
696 irn_vrfy_irg (res, irg);
701 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
705 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
707 res = optimize_node (res);
708 irn_vrfy_irg (res, irg);
713 new_rd_Bad (ir_graph *irg)
719 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
721 ir_node *in[2], *res;
725 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
727 res->attr.confirm_cmp = cmp;
729 res = optimize_node (res);
730 irn_vrfy_irg(res, irg);
735 new_rd_Unknown (ir_graph *irg, ir_mode *m)
737 return new_ir_node (NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
741 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
745 in[0] = get_Call_ptr(call);
746 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
747 //res->attr.callbegin.irg = irg;
748 res->attr.callbegin.call = call;
749 res = optimize_node (res);
750 irn_vrfy_irg (res, irg);
755 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
759 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
760 //res->attr.end.irg = irg;
762 irn_vrfy_irg (res, irg);
767 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
771 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
772 //res->attr.end.irg = irg;
774 irn_vrfy_irg (res, irg);
779 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
782 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
783 res = optimize_node (res);
784 irn_vrfy_irg (res, irg);
789 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
795 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
796 res->attr.filter.proj = proj;
797 res->attr.filter.in_cg = NULL;
798 res->attr.filter.backedge = NULL;
801 assert(get_Proj_pred(res));
802 assert(get_nodes_Block(get_Proj_pred(res)));
804 res = optimize_node (res);
806 irn_vrfy_irg (res, irg);
812 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
813 ir_node *callee, int arity, ir_node **in, type *tp)
820 NEW_ARR_A (ir_node *, r_in, r_arity);
822 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
824 res = new_ir_node (db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
826 assert(is_method_type(tp));
827 set_Call_type(res, tp);
828 res->attr.call.callee_arr = NULL;
829 res = optimize_node (res);
830 irn_vrfy_irg (res, irg);
835 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
836 return new_rd_Block(NULL, irg, arity, in);
838 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
839 return new_rd_Start(NULL, irg, block);
841 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
842 return new_rd_End(NULL, irg, block);
844 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
845 return new_rd_Jmp(NULL, irg, block);
847 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
848 return new_rd_Cond(NULL, irg, block, c);
850 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
851 ir_node *store, int arity, ir_node **in) {
852 return new_rd_Return(NULL, irg, block, store, arity, in);
854 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
855 ir_node *store, ir_node *obj) {
856 return new_rd_Raise(NULL, irg, block, store, obj);
858 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
859 ir_mode *mode, tarval *con) {
860 return new_rd_Const(NULL, irg, block, mode, con);
862 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
863 type_or_id_p value, symconst_kind symkind) {
864 return new_rd_SymConst(NULL, irg, block, value, symkind);
866 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
867 ir_node *objptr, int n_index, ir_node **index,
869 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
871 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
873 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
875 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
876 ir_node *callee, int arity, ir_node **in,
878 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
880 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
881 ir_node *op1, ir_node *op2, ir_mode *mode) {
882 return new_rd_Add(NULL, irg, block, op1, op2, mode);
884 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
885 ir_node *op1, ir_node *op2, ir_mode *mode) {
886 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
888 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
889 ir_node *op, ir_mode *mode) {
890 return new_rd_Minus(NULL, irg, block, op, mode);
892 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
893 ir_node *op1, ir_node *op2, ir_mode *mode) {
894 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
896 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
897 ir_node *memop, ir_node *op1, ir_node *op2) {
898 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
900 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
901 ir_node *memop, ir_node *op1, ir_node *op2) {
902 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
904 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
905 ir_node *memop, ir_node *op1, ir_node *op2) {
906 return new_rd_Div(NULL, irg, block, memop, op1, op2);
908 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
909 ir_node *memop, ir_node *op1, ir_node *op2) {
910 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
912 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
913 ir_node *op, ir_mode *mode) {
914 return new_rd_Abs(NULL, irg, block, op, mode);
916 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
917 ir_node *op1, ir_node *op2, ir_mode *mode) {
918 return new_rd_And(NULL, irg, block, op1, op2, mode);
920 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
921 ir_node *op1, ir_node *op2, ir_mode *mode) {
922 return new_rd_Or(NULL, irg, block, op1, op2, mode);
924 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
925 ir_node *op1, ir_node *op2, ir_mode *mode) {
926 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
928 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
929 ir_node *op, ir_mode *mode) {
930 return new_rd_Not(NULL, irg, block, op, mode);
932 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
933 ir_node *op1, ir_node *op2) {
934 return new_rd_Cmp(NULL, irg, block, op1, op2);
936 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
937 ir_node *op, ir_node *k, ir_mode *mode) {
938 return new_rd_Shl(NULL, irg, block, op, k, mode);
940 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
941 ir_node *op, ir_node *k, ir_mode *mode) {
942 return new_rd_Shr(NULL, irg, block, op, k, mode);
944 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
945 ir_node *op, ir_node *k, ir_mode *mode) {
946 return new_rd_Shrs(NULL, irg, block, op, k, mode);
948 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
949 ir_node *op, ir_node *k, ir_mode *mode) {
950 return new_rd_Rot(NULL, irg, block, op, k, mode);
952 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
953 ir_node *op, ir_mode *mode) {
954 return new_rd_Conv(NULL, irg, block, op, mode);
956 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
957 return new_rd_Cast(NULL, irg, block, op, to_tp);
959 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
960 ir_node **in, ir_mode *mode) {
961 return new_rd_Phi(NULL, irg, block, arity, in, mode);
963 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
964 ir_node *store, ir_node *adr) {
965 return new_rd_Load(NULL, irg, block, store, adr);
967 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
968 ir_node *store, ir_node *adr, ir_node *val) {
969 return new_rd_Store(NULL, irg, block, store, adr, val);
971 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
972 ir_node *size, type *alloc_type, where_alloc where) {
973 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
975 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
976 ir_node *ptr, ir_node *size, type *free_type) {
977 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
979 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
980 return new_rd_Sync(NULL, irg, block, arity, in);
982 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
983 ir_mode *mode, long proj) {
984 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
986 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
988 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
990 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
991 int arity, ir_node **in) {
992 return new_rd_Tuple(NULL, irg, block, arity, in );
994 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
995 ir_node *val, ir_mode *mode) {
996 return new_rd_Id(NULL, irg, block, val, mode);
998 INLINE ir_node *new_r_Bad (ir_graph *irg) {
999 return new_rd_Bad(irg);
1001 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1002 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1004 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1005 return new_rd_Unknown(irg, m);
1007 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1008 return new_rd_CallBegin(NULL, irg, block, callee);
1010 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1011 return new_rd_EndReg(NULL, irg, block);
1013 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1014 return new_rd_EndExcept(NULL, irg, block);
1016 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1017 return new_rd_Break(NULL, irg, block);
1019 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1020 ir_mode *mode, long proj) {
1021 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1023 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1024 ir_node *callee, int arity, ir_node **in,
1026 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1030 /** ********************/
1031 /** public interfaces */
1032 /** construction tools */
1036 * - create a new Start node in the current block
1038 * @return s - pointer to the created Start node
1043 new_d_Start (dbg_info* db)
1047 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1048 op_Start, mode_T, 0, NULL);
1049 //res->attr.start.irg = current_ir_graph;
1051 res = optimize_node (res);
1052 irn_vrfy_irg (res, current_ir_graph);
1057 new_d_End (dbg_info* db)
1060 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1061 op_End, mode_X, -1, NULL);
1062 res = optimize_node (res);
1063 irn_vrfy_irg (res, current_ir_graph);
1068 /* Constructs a Block with a fixed number of predecessors.
1069 Does set current_block. Can be used with automatic Phi
1070 node construction. */
1072 new_d_Block (dbg_info* db, int arity, ir_node **in)
1076 bool has_unknown = false;
1078 res = new_rd_Block (db, current_ir_graph, arity, in);
1080 /* Create and initialize array for Phi-node construction. */
/* One slot per local variable (n_loc); NULL means "no value known yet". */
1081 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1082 current_ir_graph->n_loc);
1083 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* As in new_rd_Phi: skip optimization while Unknown predecessors remain. */
1085 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1087 if (!has_unknown) res = optimize_node (res);
/* Unlike new_rd_Block, this constructor makes the new block current. */
1088 current_ir_graph->current_block = res;
1090 irn_vrfy_irg (res, current_ir_graph);
1095 /* ***********************************************************************/
1096 /* Methods necessary for automatic Phi node creation */
1098 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1099 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1100 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1101 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1103 Call Graph: ( A ---> B == A "calls" B)
1105 get_value mature_block
1113 get_r_value_internal |
1117 new_rd_Phi0 new_rd_Phi_in
1119 * *************************************************************************** */
1121 /* Creates a Phi node with 0 predecessors */
1122 static INLINE ir_node *
1123 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1126 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1127 irn_vrfy_irg (res, irg);
1131 /* There are two implementations of the Phi node construction. The first
1132 is faster, but does not work for blocks with more than 2 predecessors.
1133 The second works always but is slower and causes more unnecessary Phi
1135 Select the implementations by the following preprocessor flag set in
1137 #if USE_FAST_PHI_CONSTRUCTION
1139 /* This is a stack used for allocating and deallocating nodes in
1140 new_rd_Phi_in. The original implementation used the obstack
1141 to model this stack, now it is explicit. This reduces side effects.
1143 #if USE_EXPLICIT_PHI_IN_STACK
1144 INLINE Phi_in_stack *
1145 new_Phi_in_stack() {
1148 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1150 res->stack = NEW_ARR_F (ir_node *, 1);
1157 free_Phi_in_stack(Phi_in_stack *s) {
1158 DEL_ARR_F(s->stack);
1162 free_to_Phi_in_stack(ir_node *phi) {
1163 assert(get_irn_opcode(phi) == iro_Phi);
1165 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1166 current_ir_graph->Phi_in_stack->pos)
1167 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1169 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1171 (current_ir_graph->Phi_in_stack->pos)++;
1174 static INLINE ir_node *
1175 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1176 int arity, ir_node **in) {
1178 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1179 int pos = current_ir_graph->Phi_in_stack->pos;
1183 /* We need to allocate a new node */
/* NOTE(review): `db` is not a parameter of this function and no local
   declaration is visible — this looks like it cannot compile when
   USE_EXPLICIT_PHI_IN_STACK is enabled; presumably NULL was intended
   (reused Phi nodes carry no debug info). Confirm. */
1184 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1185 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1187 /* reuse the old node and initialize it again. */
1190 assert (res->kind == k_ir_node);
1191 assert (res->op == op_Phi);
1195 assert (arity >= 0);
1196 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1197 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
/* in[0] is reserved for the block; copy the real operands behind it. */
1199 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1201 (current_ir_graph->Phi_in_stack->pos)--;
1205 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1207 /* Creates a Phi node with a given, fixed array **in of predecessors.
1208 If the Phi node is unnecessary, as the same value reaches the block
1209 through all control flow paths, it is eliminated and the value
1210 returned directly. This constructor is only intended for use in
1211 the automatic Phi node generation triggered by get_value or mature.
1212 The implementation is quite tricky and depends on the fact, that
1213 the nodes are allocated on a stack:
1214 The in array contains predecessors and NULLs. The NULLs appear,
1215 if get_r_value_internal, that computed the predecessors, reached
1216 the same block on two paths. In this case the same value reaches
1217 this block on both paths, there is no definition in between. We need
1218 not allocate a Phi where these path's merge, but we have to communicate
1219 this fact to the caller. This happens by returning a pointer to the
1220 node the caller _will_ allocate. (Yes, we predict the address. We can
1221 do so because the nodes are allocated on the obstack.) The caller then
1222 finds a pointer to itself and, when this routine is called again,
1225 static INLINE ir_node *
1226 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1227 ir_node **in, int ins)
1230 ir_node *res, *known;
1232 /* allocate a new node on the obstack.
1233 This can return a node to which some of the pointers in the in-array
1235 Attention: the constructor copies the in array, i.e., the later changes
1236 to the array in this routine do not affect the constructed node! If
1237 the in array contains NULLs, there will be missing predecessors in the
1239 Is this a possible internal state of the Phi node generation? */
1240 #if USE_EXPLICIT_PHI_IN_STACK
1241 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
/* Without the explicit stack: allocate directly, no debug info attached. */
1243 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1244 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1246 /* The in-array can contain NULLs. These were returned by
1247 get_r_value_internal if it reached the same block/definition on a
1249 The NULLs are replaced by the node itself to simplify the test in the
1251 for (i=0; i < ins; ++i)
1252 if (in[i] == NULL) in[i] = res;
1254 /* This loop checks whether the Phi has more than one predecessor.
1255 If so, it is a real Phi node and we break the loop. Else the
1256 Phi node merges the same definition on several paths and therefore
1258 for (i=0; i < ins; ++i)
/* Skip self-references and the single known predecessor seen so far. */
1260 if (in[i]==res || in[i]==known) continue;
1268 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1270 #if USE_EXPLICIT_PHI_IN_STACK
1271 free_to_Phi_in_stack(res);
/* Without the explicit stack the node sits on top of the obstack and is
   simply popped off again. */
1273 obstack_free (current_ir_graph->obst, res);
1277 res = optimize_node (res);
1278 irn_vrfy_irg (res, irg);
1281 /* return the pointer to the Phi node. This node might be deallocated! */
1286 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1289 allocates and returns this node. The routine called to allocate the
1290 node might optimize it away and return a real value, or even a pointer
1291 to a deallocated Phi node on top of the obstack!
1292 This function is called with an in-array of proper size. **/
1294 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1296 ir_node *prevBlock, *res;
1299 /* This loop goes to all predecessor blocks of the block the Phi node is in
1300 and there finds the operands of the Phi node by calling
1301 get_r_value_internal. */
/* Control flow predecessors start at in[1]; in[0] is not an edge.
   NOTE(review): direct field access (block->in[i]->in[0]) instead of the
   accessor functions used elsewhere — confirm this is intentional here. */
1302 for (i = 1; i <= ins; ++i) {
1303 assert (block->in[i]);
1304 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1306 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1309 /* After collecting all predecessors into the array nin a new Phi node
1310 with these predecessors is created. This constructor contains an
1311 optimization: If all predecessors of the Phi node are identical it
1312 returns the only operand instead of a new Phi node. If the value
1313 passes two different control flow edges without being defined, and
1314 this is the second path treated, a pointer to the node that will be
1315 allocated for the first path (recursion) is returned. We already
1316 know the address of this node, as it is the next node to be allocated
1317 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1318 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1320 /* Now we know the value for "pos" and can enter it in the array with
1321 all known local variables. Attention: this might be a pointer to
1322 a node, that later will be allocated!!! See new_rd_Phi_in.
1323 If this is called in mature, after some set_value in the same block,
1324 the proper value must not be overwritten:
1326 get_value (makes Phi0, put's it into graph_arr)
1327 set_value (overwrites Phi0 in graph_arr)
1328 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
/* Only record the value if no later definition has been entered yet. */
1331 if (!block->attr.block.graph_arr[pos]) {
1332 block->attr.block.graph_arr[pos] = res;
1334 /* printf(" value already computed by %s\n",
1335 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1341 /* This function returns the last definition of a variable. In case
1342 this variable was last defined in a previous block, Phi nodes are
1343 inserted. If the part of the firm graph containing the definition
1344 is not yet constructed, a dummy Phi node is returned. */
/* Returns the last definition of variable slot 'pos' valid in 'block',
   inserting Phi nodes (or a dummy Phi0 in immature blocks) as needed.
   Consistency fix: use the accessor get_mode_null(mode) for the fallback
   constant, matching the sibling implementation of this function in the
   other preprocessor branch (which already uses get_mode_null), instead of
   indexing the raw tarval_mode_null table. */
1346 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1349 /* There are 4 cases to treat.
1351 1. The block is not mature and we visit it the first time. We can not
1352 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1353 predecessors is returned. This node is added to the linked list (field
1354 "link") of the containing block to be completed when this block is
1355 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1358 2. The value is already known in this block, graph_arr[pos] is set and we
1359 visit the block the first time. We can return the value without
1360 creating any new nodes.
1362 3. The block is mature and we visit it the first time. A Phi node needs
1363 to be created (phi_merge). If the Phi is not needed, as all it's
1364 operands are the same value reaching the block through different
1365 paths, it's optimized away and the value itself is returned.
1367 4. The block is mature, and we visit it the second time. Now two
1368 subcases are possible:
1369 * The value was computed completely the last time we were here. This
1370 is the case if there is no loop. We can return the proper value.
1371 * The recursion that visited this node and set the flag did not
1372 return yet. We are computing a value in a loop and need to
1373 break the recursion without knowing the result yet.
1374 @@@ strange case. Straight forward we would create a Phi before
1375 starting the computation of it's predecessors. In this case we will
1376 find a Phi here in any case. The problem is that this implementation
1377 only creates a Phi after computing the predecessors, so that it is
1378 hard to compute self references of this Phi. @@@
1379 There is no simple check for the second subcase. Therefore we check
1380 for a second visit and treat all such cases as the second subcase.
1381 Anyways, the basic situation is the same: we reached a block
1382 on two paths without finding a definition of the value: No Phi
1383 nodes are needed on both paths.
1384 We return this information "Two paths, no Phi needed" by a very tricky
1385 implementation that relies on the fact that an obstack is a stack and
1386 will return a node with the same address on different allocations.
1387 Look also at phi_merge and new_rd_phi_in to understand this.
1388 @@@ Unfortunately this does not work, see testprogram
1389 three_cfpred_example.
1393 /* case 4 -- already visited. */
1394 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1396 /* visited the first time */
1397 set_irn_visited(block, get_irg_visited(current_ir_graph));
1399 /* Get the local valid value */
1400 res = block->attr.block.graph_arr[pos];
1402 /* case 2 -- If the value is actually computed, return it. */
1403 if (res) { return res;};
1405 if (block->attr.block.matured) { /* case 3 */
1407 /* The Phi has the same amount of ins as the corresponding block. */
1408 int ins = get_irn_arity(block);
1410 NEW_ARR_A (ir_node *, nin, ins);
1412 /* Phi merge collects the predecessors and then creates a node. */
1413 res = phi_merge (block, pos, mode, nin, ins);
1415 } else { /* case 1 */
1416 /* The block is not mature, we don't know how many in's are needed. A Phi
1417 with zero predecessors is created. Such a Phi node is called Phi0
1418 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1419 to the list of Phi0 nodes in this block to be matured by mature_block
1421 The Phi0 has to remember the pos of it's internal value. If the real
1422 Phi is computed, pos is used to update the array with the local
1425 res = new_rd_Phi0 (current_ir_graph, block, mode);
1426 res->attr.phi0_pos = pos;
1427 res->link = block->link;
1431 /* If we get here, the frontend missed a use-before-definition error */
1434 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1435 assert (mode->code >= irm_F && mode->code <= irm_P);
1436 res = new_rd_Const (NULL, current_ir_graph, block, mode,
/* use the mode accessor instead of indexing tarval_mode_null directly,
   for consistency with the other implementation of this function */
1437 get_mode_null(mode));
1440 /* The local valid value is available now. */
1441 block->attr.block.graph_arr[pos] = res;
1449 it starts the recursion. This causes an Id at the entry of
1450 every block that has no definition of the value! **/
1452 #if USE_EXPLICIT_PHI_IN_STACK
1454 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1455 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Allocates a Phi node for 'block' with 'ins' predecessors taken from 'in'.
   If, ignoring Bad and self references, all inputs agree on one value, the
   Phi is freed again and that single value is returned instead.
   NOTE(review): this excerpt omits some original lines; code tokens kept verbatim. */
static INLINE ir_node *
1459 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1460 ir_node **in, int ins)
1463 ir_node *res, *known;
1465 /* Allocate a new node on the obstack. The allocation copies the in
1467 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1468 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1470 /* This loop checks whether the Phi has more than one predecessor.
1471 If so, it is a real Phi node and we break the loop. Else the
1472 Phi node merges the same definition on several paths and therefore
1473 is not needed. Don't consider Bad nodes! */
1475 for (i=0; i < ins; ++i)
1479 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1487 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* The unneeded Phi was the topmost obstack allocation, so it can be freed. */
1490 obstack_free (current_ir_graph->obst, res);
1493 /* An undefined value, e.g., in unreachable code. */
1497 res = optimize_node (res);
1498 irn_vrfy_irg (res, irg);
1499 /* Memory Phis in endless loops must be kept alive.
1500 As we can't distinguish these easily we keep all of them alive. */
1501 if ((res->op == op_Phi) && (mode == mode_M))
1502 add_End_keepalive(irg->end, res);
1509 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1511 #if PRECISE_EXC_CONTEXT
1513 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Snapshots the current block's value array at fragile op 'n' (an op that
   may raise an exception), so the value state at the op can be recovered
   on the exceptional path. Slot 0 gets the op's own memory result; the
   last slot (n_loc-1) links blocks' arrays to this op.
   NOTE(review): this excerpt omits some original lines; code tokens kept verbatim. */
1515 static INLINE ir_node ** new_frag_arr (ir_node *n)
1519 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1520 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1521 sizeof(ir_node *)*current_ir_graph->n_loc);
1522 /* turn off optimization before allocating Proj nodes, as res isn't
1524 opt = get_optimize(); set_optimize(0);
1525 /* Here we rely on the fact that all frag ops have Memory as first result! */
1526 if (get_irn_op(n) == op_Call)
/* Call's memory result lives at Proj number 3 here; other frag ops at 0. */
1527 arr[0] = new_Proj(n, mode_M, 3);
1529 arr[0] = new_Proj(n, mode_M, 0);
1531 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Returns the frag_arr stored in a fragile node's attributes; the attribute
   field holding it differs per opcode (Call and Alloc have dedicated attrs).
   NOTE(review): the '} else {' line between the Alloc and default cases is
   missing from this excerpt; code tokens kept verbatim. */
static INLINE ir_node **
1536 get_frag_arr (ir_node *n) {
1537 if (get_irn_op(n) == op_Call) {
1538 return n->attr.call.frag_arr;
1539 } else if (get_irn_op(n) == op_Alloc) {
1540 return n->attr.a.frag_arr;
1542 return n->attr.frag_arr;
/* Writes 'val' into slot 'pos' of 'frag_arr' only if that slot is still
   empty (never overwrites), then propagates recursively through the chain
   of fragile ops linked via the last slot (n_loc-1). */
1547 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1548 if (!frag_arr[pos]) frag_arr[pos] = val;
/* slot n_loc-1 holds the next fragile op in the chain, if any */
1549 if (frag_arr[current_ir_graph->n_loc - 1])
1550 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Returns the value of slot 'pos' as seen at fragile op 'cfOp' in 'block',
   building a Phi (or Phi0 for immature blocks) when a set_value after the
   cfOp invalidated the snapshot.
   NOTE(review): this excerpt omits some original lines; code tokens kept verbatim. */
1554 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1558 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1560 frag_arr = get_frag_arr(cfOp);
1561 res = frag_arr[pos];
1563 if (block->attr.block.graph_arr[pos]) {
1564 /* There was a set_value after the cfOp and no get_value before that
1565 set_value. We must build a Phi node now. */
1566 if (block->attr.block.matured) {
1567 int ins = get_irn_arity(block);
1569 NEW_ARR_A (ir_node *, nin, ins);
1570 res = phi_merge(block, pos, mode, nin, ins);
1572 res = new_rd_Phi0 (current_ir_graph, block, mode);
1573 res->attr.phi0_pos = pos;
1574 res->link = block->link;
1578 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1579 but this should be better: (remove comment if this works) */
1580 /* It's a Phi, we can write this into all graph_arrs with NULL */
1581 set_frag_value(block->attr.block.graph_arr, pos, res);
1583 res = get_r_value_internal(block, pos, mode);
1584 set_frag_value(block->attr.block.graph_arr, pos, res);
1592 computes the predecessors for the real phi node, and then
1593 allocates and returns this node. The routine called to allocate the
1594 node might optimize it away and return a real value.
1595 This function must be called with an in-array of proper size. **/
/* PRECISE_EXC_CONTEXT variant: collects the value for slot 'pos' from each
   predecessor of 'block' and merges them with a Phi. A preliminary Phi0 is
   placed in graph_arr first to break recursion, and later exchanged for the
   real Phi.
   NOTE(review): this excerpt omits some original lines; code tokens kept verbatim. */
1597 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1599 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1602 /* If this block has no value at pos create a Phi0 and remember it
1603 in graph_arr to break recursions.
1604 Else we may not set graph_arr as there a later value is remembered. */
1606 if (!block->attr.block.graph_arr[pos]) {
1607 if (block == get_irg_start_block(current_ir_graph)) {
1608 /* Collapsing to Bad tarvals is no good idea.
1609 So we call a user-supplied routine here that deals with this case as
1610 appropriate for the given language. Sadly the only help we can give
1611 here is the position.
1613 Even if all variables are defined before use, it can happen that
1614 we get to the start block, if a cond has been replaced by a tuple
1615 (bad, jmp). In this case we call the function needlessly, eventually
1616 generating a non-existent error.
1617 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1620 if (default_initialize_local_variable)
1621 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
1623 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1624 /* We don't need to care about exception ops in the start block.
1625 There are none by definition. */
1626 return block->attr.block.graph_arr[pos];
1628 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1629 block->attr.block.graph_arr[pos] = phi0;
1630 #if PRECISE_EXC_CONTEXT
1631 /* Set graph_arr for fragile ops. Also here we should break recursion.
1632 We could choose a cyclic path through a cfop. But the recursion would
1633 break at some point. */
1634 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1639 /* This loop goes to all predecessor blocks of the block the Phi node
1640 is in and there finds the operands of the Phi node by calling
1641 get_r_value_internal. */
1642 for (i = 1; i <= ins; ++i) {
1643 prevCfOp = skip_Proj(block->in[i]);
1645 if (is_Bad(prevCfOp)) {
1646 /* In case a Cond has been optimized we would get right to the start block
1647 with an invalid definition. */
1648 nin[i-1] = new_Bad();
1651 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1653 if (!is_Bad(prevBlock)) {
1654 #if PRECISE_EXC_CONTEXT
1655 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
/* NOTE(review): the assert re-invokes get_r_frag_value_internal; under
   NDEBUG the check vanishes but the real call on the next line still runs. */
1656 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1657 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1660 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1662 nin[i-1] = new_Bad();
1666 /* After collecting all predecessors into the array nin a new Phi node
1667 with these predecessors is created. This constructor contains an
1668 optimization: If all predecessors of the Phi node are identical it
1669 returns the only operand instead of a new Phi node. */
1670 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1672 /* In case we allocated a Phi0 node at the beginning of this procedure,
1673 we need to exchange this Phi0 with the real Phi. */
1675 exchange(phi0, res);
1676 block->attr.block.graph_arr[pos] = res;
1677 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1678 only an optimization. */
1684 /* This function returns the last definition of a variable. In case
1685 this variable was last defined in a previous block, Phi nodes are
1686 inserted. If the part of the firm graph containing the definition
1687 is not yet constructed, a dummy Phi node is returned. */
/* PRECISE_EXC_CONTEXT variant: returns the last definition of slot 'pos'
   valid in 'block', inserting Phi/Phi0 nodes as needed. Unlike the other
   variant, a revisit (case 4) always finds a Phi0 in graph_arr and returns it.
   NOTE(review): this excerpt omits some original lines; code tokens kept verbatim. */
1689 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1692 /* There are 4 cases to treat.
1694 1. The block is not mature and we visit it the first time. We can not
1695 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1696 predecessors is returned. This node is added to the linked list (field
1697 "link") of the containing block to be completed when this block is
1698 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1701 2. The value is already known in this block, graph_arr[pos] is set and we
1702 visit the block the first time. We can return the value without
1703 creating any new nodes.
1705 3. The block is mature and we visit it the first time. A Phi node needs
1706 to be created (phi_merge). If the Phi is not needed, as all it's
1707 operands are the same value reaching the block through different
1708 paths, it's optimized away and the value itself is returned.
1710 4. The block is mature, and we visit it the second time. Now two
1711 subcases are possible:
1712 * The value was computed completely the last time we were here. This
1713 is the case if there is no loop. We can return the proper value.
1714 * The recursion that visited this node and set the flag did not
1715 return yet. We are computing a value in a loop and need to
1716 break the recursion. This case only happens if we visited
1717 the same block with phi_merge before, which inserted a Phi0.
1718 So we return the Phi0.
1721 /* case 4 -- already visited. */
1722 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1723 /* As phi_merge allocates a Phi0 this value is always defined. Here
1724 is the critical difference of the two algorithms. */
1725 assert(block->attr.block.graph_arr[pos]);
1726 return block->attr.block.graph_arr[pos];
1729 /* visited the first time */
1730 set_irn_visited(block, get_irg_visited(current_ir_graph));
1732 /* Get the local valid value */
1733 res = block->attr.block.graph_arr[pos];
1735 /* case 2 -- If the value is actually computed, return it. */
1736 if (res) { return res; };
1738 if (block->attr.block.matured) { /* case 3 */
1740 /* The Phi has the same amount of ins as the corresponding block. */
1741 int ins = get_irn_arity(block);
1743 NEW_ARR_A (ir_node *, nin, ins);
1745 /* Phi merge collects the predecessors and then creates a node. */
1746 res = phi_merge (block, pos, mode, nin, ins);
1748 } else { /* case 1 */
1749 /* The block is not mature, we don't know how many in's are needed. A Phi
1750 with zero predecessors is created. Such a Phi node is called Phi0
1751 node. The Phi0 is then added to the list of Phi0 nodes in this block
1752 to be matured by mature_block later.
1753 The Phi0 has to remember the pos of it's internal value. If the real
1754 Phi is computed, pos is used to update the array with the local
1756 res = new_rd_Phi0 (current_ir_graph, block, mode);
1757 res->attr.phi0_pos = pos;
1758 res->link = block->link;
1762 /* If we get here, the frontend missed a use-before-definition error */
1765 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1766 assert (mode->code >= irm_F && mode->code <= irm_P);
1767 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1768 get_mode_null(mode));
1771 /* The local valid value is available now. */
1772 block->attr.block.graph_arr[pos] = res;
1777 #endif /* USE_FAST_PHI_CONSTRUCTION */
1779 /* ************************************************************************** */
1781 /** Finalize a Block node, when all control flows are known. */
1782 /** Acceptable parameters are only Block nodes. */
/* Finalizes a Block once all its control-flow predecessors are known:
   fixes the backedge array, upgrades every Phi0 queued on block->link into
   a real Phi via phi_merge, marks the block matured, and optimizes it
   in place.
   NOTE(review): this excerpt omits some original lines; code tokens kept verbatim. */
1784 mature_block (ir_node *block)
1791 assert (get_irn_opcode(block) == iro_Block);
1792 /* @@@ should be commented in
1793 assert (!get_Block_matured(block) && "Block already matured"); */
/* maturing an already matured block is silently a no-op */
1795 if (!get_Block_matured(block)) {
1796 ins = ARR_LEN (block->in)-1;
1797 /* Fix block parameters */
1798 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1800 /* An array for building the Phi nodes. */
1801 NEW_ARR_A (ir_node *, nin, ins);
1803 /* Traverse a chain of Phi nodes attached to this block and mature
1805 for (n = block->link; n; n=next) {
1806 inc_irg_visited(current_ir_graph);
1808 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1811 block->attr.block.matured = 1;
1813 /* Now, as the block is a finished firm node, we can optimize it.
1814 Since other nodes have been allocated since the block was created
1815 we can not free the node on the obstack. Therefore we have to call
1817 Unfortunately the optimization does not change a lot, as all allocated
1818 nodes refer to the unoptimized node.
1819 We can call _2, as global cse has no effect on blocks. */
1820 block = optimize_in_place_2(block);
1821 irn_vrfy_irg(block, current_ir_graph);
/* ------------------------------------------------------------------ */
/* new_d_* constructors: thin wrappers that forward to the new_rd_*   */
/* constructors using current_ir_graph and its current block. Const   */
/* and SymConst are placed in the start block instead. Fragile ops    */
/* (Quot, DivMod, Div, Mod, Call, Load, Store, Alloc) additionally    */
/* attach a frag_arr snapshot under PRECISE_EXC_CONTEXT while the     */
/* graph is still in phase_building.                                  */
/* NOTE(review): this excerpt omits some original lines (return       */
/* types, argument continuations, braces); code tokens kept verbatim. */
/* ------------------------------------------------------------------ */
1826 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1828 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
1833 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
/* constants live in the start block, not the current block */
1835 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1840 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1842 return new_rd_Const_type (db, current_ir_graph, current_ir_graph->start_block,
1848 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1850 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1855 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1857 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
1862 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1865 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1866 arg->attr.c.kind = fragmentary;
1867 arg->attr.c.default_proj = max_proj;
1868 res = new_Proj (arg, mode_X, max_proj);
1873 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1875 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1880 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1882 return new_rd_Cast (db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1886 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1888 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1893 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1895 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1900 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1902 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1908 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1910 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1915 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1917 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1922 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1925 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1927 #if PRECISE_EXC_CONTEXT
1928 if ((current_ir_graph->phase_state == phase_building) &&
1929 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1930 res->attr.frag_arr = new_frag_arr(res);
1937 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1940 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1942 #if PRECISE_EXC_CONTEXT
1943 if ((current_ir_graph->phase_state == phase_building) &&
1944 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1945 res->attr.frag_arr = new_frag_arr(res);
1952 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1955 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1957 #if PRECISE_EXC_CONTEXT
1958 if ((current_ir_graph->phase_state == phase_building) &&
1959 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1960 res->attr.frag_arr = new_frag_arr(res);
1967 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1970 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1972 #if PRECISE_EXC_CONTEXT
1973 if ((current_ir_graph->phase_state == phase_building) &&
1974 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1975 res->attr.frag_arr = new_frag_arr(res);
1982 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1984 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1989 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1991 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1996 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1998 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2003 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2005 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2010 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2012 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2017 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2019 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2024 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2026 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2031 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2033 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2038 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2040 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2045 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2047 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2052 new_d_Jmp (dbg_info* db)
2054 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2058 new_d_Cond (dbg_info* db, ir_node *c)
2060 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2064 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2068 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2069 store, callee, arity, in, tp);
2070 #if PRECISE_EXC_CONTEXT
2071 if ((current_ir_graph->phase_state == phase_building) &&
2072 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
2073 res->attr.call.frag_arr = new_frag_arr(res);
2080 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2082 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2087 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2089 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2094 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2097 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2099 #if PRECISE_EXC_CONTEXT
2100 if ((current_ir_graph->phase_state == phase_building) &&
2101 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
2102 res->attr.frag_arr = new_frag_arr(res);
2109 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2112 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2114 #if PRECISE_EXC_CONTEXT
2115 if ((current_ir_graph->phase_state == phase_building) &&
2116 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
2117 res->attr.frag_arr = new_frag_arr(res);
2124 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2128 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2129 store, size, alloc_type, where);
2130 #if PRECISE_EXC_CONTEXT
2131 if ((current_ir_graph->phase_state == phase_building) &&
2132 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
2133 res->attr.a.frag_arr = new_frag_arr(res);
2140 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2142 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2143 store, ptr, size, free_type);
2147 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2148 /* GL: objptr was called frame before. Frame was a bad choice for the name
2149 as the operand could as well be a pointer to a dynamic object. */
2151 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2152 store, objptr, 0, NULL, ent);
2156 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2158 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2159 store, objptr, n_index, index, sel);
2163 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2165 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2166 store, objptr, ent));
2170 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2172 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2177 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2179 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* the graph holds a single shared Bad node */
2187 return current_ir_graph->bad;
2191 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2193 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2198 new_d_Unknown (ir_mode *m)
2200 return new_rd_Unknown(current_ir_graph, m);
2204 new_d_CallBegin (dbg_info *db, ir_node *call)
2207 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2212 new_d_EndReg (dbg_info *db)
2215 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2220 new_d_EndExcept (dbg_info *db)
2223 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2228 new_d_Break (dbg_info *db)
2230 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2234 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2236 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2241 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2245 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2246 callee, arity, in, tp);
2251 /* ********************************************************************* */
2252 /* Comfortable interface with automatic Phi node construction. */
2253 /* (Uses also constructors of ?? interface, except new_Block. */
2254 /* ********************************************************************* */
2256 /** Block construction **/
2257 /* immature Block without predecessors */
/* Creates an immature Block (dynamic in-array, arity -1), makes it the
   current block, and allocates a zeroed graph_arr of n_loc slots for the
   automatic Phi construction. The block must later be mature_block()ed.
   NOTE(review): this excerpt omits some original lines; code tokens kept verbatim. */
2258 ir_node *new_d_immBlock (dbg_info* db) {
2261 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2262 /* creates a new dynamic in-array as length of in is -1 */
2263 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2264 current_ir_graph->current_block = res;
2265 res->attr.block.matured = 0;
2266 //res->attr.block.exc = exc_normal;
2267 //res->attr.block.handler_entry = 0;
2268 res->attr.block.irg = current_ir_graph;
2269 res->attr.block.backedge = NULL;
2270 res->attr.block.in_cg = NULL;
2271 res->attr.block.cg_backedge = NULL;
2272 set_Block_block_visited(res, 0);
2274 /* Create and initialize array for Phi-node construction. */
2275 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2276 current_ir_graph->n_loc);
2277 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2279 /* Immature block may not be optimized! */
2280 irn_vrfy_irg (res, current_ir_graph);
/* new_immBlock: debug-info-free convenience wrapper */
2287 return new_d_immBlock(NULL);
2290 /* add an edge to a jmp/control flow node */
/* Appends control-flow predecessor 'jmp' to an immature block; asserts if
   the block was already matured. */
2292 add_in_edge (ir_node *block, ir_node *jmp)
2294 if (block->attr.block.matured) {
2295 assert(0 && "Error: Block already matured!\n");
2298 assert (jmp != NULL);
2299 ARR_APP1 (ir_node *, block->in, jmp);
2303 /* changing the current block */
/* Makes 'target' the block subsequent constructors are placed in. */
2305 switch_block (ir_node *target)
2307 current_ir_graph->current_block = target;
2310 /* ************************ */
2311 /* parameter administration */
/* Slot 0 of graph_arr holds the memory store; user values occupy pos+1. */
2313 /* get a value from the parameter array from the current block by its index */
2315 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2317 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2318 inc_irg_visited(current_ir_graph);
2320 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2322 /* get a value from the parameter array from the current block by its index */
2324 get_value (int pos, ir_mode *mode)
2326 return get_d_value(NULL, pos, mode);
2329 /* set a value at position pos in the parameter array from the current block */
2331 set_value (int pos, ir_node *value)
2333 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2334 assert(pos+1 < current_ir_graph->n_loc);
2335 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2338 /* get the current store */
2342 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2343 /* GL: one could call get_value instead */
2344 inc_irg_visited(current_ir_graph);
/* the store is value slot 0 with memory mode */
2345 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2348 /* set the current store */
2350 set_store (ir_node *store)
2352 /* GL: one could call set_value instead */
2353 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2354 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Registers 'ka' as a keep-alive edge at the graph's End node so it is
   not removed as dead code. */
2358 keep_alive (ir_node *ka)
2360 add_End_keepalive(current_ir_graph->end, ka);
2363 /** Useful access routines **/
2364 /* Returns the current block of the current graph. To set the current
2365 block use switch_block(). */
2366 ir_node *get_cur_block() {
2367 return get_irg_current_block(current_ir_graph);
2370 /* Returns the frame type of the current graph */
2371 type *get_cur_frame_type() {
2372 return get_irg_frame_type(current_ir_graph);
2376 /* ********************************************************************* */
2379 /* call once for each run of the library */
/* Registers the frontend's callback used to initialize local variables that
   are read before being defined (see phi_merge's start-block handling). */
2381 init_cons (default_initialize_local_variable_func_t *func)
2383 default_initialize_local_variable = func;
2386 /* call for each graph */
/* Marks graph construction as finished by advancing its phase state. */
2388 finalize_cons (ir_graph *irg) {
2389 irg->phase_state = phase_high;
/* ------------------------------------------------------------------ */
/* Convenience constructors without debug info: each forwards to the  */
/* corresponding new_d_* constructor with a NULL dbg_info.            */
/* NOTE(review): closing braces of these one-liners are missing from  */
/* this excerpt; code tokens kept verbatim.                           */
/* ------------------------------------------------------------------ */
2393 ir_node *new_Block(int arity, ir_node **in) {
2394 return new_d_Block(NULL, arity, in);
2396 ir_node *new_Start (void) {
2397 return new_d_Start(NULL);
2399 ir_node *new_End (void) {
2400 return new_d_End(NULL);
2402 ir_node *new_Jmp (void) {
2403 return new_d_Jmp(NULL);
2405 ir_node *new_Cond (ir_node *c) {
2406 return new_d_Cond(NULL, c);
2408 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2409 return new_d_Return(NULL, store, arity, in);
2411 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2412 return new_d_Raise(NULL, store, obj);
2414 ir_node *new_Const (ir_mode *mode, tarval *con) {
2415 return new_d_Const(NULL, mode, con);
2417 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2418 return new_d_SymConst(NULL, value, kind);
2420 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2421 return new_d_simpleSel(NULL, store, objptr, ent);
2423 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2425 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2427 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2428 return new_d_InstOf (NULL, store, objptr, ent);
2430 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2432 return new_d_Call(NULL, store, callee, arity, in, tp);
2434 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2435 return new_d_Add(NULL, op1, op2, mode);
2437 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2438 return new_d_Sub(NULL, op1, op2, mode);
2440 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2441 return new_d_Minus(NULL, op, mode);
2443 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2444 return new_d_Mul(NULL, op1, op2, mode);
2446 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2447 return new_d_Quot(NULL, memop, op1, op2);
2449 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2450 return new_d_DivMod(NULL, memop, op1, op2);
2452 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2453 return new_d_Div(NULL, memop, op1, op2);
2455 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2456 return new_d_Mod(NULL, memop, op1, op2);
2458 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2459 return new_d_Abs(NULL, op, mode);
2461 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2462 return new_d_And(NULL, op1, op2, mode);
2464 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2465 return new_d_Or(NULL, op1, op2, mode);
2467 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2468 return new_d_Eor(NULL, op1, op2, mode);
2470 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2471 return new_d_Not(NULL, op, mode);
2473 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2474 return new_d_Shl(NULL, op, k, mode);
2476 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2477 return new_d_Shr(NULL, op, k, mode);
2479 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2480 return new_d_Shrs(NULL, op, k, mode);
/* alias kept for backward compatibility */
2482 #define new_Rotate new_Rot
2483 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2484 return new_d_Rot(NULL, op, k, mode);
2486 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2487 return new_d_Cmp(NULL, op1, op2);
2489 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2490 return new_d_Conv(NULL, op, mode);
2492 ir_node *new_Cast (ir_node *op, type *to_tp) {
2493 return new_d_Cast(NULL, op, to_tp);
2495 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2496 return new_d_Phi(NULL, arity, in, mode);
2498 ir_node *new_Load (ir_node *store, ir_node *addr) {
2499 return new_d_Load(NULL, store, addr);
2501 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2502 return new_d_Store(NULL, store, addr, val);
2504 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2505 where_alloc where) {
2506 return new_d_Alloc(NULL, store, size, alloc_type, where);
2508 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2510 return new_d_Free(NULL, store, ptr, size, free_type);
2512 ir_node *new_Sync (int arity, ir_node **in) {
2513 return new_d_Sync(NULL, arity, in);
2515 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2516 return new_d_Proj(NULL, arg, mode, proj);
2518 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2519 return new_d_defaultProj(NULL, arg, max_proj);
2521 ir_node *new_Tuple (int arity, ir_node **in) {
2522 return new_d_Tuple(NULL, arity, in);
2524 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2525 return new_d_Id(NULL, val, mode);
2527 ir_node *new_Bad (void) {
2530 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2531 return new_d_Confirm (NULL, val, bound, cmp);
2533 ir_node *new_Unknown(ir_mode *m) {
2534 return new_d_Unknown(m);
2536 ir_node *new_CallBegin (ir_node *callee) {
2537 return new_d_CallBegin(NULL, callee);
2539 ir_node *new_EndReg (void) {
2540 return new_d_EndReg(NULL);
2542 ir_node *new_EndExcept (void) {
2543 return new_d_EndExcept(NULL);
2545 ir_node *new_Break (void) {
2546 return new_d_Break(NULL);
2548 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2549 return new_d_Filter(NULL, arg, mode, proj);
2551 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2552 return new_d_FuncCall(NULL, callee, arity, in, tp);