3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
44 * language dependent initialization variable
46 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
48 /*** ******************************************** */
49 /** private interfaces, for professional use only */
51 /* Constructs a Block with a fixed number of predecessors.
52 Does not set current_block. Can not be used with automatic
53 Phi node construction. */
55 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
59 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
60 set_Block_matured(res, 1);
61 set_Block_block_visited(res, 0);
63 //res->attr.block.exc = exc_normal;
64 //res->attr.block.handler_entry = 0;
65 res->attr.block.irg = irg;
66 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
67 res->attr.block.in_cg = NULL;
68 res->attr.block.cg_backedge = NULL;
70 irn_vrfy_irg (res, irg);
75 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
79 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
80 //res->attr.start.irg = irg;
82 irn_vrfy_irg (res, irg);
87 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
91 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
93 irn_vrfy_irg (res, irg);
97 /* Creates a Phi node with all predecessors. Calling this constructor
98 is only allowed if the corresponding block is mature. */
100 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
104 bool has_unknown = false;
106 assert( get_Block_matured(block) );
107 assert( get_irn_arity(block) == arity );
109 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
111 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
113 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
114 if (!has_unknown) res = optimize_node (res);
115 irn_vrfy_irg (res, irg);
117 /* Memory Phis in endless loops must be kept alive.
118 As we can't distinguish these easily we keep all of them alive. */
119 if ((res->op == op_Phi) && (mode == mode_M))
120 add_End_keepalive(irg->end, res);
125 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
128 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
129 res->attr.con.tv = con;
130 set_Const_type(res, tp); /* Call method because of complex assertion. */
131 res = optimize_node (res);
132 assert(get_Const_type(res) == tp);
133 irn_vrfy_irg (res, irg);
136 res = local_optimize_newby (res);
143 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
145 type *tp = unknown_type;
146 if (tarval_is_entity(con))
147 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
148 return new_rd_Const_type (db, irg, block, mode, con, tp);
152 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
157 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
158 res = optimize_node (res);
159 irn_vrfy_irg (res, irg);
164 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
170 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
171 res->attr.proj = proj;
174 assert(get_Proj_pred(res));
175 assert(get_nodes_Block(get_Proj_pred(res)));
177 res = optimize_node (res);
179 irn_vrfy_irg (res, irg);
185 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
189 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
190 arg->attr.c.kind = fragmentary;
191 arg->attr.c.default_proj = max_proj;
192 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
197 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
202 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
203 res = optimize_node (res);
204 irn_vrfy_irg (res, irg);
209 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
212 res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
213 res->attr.cast.totype = to_tp;
214 res = optimize_node (res);
215 irn_vrfy_irg (res, irg);
220 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
224 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
225 res = optimize_node (res);
226 irn_vrfy_irg (res, irg);
231 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
232 ir_node *op1, ir_node *op2, ir_mode *mode)
238 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
239 res = optimize_node (res);
240 irn_vrfy_irg (res, irg);
245 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
246 ir_node *op1, ir_node *op2, ir_mode *mode)
252 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
253 res = optimize_node (res);
254 irn_vrfy_irg (res, irg);
259 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
260 ir_node *op, ir_mode *mode)
265 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
266 res = optimize_node (res);
267 irn_vrfy_irg (res, irg);
272 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
273 ir_node *op1, ir_node *op2, ir_mode *mode)
279 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
280 res = optimize_node (res);
281 irn_vrfy_irg (res, irg);
286 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
287 ir_node *memop, ir_node *op1, ir_node *op2)
294 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
295 res = optimize_node (res);
296 irn_vrfy_irg (res, irg);
301 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
302 ir_node *memop, ir_node *op1, ir_node *op2)
309 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
310 res = optimize_node (res);
311 irn_vrfy_irg (res, irg);
316 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
317 ir_node *memop, ir_node *op1, ir_node *op2)
324 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
325 res = optimize_node (res);
326 irn_vrfy_irg (res, irg);
331 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
332 ir_node *memop, ir_node *op1, ir_node *op2)
339 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
340 res = optimize_node (res);
341 irn_vrfy_irg (res, irg);
346 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
347 ir_node *op1, ir_node *op2, ir_mode *mode)
353 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
354 res = optimize_node (res);
355 irn_vrfy_irg (res, irg);
360 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
361 ir_node *op1, ir_node *op2, ir_mode *mode)
367 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
368 res = optimize_node (res);
369 irn_vrfy_irg (res, irg);
374 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
375 ir_node *op1, ir_node *op2, ir_mode *mode)
381 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
382 res = optimize_node (res);
383 irn_vrfy_irg (res, irg);
388 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
389 ir_node *op, ir_mode *mode)
394 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
395 res = optimize_node (res);
396 irn_vrfy_irg (res, irg);
401 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
402 ir_node *op, ir_node *k, ir_mode *mode)
408 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
409 res = optimize_node (res);
410 irn_vrfy_irg (res, irg);
415 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
416 ir_node *op, ir_node *k, ir_mode *mode)
422 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
423 res = optimize_node (res);
424 irn_vrfy_irg (res, irg);
429 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
430 ir_node *op, ir_node *k, ir_mode *mode)
436 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
437 res = optimize_node (res);
438 irn_vrfy_irg (res, irg);
443 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
444 ir_node *op, ir_node *k, ir_mode *mode)
450 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
451 res = optimize_node (res);
452 irn_vrfy_irg (res, irg);
457 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
458 ir_node *op, ir_mode *mode)
463 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
464 res = optimize_node (res);
465 irn_vrfy_irg (res, irg);
470 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
471 ir_node *op1, ir_node *op2)
477 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
478 res = optimize_node (res);
479 irn_vrfy_irg (res, irg);
484 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
487 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
488 res = optimize_node (res);
489 irn_vrfy_irg (res, irg);
494 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
499 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
500 res->attr.c.kind = dense;
501 res->attr.c.default_proj = 0;
502 res = optimize_node (res);
503 irn_vrfy_irg (res, irg);
508 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
509 ir_node *callee, int arity, ir_node **in, type *tp)
516 NEW_ARR_A (ir_node *, r_in, r_arity);
519 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
521 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
523 assert(is_method_type(tp));
524 set_Call_type(res, tp);
525 res->attr.call.callee_arr = NULL;
526 res = optimize_node (res);
527 irn_vrfy_irg (res, irg);
532 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
533 ir_node *store, int arity, ir_node **in)
540 NEW_ARR_A (ir_node *, r_in, r_arity);
542 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
543 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
544 res = optimize_node (res);
545 irn_vrfy_irg (res, irg);
550 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
556 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
557 res = optimize_node (res);
558 irn_vrfy_irg (res, irg);
563 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
564 ir_node *store, ir_node *adr)
570 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
572 res = optimize_node (res);
573 irn_vrfy_irg (res, irg);
578 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
579 ir_node *store, ir_node *adr, ir_node *val)
586 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
588 res = optimize_node (res);
590 irn_vrfy_irg (res, irg);
595 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
596 ir_node *size, type *alloc_type, where_alloc where)
602 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
604 res->attr.a.where = where;
605 res->attr.a.type = alloc_type;
607 res = optimize_node (res);
608 irn_vrfy_irg (res, irg);
613 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
614 ir_node *ptr, ir_node *size, type *free_type)
621 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
623 res->attr.f = free_type;
625 res = optimize_node (res);
626 irn_vrfy_irg (res, irg);
631 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
632 int arity, ir_node **in, entity *ent)
638 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
641 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
644 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
645 res = new_ir_node (db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
647 res->attr.s.ent = ent;
649 res = optimize_node (res);
650 irn_vrfy_irg (res, irg);
655 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
656 ir_node *objptr, type *ent)
663 NEW_ARR_A (ir_node *, r_in, r_arity);
667 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
669 res->attr.io.ent = ent;
671 /* res = optimize (res);
672 * irn_vrfy_irg (res, irg); */
677 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
678 symconst_kind symkind)
682 if (symkind == linkage_ptr_info)
686 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
688 res->attr.i.num = symkind;
689 if (symkind == linkage_ptr_info) {
690 res->attr.i.tori.ptrinfo = (ident *)value;
692 assert ( ( (symkind == type_tag)
693 || (symkind == size))
694 && (is_type(value)));
695 res->attr.i.tori.typ = (type *)value;
697 res = optimize_node (res);
698 irn_vrfy_irg (res, irg);
703 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
707 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
709 res = optimize_node (res);
710 irn_vrfy_irg (res, irg);
715 new_rd_Bad (ir_graph *irg)
721 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
723 ir_node *in[2], *res;
727 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
729 res->attr.confirm_cmp = cmp;
731 res = optimize_node (res);
732 irn_vrfy_irg(res, irg);
737 new_rd_Unknown (ir_graph *irg, ir_mode *m)
739 return new_ir_node (NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
743 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
747 in[0] = get_Call_ptr(call);
748 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
749 //res->attr.callbegin.irg = irg;
750 res->attr.callbegin.call = call;
751 res = optimize_node (res);
752 irn_vrfy_irg (res, irg);
757 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
761 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
762 //res->attr.end.irg = irg;
764 irn_vrfy_irg (res, irg);
769 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
773 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
774 //res->attr.end.irg = irg;
776 irn_vrfy_irg (res, irg);
781 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
784 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
785 res = optimize_node (res);
786 irn_vrfy_irg (res, irg);
791 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
797 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
798 res->attr.filter.proj = proj;
799 res->attr.filter.in_cg = NULL;
800 res->attr.filter.backedge = NULL;
803 assert(get_Proj_pred(res));
804 assert(get_nodes_Block(get_Proj_pred(res)));
806 res = optimize_node (res);
808 irn_vrfy_irg (res, irg);
814 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
815 ir_node *callee, int arity, ir_node **in, type *tp)
822 NEW_ARR_A (ir_node *, r_in, r_arity);
824 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
826 res = new_ir_node (db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
828 assert(is_method_type(tp));
829 set_FuncCall_type(res, tp);
830 res->attr.call.callee_arr = NULL;
831 res = optimize_node (res);
832 irn_vrfy_irg (res, irg);
837 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
838 return new_rd_Block(NULL, irg, arity, in);
840 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
841 return new_rd_Start(NULL, irg, block);
843 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
844 return new_rd_End(NULL, irg, block);
846 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
847 return new_rd_Jmp(NULL, irg, block);
849 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
850 return new_rd_Cond(NULL, irg, block, c);
852 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
853 ir_node *store, int arity, ir_node **in) {
854 return new_rd_Return(NULL, irg, block, store, arity, in);
856 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
857 ir_node *store, ir_node *obj) {
858 return new_rd_Raise(NULL, irg, block, store, obj);
860 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
861 ir_mode *mode, tarval *con) {
862 return new_rd_Const(NULL, irg, block, mode, con);
864 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
865 type_or_id_p value, symconst_kind symkind) {
866 return new_rd_SymConst(NULL, irg, block, value, symkind);
868 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
869 ir_node *objptr, int n_index, ir_node **index,
871 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
873 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
875 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
877 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
878 ir_node *callee, int arity, ir_node **in,
880 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
882 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
883 ir_node *op1, ir_node *op2, ir_mode *mode) {
884 return new_rd_Add(NULL, irg, block, op1, op2, mode);
886 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
887 ir_node *op1, ir_node *op2, ir_mode *mode) {
888 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
890 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
891 ir_node *op, ir_mode *mode) {
892 return new_rd_Minus(NULL, irg, block, op, mode);
894 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
895 ir_node *op1, ir_node *op2, ir_mode *mode) {
896 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
898 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
899 ir_node *memop, ir_node *op1, ir_node *op2) {
900 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
902 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
903 ir_node *memop, ir_node *op1, ir_node *op2) {
904 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
906 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
907 ir_node *memop, ir_node *op1, ir_node *op2) {
908 return new_rd_Div(NULL, irg, block, memop, op1, op2);
910 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
911 ir_node *memop, ir_node *op1, ir_node *op2) {
912 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
914 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
915 ir_node *op, ir_mode *mode) {
916 return new_rd_Abs(NULL, irg, block, op, mode);
918 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
919 ir_node *op1, ir_node *op2, ir_mode *mode) {
920 return new_rd_And(NULL, irg, block, op1, op2, mode);
922 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
923 ir_node *op1, ir_node *op2, ir_mode *mode) {
924 return new_rd_Or(NULL, irg, block, op1, op2, mode);
926 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
927 ir_node *op1, ir_node *op2, ir_mode *mode) {
928 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
930 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
931 ir_node *op, ir_mode *mode) {
932 return new_rd_Not(NULL, irg, block, op, mode);
934 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
935 ir_node *op1, ir_node *op2) {
936 return new_rd_Cmp(NULL, irg, block, op1, op2);
938 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
939 ir_node *op, ir_node *k, ir_mode *mode) {
940 return new_rd_Shl(NULL, irg, block, op, k, mode);
942 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
943 ir_node *op, ir_node *k, ir_mode *mode) {
944 return new_rd_Shr(NULL, irg, block, op, k, mode);
946 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
947 ir_node *op, ir_node *k, ir_mode *mode) {
948 return new_rd_Shrs(NULL, irg, block, op, k, mode);
950 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
951 ir_node *op, ir_node *k, ir_mode *mode) {
952 return new_rd_Rot(NULL, irg, block, op, k, mode);
954 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
955 ir_node *op, ir_mode *mode) {
956 return new_rd_Conv(NULL, irg, block, op, mode);
958 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
959 return new_rd_Cast(NULL, irg, block, op, to_tp);
961 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
962 ir_node **in, ir_mode *mode) {
963 return new_rd_Phi(NULL, irg, block, arity, in, mode);
965 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
966 ir_node *store, ir_node *adr) {
967 return new_rd_Load(NULL, irg, block, store, adr);
969 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
970 ir_node *store, ir_node *adr, ir_node *val) {
971 return new_rd_Store(NULL, irg, block, store, adr, val);
973 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
974 ir_node *size, type *alloc_type, where_alloc where) {
975 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
977 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
978 ir_node *ptr, ir_node *size, type *free_type) {
979 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
981 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
982 return new_rd_Sync(NULL, irg, block, arity, in);
984 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
985 ir_mode *mode, long proj) {
986 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
988 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
990 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
992 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
993 int arity, ir_node **in) {
994 return new_rd_Tuple(NULL, irg, block, arity, in );
996 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
997 ir_node *val, ir_mode *mode) {
998 return new_rd_Id(NULL, irg, block, val, mode);
1000 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1001 return new_rd_Bad(irg);
1003 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1004 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1006 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1007 return new_rd_Unknown(irg, m);
1009 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1010 return new_rd_CallBegin(NULL, irg, block, callee);
1012 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1013 return new_rd_EndReg(NULL, irg, block);
1015 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1016 return new_rd_EndExcept(NULL, irg, block);
1018 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1019 return new_rd_Break(NULL, irg, block);
1021 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1022 ir_mode *mode, long proj) {
1023 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1025 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1026 ir_node *callee, int arity, ir_node **in,
1028 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1032 /** ********************/
1033 /** public interfaces */
1034 /** construction tools */
1038 * - create a new Start node in the current block
1040 * @return s - pointer to the created Start node
1045 new_d_Start (dbg_info* db)
1049 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1050 op_Start, mode_T, 0, NULL);
1051 //res->attr.start.irg = current_ir_graph;
1053 res = optimize_node (res);
1054 irn_vrfy_irg (res, current_ir_graph);
1059 new_d_End (dbg_info* db)
1062 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1063 op_End, mode_X, -1, NULL);
1064 res = optimize_node (res);
1065 irn_vrfy_irg (res, current_ir_graph);
1070 /* Constructs a Block with a fixed number of predecessors.
1071 Does set current_block. Can be used with automatic Phi
1072 node construction. */
1074 new_d_Block (dbg_info* db, int arity, ir_node **in)
1078 bool has_unknown = false;
1080 res = new_rd_Block (db, current_ir_graph, arity, in);
1082 /* Create and initialize array for Phi-node construction. */
1083 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1084 current_ir_graph->n_loc);
1085 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1087 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1089 if (!has_unknown) res = optimize_node (res);
1090 current_ir_graph->current_block = res;
1092 irn_vrfy_irg (res, current_ir_graph);
1097 /* ***********************************************************************/
1098 /* Methods necessary for automatic Phi node creation */
1100 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1101 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1102 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1103 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1105 Call Graph: ( A ---> B == A "calls" B)
1107 get_value mature_block
1115 get_r_value_internal |
1119 new_rd_Phi0 new_rd_Phi_in
1121 * *************************************************************************** */
1123 /* Creates a Phi node with 0 predecessors */
1124 static INLINE ir_node *
1125 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1128 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1129 irn_vrfy_irg (res, irg);
1133 /* There are two implementations of the Phi node construction. The first
1134 is faster, but does not work for blocks with more than 2 predecessors.
1135 The second works always but is slower and causes more unnecessary Phi
1137 Select the implementations by the following preprocessor flag set in
1139 #if USE_FAST_PHI_CONSTRUCTION
1141 /* This is a stack used for allocating and deallocating nodes in
1142 new_rd_Phi_in. The original implementation used the obstack
1143 to model this stack, now it is explicit. This reduces side effects.
1145 #if USE_EXPLICIT_PHI_IN_STACK
1146 INLINE Phi_in_stack *
1147 new_Phi_in_stack() {
1150 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1152 res->stack = NEW_ARR_F (ir_node *, 1);
1159 free_Phi_in_stack(Phi_in_stack *s) {
1160 DEL_ARR_F(s->stack);
1164 free_to_Phi_in_stack(ir_node *phi) {
1165 assert(get_irn_opcode(phi) == iro_Phi);
1167 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1168 current_ir_graph->Phi_in_stack->pos)
1169 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1171 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1173 (current_ir_graph->Phi_in_stack->pos)++;
1176 static INLINE ir_node *
1177 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1178 int arity, ir_node **in) {
1180 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1181 int pos = current_ir_graph->Phi_in_stack->pos;
1185 /* We need to allocate a new node */
1186 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1187 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1189 /* reuse the old node and initialize it again. */
1192 assert (res->kind == k_ir_node);
1193 assert (res->op == op_Phi);
1197 assert (arity >= 0);
1198 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1199 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1201 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1203 (current_ir_graph->Phi_in_stack->pos)--;
1207 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1209 /* Creates a Phi node with a given, fixed array **in of predecessors.
1210 If the Phi node is unnecessary, as the same value reaches the block
1211 through all control flow paths, it is eliminated and the value
1212 returned directly. This constructor is only intended for use in
1213 the automatic Phi node generation triggered by get_value or mature.
1214 The implementation is quite tricky and depends on the fact, that
1215 the nodes are allocated on a stack:
1216 The in array contains predecessors and NULLs. The NULLs appear,
1217 if get_r_value_internal, that computed the predecessors, reached
1218 the same block on two paths. In this case the same value reaches
1219 this block on both paths, there is no definition in between. We need
1220 not allocate a Phi where these path's merge, but we have to communicate
1221 this fact to the caller. This happens by returning a pointer to the
1222 node the caller _will_ allocate. (Yes, we predict the address. We can
1223 do so because the nodes are allocated on the obstack.) The caller then
1224 finds a pointer to itself and, when this routine is called again,
1227 static INLINE ir_node *
1228 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1229 ir_node **in, int ins)
1232 ir_node *res, *known;
1234 /* allocate a new node on the obstack.
1235 This can return a node to which some of the pointers in the in-array
1237 Attention: the constructor copies the in array, i.e., the later changes
1238 to the array in this routine do not affect the constructed node! If
1239 the in array contains NULLs, there will be missing predecessors in the
1241 Is this a possible internal state of the Phi node generation? */
1242 #if USE_EXPLICIT_PHI_IN_STACK
1243 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1245 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1246 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1248 /* The in-array can contain NULLs. These were returned by
1249 get_r_value_internal if it reached the same block/definition on a
1251 The NULLs are replaced by the node itself to simplify the test in the
1253 for (i=0; i < ins; ++i)
1254 if (in[i] == NULL) in[i] = res;
1256 /* This loop checks whether the Phi has more than one predecessor.
1257 If so, it is a real Phi node and we break the loop. Else the
1258 Phi node merges the same definition on several paths and therefore
1260 for (i=0; i < ins; ++i)
1262 if (in[i]==res || in[i]==known) continue;
1270 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1272 #if USE_EXPLICIT_PHI_IN_STACK
1273 free_to_Phi_in_stack(res);
1275 obstack_free (current_ir_graph->obst, res);
1279 res = optimize_node (res);
1280 irn_vrfy_irg (res, irg);
1283 /* return the pointer to the Phi node. This node might be deallocated! */
1288 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1291 allocates and returns this node. The routine called to allocate the
1292 node might optimize it away and return a real value, or even a pointer
1293 to a deallocated Phi node on top of the obstack!
1294 This function is called with an in-array of proper size. **/
/* phi_merge: collect the operand of the Phi for variable slot `pos` from every
   predecessor block (via get_r_value_internal) and build the Phi node.
   NOTE(review): this excerpt is missing intermediate source lines
   (return type, braces, declarations elided by extraction). */
1296 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1298 ir_node *prevBlock, *res;
1301 /* This loop goes to all predecessor blocks of the block the Phi node is in
1302 and there finds the operands of the Phi node by calling
1303 get_r_value_internal. */
1304 for (i = 1; i <= ins; ++i) {
/* Predecessors start at in[1]; in[0] appears to be reserved (block self slot) —
   assumption from the 1-based loop, confirm against irnode layout. */
1305 assert (block->in[i]);
1306 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1308 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1311 /* After collecting all predecessors into the array nin a new Phi node
1312 with these predecessors is created. This constructor contains an
1313 optimization: If all predecessors of the Phi node are identical it
1314 returns the only operand instead of a new Phi node. If the value
1315 passes two different control flow edges without being defined, and
1316 this is the second path treated, a pointer to the node that will be
1317 allocated for the first path (recursion) is returned. We already
1318 know the address of this node, as it is the next node to be allocated
1319 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1320 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1322 /* Now we know the value for "pos" and can enter it in the array with
1323 all known local variables. Attention: this might be a pointer to
1324 a node, that later will be allocated!!! See new_rd_Phi_in.
1325 If this is called in mature, after some set_value in the same block,
1326 the proper value must not be overwritten:
1328 get_value (makes Phi0, puts it into graph_arr)
1329 set_value (overwrites Phi0 in graph_arr)
1330 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1333 if (!block->attr.block.graph_arr[pos]) {
1334 block->attr.block.graph_arr[pos] = res;
1336 /* printf(" value already computed by %s\n",
1337 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1343 /* This function returns the last definition of a variable. In case
1344 this variable was last defined in a previous block, Phi nodes are
1345 inserted. If the part of the firm graph containing the definition
1346 is not yet constructed, a dummy Phi node is returned. */
1348 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1351 /* There are 4 cases to treat.
1353 1. The block is not mature and we visit it the first time. We can not
1354 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1355 predecessors is returned. This node is added to the linked list (field
1356 "link") of the containing block to be completed when this block is
1357 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1360 2. The value is already known in this block, graph_arr[pos] is set and we
1361 visit the block the first time. We can return the value without
1362 creating any new nodes.
1364 3. The block is mature and we visit it the first time. A Phi node needs
1365 to be created (phi_merge). If the Phi is not needed, as all it's
1366 operands are the same value reaching the block through different
1367 paths, it's optimized away and the value itself is returned.
1369 4. The block is mature, and we visit it the second time. Now two
1370 subcases are possible:
1371 * The value was computed completely the last time we were here. This
1372 is the case if there is no loop. We can return the proper value.
1373 * The recursion that visited this node and set the flag did not
1374 return yet. We are computing a value in a loop and need to
1375 break the recursion without knowing the result yet.
1376 @@@ strange case. Straight forward we would create a Phi before
1377 starting the computation of it's predecessors. In this case we will
1378 find a Phi here in any case. The problem is that this implementation
1379 only creates a Phi after computing the predecessors, so that it is
1380 hard to compute self references of this Phi. @@@
1381 There is no simple check for the second subcase. Therefore we check
1382 for a second visit and treat all such cases as the second subcase.
1383 Anyways, the basic situation is the same: we reached a block
1384 on two paths without finding a definition of the value: No Phi
1385 nodes are needed on both paths.
1386 We return this information "Two paths, no Phi needed" by a very tricky
1387 implementation that relies on the fact that an obstack is a stack and
1388 will return a node with the same address on different allocations.
1389 Look also at phi_merge and new_rd_phi_in to understand this.
1390 @@@ Unfortunately this does not work, see testprogram
1391 three_cfpred_example.
1395 /* case 4 -- already visited. */
/* NOTE(review): in this algorithm variant case 4 yields NULL; callers
   (new_rd_Phi_in's obstack trick above) must cope with that. */
1396 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1398 /* visited the first time */
1399 set_irn_visited(block, get_irg_visited(current_ir_graph));
1401 /* Get the local valid value */
1402 res = block->attr.block.graph_arr[pos];
1404 /* case 2 -- If the value is actually computed, return it. */
1405 if (res) { return res;};
1407 if (block->attr.block.matured) { /* case 3 */
1409 /* The Phi has the same amount of ins as the corresponding block. */
1410 int ins = get_irn_arity(block);
1412 NEW_ARR_A (ir_node *, nin, ins);
1414 /* Phi merge collects the predecessors and then creates a node. */
1415 res = phi_merge (block, pos, mode, nin, ins);
1417 } else { /* case 1 */
1418 /* The block is not mature, we don't know how many in's are needed. A Phi
1419 with zero predecessors is created. Such a Phi node is called Phi0
1420 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1421 to the list of Phi0 nodes in this block to be matured by mature_block
1423 The Phi0 has to remember the pos of it's internal value. If the real
1424 Phi is computed, pos is used to update the array with the local
1427 res = new_rd_Phi0 (current_ir_graph, block, mode);
1428 res->attr.phi0_pos = pos;
1429 res->link = block->link;
1433 /* If we get here, the frontend missed a use-before-definition error */
/* Fallback: fabricate a zero constant so construction can proceed.
   The assert restricts this to modes with a defined null value —
   presumably numeric/pointer modes (irm_F..irm_P); confirm in irmode.h. */
1436 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1437 assert (mode->code >= irm_F && mode->code <= irm_P);
1438 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1439 tarval_mode_null[mode->code]);
1442 /* The local valid value is available now. */
1443 block->attr.block.graph_arr[pos] = res;
1451 it starts the recursion. This causes an Id at the entry of
1452 every block that has no definition of the value! **/
1454 #if USE_EXPLICIT_PHI_IN_STACK
/* Stub implementations: when the explicit Phi-in stack is not used these
   keep the interface alive without allocating anything. */
1456 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1457 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* new_rd_Phi_in (second variant): allocate a Phi on the obstack, then detect
   the degenerate case (all non-Bad predecessors identical) and free it again.
   NOTE(review): interior lines are missing in this excerpt. */
1460 static INLINE ir_node *
1461 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1462 ir_node **in, int ins)
1465 ir_node *res, *known;
1467 /* Allocate a new node on the obstack. The allocation copies the in
1469 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1470 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1472 /* This loop checks whether the Phi has more than one predecessor.
1473 If so, it is a real Phi node and we break the loop. Else the
1474 Phi node merges the same definition on several paths and therefore
1475 is not needed. Don't consider Bad nodes! */
1477 for (i=0; i < ins; ++i)
1481 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1489 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Safe only because res is the most recent obstack allocation. */
1492 obstack_free (current_ir_graph->obst, res);
1495 /* A undefined value, e.g., in unreachable code. */
1499 res = optimize_node (res);
1500 irn_vrfy_irg (res, irg);
1501 /* Memory Phis in endless loops must be kept alive.
1502 As we can't distinguish these easily we keep all of them alive. */
1503 if ((res->op == op_Phi) && (mode == mode_M))
1504 add_End_keepalive(irg->end, res);
1511 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1513 #if PRECISE_EXC_CONTEXT
1515 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* new_frag_arr: snapshot the current block's graph_arr for the fragile op n,
   so the exception path sees the values valid *before* n's side effect.
   Returns the new array (return statement elided in this excerpt). */
1517 static INLINE ir_node ** new_frag_arr (ir_node *n)
1521 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1522 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1523 sizeof(ir_node *)*current_ir_graph->n_loc);
1524 /* turn off optimization before allocating Proj nodes, as res isn't
1526 opt = get_opt_optimize(); set_optimize(0);
1527 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* NOTE(review): for Call the memory result is projected at position 3,
   for all other fragile ops at 0 — confirm against the pn_* enums. */
1528 if (get_irn_op(n) == op_Call)
1529 arr[0] = new_Proj(n, mode_M, 3);
1531 arr[0] = new_Proj(n, mode_M, 0);
/* Remember the most recent fragile op in the reserved last slot; this is
   the chain that set_frag_value() follows. */
1533 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* get_frag_arr: return the frag_arr stored in a fragile op's attributes.
   The attribute location differs per opcode (Call/Alloc have dedicated
   attribute structs; the default case uses the generic attr.frag_arr). */
1537 static INLINE ir_node **
1538 get_frag_arr (ir_node *n) {
1539 if (get_irn_op(n) == op_Call) {
1540 return n->attr.call.frag_arr;
1541 } else if (get_irn_op(n) == op_Alloc) {
1542 return n->attr.a.frag_arr;
1544 return n->attr.frag_arr;
1549 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1550 if (!frag_arr[pos]) frag_arr[pos] = val;
1551 if (frag_arr[current_ir_graph->n_loc - 1])
1552 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* get_r_frag_value_internal: like get_r_value_internal, but reads the value
   valid on the exception edge of the fragile op cfOp (from its frag_arr). */
1556 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1560 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1562 frag_arr = get_frag_arr(cfOp);
1563 res = frag_arr[pos];
1565 if (block->attr.block.graph_arr[pos]) {
1566 /* There was a set_value after the cfOp and no get_value before that
1567 set_value. We must build a Phi node now. */
1568 if (block->attr.block.matured) {
1569 int ins = get_irn_arity(block);
1571 NEW_ARR_A (ir_node *, nin, ins);
1572 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 queued on block->link, exactly
   like case 1 of get_r_value_internal. */
1574 res = new_rd_Phi0 (current_ir_graph, block, mode);
1575 res->attr.phi0_pos = pos;
1576 res->link = block->link;
1580 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1581 but this should be better: (remove comment if this works) */
1582 /* It's a Phi, we can write this into all graph_arrs with NULL */
1583 set_frag_value(block->attr.block.graph_arr, pos, res);
1585 res = get_r_value_internal(block, pos, mode);
1586 set_frag_value(block->attr.block.graph_arr, pos, res);
1594 computes the predecessors for the real phi node, and then
1595 allocates and returns this node. The routine called to allocate the
1596 node might optimize it away and return a real value.
1597 This function must be called with an in-array of proper size. **/
/* phi_merge (second variant, used with PRECISE_EXC_CONTEXT): pre-plants a
   Phi0 in graph_arr to break recursion, then collects operands. */
1599 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1601 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1604 /* If this block has no value at pos create a Phi0 and remember it
1605 in graph_arr to break recursions.
1606 Else we may not set graph_arr as there a later value is remembered. */
1608 if (!block->attr.block.graph_arr[pos]) {
1609 if (block == get_irg_start_block(current_ir_graph)) {
1610 /* Collapsing to Bad tarvals is no good idea.
1611 So we call a user-supplied routine here that deals with this case as
1612 appropriate for the given language. Sadly the only help we can give
1613 here is the position.
1615 Even if all variables are defined before use, it can happen that
1616 we get to the start block, if a cond has been replaced by a tuple
1617 (bad, jmp). In this case we call the function needlessly, eventually
1618 generating a non-existent error.
1619 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1622 if (default_initialize_local_variable)
1623 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
1625 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1626 /* We don't need to care about exception ops in the start block.
1627 There are none by definition. */
1628 return block->attr.block.graph_arr[pos];
1630 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1631 block->attr.block.graph_arr[pos] = phi0;
1632 #if PRECISE_EXC_CONTEXT
1633 /* Set graph_arr for fragile ops. Also here we should break recursion.
1634 We could choose a cyclic path through an cfop. But the recursion would
1635 break at some point. */
1636 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1641 /* This loop goes to all predecessor blocks of the block the Phi node
1642 is in and there finds the operands of the Phi node by calling
1643 get_r_value_internal. */
1644 for (i = 1; i <= ins; ++i) {
1645 prevCfOp = skip_Proj(block->in[i]);
1647 if (is_Bad(prevCfOp)) {
1648 /* In case a Cond has been optimized we would get right to the start block
1649 with an invalid definition. */
1650 nin[i-1] = new_Bad();
1653 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1655 if (!is_Bad(prevBlock)) {
1656 #if PRECISE_EXC_CONTEXT
/* NOTE(review): get_r_frag_value_internal is called twice here — once
   inside assert (compiled out under NDEBUG) and once for real. Redundant
   work in debug builds only; behavior is unaffected. */
1657 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1658 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1659 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1662 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1664 nin[i-1] = new_Bad();
1668 /* After collecting all predecessors into the array nin a new Phi node
1669 with these predecessors is created. This constructor contains an
1670 optimization: If all predecessors of the Phi node are identical it
1671 returns the only operand instead of a new Phi node. */
1672 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1674 /* In case we allocated a Phi0 node at the beginning of this procedure,
1675 we need to exchange this Phi0 with the real Phi. */
1677 exchange(phi0, res);
1678 block->attr.block.graph_arr[pos] = res;
1679 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1680 only an optimization. */
1686 /* This function returns the last definition of a variable. In case
1687 this variable was last defined in a previous block, Phi nodes are
1688 inserted. If the part of the firm graph containing the definition
1689 is not yet constructed, a dummy Phi node is returned. */
1691 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1694 /* There are 4 cases to treat.
1696 1. The block is not mature and we visit it the first time. We can not
1697 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1698 predecessors is returned. This node is added to the linked list (field
1699 "link") of the containing block to be completed when this block is
1700 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1703 2. The value is already known in this block, graph_arr[pos] is set and we
1704 visit the block the first time. We can return the value without
1705 creating any new nodes.
1707 3. The block is mature and we visit it the first time. A Phi node needs
1708 to be created (phi_merge). If the Phi is not needed, as all it's
1709 operands are the same value reaching the block through different
1710 paths, it's optimized away and the value itself is returned.
1712 4. The block is mature, and we visit it the second time. Now two
1713 subcases are possible:
1714 * The value was computed completely the last time we were here. This
1715 is the case if there is no loop. We can return the proper value.
1716 * The recursion that visited this node and set the flag did not
1717 return yet. We are computing a value in a loop and need to
1718 break the recursion. This case only happens if we visited
1719 the same block with phi_merge before, which inserted a Phi0.
1720 So we return the Phi0.
1723 /* case 4 -- already visited. */
1724 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1725 /* As phi_merge allocates a Phi0 this value is always defined. Here
1726 is the critical difference of the two algorithms. */
1727 assert(block->attr.block.graph_arr[pos]);
1728 return block->attr.block.graph_arr[pos];
1731 /* visited the first time */
1732 set_irn_visited(block, get_irg_visited(current_ir_graph));
1734 /* Get the local valid value */
1735 res = block->attr.block.graph_arr[pos];
1737 /* case 2 -- If the value is actually computed, return it. */
1738 if (res) { return res; };
1740 if (block->attr.block.matured) { /* case 3 */
1742 /* The Phi has the same amount of ins as the corresponding block. */
1743 int ins = get_irn_arity(block);
1745 NEW_ARR_A (ir_node *, nin, ins);
1747 /* Phi merge collects the predecessors and then creates a node. */
1748 res = phi_merge (block, pos, mode, nin, ins);
1750 } else { /* case 1 */
1751 /* The block is not mature, we don't know how many in's are needed. A Phi
1752 with zero predecessors is created. Such a Phi node is called Phi0
1753 node. The Phi0 is then added to the list of Phi0 nodes in this block
1754 to be matured by mature_block later.
1755 The Phi0 has to remember the pos of it's internal value. If the real
1756 Phi is computed, pos is used to update the array with the local
1758 res = new_rd_Phi0 (current_ir_graph, block, mode);
1759 res->attr.phi0_pos = pos;
1760 res->link = block->link;
1764 /* If we get here, the frontend missed a use-before-definition error */
/* Fallback: fabricate a zero constant; the assert limits this to modes
   irm_F..irm_P — presumably modes with a defined null value, confirm. */
1767 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1768 assert (mode->code >= irm_F && mode->code <= irm_P);
1769 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1770 get_mode_null(mode));
1773 /* The local valid value is available now. */
1774 block->attr.block.graph_arr[pos] = res;
1779 #endif /* USE_FAST_PHI_CONSTRUCTION */
1781 /* ************************************************************************** */
1783 /** Finalize a Block node, when all control flows are known. */
1784 /** Acceptable parameters are only Block nodes. */
1786 mature_block (ir_node *block)
1793 assert (get_irn_opcode(block) == iro_Block);
1794 /* @@@ should be commented in
1795 assert (!get_Block_matured(block) && "Block already matured"); */
1797 if (!get_Block_matured(block)) {
/* block->in[0] is not a predecessor, hence the -1. */
1798 ins = ARR_LEN (block->in)-1;
1799 /* Fix block parameters */
1800 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1802 /* An array for building the Phi nodes. */
1803 NEW_ARR_A (ir_node *, nin, ins);
1805 /* Traverse a chain of Phi nodes attached to this block and mature
1807 for (n = block->link; n; n=next) {
1808 inc_irg_visited(current_ir_graph);
/* NOTE(review): `next` must be captured from n->link before the exchange
   below — the line doing so (orig. 1809) is elided in this excerpt. */
1810 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1813 block->attr.block.matured = 1;
1815 /* Now, as the block is a finished firm node, we can optimize it.
1816 Since other nodes have been allocated since the block was created
1817 we can not free the node on the obstack. Therefore we have to call
1819 Unfortunately the optimization does not change a lot, as all allocated
1820 nodes refer to the unoptimized node.
1821 We can call _2, as global cse has no effect on blocks. */
1822 block = optimize_in_place_2(block);
1823 irn_vrfy_irg(block, current_ir_graph);
1828 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1830 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
1835 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1837 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1842 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1844 return new_rd_Const_type (db, current_ir_graph, current_ir_graph->start_block,
1850 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1852 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1857 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1859 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
1864 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1867 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1868 arg->attr.c.kind = fragmentary;
1869 arg->attr.c.default_proj = max_proj;
1870 res = new_Proj (arg, mode_X, max_proj);
1875 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1877 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1882 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1884 return new_rd_Cast (db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1888 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1890 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1895 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1897 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1902 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1904 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1910 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1912 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1917 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1919 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1924 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1927 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1929 #if PRECISE_EXC_CONTEXT
1930 if ((current_ir_graph->phase_state == phase_building) &&
1931 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1932 res->attr.frag_arr = new_frag_arr(res);
1939 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1942 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1944 #if PRECISE_EXC_CONTEXT
1945 if ((current_ir_graph->phase_state == phase_building) &&
1946 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1947 res->attr.frag_arr = new_frag_arr(res);
1954 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1957 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1959 #if PRECISE_EXC_CONTEXT
1960 if ((current_ir_graph->phase_state == phase_building) &&
1961 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1962 res->attr.frag_arr = new_frag_arr(res);
1969 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1972 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1974 #if PRECISE_EXC_CONTEXT
1975 if ((current_ir_graph->phase_state == phase_building) &&
1976 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1977 res->attr.frag_arr = new_frag_arr(res);
1984 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1986 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1991 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1993 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1998 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2000 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2005 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2007 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2012 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2014 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2019 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2021 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2026 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2028 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2033 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2035 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2040 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2042 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2047 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2049 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2054 new_d_Jmp (dbg_info* db)
2056 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2060 new_d_Cond (dbg_info* db, ir_node *c)
2062 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2066 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2070 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2071 store, callee, arity, in, tp);
2072 #if PRECISE_EXC_CONTEXT
2073 if ((current_ir_graph->phase_state == phase_building) &&
2074 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
2075 res->attr.call.frag_arr = new_frag_arr(res);
2082 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2084 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2089 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2091 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2096 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2099 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2101 #if PRECISE_EXC_CONTEXT
2102 if ((current_ir_graph->phase_state == phase_building) &&
2103 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
2104 res->attr.frag_arr = new_frag_arr(res);
2111 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2114 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2116 #if PRECISE_EXC_CONTEXT
2117 if ((current_ir_graph->phase_state == phase_building) &&
2118 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
2119 res->attr.frag_arr = new_frag_arr(res);
2126 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2130 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2131 store, size, alloc_type, where);
2132 #if PRECISE_EXC_CONTEXT
2133 if ((current_ir_graph->phase_state == phase_building) &&
2134 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
2135 res->attr.a.frag_arr = new_frag_arr(res);
2142 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2144 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2145 store, ptr, size, free_type);
2149 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2150 /* GL: objptr was called frame before. Frame was a bad choice for the name
2151 as the operand could as well be a pointer to a dynamic object. */
2153 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2154 store, objptr, 0, NULL, ent);
2158 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2160 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2161 store, objptr, n_index, index, sel);
2165 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2167 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2168 store, objptr, ent));
2172 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2174 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2179 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2181 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2189 return current_ir_graph->bad;
2193 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2195 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2200 new_d_Unknown (ir_mode *m)
2202 return new_rd_Unknown(current_ir_graph, m);
2206 new_d_CallBegin (dbg_info *db, ir_node *call)
2209 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2214 new_d_EndReg (dbg_info *db)
2217 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2222 new_d_EndExcept (dbg_info *db)
2225 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2230 new_d_Break (dbg_info *db)
2232 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2236 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2238 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2243 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2247 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2248 callee, arity, in, tp);
2253 /* ********************************************************************* */
2254 /* Comfortable interface with automatic Phi node construction. */
2255 /* (Uses also constructors of ?? interface, except new_Block.) */
2256 /* ********************************************************************* */
2258 /** Block construction **/
2259 /* immature Block without predecessors */
/* Creates a Block with a dynamic (growable) in-array, makes it the current
   block, and allocates the per-block graph_arr used for Phi construction.
   The block must later be finished with mature_block(). */
2260 ir_node *new_d_immBlock (dbg_info* db) {
2263 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2264 /* creates a new dynamic in-array as length of in is -1 */
2265 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2266 current_ir_graph->current_block = res;
2267 res->attr.block.matured = 0;
2268 //res->attr.block.exc = exc_normal;
2269 //res->attr.block.handler_entry = 0;
2270 res->attr.block.irg = current_ir_graph;
2271 res->attr.block.backedge = NULL;
2272 res->attr.block.in_cg = NULL;
2273 res->attr.block.cg_backedge = NULL;
2274 set_Block_block_visited(res, 0);
2276 /* Create and initialize array for Phi-node construction. */
2277 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2278 current_ir_graph->n_loc);
2279 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2281 /* Immature block may not be optimized! */
2282 irn_vrfy_irg (res, current_ir_graph);
2289 return new_d_immBlock(NULL);
2292 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new control-flow predecessor of an (immature) block. */
2294 add_in_edge (ir_node *block, ir_node *jmp)
2296 if (block->attr.block.matured) {
2297 assert(0 && "Error: Block already matured!\n");
2300 assert (jmp != NULL);
2301 ARR_APP1 (ir_node *, block->in, jmp);
2305 /* changing the current block */
2307 switch_block (ir_node *target)
2309 current_ir_graph->current_block = target;
2312 /* ************************ */
2313 /* parameter administration */
2315 /* get a value from the parameter array from the current block by its index */
/* pos is the frontend-visible variable index; slot 0 of graph_arr is reserved
   for the memory/store value (see get_store), hence the pos + 1 offset. */
2317 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2319 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2320 inc_irg_visited(current_ir_graph);
2322 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2324 /* get a value from the parameter array from the current block by its index */
2326 get_value (int pos, ir_mode *mode)
2328 return get_d_value(NULL, pos, mode);
2331 /* set a value at position pos in the parameter array from the current block */
/* Slot 0 is reserved for the store, so the variable lives at pos + 1. */
2333 set_value (int pos, ir_node *value)
2335 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2336 assert(pos+1 < current_ir_graph->n_loc);
2337 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2340 /* get the current store */
/* The store (memory state) is kept in the reserved slot 0 of graph_arr. */
2344 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2345 /* GL: one could call get_value instead */
2346 inc_irg_visited(current_ir_graph);
2347 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2350 /* set the current store */
2352 set_store (ir_node *store)
2354 /* GL: one could call set_value instead */
2355 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2356 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2360 keep_alive (ir_node *ka)
2362 add_End_keepalive(current_ir_graph->end, ka);
2365 /** Useful access routines **/
2366 /* Returns the current block of the current graph. To set the current
2367 block use switch_block(). */
2368 ir_node *get_cur_block() {
2369 return get_irg_current_block(current_ir_graph);
2372 /* Returns the frame type of the current graph */
2373 type *get_cur_frame_type() {
2374 return get_irg_frame_type(current_ir_graph);
2378 /* ********************************************************************* */
2381 /* call once for each run of the library */
2383 init_cons (default_initialize_local_variable_func_t *func)
2385 default_initialize_local_variable = func;
2388 /* call for each graph */
2390 finalize_cons (ir_graph *irg) {
2391 irg->phase_state = phase_high;
2395 ir_node *new_Block(int arity, ir_node **in) {
2396 return new_d_Block(NULL, arity, in);
2398 ir_node *new_Start (void) {
2399 return new_d_Start(NULL);
2401 ir_node *new_End (void) {
2402 return new_d_End(NULL);
2404 ir_node *new_Jmp (void) {
2405 return new_d_Jmp(NULL);
2407 ir_node *new_Cond (ir_node *c) {
2408 return new_d_Cond(NULL, c);
2410 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2411 return new_d_Return(NULL, store, arity, in);
2413 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2414 return new_d_Raise(NULL, store, obj);
2416 ir_node *new_Const (ir_mode *mode, tarval *con) {
2417 return new_d_Const(NULL, mode, con);
2419 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2420 return new_d_SymConst(NULL, value, kind);
2422 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2423 return new_d_simpleSel(NULL, store, objptr, ent);
2425 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2427 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2429 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2430 return new_d_InstOf (NULL, store, objptr, ent);
2432 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2434 return new_d_Call(NULL, store, callee, arity, in, tp);
2436 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2437 return new_d_Add(NULL, op1, op2, mode);
2439 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2440 return new_d_Sub(NULL, op1, op2, mode);
2442 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2443 return new_d_Minus(NULL, op, mode);
2445 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2446 return new_d_Mul(NULL, op1, op2, mode);
2448 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2449 return new_d_Quot(NULL, memop, op1, op2);
2451 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2452 return new_d_DivMod(NULL, memop, op1, op2);
2454 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2455 return new_d_Div(NULL, memop, op1, op2);
2457 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2458 return new_d_Mod(NULL, memop, op1, op2);
2460 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2461 return new_d_Abs(NULL, op, mode);
2463 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2464 return new_d_And(NULL, op1, op2, mode);
2466 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2467 return new_d_Or(NULL, op1, op2, mode);
2469 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2470 return new_d_Eor(NULL, op1, op2, mode);
2472 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2473 return new_d_Not(NULL, op, mode);
2475 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2476 return new_d_Shl(NULL, op, k, mode);
2478 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2479 return new_d_Shr(NULL, op, k, mode);
2481 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2482 return new_d_Shrs(NULL, op, k, mode);
2484 #define new_Rotate new_Rot
2485 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2486 return new_d_Rot(NULL, op, k, mode);
2488 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2489 return new_d_Cmp(NULL, op1, op2);
2491 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2492 return new_d_Conv(NULL, op, mode);
2494 ir_node *new_Cast (ir_node *op, type *to_tp) {
2495 return new_d_Cast(NULL, op, to_tp);
2497 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2498 return new_d_Phi(NULL, arity, in, mode);
2500 ir_node *new_Load (ir_node *store, ir_node *addr) {
2501 return new_d_Load(NULL, store, addr);
2503 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2504 return new_d_Store(NULL, store, addr, val);
2506 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2507 where_alloc where) {
2508 return new_d_Alloc(NULL, store, size, alloc_type, where);
2510 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2512 return new_d_Free(NULL, store, ptr, size, free_type);
2514 ir_node *new_Sync (int arity, ir_node **in) {
2515 return new_d_Sync(NULL, arity, in);
2517 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2518 return new_d_Proj(NULL, arg, mode, proj);
2520 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2521 return new_d_defaultProj(NULL, arg, max_proj);
2523 ir_node *new_Tuple (int arity, ir_node **in) {
2524 return new_d_Tuple(NULL, arity, in);
2526 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2527 return new_d_Id(NULL, val, mode);
2529 ir_node *new_Bad (void) {
2532 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2533 return new_d_Confirm (NULL, val, bound, cmp);
2535 ir_node *new_Unknown(ir_mode *m) {
2536 return new_d_Unknown(m);
2538 ir_node *new_CallBegin (ir_node *callee) {
2539 return new_d_CallBegin(NULL, callee);
2541 ir_node *new_EndReg (void) {
2542 return new_d_EndReg(NULL);
2544 ir_node *new_EndExcept (void) {
2545 return new_d_EndExcept(NULL);
2547 ir_node *new_Break (void) {
2548 return new_d_Break(NULL);
2550 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2551 return new_d_Filter(NULL, arg, mode, proj);
2553 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2554 return new_d_FuncCall(NULL, callee, arity, in, tp);