3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
44 * language dependant initialization variable
46 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
48 /*** ******************************************** */
49 /** private interfaces, for professional use only */
51 /* Constructs a Block with a fixed number of predecessors.
52 Does not set current_block. Can not be used with automatic
53 Phi node construction. */
55 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
59 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
60 set_Block_matured(res, 1);
61 set_Block_block_visited(res, 0);
63 //res->attr.block.exc = exc_normal;
64 //res->attr.block.handler_entry = 0;
65 res->attr.block.irg = irg;
66 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
67 res->attr.block.in_cg = NULL;
68 res->attr.block.cg_backedge = NULL;
70 irn_vrfy_irg (res, irg);
75 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
79 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
80 //res->attr.start.irg = irg;
82 irn_vrfy_irg (res, irg);
87 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
91 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
93 irn_vrfy_irg (res, irg);
97 /* Creates a Phi node with all predecessors. Calling this constructor
98 is only allowed if the corresponding block is mature. */
100 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
104 bool has_unknown = false;
106 /* Don't assert that block matured: the use of this constructor is strongly
108 if ( get_Block_matured(block) )
109 assert( get_irn_arity(block) == arity );
111 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
113 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
115 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
116 if (!has_unknown) res = optimize_node (res);
117 irn_vrfy_irg (res, irg);
119 /* Memory Phis in endless loops must be kept alive.
120 As we can't distinguish these easily we keep all of them alive. */
121 if ((res->op == op_Phi) && (mode == mode_M))
122 add_End_keepalive(irg->end, res);
127 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
130 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
131 res->attr.con.tv = con;
132 set_Const_type(res, tp); /* Call method because of complex assertion. */
133 res = optimize_node (res);
134 assert(get_Const_type(res) == tp);
135 irn_vrfy_irg (res, irg);
138 res = local_optimize_newby (res);
145 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
147 type *tp = unknown_type;
148 if (tarval_is_entity(con))
149 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
150 return new_rd_Const_type (db, irg, block, mode, con, tp);
154 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
159 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
160 res = optimize_node (res);
161 irn_vrfy_irg (res, irg);
166 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
172 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
173 res->attr.proj = proj;
176 assert(get_Proj_pred(res));
177 assert(get_nodes_Block(get_Proj_pred(res)));
179 res = optimize_node (res);
181 irn_vrfy_irg (res, irg);
187 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
191 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
192 arg->attr.c.kind = fragmentary;
193 arg->attr.c.default_proj = max_proj;
194 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
199 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
204 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
205 res = optimize_node (res);
206 irn_vrfy_irg (res, irg);
211 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
214 res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
215 res->attr.cast.totype = to_tp;
216 res = optimize_node (res);
217 irn_vrfy_irg (res, irg);
222 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
226 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
227 res = optimize_node (res);
228 irn_vrfy_irg (res, irg);
233 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
234 ir_node *op1, ir_node *op2, ir_mode *mode)
240 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
241 res = optimize_node (res);
242 irn_vrfy_irg (res, irg);
247 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
248 ir_node *op1, ir_node *op2, ir_mode *mode)
254 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
255 res = optimize_node (res);
256 irn_vrfy_irg (res, irg);
261 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
262 ir_node *op, ir_mode *mode)
267 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
268 res = optimize_node (res);
269 irn_vrfy_irg (res, irg);
274 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
275 ir_node *op1, ir_node *op2, ir_mode *mode)
281 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
282 res = optimize_node (res);
283 irn_vrfy_irg (res, irg);
288 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
289 ir_node *memop, ir_node *op1, ir_node *op2)
296 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
297 res = optimize_node (res);
298 irn_vrfy_irg (res, irg);
303 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
304 ir_node *memop, ir_node *op1, ir_node *op2)
311 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
312 res = optimize_node (res);
313 irn_vrfy_irg (res, irg);
318 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
319 ir_node *memop, ir_node *op1, ir_node *op2)
326 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
327 res = optimize_node (res);
328 irn_vrfy_irg (res, irg);
333 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
334 ir_node *memop, ir_node *op1, ir_node *op2)
341 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
342 res = optimize_node (res);
343 irn_vrfy_irg (res, irg);
348 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
349 ir_node *op1, ir_node *op2, ir_mode *mode)
355 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
356 res = optimize_node (res);
357 irn_vrfy_irg (res, irg);
362 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
363 ir_node *op1, ir_node *op2, ir_mode *mode)
369 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
370 res = optimize_node (res);
371 irn_vrfy_irg (res, irg);
376 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
377 ir_node *op1, ir_node *op2, ir_mode *mode)
383 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
384 res = optimize_node (res);
385 irn_vrfy_irg (res, irg);
390 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
391 ir_node *op, ir_mode *mode)
396 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
397 res = optimize_node (res);
398 irn_vrfy_irg (res, irg);
403 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
404 ir_node *op, ir_node *k, ir_mode *mode)
410 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
411 res = optimize_node (res);
412 irn_vrfy_irg (res, irg);
417 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
418 ir_node *op, ir_node *k, ir_mode *mode)
424 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
425 res = optimize_node (res);
426 irn_vrfy_irg (res, irg);
431 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
432 ir_node *op, ir_node *k, ir_mode *mode)
438 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
439 res = optimize_node (res);
440 irn_vrfy_irg (res, irg);
445 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
446 ir_node *op, ir_node *k, ir_mode *mode)
452 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
453 res = optimize_node (res);
454 irn_vrfy_irg (res, irg);
459 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
460 ir_node *op, ir_mode *mode)
465 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
466 res = optimize_node (res);
467 irn_vrfy_irg (res, irg);
472 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
473 ir_node *op1, ir_node *op2)
479 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
480 res = optimize_node (res);
481 irn_vrfy_irg (res, irg);
486 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
489 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
490 res = optimize_node (res);
491 irn_vrfy_irg (res, irg);
496 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
501 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
502 res->attr.c.kind = dense;
503 res->attr.c.default_proj = 0;
504 res = optimize_node (res);
505 irn_vrfy_irg (res, irg);
510 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
511 ir_node *callee, int arity, ir_node **in, type *tp)
518 NEW_ARR_A (ir_node *, r_in, r_arity);
521 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
523 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
525 assert(is_method_type(tp));
526 set_Call_type(res, tp);
527 res->attr.call.callee_arr = NULL;
528 res = optimize_node (res);
529 irn_vrfy_irg (res, irg);
534 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
535 ir_node *store, int arity, ir_node **in)
542 NEW_ARR_A (ir_node *, r_in, r_arity);
544 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
545 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
546 res = optimize_node (res);
547 irn_vrfy_irg (res, irg);
552 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
558 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
559 res = optimize_node (res);
560 irn_vrfy_irg (res, irg);
565 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
566 ir_node *store, ir_node *adr)
572 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
574 res = optimize_node (res);
575 irn_vrfy_irg (res, irg);
580 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
581 ir_node *store, ir_node *adr, ir_node *val)
588 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
590 res = optimize_node (res);
592 irn_vrfy_irg (res, irg);
597 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
598 ir_node *size, type *alloc_type, where_alloc where)
604 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
606 res->attr.a.where = where;
607 res->attr.a.type = alloc_type;
609 res = optimize_node (res);
610 irn_vrfy_irg (res, irg);
615 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
616 ir_node *ptr, ir_node *size, type *free_type)
623 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
625 res->attr.f = free_type;
627 res = optimize_node (res);
628 irn_vrfy_irg (res, irg);
633 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
634 int arity, ir_node **in, entity *ent)
640 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
643 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
646 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
647 res = new_ir_node (db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
649 res->attr.s.ent = ent;
651 res = optimize_node (res);
652 irn_vrfy_irg (res, irg);
657 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
658 ir_node *objptr, type *ent)
665 NEW_ARR_A (ir_node *, r_in, r_arity);
669 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
671 res->attr.io.ent = ent;
673 /* res = optimize (res);
674 * irn_vrfy_irg (res, irg); */
679 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
680 symconst_kind symkind)
684 if (symkind == linkage_ptr_info)
688 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
690 res->attr.i.num = symkind;
691 if (symkind == linkage_ptr_info) {
692 res->attr.i.tori.ptrinfo = (ident *)value;
694 assert ( ( (symkind == type_tag)
695 || (symkind == size))
696 && (is_type(value)));
697 res->attr.i.tori.typ = (type *)value;
699 res = optimize_node (res);
700 irn_vrfy_irg (res, irg);
705 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
709 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
711 res = optimize_node (res);
712 irn_vrfy_irg (res, irg);
717 new_rd_Bad (ir_graph *irg)
723 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
725 ir_node *in[2], *res;
729 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
731 res->attr.confirm_cmp = cmp;
733 res = optimize_node (res);
734 irn_vrfy_irg(res, irg);
739 new_rd_Unknown (ir_graph *irg, ir_mode *m)
741 return new_ir_node (NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
745 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
749 in[0] = get_Call_ptr(call);
750 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
751 //res->attr.callbegin.irg = irg;
752 res->attr.callbegin.call = call;
753 res = optimize_node (res);
754 irn_vrfy_irg (res, irg);
759 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
763 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
764 //res->attr.end.irg = irg;
766 irn_vrfy_irg (res, irg);
771 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
775 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
776 //res->attr.end.irg = irg;
778 irn_vrfy_irg (res, irg);
783 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
786 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
787 res = optimize_node (res);
788 irn_vrfy_irg (res, irg);
793 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
799 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
800 res->attr.filter.proj = proj;
801 res->attr.filter.in_cg = NULL;
802 res->attr.filter.backedge = NULL;
805 assert(get_Proj_pred(res));
806 assert(get_nodes_Block(get_Proj_pred(res)));
808 res = optimize_node (res);
810 irn_vrfy_irg (res, irg);
816 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
817 ir_node *callee, int arity, ir_node **in, type *tp)
824 NEW_ARR_A (ir_node *, r_in, r_arity);
826 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
828 res = new_ir_node (db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
830 assert(is_method_type(tp));
831 set_FuncCall_type(res, tp);
832 res->attr.call.callee_arr = NULL;
833 res = optimize_node (res);
834 irn_vrfy_irg (res, irg);
839 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
840 return new_rd_Block(NULL, irg, arity, in);
842 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
843 return new_rd_Start(NULL, irg, block);
845 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
846 return new_rd_End(NULL, irg, block);
848 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
849 return new_rd_Jmp(NULL, irg, block);
851 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
852 return new_rd_Cond(NULL, irg, block, c);
854 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
855 ir_node *store, int arity, ir_node **in) {
856 return new_rd_Return(NULL, irg, block, store, arity, in);
858 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
859 ir_node *store, ir_node *obj) {
860 return new_rd_Raise(NULL, irg, block, store, obj);
862 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
863 ir_mode *mode, tarval *con) {
864 return new_rd_Const(NULL, irg, block, mode, con);
866 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
867 type_or_id_p value, symconst_kind symkind) {
868 return new_rd_SymConst(NULL, irg, block, value, symkind);
870 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
871 ir_node *objptr, int n_index, ir_node **index,
873 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
875 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
877 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
879 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
880 ir_node *callee, int arity, ir_node **in,
882 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
884 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
885 ir_node *op1, ir_node *op2, ir_mode *mode) {
886 return new_rd_Add(NULL, irg, block, op1, op2, mode);
888 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
889 ir_node *op1, ir_node *op2, ir_mode *mode) {
890 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
892 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
893 ir_node *op, ir_mode *mode) {
894 return new_rd_Minus(NULL, irg, block, op, mode);
896 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
897 ir_node *op1, ir_node *op2, ir_mode *mode) {
898 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
900 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
901 ir_node *memop, ir_node *op1, ir_node *op2) {
902 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
904 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
905 ir_node *memop, ir_node *op1, ir_node *op2) {
906 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
908 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
909 ir_node *memop, ir_node *op1, ir_node *op2) {
910 return new_rd_Div(NULL, irg, block, memop, op1, op2);
912 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
913 ir_node *memop, ir_node *op1, ir_node *op2) {
914 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
916 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
917 ir_node *op, ir_mode *mode) {
918 return new_rd_Abs(NULL, irg, block, op, mode);
920 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
921 ir_node *op1, ir_node *op2, ir_mode *mode) {
922 return new_rd_And(NULL, irg, block, op1, op2, mode);
924 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
925 ir_node *op1, ir_node *op2, ir_mode *mode) {
926 return new_rd_Or(NULL, irg, block, op1, op2, mode);
928 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
929 ir_node *op1, ir_node *op2, ir_mode *mode) {
930 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
932 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
933 ir_node *op, ir_mode *mode) {
934 return new_rd_Not(NULL, irg, block, op, mode);
936 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
937 ir_node *op1, ir_node *op2) {
938 return new_rd_Cmp(NULL, irg, block, op1, op2);
940 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
941 ir_node *op, ir_node *k, ir_mode *mode) {
942 return new_rd_Shl(NULL, irg, block, op, k, mode);
944 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
945 ir_node *op, ir_node *k, ir_mode *mode) {
946 return new_rd_Shr(NULL, irg, block, op, k, mode);
948 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
949 ir_node *op, ir_node *k, ir_mode *mode) {
950 return new_rd_Shrs(NULL, irg, block, op, k, mode);
952 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
953 ir_node *op, ir_node *k, ir_mode *mode) {
954 return new_rd_Rot(NULL, irg, block, op, k, mode);
956 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
957 ir_node *op, ir_mode *mode) {
958 return new_rd_Conv(NULL, irg, block, op, mode);
960 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
961 return new_rd_Cast(NULL, irg, block, op, to_tp);
963 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
964 ir_node **in, ir_mode *mode) {
965 return new_rd_Phi(NULL, irg, block, arity, in, mode);
967 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
968 ir_node *store, ir_node *adr) {
969 return new_rd_Load(NULL, irg, block, store, adr);
971 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
972 ir_node *store, ir_node *adr, ir_node *val) {
973 return new_rd_Store(NULL, irg, block, store, adr, val);
975 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
976 ir_node *size, type *alloc_type, where_alloc where) {
977 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
979 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
980 ir_node *ptr, ir_node *size, type *free_type) {
981 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
983 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
984 return new_rd_Sync(NULL, irg, block, arity, in);
986 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
987 ir_mode *mode, long proj) {
988 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
990 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
992 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
994 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
995 int arity, ir_node **in) {
996 return new_rd_Tuple(NULL, irg, block, arity, in );
998 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
999 ir_node *val, ir_mode *mode) {
1000 return new_rd_Id(NULL, irg, block, val, mode);
1002 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1003 return new_rd_Bad(irg);
1005 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1006 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1008 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1009 return new_rd_Unknown(irg, m);
1011 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1012 return new_rd_CallBegin(NULL, irg, block, callee);
1014 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1015 return new_rd_EndReg(NULL, irg, block);
1017 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1018 return new_rd_EndExcept(NULL, irg, block);
1020 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1021 return new_rd_Break(NULL, irg, block);
1023 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1024 ir_mode *mode, long proj) {
1025 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1027 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1028 ir_node *callee, int arity, ir_node **in,
1030 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1034 /** ********************/
1035 /** public interfaces */
1036 /** construction tools */
1040 * - create a new Start node in the current block
1042 * @return s - pointer to the created Start node
1047 new_d_Start (dbg_info* db)
1051 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1052 op_Start, mode_T, 0, NULL);
1053 //res->attr.start.irg = current_ir_graph;
1055 res = optimize_node (res);
1056 irn_vrfy_irg (res, current_ir_graph);
1061 new_d_End (dbg_info* db)
1064 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1065 op_End, mode_X, -1, NULL);
1066 res = optimize_node (res);
1067 irn_vrfy_irg (res, current_ir_graph);
1072 /* Constructs a Block with a fixed number of predecessors.
1073 Does set current_block. Can be used with automatic Phi
1074 node construction. */
1076 new_d_Block (dbg_info* db, int arity, ir_node **in)
1080 bool has_unknown = false;
1082 res = new_rd_Block (db, current_ir_graph, arity, in);
1084 /* Create and initialize array for Phi-node construction. */
1085 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1086 current_ir_graph->n_loc);
1087 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1089 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1091 if (!has_unknown) res = optimize_node (res);
1092 current_ir_graph->current_block = res;
1094 irn_vrfy_irg (res, current_ir_graph);
1099 /* ***********************************************************************/
1100 /* Methods necessary for automatic Phi node creation */
1102 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1103 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1104 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1105 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1107 Call Graph: ( A ---> B == A "calls" B)
1109 get_value mature_block
1117 get_r_value_internal |
1121 new_rd_Phi0 new_rd_Phi_in
1123 * *************************************************************************** */
1125 /* Creates a Phi node with 0 predecessors */
1126 static INLINE ir_node *
1127 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1130 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1131 irn_vrfy_irg (res, irg);
1135 /* There are two implementations of the Phi node construction. The first
1136 is faster, but does not work for blocks with more than 2 predecessors.
1137 The second works always but is slower and causes more unnecessary Phi
1139 Select the implementations by the following preprocessor flag set in
1141 #if USE_FAST_PHI_CONSTRUCTION
1143 /* This is a stack used for allocating and deallocating nodes in
1144 new_rd_Phi_in. The original implementation used the obstack
1145 to model this stack, now it is explicit. This reduces side effects.
1147 #if USE_EXPLICIT_PHI_IN_STACK
1148 INLINE Phi_in_stack *
1149 new_Phi_in_stack(void) {
1152 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1154 res->stack = NEW_ARR_F (ir_node *, 1);
1161 free_Phi_in_stack(Phi_in_stack *s) {
1162 DEL_ARR_F(s->stack);
1166 free_to_Phi_in_stack(ir_node *phi) {
1167 assert(get_irn_opcode(phi) == iro_Phi);
1169 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1170 current_ir_graph->Phi_in_stack->pos)
1171 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1173 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1175 (current_ir_graph->Phi_in_stack->pos)++;
1178 static INLINE ir_node *
1179 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1180 int arity, ir_node **in) {
1182 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1183 int pos = current_ir_graph->Phi_in_stack->pos;
1187 /* We need to allocate a new node */
1188 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1189 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1191 /* reuse the old node and initialize it again. */
1194 assert (res->kind == k_ir_node);
1195 assert (res->op == op_Phi);
1199 assert (arity >= 0);
1200 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1201 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1203 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1205 (current_ir_graph->Phi_in_stack->pos)--;
1209 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1211 /* Creates a Phi node with a given, fixed array **in of predecessors.
1212 If the Phi node is unnecessary, as the same value reaches the block
1213 through all control flow paths, it is eliminated and the value
1214 returned directly. This constructor is only intended for use in
1215 the automatic Phi node generation triggered by get_value or mature.
1216 The implementation is quite tricky and depends on the fact, that
1217 the nodes are allocated on a stack:
1218 The in array contains predecessors and NULLs. The NULLs appear,
1219 if get_r_value_internal, that computed the predecessors, reached
1220 the same block on two paths. In this case the same value reaches
1221 this block on both paths, there is no definition in between. We need
1222 not allocate a Phi where these path's merge, but we have to communicate
1223 this fact to the caller. This happens by returning a pointer to the
1224 node the caller _will_ allocate. (Yes, we predict the address. We can
1225 do so because the nodes are allocated on the obstack.) The caller then
1226 finds a pointer to itself and, when this routine is called again,
1229 static INLINE ir_node *
1230 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1231 ir_node **in, int ins)
1234 ir_node *res, *known;
1236 /* allocate a new node on the obstack.
1237 This can return a node to which some of the pointers in the in-array
1239 Attention: the constructor copies the in array, i.e., the later changes
1240 to the array in this routine do not affect the constructed node! If
1241 the in array contains NULLs, there will be missing predecessors in the
1243 Is this a possible internal state of the Phi node generation? */
1244 #if USE_EXPLICIT_PHI_IN_STACK
1245 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1247 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1248 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1250 /* The in-array can contain NULLs. These were returned by
1251 get_r_value_internal if it reached the same block/definition on a
1253 The NULLs are replaced by the node itself to simplify the test in the
1255 for (i=0; i < ins; ++i)
1256 if (in[i] == NULL) in[i] = res;
1258 /* This loop checks whether the Phi has more than one predecessor.
1259 If so, it is a real Phi node and we break the loop. Else the
1260 Phi node merges the same definition on several paths and therefore
1262 for (i=0; i < ins; ++i)
1264 if (in[i]==res || in[i]==known) continue;
1272 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1274 #if USE_EXPLICIT_PHI_IN_STACK
1275 free_to_Phi_in_stack(res);
1277 obstack_free (current_ir_graph->obst, res);
1281 res = optimize_node (res);
1282 irn_vrfy_irg (res, irg);
1285 /* return the pointer to the Phi node. This node might be deallocated! */
1290 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1293 allocates and returns this node. The routine called to allocate the
1294 node might optimize it away and return a real value, or even a pointer
1295 to a deallocated Phi node on top of the obstack!
1296 This function is called with an in-array of proper size. **/
/* phi_merge: for local variable slot `pos`, collect into nin[] the value
   reaching `block` over each of its `ins` control-flow predecessors (by
   recursing through get_r_value_internal) and build -- or fold away -- the
   corresponding Phi node.  NOTE(review): this is a sampled listing; interior
   lines of the function are missing here. */
1298 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1300 ir_node *prevBlock, *res;
1303 /* This loop goes to all predecessor blocks of the block the Phi node is in
1304 and there finds the operands of the Phi node by calling
1305 get_r_value_internal. */
1306 for (i = 1; i <= ins; ++i) {
1307 assert (block->in[i]);
     /* block->in[i] is the i-th control-flow predecessor (a Jmp/Cond proj);
        its in[0] is the block that op lives in. */
1308 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1310 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1313 /* After collecting all predecessors into the array nin a new Phi node
1314 with these predecessors is created. This constructor contains an
1315 optimization: If all predecessors of the Phi node are identical it
1316 returns the only operand instead of a new Phi node. If the value
1317 passes two different control flow edges without being defined, and
1318 this is the second path treated, a pointer to the node that will be
1319 allocated for the first path (recursion) is returned. We already
1320 know the address of this node, as it is the next node to be allocated
1321 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1322 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1324 /* Now we know the value for "pos" and can enter it in the array with
1325 all known local variables. Attention: this might be a pointer to
1326 a node, that later will be allocated!!! See new_rd_Phi_in.
1327 If this is called in mature, after some set_value in the same block,
1328 the proper value must not be overwritten:
1330 get_value (makes Phi0, puts it into graph_arr)
1331 set_value (overwrites Phi0 in graph_arr)
1332 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
     /* Only record res if no later set_value already filled the slot. */
1335 if (!block->attr.block.graph_arr[pos]) {
1336 block->attr.block.graph_arr[pos] = res;
1338 /* printf(" value already computed by %s\n",
1339 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1345 /* This function returns the last definition of a variable. In case
1346 this variable was last defined in a previous block, Phi nodes are
1347 inserted. If the part of the firm graph containing the definition
1348 is not yet constructed, a dummy Phi node is returned. */
/* get_r_value_internal: return the last definition of local variable `pos`
   valid in `block`, inserting Phi nodes across block boundaries as needed.
   Uses the per-graph visited flag to detect revisits (recursion breaking). */
1350 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1353 /* There are 4 cases to treat.
1355 1. The block is not mature and we visit it the first time. We can not
1356 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1357 predecessors is returned. This node is added to the linked list (field
1358 "link") of the containing block to be completed when this block is
1359 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1362 2. The value is already known in this block, graph_arr[pos] is set and we
1363 visit the block the first time. We can return the value without
1364 creating any new nodes.
1366 3. The block is mature and we visit it the first time. A Phi node needs
1367 to be created (phi_merge). If the Phi is not needed, as all it's
1368 operands are the same value reaching the block through different
1369 paths, it's optimized away and the value itself is returned.
1371 4. The block is mature, and we visit it the second time. Now two
1372 subcases are possible:
1373 * The value was computed completely the last time we were here. This
1374 is the case if there is no loop. We can return the proper value.
1375 * The recursion that visited this node and set the flag did not
1376 return yet. We are computing a value in a loop and need to
1377 break the recursion without knowing the result yet.
1378 @@@ strange case. Straight forward we would create a Phi before
1379 starting the computation of it's predecessors. In this case we will
1380 find a Phi here in any case. The problem is that this implementation
1381 only creates a Phi after computing the predecessors, so that it is
1382 hard to compute self references of this Phi. @@@
1383 There is no simple check for the second subcase. Therefore we check
1384 for a second visit and treat all such cases as the second subcase.
1385 Anyways, the basic situation is the same: we reached a block
1386 on two paths without finding a definition of the value: No Phi
1387 nodes are needed on both paths.
1388 We return this information "Two paths, no Phi needed" by a very tricky
1389 implementation that relies on the fact that an obstack is a stack and
1390 will return a node with the same address on different allocations.
1391 Look also at phi_merge and new_rd_phi_in to understand this.
1392 @@@ Unfortunately this does not work, see testprogram
1393 three_cfpred_example.
1397 /* case 4 -- already visited. */
1398 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1400 /* visited the first time */
1401 set_irn_visited(block, get_irg_visited(current_ir_graph));
1403 /* Get the local valid value */
1404 res = block->attr.block.graph_arr[pos];
1406 /* case 2 -- If the value is actually computed, return it. */
1407 if (res) { return res;};
1409 if (block->attr.block.matured) { /* case 3 */
1411 /* The Phi has the same amount of ins as the corresponding block. */
1412 int ins = get_irn_arity(block);
1414 NEW_ARR_A (ir_node *, nin, ins);
1416 /* Phi merge collects the predecessors and then creates a node. */
1417 res = phi_merge (block, pos, mode, nin, ins);
1419 } else { /* case 1 */
1420 /* The block is not mature, we don't know how many in's are needed. A Phi
1421 with zero predecessors is created. Such a Phi node is called Phi0
1422 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1423 to the list of Phi0 nodes in this block to be matured by mature_block
1425 The Phi0 has to remember the pos of it's internal value. If the real
1426 Phi is computed, pos is used to update the array with the local
1429 res = new_rd_Phi0 (current_ir_graph, block, mode);
1430 res->attr.phi0_pos = pos;
1431 res->link = block->link;
1435 /* If we get here, the frontend missed a use-before-definition error */
     /* NOTE(review): diagnostic goes to stdout; fprintf(stderr, ...) would be
        more conventional -- confirm before changing. */
1438 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1439 assert (mode->code >= irm_F && mode->code <= irm_P);
1440 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1441 tarval_mode_null[mode->code]);
1444 /* The local valid value is available now. */
1445 block->attr.block.graph_arr[pos] = res;
1453 it starts the recursion. This causes an Id at the entry of
1454 every block that has no definition of the value! **/
1456 #if USE_EXPLICIT_PHI_IN_STACK
1458 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1459 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* new_rd_Phi_in: allocate a Phi node for `block` with the `ins` operands in
   in[], then fold it away if (ignoring Bad and self references) it merges at
   most one distinct value.  Memory Phis are kept alive via the End node. */
static INLINE ir_node *
1463 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1464 ir_node **in, int ins)
1467 ir_node *res, *known;
1469 /* Allocate a new node on the obstack. The allocation copies the in
1471 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1472 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1474 /* This loop checks whether the Phi has more than one predecessor.
1475 If so, it is a real Phi node and we break the loop. Else the
1476 Phi node merges the same definition on several paths and therefore
1477 is not needed. Don't consider Bad nodes! */
1479 for (i=0; i < ins; ++i)
1483 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1491 /* i==ins: there is at most one predecessor, we don't need a phi node. */
     /* The redundant Phi was the topmost obstack allocation, so it can be
        freed again (obstack is a stack). */
1494 obstack_free (current_ir_graph->obst, res);
1497 /* A undefined value, e.g., in unreachable code. */
1501 res = optimize_node (res);
1502 irn_vrfy_irg (res, irg);
1503 /* Memory Phis in endless loops must be kept alive.
1504 As we can't distinguish these easily we keep all of them alive. */
1505 if ((res->op == op_Phi) && (mode == mode_M))
1506 add_End_keepalive(irg->end, res);
1513 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1515 #if PRECISE_EXC_CONTEXT
1517 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* new_frag_arr: snapshot the current block's graph_arr for fragile op `n`
   (an op that may raise an exception), so the exception path sees the values
   valid *before* n.  Slot 0 is replaced by a fresh memory Proj of n. */
1519 static INLINE ir_node ** new_frag_arr (ir_node *n)
1523 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1524 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1525 sizeof(ir_node *)*current_ir_graph->n_loc);
1526 /* turn off optimization before allocating Proj nodes, as res isn't
1528 opt = get_opt_optimize(); set_optimize(0);
     /* NOTE(review): the matching set_optimize(opt) restore is not visible in
        this sampled listing -- verify it exists before the return. */
1529 /* Here we rely on the fact that all frag ops have Memory as first result! */
1530 if (get_irn_op(n) == op_Call)
1531 arr[0] = new_Proj(n, mode_M, 3);
1533 arr[0] = new_Proj(n, mode_M, 0);
     /* Remember the most recent fragile op in the last graph_arr slot so
        set_frag_value can chain through it. */
1535 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* get_frag_arr: return the frag_arr snapshot stored on fragile op `n`;
   the attribute location depends on the opcode. */
1539 static INLINE ir_node **
1540 get_frag_arr (ir_node *n) {
1541 if (get_irn_op(n) == op_Call) {
1542 return n->attr.call.frag_arr;
1543 } else if (get_irn_op(n) == op_Alloc) {
1544 return n->attr.a.frag_arr;
     /* default: all other fragile ops keep it directly in attr.frag_arr */
1546 return n->attr.frag_arr;
/* set_frag_value: record `val` at slot `pos` in frag_arr unless already set,
   then propagate recursively through the chain of fragile ops remembered in
   the last slot (see new_frag_arr). */
1551 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1552 if (!frag_arr[pos]) frag_arr[pos] = val;
1553 if (frag_arr[current_ir_graph->n_loc - 1])
1554 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* get_r_frag_value_internal: value of local `pos` valid on the exception
   edge of fragile op `cfOp` leaving `block`.  Falls back to a Phi / Phi0 if
   a set_value happened after cfOp, else to the normal lookup. */
1558 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1562 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1564 frag_arr = get_frag_arr(cfOp);
1565 res = frag_arr[pos];
1567 if (block->attr.block.graph_arr[pos]) {
1568 /* There was a set_value after the cfOp and no get_value before that
1569 set_value. We must build a Phi node now. */
1570 if (block->attr.block.matured) {
1571 int ins = get_irn_arity(block);
1573 NEW_ARR_A (ir_node *, nin, ins);
1574 res = phi_merge(block, pos, mode, nin, ins);
     /* immature block: fall back to a Phi0 placeholder, matured later */
1576 res = new_rd_Phi0 (current_ir_graph, block, mode);
1577 res->attr.phi0_pos = pos;
1578 res->link = block->link;
1582 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1583 but this should be better: (remove comment if this works) */
1584 /* It's a Phi, we can write this into all graph_arrs with NULL */
1585 set_frag_value(block->attr.block.graph_arr, pos, res);
1587 res = get_r_value_internal(block, pos, mode);
1588 set_frag_value(block->attr.block.graph_arr, pos, res);
1596 computes the predecessors for the real phi node, and then
1597 allocates and returns this node. The routine called to allocate the
1598 node might optimize it away and return a real value.
1599 This function must be called with an in-array of proper size. **/
/* phi_merge (PRECISE_EXC_CONTEXT variant): collect the value of local `pos`
   over all predecessors of `block`, breaking recursion with a pre-allocated
   Phi0 and routing through frag arrays for fragile control-flow ops. */
1601 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1603 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1606 /* If this block has no value at pos create a Phi0 and remember it
1607 in graph_arr to break recursions.
1608 Else we may not set graph_arr as there a later value is remembered. */
1610 if (!block->attr.block.graph_arr[pos]) {
1611 if (block == get_irg_start_block(current_ir_graph)) {
1612 /* Collapsing to Bad tarvals is no good idea.
1613 So we call a user-supplied routine here that deals with this case as
1614 appropriate for the given language. Sadly the only help we can give
1615 here is the position.
1617 Even if all variables are defined before use, it can happen that
1618 we get to the start block, if a cond has been replaced by a tuple
1619 (bad, jmp). In this case we call the function needlessly, eventually
1620 generating a non-existent error.
1621 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1624 if (default_initialize_local_variable)
1625 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
1627 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1628 /* We don't need to care about exception ops in the start block.
1629 There are none by definition. */
1630 return block->attr.block.graph_arr[pos];
     /* non-start block: plant a Phi0 placeholder to break recursion */
1632 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1633 block->attr.block.graph_arr[pos] = phi0;
1634 #if PRECISE_EXC_CONTEXT
1635 /* Set graph_arr for fragile ops. Also here we should break recursion.
1636 We could choose a cyclic path through an cfop. But the recursion would
1637 break at some point. */
1638 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1643 /* This loop goes to all predecessor blocks of the block the Phi node
1644 is in and there finds the operands of the Phi node by calling
1645 get_r_value_internal. */
1646 for (i = 1; i <= ins; ++i) {
1647 prevCfOp = skip_Proj(block->in[i]);
1649 if (is_Bad(prevCfOp)) {
1650 /* In case a Cond has been optimized we would get right to the start block
1651 with an invalid definition. */
1652 nin[i-1] = new_Bad();
1655 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1657 if (!is_Bad(prevBlock)) {
1658 #if PRECISE_EXC_CONTEXT
1659 if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1660 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1661 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1664 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1666 nin[i-1] = new_Bad();
1670 /* After collecting all predecessors into the array nin a new Phi node
1671 with these predecessors is created. This constructor contains an
1672 optimization: If all predecessors of the Phi node are identical it
1673 returns the only operand instead of a new Phi node. */
1674 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1676 /* In case we allocated a Phi0 node at the beginning of this procedure,
1677 we need to exchange this Phi0 with the real Phi. */
1679 exchange(phi0, res);
1680 block->attr.block.graph_arr[pos] = res;
1681 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1682 only an optimization. */
1688 /* This function returns the last definition of a variable. In case
1689 this variable was last defined in a previous block, Phi nodes are
1690 inserted. If the part of the firm graph containing the definition
1691 is not yet constructed, a dummy Phi node is returned. */
/* get_r_value_internal (PRECISE_EXC_CONTEXT variant): last definition of
   local `pos` in `block`.  Unlike the fast variant, a revisit always finds a
   Phi0 already planted by phi_merge, so no NULL/obstack trick is needed. */
1693 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1696 /* There are 4 cases to treat.
1698 1. The block is not mature and we visit it the first time. We can not
1699 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1700 predecessors is returned. This node is added to the linked list (field
1701 "link") of the containing block to be completed when this block is
1702 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1705 2. The value is already known in this block, graph_arr[pos] is set and we
1706 visit the block the first time. We can return the value without
1707 creating any new nodes.
1709 3. The block is mature and we visit it the first time. A Phi node needs
1710 to be created (phi_merge). If the Phi is not needed, as all it's
1711 operands are the same value reaching the block through different
1712 paths, it's optimized away and the value itself is returned.
1714 4. The block is mature, and we visit it the second time. Now two
1715 subcases are possible:
1716 * The value was computed completely the last time we were here. This
1717 is the case if there is no loop. We can return the proper value.
1718 * The recursion that visited this node and set the flag did not
1719 return yet. We are computing a value in a loop and need to
1720 break the recursion. This case only happens if we visited
1721 the same block with phi_merge before, which inserted a Phi0.
1722 So we return the Phi0.
1725 /* case 4 -- already visited. */
1726 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1727 /* As phi_merge allocates a Phi0 this value is always defined. Here
1728 is the critical difference of the two algorithms. */
1729 assert(block->attr.block.graph_arr[pos]);
1730 return block->attr.block.graph_arr[pos];
1733 /* visited the first time */
1734 set_irn_visited(block, get_irg_visited(current_ir_graph));
1736 /* Get the local valid value */
1737 res = block->attr.block.graph_arr[pos];
1739 /* case 2 -- If the value is actually computed, return it. */
1740 if (res) { return res; };
1742 if (block->attr.block.matured) { /* case 3 */
1744 /* The Phi has the same amount of ins as the corresponding block. */
1745 int ins = get_irn_arity(block);
1747 NEW_ARR_A (ir_node *, nin, ins);
1749 /* Phi merge collects the predecessors and then creates a node. */
1750 res = phi_merge (block, pos, mode, nin, ins);
1752 } else { /* case 1 */
1753 /* The block is not mature, we don't know how many in's are needed. A Phi
1754 with zero predecessors is created. Such a Phi node is called Phi0
1755 node. The Phi0 is then added to the list of Phi0 nodes in this block
1756 to be matured by mature_block later.
1757 The Phi0 has to remember the pos of it's internal value. If the real
1758 Phi is computed, pos is used to update the array with the local
1760 res = new_rd_Phi0 (current_ir_graph, block, mode);
1761 res->attr.phi0_pos = pos;
1762 res->link = block->link;
1766 /* If we get here, the frontend missed a use-before-definition error */
     /* NOTE(review): error diagnostic goes to stdout, not stderr -- confirm. */
1769 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1770 assert (mode->code >= irm_F && mode->code <= irm_P);
1771 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1772 get_mode_null(mode));
1775 /* The local valid value is available now. */
1776 block->attr.block.graph_arr[pos] = res;
1781 #endif /* USE_FAST_PHI_CONSTRUCTION */
1783 /* ************************************************************************** */
1785 /** Finalize a Block node, when all control flows are known. */
1786 /** Acceptable parameters are only Block nodes. */
/* mature_block: declare `block`'s predecessor list final -- fix the in-array
   length, upgrade all pending Phi0 placeholders (the block's link chain) to
   real Phi nodes via phi_merge, then optimize and verify the block. */
1788 mature_block (ir_node *block)
1795 assert (get_irn_opcode(block) == iro_Block);
1796 /* @@@ should be commented in
1797 assert (!get_Block_matured(block) && "Block already matured"); */
1799 if (!get_Block_matured(block)) {
1800 ins = ARR_LEN (block->in)-1;
1801 /* Fix block parameters */
1802 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1804 /* An array for building the Phi nodes. */
1805 NEW_ARR_A (ir_node *, nin, ins);
1807 /* Traverse a chain of Phi nodes attached to this block and mature
1809 for (n = block->link; n; n=next) {
1810 inc_irg_visited(current_ir_graph);
     /* replace the Phi0 by a fully merged Phi for its recorded position */
1812 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1815 block->attr.block.matured = 1;
1817 /* Now, as the block is a finished firm node, we can optimize it.
1818 Since other nodes have been allocated since the block was created
1819 we can not free the node on the obstack. Therefore we have to call
1821 Unfortunately the optimization does not change a lot, as all allocated
1822 nodes refer to the unoptimized node.
1823 We can call _2, as global cse has no effect on blocks. */
1824 block = optimize_in_place_2(block);
1825 irn_vrfy_irg(block, current_ir_graph);
/* new_d_* constructors with debug info: thin wrappers that delegate to the
   corresponding new_rd_* constructor on the current graph/current block.
   Note: Const and Const_type go into the *start* block by convention. */
1830 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1832 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
1837 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1839 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1844 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1846 return new_rd_Const_type (db, current_ir_graph, current_ir_graph->start_block,
1852 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1854 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1859 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1861 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
     /* defaultProj: marks the Cond as fragmentary and builds the default-case
        control-flow Proj. */
1866 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1869 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1870 arg->attr.c.kind = fragmentary;
1871 arg->attr.c.default_proj = max_proj;
1872 res = new_Proj (arg, mode_X, max_proj);
1877 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1879 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1884 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1886 return new_rd_Cast (db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1890 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1892 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
/* Arithmetic constructors.  The memory-consuming ones (Quot, DivMod, Div,
   Mod) are fragile ops: while the graph is still being built they get a
   frag_arr snapshot for precise exception contexts -- but only if the node
   was not optimized away to a different opcode. */
1897 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1899 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1904 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1906 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1912 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1914 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1919 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1921 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1926 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1929 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1931 #if PRECISE_EXC_CONTEXT
1932 if ((current_ir_graph->phase_state == phase_building) &&
1933 (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1934 res->attr.frag_arr = new_frag_arr(res);
1941 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1944 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1946 #if PRECISE_EXC_CONTEXT
1947 if ((current_ir_graph->phase_state == phase_building) &&
1948 (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1949 res->attr.frag_arr = new_frag_arr(res);
1956 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1959 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1961 #if PRECISE_EXC_CONTEXT
1962 if ((current_ir_graph->phase_state == phase_building) &&
1963 (get_irn_op(res) == op_Div)) /* Could be optimized away. */
1964 res->attr.frag_arr = new_frag_arr(res);
1971 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1974 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1976 #if PRECISE_EXC_CONTEXT
1977 if ((current_ir_graph->phase_state == phase_building) &&
1978 (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1979 res->attr.frag_arr = new_frag_arr(res);
/* Bitwise / shift / compare / control-flow constructors: pure delegation to
   the new_rd_* variants on the current block. */
1986 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1988 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1993 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1995 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
2000 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2002 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2007 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2009 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2014 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2016 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2021 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2023 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2028 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2030 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2035 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2037 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2042 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2044 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2049 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2051 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2056 new_d_Jmp (dbg_info* db)
2058 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2062 new_d_Cond (dbg_info* db, ir_node *c)
2064 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* Memory-operation constructors.  Call, Load, Store and Alloc are fragile
   ops and receive a frag_arr snapshot during graph construction (unless they
   were optimized away to a different opcode). */
2068 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2072 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2073 store, callee, arity, in, tp);
2074 #if PRECISE_EXC_CONTEXT
2075 if ((current_ir_graph->phase_state == phase_building) &&
2076 (get_irn_op(res) == op_Call)) /* Could be optimized away. */
2077 res->attr.call.frag_arr = new_frag_arr(res);
2084 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2086 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2091 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2093 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2098 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2101 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2103 #if PRECISE_EXC_CONTEXT
2104 if ((current_ir_graph->phase_state == phase_building) &&
2105 (get_irn_op(res) == op_Load)) /* Could be optimized away. */
2106 res->attr.frag_arr = new_frag_arr(res);
2113 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2116 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2118 #if PRECISE_EXC_CONTEXT
2119 if ((current_ir_graph->phase_state == phase_building) &&
2120 (get_irn_op(res) == op_Store)) /* Could be optimized away. */
2121 res->attr.frag_arr = new_frag_arr(res);
2128 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2132 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2133 store, size, alloc_type, where);
2134 #if PRECISE_EXC_CONTEXT
2135 if ((current_ir_graph->phase_state == phase_building) &&
2136 (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
2137 res->attr.a.frag_arr = new_frag_arr(res);
2144 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2146 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2147 store, ptr, size, free_type);
/* Entity selection, symbolic constants and interprocedural-view
   constructors: delegation to new_rd_*.  SymConst lives in the start block;
   simpleSel is Sel with an empty index array. */
2151 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2152 /* GL: objptr was called frame before. Frame was a bad choice for the name
2153 as the operand could as well be a pointer to a dynamic object. */
2155 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2156 store, objptr, 0, NULL, ent);
2160 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2162 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2163 store, objptr, n_index, index, sel);
2167 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2169 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2170 store, objptr, ent));
2174 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2176 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2181 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2183 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
     /* Bad is a singleton per graph. */
2191 return current_ir_graph->bad;
2195 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2197 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2202 new_d_Unknown (ir_mode *m)
2204 return new_rd_Unknown(current_ir_graph, m);
2208 new_d_CallBegin (dbg_info *db, ir_node *call)
2211 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2216 new_d_EndReg (dbg_info *db)
2219 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2224 new_d_EndExcept (dbg_info *db)
2227 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2232 new_d_Break (dbg_info *db)
2234 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2238 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2240 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2245 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2249 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2250 callee, arity, in, tp);
2255 /* ********************************************************************* */
2256 /* Comfortable interface with automatic Phi node construction. */
2257 /* (Uses also constructors of ?? interface, except new_Block.) */
2258 /* ********************************************************************* */
2260 /** Block construction **/
2261 /* immature Block without predecessors */
/* new_d_immBlock: create an immature Block (predecessor count unknown yet,
   dynamic in-array), make it the current block, and set up its graph_arr for
   automatic Phi construction.  Must be matured later via mature_block. */
2262 ir_node *new_d_immBlock (dbg_info* db) {
2265 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2266 /* creates a new dynamic in-array as length of in is -1 */
2267 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2268 current_ir_graph->current_block = res;
2269 res->attr.block.matured = 0;
2270 //res->attr.block.exc = exc_normal;
2271 //res->attr.block.handler_entry = 0;
2272 res->attr.block.irg = current_ir_graph;
2273 res->attr.block.backedge = NULL;
2274 res->attr.block.in_cg = NULL;
2275 res->attr.block.cg_backedge = NULL;
2276 set_Block_block_visited(res, 0);
2278 /* Create and initialize array for Phi-node construction. */
2279 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2280 current_ir_graph->n_loc);
2281 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2283 /* Immature block may not be optimized! */
2284 irn_vrfy_irg (res, current_ir_graph);
     /* (body of new_immBlock: the convenience wrapper without debug info) */
2291 return new_d_immBlock(NULL);
2294 /* add an edge to a jmp/control flow node */
/* add_in_edge: append control-flow predecessor `jmp` to an immature block.
   Illegal once the block has been matured. */
2296 add_in_edge (ir_node *block, ir_node *jmp)
2298 if (block->attr.block.matured) {
2299 assert(0 && "Error: Block already matured!\n");
2302 assert (jmp != NULL);
2303 ARR_APP1 (ir_node *, block->in, jmp);
2307 /* changing the current block */
/* switch_block: make `target` the block new nodes are placed in. */
2309 switch_block (ir_node *target)
2311 current_ir_graph->current_block = target;
2314 /* ************************ */
2315 /* parameter administration */
2317 /* get a value from the parameter array from the current block by its index */
/* get_d_value / get_value: read local variable `pos` in the current block.
   Slot 0 is reserved for the memory state, hence the pos+1 shift. */
2319 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2321 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2322 inc_irg_visited(current_ir_graph);
2324 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2326 /* get a value from the parameter array from the current block by its index */
2328 get_value (int pos, ir_mode *mode)
2330 return get_d_value(NULL, pos, mode);
2333 /* set a value at position pos in the parameter array from the current block */
/* set_value: record `value` as the current definition of local `pos`
   (slot 0 is the memory state, hence pos+1). */
2335 set_value (int pos, ir_node *value)
2337 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2338 assert(pos+1 < current_ir_graph->n_loc);
2339 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2342 /* get the current store */
/* get_store: the memory state is kept in graph_arr slot 0 with mode_M. */
2346 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2347 /* GL: one could call get_value instead */
2348 inc_irg_visited(current_ir_graph);
2349 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2352 /* set the current store */
/* set_store: overwrite the current memory state (graph_arr slot 0). */
2354 set_store (ir_node *store)
2356 /* GL: one could call set_value instead */
2357 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2358 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* keep_alive: protect `ka` from dead-node elimination by attaching it to
   the graph's End node. */
2362 keep_alive (ir_node *ka)
2364 add_End_keepalive(current_ir_graph->end, ka);
2367 /** Useful access routines **/
2368 /* Returns the current block of the current graph. To set the current
2369 block use switch_block(). */
2370 ir_node *get_cur_block() {
2371 return get_irg_current_block(current_ir_graph);
2374 /* Returns the frame type of the current graph */
2375 type *get_cur_frame_type() {
2376 return get_irg_frame_type(current_ir_graph);
2380 /* ********************************************************************* */
2383 /* call once for each run of the library */
/* init_cons: install the language-dependent callback used when an undefined
   local variable is read in the start block (see phi_merge). */
2385 init_cons (default_initialize_local_variable_func_t *func)
2387 default_initialize_local_variable = func;
2390 /* call for each graph */
/* finalize_cons: mark the graph as fully constructed. */
2392 finalize_cons (ir_graph *irg) {
2393 irg->phase_state = phase_high;
/* Convenience constructors without debug info: each forwards to the
   corresponding new_d_* constructor with a NULL dbg_info. */
2397 ir_node *new_Block(int arity, ir_node **in) {
2398 return new_d_Block(NULL, arity, in);
2400 ir_node *new_Start (void) {
2401 return new_d_Start(NULL);
2403 ir_node *new_End (void) {
2404 return new_d_End(NULL);
2406 ir_node *new_Jmp (void) {
2407 return new_d_Jmp(NULL);
2409 ir_node *new_Cond (ir_node *c) {
2410 return new_d_Cond(NULL, c);
2412 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2413 return new_d_Return(NULL, store, arity, in);
2415 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2416 return new_d_Raise(NULL, store, obj);
2418 ir_node *new_Const (ir_mode *mode, tarval *con) {
2419 return new_d_Const(NULL, mode, con);
2421 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2422 return new_d_SymConst(NULL, value, kind);
2424 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2425 return new_d_simpleSel(NULL, store, objptr, ent);
2427 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2429 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2431 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2432 return new_d_InstOf (NULL, store, objptr, ent);
2434 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2436 return new_d_Call(NULL, store, callee, arity, in, tp);
2438 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2439 return new_d_Add(NULL, op1, op2, mode);
2441 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2442 return new_d_Sub(NULL, op1, op2, mode);
2444 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2445 return new_d_Minus(NULL, op, mode);
2447 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2448 return new_d_Mul(NULL, op1, op2, mode);
2450 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2451 return new_d_Quot(NULL, memop, op1, op2);
2453 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2454 return new_d_DivMod(NULL, memop, op1, op2);
2456 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2457 return new_d_Div(NULL, memop, op1, op2);
2459 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2460 return new_d_Mod(NULL, memop, op1, op2);
2462 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2463 return new_d_Abs(NULL, op, mode);
2465 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2466 return new_d_And(NULL, op1, op2, mode);
2468 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2469 return new_d_Or(NULL, op1, op2, mode);
2471 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2472 return new_d_Eor(NULL, op1, op2, mode);
2474 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2475 return new_d_Not(NULL, op, mode);
2477 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2478 return new_d_Shl(NULL, op, k, mode);
2480 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2481 return new_d_Shr(NULL, op, k, mode);
2483 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2484 return new_d_Shrs(NULL, op, k, mode);
2486 #define new_Rotate new_Rot
2487 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2488 return new_d_Rot(NULL, op, k, mode);
2490 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2491 return new_d_Cmp(NULL, op1, op2);
2493 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2494 return new_d_Conv(NULL, op, mode);
2496 ir_node *new_Cast (ir_node *op, type *to_tp) {
2497 return new_d_Cast(NULL, op, to_tp);
2499 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2500 return new_d_Phi(NULL, arity, in, mode);
2502 ir_node *new_Load (ir_node *store, ir_node *addr) {
2503 return new_d_Load(NULL, store, addr);
2505 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2506 return new_d_Store(NULL, store, addr, val);
2508 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2509 where_alloc where) {
2510 return new_d_Alloc(NULL, store, size, alloc_type, where);
2512 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2514 return new_d_Free(NULL, store, ptr, size, free_type);
2516 ir_node *new_Sync (int arity, ir_node **in) {
2517 return new_d_Sync(NULL, arity, in);
2519 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2520 return new_d_Proj(NULL, arg, mode, proj);
2522 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2523 return new_d_defaultProj(NULL, arg, max_proj);
2525 ir_node *new_Tuple (int arity, ir_node **in) {
2526 return new_d_Tuple(NULL, arity, in);
2528 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2529 return new_d_Id(NULL, val, mode);
2531 ir_node *new_Bad (void) {
2534 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2535 return new_d_Confirm (NULL, val, bound, cmp);
2537 ir_node *new_Unknown(ir_mode *m) {
2538 return new_d_Unknown(m);
2540 ir_node *new_CallBegin (ir_node *callee) {
2541 return new_d_CallBegin(NULL, callee);
2543 ir_node *new_EndReg (void) {
2544 return new_d_EndReg(NULL);
2546 ir_node *new_EndExcept (void) {
2547 return new_d_EndExcept(NULL);
2549 ir_node *new_Break (void) {
2550 return new_d_Break(NULL);
2552 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2553 return new_d_Filter(NULL, arg, mode, proj);
2555 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2556 return new_d_FuncCall(NULL, callee, arity, in, tp);