3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
31 # include "irflag_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
44 * language dependent initialization variable
/* Frontend callback invoked when a local variable is read before it was
   ever set; NULL means no language-specific default initialization. */
46 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
48 /*** ******************************************** */
49 /** private interfaces, for professional use only */
51 /* Constructs a Block with a fixed number of predecessors.
52 Does not set current_block. Can not be used with automatic
53 Phi node construction. */
55 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
59 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* A block built with all predecessors known is immediately mature. */
60 set_Block_matured(res, 1);
61 set_Block_block_visited(res, 0);
63 /* res->attr.block.exc = exc_normal; */
64 /* res->attr.block.handler_entry = 0; */
65 res->attr.block.irg = irg;
/* Loop back edges and interprocedural-view fields start out empty. */
66 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
67 res->attr.block.in_cg = NULL;
68 res->attr.block.cg_backedge = NULL;
70 irn_vrfy_irg (res, irg);
/* Constructs a Start node (mode_T tuple of initial values) in the given block. */
75 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
79 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
80 /* res->attr.start.irg = irg; */
82 irn_vrfy_irg (res, irg);
/* Constructs an End node.  Arity -1 creates a dynamic in-array so
   keep-alive edges can be appended later. */
87 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
91 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
93 irn_vrfy_irg (res, irg);
97 /* Creates a Phi node with all predecessors. Calling this constructor
98 is only allowed if the corresponding block is mature. */
100 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
104 bool has_unknown = false;
106 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block claims maturity, its arity must match the Phi's arity. */
108 if ( get_Block_matured(block) )
109 assert( intern_get_irn_arity(block) == arity );
111 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
113 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Phis with Unknown predecessors must not be optimized yet: the Unknowns
   may later be replaced by real values. */
115 for (i = arity-1; i >= 0; i--) if (intern_get_irn_op(in[i]) == op_Unknown) has_unknown = true;
116 if (!has_unknown) res = optimize_node (res);
117 irn_vrfy_irg (res, irg);
119 /* Memory Phis in endless loops must be kept alive.
120 As we can't distinguish these easily we keep all of them alive. */
121 if ((res->op == op_Phi) && (mode == mode_M))
122 add_End_keepalive(irg->end, res);
/* Constructs a Const node carrying tarval con, with an explicit type. */
127 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
130 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
131 res->attr.con.tv = con;
132 set_Const_type(res, tp); /* Call method because of complex assertion. */
/* optimize_node may CSE this Const with an existing one; that node must
   already carry the same type. */
133 res = optimize_node (res);
134 assert(get_Const_type(res) == tp);
135 irn_vrfy_irg (res, irg);
/* NOTE(review): this call appears after verification; presumably dead or
   conditionally compiled in the full source -- confirm against original. */
138 res = local_optimize_newby (res);
/* Constructs a Const node; derives a pointer type when the tarval is an
   entity, otherwise uses unknown_type. */
145 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
147 type *tp = unknown_type;
148 if (tarval_is_entity(con))
149 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
150 return new_rd_Const_type (db, irg, block, mode, con, tp);
/* Constructs an Id node (transparent forwarder to val); usually optimized away. */
154 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
159 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
160 res = optimize_node (res);
161 irn_vrfy_irg (res, irg);
/* Constructs a Proj node selecting result number proj from the tuple arg. */
166 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
172 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
173 res->attr.proj = proj;
/* Sanity: the projected node and its block must exist. */
176 assert(get_Proj_pred(res));
177 assert(get_nodes_Block(get_Proj_pred(res)));
179 res = optimize_node (res);
181 irn_vrfy_irg (res, irg);
/* Constructs the default Proj of a Cond node.  Side effect: marks the
   Cond argument as fragmentary and records max_proj as its default. */
187 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
191 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
192 arg->attr.c.kind = fragmentary;
193 arg->attr.c.default_proj = max_proj;
194 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node converting op to the given mode. */
199 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
204 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
205 res = optimize_node (res);
206 irn_vrfy_irg (res, irg);
/* Constructs a Cast node: changes the high-level type only, the mode of
   op is kept unchanged. */
211 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
214 res = new_ir_node (db, irg, block, op_Cast, intern_get_irn_mode(op), 1, &op);
215 res->attr.cast.totype = to_tp;
216 res = optimize_node (res);
217 irn_vrfy_irg (res, irg);
/* Constructs a Tuple node (mode_T) grouping arity values. */
222 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
226 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
227 res = optimize_node (res);
228 irn_vrfy_irg (res, irg);
/* Binary/unary arithmetic constructors.  Each packs its operands into a
   local in-array, builds the node, runs local optimization and verifies. */
233 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
234 ir_node *op1, ir_node *op2, ir_mode *mode)
240 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
241 res = optimize_node (res);
242 irn_vrfy_irg (res, irg);
247 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
248 ir_node *op1, ir_node *op2, ir_mode *mode)
254 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
255 res = optimize_node (res);
256 irn_vrfy_irg (res, irg);
/* Unary arithmetic negation. */
261 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
262 ir_node *op, ir_mode *mode)
267 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
268 res = optimize_node (res);
269 irn_vrfy_irg (res, irg);
274 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
275 ir_node *op1, ir_node *op2, ir_mode *mode)
281 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
282 res = optimize_node (res);
283 irn_vrfy_irg (res, irg);
/* Division-family constructors.  These may trap, hence they take a memory
   operand and produce a mode_T tuple (memory, exception, result). */
288 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
289 ir_node *memop, ir_node *op1, ir_node *op2)
296 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
297 res = optimize_node (res);
298 irn_vrfy_irg (res, irg);
303 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
304 ir_node *memop, ir_node *op1, ir_node *op2)
311 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
312 res = optimize_node (res);
313 irn_vrfy_irg (res, irg);
318 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
319 ir_node *memop, ir_node *op1, ir_node *op2)
326 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
327 res = optimize_node (res);
328 irn_vrfy_irg (res, irg);
333 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
334 ir_node *memop, ir_node *op1, ir_node *op2)
341 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
342 res = optimize_node (res);
343 irn_vrfy_irg (res, irg);
/* Bitwise/logical constructors: And, Or, Eor (xor) and unary Not. */
348 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
349 ir_node *op1, ir_node *op2, ir_mode *mode)
355 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
356 res = optimize_node (res);
357 irn_vrfy_irg (res, irg);
362 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
363 ir_node *op1, ir_node *op2, ir_mode *mode)
369 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
370 res = optimize_node (res);
371 irn_vrfy_irg (res, irg);
376 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
377 ir_node *op1, ir_node *op2, ir_mode *mode)
383 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
384 res = optimize_node (res);
385 irn_vrfy_irg (res, irg);
390 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
391 ir_node *op, ir_mode *mode)
396 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
397 res = optimize_node (res);
398 irn_vrfy_irg (res, irg);
/* Shift/rotate constructors: op shifted/rotated by k. */
403 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
404 ir_node *op, ir_node *k, ir_mode *mode)
410 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
411 res = optimize_node (res);
412 irn_vrfy_irg (res, irg);
417 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
418 ir_node *op, ir_node *k, ir_mode *mode)
424 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
425 res = optimize_node (res);
426 irn_vrfy_irg (res, irg);
/* Arithmetic (sign-preserving) shift right. */
431 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
432 ir_node *op, ir_node *k, ir_mode *mode)
438 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
439 res = optimize_node (res);
440 irn_vrfy_irg (res, irg);
445 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
446 ir_node *op, ir_node *k, ir_mode *mode)
452 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
453 res = optimize_node (res);
454 irn_vrfy_irg (res, irg);
/* Abs: absolute value of op. */
459 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
460 ir_node *op, ir_mode *mode)
465 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
466 res = optimize_node (res);
467 irn_vrfy_irg (res, irg);
/* Cmp: compares op1 with op2; result is a mode_T tuple of relation Projs. */
472 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
473 ir_node *op1, ir_node *op2)
479 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
480 res = optimize_node (res);
481 irn_vrfy_irg (res, irg);
/* Constructs an unconditional Jmp (mode_X control flow) out of block. */
486 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
489 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
490 res = optimize_node (res);
491 irn_vrfy_irg (res, irg);
/* Constructs a Cond node branching on c; initially a dense jump table
   with default projection 0. */
496 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
501 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
502 res->attr.c.kind = dense;
503 res->attr.c.default_proj = 0;
504 res = optimize_node (res);
505 irn_vrfy_irg (res, irg);
/* Constructs a Call: in-array layout is [store, callee, arg0..argN-1];
   tp must be a method type describing the callee's signature. */
510 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
511 ir_node *callee, int arity, ir_node **in, type *tp)
518 NEW_ARR_A (ir_node *, r_in, r_arity);
521 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
523 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
525 assert(is_method_type(tp));
526 set_Call_type(res, tp);
/* No statically known callees yet (filled in by call-graph analysis). */
527 res->attr.call.callee_arr = NULL;
528 res = optimize_node (res);
529 irn_vrfy_irg (res, irg);
/* Constructs a Return: in-array layout is [store, res0..resN-1]. */
534 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
535 ir_node *store, int arity, ir_node **in)
542 NEW_ARR_A (ir_node *, r_in, r_arity);
544 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
545 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
546 res = optimize_node (res);
547 irn_vrfy_irg (res, irg);
/* Constructs a Raise node throwing obj, threading the memory state store. */
552 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
558 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
559 res = optimize_node (res);
560 irn_vrfy_irg (res, irg);
/* Constructs a Load from address adr; result is a mode_T tuple
   (memory, exception, loaded value). */
565 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
566 ir_node *store, ir_node *adr)
572 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
574 res = optimize_node (res);
575 irn_vrfy_irg (res, irg);
/* Constructs a Store of val to address adr, threading the memory state. */
580 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
581 ir_node *store, ir_node *adr, ir_node *val)
588 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
590 res = optimize_node (res);
592 irn_vrfy_irg (res, irg);
/* Constructs an Alloc of `size` bytes of alloc_type; `where` selects
   stack or heap allocation. */
597 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
598 ir_node *size, type *alloc_type, where_alloc where)
604 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
606 res->attr.a.where = where;
607 res->attr.a.type = alloc_type;
609 res = optimize_node (res);
610 irn_vrfy_irg (res, irg);
/* Constructs a Free releasing `size` bytes at ptr of type free_type. */
615 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
616 ir_node *ptr, ir_node *size, type *free_type)
623 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
625 res->attr.f = free_type;
627 res = optimize_node (res);
628 irn_vrfy_irg (res, irg);
/* Constructs a Sel selecting entity ent relative to objptr;
   in-array layout is [store, objptr, index0..indexN-1]. */
633 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
634 int arity, ir_node **in, entity *ent)
640 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
643 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
646 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
647 res = new_ir_node (db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
649 res->attr.s.ent = ent;
651 res = optimize_node (res);
652 irn_vrfy_irg (res, irg);
/* Constructs an InstOf (runtime type test of objptr against ent).
   NOTE(review): the node is created with op_Sel, not a dedicated InstOf
   opcode, and optimization/verification are disabled below -- this looks
   like a placeholder implementation; confirm against the full source. */
657 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
658 ir_node *objptr, type *ent)
665 NEW_ARR_A (ir_node *, r_in, r_arity);
669 res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
671 res->attr.io.ent = ent;
673 /* res = optimize (res);
674 * irn_vrfy_irg (res, irg); */
/* Constructs a SymConst.  `value` is either an ident (linkage_ptr_info)
   or a type (type_tag / size), discriminated by symkind. */
679 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
680 symconst_kind symkind)
684 if (symkind == linkage_ptr_info)
688 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
690 res->attr.i.num = symkind;
691 if (symkind == linkage_ptr_info) {
692 res->attr.i.tori.ptrinfo = (ident *)value;
/* Otherwise the payload must be a type (for type_tag or size queries). */
694 assert ( ( (symkind == type_tag)
695 || (symkind == size))
696 && (is_type(value)));
697 res->attr.i.tori.typ = (type *)value;
699 res = optimize_node (res);
700 irn_vrfy_irg (res, irg);
/* Constructs a Sync merging several memory states (mode_M) into one. */
705 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
709 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
711 res = optimize_node (res);
712 irn_vrfy_irg (res, irg);
/* Returns the unique Bad node of the graph (a singleton per irg). */
717 new_rd_Bad (ir_graph *irg)
/* Constructs a Confirm: asserts that `val cmp bound` holds from here on;
   the result has the same mode as val. */
723 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
725 ir_node *in[2], *res;
729 res = new_ir_node (db, irg, block, op_Confirm, intern_get_irn_mode(val), 2, in);
731 res->attr.confirm_cmp = cmp;
733 res = optimize_node (res);
734 irn_vrfy_irg(res, irg);
/* Constructs an Unknown node of mode m, placed in the start block. */
739 new_rd_Unknown (ir_graph *irg, ir_mode *m)
741 return new_ir_node (NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* Constructs a CallBegin (interprocedural view), linked back to its Call. */
745 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
749 in[0] = get_Call_ptr(call);
750 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
751 /* res->attr.callbegin.irg = irg; */
752 res->attr.callbegin.call = call;
753 res = optimize_node (res);
754 irn_vrfy_irg (res, irg);
/* Constructs an EndReg (regular procedure end, interprocedural view);
   arity -1 gives a dynamic in-array. */
759 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
763 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
766 irn_vrfy_irg (res, irg);
/* Constructs an EndExcept (exceptional procedure end, interprocedural
   view) and registers it as the graph's end_except node. */
771 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
775 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
776 irg->end_except = res;
778 irn_vrfy_irg (res, irg);
/* Constructs a Break node (control flow, interprocedural view). */
783 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
786 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
787 res = optimize_node (res);
788 irn_vrfy_irg (res, irg);
/* Constructs a Filter: the interprocedural counterpart of Proj, selecting
   projection `proj` from arg; in_cg/backedge are filled in later. */
793 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
799 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
800 res->attr.filter.proj = proj;
801 res->attr.filter.in_cg = NULL;
802 res->attr.filter.backedge = NULL;
805 assert(get_Proj_pred(res));
806 assert(get_nodes_Block(get_Proj_pred(res)));
808 res = optimize_node (res);
810 irn_vrfy_irg (res, irg);
/* Constructs a FuncCall: a call without memory side effects, hence no
   store operand; in-array layout is [callee, arg0..argN-1]. */
816 new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
817 ir_node *callee, int arity, ir_node **in, type *tp)
824 NEW_ARR_A (ir_node *, r_in, r_arity);
826 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
828 res = new_ir_node (db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
830 assert(is_method_type(tp));
831 set_FuncCall_type(res, tp);
832 res->attr.call.callee_arr = NULL;
833 res = optimize_node (res);
834 irn_vrfy_irg (res, irg);
/* Convenience wrappers: new_r_* constructors without debug info.
   Each simply forwards to its new_rd_* counterpart with db == NULL. */
839 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
840 return new_rd_Block(NULL, irg, arity, in);
842 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
843 return new_rd_Start(NULL, irg, block);
845 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
846 return new_rd_End(NULL, irg, block);
848 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
849 return new_rd_Jmp(NULL, irg, block);
851 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
852 return new_rd_Cond(NULL, irg, block, c);
854 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
855 ir_node *store, int arity, ir_node **in) {
856 return new_rd_Return(NULL, irg, block, store, arity, in);
858 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
859 ir_node *store, ir_node *obj) {
860 return new_rd_Raise(NULL, irg, block, store, obj);
862 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
863 ir_mode *mode, tarval *con) {
864 return new_rd_Const(NULL, irg, block, mode, con);
866 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
867 type_or_id_p value, symconst_kind symkind) {
868 return new_rd_SymConst(NULL, irg, block, value, symkind);
870 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
871 ir_node *objptr, int n_index, ir_node **index,
873 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
875 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
877 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
879 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
880 ir_node *callee, int arity, ir_node **in,
882 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
/* Debug-info-free wrappers for the arithmetic, bitwise, shift, compare
   and conversion constructors. */
884 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
885 ir_node *op1, ir_node *op2, ir_mode *mode) {
886 return new_rd_Add(NULL, irg, block, op1, op2, mode);
888 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
889 ir_node *op1, ir_node *op2, ir_mode *mode) {
890 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
892 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
893 ir_node *op, ir_mode *mode) {
894 return new_rd_Minus(NULL, irg, block, op, mode);
896 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
897 ir_node *op1, ir_node *op2, ir_mode *mode) {
898 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
900 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
901 ir_node *memop, ir_node *op1, ir_node *op2) {
902 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
904 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
905 ir_node *memop, ir_node *op1, ir_node *op2) {
906 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
908 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
909 ir_node *memop, ir_node *op1, ir_node *op2) {
910 return new_rd_Div(NULL, irg, block, memop, op1, op2);
912 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
913 ir_node *memop, ir_node *op1, ir_node *op2) {
914 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
916 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
917 ir_node *op, ir_mode *mode) {
918 return new_rd_Abs(NULL, irg, block, op, mode);
920 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
921 ir_node *op1, ir_node *op2, ir_mode *mode) {
922 return new_rd_And(NULL, irg, block, op1, op2, mode);
924 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
925 ir_node *op1, ir_node *op2, ir_mode *mode) {
926 return new_rd_Or(NULL, irg, block, op1, op2, mode);
928 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
929 ir_node *op1, ir_node *op2, ir_mode *mode) {
930 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
932 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
933 ir_node *op, ir_mode *mode) {
934 return new_rd_Not(NULL, irg, block, op, mode);
936 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
937 ir_node *op1, ir_node *op2) {
938 return new_rd_Cmp(NULL, irg, block, op1, op2);
940 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
941 ir_node *op, ir_node *k, ir_mode *mode) {
942 return new_rd_Shl(NULL, irg, block, op, k, mode);
944 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
945 ir_node *op, ir_node *k, ir_mode *mode) {
946 return new_rd_Shr(NULL, irg, block, op, k, mode);
948 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
949 ir_node *op, ir_node *k, ir_mode *mode) {
950 return new_rd_Shrs(NULL, irg, block, op, k, mode);
952 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
953 ir_node *op, ir_node *k, ir_mode *mode) {
954 return new_rd_Rot(NULL, irg, block, op, k, mode);
956 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
957 ir_node *op, ir_mode *mode) {
958 return new_rd_Conv(NULL, irg, block, op, mode);
960 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
961 return new_rd_Cast(NULL, irg, block, op, to_tp);
/* Debug-info-free wrappers for Phi, memory, allocation and Proj/Tuple
   constructors. */
963 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
964 ir_node **in, ir_mode *mode) {
965 return new_rd_Phi(NULL, irg, block, arity, in, mode);
967 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
968 ir_node *store, ir_node *adr) {
969 return new_rd_Load(NULL, irg, block, store, adr);
971 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
972 ir_node *store, ir_node *adr, ir_node *val) {
973 return new_rd_Store(NULL, irg, block, store, adr, val);
975 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
976 ir_node *size, type *alloc_type, where_alloc where) {
977 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
979 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
980 ir_node *ptr, ir_node *size, type *free_type) {
981 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
983 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
984 return new_rd_Sync(NULL, irg, block, arity, in);
986 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
987 ir_mode *mode, long proj) {
988 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
990 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
992 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
994 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
995 int arity, ir_node **in) {
996 return new_rd_Tuple(NULL, irg, block, arity, in );
998 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
999 ir_node *val, ir_mode *mode) {
1000 return new_rd_Id(NULL, irg, block, val, mode);
1002 INLINE ir_node *new_r_Bad (ir_graph *irg) {
1003 return new_rd_Bad(irg);
1005 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1006 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
/* Debug-info-free wrappers for the interprocedural-view constructors. */
1008 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1009 return new_rd_Unknown(irg, m);
1011 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1012 return new_rd_CallBegin(NULL, irg, block, callee);
1014 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1015 return new_rd_EndReg(NULL, irg, block);
1017 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1018 return new_rd_EndExcept(NULL, irg, block);
1020 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1021 return new_rd_Break(NULL, irg, block);
1023 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1024 ir_mode *mode, long proj) {
1025 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1027 INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
1028 ir_node *callee, int arity, ir_node **in,
1030 return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
1034 /** ********************/
1035 /** public interfaces */
1036 /** construction tools */
1040 * - create a new Start node in the current block
1042 * @return s - pointer to the created Start node
/* Like new_rd_Start, but implicitly uses current_ir_graph and its
   current_block (state-based construction interface). */
1047 new_d_Start (dbg_info* db)
1051 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1052 op_Start, mode_T, 0, NULL);
1053 /* res->attr.start.irg = current_ir_graph; */
1055 res = optimize_node (res);
1056 irn_vrfy_irg (res, current_ir_graph);
/* Creates the End node of current_ir_graph in its current block;
   arity -1 gives a dynamic in-array for keep-alive edges. */
1061 new_d_End (dbg_info* db)
1064 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1065 op_End, mode_X, -1, NULL);
1066 res = optimize_node (res);
1067 irn_vrfy_irg (res, current_ir_graph);
1072 /* Constructs a Block with a fixed number of predecessors.
1073 Does set current_block. Can be used with automatic Phi
1074 node construction. */
1076 new_d_Block (dbg_info* db, int arity, ir_node **in)
1080 bool has_unknown = false;
1082 res = new_rd_Block (db, current_ir_graph, arity, in);
1084 /* Create and initialize array for Phi-node construction. */
1085 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1086 current_ir_graph->n_loc);
1087 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Blocks with Unknown predecessors must not be optimized yet. */
1089 for (i = arity-1; i >= 0; i--) if (intern_get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1091 if (!has_unknown) res = optimize_node (res);
/* Subsequent state-based constructors place their nodes here. */
1092 current_ir_graph->current_block = res;
1094 irn_vrfy_irg (res, current_ir_graph);
1099 /* ***********************************************************************/
1100 /* Methods necessary for automatic Phi node creation */
/* Forward declarations of the mutually recursive Phi-construction helpers. */
1102 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1103 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1104 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1105 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1107 Call Graph: ( A ---> B == A "calls" B)
1109 get_value mature_block
1117 get_r_value_internal |
1121 new_rd_Phi0 new_rd_Phi_in
1123 * *************************************************************************** */
1125 /** Creates a Phi node with 0 predecessors */
1126 static INLINE ir_node *
1127 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
/* A Phi0 is a placeholder in an immature block; it is completed (or turned
   into an Id) when the block matures.  Deliberately not optimized here. */
1130 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1131 irn_vrfy_irg (res, irg);
1135 /* There are two implementations of the Phi node construction. The first
1136 is faster, but does not work for blocks with more than 2 predecessors.
1137 The second works always but is slower and causes more unnecessary Phi
1139 Select the implementations by the following preprocessor flag set in
1141 #if USE_FAST_PHI_CONSTRUCTION
1143 /* This is a stack used for allocating and deallocating nodes in
1144 new_rd_Phi_in. The original implementation used the obstack
1145 to model this stack, now it is explicit. This reduces side effects.
1147 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in_stack. */
1148 INLINE Phi_in_stack *
1149 new_Phi_in_stack(void) {
1152 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1154 res->stack = NEW_ARR_F (ir_node *, 1);
/* Releases a Phi_in_stack and its flexible array. */
1161 free_Phi_in_stack(Phi_in_stack *s) {
1162 DEL_ARR_F(s->stack);
/* Pushes a discarded Phi node onto the stack for later reuse. */
1166 free_to_Phi_in_stack(ir_node *phi) {
1167 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1168 current_ir_graph->Phi_in_stack->pos)
1169 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1171 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1173 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node: either popped from the reuse stack and re-initialized,
   or freshly allocated when the stack is empty. */
1176 static INLINE ir_node *
1177 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1178 int arity, ir_node **in) {
1180 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1181 int pos = current_ir_graph->Phi_in_stack->pos;
1185 /* We need to allocate a new node */
/* NOTE(review): `db` is not a parameter of this function -- verify against
   the full source (likely NULL there). */
1186 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1187 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1189 /* reuse the old node and initialize it again. */
1192 assert (res->kind == k_ir_node);
1193 assert (res->op == op_Phi);
1197 assert (arity >= 0);
1198 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is the block; operands start at in[1]. */
1199 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1201 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1203 (current_ir_graph->Phi_in_stack->pos)--;
1209 /* Creates a Phi node with a given, fixed array **in of predecessors.
1210 If the Phi node is unnecessary, as the same value reaches the block
1211 through all control flow paths, it is eliminated and the value
1212 returned directly. This constructor is only intended for use in
1213 the automatic Phi node generation triggered by get_value or mature.
1214 The implementation is quite tricky and depends on the fact, that
1215 the nodes are allocated on a stack:
1216 The in array contains predecessors and NULLs. The NULLs appear,
1217 if get_r_value_internal, that computed the predecessors, reached
1218 the same block on two paths. In this case the same value reaches
1219 this block on both paths, there is no definition in between. We need
1220 not allocate a Phi where these paths merge, but we have to communicate
1221 this fact to the caller. This happens by returning a pointer to the
1222 node the caller _will_ allocate. (Yes, we predict the address. We can
1223 do so because the nodes are allocated on the obstack.) The caller then
1224 finds a pointer to itself and, when this routine is called again,
1227 static INLINE ir_node *
1228 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1229 ir_node **in, int ins)
1232 ir_node *res, *known;
1234 /* allocate a new node on the obstack.
1235 This can return a node to which some of the pointers in the in-array
1237 Attention: the constructor copies the in array, i.e., the later changes
1238 to the array in this routine do not affect the constructed node! If
1239 the in array contains NULLs, there will be missing predecessors in the
1241 Is this a possible internal state of the Phi node generation? */
1242 #if USE_EXPLICIT_PHI_IN_STACK
1243 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1245 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1246 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1248 /* The in-array can contain NULLs. These were returned by
1249 get_r_value_internal if it reached the same block/definition on a
1251 The NULLs are replaced by the node itself to simplify the test in the
1253 for (i=0; i < ins; ++i)
1254 if (in[i] == NULL) in[i] = res;
1256 /* This loop checks whether the Phi has more than one predecessor.
1257 If so, it is a real Phi node and we break the loop. Else the
1258 Phi node merges the same definition on several paths and therefore
1260 for (i=0; i < ins; ++i)
1262 if (in[i]==res || in[i]==known) continue;
1270 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* The unneeded Phi is returned to the reuse stack, or popped straight off
   the obstack (it is the newest allocation there). */
1272 #if USE_EXPLICIT_PHI_IN_STACK
1273 free_to_Phi_in_stack(res);
1275 obstack_free (current_ir_graph->obst, res);
1279 res = optimize_node (res);
1280 irn_vrfy_irg (res, irg);
1283 /* return the pointer to the Phi node. This node might be deallocated! */
1288 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1291 allocates and returns this node. The routine called to allocate the
1292 node might optimize it away and return a real value, or even a pointer
1293 to a deallocated Phi node on top of the obstack!
1294 This function is called with an in-array of proper size. **/
1296 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1298 ir_node *prevBlock, *res;
1301 /* This loop goes to all predecessor blocks of the block the Phi node is in
1302 and there finds the operands of the Phi node by calling
1303 get_r_value_internal. */
1304 for (i = 1; i <= ins; ++i) {
1305 assert (block->in[i]);
1306 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1308 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1311 /* After collecting all predecessors into the array nin a new Phi node
1312 with these predecessors is created. This constructor contains an
1313 optimization: If all predecessors of the Phi node are identical it
1314 returns the only operand instead of a new Phi node. If the value
1315 passes two different control flow edges without being defined, and
1316 this is the second path treated, a pointer to the node that will be
1317 allocated for the first path (recursion) is returned. We already
1318 know the address of this node, as it is the next node to be allocated
1319 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1320 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1322 /* Now we know the value for "pos" and can enter it in the array with
1323 all known local variables. Attention: this might be a pointer to
1324 a node, that later will be allocated!!! See new_rd_Phi_in.
1325 If this is called in mature, after some set_value in the same block,
1326 the proper value must not be overwritten:
1328 get_value (makes Phi0, puts it into graph_arr)
1329 set_value (overwrites Phi0 in graph_arr)
1330 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1333 if (!block->attr.block.graph_arr[pos]) {
1334 block->attr.block.graph_arr[pos] = res;
1336 /* printf(" value already computed by %s\n",
1337 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1343 /* This function returns the last definition of a variable. In case
1344 this variable was last defined in a previous block, Phi nodes are
1345 inserted. If the part of the firm graph containing the definition
1346 is not yet constructed, a dummy Phi node is returned. */
1348 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1351 /* There are 4 cases to treat.
1353 1. The block is not mature and we visit it the first time. We can not
1354 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1355 predecessors is returned. This node is added to the linked list (field
1356 "link") of the containing block to be completed when this block is
1357 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1360 2. The value is already known in this block, graph_arr[pos] is set and we
1361 visit the block the first time. We can return the value without
1362 creating any new nodes.
1364 3. The block is mature and we visit it the first time. A Phi node needs
1365 to be created (phi_merge). If the Phi is not needed, as all it's
1366 operands are the same value reaching the block through different
1367 paths, it's optimized away and the value itself is returned.
1369 4. The block is mature, and we visit it the second time. Now two
1370 subcases are possible:
1371 * The value was computed completely the last time we were here. This
1372 is the case if there is no loop. We can return the proper value.
1373 * The recursion that visited this node and set the flag did not
1374 return yet. We are computing a value in a loop and need to
1375 break the recursion without knowing the result yet.
1376 @@@ strange case. Straight forward we would create a Phi before
1377 starting the computation of it's predecessors. In this case we will
1378 find a Phi here in any case. The problem is that this implementation
1379 only creates a Phi after computing the predecessors, so that it is
1380 hard to compute self references of this Phi. @@@
1381 There is no simple check for the second subcase. Therefore we check
1382 for a second visit and treat all such cases as the second subcase.
1383 Anyways, the basic situation is the same: we reached a block
1384 on two paths without finding a definition of the value: No Phi
1385 nodes are needed on both paths.
1386 We return this information "Two paths, no Phi needed" by a very tricky
1387 implementation that relies on the fact that an obstack is a stack and
1388 will return a node with the same address on different allocations.
1389 Look also at phi_merge and new_rd_phi_in to understand this.
1390 @@@ Unfortunately this does not work, see testprogram
1391 three_cfpred_example.
1395 /* case 4 -- already visited. */
1396 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1398 /* visited the first time */
1399 set_irn_visited(block, get_irg_visited(current_ir_graph));
1401 /* Get the local valid value */
1402 res = block->attr.block.graph_arr[pos];
1404 /* case 2 -- If the value is actually computed, return it. */
1405 if (res) return res;
1407 if (block->attr.block.matured) { /* case 3 */
1409 /* The Phi has the same amount of ins as the corresponding block. */
1410 int ins = intern_get_irn_arity(block);
1412 NEW_ARR_A (ir_node *, nin, ins);
1414 /* Phi merge collects the predecessors and then creates a node. */
1415 res = phi_merge (block, pos, mode, nin, ins);
1417 } else { /* case 1 */
1418 /* The block is not mature, we don't know how many in's are needed. A Phi
1419 with zero predecessors is created. Such a Phi node is called Phi0
1420 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1421 to the list of Phi0 nodes in this block to be matured by mature_block
1423 The Phi0 has to remember the pos of it's internal value. If the real
1424 Phi is computed, pos is used to update the array with the local
1427 res = new_rd_Phi0 (current_ir_graph, block, mode);
1428 res->attr.phi0_pos = pos;
/* Chain the Phi0 into the block's list of pending Phi0s (field "link"). */
1429 res->link = block->link;
1433 /* If we get here, the frontend missed a use-before-definition error */
1436 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
/* NOTE(review): only numeric/pointer modes (irm_F..irm_P) have a null
   tarval to fall back on; other modes trip this assert. */
1437 assert (mode->code >= irm_F && mode->code <= irm_P);
1438 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1439 tarval_mode_null[mode->code]);
1442 /* The local valid value is available now. */
1443 block->attr.block.graph_arr[pos] = res;
1451 it starts the recursion. This causes an Id at the entry of
1452 every block that has no definition of the value! **/
1454 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy stack management: this variant of the Phi_in construction keeps
   no explicit stack state, so allocation/free are no-ops. */
1456 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1457 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/** Creates a Phi node in "block" with the "ins" predecessors given in "in".
    If at most one distinct non-Bad predecessor remains, the Phi is not
    needed: the node is freed from the obstack and the single operand (or
    Unknown) is returned instead. */
1460 static INLINE ir_node *
1461 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1462 ir_node **in, int ins)
1465 ir_node *res, *known;
1467 /* Allocate a new node on the obstack. The allocation copies the in
1469 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1470 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1472 /* This loop checks whether the Phi has more than one predecessor.
1473 If so, it is a real Phi node and we break the loop. Else the
1474 Phi node merges the same definition on several paths and therefore
1475 is not needed. Don't consider Bad nodes! */
1477 for (i=0; i < ins; ++i)
1481 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1489 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1492 obstack_free (current_ir_graph->obst, res);
1495 /* An undefined value, e.g., in unreachable code. */
1499 res = optimize_node (res);
1500 irn_vrfy_irg (res, irg);
1501 /* Memory Phis in endless loops must be kept alive.
1502 As we can't distinguish these easily we keep all of them alive. */
1503 if ((res->op == op_Phi) && (mode == mode_M))
1504 add_End_keepalive(irg->end, res);
1511 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1513 #if PRECISE_EXC_CONTEXT
1515 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/** Allocates the "fragment array" for a fragile op "n": a snapshot of the
    current block's graph_arr, used to reconstruct values along the
    exceptional control flow edge (PRECISE_EXC_CONTEXT). */
1517 static INLINE ir_node ** new_frag_arr (ir_node *n)
1521 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1522 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1523 sizeof(ir_node *)*current_ir_graph->n_loc);
1524 /* turn off optimization before allocating Proj nodes, as res isn't
1526 opt = get_opt_optimize(); set_optimize(0);
1527 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* NOTE(review): 3 appears to be the memory-result Proj number of Call while
   other fragile ops use 0 -- confirm against the pn_* enums. */
1528 if (intern_get_irn_op(n) == op_Call)
1529 arr[0] = new_Proj(n, mode_M, 3);
1531 arr[0] = new_Proj(n, mode_M, 0);
/* Remember the fragile op itself in the last slot so set_frag_value can
   chain through successive fragile ops in the same block. */
1533 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/** Returns the fragment array of fragile op "n". The array lives in a
    different attribute union member depending on the opcode. */
1537 static INLINE ir_node **
1538 get_frag_arr (ir_node *n) {
1539 if (intern_get_irn_op(n) == op_Call) {
1540 return n->attr.call.frag_arr;
1541 } else if (intern_get_irn_op(n) == op_Alloc) {
1542 return n->attr.a.frag_arr;
1544 return n->attr.frag_arr;
/** Stores "val" at slot "pos" of "frag_arr" only if that slot is still
    empty (never overwrites), then recurses into the frag array of the next
    fragile op, which is chained through slot n_loc-1 (see new_frag_arr). */
1549 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1550 if (!frag_arr[pos]) frag_arr[pos] = val;
1551 if (frag_arr[current_ir_graph->n_loc - 1])
1552 set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/** Returns the value of variable "pos" as visible along the exceptional
    edge of fragile op "cfOp" in "block", consulting cfOp's fragment array
    and building Phi/Phi0 nodes where a later set_value shadowed it. */
1556 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1560 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1562 frag_arr = get_frag_arr(cfOp);
1563 res = frag_arr[pos];
1565 if (block->attr.block.graph_arr[pos]) {
1566 /* There was a set_value after the cfOp and no get_value before that
1567 set_value. We must build a Phi node now. */
1568 if (block->attr.block.matured) {
1569 int ins = intern_get_irn_arity(block);
1571 NEW_ARR_A (ir_node *, nin, ins);
1572 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 chained into the block's link list,
   to be completed by mature_block. */
1574 res = new_rd_Phi0 (current_ir_graph, block, mode);
1575 res->attr.phi0_pos = pos;
1576 res->link = block->link;
1580 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1581 but this should be better: (remove comment if this works) */
1582 /* It's a Phi, we can write this into all graph_arrs with NULL */
1583 set_frag_value(block->attr.block.graph_arr, pos, res);
1585 res = get_r_value_internal(block, pos, mode);
1586 set_frag_value(block->attr.block.graph_arr, pos, res);
1594 computes the predecessors for the real phi node, and then
1595 allocates and returns this node. The routine called to allocate the
1596 node might optimize it away and return a real value.
1597 This function must be called with an in-array of proper size. **/
1599 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1601 ir_node *prevBlock, *prevCfOp, *res, *phi0;
1604 /* If this block has no value at pos create a Phi0 and remember it
1605 in graph_arr to break recursions.
1606 Else we may not set graph_arr as there a later value is remembered. */
1608 if (!block->attr.block.graph_arr[pos]) {
1609 if (block == get_irg_start_block(current_ir_graph)) {
1610 /* Collapsing to Bad tarvals is no good idea.
1611 So we call a user-supplied routine here that deals with this case as
1612 appropriate for the given language. Sadly the only help we can give
1613 here is the position.
1615 Even if all variables are defined before use, it can happen that
1616 we get to the start block, if a cond has been replaced by a tuple
1617 (bad, jmp). In this case we call the function needlessly, eventually
1618 generating a non-existent error.
1619 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1622 if (default_initialize_local_variable)
1623 block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
1625 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1626 /* We don't need to care about exception ops in the start block.
1627 There are none by definition. */
1628 return block->attr.block.graph_arr[pos];
/* Not the start block: plant a Phi0 as placeholder to break recursion. */
1630 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1631 block->attr.block.graph_arr[pos] = phi0;
1632 #if PRECISE_EXC_CONTEXT
1633 /* Set graph_arr for fragile ops. Also here we should break recursion.
1634 We could choose a cyclic path through a cfop. But the recursion would
1635 break at some point. */
1636 set_frag_value(block->attr.block.graph_arr, pos, phi0);
1641 /* This loop goes to all predecessor blocks of the block the Phi node
1642 is in and there finds the operands of the Phi node by calling
1643 get_r_value_internal. */
1644 for (i = 1; i <= ins; ++i) {
1645 prevCfOp = skip_Proj(block->in[i]);
1647 if (is_Bad(prevCfOp)) {
1648 /* In case a Cond has been optimized we would get right to the start block
1649 with an invalid definition. */
1650 nin[i-1] = new_Bad();
1653 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1655 if (!is_Bad(prevBlock)) {
1656 #if PRECISE_EXC_CONTEXT
1657 if (is_fragile_op(prevCfOp) && (intern_get_irn_op (prevCfOp) != op_Bad)) {
/* NOTE(review): the assert itself calls get_r_frag_value_internal; with
   NDEBUG the next line still performs the real call, but with asserts on
   the function runs twice -- confirm it is idempotent. */
1658 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1659 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1662 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1664 nin[i-1] = new_Bad();
1668 /* After collecting all predecessors into the array nin a new Phi node
1669 with these predecessors is created. This constructor contains an
1670 optimization: If all predecessors of the Phi node are identical it
1671 returns the only operand instead of a new Phi node. */
1672 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1674 /* In case we allocated a Phi0 node at the beginning of this procedure,
1675 we need to exchange this Phi0 with the real Phi. */
1677 exchange(phi0, res);
1678 block->attr.block.graph_arr[pos] = res;
1679 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1680 only an optimization. */
1686 /* This function returns the last definition of a variable. In case
1687 this variable was last defined in a previous block, Phi nodes are
1688 inserted. If the part of the firm graph containing the definition
1689 is not yet constructed, a dummy Phi node is returned. */
1691 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1694 /* There are 4 cases to treat.
1696 1. The block is not mature and we visit it the first time. We can not
1697 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1698 predecessors is returned. This node is added to the linked list (field
1699 "link") of the containing block to be completed when this block is
1700 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1703 2. The value is already known in this block, graph_arr[pos] is set and we
1704 visit the block the first time. We can return the value without
1705 creating any new nodes.
1707 3. The block is mature and we visit it the first time. A Phi node needs
1708 to be created (phi_merge). If the Phi is not needed, as all it's
1709 operands are the same value reaching the block through different
1710 paths, it's optimized away and the value itself is returned.
1712 4. The block is mature, and we visit it the second time. Now two
1713 subcases are possible:
1714 * The value was computed completely the last time we were here. This
1715 is the case if there is no loop. We can return the proper value.
1716 * The recursion that visited this node and set the flag did not
1717 return yet. We are computing a value in a loop and need to
1718 break the recursion. This case only happens if we visited
1719 the same block with phi_merge before, which inserted a Phi0.
1720 So we return the Phi0.
1723 /* case 4 -- already visited. */
1724 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1725 /* As phi_merge allocates a Phi0 this value is always defined. Here
1726 is the critical difference of the two algorithms. */
1727 assert(block->attr.block.graph_arr[pos]);
1728 return block->attr.block.graph_arr[pos];
1731 /* visited the first time */
1732 set_irn_visited(block, get_irg_visited(current_ir_graph));
1734 /* Get the local valid value */
1735 res = block->attr.block.graph_arr[pos];
1737 /* case 2 -- If the value is actually computed, return it. */
1738 if (res) { return res; };
1740 if (block->attr.block.matured) { /* case 3 */
1742 /* The Phi has the same amount of ins as the corresponding block. */
1743 int ins = intern_get_irn_arity(block);
1745 NEW_ARR_A (ir_node *, nin, ins);
1747 /* Phi merge collects the predecessors and then creates a node. */
1748 res = phi_merge (block, pos, mode, nin, ins);
1750 } else { /* case 1 */
1751 /* The block is not mature, we don't know how many in's are needed. A Phi
1752 with zero predecessors is created. Such a Phi node is called Phi0
1753 node. The Phi0 is then added to the list of Phi0 nodes in this block
1754 to be matured by mature_block later.
1755 The Phi0 has to remember the pos of it's internal value. If the real
1756 Phi is computed, pos is used to update the array with the local
1758 res = new_rd_Phi0 (current_ir_graph, block, mode);
1759 res->attr.phi0_pos = pos;
1760 res->link = block->link;
1764 /* If we get here, the frontend missed a use-before-definition error */
1767 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
/* NOTE(review): only modes irm_F..irm_P have a null value to fall back on. */
1768 assert (mode->code >= irm_F && mode->code <= irm_P);
1769 res = new_rd_Const (NULL, current_ir_graph, block, mode,
1770 get_mode_null(mode));
1773 /* The local valid value is available now. */
1774 block->attr.block.graph_arr[pos] = res;
1779 #endif /* USE_FAST_PHI_CONSTRUCTION */
1781 /* ************************************************************************** */
1783 /** Finalize a Block node, when all control flows are known. */
1784 /** Acceptable parameters are only Block nodes. */
1786 mature_block (ir_node *block)
1793 assert (get_irn_opcode(block) == iro_Block);
1794 /* @@@ should be commented in
1795 assert (!get_Block_matured(block) && "Block already matured"); */
1797 if (!get_Block_matured(block)) {
/* Number of control-flow predecessors; in[0] is the block's own slot. */
1798 ins = ARR_LEN (block->in)-1;
1799 /* Fix block parameters */
1800 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1802 /* An array for building the Phi nodes. */
1803 NEW_ARR_A (ir_node *, nin, ins);
1805 /* Traverse a chain of Phi nodes attached to this block and mature
1807 for (n = block->link; n; n=next) {
1808 inc_irg_visited(current_ir_graph);
/* Replace each pending Phi0 with the real Phi built by phi_merge. */
1810 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1813 block->attr.block.matured = 1;
1815 /* Now, as the block is a finished firm node, we can optimize it.
1816 Since other nodes have been allocated since the block was created
1817 we can not free the node on the obstack. Therefore we have to call
1819 Unfortunately the optimization does not change a lot, as all allocated
1820 nodes refer to the unoptimized node.
1821 We can call _2, as global cse has no effect on blocks. */
1822 block = optimize_in_place_2(block);
1823 irn_vrfy_irg(block, current_ir_graph);
/* -- Convenience constructors with debug info: each new_d_X forwards to the
   corresponding new_rd_X in the current block of current_ir_graph. -- */
1828 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1830 return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
/* Const nodes are placed in the start block, not the current block. */
1835 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1837 return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1842 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1844 return new_rd_Const_type (db, current_ir_graph, current_ir_graph->start_block,
1850 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1852 return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1857 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1859 return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
/* Marks the Cond "arg" as fragmentary and creates the default Proj. */
1864 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1867 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1868 arg->attr.c.kind = fragmentary;
1869 arg->attr.c.default_proj = max_proj;
1870 res = new_Proj (arg, mode_X, max_proj);
1875 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1877 return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1882 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1884 return new_rd_Cast (db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1888 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1890 return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
/* -- Arithmetic/logic/control constructors. The division-family ops are
   fragile (may raise exceptions): under PRECISE_EXC_CONTEXT they get a
   fragment array recording the local values valid at the op. -- */
1895 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1897 return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1902 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1904 return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1910 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1912 return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1917 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1919 return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1924 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1927 res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1929 #if PRECISE_EXC_CONTEXT
1930 if ((current_ir_graph->phase_state == phase_building) &&
1931 (intern_get_irn_op(res) == op_Quot)) /* Could be optimized away. */
1932 res->attr.frag_arr = new_frag_arr(res);
1939 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1942 res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1944 #if PRECISE_EXC_CONTEXT
1945 if ((current_ir_graph->phase_state == phase_building) &&
1946 (intern_get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
1947 res->attr.frag_arr = new_frag_arr(res);
1954 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1957 res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1959 #if PRECISE_EXC_CONTEXT
1960 if ((current_ir_graph->phase_state == phase_building) &&
1961 (intern_get_irn_op(res) == op_Div)) /* Could be optimized away. */
1962 res->attr.frag_arr = new_frag_arr(res);
1969 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1972 res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1974 #if PRECISE_EXC_CONTEXT
1975 if ((current_ir_graph->phase_state == phase_building) &&
1976 (intern_get_irn_op(res) == op_Mod)) /* Could be optimized away. */
1977 res->attr.frag_arr = new_frag_arr(res);
1984 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1986 return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1991 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1993 return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1998 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
2000 return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
2005 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
2007 return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
2012 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2014 return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
2019 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2021 return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
2026 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2028 return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2033 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2035 return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2040 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2042 return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2047 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2049 return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2054 new_d_Jmp (dbg_info* db)
2056 return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2060 new_d_Cond (dbg_info* db, ir_node *c)
2062 return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
/* -- Memory/exception related constructors. Call, Load, Store, and Alloc
   are fragile ops and receive fragment arrays under PRECISE_EXC_CONTEXT;
   note the frag_arr lives in an op-specific attribute member. -- */
2066 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2070 res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2071 store, callee, arity, in, tp);
2072 #if PRECISE_EXC_CONTEXT
2073 if ((current_ir_graph->phase_state == phase_building) &&
2074 (intern_get_irn_op(res) == op_Call)) /* Could be optimized away. */
2075 res->attr.call.frag_arr = new_frag_arr(res);
2082 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2084 return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2089 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2091 return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2096 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2099 res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2101 #if PRECISE_EXC_CONTEXT
2102 if ((current_ir_graph->phase_state == phase_building) &&
2103 (intern_get_irn_op(res) == op_Load)) /* Could be optimized away. */
2104 res->attr.frag_arr = new_frag_arr(res);
2111 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2114 res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2116 #if PRECISE_EXC_CONTEXT
2117 if ((current_ir_graph->phase_state == phase_building) &&
2118 (intern_get_irn_op(res) == op_Store)) /* Could be optimized away. */
2119 res->attr.frag_arr = new_frag_arr(res);
2126 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2130 res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2131 store, size, alloc_type, where);
2132 #if PRECISE_EXC_CONTEXT
2133 if ((current_ir_graph->phase_state == phase_building) &&
2134 (intern_get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
2135 res->attr.a.frag_arr = new_frag_arr(res);
2142 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2144 return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2145 store, ptr, size, free_type);
/* -- Selection, symbolic-constant, and interprocedural-view constructors.
   SymConst, like Const, is placed in the start block. -- */
2149 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2150 /* GL: objptr was called frame before. Frame was a bad choice for the name
2151 as the operand could as well be a pointer to a dynamic object. */
2153 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2154 store, objptr, 0, NULL, ent);
2158 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2160 return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2161 store, objptr, n_index, index, sel);
2165 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2167 return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2168 store, objptr, ent));
2172 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2174 return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2179 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2181 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2189 return current_ir_graph->bad;
2193 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2195 return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2200 new_d_Unknown (ir_mode *m)
2202 return new_rd_Unknown(current_ir_graph, m);
2206 new_d_CallBegin (dbg_info *db, ir_node *call)
2209 res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2214 new_d_EndReg (dbg_info *db)
2217 res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2222 new_d_EndExcept (dbg_info *db)
2225 res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2230 new_d_Break (dbg_info *db)
2232 return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2236 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2238 return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2243 new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
2247 res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
2248 callee, arity, in, tp);
2253 /* ********************************************************************* */
2254 /* Comfortable interface with automatic Phi node construction. */
2255 /* (Uses also constructors of ?? interface, except new_Block. */
2256 /* ********************************************************************* */
2258 /** Block construction **/
2259 /* immature Block without predecessors */
/** Creates an immature Block (arity -1, predecessors added later via
    add_in_edge) and makes it the current block. Also allocates the
    graph_arr used for automatic Phi construction, zero-initialized. */
2260 ir_node *new_d_immBlock (dbg_info* db) {
2263 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2264 /* creates a new dynamic in-array as length of in is -1 */
2265 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2266 current_ir_graph->current_block = res;
2267 res->attr.block.matured = 0;
2268 /* res->attr.block.exc = exc_normal; */
2269 /* res->attr.block.handler_entry = 0; */
2270 res->attr.block.irg = current_ir_graph;
2271 res->attr.block.backedge = NULL;
2272 res->attr.block.in_cg = NULL;
2273 res->attr.block.cg_backedge = NULL;
2274 set_Block_block_visited(res, 0);
2276 /* Create and initialize array for Phi-node construction. */
2277 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2278 current_ir_graph->n_loc);
2279 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2281 /* Immature block may not be optimized! */
2282 irn_vrfy_irg (res, current_ir_graph);
2289 return new_d_immBlock(NULL);
2292 /* add an edge to a jmp/control flow node */
2294 add_in_edge (ir_node *block, ir_node *jmp)
/* Appending to a matured block would invalidate its Phi nodes. */
2296 if (block->attr.block.matured) {
2297 assert(0 && "Error: Block already matured!\n");
2300 assert (jmp != NULL);
2301 ARR_APP1 (ir_node *, block->in, jmp);
2305 /* changing the current block */
/** Makes "target" the block into which subsequent new_d_* constructors
    place their nodes. */
2307 switch_block (ir_node *target)
2309 current_ir_graph->current_block = target;
2312 /* ************************ */
2313 /* parameter administration */
2315 /* get a value from the parameter array from the current block by its index */
2317 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2319 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2320 inc_irg_visited(current_ir_graph);
/* Slot 0 of graph_arr is reserved for the store; user values are shifted
   by one, hence pos + 1. */
2322 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2324 /* get a value from the parameter array from the current block by its index */
2326 get_value (int pos, ir_mode *mode)
2328 return get_d_value(NULL, pos, mode);
2331 /* set a value at position pos in the parameter array from the current block */
2333 set_value (int pos, ir_node *value)
2335 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2336 assert(pos+1 < current_ir_graph->n_loc);
2337 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2340 /* get the current store */
2344 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2345 /* GL: one could call get_value instead */
2346 inc_irg_visited(current_ir_graph);
/* The store (memory state) lives in the reserved slot 0 of graph_arr. */
2347 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2350 /* set the current store */
2352 set_store (ir_node *store)
2354 /* GL: one could call set_value instead */
2355 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2356 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/** Protects "ka" from dead-node elimination by attaching it to the End
    node's keep-alive list. */
2360 keep_alive (ir_node *ka)
2362 add_End_keepalive(current_ir_graph->end, ka);
2365 /** Useful access routines **/
2366 /* Returns the current block of the current graph. To set the current
2367 block use switch_block(). */
2368 ir_node *get_cur_block() {
2369 return get_irg_current_block(current_ir_graph);
2372 /* Returns the frame type of the current graph */
2373 type *get_cur_frame_type() {
2374 return get_irg_frame_type(current_ir_graph);
2378 /* ********************************************************************* */
2381 /* call once for each run of the library */
/** Registers the language-dependent callback used to initialize local
    variables that are read before being written (see phi_merge). */
2383 init_cons (default_initialize_local_variable_func_t *func)
2385 default_initialize_local_variable = func;
2388 /* call for each graph */
/** Marks graph construction as finished; constructors asserting
    phase_building will reject further use on this graph. */
2390 finalize_cons (ir_graph *irg) {
2391 irg->phase_state = phase_high;
/* -- Convenience constructors without debug info: each new_X forwards to
   new_d_X with a NULL dbg_info. -- */
2395 ir_node *new_Block(int arity, ir_node **in) {
2396 return new_d_Block(NULL, arity, in);
2398 ir_node *new_Start (void) {
2399 return new_d_Start(NULL);
2401 ir_node *new_End (void) {
2402 return new_d_End(NULL);
2404 ir_node *new_Jmp (void) {
2405 return new_d_Jmp(NULL);
2407 ir_node *new_Cond (ir_node *c) {
2408 return new_d_Cond(NULL, c);
2410 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2411 return new_d_Return(NULL, store, arity, in);
2413 ir_node *new_Raise (ir_node *store, ir_node *obj) {
2414 return new_d_Raise(NULL, store, obj);
2416 ir_node *new_Const (ir_mode *mode, tarval *con) {
2417 return new_d_Const(NULL, mode, con);
2419 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2420 return new_d_SymConst(NULL, value, kind);
2422 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2423 return new_d_simpleSel(NULL, store, objptr, ent);
2425 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2427 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2429 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2430 return new_d_InstOf (NULL, store, objptr, ent);
2432 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
2434 return new_d_Call(NULL, store, callee, arity, in, tp);
2436 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
2437 return new_d_Add(NULL, op1, op2, mode);
2439 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
2440 return new_d_Sub(NULL, op1, op2, mode);
2442 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
2443 return new_d_Minus(NULL, op, mode);
2445 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
2446 return new_d_Mul(NULL, op1, op2, mode);
2448 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
2449 return new_d_Quot(NULL, memop, op1, op2);
2451 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2452 return new_d_DivMod(NULL, memop, op1, op2);
2454 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
2455 return new_d_Div(NULL, memop, op1, op2);
2457 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
2458 return new_d_Mod(NULL, memop, op1, op2);
2460 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
2461 return new_d_Abs(NULL, op, mode);
2463 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
2464 return new_d_And(NULL, op1, op2, mode);
2466 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
2467 return new_d_Or(NULL, op1, op2, mode);
2469 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
2470 return new_d_Eor(NULL, op1, op2, mode);
2472 ir_node *new_Not (ir_node *op, ir_mode *mode) {
2473 return new_d_Not(NULL, op, mode);
2475 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
2476 return new_d_Shl(NULL, op, k, mode);
2478 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
2479 return new_d_Shr(NULL, op, k, mode);
2481 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
2482 return new_d_Shrs(NULL, op, k, mode);
2484 #define new_Rotate new_Rot
2485 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
2486 return new_d_Rot(NULL, op, k, mode);
2488 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
2489 return new_d_Cmp(NULL, op1, op2);
2491 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
2492 return new_d_Conv(NULL, op, mode);
2494 ir_node *new_Cast (ir_node *op, type *to_tp) {
2495 return new_d_Cast(NULL, op, to_tp);
2497 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
2498 return new_d_Phi(NULL, arity, in, mode);
2500 ir_node *new_Load (ir_node *store, ir_node *addr) {
2501 return new_d_Load(NULL, store, addr);
2503 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
2504 return new_d_Store(NULL, store, addr, val);
2506 ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
2507 where_alloc where) {
2508 return new_d_Alloc(NULL, store, size, alloc_type, where);
2510 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
2512 return new_d_Free(NULL, store, ptr, size, free_type);
2514 ir_node *new_Sync (int arity, ir_node **in) {
2515 return new_d_Sync(NULL, arity, in);
2517 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2518 return new_d_Proj(NULL, arg, mode, proj);
2520 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2521 return new_d_defaultProj(NULL, arg, max_proj);
2523 ir_node *new_Tuple (int arity, ir_node **in) {
2524 return new_d_Tuple(NULL, arity, in);
2526 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2527 return new_d_Id(NULL, val, mode);
2529 ir_node *new_Bad (void) {
2532 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2533 return new_d_Confirm (NULL, val, bound, cmp);
2535 ir_node *new_Unknown(ir_mode *m) {
2536 return new_d_Unknown(m);
2538 ir_node *new_CallBegin (ir_node *callee) {
2539 return new_d_CallBegin(NULL, callee);
2541 ir_node *new_EndReg (void) {
2542 return new_d_EndReg(NULL);
2544 ir_node *new_EndExcept (void) {
2545 return new_d_EndExcept(NULL);
2547 ir_node *new_Break (void) {
2548 return new_d_Break(NULL);
2550 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2551 return new_d_Filter(NULL, arg, mode, proj);
2553 ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
2554 return new_d_FuncCall(NULL, callee, arity, in, tp);