3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verification */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * Language dependent variable initialization callback.
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
65 /* creates a bd constructor for a binop */
66 #define NEW_BD_BINOP(instr) \
68 new_bd_##instr (dbg_info *db, ir_node *block, \
69 ir_node *op1, ir_node *op2, ir_mode *mode) \
73 ir_graph *irg = current_ir_graph; \
76 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
77 res = optimize_node(res); \
78 IRN_VRFY_IRG(res, irg); \
82 /* creates a bd constructor for an unop */
83 #define NEW_BD_UNOP(instr) \
85 new_bd_##instr (dbg_info *db, ir_node *block, \
86 ir_node *op, ir_mode *mode) \
89 ir_graph *irg = current_ir_graph; \
90 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
91 res = optimize_node(res); \
92 IRN_VRFY_IRG(res, irg); \
96 /* creates a bd constructor for a divop */
97 #define NEW_BD_DIVOP(instr) \
99 new_bd_##instr (dbg_info *db, ir_node *block, \
100 ir_node *memop, ir_node *op1, ir_node *op2) \
104 ir_graph *irg = current_ir_graph; \
108 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
109 res = optimize_node(res); \
110 IRN_VRFY_IRG(res, irg); \
114 /* creates a rd constructor for a binop */
115 #define NEW_RD_BINOP(instr) \
117 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
118 ir_node *op1, ir_node *op2, ir_mode *mode) \
121 ir_graph *rem = current_ir_graph; \
122 current_ir_graph = irg; \
123 res = new_bd_##instr(db, block, op1, op2, mode); \
124 current_ir_graph = rem; \
128 /* creates a rd constructor for an unop */
129 #define NEW_RD_UNOP(instr) \
131 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
132 ir_node *op, ir_mode *mode) \
135 ir_graph *rem = current_ir_graph; \
136 current_ir_graph = irg; \
137 res = new_bd_##instr(db, block, op, mode); \
138 current_ir_graph = rem; \
142 /* creates a rd constructor for a divop */
143 #define NEW_RD_DIVOP(instr) \
145 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
146 ir_node *memop, ir_node *op1, ir_node *op2) \
149 ir_graph *rem = current_ir_graph; \
150 current_ir_graph = irg; \
151 res = new_bd_##instr(db, block, memop, op1, op2); \
152 current_ir_graph = rem; \
156 /* creates a d constructor for a binop */
157 #define NEW_D_BINOP(instr) \
159 new_d_##instr (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
160 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
/* creates a d constructor for an unop.
   BUG FIX: the expansion previously called new_bd_Minus() unconditionally,
   so every unary constructor generated through this macro (Not, Abs, ...)
   actually built a Minus node.  It now pastes the macro argument
   (new_bd_##instr), mirroring NEW_D_BINOP above, so each new_d_<op>
   dispatches to its matching basic-block constructor in the current
   graph's current block. */
#define NEW_D_UNOP(instr)                                                  \
ir_node *                                                                  \
new_d_##instr (dbg_info *db, ir_node *op, ir_mode *mode) {                 \
  return new_bd_##instr(db, current_ir_graph->current_block, op, mode);    \
}
171 /* Constructs a Block with a fixed number of predecessors.
172 Does not set current_block. Can not be used with automatic
173 Phi node construction. */
175 new_bd_Block (dbg_info *db, int arity, ir_node **in)
/* Builds the raw Block node on current_ir_graph and initializes every
   block attribute to a neutral state; the node is not optimized here. */
178 ir_graph *irg = current_ir_graph;
/* Block nodes have no enclosing block, hence the NULL block argument. */
180 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* Marked mature immediately: all 'arity' predecessors are supplied up front. */
181 set_Block_matured(res, 1);
182 set_Block_block_visited(res, 0);
184 /* res->attr.block.exc = exc_normal; */
185 /* res->attr.block.handler_entry = 0; */
186 res->attr.block.dead = 0;
187 res->attr.block.irg = irg;
/* One backedge flag per predecessor, allocated on the graph's obstack. */
188 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural-view and extended-basic-block data start out unset. */
189 res->attr.block.in_cg = NULL;
190 res->attr.block.cg_backedge = NULL;
191 res->attr.block.extblk = NULL;
193 IRN_VRFY_IRG(res, irg);
198 new_bd_Start (dbg_info *db, ir_node *block)
201 ir_graph *irg = current_ir_graph;
203 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
204 /* res->attr.start.irg = irg; */
206 IRN_VRFY_IRG(res, irg);
211 new_bd_End (dbg_info *db, ir_node *block)
214 ir_graph *irg = current_ir_graph;
216 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
218 IRN_VRFY_IRG(res, irg);
222 /* Creates a Phi node with all predecessors. Calling this constructor
223 is only allowed if the corresponding block is mature. */
225 new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
228 ir_graph *irg = current_ir_graph;
232 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature, its arity must match the Phi's arity. */
234 if ( get_Block_matured(block) )
235 assert( get_irn_arity(block) == arity );
237 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
/* Phi nodes carry their own backedge flags, one per predecessor. */
239 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the operands for Unknown inputs; such Phis are not optimized,
   since the Unknown predecessors may still be replaced later. */
241 for (i = arity-1; i >= 0; i--)
242 if (get_irn_op(in[i]) == op_Unknown) {
247 if (!has_unknown) res = optimize_node (res);
248 IRN_VRFY_IRG(res, irg);
250 /* Memory Phis in endless loops must be kept alive.
251 As we can't distinguish these easily we keep all of them alive. */
252 if ((res->op == op_Phi) && (mode == mode_M))
253 add_End_keepalive(irg->end, res);
258 new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
261 ir_graph *irg = current_ir_graph;
/* NOTE(review): the 'block' parameter is ignored here -- the Const is
   always placed in the graph's start block (presumably so equal constants
   can be shared via optimize_node's CSE); confirm callers do not rely on
   block placement. */
263 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
264 res->attr.con.tv = con;
265 set_Const_type(res, tp); /* Call method because of complex assertion. */
266 res = optimize_node (res);
/* optimize_node may return an existing Const; it must carry the same type. */
267 assert(get_Const_type(res) == tp);
268 IRN_VRFY_IRG(res, irg);
274 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
276 ir_graph *irg = current_ir_graph;
278 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
282 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
284 ir_graph *irg = current_ir_graph;
286 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
290 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
293 ir_graph *irg = current_ir_graph;
295 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
296 res = optimize_node(res);
297 IRN_VRFY_IRG(res, irg);
302 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
306 ir_graph *irg = current_ir_graph;
308 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
309 res->attr.proj = proj;
312 assert(get_Proj_pred(res));
313 assert(get_nodes_block(get_Proj_pred(res)));
315 res = optimize_node(res);
317 IRN_VRFY_IRG(res, irg);
323 new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
/* Builds the default Proj of a Cond.  Side effect: mutates 'arg' (the
   Cond node), marking its projection numbering as fragmentary and
   recording 'max_proj' as the default projection number. */
327 ir_graph *irg = current_ir_graph;
329 assert(arg->op == op_Cond);
330 arg->attr.c.kind = fragmentary;
331 arg->attr.c.default_proj = max_proj;
/* Delegate to the Proj constructor; control-flow Projs use mode_X. */
332 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
337 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
340 ir_graph *irg = current_ir_graph;
342 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
343 res = optimize_node(res);
344 IRN_VRFY_IRG(res, irg);
349 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
352 ir_graph *irg = current_ir_graph;
354 assert(is_atomic_type(to_tp));
356 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
357 res->attr.cast.totype = to_tp;
358 res = optimize_node(res);
359 IRN_VRFY_IRG(res, irg);
364 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
367 ir_graph *irg = current_ir_graph;
369 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
370 res = optimize_node (res);
371 IRN_VRFY_IRG(res, irg);
396 new_bd_Cmp (dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2)
400 ir_graph *irg = current_ir_graph;
403 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
404 res = optimize_node(res);
405 IRN_VRFY_IRG(res, irg);
410 new_bd_Jmp (dbg_info *db, ir_node *block)
413 ir_graph *irg = current_ir_graph;
415 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
416 res = optimize_node (res);
417 IRN_VRFY_IRG (res, irg);
422 new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
425 ir_graph *irg = current_ir_graph;
427 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
428 res = optimize_node (res);
429 IRN_VRFY_IRG (res, irg);
431 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
437 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
443 res->attr.c.kind = dense;
444 res->attr.c.default_proj = 0;
445 res->attr.c.pred = COND_JMP_PRED_NONE;
446 res = optimize_node (res);
447 IRN_VRFY_IRG(res, irg);
452 new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
453 ir_node *callee, int arity, ir_node **in, ir_type *tp)
458 ir_graph *irg = current_ir_graph;
461 NEW_ARR_A(ir_node *, r_in, r_arity);
464 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
466 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
468 assert((get_unknown_type() == tp) || is_Method_type(tp));
469 set_Call_type(res, tp);
470 res->attr.call.exc.pin_state = op_pin_state_pinned;
471 res->attr.call.callee_arr = NULL;
472 res = optimize_node(res);
473 IRN_VRFY_IRG(res, irg);
478 new_bd_Return (dbg_info *db, ir_node *block,
479 ir_node *store, int arity, ir_node **in)
484 ir_graph *irg = current_ir_graph;
487 NEW_ARR_A (ir_node *, r_in, r_arity);
489 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
490 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
491 res = optimize_node(res);
492 IRN_VRFY_IRG(res, irg);
497 new_bd_Load (dbg_info *db, ir_node *block,
498 ir_node *store, ir_node *adr, ir_mode *mode)
502 ir_graph *irg = current_ir_graph;
506 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
507 res->attr.load.exc.pin_state = op_pin_state_pinned;
508 res->attr.load.load_mode = mode;
509 res->attr.load.volatility = volatility_non_volatile;
510 res = optimize_node(res);
511 IRN_VRFY_IRG(res, irg);
516 new_bd_Store (dbg_info *db, ir_node *block,
517 ir_node *store, ir_node *adr, ir_node *val)
521 ir_graph *irg = current_ir_graph;
526 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
527 res->attr.store.exc.pin_state = op_pin_state_pinned;
528 res->attr.store.volatility = volatility_non_volatile;
529 res = optimize_node(res);
530 IRN_VRFY_IRG(res, irg);
535 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
536 ir_node *size, ir_type *alloc_type, where_alloc where)
540 ir_graph *irg = current_ir_graph;
544 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
545 res->attr.a.exc.pin_state = op_pin_state_pinned;
546 res->attr.a.where = where;
547 res->attr.a.type = alloc_type;
548 res = optimize_node(res);
549 IRN_VRFY_IRG(res, irg);
554 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
555 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
559 ir_graph *irg = current_ir_graph;
564 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
565 res->attr.f.where = where;
566 res->attr.f.type = free_type;
567 res = optimize_node(res);
568 IRN_VRFY_IRG(res, irg);
573 new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
574 int arity, ir_node **in, entity *ent)
579 ir_graph *irg = current_ir_graph;
581 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
584 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
587 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
589 * FIXME: Sel's can select functions which should be of mode mode_P_code.
591 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
592 res->attr.s.ent = ent;
593 res = optimize_node(res);
594 IRN_VRFY_IRG(res, irg);
599 new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
600 symconst_kind symkind, ir_type *tp) {
603 ir_graph *irg = current_ir_graph;
605 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
606 mode = mode_P_data; /* FIXME: can be mode_P_code */
610 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
612 res->attr.i.num = symkind;
613 res->attr.i.sym = value;
616 res = optimize_node(res);
617 IRN_VRFY_IRG(res, irg);
622 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
623 symconst_kind symkind)
625 ir_graph *irg = current_ir_graph;
627 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
632 new_bd_Sync (dbg_info *db, ir_node *block, int arity, ir_node **in)
635 ir_graph *irg = current_ir_graph;
637 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
638 res = optimize_node(res);
639 IRN_VRFY_IRG(res, irg);
644 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
646 ir_node *in[2], *res;
647 ir_graph *irg = current_ir_graph;
651 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
652 res->attr.confirm_cmp = cmp;
653 res = optimize_node (res);
654 IRN_VRFY_IRG(res, irg);
658 /* this function is often called with current_ir_graph unset */
660 new_bd_Unknown (ir_mode *m)
663 ir_graph *irg = current_ir_graph;
/* NOTE(review): despite the comment above, irg->start_block is
   dereferenced unconditionally -- if current_ir_graph really can be
   unset here this is a NULL dereference; confirm the comment or guard
   the access.  Like Consts, Unknown nodes are placed in the start block. */
665 res = new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
666 res = optimize_node(res);
671 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
675 ir_graph *irg = current_ir_graph;
677 in[0] = get_Call_ptr(call);
678 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
679 /* res->attr.callbegin.irg = irg; */
680 res->attr.callbegin.call = call;
681 res = optimize_node(res);
682 IRN_VRFY_IRG(res, irg);
687 new_bd_EndReg (dbg_info *db, ir_node *block)
690 ir_graph *irg = current_ir_graph;
692 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
694 IRN_VRFY_IRG(res, irg);
699 new_bd_EndExcept (dbg_info *db, ir_node *block)
702 ir_graph *irg = current_ir_graph;
704 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
705 irg->end_except = res;
706 IRN_VRFY_IRG (res, irg);
711 new_bd_Break (dbg_info *db, ir_node *block)
714 ir_graph *irg = current_ir_graph;
716 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
717 res = optimize_node(res);
718 IRN_VRFY_IRG(res, irg);
723 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
727 ir_graph *irg = current_ir_graph;
729 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
730 res->attr.filter.proj = proj;
731 res->attr.filter.in_cg = NULL;
732 res->attr.filter.backedge = NULL;
735 assert(get_Proj_pred(res));
736 assert(get_nodes_block(get_Proj_pred(res)));
738 res = optimize_node(res);
739 IRN_VRFY_IRG(res, irg);
744 new_bd_Mux (dbg_info *db, ir_node *block,
745 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
749 ir_graph *irg = current_ir_graph;
755 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
758 res = optimize_node(res);
759 IRN_VRFY_IRG(res, irg);
764 new_bd_CopyB (dbg_info *db, ir_node *block,
765 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
769 ir_graph *irg = current_ir_graph;
775 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
777 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
778 res->attr.copyb.data_type = data_type;
779 res = optimize_node(res);
780 IRN_VRFY_IRG(res, irg);
785 new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
786 ir_node *objptr, ir_type *type)
790 ir_graph *irg = current_ir_graph;
/* NOTE(review): the node is created with op_Sel, yet its attribute is
   written through the InstOf view (attr.io.type).  If an op_InstOf
   opcode exists, it looks like it should be used here -- verify against
   the irop definitions before changing. */
794 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
795 res->attr.io.type = type;
796 res = optimize_node(res);
797 IRN_VRFY_IRG(res, irg);
802 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
806 ir_graph *irg = current_ir_graph;
810 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
811 res = optimize_node(res);
812 IRN_VRFY_IRG(res, irg);
817 new_bd_Bound (dbg_info *db, ir_node *block,
818 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
822 ir_graph *irg = current_ir_graph;
828 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
829 res->attr.bound.exc.pin_state = op_pin_state_pinned;
830 res = optimize_node(res);
831 IRN_VRFY_IRG(res, irg);
835 /* --------------------------------------------- */
836 /* private interfaces, for professional use only */
837 /* --------------------------------------------- */
839 /* Constructs a Block with a fixed number of predecessors.
840 Does not set current_block. Can not be used with automatic
841 Phi node construction. */
843 new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
845 ir_graph *rem = current_ir_graph;
848 current_ir_graph = irg;
849 res = new_bd_Block (db, arity, in);
850 current_ir_graph = rem;
856 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
858 ir_graph *rem = current_ir_graph;
861 current_ir_graph = irg;
862 res = new_bd_Start (db, block);
863 current_ir_graph = rem;
869 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
872 ir_graph *rem = current_ir_graph;
874 current_ir_graph = rem;
875 res = new_bd_End (db, block);
876 current_ir_graph = rem;
881 /* Creates a Phi node with all predecessors. Calling this constructor
882 is only allowed if the corresponding block is mature. */
884 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
887 ir_graph *rem = current_ir_graph;
889 current_ir_graph = irg;
890 res = new_bd_Phi (db, block,arity, in, mode);
891 current_ir_graph = rem;
897 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
900 ir_graph *rem = current_ir_graph;
902 current_ir_graph = irg;
903 res = new_bd_Const_type (db, block, mode, con, tp);
904 current_ir_graph = rem;
910 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
913 ir_graph *rem = current_ir_graph;
915 current_ir_graph = irg;
916 res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
917 current_ir_graph = rem;
923 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
925 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
929 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
932 ir_graph *rem = current_ir_graph;
934 current_ir_graph = irg;
935 res = new_bd_Id(db, block, val, mode);
936 current_ir_graph = rem;
942 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
946 ir_graph *rem = current_ir_graph;
948 current_ir_graph = irg;
949 res = new_bd_Proj(db, block, arg, mode, proj);
950 current_ir_graph = rem;
956 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
960 ir_graph *rem = current_ir_graph;
962 current_ir_graph = irg;
963 res = new_bd_defaultProj(db, block, arg, max_proj);
964 current_ir_graph = rem;
970 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
973 ir_graph *rem = current_ir_graph;
975 current_ir_graph = irg;
976 res = new_bd_Conv(db, block, op, mode);
977 current_ir_graph = rem;
983 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
986 ir_graph *rem = current_ir_graph;
988 current_ir_graph = irg;
989 res = new_bd_Cast(db, block, op, to_tp);
990 current_ir_graph = rem;
996 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
999 ir_graph *rem = current_ir_graph;
1001 current_ir_graph = irg;
1002 res = new_bd_Tuple(db, block, arity, in);
1003 current_ir_graph = rem;
1013 NEW_RD_DIVOP(DivMod)
1026 NEW_RD_BINOP(Borrow)
1029 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1030 ir_node *op1, ir_node *op2)
1033 ir_graph *rem = current_ir_graph;
1035 current_ir_graph = irg;
1036 res = new_bd_Cmp(db, block, op1, op2);
1037 current_ir_graph = rem;
1043 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1046 ir_graph *rem = current_ir_graph;
1048 current_ir_graph = irg;
1049 res = new_bd_Jmp(db, block);
1050 current_ir_graph = rem;
1056 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1059 ir_graph *rem = current_ir_graph;
1061 current_ir_graph = irg;
1062 res = new_bd_IJmp(db, block, tgt);
1063 current_ir_graph = rem;
1069 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1072 ir_graph *rem = current_ir_graph;
1074 current_ir_graph = irg;
1075 res = new_bd_Cond(db, block, c);
1076 current_ir_graph = rem;
1082 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1083 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1086 ir_graph *rem = current_ir_graph;
1088 current_ir_graph = irg;
1089 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1090 current_ir_graph = rem;
1096 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1097 ir_node *store, int arity, ir_node **in)
1100 ir_graph *rem = current_ir_graph;
1102 current_ir_graph = irg;
1103 res = new_bd_Return(db, block, store, arity, in);
1104 current_ir_graph = rem;
1110 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1111 ir_node *store, ir_node *adr, ir_mode *mode)
1114 ir_graph *rem = current_ir_graph;
1116 current_ir_graph = irg;
1117 res = new_bd_Load(db, block, store, adr, mode);
1118 current_ir_graph = rem;
1124 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1125 ir_node *store, ir_node *adr, ir_node *val)
1128 ir_graph *rem = current_ir_graph;
1130 current_ir_graph = irg;
1131 res = new_bd_Store(db, block, store, adr, val);
1132 current_ir_graph = rem;
1138 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1139 ir_node *size, ir_type *alloc_type, where_alloc where)
1142 ir_graph *rem = current_ir_graph;
1144 current_ir_graph = irg;
1145 res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1146 current_ir_graph = rem;
1152 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1153 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1156 ir_graph *rem = current_ir_graph;
1158 current_ir_graph = irg;
1159 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1160 current_ir_graph = rem;
1166 new_rd_simpleSel (dbg_info *db, ir_graph *irg, ir_node *block,
1167 ir_node *store, ir_node *objptr, entity *ent)
1170 ir_graph *rem = current_ir_graph;
1172 current_ir_graph = irg;
1173 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1174 current_ir_graph = rem;
1180 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1181 int arity, ir_node **in, entity *ent)
1184 ir_graph *rem = current_ir_graph;
1186 current_ir_graph = irg;
1187 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1188 current_ir_graph = rem;
1194 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1195 symconst_kind symkind, ir_type *tp)
1198 ir_graph *rem = current_ir_graph;
1200 current_ir_graph = irg;
1201 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1202 current_ir_graph = rem;
1208 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1209 symconst_kind symkind)
1211 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1215 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, ir_type *tp)
1217 symconst_symbol sym = {(ir_type *)symbol};
1218 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
1221 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1222 symconst_symbol sym = {(ir_type *)symbol};
1223 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
1226 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1227 symconst_symbol sym = {symbol};
1228 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
1231 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1232 symconst_symbol sym = {symbol};
1233 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
1237 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1240 ir_graph *rem = current_ir_graph;
1242 current_ir_graph = irg;
1243 res = new_bd_Sync(db, block, arity, in);
1244 current_ir_graph = rem;
1250 new_rd_Bad (ir_graph *irg)
1256 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1259 ir_graph *rem = current_ir_graph;
1261 current_ir_graph = irg;
1262 res = new_bd_Confirm(db, block, val, bound, cmp);
1263 current_ir_graph = rem;
1268 /* this function is often called with current_ir_graph unset */
1270 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1273 ir_graph *rem = current_ir_graph;
1275 current_ir_graph = irg;
1276 res = new_bd_Unknown(m);
1277 current_ir_graph = rem;
1283 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1286 ir_graph *rem = current_ir_graph;
1288 current_ir_graph = irg;
1289 res = new_bd_CallBegin(db, block, call);
1290 current_ir_graph = rem;
1296 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1300 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1302 IRN_VRFY_IRG(res, irg);
1307 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1311 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1312 irg->end_except = res;
1313 IRN_VRFY_IRG (res, irg);
1318 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1321 ir_graph *rem = current_ir_graph;
1323 current_ir_graph = irg;
1324 res = new_bd_Break(db, block);
1325 current_ir_graph = rem;
1331 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1335 ir_graph *rem = current_ir_graph;
1337 current_ir_graph = irg;
1338 res = new_bd_Filter(db, block, arg, mode, proj);
1339 current_ir_graph = rem;
1345 new_rd_NoMem (ir_graph *irg) {
1350 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
1351 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1354 ir_graph *rem = current_ir_graph;
1356 current_ir_graph = irg;
1357 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1358 current_ir_graph = rem;
1363 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1364 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1367 ir_graph *rem = current_ir_graph;
1369 current_ir_graph = irg;
1370 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1371 current_ir_graph = rem;
1377 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1378 ir_node *objptr, ir_type *type)
1381 ir_graph *rem = current_ir_graph;
1383 current_ir_graph = irg;
1384 res = new_bd_InstOf(db, block, store, objptr, type);
1385 current_ir_graph = rem;
1391 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1394 ir_graph *rem = current_ir_graph;
1396 current_ir_graph = irg;
1397 res = new_bd_Raise(db, block, store, obj);
1398 current_ir_graph = rem;
1403 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1404 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1407 ir_graph *rem = current_ir_graph;
1409 current_ir_graph = irg;
1410 res = new_bd_Bound(db, block, store, idx, lower, upper);
1411 current_ir_graph = rem;
1416 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1417 return new_rd_Block(NULL, irg, arity, in);
1419 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1420 return new_rd_Start(NULL, irg, block);
1422 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1423 return new_rd_End(NULL, irg, block);
1425 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1426 return new_rd_Jmp(NULL, irg, block);
1428 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1429 return new_rd_IJmp(NULL, irg, block, tgt);
1431 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1432 return new_rd_Cond(NULL, irg, block, c);
1434 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1435 ir_node *store, int arity, ir_node **in) {
1436 return new_rd_Return(NULL, irg, block, store, arity, in);
1438 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1439 ir_mode *mode, tarval *con) {
1440 return new_rd_Const(NULL, irg, block, mode, con);
1442 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1443 ir_mode *mode, long value) {
1444 return new_rd_Const_long(NULL, irg, block, mode, value);
1446 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1447 ir_mode *mode, tarval *con, ir_type *tp) {
1448 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1450 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1451 symconst_symbol value, symconst_kind symkind) {
1452 return new_rd_SymConst(NULL, irg, block, value, symkind);
1454 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1455 ir_node *objptr, entity *ent) {
1456 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1458 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1459 ir_node *objptr, int n_index, ir_node **index,
1461 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1463 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1464 ir_node *callee, int arity, ir_node **in,
1466 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1468 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1469 ir_node *op1, ir_node *op2, ir_mode *mode) {
1470 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1472 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1473 ir_node *op1, ir_node *op2, ir_mode *mode) {
1474 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1476 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1477 ir_node *op, ir_mode *mode) {
1478 return new_rd_Minus(NULL, irg, block, op, mode);
1480 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1481 ir_node *op1, ir_node *op2, ir_mode *mode) {
1482 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1484 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1485 ir_node *memop, ir_node *op1, ir_node *op2) {
1486 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1488 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1489 ir_node *memop, ir_node *op1, ir_node *op2) {
1490 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1492 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1493 ir_node *memop, ir_node *op1, ir_node *op2) {
1494 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1496 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1497 ir_node *memop, ir_node *op1, ir_node *op2) {
1498 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1500 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1501 ir_node *op, ir_mode *mode) {
1502 return new_rd_Abs(NULL, irg, block, op, mode);
1504 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1505 ir_node *op1, ir_node *op2, ir_mode *mode) {
1506 return new_rd_And(NULL, irg, block, op1, op2, mode);
1508 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1509 ir_node *op1, ir_node *op2, ir_mode *mode) {
1510 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1512 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1513 ir_node *op1, ir_node *op2, ir_mode *mode) {
1514 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1516 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1517 ir_node *op, ir_mode *mode) {
1518 return new_rd_Not(NULL, irg, block, op, mode);
1520 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1521 ir_node *op, ir_node *k, ir_mode *mode) {
1522 return new_rd_Shl(NULL, irg, block, op, k, mode);
1524 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1525 ir_node *op, ir_node *k, ir_mode *mode) {
1526 return new_rd_Shr(NULL, irg, block, op, k, mode);
1528 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1529 ir_node *op, ir_node *k, ir_mode *mode) {
1530 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1532 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1533 ir_node *op, ir_node *k, ir_mode *mode) {
1534 return new_rd_Rot(NULL, irg, block, op, k, mode);
1536 ir_node *new_r_Carry (ir_graph *irg, ir_node *block,
1537 ir_node *op, ir_node *k, ir_mode *mode) {
1538 return new_rd_Carry(NULL, irg, block, op, k, mode);
1540 ir_node *new_r_Borrow (ir_graph *irg, ir_node *block,
1541 ir_node *op, ir_node *k, ir_mode *mode) {
1542 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1544 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1545 ir_node *op1, ir_node *op2) {
1546 return new_rd_Cmp(NULL, irg, block, op1, op2);
1548 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1549 ir_node *op, ir_mode *mode) {
1550 return new_rd_Conv(NULL, irg, block, op, mode);
1552 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1553 return new_rd_Cast(NULL, irg, block, op, to_tp);
1555 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1556 ir_node **in, ir_mode *mode) {
1557 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1559 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1560 ir_node *store, ir_node *adr, ir_mode *mode) {
1561 return new_rd_Load(NULL, irg, block, store, adr, mode);
1563 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1564 ir_node *store, ir_node *adr, ir_node *val) {
1565 return new_rd_Store(NULL, irg, block, store, adr, val);
1567 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1568 ir_node *size, ir_type *alloc_type, where_alloc where) {
1569 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1571 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1572 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1573 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1575 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1576 return new_rd_Sync(NULL, irg, block, arity, in);
1578 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1579 ir_mode *mode, long proj) {
1580 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1582 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1584 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1586 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1587 int arity, ir_node **in) {
1588 return new_rd_Tuple(NULL, irg, block, arity, in );
1590 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1591 ir_node *val, ir_mode *mode) {
1592 return new_rd_Id(NULL, irg, block, val, mode);
1594 ir_node *new_r_Bad (ir_graph *irg) {
1595 return new_rd_Bad(irg);
1597 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1598 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1600 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1601 return new_rd_Unknown(irg, m);
1603 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1604 return new_rd_CallBegin(NULL, irg, block, callee);
1606 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1607 return new_rd_EndReg(NULL, irg, block);
1609 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1610 return new_rd_EndExcept(NULL, irg, block);
1612 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1613 return new_rd_Break(NULL, irg, block);
1615 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1616 ir_mode *mode, long proj) {
1617 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1619 ir_node *new_r_NoMem (ir_graph *irg) {
1620 return new_rd_NoMem(irg);
1622 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1623 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1624 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1626 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1627 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1628 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1630 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1632 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1634 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1635 ir_node *store, ir_node *obj) {
1636 return new_rd_Raise(NULL, irg, block, store, obj);
1638 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1639 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1640 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1643 /** ********************/
1644 /** public interfaces */
1645 /** construction tools */
1649 * - create a new Start node in the current block
1651 * @return s - pointer to the created Start node
1656 new_d_Start (dbg_info *db)
1660 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1661 op_Start, mode_T, 0, NULL);
1662 /* res->attr.start.irg = current_ir_graph; */
1664 res = optimize_node(res);
1665 IRN_VRFY_IRG(res, current_ir_graph);
1670 new_d_End (dbg_info *db)
1673 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1674 op_End, mode_X, -1, NULL);
1675 res = optimize_node(res);
1676 IRN_VRFY_IRG(res, current_ir_graph);
1681 /* Constructs a Block with a fixed number of predecessors.
1682 Does set current_block. Can be used with automatic Phi
1683 node construction. */
1685 new_d_Block (dbg_info *db, int arity, ir_node **in)
1689 int has_unknown = 0;
1691 res = new_bd_Block(db, arity, in);
1693 /* Create and initialize array for Phi-node construction. */
1694 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1695 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1696 current_ir_graph->n_loc);
1697 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1700 for (i = arity-1; i >= 0; i--)
1701 if (get_irn_op(in[i]) == op_Unknown) {
1706 if (!has_unknown) res = optimize_node(res);
1707 current_ir_graph->current_block = res;
1709 IRN_VRFY_IRG(res, current_ir_graph);
1714 /* ***********************************************************************/
1715 /* Methods necessary for automatic Phi node creation */
1717 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1718 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1719 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1720 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1722 Call Graph: ( A ---> B == A "calls" B)
1724 get_value mature_immBlock
1732 get_r_value_internal |
1736 new_rd_Phi0 new_rd_Phi_in
1738 * *************************************************************************** */
1740 /** Creates a Phi node with 0 predecessors */
1741 static INLINE ir_node *
1742 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1746 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1747 IRN_VRFY_IRG(res, irg);
1751 /* There are two implementations of the Phi node construction. The first
1752 is faster, but does not work for blocks with more than 2 predecessors.
1753 The second works always but is slower and causes more unnecessary Phi
1755 Select the implementations by the following preprocessor flag set in
1757 #if USE_FAST_PHI_CONSTRUCTION
1759 /* This is a stack used for allocating and deallocating nodes in
1760 new_rd_Phi_in. The original implementation used the obstack
1761 to model this stack, now it is explicit. This reduces side effects.
1763 #if USE_EXPLICIT_PHI_IN_STACK
1765 new_Phi_in_stack(void) {
1768 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1770 res->stack = NEW_ARR_F (ir_node *, 0);
1777 free_Phi_in_stack(Phi_in_stack *s) {
1778 DEL_ARR_F(s->stack);
1782 free_to_Phi_in_stack(ir_node *phi) {
1783 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1784 current_ir_graph->Phi_in_stack->pos)
1785 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1787 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1789 (current_ir_graph->Phi_in_stack->pos)++;
1792 static INLINE ir_node *
1793 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1794 int arity, ir_node **in) {
1796 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1797 int pos = current_ir_graph->Phi_in_stack->pos;
1801 /* We need to allocate a new node */
1802 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1803 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1805 /* reuse the old node and initialize it again. */
1808 assert (res->kind == k_ir_node);
1809 assert (res->op == op_Phi);
1813 assert (arity >= 0);
1814 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1815 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1817 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1819 (current_ir_graph->Phi_in_stack->pos)--;
1823 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1825 /* Creates a Phi node with a given, fixed array **in of predecessors.
1826 If the Phi node is unnecessary, as the same value reaches the block
1827 through all control flow paths, it is eliminated and the value
1828 returned directly. This constructor is only intended for use in
1829 the automatic Phi node generation triggered by get_value or mature.
1830 The implementation is quite tricky and depends on the fact, that
1831 the nodes are allocated on a stack:
1832 The in array contains predecessors and NULLs. The NULLs appear,
1833 if get_r_value_internal, that computed the predecessors, reached
1834 the same block on two paths. In this case the same value reaches
1835 this block on both paths, there is no definition in between. We need
1836 not allocate a Phi where these path's merge, but we have to communicate
1837 this fact to the caller. This happens by returning a pointer to the
1838 node the caller _will_ allocate. (Yes, we predict the address. We can
1839 do so because the nodes are allocated on the obstack.) The caller then
1840 finds a pointer to itself and, when this routine is called again,
1843 static INLINE ir_node *
1844 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1847 ir_node *res, *known;
1849 /* Allocate a new node on the obstack. This can return a node to
1850 which some of the pointers in the in-array already point.
1851 Attention: the constructor copies the in array, i.e., the later
1852 changes to the array in this routine do not affect the
1853 constructed node! If the in array contains NULLs, there will be
1854 missing predecessors in the returned node. Is this a possible
1855 internal state of the Phi node generation? */
1856 #if USE_EXPLICIT_PHI_IN_STACK
1857 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1859 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1860 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1863 /* The in-array can contain NULLs. These were returned by
1864 get_r_value_internal if it reached the same block/definition on a
1865 second path. The NULLs are replaced by the node itself to
1866 simplify the test in the next loop. */
1867 for (i = 0; i < ins; ++i) {
1872 /* This loop checks whether the Phi has more than one predecessor.
1873 If so, it is a real Phi node and we break the loop. Else the Phi
1874 node merges the same definition on several paths and therefore is
1876 for (i = 0; i < ins; ++i) {
1877 if (in[i] == res || in[i] == known)
1886 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1888 #if USE_EXPLICIT_PHI_IN_STACK
1889 free_to_Phi_in_stack(res);
1891 edges_node_deleted(res, current_ir_graph);
1892 obstack_free(current_ir_graph->obst, res);
1896 res = optimize_node (res);
1897 IRN_VRFY_IRG(res, irg);
1900 /* return the pointer to the Phi node. This node might be deallocated! */
1905 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1908 allocates and returns this node. The routine called to allocate the
1909 node might optimize it away and return a real value, or even a pointer
1910 to a deallocated Phi node on top of the obstack!
1911 This function is called with an in-array of proper size. **/
1913 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1915 ir_node *prevBlock, *res;
1918 /* This loop goes to all predecessor blocks of the block the Phi node is in
1919 and there finds the operands of the Phi node by calling
1920 get_r_value_internal. */
1921 for (i = 1; i <= ins; ++i) {
1922 assert (block->in[i]);
1923 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1925 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1928 /* After collecting all predecessors into the array nin a new Phi node
1929 with these predecessors is created. This constructor contains an
1930 optimization: If all predecessors of the Phi node are identical it
1931 returns the only operand instead of a new Phi node. If the value
1932 passes two different control flow edges without being defined, and
1933 this is the second path treated, a pointer to the node that will be
1934 allocated for the first path (recursion) is returned. We already
1935 know the address of this node, as it is the next node to be allocated
1936 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1937 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1939 /* Now we now the value for "pos" and can enter it in the array with
1940 all known local variables. Attention: this might be a pointer to
1941 a node, that later will be allocated!!! See new_rd_Phi_in.
1942 If this is called in mature, after some set_value in the same block,
1943 the proper value must not be overwritten:
1945 get_value (makes Phi0, put's it into graph_arr)
1946 set_value (overwrites Phi0 in graph_arr)
1947 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1950 if (!block->attr.block.graph_arr[pos]) {
1951 block->attr.block.graph_arr[pos] = res;
1953 /* printf(" value already computed by %s\n",
1954 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1960 /* This function returns the last definition of a variable. In case
1961 this variable was last defined in a previous block, Phi nodes are
1962 inserted. If the part of the firm graph containing the definition
1963 is not yet constructed, a dummy Phi node is returned. */
1965 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1968 /* There are 4 cases to treat.
1970 1. The block is not mature and we visit it the first time. We can not
1971 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1972 predecessors is returned. This node is added to the linked list (field
1973 "link") of the containing block to be completed when this block is
1974 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1977 2. The value is already known in this block, graph_arr[pos] is set and we
1978 visit the block the first time. We can return the value without
1979 creating any new nodes.
1981 3. The block is mature and we visit it the first time. A Phi node needs
1982 to be created (phi_merge). If the Phi is not needed, as all it's
1983 operands are the same value reaching the block through different
1984 paths, it's optimized away and the value itself is returned.
1986 4. The block is mature, and we visit it the second time. Now two
1987 subcases are possible:
1988 * The value was computed completely the last time we were here. This
1989 is the case if there is no loop. We can return the proper value.
1990 * The recursion that visited this node and set the flag did not
1991 return yet. We are computing a value in a loop and need to
1992 break the recursion without knowing the result yet.
1993 @@@ strange case. Straight forward we would create a Phi before
1994 starting the computation of it's predecessors. In this case we will
1995 find a Phi here in any case. The problem is that this implementation
1996 only creates a Phi after computing the predecessors, so that it is
1997 hard to compute self references of this Phi. @@@
1998 There is no simple check for the second subcase. Therefore we check
1999 for a second visit and treat all such cases as the second subcase.
2000 Anyways, the basic situation is the same: we reached a block
2001 on two paths without finding a definition of the value: No Phi
2002 nodes are needed on both paths.
2003 We return this information "Two paths, no Phi needed" by a very tricky
2004 implementation that relies on the fact that an obstack is a stack and
2005 will return a node with the same address on different allocations.
2006 Look also at phi_merge and new_rd_phi_in to understand this.
2007 @@@ Unfortunately this does not work, see testprogram
2008 three_cfpred_example.
2012 /* case 4 -- already visited. */
2013 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2015 /* visited the first time */
2016 set_irn_visited(block, get_irg_visited(current_ir_graph));
2018 /* Get the local valid value */
2019 res = block->attr.block.graph_arr[pos];
2021 /* case 2 -- If the value is actually computed, return it. */
2022 if (res) return res;
2024 if (block->attr.block.matured) { /* case 3 */
2026 /* The Phi has the same amount of ins as the corresponding block. */
2027 int ins = get_irn_arity(block);
2029 NEW_ARR_A (ir_node *, nin, ins);
2031 /* Phi merge collects the predecessors and then creates a node. */
2032 res = phi_merge (block, pos, mode, nin, ins);
2034 } else { /* case 1 */
2035 /* The block is not mature, we don't know how many in's are needed. A Phi
2036 with zero predecessors is created. Such a Phi node is called Phi0
2037 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2038 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2040 The Phi0 has to remember the pos of it's internal value. If the real
2041 Phi is computed, pos is used to update the array with the local
2044 res = new_rd_Phi0 (current_ir_graph, block, mode);
2045 res->attr.phi0_pos = pos;
2046 res->link = block->link;
2050 /* If we get here, the frontend missed a use-before-definition error */
2053 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2054 assert (mode->code >= irm_F && mode->code <= irm_P);
2055 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2056 tarval_mode_null[mode->code]);
2059 /* The local valid value is available now. */
2060 block->attr.block.graph_arr[pos] = res;
2068 it starts the recursion. This causes an Id at the entry of
2069 every block that has no definition of the value! **/
2071 #if USE_EXPLICIT_PHI_IN_STACK
2073 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2074 void free_Phi_in_stack(Phi_in_stack *s) { }
2077 static INLINE ir_node *
2078 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
2079 ir_node **in, int ins, ir_node *phi0)
2082 ir_node *res, *known;
2084 /* Allocate a new node on the obstack. The allocation copies the in
2086 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2087 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2089 /* This loop checks whether the Phi has more than one predecessor.
2090 If so, it is a real Phi node and we break the loop. Else the
2091 Phi node merges the same definition on several paths and therefore
2092 is not needed. Don't consider Bad nodes! */
2094 for (i=0; i < ins; ++i)
2098 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2100 /* Optimize self referencing Phis: We can't detect them yet properly, as
2101 they still refer to the Phi0 they will replace. So replace right now. */
2102 if (phi0 && in[i] == phi0) in[i] = res;
2104 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2112 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2115 edges_node_deleted(res, current_ir_graph);
2116 obstack_free (current_ir_graph->obst, res);
2117 if (is_Phi(known)) {
2118 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2119 order, an enclosing Phi know may get superfluous. */
2120 res = optimize_in_place_2(known);
2122 exchange(known, res);
2128 /* A undefined value, e.g., in unreachable code. */
2132 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2133 IRN_VRFY_IRG(res, irg);
2134 /* Memory Phis in endless loops must be kept alive.
2135 As we can't distinguish these easily we keep all of them alive. */
2136 if ((res->op == op_Phi) && (mode == mode_M))
2137 add_End_keepalive(irg->end, res);
2144 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2146 #if PRECISE_EXC_CONTEXT
2148 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2150 /* Construct a new frag_array for node n.
2151 Copy the content from the current graph_arr of the corresponding block:
2152 this is the current state.
2153 Set ProjM(n) as current memory state.
2154 Further the last entry in frag_arr of current block points to n. This
2155 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2157 static INLINE ir_node ** new_frag_arr (ir_node *n)
2162 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2163 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2164 sizeof(ir_node *)*current_ir_graph->n_loc);
2166 /* turn off optimization before allocating Proj nodes, as res isn't
2168 opt = get_opt_optimize(); set_optimize(0);
2169 /* Here we rely on the fact that all frag ops have Memory as first result! */
2170 if (get_irn_op(n) == op_Call)
2171 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2172 else if (get_irn_op(n) == op_CopyB)
2173 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2174 else if (get_irn_op(n) == op_Bound)
2175 arr[0] = new_Proj(n, mode_M, pn_Bound_M_except);
2177 assert((pn_Quot_M == pn_DivMod_M) &&
2178 (pn_Quot_M == pn_Div_M) &&
2179 (pn_Quot_M == pn_Mod_M) &&
2180 (pn_Quot_M == pn_Load_M) &&
2181 (pn_Quot_M == pn_Store_M) &&
2182 (pn_Quot_M == pn_Alloc_M) );
2183 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2187 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2192 * returns the frag_arr from a node
2194 static INLINE ir_node **
2195 get_frag_arr (ir_node *n) {
2196 switch (get_irn_opcode(n)) {
2198 return n->attr.call.exc.frag_arr;
2200 return n->attr.a.exc.frag_arr;
2202 return n->attr.load.exc.frag_arr;
2204 return n->attr.store.exc.frag_arr;
2206 return n->attr.except.frag_arr;
2211 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2213 if (!frag_arr[pos]) frag_arr[pos] = val;
2214 if (frag_arr[current_ir_graph->n_loc - 1]) {
2215 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2216 assert(arr != frag_arr && "Endless recursion detected");
2217 set_frag_value(arr, pos, val);
2222 for (i = 0; i < 1000; ++i) {
2223 if (!frag_arr[pos]) {
2224 frag_arr[pos] = val;
2226 if (frag_arr[current_ir_graph->n_loc - 1]) {
2227 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2233 assert(0 && "potential endless recursion");
2238 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2242 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2244 frag_arr = get_frag_arr(cfOp);
2245 res = frag_arr[pos];
2247 if (block->attr.block.graph_arr[pos]) {
2248 /* There was a set_value after the cfOp and no get_value before that
2249 set_value. We must build a Phi node now. */
2250 if (block->attr.block.matured) {
2251 int ins = get_irn_arity(block);
2253 NEW_ARR_A (ir_node *, nin, ins);
2254 res = phi_merge(block, pos, mode, nin, ins);
2256 res = new_rd_Phi0 (current_ir_graph, block, mode);
2257 res->attr.phi0_pos = pos;
2258 res->link = block->link;
2262 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2263 but this should be better: (remove comment if this works) */
2264 /* It's a Phi, we can write this into all graph_arrs with NULL */
2265 set_frag_value(block->attr.block.graph_arr, pos, res);
2267 res = get_r_value_internal(block, pos, mode);
2268 set_frag_value(block->attr.block.graph_arr, pos, res);
2273 #endif /* PRECISE_EXC_CONTEXT */
2276 computes the predecessors for the real phi node, and then
2277 allocates and returns this node. The routine called to allocate the
2278 node might optimize it away and return a real value.
2279 This function must be called with an in-array of proper size. **/
2281 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2283 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2286 /* If this block has no value at pos create a Phi0 and remember it
2287 in graph_arr to break recursions.
2288 Else we may not set graph_arr as there a later value is remembered. */
2290 if (!block->attr.block.graph_arr[pos]) {
2291 if (block == get_irg_start_block(current_ir_graph)) {
2292 /* Collapsing to Bad tarvals is no good idea.
2293 So we call a user-supplied routine here that deals with this case as
2294 appropriate for the given language. Sorrily the only help we can give
2295 here is the position.
2297 Even if all variables are defined before use, it can happen that
2298 we get to the start block, if a Cond has been replaced by a tuple
2299 (bad, jmp). In this case we call the function needlessly, eventually
2300 generating an non existent error.
2301 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2304 if (default_initialize_local_variable) {
2305 ir_node *rem = get_cur_block();
2307 set_cur_block(block);
2308 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2312 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2313 /* We don't need to care about exception ops in the start block.
2314 There are none by definition. */
2315 return block->attr.block.graph_arr[pos];
2317 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2318 block->attr.block.graph_arr[pos] = phi0;
2319 #if PRECISE_EXC_CONTEXT
2320 if (get_opt_precise_exc_context()) {
2321 /* Set graph_arr for fragile ops. Also here we should break recursion.
2322 We could choose a cyclic path through an cfop. But the recursion would
2323 break at some point. */
2324 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2330 /* This loop goes to all predecessor blocks of the block the Phi node
2331 is in and there finds the operands of the Phi node by calling
2332 get_r_value_internal. */
2333 for (i = 1; i <= ins; ++i) {
2334 prevCfOp = skip_Proj(block->in[i]);
2336 if (is_Bad(prevCfOp)) {
2337 /* In case a Cond has been optimized we would get right to the start block
2338 with an invalid definition. */
2339 nin[i-1] = new_Bad();
2342 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2344 if (!is_Bad(prevBlock)) {
2345 #if PRECISE_EXC_CONTEXT
2346 if (get_opt_precise_exc_context() &&
2347 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2348 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2349 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2352 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2354 nin[i-1] = new_Bad();
2358 /* We want to pass the Phi0 node to the constructor: this finds additional
2359 optimization possibilities.
2360 The Phi0 node either is allocated in this function, or it comes from
2361 a former call to get_r_value_internal. In this case we may not yet
2362 exchange phi0, as this is done in mature_immBlock. */
2364 phi0_all = block->attr.block.graph_arr[pos];
2365 if (!((get_irn_op(phi0_all) == op_Phi) &&
2366 (get_irn_arity(phi0_all) == 0) &&
2367 (get_nodes_block(phi0_all) == block)))
2373 /* After collecting all predecessors into the array nin a new Phi node
2374 with these predecessors is created. This constructor contains an
2375 optimization: If all predecessors of the Phi node are identical it
2376 returns the only operand instead of a new Phi node. */
2377 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2379 /* In case we allocated a Phi0 node at the beginning of this procedure,
2380 we need to exchange this Phi0 with the real Phi. */
2382 exchange(phi0, res);
2383 block->attr.block.graph_arr[pos] = res;
2384 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2385 only an optimization. */
2391 /* This function returns the last definition of a variable. In case
2392 this variable was last defined in a previous block, Phi nodes are
2393 inserted. If the part of the firm graph containing the definition
2394 is not yet constructed, a dummy Phi node is returned. */
2396 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2399 /* There are 4 cases to treat.
2401 1. The block is not mature and we visit it the first time. We can not
2402 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2403 predecessors is returned. This node is added to the linked list (field
2404 "link") of the containing block to be completed when this block is
2405 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2408 2. The value is already known in this block, graph_arr[pos] is set and we
2409 visit the block the first time. We can return the value without
2410 creating any new nodes.
2412 3. The block is mature and we visit it the first time. A Phi node needs
2413 to be created (phi_merge). If the Phi is not needed, as all it's
2414 operands are the same value reaching the block through different
2415 paths, it's optimized away and the value itself is returned.
2417 4. The block is mature, and we visit it the second time. Now two
2418 subcases are possible:
2419 * The value was computed completely the last time we were here. This
2420 is the case if there is no loop. We can return the proper value.
2421 * The recursion that visited this node and set the flag did not
2422 return yet. We are computing a value in a loop and need to
2423 break the recursion. This case only happens if we visited
2424 the same block with phi_merge before, which inserted a Phi0.
2425 So we return the Phi0.
2428 /* case 4 -- already visited. */
2429 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2430 /* As phi_merge allocates a Phi0 this value is always defined. Here
2431 is the critical difference of the two algorithms. */
2432 assert(block->attr.block.graph_arr[pos]);
2433 return block->attr.block.graph_arr[pos];
2436 /* visited the first time */
2437 set_irn_visited(block, get_irg_visited(current_ir_graph));
2439 /* Get the local valid value */
2440 res = block->attr.block.graph_arr[pos];
2442 /* case 2 -- If the value is actually computed, return it. */
2443 if (res) { return res; };
2445 if (block->attr.block.matured) { /* case 3 */
2447 /* The Phi has the same amount of ins as the corresponding block. */
2448 int ins = get_irn_arity(block);
2450 NEW_ARR_A (ir_node *, nin, ins);
2452 /* Phi merge collects the predecessors and then creates a node. */
2453 res = phi_merge (block, pos, mode, nin, ins);
2455 } else { /* case 1 */
2456 /* The block is not mature, we don't know how many in's are needed. A Phi
2457 with zero predecessors is created. Such a Phi node is called Phi0
2458 node. The Phi0 is then added to the list of Phi0 nodes in this block
2459 to be matured by mature_immBlock later.
2460 The Phi0 has to remember the pos of it's internal value. If the real
2461 Phi is computed, pos is used to update the array with the local
2463 res = new_rd_Phi0 (current_ir_graph, block, mode);
2464 res->attr.phi0_pos = pos;
2465 res->link = block->link;
2469 /* If we get here, the frontend missed a use-before-definition error */
2472 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2473 assert (mode->code >= irm_F && mode->code <= irm_P);
2474 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2475 get_mode_null(mode));
2478 /* The local valid value is available now. */
2479 block->attr.block.graph_arr[pos] = res;
2484 #endif /* USE_FAST_PHI_CONSTRUCTION */
2486 /* ************************************************************************** */
2489 * Finalize a Block node, when all control flows are known.
2490 * Acceptable parameters are only Block nodes.
2493 mature_immBlock (ir_node *block)
2499 assert (get_irn_opcode(block) == iro_Block);
2500 /* @@@ should be commented in
2501 assert (!get_Block_matured(block) && "Block already matured"); */
2503 if (!get_Block_matured(block)) {
2504 ins = ARR_LEN (block->in)-1;
2505 /* Fix block parameters */
2506 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2508 /* An array for building the Phi nodes. */
2509 NEW_ARR_A (ir_node *, nin, ins);
2511 /* Traverse a chain of Phi nodes attached to this block and mature
2513 for (n = block->link; n; n=next) {
2514 inc_irg_visited(current_ir_graph);
2516 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2519 block->attr.block.matured = 1;
2521 /* Now, as the block is a finished firm node, we can optimize it.
2522 Since other nodes have been allocated since the block was created
2523 we can not free the node on the obstack. Therefore we have to call
2525 Unfortunately the optimization does not change a lot, as all allocated
2526 nodes refer to the unoptimized node.
2527 We can call _2, as global cse has no effect on blocks. */
2528 block = optimize_in_place_2(block);
2529 IRN_VRFY_IRG(block, current_ir_graph);
2534 new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
2536 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2540 new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
2542 return new_bd_Const(db, current_ir_graph->start_block, mode, con);
2546 new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
2548 return new_bd_Const_long(db, current_ir_graph->start_block, mode, value);
2552 new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp)
2554 return new_bd_Const_type(db, current_ir_graph->start_block, mode, con, tp);
2559 new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
2561 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2565 new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2567 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2571 new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
2574 assert(arg->op == op_Cond);
2575 arg->attr.c.kind = fragmentary;
2576 arg->attr.c.default_proj = max_proj;
2577 res = new_Proj (arg, mode_X, max_proj);
2582 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
2584 return new_bd_Conv(db, current_ir_graph->current_block, op, mode);
2588 new_d_Cast (dbg_info *db, ir_node *op, ir_type *to_tp)
2590 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2594 new_d_Tuple (dbg_info *db, int arity, ir_node **in)
2596 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2605 * allocate the frag array
2607 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2608 if (get_opt_precise_exc_context()) {
2609 if ((current_ir_graph->phase_state == phase_building) &&
2610 (get_irn_op(res) == op) && /* Could be optimized away. */
2611 !*frag_store) /* Could be a cse where the arr is already set. */ {
2612 *frag_store = new_frag_arr(res);
2618 new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2621 res = new_bd_Quot (db, current_ir_graph->current_block, memop, op1, op2);
2622 res->attr.except.pin_state = op_pin_state_pinned;
2623 #if PRECISE_EXC_CONTEXT
2624 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2631 new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2634 res = new_bd_DivMod (db, current_ir_graph->current_block, memop, op1, op2);
2635 res->attr.except.pin_state = op_pin_state_pinned;
2636 #if PRECISE_EXC_CONTEXT
2637 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2644 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2647 res = new_bd_Div (db, current_ir_graph->current_block, memop, op1, op2);
2648 res->attr.except.pin_state = op_pin_state_pinned;
2649 #if PRECISE_EXC_CONTEXT
2650 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2657 new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2660 res = new_bd_Mod (db, current_ir_graph->current_block, memop, op1, op2);
2661 res->attr.except.pin_state = op_pin_state_pinned;
2662 #if PRECISE_EXC_CONTEXT
2663 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2682 new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
2684 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2688 new_d_Jmp (dbg_info *db)
2690 return new_bd_Jmp (db, current_ir_graph->current_block);
2694 new_d_IJmp (dbg_info *db, ir_node *tgt)
2696 return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
2700 new_d_Cond (dbg_info *db, ir_node *c)
2702 return new_bd_Cond (db, current_ir_graph->current_block, c);
2706 new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2710 res = new_bd_Call (db, current_ir_graph->current_block,
2711 store, callee, arity, in, tp);
2712 #if PRECISE_EXC_CONTEXT
2713 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2720 new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
2722 return new_bd_Return (db, current_ir_graph->current_block,
2727 new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
2730 res = new_bd_Load (db, current_ir_graph->current_block,
2732 #if PRECISE_EXC_CONTEXT
2733 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2740 new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
2743 res = new_bd_Store (db, current_ir_graph->current_block,
2745 #if PRECISE_EXC_CONTEXT
2746 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2753 new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2757 res = new_bd_Alloc (db, current_ir_graph->current_block,
2758 store, size, alloc_type, where);
2759 #if PRECISE_EXC_CONTEXT
2760 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
2767 new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
2768 ir_node *size, ir_type *free_type, where_alloc where)
2770 return new_bd_Free (db, current_ir_graph->current_block,
2771 store, ptr, size, free_type, where);
2775 new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
2776 /* GL: objptr was called frame before. Frame was a bad choice for the name
2777 as the operand could as well be a pointer to a dynamic object. */
2779 return new_bd_Sel (db, current_ir_graph->current_block,
2780 store, objptr, 0, NULL, ent);
2784 new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2786 return new_bd_Sel (db, current_ir_graph->current_block,
2787 store, objptr, n_index, index, sel);
2791 new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
2793 return new_bd_SymConst_type (db, current_ir_graph->start_block,
2798 new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
2800 return new_bd_SymConst (db, current_ir_graph->start_block,
2805 new_d_Sync (dbg_info *db, int arity, ir_node** in)
2807 return new_bd_Sync (db, current_ir_graph->current_block, arity, in);
2813 return _new_d_Bad();
2817 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2819 return new_bd_Confirm (db, current_ir_graph->current_block,
2824 new_d_Unknown (ir_mode *m)
2826 return new_bd_Unknown(m);
2830 new_d_CallBegin (dbg_info *db, ir_node *call)
2833 res = new_bd_CallBegin (db, current_ir_graph->current_block, call);
2838 new_d_EndReg (dbg_info *db)
2841 res = new_bd_EndReg(db, current_ir_graph->current_block);
2846 new_d_EndExcept (dbg_info *db)
2849 res = new_bd_EndExcept(db, current_ir_graph->current_block);
2854 new_d_Break (dbg_info *db)
2856 return new_bd_Break (db, current_ir_graph->current_block);
2860 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2862 return new_bd_Filter (db, current_ir_graph->current_block,
2869 return _new_d_NoMem();
2873 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
2874 ir_node *ir_true, ir_mode *mode) {
2875 return new_bd_Mux (db, current_ir_graph->current_block,
2876 sel, ir_false, ir_true, mode);
2879 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2880 ir_node *dst, ir_node *src, ir_type *data_type) {
2882 res = new_bd_CopyB(db, current_ir_graph->current_block,
2883 store, dst, src, data_type);
2884 #if PRECISE_EXC_CONTEXT
2885 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2891 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type)
2893 return new_bd_InstOf (db, current_ir_graph->current_block,
2894 store, objptr, type);
2898 new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
2900 return new_bd_Raise (db, current_ir_graph->current_block,
2904 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2905 ir_node *idx, ir_node *lower, ir_node *upper) {
2907 res = new_bd_Bound(db, current_ir_graph->current_block,
2908 store, idx, lower, upper);
2909 #if PRECISE_EXC_CONTEXT
2910 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2915 /* ********************************************************************* */
2916 /* Comfortable interface with automatic Phi node construction. */
2917 /* (Also uses constructors of the ?? interface, except new_Block.)       */
2918 /* ********************************************************************* */
2920 /* Block construction */
2921 /* immature Block without predecessors */
2922 ir_node *new_d_immBlock (dbg_info *db) {
2925 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2926 /* creates a new dynamic in-array as length of in is -1 */
2927 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2928 current_ir_graph->current_block = res;
2929 res->attr.block.matured = 0;
2930 res->attr.block.dead = 0;
2931 /* res->attr.block.exc = exc_normal; */
2932 /* res->attr.block.handler_entry = 0; */
2933 res->attr.block.irg = current_ir_graph;
2934 res->attr.block.backedge = NULL;
2935 res->attr.block.in_cg = NULL;
2936 res->attr.block.cg_backedge = NULL;
2937 set_Block_block_visited(res, 0);
2939 /* Create and initialize array for Phi-node construction. */
2940 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2941 current_ir_graph->n_loc);
2942 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2944 /* Immature block may not be optimized! */
2945 IRN_VRFY_IRG(res, current_ir_graph);
2951 new_immBlock (void) {
2952 return new_d_immBlock(NULL);
2955 /* add an edge to a jmp/control flow node */
2957 add_immBlock_pred (ir_node *block, ir_node *jmp)
2959 if (block->attr.block.matured) {
2960 assert(0 && "Error: Block already matured!\n");
2963 assert(jmp != NULL);
2964 ARR_APP1(ir_node *, block->in, jmp);
2968 /* changing the current block */
2970 set_cur_block (ir_node *target) {
2971 current_ir_graph->current_block = target;
2974 /* ************************ */
2975 /* parameter administration */
2977 /* get a value from the parameter array from the current block by its index */
2979 get_d_value (dbg_info *db, int pos, ir_mode *mode)
2981 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2982 inc_irg_visited(current_ir_graph);
2984 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2986 /* get a value from the parameter array from the current block by its index */
2988 get_value (int pos, ir_mode *mode)
2990 return get_d_value(NULL, pos, mode);
2993 /* set a value at position pos in the parameter array from the current block */
2995 set_value (int pos, ir_node *value)
2997 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2998 assert(pos+1 < current_ir_graph->n_loc);
2999 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
3003 find_value(ir_node *value)
3006 ir_node *bl = current_ir_graph->current_block;
3008 for (i = 1; i < ARR_LEN(bl->attr.block.graph_arr); ++i)
3009 if (bl->attr.block.graph_arr[i] == value)
3014 /* get the current store */
3018 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3019 /* GL: one could call get_value instead */
3020 inc_irg_visited(current_ir_graph);
3021 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3024 /* set the current store */
3026 set_store (ir_node *store)
3028 /* GL: one could call set_value instead */
3029 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3030 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3034 keep_alive (ir_node *ka) {
3035 add_End_keepalive(current_ir_graph->end, ka);
3038 /* --- Useful access routines --- */
3039 /* Returns the current block of the current graph. To set the current
3040 block use set_cur_block. */
3041 ir_node *get_cur_block(void) {
3042 return get_irg_current_block(current_ir_graph);
3045 /* Returns the frame type of the current graph */
3046 ir_type *get_cur_frame_type(void) {
3047 return get_irg_frame_type(current_ir_graph);
3051 /* ********************************************************************* */
3054 /* call once for each run of the library */
3056 init_cons(uninitialized_local_variable_func_t *func)
3058 default_initialize_local_variable = func;
3061 /* call for each graph */
3063 irg_finalize_cons (ir_graph *irg) {
3064 irg->phase_state = phase_high;
3068 irp_finalize_cons (void) {
3069 int i, n_irgs = get_irp_n_irgs();
3070 for (i = 0; i < n_irgs; i++) {
3071 irg_finalize_cons(get_irp_irg(i));
3073 irp->phase_state = phase_high;
3077 ir_node *new_Block(int arity, ir_node **in) {
3078 return new_d_Block(NULL, arity, in);
3080 ir_node *new_Start (void) {
3081 return new_d_Start(NULL);
3083 ir_node *new_End (void) {
3084 return new_d_End(NULL);
3086 ir_node *new_Jmp (void) {
3087 return new_d_Jmp(NULL);
3089 ir_node *new_IJmp (ir_node *tgt) {
3090 return new_d_IJmp(NULL, tgt);
3092 ir_node *new_Cond (ir_node *c) {
3093 return new_d_Cond(NULL, c);
3095 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3096 return new_d_Return(NULL, store, arity, in);
3098 ir_node *new_Const (ir_mode *mode, tarval *con) {
3099 return new_d_Const(NULL, mode, con);
3102 ir_node *new_Const_long(ir_mode *mode, long value)
3104 return new_d_Const_long(NULL, mode, value);
3107 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3108 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3111 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3112 return new_d_SymConst(NULL, value, kind);
3114 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3115 return new_d_simpleSel(NULL, store, objptr, ent);
3117 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3119 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3121 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3123 return new_d_Call(NULL, store, callee, arity, in, tp);
3125 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3126 return new_d_Add(NULL, op1, op2, mode);
3128 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3129 return new_d_Sub(NULL, op1, op2, mode);
3131 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3132 return new_d_Minus(NULL, op, mode);
3134 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3135 return new_d_Mul(NULL, op1, op2, mode);
3137 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3138 return new_d_Quot(NULL, memop, op1, op2);
3140 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3141 return new_d_DivMod(NULL, memop, op1, op2);
3143 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3144 return new_d_Div(NULL, memop, op1, op2);
3146 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3147 return new_d_Mod(NULL, memop, op1, op2);
3149 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3150 return new_d_Abs(NULL, op, mode);
3152 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3153 return new_d_And(NULL, op1, op2, mode);
3155 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3156 return new_d_Or(NULL, op1, op2, mode);
3158 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3159 return new_d_Eor(NULL, op1, op2, mode);
3161 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3162 return new_d_Not(NULL, op, mode);
3164 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3165 return new_d_Shl(NULL, op, k, mode);
3167 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3168 return new_d_Shr(NULL, op, k, mode);
3170 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3171 return new_d_Shrs(NULL, op, k, mode);
3173 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3174 return new_d_Rot(NULL, op, k, mode);
3176 ir_node *new_Carry (ir_node *op1, ir_node *op2, ir_mode *mode) {
3177 return new_d_Carry(NULL, op1, op2, mode);
3179 ir_node *new_Borrow (ir_node *op1, ir_node *op2, ir_mode *mode) {
3180 return new_d_Borrow(NULL, op1, op2, mode);
3182 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3183 return new_d_Cmp(NULL, op1, op2);
3185 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3186 return new_d_Conv(NULL, op, mode);
3188 ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
3189 return new_d_Cast(NULL, op, to_tp);
3191 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3192 return new_d_Phi(NULL, arity, in, mode);
3194 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3195 return new_d_Load(NULL, store, addr, mode);
3197 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3198 return new_d_Store(NULL, store, addr, val);
3200 ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
3201 where_alloc where) {
3202 return new_d_Alloc(NULL, store, size, alloc_type, where);
3204 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3205 ir_type *free_type, where_alloc where) {
3206 return new_d_Free(NULL, store, ptr, size, free_type, where);
3208 ir_node *new_Sync (int arity, ir_node **in) {
3209 return new_d_Sync(NULL, arity, in);
3211 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3212 return new_d_Proj(NULL, arg, mode, proj);
3214 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3215 return new_d_defaultProj(NULL, arg, max_proj);
3217 ir_node *new_Tuple (int arity, ir_node **in) {
3218 return new_d_Tuple(NULL, arity, in);
3220 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3221 return new_d_Id(NULL, val, mode);
3223 ir_node *new_Bad (void) {
3226 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3227 return new_d_Confirm (NULL, val, bound, cmp);
3229 ir_node *new_Unknown(ir_mode *m) {
3230 return new_d_Unknown(m);
3232 ir_node *new_CallBegin (ir_node *callee) {
3233 return new_d_CallBegin(NULL, callee);
3235 ir_node *new_EndReg (void) {
3236 return new_d_EndReg(NULL);
3238 ir_node *new_EndExcept (void) {
3239 return new_d_EndExcept(NULL);
3241 ir_node *new_Break (void) {
3242 return new_d_Break(NULL);
3244 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3245 return new_d_Filter(NULL, arg, mode, proj);
3247 ir_node *new_NoMem (void) {
3248 return new_d_NoMem();
3250 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3251 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3253 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3254 return new_d_CopyB(NULL, store, dst, src, data_type);
3256 ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
3257 return new_d_InstOf (NULL, store, objptr, ent);
3259 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3260 return new_d_Raise(NULL, store, obj);
3262 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3263 return new_d_Bound(NULL, store, idx, lower, upper);