3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
29 #include "irgraph_t.h"
33 #include "firm_common_t.h"
39 #include "irbackedge_t.h"
41 #include "iredges_t.h"
44 #if USE_EXPLICIT_PHI_IN_STACK
45 /* A stack needed for the automatic Phi node construction in constructor
46 Phi_in. Redefinition in irgraph.c!! */
51 typedef struct Phi_in_stack Phi_in_stack;
54 /* when we need verifying */
56 # define IRN_VRFY_IRG(res, irg)
58 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
62 * Language dependent variable initialization callback.
64 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
66 /* creates a bd constructor for a binop */
/* Expands to a constructor new_bd_<instr> that builds a two-operand node of
   opcode op_##instr in the given block of the current graph, folds it through
   optimize_node(), and runs the debug verifier on the result. */
67 #define NEW_BD_BINOP(instr) \
69 new_bd_##instr (dbg_info *db, ir_node *block, \
70 ir_node *op1, ir_node *op2, ir_mode *mode) \
74 ir_graph *irg = current_ir_graph; \
77 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
78 res = optimize_node(res); \
79 IRN_VRFY_IRG(res, irg); \
83 /* creates a bd constructor for an unop */
/* Same as NEW_BD_BINOP but for single-operand nodes: the lone operand is
   passed by address as a one-element predecessor array. */
84 #define NEW_BD_UNOP(instr) \
86 new_bd_##instr (dbg_info *db, ir_node *block, \
87 ir_node *op, ir_mode *mode) \
90 ir_graph *irg = current_ir_graph; \
91 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
92 res = optimize_node(res); \
93 IRN_VRFY_IRG(res, irg); \
97 /* creates a bd constructor for an divop */
/* Division-like operations take an explicit memory operand (they may trap)
   and always produce a mode_T tuple with three predecessors. */
98 #define NEW_BD_DIVOP(instr) \
100 new_bd_##instr (dbg_info *db, ir_node *block, \
101 ir_node *memop, ir_node *op1, ir_node *op2) \
105 ir_graph *irg = current_ir_graph; \
109 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
110 res = optimize_node(res); \
111 IRN_VRFY_IRG(res, irg); \
115 /* creates a rd constructor for a binop */
116 #define NEW_RD_BINOP(instr) \
118 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
119 ir_node *op1, ir_node *op2, ir_mode *mode) \
122 ir_graph *rem = current_ir_graph; \
123 current_ir_graph = irg; \
124 res = new_bd_##instr(db, block, op1, op2, mode); \
125 current_ir_graph = rem; \
129 /* creates a rd constructor for an unop */
130 #define NEW_RD_UNOP(instr) \
132 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
133 ir_node *op, ir_mode *mode) \
136 ir_graph *rem = current_ir_graph; \
137 current_ir_graph = irg; \
138 res = new_bd_##instr(db, block, op, mode); \
139 current_ir_graph = rem; \
143 /* creates a rd constructor for an divop */
144 #define NEW_RD_DIVOP(instr) \
146 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
147 ir_node *memop, ir_node *op1, ir_node *op2) \
150 ir_graph *rem = current_ir_graph; \
151 current_ir_graph = irg; \
152 res = new_bd_##instr(db, block, memop, op1, op2); \
153 current_ir_graph = rem; \
157 /* creates a d constructor for an binop */
158 #define NEW_D_BINOP(instr) \
160 new_d_##instr (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
161 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
164 /* creates a d constructor for an unop */
165 #define NEW_D_UNOP(instr) \
167 new_d_##instr (dbg_info *db, ir_node *op, ir_mode *mode) { \
168 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
172 /* Constructs a Block with a fixed number of predecessors.
173 Does not set current_block. Can not be used with automatic
174 Phi node construction. */
176 new_bd_Block (dbg_info *db, int arity, ir_node **in)
179 ir_graph *irg = current_ir_graph;
/* A Block has no enclosing block of its own: pass NULL as the block arg. */
181 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* All predecessors are known up front, so the block is born mature and
   its visited counter starts at zero. */
182 set_Block_matured(res, 1);
183 set_Block_block_visited(res, 0);
185 /* res->attr.block.exc = exc_normal; */
186 /* res->attr.block.handler_entry = 0; */
187 res->attr.block.dead = 0;
188 res->attr.block.irg = irg;
/* Fresh backedge flag array, one entry per predecessor, allocated on the
   graph's obstack. */
189 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural-view inputs and extended-basic-block info start unset. */
190 res->attr.block.in_cg = NULL;
191 res->attr.block.cg_backedge = NULL;
192 res->attr.block.extblk = NULL;
194 IRN_VRFY_IRG(res, irg);
199 new_bd_Start (dbg_info *db, ir_node *block)
202 ir_graph *irg = current_ir_graph;
204 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
205 /* res->attr.start.irg = irg; */
207 IRN_VRFY_IRG(res, irg);
212 new_bd_End (dbg_info *db, ir_node *block)
215 ir_graph *irg = current_ir_graph;
217 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
219 IRN_VRFY_IRG(res, irg);
223 /* Creates a Phi node with all predecessors. Calling this constructor
224 is only allowed if the corresponding block is mature. */
226 new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
229 ir_graph *irg = current_ir_graph;
/* If the block is already mature, the Phi's arity must match the block's. */
233 /* Don't assert that block matured: the use of this constructor is strongly
235 if ( get_Block_matured(block) )
236 assert( get_irn_arity(block) == arity );
238 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
240 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the predecessors for Unknown operands; a Phi that still has an
   Unknown input is left unoptimized (folding it could lose the Phi). */
242 for (i = arity-1; i >= 0; i--)
243 if (get_irn_op(in[i]) == op_Unknown) {
248 if (!has_unknown) res = optimize_node (res);
249 IRN_VRFY_IRG(res, irg);
251 /* Memory Phis in endless loops must be kept alive.
252 As we can't distinguish these easily we keep all of them alive. */
253 if ((res->op == op_Phi) && (mode == mode_M))
254 add_End_keepalive(get_irg_end(irg), res);
259 new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
262 ir_graph *irg = current_ir_graph;
/* NOTE: the 'block' parameter is intentionally ignored -- Const nodes are
   always placed in the graph's start block (see get_irg_start_block below). */
264 res = new_ir_node (db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
265 res->attr.con.tv = con;
266 set_Const_type(res, tp); /* Call method because of complex assertion. */
267 res = optimize_node (res);
/* optimize_node may have CSE'd to an existing Const; it must carry the
   same type, which the assertion double-checks. */
268 assert(get_Const_type(res) == tp);
269 IRN_VRFY_IRG(res, irg);
275 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
277 ir_graph *irg = current_ir_graph;
279 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
283 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
285 ir_graph *irg = current_ir_graph;
287 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
291 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
294 ir_graph *irg = current_ir_graph;
296 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
297 res = optimize_node(res);
298 IRN_VRFY_IRG(res, irg);
303 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
307 ir_graph *irg = current_ir_graph;
309 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
310 res->attr.proj = proj;
313 assert(get_Proj_pred(res));
314 assert(get_nodes_block(get_Proj_pred(res)));
316 res = optimize_node(res);
318 IRN_VRFY_IRG(res, irg);
/* Builds the default Proj of a Cond.  SIDE EFFECT: mutates the Cond node
   'arg' itself -- marks it as a fragmentary ("sparse") Cond and records
   max_proj as its default projection number. */
324 new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
328 ir_graph *irg = current_ir_graph;
329 assert(arg->op == op_Cond);
331 arg->attr.c.kind = fragmentary;
332 arg->attr.c.default_proj = max_proj;
/* The default Proj has mode_X (control flow) and number max_proj. */
333 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
338 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
341 ir_graph *irg = current_ir_graph;
343 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
344 res = optimize_node(res);
345 IRN_VRFY_IRG(res, irg);
350 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
353 ir_graph *irg = current_ir_graph;
355 assert(is_atomic_type(to_tp));
357 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
358 res->attr.cast.totype = to_tp;
359 res = optimize_node(res);
360 IRN_VRFY_IRG(res, irg);
365 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
368 ir_graph *irg = current_ir_graph;
370 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
371 res = optimize_node (res);
372 IRN_VRFY_IRG(res, irg);
397 new_bd_Cmp (dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2)
401 ir_graph *irg = current_ir_graph;
404 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
405 res = optimize_node(res);
406 IRN_VRFY_IRG(res, irg);
411 new_bd_Jmp (dbg_info *db, ir_node *block)
414 ir_graph *irg = current_ir_graph;
416 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
417 res = optimize_node (res);
418 IRN_VRFY_IRG (res, irg);
423 new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
426 ir_graph *irg = current_ir_graph;
428 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
429 res = optimize_node (res);
430 IRN_VRFY_IRG (res, irg);
432 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
438 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
441 ir_graph *irg = current_ir_graph;
443 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
444 res->attr.c.kind = dense;
445 res->attr.c.default_proj = 0;
446 res->attr.c.pred = COND_JMP_PRED_NONE;
447 res = optimize_node (res);
448 IRN_VRFY_IRG(res, irg);
453 new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
454 ir_node *callee, int arity, ir_node **in, ir_type *tp)
459 ir_graph *irg = current_ir_graph;
462 NEW_ARR_A(ir_node *, r_in, r_arity);
465 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
467 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
469 assert((get_unknown_type() == tp) || is_Method_type(tp));
470 set_Call_type(res, tp);
471 res->attr.call.exc.pin_state = op_pin_state_pinned;
472 res->attr.call.callee_arr = NULL;
473 res = optimize_node(res);
474 IRN_VRFY_IRG(res, irg);
479 new_bd_Return (dbg_info *db, ir_node *block,
480 ir_node *store, int arity, ir_node **in)
485 ir_graph *irg = current_ir_graph;
488 NEW_ARR_A (ir_node *, r_in, r_arity);
490 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
491 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
492 res = optimize_node(res);
493 IRN_VRFY_IRG(res, irg);
498 new_bd_Load (dbg_info *db, ir_node *block,
499 ir_node *store, ir_node *adr, ir_mode *mode)
503 ir_graph *irg = current_ir_graph;
507 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
508 res->attr.load.exc.pin_state = op_pin_state_pinned;
509 res->attr.load.load_mode = mode;
510 res->attr.load.volatility = volatility_non_volatile;
511 res = optimize_node(res);
512 IRN_VRFY_IRG(res, irg);
517 new_bd_Store (dbg_info *db, ir_node *block,
518 ir_node *store, ir_node *adr, ir_node *val)
522 ir_graph *irg = current_ir_graph;
527 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
528 res->attr.store.exc.pin_state = op_pin_state_pinned;
529 res->attr.store.volatility = volatility_non_volatile;
530 res = optimize_node(res);
531 IRN_VRFY_IRG(res, irg);
536 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
537 ir_node *size, ir_type *alloc_type, where_alloc where)
541 ir_graph *irg = current_ir_graph;
545 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
546 res->attr.a.exc.pin_state = op_pin_state_pinned;
547 res->attr.a.where = where;
548 res->attr.a.type = alloc_type;
549 res = optimize_node(res);
550 IRN_VRFY_IRG(res, irg);
555 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
556 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
560 ir_graph *irg = current_ir_graph;
565 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
566 res->attr.f.where = where;
567 res->attr.f.type = free_type;
568 res = optimize_node(res);
569 IRN_VRFY_IRG(res, irg);
574 new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
575 int arity, ir_node **in, entity *ent)
580 ir_graph *irg = current_ir_graph;
582 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
585 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
588 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
590 * FIXME: Sels can select functions, which should then be of mode mode_P_code.
592 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
593 res->attr.s.ent = ent;
594 res = optimize_node(res);
595 IRN_VRFY_IRG(res, irg);
600 new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
601 symconst_kind symkind, ir_type *tp) {
604 ir_graph *irg = current_ir_graph;
606 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
607 mode = mode_P_data; /* FIXME: can be mode_P_code */
611 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
613 res->attr.i.num = symkind;
614 res->attr.i.sym = value;
617 res = optimize_node(res);
618 IRN_VRFY_IRG(res, irg);
623 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
624 symconst_kind symkind)
626 ir_graph *irg = current_ir_graph;
628 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
633 new_bd_Sync (dbg_info *db, ir_node *block)
636 ir_graph *irg = current_ir_graph;
638 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
639 /* no need to call optimize node here, Sync are always created with no predecessors */
640 IRN_VRFY_IRG(res, irg);
645 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
647 ir_node *in[2], *res;
648 ir_graph *irg = current_ir_graph;
652 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
653 res->attr.confirm_cmp = cmp;
654 res = optimize_node (res);
655 IRN_VRFY_IRG(res, irg);
659 /* this function is often called with current_ir_graph unset */
661 new_bd_Unknown (ir_mode *m)
/* NOTE(review): despite the comment above, current_ir_graph is read here
   unconditionally -- confirm that all callers set it first, otherwise the
   start block lookup below dereferences an unset graph. */
664 ir_graph *irg = current_ir_graph;
/* Unknown nodes carry no debug info and live in the start block. */
666 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
667 res = optimize_node(res);
672 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
676 ir_graph *irg = current_ir_graph;
678 in[0] = get_Call_ptr(call);
679 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
680 /* res->attr.callbegin.irg = irg; */
681 res->attr.callbegin.call = call;
682 res = optimize_node(res);
683 IRN_VRFY_IRG(res, irg);
688 new_bd_EndReg (dbg_info *db, ir_node *block)
691 ir_graph *irg = current_ir_graph;
693 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
694 set_irg_end_reg(irg, res);
695 IRN_VRFY_IRG(res, irg);
700 new_bd_EndExcept (dbg_info *db, ir_node *block)
703 ir_graph *irg = current_ir_graph;
705 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
706 set_irg_end_except(irg, res);
707 IRN_VRFY_IRG (res, irg);
712 new_bd_Break (dbg_info *db, ir_node *block)
715 ir_graph *irg = current_ir_graph;
717 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
718 res = optimize_node(res);
719 IRN_VRFY_IRG(res, irg);
724 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
728 ir_graph *irg = current_ir_graph;
730 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
731 res->attr.filter.proj = proj;
732 res->attr.filter.in_cg = NULL;
733 res->attr.filter.backedge = NULL;
736 assert(get_Proj_pred(res));
737 assert(get_nodes_block(get_Proj_pred(res)));
739 res = optimize_node(res);
740 IRN_VRFY_IRG(res, irg);
745 new_bd_Mux (dbg_info *db, ir_node *block,
746 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
750 ir_graph *irg = current_ir_graph;
756 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
759 res = optimize_node(res);
760 IRN_VRFY_IRG(res, irg);
765 new_bd_Psi (dbg_info *db, ir_node *block,
766 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
770 ir_graph *irg = current_ir_graph;
773 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
775 for (i = 0; i < arity; ++i) {
777 in[2 * i + 1] = vals[i];
781 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
784 res = optimize_node(res);
785 IRN_VRFY_IRG(res, irg);
790 new_bd_CopyB (dbg_info *db, ir_node *block,
791 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
795 ir_graph *irg = current_ir_graph;
801 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
803 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
804 res->attr.copyb.data_type = data_type;
805 res = optimize_node(res);
806 IRN_VRFY_IRG(res, irg);
/* Instance-of test: checks objptr against 'type', using memory 'store'. */
811 new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
812 ir_node *objptr, ir_type *type)
816 ir_graph *irg = current_ir_graph;
/* NOTE(review): the node is constructed with op_Sel although this is the
   InstOf constructor and the InstOf attribute union member (attr.io) is
   written right below -- looks like a copy-paste of the Sel constructor.
   Confirm whether op_InstOf was intended before changing. */
820 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
821 res->attr.io.type = type;
822 res = optimize_node(res);
823 IRN_VRFY_IRG(res, irg);
828 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
832 ir_graph *irg = current_ir_graph;
836 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
837 res = optimize_node(res);
838 IRN_VRFY_IRG(res, irg);
843 new_bd_Bound (dbg_info *db, ir_node *block,
844 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
848 ir_graph *irg = current_ir_graph;
854 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
855 res->attr.bound.exc.pin_state = op_pin_state_pinned;
856 res = optimize_node(res);
857 IRN_VRFY_IRG(res, irg);
861 /* --------------------------------------------- */
862 /* private interfaces, for professional use only */
863 /* --------------------------------------------- */
865 /* Constructs a Block with a fixed number of predecessors.
866 Does not set current_block. Can not be used with automatic
867 Phi node construction. */
869 new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
871 ir_graph *rem = current_ir_graph;
874 current_ir_graph = irg;
875 res = new_bd_Block (db, arity, in);
876 current_ir_graph = rem;
882 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
884 ir_graph *rem = current_ir_graph;
887 current_ir_graph = irg;
888 res = new_bd_Start (db, block);
889 current_ir_graph = rem;
895 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
898 ir_graph *rem = current_ir_graph;
900 current_ir_graph = rem;
901 res = new_bd_End (db, block);
902 current_ir_graph = rem;
907 /* Creates a Phi node with all predecessors. Calling this constructor
908 is only allowed if the corresponding block is mature. */
910 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
913 ir_graph *rem = current_ir_graph;
915 current_ir_graph = irg;
916 res = new_bd_Phi (db, block,arity, in, mode);
917 current_ir_graph = rem;
923 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
926 ir_graph *rem = current_ir_graph;
928 current_ir_graph = irg;
929 res = new_bd_Const_type (db, block, mode, con, tp);
930 current_ir_graph = rem;
936 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
939 ir_graph *rem = current_ir_graph;
941 current_ir_graph = irg;
942 res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
943 current_ir_graph = rem;
949 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
951 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
955 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
958 ir_graph *rem = current_ir_graph;
960 current_ir_graph = irg;
961 res = new_bd_Id(db, block, val, mode);
962 current_ir_graph = rem;
968 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
972 ir_graph *rem = current_ir_graph;
974 current_ir_graph = irg;
975 res = new_bd_Proj(db, block, arg, mode, proj);
976 current_ir_graph = rem;
982 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
986 ir_graph *rem = current_ir_graph;
988 current_ir_graph = irg;
989 res = new_bd_defaultProj(db, block, arg, max_proj);
990 current_ir_graph = rem;
996 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
999 ir_graph *rem = current_ir_graph;
1001 current_ir_graph = irg;
1002 res = new_bd_Conv(db, block, op, mode);
1003 current_ir_graph = rem;
1009 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1012 ir_graph *rem = current_ir_graph;
1014 current_ir_graph = irg;
1015 res = new_bd_Cast(db, block, op, to_tp);
1016 current_ir_graph = rem;
1022 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1025 ir_graph *rem = current_ir_graph;
1027 current_ir_graph = irg;
1028 res = new_bd_Tuple(db, block, arity, in);
1029 current_ir_graph = rem;
1039 NEW_RD_DIVOP(DivMod)
1052 NEW_RD_BINOP(Borrow)
1055 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1056 ir_node *op1, ir_node *op2)
1059 ir_graph *rem = current_ir_graph;
1061 current_ir_graph = irg;
1062 res = new_bd_Cmp(db, block, op1, op2);
1063 current_ir_graph = rem;
1069 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1072 ir_graph *rem = current_ir_graph;
1074 current_ir_graph = irg;
1075 res = new_bd_Jmp(db, block);
1076 current_ir_graph = rem;
1082 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1085 ir_graph *rem = current_ir_graph;
1087 current_ir_graph = irg;
1088 res = new_bd_IJmp(db, block, tgt);
1089 current_ir_graph = rem;
1095 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1098 ir_graph *rem = current_ir_graph;
1100 current_ir_graph = irg;
1101 res = new_bd_Cond(db, block, c);
1102 current_ir_graph = rem;
1108 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1109 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1112 ir_graph *rem = current_ir_graph;
1114 current_ir_graph = irg;
1115 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1116 current_ir_graph = rem;
1122 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1123 ir_node *store, int arity, ir_node **in)
1126 ir_graph *rem = current_ir_graph;
1128 current_ir_graph = irg;
1129 res = new_bd_Return(db, block, store, arity, in);
1130 current_ir_graph = rem;
1136 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1137 ir_node *store, ir_node *adr, ir_mode *mode)
1140 ir_graph *rem = current_ir_graph;
1142 current_ir_graph = irg;
1143 res = new_bd_Load(db, block, store, adr, mode);
1144 current_ir_graph = rem;
1150 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1151 ir_node *store, ir_node *adr, ir_node *val)
1154 ir_graph *rem = current_ir_graph;
1156 current_ir_graph = irg;
1157 res = new_bd_Store(db, block, store, adr, val);
1158 current_ir_graph = rem;
1164 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1165 ir_node *size, ir_type *alloc_type, where_alloc where)
1168 ir_graph *rem = current_ir_graph;
1170 current_ir_graph = irg;
1171 res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1172 current_ir_graph = rem;
1178 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1179 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1182 ir_graph *rem = current_ir_graph;
1184 current_ir_graph = irg;
1185 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1186 current_ir_graph = rem;
1192 new_rd_simpleSel (dbg_info *db, ir_graph *irg, ir_node *block,
1193 ir_node *store, ir_node *objptr, entity *ent)
1196 ir_graph *rem = current_ir_graph;
1198 current_ir_graph = irg;
1199 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1200 current_ir_graph = rem;
1206 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1207 int arity, ir_node **in, entity *ent)
1210 ir_graph *rem = current_ir_graph;
1212 current_ir_graph = irg;
1213 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1214 current_ir_graph = rem;
1220 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1221 symconst_kind symkind, ir_type *tp)
1224 ir_graph *rem = current_ir_graph;
1226 current_ir_graph = irg;
1227 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1228 current_ir_graph = rem;
1234 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1235 symconst_kind symkind)
1237 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1241 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, ir_type *tp)
1243 symconst_symbol sym = {(ir_type *)symbol};
1244 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1247 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1248 symconst_symbol sym = {(ir_type *)symbol};
1249 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1252 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1253 symconst_symbol sym = {symbol};
1254 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1257 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1258 symconst_symbol sym = {symbol};
1259 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1262 ir_node *new_rd_SymConst_align (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1263 symconst_symbol sym = {symbol};
1264 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
/* Constructs a Sync node in graph irg with the given predecessors.
   The Sync is created empty (dynamic arity) and the predecessors are
   appended one by one afterwards. */
1268 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[])
1271 ir_graph *rem = current_ir_graph;
1274 current_ir_graph = irg;
1275 res = new_bd_Sync(db, block);
1276 current_ir_graph = rem;
/* Predecessors are added after the graph has been restored; add_Sync_pred
   operates on the node directly and does not need current_ir_graph. */
1278 for (i = 0; i < arity; ++i) add_Sync_pred(res, in[i]);
1284 new_rd_Bad (ir_graph *irg) {
1285 return get_irg_bad(irg);
1289 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1292 ir_graph *rem = current_ir_graph;
1294 current_ir_graph = irg;
1295 res = new_bd_Confirm(db, block, val, bound, cmp);
1296 current_ir_graph = rem;
1301 /* this function is often called with current_ir_graph unset */
1303 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1306 ir_graph *rem = current_ir_graph;
1308 current_ir_graph = irg;
1309 res = new_bd_Unknown(m);
1310 current_ir_graph = rem;
1316 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1319 ir_graph *rem = current_ir_graph;
1321 current_ir_graph = irg;
1322 res = new_bd_CallBegin(db, block, call);
1323 current_ir_graph = rem;
1329 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1333 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1334 set_irg_end_reg(irg, res);
1335 IRN_VRFY_IRG(res, irg);
1340 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1344 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1345 set_irg_end_except(irg, res);
1346 IRN_VRFY_IRG (res, irg);
1351 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1354 ir_graph *rem = current_ir_graph;
1356 current_ir_graph = irg;
1357 res = new_bd_Break(db, block);
1358 current_ir_graph = rem;
1364 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1368 ir_graph *rem = current_ir_graph;
1370 current_ir_graph = irg;
1371 res = new_bd_Filter(db, block, arg, mode, proj);
1372 current_ir_graph = rem;
1378 new_rd_NoMem (ir_graph *irg) {
1379 return get_irg_no_mem(irg);
1383 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
1384 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1387 ir_graph *rem = current_ir_graph;
1389 current_ir_graph = irg;
1390 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1391 current_ir_graph = rem;
1397 new_rd_Psi (dbg_info *db, ir_graph *irg, ir_node *block,
1398 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
1401 ir_graph *rem = current_ir_graph;
1403 current_ir_graph = irg;
1404 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1405 current_ir_graph = rem;
1410 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1411 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1414 ir_graph *rem = current_ir_graph;
1416 current_ir_graph = irg;
1417 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1418 current_ir_graph = rem;
1424 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1425 ir_node *objptr, ir_type *type)
1428 ir_graph *rem = current_ir_graph;
1430 current_ir_graph = irg;
1431 res = new_bd_InstOf(db, block, store, objptr, type);
1432 current_ir_graph = rem;
1438 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1441 ir_graph *rem = current_ir_graph;
1443 current_ir_graph = irg;
1444 res = new_bd_Raise(db, block, store, obj);
1445 current_ir_graph = rem;
1450 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1451 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1454 ir_graph *rem = current_ir_graph;
1456 current_ir_graph = irg;
1457 res = new_bd_Bound(db, block, store, idx, lower, upper);
1458 current_ir_graph = rem;
1463 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1464 return new_rd_Block(NULL, irg, arity, in);
1466 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1467 return new_rd_Start(NULL, irg, block);
1469 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1470 return new_rd_End(NULL, irg, block);
1472 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1473 return new_rd_Jmp(NULL, irg, block);
1475 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1476 return new_rd_IJmp(NULL, irg, block, tgt);
1478 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1479 return new_rd_Cond(NULL, irg, block, c);
1481 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1482 ir_node *store, int arity, ir_node **in) {
1483 return new_rd_Return(NULL, irg, block, store, arity, in);
1485 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1486 ir_mode *mode, tarval *con) {
1487 return new_rd_Const(NULL, irg, block, mode, con);
1489 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1490 ir_mode *mode, long value) {
1491 return new_rd_Const_long(NULL, irg, block, mode, value);
1493 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1494 ir_mode *mode, tarval *con, ir_type *tp) {
1495 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1497 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1498 symconst_symbol value, symconst_kind symkind) {
1499 return new_rd_SymConst(NULL, irg, block, value, symkind);
1501 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1502 ir_node *objptr, entity *ent) {
1503 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1505 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1506 ir_node *objptr, int n_index, ir_node **index,
1508 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1510 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1511 ir_node *callee, int arity, ir_node **in,
1513 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1515 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1516 ir_node *op1, ir_node *op2, ir_mode *mode) {
1517 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1519 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1520 ir_node *op1, ir_node *op2, ir_mode *mode) {
1521 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1523 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1524 ir_node *op, ir_mode *mode) {
1525 return new_rd_Minus(NULL, irg, block, op, mode);
1527 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1528 ir_node *op1, ir_node *op2, ir_mode *mode) {
1529 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1531 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1532 ir_node *memop, ir_node *op1, ir_node *op2) {
1533 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1535 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1536 ir_node *memop, ir_node *op1, ir_node *op2) {
1537 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1539 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1540 ir_node *memop, ir_node *op1, ir_node *op2) {
1541 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1543 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1544 ir_node *memop, ir_node *op1, ir_node *op2) {
1545 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1547 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1548 ir_node *op, ir_mode *mode) {
1549 return new_rd_Abs(NULL, irg, block, op, mode);
1551 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1552 ir_node *op1, ir_node *op2, ir_mode *mode) {
1553 return new_rd_And(NULL, irg, block, op1, op2, mode);
1555 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1556 ir_node *op1, ir_node *op2, ir_mode *mode) {
1557 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1559 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1560 ir_node *op1, ir_node *op2, ir_mode *mode) {
1561 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1563 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1564 ir_node *op, ir_mode *mode) {
1565 return new_rd_Not(NULL, irg, block, op, mode);
1567 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1568 ir_node *op, ir_node *k, ir_mode *mode) {
1569 return new_rd_Shl(NULL, irg, block, op, k, mode);
1571 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1572 ir_node *op, ir_node *k, ir_mode *mode) {
1573 return new_rd_Shr(NULL, irg, block, op, k, mode);
1575 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1576 ir_node *op, ir_node *k, ir_mode *mode) {
1577 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1579 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1580 ir_node *op, ir_node *k, ir_mode *mode) {
1581 return new_rd_Rot(NULL, irg, block, op, k, mode);
1583 ir_node *new_r_Carry (ir_graph *irg, ir_node *block,
1584 ir_node *op, ir_node *k, ir_mode *mode) {
1585 return new_rd_Carry(NULL, irg, block, op, k, mode);
1587 ir_node *new_r_Borrow (ir_graph *irg, ir_node *block,
1588 ir_node *op, ir_node *k, ir_mode *mode) {
1589 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1591 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1592 ir_node *op1, ir_node *op2) {
1593 return new_rd_Cmp(NULL, irg, block, op1, op2);
/* ---------------------------------------------------------------------------
 * Convenience constructors new_r_<Op>(), part 2 (continued from above):
 * conversions, Phi, memory ops, projections, control-flow helpers and
 * high-level ops.  Same pattern: forward to new_rd_<Op>() with NULL dbg_info.
 * NOTE(review): decimated listing -- closing braces / some parameter lines
 * are not visible here.
 * ------------------------------------------------------------------------- */
1595 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1596 ir_node *op, ir_mode *mode) {
1597 return new_rd_Conv(NULL, irg, block, op, mode);
1599 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1600 return new_rd_Cast(NULL, irg, block, op, to_tp);
1602 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1603 ir_node **in, ir_mode *mode) {
1604 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1606 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1607 ir_node *store, ir_node *adr, ir_mode *mode) {
1608 return new_rd_Load(NULL, irg, block, store, adr, mode);
1610 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1611 ir_node *store, ir_node *adr, ir_node *val) {
1612 return new_rd_Store(NULL, irg, block, store, adr, val);
1614 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1615 ir_node *size, ir_type *alloc_type, where_alloc where) {
1616 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1618 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1619 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1620 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1622 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1623 return new_rd_Sync(NULL, irg, block, arity, in);
1625 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1626 ir_mode *mode, long proj) {
1627 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1629 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1631 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1633 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1634 int arity, ir_node **in) {
1635 return new_rd_Tuple(NULL, irg, block, arity, in );
1637 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1638 ir_node *val, ir_mode *mode) {
1639 return new_rd_Id(NULL, irg, block, val, mode);
/* Bad/Unknown/NoMem take no block: they are singletons per graph. */
1641 ir_node *new_r_Bad (ir_graph *irg) {
1642 return new_rd_Bad(irg);
1644 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1645 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1647 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1648 return new_rd_Unknown(irg, m);
/* Interprocedural-view constructors. */
1650 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1651 return new_rd_CallBegin(NULL, irg, block, callee);
1653 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1654 return new_rd_EndReg(NULL, irg, block);
1656 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1657 return new_rd_EndExcept(NULL, irg, block);
1659 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1660 return new_rd_Break(NULL, irg, block);
1662 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1663 ir_mode *mode, long proj) {
1664 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1666 ir_node *new_r_NoMem (ir_graph *irg) {
1667 return new_rd_NoMem(irg);
1669 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1670 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1671 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1673 ir_node *new_r_Psi (ir_graph *irg, ir_node *block,
1674 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1675 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1677 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1678 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1679 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1681 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1683 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1685 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1686 ir_node *store, ir_node *obj) {
1687 return new_rd_Raise(NULL, irg, block, store, obj);
1689 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1690 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1691 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1694 /** ********************/
1695 /** public interfaces */
1696 /** construction tools */
1700 * - create a new Start node in the current block
1702 * @return s - pointer to the created Start node
/* Builds a Start node (mode_T, zero predecessors) in the current block of
   current_ir_graph, runs it through the local optimizer and the verifier.
   NOTE(review): the `return res;` line is not visible in this decimated
   listing but must follow in the full source. */
1707 new_d_Start (dbg_info *db)
1711 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1712 op_Start, mode_T, 0, NULL);
1713 /* res->attr.start.irg = current_ir_graph; */
1715 res = optimize_node(res);
1716 IRN_VRFY_IRG(res, current_ir_graph);
/* Builds the End node (mode_X).  Arity -1 requests a dynamic predecessor
   array: keep-alive edges are appended later via add_End_keepalive(). */
1721 new_d_End (dbg_info *db)
1724 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1725 op_End, mode_X, -1, NULL);
1726 res = optimize_node(res);
1727 IRN_VRFY_IRG(res, current_ir_graph);
1732 /* Constructs a Block with a fixed number of predecessors.
1733 Does set current_block. Can be used with automatic Phi
1734 node construction. */
1736 new_d_Block (dbg_info *db, int arity, ir_node **in)
1740 int has_unknown = 0;
1742 res = new_bd_Block(db, arity, in);
1744 /* Create and initialize array for Phi-node construction. */
1745 if (get_irg_phase_state(current_ir_graph) == phase_building) {
/* graph_arr[pos] caches the current SSA value of local variable `pos`
   within this block; zeroed so "value unknown" is detectable. */
1746 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1747 current_ir_graph->n_loc);
1748 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Blocks with Unknown predecessors must not be optimized/CSE'd yet: the
   Unknown edges will be filled in once the surrounding CFG is built. */
1751 for (i = arity-1; i >= 0; i--)
1752 if (get_irn_op(in[i]) == op_Unknown) {
1757 if (!has_unknown) res = optimize_node(res);
1758 current_ir_graph->current_block = res;
1760 IRN_VRFY_IRG(res, current_ir_graph);
1765 /* ***********************************************************************/
1766 /* Methods necessary for automatic Phi node creation */
1768 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1769 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1770 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1771 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1773 Call Graph: ( A ---> B == A "calls" B)
1775 get_value mature_immBlock
1783 get_r_value_internal |
1787 new_rd_Phi0 new_rd_Phi_in
1789 * *************************************************************************** */
1791 /** Creates a Phi node with 0 predecessors */
/* Phi0 is a placeholder used while a block is still immature; it is later
   upgraded to a real Phi (or turned into an Id) by mature_immBlock().
   Deliberately only verified, never optimized, so its address stays stable. */
1792 static INLINE ir_node *
1793 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1797 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1798 IRN_VRFY_IRG(res, irg);
1802 /* There are two implementations of the Phi node construction. The first
1803 is faster, but does not work for blocks with more than 2 predecessors.
1804 The second works always but is slower and causes more unnecessary Phi
1806 Select the implementations by the following preprocessor flag set in
1808 #if USE_FAST_PHI_CONSTRUCTION
1810 /* This is a stack used for allocating and deallocating nodes in
1811 new_rd_Phi_in. The original implementation used the obstack
1812 to model this stack, now it is explicit. This reduces side effects.
1814 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi-in stack (malloc'ed struct + growable array). */
1816 new_Phi_in_stack(void) {
1819 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1821 res->stack = NEW_ARR_F (ir_node *, 0);
/* Releases the stack's backing array (and, presumably, the struct itself in
   the full source -- the free(s) line is not visible here). */
1828 free_Phi_in_stack(Phi_in_stack *s) {
1829 DEL_ARR_F(s->stack);
/* Pushes a discarded Phi onto the stack so its storage can be reused by
   alloc_or_pop_from_Phi_in_stack(); grows the array only when full. */
1833 free_to_Phi_in_stack(ir_node *phi) {
1834 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1835 current_ir_graph->Phi_in_stack->pos)
1836 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1838 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1840 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node for reuse from the explicit stack, or allocates a new
   one when the stack is empty.  Reused nodes get a fresh in-array on the
   obstack (the old one cannot be freed -- see comment below). */
1843 static INLINE ir_node *
1844 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1845 int arity, ir_node **in) {
1847 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1848 int pos = current_ir_graph->Phi_in_stack->pos;
1852 /* We need to allocate a new node */
/* NOTE(review): `db` below is not declared in this function's visible
   parameter list -- presumably a dbg_info that should be NULL here.  This
   branch is only compiled when USE_EXPLICIT_PHI_IN_STACK is set, which may
   be why it goes unnoticed; confirm against the full source. */
1853 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1854 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1856 /* reuse the old node and initialize it again. */
1859 assert (res->kind == k_ir_node);
1860 assert (res->op == op_Phi);
1864 assert (arity >= 0);
1865 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1866 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1868 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1870 (current_ir_graph->Phi_in_stack->pos)--;
1874 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1876 /* Creates a Phi node with a given, fixed array **in of predecessors.
1877 If the Phi node is unnecessary, as the same value reaches the block
1878 through all control flow paths, it is eliminated and the value
1879 returned directly. This constructor is only intended for use in
1880 the automatic Phi node generation triggered by get_value or mature.
1881 The implementation is quite tricky and depends on the fact, that
1882 the nodes are allocated on a stack:
1883 The in array contains predecessors and NULLs. The NULLs appear,
1884 if get_r_value_internal, that computed the predecessors, reached
1885 the same block on two paths. In this case the same value reaches
1886 this block on both paths, there is no definition in between. We need
1887 not allocate a Phi where these path's merge, but we have to communicate
1888 this fact to the caller. This happens by returning a pointer to the
1889 node the caller _will_ allocate. (Yes, we predict the address. We can
1890 do so because the nodes are allocated on the obstack.) The caller then
1891 finds a pointer to itself and, when this routine is called again,
/* Fast-construction variant (USE_FAST_PHI_CONSTRUCTION) of new_rd_Phi_in:
   builds a Phi from `in` or collapses it to the single distinct predecessor.
   Relies on obstack allocation being stack-like so a deallocated node's
   address can be "predicted" by the caller -- see the file comment above. */
1894 static INLINE ir_node *
1895 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1898 ir_node *res, *known;
1900 /* Allocate a new node on the obstack. This can return a node to
1901 which some of the pointers in the in-array already point.
1902 Attention: the constructor copies the in array, i.e., the later
1903 changes to the array in this routine do not affect the
1904 constructed node! If the in array contains NULLs, there will be
1905 missing predecessors in the returned node. Is this a possible
1906 internal state of the Phi node generation? */
1907 #if USE_EXPLICIT_PHI_IN_STACK
1908 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1910 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1911 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1914 /* The in-array can contain NULLs. These were returned by
1915 get_r_value_internal if it reached the same block/definition on a
1916 second path. The NULLs are replaced by the node itself to
1917 simplify the test in the next loop. */
1918 for (i = 0; i < ins; ++i) {
1923 /* This loop checks whether the Phi has more than one predecessor.
1924 If so, it is a real Phi node and we break the loop. Else the Phi
1925 node merges the same definition on several paths and therefore is
1927 for (i = 0; i < ins; ++i) {
1928 if (in[i] == res || in[i] == known)
1937 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1939 #if USE_EXPLICIT_PHI_IN_STACK
1940 free_to_Phi_in_stack(res);
/* Without the explicit stack, the obstack top can be popped directly;
   edge bookkeeping must be told the node dies. */
1942 edges_node_deleted(res, current_ir_graph);
1943 obstack_free(current_ir_graph->obst, res);
1947 res = optimize_node (res);
1948 IRN_VRFY_IRG(res, irg);
1951 /* return the pointer to the Phi node. This node might be deallocated! */
1956 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1959 allocates and returns this node. The routine called to allocate the
1960 node might optimize it away and return a real value, or even a pointer
1961 to a deallocated Phi node on top of the obstack!
1962 This function is called with an in-array of proper size. **/
/* Fast-construction variant: collects a Phi operand from every predecessor
   block and delegates node creation (and its collapse optimization) to
   new_rd_Phi_in().  `nin` is a caller-provided array of length `ins`. */
1964 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1966 ir_node *prevBlock, *res;
1969 /* This loop goes to all predecessor blocks of the block the Phi node is in
1970 and there finds the operands of the Phi node by calling
1971 get_r_value_internal. */
/* Predecessor slots of a block start at in[1]; in[0] is the block itself. */
1972 for (i = 1; i <= ins; ++i) {
1973 assert (block->in[i]);
1974 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1976 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1979 /* After collecting all predecessors into the array nin a new Phi node
1980 with these predecessors is created. This constructor contains an
1981 optimization: If all predecessors of the Phi node are identical it
1982 returns the only operand instead of a new Phi node. If the value
1983 passes two different control flow edges without being defined, and
1984 this is the second path treated, a pointer to the node that will be
1985 allocated for the first path (recursion) is returned. We already
1986 know the address of this node, as it is the next node to be allocated
1987 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1988 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1990 /* Now we now the value for "pos" and can enter it in the array with
1991 all known local variables. Attention: this might be a pointer to
1992 a node, that later will be allocated!!! See new_rd_Phi_in().
1993 If this is called in mature, after some set_value() in the same block,
1994 the proper value must not be overwritten:
1996 get_value (makes Phi0, put's it into graph_arr)
1997 set_value (overwrites Phi0 in graph_arr)
1998 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2001 if (!block->attr.block.graph_arr[pos]) {
2002 block->attr.block.graph_arr[pos] = res;
2004 /* printf(" value already computed by %s\n",
2005 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2011 /* This function returns the last definition of a variable. In case
2012 this variable was last defined in a previous block, Phi nodes are
2013 inserted. If the part of the firm graph containing the definition
2014 is not yet constructed, a dummy Phi node is returned. */
/* Fast-construction (USE_FAST_PHI_CONSTRUCTION) variant; the robust variant
   appears further below.  Uses the block visited flag to detect re-entry. */
2016 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2019 /* There are 4 cases to treat.
2021 1. The block is not mature and we visit it the first time. We can not
2022 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2023 predecessors is returned. This node is added to the linked list (field
2024 "link") of the containing block to be completed when this block is
2025 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2028 2. The value is already known in this block, graph_arr[pos] is set and we
2029 visit the block the first time. We can return the value without
2030 creating any new nodes.
2032 3. The block is mature and we visit it the first time. A Phi node needs
2033 to be created (phi_merge). If the Phi is not needed, as all it's
2034 operands are the same value reaching the block through different
2035 paths, it's optimized away and the value itself is returned.
2037 4. The block is mature, and we visit it the second time. Now two
2038 subcases are possible:
2039 * The value was computed completely the last time we were here. This
2040 is the case if there is no loop. We can return the proper value.
2041 * The recursion that visited this node and set the flag did not
2042 return yet. We are computing a value in a loop and need to
2043 break the recursion without knowing the result yet.
2044 @@@ strange case. Straight forward we would create a Phi before
2045 starting the computation of it's predecessors. In this case we will
2046 find a Phi here in any case. The problem is that this implementation
2047 only creates a Phi after computing the predecessors, so that it is
2048 hard to compute self references of this Phi. @@@
2049 There is no simple check for the second subcase. Therefore we check
2050 for a second visit and treat all such cases as the second subcase.
2051 Anyways, the basic situation is the same: we reached a block
2052 on two paths without finding a definition of the value: No Phi
2053 nodes are needed on both paths.
2054 We return this information "Two paths, no Phi needed" by a very tricky
2055 implementation that relies on the fact that an obstack is a stack and
2056 will return a node with the same address on different allocations.
2057 Look also at phi_merge and new_rd_phi_in to understand this.
2058 @@@ Unfortunately this does not work, see testprogram
2059 three_cfpred_example.
2063 /* case 4 -- already visited. */
/* NULL here is the "two paths, no Phi needed" signal consumed by
   new_rd_Phi_in() above. */
2064 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2066 /* visited the first time */
2067 set_irn_visited(block, get_irg_visited(current_ir_graph));
2069 /* Get the local valid value */
2070 res = block->attr.block.graph_arr[pos];
2072 /* case 2 -- If the value is actually computed, return it. */
2073 if (res) return res;
2075 if (block->attr.block.matured) { /* case 3 */
2077 /* The Phi has the same amount of ins as the corresponding block. */
2078 int ins = get_irn_arity(block);
2080 NEW_ARR_A (ir_node *, nin, ins);
2082 /* Phi merge collects the predecessors and then creates a node. */
2083 res = phi_merge (block, pos, mode, nin, ins);
2085 } else { /* case 1 */
2086 /* The block is not mature, we don't know how many in's are needed. A Phi
2087 with zero predecessors is created. Such a Phi node is called Phi0
2088 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2089 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2091 The Phi0 has to remember the pos of it's internal value. If the real
2092 Phi is computed, pos is used to update the array with the local
2095 res = new_rd_Phi0 (current_ir_graph, block, mode);
2096 res->attr.phi0_pos = pos;
2097 res->link = block->link;
2101 /* If we get here, the frontend missed a use-before-definition error */
2104 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2105 assert (mode->code >= irm_F && mode->code <= irm_P);
2106 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2107 tarval_mode_null[mode->code]);
2110 /* The local valid value is available now. */
2111 block->attr.block.graph_arr[pos] = res;
2119 it starts the recursion. This causes an Id at the entry of
2120 every block that has no definition of the value! **/
2122 #if USE_EXPLICIT_PHI_IN_STACK
/* In the robust (non-fast) construction path the explicit Phi stack is
   unused: provide no-op stubs so callers compile and link unchanged. */
2124 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2125 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Robust variant of new_rd_Phi_in: additionally receives the Phi0 placeholder
   (`phi0`, may be NULL) this Phi will eventually replace, so self-references
   through the Phi0 can be folded immediately.  Bad predecessors are ignored
   when deciding whether the Phi collapses to a single known value. */
2128 static INLINE ir_node *
2129 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
2130 ir_node **in, int ins, ir_node *phi0)
2133 ir_node *res, *known;
2135 /* Allocate a new node on the obstack. The allocation copies the in
2137 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2138 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2140 /* This loop checks whether the Phi has more than one predecessor.
2141 If so, it is a real Phi node and we break the loop. Else the
2142 Phi node merges the same definition on several paths and therefore
2143 is not needed. Don't consider Bad nodes! */
2145 for (i=0; i < ins; ++i)
2149 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2151 /* Optimize self referencing Phis: We can't detect them yet properly, as
2152 they still refer to the Phi0 they will replace. So replace right now. */
2153 if (phi0 && in[i] == phi0) in[i] = res;
2155 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2163 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Pop the speculative Phi off the obstack and hand back the single value. */
2166 edges_node_deleted(res, current_ir_graph);
2167 obstack_free (current_ir_graph->obst, res);
2168 if (is_Phi(known)) {
2169 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2170 order, an enclosing Phi know may get superfluous. */
2171 res = optimize_in_place_2(known);
2173 exchange(known, res);
2179 /* A undefined value, e.g., in unreachable code. */
2183 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2184 IRN_VRFY_IRG(res, irg);
2185 /* Memory Phis in endless loops must be kept alive.
2186 As we can't distinguish these easily we keep all of them alive. */
2187 if ((res->op == op_Phi) && (mode == mode_M))
2188 add_End_keepalive(get_irg_end(irg), res);
2195 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2197 #if PRECISE_EXC_CONTEXT
2199 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2201 /* Construct a new frag_array for node n.
2202 Copy the content from the current graph_arr of the corresponding block:
2203 this is the current state.
2204 Set ProjM(n) as current memory state.
2205 Further the last entry in frag_arr of current block points to n. This
2206 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2208 static INLINE ir_node ** new_frag_arr (ir_node *n)
2213 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2214 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2215 sizeof(ir_node *)*current_ir_graph->n_loc);
2217 /* turn off optimization before allocating Proj nodes, as res isn't
2219 opt = get_opt_optimize(); set_optimize(0);
2220 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* Slot 0 holds the memory state seen on the exceptional path out of n;
   Call and CopyB use their own M_except projections, all other fragile ops
   share one pn number (asserted below). */
2221 if (get_irn_op(n) == op_Call)
2222 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2223 else if (get_irn_op(n) == op_CopyB)
2224 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2226 assert((pn_Quot_M == pn_DivMod_M) &&
2227 (pn_Quot_M == pn_Div_M) &&
2228 (pn_Quot_M == pn_Mod_M) &&
2229 (pn_Quot_M == pn_Load_M) &&
2230 (pn_Quot_M == pn_Store_M) &&
2231 (pn_Quot_M == pn_Alloc_M) &&
2232 (pn_Quot_M == pn_Bound_M));
2233 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain: the block's last graph_arr slot always points to the most recent
   fragile op, linking all frag ops of the block together. */
2237 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2242 * returns the frag_arr from a node
/* Dispatches on the opcode to reach the op-specific exception attribute.
   NOTE(review): the case labels (iro_Call, iro_Load, ...) are missing from
   this decimated listing; the returns below belong to those cases. */
2244 static INLINE ir_node **
2245 get_frag_arr (ir_node *n) {
2246 switch (get_irn_opcode(n)) {
2248 return n->attr.call.exc.frag_arr;
2250 return n->attr.a.exc.frag_arr;
2252 return n->attr.load.exc.frag_arr;
2254 return n->attr.store.exc.frag_arr;
2256 return n->attr.except.frag_arr;
/* Writes `val` into slot `pos` of a frag array unless already set, then
   propagates along the frag-op chain (frag_arr[n_loc-1] links to the next
   fragile op -- see new_frag_arr()).
   NOTE(review): this decimated listing shows two merged variants of the
   body: a recursive one (first) and an iteration-bounded loop (second);
   only one is active in the full source -- confirm which. */
2261 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2263 if (!frag_arr[pos]) frag_arr[pos] = val;
2264 if (frag_arr[current_ir_graph->n_loc - 1]) {
2265 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2266 assert(arr != frag_arr && "Endless recursion detected");
2267 set_frag_value(arr, pos, val);
/* Iterative variant: 1000 is an arbitrary cycle-breaking bound. */
2272 for (i = 0; i < 1000; ++i) {
2273 if (!frag_arr[pos]) {
2274 frag_arr[pos] = val;
2276 if (frag_arr[current_ir_graph->n_loc - 1]) {
2277 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2283 assert(0 && "potential endless recursion");
/* Like get_r_value_internal(), but reads the value as seen on the
   exceptional path out of the fragile op `cfOp` (precise exception
   context): consults cfOp's frag array first, then falls back to the
   block-local SSA lookup. */
2288 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2292 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2294 frag_arr = get_frag_arr(cfOp);
2295 res = frag_arr[pos];
2297 if (block->attr.block.graph_arr[pos]) {
2298 /* There was a set_value() after the cfOp and no get_value before that
2299 set_value(). We must build a Phi node now. */
2300 if (block->attr.block.matured) {
2301 int ins = get_irn_arity(block);
2303 NEW_ARR_A (ir_node *, nin, ins);
2304 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 placeholder, queued on block->link
   for completion by mature_immBlock(). */
2306 res = new_rd_Phi0 (current_ir_graph, block, mode);
2307 res->attr.phi0_pos = pos;
2308 res->link = block->link;
2312 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2313 but this should be better: (remove comment if this works) */
2314 /* It's a Phi, we can write this into all graph_arrs with NULL */
2315 set_frag_value(block->attr.block.graph_arr, pos, res);
2317 res = get_r_value_internal(block, pos, mode);
2318 set_frag_value(block->attr.block.graph_arr, pos, res);
2323 #endif /* PRECISE_EXC_CONTEXT */
2326 computes the predecessors for the real phi node, and then
2327 allocates and returns this node. The routine called to allocate the
2328 node might optimize it away and return a real value.
2329 This function must be called with an in-array of proper size. **/
/* Robust-construction variant of phi_merge: breaks recursion with an
   explicit Phi0 placeholder instead of the obstack-address trick used by
   the fast variant above. */
2331 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2333 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2336 /* If this block has no value at pos create a Phi0 and remember it
2337 in graph_arr to break recursions.
2338 Else we may not set graph_arr as there a later value is remembered. */
2340 if (!block->attr.block.graph_arr[pos]) {
2341 if (block == get_irg_start_block(current_ir_graph)) {
2342 /* Collapsing to Bad tarvals is no good idea.
2343 So we call a user-supplied routine here that deals with this case as
2344 appropriate for the given language. Sorrily the only help we can give
2345 here is the position.
2347 Even if all variables are defined before use, it can happen that
2348 we get to the start block, if a Cond has been replaced by a tuple
2349 (bad, jmp). In this case we call the function needlessly, eventually
2350 generating an non existent error.
2351 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2354 if (default_initialize_local_variable) {
2355 ir_node *rem = get_cur_block();
2357 set_cur_block(block);
/* pos is 1-based in graph_arr; the callback sees the 0-based variable. */
2358 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2362 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2363 /* We don't need to care about exception ops in the start block.
2364 There are none by definition. */
2365 return block->attr.block.graph_arr[pos];
2367 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2368 block->attr.block.graph_arr[pos] = phi0;
2369 #if PRECISE_EXC_CONTEXT
2370 if (get_opt_precise_exc_context()) {
2371 /* Set graph_arr for fragile ops. Also here we should break recursion.
2372 We could choose a cyclic path through an cfop. But the recursion would
2373 break at some point. */
2374 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2380 /* This loop goes to all predecessor blocks of the block the Phi node
2381 is in and there finds the operands of the Phi node by calling
2382 get_r_value_internal. */
2383 for (i = 1; i <= ins; ++i) {
2384 prevCfOp = skip_Proj(block->in[i]);
2386 if (is_Bad(prevCfOp)) {
2387 /* In case a Cond has been optimized we would get right to the start block
2388 with an invalid definition. */
2389 nin[i-1] = new_Bad();
2392 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2394 if (!is_Bad(prevBlock)) {
2395 #if PRECISE_EXC_CONTEXT
2396 if (get_opt_precise_exc_context() &&
2397 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2398 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2399 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2402 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2404 nin[i-1] = new_Bad();
2408 /* We want to pass the Phi0 node to the constructor: this finds additional
2409 optimization possibilities.
2410 The Phi0 node either is allocated in this function, or it comes from
2411 a former call to get_r_value_internal. In this case we may not yet
2412 exchange phi0, as this is done in mature_immBlock. */
2414 phi0_all = block->attr.block.graph_arr[pos];
2415 if (!((get_irn_op(phi0_all) == op_Phi) &&
2416 (get_irn_arity(phi0_all) == 0) &&
2417 (get_nodes_block(phi0_all) == block)))
2423 /* After collecting all predecessors into the array nin a new Phi node
2424 with these predecessors is created. This constructor contains an
2425 optimization: If all predecessors of the Phi node are identical it
2426 returns the only operand instead of a new Phi node. */
2427 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2429 /* In case we allocated a Phi0 node at the beginning of this procedure,
2430 we need to exchange this Phi0 with the real Phi. */
2432 exchange(phi0, res);
2433 block->attr.block.graph_arr[pos] = res;
2434 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2435 only an optimization. */
2441 /* This function returns the last definition of a variable. In case
2442 this variable was last defined in a previous block, Phi nodes are
2443 inserted. If the part of the firm graph containing the definition
2444 is not yet constructed, a dummy Phi node is returned. */
/* Robust (default) variant; differs from the fast variant above in case 4:
   phi_merge always left a Phi0 in graph_arr, so re-entry can return it
   instead of the fragile NULL/obstack-address protocol. */
2446 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2449 /* There are 4 cases to treat.
2451 1. The block is not mature and we visit it the first time. We can not
2452 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2453 predecessors is returned. This node is added to the linked list (field
2454 "link") of the containing block to be completed when this block is
2455 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2458 2. The value is already known in this block, graph_arr[pos] is set and we
2459 visit the block the first time. We can return the value without
2460 creating any new nodes.
2462 3. The block is mature and we visit it the first time. A Phi node needs
2463 to be created (phi_merge). If the Phi is not needed, as all it's
2464 operands are the same value reaching the block through different
2465 paths, it's optimized away and the value itself is returned.
2467 4. The block is mature, and we visit it the second time. Now two
2468 subcases are possible:
2469 * The value was computed completely the last time we were here. This
2470 is the case if there is no loop. We can return the proper value.
2471 * The recursion that visited this node and set the flag did not
2472 return yet. We are computing a value in a loop and need to
2473 break the recursion. This case only happens if we visited
2474 the same block with phi_merge before, which inserted a Phi0.
2475 So we return the Phi0.
2478 /* case 4 -- already visited. */
2479 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2480 /* As phi_merge allocates a Phi0 this value is always defined. Here
2481 is the critical difference of the two algorithms. */
2482 assert(block->attr.block.graph_arr[pos]);
2483 return block->attr.block.graph_arr[pos];
2486 /* visited the first time */
2487 set_irn_visited(block, get_irg_visited(current_ir_graph));
2489 /* Get the local valid value */
2490 res = block->attr.block.graph_arr[pos];
2492 /* case 2 -- If the value is actually computed, return it. */
2493 if (res) { return res; };
2495 if (block->attr.block.matured) { /* case 3 */
2497 /* The Phi has the same amount of ins as the corresponding block. */
2498 int ins = get_irn_arity(block);
2500 NEW_ARR_A (ir_node *, nin, ins);
2502 /* Phi merge collects the predecessors and then creates a node. */
2503 res = phi_merge (block, pos, mode, nin, ins);
2505 } else { /* case 1 */
2506 /* The block is not mature, we don't know how many in's are needed. A Phi
2507 with zero predecessors is created. Such a Phi node is called Phi0
2508 node. The Phi0 is then added to the list of Phi0 nodes in this block
2509 to be matured by mature_immBlock later.
2510 The Phi0 has to remember the pos of it's internal value. If the real
2511 Phi is computed, pos is used to update the array with the local
2513 res = new_rd_Phi0 (current_ir_graph, block, mode);
2514 res->attr.phi0_pos = pos;
2515 res->link = block->link;
2519 /* If we get here, the frontend missed a use-before-definition error */
2522 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2523 assert (mode->code >= irm_F && mode->code <= irm_P);
2524 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2525 get_mode_null(mode));
2528 /* The local valid value is available now. */
2529 block->attr.block.graph_arr[pos] = res;
2534 #endif /* USE_FAST_PHI_CONSTRUCTION */
2536 /* ************************************************************************** */
2539 * Finalize a Block node, when all control flows are known.
2540 * Acceptable parameters are only Block nodes.
2543 mature_immBlock (ir_node *block)
2549 assert (get_irn_opcode(block) == iro_Block);
2550 /* @@@ should be commented in
2551 assert (!get_Block_matured(block) && "Block already matured"); */
2553 if (!get_Block_matured(block)) {
/* NOTE(review): the in array appears to carry one leading
   non-predecessor slot, hence the -1 — confirm against ARR_LEN usage. */
2554 ins = ARR_LEN (block->in)-1;
2555 /* Fix block parameters */
2556 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2558 /* An array for building the Phi nodes. */
2559 NEW_ARR_A (ir_node *, nin, ins);
2561 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each Phi0 on the block's link list is replaced by a real Phi built
   by phi_merge; a fresh visited pass is started per Phi0. */
2563 for (n = block->link; n; n=next) {
2564 inc_irg_visited(current_ir_graph);
2566 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2569 block->attr.block.matured = 1;
2571 /* Now, as the block is a finished firm node, we can optimize it.
2572 Since other nodes have been allocated since the block was created
2573 we can not free the node on the obstack. Therefore we have to call
2575 Unfortunately the optimization does not change a lot, as all allocated
2576 nodes refer to the unoptimized node.
2577 We can call _2, as global cse has no effect on blocks. */
2578 block = optimize_in_place_2(block);
2579 IRN_VRFY_IRG(block, current_ir_graph);
/* new_d_* constructors: build a node in the graph's current block
   (constants are placed in the start block) by forwarding to the
   corresponding new_bd_* constructor with the given debug info. */
2584 new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
2586 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2590 new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
2592 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2596 new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
2598 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2602 new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp)
2604 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2609 new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
2611 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2615 new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2617 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* Marks the Cond as fragmentary and records max_proj as its default
   projection before building the Proj. */
2621 new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
2624 assert(arg->op == op_Cond);
2625 arg->attr.c.kind = fragmentary;
2626 arg->attr.c.default_proj = max_proj;
2627 res = new_Proj (arg, mode_X, max_proj);
2632 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
2634 return new_bd_Conv(db, current_ir_graph->current_block, op, mode);
2638 new_d_Cast (dbg_info *db, ir_node *op, ir_type *to_tp)
2640 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2644 new_d_Tuple (dbg_info *db, int arity, ir_node **in)
2646 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2655 * allocate the frag array
2657 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2658 if (get_opt_precise_exc_context()) {
2659 if ((current_ir_graph->phase_state == phase_building) &&
2660 (get_irn_op(res) == op) && /* Could be optimized away. */
2661 !*frag_store) /* Could be a cse where the arr is already set. */ {
2662 *frag_store = new_frag_arr(res);
/* Division-family constructors. Each builds the node in the current
   block, pins it (it may raise an exception), and — with precise
   exception contexts — attaches a frag array via allocate_frag_arr. */
2668 new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2671 res = new_bd_Quot (db, current_ir_graph->current_block, memop, op1, op2);
2672 res->attr.except.pin_state = op_pin_state_pinned;
2673 #if PRECISE_EXC_CONTEXT
2674 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2681 new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2684 res = new_bd_DivMod (db, current_ir_graph->current_block, memop, op1, op2);
2685 res->attr.except.pin_state = op_pin_state_pinned;
2686 #if PRECISE_EXC_CONTEXT
2687 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2694 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2697 res = new_bd_Div (db, current_ir_graph->current_block, memop, op1, op2);
2698 res->attr.except.pin_state = op_pin_state_pinned;
2699 #if PRECISE_EXC_CONTEXT
2700 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2707 new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2710 res = new_bd_Mod (db, current_ir_graph->current_block, memop, op1, op2);
2711 res->attr.except.pin_state = op_pin_state_pinned;
2712 #if PRECISE_EXC_CONTEXT
2713 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* Remaining new_d_* constructors. All build in the current block
   (SymConsts in the start block). Nodes that can raise exceptions
   (Call, Load, Store, Alloc, CopyB, Bound) additionally get a frag
   array when precise exception contexts are compiled in. */
2732 new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
2734 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2738 new_d_Jmp (dbg_info *db)
2740 return new_bd_Jmp (db, current_ir_graph->current_block);
2744 new_d_IJmp (dbg_info *db, ir_node *tgt)
2746 return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
2750 new_d_Cond (dbg_info *db, ir_node *c)
2752 return new_bd_Cond (db, current_ir_graph->current_block, c);
2756 new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2760 res = new_bd_Call (db, current_ir_graph->current_block,
2761 store, callee, arity, in, tp);
2762 #if PRECISE_EXC_CONTEXT
2763 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2770 new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
2772 return new_bd_Return (db, current_ir_graph->current_block,
2777 new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
2780 res = new_bd_Load (db, current_ir_graph->current_block,
2782 #if PRECISE_EXC_CONTEXT
2783 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2790 new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
2793 res = new_bd_Store (db, current_ir_graph->current_block,
2795 #if PRECISE_EXC_CONTEXT
2796 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2803 new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2807 res = new_bd_Alloc (db, current_ir_graph->current_block,
2808 store, size, alloc_type, where);
2809 #if PRECISE_EXC_CONTEXT
2810 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
2817 new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
2818 ir_node *size, ir_type *free_type, where_alloc where)
2820 return new_bd_Free (db, current_ir_graph->current_block,
2821 store, ptr, size, free_type, where);
/* simpleSel: a Sel with no index operands (n_index = 0, index = NULL). */
2825 new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
2826 /* GL: objptr was called frame before. Frame was a bad choice for the name
2827 as the operand could as well be a pointer to a dynamic object. */
2829 return new_bd_Sel (db, current_ir_graph->current_block,
2830 store, objptr, 0, NULL, ent);
2834 new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2836 return new_bd_Sel (db, current_ir_graph->current_block,
2837 store, objptr, n_index, index, sel);
2841 new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
2843 return new_bd_SymConst_type (db, get_irg_start_block(current_ir_graph),
2848 new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
2850 return new_bd_SymConst (db, get_irg_start_block(current_ir_graph),
2855 new_d_Sync (dbg_info *db, int arity, ir_node *in[])
2857 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block, arity, in);
2863 return _new_d_Bad();
2867 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2869 return new_bd_Confirm (db, current_ir_graph->current_block,
2874 new_d_Unknown (ir_mode *m)
2876 return new_bd_Unknown(m);
2880 new_d_CallBegin (dbg_info *db, ir_node *call)
2883 res = new_bd_CallBegin (db, current_ir_graph->current_block, call);
2888 new_d_EndReg (dbg_info *db)
2891 res = new_bd_EndReg(db, current_ir_graph->current_block);
2896 new_d_EndExcept (dbg_info *db)
2899 res = new_bd_EndExcept(db, current_ir_graph->current_block);
2904 new_d_Break (dbg_info *db)
2906 return new_bd_Break (db, current_ir_graph->current_block);
2910 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2912 return new_bd_Filter (db, current_ir_graph->current_block,
2919 return _new_d_NoMem();
2923 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
2924 ir_node *ir_true, ir_mode *mode) {
2925 return new_bd_Mux (db, current_ir_graph->current_block,
2926 sel, ir_false, ir_true, mode);
2930 new_d_Psi (dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2931 return new_bd_Psi (db, current_ir_graph->current_block,
2932 arity, conds, vals, mode);
2935 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2936 ir_node *dst, ir_node *src, ir_type *data_type) {
2938 res = new_bd_CopyB(db, current_ir_graph->current_block,
2939 store, dst, src, data_type);
2940 #if PRECISE_EXC_CONTEXT
2941 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2947 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type)
2949 return new_bd_InstOf (db, current_ir_graph->current_block,
2950 store, objptr, type);
2954 new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
2956 return new_bd_Raise (db, current_ir_graph->current_block,
2960 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2961 ir_node *idx, ir_node *lower, ir_node *upper) {
2963 res = new_bd_Bound(db, current_ir_graph->current_block,
2964 store, idx, lower, upper);
2965 #if PRECISE_EXC_CONTEXT
2966 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2971 /* ********************************************************************* */
2972 /* Comfortable interface with automatic Phi node construction. */
2973 /* (Uses also constructors of ?? interface, except new_Block. */
2974 /* ********************************************************************* */
2976 /* Block construction */
2977 /* immature Block without predecessors */
/* Creates an immature block: a Block with a dynamic (growable) in
   array, registered as the graph's current block. Predecessors are
   added later with add_immBlock_pred and the block is finished with
   mature_immBlock. */
2978 ir_node *new_d_immBlock (dbg_info *db) {
2981 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2982 /* creates a new dynamic in-array as length of in is -1 */
2983 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2984 current_ir_graph->current_block = res;
2985 res->attr.block.matured = 0;
2986 res->attr.block.dead = 0;
2987 /* res->attr.block.exc = exc_normal; */
2988 /* res->attr.block.handler_entry = 0; */
2989 res->attr.block.irg = current_ir_graph;
2990 res->attr.block.backedge = NULL;
2991 res->attr.block.in_cg = NULL;
2992 res->attr.block.cg_backedge = NULL;
2993 set_Block_block_visited(res, 0);
2995 /* Create and initialize array for Phi-node construction. */
2996 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2997 current_ir_graph->n_loc);
2998 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3000 /* Immature block may not be optimized! */
3001 IRN_VRFY_IRG(res, current_ir_graph);
3007 new_immBlock (void) {
3008 return new_d_immBlock(NULL);
3011 /* add an edge to a jmp/control flow node */
/* Appends jmp as an additional control-flow predecessor of an
   immature block; asserts if the block has already been matured. */
3013 add_immBlock_pred (ir_node *block, ir_node *jmp)
3015 if (block->attr.block.matured) {
3016 assert(0 && "Error: Block already matured!\n");
3019 assert(jmp != NULL);
3020 ARR_APP1(ir_node *, block->in, jmp);
3024 /* changing the current block */
/* Makes target the block into which subsequent new_* constructors build. */
3026 set_cur_block (ir_node *target) {
3027 current_ir_graph->current_block = target;
3030 /* ************************ */
3031 /* parameter administration */
3033 /* get a value from the parameter array from the current block by its index */
/* Reads local variable pos in the current block; may insert Phi nodes
   (see get_r_value_internal). pos is shifted by 1 because slot 0 of
   graph_arr holds the memory store. */
3035 get_d_value (dbg_info *db, int pos, ir_mode *mode)
3037 assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* Start a fresh visited pass for the Phi-construction walk. */
3038 inc_irg_visited(current_ir_graph);
3040 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3042 /* get a value from the parameter array from the current block by its index */
/* Same as get_d_value, without debug info. */
3044 get_value (int pos, ir_mode *mode)
3046 return get_d_value(NULL, pos, mode);
3049 /* set a value at position pos in the parameter array from the current block */
3051 set_value (int pos, ir_node *value)
3053 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3054 assert(pos+1 < current_ir_graph->n_loc);
3055 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
/* Linear search of the current block's graph_arr for value; starts at
   index 1 because slot 0 holds the memory store. */
3059 find_value(ir_node *value)
3062 ir_node *bl = current_ir_graph->current_block;
3064 for (i = 1; i < ARR_LEN(bl->attr.block.graph_arr); ++i)
3065 if (bl->attr.block.graph_arr[i] == value)
3070 /* get the current store */
/* Returns the current memory state: local slot 0, mode_M; inserts
   Phi nodes across block boundaries like any other local value. */
3074 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3075 /* GL: one could call get_value instead */
3076 inc_irg_visited(current_ir_graph);
3077 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3080 /* set the current store: handles automatic Sync construction for Load nodes */
3082 set_store (ir_node *store)
3084 ir_node *load, *pload, *pred, *in[2];
3086 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3088 if (get_opt_auto_create_sync()) {
3089 /* handle non-volatile Load nodes by automatically creating Sync's */
3090 load = skip_Proj(store);
3091 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3092 pred = get_Load_mem(load);
3094 if (is_Sync(pred)) {
3095 /* a Load after a Sync: move it up */
3096 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3098 set_Load_mem(load, get_memop_mem(mem));
/* The existing Sync absorbs the new Load's memory result. */
3099 add_Sync_pred(pred, store);
3103 pload = skip_Proj(pred);
3104 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3105 /* a Load after a Load: create a new Sync */
3106 set_Load_mem(load, get_Load_mem(pload));
3110 store = new_Sync(2, in);
/* Slot 0 of graph_arr is the current memory state of the block. */
3115 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Protects ka from being removed by dead-node elimination by adding
   it to the End node's keep-alive list. */
3119 keep_alive (ir_node *ka) {
3120 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3123 /* --- Useful access routines --- */
3124 /* Returns the current block of the current graph. To set the current
3125 block use set_cur_block. */
3126 ir_node *get_cur_block(void) {
3127 return get_irg_current_block(current_ir_graph);
3130 /* Returns the frame type of the current graph */
3131 ir_type *get_cur_frame_type(void) {
3132 return get_irg_frame_type(current_ir_graph);
3136 /* ********************************************************************* */
3139 /* call once for each run of the library */
/* Installs the frontend callback used to initialize otherwise
   undefined local variables during SSA construction. */
3141 init_cons(uninitialized_local_variable_func_t *func)
3143 default_initialize_local_variable = func;
/* Finalizes construction of every graph in the program and advances
   the program's phase to phase_high. */
3147 irp_finalize_cons (void) {
3149 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3150 irg_finalize_cons(get_irp_irg(i));
3152 irp->phase_state = phase_high;
/* Debug-info-less convenience constructors: each forwards to the
   corresponding new_d_* constructor with a NULL dbg_info. */
3156 ir_node *new_Block(int arity, ir_node **in) {
3157 return new_d_Block(NULL, arity, in);
3159 ir_node *new_Start (void) {
3160 return new_d_Start(NULL);
3162 ir_node *new_End (void) {
3163 return new_d_End(NULL);
3165 ir_node *new_Jmp (void) {
3166 return new_d_Jmp(NULL);
3168 ir_node *new_IJmp (ir_node *tgt) {
3169 return new_d_IJmp(NULL, tgt);
3171 ir_node *new_Cond (ir_node *c) {
3172 return new_d_Cond(NULL, c);
3174 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3175 return new_d_Return(NULL, store, arity, in);
3177 ir_node *new_Const (ir_mode *mode, tarval *con) {
3178 return new_d_Const(NULL, mode, con);
3181 ir_node *new_Const_long(ir_mode *mode, long value)
3183 return new_d_Const_long(NULL, mode, value);
/* Derives the mode from the type. */
3186 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3187 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3190 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3191 return new_d_SymConst(NULL, value, kind);
3193 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3194 return new_d_simpleSel(NULL, store, objptr, ent);
3196 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3198 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3200 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3202 return new_d_Call(NULL, store, callee, arity, in, tp);
3204 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3205 return new_d_Add(NULL, op1, op2, mode);
3207 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3208 return new_d_Sub(NULL, op1, op2, mode);
3210 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3211 return new_d_Minus(NULL, op, mode);
3213 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3214 return new_d_Mul(NULL, op1, op2, mode);
3216 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3217 return new_d_Quot(NULL, memop, op1, op2);
3219 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3220 return new_d_DivMod(NULL, memop, op1, op2);
3222 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3223 return new_d_Div(NULL, memop, op1, op2);
3225 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3226 return new_d_Mod(NULL, memop, op1, op2);
3228 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3229 return new_d_Abs(NULL, op, mode);
3231 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3232 return new_d_And(NULL, op1, op2, mode);
3234 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3235 return new_d_Or(NULL, op1, op2, mode);
3237 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3238 return new_d_Eor(NULL, op1, op2, mode);
3240 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3241 return new_d_Not(NULL, op, mode);
3243 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3244 return new_d_Shl(NULL, op, k, mode);
3246 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3247 return new_d_Shr(NULL, op, k, mode);
3249 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3250 return new_d_Shrs(NULL, op, k, mode);
3252 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3253 return new_d_Rot(NULL, op, k, mode);
3255 ir_node *new_Carry (ir_node *op1, ir_node *op2, ir_mode *mode) {
3256 return new_d_Carry(NULL, op1, op2, mode);
3258 ir_node *new_Borrow (ir_node *op1, ir_node *op2, ir_mode *mode) {
3259 return new_d_Borrow(NULL, op1, op2, mode);
3261 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3262 return new_d_Cmp(NULL, op1, op2);
3264 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3265 return new_d_Conv(NULL, op, mode);
3267 ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
3268 return new_d_Cast(NULL, op, to_tp);
3270 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3271 return new_d_Phi(NULL, arity, in, mode);
3273 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3274 return new_d_Load(NULL, store, addr, mode);
3276 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3277 return new_d_Store(NULL, store, addr, val);
3279 ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
3280 where_alloc where) {
3281 return new_d_Alloc(NULL, store, size, alloc_type, where);
3283 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3284 ir_type *free_type, where_alloc where) {
3285 return new_d_Free(NULL, store, ptr, size, free_type, where);
3287 ir_node *new_Sync (int arity, ir_node *in[]) {
3288 return new_d_Sync(NULL, arity, in);
3290 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3291 return new_d_Proj(NULL, arg, mode, proj);
3293 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3294 return new_d_defaultProj(NULL, arg, max_proj);
3296 ir_node *new_Tuple (int arity, ir_node **in) {
3297 return new_d_Tuple(NULL, arity, in);
3299 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3300 return new_d_Id(NULL, val, mode);
3302 ir_node *new_Bad (void) {
3305 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3306 return new_d_Confirm (NULL, val, bound, cmp);
3308 ir_node *new_Unknown(ir_mode *m) {
3309 return new_d_Unknown(m);
3311 ir_node *new_CallBegin (ir_node *callee) {
3312 return new_d_CallBegin(NULL, callee);
3314 ir_node *new_EndReg (void) {
3315 return new_d_EndReg(NULL);
3317 ir_node *new_EndExcept (void) {
3318 return new_d_EndExcept(NULL);
3320 ir_node *new_Break (void) {
3321 return new_d_Break(NULL);
3323 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3324 return new_d_Filter(NULL, arg, mode, proj);
3326 ir_node *new_NoMem (void) {
3327 return new_d_NoMem();
3329 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3330 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3332 ir_node *new_Psi (int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3333 return new_d_Psi(NULL, arity, conds, vals, mode);
3335 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3336 return new_d_CopyB(NULL, store, dst, src, data_type);
3338 ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
3339 return new_d_InstOf (NULL, store, objptr, ent);
3341 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3342 return new_d_Raise(NULL, store, obj);
3344 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3345 return new_d_Bound(NULL, store, idx, lower, upper);