2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
60 /* when we need verifying */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
68 * Language dependent variable initialization callback.
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = state; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for an divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
161 current_ir_graph = rem; \
165 /* creates a d constructor for an binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Cannot be used with automatic
183 * Phi node construction.
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.has_label = 0;
198 res->attr.block.irg = irg;
199 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
200 res->attr.block.in_cg = NULL;
201 res->attr.block.cg_backedge = NULL;
202 res->attr.block.extblk = NULL;
203 res->attr.block.mb_depth = 0;
204 res->attr.block.label = 0;
206 set_Block_matured(res, 1);
207 set_Block_block_visited(res, 0);
209 IRN_VRFY_IRG(res, irg);
214 new_bd_Start(dbg_info *db, ir_node *block) {
216 ir_graph *irg = current_ir_graph;
218 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
220 IRN_VRFY_IRG(res, irg);
225 new_bd_End(dbg_info *db, ir_node *block) {
227 ir_graph *irg = current_ir_graph;
229 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
231 IRN_VRFY_IRG(res, irg);
236 * Creates a Phi node with all predecessors. Calling this constructor
237 * is only allowed if the corresponding block is mature.
/* Creates a Phi node in @p block with @p arity predecessors @p in of mode
 * @p mode. Legal only when the block's predecessor count is settled; a
 * matured block must have exactly @p arity control-flow predecessors.
 * NOTE(review): this extract elides several lines (declarations of res, i,
 * has_unknown and part of the loop body) -- hedged comments below. */
240 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
242 ir_graph *irg = current_ir_graph;
246 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block already matured, its arity must match the Phi's arity. */
248 if ( get_Block_matured(block) )
249 assert( get_irn_arity(block) == arity );
251 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
/* Fresh backedge flags, one per predecessor, on the graph's obstack. */
253 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan predecessors for Unknown inputs; presumably the (elided) loop body
   sets has_unknown -- TODO confirm against the full source. */
255 for (i = arity-1; i >= 0; i--)
256 if (get_irn_op(in[i]) == op_Unknown) {
/* Skip optimization while Unknown inputs remain. */
261 if (!has_unknown) res = optimize_node(res);
262 IRN_VRFY_IRG(res, irg);
264 /* Memory Phis in endless loops must be kept alive.
265 As we can't distinguish these easily we keep all of them alive. */
266 if ((res->op == op_Phi) && (mode == mode_M))
267 add_End_keepalive(get_irg_end(irg), res);
/* Creates a Const node carrying tarval @p con with type @p tp.
 * NOTE: the @p block parameter is deliberately ignored -- the node is
 * always placed in the start block of the current graph (see the
 * get_irg_start_block() argument below). */
272 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
274 ir_graph *irg = current_ir_graph;
277 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
278 res->attr.con.tv = con;
279 set_Const_type(res, tp); /* Call method because of complex assertion. */
280 res = optimize_node (res);
/* optimize_node may CSE to an existing Const; it must still carry type tp. */
281 assert(get_Const_type(res) == tp);
282 IRN_VRFY_IRG(res, irg);
285 } /* new_bd_Const_type */
288 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
289 ir_graph *irg = current_ir_graph;
291 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
295 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
296 ir_graph *irg = current_ir_graph;
298 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
299 } /* new_bd_Const_long */
302 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
304 ir_graph *irg = current_ir_graph;
306 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
307 res = optimize_node(res);
308 IRN_VRFY_IRG(res, irg);
313 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
316 ir_graph *irg = current_ir_graph;
318 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
319 res->attr.proj = proj;
322 assert(get_Proj_pred(res));
323 assert(get_nodes_block(get_Proj_pred(res)));
325 res = optimize_node(res);
327 IRN_VRFY_IRG(res, irg);
/* Builds the default Proj (mode_X) for the Cond node @p arg.
 * Side effect: mutates @p arg itself -- the Cond is marked as a
 * "fragmentary" jump table and remembers @p max_proj as its default
 * projection number. */
332 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
335 ir_graph *irg = current_ir_graph;
/* Only Cond nodes may receive a default Proj. */
337 assert(arg->op == op_Cond);
338 arg->attr.cond.kind = fragmentary;
339 arg->attr.cond.default_proj = max_proj;
/* Control-flow Proj, hence mode_X, with projection number max_proj. */
340 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
342 } /* new_bd_defaultProj */
345 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
347 ir_graph *irg = current_ir_graph;
349 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
350 res->attr.conv.strict = strict_flag;
351 res = optimize_node(res);
352 IRN_VRFY_IRG(res, irg);
357 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
359 ir_graph *irg = current_ir_graph;
361 assert(is_atomic_type(to_tp));
363 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
364 res->attr.cast.totype = to_tp;
365 res = optimize_node(res);
366 IRN_VRFY_IRG(res, irg);
371 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
373 ir_graph *irg = current_ir_graph;
375 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
376 res = optimize_node (res);
377 IRN_VRFY_IRG(res, irg);
403 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
406 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
410 res = optimize_node(res);
411 IRN_VRFY_IRG(res, irg);
416 new_bd_Jmp(dbg_info *db, ir_node *block) {
418 ir_graph *irg = current_ir_graph;
420 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
421 res = optimize_node(res);
422 IRN_VRFY_IRG(res, irg);
427 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
429 ir_graph *irg = current_ir_graph;
431 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
432 res = optimize_node(res);
433 IRN_VRFY_IRG(res, irg);
438 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
443 res->attr.cond.kind = dense;
444 res->attr.cond.default_proj = 0;
445 res->attr.cond.pred = COND_JMP_PRED_NONE;
446 res = optimize_node(res);
447 IRN_VRFY_IRG(res, irg);
452 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
453 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
457 ir_graph *irg = current_ir_graph;
460 NEW_ARR_A(ir_node *, r_in, r_arity);
463 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
465 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
467 assert((get_unknown_type() == tp) || is_Method_type(tp));
468 set_Call_type(res, tp);
469 res->attr.call.exc.pin_state = op_pin_state_pinned;
470 res->attr.call.callee_arr = NULL;
471 res = optimize_node(res);
472 IRN_VRFY_IRG(res, irg);
477 new_bd_Return(dbg_info *db, ir_node *block,
478 ir_node *store, int arity, ir_node **in) {
482 ir_graph *irg = current_ir_graph;
485 NEW_ARR_A (ir_node *, r_in, r_arity);
487 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
488 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
489 res = optimize_node(res);
490 IRN_VRFY_IRG(res, irg);
492 } /* new_bd_Return */
495 new_bd_Load(dbg_info *db, ir_node *block,
496 ir_node *store, ir_node *adr, ir_mode *mode) {
499 ir_graph *irg = current_ir_graph;
503 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
504 res->attr.load.exc.pin_state = op_pin_state_pinned;
505 res->attr.load.load_mode = mode;
506 res->attr.load.volatility = volatility_non_volatile;
507 res->attr.load.aligned = align_is_aligned;
508 res = optimize_node(res);
509 IRN_VRFY_IRG(res, irg);
514 new_bd_Store(dbg_info *db, ir_node *block,
515 ir_node *store, ir_node *adr, ir_node *val) {
518 ir_graph *irg = current_ir_graph;
523 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
524 res->attr.store.exc.pin_state = op_pin_state_pinned;
525 res->attr.store.volatility = volatility_non_volatile;
526 res->attr.store.aligned = align_is_aligned;
527 res = optimize_node(res);
528 IRN_VRFY_IRG(res, irg);
533 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
534 ir_node *size, ir_type *alloc_type, where_alloc where) {
537 ir_graph *irg = current_ir_graph;
541 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
542 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
543 res->attr.alloc.where = where;
544 res->attr.alloc.type = alloc_type;
545 res = optimize_node(res);
546 IRN_VRFY_IRG(res, irg);
551 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
552 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
555 ir_graph *irg = current_ir_graph;
560 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
561 res->attr.free.where = where;
562 res->attr.free.type = free_type;
563 res = optimize_node(res);
564 IRN_VRFY_IRG(res, irg);
569 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
570 int arity, ir_node **in, ir_entity *ent) {
574 ir_graph *irg = current_ir_graph;
575 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
577 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
580 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
583 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
585 * Sel's can select functions which should be of mode mode_P_code.
587 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
588 res->attr.sel.ent = ent;
589 res = optimize_node(res);
590 IRN_VRFY_IRG(res, irg);
595 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
596 symconst_kind symkind, ir_type *tp) {
599 ir_graph *irg = current_ir_graph;
601 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
602 mode = mode_P_data; /* FIXME: can be mode_P_code */
606 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
608 res->attr.symc.num = symkind;
609 res->attr.symc.sym = value;
610 res->attr.symc.tp = tp;
612 res = optimize_node(res);
613 IRN_VRFY_IRG(res, irg);
615 } /* new_bd_SymConst_type */
618 new_bd_Sync(dbg_info *db, ir_node *block) {
620 ir_graph *irg = current_ir_graph;
622 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
623 /* no need to call optimize node here, Sync are always created with no predecessors */
624 IRN_VRFY_IRG(res, irg);
629 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
630 ir_node *in[2], *res;
631 ir_graph *irg = current_ir_graph;
635 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
636 res->attr.confirm.cmp = cmp;
637 res = optimize_node(res);
638 IRN_VRFY_IRG(res, irg);
640 } /* new_bd_Confirm */
643 new_bd_Unknown(ir_mode *m) {
645 ir_graph *irg = current_ir_graph;
647 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
648 res = optimize_node(res);
650 } /* new_bd_Unknown */
653 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
656 ir_graph *irg = current_ir_graph;
658 in[0] = get_Call_ptr(call);
659 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
660 /* res->attr.callbegin.irg = irg; */
661 res->attr.callbegin.call = call;
662 res = optimize_node(res);
663 IRN_VRFY_IRG(res, irg);
665 } /* new_bd_CallBegin */
668 new_bd_EndReg(dbg_info *db, ir_node *block) {
670 ir_graph *irg = current_ir_graph;
672 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
673 set_irg_end_reg(irg, res);
674 IRN_VRFY_IRG(res, irg);
676 } /* new_bd_EndReg */
679 new_bd_EndExcept(dbg_info *db, ir_node *block) {
681 ir_graph *irg = current_ir_graph;
683 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
684 set_irg_end_except(irg, res);
685 IRN_VRFY_IRG (res, irg);
687 } /* new_bd_EndExcept */
690 new_bd_Break(dbg_info *db, ir_node *block) {
692 ir_graph *irg = current_ir_graph;
694 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
695 res = optimize_node(res);
696 IRN_VRFY_IRG(res, irg);
701 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
704 ir_graph *irg = current_ir_graph;
706 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
707 res->attr.filter.proj = proj;
708 res->attr.filter.in_cg = NULL;
709 res->attr.filter.backedge = NULL;
712 assert(get_Proj_pred(res));
713 assert(get_nodes_block(get_Proj_pred(res)));
715 res = optimize_node(res);
716 IRN_VRFY_IRG(res, irg);
718 } /* new_bd_Filter */
721 new_bd_Mux(dbg_info *db, ir_node *block,
722 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
725 ir_graph *irg = current_ir_graph;
731 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
734 res = optimize_node(res);
735 IRN_VRFY_IRG(res, irg);
740 new_bd_Psi(dbg_info *db, ir_node *block,
741 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
744 ir_graph *irg = current_ir_graph;
747 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
749 for (i = 0; i < arity; ++i) {
751 in[2 * i + 1] = vals[i];
755 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
758 res = optimize_node(res);
759 IRN_VRFY_IRG(res, irg);
764 new_bd_CopyB(dbg_info *db, ir_node *block,
765 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
768 ir_graph *irg = current_ir_graph;
774 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
776 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
777 res->attr.copyb.data_type = data_type;
778 res = optimize_node(res);
779 IRN_VRFY_IRG(res, irg);
/* Creates an InstOf node (per its name, apparently a runtime type check of
 * @p objptr against @p type) with memory input @p store.
 * NOTE(review): the opcode passed to new_ir_node is op_Sel, yet the
 * attr.instof union member is written immediately afterwards. This looks
 * like it should be op_InstOf -- confirm against the irop definitions
 * before relying on this constructor. */
784 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
785 ir_node *objptr, ir_type *type) {
788 ir_graph *irg = current_ir_graph;
792 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
793 res->attr.instof.type = type;
794 res = optimize_node(res);
795 IRN_VRFY_IRG(res, irg);
797 } /* new_bd_InstOf */
800 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
803 ir_graph *irg = current_ir_graph;
807 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
808 res = optimize_node(res);
809 IRN_VRFY_IRG(res, irg);
814 new_bd_Bound(dbg_info *db, ir_node *block,
815 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
818 ir_graph *irg = current_ir_graph;
824 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
825 res->attr.bound.exc.pin_state = op_pin_state_pinned;
826 res = optimize_node(res);
827 IRN_VRFY_IRG(res, irg);
832 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
834 ir_graph *irg = current_ir_graph;
836 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
837 res = optimize_node(res);
838 IRN_VRFY_IRG(res, irg);
843 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
844 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
846 ir_graph *irg = current_ir_graph;
849 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
850 res->attr.assem.pin_state = op_pin_state_pinned;
851 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
852 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
853 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
854 res->attr.assem.asm_text = asm_text;
856 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
857 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
859 res = optimize_node(res);
860 IRN_VRFY_IRG(res, irg);
864 /* --------------------------------------------- */
865 /* private interfaces, for professional use only */
866 /* --------------------------------------------- */
868 /* Constructs a Block with a fixed number of predecessors.
869 Does not set current_block. Can not be used with automatic
870 Phi node construction. */
872 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
873 ir_graph *rem = current_ir_graph;
876 current_ir_graph = irg;
877 res = new_bd_Block(db, arity, in);
878 current_ir_graph = rem;
884 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
885 ir_graph *rem = current_ir_graph;
888 current_ir_graph = irg;
889 res = new_bd_Start(db, block);
890 current_ir_graph = rem;
896 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
898 ir_graph *rem = current_ir_graph;
900 current_ir_graph = irg;
901 res = new_bd_End(db, block);
902 current_ir_graph = rem;
907 /* Creates a Phi node with all predecessors. Calling this constructor
908 is only allowed if the corresponding block is mature. */
910 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
912 ir_graph *rem = current_ir_graph;
914 current_ir_graph = irg;
915 res = new_bd_Phi(db, block,arity, in, mode);
916 current_ir_graph = rem;
922 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
924 ir_graph *rem = current_ir_graph;
926 current_ir_graph = irg;
927 res = new_bd_Const_type(db, block, mode, con, tp);
928 current_ir_graph = rem;
931 } /* new_rd_Const_type */
934 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
936 ir_graph *rem = current_ir_graph;
938 current_ir_graph = irg;
939 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
940 current_ir_graph = rem;
946 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
947 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
948 } /* new_rd_Const_long */
951 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
953 ir_graph *rem = current_ir_graph;
955 current_ir_graph = irg;
956 res = new_bd_Id(db, block, val, mode);
957 current_ir_graph = rem;
963 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
966 ir_graph *rem = current_ir_graph;
968 current_ir_graph = irg;
969 res = new_bd_Proj(db, block, arg, mode, proj);
970 current_ir_graph = rem;
976 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
979 ir_graph *rem = current_ir_graph;
981 current_ir_graph = irg;
982 res = new_bd_defaultProj(db, block, arg, max_proj);
983 current_ir_graph = rem;
986 } /* new_rd_defaultProj */
989 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
991 ir_graph *rem = current_ir_graph;
993 current_ir_graph = irg;
994 res = new_bd_Conv(db, block, op, mode, 0);
995 current_ir_graph = rem;
1001 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1003 ir_graph *rem = current_ir_graph;
1005 current_ir_graph = irg;
1006 res = new_bd_Cast(db, block, op, to_tp);
1007 current_ir_graph = rem;
1013 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1015 ir_graph *rem = current_ir_graph;
1017 current_ir_graph = irg;
1018 res = new_bd_Tuple(db, block, arity, in);
1019 current_ir_graph = rem;
1022 } /* new_rd_Tuple */
1030 NEW_RD_DIVOP(DivMod)
1043 NEW_RD_BINOP(Borrow)
1046 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1047 ir_node *op1, ir_node *op2) {
1049 ir_graph *rem = current_ir_graph;
1051 current_ir_graph = irg;
1052 res = new_bd_Cmp(db, block, op1, op2);
1053 current_ir_graph = rem;
1059 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1061 ir_graph *rem = current_ir_graph;
1063 current_ir_graph = irg;
1064 res = new_bd_Jmp(db, block);
1065 current_ir_graph = rem;
1071 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1073 ir_graph *rem = current_ir_graph;
1075 current_ir_graph = irg;
1076 res = new_bd_IJmp(db, block, tgt);
1077 current_ir_graph = rem;
1083 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1085 ir_graph *rem = current_ir_graph;
1087 current_ir_graph = irg;
1088 res = new_bd_Cond(db, block, c);
1089 current_ir_graph = rem;
1095 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1096 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1098 ir_graph *rem = current_ir_graph;
1100 current_ir_graph = irg;
1101 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1102 current_ir_graph = rem;
1108 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1109 ir_node *store, int arity, ir_node **in) {
1111 ir_graph *rem = current_ir_graph;
1113 current_ir_graph = irg;
1114 res = new_bd_Return(db, block, store, arity, in);
1115 current_ir_graph = rem;
1118 } /* new_rd_Return */
1121 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1122 ir_node *store, ir_node *adr, ir_mode *mode) {
1124 ir_graph *rem = current_ir_graph;
1126 current_ir_graph = irg;
1127 res = new_bd_Load(db, block, store, adr, mode);
1128 current_ir_graph = rem;
1134 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1135 ir_node *store, ir_node *adr, ir_node *val) {
1137 ir_graph *rem = current_ir_graph;
1139 current_ir_graph = irg;
1140 res = new_bd_Store(db, block, store, adr, val);
1141 current_ir_graph = rem;
1144 } /* new_rd_Store */
1147 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1148 ir_node *size, ir_type *alloc_type, where_alloc where) {
1150 ir_graph *rem = current_ir_graph;
1152 current_ir_graph = irg;
1153 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1154 current_ir_graph = rem;
1157 } /* new_rd_Alloc */
1160 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1161 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1163 ir_graph *rem = current_ir_graph;
1165 current_ir_graph = irg;
1166 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1167 current_ir_graph = rem;
1173 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1174 ir_node *store, ir_node *objptr, ir_entity *ent) {
1176 ir_graph *rem = current_ir_graph;
1178 current_ir_graph = irg;
1179 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1180 current_ir_graph = rem;
1183 } /* new_rd_simpleSel */
1186 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1187 int arity, ir_node **in, ir_entity *ent) {
1189 ir_graph *rem = current_ir_graph;
1191 current_ir_graph = irg;
1192 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1193 current_ir_graph = rem;
1199 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1200 symconst_kind symkind, ir_type *tp) {
1202 ir_graph *rem = current_ir_graph;
1204 current_ir_graph = irg;
1205 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1206 current_ir_graph = rem;
1209 } /* new_rd_SymConst_type */
1212 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1213 symconst_kind symkind) {
1214 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1216 } /* new_rd_SymConst */
1218 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1219 symconst_symbol sym;
1220 sym.entity_p = symbol;
1221 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1222 } /* new_rd_SymConst_addr_ent */
1224 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1225 symconst_symbol sym;
1226 sym.entity_p = symbol;
1227 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1228 } /* new_rd_SymConst_ofs_ent */
1230 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1231 symconst_symbol sym;
1232 sym.ident_p = symbol;
1233 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1234 } /* new_rd_SymConst_addr_name */
1236 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1237 symconst_symbol sym;
1238 sym.type_p = symbol;
1239 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1240 } /* new_rd_SymConst_type_tag */
1242 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1243 symconst_symbol sym;
1244 sym.type_p = symbol;
1245 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1246 } /* new_rd_SymConst_size */
1248 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1249 symconst_symbol sym;
1250 sym.type_p = symbol;
1251 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1252 } /* new_rd_SymConst_align */
/* Graph-explicit Sync constructor: temporarily switches current_ir_graph
 * to @p irg, builds an empty Sync via new_bd_Sync, restores the previous
 * graph, then appends the @p arity predecessors @p in one by one. */
1255 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1257 ir_graph *rem = current_ir_graph;
1260 current_ir_graph = irg;
1261 res = new_bd_Sync(db, block);
1262 current_ir_graph = rem;
/* new_bd_Sync creates the node with a dynamic in-array (arity -1);
   fill in the actual predecessors here. */
1264 for (i = 0; i < arity; ++i)
1265 add_Sync_pred(res, in[i]);
1271 new_rd_Bad(ir_graph *irg) {
1272 return get_irg_bad(irg);
1276 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1278 ir_graph *rem = current_ir_graph;
1280 current_ir_graph = irg;
1281 res = new_bd_Confirm(db, block, val, bound, cmp);
1282 current_ir_graph = rem;
1285 } /* new_rd_Confirm */
1288 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1290 ir_graph *rem = current_ir_graph;
1292 current_ir_graph = irg;
1293 res = new_bd_Unknown(m);
1294 current_ir_graph = rem;
1297 } /* new_rd_Unknown */
1300 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1302 ir_graph *rem = current_ir_graph;
1304 current_ir_graph = irg;
1305 res = new_bd_CallBegin(db, block, call);
1306 current_ir_graph = rem;
1309 } /* new_rd_CallBegin */
1312 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1315 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1316 set_irg_end_reg(irg, res);
1317 IRN_VRFY_IRG(res, irg);
1319 } /* new_rd_EndReg */
1322 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1325 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1326 set_irg_end_except(irg, res);
1327 IRN_VRFY_IRG (res, irg);
1329 } /* new_rd_EndExcept */
1332 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1334 ir_graph *rem = current_ir_graph;
1336 current_ir_graph = irg;
1337 res = new_bd_Break(db, block);
1338 current_ir_graph = rem;
1341 } /* new_rd_Break */
1344 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1347 ir_graph *rem = current_ir_graph;
1349 current_ir_graph = irg;
1350 res = new_bd_Filter(db, block, arg, mode, proj);
1351 current_ir_graph = rem;
1354 } /* new_rd_Filter */
1357 new_rd_NoMem(ir_graph *irg) {
1358 return get_irg_no_mem(irg);
1359 } /* new_rd_NoMem */
1362 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1363 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1365 ir_graph *rem = current_ir_graph;
1367 current_ir_graph = irg;
1368 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1369 current_ir_graph = rem;
1375 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1376 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1378 ir_graph *rem = current_ir_graph;
1380 current_ir_graph = irg;
1381 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1382 current_ir_graph = rem;
1387 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1388 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1390 ir_graph *rem = current_ir_graph;
1392 current_ir_graph = irg;
1393 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1394 current_ir_graph = rem;
1397 } /* new_rd_CopyB */
1400 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1401 ir_node *objptr, ir_type *type) {
1403 ir_graph *rem = current_ir_graph;
1405 current_ir_graph = irg;
1406 res = new_bd_InstOf(db, block, store, objptr, type);
1407 current_ir_graph = rem;
1410 } /* new_rd_InstOf */
1413 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1415 ir_graph *rem = current_ir_graph;
1417 current_ir_graph = irg;
1418 res = new_bd_Raise(db, block, store, obj);
1419 current_ir_graph = rem;
1422 } /* new_rd_Raise */
1424 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1425 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1427 ir_graph *rem = current_ir_graph;
1429 current_ir_graph = irg;
1430 res = new_bd_Bound(db, block, store, idx, lower, upper);
1431 current_ir_graph = rem;
1434 } /* new_rd_Bound */
1436 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1438 ir_graph *rem = current_ir_graph;
1440 current_ir_graph = irg;
1441 res = new_bd_Pin(db, block, node);
1442 current_ir_graph = rem;
1447 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1448 int arity, ir_node *in[], ir_asm_constraint *inputs,
1449 int n_outs, ir_asm_constraint *outputs,
1450 int n_clobber, ident *clobber[], ident *asm_text) {
1452 ir_graph *rem = current_ir_graph;
1454 current_ir_graph = irg;
1455 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1456 current_ir_graph = rem;
1462 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1463 return new_rd_Block(NULL, irg, arity, in);
1465 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1466 return new_rd_Start(NULL, irg, block);
1468 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1469 return new_rd_End(NULL, irg, block);
1471 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1472 return new_rd_Jmp(NULL, irg, block);
1474 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1475 return new_rd_IJmp(NULL, irg, block, tgt);
1477 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1478 return new_rd_Cond(NULL, irg, block, c);
1480 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1481 ir_node *store, int arity, ir_node **in) {
1482 return new_rd_Return(NULL, irg, block, store, arity, in);
1484 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1485 ir_mode *mode, tarval *con) {
1486 return new_rd_Const(NULL, irg, block, mode, con);
1488 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1489 ir_mode *mode, long value) {
1490 return new_rd_Const_long(NULL, irg, block, mode, value);
1492 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1493 ir_mode *mode, tarval *con, ir_type *tp) {
1494 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1496 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1497 symconst_symbol value, symconst_kind symkind) {
1498 return new_rd_SymConst(NULL, irg, block, value, symkind);
1500 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1501 ir_node *objptr, ir_entity *ent) {
1502 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1504 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1505 ir_node *objptr, int n_index, ir_node **index,
1507 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1509 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1510 ir_node *callee, int arity, ir_node **in,
1512 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1514 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1515 ir_node *op1, ir_node *op2, ir_mode *mode) {
1516 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1518 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1519 ir_node *op1, ir_node *op2, ir_mode *mode) {
1520 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1522 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1523 ir_node *op, ir_mode *mode) {
1524 return new_rd_Minus(NULL, irg, block, op, mode);
1526 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1527 ir_node *op1, ir_node *op2, ir_mode *mode) {
1528 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1530 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1531 ir_node *op1, ir_node *op2, ir_mode *mode) {
1532 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1534 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1535 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1536 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1538 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1539 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1540 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1542 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1543 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1544 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1546 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1547 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1548 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1550 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1551 ir_node *op, ir_mode *mode) {
1552 return new_rd_Abs(NULL, irg, block, op, mode);
1554 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1555 ir_node *op1, ir_node *op2, ir_mode *mode) {
1556 return new_rd_And(NULL, irg, block, op1, op2, mode);
1558 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1559 ir_node *op1, ir_node *op2, ir_mode *mode) {
1560 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1562 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1563 ir_node *op1, ir_node *op2, ir_mode *mode) {
1564 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1566 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1567 ir_node *op, ir_mode *mode) {
1568 return new_rd_Not(NULL, irg, block, op, mode);
1570 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1571 ir_node *op, ir_node *k, ir_mode *mode) {
1572 return new_rd_Shl(NULL, irg, block, op, k, mode);
1574 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1575 ir_node *op, ir_node *k, ir_mode *mode) {
1576 return new_rd_Shr(NULL, irg, block, op, k, mode);
1578 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1579 ir_node *op, ir_node *k, ir_mode *mode) {
1580 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1582 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1583 ir_node *op, ir_node *k, ir_mode *mode) {
1584 return new_rd_Rot(NULL, irg, block, op, k, mode);
1586 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1587 ir_node *op, ir_node *k, ir_mode *mode) {
1588 return new_rd_Carry(NULL, irg, block, op, k, mode);
1590 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1591 ir_node *op, ir_node *k, ir_mode *mode) {
1592 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1594 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1595 ir_node *op1, ir_node *op2) {
1596 return new_rd_Cmp(NULL, irg, block, op1, op2);
1598 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1599 ir_node *op, ir_mode *mode) {
1600 return new_rd_Conv(NULL, irg, block, op, mode);
1602 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1603 return new_rd_Cast(NULL, irg, block, op, to_tp);
1605 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1606 ir_node **in, ir_mode *mode) {
1607 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1609 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1610 ir_node *store, ir_node *adr, ir_mode *mode) {
1611 return new_rd_Load(NULL, irg, block, store, adr, mode);
1613 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1614 ir_node *store, ir_node *adr, ir_node *val) {
1615 return new_rd_Store(NULL, irg, block, store, adr, val);
1617 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1618 ir_node *size, ir_type *alloc_type, where_alloc where) {
1619 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1621 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1622 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1623 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1625 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1626 return new_rd_Sync(NULL, irg, block, arity, in);
1628 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1629 ir_mode *mode, long proj) {
1630 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1632 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1634 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1636 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1637 int arity, ir_node **in) {
1638 return new_rd_Tuple(NULL, irg, block, arity, in );
1640 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1641 ir_node *val, ir_mode *mode) {
1642 return new_rd_Id(NULL, irg, block, val, mode);
1644 ir_node *new_r_Bad(ir_graph *irg) {
1645 return new_rd_Bad(irg);
1647 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1648 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1650 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1651 return new_rd_Unknown(irg, m);
1653 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1654 return new_rd_CallBegin(NULL, irg, block, callee);
1656 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1657 return new_rd_EndReg(NULL, irg, block);
1659 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1660 return new_rd_EndExcept(NULL, irg, block);
1662 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1663 return new_rd_Break(NULL, irg, block);
1665 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1666 ir_mode *mode, long proj) {
1667 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1669 ir_node *new_r_NoMem(ir_graph *irg) {
1670 return new_rd_NoMem(irg);
1672 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1673 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1674 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1676 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1677 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1678 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1680 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1681 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1682 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1684 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1686 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1688 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1689 ir_node *store, ir_node *obj) {
1690 return new_rd_Raise(NULL, irg, block, store, obj);
1692 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1693 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1694 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1696 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1697 return new_rd_Pin(NULL, irg, block, node);
1699 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1700 int arity, ir_node *in[], ir_asm_constraint *inputs,
1701 int n_outs, ir_asm_constraint *outputs,
1702 int n_clobber, ident *clobber[], ident *asm_text) {
1703 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1706 /** ********************/
1707 /** public interfaces */
1708 /** construction tools */
1712 * - create a new Start node in the current block
1714 * @return s - pointer to the created Start node
1719 new_d_Start(dbg_info *db) {
1722 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1723 op_Start, mode_T, 0, NULL);
1725 res = optimize_node(res);
1726 IRN_VRFY_IRG(res, current_ir_graph);
1731 new_d_End(dbg_info *db) {
1733 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1734 op_End, mode_X, -1, NULL);
1735 res = optimize_node(res);
1736 IRN_VRFY_IRG(res, current_ir_graph);
1741 /* Constructs a Block with a fixed number of predecessors.
1742 Does set current_block. Can be used with automatic Phi
1743 node construction. */
1745 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1748 int has_unknown = 0;
1750 res = new_bd_Block(db, arity, in);
1752 /* Create and initialize array for Phi-node construction. */
1753 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1754 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1755 current_ir_graph->n_loc);
1756 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1759 for (i = arity-1; i >= 0; i--)
1760 if (get_irn_op(in[i]) == op_Unknown) {
1765 if (!has_unknown) res = optimize_node(res);
1766 current_ir_graph->current_block = res;
1768 IRN_VRFY_IRG(res, current_ir_graph);
1773 /* ***********************************************************************/
1774 /* Methods necessary for automatic Phi node creation */
1776 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1777 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1778 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1779 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1781 Call Graph: ( A ---> B == A "calls" B)
1783 get_value mature_immBlock
1791 get_r_value_internal |
1795 new_rd_Phi0 new_rd_Phi_in
1797 * *************************************************************************** */
1799 /** Creates a Phi node with 0 predecessors. */
1800 static INLINE ir_node *
1801 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1804 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1805 IRN_VRFY_IRG(res, irg);
/**
 * Creates a Phi node from a fixed predecessor array **in.
 * If after skipping Id/Bad/self predecessors at most one real value
 * remains, the freshly allocated Phi is freed again and that single
 * value is returned instead of a new node.
 * NOTE(review): phi0, if non-NULL, is the Phi0 placeholder this Phi will
 * replace; references to it in **in are rewired to the new node here.
 */
static INLINE ir_node *
new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
ir_node **in, int ins, ir_node *phi0) {
ir_node *res, *known;

/* Allocate a new node on the obstack. The allocation copies the in
res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

/* This loop checks whether the Phi has more than one predecessor.
If so, it is a real Phi node and we break the loop. Else the
Phi node merges the same definition on several paths and therefore
is not needed. Don't consider Bad nodes! */
for (i=0; i < ins; ++i)
in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */

/* Optimize self referencing Phis: We can't detect them yet properly, as
they still refer to the Phi0 they will replace. So replace right now. */
if (phi0 && in[i] == phi0) in[i] = res;

if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;

/* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Undo the speculative allocation: remove the node from the edge
   bookkeeping and give its memory back to the obstack. */
edges_node_deleted(res, current_ir_graph);
obstack_free (current_ir_graph->obst, res);
if (is_Phi(known)) {
/* If pred is a phi node we want to optimize it: If loops are matured in a bad
order, an enclosing Phi know may get superfluous. */
res = optimize_in_place_2(known);
exchange(known, res);
/* A undefined value, e.g., in unreachable code. */
res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
IRN_VRFY_IRG(res, irg);
/* Memory Phis in endless loops must be kept alive.
As we can't distinguish these easily we keep all of them alive. */
if ((res->op == op_Phi) && (mode == mode_M))
add_End_keepalive(get_irg_end(irg), res);
} /* new_rd_Phi_in */
1876 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1878 #if PRECISE_EXC_CONTEXT
1880 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1883 * Construct a new frag_array for node n.
1884 * Copy the content from the current graph_arr of the corresponding block:
1885 * this is the current state.
1886 * Set ProjM(n) as current memory state.
1887 * Further the last entry in frag_arr of current block points to n. This
1888 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1890 static INLINE ir_node **new_frag_arr(ir_node *n) {
1894 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1895 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1896 sizeof(ir_node *)*current_ir_graph->n_loc);
1898 /* turn off optimization before allocating Proj nodes, as res isn't
1900 opt = get_opt_optimize(); set_optimize(0);
1901 /* Here we rely on the fact that all frag ops have Memory as first result! */
1902 if (get_irn_op(n) == op_Call)
1903 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1904 else if (get_irn_op(n) == op_CopyB)
1905 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
1907 assert((pn_Quot_M == pn_DivMod_M) &&
1908 (pn_Quot_M == pn_Div_M) &&
1909 (pn_Quot_M == pn_Mod_M) &&
1910 (pn_Quot_M == pn_Load_M) &&
1911 (pn_Quot_M == pn_Store_M) &&
1912 (pn_Quot_M == pn_Alloc_M) &&
1913 (pn_Quot_M == pn_Bound_M));
1914 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1918 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1920 } /* new_frag_arr */
1923 * Returns the frag_arr from a node.
1925 static INLINE ir_node **get_frag_arr(ir_node *n) {
1926 switch (get_irn_opcode(n)) {
1928 return n->attr.call.exc.frag_arr;
1930 return n->attr.alloc.exc.frag_arr;
1932 return n->attr.load.exc.frag_arr;
1934 return n->attr.store.exc.frag_arr;
1936 return n->attr.except.frag_arr;
1938 } /* get_frag_arr */
1941 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1943 if (!frag_arr[pos]) frag_arr[pos] = val;
1944 if (frag_arr[current_ir_graph->n_loc - 1]) {
1945 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1946 assert(arr != frag_arr && "Endless recursion detected");
1947 set_frag_value(arr, pos, val);
1952 for (i = 0; i < 1000; ++i) {
1953 if (!frag_arr[pos]) {
1954 frag_arr[pos] = val;
1956 if (frag_arr[current_ir_graph->n_loc - 1]) {
1957 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1963 assert(0 && "potential endless recursion");
1965 } /* set_frag_value */
/**
 * Returns the value of variable pos as seen after the fragile op cfOp,
 * building a Phi node if a set_value happened after cfOp.
 * NOTE(review): relies on the frag_arr built by new_frag_arr; only
 * meaningful with precise exception context enabled — confirm callers.
 */
get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
frag_arr = get_frag_arr(cfOp);
res = frag_arr[pos];
if (block->attr.block.graph_arr[pos]) {
/* There was a set_value() after the cfOp and no get_value before that
set_value(). We must build a Phi node now. */
if (block->attr.block.is_matured) {
int ins = get_irn_arity(block);
/* Stack-allocated scratch array for the Phi predecessors. */
NEW_ARR_A(ir_node *, nin, ins);
res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: create a Phi0 placeholder and queue it on the
   block's link list for completion in mature_immBlock. */
res = new_rd_Phi0(current_ir_graph, block, mode);
res->attr.phi0.pos = pos;
res->link = block->link;
/* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
but this should be better: (remove comment if this works) */
/* It's a Phi, we can write this into all graph_arrs with NULL */
set_frag_value(block->attr.block.graph_arr, pos, res);
res = get_r_value_internal(block, pos, mode);
set_frag_value(block->attr.block.graph_arr, pos, res);
} /* get_r_frag_value_internal */
2003 #endif /* PRECISE_EXC_CONTEXT */
2006 * check whether a control flow cf_pred is a exception flow.
2008 * @param cf_pred the control flow node
2009 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2011 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2012 /* all projections from a raise are exceptional control flow */
2013 if (is_Raise(prev_cf_op))
2015 if (is_fragile_op(prev_cf_op)) {
2016 if (is_Proj(cf_pred) && get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2017 /* the regular control flow, NO exception */
2020 /* Hmm, exception but not a Proj? */
2021 assert(!"unexpected condition: fragile op without a proj");
2025 } /* is_exception_flow */
/**
 * Computes the predecessors for the real phi node, and then
 * allocates and returns this node. The routine called to allocate the
 * node might optimize it away and return a real value.
 * This function must be called with an in-array of proper size.
 */
phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
ir_node *prevBlock, *res, *phi0, *phi0_all;
/* If this block has no value at pos create a Phi0 and remember it
in graph_arr to break recursions.
Else we may not set graph_arr as there a later value is remembered. */
if (!block->attr.block.graph_arr[pos]) {
if (block == get_irg_start_block(current_ir_graph)) {
/* Collapsing to Bad tarvals is no good idea.
So we call a user-supplied routine here that deals with this case as
appropriate for the given language. Sorrily the only help we can give
here is the position.

Even if all variables are defined before use, it can happen that
we get to the start block, if a Cond has been replaced by a tuple
(bad, jmp).  In this case we call the function needlessly, eventually
generating an non existent error.
However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
if (default_initialize_local_variable) {
ir_node *rem = get_cur_block();
/* The callback constructs nodes, so it must run with block as the
   current block; restore the previous one afterwards. */
set_cur_block(block);
block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
/* We don't need to care about exception ops in the start block.
There are none by definition. */
return block->attr.block.graph_arr[pos];
phi0 = new_rd_Phi0(current_ir_graph, block, mode);
block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
if (get_opt_precise_exc_context()) {
/* Set graph_arr for fragile ops.  Also here we should break recursion.
We could choose a cyclic path through an cfop.  But the recursion would
break at some point. */
set_frag_value(block->attr.block.graph_arr, pos, phi0);
/* This loop goes to all predecessor blocks of the block the Phi node
is in and there finds the operands of the Phi node by calling
get_r_value_internal. */
for (i = 1; i <= ins; ++i) {
ir_node *cf_pred = block->in[i];
ir_node *prevCfOp = skip_Proj(cf_pred);
if (is_Bad(prevCfOp)) {
/* In case a Cond has been optimized we would get right to the start block
with an invalid definition. */
nin[i-1] = new_Bad();
prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
nin[i-1] = new_Bad();
/* We want to pass the Phi0 node to the constructor: this finds additional
optimization possibilities.
The Phi0 node either is allocated in this function, or it comes from
a former call to get_r_value_internal. In this case we may not yet
exchange phi0, as this is done in mature_immBlock. */
phi0_all = block->attr.block.graph_arr[pos];
if (!((get_irn_op(phi0_all) == op_Phi) &&
(get_irn_arity(phi0_all) == 0)   &&
(get_nodes_block(phi0_all) == block)))
/* After collecting all predecessors into the array nin a new Phi node
with these predecessors is created.  This constructor contains an
optimization: If all predecessors of the Phi node are identical it
returns the only operand instead of a new Phi node.  */
res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
/* In case we allocated a Phi0 node at the beginning of this procedure,
we need to exchange this Phi0 with the real Phi. */
exchange(phi0, res);
block->attr.block.graph_arr[pos] = res;
/* Don't set_frag_value as it does not overwrite.  Doesn't matter, is
only an optimization. */
/**
 * This function returns the last definition of a value.  In case
 * this value was last defined in a previous block, Phi nodes are
 * inserted.  If the part of the firm graph containing the definition
 * is not yet constructed, a dummy Phi node is returned.
 *
 * @param block   the current block
 * @param pos     the value number of the value searched
 * @param mode    the mode of this value (needed for Phi construction)
 */
get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
/* There are 4 cases to treat.

1. The block is not mature and we visit it the first time.  We can not
create a proper Phi node, therefore a Phi0, i.e., a Phi without
predecessors is returned.  This node is added to the linked list (field
"link") of the containing block to be completed when this block is
matured. (Completion will add a new Phi and turn the Phi0 into an Id

2. The value is already known in this block, graph_arr[pos] is set and we
visit the block the first time.  We can return the value without
creating any new nodes.

3. The block is mature and we visit it the first time.  A Phi node needs
to be created (phi_merge).  If the Phi is not needed, as all it's
operands are the same value reaching the block through different
paths, it's optimized away and the value itself is returned.

4. The block is mature, and we visit it the second time.  Now two
subcases are possible:
* The value was computed completely the last time we were here.  This
is the case if there is no loop.  We can return the proper value.
* The recursion that visited this node and set the flag did not
return yet.  We are computing a value in a loop and need to
break the recursion.  This case only happens if we visited
the same block with phi_merge before, which inserted a Phi0.
So we return the Phi0.

/* case 4 -- already visited. */
if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
/* As phi_merge allocates a Phi0 this value is always defined. Here
is the critical difference of the two algorithms. */
assert(block->attr.block.graph_arr[pos]);
return block->attr.block.graph_arr[pos];
/* visited the first time */
set_irn_visited(block, get_irg_visited(current_ir_graph));
/* Get the local valid value */
res = block->attr.block.graph_arr[pos];
/* case 2 -- If the value is actually computed, return it. */
if (res) { return res; };
if (block->attr.block.is_matured) { /* case 3 */
/* The Phi has the same amount of ins as the corresponding block. */
int ins = get_irn_arity(block);
/* Stack-allocated scratch array for the Phi predecessors. */
NEW_ARR_A (ir_node *, nin, ins);
/* Phi merge collects the predecessors and then creates a node. */
res = phi_merge (block, pos, mode, nin, ins);
} else {  /* case 1 */
/* The block is not mature, we don't know how many in's are needed.  A Phi
with zero predecessors is created.  Such a Phi node is called Phi0
node.  The Phi0 is then added to the list of Phi0 nodes in this block
to be matured by mature_immBlock later.
The Phi0 has to remember the pos of it's internal value.  If the real
Phi is computed, pos is used to update the array with the local
res = new_rd_Phi0(current_ir_graph, block, mode);
res->attr.phi0.pos = pos;
res->link = block->link;
/* If we get here, the frontend missed a use-before-definition error */
printf("Error: no value set.  Use of undefined variable.  Initializing to zero.\n");
assert(mode->code >= irm_F && mode->code <= irm_P);
res = new_rd_Const(NULL, current_ir_graph, block, mode,
get_mode_null(mode));
/* The local valid value is available now. */
block->attr.block.graph_arr[pos] = res;
} /* get_r_value_internal */
2241 /* ************************************************************************** */
2244 * Finalize a Block node, when all control flows are known.
2245 * Acceptable parameters are only Block nodes.
2248 mature_immBlock(ir_node *block) {
2253 assert(is_Block(block));
2254 if (!get_Block_matured(block)) {
2255 ins = ARR_LEN(block->in)-1;
2256 /* Fix block parameters */
2257 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2259 /* An array for building the Phi nodes. */
2260 NEW_ARR_A(ir_node *, nin, ins);
2262 /* Traverse a chain of Phi nodes attached to this block and mature
2264 for (n = block->link; n; n = next) {
2265 inc_irg_visited(current_ir_graph);
2267 exchange(n, phi_merge(block, n->attr.phi0.pos, n->mode, nin, ins));
2270 block->attr.block.is_matured = 1;
2272 /* Now, as the block is a finished firm node, we can optimize it.
2273 Since other nodes have been allocated since the block was created
2274 we can not free the node on the obstack. Therefore we have to call
2276 Unfortunately the optimization does not change a lot, as all allocated
2277 nodes refer to the unoptimized node.
2278 We can call _2, as global cse has no effect on blocks. */
2279 block = optimize_in_place_2(block);
2280 IRN_VRFY_IRG(block, current_ir_graph);
2282 } /* mature_immBlock */
2285 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2286 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2290 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2291 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2295 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2296 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2297 } /* new_d_Const_long */
2300 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2301 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2302 } /* new_d_Const_type */
2306 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2307 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2311 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2312 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2316 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2319 assert(arg->op == op_Cond);
2320 arg->attr.cond.kind = fragmentary;
2321 arg->attr.cond.default_proj = max_proj;
2322 res = new_Proj(arg, mode_X, max_proj);
2324 } /* new_d_defaultProj */
2327 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2328 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2332 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2333 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2334 } /* new_d_strictConv */
2337 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2338 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2342 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2343 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2353 * Allocate the frag array.
2355 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2356 if (get_opt_precise_exc_context()) {
2357 if ((current_ir_graph->phase_state == phase_building) &&
2358 (get_irn_op(res) == op) && /* Could be optimized away. */
2359 !*frag_store) /* Could be a cse where the arr is already set. */ {
2360 *frag_store = new_frag_arr(res);
2363 } /* allocate_frag_arr */
2366 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2368 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2369 #if PRECISE_EXC_CONTEXT
2370 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2377 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2379 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2380 #if PRECISE_EXC_CONTEXT
2381 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2385 } /* new_d_DivMod */
2388 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2390 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2391 #if PRECISE_EXC_CONTEXT
2392 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2399 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2401 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2402 #if PRECISE_EXC_CONTEXT
2403 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2422 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2423 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2427 new_d_Jmp(dbg_info *db) {
2428 return new_bd_Jmp(db, current_ir_graph->current_block);
2432 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2433 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2437 new_d_Cond(dbg_info *db, ir_node *c) {
2438 return new_bd_Cond(db, current_ir_graph->current_block, c);
2442 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2445 res = new_bd_Call(db, current_ir_graph->current_block,
2446 store, callee, arity, in, tp);
2447 #if PRECISE_EXC_CONTEXT
2448 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2455 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2456 return new_bd_Return(db, current_ir_graph->current_block,
2458 } /* new_d_Return */
2461 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2463 res = new_bd_Load(db, current_ir_graph->current_block,
2465 #if PRECISE_EXC_CONTEXT
2466 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2473 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2475 res = new_bd_Store(db, current_ir_graph->current_block,
2477 #if PRECISE_EXC_CONTEXT
2478 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2485 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2486 where_alloc where) {
2488 res = new_bd_Alloc(db, current_ir_graph->current_block,
2489 store, size, alloc_type, where);
2490 #if PRECISE_EXC_CONTEXT
2491 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2498 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2499 ir_node *size, ir_type *free_type, where_alloc where) {
2500 return new_bd_Free(db, current_ir_graph->current_block,
2501 store, ptr, size, free_type, where);
2505 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2506 /* GL: objptr was called frame before. Frame was a bad choice for the name
2507 as the operand could as well be a pointer to a dynamic object. */
2509 return new_bd_Sel(db, current_ir_graph->current_block,
2510 store, objptr, 0, NULL, ent);
2511 } /* new_d_simpleSel */
2514 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2515 return new_bd_Sel(db, current_ir_graph->current_block,
2516 store, objptr, n_index, index, sel);
2520 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2521 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2523 } /* new_d_SymConst_type */
2526 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
2527 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2528 value, kind, firm_unknown_type);
2529 } /* new_d_SymConst */
2532 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2533 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2539 return _new_d_Bad();
2543 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2544 return new_bd_Confirm(db, current_ir_graph->current_block,
2546 } /* new_d_Confirm */
2549 new_d_Unknown(ir_mode *m) {
2550 return new_bd_Unknown(m);
2551 } /* new_d_Unknown */
2554 new_d_CallBegin(dbg_info *db, ir_node *call) {
2555 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2556 } /* new_d_CallBegin */
2559 new_d_EndReg(dbg_info *db) {
2560 return new_bd_EndReg(db, current_ir_graph->current_block);
2561 } /* new_d_EndReg */
2564 new_d_EndExcept(dbg_info *db) {
2565 return new_bd_EndExcept(db, current_ir_graph->current_block);
2566 } /* new_d_EndExcept */
2569 new_d_Break(dbg_info *db) {
2570 return new_bd_Break(db, current_ir_graph->current_block);
2574 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2575 return new_bd_Filter(db, current_ir_graph->current_block,
2577 } /* new_d_Filter */
2580 (new_d_NoMem)(void) {
2581 return _new_d_NoMem();
2585 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2586 ir_node *ir_true, ir_mode *mode) {
2587 return new_bd_Mux(db, current_ir_graph->current_block,
2588 sel, ir_false, ir_true, mode);
2592 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2593 return new_bd_Psi(db, current_ir_graph->current_block,
2594 arity, conds, vals, mode);
2597 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2598 ir_node *dst, ir_node *src, ir_type *data_type) {
2600 res = new_bd_CopyB(db, current_ir_graph->current_block,
2601 store, dst, src, data_type);
2602 #if PRECISE_EXC_CONTEXT
2603 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2609 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2610 return new_bd_InstOf(db, current_ir_graph->current_block,
2611 store, objptr, type);
2612 } /* new_d_InstOf */
2615 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2616 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2619 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2620 ir_node *idx, ir_node *lower, ir_node *upper) {
2622 res = new_bd_Bound(db, current_ir_graph->current_block,
2623 store, idx, lower, upper);
2624 #if PRECISE_EXC_CONTEXT
2625 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2631 new_d_Pin(dbg_info *db, ir_node *node) {
2632 return new_bd_Pin(db, current_ir_graph->current_block, node);
2636 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2637 int n_outs, ir_asm_constraint *outputs,
2638 int n_clobber, ident *clobber[], ident *asm_text) {
2639 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2642 /* ********************************************************************* */
2643 /* Comfortable interface with automatic Phi node construction. */
/* (Uses also constructors of the ?? interface, except new_Block.) */
2645 /* ********************************************************************* */
2647 /* Block construction */
2648 /* immature Block without predecessors */
2650 new_d_immBlock(dbg_info *db) {
2653 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2654 /* creates a new dynamic in-array as length of in is -1 */
2655 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2656 current_ir_graph->current_block = res;
2658 /* macroblock head */
2661 res->attr.block.is_matured = 0;
2662 res->attr.block.is_dead = 0;
2663 res->attr.block.is_mb_head = 1;
2664 res->attr.block.has_label = 0;
2665 res->attr.block.irg = current_ir_graph;
2666 res->attr.block.backedge = NULL;
2667 res->attr.block.in_cg = NULL;
2668 res->attr.block.cg_backedge = NULL;
2669 res->attr.block.extblk = NULL;
2670 res->attr.block.region = NULL;
2671 res->attr.block.mb_depth = 0;
2672 res->attr.block.label = 0;
2674 set_Block_block_visited(res, 0);
2676 /* Create and initialize array for Phi-node construction. */
2677 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2678 current_ir_graph->n_loc);
2679 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2681 /* Immature block may not be optimized! */
2682 IRN_VRFY_IRG(res, current_ir_graph);
2685 } /* new_d_immBlock */
2688 new_immBlock(void) {
2689 return new_d_immBlock(NULL);
2690 } /* new_immBlock */
2692 /* immature PartBlock with its predecessors */
2694 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2695 ir_node *res = new_d_immBlock(db);
2696 ir_node *blk = get_nodes_block(pred_jmp);
2698 res->in[0] = blk->in[0];
2699 add_immBlock_pred(res, pred_jmp);
2701 res->attr.block.is_mb_head = 0;
2702 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2705 } /* new_d_immPartBlock */
2708 new_immPartBlock(ir_node *pred_jmp) {
2709 return new_d_immPartBlock(NULL, pred_jmp);
2710 } /* new_immPartBlock */
2712 /* add an edge to a jmp/control flow node */
2714 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2715 int n = ARR_LEN(block->in) - 1;
2717 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2718 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2719 assert(jmp != NULL);
2721 ARR_APP1(ir_node *, block->in, jmp);
2723 hook_set_irn_n(block, n, jmp, NULL);
2724 } /* add_immBlock_pred */
2726 /* changing the current block */
2728 set_cur_block(ir_node *target) {
2729 current_ir_graph->current_block = target;
2730 } /* set_cur_block */
2732 /* ************************ */
2733 /* parameter administration */
2735 /* get a value from the parameter array from the current block by its index */
2737 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2738 ir_graph *irg = current_ir_graph;
2739 assert(get_irg_phase_state(irg) == phase_building);
2740 inc_irg_visited(irg);
2743 return get_r_value_internal(irg->current_block, pos + 1, mode);
2746 /* get a value from the parameter array from the current block by its index */
2748 get_value(int pos, ir_mode *mode) {
2749 return get_d_value(NULL, pos, mode);
2752 /* set a value at position pos in the parameter array from the current block */
2754 set_value(int pos, ir_node *value) {
2755 ir_graph *irg = current_ir_graph;
2756 assert(get_irg_phase_state(irg) == phase_building);
2757 assert(pos+1 < irg->n_loc);
2758 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2761 /* Find the value number for a node in the current block.*/
2763 find_value(ir_node *value) {
2765 ir_node *bl = current_ir_graph->current_block;
2767 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2768 if (bl->attr.block.graph_arr[i] == value)
2773 /* get the current store */
2776 ir_graph *irg = current_ir_graph;
2778 assert(get_irg_phase_state(irg) == phase_building);
2779 /* GL: one could call get_value instead */
2780 inc_irg_visited(irg);
2781 return get_r_value_internal(irg->current_block, 0, mode_M);
2784 /* set the current store: handles automatic Sync construction for Load nodes */
2786 set_store(ir_node *store) {
2787 ir_node *load, *pload, *pred, *in[2];
2789 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2790 /* Beware: due to dead code elimination, a store might become a Bad node even in
2791 the construction phase. */
2792 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2794 if (get_opt_auto_create_sync()) {
2795 /* handle non-volatile Load nodes by automatically creating Sync's */
2796 load = skip_Proj(store);
2797 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2798 pred = get_Load_mem(load);
2800 if (is_Sync(pred)) {
2801 /* a Load after a Sync: move it up */
2802 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2804 set_Load_mem(load, get_memop_mem(mem));
2805 add_Sync_pred(pred, store);
2808 pload = skip_Proj(pred);
2809 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2810 /* a Load after a Load: create a new Sync */
2811 set_Load_mem(load, get_Load_mem(pload));
2815 store = new_Sync(2, in);
2820 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2824 keep_alive(ir_node *ka) {
2825 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2828 /* --- Useful access routines --- */
2829 /* Returns the current block of the current graph. To set the current
2830 block use set_cur_block. */
2831 ir_node *get_cur_block(void) {
2832 return get_irg_current_block(current_ir_graph);
2833 } /* get_cur_block */
2835 /* Returns the frame type of the current graph */
2836 ir_type *get_cur_frame_type(void) {
2837 return get_irg_frame_type(current_ir_graph);
2838 } /* get_cur_frame_type */
2841 /* ********************************************************************* */
2844 /* call once for each run of the library */
2846 init_cons(uninitialized_local_variable_func_t *func) {
2847 default_initialize_local_variable = func;
2851 irp_finalize_cons(void) {
2853 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2854 irg_finalize_cons(get_irp_irg(i));
2856 irp->phase_state = phase_high;
2857 } /* irp_finalize_cons */
2860 ir_node *new_Block(int arity, ir_node **in) {
2861 return new_d_Block(NULL, arity, in);
2863 ir_node *new_Start(void) {
2864 return new_d_Start(NULL);
2866 ir_node *new_End(void) {
2867 return new_d_End(NULL);
2869 ir_node *new_Jmp(void) {
2870 return new_d_Jmp(NULL);
2872 ir_node *new_IJmp(ir_node *tgt) {
2873 return new_d_IJmp(NULL, tgt);
2875 ir_node *new_Cond(ir_node *c) {
2876 return new_d_Cond(NULL, c);
2878 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2879 return new_d_Return(NULL, store, arity, in);
2881 ir_node *new_Const(ir_mode *mode, tarval *con) {
2882 return new_d_Const(NULL, mode, con);
2885 ir_node *new_Const_long(ir_mode *mode, long value) {
2886 return new_d_Const_long(NULL, mode, value);
2889 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2890 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2893 ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
2894 return new_d_SymConst_type(NULL, value, kind, type);
2896 ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
2897 return new_d_SymConst(NULL, value, kind);
2899 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2900 return new_d_simpleSel(NULL, store, objptr, ent);
2902 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2904 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2906 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2908 return new_d_Call(NULL, store, callee, arity, in, tp);
2910 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2911 return new_d_Add(NULL, op1, op2, mode);
2913 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2914 return new_d_Sub(NULL, op1, op2, mode);
2916 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2917 return new_d_Minus(NULL, op, mode);
2919 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2920 return new_d_Mul(NULL, op1, op2, mode);
2922 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2923 return new_d_Mulh(NULL, op1, op2, mode);
2925 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2926 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2928 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2929 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2931 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2932 return new_d_Div(NULL, memop, op1, op2, mode, state);
2934 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2935 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2937 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2938 return new_d_Abs(NULL, op, mode);
2940 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2941 return new_d_And(NULL, op1, op2, mode);
2943 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
2944 return new_d_Or(NULL, op1, op2, mode);
2946 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
2947 return new_d_Eor(NULL, op1, op2, mode);
2949 ir_node *new_Not(ir_node *op, ir_mode *mode) {
2950 return new_d_Not(NULL, op, mode);
2952 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
2953 return new_d_Shl(NULL, op, k, mode);
2955 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
2956 return new_d_Shr(NULL, op, k, mode);
2958 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
2959 return new_d_Shrs(NULL, op, k, mode);
2961 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
2962 return new_d_Rot(NULL, op, k, mode);
2964 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
2965 return new_d_Carry(NULL, op1, op2, mode);
2967 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
2968 return new_d_Borrow(NULL, op1, op2, mode);
2970 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
2971 return new_d_Cmp(NULL, op1, op2);
2973 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
2974 return new_d_Conv(NULL, op, mode);
2976 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
2977 return new_d_strictConv(NULL, op, mode);
2979 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
2980 return new_d_Cast(NULL, op, to_tp);
2982 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
2983 return new_d_Phi(NULL, arity, in, mode);
2985 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
2986 return new_d_Load(NULL, store, addr, mode);
2988 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
2989 return new_d_Store(NULL, store, addr, val);
2991 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
2992 where_alloc where) {
2993 return new_d_Alloc(NULL, store, size, alloc_type, where);
2995 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
2996 ir_type *free_type, where_alloc where) {
2997 return new_d_Free(NULL, store, ptr, size, free_type, where);
2999 ir_node *new_Sync(int arity, ir_node *in[]) {
3000 return new_d_Sync(NULL, arity, in);
3002 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3003 return new_d_Proj(NULL, arg, mode, proj);
3005 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3006 return new_d_defaultProj(NULL, arg, max_proj);
3008 ir_node *new_Tuple(int arity, ir_node **in) {
3009 return new_d_Tuple(NULL, arity, in);
3011 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3012 return new_d_Id(NULL, val, mode);
3014 ir_node *new_Bad(void) {
3017 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3018 return new_d_Confirm(NULL, val, bound, cmp);
3020 ir_node *new_Unknown(ir_mode *m) {
3021 return new_d_Unknown(m);
3023 ir_node *new_CallBegin(ir_node *callee) {
3024 return new_d_CallBegin(NULL, callee);
3026 ir_node *new_EndReg(void) {
3027 return new_d_EndReg(NULL);
3029 ir_node *new_EndExcept(void) {
3030 return new_d_EndExcept(NULL);
3032 ir_node *new_Break(void) {
3033 return new_d_Break(NULL);
3035 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3036 return new_d_Filter(NULL, arg, mode, proj);
3038 ir_node *new_NoMem(void) {
3039 return new_d_NoMem();
3041 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3042 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3044 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3045 return new_d_Psi(NULL, arity, conds, vals, mode);
3047 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3048 return new_d_CopyB(NULL, store, dst, src, data_type);
3050 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3051 return new_d_InstOf(NULL, store, objptr, ent);
3053 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3054 return new_d_Raise(NULL, store, obj);
3056 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3057 return new_d_Bound(NULL, store, idx, lower, upper);
3059 ir_node *new_Pin(ir_node *node) {
3060 return new_d_Pin(NULL, node);
3062 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3063 int n_outs, ir_asm_constraint *outputs,
3064 int n_clobber, ident *clobber[], ident *asm_text) {
3065 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3068 /* create a new anchor node */
3069 ir_node *new_Anchor(ir_graph *irg) {
3070 ir_node *in[anchor_last];
3071 memset(in, 0, sizeof(in));
3072 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);