2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52    Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
/* NOTE(review): both the no-op and the checking definition of IRN_VRFY_IRG
   appear below; the #ifdef/#else/#endif framing that selects between them
   is not visible in this chunk -- confirm against the full file. */
60 /* when we need verifying */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
68 * Language dependent variable initialization callback.
/* Callback used to produce a value for a local variable that is read
   before being written; NULL until a frontend installs one. */
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* The NEW_BD_* macros generate the "block, debug" level constructors:
   they build a node in current_ir_graph, run the local optimizer on it and
   verify the result.  Parts of each expansion (locals, in[] setup, return)
   are not visible in this chunk. */
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for a divop: takes a memory operand, builds a
104    mode_T node and records pin state and result mode in the divmod attr */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = state; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
/* The NEW_RD_* macros generate the "raw, debug" level constructors: they
   temporarily switch current_ir_graph to the caller-supplied irg, delegate
   to the matching new_bd_* constructor and restore the previous graph.
   The NEW_D_* macros generate the "debug" level constructors that build in
   current_ir_graph->current_block. */
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for a divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
161 current_ir_graph = rem; \
165 /* creates a d constructor for a binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Cannot be used with automatic
183 * Phi node construction.
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
/* A Block has no block of its own: the block argument is NULL. */
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
/* Initialize the block attributes: a fresh block is alive, its own
   macroblock head, unlabeled, and has no interprocedural (in_cg) edges. */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.has_label = 0;
198 res->attr.block.irg = irg;
199 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
200 res->attr.block.in_cg = NULL;
201 res->attr.block.cg_backedge = NULL;
202 res->attr.block.extblk = NULL;
203 res->attr.block.mb_depth = 0;
204 res->attr.block.label = 0;
/* All predecessors were handed in, so the block is born mature. */
206 set_Block_matured(res, 1);
207 set_Block_block_visited(res, 0);
209 IRN_VRFY_IRG(res, irg);
/* Builds a Start node (mode_T, no predecessors) in the given block. */
214 new_bd_Start(dbg_info *db, ir_node *block) {
216 ir_graph *irg = current_ir_graph;
218 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
220 IRN_VRFY_IRG(res, irg);
/* Builds an End node; the -1 arity requests a dynamic in-array so that
   keep-alive edges can be appended later (see add_End_keepalive use in
   new_bd_Phi below). */
225 new_bd_End(dbg_info *db, ir_node *block) {
227 ir_graph *irg = current_ir_graph;
229 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
231 IRN_VRFY_IRG(res, irg);
236 * Creates a Phi node with all predecessors. Calling this constructor
237 * is only allowed if the corresponding block is mature.
240 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
242 ir_graph *irg = current_ir_graph;
246 /* Don't assert that block matured: the use of this constructor is strongly
/* Once the block is mature its arity is fixed, so the Phi must match it. */
248 if (get_Block_matured(block))
249 assert(get_irn_arity(block) == arity);
251 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
253 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the operands for Unknown nodes; their presence suppresses the
   optimize_node() call below (has_unknown is set in lines not visible in
   this chunk). */
255 for (i = arity - 1; i >= 0; --i)
256 if (get_irn_op(in[i]) == op_Unknown) {
261 if (!has_unknown) res = optimize_node(res);
262 IRN_VRFY_IRG(res, irg);
264 /* Memory Phis in endless loops must be kept alive.
265 As we can't distinguish these easily we keep all of them alive. */
266 if ((res->op == op_Phi) && (mode == mode_M))
267 add_End_keepalive(get_irg_end(irg), res);
/* Builds a Const node with the given tarval and type.  Note that the node
   is always placed in the start block of the graph; the block parameter is
   accepted but not used here. */
272 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
274 ir_graph *irg = current_ir_graph;
277 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
278 res->attr.con.tv = con;
279 set_Const_type(res, tp); /* Call method because of complex assertion. */
280 res = optimize_node (res);
281 assert(get_Const_type(res) == tp);
282 IRN_VRFY_IRG(res, irg);
285 } /* new_bd_Const_type */
/* NOTE(review): unlike the other bd-level constructors, the two Const
   wrappers below delegate to the rd-level constructors instead of calling
   new_bd_Const_type directly -- confirm this indirection is intentional. */
288 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
289 ir_graph *irg = current_ir_graph;
291 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience wrapper: builds a Const from a plain long value. */
295 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
296 ir_graph *irg = current_ir_graph;
298 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
299 } /* new_bd_Const_long */
/* Builds an Id node, a transparent copy of val. */
302 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
304 ir_graph *irg = current_ir_graph;
306 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
307 res = optimize_node(res);
308 IRN_VRFY_IRG(res, irg);
/* Builds a Proj node selecting result number `proj` of the tuple node
   arg. */
313 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
316 ir_graph *irg = current_ir_graph;
318 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
319 res->attr.proj = proj;
/* A Proj must have a valid predecessor placed in a block. */
322 assert(get_Proj_pred(res));
323 assert(get_nodes_block(get_Proj_pred(res)));
325 res = optimize_node(res);
327 IRN_VRFY_IRG(res, irg);
/* Builds the default Proj of a Cond: marks the Cond as fragmentary and
   records max_proj as its default projection number. */
332 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
335 ir_graph *irg = current_ir_graph;
337 assert(arg->op == op_Cond);
338 arg->attr.cond.kind = fragmentary;
339 arg->attr.cond.default_proj = max_proj;
340 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
342 } /* new_bd_defaultProj */
/* Builds a Conv of op to mode; strict_flag marks a strict conversion. */
345 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
347 ir_graph *irg = current_ir_graph;
349 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
350 res->attr.conv.strict = strict_flag;
351 res = optimize_node(res);
352 IRN_VRFY_IRG(res, irg);
/* Builds a Cast: keeps op's mode but retypes it to to_tp, which must be
   an atomic type. */
357 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
359 ir_graph *irg = current_ir_graph;
361 assert(is_atomic_type(to_tp));
363 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
364 res->attr.cast.totype = to_tp;
365 res = optimize_node(res);
366 IRN_VRFY_IRG(res, irg);
/* Builds a Tuple grouping arity values into a single mode_T node. */
371 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
373 ir_graph *irg = current_ir_graph;
375 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
376 res = optimize_node (res);
377 IRN_VRFY_IRG(res, irg);
/* Builds a Cmp of op1 and op2 (mode_T; individual relations are selected
   via Proj). */
403 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
406 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
410 res = optimize_node(res);
411 IRN_VRFY_IRG(res, irg);
/* Builds an unconditional Jmp leaving the given block. */
416 new_bd_Jmp(dbg_info *db, ir_node *block) {
418 ir_graph *irg = current_ir_graph;
420 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
421 res = optimize_node(res);
422 IRN_VRFY_IRG(res, irg);
/* Builds an IJmp, a computed jump to the address value tgt. */
427 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
429 ir_graph *irg = current_ir_graph;
431 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
432 res = optimize_node(res);
433 IRN_VRFY_IRG(res, irg);
/* Builds a Cond on selector c.  It starts out as a dense jump table with
   default projection 0 and no branch prediction attached. */
438 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
443 res->attr.cond.kind = dense;
444 res->attr.cond.default_proj = 0;
445 res->attr.cond.pred = COND_JMP_PRED_NONE;
446 res = optimize_node(res);
447 IRN_VRFY_IRG(res, irg);
/* Builds a Call.  The real in-array r_in is [store, callee, args...]
   (hence the copy into &r_in[2]); tp is the method type of the callee. */
452 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
453 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
457 ir_graph *irg = current_ir_graph;
460 NEW_ARR_A(ir_node *, r_in, r_arity);
463 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
465 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
467 assert((get_unknown_type() == tp) || is_Method_type(tp));
468 set_Call_type(res, tp);
469 res->attr.call.exc.pin_state = op_pin_state_pinned;
470 res->attr.call.callee_arr = NULL;
471 res = optimize_node(res);
472 IRN_VRFY_IRG(res, irg);
/* Builds a Return.  The real in-array r_in is [store, results...]. */
477 new_bd_Return(dbg_info *db, ir_node *block,
478 ir_node *store, int arity, ir_node **in) {
482 ir_graph *irg = current_ir_graph;
485 NEW_ARR_A (ir_node *, r_in, r_arity);
487 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
488 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
489 res = optimize_node(res);
490 IRN_VRFY_IRG(res, irg);
492 } /* new_bd_Return */
/* Builds a Load of mode from address adr.  Defaults: pinned,
   non-volatile, aligned. */
495 new_bd_Load(dbg_info *db, ir_node *block,
496 ir_node *store, ir_node *adr, ir_mode *mode) {
499 ir_graph *irg = current_ir_graph;
503 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
504 res->attr.load.exc.pin_state = op_pin_state_pinned;
505 res->attr.load.load_mode = mode;
506 res->attr.load.volatility = volatility_non_volatile;
507 res->attr.load.aligned = align_is_aligned;
508 res = optimize_node(res);
509 IRN_VRFY_IRG(res, irg);
/* Builds a Store of val to address adr.  Defaults: pinned, non-volatile,
   aligned. */
514 new_bd_Store(dbg_info *db, ir_node *block,
515 ir_node *store, ir_node *adr, ir_node *val) {
518 ir_graph *irg = current_ir_graph;
523 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
524 res->attr.store.exc.pin_state = op_pin_state_pinned;
525 res->attr.store.volatility = volatility_non_volatile;
526 res->attr.store.aligned = align_is_aligned;
527 res = optimize_node(res);
528 IRN_VRFY_IRG(res, irg);
/* Builds an Alloc of alloc_type; `where` selects stack or heap
   allocation.  Pinned because it may raise an exception. */
533 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
534 ir_node *size, ir_type *alloc_type, where_alloc where) {
537 ir_graph *irg = current_ir_graph;
541 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
542 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
543 res->attr.alloc.where = where;
544 res->attr.alloc.type = alloc_type;
545 res = optimize_node(res);
546 IRN_VRFY_IRG(res, irg);
/* Builds a Free, the counterpart of Alloc; result is the memory (mode_M). */
551 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
552 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
555 ir_graph *irg = current_ir_graph;
560 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
561 res->attr.free.where = where;
562 res->attr.free.type = free_type;
563 res = optimize_node(res);
564 IRN_VRFY_IRG(res, irg);
/* Builds a Sel addressing entity ent relative to objptr.  The real
   in-array r_in is [store, objptr, indexes...].  Method entities yield a
   mode_P_code result, data entities mode_P_data. */
569 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
570 int arity, ir_node **in, ir_entity *ent) {
574 ir_graph *irg = current_ir_graph;
575 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
577 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
580 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
583 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
585 * Sel's can select functions which should be of mode mode_P_code.
587 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
588 res->attr.sel.ent = ent;
589 res = optimize_node(res);
590 IRN_VRFY_IRG(res, irg);
/* Builds a SymConst of the given kind; the address kinds get a pointer
   mode, other kinds keep the mode chosen on lines not visible here. */
595 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
596 symconst_kind symkind, ir_type *tp) {
599 ir_graph *irg = current_ir_graph;
601 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
602 mode = mode_P_data; /* FIXME: can be mode_P_code */
606 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
608 res->attr.symc.num = symkind;
609 res->attr.symc.sym = value;
610 res->attr.symc.tp = tp;
612 res = optimize_node(res);
613 IRN_VRFY_IRG(res, irg);
615 } /* new_bd_SymConst_type */
/* Builds an empty Sync (dynamic arity -1); predecessors are appended
   afterwards via add_Sync_pred, see new_rd_Sync. */
618 new_bd_Sync(dbg_info *db, ir_node *block) {
620 ir_graph *irg = current_ir_graph;
622 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
623 /* no need to call optimize node here, Sync are always created with no predecessors */
624 IRN_VRFY_IRG(res, irg);
/* Builds a Confirm asserting that the relation `val cmp bound` holds on
   this control-flow path; result has val's mode. */
629 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
630 ir_node *in[2], *res;
631 ir_graph *irg = current_ir_graph;
635 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
636 res->attr.confirm.cmp = cmp;
637 res = optimize_node(res);
638 IRN_VRFY_IRG(res, irg);
640 } /* new_bd_Confirm */
/* Builds an Unknown of mode m; like Const it lives in the start block and
   carries no debug info. */
643 new_bd_Unknown(ir_mode *m) {
645 ir_graph *irg = current_ir_graph;
647 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
648 res = optimize_node(res);
650 } /* new_bd_Unknown */
/* Builds a CallBegin (interprocedural view); its single operand is the
   callee pointer of the associated Call, which is also recorded in the
   attribute. */
653 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
656 ir_graph *irg = current_ir_graph;
658 in[0] = get_Call_ptr(call);
659 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
660 /* res->attr.callbegin.irg = irg; */
661 res->attr.callbegin.call = call;
662 res = optimize_node(res);
663 IRN_VRFY_IRG(res, irg);
665 } /* new_bd_CallBegin */
/* Builds the graph's EndReg node (dynamic arity) and registers it with
   the graph. */
668 new_bd_EndReg(dbg_info *db, ir_node *block) {
670 ir_graph *irg = current_ir_graph;
672 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
673 set_irg_end_reg(irg, res);
674 IRN_VRFY_IRG(res, irg);
676 } /* new_bd_EndReg */
/* Builds the graph's EndExcept node (dynamic arity) and registers it. */
679 new_bd_EndExcept(dbg_info *db, ir_node *block) {
681 ir_graph *irg = current_ir_graph;
683 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
684 set_irg_end_except(irg, res);
685 IRN_VRFY_IRG (res, irg);
687 } /* new_bd_EndExcept */
/* Builds a Break node (mode_X, no predecessors). */
690 new_bd_Break(dbg_info *db, ir_node *block) {
692 ir_graph *irg = current_ir_graph;
694 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
695 res = optimize_node(res);
696 IRN_VRFY_IRG(res, irg);
/* Builds a Filter selecting result `proj` of arg; the interprocedural
   in_cg and backedge arrays start out empty. */
701 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
704 ir_graph *irg = current_ir_graph;
706 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
707 res->attr.filter.proj = proj;
708 res->attr.filter.in_cg = NULL;
709 res->attr.filter.backedge = NULL;
712 assert(get_Proj_pred(res));
713 assert(get_nodes_block(get_Proj_pred(res)));
715 res = optimize_node(res);
716 IRN_VRFY_IRG(res, irg);
718 } /* new_bd_Filter */
/* Builds a Mux; operands are [sel, ir_false, ir_true]. */
721 new_bd_Mux(dbg_info *db, ir_node *block,
722 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
725 ir_graph *irg = current_ir_graph;
731 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
734 res = optimize_node(res);
735 IRN_VRFY_IRG(res, irg);
/* Builds a Psi from arity (cond, val) pairs into a 2*arity+1 operand
   node.  Visible code stores vals[i] at in[2*i+1]; the cond slots and the
   final (presumably default) operand are set on lines not visible here --
   confirm against the full file. */
740 new_bd_Psi(dbg_info *db, ir_node *block,
741 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
744 ir_graph *irg = current_ir_graph;
747 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
749 for (i = 0; i < arity; ++i) {
751 in[2 * i + 1] = vals[i];
755 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
758 res = optimize_node(res);
759 IRN_VRFY_IRG(res, irg);
/* Builds a CopyB copying an object of data_type from src to dst; pinned
   because it may raise an exception. */
764 new_bd_CopyB(dbg_info *db, ir_node *block,
765 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
768 ir_graph *irg = current_ir_graph;
774 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
776 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
777 res->attr.copyb.data_type = data_type;
778 res = optimize_node(res);
779 IRN_VRFY_IRG(res, irg);
/* Builds an InstOf, the dynamic type test of objptr against `type`. */
784 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
785 ir_node *objptr, ir_type *type) {
788 ir_graph *irg = current_ir_graph;
/* BUG FIX: this constructor previously passed op_Sel here (copy-paste
   from new_bd_Sel).  That contradicts the instof attribute written on the
   next line -- the node would carry op_Sel while its sel attribute union
   member is clobbered with instof data.  Build a real InstOf node. */
792 res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
793 res->attr.instof.type = type;
794 res = optimize_node(res);
795 IRN_VRFY_IRG(res, irg);
797 } /* new_bd_InstOf */
/* Builds a Raise throwing the exception object obj. */
800 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
803 ir_graph *irg = current_ir_graph;
807 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
808 res = optimize_node(res);
809 IRN_VRFY_IRG(res, irg);
/* Builds a Bound node -- presumably range-checks idx against
   [lower, upper); confirm exact semantics in the node docs.  Pinned
   because it may raise. */
814 new_bd_Bound(dbg_info *db, ir_node *block,
815 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
818 ir_graph *irg = current_ir_graph;
824 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
825 res->attr.bound.exc.pin_state = op_pin_state_pinned;
826 res = optimize_node(res);
827 IRN_VRFY_IRG(res, irg);
/* Builds a Pin for node; the result has node's mode. */
832 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
834 ir_graph *irg = current_ir_graph;
836 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
837 res = optimize_node(res);
838 IRN_VRFY_IRG(res, irg);
/* Builds an ASM node.  The constraint arrays are copied onto the graph's
   obstack so the caller's buffers need not outlive the node.
   NOTE(review): the clobber array is allocated but no copy of clobber[]
   into it is visible in this chunk -- confirm against the full file. */
843 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
844 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
846 ir_graph *irg = current_ir_graph;
849 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
850 res->attr.assem.pin_state = op_pin_state_pinned;
851 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
852 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
853 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
854 res->attr.assem.asm_text = asm_text;
856 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
857 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
859 res = optimize_node(res);
860 IRN_VRFY_IRG(res, irg);
864 /* --------------------------------------------- */
865 /* private interfaces, for professional use only */
866 /* --------------------------------------------- */
868 /* Constructs a Block with a fixed number of predecessors.
869 Does not set current_block. Cannot be used with automatic
870 Phi node construction. */
/* The new_rd_* constructors below all follow the same pattern: save
   current_ir_graph, switch it to the caller-supplied irg, delegate to the
   matching new_bd_* worker, then restore the previous graph. */
872 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
873 ir_graph *rem = current_ir_graph;
876 current_ir_graph = irg;
877 res = new_bd_Block(db, arity, in);
878 current_ir_graph = rem;
884 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
885 ir_graph *rem = current_ir_graph;
888 current_ir_graph = irg;
889 res = new_bd_Start(db, block);
890 current_ir_graph = rem;
896 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
898 ir_graph *rem = current_ir_graph;
900 current_ir_graph = irg;
901 res = new_bd_End(db, block);
902 current_ir_graph = rem;
907 /* Creates a Phi node with all predecessors. Calling this constructor
908 is only allowed if the corresponding block is mature. */
910 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
912 ir_graph *rem = current_ir_graph;
914 current_ir_graph = irg;
915 res = new_bd_Phi(db, block,arity, in, mode);
916 current_ir_graph = rem;
/* Graph-switching wrapper for new_bd_Const_type. */
922 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
924 ir_graph *rem = current_ir_graph;
926 current_ir_graph = irg;
927 res = new_bd_Const_type(db, block, mode, con, tp);
928 current_ir_graph = rem;
931 } /* new_rd_Const_type */
/* Like new_rd_Const_type, with the default (unknown) type. */
934 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
936 ir_graph *rem = current_ir_graph;
938 current_ir_graph = irg;
939 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
940 current_ir_graph = rem;
/* Convenience: Const from a plain long value. */
946 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
947 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
948 } /* new_rd_Const_long */
951 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
953 ir_graph *rem = current_ir_graph;
955 current_ir_graph = irg;
956 res = new_bd_Id(db, block, val, mode);
957 current_ir_graph = rem;
963 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
966 ir_graph *rem = current_ir_graph;
968 current_ir_graph = irg;
969 res = new_bd_Proj(db, block, arg, mode, proj);
970 current_ir_graph = rem;
976 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
979 ir_graph *rem = current_ir_graph;
981 current_ir_graph = irg;
982 res = new_bd_defaultProj(db, block, arg, max_proj);
983 current_ir_graph = rem;
986 } /* new_rd_defaultProj */
/* Conv wrapper; always builds a non-strict Conv (strict_flag = 0). */
989 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
991 ir_graph *rem = current_ir_graph;
993 current_ir_graph = irg;
994 res = new_bd_Conv(db, block, op, mode, 0);
995 current_ir_graph = rem;
1001 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1003 ir_graph *rem = current_ir_graph;
1005 current_ir_graph = irg;
1006 res = new_bd_Cast(db, block, op, to_tp);
1007 current_ir_graph = rem;
1013 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1015 ir_graph *rem = current_ir_graph;
1017 current_ir_graph = irg;
1018 res = new_bd_Tuple(db, block, arity, in);
1019 current_ir_graph = rem;
1022 } /* new_rd_Tuple */
/* Instantiate rd-level constructors from the macros defined above (only
   these two instantiations are visible in this chunk; the full list of
   binop/unop instantiations is not). */
1030 NEW_RD_DIVOP(DivMod)
1043 NEW_RD_BINOP(Borrow)
/* Graph-switching wrappers, same pattern as above. */
1046 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1047 ir_node *op1, ir_node *op2) {
1049 ir_graph *rem = current_ir_graph;
1051 current_ir_graph = irg;
1052 res = new_bd_Cmp(db, block, op1, op2);
1053 current_ir_graph = rem;
1059 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1061 ir_graph *rem = current_ir_graph;
1063 current_ir_graph = irg;
1064 res = new_bd_Jmp(db, block);
1065 current_ir_graph = rem;
1071 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1073 ir_graph *rem = current_ir_graph;
1075 current_ir_graph = irg;
1076 res = new_bd_IJmp(db, block, tgt);
1077 current_ir_graph = rem;
1083 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1085 ir_graph *rem = current_ir_graph;
1087 current_ir_graph = irg;
1088 res = new_bd_Cond(db, block, c);
1089 current_ir_graph = rem;
/* Graph-switching wrappers for the memory-operation constructors. */
1095 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1096 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1098 ir_graph *rem = current_ir_graph;
1100 current_ir_graph = irg;
1101 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1102 current_ir_graph = rem;
1108 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1109 ir_node *store, int arity, ir_node **in) {
1111 ir_graph *rem = current_ir_graph;
1113 current_ir_graph = irg;
1114 res = new_bd_Return(db, block, store, arity, in);
1115 current_ir_graph = rem;
1118 } /* new_rd_Return */
1121 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1122 ir_node *store, ir_node *adr, ir_mode *mode) {
1124 ir_graph *rem = current_ir_graph;
1126 current_ir_graph = irg;
1127 res = new_bd_Load(db, block, store, adr, mode);
1128 current_ir_graph = rem;
1134 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1135 ir_node *store, ir_node *adr, ir_node *val) {
1137 ir_graph *rem = current_ir_graph;
1139 current_ir_graph = irg;
1140 res = new_bd_Store(db, block, store, adr, val);
1141 current_ir_graph = rem;
1144 } /* new_rd_Store */
1147 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1148 ir_node *size, ir_type *alloc_type, where_alloc where) {
1150 ir_graph *rem = current_ir_graph;
1152 current_ir_graph = irg;
1153 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1154 current_ir_graph = rem;
1157 } /* new_rd_Alloc */
1160 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1161 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1163 ir_graph *rem = current_ir_graph;
1165 current_ir_graph = irg;
1166 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1167 current_ir_graph = rem;
/* simpleSel: a Sel without index operands (arity 0, in = NULL). */
1173 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1174 ir_node *store, ir_node *objptr, ir_entity *ent) {
1176 ir_graph *rem = current_ir_graph;
1178 current_ir_graph = irg;
1179 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1180 current_ir_graph = rem;
1183 } /* new_rd_simpleSel */
1186 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1187 int arity, ir_node **in, ir_entity *ent) {
1189 ir_graph *rem = current_ir_graph;
1191 current_ir_graph = irg;
1192 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1193 current_ir_graph = rem;
/* Graph-switching wrapper for new_bd_SymConst_type. */
1199 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1200 symconst_kind symkind, ir_type *tp) {
1202 ir_graph *rem = current_ir_graph;
1204 current_ir_graph = irg;
1205 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1206 current_ir_graph = rem;
1209 } /* new_rd_SymConst_type */
/* SymConst with the default (unknown) type. */
1212 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1213 symconst_kind symkind) {
1214 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1216 } /* new_rd_SymConst */
/* The helpers below pack their argument into the symconst_symbol union,
   pick the matching symconst_kind and place the node in the start block. */
1218 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1219 symconst_symbol sym;
1220 sym.entity_p = symbol;
1221 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1222 } /* new_rd_SymConst_addr_ent */
1224 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1225 symconst_symbol sym;
1226 sym.entity_p = symbol;
1227 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1228 } /* new_rd_SymConst_ofs_ent */
1230 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1231 symconst_symbol sym;
1232 sym.ident_p = symbol;
1233 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1234 } /* new_rd_SymConst_addr_name */
1236 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1237 symconst_symbol sym;
1238 sym.type_p = symbol;
1239 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1240 } /* new_rd_SymConst_type_tag */
1242 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1243 symconst_symbol sym;
1244 sym.type_p = symbol;
1245 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1246 } /* new_rd_SymConst_size */
1248 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1249 symconst_symbol sym;
1250 sym.type_p = symbol;
1251 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1252 } /* new_rd_SymConst_align */
/* Sync wrapper: builds an empty Sync, then appends the arity given
   predecessors one by one via add_Sync_pred. */
1255 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1257 ir_graph *rem = current_ir_graph;
1260 current_ir_graph = irg;
1261 res = new_bd_Sync(db, block);
1262 current_ir_graph = rem;
1264 for (i = 0; i < arity; ++i)
1265 add_Sync_pred(res, in[i]);
/* There is a single Bad node per graph; just return it. */
1271 new_rd_Bad(ir_graph *irg) {
1272 return get_irg_bad(irg);
1276 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1278 ir_graph *rem = current_ir_graph;
1280 current_ir_graph = irg;
1281 res = new_bd_Confirm(db, block, val, bound, cmp);
1282 current_ir_graph = rem;
1285 } /* new_rd_Confirm */
1288 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1290 ir_graph *rem = current_ir_graph;
1292 current_ir_graph = irg;
1293 res = new_bd_Unknown(m);
1294 current_ir_graph = rem;
1297 } /* new_rd_Unknown */
1300 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1302 ir_graph *rem = current_ir_graph;
1304 current_ir_graph = irg;
1305 res = new_bd_CallBegin(db, block, call);
1306 current_ir_graph = rem;
1309 } /* new_rd_CallBegin */
/* EndReg/EndExcept build directly with the explicit irg (new_ir_node
   takes the graph as a parameter), so no current_ir_graph switch is
   needed here, unlike the other rd-level constructors. */
1312 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1315 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1316 set_irg_end_reg(irg, res);
1317 IRN_VRFY_IRG(res, irg);
1319 } /* new_rd_EndReg */
1322 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1325 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1326 set_irg_end_except(irg, res);
1327 IRN_VRFY_IRG (res, irg);
1329 } /* new_rd_EndExcept */
1332 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1334 ir_graph *rem = current_ir_graph;
1336 current_ir_graph = irg;
1337 res = new_bd_Break(db, block);
1338 current_ir_graph = rem;
1341 } /* new_rd_Break */
1344 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1347 ir_graph *rem = current_ir_graph;
1349 current_ir_graph = irg;
1350 res = new_bd_Filter(db, block, arg, mode, proj);
1351 current_ir_graph = rem;
1354 } /* new_rd_Filter */
/* There is a single NoMem node per graph; just return it. */
1357 new_rd_NoMem(ir_graph *irg) {
1358 return get_irg_no_mem(irg);
1359 } /* new_rd_NoMem */
/* Remaining graph-switching wrappers, same save/switch/delegate/restore
   pattern as above. */
1362 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1363 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1365 ir_graph *rem = current_ir_graph;
1367 current_ir_graph = irg;
1368 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1369 current_ir_graph = rem;
1375 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1376 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1378 ir_graph *rem = current_ir_graph;
1380 current_ir_graph = irg;
1381 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1382 current_ir_graph = rem;
1387 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1388 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1390 ir_graph *rem = current_ir_graph;
1392 current_ir_graph = irg;
1393 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1394 current_ir_graph = rem;
1397 } /* new_rd_CopyB */
1400 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1401 ir_node *objptr, ir_type *type) {
1403 ir_graph *rem = current_ir_graph;
1405 current_ir_graph = irg;
1406 res = new_bd_InstOf(db, block, store, objptr, type);
1407 current_ir_graph = rem;
1410 } /* new_rd_InstOf */
1413 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1415 ir_graph *rem = current_ir_graph;
1417 current_ir_graph = irg;
1418 res = new_bd_Raise(db, block, store, obj);
1419 current_ir_graph = rem;
1422 } /* new_rd_Raise */
1424 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1425 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1427 ir_graph *rem = current_ir_graph;
1429 current_ir_graph = irg;
1430 res = new_bd_Bound(db, block, store, idx, lower, upper);
1431 current_ir_graph = rem;
1434 } /* new_rd_Bound */
1436 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1438 ir_graph *rem = current_ir_graph;
1440 current_ir_graph = irg;
1441 res = new_bd_Pin(db, block, node);
1442 current_ir_graph = rem;
1447 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1448 int arity, ir_node *in[], ir_asm_constraint *inputs,
1449 int n_outs, ir_asm_constraint *outputs,
1450 int n_clobber, ident *clobber[], ident *asm_text) {
1452 ir_graph *rem = current_ir_graph;
1454 current_ir_graph = irg;
1455 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1456 current_ir_graph = rem;
1462 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1463 return new_rd_Block(NULL, irg, arity, in);
1465 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1466 return new_rd_Start(NULL, irg, block);
1468 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1469 return new_rd_End(NULL, irg, block);
1471 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1472 return new_rd_Jmp(NULL, irg, block);
1474 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1475 return new_rd_IJmp(NULL, irg, block, tgt);
1477 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1478 return new_rd_Cond(NULL, irg, block, c);
1480 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1481 ir_node *store, int arity, ir_node **in) {
1482 return new_rd_Return(NULL, irg, block, store, arity, in);
1484 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1485 ir_mode *mode, tarval *con) {
1486 return new_rd_Const(NULL, irg, block, mode, con);
1488 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1489 ir_mode *mode, long value) {
1490 return new_rd_Const_long(NULL, irg, block, mode, value);
1492 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1493 ir_mode *mode, tarval *con, ir_type *tp) {
1494 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1496 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1497 symconst_symbol value, symconst_kind symkind) {
1498 return new_rd_SymConst(NULL, irg, block, value, symkind);
1500 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1501 ir_node *objptr, ir_entity *ent) {
1502 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1504 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1505 ir_node *objptr, int n_index, ir_node **index,
1507 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1509 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1510 ir_node *callee, int arity, ir_node **in,
1512 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1514 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1515 ir_node *op1, ir_node *op2, ir_mode *mode) {
1516 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1518 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1519 ir_node *op1, ir_node *op2, ir_mode *mode) {
1520 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1522 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1523 ir_node *op, ir_mode *mode) {
1524 return new_rd_Minus(NULL, irg, block, op, mode);
1526 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1527 ir_node *op1, ir_node *op2, ir_mode *mode) {
1528 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1530 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1531 ir_node *op1, ir_node *op2, ir_mode *mode) {
1532 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1534 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1535 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1536 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1538 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1539 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1540 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1542 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1543 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1544 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1546 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1547 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1548 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1550 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1551 ir_node *op, ir_mode *mode) {
1552 return new_rd_Abs(NULL, irg, block, op, mode);
1554 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1555 ir_node *op1, ir_node *op2, ir_mode *mode) {
1556 return new_rd_And(NULL, irg, block, op1, op2, mode);
1558 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1559 ir_node *op1, ir_node *op2, ir_mode *mode) {
1560 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1562 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1563 ir_node *op1, ir_node *op2, ir_mode *mode) {
1564 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1566 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1567 ir_node *op, ir_mode *mode) {
1568 return new_rd_Not(NULL, irg, block, op, mode);
1570 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1571 ir_node *op, ir_node *k, ir_mode *mode) {
1572 return new_rd_Shl(NULL, irg, block, op, k, mode);
1574 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1575 ir_node *op, ir_node *k, ir_mode *mode) {
1576 return new_rd_Shr(NULL, irg, block, op, k, mode);
1578 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1579 ir_node *op, ir_node *k, ir_mode *mode) {
1580 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1582 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1583 ir_node *op, ir_node *k, ir_mode *mode) {
1584 return new_rd_Rot(NULL, irg, block, op, k, mode);
1586 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1587 ir_node *op, ir_node *k, ir_mode *mode) {
1588 return new_rd_Carry(NULL, irg, block, op, k, mode);
1590 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1591 ir_node *op, ir_node *k, ir_mode *mode) {
1592 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1594 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1595 ir_node *op1, ir_node *op2) {
1596 return new_rd_Cmp(NULL, irg, block, op1, op2);
1598 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1599 ir_node *op, ir_mode *mode) {
1600 return new_rd_Conv(NULL, irg, block, op, mode);
1602 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1603 return new_rd_Cast(NULL, irg, block, op, to_tp);
1605 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1606 ir_node **in, ir_mode *mode) {
1607 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1609 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1610 ir_node *store, ir_node *adr, ir_mode *mode) {
1611 return new_rd_Load(NULL, irg, block, store, adr, mode);
1613 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1614 ir_node *store, ir_node *adr, ir_node *val) {
1615 return new_rd_Store(NULL, irg, block, store, adr, val);
1617 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1618 ir_node *size, ir_type *alloc_type, where_alloc where) {
1619 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1621 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1622 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1623 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1625 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1626 return new_rd_Sync(NULL, irg, block, arity, in);
1628 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1629 ir_mode *mode, long proj) {
1630 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1632 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1634 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1636 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1637 int arity, ir_node **in) {
1638 return new_rd_Tuple(NULL, irg, block, arity, in );
1640 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1641 ir_node *val, ir_mode *mode) {
1642 return new_rd_Id(NULL, irg, block, val, mode);
1644 ir_node *new_r_Bad(ir_graph *irg) {
1645 return new_rd_Bad(irg);
1647 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1648 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1650 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1651 return new_rd_Unknown(irg, m);
1653 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1654 return new_rd_CallBegin(NULL, irg, block, callee);
1656 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1657 return new_rd_EndReg(NULL, irg, block);
1659 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1660 return new_rd_EndExcept(NULL, irg, block);
1662 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1663 return new_rd_Break(NULL, irg, block);
1665 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1666 ir_mode *mode, long proj) {
1667 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1669 ir_node *new_r_NoMem(ir_graph *irg) {
1670 return new_rd_NoMem(irg);
1672 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1673 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1674 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1676 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1677 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1678 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1680 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1681 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1682 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1684 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1686 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1688 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1689 ir_node *store, ir_node *obj) {
1690 return new_rd_Raise(NULL, irg, block, store, obj);
1692 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1693 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1694 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1696 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1697 return new_rd_Pin(NULL, irg, block, node);
1699 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1700 int arity, ir_node *in[], ir_asm_constraint *inputs,
1701 int n_outs, ir_asm_constraint *outputs,
1702 int n_clobber, ident *clobber[], ident *asm_text) {
1703 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1706 /** ********************/
1707 /** public interfaces */
1708 /** construction tools */
1712 * - create a new Start node in the current block
1714 * @return s - pointer to the created Start node
1719 new_d_Start(dbg_info *db) {
1722 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1723 op_Start, mode_T, 0, NULL);
1725 res = optimize_node(res);
1726 IRN_VRFY_IRG(res, current_ir_graph);
1731 new_d_End(dbg_info *db) {
1733 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1734 op_End, mode_X, -1, NULL);
1735 res = optimize_node(res);
1736 IRN_VRFY_IRG(res, current_ir_graph);
1741 /* Constructs a Block with a fixed number of predecessors.
1742 Does set current_block. Can be used with automatic Phi
1743 node construction. */
1745 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1748 int has_unknown = 0;
1750 res = new_bd_Block(db, arity, in);
1752 /* Create and initialize array for Phi-node construction. */
1753 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1754 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1755 current_ir_graph->n_loc);
1756 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1759 for (i = arity-1; i >= 0; i--)
1760 if (get_irn_op(in[i]) == op_Unknown) {
1765 if (!has_unknown) res = optimize_node(res);
1766 current_ir_graph->current_block = res;
1768 IRN_VRFY_IRG(res, current_ir_graph);
1773 /* ***********************************************************************/
1774 /* Methods necessary for automatic Phi node creation */
1776 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1777 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1778 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1779 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1781 Call Graph: ( A ---> B == A "calls" B)
1783 get_value mature_immBlock
1791 get_r_value_internal |
1795 new_rd_Phi0 new_rd_Phi_in
1797 * *************************************************************************** */
1799 /** Creates a Phi node with 0 predecessors. */
1800 static INLINE ir_node *
1801 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1804 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1805 IRN_VRFY_IRG(res, irg);
/**
 * Creates a Phi node with ins predecessors, applying the
 * one-real-predecessor optimization: if after skipping Ids, Bads and
 * self references only a single distinct value remains, that value is
 * returned instead of a Phi node.
 *
 * NOTE(review): this function appears truncated by extraction — the
 * declaration of the loop index, the initialization of 'known', the
 * loop braces and the branch deciding between the "one predecessor"
 * and "real Phi" cases seem to be missing. Restore from the upstream
 * libFirm sources before compiling.
 */
static INLINE ir_node *
new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
              ir_node **in, int ins, ir_node *phi0) {
	ir_node *res, *known;

	/* Allocate a new node on the obstack. The allocation copies the in
	   array. */
	res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
	res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

	/* This loop checks whether the Phi has more than one predecessor.
	   If so, it is a real Phi node and we break the loop. Else the
	   Phi node merges the same definition on several paths and therefore
	   is not needed. Don't consider Bad nodes! */
	for (i=0; i < ins; ++i)

		in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */

		/* Optimize self referencing Phis: We can't detect them yet properly, as
		   they still refer to the Phi0 they will replace. So replace right now. */
		if (phi0 && in[i] == phi0) in[i] = res;

		if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;

	/* i==ins: there is at most one predecessor, we don't need a phi node. */
	/* The Phi just allocated is superfluous: remove it from the graph
	   and fall back to the single known value. */
	edges_node_deleted(res, current_ir_graph);
	obstack_free (current_ir_graph->obst, res);
	if (is_Phi(known)) {
		/* If pred is a phi node we want to optimize it: If loops are matured in a bad
		   order, an enclosing Phi know may get superfluous. */
		res = optimize_in_place_2(known);
		exchange(known, res);

	/* A undefined value, e.g., in unreachable code. */
	res = optimize_node (res);  /* This is necessary to add the node to the hash table for cse. */
	IRN_VRFY_IRG(res, irg);
	/* Memory Phis in endless loops must be kept alive.
	   As we can't distinguish these easily we keep all of them alive. */
	if ((res->op == op_Phi) && (mode == mode_M))
		add_End_keepalive(get_irg_end(irg), res);
} /* new_rd_Phi_in */
1876 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1878 #if PRECISE_EXC_CONTEXT
1880 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1883 * Construct a new frag_array for node n.
1884 * Copy the content from the current graph_arr of the corresponding block:
1885 * this is the current state.
1886 * Set ProjM(n) as current memory state.
1887 * Further the last entry in frag_arr of current block points to n. This
1888 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1890 static INLINE ir_node **new_frag_arr(ir_node *n) {
1894 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1895 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1896 sizeof(ir_node *)*current_ir_graph->n_loc);
1898 /* turn off optimization before allocating Proj nodes, as res isn't
1900 opt = get_opt_optimize(); set_optimize(0);
1901 /* Here we rely on the fact that all frag ops have Memory as first result! */
1902 if (get_irn_op(n) == op_Call)
1903 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1904 else if (get_irn_op(n) == op_CopyB)
1905 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
1907 assert((pn_Quot_M == pn_DivMod_M) &&
1908 (pn_Quot_M == pn_Div_M) &&
1909 (pn_Quot_M == pn_Mod_M) &&
1910 (pn_Quot_M == pn_Load_M) &&
1911 (pn_Quot_M == pn_Store_M) &&
1912 (pn_Quot_M == pn_Alloc_M) &&
1913 (pn_Quot_M == pn_Bound_M));
1914 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1918 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1920 } /* new_frag_arr */
1923 * Returns the frag_arr from a node.
1925 static INLINE ir_node **get_frag_arr(ir_node *n) {
1926 switch (get_irn_opcode(n)) {
1928 return n->attr.call.exc.frag_arr;
1930 return n->attr.alloc.exc.frag_arr;
1932 return n->attr.load.exc.frag_arr;
1934 return n->attr.store.exc.frag_arr;
1936 return n->attr.except.frag_arr;
1938 } /* get_frag_arr */
/*
 * Records val as the value at pos in frag_arr and in all frag arrays
 * chained behind it (via the last graph_arr slot), without
 * overwriting entries that are already set.
 *
 * NOTE(review): two variants of this function appear interleaved here
 * — a recursive form and an iteration-bounded loop form (limit 1000)
 * — with the return type and several braces missing; the extraction
 * seems to have dropped lines. Reconcile against the upstream libFirm
 * sources before compiling.
 */
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
	/* Recursive variant: only fill an empty slot, then follow the chain. */
	if (!frag_arr[pos]) frag_arr[pos] = val;
	if (frag_arr[current_ir_graph->n_loc - 1]) {
		ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
		assert(arr != frag_arr && "Endless recursion detected");
		set_frag_value(arr, pos, val);
	/* Loop variant: bounded walk along the chain instead of recursion. */
	for (i = 0; i < 1000; ++i) {
		if (!frag_arr[pos]) {
			frag_arr[pos] = val;
		if (frag_arr[current_ir_graph->n_loc - 1]) {
			ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
	assert(0 && "potential endless recursion");
} /* set_frag_value */
/*
 * Returns the definition of value pos as seen *after* the fragile op
 * cfOp (i.e. on its exception path), consulting cfOp's frag array and
 * building a Phi node if a set_value happened after the cfOp.
 *
 * NOTE(review): the return type line, the local declarations
 * (res, frag_arr, nin), the `if (!res)` wrapper and several closing
 * braces appear to have been dropped by extraction. Restore from the
 * upstream libFirm sources before compiling.
 */
get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
	assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));

	frag_arr = get_frag_arr(cfOp);
	res = frag_arr[pos];
	if (block->attr.block.graph_arr[pos]) {
		/* There was a set_value() after the cfOp and no get_value before that
		   set_value(). We must build a Phi node now. */
		if (block->attr.block.is_matured) {
			int ins = get_irn_arity(block);
			NEW_ARR_A(ir_node *, nin, ins);
			res = phi_merge(block, pos, mode, nin, ins);
		/* Block not matured: fall back to a Phi0 placeholder linked into
		   the block's Phi0 chain (matured later by mature_immBlock). */
			res = new_rd_Phi0(current_ir_graph, block, mode);
			res->attr.phi0.pos = pos;
			res->link = block->link;
		/* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
		   but this should be better: (remove comment if this works) */
		/* It's a Phi, we can write this into all graph_arrs with NULL */
		set_frag_value(block->attr.block.graph_arr, pos, res);
		res = get_r_value_internal(block, pos, mode);
		set_frag_value(block->attr.block.graph_arr, pos, res);
} /* get_r_frag_value_internal */
2003 #endif /* PRECISE_EXC_CONTEXT */
2006 * check whether a control flow cf_pred is a exception flow.
2008 * @param cf_pred the control flow node
2009 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2011 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2012 /* all projections from a raise are exceptional control flow */
2013 if (is_Raise(prev_cf_op))
2015 if (is_fragile_op(prev_cf_op)) {
2016 if (is_Proj(cf_pred)) {
2017 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2018 /* the regular control flow, NO exception */
2021 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2024 /* Hmm, exception but not a Proj? */
2025 assert(!"unexpected condition: fragile op without a proj");
2029 } /* is_exception_flow */
2032 * Computes the predecessors for the real phi node, and then
2033 * allocates and returns this node. The routine called to allocate the
2034 * node might optimize it away and return a real value.
2035 * This function must be called with an in-array of proper size.
/*
 * Computes the predecessors for the real Phi node at (block, pos) and
 * allocates it via new_rd_Phi_in (which may optimize the Phi away and
 * return a plain value). Must be called with an in-array nin of proper
 * size (ins entries).
 *
 * NOTE(review): the return type line, local declarations (int i),
 * several `else` branches and closing braces, and the final
 * `return res;` appear to have been dropped by extraction. Restore
 * from the upstream libFirm sources before compiling.
 */
phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
	ir_node *prevBlock, *res, *phi0, *phi0_all;

	/* If this block has no value at pos create a Phi0 and remember it
	   in graph_arr to break recursions.
	   Else we may not set graph_arr as there a later value is remembered. */
	if (!block->attr.block.graph_arr[pos]) {
		if (block == get_irg_start_block(current_ir_graph)) {
			/* Collapsing to Bad tarvals is no good idea.
			   So we call a user-supplied routine here that deals with this case as
			   appropriate for the given language. Sorrily the only help we can give
			   here is the position.

			   Even if all variables are defined before use, it can happen that
			   we get to the start block, if a Cond has been replaced by a tuple
			   (bad, jmp).  In this case we call the function needlessly, eventually
			   generating an non existent error.
			   However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
			   before recurring.
			 */
			if (default_initialize_local_variable) {
				ir_node *rem = get_cur_block();

				set_cur_block(block);
				block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
				/* else branch: no language callback installed — use a Bad tarval. */
				block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
			/* We don't need to care about exception ops in the start block.
			   There are none by definition. */
			return block->attr.block.graph_arr[pos];
		/* else branch: allocate a Phi0 placeholder to break recursion. */
			phi0 = new_rd_Phi0(current_ir_graph, block, mode);
			block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
			if (get_opt_precise_exc_context()) {
				/* Set graph_arr for fragile ops.  Also here we should break recursion.
				   We could choose a cyclic path through an cfop.  But the recursion would
				   break at some point. */
				set_frag_value(block->attr.block.graph_arr, pos, phi0);

	/* This loop goes to all predecessor blocks of the block the Phi node
	   is in and there finds the operands of the Phi node by calling
	   get_r_value_internal.  */
	for (i = 1; i <= ins; ++i) {
		ir_node *cf_pred = block->in[i];
		ir_node *prevCfOp = skip_Proj(cf_pred);

		if (is_Bad(prevCfOp)) {
			/* In case a Cond has been optimized we would get right to the start block
			   with an invalid definition. */
			nin[i-1] = new_Bad();
		prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */

		if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
			if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
				assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
				nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
			/* regular (non-exceptional) control flow predecessor */
			nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
			/* predecessor block is Bad — propagate Bad. */
			nin[i-1] = new_Bad();

	/* We want to pass the Phi0 node to the constructor: this finds additional
	   optimization possibilities.
	   The Phi0 node either is allocated in this function, or it comes from
	   a former call to get_r_value_internal. In this case we may not yet
	   exchange phi0, as this is done in mature_immBlock. */
	phi0_all = block->attr.block.graph_arr[pos];
	if (!((get_irn_op(phi0_all) == op_Phi) &&
	      (get_irn_arity(phi0_all) == 0)   &&
	      (get_nodes_block(phi0_all) == block)))
		/* graph_arr holds a real value, not a Phi0 placeholder. */

	/* After collecting all predecessors into the array nin a new Phi node
	   with these predecessors is created.  This constructor contains an
	   optimization: If all predecessors of the Phi node are identical it
	   returns the only operand instead of a new Phi node.  */
	res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);

	/* In case we allocated a Phi0 node at the beginning of this procedure,
	   we need to exchange this Phi0 with the real Phi. */
	exchange(phi0, res);
	block->attr.block.graph_arr[pos] = res;
	/* Don't set_frag_value as it does not overwrite.  Doesn't matter, is
	   only an optimization. */
2148 * This function returns the last definition of a value. In case
2149 * this value was last defined in a previous block, Phi nodes are
2150 * inserted. If the part of the firm graph containing the definition
2151 * is not yet constructed, a dummy Phi node is returned.
2153 * @param block the current block
2154 * @param pos the value number of the value searched
2155 * @param mode the mode of this value (needed for Phi construction)
/*
 * Returns the last definition of value pos in block, inserting Phi
 * nodes as needed; returns a Phi0 placeholder when the defining region
 * is not yet constructed.
 *
 * NOTE(review): the return type line, the declarations of res/nin,
 * the Phi0 chain link-in (`block->link = res;`) and several braces
 * appear to have been dropped by extraction. Restore from the
 * upstream libFirm sources before compiling.
 */
get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
	/* There are 4 cases to treat.

	   1. The block is not mature and we visit it the first time.  We can not
	      create a proper Phi node, therefore a Phi0, i.e., a Phi without
	      predecessors is returned.  This node is added to the linked list (field
	      "link") of the containing block to be completed when this block is
	      matured. (Completion will add a new Phi and turn the Phi0 into an Id
	      node.)

	   2. The value is already known in this block, graph_arr[pos] is set and we
	      visit the block the first time.  We can return the value without
	      creating any new nodes.

	   3. The block is mature and we visit it the first time.  A Phi node needs
	      to be created (phi_merge).  If the Phi is not needed, as all it's
	      operands are the same value reaching the block through different
	      paths, it's optimized away and the value itself is returned.

	   4. The block is mature, and we visit it the second time.  Now two
	      subcases are possible:
	      * The value was computed completely the last time we were here.  This
	        is the case if there is no loop.  We can return the proper value.
	      * The recursion that visited this node and set the flag did not
	        return yet.  We are computing a value in a loop and need to
	        break the recursion.  This case only happens if we visited
	        the same block with phi_merge before, which inserted a Phi0.
	        So we return the Phi0.
	*/

	/* case 4 -- already visited. */
	if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
		/* As phi_merge allocates a Phi0 this value is always defined. Here
		   is the critical difference of the two algorithms. */
		assert(block->attr.block.graph_arr[pos]);
		return block->attr.block.graph_arr[pos];

	/* visited the first time */
	set_irn_visited(block, get_irg_visited(current_ir_graph));

	/* Get the local valid value */
	res = block->attr.block.graph_arr[pos];

	/* case 2 -- If the value is actually computed, return it. */
	if (res) { return res; };

	if (block->attr.block.is_matured) { /* case 3 */

		/* The Phi has the same amount of ins as the corresponding block. */
		int ins = get_irn_arity(block);
		NEW_ARR_A (ir_node *, nin, ins);

		/* Phi merge collects the predecessors and then creates a node. */
		res = phi_merge (block, pos, mode, nin, ins);

	} else {  /* case 1 */
		/* The block is not mature, we don't know how many in's are needed.  A Phi
		   with zero predecessors is created.  Such a Phi node is called Phi0
		   node.  The Phi0 is then added to the list of Phi0 nodes in this block
		   to be matured by mature_immBlock later.
		   The Phi0 has to remember the pos of it's internal value.  If the real
		   Phi is computed, pos is used to update the array with the local
		   values. */
		res = new_rd_Phi0(current_ir_graph, block, mode);
		res->attr.phi0.pos = pos;
		res->link = block->link;

	/* If we get here, the frontend missed a use-before-definition error */
	printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
	assert(mode->code >= irm_F && mode->code <= irm_P);
	res = new_rd_Const(NULL, current_ir_graph, block, mode,
	                   get_mode_null(mode));

	/* The local valid value is available now. */
	block->attr.block.graph_arr[pos] = res;

} /* get_r_value_internal */
2245 /* ************************************************************************** */
2248 * Finalize a Block node, when all control flows are known.
2249 * Acceptable parameters are only Block nodes.
/*
 * Finalizes a Block node when all its control flow predecessors are
 * known: fixes the backedge array, matures all Phi0 placeholders
 * chained on block->link via phi_merge, and optimizes the block in
 * place.
 *
 * NOTE(review): the return type line and local declarations
 * (ins, nin, n, next), the `next = ...` advance of the Phi0 chain walk
 * and the closing brace of the `if` appear to have been dropped by
 * extraction. Restore from the upstream libFirm sources before
 * compiling.
 */
mature_immBlock(ir_node *block) {
	assert(is_Block(block));
	if (!get_Block_matured(block)) {
		/* block->in[0] is the block itself, hence the -1. */
		ins = ARR_LEN(block->in)-1;
		/* Fix block parameters */
		block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);

		/* An array for building the Phi nodes. */
		NEW_ARR_A(ir_node *, nin, ins);

		/* Traverse a chain of Phi nodes attached to this block and mature
		   these, too. */
		for (n = block->link; n; n = next) {
			inc_irg_visited(current_ir_graph);
			/* Replace the Phi0 placeholder by a real Phi over all preds. */
			exchange(n, phi_merge(block, n->attr.phi0.pos, n->mode, nin, ins));

		block->attr.block.is_matured = 1;

		/* Now, as the block is a finished firm node, we can optimize it.
		   Since other nodes have been allocated since the block was created
		   we can not free the node on the obstack.  Therefore we have to call
		   optimize_in_place().
		   Unfortunately the optimization does not change a lot, as all allocated
		   nodes refer to the unoptimized node.
		   We can call _2, as global cse has no effect on blocks. */
		block = optimize_in_place_2(block);
		IRN_VRFY_IRG(block, current_ir_graph);
} /* mature_immBlock */
2289 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2290 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2294 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2295 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2299 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2300 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2301 } /* new_d_Const_long */
2304 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2305 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2306 } /* new_d_Const_type */
2310 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2311 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2315 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2316 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2320 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2323 assert(arg->op == op_Cond);
2324 arg->attr.cond.kind = fragmentary;
2325 arg->attr.cond.default_proj = max_proj;
2326 res = new_Proj(arg, mode_X, max_proj);
2328 } /* new_d_defaultProj */
2331 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2332 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2336 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2337 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2338 } /* new_d_strictConv */
2341 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2342 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2346 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2347 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2357 * Allocate the frag array.
2359 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2360 if (get_opt_precise_exc_context()) {
2361 if ((current_ir_graph->phase_state == phase_building) &&
2362 (get_irn_op(res) == op) && /* Could be optimized away. */
2363 !*frag_store) /* Could be a cse where the arr is already set. */ {
2364 *frag_store = new_frag_arr(res);
2367 } /* allocate_frag_arr */
2370 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2372 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2373 #if PRECISE_EXC_CONTEXT
2374 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2381 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2383 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2384 #if PRECISE_EXC_CONTEXT
2385 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2389 } /* new_d_DivMod */
2392 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2394 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2395 #if PRECISE_EXC_CONTEXT
2396 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2403 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2405 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2406 #if PRECISE_EXC_CONTEXT
2407 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2426 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2427 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2431 new_d_Jmp(dbg_info *db) {
2432 return new_bd_Jmp(db, current_ir_graph->current_block);
2436 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2437 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2441 new_d_Cond(dbg_info *db, ir_node *c) {
2442 return new_bd_Cond(db, current_ir_graph->current_block, c);
2446 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2449 res = new_bd_Call(db, current_ir_graph->current_block,
2450 store, callee, arity, in, tp);
2451 #if PRECISE_EXC_CONTEXT
2452 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2459 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2460 return new_bd_Return(db, current_ir_graph->current_block,
2462 } /* new_d_Return */
2465 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2467 res = new_bd_Load(db, current_ir_graph->current_block,
2469 #if PRECISE_EXC_CONTEXT
2470 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2477 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2479 res = new_bd_Store(db, current_ir_graph->current_block,
2481 #if PRECISE_EXC_CONTEXT
2482 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2489 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2490 where_alloc where) {
2492 res = new_bd_Alloc(db, current_ir_graph->current_block,
2493 store, size, alloc_type, where);
2494 #if PRECISE_EXC_CONTEXT
2495 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2502 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2503 ir_node *size, ir_type *free_type, where_alloc where) {
2504 return new_bd_Free(db, current_ir_graph->current_block,
2505 store, ptr, size, free_type, where);
2509 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2510 /* GL: objptr was called frame before. Frame was a bad choice for the name
2511 as the operand could as well be a pointer to a dynamic object. */
2513 return new_bd_Sel(db, current_ir_graph->current_block,
2514 store, objptr, 0, NULL, ent);
2515 } /* new_d_simpleSel */
2518 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2519 return new_bd_Sel(db, current_ir_graph->current_block,
2520 store, objptr, n_index, index, sel);
2524 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2525 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2527 } /* new_d_SymConst_type */
2530 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
2531 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2532 value, kind, firm_unknown_type);
2533 } /* new_d_SymConst */
2536 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2537 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2543 return _new_d_Bad();
2547 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2548 return new_bd_Confirm(db, current_ir_graph->current_block,
2550 } /* new_d_Confirm */
2553 new_d_Unknown(ir_mode *m) {
2554 return new_bd_Unknown(m);
2555 } /* new_d_Unknown */
2558 new_d_CallBegin(dbg_info *db, ir_node *call) {
2559 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2560 } /* new_d_CallBegin */
2563 new_d_EndReg(dbg_info *db) {
2564 return new_bd_EndReg(db, current_ir_graph->current_block);
2565 } /* new_d_EndReg */
2568 new_d_EndExcept(dbg_info *db) {
2569 return new_bd_EndExcept(db, current_ir_graph->current_block);
2570 } /* new_d_EndExcept */
2573 new_d_Break(dbg_info *db) {
2574 return new_bd_Break(db, current_ir_graph->current_block);
2578 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2579 return new_bd_Filter(db, current_ir_graph->current_block,
2581 } /* new_d_Filter */
2584 (new_d_NoMem)(void) {
2585 return _new_d_NoMem();
2589 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2590 ir_node *ir_true, ir_mode *mode) {
2591 return new_bd_Mux(db, current_ir_graph->current_block,
2592 sel, ir_false, ir_true, mode);
2596 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2597 return new_bd_Psi(db, current_ir_graph->current_block,
2598 arity, conds, vals, mode);
2601 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2602 ir_node *dst, ir_node *src, ir_type *data_type) {
2604 res = new_bd_CopyB(db, current_ir_graph->current_block,
2605 store, dst, src, data_type);
2606 #if PRECISE_EXC_CONTEXT
2607 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2613 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2614 return new_bd_InstOf(db, current_ir_graph->current_block,
2615 store, objptr, type);
2616 } /* new_d_InstOf */
2619 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2620 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2623 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2624 ir_node *idx, ir_node *lower, ir_node *upper) {
2626 res = new_bd_Bound(db, current_ir_graph->current_block,
2627 store, idx, lower, upper);
2628 #if PRECISE_EXC_CONTEXT
2629 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2635 new_d_Pin(dbg_info *db, ir_node *node) {
2636 return new_bd_Pin(db, current_ir_graph->current_block, node);
2640 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2641 int n_outs, ir_asm_constraint *outputs,
2642 int n_clobber, ident *clobber[], ident *asm_text) {
2643 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2646 /* ********************************************************************* */
2647 /* Comfortable interface with automatic Phi node construction. */
2648 /* (Uses also constructors of ?? interface, except new_Block. */
2649 /* ********************************************************************* */
2651 /* Block construction */
2652 /* immature Block without predecessors */
2654 new_d_immBlock(dbg_info *db) {
2657 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2658 /* creates a new dynamic in-array as length of in is -1 */
2659 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2660 current_ir_graph->current_block = res;
2662 /* macroblock head */
2665 res->attr.block.is_matured = 0;
2666 res->attr.block.is_dead = 0;
2667 res->attr.block.is_mb_head = 1;
2668 res->attr.block.has_label = 0;
2669 res->attr.block.irg = current_ir_graph;
2670 res->attr.block.backedge = NULL;
2671 res->attr.block.in_cg = NULL;
2672 res->attr.block.cg_backedge = NULL;
2673 res->attr.block.extblk = NULL;
2674 res->attr.block.region = NULL;
2675 res->attr.block.mb_depth = 0;
2676 res->attr.block.label = 0;
2678 set_Block_block_visited(res, 0);
2680 /* Create and initialize array for Phi-node construction. */
2681 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2682 current_ir_graph->n_loc);
2683 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2685 /* Immature block may not be optimized! */
2686 IRN_VRFY_IRG(res, current_ir_graph);
2689 } /* new_d_immBlock */
2692 new_immBlock(void) {
2693 return new_d_immBlock(NULL);
2694 } /* new_immBlock */
2696 /* immature PartBlock with its predecessors */
2698 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2699 ir_node *res = new_d_immBlock(db);
2700 ir_node *blk = get_nodes_block(pred_jmp);
2702 res->in[0] = blk->in[0];
2703 assert(res->in[0] != NULL);
2704 add_immBlock_pred(res, pred_jmp);
2706 res->attr.block.is_mb_head = 0;
2707 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2710 } /* new_d_immPartBlock */
2713 new_immPartBlock(ir_node *pred_jmp) {
2714 return new_d_immPartBlock(NULL, pred_jmp);
2715 } /* new_immPartBlock */
2717 /* add an edge to a jmp/control flow node */
2719 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2720 int n = ARR_LEN(block->in) - 1;
2722 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2723 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2724 assert(jmp != NULL);
2726 ARR_APP1(ir_node *, block->in, jmp);
2728 hook_set_irn_n(block, n, jmp, NULL);
2729 } /* add_immBlock_pred */
2731 /* changing the current block */
2733 set_cur_block(ir_node *target) {
2734 current_ir_graph->current_block = target;
2735 } /* set_cur_block */
2737 /* ************************ */
2738 /* parameter administration */
2740 /* get a value from the parameter array from the current block by its index */
2742 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2743 ir_graph *irg = current_ir_graph;
2744 assert(get_irg_phase_state(irg) == phase_building);
2745 inc_irg_visited(irg);
2748 return get_r_value_internal(irg->current_block, pos + 1, mode);
2751 /* get a value from the parameter array from the current block by its index */
2753 get_value(int pos, ir_mode *mode) {
2754 return get_d_value(NULL, pos, mode);
2757 /* set a value at position pos in the parameter array from the current block */
2759 set_value(int pos, ir_node *value) {
2760 ir_graph *irg = current_ir_graph;
2761 assert(get_irg_phase_state(irg) == phase_building);
2762 assert(pos+1 < irg->n_loc);
2763 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2766 /* Find the value number for a node in the current block.*/
2768 find_value(ir_node *value) {
2770 ir_node *bl = current_ir_graph->current_block;
2772 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2773 if (bl->attr.block.graph_arr[i] == value)
2778 /* get the current store */
2781 ir_graph *irg = current_ir_graph;
2783 assert(get_irg_phase_state(irg) == phase_building);
2784 /* GL: one could call get_value instead */
2785 inc_irg_visited(irg);
2786 return get_r_value_internal(irg->current_block, 0, mode_M);
2789 /* set the current store: handles automatic Sync construction for Load nodes */
2791 set_store(ir_node *store) {
2792 ir_node *load, *pload, *pred, *in[2];
2794 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2795 /* Beware: due to dead code elimination, a store might become a Bad node even in
2796 the construction phase. */
2797 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2799 if (get_opt_auto_create_sync()) {
2800 /* handle non-volatile Load nodes by automatically creating Sync's */
2801 load = skip_Proj(store);
2802 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2803 pred = get_Load_mem(load);
2805 if (is_Sync(pred)) {
2806 /* a Load after a Sync: move it up */
2807 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2809 set_Load_mem(load, get_memop_mem(mem));
2810 add_Sync_pred(pred, store);
2813 pload = skip_Proj(pred);
2814 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2815 /* a Load after a Load: create a new Sync */
2816 set_Load_mem(load, get_Load_mem(pload));
2820 store = new_Sync(2, in);
2825 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2829 keep_alive(ir_node *ka) {
2830 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2833 /* --- Useful access routines --- */
2834 /* Returns the current block of the current graph. To set the current
2835 block use set_cur_block. */
2836 ir_node *get_cur_block(void) {
2837 return get_irg_current_block(current_ir_graph);
2838 } /* get_cur_block */
2840 /* Returns the frame type of the current graph */
2841 ir_type *get_cur_frame_type(void) {
2842 return get_irg_frame_type(current_ir_graph);
2843 } /* get_cur_frame_type */
2846 /* ********************************************************************* */
2849 /* call once for each run of the library */
2851 init_cons(uninitialized_local_variable_func_t *func) {
2852 default_initialize_local_variable = func;
2856 irp_finalize_cons(void) {
2858 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2859 irg_finalize_cons(get_irp_irg(i));
2861 irp->phase_state = phase_high;
2862 } /* irp_finalize_cons */
2865 ir_node *new_Block(int arity, ir_node **in) {
2866 return new_d_Block(NULL, arity, in);
2868 ir_node *new_Start(void) {
2869 return new_d_Start(NULL);
2871 ir_node *new_End(void) {
2872 return new_d_End(NULL);
2874 ir_node *new_Jmp(void) {
2875 return new_d_Jmp(NULL);
2877 ir_node *new_IJmp(ir_node *tgt) {
2878 return new_d_IJmp(NULL, tgt);
2880 ir_node *new_Cond(ir_node *c) {
2881 return new_d_Cond(NULL, c);
2883 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2884 return new_d_Return(NULL, store, arity, in);
2886 ir_node *new_Const(ir_mode *mode, tarval *con) {
2887 return new_d_Const(NULL, mode, con);
2890 ir_node *new_Const_long(ir_mode *mode, long value) {
2891 return new_d_Const_long(NULL, mode, value);
2894 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2895 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2898 ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
2899 return new_d_SymConst_type(NULL, value, kind, type);
2901 ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
2902 return new_d_SymConst(NULL, value, kind);
2904 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2905 return new_d_simpleSel(NULL, store, objptr, ent);
2907 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2909 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2911 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2913 return new_d_Call(NULL, store, callee, arity, in, tp);
2915 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2916 return new_d_Add(NULL, op1, op2, mode);
2918 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2919 return new_d_Sub(NULL, op1, op2, mode);
2921 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2922 return new_d_Minus(NULL, op, mode);
2924 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2925 return new_d_Mul(NULL, op1, op2, mode);
2927 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2928 return new_d_Mulh(NULL, op1, op2, mode);
2930 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2931 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2933 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2934 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2936 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2937 return new_d_Div(NULL, memop, op1, op2, mode, state);
2939 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2940 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2942 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2943 return new_d_Abs(NULL, op, mode);
2945 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2946 return new_d_And(NULL, op1, op2, mode);
2948 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
2949 return new_d_Or(NULL, op1, op2, mode);
2951 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
2952 return new_d_Eor(NULL, op1, op2, mode);
2954 ir_node *new_Not(ir_node *op, ir_mode *mode) {
2955 return new_d_Not(NULL, op, mode);
2957 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
2958 return new_d_Shl(NULL, op, k, mode);
2960 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
2961 return new_d_Shr(NULL, op, k, mode);
2963 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
2964 return new_d_Shrs(NULL, op, k, mode);
2966 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
2967 return new_d_Rot(NULL, op, k, mode);
2969 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
2970 return new_d_Carry(NULL, op1, op2, mode);
2972 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
2973 return new_d_Borrow(NULL, op1, op2, mode);
2975 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
2976 return new_d_Cmp(NULL, op1, op2);
2978 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
2979 return new_d_Conv(NULL, op, mode);
2981 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
2982 return new_d_strictConv(NULL, op, mode);
2984 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
2985 return new_d_Cast(NULL, op, to_tp);
2987 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
2988 return new_d_Phi(NULL, arity, in, mode);
2990 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
2991 return new_d_Load(NULL, store, addr, mode);
2993 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
2994 return new_d_Store(NULL, store, addr, val);
2996 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
2997 where_alloc where) {
2998 return new_d_Alloc(NULL, store, size, alloc_type, where);
3000 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3001 ir_type *free_type, where_alloc where) {
3002 return new_d_Free(NULL, store, ptr, size, free_type, where);
3004 ir_node *new_Sync(int arity, ir_node *in[]) {
3005 return new_d_Sync(NULL, arity, in);
3007 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3008 return new_d_Proj(NULL, arg, mode, proj);
3010 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3011 return new_d_defaultProj(NULL, arg, max_proj);
3013 ir_node *new_Tuple(int arity, ir_node **in) {
3014 return new_d_Tuple(NULL, arity, in);
3016 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3017 return new_d_Id(NULL, val, mode);
3019 ir_node *new_Bad(void) {
3022 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3023 return new_d_Confirm(NULL, val, bound, cmp);
3025 ir_node *new_Unknown(ir_mode *m) {
3026 return new_d_Unknown(m);
3028 ir_node *new_CallBegin(ir_node *callee) {
3029 return new_d_CallBegin(NULL, callee);
3031 ir_node *new_EndReg(void) {
3032 return new_d_EndReg(NULL);
3034 ir_node *new_EndExcept(void) {
3035 return new_d_EndExcept(NULL);
3037 ir_node *new_Break(void) {
3038 return new_d_Break(NULL);
3040 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3041 return new_d_Filter(NULL, arg, mode, proj);
3043 ir_node *new_NoMem(void) {
3044 return new_d_NoMem();
3046 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3047 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3049 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3050 return new_d_Psi(NULL, arity, conds, vals, mode);
3052 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3053 return new_d_CopyB(NULL, store, dst, src, data_type);
3055 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3056 return new_d_InstOf(NULL, store, objptr, ent);
3058 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3059 return new_d_Raise(NULL, store, obj);
3061 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3062 return new_d_Bound(NULL, store, idx, lower, upper);
3064 ir_node *new_Pin(ir_node *node) {
3065 return new_d_Pin(NULL, node);
3067 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3068 int n_outs, ir_asm_constraint *outputs,
3069 int n_clobber, ident *clobber[], ident *asm_text) {
3070 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3073 /* create a new anchor node */
3074 ir_node *new_Anchor(ir_graph *irg) {
3075 ir_node *in[anchor_last];
3076 memset(in, 0, sizeof(in));
3077 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);