2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
/* Build-configuration fragment.
 * NOTE(review): this listing is elided — the #endif for
 * USE_EXPLICIT_PHI_IN_STACK and the #ifdef NDEBUG / #else / #endif lines
 * that pair the two IRN_VRFY_IRG definitions (no-op vs. irn_vrfy_irg call)
 * are not visible here. */
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
60 /* when we need verifying */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
68 * Language dependent variable initialization callback.
/* Frontend-supplied callback used to produce values for uninitialized
 * local variables; NULL until a frontend registers one. */
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* Macro factories that expand to whole constructor functions:
 *   NEW_BD_*  - build a node in a given block of current_ir_graph
 *               ("bd" = block + debug info), run optimize_node() and
 *               verify the result with IRN_VRFY_IRG.
 *   NEW_RD_*  - same, but temporarily switch current_ir_graph to an
 *               explicit irg argument and delegate to the bd variant,
 *               restoring current_ir_graph afterwards.
 *   NEW_D_*   - delegate to the bd variant using
 *               current_ir_graph->current_block.
 * NOTE(review): interior lines are elided from this listing — the
 * `static ir_node *` return-type lines, the in[] array setup, the
 * `return res;` statements and the closing braces of the generated
 * functions are missing here. */
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = state; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for an divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
161 current_ir_graph = rem; \
165 /* creates a d constructor for an binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
/* "bd" constructors for Block, Start, End and Phi.  All allocate with
 * new_ir_node() in current_ir_graph and verify via IRN_VRFY_IRG.
 * NOTE(review): local declarations (`ir_node *res;`, `int i;`,
 * `int has_unknown;`), some statements and the `return res; }` endings
 * are elided from this listing. */
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Cannot be used with automatic
183 * Phi node construction.
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.has_label = 0;
198 res->attr.block.irg = irg;
199 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
200 res->attr.block.in_cg = NULL;
201 res->attr.block.cg_backedge = NULL;
202 res->attr.block.extblk = NULL;
203 res->attr.block.mb_depth = 0;
204 res->attr.block.label = 0;
206 set_Block_matured(res, 1);
207 set_Block_block_visited(res, 0);
209 IRN_VRFY_IRG(res, irg);
/* Start node: tuple (mode_T) with no predecessors. */
214 new_bd_Start(dbg_info *db, ir_node *block) {
216 ir_graph *irg = current_ir_graph;
218 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
220 IRN_VRFY_IRG(res, irg);
/* End node: mode_X with dynamic (-1) arity; predecessors are added later. */
225 new_bd_End(dbg_info *db, ir_node *block) {
227 ir_graph *irg = current_ir_graph;
229 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
231 IRN_VRFY_IRG(res, irg);
236 * Creates a Phi node with all predecessors. Calling this constructor
237 * is only allowed if the corresponding block is mature.
240 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
242 ir_graph *irg = current_ir_graph;
246 /* Don't assert that block matured: the use of this constructor is strongly
248 if (get_Block_matured(block))
249 assert(get_irn_arity(block) == arity);
251 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
253 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
/* Skip optimization when any input is still Unknown (loop below sets
   has_unknown; its body is partly elided here). */
255 for (i = arity - 1; i >= 0; --i)
256 if (get_irn_op(in[i]) == op_Unknown) {
261 if (!has_unknown) res = optimize_node(res);
262 IRN_VRFY_IRG(res, irg);
264 /* Memory Phis in endless loops must be kept alive.
265 As we can't distinguish these easily we keep all of them alive. */
266 if ((res->op == op_Phi) && (mode == mode_M))
267 add_End_keepalive(get_irg_end(irg), res);
/* "bd" constructors for the Const family, Id, Proj and defaultProj.
 * NOTE(review): `ir_node *res;` declarations, return statements and
 * closing braces are elided from this listing. */
272 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
274 ir_graph *irg = current_ir_graph;
/* Constants always live in the start block: the `block` parameter is
   deliberately ignored here (see get_irg_start_block below). */
277 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
278 res->attr.con.tv = con;
279 set_Const_type(res, tp); /* Call method because of complex assertion. */
280 res = optimize_node (res);
281 assert(get_Const_type(res) == tp);
282 IRN_VRFY_IRG(res, irg);
285 } /* new_bd_Const_type */
/* Convenience wrappers: delegate with firm_unknown_type / a tarval built
   from a long.  Note they call the rd variants of themselves. */
288 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
289 ir_graph *irg = current_ir_graph;
291 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
295 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
296 ir_graph *irg = current_ir_graph;
298 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
299 } /* new_bd_Const_long */
302 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
304 ir_graph *irg = current_ir_graph;
306 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
307 res = optimize_node(res);
308 IRN_VRFY_IRG(res, irg);
313 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
316 ir_graph *irg = current_ir_graph;
318 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
319 res->attr.proj = proj;
322 assert(get_Proj_pred(res));
323 assert(get_nodes_block(get_Proj_pred(res)));
325 res = optimize_node(res);
327 IRN_VRFY_IRG(res, irg);
/* defaultProj: side effect — marks the Cond argument as `fragmentary`
   and records max_proj as its default projection number. */
332 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
335 ir_graph *irg = current_ir_graph;
337 assert(arg->op == op_Cond);
338 arg->attr.cond.kind = fragmentary;
339 arg->attr.cond.default_proj = max_proj;
340 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
342 } /* new_bd_defaultProj */
/* "bd" constructors for Conv, Cast, Tuple, Cmp, Jmp, IJmp and Cond.
 * NOTE(review): `ir_node *res;` / in[] declarations, return statements
 * and closing braces are elided from this listing. */
345 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
347 ir_graph *irg = current_ir_graph;
349 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
350 res->attr.conv.strict = strict_flag;
351 res = optimize_node(res);
352 IRN_VRFY_IRG(res, irg);
/* Cast keeps the operand's mode; only the (atomic) type changes. */
357 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
359 ir_graph *irg = current_ir_graph;
361 assert(is_atomic_type(to_tp));
363 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
364 res->attr.cast.totype = to_tp;
365 res = optimize_node(res);
366 IRN_VRFY_IRG(res, irg);
371 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
373 ir_graph *irg = current_ir_graph;
375 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
376 res = optimize_node (res);
377 IRN_VRFY_IRG(res, irg);
/* Cmp yields a tuple (mode_T) of relation Projs. */
403 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
406 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
410 res = optimize_node(res);
411 IRN_VRFY_IRG(res, irg);
416 new_bd_Jmp(dbg_info *db, ir_node *block) {
418 ir_graph *irg = current_ir_graph;
420 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
421 res = optimize_node(res);
422 IRN_VRFY_IRG(res, irg);
427 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
429 ir_graph *irg = current_ir_graph;
431 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
432 res = optimize_node(res);
433 IRN_VRFY_IRG(res, irg);
/* Cond starts as a `dense` jump table with no prediction. */
438 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
443 res->attr.cond.kind = dense;
444 res->attr.cond.default_proj = 0;
445 res->attr.cond.pred = COND_JMP_PRED_NONE;
446 res = optimize_node(res);
447 IRN_VRFY_IRG(res, irg);
/* "bd" constructors for Call, Return, Load, Store, Alloc and Free.
 * Call/Return assemble a combined r_in[] on the stack (NEW_ARR_A) with the
 * memory/callee operands in front of the user-supplied arguments.
 * NOTE(review): declarations (`ir_node *res; int r_arity;`), the r_in[0..1]
 * assignments, return statements and closing braces are elided here. */
452 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
453 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
457 ir_graph *irg = current_ir_graph;
460 NEW_ARR_A(ir_node *, r_in, r_arity);
463 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
465 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
467 assert((get_unknown_type() == tp) || is_Method_type(tp));
468 set_Call_type(res, tp);
469 res->attr.call.exc.pin_state = op_pin_state_pinned;
470 res->attr.call.callee_arr = NULL;
471 res = optimize_node(res);
472 IRN_VRFY_IRG(res, irg);
477 new_bd_Return(dbg_info *db, ir_node *block,
478 ir_node *store, int arity, ir_node **in) {
482 ir_graph *irg = current_ir_graph;
485 NEW_ARR_A (ir_node *, r_in, r_arity);
487 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
488 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
489 res = optimize_node(res);
490 IRN_VRFY_IRG(res, irg);
492 } /* new_bd_Return */
/* Load: mode_T tuple, pinned, non-volatile, aligned by default. */
495 new_bd_Load(dbg_info *db, ir_node *block,
496 ir_node *store, ir_node *adr, ir_mode *mode) {
499 ir_graph *irg = current_ir_graph;
503 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
504 res->attr.load.exc.pin_state = op_pin_state_pinned;
505 res->attr.load.load_mode = mode;
506 res->attr.load.volatility = volatility_non_volatile;
507 res->attr.load.aligned = align_is_aligned;
508 res = optimize_node(res);
509 IRN_VRFY_IRG(res, irg);
514 new_bd_Store(dbg_info *db, ir_node *block,
515 ir_node *store, ir_node *adr, ir_node *val) {
518 ir_graph *irg = current_ir_graph;
523 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
524 res->attr.store.exc.pin_state = op_pin_state_pinned;
525 res->attr.store.volatility = volatility_non_volatile;
526 res->attr.store.aligned = align_is_aligned;
527 res = optimize_node(res);
528 IRN_VRFY_IRG(res, irg);
533 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
534 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
537 ir_graph *irg = current_ir_graph;
541 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
542 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
543 res->attr.alloc.where = where;
544 res->attr.alloc.type = alloc_type;
545 res = optimize_node(res);
546 IRN_VRFY_IRG(res, irg);
/* Free produces only a memory result (mode_M). */
551 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
552 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
555 ir_graph *irg = current_ir_graph;
560 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
561 res->attr.free.where = where;
562 res->attr.free.type = free_type;
563 res = optimize_node(res);
564 IRN_VRFY_IRG(res, irg);
/* "bd" constructors for Sel, SymConst, Sync, Confirm and Unknown.
 * NOTE(review): declarations, r_in[0..1] setup, return statements and
 * closing braces are elided from this listing. */
569 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
570 int arity, ir_node **in, ir_entity *ent) {
574 ir_graph *irg = current_ir_graph;
/* Result mode depends on the selected entity: method entities yield
   code pointers, everything else data pointers. */
575 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
577 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
580 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
583 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
585 * Sel's can select functions which should be of mode mode_P_code.
587 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
588 res->attr.sel.ent = ent;
589 res = optimize_node(res);
590 IRN_VRFY_IRG(res, irg);
595 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
596 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
597 ir_graph *irg = current_ir_graph;
598 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
600 res->attr.symc.num = symkind;
601 res->attr.symc.sym = value;
602 res->attr.symc.tp = tp;
604 res = optimize_node(res);
605 IRN_VRFY_IRG(res, irg);
607 } /* new_bd_SymConst_type */
/* Sync: dynamic arity (-1); predecessors added via add_Sync_pred later. */
610 new_bd_Sync(dbg_info *db, ir_node *block) {
612 ir_graph *irg = current_ir_graph;
614 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
615 /* no need to call optimize node here, Sync are always created with no predecessors */
616 IRN_VRFY_IRG(res, irg);
621 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
622 ir_node *in[2], *res;
623 ir_graph *irg = current_ir_graph;
627 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
628 res->attr.confirm.cmp = cmp;
629 res = optimize_node(res);
630 IRN_VRFY_IRG(res, irg);
632 } /* new_bd_Confirm */
/* Unknown: placed in the start block, no dbg info. */
635 new_bd_Unknown(ir_mode *m) {
637 ir_graph *irg = current_ir_graph;
639 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
640 res = optimize_node(res);
642 } /* new_bd_Unknown */
/* "bd" constructors for the interprocedural-view nodes: CallBegin,
 * EndReg, EndExcept, Break and Filter.  EndReg/EndExcept register
 * themselves with the graph via set_irg_end_reg/set_irg_end_except.
 * NOTE(review): declarations, return statements and closing braces are
 * elided from this listing. */
645 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
648 ir_graph *irg = current_ir_graph;
650 in[0] = get_Call_ptr(call);
651 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
652 /* res->attr.callbegin.irg = irg; */
653 res->attr.callbegin.call = call;
654 res = optimize_node(res);
655 IRN_VRFY_IRG(res, irg);
657 } /* new_bd_CallBegin */
660 new_bd_EndReg(dbg_info *db, ir_node *block) {
662 ir_graph *irg = current_ir_graph;
664 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
665 set_irg_end_reg(irg, res);
666 IRN_VRFY_IRG(res, irg);
668 } /* new_bd_EndReg */
671 new_bd_EndExcept(dbg_info *db, ir_node *block) {
673 ir_graph *irg = current_ir_graph;
675 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
676 set_irg_end_except(irg, res);
677 IRN_VRFY_IRG (res, irg);
679 } /* new_bd_EndExcept */
682 new_bd_Break(dbg_info *db, ir_node *block) {
684 ir_graph *irg = current_ir_graph;
686 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
687 res = optimize_node(res);
688 IRN_VRFY_IRG(res, irg);
/* Filter: Proj-like node for the interprocedural view. */
693 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
696 ir_graph *irg = current_ir_graph;
698 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
699 res->attr.filter.proj = proj;
700 res->attr.filter.in_cg = NULL;
701 res->attr.filter.backedge = NULL;
704 assert(get_Proj_pred(res));
705 assert(get_nodes_block(get_Proj_pred(res)));
707 res = optimize_node(res);
708 IRN_VRFY_IRG(res, irg);
710 } /* new_bd_Filter */
/* "bd" constructors for Mux, Psi and CopyB.
 * NOTE(review): declarations, in[]/cond setup lines, return statements
 * and closing braces are elided from this listing. */
713 new_bd_Mux(dbg_info *db, ir_node *block,
714 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
717 ir_graph *irg = current_ir_graph;
723 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
726 res = optimize_node(res);
727 IRN_VRFY_IRG(res, irg);
/* Psi: generalized Mux with `arity` (cond, val) pairs plus one default
   value, interleaved into a 2*arity+1 input array. */
732 new_bd_Psi(dbg_info *db, ir_node *block,
733 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
736 ir_graph *irg = current_ir_graph;
739 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
741 for (i = 0; i < arity; ++i) {
743 in[2 * i + 1] = vals[i];
747 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
750 res = optimize_node(res);
751 IRN_VRFY_IRG(res, irg);
756 new_bd_CopyB(dbg_info *db, ir_node *block,
757 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
760 ir_graph *irg = current_ir_graph;
766 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
768 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
769 res->attr.copyb.data_type = data_type;
770 res = optimize_node(res);
771 IRN_VRFY_IRG(res, irg);
/* Constructs an InstOf node (runtime type test) from store + objptr
 * against `type`; result is a mode_T tuple.
 * FIX(review): the node was created with op_Sel — a copy/paste slip from
 * new_bd_Sel — while the code then writes res->attr.instof.type, i.e.
 * InstOf attributes into a node tagged as Sel, misinterpreting the
 * attribute union.  Create it with op_InstOf so op and attributes agree.
 * NOTE(review): the in[]/res declarations and return are elided from
 * this listing; only the visible token is changed. */
776 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
777 ir_node *objptr, ir_type *type) {
780 ir_graph *irg = current_ir_graph;
784 res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
785 res->attr.instof.type = type;
786 res = optimize_node(res);
787 IRN_VRFY_IRG(res, irg);
789 } /* new_bd_InstOf */
/* "bd" constructors for Raise, Bound, Pin and ASM.  ASM copies the
 * constraint and clobber arrays onto the graph's obstack so the caller's
 * buffers need not outlive the node.
 * NOTE(review): declarations, in[] setup, return statements and closing
 * braces are elided from this listing. */
792 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
795 ir_graph *irg = current_ir_graph;
799 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
800 res = optimize_node(res);
801 IRN_VRFY_IRG(res, irg);
806 new_bd_Bound(dbg_info *db, ir_node *block,
807 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
810 ir_graph *irg = current_ir_graph;
816 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
817 res->attr.bound.exc.pin_state = op_pin_state_pinned;
818 res = optimize_node(res);
819 IRN_VRFY_IRG(res, irg);
/* Pin keeps the operand's mode. */
824 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
826 ir_graph *irg = current_ir_graph;
828 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
829 res = optimize_node(res);
830 IRN_VRFY_IRG(res, irg);
835 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
836 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
838 ir_graph *irg = current_ir_graph;
841 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
842 res->attr.assem.pin_state = op_pin_state_pinned;
843 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
844 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
845 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
846 res->attr.assem.asm_text = asm_text;
848 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
849 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
850 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
852 res = optimize_node(res);
853 IRN_VRFY_IRG(res, irg);
/* "rd" wrappers: save current_ir_graph, switch to the explicit irg,
 * delegate to the matching bd constructor, then restore.
 * NOTE(review): `ir_node *res;` declarations, `return res;` and closing
 * braces are elided from this listing. */
857 /* --------------------------------------------- */
858 /* private interfaces, for professional use only */
859 /* --------------------------------------------- */
861 /* Constructs a Block with a fixed number of predecessors.
862 Does not set current_block. Can not be used with automatic
863 Phi node construction. */
865 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
866 ir_graph *rem = current_ir_graph;
869 current_ir_graph = irg;
870 res = new_bd_Block(db, arity, in);
871 current_ir_graph = rem;
877 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
878 ir_graph *rem = current_ir_graph;
881 current_ir_graph = irg;
882 res = new_bd_Start(db, block);
883 current_ir_graph = rem;
889 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
891 ir_graph *rem = current_ir_graph;
893 current_ir_graph = irg;
894 res = new_bd_End(db, block);
895 current_ir_graph = rem;
900 /* Creates a Phi node with all predecessors. Calling this constructor
901 is only allowed if the corresponding block is mature. */
903 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
905 ir_graph *rem = current_ir_graph;
907 current_ir_graph = irg;
908 res = new_bd_Phi(db, block,arity, in, mode);
909 current_ir_graph = rem;
/* "rd" wrappers for Const family, Id, Proj, defaultProj, Conv, Cast and
 * Tuple, plus macro-generated DivMod/Borrow constructors.  Each switches
 * current_ir_graph to `irg`, delegates to the bd variant, and restores.
 * NOTE(review): declarations, `return res;` and closing braces are
 * elided from this listing. */
915 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
917 ir_graph *rem = current_ir_graph;
919 current_ir_graph = irg;
920 res = new_bd_Const_type(db, block, mode, con, tp);
921 current_ir_graph = rem;
924 } /* new_rd_Const_type */
927 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
929 ir_graph *rem = current_ir_graph;
931 current_ir_graph = irg;
932 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
933 current_ir_graph = rem;
939 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
940 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
941 } /* new_rd_Const_long */
944 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
946 ir_graph *rem = current_ir_graph;
948 current_ir_graph = irg;
949 res = new_bd_Id(db, block, val, mode);
950 current_ir_graph = rem;
956 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
959 ir_graph *rem = current_ir_graph;
961 current_ir_graph = irg;
962 res = new_bd_Proj(db, block, arg, mode, proj);
963 current_ir_graph = rem;
969 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
972 ir_graph *rem = current_ir_graph;
974 current_ir_graph = irg;
975 res = new_bd_defaultProj(db, block, arg, max_proj);
976 current_ir_graph = rem;
979 } /* new_rd_defaultProj */
/* rd Conv has no strict flag in its interface; delegates with strict=0. */
982 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
984 ir_graph *rem = current_ir_graph;
986 current_ir_graph = irg;
987 res = new_bd_Conv(db, block, op, mode, 0);
988 current_ir_graph = rem;
994 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
996 ir_graph *rem = current_ir_graph;
998 current_ir_graph = irg;
999 res = new_bd_Cast(db, block, op, to_tp);
1000 current_ir_graph = rem;
1006 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1008 ir_graph *rem = current_ir_graph;
1010 current_ir_graph = irg;
1011 res = new_bd_Tuple(db, block, arity, in);
1012 current_ir_graph = rem;
1015 } /* new_rd_Tuple */
/* Macro-generated rd constructors (others elided from this listing). */
1023 NEW_RD_DIVOP(DivMod)
1036 NEW_RD_BINOP(Borrow)
/* "rd" wrappers for Cmp, Jmp, IJmp, Cond, Call, Return, Load, Store and
 * Alloc — all follow the save/switch/delegate/restore pattern.
 * NOTE(review): declarations, `return res;` and closing braces are
 * elided from this listing. */
1039 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1040 ir_node *op1, ir_node *op2) {
1042 ir_graph *rem = current_ir_graph;
1044 current_ir_graph = irg;
1045 res = new_bd_Cmp(db, block, op1, op2);
1046 current_ir_graph = rem;
1052 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1054 ir_graph *rem = current_ir_graph;
1056 current_ir_graph = irg;
1057 res = new_bd_Jmp(db, block);
1058 current_ir_graph = rem;
1064 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1066 ir_graph *rem = current_ir_graph;
1068 current_ir_graph = irg;
1069 res = new_bd_IJmp(db, block, tgt);
1070 current_ir_graph = rem;
1076 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1078 ir_graph *rem = current_ir_graph;
1080 current_ir_graph = irg;
1081 res = new_bd_Cond(db, block, c);
1082 current_ir_graph = rem;
1088 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1089 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1091 ir_graph *rem = current_ir_graph;
1093 current_ir_graph = irg;
1094 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1095 current_ir_graph = rem;
1101 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1102 ir_node *store, int arity, ir_node **in) {
1104 ir_graph *rem = current_ir_graph;
1106 current_ir_graph = irg;
1107 res = new_bd_Return(db, block, store, arity, in);
1108 current_ir_graph = rem;
1111 } /* new_rd_Return */
1114 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1115 ir_node *store, ir_node *adr, ir_mode *mode) {
1117 ir_graph *rem = current_ir_graph;
1119 current_ir_graph = irg;
1120 res = new_bd_Load(db, block, store, adr, mode);
1121 current_ir_graph = rem;
1127 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1128 ir_node *store, ir_node *adr, ir_node *val) {
1130 ir_graph *rem = current_ir_graph;
1132 current_ir_graph = irg;
1133 res = new_bd_Store(db, block, store, adr, val);
1134 current_ir_graph = rem;
1137 } /* new_rd_Store */
1140 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1141 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1143 ir_graph *rem = current_ir_graph;
1145 current_ir_graph = irg;
1146 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1147 current_ir_graph = rem;
1150 } /* new_rd_Alloc */
/* "rd" wrappers for Free, Sel and SymConst.  new_rd_simpleSel is a Sel
 * with no index operands (arity 0).
 * NOTE(review): declarations, `return res;` and closing braces are
 * elided from this listing. */
1153 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1154 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1156 ir_graph *rem = current_ir_graph;
1158 current_ir_graph = irg;
1159 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1160 current_ir_graph = rem;
1166 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1167 ir_node *store, ir_node *objptr, ir_entity *ent) {
1169 ir_graph *rem = current_ir_graph;
1171 current_ir_graph = irg;
1172 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1173 current_ir_graph = rem;
1176 } /* new_rd_simpleSel */
1179 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1180 int arity, ir_node **in, ir_entity *ent) {
1182 ir_graph *rem = current_ir_graph;
1184 current_ir_graph = irg;
1185 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1186 current_ir_graph = rem;
1192 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1193 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1195 ir_graph *rem = current_ir_graph;
1197 current_ir_graph = irg;
1198 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1199 current_ir_graph = rem;
1202 } /* new_rd_SymConst_type */
1205 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1206 symconst_symbol value, symconst_kind symkind) {
1207 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1208 } /* new_rd_SymConst */
1210 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1211 symconst_symbol sym;
1212 sym.entity_p = symbol;
1213 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1214 } /* new_rd_SymConst_addr_ent */
1216 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1217 symconst_symbol sym;
1218 sym.entity_p = symbol;
1219 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1220 } /* new_rd_SymConst_ofs_ent */
1222 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1223 symconst_symbol sym;
1224 sym.ident_p = symbol;
1225 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1226 } /* new_rd_SymConst_addr_name */
1228 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1229 symconst_symbol sym;
1230 sym.type_p = symbol;
1231 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1232 } /* new_rd_SymConst_type_tag */
1234 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1235 symconst_symbol sym;
1236 sym.type_p = symbol;
1237 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1238 } /* new_rd_SymConst_size */
1240 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1241 symconst_symbol sym;
1242 sym.type_p = symbol;
1243 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1244 } /* new_rd_SymConst_align */
/* "rd" constructors for Sync, Bad, Confirm and Unknown.  new_rd_Sync
 * builds an empty Sync, then appends the predecessors one by one;
 * new_rd_Bad returns the graph's cached Bad node.
 * NOTE(review): declarations (`ir_node *res; int i;`), `return res;`
 * and closing braces are elided from this listing. */
1247 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1249 ir_graph *rem = current_ir_graph;
1252 current_ir_graph = irg;
1253 res = new_bd_Sync(db, block);
1254 current_ir_graph = rem;
1256 for (i = 0; i < arity; ++i)
1257 add_Sync_pred(res, in[i]);
1263 new_rd_Bad(ir_graph *irg) {
1264 return get_irg_bad(irg);
1268 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1270 ir_graph *rem = current_ir_graph;
1272 current_ir_graph = irg;
1273 res = new_bd_Confirm(db, block, val, bound, cmp);
1274 current_ir_graph = rem;
1277 } /* new_rd_Confirm */
1280 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1282 ir_graph *rem = current_ir_graph;
1284 current_ir_graph = irg;
1285 res = new_bd_Unknown(m);
1286 current_ir_graph = rem;
1289 } /* new_rd_Unknown */
/* "rd" constructors for the interprocedural view plus NoMem.  Note that
 * new_rd_EndReg/new_rd_EndExcept construct directly via new_ir_node with
 * the explicit irg instead of switching current_ir_graph; new_rd_NoMem
 * returns the graph's cached NoMem node.
 * NOTE(review): declarations, return-type lines, `return res;` and
 * closing braces are elided from this listing. */
1292 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1294 ir_graph *rem = current_ir_graph;
1296 current_ir_graph = irg;
1297 res = new_bd_CallBegin(db, block, call);
1298 current_ir_graph = rem;
1301 } /* new_rd_CallBegin */
1304 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1307 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1308 set_irg_end_reg(irg, res);
1309 IRN_VRFY_IRG(res, irg);
1311 } /* new_rd_EndReg */
1314 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1317 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1318 set_irg_end_except(irg, res);
1319 IRN_VRFY_IRG (res, irg);
1321 } /* new_rd_EndExcept */
1324 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1326 ir_graph *rem = current_ir_graph;
1328 current_ir_graph = irg;
1329 res = new_bd_Break(db, block);
1330 current_ir_graph = rem;
1333 } /* new_rd_Break */
1336 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1339 ir_graph *rem = current_ir_graph;
1341 current_ir_graph = irg;
1342 res = new_bd_Filter(db, block, arg, mode, proj);
1343 current_ir_graph = rem;
1346 } /* new_rd_Filter */
1349 new_rd_NoMem(ir_graph *irg) {
1350 return get_irg_no_mem(irg);
1351 } /* new_rd_NoMem */
/* "rd" wrappers for Mux, Psi, CopyB, InstOf, Raise, Bound, Pin and ASM —
 * the usual save/switch/delegate/restore pattern.
 * NOTE(review): declarations, `return res;` and closing braces are
 * elided from this listing. */
1354 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1355 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1357 ir_graph *rem = current_ir_graph;
1359 current_ir_graph = irg;
1360 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1361 current_ir_graph = rem;
1367 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1368 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1370 ir_graph *rem = current_ir_graph;
1372 current_ir_graph = irg;
1373 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1374 current_ir_graph = rem;
1379 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1380 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1382 ir_graph *rem = current_ir_graph;
1384 current_ir_graph = irg;
1385 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1386 current_ir_graph = rem;
1389 } /* new_rd_CopyB */
1392 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1393 ir_node *objptr, ir_type *type) {
1395 ir_graph *rem = current_ir_graph;
1397 current_ir_graph = irg;
1398 res = new_bd_InstOf(db, block, store, objptr, type);
1399 current_ir_graph = rem;
1402 } /* new_rd_InstOf */
1405 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1407 ir_graph *rem = current_ir_graph;
1409 current_ir_graph = irg;
1410 res = new_bd_Raise(db, block, store, obj);
1411 current_ir_graph = rem;
1414 } /* new_rd_Raise */
1416 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1417 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1419 ir_graph *rem = current_ir_graph;
1421 current_ir_graph = irg;
1422 res = new_bd_Bound(db, block, store, idx, lower, upper);
1423 current_ir_graph = rem;
1426 } /* new_rd_Bound */
1428 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1430 ir_graph *rem = current_ir_graph;
1432 current_ir_graph = irg;
1433 res = new_bd_Pin(db, block, node);
1434 current_ir_graph = rem;
1439 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1440 int arity, ir_node *in[], ir_asm_constraint *inputs,
1441 int n_outs, ir_asm_constraint *outputs,
1442 int n_clobber, ident *clobber[], ident *asm_text) {
1444 ir_graph *rem = current_ir_graph;
1446 current_ir_graph = irg;
1447 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1448 current_ir_graph = rem;
/* new_r_* constructors: "raw" variants without debug information.  Each
 * simply forwards to the corresponding new_rd_* constructor with a NULL
 * dbg_info.
 * NOTE(review): the closing braces of these one-line wrappers are elided
 * in this extraction; code left byte-identical. */
ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
return new_rd_Block(NULL, irg, arity, in);
ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
return new_rd_Start(NULL, irg, block);
ir_node *new_r_End(ir_graph *irg, ir_node *block) {
return new_rd_End(NULL, irg, block);
ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
return new_rd_Jmp(NULL, irg, block);
ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
return new_rd_IJmp(NULL, irg, block, tgt);
ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
return new_rd_Cond(NULL, irg, block, c);
ir_node *new_r_Return(ir_graph *irg, ir_node *block,
ir_node *store, int arity, ir_node **in) {
return new_rd_Return(NULL, irg, block, store, arity, in);
/* Constant constructors. */
ir_node *new_r_Const(ir_graph *irg, ir_node *block,
ir_mode *mode, tarval *con) {
return new_rd_Const(NULL, irg, block, mode, con);
ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
ir_mode *mode, long value) {
return new_rd_Const_long(NULL, irg, block, mode, value);
ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
ir_mode *mode, tarval *con, ir_type *tp) {
return new_rd_Const_type(NULL, irg, block, mode, con, tp);
ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
symconst_symbol value, symconst_kind symkind) {
return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
/* simpleSel is a Sel without array indices (entity selection only). */
ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
ir_node *objptr, ir_entity *ent) {
return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
ir_node *objptr, int n_index, ir_node **index,
return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
ir_node *callee, int arity, ir_node **in,
return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
/* Arithmetic and logic operations. */
ir_node *new_r_Add(ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Add(NULL, irg, block, op1, op2, mode);
ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Sub(NULL, irg, block, op1, op2, mode);
ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
ir_node *op, ir_mode *mode) {
return new_rd_Minus(NULL, irg, block, op, mode);
ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Mul(NULL, irg, block, op1, op2, mode);
ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
/* Division family: these take a memory operand (fragile ops). */
ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
ir_node *new_r_Div(ir_graph *irg, ir_node *block,
ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
ir_node *op, ir_mode *mode) {
return new_rd_Abs(NULL, irg, block, op, mode);
ir_node *new_r_And(ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_And(NULL, irg, block, op1, op2, mode);
ir_node *new_r_Or(ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Or(NULL, irg, block, op1, op2, mode);
ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Eor(NULL, irg, block, op1, op2, mode);
ir_node *new_r_Not(ir_graph *irg, ir_node *block,
ir_node *op, ir_mode *mode) {
return new_rd_Not(NULL, irg, block, op, mode);
/* Shift/rotate operations: @p k is the shift amount. */
ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Shl(NULL, irg, block, op, k, mode);
ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Shr(NULL, irg, block, op, k, mode);
ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Shrs(NULL, irg, block, op, k, mode);
ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Rot(NULL, irg, block, op, k, mode);
ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Carry(NULL, irg, block, op, k, mode);
ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Borrow(NULL, irg, block, op, k, mode);
ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2) {
return new_rd_Cmp(NULL, irg, block, op1, op2);
ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
ir_node *op, ir_mode *mode) {
return new_rd_Conv(NULL, irg, block, op, mode);
ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
return new_rd_Cast(NULL, irg, block, op, to_tp);
ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
ir_node **in, ir_mode *mode) {
return new_rd_Phi(NULL, irg, block, arity, in, mode);
/* Memory operations. */
ir_node *new_r_Load(ir_graph *irg, ir_node *block,
ir_node *store, ir_node *adr, ir_mode *mode) {
return new_rd_Load(NULL, irg, block, store, adr, mode);
ir_node *new_r_Store(ir_graph *irg, ir_node *block,
ir_node *store, ir_node *adr, ir_node *val) {
return new_rd_Store(NULL, irg, block, store, adr, val);
ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
return new_rd_Sync(NULL, irg, block, arity, in);
ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
ir_mode *mode, long proj) {
return new_rd_Proj(NULL, irg, block, arg, mode, proj);
ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
int arity, ir_node **in) {
return new_rd_Tuple(NULL, irg, block, arity, in );
ir_node *new_r_Id(ir_graph *irg, ir_node *block,
ir_node *val, ir_mode *mode) {
return new_rd_Id(NULL, irg, block, val, mode);
ir_node *new_r_Bad(ir_graph *irg) {
return new_rd_Bad(irg);
ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
return new_rd_Unknown(irg, m);
/* Interprocedural-view nodes. */
ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
return new_rd_CallBegin(NULL, irg, block, callee);
ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
return new_rd_EndReg(NULL, irg, block);
ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
return new_rd_EndExcept(NULL, irg, block);
ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
return new_rd_Break(NULL, irg, block);
ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
ir_mode *mode, long proj) {
return new_rd_Filter(NULL, irg, block, arg, mode, proj);
ir_node *new_r_NoMem(ir_graph *irg) {
return new_rd_NoMem(irg);
ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
return new_rd_InstOf(NULL, irg, block, store, objptr, type);
ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
ir_node *store, ir_node *obj) {
return new_rd_Raise(NULL, irg, block, store, obj);
ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
return new_rd_Pin(NULL, irg, block, node);
ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
int arity, ir_node *in[], ir_asm_constraint *inputs,
int n_outs, ir_asm_constraint *outputs,
int n_clobber, ident *clobber[], ident *asm_text) {
return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1698 /** ********************/
1699 /** public interfaces */
1700 /** construction tools */
1704 * - create a new Start node in the current block
1706 * @return s - pointer to the created Start node
/* new_d_Start: create a Start node (mode_T, no predecessors) in the
 * current block of current_ir_graph.
 * NOTE(review): result declarations/returns elided in this extraction;
 * code left byte-identical. */
new_d_Start(dbg_info *db) {
res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
op_Start, mode_T, 0, NULL);
res = optimize_node(res);
IRN_VRFY_IRG(res, current_ir_graph);
/* new_d_End: create the End node (mode_X, dynamic arity -1) in the
 * current block. */
new_d_End(dbg_info *db) {
res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
op_End, mode_X, -1, NULL);
res = optimize_node(res);
IRN_VRFY_IRG(res, current_ir_graph);
/* Constructs a Block with a fixed number of predecessors.
Does set current_block. Can be used with automatic Phi
node construction. */
new_d_Block(dbg_info *db, int arity, ir_node **in) {
int has_unknown = 0;
res = new_bd_Block(db, arity, in);
/* Create and initialize array for Phi-node construction. */
if (get_irg_phase_state(current_ir_graph) == phase_building) {
res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
current_ir_graph->n_loc);
memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan predecessors for Unknown nodes; see has_unknown guard below. */
for (i = arity-1; i >= 0; i--)
if (get_irn_op(in[i]) == op_Unknown) {
/* A block with an Unknown predecessor must not be optimized yet. */
if (!has_unknown) res = optimize_node(res);
/* This block becomes the current block for subsequent constructors. */
current_ir_graph->current_block = res;
IRN_VRFY_IRG(res, current_ir_graph);
1765 /* ***********************************************************************/
1766 /* Methods necessary for automatic Phi node creation */
1768 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1769 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1770 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1771 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1773 Call Graph: ( A ---> B == A "calls" B)
1775 get_value mature_immBlock
1783 get_r_value_internal |
1787 new_rd_Phi0 new_rd_Phi_in
1789 * *************************************************************************** */
/** Creates a Phi node with 0 predecessors.
 * Used as a placeholder in immature blocks; it is later exchanged for
 * the real Phi (or an Id) when the block is matured.
 * NOTE(review): result declaration and return elided in this extraction. */
static INLINE ir_node *
new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
IRN_VRFY_IRG(res, irg);
/* Creates a Phi node from a given in-array, optionally replacing the
 * placeholder @p phi0.  If at most one real predecessor remains, the Phi
 * is not needed and the single known value is returned instead.
 * NOTE(review): several lines (declarations, loop braces, returns) are
 * elided in this extraction; code left byte-identical. */
static INLINE ir_node *
new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
ir_node **in, int ins, ir_node *phi0) {
ir_node *res, *known;
/* Allocate a new node on the obstack. The allocation copies the in
array. */
res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
/* This loop checks whether the Phi has more than one predecessor.
If so, it is a real Phi node and we break the loop. Else the
Phi node merges the same definition on several paths and therefore
is not needed. Don't consider Bad nodes! */
for (i=0; i < ins; ++i)
in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
/* Optimize self referencing Phis: We can't detect them yet properly, as
they still refer to the Phi0 they will replace. So replace right now. */
if (phi0 && in[i] == phi0) in[i] = res;
if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
/* i==ins: there is at most one predecessor, we don't need a phi node. */
edges_node_deleted(res, current_ir_graph);
obstack_free (current_ir_graph->obst, res);
if (is_Phi(known)) {
/* If pred is a phi node we want to optimize it: If loops are matured in a bad
order, an enclosing Phi node may become superfluous. */
res = optimize_in_place_2(known);
exchange(known, res);
/* An undefined value, e.g., in unreachable code. */
res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
IRN_VRFY_IRG(res, irg);
/* Memory Phis in endless loops must be kept alive.
As we can't distinguish these easily we keep all of them alive. */
if ((res->op == op_Phi) && (mode == mode_M))
add_End_keepalive(get_irg_end(irg), res);
} /* new_rd_Phi_in */
1868 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1870 #if PRECISE_EXC_CONTEXT
1872 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1875 * Construct a new frag_array for node n.
1876 * Copy the content from the current graph_arr of the corresponding block:
1877 * this is the current state.
1878 * Set ProjM(n) as current memory state.
1879 * Further the last entry in frag_arr of current block points to n. This
1880 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Construct a new frag_arr for fragile op @p n: copy the current block's
 * graph_arr (the current SSA state), set ProjM(n) as the current memory
 * state, and chain n into block->last_frag_op via the last graph_arr slot.
 * NOTE(review): declarations (arr, opt), some braces and the return are
 * elided in this extraction; code left byte-identical. */
static INLINE ir_node **new_frag_arr(ir_node *n) {
arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
sizeof(ir_node *)*current_ir_graph->n_loc);
/* turn off optimization before allocating Proj nodes, as res isn't
finished yet. */
opt = get_opt_optimize(); set_optimize(0);
/* Here we rely on the fact that all frag ops have Memory as first result! */
if (get_irn_op(n) == op_Call)
arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
else if (get_irn_op(n) == op_CopyB)
arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
assert((pn_Quot_M == pn_DivMod_M) &&
(pn_Quot_M == pn_Div_M) &&
(pn_Quot_M == pn_Mod_M) &&
(pn_Quot_M == pn_Load_M) &&
(pn_Quot_M == pn_Store_M) &&
(pn_Quot_M == pn_Alloc_M) &&
(pn_Quot_M == pn_Bound_M));
arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain: the last graph_arr slot of the current block points to n. */
current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
} /* new_frag_arr */
1915 * Returns the frag_arr from a node.
/* Returns the frag_arr of a fragile op, dispatching on its opcode to the
 * attribute union member that stores it.
 * NOTE(review): the case labels of this switch are elided in this
 * extraction; code left byte-identical. */
static INLINE ir_node **get_frag_arr(ir_node *n) {
switch (get_irn_opcode(n)) {
return n->attr.call.exc.frag_arr;
return n->attr.alloc.exc.frag_arr;
return n->attr.load.exc.frag_arr;
return n->attr.store.exc.frag_arr;
return n->attr.except.frag_arr;
} /* get_frag_arr */
/* Record @p val at position @p pos in @p frag_arr (only if not already
 * set) and propagate along the chain of frag ops linked through the last
 * graph_arr slot.
 * NOTE(review): two variants are visible here — a recursive one and a
 * bounded-iteration one; the preprocessor conditionals selecting between
 * them appear to be elided in this extraction.  Code left byte-identical. */
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
if (!frag_arr[pos]) frag_arr[pos] = val;
if (frag_arr[current_ir_graph->n_loc - 1]) {
ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
assert(arr != frag_arr && "Endless recursion detected");
set_frag_value(arr, pos, val);
/* Iterative variant: follow the chain with a hard bound of 1000 hops. */
for (i = 0; i < 1000; ++i) {
if (!frag_arr[pos]) {
frag_arr[pos] = val;
if (frag_arr[current_ir_graph->n_loc - 1]) {
ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
assert(0 && "potential endless recursion");
} /* set_frag_value */
/* Look up the value at @p pos as seen along the exceptional edge of
 * fragile op @p cfOp: prefer the snapshot in the op's frag_arr, otherwise
 * fall back to (and build, if needed) the block-local value.
 * NOTE(review): declarations, some braces and returns elided in this
 * extraction; code left byte-identical. */
get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
frag_arr = get_frag_arr(cfOp);
res = frag_arr[pos];
if (block->attr.block.graph_arr[pos]) {
/* There was a set_value() after the cfOp and no get_value before that
set_value(). We must build a Phi node now. */
if (block->attr.block.is_matured) {
int ins = get_irn_arity(block);
NEW_ARR_A(ir_node *, nin, ins);
res = phi_merge(block, pos, mode, nin, ins);
res = new_rd_Phi0(current_ir_graph, block, mode);
res->attr.phi.u.pos = pos;
res->attr.phi.next = block->attr.block.phis;
block->attr.block.phis = res;
/* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
but this should be better: (remove comment if this works) */
/* It's a Phi, we can write this into all graph_arrs with NULL */
set_frag_value(block->attr.block.graph_arr, pos, res);
res = get_r_value_internal(block, pos, mode);
set_frag_value(block->attr.block.graph_arr, pos, res);
} /* get_r_frag_value_internal */
1995 #endif /* PRECISE_EXC_CONTEXT */
* check whether a control flow cf_pred is an exception flow.
*
* @param cf_pred the control flow node
* @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
/*
* Note: all projections from a raise are "exceptional control flow" so we handle it
* like a normal Jmp, because there is no "regular" one.
* That's why Raise is no "fragile_op"!
*/
if (is_fragile_op(prev_cf_op)) {
if (is_Proj(cf_pred)) {
if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
/* the regular control flow, NO exception */
assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
/* Hmm, exception but not a Proj? */
assert(!"unexpected condition: fragile op without a proj");
} /* is_exception_flow */
2026 * Computes the predecessors for the real phi node, and then
2027 * allocates and returns this node. The routine called to allocate the
2028 * node might optimize it away and return a real value.
2029 * This function must be called with an in-array of proper size.
/* Computes the predecessors for the real Phi node at value position
 * @p pos in @p block, then allocates it (new_rd_Phi_in may optimize it to
 * a plain value).  nin must have proper size @p ins.
 * NOTE(review): several lines (loop braces, else branches, returns) are
 * elided in this extraction; code left byte-identical. */
phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
ir_node *prevBlock, *res, *phi0, *phi0_all;
/* If this block has no value at pos create a Phi0 and remember it
in graph_arr to break recursions.
Else we may not set graph_arr as there a later value is remembered. */
if (!block->attr.block.graph_arr[pos]) {
if (block == get_irg_start_block(current_ir_graph)) {
/* Collapsing to Bad tarvals is no good idea.
So we call a user-supplied routine here that deals with this case as
appropriate for the given language. Sadly the only help we can give
here is the position.
Even if all variables are defined before use, it can happen that
we get to the start block, if a Cond has been replaced by a tuple
(bad, jmp). In this case we call the function needlessly, eventually
generating a non existent error.
However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
earlier. */
if (default_initialize_local_variable) {
ir_node *rem = get_cur_block();
set_cur_block(block);
block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
/* We don't need to care about exception ops in the start block.
There are none by definition. */
return block->attr.block.graph_arr[pos];
phi0 = new_rd_Phi0(current_ir_graph, block, mode);
block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
if (get_opt_precise_exc_context()) {
/* Set graph_arr for fragile ops. Also here we should break recursion.
We could choose a cyclic path through an cfop. But the recursion would
break at some point. */
set_frag_value(block->attr.block.graph_arr, pos, phi0);
/* This loop goes to all predecessor blocks of the block the Phi node
is in and there finds the operands of the Phi node by calling
get_r_value_internal. */
for (i = 1; i <= ins; ++i) {
ir_node *cf_pred = block->in[i];
ir_node *prevCfOp = skip_Proj(cf_pred);
if (is_Bad(prevCfOp)) {
/* In case a Cond has been optimized we would get right to the start block
with an invalid definition. */
nin[i-1] = new_Bad();
prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
nin[i-1] = new_Bad();
/* We want to pass the Phi0 node to the constructor: this finds additional
optimization possibilities.
The Phi0 node either is allocated in this function, or it comes from
a former call to get_r_value_internal. In this case we may not yet
exchange phi0, as this is done in mature_immBlock. */
phi0_all = block->attr.block.graph_arr[pos];
if (!((get_irn_op(phi0_all) == op_Phi) &&
(get_irn_arity(phi0_all) == 0) &&
(get_nodes_block(phi0_all) == block)))
/* After collecting all predecessors into the array nin a new Phi node
with these predecessors is created. This constructor contains an
optimization: If all predecessors of the Phi node are identical it
returns the only operand instead of a new Phi node. */
res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
/* In case we allocated a Phi0 node at the beginning of this procedure,
we need to exchange this Phi0 with the real Phi. */
exchange(phi0, res);
block->attr.block.graph_arr[pos] = res;
/* Don't set_frag_value as it does not overwrite. Doesn't matter, is
only an optimization. */
2142 * This function returns the last definition of a value. In case
2143 * this value was last defined in a previous block, Phi nodes are
2144 * inserted. If the part of the firm graph containing the definition
2145 * is not yet constructed, a dummy Phi node is returned.
2147 * @param block the current block
2148 * @param pos the value number of the value searched
2149 * @param mode the mode of this value (needed for Phi construction)
/* Returns the last definition of value @p pos in @p block, inserting Phi
 * (or placeholder Phi0) nodes as needed — the heart of the automatic SSA
 * construction.
 * NOTE(review): declarations, some braces and the final return are elided
 * in this extraction; code left byte-identical. */
get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
/* There are 4 cases to treat.
1. The block is not mature and we visit it the first time. We can not
create a proper Phi node, therefore a Phi0, i.e., a Phi without
predecessors is returned. This node is added to the linked list (field
"link") of the containing block to be completed when this block is
matured. (Completion will add a new Phi and turn the Phi0 into an Id
node.)
2. The value is already known in this block, graph_arr[pos] is set and we
visit the block the first time. We can return the value without
creating any new nodes.
3. The block is mature and we visit it the first time. A Phi node needs
to be created (phi_merge). If the Phi is not needed, as all it's
operands are the same value reaching the block through different
paths, it's optimized away and the value itself is returned.
4. The block is mature, and we visit it the second time. Now two
subcases are possible:
* The value was computed completely the last time we were here. This
is the case if there is no loop. We can return the proper value.
* The recursion that visited this node and set the flag did not
return yet. We are computing a value in a loop and need to
break the recursion. This case only happens if we visited
the same block with phi_merge before, which inserted a Phi0.
So we return the Phi0. */
/* case 4 -- already visited. */
if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
/* As phi_merge allocates a Phi0 this value is always defined. Here
is the critical difference of the two algorithms. */
assert(block->attr.block.graph_arr[pos]);
return block->attr.block.graph_arr[pos];
/* visited the first time */
set_irn_visited(block, get_irg_visited(current_ir_graph));
/* Get the local valid value */
res = block->attr.block.graph_arr[pos];
/* case 2 -- If the value is actually computed, return it. */
if (res) { return res; };
if (block->attr.block.is_matured) { /* case 3 */
/* The Phi has the same amount of ins as the corresponding block. */
int ins = get_irn_arity(block);
NEW_ARR_A (ir_node *, nin, ins);
/* Phi merge collects the predecessors and then creates a node. */
res = phi_merge (block, pos, mode, nin, ins);
} else { /* case 1 */
/* The block is not mature, we don't know how many in's are needed. A Phi
with zero predecessors is created. Such a Phi node is called Phi0
node. The Phi0 is then added to the list of Phi0 nodes in this block
to be matured by mature_immBlock later.
The Phi0 has to remember the pos of it's internal value. If the real
Phi is computed, pos is used to update the array with the local
value. */
res = new_rd_Phi0(current_ir_graph, block, mode);
res->attr.phi.u.pos = pos;
res->attr.phi.next = block->attr.block.phis;
block->attr.block.phis = res;
/* If we get here, the frontend missed a use-before-definition error */
printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
assert(mode->code >= irm_F && mode->code <= irm_P);
res = new_rd_Const(NULL, current_ir_graph, block, mode,
get_mode_null(mode));
/* The local valid value is available now. */
block->attr.block.graph_arr[pos] = res;
} /* get_r_value_internal */
2239 /* ************************************************************************** */
2242 * Finalize a Block node, when all control flows are known.
2243 * Acceptable parameters are only Block nodes.
/* Finalize a Block node when all its control-flow predecessors are known:
 * fix the backedge array, mature all attached Phi0 placeholders via
 * phi_merge, mark the block matured and optimize it in place.
 * NOTE(review): declarations (ins, nin, n, next), some braces and the
 * enclosing #if are elided in this extraction; code left byte-identical. */
mature_immBlock(ir_node *block) {
assert(is_Block(block));
if (!get_Block_matured(block)) {
ins = ARR_LEN(block->in)-1;
/* Fix block parameters */
block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
/* An array for building the Phi nodes. */
NEW_ARR_A(ir_node *, nin, ins);
/* Traverse a chain of Phi nodes attached to this block and mature
them. */
for (n = block->attr.block.phis; n; n = next) {
inc_irg_visited(current_ir_graph);
next = n->attr.phi.next;
exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
block->attr.block.is_matured = 1;
/* Now, as the block is a finished firm node, we can optimize it.
Since other nodes have been allocated since the block was created
we can not free the node on the obstack. Therefore we have to call
optimize_in_place().
Unfortunately the optimization does not change a lot, as all allocated
nodes refer to the unoptimized node.
We can call _2, as global cse has no effect on blocks. */
block = optimize_in_place_2(block);
IRN_VRFY_IRG(block, current_ir_graph);
} /* mature_immBlock */
/* new_d_*: constructors with debug info that build in the current block
 * of current_ir_graph (delegating to new_bd_*).
 * NOTE(review): return types and closing braces of several wrappers are
 * elided in this extraction; code left byte-identical. */
new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
/* Const family: note these are placed in the START block, not the
 * current block. */
new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
} /* new_d_Const_long */
new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
} /* new_d_Const_type */
new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
return new_bd_Id(db, current_ir_graph->current_block, val, mode);
new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* new_d_defaultProj: mark a Cond as fragmentary and create the default
 * Proj (the "otherwise" case) with number max_proj. */
new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
assert(arg->op == op_Cond);
arg->attr.cond.kind = fragmentary;
arg->attr.cond.default_proj = max_proj;
res = new_Proj(arg, mode_X, max_proj);
} /* new_d_defaultProj */
/* Conv: last argument 0 = normal, 1 = strict conversion. */
new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
} /* new_d_strictConv */
new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2351 * Allocate the frag array.
/* Allocate the frag array for a freshly built fragile op @p res, but only
 * while building with precise exception context, only if the node was not
 * optimized away to a different op, and only if it was not CSEd to a node
 * that already carries one. */
static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
if (get_opt_precise_exc_context()) {
if ((current_ir_graph->phase_state == phase_building) &&
(get_irn_op(res) == op) && /* Could be optimized away. */
!*frag_store) /* Could be a cse where the arr is already set. */ {
*frag_store = new_frag_arr(res);
} /* allocate_frag_arr */
/* new_d_Quot/DivMod/Div/Mod: fragile division ops in the current block;
 * each allocates its frag array for precise exception context.
 * NOTE(review): return types, returns, #endif lines and braces elided in
 * this extraction; code left byte-identical. */
new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
} /* new_d_DivMod */
new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2420 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2421 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2425 new_d_Jmp(dbg_info *db) {
2426 return new_bd_Jmp(db, current_ir_graph->current_block);
2430 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2431 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2435 new_d_Cond(dbg_info *db, ir_node *c) {
2436 return new_bd_Cond(db, current_ir_graph->current_block, c);
2440 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2443 res = new_bd_Call(db, current_ir_graph->current_block,
2444 store, callee, arity, in, tp);
2445 #if PRECISE_EXC_CONTEXT
2446 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2453 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2454 return new_bd_Return(db, current_ir_graph->current_block,
2456 } /* new_d_Return */
2459 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2461 res = new_bd_Load(db, current_ir_graph->current_block,
2463 #if PRECISE_EXC_CONTEXT
2464 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2471 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2473 res = new_bd_Store(db, current_ir_graph->current_block,
2475 #if PRECISE_EXC_CONTEXT
2476 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2483 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2484 ir_where_alloc where) {
2486 res = new_bd_Alloc(db, current_ir_graph->current_block,
2487 store, size, alloc_type, where);
2488 #if PRECISE_EXC_CONTEXT
2489 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2496 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2497 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2498 return new_bd_Free(db, current_ir_graph->current_block,
2499 store, ptr, size, free_type, where);
2503 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2504 /* GL: objptr was called frame before. Frame was a bad choice for the name
2505 as the operand could as well be a pointer to a dynamic object. */
2507 return new_bd_Sel(db, current_ir_graph->current_block,
2508 store, objptr, 0, NULL, ent);
2509 } /* new_d_simpleSel */
2512 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2513 return new_bd_Sel(db, current_ir_graph->current_block,
2514 store, objptr, n_index, index, sel);
2518 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2519 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2521 } /* new_d_SymConst_type */
2524 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2525 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2526 value, kind, firm_unknown_type);
2527 } /* new_d_SymConst */
2530 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2531 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2537 return _new_d_Bad();
2541 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2542 return new_bd_Confirm(db, current_ir_graph->current_block,
2544 } /* new_d_Confirm */
2547 new_d_Unknown(ir_mode *m) {
2548 return new_bd_Unknown(m);
2549 } /* new_d_Unknown */
2552 new_d_CallBegin(dbg_info *db, ir_node *call) {
2553 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2554 } /* new_d_CallBegin */
2557 new_d_EndReg(dbg_info *db) {
2558 return new_bd_EndReg(db, current_ir_graph->current_block);
2559 } /* new_d_EndReg */
2562 new_d_EndExcept(dbg_info *db) {
2563 return new_bd_EndExcept(db, current_ir_graph->current_block);
2564 } /* new_d_EndExcept */
2567 new_d_Break(dbg_info *db) {
2568 return new_bd_Break(db, current_ir_graph->current_block);
2572 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2573 return new_bd_Filter(db, current_ir_graph->current_block,
2575 } /* new_d_Filter */
2578 (new_d_NoMem)(void) {
2579 return _new_d_NoMem();
2583 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2584 ir_node *ir_true, ir_mode *mode) {
2585 return new_bd_Mux(db, current_ir_graph->current_block,
2586 sel, ir_false, ir_true, mode);
2590 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2591 return new_bd_Psi(db, current_ir_graph->current_block,
2592 arity, conds, vals, mode);
2595 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2596 ir_node *dst, ir_node *src, ir_type *data_type) {
2598 res = new_bd_CopyB(db, current_ir_graph->current_block,
2599 store, dst, src, data_type);
2600 #if PRECISE_EXC_CONTEXT
2601 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2607 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2608 return new_bd_InstOf(db, current_ir_graph->current_block,
2609 store, objptr, type);
2610 } /* new_d_InstOf */
2613 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2614 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2617 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2618 ir_node *idx, ir_node *lower, ir_node *upper) {
2620 res = new_bd_Bound(db, current_ir_graph->current_block,
2621 store, idx, lower, upper);
2622 #if PRECISE_EXC_CONTEXT
2623 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2629 new_d_Pin(dbg_info *db, ir_node *node) {
2630 return new_bd_Pin(db, current_ir_graph->current_block, node);
2634 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2635 int n_outs, ir_asm_constraint *outputs,
2636 int n_clobber, ident *clobber[], ident *asm_text) {
2637 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2640 /* ********************************************************************* */
2641 /* Comfortable interface with automatic Phi node construction. */
2642 /* (Uses also constructors of ?? interface, except new_Block. */
2643 /* ********************************************************************* */
2645 /* Block construction */
2646 /* immature Block without predecessors */
2648 new_d_immBlock(dbg_info *db) {
2651 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2652 /* creates a new dynamic in-array as length of in is -1 */
2653 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2654 current_ir_graph->current_block = res;
2656 /* macroblock head */
2659 res->attr.block.is_matured = 0;
2660 res->attr.block.is_dead = 0;
2661 res->attr.block.is_mb_head = 1;
2662 res->attr.block.has_label = 0;
2663 res->attr.block.irg = current_ir_graph;
2664 res->attr.block.backedge = NULL;
2665 res->attr.block.in_cg = NULL;
2666 res->attr.block.cg_backedge = NULL;
2667 res->attr.block.extblk = NULL;
2668 res->attr.block.region = NULL;
2669 res->attr.block.mb_depth = 0;
2670 res->attr.block.label = 0;
2672 set_Block_block_visited(res, 0);
2674 /* Create and initialize array for Phi-node construction. */
2675 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2676 current_ir_graph->n_loc);
2677 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2679 /* Immature block may not be optimized! */
2680 IRN_VRFY_IRG(res, current_ir_graph);
2683 } /* new_d_immBlock */
2686 new_immBlock(void) {
2687 return new_d_immBlock(NULL);
2688 } /* new_immBlock */
2690 /* immature PartBlock with its predecessors */
2692 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2693 ir_node *res = new_d_immBlock(db);
2694 ir_node *blk = get_nodes_block(pred_jmp);
2696 res->in[0] = blk->in[0];
2697 assert(res->in[0] != NULL);
2698 add_immBlock_pred(res, pred_jmp);
2700 res->attr.block.is_mb_head = 0;
2701 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2704 } /* new_d_immPartBlock */
2707 new_immPartBlock(ir_node *pred_jmp) {
2708 return new_d_immPartBlock(NULL, pred_jmp);
2709 } /* new_immPartBlock */
2711 /* add an edge to a jmp/control flow node */
2713 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2714 int n = ARR_LEN(block->in) - 1;
2716 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2717 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2718 assert(is_ir_node(jmp));
2720 ARR_APP1(ir_node *, block->in, jmp);
2722 hook_set_irn_n(block, n, jmp, NULL);
2723 } /* add_immBlock_pred */
2725 /* changing the current block */
2727 set_cur_block(ir_node *target) {
2728 current_ir_graph->current_block = target;
2729 } /* set_cur_block */
2731 /* ************************ */
2732 /* parameter administration */
2734 /* get a value from the parameter array from the current block by its index */
2736 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2737 ir_graph *irg = current_ir_graph;
2738 assert(get_irg_phase_state(irg) == phase_building);
2739 inc_irg_visited(irg);
2742 return get_r_value_internal(irg->current_block, pos + 1, mode);
2745 /* get a value from the parameter array from the current block by its index */
2747 get_value(int pos, ir_mode *mode) {
2748 return get_d_value(NULL, pos, mode);
2751 /* set a value at position pos in the parameter array from the current block */
2753 set_value(int pos, ir_node *value) {
2754 ir_graph *irg = current_ir_graph;
2755 assert(get_irg_phase_state(irg) == phase_building);
2756 assert(pos+1 < irg->n_loc);
2757 assert(is_ir_node(value));
2758 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2761 /* Find the value number for a node in the current block.*/
2763 find_value(ir_node *value) {
2765 ir_node *bl = current_ir_graph->current_block;
2767 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2768 if (bl->attr.block.graph_arr[i] == value)
2773 /* get the current store */
2776 ir_graph *irg = current_ir_graph;
2778 assert(get_irg_phase_state(irg) == phase_building);
2779 /* GL: one could call get_value instead */
2780 inc_irg_visited(irg);
2781 return get_r_value_internal(irg->current_block, 0, mode_M);
2784 /* set the current store: handles automatic Sync construction for Load nodes */
2786 set_store(ir_node *store) {
2787 ir_node *load, *pload, *pred, *in[2];
2789 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2790 /* Beware: due to dead code elimination, a store might become a Bad node even in
2791 the construction phase. */
2792 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2794 if (get_opt_auto_create_sync()) {
2795 /* handle non-volatile Load nodes by automatically creating Sync's */
2796 load = skip_Proj(store);
2797 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2798 pred = get_Load_mem(load);
2800 if (is_Sync(pred)) {
2801 /* a Load after a Sync: move it up */
2802 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2804 set_Load_mem(load, get_memop_mem(mem));
2805 add_Sync_pred(pred, store);
2808 pload = skip_Proj(pred);
2809 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2810 /* a Load after a Load: create a new Sync */
2811 set_Load_mem(load, get_Load_mem(pload));
2815 store = new_Sync(2, in);
2820 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2824 keep_alive(ir_node *ka) {
2825 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2828 /* --- Useful access routines --- */
2829 /* Returns the current block of the current graph. To set the current
2830 block use set_cur_block. */
2831 ir_node *get_cur_block(void) {
2832 return get_irg_current_block(current_ir_graph);
2833 } /* get_cur_block */
2835 /* Returns the frame type of the current graph */
2836 ir_type *get_cur_frame_type(void) {
2837 return get_irg_frame_type(current_ir_graph);
2838 } /* get_cur_frame_type */
2841 /* ********************************************************************* */
2844 /* call once for each run of the library */
2846 init_cons(uninitialized_local_variable_func_t *func) {
2847 default_initialize_local_variable = func;
2851 irp_finalize_cons(void) {
2853 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2854 irg_finalize_cons(get_irp_irg(i));
2856 irp->phase_state = phase_high;
2857 } /* irp_finalize_cons */
2860 ir_node *new_Block(int arity, ir_node **in) {
2861 return new_d_Block(NULL, arity, in);
2863 ir_node *new_Start(void) {
2864 return new_d_Start(NULL);
2866 ir_node *new_End(void) {
2867 return new_d_End(NULL);
2869 ir_node *new_Jmp(void) {
2870 return new_d_Jmp(NULL);
2872 ir_node *new_IJmp(ir_node *tgt) {
2873 return new_d_IJmp(NULL, tgt);
2875 ir_node *new_Cond(ir_node *c) {
2876 return new_d_Cond(NULL, c);
2878 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2879 return new_d_Return(NULL, store, arity, in);
2881 ir_node *new_Const(ir_mode *mode, tarval *con) {
2882 return new_d_Const(NULL, mode, con);
2885 ir_node *new_Const_long(ir_mode *mode, long value) {
2886 return new_d_Const_long(NULL, mode, value);
2889 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2890 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2893 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2894 return new_d_SymConst_type(NULL, mode, value, kind, type);
2896 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2897 return new_d_SymConst(NULL, mode, value, kind);
2899 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2900 return new_d_simpleSel(NULL, store, objptr, ent);
2902 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2904 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2906 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2908 return new_d_Call(NULL, store, callee, arity, in, tp);
2910 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2911 return new_d_Add(NULL, op1, op2, mode);
2913 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2914 return new_d_Sub(NULL, op1, op2, mode);
2916 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2917 return new_d_Minus(NULL, op, mode);
2919 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2920 return new_d_Mul(NULL, op1, op2, mode);
2922 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2923 return new_d_Mulh(NULL, op1, op2, mode);
2925 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2926 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2928 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2929 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2931 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2932 return new_d_Div(NULL, memop, op1, op2, mode, state);
2934 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2935 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2937 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2938 return new_d_Abs(NULL, op, mode);
2940 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2941 return new_d_And(NULL, op1, op2, mode);
2943 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
2944 return new_d_Or(NULL, op1, op2, mode);
2946 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
2947 return new_d_Eor(NULL, op1, op2, mode);
2949 ir_node *new_Not(ir_node *op, ir_mode *mode) {
2950 return new_d_Not(NULL, op, mode);
2952 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
2953 return new_d_Shl(NULL, op, k, mode);
2955 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
2956 return new_d_Shr(NULL, op, k, mode);
2958 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
2959 return new_d_Shrs(NULL, op, k, mode);
2961 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
2962 return new_d_Rot(NULL, op, k, mode);
2964 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
2965 return new_d_Carry(NULL, op1, op2, mode);
2967 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
2968 return new_d_Borrow(NULL, op1, op2, mode);
2970 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
2971 return new_d_Cmp(NULL, op1, op2);
2973 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
2974 return new_d_Conv(NULL, op, mode);
2976 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
2977 return new_d_strictConv(NULL, op, mode);
2979 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
2980 return new_d_Cast(NULL, op, to_tp);
2982 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
2983 return new_d_Phi(NULL, arity, in, mode);
2985 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
2986 return new_d_Load(NULL, store, addr, mode);
2988 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
2989 return new_d_Store(NULL, store, addr, val);
2991 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
2992 ir_where_alloc where) {
2993 return new_d_Alloc(NULL, store, size, alloc_type, where);
2995 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
2996 ir_type *free_type, ir_where_alloc where) {
2997 return new_d_Free(NULL, store, ptr, size, free_type, where);
2999 ir_node *new_Sync(int arity, ir_node *in[]) {
3000 return new_d_Sync(NULL, arity, in);
3002 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3003 return new_d_Proj(NULL, arg, mode, proj);
3005 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3006 return new_d_defaultProj(NULL, arg, max_proj);
3008 ir_node *new_Tuple(int arity, ir_node **in) {
3009 return new_d_Tuple(NULL, arity, in);
3011 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3012 return new_d_Id(NULL, val, mode);
3014 ir_node *new_Bad(void) {
3017 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3018 return new_d_Confirm(NULL, val, bound, cmp);
3020 ir_node *new_Unknown(ir_mode *m) {
3021 return new_d_Unknown(m);
3023 ir_node *new_CallBegin(ir_node *callee) {
3024 return new_d_CallBegin(NULL, callee);
3026 ir_node *new_EndReg(void) {
3027 return new_d_EndReg(NULL);
3029 ir_node *new_EndExcept(void) {
3030 return new_d_EndExcept(NULL);
3032 ir_node *new_Break(void) {
3033 return new_d_Break(NULL);
3035 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3036 return new_d_Filter(NULL, arg, mode, proj);
3038 ir_node *new_NoMem(void) {
3039 return new_d_NoMem();
3041 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3042 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3044 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3045 return new_d_Psi(NULL, arity, conds, vals, mode);
3047 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3048 return new_d_CopyB(NULL, store, dst, src, data_type);
3050 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3051 return new_d_InstOf(NULL, store, objptr, ent);
3053 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3054 return new_d_Raise(NULL, store, obj);
3056 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3057 return new_d_Bound(NULL, store, idx, lower, upper);
3059 ir_node *new_Pin(ir_node *node) {
3060 return new_d_Pin(NULL, node);
3062 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3063 int n_outs, ir_asm_constraint *outputs,
3064 int n_clobber, ident *clobber[], ident *asm_text) {
3065 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3068 /* create a new anchor node */
3069 ir_node *new_Anchor(ir_graph *irg) {
3070 ir_node *in[anchor_last];
3071 memset(in, 0, sizeof(in));
3072 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);