2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
47 /* Uncomment to use original code instead of generated one */
48 // #define USE_ORIGINAL
50 /* when we need verifying */
52 # define IRN_VRFY_IRG(res, irg)
54 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
58 * Language dependent variable initialization callback.
60 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* creates a bd constructor for a binop */
#define NEW_BD_BINOP(instr)                                       \
static ir_node *                                                  \
new_bd_##instr(dbg_info *db, ir_node *block,                      \
       ir_node *op1, ir_node *op2, ir_mode *mode)                 \
{                                                                 \
    ir_node  *in[2];                                              \
    ir_node  *res;                                                \
    ir_graph *irg = current_ir_graph;                             \
    in[0] = op1;                                                  \
    in[1] = op2;                                                  \
    res = new_ir_node(db, irg, block, op_##instr, mode, 2, in);   \
    res = optimize_node(res);                                     \
    IRN_VRFY_IRG(res, irg);                                       \
    return res;                                                   \
}

/* creates a bd constructor for an unop */
#define NEW_BD_UNOP(instr)                                        \
static ir_node *                                                  \
new_bd_##instr(dbg_info *db, ir_node *block,                      \
              ir_node *op, ir_mode *mode)                         \
{                                                                 \
    ir_node  *res;                                                \
    ir_graph *irg = current_ir_graph;                             \
    res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op);  \
    res = optimize_node(res);                                     \
    IRN_VRFY_IRG(res, irg);                                       \
    return res;                                                   \
}

/* creates a bd constructor for a divop: the node is mode_T with the
   requested result mode stored in the divmod attribute. */
#define NEW_BD_DIVOP(instr)                                       \
static ir_node *                                                  \
new_bd_##instr(dbg_info *db, ir_node *block,                      \
            ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
{                                                                 \
    ir_node  *in[3];                                              \
    ir_node  *res;                                                \
    ir_graph *irg = current_ir_graph;                             \
    in[0] = memop;                                                \
    in[1] = op1;                                                  \
    in[2] = op2;                                                  \
    res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
    res->attr.divmod.exc.pin_state = state;                       \
    res->attr.divmod.res_mode = mode;                             \
    res->attr.divmod.no_remainder = 0;                            \
    res = optimize_node(res);                                     \
    IRN_VRFY_IRG(res, irg);                                       \
    return res;                                                   \
}
/* creates a rd constructor for a binop: switches current_ir_graph to irg,
   delegates to the bd constructor, then restores the previous graph. */
#define NEW_RD_BINOP(instr)                                       \
ir_node *                                                         \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,       \
       ir_node *op1, ir_node *op2, ir_mode *mode)                 \
{                                                                 \
    ir_node  *res;                                                \
    ir_graph *rem = current_ir_graph;                             \
    current_ir_graph = irg;                                       \
    res = new_bd_##instr(db, block, op1, op2, mode);              \
    current_ir_graph = rem;                                       \
    return res;                                                   \
}

/* creates a rd constructor for an unop */
#define NEW_RD_UNOP(instr)                                        \
ir_node *                                                         \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,       \
       ir_node *op, ir_mode *mode)                                \
{                                                                 \
    ir_node  *res;                                                \
    ir_graph *rem = current_ir_graph;                             \
    current_ir_graph = irg;                                       \
    res = new_bd_##instr(db, block, op, mode);                    \
    current_ir_graph = rem;                                       \
    return res;                                                   \
}

/* creates a rd constructor for a divop */
#define NEW_RD_DIVOP(instr)                                       \
ir_node *                                                         \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,       \
       ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
{                                                                 \
    ir_node  *res;                                                \
    ir_graph *rem = current_ir_graph;                             \
    current_ir_graph = irg;                                       \
    res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
    current_ir_graph = rem;                                       \
    return res;                                                   \
}
/* creates a d constructor for a binop: builds in the current block of
   the current graph. */
#define NEW_D_BINOP(instr)                                                      \
ir_node *                                                                       \
new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) {        \
    return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
}

/* creates a d constructor for an unop */
#define NEW_D_UNOP(instr)                                                       \
ir_node *                                                                       \
new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) {                       \
    return new_bd_##instr(db, current_ir_graph->current_block, op, mode);       \
}
171 #include "gen_ir_cons.c.inl"
175 * Constructs a Block with a fixed number of predecessors.
176 * Does not set current_block. Cannot be used with automatic
177 * Phi node construction.
180 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
182 ir_graph *irg = current_ir_graph;
184 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
186 /* macroblock header */
189 res->attr.block.is_dead = 0;
190 res->attr.block.is_mb_head = 1;
191 res->attr.block.has_label = 0;
192 res->attr.block.irg = irg;
193 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
194 res->attr.block.in_cg = NULL;
195 res->attr.block.cg_backedge = NULL;
196 res->attr.block.extblk = NULL;
197 res->attr.block.mb_depth = 0;
198 res->attr.block.label = 0;
200 set_Block_matured(res, 1);
201 set_Block_block_visited(res, 0);
203 IRN_VRFY_IRG(res, irg);
208 new_bd_Start(dbg_info *db, ir_node *block) {
210 ir_graph *irg = current_ir_graph;
212 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
214 IRN_VRFY_IRG(res, irg);
219 new_bd_End(dbg_info *db, ir_node *block) {
221 ir_graph *irg = current_ir_graph;
223 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
225 IRN_VRFY_IRG(res, irg);
231 * Creates a Phi node with all predecessors. Calling this constructor
232 * is only allowed if the corresponding block is mature.
235 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
237 ir_graph *irg = current_ir_graph;
241 /* Don't assert that block matured: the use of this constructor is strongly
243 if (get_Block_matured(block))
244 assert(get_irn_arity(block) == arity);
246 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
248 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
250 for (i = arity - 1; i >= 0; --i)
251 if (is_Unknown(in[i])) {
256 if (!has_unknown) res = optimize_node(res);
257 IRN_VRFY_IRG(res, irg);
259 /* Memory Phis in endless loops must be kept alive.
260 As we can't distinguish these easily we keep all of them alive. */
261 if (is_Phi(res) && mode == mode_M)
262 add_End_keepalive(get_irg_end(irg), res);
268 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
270 ir_graph *irg = current_ir_graph;
272 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
273 res->attr.con.tv = con;
274 set_Const_type(res, tp); /* Call method because of complex assertion. */
275 res = optimize_node (res);
276 assert(get_Const_type(res) == tp);
277 IRN_VRFY_IRG(res, irg);
280 } /* new_bd_Const_type */
284 new_bd_Const(dbg_info *db, tarval *con) {
285 ir_graph *irg = current_ir_graph;
287 return new_rd_Const_type (db, irg, con, firm_unknown_type);
291 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
292 ir_graph *irg = current_ir_graph;
294 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
295 } /* new_bd_Const_long */
299 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
301 ir_graph *irg = current_ir_graph;
303 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
310 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
313 ir_graph *irg = current_ir_graph;
315 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
316 res->attr.proj = proj;
319 assert(get_Proj_pred(res));
320 assert(get_nodes_block(get_Proj_pred(res)));
322 res = optimize_node(res);
324 IRN_VRFY_IRG(res, irg);
330 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
333 ir_graph *irg = current_ir_graph;
335 assert(arg->op == op_Cond);
336 arg->attr.cond.kind = fragmentary;
337 arg->attr.cond.default_proj = max_proj;
338 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
340 } /* new_bd_defaultProj */
343 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
345 ir_graph *irg = current_ir_graph;
347 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
348 res->attr.conv.strict = strict_flag;
349 res = optimize_node(res);
350 IRN_VRFY_IRG(res, irg);
356 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
358 ir_graph *irg = current_ir_graph;
360 assert(is_atomic_type(to_tp));
362 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
363 res->attr.cast.totype = to_tp;
364 res = optimize_node(res);
365 IRN_VRFY_IRG(res, irg);
370 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
372 ir_graph *irg = current_ir_graph;
374 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
375 res = optimize_node (res);
376 IRN_VRFY_IRG(res, irg);
404 /** Creates a remainderless Div node. */
405 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
406 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
410 ir_graph *irg = current_ir_graph;
414 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
415 res->attr.divmod.exc.pin_state = state;
416 res->attr.divmod.res_mode = mode;
417 res->attr.divmod.no_remainder = 1;
418 res = optimize_node(res);
419 IRN_VRFY_IRG(res, irg);
425 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
428 ir_graph *irg = current_ir_graph;
431 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
432 res = optimize_node(res);
433 IRN_VRFY_IRG(res, irg);
438 new_bd_Jmp(dbg_info *db, ir_node *block) {
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
443 res = optimize_node(res);
444 IRN_VRFY_IRG(res, irg);
449 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
451 ir_graph *irg = current_ir_graph;
453 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
454 res = optimize_node(res);
455 IRN_VRFY_IRG(res, irg);
460 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
462 ir_graph *irg = current_ir_graph;
464 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
465 res->attr.cond.kind = dense;
466 res->attr.cond.default_proj = 0;
467 res->attr.cond.pred = COND_JMP_PRED_NONE;
468 res = optimize_node(res);
469 IRN_VRFY_IRG(res, irg);
475 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
476 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
480 ir_graph *irg = current_ir_graph;
483 NEW_ARR_A(ir_node *, r_in, r_arity);
486 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
488 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
490 assert((get_unknown_type() == tp) || is_Method_type(tp));
491 set_Call_type(res, tp);
492 res->attr.call.exc.pin_state = op_pin_state_pinned;
493 res->attr.call.callee_arr = NULL;
494 res = optimize_node(res);
495 IRN_VRFY_IRG(res, irg);
500 new_bd_Return(dbg_info *db, ir_node *block,
501 ir_node *store, int arity, ir_node **in) {
505 ir_graph *irg = current_ir_graph;
508 NEW_ARR_A (ir_node *, r_in, r_arity);
510 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
511 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
512 res = optimize_node(res);
513 IRN_VRFY_IRG(res, irg);
515 } /* new_bd_Return */
518 new_bd_Load(dbg_info *db, ir_node *block,
519 ir_node *store, ir_node *adr, ir_mode *mode) {
522 ir_graph *irg = current_ir_graph;
526 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
527 res->attr.load.exc.pin_state = op_pin_state_pinned;
528 res->attr.load.load_mode = mode;
529 res->attr.load.volatility = volatility_non_volatile;
530 res->attr.load.aligned = align_is_aligned;
531 res = optimize_node(res);
532 IRN_VRFY_IRG(res, irg);
537 new_bd_Store(dbg_info *db, ir_node *block,
538 ir_node *store, ir_node *adr, ir_node *val) {
541 ir_graph *irg = current_ir_graph;
546 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
547 res->attr.store.exc.pin_state = op_pin_state_pinned;
548 res->attr.store.volatility = volatility_non_volatile;
549 res->attr.store.aligned = align_is_aligned;
550 res = optimize_node(res);
551 IRN_VRFY_IRG(res, irg);
556 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
557 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
560 ir_graph *irg = current_ir_graph;
564 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
565 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
566 res->attr.alloc.where = where;
567 res->attr.alloc.type = alloc_type;
568 res = optimize_node(res);
569 IRN_VRFY_IRG(res, irg);
574 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
575 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
578 ir_graph *irg = current_ir_graph;
583 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
584 res->attr.free.where = where;
585 res->attr.free.type = free_type;
586 res = optimize_node(res);
587 IRN_VRFY_IRG(res, irg);
592 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
593 int arity, ir_node **in, ir_entity *ent) {
597 ir_graph *irg = current_ir_graph;
598 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
600 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
603 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
606 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
608 * Sel's can select functions which should be of mode mode_P_code.
610 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
611 res->attr.sel.ent = ent;
612 res = optimize_node(res);
613 IRN_VRFY_IRG(res, irg);
618 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
619 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
620 ir_graph *irg = current_ir_graph;
621 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
623 res->attr.symc.kind = symkind;
624 res->attr.symc.sym = value;
625 res->attr.symc.tp = tp;
627 res = optimize_node(res);
628 IRN_VRFY_IRG(res, irg);
630 } /* new_bd_SymConst_type */
633 new_bd_Sync(dbg_info *db, ir_node *block) {
635 ir_graph *irg = current_ir_graph;
637 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
638 /* no need to call optimize node here, Sync are always created with no predecessors */
639 IRN_VRFY_IRG(res, irg);
644 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
645 ir_node *in[2], *res;
646 ir_graph *irg = current_ir_graph;
650 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
651 res->attr.confirm.cmp = cmp;
652 res = optimize_node(res);
653 IRN_VRFY_IRG(res, irg);
655 } /* new_bd_Confirm */
658 new_bd_Unknown(ir_mode *m) {
660 ir_graph *irg = current_ir_graph;
662 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
663 res = optimize_node(res);
665 } /* new_bd_Unknown */
668 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
671 ir_graph *irg = current_ir_graph;
673 in[0] = get_Call_ptr(call);
674 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
675 /* res->attr.callbegin.irg = irg; */
676 res->attr.callbegin.call = call;
677 res = optimize_node(res);
678 IRN_VRFY_IRG(res, irg);
680 } /* new_bd_CallBegin */
683 new_bd_EndReg(dbg_info *db, ir_node *block) {
685 ir_graph *irg = current_ir_graph;
687 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
688 set_irg_end_reg(irg, res);
689 IRN_VRFY_IRG(res, irg);
691 } /* new_bd_EndReg */
694 new_bd_EndExcept(dbg_info *db, ir_node *block) {
696 ir_graph *irg = current_ir_graph;
698 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
699 set_irg_end_except(irg, res);
700 IRN_VRFY_IRG (res, irg);
702 } /* new_bd_EndExcept */
705 new_bd_Break(dbg_info *db, ir_node *block) {
707 ir_graph *irg = current_ir_graph;
709 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
710 res = optimize_node(res);
711 IRN_VRFY_IRG(res, irg);
716 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
719 ir_graph *irg = current_ir_graph;
721 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
722 res->attr.filter.proj = proj;
723 res->attr.filter.in_cg = NULL;
724 res->attr.filter.backedge = NULL;
727 assert(get_Proj_pred(res));
728 assert(get_nodes_block(get_Proj_pred(res)));
730 res = optimize_node(res);
731 IRN_VRFY_IRG(res, irg);
733 } /* new_bd_Filter */
736 new_bd_Mux(dbg_info *db, ir_node *block,
737 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
740 ir_graph *irg = current_ir_graph;
746 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
749 res = optimize_node(res);
750 IRN_VRFY_IRG(res, irg);
755 new_bd_CopyB(dbg_info *db, ir_node *block,
756 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
759 ir_graph *irg = current_ir_graph;
765 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
767 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
768 res->attr.copyb.data_type = data_type;
769 res = optimize_node(res);
770 IRN_VRFY_IRG(res, irg);
775 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
776 ir_node *objptr, ir_type *type) {
779 ir_graph *irg = current_ir_graph;
783 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
784 res->attr.instof.type = type;
785 res = optimize_node(res);
786 IRN_VRFY_IRG(res, irg);
788 } /* new_bd_InstOf */
791 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
794 ir_graph *irg = current_ir_graph;
798 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
799 res = optimize_node(res);
800 IRN_VRFY_IRG(res, irg);
805 new_bd_Bound(dbg_info *db, ir_node *block,
806 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
809 ir_graph *irg = current_ir_graph;
815 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
816 res->attr.bound.exc.pin_state = op_pin_state_pinned;
817 res = optimize_node(res);
818 IRN_VRFY_IRG(res, irg);
823 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
825 ir_graph *irg = current_ir_graph;
827 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
828 res = optimize_node(res);
829 IRN_VRFY_IRG(res, irg);
834 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
835 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
837 ir_graph *irg = current_ir_graph;
840 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
841 res->attr.assem.pin_state = op_pin_state_pinned;
842 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
843 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
844 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
845 res->attr.assem.asm_text = asm_text;
847 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
848 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
849 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
851 res = optimize_node(res);
852 IRN_VRFY_IRG(res, irg);
856 /* --------------------------------------------- */
857 /* private interfaces, for professional use only */
858 /* --------------------------------------------- */
861 /* Constructs a Block with a fixed number of predecessors.
862 Does not set current_block. Can not be used with automatic
863 Phi node construction. */
865 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
866 ir_graph *rem = current_ir_graph;
869 current_ir_graph = irg;
870 res = new_bd_Block(db, arity, in);
871 current_ir_graph = rem;
877 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
878 ir_graph *rem = current_ir_graph;
881 current_ir_graph = irg;
882 res = new_bd_Start(db, block);
883 current_ir_graph = rem;
889 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
891 ir_graph *rem = current_ir_graph;
893 current_ir_graph = irg;
894 res = new_bd_End(db, block);
895 current_ir_graph = rem;
901 /* Creates a Phi node with all predecessors. Calling this constructor
902 is only allowed if the corresponding block is mature. */
904 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
906 ir_graph *rem = current_ir_graph;
908 current_ir_graph = irg;
909 res = new_bd_Phi(db, block,arity, in, mode);
910 current_ir_graph = rem;
917 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
919 ir_graph *rem = current_ir_graph;
921 current_ir_graph = irg;
922 res = new_bd_Const_type(db, con, tp);
923 current_ir_graph = rem;
926 } /* new_rd_Const_type */
930 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
933 ir_graph *rem = current_ir_graph;
935 current_ir_graph = irg;
936 res = new_bd_Const_type(db, con, firm_unknown_type);
937 current_ir_graph = rem;
939 res = new_rd_Const_type(db, irg, con, firm_unknown_type);
946 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
947 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
948 } /* new_rd_Const_long */
952 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
954 ir_graph *rem = current_ir_graph;
956 current_ir_graph = irg;
957 res = new_bd_Id(db, block, val, mode);
958 current_ir_graph = rem;
964 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
967 ir_graph *rem = current_ir_graph;
969 current_ir_graph = irg;
970 res = new_bd_Proj(db, block, arg, mode, proj);
971 current_ir_graph = rem;
978 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
981 ir_graph *rem = current_ir_graph;
983 current_ir_graph = irg;
984 res = new_bd_defaultProj(db, block, arg, max_proj);
985 current_ir_graph = rem;
988 } /* new_rd_defaultProj */
991 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
993 ir_graph *rem = current_ir_graph;
995 current_ir_graph = irg;
996 res = new_bd_Conv(db, block, op, mode, 0);
997 current_ir_graph = rem;
1004 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1006 ir_graph *rem = current_ir_graph;
1008 current_ir_graph = irg;
1009 res = new_bd_Cast(db, block, op, to_tp);
1010 current_ir_graph = rem;
1016 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1018 ir_graph *rem = current_ir_graph;
1020 current_ir_graph = irg;
1021 res = new_bd_Tuple(db, block, arity, in);
1022 current_ir_graph = rem;
1025 } /* new_rd_Tuple */
1034 NEW_RD_DIVOP(DivMod)
1049 NEW_RD_BINOP(Borrow)
1051 /* creates a rd constructor for an divRL */
1052 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1053 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1056 ir_graph *rem = current_ir_graph;
1057 current_ir_graph = irg;
1058 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1059 current_ir_graph = rem;
1065 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1066 ir_node *op1, ir_node *op2) {
1068 ir_graph *rem = current_ir_graph;
1070 current_ir_graph = irg;
1071 res = new_bd_Cmp(db, block, op1, op2);
1072 current_ir_graph = rem;
1078 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1080 ir_graph *rem = current_ir_graph;
1082 current_ir_graph = irg;
1083 res = new_bd_Jmp(db, block);
1084 current_ir_graph = rem;
1090 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1092 ir_graph *rem = current_ir_graph;
1094 current_ir_graph = irg;
1095 res = new_bd_IJmp(db, block, tgt);
1096 current_ir_graph = rem;
1102 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1104 ir_graph *rem = current_ir_graph;
1106 current_ir_graph = irg;
1107 res = new_bd_Cond(db, block, c);
1108 current_ir_graph = rem;
1115 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1116 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1118 ir_graph *rem = current_ir_graph;
1120 current_ir_graph = irg;
1121 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1122 current_ir_graph = rem;
1128 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1129 ir_node *store, int arity, ir_node **in) {
1131 ir_graph *rem = current_ir_graph;
1133 current_ir_graph = irg;
1134 res = new_bd_Return(db, block, store, arity, in);
1135 current_ir_graph = rem;
1138 } /* new_rd_Return */
1141 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1142 ir_node *store, ir_node *adr, ir_mode *mode) {
1144 ir_graph *rem = current_ir_graph;
1146 current_ir_graph = irg;
1147 res = new_bd_Load(db, block, store, adr, mode);
1148 current_ir_graph = rem;
1154 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1155 ir_node *store, ir_node *adr, ir_node *val) {
1157 ir_graph *rem = current_ir_graph;
1159 current_ir_graph = irg;
1160 res = new_bd_Store(db, block, store, adr, val);
1161 current_ir_graph = rem;
1164 } /* new_rd_Store */
1167 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1168 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1170 ir_graph *rem = current_ir_graph;
1172 current_ir_graph = irg;
1173 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1174 current_ir_graph = rem;
1177 } /* new_rd_Alloc */
1180 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1181 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1183 ir_graph *rem = current_ir_graph;
1185 current_ir_graph = irg;
1186 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1187 current_ir_graph = rem;
1193 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1194 ir_node *store, ir_node *objptr, ir_entity *ent) {
1196 ir_graph *rem = current_ir_graph;
1198 current_ir_graph = irg;
1199 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1200 current_ir_graph = rem;
1203 } /* new_rd_simpleSel */
1206 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1207 int arity, ir_node **in, ir_entity *ent) {
1209 ir_graph *rem = current_ir_graph;
1211 current_ir_graph = irg;
1212 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1213 current_ir_graph = rem;
1219 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1220 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1222 ir_graph *rem = current_ir_graph;
1224 current_ir_graph = irg;
1225 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1226 current_ir_graph = rem;
1229 } /* new_rd_SymConst_type */
1232 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1233 symconst_symbol value, symconst_kind symkind) {
1234 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1235 } /* new_rd_SymConst */
1237 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1238 symconst_symbol sym;
1239 sym.entity_p = symbol;
1240 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1241 } /* new_rd_SymConst_addr_ent */
1243 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1244 symconst_symbol sym;
1245 sym.entity_p = symbol;
1246 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1247 } /* new_rd_SymConst_ofs_ent */
1249 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1250 symconst_symbol sym;
1251 sym.ident_p = symbol;
1252 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1253 } /* new_rd_SymConst_addr_name */
1255 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1256 symconst_symbol sym;
1257 sym.type_p = symbol;
1258 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1259 } /* new_rd_SymConst_type_tag */
1261 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1262 symconst_symbol sym;
1263 sym.type_p = symbol;
1264 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1265 } /* new_rd_SymConst_size */
1267 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1268 symconst_symbol sym;
1269 sym.type_p = symbol;
1270 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1271 } /* new_rd_SymConst_align */
1274 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1276 ir_graph *rem = current_ir_graph;
1279 current_ir_graph = irg;
1280 res = new_bd_Sync(db, block);
1281 current_ir_graph = rem;
1283 for (i = 0; i < arity; ++i)
1284 add_Sync_pred(res, in[i]);
1290 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1292 ir_graph *rem = current_ir_graph;
1294 current_ir_graph = irg;
1295 res = new_bd_Confirm(db, block, val, bound, cmp);
1296 current_ir_graph = rem;
1299 } /* new_rd_Confirm */
1302 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1304 ir_graph *rem = current_ir_graph;
1306 current_ir_graph = irg;
1307 res = new_bd_Unknown(m);
1308 current_ir_graph = rem;
1311 } /* new_rd_Unknown */
1314 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1316 ir_graph *rem = current_ir_graph;
1318 current_ir_graph = irg;
1319 res = new_bd_CallBegin(db, block, call);
1320 current_ir_graph = rem;
1323 } /* new_rd_CallBegin */
1326 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1329 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1330 set_irg_end_reg(irg, res);
1331 IRN_VRFY_IRG(res, irg);
1333 } /* new_rd_EndReg */
1336 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1339 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1340 set_irg_end_except(irg, res);
1341 IRN_VRFY_IRG (res, irg);
1343 } /* new_rd_EndExcept */
1346 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1348 ir_graph *rem = current_ir_graph;
1350 current_ir_graph = irg;
1351 res = new_bd_Break(db, block);
1352 current_ir_graph = rem;
1355 } /* new_rd_Break */
1358 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1361 ir_graph *rem = current_ir_graph;
1363 current_ir_graph = irg;
1364 res = new_bd_Filter(db, block, arg, mode, proj);
1365 current_ir_graph = rem;
1368 } /* new_rd_Filter */
1371 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1372 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1374 ir_graph *rem = current_ir_graph;
1376 current_ir_graph = irg;
1377 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1378 current_ir_graph = rem;
1383 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1384 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1386 ir_graph *rem = current_ir_graph;
1388 current_ir_graph = irg;
1389 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1390 current_ir_graph = rem;
1393 } /* new_rd_CopyB */
1396 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1397 ir_node *objptr, ir_type *type) {
1399 ir_graph *rem = current_ir_graph;
1401 current_ir_graph = irg;
1402 res = new_bd_InstOf(db, block, store, objptr, type);
1403 current_ir_graph = rem;
1406 } /* new_rd_InstOf */
1409 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1411 ir_graph *rem = current_ir_graph;
1413 current_ir_graph = irg;
1414 res = new_bd_Raise(db, block, store, obj);
1415 current_ir_graph = rem;
1418 } /* new_rd_Raise */
1420 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1421 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1423 ir_graph *rem = current_ir_graph;
1425 current_ir_graph = irg;
1426 res = new_bd_Bound(db, block, store, idx, lower, upper);
1427 current_ir_graph = rem;
1430 } /* new_rd_Bound */
/* Constructs a Pin node on graph irg via the save/switch/restore pattern
   around new_bd_Pin().
   NOTE(review): listing truncated — return statement and closing brace
   are not visible here. */
1432 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1434 ir_graph *rem = current_ir_graph;
1436 current_ir_graph = irg;
1437 res = new_bd_Pin(db, block, node);
1438 current_ir_graph = rem;
/* Constructs an ASM (inline assembler) node on graph irg with the given
   input/output constraints and clobber list, via the save/switch/restore
   pattern around new_bd_ASM().
   NOTE(review): listing truncated — return statement and closing brace
   are not visible here. */
1443 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1444 int arity, ir_node *in[], ir_asm_constraint *inputs,
1445 int n_outs, ir_asm_constraint *outputs,
1446 int n_clobber, ident *clobber[], ident *asm_text) {
1448 ir_graph *rem = current_ir_graph;
1450 current_ir_graph = irg;
1451 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1452 current_ir_graph = rem;
/* Convenience constructors without debug info: each new_r_X(irg, ...)
   simply forwards to new_rd_X(NULL, irg, ...).  Exceptions visible below:
   new_r_Bad() and new_r_NoMem() return the graph's singleton Bad/NoMem
   nodes, and new_r_simpleSel() builds a Sel with an empty index array.
   NOTE(review): this listing appears truncated — the closing braces of
   these one-statement wrappers are not visible. */
1459 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1460 return new_rd_Block(NULL, irg, arity, in);
1462 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1463 return new_rd_Start(NULL, irg, block);
1465 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1466 return new_rd_End(NULL, irg, block);
1468 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1469 return new_rd_Jmp(NULL, irg, block);
1471 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1472 return new_rd_IJmp(NULL, irg, block, tgt);
1474 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1475 return new_rd_Cond(NULL, irg, block, c);
1478 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1479 ir_node *store, int arity, ir_node **in) {
1480 return new_rd_Return(NULL, irg, block, store, arity, in);
1482 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
1483 return new_rd_Const(NULL, irg, con);
1485 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
1486 return new_rd_Const_long(NULL, irg, mode, value);
1489 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
1490 return new_rd_Const_type(NULL, irg, con, tp);
1493 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1494 symconst_symbol value, symconst_kind symkind) {
1495 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
1497 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1498 ir_node *objptr, ir_entity *ent) {
1499 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1501 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1502 ir_node *objptr, int n_index, ir_node **index,
1504 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1506 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1507 ir_node *callee, int arity, ir_node **in,
1509 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1512 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1513 ir_node *op1, ir_node *op2, ir_mode *mode) {
1514 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1517 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1518 ir_node *op1, ir_node *op2, ir_mode *mode) {
1519 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1521 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1522 ir_node *op, ir_mode *mode) {
1523 return new_rd_Minus(NULL, irg, block, op, mode);
1525 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1526 ir_node *op1, ir_node *op2, ir_mode *mode) {
1527 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1529 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1530 ir_node *op1, ir_node *op2, ir_mode *mode) {
1531 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1533 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1534 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1535 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1537 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1538 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1539 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1542 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1543 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1544 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1547 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1548 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1549 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1551 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1552 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1553 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1555 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1556 ir_node *op, ir_mode *mode) {
1557 return new_rd_Abs(NULL, irg, block, op, mode);
1559 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1560 ir_node *op1, ir_node *op2, ir_mode *mode) {
1561 return new_rd_And(NULL, irg, block, op1, op2, mode);
1563 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1564 ir_node *op1, ir_node *op2, ir_mode *mode) {
1565 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1567 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1568 ir_node *op1, ir_node *op2, ir_mode *mode) {
1569 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1571 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1572 ir_node *op, ir_mode *mode) {
1573 return new_rd_Not(NULL, irg, block, op, mode);
1575 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1576 ir_node *op, ir_node *k, ir_mode *mode) {
1577 return new_rd_Shl(NULL, irg, block, op, k, mode);
1579 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1580 ir_node *op, ir_node *k, ir_mode *mode) {
1581 return new_rd_Shr(NULL, irg, block, op, k, mode);
1583 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1584 ir_node *op, ir_node *k, ir_mode *mode) {
1585 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1587 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1588 ir_node *op, ir_node *k, ir_mode *mode) {
1589 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1591 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1592 ir_node *op, ir_node *k, ir_mode *mode) {
1593 return new_rd_Carry(NULL, irg, block, op, k, mode);
1595 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1596 ir_node *op, ir_node *k, ir_mode *mode) {
1597 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1600 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1601 ir_node *op1, ir_node *op2) {
1602 return new_rd_Cmp(NULL, irg, block, op1, op2);
1605 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1606 ir_node *op, ir_mode *mode) {
1607 return new_rd_Conv(NULL, irg, block, op, mode);
1610 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1611 return new_rd_Cast(NULL, irg, block, op, to_tp);
1614 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1615 ir_node **in, ir_mode *mode) {
1616 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1618 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1619 ir_node *store, ir_node *adr, ir_mode *mode) {
1620 return new_rd_Load(NULL, irg, block, store, adr, mode);
1622 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1623 ir_node *store, ir_node *adr, ir_node *val) {
1624 return new_rd_Store(NULL, irg, block, store, adr, val);
1626 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1627 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1628 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1630 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1631 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1632 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1634 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1635 return new_rd_Sync(NULL, irg, block, arity, in);
1638 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1639 ir_mode *mode, long proj) {
1640 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1643 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1645 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1648 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1649 int arity, ir_node **in) {
1650 return new_rd_Tuple(NULL, irg, block, arity, in );
1652 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1653 ir_node *val, ir_mode *mode) {
1654 return new_rd_Id(NULL, irg, block, val, mode);
/* Bad is a singleton per graph -- no construction needed. */
1657 ir_node *new_r_Bad(ir_graph *irg) {
1658 return get_irg_bad(irg);
1660 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1661 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1663 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1664 return new_rd_Unknown(irg, m);
1666 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1667 return new_rd_CallBegin(NULL, irg, block, callee);
1669 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1670 return new_rd_EndReg(NULL, irg, block);
1672 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1673 return new_rd_EndExcept(NULL, irg, block);
1675 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1676 return new_rd_Break(NULL, irg, block);
1678 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1679 ir_mode *mode, long proj) {
1680 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
/* NoMem is a singleton per graph -- no construction needed. */
1682 ir_node *new_r_NoMem(ir_graph *irg) {
1683 return get_irg_no_mem(irg);
1685 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1686 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1687 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1689 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1690 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1691 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1693 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1695 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1697 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1698 ir_node *store, ir_node *obj) {
1699 return new_rd_Raise(NULL, irg, block, store, obj);
1701 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1702 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1703 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1705 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1706 return new_rd_Pin(NULL, irg, block, node);
1708 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1709 int arity, ir_node *in[], ir_asm_constraint *inputs,
1710 int n_outs, ir_asm_constraint *outputs,
1711 int n_clobber, ident *clobber[], ident *asm_text) {
1712 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1715 /** ********************/
1716 /** public interfaces */
1717 /** construction tools */
1723 * - create a new Start node in the current block
1725 * @return s - pointer to the created Start node
/* Builds a Start node (mode_T) in current_ir_graph's current block,
   optimizes and verifies it.
   NOTE(review): listing truncated -- the declaration of res and the
   return statement are not visible here. */
1730 new_d_Start(dbg_info *db) {
1733 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1734 op_Start, mode_T, 0, NULL);
1736 res = optimize_node(res);
1737 IRN_VRFY_IRG(res, current_ir_graph);
/* Builds an End node (mode_X) in the current block.  The arity -1 marks
   a dynamic in-array: keep-alive edges are added later.
   NOTE(review): listing truncated -- return statement not visible. */
1742 new_d_End(dbg_info *db) {
1744 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1745 op_End, mode_X, -1, NULL);
1746 res = optimize_node(res);
1747 IRN_VRFY_IRG(res, current_ir_graph);
1752 /* Constructs a Block with a fixed number of predecessors.
1753 Does set current_block. Can be used with automatic Phi
1754 node construction. */
1756 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1759 int has_unknown = 0;
1761 res = new_bd_Block(db, arity, in);
1763 /* Create and initialize array for Phi-node construction. */
1764 if (get_irg_phase_state(current_ir_graph) == phase_building) {
/* graph_arr holds the current SSA value per local-variable slot (n_loc
   entries), zero-initialized so "no value yet" is represented by NULL. */
1765 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1766 current_ir_graph->n_loc);
1767 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan predecessors: a block with an Unknown predecessor must not be
   optimized yet (the Unknown may be replaced during construction). */
1770 for (i = arity-1; i >= 0; i--)
1771 if (is_Unknown(in[i])) {
1776 if (!has_unknown) res = optimize_node(res);
/* Subsequent new_d_* constructors place their nodes in this block. */
1777 current_ir_graph->current_block = res;
1779 IRN_VRFY_IRG(res, current_ir_graph);
1785 /* ***********************************************************************/
1786 /* Methods necessary for automatic Phi node creation */
1788 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1789 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1790 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1791 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1793 Call Graph: ( A ---> B == A "calls" B)
1795 get_value mature_immBlock
1803 get_r_value_internal |
1807 new_rd_Phi0 new_rd_Phi_in
1809 * *************************************************************************** */
1811 /** Creates a Phi node with 0 predecessors. */
1812 static inline ir_node *
1813 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
/* Phi0 is a placeholder used while the block is immature; it is NOT
   optimized (its real predecessors are supplied in mature_immBlock). */
1816 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1817 IRN_VRFY_IRG(res, irg);
1823 * Internal constructor of a Phi node by a phi_merge operation.
1825 * @param irg    the graph on which the Phi will be constructed
1826 * @param block  the block in which the Phi will be constructed
1827 * @param mode   the mode of the Phi node
1828 * @param in     the input array of the phi node
1829 * @param ins    number of elements in the input array
1830 * @param phi0   in non-NULL: the Phi0 node in the same block that represents
1831 *               the value for which the new Phi is constructed
1833 static inline ir_node *
1834 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1835 ir_node **in, int ins, ir_node *phi0) {
1837 ir_node *res, *known;
1839 /* Allocate a new node on the obstack. The allocation copies the in
1841 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1842 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1844 /* This loop checks whether the Phi has more than one predecessor.
1845 If so, it is a real Phi node and we break the loop. Else the
1846 Phi node merges the same definition on several paths and therefore
1847 is not needed. Don't consider Bad nodes! */
1849 for (i = ins - 1; i >= 0; --i) {
1852 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1854 /* Optimize self referencing Phis: We can't detect them yet properly, as
1855 they still refer to the Phi0 they will replace. So replace right now. */
1856 if (phi0 && in[i] == phi0)
/* Skip inputs that are the Phi itself, the single known value seen so
   far, or Bad -- they do not make this a "real" multi-input Phi. */
1859 if (in[i] == res || in[i] == known || is_Bad(in[i]))
1868 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* Unneeded Phi: discard the freshly allocated node (it is the topmost
   obstack object, so it can be freed) and return the single value. */
1871 edges_node_deleted(res, current_ir_graph);
1872 obstack_free(current_ir_graph->obst, res);
1873 if (is_Phi(known)) {
1874 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1875 order, an enclosing Phi node may get superfluous. */
1876 res = optimize_in_place_2(known);
1878 exchange(known, res);
/* An undefined value, e.g., in unreachable code. */
1887 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1888 IRN_VRFY_IRG(res, irg);
1889 /* Memory Phis in endless loops must be kept alive.
1890 As we can't distinguish these easily we keep all of them alive. */
1891 if (is_Phi(res) && mode == mode_M)
1892 add_End_keepalive(get_irg_end(irg), res);
1896 } /* new_rd_Phi_in */
1899 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1901 #if PRECISE_EXC_CONTEXT
1903 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1906 * Construct a new frag_array for node n.
1907 * Copy the content from the current graph_arr of the corresponding block:
1908 * this is the current state.
1909 * Set ProjM(n) as current memory state.
1910 * Further the last entry in frag_arr of current block points to n. This
1911 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1913 static inline ir_node **new_frag_arr(ir_node *n) {
1917 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1918 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1919 sizeof(ir_node *)*current_ir_graph->n_loc);
1921 /* turn off optimization before allocating Proj nodes, as res isn't
1923 opt = get_opt_optimize(); set_optimize(0);
1924 /* Here we rely on the fact that all frag ops have Memory as first result! */
1926 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1927 } else if (is_CopyB(n)) {
1928 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops share the same memory-Proj number; the
   asserts below document (and check) that assumption. */
1930 assert((pn_Quot_M == pn_DivMod_M) &&
1931 (pn_Quot_M == pn_Div_M) &&
1932 (pn_Quot_M == pn_Mod_M) &&
1933 (pn_Quot_M == pn_Load_M) &&
1934 (pn_Quot_M == pn_Store_M) &&
1935 (pn_Quot_M == pn_Alloc_M) &&
1936 (pn_Quot_M == pn_Bound_M));
1937 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain: the block's last graph_arr slot remembers the newest frag op. */
1941 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1943 } /* new_frag_arr */
1946 * Returns the frag_arr from a node.
1948 static inline ir_node **get_frag_arr(ir_node *n) {
/* The frag array lives in an opcode-specific attribute slot; dispatch on
   the opcode to find it.  NOTE(review): listing truncated -- the case
   labels of this switch are not visible here. */
1949 switch (get_irn_opcode(n)) {
1951 return n->attr.call.exc.frag_arr;
1953 return n->attr.alloc.exc.frag_arr;
1955 return n->attr.load.exc.frag_arr;
1957 return n->attr.store.exc.frag_arr;
1959 return n->attr.except.frag_arr;
1961 } /* get_frag_arr */
/* Records val at slot pos in frag_arr and in all later frag arrays of the
   same block (following the chain stored in the last slot), stopping at
   the first array that already has a value at pos. */
1964 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1965 #ifdef DEBUG_libfirm
/* Debug build: bound the chain walk to detect endless recursion. */
1968 for (i = 1024; i >= 0; --i)
1973 if (frag_arr[pos] == NULL)
1974 frag_arr[pos] = val;
1975 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
1976 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1977 assert(arr != frag_arr && "Endless recursion detected");
1982 assert(!"potential endless recursion in set_frag_value");
1983 } /* set_frag_value */
/* Returns the definition of value pos valid after the fragile op cfOp:
   reads the frag array of cfOp; if empty, falls back to a Phi (matured or
   Phi0) or the block's regular value, then caches the result in all frag
   arrays along the chain. */
1986 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1990 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
1992 frag_arr = get_frag_arr(cfOp);
1993 res = frag_arr[pos];
1995 if (block->attr.block.graph_arr[pos] != NULL) {
1996 /* There was a set_value() after the cfOp and no get_value() before that
1997 set_value(). We must build a Phi node now. */
1998 if (block->attr.block.is_matured) {
1999 int ins = get_irn_arity(block);
2001 NEW_ARR_A(ir_node *, nin, ins);
2002 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: create a Phi0 placeholder and queue it on the
   block's phi list for completion in mature_immBlock(). */
2004 res = new_rd_Phi0(current_ir_graph, block, mode);
2005 res->attr.phi.u.pos = pos;
2006 res->attr.phi.next = block->attr.block.phis;
2007 block->attr.block.phis = res;
2009 assert(res != NULL);
2010 /* It's a Phi, we can write this into all graph_arrs with NULL */
2011 set_frag_value(block->attr.block.graph_arr, pos, res);
2013 res = get_r_value_internal(block, pos, mode);
2014 set_frag_value(block->attr.block.graph_arr, pos, res);
2018 } /* get_r_frag_value_internal */
2019 #endif /* PRECISE_EXC_CONTEXT */
2022 * Check whether a control flownode cf_pred represents an exception flow.
2024 * @param cf_pred     the control flow node
2025 * @param prev_cf_op  if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2027 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2029 * Note: all projections from a raise are "exceptional control flow", so we handle it
2030 * like a normal Jmp, because there is no "regular" one.
2031 * That's why Raise is no "fragile_op"!
2033 if (is_fragile_op(prev_cf_op)) {
2034 if (is_Proj(cf_pred)) {
2035 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2036 /* the regular control flow, NO exception */
2039 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2042 /* Hmm, exception but not a Proj? */
2043 assert(!"unexpected condition: fragile op without a proj");
2047 } /* is_exception_flow */
2050 * Computes the predecessors for the real phi node, and then
2051 * allocates and returns this node. The routine called to allocate the
2052 * node might optimize it away and return a real value.
2053 * This function must be called with an in-array of proper size.
2056 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2057 ir_node *prevBlock, *res, *phi0, *phi0_all;
2060 /* If this block has no value at pos create a Phi0 and remember it
2061 in graph_arr to break recursions.
2062 Else we may not set graph_arr as there a later value is remembered. */
2064 if (block->attr.block.graph_arr[pos] == NULL) {
2065 ir_graph *irg = current_ir_graph;
2067 if (block == get_irg_start_block(irg)) {
2068 /* Collapsing to Bad tarvals is no good idea.
2069 So we call a user-supplied routine here that deals with this case as
2070 appropriate for the given language. Sorrily the only help we can give
2071 here is the position.
2073 Even if all variables are defined before use, it can happen that
2074 we get to the start block, if a Cond has been replaced by a tuple
2075 (bad, jmp). In this case we call the function needlessly, eventually
2076 generating an non existent error.
2077 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2080 if (default_initialize_local_variable != NULL) {
2081 ir_node *rem = get_cur_block();
2083 set_cur_block(block);
/* pos - 1: slot 0 is the memory state, user variables start at 1. */
2084 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2088 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2089 /* We don't need to care about exception ops in the start block.
2090 There are none by definition. */
2091 return block->attr.block.graph_arr[pos];
/* Non-start block: plant a Phi0 placeholder to break recursion. */
2093 phi0 = new_rd_Phi0(irg, block, mode);
2094 block->attr.block.graph_arr[pos] = phi0;
2095 #if PRECISE_EXC_CONTEXT
2096 if (get_opt_precise_exc_context()) {
2097 /* Set graph_arr for fragile ops. Also here we should break recursion.
2098 We could choose a cyclic path through an cfop. But the recursion would
2099 break at some point. */
2100 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2106 /* This loop goes to all predecessor blocks of the block the Phi node
2107 is in and there finds the operands of the Phi node by calling
2108 get_r_value_internal. */
2109 for (i = 1; i <= ins; ++i) {
2110 ir_node *cf_pred = block->in[i];
2111 ir_node *prevCfOp = skip_Proj(cf_pred);
2113 if (is_Bad(prevCfOp)) {
2114 /* In case a Cond has been optimized we would get right to the start block
2115 with an invalid definition. */
2116 nin[i-1] = new_Bad();
2119 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2121 if (!is_Bad(prevBlock)) {
2122 #if PRECISE_EXC_CONTEXT
2123 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2124 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2125 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2128 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2130 nin[i-1] = new_Bad();
2134 /* We want to pass the Phi0 node to the constructor: this finds additional
2135 optimization possibilities.
2136 The Phi0 node either is allocated in this function, or it comes from
2137 a former call to get_r_value_internal(). In this case we may not yet
2138 exchange phi0, as this is done in mature_immBlock(). */
2140 phi0_all = block->attr.block.graph_arr[pos];
2141 if (!(is_Phi(phi0_all) &&
2142 (get_irn_arity(phi0_all) == 0) &&
2143 (get_nodes_block(phi0_all) == block)))
2149 /* After collecting all predecessors into the array nin a new Phi node
2150 with these predecessors is created. This constructor contains an
2151 optimization: If all predecessors of the Phi node are identical it
2152 returns the only operand instead of a new Phi node. */
2153 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2155 /* In case we allocated a Phi0 node at the beginning of this procedure,
2156 we need to exchange this Phi0 with the real Phi. */
2158 exchange(phi0, res);
2159 block->attr.block.graph_arr[pos] = res;
2160 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2161 only an optimization. */
2168 * This function returns the last definition of a value. In case
2169 * this value was last defined in a previous block, Phi nodes are
2170 * inserted. If the part of the firm graph containing the definition
2171 * is not yet constructed, a dummy Phi node is returned.
2173 * @param block   the current block
2174 * @param pos     the value number of the value searched
2175 * @param mode    the mode of this value (needed for Phi construction)
2178 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2180 /* There are 4 cases to treat.
2182 1. The block is not mature and we visit it the first time. We can not
2183 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2184 predecessors is returned. This node is added to the linked list (block
2185 attribute "phis") of the containing block to be completed when this block is
2186 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2189 2. The value is already known in this block, graph_arr[pos] is set and we
2190 visit the block the first time. We can return the value without
2191 creating any new nodes.
2193 3. The block is mature and we visit it the first time. A Phi node needs
2194 to be created (phi_merge). If the Phi is not needed, as all it's
2195 operands are the same value reaching the block through different
2196 paths, it's optimized away and the value itself is returned.
2198 4. The block is mature, and we visit it the second time. Now two
2199 subcases are possible:
2200 * The value was computed completely the last time we were here. This
2201 is the case if there is no loop. We can return the proper value.
2202 * The recursion that visited this node and set the flag did not
2203 return yet. We are computing a value in a loop and need to
2204 break the recursion. This case only happens if we visited
2205 the same block with phi_merge before, which inserted a Phi0.
2206 So we return the Phi0.
2209 /* case 4 -- already visited. */
2210 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2211 /* As phi_merge allocates a Phi0 this value is always defined. Here
2212 is the critical difference of the two algorithms. */
2213 assert(block->attr.block.graph_arr[pos]);
2214 return block->attr.block.graph_arr[pos];
2217 /* visited the first time */
2218 set_irn_visited(block, get_irg_visited(current_ir_graph));
2220 /* Get the local valid value */
2221 res = block->attr.block.graph_arr[pos];
2223 /* case 2 -- If the value is actually computed, return it. */
2227 if (block->attr.block.is_matured) { /* case 3 */
2229 /* The Phi has the same amount of ins as the corresponding block. */
2230 int ins = get_irn_arity(block);
2232 NEW_ARR_A(ir_node *, nin, ins);
2234 /* Phi merge collects the predecessors and then creates a node. */
2235 res = phi_merge(block, pos, mode, nin, ins);
2237 } else { /* case 1 */
2238 /* The block is not mature, we don't know how many in's are needed. A Phi
2239 with zero predecessors is created. Such a Phi node is called Phi0
2240 node. The Phi0 is then added to the list of Phi0 nodes in this block
2241 to be matured by mature_immBlock later.
2242 The Phi0 has to remember the pos of it's internal value. If the real
2243 Phi is computed, pos is used to update the array with the local
2245 res = new_rd_Phi0(current_ir_graph, block, mode);
2246 res->attr.phi.u.pos = pos;
2247 res->attr.phi.next = block->attr.block.phis;
2248 block->attr.block.phis = res;
2251 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2253 /* The local valid value is available now. */
2254 block->attr.block.graph_arr[pos] = res;
2257 } /* get_r_value_internal */
2259 /* ************************************************************************** */
2262 * Finalize a Block node, when all control flows are known.
2263 * Acceptable parameters are only Block nodes.
2266 mature_immBlock(ir_node *block) {
2271 assert(is_Block(block));
2272 if (!get_Block_matured(block)) {
2273 ir_graph *irg = current_ir_graph;
/* in[0] is the block's own entry; the real predecessors follow it. */
2275 ins = ARR_LEN(block->in) - 1;
2276 /* Fix block parameters */
2277 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2279 /* An array for building the Phi nodes. */
2280 NEW_ARR_A(ir_node *, nin, ins);
2282 /* Traverse a chain of Phi nodes attached to this block and mature
2284 for (n = block->attr.block.phis; n; n = next) {
2285 inc_irg_visited(irg);
2286 next = n->attr.phi.next;
/* Replace each queued Phi0 with a fully-predecessored Phi. */
2287 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2290 block->attr.block.is_matured = 1;
2292 /* Now, as the block is a finished Firm node, we can optimize it.
2293 Since other nodes have been allocated since the block was created
2294 we can not free the node on the obstack. Therefore we have to call
2295 optimize_in_place().
2296 Unfortunately the optimization does not change a lot, as all allocated
2297 nodes refer to the unoptimized node.
2298 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2299 block = optimize_in_place_2(block);
2300 IRN_VRFY_IRG(block, irg);
2302 } /* mature_immBlock */
/* "d" constructors: build a node in current_ir_graph's current block by
   forwarding to the corresponding block-based new_bd_* constructor.
   NOTE(review): listing appears truncated -- return-type lines and some
   closing braces of these wrappers are not visible. */
2305 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2306 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2310 new_d_Const(dbg_info *db, tarval *con) {
2311 return new_bd_Const(db, con);
2315 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2316 return new_bd_Const_long(db, mode, value);
2317 } /* new_d_Const_long */
2321 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
2322 return new_bd_Const_type(db, con, tp);
2323 } /* new_d_Const_type */
2327 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2328 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2332 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2333 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* Unlike the plain forwarders, this one mutates the Cond argument: it
   marks the Cond fragmentary and records max_proj as its default proj
   before building the Proj. */
2338 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2341 assert(arg->op == op_Cond);
2342 arg->attr.cond.kind = fragmentary;
2343 arg->attr.cond.default_proj = max_proj;
2344 res = new_Proj(arg, mode_X, max_proj);
2346 } /* new_d_defaultProj */
/* Conv/strictConv differ only in the strict flag (last argument). */
2349 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2350 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2354 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2355 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2356 } /* new_d_strictConv */
2360 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2361 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2365 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2366 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2377 * Allocate a frag array for a node if the current graph state is phase_building.
2379 * @param irn        the node for which the frag array should be allocated
2380 * @param op         the opcode of the (original) node, if does not match opcode of irn,
2382 * @param frag_store the address of the frag store in irn attributes, if this
2383 *                   address contains a value != NULL, does nothing
2385 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2386 if (get_opt_precise_exc_context()) {
2387 if ((current_ir_graph->phase_state == phase_building) &&
2388 (get_irn_op(irn) == op) && /* Could be optimized away. */
2389 !*frag_store) /* Could be a cse where the arr is already set. */ {
2390 *frag_store = new_frag_arr(irn);
2393 } /* firm_alloc_frag_arr */
/* Fragile arithmetic constructors (Quot/DivMod/Div): build the node in
   the current block, then -- with precise exception context enabled --
   allocate its frag array, keyed on the node's own opcode.
   NOTE(review): listing truncated -- #endif / return / closing braces of
   these functions are not visible. */
2396 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2398 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2399 #if PRECISE_EXC_CONTEXT
2400 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2407 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2409 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2410 #if PRECISE_EXC_CONTEXT
2411 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2415 } /* new_d_DivMod */
2419 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2421 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2422 #if PRECISE_EXC_CONTEXT
2423 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2431 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2433 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2434 #if PRECISE_EXC_CONTEXT
2435 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2442 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2444 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2445 #if PRECISE_EXC_CONTEXT
2446 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
/* Comparison, control-flow and memory constructors on the current block.
   Call/Load/Store/Alloc additionally attach a fragment array under
   PRECISE_EXC_CONTEXT (each with its matching opcode and attribute slot).
   NOTE(review): return-type lines, `ir_node *res;`, `#endif`, `return res;`
   and some argument lines were dropped by extraction; code byte-identical. */
2466 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2467 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2471 new_d_Jmp(dbg_info *db) {
2472 return new_bd_Jmp(db, current_ir_graph->current_block);
2476 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2477 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2481 new_d_Cond(dbg_info *db, ir_node *c) {
2482 return new_bd_Cond(db, current_ir_graph->current_block, c);
2487 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2490 res = new_bd_Call(db, current_ir_graph->current_block,
2491 store, callee, arity, in, tp);
2492 #if PRECISE_EXC_CONTEXT
2493 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2500 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2501 return new_bd_Return(db, current_ir_graph->current_block,
2503 } /* new_d_Return */
2506 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2508 res = new_bd_Load(db, current_ir_graph->current_block,
2510 #if PRECISE_EXC_CONTEXT
2511 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2518 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2520 res = new_bd_Store(db, current_ir_graph->current_block,
2522 #if PRECISE_EXC_CONTEXT
2523 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2530 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2531 ir_where_alloc where) {
2533 res = new_bd_Alloc(db, current_ir_graph->current_block,
2534 store, size, alloc_type, where);
2535 #if PRECISE_EXC_CONTEXT
2536 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
2543 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2544 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2545 return new_bd_Free(db, current_ir_graph->current_block,
2546 store, ptr, size, free_type, where);
/* Entity selection and symbolic constants.
   NOTE(review): return-type lines and some braces were dropped by extraction. */
/* simpleSel = Sel with no index operands (arity 0, index NULL). */
2550 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2551 /* GL: objptr was called frame before. Frame was a bad choice for the name
2552 as the operand could as well be a pointer to a dynamic object. */
2554 return new_bd_Sel(db, current_ir_graph->current_block,
2555 store, objptr, 0, NULL, ent);
2556 } /* new_d_simpleSel */
2559 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2560 return new_bd_Sel(db, current_ir_graph->current_block,
2561 store, objptr, n_index, index, sel);
/* SymConsts are placed in the start block, not the current block:
   they are constants valid for the whole graph. */
2565 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2566 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2568 } /* new_d_SymConst_type */
/* Untyped variant: delegates with firm_unknown_type. */
2571 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2572 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2573 value, kind, firm_unknown_type);
2574 } /* new_d_SymConst */
/* Remaining debug-info constructors; all build in the current block.
   CopyB and Bound also attach fragment arrays under PRECISE_EXC_CONTEXT.
   NOTE(review): return-type lines, declarations, `#endif`/`return res;` and
   some trailing argument lines were dropped by extraction; code unchanged. */
2577 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2578 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2582 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2583 return new_bd_Confirm(db, current_ir_graph->current_block,
2585 } /* new_d_Confirm */
/* Unknown takes no block argument -- handled entirely by new_bd_Unknown. */
2588 new_d_Unknown(ir_mode *m) {
2589 return new_bd_Unknown(m);
2590 } /* new_d_Unknown */
2593 new_d_CallBegin(dbg_info *db, ir_node *call) {
2594 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2595 } /* new_d_CallBegin */
2598 new_d_EndReg(dbg_info *db) {
2599 return new_bd_EndReg(db, current_ir_graph->current_block);
2600 } /* new_d_EndReg */
2603 new_d_EndExcept(dbg_info *db) {
2604 return new_bd_EndExcept(db, current_ir_graph->current_block);
2605 } /* new_d_EndExcept */
2608 new_d_Break(dbg_info *db) {
2609 return new_bd_Break(db, current_ir_graph->current_block);
2613 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2614 return new_bd_Filter(db, current_ir_graph->current_block,
2616 } /* new_d_Filter */
2619 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2620 ir_node *ir_true, ir_mode *mode) {
2621 return new_bd_Mux(db, current_ir_graph->current_block,
2622 sel, ir_false, ir_true, mode);
2625 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2626 ir_node *dst, ir_node *src, ir_type *data_type) {
2628 res = new_bd_CopyB(db, current_ir_graph->current_block,
2629 store, dst, src, data_type);
2630 #if PRECISE_EXC_CONTEXT
2631 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2637 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2638 return new_bd_InstOf(db, current_ir_graph->current_block,
2639 store, objptr, type);
2640 } /* new_d_InstOf */
2643 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2644 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2647 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2648 ir_node *idx, ir_node *lower, ir_node *upper) {
2650 res = new_bd_Bound(db, current_ir_graph->current_block,
2651 store, idx, lower, upper);
2652 #if PRECISE_EXC_CONTEXT
2653 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2659 new_d_Pin(dbg_info *db, ir_node *node) {
2660 return new_bd_Pin(db, current_ir_graph->current_block, node);
2664 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2665 int n_outs, ir_asm_constraint *outputs,
2666 int n_clobber, ident *clobber[], ident *asm_text) {
2667 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2670 /* ********************************************************************* */
2671 /* Comfortable interface with automatic Phi node construction. */
2672 /* (Also uses constructors of the new_d_* interface, except new_Block.) */
2673 /* ********************************************************************* */
2675 /* Block construction */
2676 /* immature Block without predecessors */
/* Creates an immature (not yet matured) Block with no predecessors and makes
   it the current block. Predecessors are appended later via add_immBlock_pred;
   the block is finished by maturing it. NOTE(review): the return-type line,
   `ir_node *res;` declaration and `return res;` were dropped by extraction. */
2678 new_d_immBlock(dbg_info *db) {
2681 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2682 /* creates a new dynamic in-array as length of in is -1 */
2683 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2684 current_ir_graph->current_block = res;
2686 /* macroblock head */
/* Fresh block attributes: not matured, alive, a macroblock head, unlabeled. */
2689 res->attr.block.is_matured = 0;
2690 res->attr.block.is_dead = 0;
2691 res->attr.block.is_mb_head = 1;
2692 res->attr.block.has_label = 0;
2693 res->attr.block.irg = current_ir_graph;
2694 res->attr.block.backedge = NULL;
2695 res->attr.block.in_cg = NULL;
2696 res->attr.block.cg_backedge = NULL;
2697 res->attr.block.extblk = NULL;
2698 res->attr.block.region = NULL;
2699 res->attr.block.mb_depth = 0;
2700 res->attr.block.label = 0;
2702 set_Block_block_visited(res, 0);
2704 /* Create and initialize array for Phi-node construction. */
/* graph_arr has one slot per local variable (n_loc); zeroed so every
   value starts out undefined in this block. */
2705 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2706 current_ir_graph->n_loc);
2707 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2709 /* Immature block may not be optimized! */
2710 IRN_VRFY_IRG(res, current_ir_graph);
2713 } /* new_d_immBlock */
/* Convenience wrapper without debug info. */
2716 new_immBlock(void) {
2717 return new_d_immBlock(NULL);
2718 } /* new_immBlock */
2720 /* immature PartBlock with its predecessors */
/* A PartBlock continues the macroblock of pred_jmp's block: it inherits that
   block's macroblock header (in[0]), gets pred_jmp as control predecessor,
   is not itself a macroblock head, and sits one level deeper.
   NOTE(review): `return res;` and the return-type line were dropped. */
2722 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2723 ir_node *res = new_d_immBlock(db);
2724 ir_node *blk = get_nodes_block(pred_jmp);
2726 res->in[0] = blk->in[0];
2727 assert(res->in[0] != NULL);
2728 add_immBlock_pred(res, pred_jmp);
2730 res->attr.block.is_mb_head = 0;
2731 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2734 } /* new_d_immPartBlock */
/* Convenience wrapper without debug info. */
2737 new_immPartBlock(ir_node *pred_jmp) {
2738 return new_d_immPartBlock(NULL, pred_jmp);
2739 } /* new_immPartBlock */
2741 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new control-flow predecessor of an immature block.
   Only legal while the block is not yet matured and is a macroblock head.
   n is the predecessor index before appending (in[0] is the MB header). */
2743 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2744 int n = ARR_LEN(block->in) - 1;
2746 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2747 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2748 assert(is_ir_node(jmp));
2750 ARR_APP1(ir_node *, block->in, jmp);
2752 hook_set_irn_n(block, n, jmp, NULL);
2753 } /* add_immBlock_pred */
2755 /* changing the current block */
/* Makes target the block into which subsequently constructed nodes go. */
2757 set_cur_block(ir_node *target) {
2758 current_ir_graph->current_block = target;
2759 } /* set_cur_block */
2761 /* ************************ */
2762 /* parameter administration */
2764 /* get a value from the parameter array from the current block by its index */
/* Reads SSA value number pos from the current block; may trigger Phi
   construction via get_r_value_internal. Slot 0 is reserved for the memory
   state, hence pos + 1. inc_irg_visited starts a fresh graph walk. */
2766 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2767 ir_graph *irg = current_ir_graph;
2768 assert(get_irg_phase_state(irg) == phase_building);
2769 inc_irg_visited(irg);
2772 return get_r_value_internal(irg->current_block, pos + 1, mode);
2775 /* get a value from the parameter array from the current block by its index */
2777 get_value(int pos, ir_mode *mode) {
2778 return get_d_value(NULL, pos, mode);
2781 /* set a value at position pos in the parameter array from the current block */
/* Writes value into SSA slot pos of the current block (again offset by 1
   because slot 0 holds the memory state). */
2783 set_value(int pos, ir_node *value) {
2784 ir_graph *irg = current_ir_graph;
2785 assert(get_irg_phase_state(irg) == phase_building);
2786 assert(pos+1 < irg->n_loc);
2787 assert(is_ir_node(value));
2788 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2791 /* Find the value number for a node in the current block.*/
/* Linear reverse scan of the current block's value array; slot 0 (memory)
   is excluded. NOTE(review): the return statements were dropped in extraction
   -- presumably returns the matching index, or a sentinel if absent. */
2793 find_value(ir_node *value) {
2795 ir_node *bl = current_ir_graph->current_block;
2797 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2798 if (bl->attr.block.graph_arr[i] == value)
2803 /* get the current store */
/* Returns the current memory state: SSA slot 0 of the current block. */
2806 ir_graph *irg = current_ir_graph;
2808 assert(get_irg_phase_state(irg) == phase_building);
2809 /* GL: one could call get_value instead */
2810 inc_irg_visited(irg);
2811 return get_r_value_internal(irg->current_block, 0, mode_M);
2814 /* set the current store: handles automatic Sync construction for Load nodes */
/* Installs store as the new memory state. If auto-Sync is enabled and store
   is (a Proj of) a non-volatile Load, parallelize independent Loads:
   either fold into an existing Sync, or build a new 2-input Sync when the
   previous memory was itself a non-volatile Load.
   NOTE(review): several interior lines (else-branches, in[] setup, final
   brace) were dropped by extraction; code kept byte-identical. */
2816 set_store(ir_node *store) {
2817 ir_node *load, *pload, *pred, *in[2];
2819 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2820 /* Beware: due to dead code elimination, a store might become a Bad node even in
2821 the construction phase. */
2822 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2824 if (get_opt_auto_create_sync()) {
2825 /* handle non-volatile Load nodes by automatically creating Sync's */
2826 load = skip_Proj(store);
2827 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2828 pred = get_Load_mem(load);
2830 if (is_Sync(pred)) {
2831 /* a Load after a Sync: move it up */
2832 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2834 set_Load_mem(load, get_memop_mem(mem));
2835 add_Sync_pred(pred, store);
2838 pload = skip_Proj(pred);
2839 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2840 /* a Load after a Load: create a new Sync */
2841 set_Load_mem(load, get_Load_mem(pload));
2845 store = new_Sync(2, in);
/* Finally record the (possibly rewritten) store in memory slot 0. */
2850 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Marks ka as kept alive: adds it to the End node's keep-alive list so
   dead-node elimination does not remove it. */
2854 keep_alive(ir_node *ka) {
2855 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2858 /* --- Useful access routines --- */
2859 /* Returns the current block of the current graph. To set the current
2860 block use set_cur_block. */
2861 ir_node *get_cur_block(void) {
2862 return get_irg_current_block(current_ir_graph);
2863 } /* get_cur_block */
2865 /* Returns the frame type of the current graph */
2866 ir_type *get_cur_frame_type(void) {
2867 return get_irg_frame_type(current_ir_graph);
2868 } /* get_cur_frame_type */
2871 /* ********************************************************************* */
2874 /* call once for each run of the library */
/* Registers the language-dependent callback used to initialize local
   variables that are read before being written. */
2876 firm_init_cons(uninitialized_local_variable_func_t *func) {
2877 default_initialize_local_variable = func;
2878 } /* firm_init_cons */
/* Finalizes construction of every graph in the program and advances the
   whole program to phase_high. NOTE(review): the `int i;` declaration and
   loop closing brace were dropped by extraction. */
2881 irp_finalize_cons(void) {
2883 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2884 irg_finalize_cons(get_irp_irg(i));
2886 irp->phase_state = phase_high;
2887 } /* irp_finalize_cons */
/* --------------------------------------------------------------------- */
/* Convenience constructors without debug information: each forwards to   */
/* its new_d_* counterpart with db == NULL. new_Bad and new_NoMem return  */
/* the graph's unique Bad / NoMem node instead of building a new one.     */
/* NOTE(review): the closing braces of these one-line wrappers were       */
/* dropped by extraction; code below is kept byte-identical.              */
/* --------------------------------------------------------------------- */
2890 ir_node *new_Block(int arity, ir_node **in) {
2891 return new_d_Block(NULL, arity, in);
2893 ir_node *new_Start(void) {
2894 return new_d_Start(NULL);
2896 ir_node *new_End(void) {
2897 return new_d_End(NULL);
2899 ir_node *new_Jmp(void) {
2900 return new_d_Jmp(NULL);
2902 ir_node *new_IJmp(ir_node *tgt) {
2903 return new_d_IJmp(NULL, tgt);
2905 ir_node *new_Cond(ir_node *c) {
2906 return new_d_Cond(NULL, c);
2909 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2910 return new_d_Return(NULL, store, arity, in);
2912 ir_node *new_Const(tarval *con) {
2913 return new_d_Const(NULL, con);
2916 ir_node *new_Const_long(ir_mode *mode, long value) {
2917 return new_d_Const_long(NULL, mode, value);
2921 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2922 return new_d_Const_type(NULL, con, tp);
2926 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2927 return new_d_SymConst_type(NULL, mode, value, kind, type);
2929 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2930 return new_d_SymConst(NULL, mode, value, kind);
2932 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2933 return new_d_simpleSel(NULL, store, objptr, ent);
2935 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2937 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2939 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2941 return new_d_Call(NULL, store, callee, arity, in, tp);
/* Arithmetic and logic wrappers. */
2944 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2945 return new_d_Add(NULL, op1, op2, mode);
2948 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2949 return new_d_Sub(NULL, op1, op2, mode);
2951 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2952 return new_d_Minus(NULL, op, mode);
2954 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2955 return new_d_Mul(NULL, op1, op2, mode);
2957 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2958 return new_d_Mulh(NULL, op1, op2, mode);
2960 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2961 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2963 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2964 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2967 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2968 return new_d_Div(NULL, memop, op1, op2, mode, state);
2971 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2972 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
2974 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2975 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2977 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2978 return new_d_Abs(NULL, op, mode);
2980 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2981 return new_d_And(NULL, op1, op2, mode);
2983 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
2984 return new_d_Or(NULL, op1, op2, mode);
2986 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
2987 return new_d_Eor(NULL, op1, op2, mode);
2989 ir_node *new_Not(ir_node *op, ir_mode *mode) {
2990 return new_d_Not(NULL, op, mode);
2992 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
2993 return new_d_Shl(NULL, op, k, mode);
2995 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
2996 return new_d_Shr(NULL, op, k, mode);
2998 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
2999 return new_d_Shrs(NULL, op, k, mode);
3001 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
3002 return new_d_Rotl(NULL, op, k, mode);
3004 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3005 return new_d_Carry(NULL, op1, op2, mode);
3007 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3008 return new_d_Borrow(NULL, op1, op2, mode);
3011 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3012 return new_d_Cmp(NULL, op1, op2);
3015 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3016 return new_d_Conv(NULL, op, mode);
3018 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3019 return new_d_strictConv(NULL, op, mode);
3022 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3023 return new_d_Cast(NULL, op, to_tp);
3026 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3027 return new_d_Phi(NULL, arity, in, mode);
/* Memory wrappers. */
3029 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3030 return new_d_Load(NULL, store, addr, mode);
3032 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3033 return new_d_Store(NULL, store, addr, val);
3035 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3036 ir_where_alloc where) {
3037 return new_d_Alloc(NULL, store, size, alloc_type, where);
3039 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3040 ir_type *free_type, ir_where_alloc where) {
3041 return new_d_Free(NULL, store, ptr, size, free_type, where);
3043 ir_node *new_Sync(int arity, ir_node *in[]) {
3044 return new_d_Sync(NULL, arity, in);
3047 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3048 return new_d_Proj(NULL, arg, mode, proj);
3051 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3052 return new_d_defaultProj(NULL, arg, max_proj);
3055 ir_node *new_Tuple(int arity, ir_node **in) {
3056 return new_d_Tuple(NULL, arity, in);
3058 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3059 return new_d_Id(NULL, val, mode);
/* Bad is unique per graph: fetch it rather than construct it. */
3062 ir_node *new_Bad(void) {
3063 return get_irg_bad(current_ir_graph);
3065 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3066 return new_d_Confirm(NULL, val, bound, cmp);
3068 ir_node *new_Unknown(ir_mode *m) {
3069 return new_d_Unknown(m);
3071 ir_node *new_CallBegin(ir_node *callee) {
3072 return new_d_CallBegin(NULL, callee);
3074 ir_node *new_EndReg(void) {
3075 return new_d_EndReg(NULL);
3077 ir_node *new_EndExcept(void) {
3078 return new_d_EndExcept(NULL);
3080 ir_node *new_Break(void) {
3081 return new_d_Break(NULL);
3083 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3084 return new_d_Filter(NULL, arg, mode, proj);
/* NoMem is unique per graph: fetch it rather than construct it. */
3086 ir_node *new_NoMem(void) {
3087 return get_irg_no_mem(current_ir_graph);
3089 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3090 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3092 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3093 return new_d_CopyB(NULL, store, dst, src, data_type);
3095 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3096 return new_d_InstOf(NULL, store, objptr, ent);
3098 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3099 return new_d_Raise(NULL, store, obj);
3101 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3102 return new_d_Bound(NULL, store, idx, lower, upper);
3104 ir_node *new_Pin(ir_node *node) {
3105 return new_d_Pin(NULL, node);
3107 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3108 int n_outs, ir_asm_constraint *outputs,
3109 int n_clobber, ident *clobber[], ident *asm_text) {
3110 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3113 /* create a new anchor node */
3114 ir_node *new_Anchor(ir_graph *irg) {
3115 ir_node *in[anchor_last];
3116 memset(in, 0, sizeof(in));
3117 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);