2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
47 /* Uncomment to use original code instead of generated one */
48 // #define USE_ORIGINAL
50 /* when we need verifying */
/* NOTE(review): the two IRN_VRFY_IRG definitions below look like both arms of
   an elided #ifdef (debug build runs irn_vrfy_irg, release compiles to
   nothing) — the conditional lines are missing from this excerpt; confirm
   against the full file. */
52 # define IRN_VRFY_IRG(res, irg)
54 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
58 * Language dependent variable initialization callback.
/* Callback invoked for reads of uninitialized local variables; NULL means no
   frontend-specific initialization has been registered. */
60 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
62 /* creates a bd constructor for a binop */
/* NEW_BD_BINOP(instr): expands to new_bd_<instr>(db, block, op1, op2, mode),
   which builds a two-operand node of op_<instr> in current_ir_graph, runs
   local optimization (optimize_node) and verifies the result.
   No comments may appear between the continuation lines below. */
63 #define NEW_BD_BINOP(instr) \
65 new_bd_##instr(dbg_info *db, ir_node *block, \
66 ir_node *op1, ir_node *op2, ir_mode *mode) \
70 ir_graph *irg = current_ir_graph; \
73 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
74 res = optimize_node(res); \
75 IRN_VRFY_IRG(res, irg); \
79 /* creates a bd constructor for an unop */
/* NEW_BD_UNOP(instr): same pattern for a single-operand node. */
80 #define NEW_BD_UNOP(instr) \
82 new_bd_##instr(dbg_info *db, ir_node *block, \
83 ir_node *op, ir_mode *mode) \
86 ir_graph *irg = current_ir_graph; \
87 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
88 res = optimize_node(res); \
89 IRN_VRFY_IRG(res, irg); \
93 /* creates a bd constructor for an divop */
/* NEW_BD_DIVOP(instr): three operands (memory, op1, op2); the node itself is
   mode_T, the result mode and pin state are stored in the divmod attribute,
   no_remainder is 0 (contrast new_bd_DivRL which sets it to 1). */
94 #define NEW_BD_DIVOP(instr) \
96 new_bd_##instr(dbg_info *db, ir_node *block, \
97 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
101 ir_graph *irg = current_ir_graph; \
105 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
106 res->attr.divmod.exc.pin_state = state; \
107 res->attr.divmod.res_mode = mode; \
108 res->attr.divmod.no_remainder = 0; \
109 res = optimize_node(res); \
110 IRN_VRFY_IRG(res, irg); \
114 /* creates a rd constructor for a binop */
/* NEW_RD_BINOP(instr): "rd" variants take an explicit irg; they temporarily
   swap it into current_ir_graph, delegate to the bd constructor, and restore
   the previous graph. */
115 #define NEW_RD_BINOP(instr) \
117 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
118 ir_node *op1, ir_node *op2, ir_mode *mode) \
121 ir_graph *rem = current_ir_graph; \
122 current_ir_graph = irg; \
123 res = new_bd_##instr(db, block, op1, op2, mode); \
124 current_ir_graph = rem; \
128 /* creates a rd constructor for an unop */
129 #define NEW_RD_UNOP(instr) \
131 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
132 ir_node *op, ir_mode *mode) \
135 ir_graph *rem = current_ir_graph; \
136 current_ir_graph = irg; \
137 res = new_bd_##instr(db, block, op, mode); \
138 current_ir_graph = rem; \
142 /* creates a rd constructor for an divop */
143 #define NEW_RD_DIVOP(instr) \
145 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
146 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
149 ir_graph *rem = current_ir_graph; \
150 current_ir_graph = irg; \
151 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
152 current_ir_graph = rem; \
156 /* creates a d constructor for an binop */
/* NEW_D_BINOP(instr): "d" variants place the node in
   current_ir_graph->current_block (implicit-block construction API). */
157 #define NEW_D_BINOP(instr) \
159 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
160 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
163 /* creates a d constructor for an unop */
164 #define NEW_D_UNOP(instr) \
166 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
167 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
171 #include "gen_ir_cons.c.inl"
175 * Constructs a Block with a fixed number of predecessors.
176 * Does not set current_block. Cannot be used with automatic
177 * Phi node construction.
180 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
182 ir_graph *irg = current_ir_graph;
/* A Block has no enclosing block of its own, hence the NULL block argument. */
184 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
186 /* macroblock header */
/* Freshly built blocks start alive, as their own macroblock head, unlabeled,
   and with a backedge flag array sized to the predecessor count. */
189 res->attr.block.is_dead = 0;
190 res->attr.block.is_mb_head = 1;
191 res->attr.block.has_label = 0;
192 res->attr.block.irg = irg;
193 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
194 res->attr.block.in_cg = NULL;
195 res->attr.block.cg_backedge = NULL;
196 res->attr.block.extblk = NULL;
197 res->attr.block.mb_depth = 0;
198 res->attr.block.label = 0;
/* Fixed arity: the block is mature immediately. */
200 set_Block_matured(res, 1);
201 set_Block_block_visited(res, 0);
203 IRN_VRFY_IRG(res, irg);
/* Constructs a Start node (mode_T, no predecessors) in current_ir_graph. */
208 new_bd_Start(dbg_info *db, ir_node *block) {
210 ir_graph *irg = current_ir_graph;
212 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
214 IRN_VRFY_IRG(res, irg);
/* Constructs an End node.  Arity -1 means a dynamic predecessor list
   (keep-alive edges are added later). */
219 new_bd_End(dbg_info *db, ir_node *block) {
221 ir_graph *irg = current_ir_graph;
223 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
225 IRN_VRFY_IRG(res, irg);
231 * Creates a Phi node with all predecessors. Calling this constructor
232 * is only allowed if the corresponding block is mature.
235 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
237 ir_graph *irg = current_ir_graph;
241 /* Don't assert that block matured: the use of this constructor is strongly
243 if (get_Block_matured(block))
244 assert(get_irn_arity(block) == arity);
246 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
248 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
/* Scan the operands for Unknown inputs; a Phi with Unknown inputs is not
   optimized (see the check below). */
250 for (i = arity - 1; i >= 0; --i)
251 if (is_Unknown(in[i])) {
256 if (!has_unknown) res = optimize_node(res);
257 IRN_VRFY_IRG(res, irg);
259 /* Memory Phis in endless loops must be kept alive.
260 As we can't distinguish these easily we keep all of them alive. */
261 if (is_Phi(res) && mode == mode_M)
262 add_End_keepalive(get_irg_end(irg), res);
/* Constructs a typed Const from tarval con; Const nodes always live in the
   start block, and the node mode is taken from the tarval. */
268 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
270 ir_graph *irg = current_ir_graph;
272 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
273 res->attr.con.tv = con;
274 set_Const_type(res, tp); /* Call method because of complex assertion. */
275 res = optimize_node (res);
276 assert(get_Const_type(res) == tp);
277 IRN_VRFY_IRG(res, irg);
280 } /* new_bd_Const_type */
/* Untyped Const: delegates with firm_unknown_type.  NOTE(review): this "bd"
   constructor calls the "rd" variant, passing current_ir_graph explicitly —
   equivalent, but inconsistent with the rest of the file; confirm intent. */
284 new_bd_Const(dbg_info *db, tarval *con) {
285 ir_graph *irg = current_ir_graph;
287 return new_rd_Const_type (db, irg, con, firm_unknown_type);
/* Convenience: Const from a long value in the given mode. */
291 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
292 ir_graph *irg = current_ir_graph;
294 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
295 } /* new_bd_Const_long */
/* Constructs an Id node (identity forward to val). */
299 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
301 ir_graph *irg = current_ir_graph;
303 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
/* Constructs a Proj selecting result number proj from the mode_T node arg. */
310 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
313 ir_graph *irg = current_ir_graph;
315 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
316 res->attr.proj = proj;
319 assert(get_Proj_pred(res));
320 assert(get_nodes_block(get_Proj_pred(res)));
322 res = optimize_node(res);
324 IRN_VRFY_IRG(res, irg);
/* Constructs the default Proj of a Cond: marks the Cond as fragmentary with
   default projection max_proj, then builds a mode_X Proj for it. */
330 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
333 ir_graph *irg = current_ir_graph;
335 assert(arg->op == op_Cond);
336 arg->attr.cond.kind = fragmentary;
337 arg->attr.cond.default_proj = max_proj;
338 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
340 } /* new_bd_defaultProj */
/* Constructs a Conv of op to mode; strict_flag is stored in the conv
   attribute (strict conversions must not be removed). */
343 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
345 ir_graph *irg = current_ir_graph;
347 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
348 res->attr.conv.strict = strict_flag;
349 res = optimize_node(res);
350 IRN_VRFY_IRG(res, irg);
/* Constructs a Cast of op to type to_tp; the mode is unchanged, only the
   high-level type changes, and to_tp must be atomic. */
356 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
358 ir_graph *irg = current_ir_graph;
360 assert(is_atomic_type(to_tp));
362 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
363 res->attr.cast.totype = to_tp;
364 res = optimize_node(res);
365 IRN_VRFY_IRG(res, irg);
/* Constructs a Tuple of arity values (mode_T). */
370 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
372 ir_graph *irg = current_ir_graph;
374 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
375 res = optimize_node (res);
376 IRN_VRFY_IRG(res, irg);
404 /** Creates a remainderless Div node. */
405 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
406 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
410 ir_graph *irg = current_ir_graph;
/* Same shape as the NEW_BD_DIVOP expansion, but no_remainder = 1. */
414 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
415 res->attr.divmod.exc.pin_state = state;
416 res->attr.divmod.res_mode = mode;
417 res->attr.divmod.no_remainder = 1;
418 res = optimize_node(res);
419 IRN_VRFY_IRG(res, irg);
/* Constructs a Cmp of op1 and op2 (mode_T; relations are selected by Proj). */
425 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
428 ir_graph *irg = current_ir_graph;
431 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
432 res = optimize_node(res);
433 IRN_VRFY_IRG(res, irg);
/* Constructs an unconditional Jmp (mode_X, no operands). */
438 new_bd_Jmp(dbg_info *db, ir_node *block) {
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
443 res = optimize_node(res);
444 IRN_VRFY_IRG(res, irg);
/* Constructs an indirect jump IJmp to address tgt. */
449 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
451 ir_graph *irg = current_ir_graph;
453 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
454 res = optimize_node(res);
455 IRN_VRFY_IRG(res, irg);
/* Constructs a Cond on selector c; starts as a dense switch with default
   projection 0 and no jump prediction. */
460 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
462 ir_graph *irg = current_ir_graph;
464 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
465 res->attr.cond.kind = dense;
466 res->attr.cond.default_proj = 0;
467 res->attr.cond.pred = COND_JMP_PRED_NONE;
468 res = optimize_node(res);
469 IRN_VRFY_IRG(res, irg);
/* Constructs a Call: operands are [store, callee, arguments...]; tp must be
   the method type (or unknown_type).  Starts pinned with no callee array. */
475 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
476 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
480 ir_graph *irg = current_ir_graph;
/* r_in gets 2 leading slots (store, callee) before the arity arguments. */
483 NEW_ARR_A(ir_node *, r_in, r_arity);
486 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
488 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
490 assert((get_unknown_type() == tp) || is_Method_type(tp));
491 set_Call_type(res, tp);
492 res->attr.call.exc.pin_state = op_pin_state_pinned;
493 res->attr.call.callee_arr = NULL;
494 res = optimize_node(res);
495 IRN_VRFY_IRG(res, irg);
/* Constructs a Return: operands are [store, results...] (mode_X). */
500 new_bd_Return(dbg_info *db, ir_node *block,
501 ir_node *store, int arity, ir_node **in) {
505 ir_graph *irg = current_ir_graph;
508 NEW_ARR_A (ir_node *, r_in, r_arity);
510 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
511 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
512 res = optimize_node(res);
513 IRN_VRFY_IRG(res, irg);
515 } /* new_bd_Return */
/* Constructs a Load of mode from address adr with memory store; cons_flags
   map onto pin state, volatility and alignment attributes. */
518 new_bd_Load(dbg_info *db, ir_node *block,
519 ir_node *store, ir_node *adr, ir_mode *mode, cons_flags flags) {
522 ir_graph *irg = current_ir_graph;
526 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
527 res->attr.load.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
528 res->attr.load.load_mode = mode;
529 res->attr.load.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
530 res->attr.load.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
531 res = optimize_node(res);
532 IRN_VRFY_IRG(res, irg);
/* Constructs a Store of val to address adr; flags as for Load. */
537 new_bd_Store(dbg_info *db, ir_node *block,
538 ir_node *store, ir_node *adr, ir_node *val, cons_flags flags) {
541 ir_graph *irg = current_ir_graph;
546 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
547 res->attr.store.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
548 res->attr.store.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
549 res->attr.store.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
551 res = optimize_node(res);
552 IRN_VRFY_IRG(res, irg);
/* Constructs an Alloc of size bytes of alloc_type on heap or stack (where);
   always pinned since allocation has memory effects. */
557 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
558 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
561 ir_graph *irg = current_ir_graph;
565 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
566 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
567 res->attr.alloc.where = where;
568 res->attr.alloc.type = alloc_type;
569 res = optimize_node(res);
570 IRN_VRFY_IRG(res, irg);
/* Constructs a Free of ptr (size bytes, free_type); result is plain memory
   (mode_M), not a tuple. */
575 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
576 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
579 ir_graph *irg = current_ir_graph;
584 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
585 res->attr.free.where = where;
586 res->attr.free.type = free_type;
587 res = optimize_node(res);
588 IRN_VRFY_IRG(res, irg);
/* Constructs a Sel of entity ent from objptr with arity index operands;
   result mode is mode_P_code for method entities, mode_P_data otherwise. */
593 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
594 int arity, ir_node **in, ir_entity *ent) {
598 ir_graph *irg = current_ir_graph;
599 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
601 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
/* r_in gets 2 leading slots (store, objptr) before the arity indices. */
604 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
607 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
609 * Sel's can select functions which should be of mode mode_P_code.
611 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
612 res->attr.sel.ent = ent;
613 res = optimize_node(res);
614 IRN_VRFY_IRG(res, irg);
/* Constructs a typed SymConst of the given kind/symbol in the given mode. */
619 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
620 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
621 ir_graph *irg = current_ir_graph;
622 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
624 res->attr.symc.kind = symkind;
625 res->attr.symc.sym = value;
626 res->attr.symc.tp = tp;
628 res = optimize_node(res);
629 IRN_VRFY_IRG(res, irg);
631 } /* new_bd_SymConst_type */
/* Constructs an empty Sync (mode_M, dynamic arity -1); predecessors are
   added afterwards via add_Sync_pred (see new_rd_Sync). */
634 new_bd_Sync(dbg_info *db, ir_node *block) {
636 ir_graph *irg = current_ir_graph;
638 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
639 /* no need to call optimize node here, Sync are always created with no predecessors */
640 IRN_VRFY_IRG(res, irg);
/* Constructs a Confirm asserting relation cmp between val and bound; the
   result has val's mode. */
645 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
646 ir_node *in[2], *res;
647 ir_graph *irg = current_ir_graph;
651 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
652 res->attr.confirm.cmp = cmp;
653 res = optimize_node(res);
654 IRN_VRFY_IRG(res, irg);
656 } /* new_bd_Confirm */
/* Constructs an Unknown of mode m; lives in the start block, no debug info. */
659 new_bd_Unknown(ir_mode *m) {
661 ir_graph *irg = current_ir_graph;
663 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
664 res = optimize_node(res);
666 } /* new_bd_Unknown */
/* Constructs a CallBegin for interprocedural view; single operand is the
   callee pointer of the given Call. */
669 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
672 ir_graph *irg = current_ir_graph;
674 in[0] = get_Call_ptr(call);
675 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
676 /* res->attr.callbegin.irg = irg; */
677 res->attr.callbegin.call = call;
678 res = optimize_node(res);
679 IRN_VRFY_IRG(res, irg);
681 } /* new_bd_CallBegin */
/* Constructs an EndReg (dynamic arity) and registers it with the graph. */
684 new_bd_EndReg(dbg_info *db, ir_node *block) {
686 ir_graph *irg = current_ir_graph;
688 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
689 set_irg_end_reg(irg, res);
690 IRN_VRFY_IRG(res, irg);
692 } /* new_bd_EndReg */
/* Constructs an EndExcept (dynamic arity) and registers it with the graph. */
695 new_bd_EndExcept(dbg_info *db, ir_node *block) {
697 ir_graph *irg = current_ir_graph;
699 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
700 set_irg_end_except(irg, res);
701 IRN_VRFY_IRG (res, irg);
703 } /* new_bd_EndExcept */
/* Constructs a Break (mode_X, no operands; interprocedural control flow). */
706 new_bd_Break(dbg_info *db, ir_node *block) {
708 ir_graph *irg = current_ir_graph;
710 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
711 res = optimize_node(res);
712 IRN_VRFY_IRG(res, irg);
/* Constructs a Filter (interprocedural Proj replacement) selecting proj
   from arg; call-graph inputs and backedges start empty. */
717 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
720 ir_graph *irg = current_ir_graph;
722 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
723 res->attr.filter.proj = proj;
724 res->attr.filter.in_cg = NULL;
725 res->attr.filter.backedge = NULL;
728 assert(get_Proj_pred(res));
729 assert(get_nodes_block(get_Proj_pred(res)));
731 res = optimize_node(res);
732 IRN_VRFY_IRG(res, irg);
734 } /* new_bd_Filter */
/* Constructs a Mux: sel ? ir_true : ir_false, result in mode. */
737 new_bd_Mux(dbg_info *db, ir_node *block,
738 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
741 ir_graph *irg = current_ir_graph;
747 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
750 res = optimize_node(res);
751 IRN_VRFY_IRG(res, irg);
/* Constructs a CopyB copying a data_type object from src to dst; pinned. */
756 new_bd_CopyB(dbg_info *db, ir_node *block,
757 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
760 ir_graph *irg = current_ir_graph;
766 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
768 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
769 res->attr.copyb.data_type = data_type;
770 res = optimize_node(res);
771 IRN_VRFY_IRG(res, irg);
776 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
777 ir_node *objptr, ir_type *type) {
780 ir_graph *irg = current_ir_graph;
784 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
785 res->attr.instof.type = type;
786 res = optimize_node(res);
787 IRN_VRFY_IRG(res, irg);
789 } /* new_bd_InstOf */
/* Constructs a Raise throwing exception object obj with memory store. */
792 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
795 ir_graph *irg = current_ir_graph;
799 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
800 res = optimize_node(res);
801 IRN_VRFY_IRG(res, irg);
/* Constructs a Bound checking lower <= idx < upper; pinned. */
806 new_bd_Bound(dbg_info *db, ir_node *block,
807 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
810 ir_graph *irg = current_ir_graph;
816 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
817 res->attr.bound.exc.pin_state = op_pin_state_pinned;
818 res = optimize_node(res);
819 IRN_VRFY_IRG(res, irg);
/* Constructs a Pin fixing node to this block; result has node's mode. */
824 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
826 ir_graph *irg = current_ir_graph;
828 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
829 res = optimize_node(res);
830 IRN_VRFY_IRG(res, irg);
/* Constructs an inline-assembler node: arity data inputs with constraints,
   n_outs output constraints, n_clobber clobbered registers and the asm text.
   Constraint/clobber arrays are copied onto the graph's obstack. */
835 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
836 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
838 ir_graph *irg = current_ir_graph;
841 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
842 res->attr.assem.pin_state = op_pin_state_pinned;
843 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
844 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
845 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
846 res->attr.assem.asm_text = asm_text;
/* Deep-copy caller-supplied arrays so the node owns its attribute data. */
848 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
849 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
850 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
852 res = optimize_node(res);
853 IRN_VRFY_IRG(res, irg);
857 /* --------------------------------------------- */
858 /* private interfaces, for professional use only */
859 /* --------------------------------------------- */
862 /* Constructs a Block with a fixed number of predecessors.
863 Does not set current_block. Can not be used with automatic
864 Phi node construction. */
/* All new_rd_* wrappers below share one pattern: save current_ir_graph,
   install irg, delegate to the new_bd_* constructor, restore. */
866 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
867 ir_graph *rem = current_ir_graph;
870 current_ir_graph = irg;
871 res = new_bd_Block(db, arity, in);
872 current_ir_graph = rem;
/* Start node in irg. */
878 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
879 ir_graph *rem = current_ir_graph;
882 current_ir_graph = irg;
883 res = new_bd_Start(db, block);
884 current_ir_graph = rem;
/* End node in irg. */
890 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
892 ir_graph *rem = current_ir_graph;
894 current_ir_graph = irg;
895 res = new_bd_End(db, block);
896 current_ir_graph = rem;
902 /* Creates a Phi node with all predecessors. Calling this constructor
903 is only allowed if the corresponding block is mature. */
905 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
907 ir_graph *rem = current_ir_graph;
909 current_ir_graph = irg;
910 res = new_bd_Phi(db, block,arity, in, mode);
911 current_ir_graph = rem;
/* Typed Const in irg (see new_bd_Const_type). */
918 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
920 ir_graph *rem = current_ir_graph;
922 current_ir_graph = irg;
923 res = new_bd_Const_type(db, con, tp);
924 current_ir_graph = rem;
927 } /* new_rd_Const_type */
/* Untyped Const in irg. */
931 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
934 ir_graph *rem = current_ir_graph;
936 current_ir_graph = irg;
937 res = new_bd_Const_type(db, con, firm_unknown_type);
938 current_ir_graph = rem;
/* NOTE(review): the line below duplicates the assignment above via the rd
   path; the two look like both arms of an elided USE_ORIGINAL conditional —
   confirm against the full file. */
940 res = new_rd_Const_type(db, irg, con, firm_unknown_type);
/* Const from a long value in mode, in irg. */
947 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
948 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
949 } /* new_rd_Const_long */
/* Id node in irg. */
953 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
955 ir_graph *rem = current_ir_graph;
957 current_ir_graph = irg;
958 res = new_bd_Id(db, block, val, mode);
959 current_ir_graph = rem;
/* Proj node in irg. */
965 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
968 ir_graph *rem = current_ir_graph;
970 current_ir_graph = irg;
971 res = new_bd_Proj(db, block, arg, mode, proj);
972 current_ir_graph = rem;
/* Default Proj of a Cond, in irg. */
979 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
982 ir_graph *rem = current_ir_graph;
984 current_ir_graph = irg;
985 res = new_bd_defaultProj(db, block, arg, max_proj);
986 current_ir_graph = rem;
989 } /* new_rd_defaultProj */
/* Conv in irg; always non-strict (strict_flag 0). */
992 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
994 ir_graph *rem = current_ir_graph;
996 current_ir_graph = irg;
997 res = new_bd_Conv(db, block, op, mode, 0);
998 current_ir_graph = rem;
/* Cast in irg. */
1005 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1007 ir_graph *rem = current_ir_graph;
1009 current_ir_graph = irg;
1010 res = new_bd_Cast(db, block, op, to_tp);
1011 current_ir_graph = rem;
/* Tuple in irg. */
1017 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1019 ir_graph *rem = current_ir_graph;
1021 current_ir_graph = irg;
1022 res = new_bd_Tuple(db, block, arity, in);
1023 current_ir_graph = rem;
1026 } /* new_rd_Tuple */
/* Macro-generated rd constructors for DivMod and Borrow (see NEW_RD_* above). */
1035 NEW_RD_DIVOP(DivMod)
1050 NEW_RD_BINOP(Borrow)
1052 /* creates a rd constructor for an divRL */
1053 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1054 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1057 ir_graph *rem = current_ir_graph;
1058 current_ir_graph = irg;
1059 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1060 current_ir_graph = rem;
/* Cmp in irg. */
1066 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1067 ir_node *op1, ir_node *op2) {
1069 ir_graph *rem = current_ir_graph;
1071 current_ir_graph = irg;
1072 res = new_bd_Cmp(db, block, op1, op2);
1073 current_ir_graph = rem;
/* Jmp in irg. */
1079 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1081 ir_graph *rem = current_ir_graph;
1083 current_ir_graph = irg;
1084 res = new_bd_Jmp(db, block);
1085 current_ir_graph = rem;
/* IJmp in irg. */
1091 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1093 ir_graph *rem = current_ir_graph;
1095 current_ir_graph = irg;
1096 res = new_bd_IJmp(db, block, tgt);
1097 current_ir_graph = rem;
/* Cond in irg. */
1103 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1105 ir_graph *rem = current_ir_graph;
1107 current_ir_graph = irg;
1108 res = new_bd_Cond(db, block, c);
1109 current_ir_graph = rem;
/* Call in irg. */
1116 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1117 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1119 ir_graph *rem = current_ir_graph;
1121 current_ir_graph = irg;
1122 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1123 current_ir_graph = rem;
/* Return in irg. */
1129 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1130 ir_node *store, int arity, ir_node **in) {
1132 ir_graph *rem = current_ir_graph;
1134 current_ir_graph = irg;
1135 res = new_bd_Return(db, block, store, arity, in);
1136 current_ir_graph = rem;
1139 } /* new_rd_Return */
/* Load in irg. */
1142 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1143 ir_node *store, ir_node *adr, ir_mode *mode, cons_flags flags) {
1145 ir_graph *rem = current_ir_graph;
1147 current_ir_graph = irg;
1148 res = new_bd_Load(db, block, store, adr, mode, flags);
1149 current_ir_graph = rem;
/* Store in irg. */
1155 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1156 ir_node *store, ir_node *adr, ir_node *val, cons_flags flags) {
1158 ir_graph *rem = current_ir_graph;
1160 current_ir_graph = irg;
1161 res = new_bd_Store(db, block, store, adr, val, flags);
1162 current_ir_graph = rem;
1165 } /* new_rd_Store */
/* Alloc in irg. */
1168 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1169 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1171 ir_graph *rem = current_ir_graph;
1173 current_ir_graph = irg;
1174 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1175 current_ir_graph = rem;
1178 } /* new_rd_Alloc */
/* Free in irg. */
1181 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1182 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1184 ir_graph *rem = current_ir_graph;
1186 current_ir_graph = irg;
1187 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1188 current_ir_graph = rem;
/* Sel with no index operands (scalar member access), in irg. */
1194 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1195 ir_node *store, ir_node *objptr, ir_entity *ent) {
1197 ir_graph *rem = current_ir_graph;
1199 current_ir_graph = irg;
1200 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1201 current_ir_graph = rem;
1204 } /* new_rd_simpleSel */
/* General Sel with arity index operands, in irg. */
1207 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1208 int arity, ir_node **in, ir_entity *ent) {
1210 ir_graph *rem = current_ir_graph;
1212 current_ir_graph = irg;
1213 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1214 current_ir_graph = rem;
/* Typed SymConst in irg. */
1220 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1221 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1223 ir_graph *rem = current_ir_graph;
1225 current_ir_graph = irg;
1226 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1227 current_ir_graph = rem;
1230 } /* new_rd_SymConst_type */
/* Untyped SymConst: delegates with firm_unknown_type. */
1233 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1234 symconst_symbol value, symconst_kind symkind) {
1235 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1236 } /* new_rd_SymConst */
/* The six helpers below wrap new_rd_SymConst_type for each symconst kind;
   all place the node in the start block of irg. */
/* SymConst holding the address of entity symbol. */
1238 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1239 symconst_symbol sym;
1240 sym.entity_p = symbol;
1241 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1242 } /* new_rd_SymConst_addr_ent */
/* SymConst holding the offset of entity symbol. */
1244 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1245 symconst_symbol sym;
1246 sym.entity_p = symbol;
1247 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1248 } /* new_rd_SymConst_ofs_ent */
/* SymConst holding the address named by ident symbol. */
1250 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1251 symconst_symbol sym;
1252 sym.ident_p = symbol;
1253 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1254 } /* new_rd_SymConst_addr_name */
/* SymConst holding the type tag of type symbol. */
1256 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1257 symconst_symbol sym;
1258 sym.type_p = symbol;
1259 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1260 } /* new_rd_SymConst_type_tag */
/* SymConst holding the size of type symbol. */
1262 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1263 symconst_symbol sym;
1264 sym.type_p = symbol;
1265 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1266 } /* new_rd_SymConst_size */
/* SymConst holding the alignment of type symbol. */
1268 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1269 symconst_symbol sym;
1270 sym.type_p = symbol;
1271 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1272 } /* new_rd_SymConst_align */
/* Sync in irg: builds an empty Sync, then adds the arity predecessors one by
   one (Sync is constructed with dynamic arity). */
1275 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1277 ir_graph *rem = current_ir_graph;
1280 current_ir_graph = irg;
1281 res = new_bd_Sync(db, block);
1282 current_ir_graph = rem;
1284 for (i = 0; i < arity; ++i)
1285 add_Sync_pred(res, in[i]);
/* Confirm in irg. */
1291 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1293 ir_graph *rem = current_ir_graph;
1295 current_ir_graph = irg;
1296 res = new_bd_Confirm(db, block, val, bound, cmp);
1297 current_ir_graph = rem;
1300 } /* new_rd_Confirm */
/* Unknown in irg. */
1303 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1305 ir_graph *rem = current_ir_graph;
1307 current_ir_graph = irg;
1308 res = new_bd_Unknown(m);
1309 current_ir_graph = rem;
1312 } /* new_rd_Unknown */
/* CallBegin in irg. */
1315 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1317 ir_graph *rem = current_ir_graph;
1319 current_ir_graph = irg;
1320 res = new_bd_CallBegin(db, block, call);
1321 current_ir_graph = rem;
1324 } /* new_rd_CallBegin */
/* EndReg in irg.  NOTE(review): unlike its siblings this builds the node
   directly instead of delegating to new_bd_EndReg — equivalent here since
   new_bd_EndReg does not depend on current_ir_graph beyond irg itself. */
1327 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1330 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1331 set_irg_end_reg(irg, res);
1332 IRN_VRFY_IRG(res, irg);
1334 } /* new_rd_EndReg */
/* EndExcept in irg; built directly, see note on new_rd_EndReg. */
1337 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1340 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1341 set_irg_end_except(irg, res);
1342 IRN_VRFY_IRG (res, irg);
1344 } /* new_rd_EndExcept */
/* Break in irg. */
1347 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1349 ir_graph *rem = current_ir_graph;
1351 current_ir_graph = irg;
1352 res = new_bd_Break(db, block);
1353 current_ir_graph = rem;
1356 } /* new_rd_Break */
/* Filter in irg. */
1359 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1362 ir_graph *rem = current_ir_graph;
1364 current_ir_graph = irg;
1365 res = new_bd_Filter(db, block, arg, mode, proj);
1366 current_ir_graph = rem;
1369 } /* new_rd_Filter */
/* Mux in irg. */
1372 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1373 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1375 ir_graph *rem = current_ir_graph;
1377 current_ir_graph = irg;
1378 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1379 current_ir_graph = rem;
/* CopyB in irg. */
1384 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1385 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1387 ir_graph *rem = current_ir_graph;
1389 current_ir_graph = irg;
1390 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1391 current_ir_graph = rem;
1394 } /* new_rd_CopyB */
/* InstOf in irg. */
1397 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1398 ir_node *objptr, ir_type *type) {
1400 ir_graph *rem = current_ir_graph;
1402 current_ir_graph = irg;
1403 res = new_bd_InstOf(db, block, store, objptr, type);
1404 current_ir_graph = rem;
1407 } /* new_rd_InstOf */
/* Raise in irg. */
1410 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1412 ir_graph *rem = current_ir_graph;
1414 current_ir_graph = irg;
1415 res = new_bd_Raise(db, block, store, obj);
1416 current_ir_graph = rem;
1419 } /* new_rd_Raise */
1421 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1422 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1424 ir_graph *rem = current_ir_graph;
1426 current_ir_graph = irg;
1427 res = new_bd_Bound(db, block, store, idx, lower, upper);
1428 current_ir_graph = rem;
1431 } /* new_rd_Bound */
1433 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1435 ir_graph *rem = current_ir_graph;
1437 current_ir_graph = irg;
1438 res = new_bd_Pin(db, block, node);
1439 current_ir_graph = rem;
1444 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1445 int arity, ir_node *in[], ir_asm_constraint *inputs,
1446 int n_outs, ir_asm_constraint *outputs,
1447 int n_clobber, ident *clobber[], ident *asm_text) {
1449 ir_graph *rem = current_ir_graph;
1451 current_ir_graph = irg;
1452 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1453 current_ir_graph = rem;
1460 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1461 return new_rd_Block(NULL, irg, arity, in);
1463 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1464 return new_rd_Start(NULL, irg, block);
1466 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1467 return new_rd_End(NULL, irg, block);
1469 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1470 return new_rd_Jmp(NULL, irg, block);
1472 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1473 return new_rd_IJmp(NULL, irg, block, tgt);
1475 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1476 return new_rd_Cond(NULL, irg, block, c);
1479 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1480 ir_node *store, int arity, ir_node **in) {
1481 return new_rd_Return(NULL, irg, block, store, arity, in);
1483 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
1484 return new_rd_Const(NULL, irg, con);
1486 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
1487 return new_rd_Const_long(NULL, irg, mode, value);
1490 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
1491 return new_rd_Const_type(NULL, irg, con, tp);
1494 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1495 symconst_symbol value, symconst_kind symkind) {
1496 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
1498 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1499 ir_node *objptr, ir_entity *ent) {
1500 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1502 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1503 ir_node *objptr, int n_index, ir_node **index,
1505 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1507 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1508 ir_node *callee, int arity, ir_node **in,
1510 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1513 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1514 ir_node *op1, ir_node *op2, ir_mode *mode) {
1515 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1518 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1519 ir_node *op1, ir_node *op2, ir_mode *mode) {
1520 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1522 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1523 ir_node *op, ir_mode *mode) {
1524 return new_rd_Minus(NULL, irg, block, op, mode);
1526 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1527 ir_node *op1, ir_node *op2, ir_mode *mode) {
1528 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1530 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1531 ir_node *op1, ir_node *op2, ir_mode *mode) {
1532 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1534 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1535 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1536 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1538 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1539 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1540 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1543 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1544 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1545 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1548 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1549 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1550 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1552 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1553 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1554 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1556 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1557 ir_node *op, ir_mode *mode) {
1558 return new_rd_Abs(NULL, irg, block, op, mode);
1560 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1561 ir_node *op1, ir_node *op2, ir_mode *mode) {
1562 return new_rd_And(NULL, irg, block, op1, op2, mode);
1564 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1565 ir_node *op1, ir_node *op2, ir_mode *mode) {
1566 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1568 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1569 ir_node *op1, ir_node *op2, ir_mode *mode) {
1570 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1572 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1573 ir_node *op, ir_mode *mode) {
1574 return new_rd_Not(NULL, irg, block, op, mode);
1576 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1577 ir_node *op, ir_node *k, ir_mode *mode) {
1578 return new_rd_Shl(NULL, irg, block, op, k, mode);
1580 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1581 ir_node *op, ir_node *k, ir_mode *mode) {
1582 return new_rd_Shr(NULL, irg, block, op, k, mode);
1584 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1585 ir_node *op, ir_node *k, ir_mode *mode) {
1586 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1588 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1589 ir_node *op, ir_node *k, ir_mode *mode) {
1590 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1592 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1593 ir_node *op, ir_node *k, ir_mode *mode) {
1594 return new_rd_Carry(NULL, irg, block, op, k, mode);
1596 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1597 ir_node *op, ir_node *k, ir_mode *mode) {
1598 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1601 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1602 ir_node *op1, ir_node *op2) {
1603 return new_rd_Cmp(NULL, irg, block, op1, op2);
1606 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1607 ir_node *op, ir_mode *mode) {
1608 return new_rd_Conv(NULL, irg, block, op, mode);
1611 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1612 return new_rd_Cast(NULL, irg, block, op, to_tp);
1615 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1616 ir_node **in, ir_mode *mode) {
1617 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1619 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1620 ir_node *store, ir_node *adr, ir_mode *mode, cons_flags flags) {
1621 return new_rd_Load(NULL, irg, block, store, adr, mode, flags);
1623 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1624 ir_node *store, ir_node *adr, ir_node *val, cons_flags flags) {
1625 return new_rd_Store(NULL, irg, block, store, adr, val, flags);
1627 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1628 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1629 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1631 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1632 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1633 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1635 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1636 return new_rd_Sync(NULL, irg, block, arity, in);
1639 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1640 ir_mode *mode, long proj) {
1641 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1644 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1646 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1649 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1650 int arity, ir_node **in) {
1651 return new_rd_Tuple(NULL, irg, block, arity, in );
1653 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1654 ir_node *val, ir_mode *mode) {
1655 return new_rd_Id(NULL, irg, block, val, mode);
1658 ir_node *new_r_Bad(ir_graph *irg) {
1659 return get_irg_bad(irg);
1661 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1662 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1664 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1665 return new_rd_Unknown(irg, m);
1667 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1668 return new_rd_CallBegin(NULL, irg, block, callee);
1670 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1671 return new_rd_EndReg(NULL, irg, block);
1673 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1674 return new_rd_EndExcept(NULL, irg, block);
1676 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1677 return new_rd_Break(NULL, irg, block);
1679 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1680 ir_mode *mode, long proj) {
1681 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1683 ir_node *new_r_NoMem(ir_graph *irg) {
1684 return get_irg_no_mem(irg);
1686 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1687 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1688 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1690 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1691 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1692 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1694 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1696 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1698 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1699 ir_node *store, ir_node *obj) {
1700 return new_rd_Raise(NULL, irg, block, store, obj);
1702 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1703 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1704 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1706 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1707 return new_rd_Pin(NULL, irg, block, node);
1709 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1710 int arity, ir_node *in[], ir_asm_constraint *inputs,
1711 int n_outs, ir_asm_constraint *outputs,
1712 int n_clobber, ident *clobber[], ident *asm_text) {
1713 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1716 /** ********************/
1717 /** public interfaces */
1718 /** construction tools */
1724 * - create a new Start node in the current block
1726 * @return s - pointer to the created Start node
1731 new_d_Start(dbg_info *db) {
1734 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1735 op_Start, mode_T, 0, NULL);
1737 res = optimize_node(res);
1738 IRN_VRFY_IRG(res, current_ir_graph);
1743 new_d_End(dbg_info *db) {
1745 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1746 op_End, mode_X, -1, NULL);
1747 res = optimize_node(res);
1748 IRN_VRFY_IRG(res, current_ir_graph);
1753 /* Constructs a Block with a fixed number of predecessors.
1754 Does set current_block. Can be used with automatic Phi
1755 node construction. */
/*
 * Creates a Block with 'arity' control-flow predecessors.  While the graph is
 * in phase_building the block also gets a fresh, zeroed graph_arr (one slot
 * per local variable) used by the automatic Phi construction.
 * NOTE(review): this file is an extraction with missing lines — the
 * declarations of 'res'/'i', the loop body that sets has_unknown, and the
 * return statement are not visible here.
 */
1757 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1760 int has_unknown = 0;
1762 res = new_bd_Block(db, arity, in);
1764 /* Create and initialize array for Phi-node construction. */
1765 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1766 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1767 current_ir_graph->n_loc);
1768 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* A block with an Unknown predecessor must not be optimized yet. */
1771 for (i = arity-1; i >= 0; i--)
1772 if (is_Unknown(in[i])) {
1777 if (!has_unknown) res = optimize_node(res);
1779 IRN_VRFY_IRG(res, current_ir_graph);
1785 /* ***********************************************************************/
1786 /* Methods necessary for automatic Phi node creation */
1788 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1789 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1790 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1791 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1793 Call Graph: ( A ---> B == A "calls" B)
1795 get_value mature_immBlock
1803 get_r_value_internal |
1807 new_rd_Phi0 new_rd_Phi_in
1809 * *************************************************************************** */
1811 /** Creates a Phi node with 0 predecessors. */
1812 static inline ir_node *
1813 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1816 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1817 IRN_VRFY_IRG(res, irg);
1823 * Internal constructor of a Phi node by a phi_merge operation.
1825 * @param irg the graph on which the Phi will be constructed
1826 * @param block the block in which the Phi will be constructed
1827 * @param mode the mode of the Phi node
1828 * @param in the input array of the phi node
1829 * @param ins number of elements in the input array
1830 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1831 * the value for which the new Phi is constructed
/*
 * Allocates a Phi with the given predecessors and collapses it to the single
 * real predecessor when all inputs are the same (or self/Phi0 references).
 * NOTE(review): extraction with missing lines — the loop-internal
 * bookkeeping of 'known'/'i', several braces and the return statement are
 * not visible here.
 */
1833 static inline ir_node *
1834 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1835 ir_node **in, int ins, ir_node *phi0) {
1837 ir_node *res, *known;
1839 /* Allocate a new node on the obstack. The allocation copies the in
1841 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1842 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1844 /* This loop checks whether the Phi has more than one predecessor.
1845 If so, it is a real Phi node and we break the loop. Else the
1846 Phi node merges the same definition on several paths and therefore
1848 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
1850 for (i = ins - 1; i >= 0; --i) {
1853 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1855 /* Optimize self referencing Phis: We can't detect them yet properly, as
1856 they still refer to the Phi0 they will replace. So replace right now. */
1857 if (phi0 && in[i] == phi0)
1860 if (in[i] == res || in[i] == known)
1869 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* discard the freshly allocated Phi: remove edges and give the obstack memory back */
1872 edges_node_deleted(res, current_ir_graph);
1873 obstack_free(current_ir_graph->obst, res);
1874 if (is_Phi(known)) {
1875 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1876 order, an enclosing Phi node may become superfluous. */
1877 res = optimize_in_place_2(known);
1879 exchange(known, res);
1884 /* An undefined value, e.g., in unreachable code. */
1888 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1889 IRN_VRFY_IRG(res, irg);
1890 /* Memory Phis in endless loops must be kept alive.
1891 As we can't distinguish these easily we keep all of them alive. */
1892 if (is_Phi(res) && mode == mode_M)
1893 add_End_keepalive(get_irg_end(irg), res);
1897 } /* new_rd_Phi_in */
1900 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1902 #if PRECISE_EXC_CONTEXT
1904 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1907 * Construct a new frag_array for node n.
1908 * Copy the content from the current graph_arr of the corresponding block:
1909 * this is the current state.
1910 * Set ProjM(n) as current memory state.
1911 * Further the last entry in frag_arr of current block points to n. This
1912 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/*
 * Allocates a fresh frag array for the fragile op n: copies the current
 * block's graph_arr (the current SSA state), installs Proj(n, mode_M) as the
 * new memory state in slot 0, and chains n as the block's most recent fragile
 * op via slot n_loc-1.
 * NOTE(review): extraction with missing lines — the declarations of
 * 'arr'/'opt', the opening is_Call() test, the set_optimize(opt) restore and
 * the return statement are not visible here.
 */
1914 static inline ir_node **new_frag_arr(ir_node *n) {
1918 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1919 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1920 sizeof(ir_node *)*current_ir_graph->n_loc);
1922 /* turn off optimization before allocating Proj nodes, as res isn't
1924 opt = get_opt_optimize(); set_optimize(0);
1925 /* Here we rely on the fact that all frag ops have Memory as first result! */
1927 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1928 } else if (is_CopyB(n)) {
1929 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* all remaining fragile ops share the same memory proj number (asserted below) */
1931 assert((pn_Quot_M == pn_DivMod_M) &&
1932 (pn_Quot_M == pn_Div_M) &&
1933 (pn_Quot_M == pn_Mod_M) &&
1934 (pn_Quot_M == pn_Load_M) &&
1935 (pn_Quot_M == pn_Store_M) &&
1936 (pn_Quot_M == pn_Alloc_M) &&
1937 (pn_Quot_M == pn_Bound_M));
1938 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* remember n as the block's last fragile op (head of the frag chain) */
1942 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1944 } /* new_frag_arr */
1947 * Returns the frag_arr from a node.
/*
 * Returns the frag_arr stored in a fragile node's attributes; the attribute
 * field holding it depends on the opcode.
 * NOTE(review): the switch case labels are missing from this extraction —
 * only the per-case return statements survive.
 */
1949 static inline ir_node **get_frag_arr(ir_node *n) {
1950 switch (get_irn_opcode(n)) {
1952 return n->attr.call.exc.frag_arr;
1954 return n->attr.alloc.exc.frag_arr;
1956 return n->attr.load.exc.frag_arr;
1958 return n->attr.store.exc.frag_arr;
1960 return n->attr.except.frag_arr;
1962 } /* get_frag_arr */
/*
 * Writes val into frag_arr[pos] if the slot is still empty and then walks
 * down the chain of frag arrays hanging off slot n_loc-1 (the block's
 * fragile-op chain), filling empty slots along the way.  The DEBUG-only
 * bound of 1024 iterations catches cyclic chains.
 * NOTE(review): extraction with missing lines — loop braces and the
 * statement advancing to the next frag array are not visible here.
 */
1965 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1966 #ifdef DEBUG_libfirm
/* hard upper bound on chain length; exceeding it indicates a cycle */
1969 for (i = 1024; i >= 0; --i)
1974 if (frag_arr[pos] == NULL)
1975 frag_arr[pos] = val;
1976 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
1977 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1978 assert(arr != frag_arr && "Endless recursion detected");
1983 assert(!"potential endless recursion in set_frag_value");
1984 } /* set_frag_value */
/*
 * Returns the definition of value pos valid right after the fragile op cfOp:
 * looks it up in cfOp's frag array.  If a later set_value() overwrote the
 * block's slot, a Phi is built (or a Phi0 queued on an immature block) and
 * written back into all still-empty frag arrays.
 * NOTE(review): extraction with missing lines — the declarations of
 * 'res'/'frag_arr'/'nin', several braces and the final return are not
 * visible here.
 */
1987 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1991 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
1993 frag_arr = get_frag_arr(cfOp);
1994 res = frag_arr[pos];
1996 if (block->attr.block.graph_arr[pos] != NULL) {
1997 /* There was a set_value() after the cfOp and no get_value() before that
1998 set_value(). We must build a Phi node now. */
1999 if (block->attr.block.is_matured) {
2000 int ins = get_irn_arity(block);
2002 NEW_ARR_A(ir_node *, nin, ins);
2003 res = phi_merge(block, pos, mode, nin, ins);
/* block not matured yet: queue a Phi0 on the block's phi list */
2005 res = new_rd_Phi0(current_ir_graph, block, mode);
2006 res->attr.phi.u.pos = pos;
2007 res->attr.phi.next = block->attr.block.phis;
2008 block->attr.block.phis = res;
2010 assert(res != NULL);
2011 /* It's a Phi, we can write this into all graph_arrs with NULL */
2012 set_frag_value(block->attr.block.graph_arr, pos, res);
2014 res = get_r_value_internal(block, pos, mode);
2015 set_frag_value(block->attr.block.graph_arr, pos, res);
2019 } /* get_r_frag_value_internal */
2020 #endif /* PRECISE_EXC_CONTEXT */
2023 * Check whether a control flow node cf_pred represents an exception flow.
2025 * @param cf_pred the control flow node
2026 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
/*
 * Returns non-zero iff the control-flow predecessor cf_pred is the
 * exceptional exit of the fragile op prev_cf_op.
 * NOTE(review): extraction with missing lines — the return statements and
 * closing braces of the branches are not visible here.
 */
2028 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2030 * Note: all projections from a raise are "exceptional control flow" so we handle it
2031 * like a normal Jmp, because there is no "regular" one.
2032 * That's why Raise is no "fragile_op"!
2034 if (is_fragile_op(prev_cf_op)) {
2035 if (is_Proj(cf_pred)) {
2036 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2037 /* the regular control flow, NO exception */
2040 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2043 /* Hmm, exception but not a Proj? */
2044 assert(!"unexpected condition: fragile op without a proj");
2048 } /* is_exception_flow */
2051 * Computes the predecessors for the real phi node, and then
2052 * allocates and returns this node. The routine called to allocate the
2053 * node might optimize it away and return a real value.
2054 * This function must be called with an in-array of proper size.
/*
 * Collects the value 'pos' from all predecessor blocks (building the 'nin'
 * array) and constructs the Phi via new_rd_Phi_in().  A Phi0 is placed in
 * graph_arr[pos] first to break recursion through loops; it is exchanged for
 * the real Phi at the end.
 * NOTE(review): extraction with missing lines — the declarations of 'i' and
 * several braces/else lines are not visible here.
 */
2057 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2058 ir_node *prevBlock, *res, *phi0, *phi0_all;
2061 /* If this block has no value at pos create a Phi0 and remember it
2062 in graph_arr to break recursions.
2063 Else we may not set graph_arr as there a later value is remembered. */
2065 if (block->attr.block.graph_arr[pos] == NULL) {
2066 ir_graph *irg = current_ir_graph;
2068 if (block == get_irg_start_block(irg)) {
2069 /* Collapsing to Bad tarvals is no good idea.
2070 So we call a user-supplied routine here that deals with this case as
2071 appropriate for the given language. Sorrily the only help we can give
2072 here is the position.
2074 Even if all variables are defined before use, it can happen that
2075 we get to the start block, if a Cond has been replaced by a tuple
2076 (bad, jmp). In this case we call the function needlessly, eventually
2077 generating a nonexistent error.
2078 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2081 if (default_initialize_local_variable != NULL) {
2082 ir_node *rem = get_cur_block();
/* the callback builds into 'block'; restore the previous current block after */
2084 set_cur_block(block);
2085 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2089 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2090 /* We don't need to care about exception ops in the start block.
2091 There are none by definition. */
2092 return block->attr.block.graph_arr[pos];
2094 phi0 = new_rd_Phi0(irg, block, mode);
2095 block->attr.block.graph_arr[pos] = phi0;
2096 #if PRECISE_EXC_CONTEXT
2097 if (get_opt_precise_exc_context()) {
2098 /* Set graph_arr for fragile ops. Also here we should break recursion.
2099 We could choose a cyclic path through an cfop. But the recursion would
2100 break at some point. */
2101 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2107 /* This loop goes to all predecessor blocks of the block the Phi node
2108 is in and there finds the operands of the Phi node by calling
2109 get_r_value_internal. */
2110 for (i = 1; i <= ins; ++i) {
2111 ir_node *cf_pred = block->in[i];
2112 ir_node *prevCfOp = skip_Proj(cf_pred);
2114 if (is_Bad(prevCfOp)) {
2115 /* In case a Cond has been optimized we would get right to the start block
2116 with an invalid definition. */
2117 nin[i-1] = new_Bad();
2120 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2122 if (!is_Bad(prevBlock)) {
2123 #if PRECISE_EXC_CONTEXT
2124 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2125 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2126 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2129 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2131 nin[i-1] = new_Bad();
2135 /* We want to pass the Phi0 node to the constructor: this finds additional
2136 optimization possibilities.
2137 The Phi0 node either is allocated in this function, or it comes from
2138 a former call to get_r_value_internal(). In this case we may not yet
2139 exchange phi0, as this is done in mature_immBlock(). */
2141 phi0_all = block->attr.block.graph_arr[pos];
2142 if (! is_Phi0(phi0_all) ||
2143 get_irn_arity(phi0_all) != 0 ||
2144 get_nodes_block(phi0_all) != block)
2150 /* After collecting all predecessors into the array nin a new Phi node
2151 with these predecessors is created. This constructor contains an
2152 optimization: If all predecessors of the Phi node are identical it
2153 returns the only operand instead of a new Phi node. */
2154 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2156 /* In case we allocated a Phi0 node at the beginning of this procedure,
2157 we need to exchange this Phi0 with the real Phi. */
2159 exchange(phi0, res);
2160 block->attr.block.graph_arr[pos] = res;
2161 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2162 only an optimization. */
2169 * This function returns the last definition of a value. In case
2170 * this value was last defined in a previous block, Phi nodes are
2171 * inserted. If the part of the firm graph containing the definition
2172 * is not yet constructed, a dummy Phi node is returned.
2174 * @param block the current block
2175 * @param pos the value number of the value searched
2176 * @param mode the mode of this value (needed for Phi construction)
/*
 * Returns the last definition of value pos in block; inserts Phi nodes for
 * values defined in predecessor blocks, and Phi0 placeholders for immature
 * blocks.  The four cases are documented in the body below.
 * NOTE(review): extraction with missing lines — the declaration of 'res',
 * some braces and the final return are not visible here.
 */
2179 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2181 /* There are 4 cases to treat.
2183 1. The block is not mature and we visit it the first time. We can not
2184 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2185 predecessors is returned. This node is added to the linked list (block
2186 attribute "phis") of the containing block to be completed when this block is
2187 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2190 2. The value is already known in this block, graph_arr[pos] is set and we
2191 visit the block the first time. We can return the value without
2192 creating any new nodes.
2194 3. The block is mature and we visit it the first time. A Phi node needs
2195 to be created (phi_merge). If the Phi is not needed, as all it's
2196 operands are the same value reaching the block through different
2197 paths, it's optimized away and the value itself is returned.
2199 4. The block is mature, and we visit it the second time. Now two
2200 subcases are possible:
2201 * The value was computed completely the last time we were here. This
2202 is the case if there is no loop. We can return the proper value.
2203 * The recursion that visited this node and set the flag did not
2204 return yet. We are computing a value in a loop and need to
2205 break the recursion. This case only happens if we visited
2206 the same block with phi_merge before, which inserted a Phi0.
2207 So we return the Phi0.
2210 /* case 4 -- already visited. */
2211 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2212 /* As phi_merge allocates a Phi0 this value is always defined. Here
2213 is the critical difference of the two algorithms. */
2214 assert(block->attr.block.graph_arr[pos]);
2215 return block->attr.block.graph_arr[pos];
2218 /* visited the first time */
2219 set_irn_visited(block, get_irg_visited(current_ir_graph));
2221 /* Get the local valid value */
2222 res = block->attr.block.graph_arr[pos];
2224 /* case 2 -- If the value is actually computed, return it. */
2228 if (block->attr.block.is_matured) { /* case 3 */
2230 /* The Phi has the same amount of ins as the corresponding block. */
2231 int ins = get_irn_arity(block);
2233 NEW_ARR_A(ir_node *, nin, ins);
2235 /* Phi merge collects the predecessors and then creates a node. */
2236 res = phi_merge(block, pos, mode, nin, ins);
2238 } else { /* case 1 */
2239 /* The block is not mature, we don't know how many in's are needed. A Phi
2240 with zero predecessors is created. Such a Phi node is called Phi0
2241 node. The Phi0 is then added to the list of Phi0 nodes in this block
2242 to be matured by mature_immBlock later.
2243 The Phi0 has to remember the pos of it's internal value. If the real
2244 Phi is computed, pos is used to update the array with the local
2246 res = new_rd_Phi0(current_ir_graph, block, mode);
2247 res->attr.phi.u.pos = pos;
2248 res->attr.phi.next = block->attr.block.phis;
2249 block->attr.block.phis = res;
2252 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2254 /* The local valid value is available now. */
2255 block->attr.block.graph_arr[pos] = res;
2258 } /* get_r_value_internal */
2260 /* ************************************************************************** */
2263 * Finalize a Block node, when all control flows are known.
2264 * Acceptable parameters are only Block nodes.
/*
 * Finalizes an immature Block once all its control-flow predecessors are
 * known: fixes the backedge array, matures every queued Phi0 on the block's
 * phi list via phi_merge()/exchange(), marks the block matured and optimizes
 * it in place.
 * NOTE(review): extraction with missing lines — the declarations of
 * 'ins'/'n'/'next'/'nin' and some braces are not visible here.
 */
2267 mature_immBlock(ir_node *block) {
2272 assert(is_Block(block));
2273 if (!get_Block_matured(block)) {
2274 ir_graph *irg = current_ir_graph;
/* block->in[0] is the block itself; real predecessors start at index 1 */
2276 ins = ARR_LEN(block->in) - 1;
2277 /* Fix block parameters */
2278 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2280 /* An array for building the Phi nodes. */
2281 NEW_ARR_A(ir_node *, nin, ins);
2283 /* Traverse a chain of Phi nodes attached to this block and mature
2285 for (n = block->attr.block.phis; n; n = next) {
2286 inc_irg_visited(irg);
2287 next = n->attr.phi.next;
2288 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2291 block->attr.block.is_matured = 1;
2293 /* Now, as the block is a finished Firm node, we can optimize it.
2294 Since other nodes have been allocated since the block was created
2295 we can not free the node on the obstack. Therefore we have to call
2296 optimize_in_place().
2297 Unfortunately the optimization does not change a lot, as all allocated
2298 nodes refer to the unoptimized node.
2299 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2300 block = optimize_in_place_2(block);
2301 IRN_VRFY_IRG(block, irg);
2303 } /* mature_immBlock */
2306 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2307 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2311 new_d_Const(dbg_info *db, tarval *con) {
2312 return new_bd_Const(db, con);
2316 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2317 return new_bd_Const_long(db, mode, value);
2318 } /* new_d_Const_long */
2322 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
2323 return new_bd_Const_type(db, con, tp);
2324 } /* new_d_Const_type */
2328 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2329 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2333 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2334 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2339 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2342 assert(arg->op == op_Cond);
2343 arg->attr.cond.kind = fragmentary;
2344 arg->attr.cond.default_proj = max_proj;
2345 res = new_Proj(arg, mode_X, max_proj);
2347 } /* new_d_defaultProj */
2350 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2351 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2355 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2356 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2357 } /* new_d_strictConv */
2361 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2362 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2366 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2367 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2378 * Allocate a frag array for a node if the current graph state is phase_building.
2380 * @param irn the node for which the frag array should be allocated
2381 * @param op the opcode of the (original) node, if does not match opcode of irn,
2383 * @param frag_store the address of the frag store in irn attributes, if this
2384 * address contains a value != NULL, does nothing
2386 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2387 if (get_opt_precise_exc_context()) {
2388 if ((current_ir_graph->phase_state == phase_building) &&
2389 (get_irn_op(irn) == op) && /* Could be optimized away. */
2390 !*frag_store) /* Could be a cse where the arr is already set. */ {
2391 *frag_store = new_frag_arr(irn);
2394 } /* firm_alloc_frag_arr */
/* --- Division family with debug info: each builds the node in the current
 *     block and, under PRECISE_EXC_CONTEXT, allocates a frag array so the
 *     memory state at the possibly-excepting node can be reconstructed.
 * NOTE(review): the visible text elides the `ir_node *res;` declarations,
 * `#endif`s, `return res;` lines and some closing braces of these functions. */
2397 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2399 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2400 #if PRECISE_EXC_CONTEXT
2401 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2408 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2410 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2411 #if PRECISE_EXC_CONTEXT
2412 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2416 } /* new_d_DivMod */
2420 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2422 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2423 #if PRECISE_EXC_CONTEXT
2424 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
/* DivRL = remainderless Div. Passing op_Div (not op_DivRL) below is presumably
 * intentional because DivRL nodes are built with op_Div plus a no-remainder
 * attribute — TODO confirm against new_bd_DivRL. */
2432 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2434 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2435 #if PRECISE_EXC_CONTEXT
2436 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2443 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2445 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2446 #if PRECISE_EXC_CONTEXT
2447 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
/* --- Comparison and control-flow constructors in the current block. --- */
2467 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2468 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2472 new_d_Jmp(dbg_info *db) {
2473 return new_bd_Jmp(db, current_ir_graph->current_block);
2477 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2478 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2482 new_d_Cond(dbg_info *db, ir_node *c) {
2483 return new_bd_Cond(db, current_ir_graph->current_block, c);
/* Call: may raise, so a frag array is allocated under PRECISE_EXC_CONTEXT.
 * NOTE(review): `res` declaration, `#endif` and `return res;` elided here. */
2488 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2491 res = new_bd_Call(db, current_ir_graph->current_block,
2492 store, callee, arity, in, tp);
2493 #if PRECISE_EXC_CONTEXT
2494 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2501 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2502 return new_bd_Return(db, current_ir_graph->current_block,
2504 } /* new_d_Return */
/* --- Memory operations in the current block. Load/Store/Alloc may raise,
 *     so each allocates a frag array under PRECISE_EXC_CONTEXT.
 * NOTE(review): `res` declarations, `#endif`s and `return res;` lines are
 * elided in the visible text of these functions. */
2507 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode, cons_flags flags) {
2509 res = new_bd_Load(db, current_ir_graph->current_block,
2510 store, addr, mode, flags);
2511 #if PRECISE_EXC_CONTEXT
2512 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2519 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val, cons_flags flags) {
2521 res = new_bd_Store(db, current_ir_graph->current_block,
2522 store, addr, val, flags);
2523 #if PRECISE_EXC_CONTEXT
2524 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2531 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2532 ir_where_alloc where) {
2534 res = new_bd_Alloc(db, current_ir_graph->current_block,
2535 store, size, alloc_type, where);
2536 #if PRECISE_EXC_CONTEXT
2537 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
/* Free cannot raise: plain delegation, no frag array. */
2544 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2545 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2546 return new_bd_Free(db, current_ir_graph->current_block,
2547 store, ptr, size, free_type, where);
/* simpleSel: entity selection without array indices (arity 0, index NULL). */
2551 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2552 /* GL: objptr was called frame before. Frame was a bad choice for the name
2553 as the operand could as well be a pointer to a dynamic object. */
2555 return new_bd_Sel(db, current_ir_graph->current_block,
2556 store, objptr, 0, NULL, ent);
2557 } /* new_d_simpleSel */
/* General Sel with n_index array indices. */
2560 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2561 return new_bd_Sel(db, current_ir_graph->current_block,
2562 store, objptr, n_index, index, sel);
/* SymConsts are placed in the start block, not the current block. */
2566 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2567 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2569 } /* new_d_SymConst_type */
/* SymConst without an explicit type: uses firm_unknown_type. */
2572 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2573 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2574 value, kind, firm_unknown_type);
2575 } /* new_d_SymConst */
/* --- Misc constructors delegating into the current block.
 * NOTE(review): some argument lists and closing braces are elided in the
 * visible text (e.g. new_d_Confirm, new_d_Filter). --- */
/* Sync goes through the rd (graph-explicit) interface, unlike its siblings. */
2578 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2579 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2583 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2584 return new_bd_Confirm(db, current_ir_graph->current_block,
2586 } /* new_d_Confirm */
/* Unknown is block-independent: no block argument is passed. */
2589 new_d_Unknown(ir_mode *m) {
2590 return new_bd_Unknown(m);
2591 } /* new_d_Unknown */
2594 new_d_CallBegin(dbg_info *db, ir_node *call) {
2595 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2596 } /* new_d_CallBegin */
2599 new_d_EndReg(dbg_info *db) {
2600 return new_bd_EndReg(db, current_ir_graph->current_block);
2601 } /* new_d_EndReg */
2604 new_d_EndExcept(dbg_info *db) {
2605 return new_bd_EndExcept(db, current_ir_graph->current_block);
2606 } /* new_d_EndExcept */
2609 new_d_Break(dbg_info *db) {
2610 return new_bd_Break(db, current_ir_graph->current_block);
2614 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2615 return new_bd_Filter(db, current_ir_graph->current_block,
2617 } /* new_d_Filter */
2620 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2621 ir_node *ir_true, ir_mode *mode) {
2622 return new_bd_Mux(db, current_ir_graph->current_block,
2623 sel, ir_false, ir_true, mode);
/* CopyB may raise: frag array under PRECISE_EXC_CONTEXT.
 * NOTE(review): `res` declarations, `#endif`s and `return res;` lines are
 * elided in the visible text of CopyB and Bound below. */
2626 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2627 ir_node *dst, ir_node *src, ir_type *data_type) {
2629 res = new_bd_CopyB(db, current_ir_graph->current_block,
2630 store, dst, src, data_type);
2631 #if PRECISE_EXC_CONTEXT
2632 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2638 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2639 return new_bd_InstOf(db, current_ir_graph->current_block,
2640 store, objptr, type);
2641 } /* new_d_InstOf */
2644 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2645 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
/* Bound check may raise: frag array under PRECISE_EXC_CONTEXT. */
2648 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2649 ir_node *idx, ir_node *lower, ir_node *upper) {
2651 res = new_bd_Bound(db, current_ir_graph->current_block,
2652 store, idx, lower, upper);
2653 #if PRECISE_EXC_CONTEXT
2654 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2660 new_d_Pin(dbg_info *db, ir_node *node) {
2661 return new_bd_Pin(db, current_ir_graph->current_block, node);
2665 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2666 int n_outs, ir_asm_constraint *outputs,
2667 int n_clobber, ident *clobber[], ident *asm_text) {
2668 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2671 /* ********************************************************************* */
2672 /* Comfortable interface with automatic Phi node construction. */
2673 /* (Uses also constructors of ?? interface, except new_Block. */
2674 /* ********************************************************************* */
2676 /* Block construction */
2677 /* immature Block without predecessors */
/* Creates an immature (not yet matured) Block with no predecessors; edges are
 * added later via add_immBlock_pred and the block is finished by maturing.
 * NOTE(review): the `ir_node *res;` declaration and `return res;` lines are
 * elided in the visible text. */
2679 new_d_immBlock(dbg_info *db) {
2682 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2683 /* creates a new dynamic in-array as length of in is -1 */
2684 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2686 /* macroblock head */
/* Fresh block attributes: unmatured, alive, macroblock head, unlabeled. */
2689 res->attr.block.is_matured = 0;
2690 res->attr.block.is_dead = 0;
2691 res->attr.block.is_mb_head = 1;
2692 res->attr.block.has_label = 0;
2693 res->attr.block.irg = current_ir_graph;
2694 res->attr.block.backedge = NULL;
2695 res->attr.block.in_cg = NULL;
2696 res->attr.block.cg_backedge = NULL;
2697 res->attr.block.extblk = NULL;
2698 res->attr.block.region = NULL;
2699 res->attr.block.mb_depth = 0;
2700 res->attr.block.label = 0;
2702 set_Block_block_visited(res, 0);
2704 /* Create and initialize array for Phi-node construction. */
/* graph_arr holds one slot per local value number (n_loc), zero-initialized. */
2705 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2706 current_ir_graph->n_loc);
2707 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2709 /* Immature block may not be optimized! */
2710 IRN_VRFY_IRG(res, current_ir_graph);
2713 } /* new_d_immBlock */
/* Convenience wrapper: immature block without debug info. */
2716 new_immBlock(void) {
2717 return new_d_immBlock(NULL);
2718 } /* new_immBlock */
2720 /* immature PartBlock with its predecessors */
/* A PartBlock shares the macroblock header of its predecessor's block
 * (in[0]) and sits one level deeper in the macroblock nesting. */
2722 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2723 ir_node *res = new_d_immBlock(db);
2724 ir_node *blk = get_nodes_block(pred_jmp);
2726 res->in[0] = blk->in[0];
2727 assert(res->in[0] != NULL);
2728 add_immBlock_pred(res, pred_jmp);
/* Not a macroblock head itself; depth is one below the predecessor block. */
2730 res->attr.block.is_mb_head = 0;
2731 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2734 } /* new_d_immPartBlock */
2737 new_immPartBlock(ir_node *pred_jmp) {
2738 return new_d_immPartBlock(NULL, pred_jmp);
2739 } /* new_immPartBlock */
2741 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new control-flow predecessor of an immature block.
 * Only legal before the block is matured, and only on macroblock heads. */
2743 add_immBlock_pred(ir_node *block, ir_node *jmp) {
/* n = index of the slot about to be appended (in[0] is the MB header). */
2744 int n = ARR_LEN(block->in) - 1;
2746 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2747 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2748 assert(is_ir_node(jmp));
2750 ARR_APP1(ir_node *, block->in, jmp);
/* Notify edge/debug hooks about the new in-edge. */
2752 hook_set_irn_n(block, n, jmp, NULL);
2753 } /* add_immBlock_pred */
2755 /* changing the current block */
/* Makes target the block into which subsequent nodes are constructed. */
2757 set_cur_block(ir_node *target) {
2758 current_ir_graph->current_block = target;
2759 } /* set_cur_block */
2761 /* ************************ */
2762 /* parameter administration */
2764 /* get a value from the parameter array from the current block by its index */
/* Position pos is offset by 1 internally: slot 0 of graph_arr is the store. */
2766 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2767 ir_graph *irg = current_ir_graph;
2768 assert(get_irg_phase_state(irg) == phase_building);
/* Fresh visited epoch for the Phi-construction walk in get_r_value_internal. */
2769 inc_irg_visited(irg);
2774 return get_r_value_internal(irg->current_block, pos + 1, mode);
2777 /* get a value from the parameter array from the current block by its index */
2779 get_value(int pos, ir_mode *mode) {
2780 return get_d_value(NULL, pos, mode);
2783 /* set a value at position pos in the parameter array from the current block */
2785 set_value(int pos, ir_node *value) {
2786 ir_graph *irg = current_ir_graph;
2787 assert(get_irg_phase_state(irg) == phase_building);
/* pos is shifted by 1: slot 0 of graph_arr is reserved for the store. */
2789 assert(pos+1 < irg->n_loc);
2790 assert(is_ir_node(value));
2791 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2794 /* Find the value number for a node in the current block.*/
/* Linear backward scan of graph_arr, skipping slot 0 (the store).
 * NOTE(review): the loop body's return and the not-found return are elided
 * in the visible text. */
2796 find_value(ir_node *value) {
2798 ir_node *bl = current_ir_graph->current_block;
2800 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2801 if (bl->attr.block.graph_arr[i] == value)
2806 /* get the current store */
/* Reads value slot 0 (the memory state) of the current block.
 * NOTE(review): the function signature line is elided in the visible text. */
2809 ir_graph *irg = current_ir_graph;
2811 assert(get_irg_phase_state(irg) == phase_building);
2812 /* GL: one could call get_value instead */
2813 inc_irg_visited(irg);
2814 return get_r_value_internal(irg->current_block, 0, mode_M);
2817 /* set the current store: handles automatic Sync construction for Load nodes */
/* NOTE(review): several closing braces and the construction of in[0]/in[1]
 * before new_Sync are elided in the visible text of this function. */
2819 set_store(ir_node *store) {
2820 ir_node *load, *pload, *pred, *in[2];
2822 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2823 /* Beware: due to dead code elimination, a store might become a Bad node even in
2824 the construction phase. */
2825 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2827 if (get_opt_auto_create_sync()) {
2828 /* handle non-volatile Load nodes by automatically creating Sync's */
2829 load = skip_Proj(store);
2830 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2831 pred = get_Load_mem(load);
2833 if (is_Sync(pred)) {
2834 /* a Load after a Sync: move it up */
2835 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2837 set_Load_mem(load, get_memop_mem(mem));
2838 add_Sync_pred(pred, store);
2841 pload = skip_Proj(pred);
2842 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2843 /* a Load after a Load: create a new Sync */
2844 set_Load_mem(load, get_Load_mem(pload));
2848 store = new_Sync(2, in);
/* Slot 0 of the current block's value array holds the memory state. */
2853 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Adds ka to the End node's keep-alive edges so it survives dead-node removal. */
2857 keep_alive(ir_node *ka) {
2858 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2861 /* --- Useful access routines --- */
2862 /* Returns the current block of the current graph. To set the current
2863 block use set_cur_block. */
2864 ir_node *get_cur_block(void) {
2865 return get_irg_current_block(current_ir_graph);
2866 } /* get_cur_block */
2868 /* Returns the frame type of the current graph */
2869 ir_type *get_cur_frame_type(void) {
2870 return get_irg_frame_type(current_ir_graph);
2871 } /* get_cur_frame_type */
2874 /* ********************************************************************* */
2877 /* call once for each run of the library */
/* Installs the language-dependent callback used to initialize local
 * variables that are read before being written. */
2879 firm_init_cons(uninitialized_local_variable_func_t *func) {
2880 default_initialize_local_variable = func;
2881 } /* firm_init_cons */
/* Finalizes construction of every graph in the program and advances the
 * program's phase to phase_high. */
2884 irp_finalize_cons(void) {
2886 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2887 irg_finalize_cons(get_irp_irg(i));
2889 irp->phase_state = phase_high;
2890 } /* irp_finalize_cons */
/* --- Comfortable (dbg-info-free) interface: each new_X simply delegates to
 *     new_d_X with a NULL debug handle. Control flow, constants, Sel, Call.
 * NOTE(review): the closing braces of these one-line wrappers are elided in
 * the visible text. --- */
2893 ir_node *new_Block(int arity, ir_node **in) {
2894 return new_d_Block(NULL, arity, in);
2896 ir_node *new_Start(void) {
2897 return new_d_Start(NULL);
2899 ir_node *new_End(void) {
2900 return new_d_End(NULL);
2902 ir_node *new_Jmp(void) {
2903 return new_d_Jmp(NULL);
2905 ir_node *new_IJmp(ir_node *tgt) {
2906 return new_d_IJmp(NULL, tgt);
2908 ir_node *new_Cond(ir_node *c) {
2909 return new_d_Cond(NULL, c);
2912 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2913 return new_d_Return(NULL, store, arity, in);
2915 ir_node *new_Const(tarval *con) {
2916 return new_d_Const(NULL, con);
2919 ir_node *new_Const_long(ir_mode *mode, long value) {
2920 return new_d_Const_long(NULL, mode, value);
2924 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2925 return new_d_Const_type(NULL, con, tp);
2929 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2930 return new_d_SymConst_type(NULL, mode, value, kind, type);
2932 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2933 return new_d_SymConst(NULL, mode, value, kind);
2935 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2936 return new_d_simpleSel(NULL, store, objptr, ent);
2938 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2940 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2942 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2944 return new_d_Call(NULL, store, callee, arity, in, tp);
/* --- dbg-info-free wrappers: arithmetic, bitwise and shift operations. --- */
2947 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2948 return new_d_Add(NULL, op1, op2, mode);
2951 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2952 return new_d_Sub(NULL, op1, op2, mode);
2954 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2955 return new_d_Minus(NULL, op, mode);
2957 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2958 return new_d_Mul(NULL, op1, op2, mode);
2960 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2961 return new_d_Mulh(NULL, op1, op2, mode);
2963 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2964 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2966 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2967 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2970 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2971 return new_d_Div(NULL, memop, op1, op2, mode, state);
2974 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2975 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
2977 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2978 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2980 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2981 return new_d_Abs(NULL, op, mode);
2983 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2984 return new_d_And(NULL, op1, op2, mode);
2986 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
2987 return new_d_Or(NULL, op1, op2, mode);
2989 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
2990 return new_d_Eor(NULL, op1, op2, mode);
2992 ir_node *new_Not(ir_node *op, ir_mode *mode) {
2993 return new_d_Not(NULL, op, mode);
2995 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
2996 return new_d_Shl(NULL, op, k, mode);
2998 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
2999 return new_d_Shr(NULL, op, k, mode);
3001 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3002 return new_d_Shrs(NULL, op, k, mode);
3004 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
3005 return new_d_Rotl(NULL, op, k, mode);
3007 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3008 return new_d_Carry(NULL, op1, op2, mode);
3010 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3011 return new_d_Borrow(NULL, op1, op2, mode);
3014 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3015 return new_d_Cmp(NULL, op1, op2);
/* --- dbg-info-free wrappers: conversions, Phi, memory operations, Proj. --- */
3018 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3019 return new_d_Conv(NULL, op, mode);
3021 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3022 return new_d_strictConv(NULL, op, mode);
3025 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3026 return new_d_Cast(NULL, op, to_tp);
3029 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3030 return new_d_Phi(NULL, arity, in, mode);
3032 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode, cons_flags flags) {
3033 return new_d_Load(NULL, store, addr, mode, flags);
3035 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val, cons_flags flags) {
3036 return new_d_Store(NULL, store, addr, val, flags);
3038 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3039 ir_where_alloc where) {
3040 return new_d_Alloc(NULL, store, size, alloc_type, where);
3042 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3043 ir_type *free_type, ir_where_alloc where) {
3044 return new_d_Free(NULL, store, ptr, size, free_type, where);
3046 ir_node *new_Sync(int arity, ir_node *in[]) {
3047 return new_d_Sync(NULL, arity, in);
3050 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3051 return new_d_Proj(NULL, arg, mode, proj);
3054 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3055 return new_d_defaultProj(NULL, arg, max_proj);
3058 ir_node *new_Tuple(int arity, ir_node **in) {
3059 return new_d_Tuple(NULL, arity, in);
/* --- dbg-info-free wrappers: remaining node kinds. Bad and NoMem are the
 *     graph's singleton nodes and are fetched, not constructed. --- */
3061 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3062 return new_d_Id(NULL, val, mode);
3065 ir_node *new_Bad(void) {
3066 return get_irg_bad(current_ir_graph);
3068 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3069 return new_d_Confirm(NULL, val, bound, cmp);
3071 ir_node *new_Unknown(ir_mode *m) {
3072 return new_d_Unknown(m);
3074 ir_node *new_CallBegin(ir_node *callee) {
3075 return new_d_CallBegin(NULL, callee);
3077 ir_node *new_EndReg(void) {
3078 return new_d_EndReg(NULL);
3080 ir_node *new_EndExcept(void) {
3081 return new_d_EndExcept(NULL);
3083 ir_node *new_Break(void) {
3084 return new_d_Break(NULL);
3086 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3087 return new_d_Filter(NULL, arg, mode, proj);
3089 ir_node *new_NoMem(void) {
3090 return get_irg_no_mem(current_ir_graph);
3092 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3093 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3095 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3096 return new_d_CopyB(NULL, store, dst, src, data_type);
3098 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3099 return new_d_InstOf(NULL, store, objptr, ent);
3101 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3102 return new_d_Raise(NULL, store, obj);
3104 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3105 return new_d_Bound(NULL, store, idx, lower, upper);
3107 ir_node *new_Pin(ir_node *node) {
3108 return new_d_Pin(NULL, node);
3110 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3111 int n_outs, ir_asm_constraint *outputs,
3112 int n_clobber, ident *clobber[], ident *asm_text) {
3113 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3116 /* create a new anchor node */
3117 ir_node *new_Anchor(ir_graph *irg) {
3118 ir_node *in[anchor_last];
3119 memset(in, 0, sizeof(in));
3120 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);