2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
18 /* Uncomment to use original code instead of generated one */
19 // #define USE_ORIGINAL
20 /* when we need verifying */
/* NOTE(review): the two IRN_VRFY_IRG definitions below are the release
 * (no-op) and debug (irn_vrfy_irg) variants; the surrounding #ifdef/#else
 * lines are not visible in this listing — confirm against the full file. */
21 # define IRN_VRFY_IRG(res, irg)
22 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
23 * Language dependent variable initialization callback.
/* Frontend-supplied hook used to initialize uninitialized local variables;
 * NULL until a frontend registers one. */
24 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
25 /* creates a bd constructor for a binop */
/* Expands to a new_bd_<instr>() builder: wraps op1/op2 into an in[] array,
 * creates the node in the given block of current_ir_graph, runs local
 * optimization and verification.  (Some macro lines are elided here.) */
26 #define NEW_BD_BINOP(instr) \
27 new_bd_##instr(dbg_info *db, ir_node *block, \
28 ir_node *op1, ir_node *op2, ir_mode *mode) \
29 ir_graph *irg = current_ir_graph; \
30 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
31 res = optimize_node(res); \
32 IRN_VRFY_IRG(res, irg); \
33 /* creates a bd constructor for an unop */
/* Same as NEW_BD_BINOP but for a single-operand node. */
34 #define NEW_BD_UNOP(instr) \
35 new_bd_##instr(dbg_info *db, ir_node *block, \
36 ir_node *op, ir_mode *mode) \
37 ir_graph *irg = current_ir_graph; \
38 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
39 res = optimize_node(res); \
40 IRN_VRFY_IRG(res, irg); \
41 /* creates a bd constructor for a divop */
/* Division-like builder: takes a memory operand plus two data operands,
 * produces a mode_T node and fills the divmod attributes (pin state,
 * result mode, remainder flag). */
42 #define NEW_BD_DIVOP(instr) \
43 new_bd_##instr(dbg_info *db, ir_node *block, \
44 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
45 ir_graph *irg = current_ir_graph; \
46 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
47 res->attr.divmod.exc.pin_state = state; \
48 res->attr.divmod.res_mode = mode; \
49 res->attr.divmod.no_remainder = 0; \
50 res = optimize_node(res); \
51 IRN_VRFY_IRG(res, irg); \
52 /* creates a rd constructor for a binop */
/* rd variants: same as the bd builders, but take an explicit irg.
 * They temporarily swap current_ir_graph, delegate to the bd builder,
 * then restore the previous graph. */
53 #define NEW_RD_BINOP(instr) \
54 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
55 ir_node *op1, ir_node *op2, ir_mode *mode) \
56 ir_graph *rem = current_ir_graph; \
57 current_ir_graph = irg; \
58 res = new_bd_##instr(db, block, op1, op2, mode); \
59 current_ir_graph = rem; \
60 /* creates a rd constructor for an unop */
61 #define NEW_RD_UNOP(instr) \
62 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
63 ir_node *op, ir_mode *mode) \
64 ir_graph *rem = current_ir_graph; \
65 current_ir_graph = irg; \
66 res = new_bd_##instr(db, block, op, mode); \
67 current_ir_graph = rem; \
68 /* creates a rd constructor for a divop */
69 #define NEW_RD_DIVOP(instr) \
70 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
71 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
72 ir_graph *rem = current_ir_graph; \
73 current_ir_graph = irg; \
74 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
75 current_ir_graph = rem; \
76 /* creates a d constructor for a binop */
/* d variants: build in current_ir_graph's current_block (used by the
 * frontend-facing construction interface with automatic SSA). */
77 #define NEW_D_BINOP(instr) \
78 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
79 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
80 /* creates a d constructor for an unop */
81 #define NEW_D_UNOP(instr) \
82 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
83 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
171 #include "gen_ir_cons.c.inl"
85 * Constructs a Block with a fixed number of predecessors.
86 * Does not set current_block. Cannot be used with automatic
87 * Phi node construction.
/* Creates the Block node itself (block pointer of a Block is NULL),
 * initializes all block attributes to their defaults, marks it mature
 * and unvisited, then verifies it. */
88 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
89 ir_graph *irg = current_ir_graph;
90 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
91 /* macroblock header */
92 res->attr.block.is_dead = 0;
93 res->attr.block.is_mb_head = 1;
94 res->attr.block.has_label = 0;
95 res->attr.block.irg = irg;
96 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
97 res->attr.block.in_cg = NULL;
98 res->attr.block.cg_backedge = NULL;
99 res->attr.block.extblk = NULL;
100 res->attr.block.mb_depth = 0;
101 res->attr.block.label = 0;
102 set_Block_matured(res, 1);
103 set_Block_block_visited(res, 0);
104 IRN_VRFY_IRG(res, irg);
/* Builds a Start node: mode_T, no inputs, in the given block. */
105 new_bd_Start(dbg_info *db, ir_node *block) {
106 ir_graph *irg = current_ir_graph;
107 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
108 IRN_VRFY_IRG(res, irg);
/* Builds an End node: mode_X with dynamic arity (-1) so keep-alive
 * edges can be added later. */
109 new_bd_End(dbg_info *db, ir_node *block) {
110 ir_graph *irg = current_ir_graph;
111 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
112 IRN_VRFY_IRG(res, irg);
113 * Creates a Phi node with all predecessors. Calling this constructor
114 * is only allowed if the corresponding block is mature.
/* Scans the inputs for Unknown operands; only optimizes the Phi when no
 * Unknown is present.  Memory Phis are kept alive via the End node. */
115 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
116 ir_graph *irg = current_ir_graph;
117 /* Don't assert that block matured: the use of this constructor is strongly
118 if (get_Block_matured(block))
119 assert(get_irn_arity(block) == arity);
120 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
121 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
122 for (i = arity - 1; i >= 0; --i)
123 if (is_Unknown(in[i])) {
124 if (!has_unknown) res = optimize_node(res);
125 IRN_VRFY_IRG(res, irg);
126 /* Memory Phis in endless loops must be kept alive.
127 As we can't distinguish these easily we keep all of them alive. */
128 if (is_Phi(res) && mode == mode_M)
129 add_End_keepalive(get_irg_end(irg), res);
/* Builds a Const node for tarval 'con' with type 'tp'.  Consts always
 * live in the start block; the mode is taken from the tarval. */
130 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
131 ir_graph *irg = current_ir_graph;
132 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
133 res->attr.con.tv = con;
134 set_Const_type(res, tp); /* Call method because of complex assertion. */
135 res = optimize_node (res);
136 assert(get_Const_type(res) == tp);
137 IRN_VRFY_IRG(res, irg);
138 } /* new_bd_Const_type */
/* Const with the unknown type; delegates to the typed rd constructor. */
139 new_bd_Const(dbg_info *db, tarval *con) {
140 ir_graph *irg = current_ir_graph;
141 return new_rd_Const_type (db, irg, con, firm_unknown_type);
/* Const from a plain C long, converted to a tarval of 'mode'. */
142 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
143 ir_graph *irg = current_ir_graph;
144 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
145 } /* new_bd_Const_long */
/* Builds an Id node forwarding 'val' with mode 'mode'. */
146 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
147 ir_graph *irg = current_ir_graph;
148 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
149 res = optimize_node(res);
150 IRN_VRFY_IRG(res, irg);
/* Builds a Proj selecting result 'proj' of the mode_T node 'arg'. */
151 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
152 ir_graph *irg = current_ir_graph;
153 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
154 res->attr.proj = proj;
155 assert(get_Proj_pred(res));
156 assert(get_nodes_block(get_Proj_pred(res)));
157 res = optimize_node(res);
158 IRN_VRFY_IRG(res, irg);
/* Builds the default (otherwise) Proj of a Cond: marks the Cond as
 * fragmentary and records max_proj as its default projection. */
159 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
160 ir_graph *irg = current_ir_graph;
161 assert(arg->op == op_Cond);
162 arg->attr.cond.kind = fragmentary;
163 arg->attr.cond.default_proj = max_proj;
164 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
165 } /* new_bd_defaultProj */
/* Builds a Conv of 'op' to 'mode'; strict_flag marks a strict conversion. */
166 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
167 ir_graph *irg = current_ir_graph;
168 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
169 res->attr.conv.strict = strict_flag;
170 res = optimize_node(res);
171 IRN_VRFY_IRG(res, irg);
/* Builds a Cast of 'op' to type 'to_tp' (mode is unchanged; only
 * atomic types may be cast). */
172 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
173 ir_graph *irg = current_ir_graph;
174 assert(is_atomic_type(to_tp));
175 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
176 res->attr.cast.totype = to_tp;
177 res = optimize_node(res);
178 IRN_VRFY_IRG(res, irg);
/* Builds a Tuple (mode_T) bundling 'arity' nodes. */
179 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
180 ir_graph *irg = current_ir_graph;
181 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
182 res = optimize_node (res);
183 IRN_VRFY_IRG(res, irg);
184 /** Creates a remainderless Div node. */
/* Same as the generated Div builder but sets no_remainder = 1. */
185 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
186 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
187 ir_graph *irg = current_ir_graph;
188 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
189 res->attr.divmod.exc.pin_state = state;
190 res->attr.divmod.res_mode = mode;
191 res->attr.divmod.no_remainder = 1;
192 res = optimize_node(res);
193 IRN_VRFY_IRG(res, irg);
/* Builds a Cmp of op1 and op2 (mode_T; results selected via Proj). */
194 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
195 ir_graph *irg = current_ir_graph;
196 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
197 res = optimize_node(res);
198 IRN_VRFY_IRG(res, irg);
/* Builds an unconditional Jmp (mode_X, no inputs). */
199 new_bd_Jmp(dbg_info *db, ir_node *block) {
200 ir_graph *irg = current_ir_graph;
201 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
202 res = optimize_node(res);
203 IRN_VRFY_IRG(res, irg);
/* Builds an indirect jump to computed target 'tgt'. */
204 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
205 ir_graph *irg = current_ir_graph;
206 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
207 res = optimize_node(res);
208 IRN_VRFY_IRG(res, irg);
/* Builds a Cond on selector 'c'; starts as a dense jump table with no
 * default projection and no branch prediction. */
209 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
210 ir_graph *irg = current_ir_graph;
211 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
212 res->attr.cond.kind = dense;
213 res->attr.cond.default_proj = 0;
214 res->attr.cond.pred = COND_JMP_PRED_NONE;
215 res = optimize_node(res);
216 IRN_VRFY_IRG(res, irg);
/* Builds a Call: inputs are [memory, callee, arg0..argN-1]; the argument
 * array is copied behind the two fixed inputs.  Calls start pinned with
 * no analyzed callee set. */
217 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
218 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
219 ir_graph *irg = current_ir_graph;
220 NEW_ARR_A(ir_node *, r_in, r_arity);
221 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
222 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
223 assert((get_unknown_type() == tp) || is_Method_type(tp));
224 set_Call_type(res, tp);
225 res->attr.call.exc.pin_state = op_pin_state_pinned;
226 res->attr.call.callee_arr = NULL;
227 res = optimize_node(res);
228 IRN_VRFY_IRG(res, irg);
/* Builds a Return: inputs are [memory, res0..resN-1]; results are copied
 * behind the memory input. */
229 new_bd_Return(dbg_info *db, ir_node *block,
230 ir_node *store, int arity, ir_node **in) {
231 ir_graph *irg = current_ir_graph;
232 NEW_ARR_A (ir_node *, r_in, r_arity);
233 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
234 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
235 res = optimize_node(res);
236 IRN_VRFY_IRG(res, irg);
237 } /* new_bd_Return */
/* Builds a Load of 'mode' from address 'adr' with memory 'store'.
 * Defaults: pinned, non-volatile, aligned. */
238 new_bd_Load(dbg_info *db, ir_node *block,
239 ir_node *store, ir_node *adr, ir_mode *mode) {
240 ir_graph *irg = current_ir_graph;
241 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
242 res->attr.load.exc.pin_state = op_pin_state_pinned;
243 res->attr.load.load_mode = mode;
244 res->attr.load.volatility = volatility_non_volatile;
245 res->attr.load.aligned = align_is_aligned;
246 res = optimize_node(res);
247 IRN_VRFY_IRG(res, irg);
/* Builds a Store of 'val' to address 'adr'.  Same defaults as Load. */
248 new_bd_Store(dbg_info *db, ir_node *block,
249 ir_node *store, ir_node *adr, ir_node *val) {
250 ir_graph *irg = current_ir_graph;
251 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
252 res->attr.store.exc.pin_state = op_pin_state_pinned;
253 res->attr.store.volatility = volatility_non_volatile;
254 res->attr.store.aligned = align_is_aligned;
255 res = optimize_node(res);
256 IRN_VRFY_IRG(res, irg);
/* Builds an Alloc of 'size' bytes/elements of 'alloc_type', on stack or
 * heap according to 'where'. */
257 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
258 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
259 ir_graph *irg = current_ir_graph;
260 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
261 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
262 res->attr.alloc.where = where;
263 res->attr.alloc.type = alloc_type;
264 res = optimize_node(res);
265 IRN_VRFY_IRG(res, irg);
/* Builds a Free releasing 'ptr' of 'size' and type 'free_type'.
 * Produces plain memory (mode_M). */
266 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
267 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
268 ir_graph *irg = current_ir_graph;
269 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
270 res->attr.free.where = where;
271 res->attr.free.type = free_type;
272 res = optimize_node(res);
273 IRN_VRFY_IRG(res, irg);
/* Builds a Sel selecting entity 'ent' from 'objptr' with optional array
 * indices in 'in'.  Inputs are [memory, objptr, idx0..].  Result mode is
 * mode_P_code for method entities, mode_P_data otherwise. */
274 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
275 int arity, ir_node **in, ir_entity *ent) {
276 ir_graph *irg = current_ir_graph;
277 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
278 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
279 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
280 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
281 * Sel's can select functions which should be of mode mode_P_code.
282 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
283 res->attr.sel.ent = ent;
284 res = optimize_node(res);
285 IRN_VRFY_IRG(res, irg);
/* Builds a SymConst of kind 'symkind' carrying symbol 'value' and type 'tp'. */
286 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
287 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
288 ir_graph *irg = current_ir_graph;
289 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
290 res->attr.symc.kind = symkind;
291 res->attr.symc.sym = value;
292 res->attr.symc.tp = tp;
293 res = optimize_node(res);
294 IRN_VRFY_IRG(res, irg);
295 } /* new_bd_SymConst_type */
/* Builds an empty Sync (dynamic arity); predecessors are added later. */
296 new_bd_Sync(dbg_info *db, ir_node *block) {
297 ir_graph *irg = current_ir_graph;
298 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
299 /* no need to call optimize node here, Sync are always created with no predecessors */
300 IRN_VRFY_IRG(res, irg);
/* Builds a Confirm asserting 'val' <cmp> 'bound'; mode follows 'val'. */
301 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
302 ir_node *in[2], *res;
303 ir_graph *irg = current_ir_graph;
304 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
305 res->attr.confirm.cmp = cmp;
306 res = optimize_node(res);
307 IRN_VRFY_IRG(res, irg);
308 } /* new_bd_Confirm */
/* Builds an Unknown of mode 'm'; always placed in the start block. */
309 new_bd_Unknown(ir_mode *m) {
310 ir_graph *irg = current_ir_graph;
311 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
312 res = optimize_node(res);
313 } /* new_bd_Unknown */
/* Interprocedural-view nodes: CallBegin/EndReg/EndExcept/Break. */
/* Builds a CallBegin for 'call'; its single input is the callee pointer. */
314 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
315 ir_graph *irg = current_ir_graph;
316 in[0] = get_Call_ptr(call);
317 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
318 /* res->attr.callbegin.irg = irg; */
319 res->attr.callbegin.call = call;
320 res = optimize_node(res);
321 IRN_VRFY_IRG(res, irg);
322 } /* new_bd_CallBegin */
/* Builds the EndReg node (dynamic arity) and registers it with the irg. */
323 new_bd_EndReg(dbg_info *db, ir_node *block) {
324 ir_graph *irg = current_ir_graph;
325 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
326 set_irg_end_reg(irg, res);
327 IRN_VRFY_IRG(res, irg);
328 } /* new_bd_EndReg */
/* Builds the EndExcept node (dynamic arity) and registers it with the irg. */
329 new_bd_EndExcept(dbg_info *db, ir_node *block) {
330 ir_graph *irg = current_ir_graph;
331 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
332 set_irg_end_except(irg, res);
333 IRN_VRFY_IRG (res, irg);
334 } /* new_bd_EndExcept */
/* Builds a Break (mode_X, no inputs). */
335 new_bd_Break(dbg_info *db, ir_node *block) {
336 ir_graph *irg = current_ir_graph;
337 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
338 res = optimize_node(res);
339 IRN_VRFY_IRG(res, irg);
/* Builds a Filter (interprocedural Proj variant) selecting 'proj' of 'arg'. */
340 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
341 ir_graph *irg = current_ir_graph;
342 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
343 res->attr.filter.proj = proj;
344 res->attr.filter.in_cg = NULL;
345 res->attr.filter.backedge = NULL;
346 assert(get_Proj_pred(res));
347 assert(get_nodes_block(get_Proj_pred(res)));
348 res = optimize_node(res);
349 IRN_VRFY_IRG(res, irg);
350 } /* new_bd_Filter */
/* Builds a Mux: sel ? ir_true : ir_false, result in 'mode'. */
351 new_bd_Mux(dbg_info *db, ir_node *block,
352 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
353 ir_graph *irg = current_ir_graph;
354 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
355 res = optimize_node(res);
356 IRN_VRFY_IRG(res, irg);
/* Builds a CopyB copying an object of 'data_type' from 'src' to 'dst'. */
357 new_bd_CopyB(dbg_info *db, ir_node *block,
358 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
359 ir_graph *irg = current_ir_graph;
360 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
361 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
362 res->attr.copyb.data_type = data_type;
363 res = optimize_node(res);
364 IRN_VRFY_IRG(res, irg);
/* Builds an InstOf testing whether *objptr is an instance of 'type'.
 * Inputs are [memory, objptr]; result is mode_T.
 * BUG FIX: the node was constructed with op_Sel, yet the function sets
 * res->attr.instof.type — the attribute union would be misinterpreted
 * and the resulting node would be a malformed Sel.  Use op_InstOf. */
365 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
366 ir_node *objptr, ir_type *type) {
367 ir_graph *irg = current_ir_graph;
368 res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
369 res->attr.instof.type = type;
370 res = optimize_node(res);
371 IRN_VRFY_IRG(res, irg);
372 } /* new_bd_InstOf */
/* Builds a Raise throwing exception object 'obj'; inputs [memory, obj]. */
373 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
374 ir_graph *irg = current_ir_graph;
375 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
376 res = optimize_node(res);
377 IRN_VRFY_IRG(res, irg);
/* Builds a Bound check: lower <= idx < upper; pinned by default. */
378 new_bd_Bound(dbg_info *db, ir_node *block,
379 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
380 ir_graph *irg = current_ir_graph;
381 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
382 res->attr.bound.exc.pin_state = op_pin_state_pinned;
383 res = optimize_node(res);
384 IRN_VRFY_IRG(res, irg);
/* Builds a Pin fixing 'node' to this block; mode follows 'node'. */
385 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
386 ir_graph *irg = current_ir_graph;
387 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
388 res = optimize_node(res);
389 IRN_VRFY_IRG(res, irg);
/* Builds an ASM node; copies the constraint/clobber arrays onto the
 * graph's obstack so the caller's buffers need not outlive the node. */
390 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
391 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
392 ir_graph *irg = current_ir_graph;
393 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
394 res->attr.assem.pin_state = op_pin_state_pinned;
395 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
396 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
397 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
398 res->attr.assem.asm_text = asm_text;
399 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
400 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
401 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
402 res = optimize_node(res);
403 IRN_VRFY_IRG(res, irg);
404 /* --------------------------------------------- */
405 /* private interfaces, for professional use only */
406 /* --------------------------------------------- */
407 /* Constructs a Block with a fixed number of predecessors.
408 Does not set current_block. Can not be used with automatic
409 Phi node construction. */
/* rd wrapper: swaps current_ir_graph to 'irg', delegates to the bd
 * builder, restores the previous graph.  Same pattern for all new_rd_*. */
410 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
411 ir_graph *rem = current_ir_graph;
412 current_ir_graph = irg;
413 res = new_bd_Block(db, arity, in);
414 current_ir_graph = rem;
/* rd wrapper for new_bd_Start. */
415 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
416 ir_graph *rem = current_ir_graph;
417 current_ir_graph = irg;
418 res = new_bd_Start(db, block);
419 current_ir_graph = rem;
/* rd wrapper for new_bd_End. */
420 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
421 ir_graph *rem = current_ir_graph;
422 current_ir_graph = irg;
423 res = new_bd_End(db, block);
424 current_ir_graph = rem;
425 /* Creates a Phi node with all predecessors. Calling this constructor
426 is only allowed if the corresponding block is mature. */
427 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
428 ir_graph *rem = current_ir_graph;
429 current_ir_graph = irg;
430 res = new_bd_Phi(db, block,arity, in, mode);
431 current_ir_graph = rem;
/* rd wrapper for new_bd_Const_type. */
432 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
433 ir_graph *rem = current_ir_graph;
434 current_ir_graph = irg;
435 res = new_bd_Const_type(db, con, tp);
436 current_ir_graph = rem;
437 } /* new_rd_Const_type */
/* rd Const with the unknown type. */
438 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
439 ir_graph *rem = current_ir_graph;
440 current_ir_graph = irg;
441 res = new_bd_Const_type(db, con, firm_unknown_type);
442 current_ir_graph = rem;
/* NOTE(review): the line below duplicates the construction above; it is
 * presumably the alternate branch of an elided #ifdef USE_ORIGINAL /
 * #else pair — confirm against the full file before changing. */
443 res = new_rd_Const_type(db, irg, con, firm_unknown_type);
/* rd Const from a C long value of mode 'mode'. */
444 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
445 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
446 } /* new_rd_Const_long */
/* rd wrapper for new_bd_Id. */
447 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
448 ir_graph *rem = current_ir_graph;
449 current_ir_graph = irg;
450 res = new_bd_Id(db, block, val, mode);
451 current_ir_graph = rem;
/* rd wrapper for new_bd_Proj. */
452 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
453 ir_graph *rem = current_ir_graph;
454 current_ir_graph = irg;
455 res = new_bd_Proj(db, block, arg, mode, proj);
456 current_ir_graph = rem;
/* rd wrapper for new_bd_defaultProj. */
457 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
458 ir_graph *rem = current_ir_graph;
459 current_ir_graph = irg;
460 res = new_bd_defaultProj(db, block, arg, max_proj);
461 current_ir_graph = rem;
462 } /* new_rd_defaultProj */
/* rd wrapper for new_bd_Conv; always builds a non-strict Conv (flag 0). */
463 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
464 ir_graph *rem = current_ir_graph;
465 current_ir_graph = irg;
466 res = new_bd_Conv(db, block, op, mode, 0);
467 current_ir_graph = rem;
/* rd wrapper for new_bd_Cast. */
468 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
469 ir_graph *rem = current_ir_graph;
470 current_ir_graph = irg;
471 res = new_bd_Cast(db, block, op, to_tp);
472 current_ir_graph = rem;
/* rd wrapper for new_bd_Tuple. */
473 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
474 ir_graph *rem = current_ir_graph;
475 current_ir_graph = irg;
476 res = new_bd_Tuple(db, block, arity, in);
477 current_ir_graph = rem;
478 } /* new_rd_Tuple */
/* Macro-generated rd constructors (others elided from this listing). */
479 NEW_RD_DIVOP(DivMod)
480 NEW_RD_BINOP(Borrow)
481 /* creates a rd constructor for a divRL */
/* rd wrapper for the remainderless Div builder. */
482 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
483 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
484 ir_graph *rem = current_ir_graph;
485 current_ir_graph = irg;
486 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
487 current_ir_graph = rem;
/* rd wrapper for new_bd_Cmp. */
488 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
489 ir_node *op1, ir_node *op2) {
490 ir_graph *rem = current_ir_graph;
491 current_ir_graph = irg;
492 res = new_bd_Cmp(db, block, op1, op2);
493 current_ir_graph = rem;
/* rd wrapper for new_bd_Jmp. */
494 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
495 ir_graph *rem = current_ir_graph;
496 current_ir_graph = irg;
497 res = new_bd_Jmp(db, block);
498 current_ir_graph = rem;
/* rd wrapper for new_bd_IJmp. */
499 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
500 ir_graph *rem = current_ir_graph;
501 current_ir_graph = irg;
502 res = new_bd_IJmp(db, block, tgt);
503 current_ir_graph = rem;
/* rd wrapper for new_bd_Cond. */
504 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
505 ir_graph *rem = current_ir_graph;
506 current_ir_graph = irg;
507 res = new_bd_Cond(db, block, c);
508 current_ir_graph = rem;
/* rd wrapper for new_bd_Call. */
509 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
510 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
511 ir_graph *rem = current_ir_graph;
512 current_ir_graph = irg;
513 res = new_bd_Call(db, block, store, callee, arity, in, tp);
514 current_ir_graph = rem;
/* rd wrapper for new_bd_Return. */
515 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
516 ir_node *store, int arity, ir_node **in) {
517 ir_graph *rem = current_ir_graph;
518 current_ir_graph = irg;
519 res = new_bd_Return(db, block, store, arity, in);
520 current_ir_graph = rem;
521 } /* new_rd_Return */
/* rd wrapper for new_bd_Load. */
522 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
523 ir_node *store, ir_node *adr, ir_mode *mode) {
524 ir_graph *rem = current_ir_graph;
525 current_ir_graph = irg;
526 res = new_bd_Load(db, block, store, adr, mode);
527 current_ir_graph = rem;
/* rd wrapper for new_bd_Store. */
528 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
529 ir_node *store, ir_node *adr, ir_node *val) {
530 ir_graph *rem = current_ir_graph;
531 current_ir_graph = irg;
532 res = new_bd_Store(db, block, store, adr, val);
533 current_ir_graph = rem;
534 } /* new_rd_Store */
/* rd wrapper for new_bd_Alloc. */
535 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
536 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
537 ir_graph *rem = current_ir_graph;
538 current_ir_graph = irg;
539 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
540 current_ir_graph = rem;
541 } /* new_rd_Alloc */
/* rd wrapper for new_bd_Free. */
542 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
543 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
544 ir_graph *rem = current_ir_graph;
545 current_ir_graph = irg;
546 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
547 current_ir_graph = rem;
/* rd Sel with no array indices (plain entity selection). */
548 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
549 ir_node *store, ir_node *objptr, ir_entity *ent) {
550 ir_graph *rem = current_ir_graph;
551 current_ir_graph = irg;
552 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
553 current_ir_graph = rem;
554 } /* new_rd_simpleSel */
/* rd wrapper for new_bd_Sel with explicit array indices. */
555 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
556 int arity, ir_node **in, ir_entity *ent) {
557 ir_graph *rem = current_ir_graph;
558 current_ir_graph = irg;
559 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
560 current_ir_graph = rem;
/* rd wrapper for new_bd_SymConst_type. */
561 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
562 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
563 ir_graph *rem = current_ir_graph;
564 current_ir_graph = irg;
565 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
566 current_ir_graph = rem;
567 } /* new_rd_SymConst_type */
/* SymConst with the unknown type. */
568 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
569 symconst_symbol value, symconst_kind symkind) {
570 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
571 } /* new_rd_SymConst */
/* Convenience SymConst builders: each wraps its symbol into a
 * symconst_symbol union and places the node in the start block. */
572 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
573 symconst_symbol sym;
574 sym.entity_p = symbol;
575 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
576 } /* new_rd_SymConst_addr_ent */
577 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
578 symconst_symbol sym;
579 sym.entity_p = symbol;
580 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
581 } /* new_rd_SymConst_ofs_ent */
582 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
583 symconst_symbol sym;
584 sym.ident_p = symbol;
585 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
586 } /* new_rd_SymConst_addr_name */
587 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
588 symconst_symbol sym;
589 sym.type_p = symbol;
590 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
591 } /* new_rd_SymConst_type_tag */
592 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
593 symconst_symbol sym;
594 sym.type_p = symbol;
595 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
596 } /* new_rd_SymConst_size */
597 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
598 symconst_symbol sym;
599 sym.type_p = symbol;
600 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
601 } /* new_rd_SymConst_align */
/* rd Sync: builds an empty Sync, then adds all 'arity' predecessors. */
602 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
603 ir_graph *rem = current_ir_graph;
604 current_ir_graph = irg;
605 res = new_bd_Sync(db, block);
606 current_ir_graph = rem;
607 for (i = 0; i < arity; ++i)
608 add_Sync_pred(res, in[i]);
/* rd wrapper for new_bd_Confirm. */
609 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
610 ir_graph *rem = current_ir_graph;
611 current_ir_graph = irg;
612 res = new_bd_Confirm(db, block, val, bound, cmp);
613 current_ir_graph = rem;
614 } /* new_rd_Confirm */
/* rd wrapper for new_bd_Unknown. */
615 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
616 ir_graph *rem = current_ir_graph;
617 current_ir_graph = irg;
618 res = new_bd_Unknown(m);
619 current_ir_graph = rem;
620 } /* new_rd_Unknown */
/* rd wrapper for new_bd_CallBegin. */
621 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
622 ir_graph *rem = current_ir_graph;
623 current_ir_graph = irg;
624 res = new_bd_CallBegin(db, block, call);
625 current_ir_graph = rem;
626 } /* new_rd_CallBegin */
/* rd EndReg: built directly (no bd delegation), registered with irg. */
627 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
628 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
629 set_irg_end_reg(irg, res);
630 IRN_VRFY_IRG(res, irg);
631 } /* new_rd_EndReg */
/* rd EndExcept: built directly, registered with irg. */
632 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
633 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
634 set_irg_end_except(irg, res);
635 IRN_VRFY_IRG (res, irg);
636 } /* new_rd_EndExcept */
/* rd wrapper for new_bd_Break. */
637 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
638 ir_graph *rem = current_ir_graph;
639 current_ir_graph = irg;
640 res = new_bd_Break(db, block);
641 current_ir_graph = rem;
642 } /* new_rd_Break */
/* rd wrapper for new_bd_Filter. */
643 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
644 ir_graph *rem = current_ir_graph;
645 current_ir_graph = irg;
646 res = new_bd_Filter(db, block, arg, mode, proj);
647 current_ir_graph = rem;
648 } /* new_rd_Filter */
/* rd wrapper for new_bd_Mux. */
649 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
650 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
651 ir_graph *rem = current_ir_graph;
652 current_ir_graph = irg;
653 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
654 current_ir_graph = rem;
/* rd wrapper for new_bd_CopyB. */
655 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
656 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
657 ir_graph *rem = current_ir_graph;
658 current_ir_graph = irg;
659 res = new_bd_CopyB(db, block, store, dst, src, data_type);
660 current_ir_graph = rem;
661 } /* new_rd_CopyB */
/* rd wrapper for new_bd_InstOf. */
662 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
663 ir_node *objptr, ir_type *type) {
664 ir_graph *rem = current_ir_graph;
665 current_ir_graph = irg;
666 res = new_bd_InstOf(db, block, store, objptr, type);
667 current_ir_graph = rem;
668 } /* new_rd_InstOf */
/* rd wrapper for new_bd_Raise. */
669 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
670 ir_graph *rem = current_ir_graph;
671 current_ir_graph = irg;
672 res = new_bd_Raise(db, block, store, obj);
673 current_ir_graph = rem;
674 } /* new_rd_Raise */
/* rd wrapper for new_bd_Bound. */
675 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
676 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
677 ir_graph *rem = current_ir_graph;
678 current_ir_graph = irg;
679 res = new_bd_Bound(db, block, store, idx, lower, upper);
680 current_ir_graph = rem;
681 } /* new_rd_Bound */
/* rd wrapper for new_bd_Pin. */
682 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
683 ir_graph *rem = current_ir_graph;
684 current_ir_graph = irg;
685 res = new_bd_Pin(db, block, node);
686 current_ir_graph = rem;
/* rd wrapper for new_bd_ASM. */
687 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
688 int arity, ir_node *in[], ir_asm_constraint *inputs,
689 int n_outs, ir_asm_constraint *outputs,
690 int n_clobber, ident *clobber[], ident *asm_text) {
691 ir_graph *rem = current_ir_graph;
692 current_ir_graph = irg;
693 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
694 current_ir_graph = rem;
1459 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1460 return new_rd_Block(NULL, irg, arity, in);
1462 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1463 return new_rd_Start(NULL, irg, block);
1465 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1466 return new_rd_End(NULL, irg, block);
1468 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1469 return new_rd_Jmp(NULL, irg, block);
1471 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1472 return new_rd_IJmp(NULL, irg, block, tgt);
1474 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1475 return new_rd_Cond(NULL, irg, block, c);
1478 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1479 ir_node *store, int arity, ir_node **in) {
1480 return new_rd_Return(NULL, irg, block, store, arity, in);
1482 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
1483 return new_rd_Const(NULL, irg, con);
1485 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
1486 return new_rd_Const_long(NULL, irg, mode, value);
1489 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
1490 return new_rd_Const_type(NULL, irg, con, tp);
1493 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1494 symconst_symbol value, symconst_kind symkind) {
1495 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
1497 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1498 ir_node *objptr, ir_entity *ent) {
1499 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1501 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1502 ir_node *objptr, int n_index, ir_node **index,
1504 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1506 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1507 ir_node *callee, int arity, ir_node **in,
1509 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1512 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1513 ir_node *op1, ir_node *op2, ir_mode *mode) {
1514 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1517 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1518 ir_node *op1, ir_node *op2, ir_mode *mode) {
1519 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1521 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1522 ir_node *op, ir_mode *mode) {
1523 return new_rd_Minus(NULL, irg, block, op, mode);
1525 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1526 ir_node *op1, ir_node *op2, ir_mode *mode) {
1527 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1529 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1530 ir_node *op1, ir_node *op2, ir_mode *mode) {
1531 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1533 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1534 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1535 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1537 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1538 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1539 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1542 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1543 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1544 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1547 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1548 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1549 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1551 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1552 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1553 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1555 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1556 ir_node *op, ir_mode *mode) {
1557 return new_rd_Abs(NULL, irg, block, op, mode);
1559 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1560 ir_node *op1, ir_node *op2, ir_mode *mode) {
1561 return new_rd_And(NULL, irg, block, op1, op2, mode);
1563 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1564 ir_node *op1, ir_node *op2, ir_mode *mode) {
1565 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1567 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1568 ir_node *op1, ir_node *op2, ir_mode *mode) {
1569 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1571 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1572 ir_node *op, ir_mode *mode) {
1573 return new_rd_Not(NULL, irg, block, op, mode);
1575 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1576 ir_node *op, ir_node *k, ir_mode *mode) {
1577 return new_rd_Shl(NULL, irg, block, op, k, mode);
1579 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1580 ir_node *op, ir_node *k, ir_mode *mode) {
1581 return new_rd_Shr(NULL, irg, block, op, k, mode);
1583 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1584 ir_node *op, ir_node *k, ir_mode *mode) {
1585 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1587 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1588 ir_node *op, ir_node *k, ir_mode *mode) {
1589 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1591 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1592 ir_node *op, ir_node *k, ir_mode *mode) {
1593 return new_rd_Carry(NULL, irg, block, op, k, mode);
1595 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1596 ir_node *op, ir_node *k, ir_mode *mode) {
1597 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1600 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1601 ir_node *op1, ir_node *op2) {
1602 return new_rd_Cmp(NULL, irg, block, op1, op2);
1605 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1606 ir_node *op, ir_mode *mode) {
1607 return new_rd_Conv(NULL, irg, block, op, mode);
1610 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1611 return new_rd_Cast(NULL, irg, block, op, to_tp);
1614 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1615 ir_node **in, ir_mode *mode) {
1616 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1618 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1619 ir_node *store, ir_node *adr, ir_mode *mode) {
1620 return new_rd_Load(NULL, irg, block, store, adr, mode);
1622 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1623 ir_node *store, ir_node *adr, ir_node *val) {
1624 return new_rd_Store(NULL, irg, block, store, adr, val);
1626 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1627 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1628 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1630 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1631 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1632 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1634 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1635 return new_rd_Sync(NULL, irg, block, arity, in);
1638 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1639 ir_mode *mode, long proj) {
1640 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1643 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1645 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1648 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1649 int arity, ir_node **in) {
1650 return new_rd_Tuple(NULL, irg, block, arity, in );
1652 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1653 ir_node *val, ir_mode *mode) {
1654 return new_rd_Id(NULL, irg, block, val, mode);
1657 ir_node *new_r_Bad(ir_graph *irg) {
1658 return get_irg_bad(irg);
1660 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1661 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1663 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1664 return new_rd_Unknown(irg, m);
1666 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1667 return new_rd_CallBegin(NULL, irg, block, callee);
1669 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1670 return new_rd_EndReg(NULL, irg, block);
1672 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1673 return new_rd_EndExcept(NULL, irg, block);
1675 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1676 return new_rd_Break(NULL, irg, block);
1678 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1679 ir_mode *mode, long proj) {
1680 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1682 ir_node *new_r_NoMem(ir_graph *irg) {
1683 return get_irg_no_mem(irg);
1685 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1686 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1687 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1689 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1690 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1691 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1693 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1695 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1697 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1698 ir_node *store, ir_node *obj) {
1699 return new_rd_Raise(NULL, irg, block, store, obj);
1701 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1702 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1703 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1705 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1706 return new_rd_Pin(NULL, irg, block, node);
1708 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1709 int arity, ir_node *in[], ir_asm_constraint *inputs,
1710 int n_outs, ir_asm_constraint *outputs,
1711 int n_clobber, ident *clobber[], ident *asm_text) {
1712 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1715 /** ********************/
1716 /** public interfaces */
1717 /** construction tools */
1723 * - create a new Start node in the current block
1725 * @return s - pointer to the created Start node
1730 new_d_Start(dbg_info *db) {
1733 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1734 op_Start, mode_T, 0, NULL);
1736 res = optimize_node(res);
1737 IRN_VRFY_IRG(res, current_ir_graph);
1742 new_d_End(dbg_info *db) {
1744 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1745 op_End, mode_X, -1, NULL);
1746 res = optimize_node(res);
1747 IRN_VRFY_IRG(res, current_ir_graph);
1752 /* Constructs a Block with a fixed number of predecessors.
1753 Does set current_block. Can be used with automatic Phi
1754 node construction. */
1756 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1759 int has_unknown = 0;
1761 res = new_bd_Block(db, arity, in);
1763 /* Create and initialize array for Phi-node construction. */
1764 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1765 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1766 current_ir_graph->n_loc);
1767 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1770 for (i = arity-1; i >= 0; i--)
1771 if (is_Unknown(in[i])) {
1776 if (!has_unknown) res = optimize_node(res);
1777 current_ir_graph->current_block = res;
1779 IRN_VRFY_IRG(res, current_ir_graph);
1785 /* ***********************************************************************/
1786 /* Methods necessary for automatic Phi node creation */
1788 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1789 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1790 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1791 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1793 Call Graph: ( A ---> B == A "calls" B)
1795 get_value mature_immBlock
1803 get_r_value_internal |
1807 new_rd_Phi0 new_rd_Phi_in
1809 * *************************************************************************** */
1811 /** Creates a Phi node with 0 predecessors. */
1812 static inline ir_node *
1813 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1816 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1817 IRN_VRFY_IRG(res, irg);
1823 * Internal constructor of a Phi node by a phi_merge operation.
1825 * @param irg the graph on which the Phi will be constructed
1826 * @param block the block in which the Phi will be constructed
 1827 * @param mode   the mode of the Phi node
1828 * @param in the input array of the phi node
1829 * @param ins number of elements in the input array
1830 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1831 * the value for which the new Phi is constructed
/* NOTE(review): intricate node-identity logic; code left untouched, comments
   only. The function allocates a Phi, then collapses it to its single real
   predecessor when all inputs agree (considering Bad and the Phi0 stub). */
1833 static inline ir_node *
1834 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1835 ir_node **in, int ins, ir_node *phi0) {
1837 ir_node *res, *known;
1839 /* Allocate a new node on the obstack. The allocation copies the in
1841 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1842 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1844 /* This loop checks whether the Phi has more than one predecessor.
1845 If so, it is a real Phi node and we break the loop. Else the
1846 Phi node merges the same definition on several paths and therefore
1848 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
1850 for (i = ins - 1; i >= 0; --i) {
1853 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1855 /* Optimize self referencing Phis: We can't detect them yet properly, as
1856 they still refer to the Phi0 they will replace. So replace right now. */
1857 if (phi0 && in[i] == phi0)
1860 if (in[i] == res || in[i] == known)
1869 /* i < 0: there is at most one predecessor, we don't need a phi node. */
1872 edges_node_deleted(res, current_ir_graph);
1873 obstack_free(current_ir_graph->obst, res);
1874 if (is_Phi(known)) {
1875 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1876 order, an enclosing Phi node may become superfluous. */
1877 res = optimize_in_place_2(known);
1879 exchange(known, res);
1884 /* An undefined value, e.g., in unreachable code. */
1888 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1889 IRN_VRFY_IRG(res, irg);
1890 /* Memory Phis in endless loops must be kept alive.
1891 As we can't distinguish these easily we keep all of them alive. */
1892 if (is_Phi(res) && mode == mode_M)
1893 add_End_keepalive(get_irg_end(irg), res);
1897 } /* new_rd_Phi_in */
1900 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1902 #if PRECISE_EXC_CONTEXT
1904 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1907 * Construct a new frag_array for node n.
1908 * Copy the content from the current graph_arr of the corresponding block:
1909 * this is the current state.
1910 * Set ProjM(n) as current memory state.
1911 * Further the last entry in frag_arr of current block points to n. This
1912 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Allocates a frag array for fragile op n: copies the current block's
   graph_arr snapshot, creates the exception-memory Proj with optimization
   disabled, and chains n as the block's last fragile op. */
1914 static inline ir_node **new_frag_arr(ir_node *n) {
1918 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1919 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1920 sizeof(ir_node *)*current_ir_graph->n_loc);
1922 /* turn off optimization before allocating Proj nodes, as res isn't
1924 opt = get_opt_optimize(); set_optimize(0);
1925 /* Here we rely on the fact that all frag ops have Memory as first result! */
1927 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1928 } else if (is_CopyB(n)) {
1929 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
1931 assert((pn_Quot_M == pn_DivMod_M) &&
1932 (pn_Quot_M == pn_Div_M) &&
1933 (pn_Quot_M == pn_Mod_M) &&
1934 (pn_Quot_M == pn_Load_M) &&
1935 (pn_Quot_M == pn_Store_M) &&
1936 (pn_Quot_M == pn_Alloc_M) &&
1937 (pn_Quot_M == pn_Bound_M));
/* all remaining fragile ops share the same memory Proj number, so any of
   the equal pn_*_M constants works here */
1938 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* remember n as this block's last fragile op (builds the frag-op chain) */
1942 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1944 } /* new_frag_arr */
1947 * Returns the frag_arr from a node.
/* Returns the frag array stored in the attributes of fragile op n;
   the attribute location depends on the opcode. */
1949 static inline ir_node **get_frag_arr(ir_node *n) {
1950 switch (get_irn_opcode(n)) {
1952 return n->attr.call.exc.frag_arr;
1954 return n->attr.alloc.exc.frag_arr;
1956 return n->attr.load.exc.frag_arr;
1958 return n->attr.store.exc.frag_arr;
/* default: generic except attribute used by the remaining fragile ops */
1960 return n->attr.except.frag_arr;
1962 } /* get_frag_arr */
/* Writes val at pos into frag_arr and into every later frag array in the
   chain that has no value there yet; the debug build bounds the chain walk
   to catch endless recursion. */
1965 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1966 #ifdef DEBUG_libfirm
/* bounded loop instead of unbounded recursion in debug builds */
1969 for (i = 1024; i >= 0; --i)
1974 if (frag_arr[pos] == NULL)
1975 frag_arr[pos] = val;
/* slot n_loc-1 links to the next fragile op's frag array */
1976 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
1977 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1978 assert(arr != frag_arr && "Endless recursion detected");
1983 assert(!"potential endless recursion in set_frag_value");
1984 } /* set_frag_value */
/* Returns the value at pos valid directly after fragile op cfOp; builds a
   Phi (or Phi0 for immature blocks) when a later set_value() shadowed it. */
1987 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1991 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
1993 frag_arr = get_frag_arr(cfOp);
1994 res = frag_arr[pos];
1996 if (block->attr.block.graph_arr[pos] != NULL) {
1997 /* There was a set_value() after the cfOp and no get_value() before that
1998 set_value(). We must build a Phi node now. */
1999 if (block->attr.block.is_matured) {
2000 int ins = get_irn_arity(block);
2002 NEW_ARR_A(ir_node *, nin, ins);
2003 res = phi_merge(block, pos, mode, nin, ins);
/* immature block: create a Phi0 stub and queue it for mature_immBlock */
2005 res = new_rd_Phi0(current_ir_graph, block, mode);
2006 res->attr.phi.u.pos = pos;
2007 res->attr.phi.next = block->attr.block.phis;
2008 block->attr.block.phis = res;
2010 assert(res != NULL);
2011 /* It's a Phi, we can write this into all graph_arrs with NULL */
2012 set_frag_value(block->attr.block.graph_arr, pos, res);
2014 res = get_r_value_internal(block, pos, mode);
2015 set_frag_value(block->attr.block.graph_arr, pos, res);
2019 } /* get_r_frag_value_internal */
2020 #endif /* PRECISE_EXC_CONTEXT */
2023 * Check whether a control flownode cf_pred represents an exception flow.
2025 * @param cf_pred the control flow node
2026 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
/* Returns non-zero iff cf_pred represents exceptional control flow out of
   prev_cf_op (only fragile ops distinguish regular from exception exits). */
2028 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
/*
2030 * Note: all projections from a Raise are "exceptional control flow", we handle it
2031 * like a normal Jmp, because there is no "regular" one.
2032 * That's why Raise is no "fragile_op"!
*/
2034 if (is_fragile_op(prev_cf_op)) {
2035 if (is_Proj(cf_pred)) {
2036 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2037 /* the regular control flow, NO exception */
2040 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2043 /* Hmm, exception but not a Proj? */
2044 assert(!"unexpected condition: fragile op without a proj");
2048 } /* is_exception_flow */
2051 * Computes the predecessors for the real phi node, and then
2052 * allocates and returns this node. The routine called to allocate the
2053 * node might optimize it away and return a real value.
2054 * This function must be called with an in-array of proper size.
/* NOTE(review): core of the SSA construction recursion; statement order is
   load-bearing (Phi0 placement breaks cycles), so code is untouched and only
   comments were added/polished. */
2057 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2058 ir_node *prevBlock, *res, *phi0, *phi0_all;
2061 /* If this block has no value at pos create a Phi0 and remember it
2062 in graph_arr to break recursions.
2063 Else we may not set graph_arr as a later value may already be remembered there. */
2065 if (block->attr.block.graph_arr[pos] == NULL) {
2066 ir_graph *irg = current_ir_graph;
2068 if (block == get_irg_start_block(irg)) {
2069 /* Collapsing to Bad tarvals is no good idea.
2070 So we call a user-supplied routine here that deals with this case as
2071 appropriate for the given language. Sorrily the only help we can give
2072 here is the position.
2074 Even if all variables are defined before use, it can happen that
2075 we get to the start block, if a Cond has been replaced by a tuple
2076 (bad, jmp). In this case we call the function needlessly, eventually
2077 generating a non-existent error.
2078 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2081 if (default_initialize_local_variable != NULL) {
2082 ir_node *rem = get_cur_block();
2084 set_cur_block(block);
2085 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2089 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2090 /* We don't need to care about exception ops in the start block.
2091 There are none by definition. */
2092 return block->attr.block.graph_arr[pos];
/* not the start block: place a Phi0 stub to terminate the recursion */
2094 phi0 = new_rd_Phi0(irg, block, mode);
2095 block->attr.block.graph_arr[pos] = phi0;
2096 #if PRECISE_EXC_CONTEXT
2097 if (get_opt_precise_exc_context()) {
2098 /* Set graph_arr for fragile ops. Also here we should break recursion.
2099 We could choose a cyclic path through an cfop. But the recursion would
2100 break at some point. */
2101 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2107 /* This loop goes to all predecessor blocks of the block the Phi node
2108 is in and there finds the operands of the Phi node by calling
2109 get_r_value_internal. */
2110 for (i = 1; i <= ins; ++i) {
2111 ir_node *cf_pred = block->in[i];
2112 ir_node *prevCfOp = skip_Proj(cf_pred);
2114 if (is_Bad(prevCfOp)) {
2115 /* In case a Cond has been optimized we would get right to the start block
2116 with an invalid definition. */
2117 nin[i-1] = new_Bad();
2120 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2122 if (!is_Bad(prevBlock)) {
2123 #if PRECISE_EXC_CONTEXT
2124 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2125 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2126 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2129 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2131 nin[i-1] = new_Bad();
2135 /* We want to pass the Phi0 node to the constructor: this finds additional
2136 optimization possibilities.
2137 The Phi0 node either is allocated in this function, or it comes from
2138 a former call to get_r_value_internal(). In this case we may not yet
2139 exchange phi0, as this is done in mature_immBlock(). */
2141 phi0_all = block->attr.block.graph_arr[pos];
2142 if (! is_Phi0(phi0_all) ||
2143 get_irn_arity(phi0_all) != 0 ||
2144 get_nodes_block(phi0_all) != block)
2150 /* After collecting all predecessors into the array nin a new Phi node
2151 with these predecessors is created. This constructor contains an
2152 optimization: If all predecessors of the Phi node are identical it
2153 returns the only operand instead of a new Phi node. */
2154 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2156 /* In case we allocated a Phi0 node at the beginning of this procedure,
2157 we need to exchange this Phi0 with the real Phi. */
2159 exchange(phi0, res);
2160 block->attr.block.graph_arr[pos] = res;
2161 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2162 only an optimization. */
2169 * This function returns the last definition of a value. In case
2170 * this value was last defined in a previous block, Phi nodes are
2171 * inserted. If the part of the firm graph containing the definition
2172 * is not yet constructed, a dummy Phi node is returned.
2174 * @param block the current block
2175 * @param pos the value number of the value searched
2176 * @param mode the mode of this value (needed for Phi construction)
/* Returns the last definition of value pos in block, inserting Phi (or
   Phi0) nodes as needed; the visited flag distinguishes the four cases
   documented below. Code untouched, comments only. */
2179 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2181 /* There are 4 cases to treat.
2183 1. The block is not mature and we visit it the first time. We can not
2184 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2185 predecessors is returned. This node is added to the linked list (block
2186 attribute "phis") of the containing block to be completed when this block is
2187 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2190 2. The value is already known in this block, graph_arr[pos] is set and we
2191 visit the block the first time. We can return the value without
2192 creating any new nodes.
2194 3. The block is mature and we visit it the first time. A Phi node needs
2195 to be created (phi_merge). If the Phi is not needed, as all its
2196 operands are the same value reaching the block through different
2197 paths, it's optimized away and the value itself is returned.
2199 4. The block is mature, and we visit it the second time. Now two
2200 subcases are possible:
2201 * The value was computed completely the last time we were here. This
2202 is the case if there is no loop. We can return the proper value.
2203 * The recursion that visited this node and set the flag did not
2204 return yet. We are computing a value in a loop and need to
2205 break the recursion. This case only happens if we visited
2206 the same block with phi_merge before, which inserted a Phi0.
2207 So we return the Phi0.
2210 /* case 4 -- already visited. */
2211 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2212 /* As phi_merge allocates a Phi0 this value is always defined. Here
2213 is the critical difference of the two algorithms. */
2214 assert(block->attr.block.graph_arr[pos]);
2215 return block->attr.block.graph_arr[pos];
2218 /* visited the first time */
2219 set_irn_visited(block, get_irg_visited(current_ir_graph));
2221 /* Get the local valid value */
2222 res = block->attr.block.graph_arr[pos];
2224 /* case 2 -- If the value is actually computed, return it. */
2228 if (block->attr.block.is_matured) { /* case 3 */
2230 /* The Phi has the same amount of ins as the corresponding block. */
2231 int ins = get_irn_arity(block);
2233 NEW_ARR_A(ir_node *, nin, ins);
2235 /* Phi merge collects the predecessors and then creates a node. */
2236 res = phi_merge(block, pos, mode, nin, ins);
2238 } else { /* case 1 */
2239 /* The block is not mature, we don't know how many in's are needed. A Phi
2240 with zero predecessors is created. Such a Phi node is called Phi0
2241 node. The Phi0 is then added to the list of Phi0 nodes in this block
2242 to be matured by mature_immBlock later.
2243 The Phi0 has to remember the pos of it's internal value. If the real
2244 Phi is computed, pos is used to update the array with the local
2246 res = new_rd_Phi0(current_ir_graph, block, mode);
2247 res->attr.phi.u.pos = pos;
2248 res->attr.phi.next = block->attr.block.phis;
2249 block->attr.block.phis = res;
2252 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2254 /* The local valid value is available now. */
2255 block->attr.block.graph_arr[pos] = res;
2258 } /* get_r_value_internal */
2260 /* ************************************************************************** */
2263 * Finalize a Block node, when all control flows are known.
2264 * Acceptable parameters are only Block nodes.
/* Finalizes an immature Block once all control-flow predecessors are known:
   fixes the backedge array, completes all queued Phi0 nodes via phi_merge,
   marks the block matured and optimizes it in place. */
2267 mature_immBlock(ir_node *block) {
2272 assert(is_Block(block));
2273 if (!get_Block_matured(block)) {
2274 ir_graph *irg = current_ir_graph;
/* in[0] is the block's own slot, hence the -1 */
2276 ins = ARR_LEN(block->in) - 1;
2277 /* Fix block parameters */
2278 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2280 /* An array for building the Phi nodes. */
2281 NEW_ARR_A(ir_node *, nin, ins);
2283 /* Traverse a chain of Phi nodes attached to this block and mature
2285 for (n = block->attr.block.phis; n; n = next) {
2286 inc_irg_visited(irg);
2287 next = n->attr.phi.next;
2288 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2291 block->attr.block.is_matured = 1;
2293 /* Now, as the block is a finished Firm node, we can optimize it.
2294 Since other nodes have been allocated since the block was created
2295 we can not free the node on the obstack. Therefore we have to call
2296 optimize_in_place().
2297 Unfortunately the optimization does not change a lot, as all allocated
2298 nodes refer to the unoptimized node.
2299 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2300 block = optimize_in_place_2(block);
2301 IRN_VRFY_IRG(block, irg);
2303 } /* mature_immBlock */
2306 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2307 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2311 new_d_Const(dbg_info *db, tarval *con) {
2312 return new_bd_Const(db, con);
2316 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2317 return new_bd_Const_long(db, mode, value);
2318 } /* new_d_Const_long */
2322 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
2323 return new_bd_Const_type(db, con, tp);
2324 } /* new_d_Const_type */
2328 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2329 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2333 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2334 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2339 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2342 assert(arg->op == op_Cond);
2343 arg->attr.cond.kind = fragmentary;
2344 arg->attr.cond.default_proj = max_proj;
2345 res = new_Proj(arg, mode_X, max_proj);
2347 } /* new_d_defaultProj */
2350 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2351 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2355 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2356 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2357 } /* new_d_strictConv */
2361 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2362 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2366 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2367 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2378 * Allocate a frag array for a node if the current graph state is phase_building.
2380 * @param irn the node for which the frag array should be allocated
2381 * @param op the opcode of the (original) node, if does not match opcode of irn,
2383 * @param frag_store the address of the frag store in irn attributes, if this
2384 * address contains a value != NULL, does nothing
2386 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2387 if (get_opt_precise_exc_context()) {
2388 if ((current_ir_graph->phase_state == phase_building) &&
2389 (get_irn_op(irn) == op) && /* Could be optimized away. */
2390 !*frag_store) /* Could be a cse where the arr is already set. */ {
2391 *frag_store = new_frag_arr(irn);
2394 } /* firm_alloc_frag_arr */
2397 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2399 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2400 #if PRECISE_EXC_CONTEXT
2401 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2408 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2410 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2411 #if PRECISE_EXC_CONTEXT
2412 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2416 } /* new_d_DivMod */
2420 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2422 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2423 #if PRECISE_EXC_CONTEXT
2424 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2432 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2434 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2435 #if PRECISE_EXC_CONTEXT
2436 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2443 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2445 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2446 #if PRECISE_EXC_CONTEXT
2447 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
2467 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2468 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2472 new_d_Jmp(dbg_info *db) {
2473 return new_bd_Jmp(db, current_ir_graph->current_block);
2477 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2478 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2482 new_d_Cond(dbg_info *db, ir_node *c) {
2483 return new_bd_Cond(db, current_ir_graph->current_block, c);
2488 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2491 res = new_bd_Call(db, current_ir_graph->current_block,
2492 store, callee, arity, in, tp);
2493 #if PRECISE_EXC_CONTEXT
2494 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2501 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2502 return new_bd_Return(db, current_ir_graph->current_block,
2504 } /* new_d_Return */
2507 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2509 res = new_bd_Load(db, current_ir_graph->current_block,
2511 #if PRECISE_EXC_CONTEXT
2512 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2519 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2521 res = new_bd_Store(db, current_ir_graph->current_block,
2523 #if PRECISE_EXC_CONTEXT
2524 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2531 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2532 ir_where_alloc where) {
2534 res = new_bd_Alloc(db, current_ir_graph->current_block,
2535 store, size, alloc_type, where);
2536 #if PRECISE_EXC_CONTEXT
2537 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
2544 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2545 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2546 return new_bd_Free(db, current_ir_graph->current_block,
2547 store, ptr, size, free_type, where);
2551 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2552 /* GL: objptr was called frame before. Frame was a bad choice for the name
2553 as the operand could as well be a pointer to a dynamic object. */
2555 return new_bd_Sel(db, current_ir_graph->current_block,
2556 store, objptr, 0, NULL, ent);
2557 } /* new_d_simpleSel */
2560 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2561 return new_bd_Sel(db, current_ir_graph->current_block,
2562 store, objptr, n_index, index, sel);
2566 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2567 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2569 } /* new_d_SymConst_type */
2572 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2573 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2574 value, kind, firm_unknown_type);
2575 } /* new_d_SymConst */
2578 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2579 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2583 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2584 return new_bd_Confirm(db, current_ir_graph->current_block,
2586 } /* new_d_Confirm */
2589 new_d_Unknown(ir_mode *m) {
2590 return new_bd_Unknown(m);
2591 } /* new_d_Unknown */
2594 new_d_CallBegin(dbg_info *db, ir_node *call) {
2595 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2596 } /* new_d_CallBegin */
2599 new_d_EndReg(dbg_info *db) {
2600 return new_bd_EndReg(db, current_ir_graph->current_block);
2601 } /* new_d_EndReg */
2604 new_d_EndExcept(dbg_info *db) {
2605 return new_bd_EndExcept(db, current_ir_graph->current_block);
2606 } /* new_d_EndExcept */
2609 new_d_Break(dbg_info *db) {
2610 return new_bd_Break(db, current_ir_graph->current_block);
2614 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2615 return new_bd_Filter(db, current_ir_graph->current_block,
2617 } /* new_d_Filter */
2620 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2621 ir_node *ir_true, ir_mode *mode) {
2622 return new_bd_Mux(db, current_ir_graph->current_block,
2623 sel, ir_false, ir_true, mode);
2626 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2627 ir_node *dst, ir_node *src, ir_type *data_type) {
2629 res = new_bd_CopyB(db, current_ir_graph->current_block,
2630 store, dst, src, data_type);
2631 #if PRECISE_EXC_CONTEXT
2632 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2638 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2639 return new_bd_InstOf(db, current_ir_graph->current_block,
2640 store, objptr, type);
2641 } /* new_d_InstOf */
2644 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2645 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2648 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2649 ir_node *idx, ir_node *lower, ir_node *upper) {
2651 res = new_bd_Bound(db, current_ir_graph->current_block,
2652 store, idx, lower, upper);
2653 #if PRECISE_EXC_CONTEXT
2654 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2660 new_d_Pin(dbg_info *db, ir_node *node) {
2661 return new_bd_Pin(db, current_ir_graph->current_block, node);
2665 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2666 int n_outs, ir_asm_constraint *outputs,
2667 int n_clobber, ident *clobber[], ident *asm_text) {
2668 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2671 /* ********************************************************************* */
2672 /* Comfortable interface with automatic Phi node construction. */
2673 /* (Uses also constructors of the debug-info (new_d_*) interface, except new_Block. */
2674 /* ********************************************************************* */
2676 /* Block construction */
2677 /* immature Block without predecessors */
2679 new_d_immBlock(dbg_info *db) {
2682 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2683 /* creates a new dynamic in-array as length of in is -1 */
2684 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2685 current_ir_graph->current_block = res;
2687 /* macroblock head */
2690 res->attr.block.is_matured = 0;
2691 res->attr.block.is_dead = 0;
2692 res->attr.block.is_mb_head = 1;
2693 res->attr.block.has_label = 0;
2694 res->attr.block.irg = current_ir_graph;
2695 res->attr.block.backedge = NULL;
2696 res->attr.block.in_cg = NULL;
2697 res->attr.block.cg_backedge = NULL;
2698 res->attr.block.extblk = NULL;
2699 res->attr.block.region = NULL;
2700 res->attr.block.mb_depth = 0;
2701 res->attr.block.label = 0;
2703 set_Block_block_visited(res, 0);
2705 /* Create and initialize array for Phi-node construction. */
2706 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2707 current_ir_graph->n_loc);
2708 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2710 /* Immature block may not be optimized! */
2711 IRN_VRFY_IRG(res, current_ir_graph);
2714 } /* new_d_immBlock */
2717 new_immBlock(void) {
2718 return new_d_immBlock(NULL);
2719 } /* new_immBlock */
2721 /* immature PartBlock with its predecessors */
2723 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2724 ir_node *res = new_d_immBlock(db);
2725 ir_node *blk = get_nodes_block(pred_jmp);
2727 res->in[0] = blk->in[0];
2728 assert(res->in[0] != NULL);
2729 add_immBlock_pred(res, pred_jmp);
2731 res->attr.block.is_mb_head = 0;
2732 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2735 } /* new_d_immPartBlock */
2738 new_immPartBlock(ir_node *pred_jmp) {
2739 return new_d_immPartBlock(NULL, pred_jmp);
2740 } /* new_immPartBlock */
2742 /* add an edge to a jmp/control flow node */
2744 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2745 int n = ARR_LEN(block->in) - 1;
2747 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2748 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2749 assert(is_ir_node(jmp));
2751 ARR_APP1(ir_node *, block->in, jmp);
2753 hook_set_irn_n(block, n, jmp, NULL);
2754 } /* add_immBlock_pred */
2756 /* changing the current block */
2758 set_cur_block(ir_node *target) {
2759 current_ir_graph->current_block = target;
2760 } /* set_cur_block */
2762 /* ************************ */
2763 /* parameter administration */
2765 /* get a value from the parameter array from the current block by its index */
2767 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2768 ir_graph *irg = current_ir_graph;
2769 assert(get_irg_phase_state(irg) == phase_building);
2770 inc_irg_visited(irg);
2775 return get_r_value_internal(irg->current_block, pos + 1, mode);
2778 /* get a value from the parameter array from the current block by its index */
2780 get_value(int pos, ir_mode *mode) {
2781 return get_d_value(NULL, pos, mode);
2784 /* set a value at position pos in the parameter array from the current block */
2786 set_value(int pos, ir_node *value) {
2787 ir_graph *irg = current_ir_graph;
2788 assert(get_irg_phase_state(irg) == phase_building);
2790 assert(pos+1 < irg->n_loc);
2791 assert(is_ir_node(value));
2792 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2795 /* Find the value number for a node in the current block.*/
2797 find_value(ir_node *value) {
2799 ir_node *bl = current_ir_graph->current_block;
2801 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2802 if (bl->attr.block.graph_arr[i] == value)
2807 /* get the current store */
2810 ir_graph *irg = current_ir_graph;
2812 assert(get_irg_phase_state(irg) == phase_building);
2813 /* GL: one could call get_value instead */
2814 inc_irg_visited(irg);
2815 return get_r_value_internal(irg->current_block, 0, mode_M);
2818 /* set the current store: handles automatic Sync construction for Load nodes */
2820 set_store(ir_node *store) {
2821 ir_node *load, *pload, *pred, *in[2];
2823 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2824 /* Beware: due to dead code elimination, a store might become a Bad node even in
2825 the construction phase. */
2826 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2828 if (get_opt_auto_create_sync()) {
2829 /* handle non-volatile Load nodes by automatically creating Sync's */
2830 load = skip_Proj(store);
2831 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2832 pred = get_Load_mem(load);
2834 if (is_Sync(pred)) {
2835 /* a Load after a Sync: move it up */
2836 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2838 set_Load_mem(load, get_memop_mem(mem));
2839 add_Sync_pred(pred, store);
2842 pload = skip_Proj(pred);
2843 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2844 /* a Load after a Load: create a new Sync */
2845 set_Load_mem(load, get_Load_mem(pload));
2849 store = new_Sync(2, in);
2854 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2858 keep_alive(ir_node *ka) {
2859 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2862 /* --- Useful access routines --- */
2863 /* Returns the current block of the current graph. To set the current
2864 block use set_cur_block. */
2865 ir_node *get_cur_block(void) {
2866 return get_irg_current_block(current_ir_graph);
2867 } /* get_cur_block */
2869 /* Returns the frame type of the current graph */
2870 ir_type *get_cur_frame_type(void) {
2871 return get_irg_frame_type(current_ir_graph);
2872 } /* get_cur_frame_type */
2875 /* ********************************************************************* */
2878 /* call once for each run of the library */
2880 firm_init_cons(uninitialized_local_variable_func_t *func) {
2881 default_initialize_local_variable = func;
2882 } /* firm_init_cons */
2885 irp_finalize_cons(void) {
2887 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2888 irg_finalize_cons(get_irp_irg(i));
2890 irp->phase_state = phase_high;
2891 } /* irp_finalize_cons */
2894 ir_node *new_Block(int arity, ir_node **in) {
2895 return new_d_Block(NULL, arity, in);
2897 ir_node *new_Start(void) {
2898 return new_d_Start(NULL);
2900 ir_node *new_End(void) {
2901 return new_d_End(NULL);
2903 ir_node *new_Jmp(void) {
2904 return new_d_Jmp(NULL);
2906 ir_node *new_IJmp(ir_node *tgt) {
2907 return new_d_IJmp(NULL, tgt);
2909 ir_node *new_Cond(ir_node *c) {
2910 return new_d_Cond(NULL, c);
2913 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2914 return new_d_Return(NULL, store, arity, in);
2916 ir_node *new_Const(tarval *con) {
2917 return new_d_Const(NULL, con);
2920 ir_node *new_Const_long(ir_mode *mode, long value) {
2921 return new_d_Const_long(NULL, mode, value);
2925 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2926 return new_d_Const_type(NULL, con, tp);
2930 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2931 return new_d_SymConst_type(NULL, mode, value, kind, type);
2933 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2934 return new_d_SymConst(NULL, mode, value, kind);
2936 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2937 return new_d_simpleSel(NULL, store, objptr, ent);
2939 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2941 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2943 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2945 return new_d_Call(NULL, store, callee, arity, in, tp);
2948 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2949 return new_d_Add(NULL, op1, op2, mode);
2952 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2953 return new_d_Sub(NULL, op1, op2, mode);
2955 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2956 return new_d_Minus(NULL, op, mode);
2958 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2959 return new_d_Mul(NULL, op1, op2, mode);
2961 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2962 return new_d_Mulh(NULL, op1, op2, mode);
2964 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2965 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2967 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2968 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2971 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2972 return new_d_Div(NULL, memop, op1, op2, mode, state);
2975 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2976 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
2978 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2979 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2981 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2982 return new_d_Abs(NULL, op, mode);
2984 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2985 return new_d_And(NULL, op1, op2, mode);
2987 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
2988 return new_d_Or(NULL, op1, op2, mode);
2990 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
2991 return new_d_Eor(NULL, op1, op2, mode);
2993 ir_node *new_Not(ir_node *op, ir_mode *mode) {
2994 return new_d_Not(NULL, op, mode);
2996 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
2997 return new_d_Shl(NULL, op, k, mode);
2999 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3000 return new_d_Shr(NULL, op, k, mode);
3002 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3003 return new_d_Shrs(NULL, op, k, mode);
3005 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
3006 return new_d_Rotl(NULL, op, k, mode);
3008 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3009 return new_d_Carry(NULL, op1, op2, mode);
3011 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3012 return new_d_Borrow(NULL, op1, op2, mode);
3015 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3016 return new_d_Cmp(NULL, op1, op2);
3019 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3020 return new_d_Conv(NULL, op, mode);
3022 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3023 return new_d_strictConv(NULL, op, mode);
3026 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3027 return new_d_Cast(NULL, op, to_tp);
3030 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3031 return new_d_Phi(NULL, arity, in, mode);
3033 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3034 return new_d_Load(NULL, store, addr, mode);
3036 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3037 return new_d_Store(NULL, store, addr, val);
3039 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3040 ir_where_alloc where) {
3041 return new_d_Alloc(NULL, store, size, alloc_type, where);
3043 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3044 ir_type *free_type, ir_where_alloc where) {
3045 return new_d_Free(NULL, store, ptr, size, free_type, where);
3047 ir_node *new_Sync(int arity, ir_node *in[]) {
3048 return new_d_Sync(NULL, arity, in);
3051 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3052 return new_d_Proj(NULL, arg, mode, proj);
3055 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3056 return new_d_defaultProj(NULL, arg, max_proj);
3059 ir_node *new_Tuple(int arity, ir_node **in) {
3060 return new_d_Tuple(NULL, arity, in);
3062 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3063 return new_d_Id(NULL, val, mode);
3066 ir_node *new_Bad(void) {
3067 return get_irg_bad(current_ir_graph);
3069 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3070 return new_d_Confirm(NULL, val, bound, cmp);
3072 ir_node *new_Unknown(ir_mode *m) {
3073 return new_d_Unknown(m);
3075 ir_node *new_CallBegin(ir_node *callee) {
3076 return new_d_CallBegin(NULL, callee);
3078 ir_node *new_EndReg(void) {
3079 return new_d_EndReg(NULL);
3081 ir_node *new_EndExcept(void) {
3082 return new_d_EndExcept(NULL);
3084 ir_node *new_Break(void) {
3085 return new_d_Break(NULL);
3087 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3088 return new_d_Filter(NULL, arg, mode, proj);
3090 ir_node *new_NoMem(void) {
3091 return get_irg_no_mem(current_ir_graph);
3093 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3094 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3096 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3097 return new_d_CopyB(NULL, store, dst, src, data_type);
3099 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3100 return new_d_InstOf(NULL, store, objptr, ent);
3102 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3103 return new_d_Raise(NULL, store, obj);
3105 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3106 return new_d_Bound(NULL, store, idx, lower, upper);
3108 ir_node *new_Pin(ir_node *node) {
3109 return new_d_Pin(NULL, node);
3111 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3112 int n_outs, ir_asm_constraint *outputs,
3113 int n_clobber, ident *clobber[], ident *asm_text) {
3114 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3117 /* create a new anchor node */
3118 ir_node *new_Anchor(ir_graph *irg) {
3119 ir_node *in[anchor_last];
3120 memset(in, 0, sizeof(in));
3121 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);