2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
47 /* when we need verifying */
49 # define IRN_VRFY_IRG(res, irg)
51 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
55 * Language dependent variable initialization callback.
57 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/*
 * NEW_BD_BINOP(instr): expands to the block-direct ("bd") constructor
 * new_bd_<instr>() for a binary operation.  Builds the node on
 * current_ir_graph in the given block, runs the local optimizer and
 * verifies the result when verification is enabled.
 * NOTE(review): this extract omits several original macro lines (result
 * declaration, in[] operand setup, return) -- comments only added here.
 */
59 /* creates a bd constructor for a binop */
60 #define NEW_BD_BINOP(instr) \
62 new_bd_##instr(dbg_info *db, ir_node *block, \
63 ir_node *op1, ir_node *op2, ir_mode *mode) \
67 ir_graph *irg = current_ir_graph; \
70 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
71 res = optimize_node(res); \
72 IRN_VRFY_IRG(res, irg); \
/*
 * NEW_BD_UNOP(instr): block-direct constructor for a unary operation;
 * the single operand is passed by address (&op) as a 1-element in-array.
 * NOTE(review): extract omits some original macro lines (declarations,
 * return) -- comments only added here.
 */
76 /* creates a bd constructor for an unop */
77 #define NEW_BD_UNOP(instr) \
79 new_bd_##instr(dbg_info *db, ir_node *block, \
80 ir_node *op, ir_mode *mode) \
83 ir_graph *irg = current_ir_graph; \
84 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
85 res = optimize_node(res); \
86 IRN_VRFY_IRG(res, irg); \
/*
 * NEW_BD_DIVOP(instr): block-direct constructor for a Div-like operation
 * (Div/Mod/DivMod...).  The node is a mode_T tuple with 3 inputs (memory
 * plus two operands); the result mode, exception pin state and
 * no_remainder = 0 are stored in the divmod attribute.
 * NOTE(review): extract omits some original macro lines (declarations,
 * in[] setup, return) -- comments only added here.
 */
90 /* creates a bd constructor for a divop */
91 #define NEW_BD_DIVOP(instr) \
93 new_bd_##instr(dbg_info *db, ir_node *block, \
94 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
98 ir_graph *irg = current_ir_graph; \
102 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
103 res->attr.divmod.exc.pin_state = state; \
104 res->attr.divmod.res_mode = mode; \
105 res->attr.divmod.no_remainder = 0; \
106 res = optimize_node(res); \
107 IRN_VRFY_IRG(res, irg); \
/*
 * NEW_RD_BINOP(instr): "rd" (explicit-graph) constructor wrapper. It
 * temporarily makes irg the current_ir_graph, delegates to the bd
 * constructor, then restores the previous graph.
 */
111 /* creates a rd constructor for a binop */
112 #define NEW_RD_BINOP(instr) \
114 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
115 ir_node *op1, ir_node *op2, ir_mode *mode) \
118 ir_graph *rem = current_ir_graph; \
119 current_ir_graph = irg; \
120 res = new_bd_##instr(db, block, op1, op2, mode); \
121 current_ir_graph = rem; \
/*
 * NEW_RD_UNOP(instr): explicit-graph wrapper around the bd unop
 * constructor (save/switch/restore current_ir_graph).
 */
125 /* creates a rd constructor for an unop */
126 #define NEW_RD_UNOP(instr) \
128 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
129 ir_node *op, ir_mode *mode) \
132 ir_graph *rem = current_ir_graph; \
133 current_ir_graph = irg; \
134 res = new_bd_##instr(db, block, op, mode); \
135 current_ir_graph = rem; \
/*
 * NEW_RD_DIVOP(instr): explicit-graph wrapper around the bd divop
 * constructor (save/switch/restore current_ir_graph).
 */
139 /* creates a rd constructor for a divop */
140 #define NEW_RD_DIVOP(instr) \
142 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
143 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
146 ir_graph *rem = current_ir_graph; \
147 current_ir_graph = irg; \
148 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
149 current_ir_graph = rem; \
/*
 * NEW_D_BINOP(instr): "d" (debug-info) constructor for use during graph
 * construction; places the node in current_ir_graph->current_block.
 */
153 /* creates a d constructor for a binop */
154 #define NEW_D_BINOP(instr) \
156 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
157 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
/*
 * NEW_D_UNOP(instr): debug-info constructor for a unary op; places the
 * node in current_ir_graph->current_block.
 */
160 /* creates a d constructor for an unop */
161 #define NEW_D_UNOP(instr) \
163 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
164 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
/*
 * Block-direct constructor for a Block node with a fixed predecessor
 * count.  The block node itself has no enclosing block (NULL), is set up
 * as a macroblock head, and is matured immediately since all
 * predecessors are known.
 * NOTE(review): extract omits some original lines (res declaration,
 * return) -- comments only added.
 */
169 * Constructs a Block with a fixed number of predecessors.
170 * Does not set current_block. Cannot be used with automatic
171 * Phi node construction.
174 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
176 ir_graph *irg = current_ir_graph;
178 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
180 /* macroblock header */
183 res->attr.block.is_dead = 0;
184 res->attr.block.is_mb_head = 1;
185 res->attr.block.has_label = 0;
186 res->attr.block.irg = irg;
/* one back-edge flag per predecessor, allocated on the graph obstack */
187 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
188 res->attr.block.in_cg = NULL;
189 res->attr.block.cg_backedge = NULL;
190 res->attr.block.extblk = NULL;
191 res->attr.block.mb_depth = 0;
192 res->attr.block.label = 0;
/* all predecessors supplied up front => mature right away */
194 set_Block_matured(res, 1);
195 set_Block_block_visited(res, 0);
197 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for the Start node: a mode_T tuple with no
   inputs, placed in the given block of current_ir_graph. */
202 new_bd_Start(dbg_info *db, ir_node *block) {
204 ir_graph *irg = current_ir_graph;
206 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
208 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for the End node.  Arity -1 means a dynamic
   in-array, so keep-alive edges can be added later. */
213 new_bd_End(dbg_info *db, ir_node *block) {
215 ir_graph *irg = current_ir_graph;
217 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
219 IRN_VRFY_IRG(res, irg);
/*
 * Block-direct Phi constructor.  Per contract the block should be
 * mature; optimization is skipped while any operand is still Unknown,
 * and memory Phis are kept alive through the End node.
 */
224 * Creates a Phi node with all predecessors. Calling this constructor
225 * is only allowed if the corresponding block is mature.
228 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
230 ir_graph *irg = current_ir_graph;
234 /* Don't assert that block matured: the use of this constructor is strongly
/* if the block is already mature its arity must match the Phi's */
236 if (get_Block_matured(block))
237 assert(get_irn_arity(block) == arity);
239 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
241 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
/* an Unknown operand forbids optimizing the Phi away yet */
243 for (i = arity - 1; i >= 0; --i)
244 if (is_Unknown(in[i])) {
249 if (!has_unknown) res = optimize_node(res);
250 IRN_VRFY_IRG(res, irg);
252 /* Memory Phis in endless loops must be kept alive.
253 As we can't distinguish these easily we keep all of them alive. */
254 if (is_Phi(res) && mode == mode_M)
255 add_End_keepalive(get_irg_end(irg), res);
/*
 * Block-direct constructor for a typed Const.  Note that the block
 * parameter is deliberately ignored: constants are always placed in the
 * start block of the graph.
 */
260 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
262 ir_graph *irg = current_ir_graph;
/* constants live in the start block, regardless of 'block' */
265 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
266 res->attr.con.tv = con;
267 set_Const_type(res, tp); /* Call method because of complex assertion. */
268 res = optimize_node (res);
269 assert(get_Const_type(res) == tp);
270 IRN_VRFY_IRG(res, irg);
273 } /* new_bd_Const_type */
/* Untyped Const: delegates with firm_unknown_type.
   NOTE(review): a bd constructor delegating to an rd wrapper is a
   layering oddity, but harmless -- the rd wrapper switches to the same
   current_ir_graph. */
276 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
277 ir_graph *irg = current_ir_graph;
279 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience constructor: builds the tarval for a C long and delegates
   to the Const constructor. */
283 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
284 ir_graph *irg = current_ir_graph;
286 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
287 } /* new_bd_Const_long */
/* Block-direct constructor for an Id node (transparent forwarder of
   val); usually optimized away immediately by optimize_node(). */
290 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
292 ir_graph *irg = current_ir_graph;
294 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
295 res = optimize_node(res);
296 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a Proj selecting result 'proj' of the
   tuple node arg. */
301 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
304 ir_graph *irg = current_ir_graph;
306 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
307 res->attr.proj = proj;
/* sanity: a Proj must have a predecessor in a valid block */
310 assert(get_Proj_pred(res));
311 assert(get_nodes_block(get_Proj_pred(res)));
313 res = optimize_node(res);
315 IRN_VRFY_IRG(res, irg);
/*
 * Constructs the default Proj of a Cond node.  CAUTION: this mutates the
 * Cond argument -- it marks its kind as fragmentary and records the
 * default projection number before creating the Proj.
 */
320 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
323 ir_graph *irg = current_ir_graph;
325 assert(arg->op == op_Cond);
326 arg->attr.cond.kind = fragmentary;
327 arg->attr.cond.default_proj = max_proj;
328 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
330 } /* new_bd_defaultProj */
/* Block-direct constructor for a Conv; strict_flag marks a strict
   conversion that must not be removed. */
333 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
335 ir_graph *irg = current_ir_graph;
337 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
338 res->attr.conv.strict = strict_flag;
339 res = optimize_node(res);
340 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a Cast (type reinterpretation).  The mode
   is taken from the operand; only atomic target types are allowed. */
345 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
347 ir_graph *irg = current_ir_graph;
349 assert(is_atomic_type(to_tp));
351 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
352 res->attr.cast.totype = to_tp;
353 res = optimize_node(res);
354 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a Tuple grouping 'arity' values. */
359 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
361 ir_graph *irg = current_ir_graph;
363 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
364 res = optimize_node (res);
365 IRN_VRFY_IRG(res, irg);
390 /** Creates a remainderless Div node. */
/* Same shape as the NEW_BD_DIVOP expansion but with no_remainder = 1,
   marking a division whose remainder is known to be unused. */
391 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
392 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
396 ir_graph *irg = current_ir_graph;
400 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
401 res->attr.divmod.exc.pin_state = state;
402 res->attr.divmod.res_mode = mode;
403 res->attr.divmod.no_remainder = 1;
404 res = optimize_node(res);
405 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a Cmp; result is a mode_T tuple of the
   comparison relations (selected later via Proj). */
410 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
413 ir_graph *irg = current_ir_graph;
416 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
417 res = optimize_node(res);
418 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for an unconditional Jmp (mode_X, no ins). */
423 new_bd_Jmp(dbg_info *db, ir_node *block) {
425 ir_graph *irg = current_ir_graph;
427 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
428 res = optimize_node(res);
429 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for an indirect jump to target address tgt. */
434 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
436 ir_graph *irg = current_ir_graph;
438 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
439 res = optimize_node(res);
440 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a Cond on selector c.  Starts with a
   dense projection numbering, no default Proj and no jump prediction. */
445 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
447 ir_graph *irg = current_ir_graph;
449 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
450 res->attr.cond.kind = dense;
451 res->attr.cond.default_proj = 0;
452 res->attr.cond.pred = COND_JMP_PRED_NONE;
453 res = optimize_node(res);
454 IRN_VRFY_IRG(res, irg);
/*
 * Block-direct constructor for a Call: memory + callee + 'arity'
 * arguments, with method type tp.  Created pinned; the callee_arr used
 * by call-graph analyses starts out NULL.
 */
459 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
460 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
464 ir_graph *irg = current_ir_graph;
467 NEW_ARR_A(ir_node *, r_in, r_arity);
/* slots 0 and 1 hold memory and callee (assignments omitted in this
   extract -- TODO confirm against the full source); arguments follow */
470 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
472 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
474 assert((get_unknown_type() == tp) || is_Method_type(tp));
475 set_Call_type(res, tp);
476 res->attr.call.exc.pin_state = op_pin_state_pinned;
477 res->attr.call.callee_arr = NULL;
478 res = optimize_node(res);
479 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a Return: memory first (r_in[0]),
   followed by the 'arity' result values. */
484 new_bd_Return(dbg_info *db, ir_node *block,
485 ir_node *store, int arity, ir_node **in) {
489 ir_graph *irg = current_ir_graph;
492 NEW_ARR_A (ir_node *, r_in, r_arity);
/* results are copied behind the memory input */
494 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
495 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
496 res = optimize_node(res);
497 IRN_VRFY_IRG(res, irg);
499 } /* new_bd_Return */
/* Block-direct constructor for a Load of 'mode' from address adr.
   Defaults: pinned, non-volatile, aligned. */
502 new_bd_Load(dbg_info *db, ir_node *block,
503 ir_node *store, ir_node *adr, ir_mode *mode) {
506 ir_graph *irg = current_ir_graph;
510 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
511 res->attr.load.exc.pin_state = op_pin_state_pinned;
512 res->attr.load.load_mode = mode;
513 res->attr.load.volatility = volatility_non_volatile;
514 res->attr.load.aligned = align_is_aligned;
515 res = optimize_node(res);
516 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a Store of val to address adr.
   Defaults: pinned, non-volatile, aligned. */
521 new_bd_Store(dbg_info *db, ir_node *block,
522 ir_node *store, ir_node *adr, ir_node *val) {
525 ir_graph *irg = current_ir_graph;
530 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
531 res->attr.store.exc.pin_state = op_pin_state_pinned;
532 res->attr.store.volatility = volatility_non_volatile;
533 res->attr.store.aligned = align_is_aligned;
534 res = optimize_node(res);
535 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for an Alloc of 'size' objects of alloc_type,
   on stack or heap as selected by 'where'.  Created pinned. */
540 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
541 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
544 ir_graph *irg = current_ir_graph;
548 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
549 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
550 res->attr.alloc.where = where;
551 res->attr.alloc.type = alloc_type;
552 res = optimize_node(res);
553 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a Free of the memory at ptr (counterpart
   of Alloc).  Result is only the new memory state (mode_M). */
558 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
559 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
562 ir_graph *irg = current_ir_graph;
567 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
568 res->attr.free.where = where;
569 res->attr.free.type = free_type;
570 res = optimize_node(res);
571 IRN_VRFY_IRG(res, irg);
/*
 * Block-direct constructor for a Sel of entity ent from objptr, with
 * 'arity' index inputs behind memory and object pointer.  The result
 * mode depends on the entity: code pointer for methods, data pointer
 * otherwise.
 */
576 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
577 int arity, ir_node **in, ir_entity *ent) {
581 ir_graph *irg = current_ir_graph;
582 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
584 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
587 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
/* index inputs start behind memory and object pointer */
590 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
592 * Sel's can select functions which should be of mode mode_P_code.
594 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
595 res->attr.sel.ent = ent;
596 res = optimize_node(res);
597 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a typed SymConst: stores the symbol kind,
   the symbol value union and the associated type in the attribute. */
602 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
603 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
604 ir_graph *irg = current_ir_graph;
605 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
607 res->attr.symc.kind = symkind;
608 res->attr.symc.sym = value;
609 res->attr.symc.tp = tp;
611 res = optimize_node(res);
612 IRN_VRFY_IRG(res, irg);
614 } /* new_bd_SymConst_type */
/* Block-direct constructor for a Sync.  Arity -1 makes the in-array
   dynamic so predecessors can be added afterwards. */
617 new_bd_Sync(dbg_info *db, ir_node *block) {
619 ir_graph *irg = current_ir_graph;
621 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
622 /* no need to call optimize node here, Sync are always created with no predecessors */
623 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a Confirm: asserts the relation 'cmp'
   between val and bound; mode is inherited from val.
   NOTE(review): the in[0]=val / in[1]=bound assignments are omitted in
   this extract -- confirm against the full source. */
628 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
629 ir_node *in[2], *res;
630 ir_graph *irg = current_ir_graph;
634 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
635 res->attr.confirm.cmp = cmp;
636 res = optimize_node(res);
637 IRN_VRFY_IRG(res, irg);
639 } /* new_bd_Confirm */
/* Constructor for an Unknown value of mode m.  Always placed in the
   start block; carries no debug info. */
642 new_bd_Unknown(ir_mode *m) {
644 ir_graph *irg = current_ir_graph;
646 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
647 res = optimize_node(res);
649 } /* new_bd_Unknown */
/* Block-direct constructor for a CallBegin (interprocedural view);
   its single input is the callee pointer of the given Call. */
652 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
655 ir_graph *irg = current_ir_graph;
657 in[0] = get_Call_ptr(call);
658 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
659 /* res->attr.callbegin.irg = irg; */
660 res->attr.callbegin.call = call;
661 res = optimize_node(res);
662 IRN_VRFY_IRG(res, irg);
664 } /* new_bd_CallBegin */
/* Block-direct constructor for EndReg (interprocedural regular end);
   dynamic arity, registered with the graph. */
667 new_bd_EndReg(dbg_info *db, ir_node *block) {
669 ir_graph *irg = current_ir_graph;
671 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
672 set_irg_end_reg(irg, res);
673 IRN_VRFY_IRG(res, irg);
675 } /* new_bd_EndReg */
/* Block-direct constructor for EndExcept (interprocedural exceptional
   end); dynamic arity, registered with the graph. */
678 new_bd_EndExcept(dbg_info *db, ir_node *block) {
680 ir_graph *irg = current_ir_graph;
682 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
683 set_irg_end_except(irg, res);
684 IRN_VRFY_IRG (res, irg);
686 } /* new_bd_EndExcept */
/* Block-direct constructor for a Break (interprocedural jump), built
   like a Jmp but with op_Break. */
689 new_bd_Break(dbg_info *db, ir_node *block) {
691 ir_graph *irg = current_ir_graph;
693 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
694 res = optimize_node(res);
695 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a Filter (interprocedural Proj analog):
   selects result 'proj' of arg; call-graph ins/backedges start NULL. */
700 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
703 ir_graph *irg = current_ir_graph;
705 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
706 res->attr.filter.proj = proj;
707 res->attr.filter.in_cg = NULL;
708 res->attr.filter.backedge = NULL;
/* sanity: like Proj, a Filter needs a predecessor in a valid block */
711 assert(get_Proj_pred(res));
712 assert(get_nodes_block(get_Proj_pred(res)));
714 res = optimize_node(res);
715 IRN_VRFY_IRG(res, irg);
717 } /* new_bd_Filter */
/* Block-direct constructor for a Mux selecting ir_false/ir_true by sel.
   NOTE(review): the in[] assignments are omitted in this extract --
   presumably in[0]=sel, in[1]=ir_false, in[2]=ir_true; confirm. */
720 new_bd_Mux(dbg_info *db, ir_node *block,
721 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
724 ir_graph *irg = current_ir_graph;
730 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
733 res = optimize_node(res);
734 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a CopyB (block copy of data_type from src
   to dst).  Created pinned. */
739 new_bd_CopyB(dbg_info *db, ir_node *block,
740 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
743 ir_graph *irg = current_ir_graph;
749 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
751 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
752 res->attr.copyb.data_type = data_type;
753 res = optimize_node(res);
754 IRN_VRFY_IRG(res, irg);
759 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
760 ir_node *objptr, ir_type *type) {
763 ir_graph *irg = current_ir_graph;
767 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
768 res->attr.instof.type = type;
769 res = optimize_node(res);
770 IRN_VRFY_IRG(res, irg);
772 } /* new_bd_InstOf */
/* Block-direct constructor for a Raise throwing exception object obj
   (inputs: memory and the exception object). */
775 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
778 ir_graph *irg = current_ir_graph;
782 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
783 res = optimize_node(res);
784 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a Bound check: lower <= idx < upper.
   Four inputs (memory, idx, lower, upper); created pinned. */
789 new_bd_Bound(dbg_info *db, ir_node *block,
790 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
793 ir_graph *irg = current_ir_graph;
799 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
800 res->attr.bound.exc.pin_state = op_pin_state_pinned;
801 res = optimize_node(res);
802 IRN_VRFY_IRG(res, irg);
/* Block-direct constructor for a Pin: fixes 'node' into this block; the
   mode is inherited from the pinned node. */
807 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
809 ir_graph *irg = current_ir_graph;
811 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
812 res = optimize_node(res);
813 IRN_VRFY_IRG(res, irg);
/*
 * Block-direct constructor for an inline-assembler node.  The constraint
 * and clobber arrays are copied onto the graph's obstack so the caller's
 * buffers need not outlive the node.  Always pinned.
 */
818 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
819 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
821 ir_graph *irg = current_ir_graph;
824 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
825 res->attr.assem.pin_state = op_pin_state_pinned;
/* obstack-owned copies of the caller-supplied arrays */
826 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
827 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
828 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
829 res->attr.assem.asm_text = asm_text;
831 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
832 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
833 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
835 res = optimize_node(res);
836 IRN_VRFY_IRG(res, irg);
840 /* --------------------------------------------- */
841 /* private interfaces, for professional use only */
842 /* --------------------------------------------- */
844 /* Constructs a Block with a fixed number of predecessors.
845 Does not set current_block. Cannot be used with automatic
846 Phi node construction. */
/* Explicit-graph Block constructor: switches current_ir_graph to irg,
   delegates to the bd constructor, then restores the previous graph. */
848 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
849 ir_graph *rem = current_ir_graph;
852 current_ir_graph = irg;
853 res = new_bd_Block(db, arity, in);
854 current_ir_graph = rem;
860 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
861 ir_graph *rem = current_ir_graph;
864 current_ir_graph = irg;
865 res = new_bd_Start(db, block);
866 current_ir_graph = rem;
872 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
874 ir_graph *rem = current_ir_graph;
876 current_ir_graph = irg;
877 res = new_bd_End(db, block);
878 current_ir_graph = rem;
883 /* Creates a Phi node with all predecessors. Calling this constructor
884 is only allowed if the corresponding block is mature. */
886 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
888 ir_graph *rem = current_ir_graph;
890 current_ir_graph = irg;
891 res = new_bd_Phi(db, block,arity, in, mode);
892 current_ir_graph = rem;
898 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
900 ir_graph *rem = current_ir_graph;
902 current_ir_graph = irg;
903 res = new_bd_Const_type(db, block, mode, con, tp);
904 current_ir_graph = rem;
907 } /* new_rd_Const_type */
910 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
912 ir_graph *rem = current_ir_graph;
914 current_ir_graph = irg;
915 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
916 current_ir_graph = rem;
922 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
923 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
924 } /* new_rd_Const_long */
927 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
929 ir_graph *rem = current_ir_graph;
931 current_ir_graph = irg;
932 res = new_bd_Id(db, block, val, mode);
933 current_ir_graph = rem;
939 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
942 ir_graph *rem = current_ir_graph;
944 current_ir_graph = irg;
945 res = new_bd_Proj(db, block, arg, mode, proj);
946 current_ir_graph = rem;
952 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
955 ir_graph *rem = current_ir_graph;
957 current_ir_graph = irg;
958 res = new_bd_defaultProj(db, block, arg, max_proj);
959 current_ir_graph = rem;
962 } /* new_rd_defaultProj */
965 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
967 ir_graph *rem = current_ir_graph;
969 current_ir_graph = irg;
970 res = new_bd_Conv(db, block, op, mode, 0);
971 current_ir_graph = rem;
977 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
979 ir_graph *rem = current_ir_graph;
981 current_ir_graph = irg;
982 res = new_bd_Cast(db, block, op, to_tp);
983 current_ir_graph = rem;
989 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
991 ir_graph *rem = current_ir_graph;
993 current_ir_graph = irg;
994 res = new_bd_Tuple(db, block, arity, in);
995 current_ir_graph = rem;
1006 NEW_RD_DIVOP(DivMod)
1019 NEW_RD_BINOP(Borrow)
1021 /* creates a rd constructor for an divRL */
1021 /* creates a rd constructor for an divRL */
/* Explicit-graph wrapper for the remainderless Div (same save/switch/
   restore pattern as the NEW_RD_DIVOP expansion). */
1022 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1023 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1026 ir_graph *rem = current_ir_graph;
1027 current_ir_graph = irg;
1028 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1029 current_ir_graph = rem;
1034 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1035 ir_node *op1, ir_node *op2) {
1037 ir_graph *rem = current_ir_graph;
1039 current_ir_graph = irg;
1040 res = new_bd_Cmp(db, block, op1, op2);
1041 current_ir_graph = rem;
1047 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1049 ir_graph *rem = current_ir_graph;
1051 current_ir_graph = irg;
1052 res = new_bd_Jmp(db, block);
1053 current_ir_graph = rem;
1059 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1061 ir_graph *rem = current_ir_graph;
1063 current_ir_graph = irg;
1064 res = new_bd_IJmp(db, block, tgt);
1065 current_ir_graph = rem;
1071 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1073 ir_graph *rem = current_ir_graph;
1075 current_ir_graph = irg;
1076 res = new_bd_Cond(db, block, c);
1077 current_ir_graph = rem;
1083 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1084 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1086 ir_graph *rem = current_ir_graph;
1088 current_ir_graph = irg;
1089 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1090 current_ir_graph = rem;
1096 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1097 ir_node *store, int arity, ir_node **in) {
1099 ir_graph *rem = current_ir_graph;
1101 current_ir_graph = irg;
1102 res = new_bd_Return(db, block, store, arity, in);
1103 current_ir_graph = rem;
1106 } /* new_rd_Return */
1109 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1110 ir_node *store, ir_node *adr, ir_mode *mode) {
1112 ir_graph *rem = current_ir_graph;
1114 current_ir_graph = irg;
1115 res = new_bd_Load(db, block, store, adr, mode);
1116 current_ir_graph = rem;
1122 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1123 ir_node *store, ir_node *adr, ir_node *val) {
1125 ir_graph *rem = current_ir_graph;
1127 current_ir_graph = irg;
1128 res = new_bd_Store(db, block, store, adr, val);
1129 current_ir_graph = rem;
1132 } /* new_rd_Store */
1135 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1136 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1138 ir_graph *rem = current_ir_graph;
1140 current_ir_graph = irg;
1141 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1142 current_ir_graph = rem;
1145 } /* new_rd_Alloc */
1148 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1149 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1151 ir_graph *rem = current_ir_graph;
1153 current_ir_graph = irg;
1154 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1155 current_ir_graph = rem;
1161 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1162 ir_node *store, ir_node *objptr, ir_entity *ent) {
1164 ir_graph *rem = current_ir_graph;
1166 current_ir_graph = irg;
1167 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1168 current_ir_graph = rem;
1171 } /* new_rd_simpleSel */
1174 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1175 int arity, ir_node **in, ir_entity *ent) {
1177 ir_graph *rem = current_ir_graph;
1179 current_ir_graph = irg;
1180 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1181 current_ir_graph = rem;
1187 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1188 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1190 ir_graph *rem = current_ir_graph;
1192 current_ir_graph = irg;
1193 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1194 current_ir_graph = rem;
1197 } /* new_rd_SymConst_type */
/* Untyped SymConst: delegates to the typed variant with
   firm_unknown_type. */
1200 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1201 symconst_symbol value, symconst_kind symkind) {
1202 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1203 } /* new_rd_SymConst */
1205 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1206 symconst_symbol sym;
1207 sym.entity_p = symbol;
1208 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1209 } /* new_rd_SymConst_addr_ent */
1211 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1212 symconst_symbol sym;
1213 sym.entity_p = symbol;
1214 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1215 } /* new_rd_SymConst_ofs_ent */
1217 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1218 symconst_symbol sym;
1219 sym.ident_p = symbol;
1220 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1221 } /* new_rd_SymConst_addr_name */
1223 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1224 symconst_symbol sym;
1225 sym.type_p = symbol;
1226 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1227 } /* new_rd_SymConst_type_tag */
1229 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1230 symconst_symbol sym;
1231 sym.type_p = symbol;
1232 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1233 } /* new_rd_SymConst_size */
1235 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1236 symconst_symbol sym;
1237 sym.type_p = symbol;
1238 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1239 } /* new_rd_SymConst_align */
/* Explicit-graph Sync constructor: builds an empty (dynamic-arity) Sync
   via the bd constructor, then appends the given predecessors. */
1242 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1244 ir_graph *rem = current_ir_graph;
1247 current_ir_graph = irg;
1248 res = new_bd_Sync(db, block);
1249 current_ir_graph = rem;
/* predecessors are added one by one to the dynamic in-array */
1251 for (i = 0; i < arity; ++i)
1252 add_Sync_pred(res, in[i]);
1258 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1260 ir_graph *rem = current_ir_graph;
1262 current_ir_graph = irg;
1263 res = new_bd_Confirm(db, block, val, bound, cmp);
1264 current_ir_graph = rem;
1267 } /* new_rd_Confirm */
1270 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1272 ir_graph *rem = current_ir_graph;
1274 current_ir_graph = irg;
1275 res = new_bd_Unknown(m);
1276 current_ir_graph = rem;
1279 } /* new_rd_Unknown */
1282 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1284 ir_graph *rem = current_ir_graph;
1286 current_ir_graph = irg;
1287 res = new_bd_CallBegin(db, block, call);
1288 current_ir_graph = rem;
1291 } /* new_rd_CallBegin */
/* Explicit-graph EndReg constructor.  Unlike its siblings it builds the
   node directly with the given irg (no current_ir_graph switch needed,
   since new_ir_node takes the graph explicitly). */
1294 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1297 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1298 set_irg_end_reg(irg, res);
1299 IRN_VRFY_IRG(res, irg);
1301 } /* new_rd_EndReg */
/* Explicit-graph EndExcept constructor; like new_rd_EndReg it builds
   directly on the given irg and registers the node with the graph. */
1304 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1307 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1308 set_irg_end_except(irg, res);
1309 IRN_VRFY_IRG (res, irg);
1311 } /* new_rd_EndExcept */
1314 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
/* new_rd_* constructors: each saves current_ir_graph, switches it to the
   target graph irg, delegates to the corresponding new_bd_* constructor
   (which builds in current_ir_graph), and restores the previous graph.
   NOTE(review): this excerpt is missing some lines (declarations of res,
   return statements) dropped by the extraction. */
1316 ir_graph *rem = current_ir_graph;
1318 current_ir_graph = irg;
1319 res = new_bd_Break(db, block);
1320 current_ir_graph = rem;
1323 } /* new_rd_Break */
1326 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1329 ir_graph *rem = current_ir_graph;
1331 current_ir_graph = irg;
1332 res = new_bd_Filter(db, block, arg, mode, proj);
1333 current_ir_graph = rem;
1336 } /* new_rd_Filter */
1339 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1340 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1342 ir_graph *rem = current_ir_graph;
1344 current_ir_graph = irg;
1345 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1346 current_ir_graph = rem;
1351 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1352 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1354 ir_graph *rem = current_ir_graph;
1356 current_ir_graph = irg;
1357 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1358 current_ir_graph = rem;
1361 } /* new_rd_CopyB */
1364 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1365 ir_node *objptr, ir_type *type) {
1367 ir_graph *rem = current_ir_graph;
1369 current_ir_graph = irg;
1370 res = new_bd_InstOf(db, block, store, objptr, type);
1371 current_ir_graph = rem;
1374 } /* new_rd_InstOf */
1377 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1379 ir_graph *rem = current_ir_graph;
1381 current_ir_graph = irg;
1382 res = new_bd_Raise(db, block, store, obj);
1383 current_ir_graph = rem;
1386 } /* new_rd_Raise */
1388 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1389 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1391 ir_graph *rem = current_ir_graph;
1393 current_ir_graph = irg;
1394 res = new_bd_Bound(db, block, store, idx, lower, upper);
1395 current_ir_graph = rem;
1398 } /* new_rd_Bound */
1400 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1402 ir_graph *rem = current_ir_graph;
1404 current_ir_graph = irg;
1405 res = new_bd_Pin(db, block, node);
1406 current_ir_graph = rem;
1411 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1412 int arity, ir_node *in[], ir_asm_constraint *inputs,
1413 int n_outs, ir_asm_constraint *outputs,
1414 int n_clobber, ident *clobber[], ident *asm_text) {
1416 ir_graph *rem = current_ir_graph;
1418 current_ir_graph = irg;
1419 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1420 current_ir_graph = rem;
/* new_r_* constructors: debug-info-less conveniences that simply forward to
   the corresponding new_rd_* constructor with a NULL dbg_info.
   NOTE(review): the closing braces of these one-line wrappers were dropped
   by the extraction; the bodies themselves are complete. */
1426 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1427 return new_rd_Block(NULL, irg, arity, in);
1429 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1430 return new_rd_Start(NULL, irg, block);
1432 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1433 return new_rd_End(NULL, irg, block);
1435 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1436 return new_rd_Jmp(NULL, irg, block);
1438 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1439 return new_rd_IJmp(NULL, irg, block, tgt);
1441 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1442 return new_rd_Cond(NULL, irg, block, c);
1444 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1445 ir_node *store, int arity, ir_node **in) {
1446 return new_rd_Return(NULL, irg, block, store, arity, in);
1448 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1449 ir_mode *mode, tarval *con) {
1450 return new_rd_Const(NULL, irg, block, mode, con);
1452 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1453 ir_mode *mode, long value) {
1454 return new_rd_Const_long(NULL, irg, block, mode, value);
1456 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1457 ir_mode *mode, tarval *con, ir_type *tp) {
1458 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1460 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1461 symconst_symbol value, symconst_kind symkind) {
1462 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
/* simpleSel: a Sel with no index operands (scalar entity selection). */
1464 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1465 ir_node *objptr, ir_entity *ent) {
1466 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1468 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1469 ir_node *objptr, int n_index, ir_node **index,
1471 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1473 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1474 ir_node *callee, int arity, ir_node **in,
1476 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1478 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1479 ir_node *op1, ir_node *op2, ir_mode *mode) {
1480 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1482 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1483 ir_node *op1, ir_node *op2, ir_mode *mode) {
1484 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1486 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1487 ir_node *op, ir_mode *mode) {
1488 return new_rd_Minus(NULL, irg, block, op, mode);
1490 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1491 ir_node *op1, ir_node *op2, ir_mode *mode) {
1492 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1494 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1495 ir_node *op1, ir_node *op2, ir_mode *mode) {
1496 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1498 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1499 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1500 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1502 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1503 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1504 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1506 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1507 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1508 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1510 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1511 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1512 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1514 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1515 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1516 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1518 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1519 ir_node *op, ir_mode *mode) {
1520 return new_rd_Abs(NULL, irg, block, op, mode);
1522 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1523 ir_node *op1, ir_node *op2, ir_mode *mode) {
1524 return new_rd_And(NULL, irg, block, op1, op2, mode);
1526 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1527 ir_node *op1, ir_node *op2, ir_mode *mode) {
1528 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1530 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1531 ir_node *op1, ir_node *op2, ir_mode *mode) {
1532 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1534 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1535 ir_node *op, ir_mode *mode) {
1536 return new_rd_Not(NULL, irg, block, op, mode);
1538 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1539 ir_node *op, ir_node *k, ir_mode *mode) {
1540 return new_rd_Shl(NULL, irg, block, op, k, mode);
1542 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1543 ir_node *op, ir_node *k, ir_mode *mode) {
1544 return new_rd_Shr(NULL, irg, block, op, k, mode);
1546 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1547 ir_node *op, ir_node *k, ir_mode *mode) {
1548 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1550 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1551 ir_node *op, ir_node *k, ir_mode *mode) {
1552 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1554 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1555 ir_node *op, ir_node *k, ir_mode *mode) {
1556 return new_rd_Carry(NULL, irg, block, op, k, mode);
1558 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1559 ir_node *op, ir_node *k, ir_mode *mode) {
1560 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1562 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1563 ir_node *op1, ir_node *op2) {
1564 return new_rd_Cmp(NULL, irg, block, op1, op2);
1566 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1567 ir_node *op, ir_mode *mode) {
1568 return new_rd_Conv(NULL, irg, block, op, mode);
1570 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1571 return new_rd_Cast(NULL, irg, block, op, to_tp);
1573 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1574 ir_node **in, ir_mode *mode) {
1575 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1577 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1578 ir_node *store, ir_node *adr, ir_mode *mode) {
1579 return new_rd_Load(NULL, irg, block, store, adr, mode);
1581 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1582 ir_node *store, ir_node *adr, ir_node *val) {
1583 return new_rd_Store(NULL, irg, block, store, adr, val);
1585 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1586 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1587 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1589 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1590 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1591 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1593 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1594 return new_rd_Sync(NULL, irg, block, arity, in);
1596 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1597 ir_mode *mode, long proj) {
1598 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1600 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1602 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1604 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1605 int arity, ir_node **in) {
1606 return new_rd_Tuple(NULL, irg, block, arity, in );
1608 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1609 ir_node *val, ir_mode *mode) {
1610 return new_rd_Id(NULL, irg, block, val, mode);
/* Bad and NoMem are singletons per graph: return the cached node. */
1612 ir_node *new_r_Bad(ir_graph *irg) {
1613 return get_irg_bad(irg);
1615 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1616 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1618 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1619 return new_rd_Unknown(irg, m);
1621 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1622 return new_rd_CallBegin(NULL, irg, block, callee);
1624 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1625 return new_rd_EndReg(NULL, irg, block);
1627 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1628 return new_rd_EndExcept(NULL, irg, block);
1630 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1631 return new_rd_Break(NULL, irg, block);
1633 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1634 ir_mode *mode, long proj) {
1635 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1637 ir_node *new_r_NoMem(ir_graph *irg) {
1638 return get_irg_no_mem(irg);
1640 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1641 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1642 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1644 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1645 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1646 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1648 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1650 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1652 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1653 ir_node *store, ir_node *obj) {
1654 return new_rd_Raise(NULL, irg, block, store, obj);
1656 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1657 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1658 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1660 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1661 return new_rd_Pin(NULL, irg, block, node);
1663 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1664 int arity, ir_node *in[], ir_asm_constraint *inputs,
1665 int n_outs, ir_asm_constraint *outputs,
1666 int n_clobber, ident *clobber[], ident *asm_text) {
1667 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1670 /** ********************/
1671 /** public interfaces */
1672 /** construction tools */
1676 * - create a new Start node in the current block
1678 * @return s - pointer to the created Start node
1683 new_d_Start(dbg_info *db) {
/* Start has mode_T: its results (initial memory, frame, args, ...) are
   accessed through Proj nodes. */
1686 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1687 op_Start, mode_T, 0, NULL);
1689 res = optimize_node(res);
1690 IRN_VRFY_IRG(res, current_ir_graph);
1695 new_d_End(dbg_info *db) {
/* NOTE(review): arity -1 presumably requests a dynamic in-array so
   keep-alive edges can be appended later — confirm against new_ir_node. */
1697 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1698 op_End, mode_X, -1, NULL);
1699 res = optimize_node(res);
1700 IRN_VRFY_IRG(res, current_ir_graph);
1705 /* Constructs a Block with a fixed number of predecessors.
1706 Does set current_block. Can be used with automatic Phi
1707 node construction. */
1709 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1712 int has_unknown = 0;
1714 res = new_bd_Block(db, arity, in);
/* While the graph is in phase_building, each block carries an array with
   one slot per local variable (n_loc) used by the on-the-fly SSA
   construction (get_r_value_internal / phi_merge). */
1716 /* Create and initialize array for Phi-node construction. */
1717 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1718 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1719 current_ir_graph->n_loc);
1720 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Blocks with Unknown predecessors must not be optimized yet: the
   Unknown inputs are placeholders to be fixed up later. */
1723 for (i = arity-1; i >= 0; i--)
1724 if (is_Unknown(in[i])) {
1729 if (!has_unknown) res = optimize_node(res);
/* The new block becomes the current block of the stateful interface. */
1730 current_ir_graph->current_block = res;
1732 IRN_VRFY_IRG(res, current_ir_graph);
1737 /* ***********************************************************************/
1738 /* Methods necessary for automatic Phi node creation */
1740 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1741 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1742 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1743 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1745 Call Graph: ( A ---> B == A "calls" B)
1747 get_value mature_immBlock
1755 get_r_value_internal |
1759 new_rd_Phi0 new_rd_Phi_in
1761 * *************************************************************************** */
1763 /** Creates a Phi node with 0 predecessors. */
/* A Phi0 is a placeholder used while its block is still immature; it is
   completed/exchanged once the block's predecessors are known
   (mature_immBlock).  Note: deliberately NOT optimized here. */
1764 static inline ir_node *
1765 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1768 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1769 IRN_VRFY_IRG(res, irg);
1775 * Internal constructor of a Phi node by a phi_merge operation.
1777 * @param irg the graph on which the Phi will be constructed
1778 * @param block the block in which the Phi will be constructed
1779 * @param mode the mode of the Phi node
1780 * @param in the input array of the phi node
1781 * @param ins number of elements in the input array
1782 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1783 * the value for which the new Phi is constructed
1785 static inline ir_node *
1786 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1787 ir_node **in, int ins, ir_node *phi0) {
1789 ir_node *res, *known;
1791 /* Allocate a new node on the obstack. The allocation copies the in
1793 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1794 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1796 /* This loop checks whether the Phi has more than one predecessor.
1797 If so, it is a real Phi node and we break the loop. Else the
1798 Phi node merges the same definition on several paths and therefore
1799 is not needed. Don't consider Bad nodes! */
1801 for (i = ins - 1; i >= 0; --i) {
1804 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1806 /* Optimize self referencing Phis: We can't detect them yet properly, as
1807 they still refer to the Phi0 they will replace. So replace right now. */
1808 if (phi0 && in[i] == phi0)
1811 if (in[i] == res || in[i] == known || is_Bad(in[i]))
1820 /* i < 0: there is at most one predecessor, we don't need a phi node. */
1823 edges_node_deleted(res, current_ir_graph);
/* The superfluous Phi was the most recent obstack allocation, so it can
   simply be freed again. */
1824 obstack_free(current_ir_graph->obst, res);
1825 if (is_Phi(known)) {
1826 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1827 order, an enclosing Phi node may get superfluous. */
1828 res = optimize_in_place_2(known);
1830 exchange(known, res);
1835 /* An undefined value, e.g., in unreachable code. */
1839 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1840 IRN_VRFY_IRG(res, irg);
1841 /* Memory Phis in endless loops must be kept alive.
1842 As we can't distinguish these easily we keep all of them alive. */
1843 if (is_Phi(res) && mode == mode_M)
1844 add_End_keepalive(get_irg_end(irg), res);
1848 } /* new_rd_Phi_in */
1851 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1853 #if PRECISE_EXC_CONTEXT
1855 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1858 * Construct a new frag_array for node n.
1859 * Copy the content from the current graph_arr of the corresponding block:
1860 * this is the current state.
1861 * Set ProjM(n) as current memory state.
1862 * Further the last entry in frag_arr of current block points to n. This
1863 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1865 static inline ir_node **new_frag_arr(ir_node *n) {
/* Snapshot the block's current value state for the fragile op n. */
1869 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1870 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1871 sizeof(ir_node *)*current_ir_graph->n_loc);
1873 /* turn off optimization before allocating Proj nodes, as res isn't
1875 opt = get_opt_optimize(); set_optimize(0);
1876 /* Here we rely on the fact that all frag ops have Memory as first result! */
1878 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1879 } else if (is_CopyB(n)) {
1880 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops share the same memory result number; the
   asserts below check that invariant at compile/run time. */
1882 assert((pn_Quot_M == pn_DivMod_M) &&
1883 (pn_Quot_M == pn_Div_M) &&
1884 (pn_Quot_M == pn_Mod_M) &&
1885 (pn_Quot_M == pn_Load_M) &&
1886 (pn_Quot_M == pn_Store_M) &&
1887 (pn_Quot_M == pn_Alloc_M) &&
1888 (pn_Quot_M == pn_Bound_M));
1889 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Remember n as the last fragile op of the current block: the last
   graph_arr slot links the fragile ops of a block into a chain. */
1893 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1895 } /* new_frag_arr */
1898 * Returns the frag_arr from a node.
1900 static inline ir_node **get_frag_arr(ir_node *n) {
/* The frag_arr lives at a different attribute offset per opcode, so
   dispatch on the opcode. */
1901 switch (get_irn_opcode(n)) {
1903 return n->attr.call.exc.frag_arr;
1905 return n->attr.alloc.exc.frag_arr;
1907 return n->attr.load.exc.frag_arr;
1909 return n->attr.store.exc.frag_arr;
1911 return n->attr.except.frag_arr;
1913 } /* get_frag_arr */
/* Propagates val into slot pos of frag_arr and along the chain of fragile
   ops of the block, without overwriting existing (later) values. */
1916 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1917 #ifdef DEBUG_libfirm
/* Debug build: bound the walk along the frag-op chain to detect cycles. */
1920 for (i = 1024; i >= 0; --i)
/* Only fill an empty slot: an existing entry is a later value that must
   not be overwritten. */
1925 if (frag_arr[pos] == NULL)
1926 frag_arr[pos] = val;
/* Follow the chain: the last slot links to the next fragile op. */
1927 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
1928 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1929 assert(arr != frag_arr && "Endless recursion detected");
1934 assert(!"potential endless recursion in set_frag_value");
1935 } /* set_frag_value */
/* Returns the definition of value pos valid after the fragile op cfOp,
   creating a Phi in block if a later set_value() shadowed it. */
1938 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1942 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
1944 frag_arr = get_frag_arr(cfOp);
1945 res = frag_arr[pos];
1947 if (block->attr.block.graph_arr[pos] != NULL) {
1948 /* There was a set_value() after the cfOp and no get_value() before that
1949 set_value(). We must build a Phi node now. */
1950 if (block->attr.block.is_matured) {
1951 int ins = get_irn_arity(block);
1953 NEW_ARR_A(ir_node *, nin, ins);
1954 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: create a Phi0 and queue it on the block's phi list
   so mature_immBlock() completes it later. */
1956 res = new_rd_Phi0(current_ir_graph, block, mode);
1957 res->attr.phi.u.pos = pos;
1958 res->attr.phi.next = block->attr.block.phis;
1959 block->attr.block.phis = res;
1961 assert(res != NULL);
1962 /* It's a Phi, we can write this into all graph_arrs with NULL */
1963 set_frag_value(block->attr.block.graph_arr, pos, res);
1965 res = get_r_value_internal(block, pos, mode);
1966 set_frag_value(block->attr.block.graph_arr, pos, res);
1970 } /* get_r_frag_value_internal */
1971 #endif /* PRECISE_EXC_CONTEXT */
1974 * Check whether a control flow node cf_pred represents an exception flow.
1976 * @param cf_pred the control flow node
1977 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
1979 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
1981 * Note: all projections from a raise are "exceptional control flow" so we handle it
1982 * like a normal Jmp, because there is no "regular" one.
1983 * That's why Raise is no "fragile_op"!
1985 if (is_fragile_op(prev_cf_op)) {
1986 if (is_Proj(cf_pred)) {
1987 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
1988 /* the regular control flow, NO exception */
/* Any other Proj of a fragile op must be the exception exit. */
1991 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
1994 /* Hmm, exception but not a Proj? */
1995 assert(!"unexpected condition: fragile op without a proj");
1999 } /* is_exception_flow */
2002 * Computes the predecessors for the real phi node, and then
2003 * allocates and returns this node. The routine called to allocate the
2004 * node might optimize it away and return a real value.
2005 * This function must be called with an in-array of proper size.
2008 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2009 ir_node *prevBlock, *res, *phi0, *phi0_all;
2012 /* If this block has no value at pos create a Phi0 and remember it
2013 in graph_arr to break recursions.
2014 Else we may not set graph_arr as there a later value is remembered. */
2016 if (block->attr.block.graph_arr[pos] == NULL) {
2017 ir_graph *irg = current_ir_graph;
2019 if (block == get_irg_start_block(irg)) {
2020 /* Collapsing to Bad tarvals is no good idea.
2021 So we call a user-supplied routine here that deals with this case as
2022 appropriate for the given language. Sadly the only help we can give
2023 here is the position.
2025 Even if all variables are defined before use, it can happen that
2026 we get to the start block, if a Cond has been replaced by a tuple
2027 (bad, jmp). In this case we call the function needlessly, eventually
2028 generating a non-existent error.
2029 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2032 if (default_initialize_local_variable != NULL) {
2033 ir_node *rem = get_cur_block();
2035 set_cur_block(block);
2036 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2040 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2041 /* We don't need to care about exception ops in the start block.
2042 There are none by definition. */
2043 return block->attr.block.graph_arr[pos];
/* Not the start block: place a Phi0 placeholder to break recursion. */
2045 phi0 = new_rd_Phi0(irg, block, mode);
2046 block->attr.block.graph_arr[pos] = phi0;
2047 #if PRECISE_EXC_CONTEXT
2048 if (get_opt_precise_exc_context()) {
2049 /* Set graph_arr for fragile ops. Also here we should break recursion.
2050 We could choose a cyclic path through a cfop. But the recursion would
2051 break at some point. */
2052 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2058 /* This loop goes to all predecessor blocks of the block the Phi node
2059 is in and there finds the operands of the Phi node by calling
2060 get_r_value_internal. */
/* Block predecessors start at in[1]; nin is 0-based, hence i-1 below. */
2061 for (i = 1; i <= ins; ++i) {
2062 ir_node *cf_pred = block->in[i];
2063 ir_node *prevCfOp = skip_Proj(cf_pred);
2065 if (is_Bad(prevCfOp)) {
2066 /* In case a Cond has been optimized we would get right to the start block
2067 with an invalid definition. */
2068 nin[i-1] = new_Bad();
2071 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2073 if (!is_Bad(prevBlock)) {
2074 #if PRECISE_EXC_CONTEXT
2075 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2076 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2077 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2080 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2082 nin[i-1] = new_Bad();
2086 /* We want to pass the Phi0 node to the constructor: this finds additional
2087 optimization possibilities.
2088 The Phi0 node either is allocated in this function, or it comes from
2089 a former call to get_r_value_internal(). In this case we may not yet
2090 exchange phi0, as this is done in mature_immBlock(). */
2092 phi0_all = block->attr.block.graph_arr[pos];
2093 if (!(is_Phi(phi0_all) &&
2094 (get_irn_arity(phi0_all) == 0) &&
2095 (get_nodes_block(phi0_all) == block)))
2101 /* After collecting all predecessors into the array nin a new Phi node
2102 with these predecessors is created. This constructor contains an
2103 optimization: If all predecessors of the Phi node are identical it
2104 returns the only operand instead of a new Phi node. */
2105 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2107 /* In case we allocated a Phi0 node at the beginning of this procedure,
2108 we need to exchange this Phi0 with the real Phi. */
2110 exchange(phi0, res);
2111 block->attr.block.graph_arr[pos] = res;
2112 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2113 only an optimization. */
2120 * This function returns the last definition of a value. In case
2121 * this value was last defined in a previous block, Phi nodes are
2122 * inserted. If the part of the firm graph containing the definition
2123 * is not yet constructed, a dummy Phi node is returned.
2125 * @param block the current block
2126 * @param pos the value number of the value searched
2127 * @param mode the mode of this value (needed for Phi construction)
2130 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2132 /* There are 4 cases to treat.
2134 1. The block is not mature and we visit it the first time. We can not
2135 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2136 predecessors is returned. This node is added to the linked list (block
2137 attribute "phis") of the containing block to be completed when this block is
2138 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2141 2. The value is already known in this block, graph_arr[pos] is set and we
2142 visit the block the first time. We can return the value without
2143 creating any new nodes.
2145 3. The block is mature and we visit it the first time. A Phi node needs
2146 to be created (phi_merge). If the Phi is not needed, as all its
2147 operands are the same value reaching the block through different
2148 paths, it's optimized away and the value itself is returned.
2150 4. The block is mature, and we visit it the second time. Now two
2151 subcases are possible:
2152 * The value was computed completely the last time we were here. This
2153 is the case if there is no loop. We can return the proper value.
2154 * The recursion that visited this node and set the flag did not
2155 return yet. We are computing a value in a loop and need to
2156 break the recursion. This case only happens if we visited
2157 the same block with phi_merge before, which inserted a Phi0.
2158 So we return the Phi0.
2161 /* case 4 -- already visited. */
2162 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2163 /* As phi_merge allocates a Phi0 this value is always defined. Here
2164 is the critical difference of the two algorithms. */
2165 assert(block->attr.block.graph_arr[pos]);
2166 return block->attr.block.graph_arr[pos];
2169 /* visited the first time */
2170 set_irn_visited(block, get_irg_visited(current_ir_graph));
2172 /* Get the local valid value */
2173 res = block->attr.block.graph_arr[pos];
2175 /* case 2 -- If the value is actually computed, return it. */
2179 if (block->attr.block.is_matured) { /* case 3 */
2181 /* The Phi has the same amount of ins as the corresponding block. */
2182 int ins = get_irn_arity(block);
2184 NEW_ARR_A(ir_node *, nin, ins);
2186 /* Phi merge collects the predecessors and then creates a node. */
2187 res = phi_merge(block, pos, mode, nin, ins);
2189 } else { /* case 1 */
2190 /* The block is not mature, we don't know how many in's are needed. A Phi
2191 with zero predecessors is created. Such a Phi node is called Phi0
2192 node. The Phi0 is then added to the list of Phi0 nodes in this block
2193 to be matured by mature_immBlock later.
2194 The Phi0 has to remember the pos of its internal value. If the real
2195 Phi is computed, pos is used to update the array with the local
2197 res = new_rd_Phi0(current_ir_graph, block, mode);
2198 res->attr.phi.u.pos = pos;
2199 res->attr.phi.next = block->attr.block.phis;
2200 block->attr.block.phis = res;
2203 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2205 /* The local valid value is available now. */
2206 block->attr.block.graph_arr[pos] = res;
2209 } /* get_r_value_internal */
2211 /* ************************************************************************** */
2214 * Finalize a Block node, when all control flows are known.
2215 * Acceptable parameters are only Block nodes.
2218 mature_immBlock(ir_node *block) {
2223 assert(is_Block(block));
2224 if (!get_Block_matured(block)) {
2225 ir_graph *irg = current_ir_graph;
/* in[] carries one extra leading slot (predecessors start at in[1]),
   so the control-flow predecessor count is ARR_LEN - 1. */
2227 ins = ARR_LEN(block->in) - 1;
2228 /* Fix block parameters */
2229 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2231 /* An array for building the Phi nodes. */
2232 NEW_ARR_A(ir_node *, nin, ins);
2234 /* Traverse a chain of Phi nodes attached to this block and mature
/* Fetch next before exchange(): n is rewired by the exchange. */
2236 for (n = block->attr.block.phis; n; n = next) {
2237 inc_irg_visited(irg);
2238 next = n->attr.phi.next;
2239 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2242 block->attr.block.is_matured = 1;
2244 /* Now, as the block is a finished Firm node, we can optimize it.
2245 Since other nodes have been allocated since the block was created
2246 we can not free the node on the obstack. Therefore we have to call
2247 optimize_in_place().
2248 Unfortunately the optimization does not change a lot, as all allocated
2249 nodes refer to the unoptimized node.
2250 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2251 block = optimize_in_place_2(block);
2252 IRN_VRFY_IRG(block, irg);
/* new_d_* constructors: build nodes in current_ir_graph->current_block via
   the corresponding new_bd_* constructor.  Const variants are placed in the
   start block so constants are available everywhere. */
2257 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2258 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2262 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2263 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2267 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2268 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2269 } /* new_d_Const_long */
2272 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2273 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2274 } /* new_d_Const_type */
2278 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2279 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2283 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2284 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* defaultProj: marks the Cond as fragmentary and records the default
   projection number before building the Proj. */
2288 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2291 assert(arg->op == op_Cond);
2292 arg->attr.cond.kind = fragmentary;
2293 arg->attr.cond.default_proj = max_proj;
2294 res = new_Proj(arg, mode_X, max_proj);
2296 } /* new_d_defaultProj */
/* Conv: last argument 0 = normal conversion, 1 = strict conversion. */
2299 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2300 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2304 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2305 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2306 } /* new_d_strictConv */
2309 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2310 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2314 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2315 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2325 * Allocate a frag array for a node if the current graph state is phase_building.
2327 * @param irn the node for which the frag array should be allocated
2328 * @param op the opcode of the (original) node, if does not match opcode of irn,
2330 * @param frag_store the address of the frag store in irn attributes, if this
2331 * address contains a value != NULL, does nothing
2333 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
/* Only needed when precise exception contexts are tracked. */
2334 if (get_opt_precise_exc_context()) {
2335 if ((current_ir_graph->phase_state == phase_building) &&
2336 (get_irn_op(irn) == op) && /* Could be optimized away. */
2337 !*frag_store) /* Could be a cse where the arr is already set. */ {
2338 *frag_store = new_frag_arr(irn);
2341 } /* firm_alloc_frag_arr */
/* new_d_* constructors for fragile ops: build the node in the current block
   and, with PRECISE_EXC_CONTEXT, attach a frag array snapshotting the value
   state for precise exception handling. */
2344 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2346 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2347 #if PRECISE_EXC_CONTEXT
2348 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2355 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2357 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2358 #if PRECISE_EXC_CONTEXT
2359 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2363 } /* new_d_DivMod */
2366 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2368 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2369 #if PRECISE_EXC_CONTEXT
2370 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2377 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2379 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2380 #if PRECISE_EXC_CONTEXT
/* NOTE(review): op_Div (not op_DivRL) looks intentional — DivRL is
   presumably built as a Div node with the no-remainder flag; confirm
   against new_bd_DivRL before "fixing" this. */
2381 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2388 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2390 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2391 #if PRECISE_EXC_CONTEXT
2392 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
2411 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2412 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2416 new_d_Jmp(dbg_info *db) {
2417 return new_bd_Jmp(db, current_ir_graph->current_block);
2421 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2422 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2426 new_d_Cond(dbg_info *db, ir_node *c) {
2427 return new_bd_Cond(db, current_ir_graph->current_block, c);
2431 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2434 res = new_bd_Call(db, current_ir_graph->current_block,
2435 store, callee, arity, in, tp);
2436 #if PRECISE_EXC_CONTEXT
2437 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2444 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2445 return new_bd_Return(db, current_ir_graph->current_block,
2447 } /* new_d_Return */
2450 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2452 res = new_bd_Load(db, current_ir_graph->current_block,
2454 #if PRECISE_EXC_CONTEXT
2455 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2462 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2464 res = new_bd_Store(db, current_ir_graph->current_block,
2466 #if PRECISE_EXC_CONTEXT
2467 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2474 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2475 ir_where_alloc where) {
2477 res = new_bd_Alloc(db, current_ir_graph->current_block,
2478 store, size, alloc_type, where);
2479 #if PRECISE_EXC_CONTEXT
2480 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
2487 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2488 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2489 return new_bd_Free(db, current_ir_graph->current_block,
2490 store, ptr, size, free_type, where);
2494 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2495 /* GL: objptr was called frame before. Frame was a bad choice for the name
2496 as the operand could as well be a pointer to a dynamic object. */
2498 return new_bd_Sel(db, current_ir_graph->current_block,
2499 store, objptr, 0, NULL, ent);
2500 } /* new_d_simpleSel */
2503 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2504 return new_bd_Sel(db, current_ir_graph->current_block,
2505 store, objptr, n_index, index, sel);
2509 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2510 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2512 } /* new_d_SymConst_type */
2515 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2516 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2517 value, kind, firm_unknown_type);
2518 } /* new_d_SymConst */
2521 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2522 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2526 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2527 return new_bd_Confirm(db, current_ir_graph->current_block,
2529 } /* new_d_Confirm */
2532 new_d_Unknown(ir_mode *m) {
2533 return new_bd_Unknown(m);
2534 } /* new_d_Unknown */
2537 new_d_CallBegin(dbg_info *db, ir_node *call) {
2538 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2539 } /* new_d_CallBegin */
2542 new_d_EndReg(dbg_info *db) {
2543 return new_bd_EndReg(db, current_ir_graph->current_block);
2544 } /* new_d_EndReg */
2547 new_d_EndExcept(dbg_info *db) {
2548 return new_bd_EndExcept(db, current_ir_graph->current_block);
2549 } /* new_d_EndExcept */
2552 new_d_Break(dbg_info *db) {
2553 return new_bd_Break(db, current_ir_graph->current_block);
2557 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2558 return new_bd_Filter(db, current_ir_graph->current_block,
2560 } /* new_d_Filter */
2563 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2564 ir_node *ir_true, ir_mode *mode) {
2565 return new_bd_Mux(db, current_ir_graph->current_block,
2566 sel, ir_false, ir_true, mode);
2569 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2570 ir_node *dst, ir_node *src, ir_type *data_type) {
2572 res = new_bd_CopyB(db, current_ir_graph->current_block,
2573 store, dst, src, data_type);
2574 #if PRECISE_EXC_CONTEXT
2575 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2581 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2582 return new_bd_InstOf(db, current_ir_graph->current_block,
2583 store, objptr, type);
2584 } /* new_d_InstOf */
2587 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2588 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2591 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2592 ir_node *idx, ir_node *lower, ir_node *upper) {
2594 res = new_bd_Bound(db, current_ir_graph->current_block,
2595 store, idx, lower, upper);
2596 #if PRECISE_EXC_CONTEXT
2597 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2603 new_d_Pin(dbg_info *db, ir_node *node) {
2604 return new_bd_Pin(db, current_ir_graph->current_block, node);
2608 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2609 int n_outs, ir_asm_constraint *outputs,
2610 int n_clobber, ident *clobber[], ident *asm_text) {
2611 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2614 /* ********************************************************************* */
2615 /* Comfortable interface with automatic Phi node construction. */
/* (Also uses constructors of the ?? interface, except new_Block.)       */
2617 /* ********************************************************************* */
2619 /* Block construction */
2620 /* immature Block without predecessors */
2622 new_d_immBlock(dbg_info *db) {
2625 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2626 /* creates a new dynamic in-array as length of in is -1 */
2627 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2628 current_ir_graph->current_block = res;
2630 /* macroblock head */
2633 res->attr.block.is_matured = 0;
2634 res->attr.block.is_dead = 0;
2635 res->attr.block.is_mb_head = 1;
2636 res->attr.block.has_label = 0;
2637 res->attr.block.irg = current_ir_graph;
2638 res->attr.block.backedge = NULL;
2639 res->attr.block.in_cg = NULL;
2640 res->attr.block.cg_backedge = NULL;
2641 res->attr.block.extblk = NULL;
2642 res->attr.block.region = NULL;
2643 res->attr.block.mb_depth = 0;
2644 res->attr.block.label = 0;
2646 set_Block_block_visited(res, 0);
2648 /* Create and initialize array for Phi-node construction. */
2649 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2650 current_ir_graph->n_loc);
2651 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2653 /* Immature block may not be optimized! */
2654 IRN_VRFY_IRG(res, current_ir_graph);
2657 } /* new_d_immBlock */
2660 new_immBlock(void) {
2661 return new_d_immBlock(NULL);
2662 } /* new_immBlock */
2664 /* immature PartBlock with its predecessors */
2666 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2667 ir_node *res = new_d_immBlock(db);
2668 ir_node *blk = get_nodes_block(pred_jmp);
2670 res->in[0] = blk->in[0];
2671 assert(res->in[0] != NULL);
2672 add_immBlock_pred(res, pred_jmp);
2674 res->attr.block.is_mb_head = 0;
2675 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2678 } /* new_d_immPartBlock */
2681 new_immPartBlock(ir_node *pred_jmp) {
2682 return new_d_immPartBlock(NULL, pred_jmp);
2683 } /* new_immPartBlock */
2685 /* add an edge to a jmp/control flow node */
2687 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2688 int n = ARR_LEN(block->in) - 1;
2690 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2691 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2692 assert(is_ir_node(jmp));
2694 ARR_APP1(ir_node *, block->in, jmp);
2696 hook_set_irn_n(block, n, jmp, NULL);
2697 } /* add_immBlock_pred */
2699 /* changing the current block */
2701 set_cur_block(ir_node *target) {
2702 current_ir_graph->current_block = target;
2703 } /* set_cur_block */
2705 /* ************************ */
2706 /* parameter administration */
2708 /* get a value from the parameter array from the current block by its index */
2710 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2711 ir_graph *irg = current_ir_graph;
2712 assert(get_irg_phase_state(irg) == phase_building);
2713 inc_irg_visited(irg);
2716 return get_r_value_internal(irg->current_block, pos + 1, mode);
2719 /* get a value from the parameter array from the current block by its index */
2721 get_value(int pos, ir_mode *mode) {
2722 return get_d_value(NULL, pos, mode);
2725 /* set a value at position pos in the parameter array from the current block */
2727 set_value(int pos, ir_node *value) {
2728 ir_graph *irg = current_ir_graph;
2729 assert(get_irg_phase_state(irg) == phase_building);
2730 assert(pos+1 < irg->n_loc);
2731 assert(is_ir_node(value));
2732 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2735 /* Find the value number for a node in the current block.*/
2737 find_value(ir_node *value) {
2739 ir_node *bl = current_ir_graph->current_block;
2741 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2742 if (bl->attr.block.graph_arr[i] == value)
2747 /* get the current store */
2750 ir_graph *irg = current_ir_graph;
2752 assert(get_irg_phase_state(irg) == phase_building);
2753 /* GL: one could call get_value instead */
2754 inc_irg_visited(irg);
2755 return get_r_value_internal(irg->current_block, 0, mode_M);
2758 /* set the current store: handles automatic Sync construction for Load nodes */
2760 set_store(ir_node *store) {
2761 ir_node *load, *pload, *pred, *in[2];
2763 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2764 /* Beware: due to dead code elimination, a store might become a Bad node even in
2765 the construction phase. */
2766 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2768 if (get_opt_auto_create_sync()) {
2769 /* handle non-volatile Load nodes by automatically creating Sync's */
2770 load = skip_Proj(store);
2771 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2772 pred = get_Load_mem(load);
2774 if (is_Sync(pred)) {
2775 /* a Load after a Sync: move it up */
2776 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2778 set_Load_mem(load, get_memop_mem(mem));
2779 add_Sync_pred(pred, store);
2782 pload = skip_Proj(pred);
2783 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2784 /* a Load after a Load: create a new Sync */
2785 set_Load_mem(load, get_Load_mem(pload));
2789 store = new_Sync(2, in);
2794 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2798 keep_alive(ir_node *ka) {
2799 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2802 /* --- Useful access routines --- */
2803 /* Returns the current block of the current graph. To set the current
2804 block use set_cur_block. */
2805 ir_node *get_cur_block(void) {
2806 return get_irg_current_block(current_ir_graph);
2807 } /* get_cur_block */
2809 /* Returns the frame type of the current graph */
2810 ir_type *get_cur_frame_type(void) {
2811 return get_irg_frame_type(current_ir_graph);
2812 } /* get_cur_frame_type */
2815 /* ********************************************************************* */
2818 /* call once for each run of the library */
2820 firm_init_cons(uninitialized_local_variable_func_t *func) {
2821 default_initialize_local_variable = func;
2822 } /* firm_init_cons */
2825 irp_finalize_cons(void) {
2827 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2828 irg_finalize_cons(get_irp_irg(i));
2830 irp->phase_state = phase_high;
2831 } /* irp_finalize_cons */
2834 ir_node *new_Block(int arity, ir_node **in) {
2835 return new_d_Block(NULL, arity, in);
2837 ir_node *new_Start(void) {
2838 return new_d_Start(NULL);
2840 ir_node *new_End(void) {
2841 return new_d_End(NULL);
2843 ir_node *new_Jmp(void) {
2844 return new_d_Jmp(NULL);
2846 ir_node *new_IJmp(ir_node *tgt) {
2847 return new_d_IJmp(NULL, tgt);
2849 ir_node *new_Cond(ir_node *c) {
2850 return new_d_Cond(NULL, c);
2852 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2853 return new_d_Return(NULL, store, arity, in);
2855 ir_node *new_Const(ir_mode *mode, tarval *con) {
2856 return new_d_Const(NULL, mode, con);
2859 ir_node *new_Const_long(ir_mode *mode, long value) {
2860 return new_d_Const_long(NULL, mode, value);
2863 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2864 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2867 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2868 return new_d_SymConst_type(NULL, mode, value, kind, type);
2870 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2871 return new_d_SymConst(NULL, mode, value, kind);
2873 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2874 return new_d_simpleSel(NULL, store, objptr, ent);
2876 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2878 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2880 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2882 return new_d_Call(NULL, store, callee, arity, in, tp);
2884 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2885 return new_d_Add(NULL, op1, op2, mode);
2887 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2888 return new_d_Sub(NULL, op1, op2, mode);
2890 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2891 return new_d_Minus(NULL, op, mode);
2893 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2894 return new_d_Mul(NULL, op1, op2, mode);
2896 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2897 return new_d_Mulh(NULL, op1, op2, mode);
2899 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2900 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2902 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2903 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2905 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2906 return new_d_Div(NULL, memop, op1, op2, mode, state);
2908 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2909 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
2911 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2912 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2914 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2915 return new_d_Abs(NULL, op, mode);
2917 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2918 return new_d_And(NULL, op1, op2, mode);
2920 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
2921 return new_d_Or(NULL, op1, op2, mode);
2923 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
2924 return new_d_Eor(NULL, op1, op2, mode);
2926 ir_node *new_Not(ir_node *op, ir_mode *mode) {
2927 return new_d_Not(NULL, op, mode);
2929 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
2930 return new_d_Shl(NULL, op, k, mode);
2932 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
2933 return new_d_Shr(NULL, op, k, mode);
2935 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
2936 return new_d_Shrs(NULL, op, k, mode);
2938 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
2939 return new_d_Rotl(NULL, op, k, mode);
2941 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
2942 return new_d_Carry(NULL, op1, op2, mode);
2944 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
2945 return new_d_Borrow(NULL, op1, op2, mode);
2947 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
2948 return new_d_Cmp(NULL, op1, op2);
2950 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
2951 return new_d_Conv(NULL, op, mode);
2953 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
2954 return new_d_strictConv(NULL, op, mode);
2956 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
2957 return new_d_Cast(NULL, op, to_tp);
2959 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
2960 return new_d_Phi(NULL, arity, in, mode);
2962 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
2963 return new_d_Load(NULL, store, addr, mode);
2965 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
2966 return new_d_Store(NULL, store, addr, val);
2968 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
2969 ir_where_alloc where) {
2970 return new_d_Alloc(NULL, store, size, alloc_type, where);
2972 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
2973 ir_type *free_type, ir_where_alloc where) {
2974 return new_d_Free(NULL, store, ptr, size, free_type, where);
2976 ir_node *new_Sync(int arity, ir_node *in[]) {
2977 return new_d_Sync(NULL, arity, in);
2979 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
2980 return new_d_Proj(NULL, arg, mode, proj);
2982 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
2983 return new_d_defaultProj(NULL, arg, max_proj);
2985 ir_node *new_Tuple(int arity, ir_node **in) {
2986 return new_d_Tuple(NULL, arity, in);
2988 ir_node *new_Id(ir_node *val, ir_mode *mode) {
2989 return new_d_Id(NULL, val, mode);
2991 ir_node *new_Bad(void) {
2992 return get_irg_bad(current_ir_graph);
2994 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
2995 return new_d_Confirm(NULL, val, bound, cmp);
2997 ir_node *new_Unknown(ir_mode *m) {
2998 return new_d_Unknown(m);
3000 ir_node *new_CallBegin(ir_node *callee) {
3001 return new_d_CallBegin(NULL, callee);
3003 ir_node *new_EndReg(void) {
3004 return new_d_EndReg(NULL);
3006 ir_node *new_EndExcept(void) {
3007 return new_d_EndExcept(NULL);
3009 ir_node *new_Break(void) {
3010 return new_d_Break(NULL);
3012 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3013 return new_d_Filter(NULL, arg, mode, proj);
3015 ir_node *new_NoMem(void) {
3016 return get_irg_no_mem(current_ir_graph);
3018 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3019 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3021 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3022 return new_d_CopyB(NULL, store, dst, src, data_type);
3024 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3025 return new_d_InstOf(NULL, store, objptr, ent);
3027 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3028 return new_d_Raise(NULL, store, obj);
3030 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3031 return new_d_Bound(NULL, store, idx, lower, upper);
3033 ir_node *new_Pin(ir_node *node) {
3034 return new_d_Pin(NULL, node);
3036 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3037 int n_outs, ir_asm_constraint *outputs,
3038 int n_clobber, ident *clobber[], ident *asm_text) {
3039 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3042 /* create a new anchor node */
3043 ir_node *new_Anchor(ir_graph *irg) {
3044 ir_node *in[anchor_last];
3045 memset(in, 0, sizeof(in));
3046 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);