2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
47 /* when we need verifying */
49 # define IRN_VRFY_IRG(res, irg)
51 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
55 * Language dependent variable initialization callback.
57 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
59 /* creates a bd constructor for a binop */
60 #define NEW_BD_BINOP(instr) \
62 new_bd_##instr(dbg_info *db, ir_node *block, \
63 ir_node *op1, ir_node *op2, ir_mode *mode) \
67 ir_graph *irg = current_ir_graph; \
70 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
71 res = optimize_node(res); \
72 IRN_VRFY_IRG(res, irg); \
76 /* creates a bd constructor for an unop */
77 #define NEW_BD_UNOP(instr) \
79 new_bd_##instr(dbg_info *db, ir_node *block, \
80 ir_node *op, ir_mode *mode) \
83 ir_graph *irg = current_ir_graph; \
84 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
85 res = optimize_node(res); \
86 IRN_VRFY_IRG(res, irg); \
90 /* creates a bd constructor for a divop */
91 #define NEW_BD_DIVOP(instr) \
93 new_bd_##instr(dbg_info *db, ir_node *block, \
94 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
98 ir_graph *irg = current_ir_graph; \
102 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
103 res->attr.divmod.exc.pin_state = state; \
104 res->attr.divmod.res_mode = mode; \
105 res->attr.divmod.no_remainder = 0; \
106 res = optimize_node(res); \
107 IRN_VRFY_IRG(res, irg); \
111 /* creates a rd constructor for a binop */
112 #define NEW_RD_BINOP(instr) \
114 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
115 ir_node *op1, ir_node *op2, ir_mode *mode) \
118 ir_graph *rem = current_ir_graph; \
119 current_ir_graph = irg; \
120 res = new_bd_##instr(db, block, op1, op2, mode); \
121 current_ir_graph = rem; \
125 /* creates a rd constructor for an unop */
126 #define NEW_RD_UNOP(instr) \
128 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
129 ir_node *op, ir_mode *mode) \
132 ir_graph *rem = current_ir_graph; \
133 current_ir_graph = irg; \
134 res = new_bd_##instr(db, block, op, mode); \
135 current_ir_graph = rem; \
139 /* creates a rd constructor for a divop */
140 #define NEW_RD_DIVOP(instr) \
142 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
143 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
146 ir_graph *rem = current_ir_graph; \
147 current_ir_graph = irg; \
148 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
149 current_ir_graph = rem; \
153 /* creates a d constructor for a binop */
154 #define NEW_D_BINOP(instr) \
156 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
157 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
160 /* creates a d constructor for an unop */
161 #define NEW_D_UNOP(instr) \
163 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
164 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
169 * Constructs a Block with a fixed number of predecessors.
170 * Does not set current_block. Cannot be used with automatic
171 * Phi node construction.
174 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
176 ir_graph *irg = current_ir_graph;
178 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
180 /* macroblock header */
183 res->attr.block.is_dead = 0;
184 res->attr.block.is_mb_head = 1;
185 res->attr.block.has_label = 0;
186 res->attr.block.irg = irg;
187 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
188 res->attr.block.in_cg = NULL;
189 res->attr.block.cg_backedge = NULL;
190 res->attr.block.extblk = NULL;
191 res->attr.block.mb_depth = 0;
192 res->attr.block.label = 0;
194 set_Block_matured(res, 1);
195 set_Block_block_visited(res, 0);
197 IRN_VRFY_IRG(res, irg);
202 new_bd_Start(dbg_info *db, ir_node *block) {
204 ir_graph *irg = current_ir_graph;
206 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
208 IRN_VRFY_IRG(res, irg);
213 new_bd_End(dbg_info *db, ir_node *block) {
215 ir_graph *irg = current_ir_graph;
217 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
219 IRN_VRFY_IRG(res, irg);
224 * Creates a Phi node with all predecessors. Calling this constructor
225 * is only allowed if the corresponding block is mature.
228 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
230 ir_graph *irg = current_ir_graph;
234 /* Don't assert that block matured: the use of this constructor is strongly
236 if (get_Block_matured(block))
237 assert(get_irn_arity(block) == arity);
239 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
241 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
243 for (i = arity - 1; i >= 0; --i)
244 if (is_Unknown(in[i])) {
249 if (!has_unknown) res = optimize_node(res);
250 IRN_VRFY_IRG(res, irg);
252 /* Memory Phis in endless loops must be kept alive.
253 As we can't distinguish these easily we keep all of them alive. */
254 if (is_Phi(res) && mode == mode_M)
255 add_End_keepalive(get_irg_end(irg), res);
260 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
262 ir_graph *irg = current_ir_graph;
265 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
266 res->attr.con.tv = con;
267 set_Const_type(res, tp); /* Call method because of complex assertion. */
268 res = optimize_node (res);
269 assert(get_Const_type(res) == tp);
270 IRN_VRFY_IRG(res, irg);
273 } /* new_bd_Const_type */
276 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
277 ir_graph *irg = current_ir_graph;
279 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
283 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
284 ir_graph *irg = current_ir_graph;
286 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
287 } /* new_bd_Const_long */
290 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
292 ir_graph *irg = current_ir_graph;
294 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
295 res = optimize_node(res);
296 IRN_VRFY_IRG(res, irg);
301 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
304 ir_graph *irg = current_ir_graph;
306 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
307 res->attr.proj = proj;
310 assert(get_Proj_pred(res));
311 assert(get_nodes_block(get_Proj_pred(res)));
313 res = optimize_node(res);
315 IRN_VRFY_IRG(res, irg);
320 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
323 ir_graph *irg = current_ir_graph;
325 assert(arg->op == op_Cond);
326 arg->attr.cond.kind = fragmentary;
327 arg->attr.cond.default_proj = max_proj;
328 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
330 } /* new_bd_defaultProj */
333 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
335 ir_graph *irg = current_ir_graph;
337 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
338 res->attr.conv.strict = strict_flag;
339 res = optimize_node(res);
340 IRN_VRFY_IRG(res, irg);
345 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
347 ir_graph *irg = current_ir_graph;
349 assert(is_atomic_type(to_tp));
351 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
352 res->attr.cast.totype = to_tp;
353 res = optimize_node(res);
354 IRN_VRFY_IRG(res, irg);
359 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
361 ir_graph *irg = current_ir_graph;
363 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
364 res = optimize_node (res);
365 IRN_VRFY_IRG(res, irg);
390 /** Creates a remainderless Div node. */
391 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
392 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
396 ir_graph *irg = current_ir_graph;
400 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
401 res->attr.divmod.exc.pin_state = state;
402 res->attr.divmod.res_mode = mode;
403 res->attr.divmod.no_remainder = 1;
404 res = optimize_node(res);
405 IRN_VRFY_IRG(res, irg);
410 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
413 ir_graph *irg = current_ir_graph;
416 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
417 res = optimize_node(res);
418 IRN_VRFY_IRG(res, irg);
423 new_bd_Jmp(dbg_info *db, ir_node *block) {
425 ir_graph *irg = current_ir_graph;
427 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
428 res = optimize_node(res);
429 IRN_VRFY_IRG(res, irg);
434 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
436 ir_graph *irg = current_ir_graph;
438 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
439 res = optimize_node(res);
440 IRN_VRFY_IRG(res, irg);
445 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
447 ir_graph *irg = current_ir_graph;
449 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
450 res->attr.cond.kind = dense;
451 res->attr.cond.default_proj = 0;
452 res->attr.cond.pred = COND_JMP_PRED_NONE;
453 res = optimize_node(res);
454 IRN_VRFY_IRG(res, irg);
459 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
460 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
464 ir_graph *irg = current_ir_graph;
467 NEW_ARR_A(ir_node *, r_in, r_arity);
470 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
472 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
474 assert((get_unknown_type() == tp) || is_Method_type(tp));
475 set_Call_type(res, tp);
476 res->attr.call.exc.pin_state = op_pin_state_pinned;
477 res->attr.call.callee_arr = NULL;
478 res = optimize_node(res);
479 IRN_VRFY_IRG(res, irg);
484 new_bd_Return(dbg_info *db, ir_node *block,
485 ir_node *store, int arity, ir_node **in) {
489 ir_graph *irg = current_ir_graph;
492 NEW_ARR_A (ir_node *, r_in, r_arity);
494 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
495 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
496 res = optimize_node(res);
497 IRN_VRFY_IRG(res, irg);
499 } /* new_bd_Return */
502 new_bd_Load(dbg_info *db, ir_node *block,
503 ir_node *store, ir_node *adr, ir_mode *mode) {
506 ir_graph *irg = current_ir_graph;
510 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
511 res->attr.load.exc.pin_state = op_pin_state_pinned;
512 res->attr.load.load_mode = mode;
513 res->attr.load.volatility = volatility_non_volatile;
514 res->attr.load.aligned = align_is_aligned;
515 res = optimize_node(res);
516 IRN_VRFY_IRG(res, irg);
521 new_bd_Store(dbg_info *db, ir_node *block,
522 ir_node *store, ir_node *adr, ir_node *val) {
525 ir_graph *irg = current_ir_graph;
530 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
531 res->attr.store.exc.pin_state = op_pin_state_pinned;
532 res->attr.store.volatility = volatility_non_volatile;
533 res->attr.store.aligned = align_is_aligned;
534 res = optimize_node(res);
535 IRN_VRFY_IRG(res, irg);
540 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
541 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
544 ir_graph *irg = current_ir_graph;
548 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
549 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
550 res->attr.alloc.where = where;
551 res->attr.alloc.type = alloc_type;
552 res = optimize_node(res);
553 IRN_VRFY_IRG(res, irg);
558 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
559 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
562 ir_graph *irg = current_ir_graph;
567 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
568 res->attr.free.where = where;
569 res->attr.free.type = free_type;
570 res = optimize_node(res);
571 IRN_VRFY_IRG(res, irg);
576 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
577 int arity, ir_node **in, ir_entity *ent) {
581 ir_graph *irg = current_ir_graph;
582 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
584 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
587 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
590 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
592 * Sel's can select functions which should be of mode mode_P_code.
594 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
595 res->attr.sel.ent = ent;
596 res = optimize_node(res);
597 IRN_VRFY_IRG(res, irg);
602 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
603 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
604 ir_graph *irg = current_ir_graph;
605 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
607 res->attr.symc.kind = symkind;
608 res->attr.symc.sym = value;
609 res->attr.symc.tp = tp;
611 res = optimize_node(res);
612 IRN_VRFY_IRG(res, irg);
614 } /* new_bd_SymConst_type */
617 new_bd_Sync(dbg_info *db, ir_node *block) {
619 ir_graph *irg = current_ir_graph;
621 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
622 /* no need to call optimize node here, Sync are always created with no predecessors */
623 IRN_VRFY_IRG(res, irg);
628 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
629 ir_node *in[2], *res;
630 ir_graph *irg = current_ir_graph;
634 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
635 res->attr.confirm.cmp = cmp;
636 res = optimize_node(res);
637 IRN_VRFY_IRG(res, irg);
639 } /* new_bd_Confirm */
642 new_bd_Unknown(ir_mode *m) {
644 ir_graph *irg = current_ir_graph;
646 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
647 res = optimize_node(res);
649 } /* new_bd_Unknown */
652 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
655 ir_graph *irg = current_ir_graph;
657 in[0] = get_Call_ptr(call);
658 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
659 /* res->attr.callbegin.irg = irg; */
660 res->attr.callbegin.call = call;
661 res = optimize_node(res);
662 IRN_VRFY_IRG(res, irg);
664 } /* new_bd_CallBegin */
667 new_bd_EndReg(dbg_info *db, ir_node *block) {
669 ir_graph *irg = current_ir_graph;
671 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
672 set_irg_end_reg(irg, res);
673 IRN_VRFY_IRG(res, irg);
675 } /* new_bd_EndReg */
678 new_bd_EndExcept(dbg_info *db, ir_node *block) {
680 ir_graph *irg = current_ir_graph;
682 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
683 set_irg_end_except(irg, res);
684 IRN_VRFY_IRG (res, irg);
686 } /* new_bd_EndExcept */
689 new_bd_Break(dbg_info *db, ir_node *block) {
691 ir_graph *irg = current_ir_graph;
693 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
694 res = optimize_node(res);
695 IRN_VRFY_IRG(res, irg);
700 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
703 ir_graph *irg = current_ir_graph;
705 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
706 res->attr.filter.proj = proj;
707 res->attr.filter.in_cg = NULL;
708 res->attr.filter.backedge = NULL;
711 assert(get_Proj_pred(res));
712 assert(get_nodes_block(get_Proj_pred(res)));
714 res = optimize_node(res);
715 IRN_VRFY_IRG(res, irg);
717 } /* new_bd_Filter */
720 new_bd_Mux(dbg_info *db, ir_node *block,
721 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
724 ir_graph *irg = current_ir_graph;
730 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
733 res = optimize_node(res);
734 IRN_VRFY_IRG(res, irg);
739 new_bd_CopyB(dbg_info *db, ir_node *block,
740 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
743 ir_graph *irg = current_ir_graph;
749 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
751 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
752 res->attr.copyb.data_type = data_type;
753 res = optimize_node(res);
754 IRN_VRFY_IRG(res, irg);
759 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
760 ir_node *objptr, ir_type *type) {
763 ir_graph *irg = current_ir_graph;
767 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
768 res->attr.instof.type = type;
769 res = optimize_node(res);
770 IRN_VRFY_IRG(res, irg);
772 } /* new_bd_InstOf */
775 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
778 ir_graph *irg = current_ir_graph;
782 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
783 res = optimize_node(res);
784 IRN_VRFY_IRG(res, irg);
789 new_bd_Bound(dbg_info *db, ir_node *block,
790 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
793 ir_graph *irg = current_ir_graph;
799 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
800 res->attr.bound.exc.pin_state = op_pin_state_pinned;
801 res = optimize_node(res);
802 IRN_VRFY_IRG(res, irg);
807 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
809 ir_graph *irg = current_ir_graph;
811 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
812 res = optimize_node(res);
813 IRN_VRFY_IRG(res, irg);
818 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
819 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
821 ir_graph *irg = current_ir_graph;
824 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
825 res->attr.assem.pin_state = op_pin_state_pinned;
826 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
827 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
828 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
829 res->attr.assem.asm_text = asm_text;
831 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
832 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
833 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
835 res = optimize_node(res);
836 IRN_VRFY_IRG(res, irg);
840 /* --------------------------------------------- */
841 /* private interfaces, for professional use only */
842 /* --------------------------------------------- */
844 /* Constructs a Block with a fixed number of predecessors.
845 Does not set current_block. Can not be used with automatic
846 Phi node construction. */
848 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
849 ir_graph *rem = current_ir_graph;
852 current_ir_graph = irg;
853 res = new_bd_Block(db, arity, in);
854 current_ir_graph = rem;
860 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
861 ir_graph *rem = current_ir_graph;
864 current_ir_graph = irg;
865 res = new_bd_Start(db, block);
866 current_ir_graph = rem;
872 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
874 ir_graph *rem = current_ir_graph;
876 current_ir_graph = irg;
877 res = new_bd_End(db, block);
878 current_ir_graph = rem;
883 /* Creates a Phi node with all predecessors. Calling this constructor
884 is only allowed if the corresponding block is mature. */
886 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
888 ir_graph *rem = current_ir_graph;
890 current_ir_graph = irg;
891 res = new_bd_Phi(db, block,arity, in, mode);
892 current_ir_graph = rem;
898 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
900 ir_graph *rem = current_ir_graph;
902 current_ir_graph = irg;
903 res = new_bd_Const_type(db, block, mode, con, tp);
904 current_ir_graph = rem;
907 } /* new_rd_Const_type */
910 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
912 ir_graph *rem = current_ir_graph;
914 current_ir_graph = irg;
915 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
916 current_ir_graph = rem;
922 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
923 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
924 } /* new_rd_Const_long */
927 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
929 ir_graph *rem = current_ir_graph;
931 current_ir_graph = irg;
932 res = new_bd_Id(db, block, val, mode);
933 current_ir_graph = rem;
939 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
942 ir_graph *rem = current_ir_graph;
944 current_ir_graph = irg;
945 res = new_bd_Proj(db, block, arg, mode, proj);
946 current_ir_graph = rem;
952 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
955 ir_graph *rem = current_ir_graph;
957 current_ir_graph = irg;
958 res = new_bd_defaultProj(db, block, arg, max_proj);
959 current_ir_graph = rem;
962 } /* new_rd_defaultProj */
965 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
967 ir_graph *rem = current_ir_graph;
969 current_ir_graph = irg;
970 res = new_bd_Conv(db, block, op, mode, 0);
971 current_ir_graph = rem;
977 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
979 ir_graph *rem = current_ir_graph;
981 current_ir_graph = irg;
982 res = new_bd_Cast(db, block, op, to_tp);
983 current_ir_graph = rem;
989 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
991 ir_graph *rem = current_ir_graph;
993 current_ir_graph = irg;
994 res = new_bd_Tuple(db, block, arity, in);
995 current_ir_graph = rem;
1006 NEW_RD_DIVOP(DivMod)
1019 NEW_RD_BINOP(Borrow)
1021 /* creates a rd constructor for a divRL */
1022 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1023 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1026 ir_graph *rem = current_ir_graph;
1027 current_ir_graph = irg;
1028 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1029 current_ir_graph = rem;
1034 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1035 ir_node *op1, ir_node *op2) {
1037 ir_graph *rem = current_ir_graph;
1039 current_ir_graph = irg;
1040 res = new_bd_Cmp(db, block, op1, op2);
1041 current_ir_graph = rem;
1047 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1049 ir_graph *rem = current_ir_graph;
1051 current_ir_graph = irg;
1052 res = new_bd_Jmp(db, block);
1053 current_ir_graph = rem;
1059 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1061 ir_graph *rem = current_ir_graph;
1063 current_ir_graph = irg;
1064 res = new_bd_IJmp(db, block, tgt);
1065 current_ir_graph = rem;
1071 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1073 ir_graph *rem = current_ir_graph;
1075 current_ir_graph = irg;
1076 res = new_bd_Cond(db, block, c);
1077 current_ir_graph = rem;
1083 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1084 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1086 ir_graph *rem = current_ir_graph;
1088 current_ir_graph = irg;
1089 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1090 current_ir_graph = rem;
1096 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1097 ir_node *store, int arity, ir_node **in) {
1099 ir_graph *rem = current_ir_graph;
1101 current_ir_graph = irg;
1102 res = new_bd_Return(db, block, store, arity, in);
1103 current_ir_graph = rem;
1106 } /* new_rd_Return */
1109 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1110 ir_node *store, ir_node *adr, ir_mode *mode) {
1112 ir_graph *rem = current_ir_graph;
1114 current_ir_graph = irg;
1115 res = new_bd_Load(db, block, store, adr, mode);
1116 current_ir_graph = rem;
1122 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1123 ir_node *store, ir_node *adr, ir_node *val) {
1125 ir_graph *rem = current_ir_graph;
1127 current_ir_graph = irg;
1128 res = new_bd_Store(db, block, store, adr, val);
1129 current_ir_graph = rem;
1132 } /* new_rd_Store */
1135 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1136 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1138 ir_graph *rem = current_ir_graph;
1140 current_ir_graph = irg;
1141 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1142 current_ir_graph = rem;
1145 } /* new_rd_Alloc */
1148 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1149 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1151 ir_graph *rem = current_ir_graph;
1153 current_ir_graph = irg;
1154 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1155 current_ir_graph = rem;
1161 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1162 ir_node *store, ir_node *objptr, ir_entity *ent) {
1164 ir_graph *rem = current_ir_graph;
1166 current_ir_graph = irg;
1167 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1168 current_ir_graph = rem;
1171 } /* new_rd_simpleSel */
1174 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1175 int arity, ir_node **in, ir_entity *ent) {
1177 ir_graph *rem = current_ir_graph;
1179 current_ir_graph = irg;
1180 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1181 current_ir_graph = rem;
1187 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1188 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1190 ir_graph *rem = current_ir_graph;
1192 current_ir_graph = irg;
1193 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1194 current_ir_graph = rem;
1197 } /* new_rd_SymConst_type */
1200 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1201 symconst_symbol value, symconst_kind symkind) {
1202 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1203 } /* new_rd_SymConst */
1205 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1206 symconst_symbol sym;
1207 sym.entity_p = symbol;
1208 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1209 } /* new_rd_SymConst_addr_ent */
1211 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1212 symconst_symbol sym;
1213 sym.entity_p = symbol;
1214 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1215 } /* new_rd_SymConst_ofs_ent */
1217 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1218 symconst_symbol sym;
1219 sym.ident_p = symbol;
1220 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1221 } /* new_rd_SymConst_addr_name */
1223 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1224 symconst_symbol sym;
1225 sym.type_p = symbol;
1226 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1227 } /* new_rd_SymConst_type_tag */
1229 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1230 symconst_symbol sym;
1231 sym.type_p = symbol;
1232 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1233 } /* new_rd_SymConst_size */
1235 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1236 symconst_symbol sym;
1237 sym.type_p = symbol;
1238 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1239 } /* new_rd_SymConst_align */
1242 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1244 ir_graph *rem = current_ir_graph;
1247 current_ir_graph = irg;
1248 res = new_bd_Sync(db, block);
1249 current_ir_graph = rem;
1251 for (i = 0; i < arity; ++i)
1252 add_Sync_pred(res, in[i]);
1258 new_rd_Bad(ir_graph *irg) {
1259 return get_irg_bad(irg);
1263 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1265 ir_graph *rem = current_ir_graph;
1267 current_ir_graph = irg;
1268 res = new_bd_Confirm(db, block, val, bound, cmp);
1269 current_ir_graph = rem;
1272 } /* new_rd_Confirm */
1275 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1277 ir_graph *rem = current_ir_graph;
1279 current_ir_graph = irg;
1280 res = new_bd_Unknown(m);
1281 current_ir_graph = rem;
1284 } /* new_rd_Unknown */
1287 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1289 ir_graph *rem = current_ir_graph;
1291 current_ir_graph = irg;
1292 res = new_bd_CallBegin(db, block, call);
1293 current_ir_graph = rem;
1296 } /* new_rd_CallBegin */
1299 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1302 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1303 set_irg_end_reg(irg, res);
1304 IRN_VRFY_IRG(res, irg);
1306 } /* new_rd_EndReg */
1309 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1312 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1313 set_irg_end_except(irg, res);
1314 IRN_VRFY_IRG (res, irg);
1316 } /* new_rd_EndExcept */
1319 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1321 ir_graph *rem = current_ir_graph;
1323 current_ir_graph = irg;
1324 res = new_bd_Break(db, block);
1325 current_ir_graph = rem;
1328 } /* new_rd_Break */
1331 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1334 ir_graph *rem = current_ir_graph;
1336 current_ir_graph = irg;
1337 res = new_bd_Filter(db, block, arg, mode, proj);
1338 current_ir_graph = rem;
1341 } /* new_rd_Filter */
1344 new_rd_NoMem(ir_graph *irg) {
1345 return get_irg_no_mem(irg);
1346 } /* new_rd_NoMem */
1349 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1350 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1352 ir_graph *rem = current_ir_graph;
1354 current_ir_graph = irg;
1355 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1356 current_ir_graph = rem;
1361 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1362 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1364 ir_graph *rem = current_ir_graph;
1366 current_ir_graph = irg;
1367 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1368 current_ir_graph = rem;
1371 } /* new_rd_CopyB */
1374 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1375 ir_node *objptr, ir_type *type) {
1377 ir_graph *rem = current_ir_graph;
1379 current_ir_graph = irg;
1380 res = new_bd_InstOf(db, block, store, objptr, type);
1381 current_ir_graph = rem;
1384 } /* new_rd_InstOf */
1387 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1389 ir_graph *rem = current_ir_graph;
1391 current_ir_graph = irg;
1392 res = new_bd_Raise(db, block, store, obj);
1393 current_ir_graph = rem;
1396 } /* new_rd_Raise */
1398 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1399 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1401 ir_graph *rem = current_ir_graph;
1403 current_ir_graph = irg;
1404 res = new_bd_Bound(db, block, store, idx, lower, upper);
1405 current_ir_graph = rem;
1408 } /* new_rd_Bound */
1410 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1412 ir_graph *rem = current_ir_graph;
1414 current_ir_graph = irg;
1415 res = new_bd_Pin(db, block, node);
1416 current_ir_graph = rem;
1421 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1422 int arity, ir_node *in[], ir_asm_constraint *inputs,
1423 int n_outs, ir_asm_constraint *outputs,
1424 int n_clobber, ident *clobber[], ident *asm_text) {
1426 ir_graph *rem = current_ir_graph;
1428 current_ir_graph = irg;
1429 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1430 current_ir_graph = rem;
1436 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1437 return new_rd_Block(NULL, irg, arity, in);
1439 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1440 return new_rd_Start(NULL, irg, block);
1442 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1443 return new_rd_End(NULL, irg, block);
1445 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1446 return new_rd_Jmp(NULL, irg, block);
1448 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1449 return new_rd_IJmp(NULL, irg, block, tgt);
1451 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1452 return new_rd_Cond(NULL, irg, block, c);
1454 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1455 ir_node *store, int arity, ir_node **in) {
1456 return new_rd_Return(NULL, irg, block, store, arity, in);
1458 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1459 ir_mode *mode, tarval *con) {
1460 return new_rd_Const(NULL, irg, block, mode, con);
1462 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1463 ir_mode *mode, long value) {
1464 return new_rd_Const_long(NULL, irg, block, mode, value);
1466 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1467 ir_mode *mode, tarval *con, ir_type *tp) {
1468 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1470 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1471 symconst_symbol value, symconst_kind symkind) {
1472 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
1474 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1475 ir_node *objptr, ir_entity *ent) {
1476 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1478 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1479 ir_node *objptr, int n_index, ir_node **index,
1481 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1483 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1484 ir_node *callee, int arity, ir_node **in,
1486 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1488 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1489 ir_node *op1, ir_node *op2, ir_mode *mode) {
1490 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1492 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1493 ir_node *op1, ir_node *op2, ir_mode *mode) {
1494 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1496 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1497 ir_node *op, ir_mode *mode) {
1498 return new_rd_Minus(NULL, irg, block, op, mode);
1500 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1501 ir_node *op1, ir_node *op2, ir_mode *mode) {
1502 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1504 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1505 ir_node *op1, ir_node *op2, ir_mode *mode) {
1506 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1508 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1509 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1510 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1512 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1513 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1514 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1516 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1517 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1518 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1520 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1521 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1522 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1524 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1525 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1526 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1528 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1529 ir_node *op, ir_mode *mode) {
1530 return new_rd_Abs(NULL, irg, block, op, mode);
1532 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1533 ir_node *op1, ir_node *op2, ir_mode *mode) {
1534 return new_rd_And(NULL, irg, block, op1, op2, mode);
1536 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1537 ir_node *op1, ir_node *op2, ir_mode *mode) {
1538 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1540 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1541 ir_node *op1, ir_node *op2, ir_mode *mode) {
1542 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1544 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1545 ir_node *op, ir_mode *mode) {
1546 return new_rd_Not(NULL, irg, block, op, mode);
1548 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1549 ir_node *op, ir_node *k, ir_mode *mode) {
1550 return new_rd_Shl(NULL, irg, block, op, k, mode);
1552 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1553 ir_node *op, ir_node *k, ir_mode *mode) {
1554 return new_rd_Shr(NULL, irg, block, op, k, mode);
1556 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1557 ir_node *op, ir_node *k, ir_mode *mode) {
1558 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1560 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1561 ir_node *op, ir_node *k, ir_mode *mode) {
1562 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1564 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1565 ir_node *op, ir_node *k, ir_mode *mode) {
1566 return new_rd_Carry(NULL, irg, block, op, k, mode);
1568 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1569 ir_node *op, ir_node *k, ir_mode *mode) {
1570 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1572 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1573 ir_node *op1, ir_node *op2) {
1574 return new_rd_Cmp(NULL, irg, block, op1, op2);
1576 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1577 ir_node *op, ir_mode *mode) {
1578 return new_rd_Conv(NULL, irg, block, op, mode);
1580 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1581 return new_rd_Cast(NULL, irg, block, op, to_tp);
1583 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1584 ir_node **in, ir_mode *mode) {
1585 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1587 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1588 ir_node *store, ir_node *adr, ir_mode *mode) {
1589 return new_rd_Load(NULL, irg, block, store, adr, mode);
1591 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1592 ir_node *store, ir_node *adr, ir_node *val) {
1593 return new_rd_Store(NULL, irg, block, store, adr, val);
1595 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1596 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1597 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1599 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1600 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1601 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1603 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1604 return new_rd_Sync(NULL, irg, block, arity, in);
1606 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1607 ir_mode *mode, long proj) {
1608 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1610 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1612 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1614 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1615 int arity, ir_node **in) {
1616 return new_rd_Tuple(NULL, irg, block, arity, in );
1618 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1619 ir_node *val, ir_mode *mode) {
1620 return new_rd_Id(NULL, irg, block, val, mode);
1622 ir_node *new_r_Bad(ir_graph *irg) {
1623 return new_rd_Bad(irg);
1625 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1626 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1628 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1629 return new_rd_Unknown(irg, m);
1631 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1632 return new_rd_CallBegin(NULL, irg, block, callee);
1634 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1635 return new_rd_EndReg(NULL, irg, block);
1637 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1638 return new_rd_EndExcept(NULL, irg, block);
1640 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1641 return new_rd_Break(NULL, irg, block);
1643 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1644 ir_mode *mode, long proj) {
1645 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1647 ir_node *new_r_NoMem(ir_graph *irg) {
1648 return new_rd_NoMem(irg);
1650 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1651 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1652 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1654 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1655 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1656 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1658 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1660 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1662 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1663 ir_node *store, ir_node *obj) {
1664 return new_rd_Raise(NULL, irg, block, store, obj);
1666 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1667 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1668 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1670 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1671 return new_rd_Pin(NULL, irg, block, node);
1673 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1674 int arity, ir_node *in[], ir_asm_constraint *inputs,
1675 int n_outs, ir_asm_constraint *outputs,
1676 int n_clobber, ident *clobber[], ident *asm_text) {
1677 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1680 /** ********************/
1681 /** public interfaces */
1682 /** construction tools */
1686 * - create a new Start node in the current block
1688 * @return s - pointer to the created Start node
1693 new_d_Start(dbg_info *db) {
1696 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1697 op_Start, mode_T, 0, NULL);
1699 res = optimize_node(res);
1700 IRN_VRFY_IRG(res, current_ir_graph);
1705 new_d_End(dbg_info *db) {
1707 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1708 op_End, mode_X, -1, NULL);
1709 res = optimize_node(res);
1710 IRN_VRFY_IRG(res, current_ir_graph);
1715 /* Constructs a Block with a fixed number of predecessors.
1716 Does set current_block. Can be used with automatic Phi
1717 node construction. */
1719 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1722 int has_unknown = 0;
1724 res = new_bd_Block(db, arity, in);
1726 /* Create and initialize array for Phi-node construction. */
1727 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1728 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1729 current_ir_graph->n_loc);
1730 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1733 for (i = arity-1; i >= 0; i--)
1734 if (is_Unknown(in[i])) {
1739 if (!has_unknown) res = optimize_node(res);
1740 current_ir_graph->current_block = res;
1742 IRN_VRFY_IRG(res, current_ir_graph);
1747 /* ***********************************************************************/
1748 /* Methods necessary for automatic Phi node creation */
1750 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1751 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1752 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1753 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1755 Call Graph: ( A ---> B == A "calls" B)
1757 get_value mature_immBlock
1765 get_r_value_internal |
1769 new_rd_Phi0 new_rd_Phi_in
1771 * *************************************************************************** */
1773 /** Creates a Phi node with 0 predecessors. */
1774 static inline ir_node *
1775 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1778 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1779 IRN_VRFY_IRG(res, irg);
1785 * Internal constructor of a Phi node by a phi_merge operation.
1787 * @param irg the graph on which the Phi will be constructed
1788 * @param block the block in which the Phi will be constructed
1789 * @param mode the mode of the Phi node
1790 * @param in the input array of the phi node
1791 * @param ins number of elements in the input array
1792 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1793 * the value for which the new Phi is constructed
/* Internal Phi constructor used by phi_merge(): builds a Phi and folds it
   away again when all real predecessors turn out to be the same value.
   NOTE(review): several statements of this function are missing from this
   excerpt (e.g. the declaration of `i`, the loop's break/continue logic and
   the single-predecessor return path) — restore from the original source. */
1795 static inline ir_node *
1796 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1797 ir_node **in, int ins, ir_node *phi0) {
1799 ir_node *res, *known;
1801 /* Allocate a new node on the obstack. The allocation copies the in
1803 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1804 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1806 /* This loop checks whether the Phi has more than one predecessor.
1807 If so, it is a real Phi node and we break the loop. Else the
1808 Phi node merges the same definition on several paths and therefore
1809 is not needed. Don't consider Bad nodes! */
1811 for (i = ins - 1; i >= 0; --i) {
1814 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1816 /* Optimize self referencing Phis: We can't detect them yet properly, as
1817 they still refer to the Phi0 they will replace. So replace right now. */
1818 if (phi0 && in[i] == phi0)
1821 if (in[i] == res || in[i] == known || is_Bad(in[i]))
1830 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* The speculatively allocated node is freed again from the obstack. */
1833 edges_node_deleted(res, current_ir_graph);
1834 obstack_free(current_ir_graph->obst, res);
1835 if (is_Phi(known)) {
1836 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1837 order, an enclosing Phi know may get superfluous. */
1838 res = optimize_in_place_2(known);
1840 exchange(known, res);
1845 /* A undefined value, e.g., in unreachable code. */
1849 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1850 IRN_VRFY_IRG(res, irg);
1851 /* Memory Phis in endless loops must be kept alive.
1852 As we can't distinguish these easily we keep all of them alive. */
1853 if (is_Phi(res) && mode == mode_M)
1854 add_End_keepalive(get_irg_end(irg), res);
1858 } /* new_rd_Phi_in */
1861 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1863 #if PRECISE_EXC_CONTEXT
1865 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1868 * Construct a new frag_array for node n.
1869 * Copy the content from the current graph_arr of the corresponding block:
1870 * this is the current state.
1871 * Set ProjM(n) as current memory state.
1872 * Further the last entry in frag_arr of current block points to n. This
1873 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Builds a fresh frag array for fragile op n: copies the current block's
   graph_arr, installs ProjM(n) as the current memory state, and chains n
   into the block's list of fragile ops (last slot of graph_arr).
   NOTE(review): declarations of `arr`/`opt`, the is_fragile_op condition and
   the set_optimize(opt)/return lines are missing from this excerpt. */
1875 static inline ir_node **new_frag_arr(ir_node *n) {
1879 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1880 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1881 sizeof(ir_node *)*current_ir_graph->n_loc);
1883 /* turn off optimization before allocating Proj nodes, as res isn't
1885 opt = get_opt_optimize(); set_optimize(0);
1886 /* Here we rely on the fact that all frag ops have Memory as first result! */
1888 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1889 } else if (is_CopyB(n)) {
1890 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops share the same memory Proj number; the
   asserts below document that assumption. */
1892 assert((pn_Quot_M == pn_DivMod_M) &&
1893 (pn_Quot_M == pn_Div_M) &&
1894 (pn_Quot_M == pn_Mod_M) &&
1895 (pn_Quot_M == pn_Load_M) &&
1896 (pn_Quot_M == pn_Store_M) &&
1897 (pn_Quot_M == pn_Alloc_M) &&
1898 (pn_Quot_M == pn_Bound_M));
1899 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1903 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1905 } /* new_frag_arr */
1908 * Returns the frag_arr from a node.
/* Returns the frag_arr stored in the opcode-specific attribute of a
   fragile node.
   NOTE(review): the case labels (iro_Call, iro_Alloc, iro_Load, iro_Store,
   default) are missing from this excerpt — restore from the original. */
1910 static inline ir_node **get_frag_arr(ir_node *n) {
1911 switch (get_irn_opcode(n)) {
1913 return n->attr.call.exc.frag_arr;
1915 return n->attr.alloc.exc.frag_arr;
1917 return n->attr.load.exc.frag_arr;
1919 return n->attr.store.exc.frag_arr;
1921 return n->attr.except.frag_arr;
1923 } /* get_frag_arr */
/* Writes val at position pos into frag_arr and into all chained frag
   arrays of later fragile ops, but never overwrites an existing entry.
   The loop bound guards against endless recursion along the chain. */
1926 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1927 #ifdef DEBUG_libfirm
1930 for (i = 1024; i >= 0; --i)
1935 if (frag_arr[pos] == NULL)
1936 frag_arr[pos] = val;
1937 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
1938 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1939 assert(arr != frag_arr && "Endless recursion detected");
/* NOTE(review): the advance to the next chained frag array is missing
   from this excerpt. */
1944 assert(!"potential endless recursion in set_frag_value");
1945 } /* set_frag_value */
/* Looks up the value at pos as seen after fragile op cfOp; constructs a
   Phi (or Phi0 in immature blocks) when a later set_value() shadowed it.
   NOTE(review): local declarations (frag_arr, res, nin) and the surrounding
   if/else structure are partially missing from this excerpt. */
1948 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1952 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
1954 frag_arr = get_frag_arr(cfOp);
1955 res = frag_arr[pos];
1957 if (block->attr.block.graph_arr[pos] != NULL) {
1958 /* There was a set_value() after the cfOp and no get_value() before that
1959 set_value(). We must build a Phi node now. */
1960 if (block->attr.block.is_matured) {
1961 int ins = get_irn_arity(block);
1963 NEW_ARR_A(ir_node *, nin, ins);
1964 res = phi_merge(block, pos, mode, nin, ins);
1966 res = new_rd_Phi0(current_ir_graph, block, mode);
1967 res->attr.phi.u.pos = pos;
1968 res->attr.phi.next = block->attr.block.phis;
1969 block->attr.block.phis = res;
1971 assert(res != NULL);
1972 /* It's a Phi, we can write this into all graph_arrs with NULL */
1973 set_frag_value(block->attr.block.graph_arr, pos, res);
1975 res = get_r_value_internal(block, pos, mode);
1976 set_frag_value(block->attr.block.graph_arr, pos, res);
1980 } /* get_r_frag_value_internal */
1981 #endif /* PRECISE_EXC_CONTEXT */
1984 * Check whether a control flownode cf_pred represents an exception flow.
1986 * @param cf_pred the control flow node
1987 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
/* Returns non-zero iff control flow edge cf_pred out of prev_cf_op is the
   exceptional (X_except) path of a fragile operation.
   NOTE(review): the return statements are missing from this excerpt. */
1989 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
1991 * Note: all projections from a raise are "exceptional control flow" so we handle it
1992 * like a normal Jmp, because there is no "regular" one.
1993 * That's why Raise is no "fragile_op"!
1995 if (is_fragile_op(prev_cf_op)) {
1996 if (is_Proj(cf_pred)) {
1997 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
1998 /* the regular control flow, NO exception */
2001 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2004 /* Hmm, exception but not a Proj? */
2005 assert(!"unexpected condition: fragile op without a proj");
2009 } /* is_exception_flow */
2012 * Computes the predecessors for the real phi node, and then
2013 * allocates and returns this node. The routine called to allocate the
2014 * node might optimize it away and return a real value.
2015 * This function must be called with an in-array of proper size.
/* Collects the predecessors for the real Phi node at (block, pos) and
   allocates it via new_rd_Phi_in(); the constructor may fold the Phi and
   return a plain value instead.
   NOTE(review): the return type line, the declaration of `i`, several
   closing braces and the final `return res;` are missing from this
   excerpt — restore from the original source. */
2018 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2019 ir_node *prevBlock, *res, *phi0, *phi0_all;
2022 /* If this block has no value at pos create a Phi0 and remember it
2023 in graph_arr to break recursions.
2024 Else we may not set graph_arr as there a later value is remembered. */
2026 if (block->attr.block.graph_arr[pos] == NULL) {
2027 ir_graph *irg = current_ir_graph;
2029 if (block == get_irg_start_block(irg)) {
2030 /* Collapsing to Bad tarvals is no good idea.
2031 So we call a user-supplied routine here that deals with this case as
2032 appropriate for the given language. Sorrily the only help we can give
2033 here is the position.
2035 Even if all variables are defined before use, it can happen that
2036 we get to the start block, if a Cond has been replaced by a tuple
2037 (bad, jmp). In this case we call the function needlessly, eventually
2038 generating a non-existent error.
2039 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2042 if (default_initialize_local_variable != NULL) {
2043 ir_node *rem = get_cur_block();
2045 set_cur_block(block);
2046 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2050 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2051 /* We don't need to care about exception ops in the start block.
2052 There are none by definition. */
2053 return block->attr.block.graph_arr[pos];
2055 phi0 = new_rd_Phi0(irg, block, mode);
2056 block->attr.block.graph_arr[pos] = phi0;
2057 #if PRECISE_EXC_CONTEXT
2058 if (get_opt_precise_exc_context()) {
2059 /* Set graph_arr for fragile ops. Also here we should break recursion.
2060 We could choose a cyclic path through an cfop. But the recursion would
2061 break at some point. */
2062 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2068 /* This loop goes to all predecessor blocks of the block the Phi node
2069 is in and there finds the operands of the Phi node by calling
2070 get_r_value_internal. */
2071 for (i = 1; i <= ins; ++i) {
2072 ir_node *cf_pred = block->in[i];
2073 ir_node *prevCfOp = skip_Proj(cf_pred);
2075 if (is_Bad(prevCfOp)) {
2076 /* In case a Cond has been optimized we would get right to the start block
2077 with an invalid definition. */
2078 nin[i-1] = new_Bad();
2081 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2083 if (!is_Bad(prevBlock)) {
2084 #if PRECISE_EXC_CONTEXT
2085 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2086 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2087 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2090 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2092 nin[i-1] = new_Bad();
2096 /* We want to pass the Phi0 node to the constructor: this finds additional
2097 optimization possibilities.
2098 The Phi0 node either is allocated in this function, or it comes from
2099 a former call to get_r_value_internal(). In this case we may not yet
2100 exchange phi0, as this is done in mature_immBlock(). */
2102 phi0_all = block->attr.block.graph_arr[pos];
2103 if (!(is_Phi(phi0_all) &&
2104 (get_irn_arity(phi0_all) == 0) &&
2105 (get_nodes_block(phi0_all) == block)))
2111 /* After collecting all predecessors into the array nin a new Phi node
2112 with these predecessors is created. This constructor contains an
2113 optimization: If all predecessors of the Phi node are identical it
2114 returns the only operand instead of a new Phi node. */
2115 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2117 /* In case we allocated a Phi0 node at the beginning of this procedure,
2118 we need to exchange this Phi0 with the real Phi. */
2120 exchange(phi0, res);
2121 block->attr.block.graph_arr[pos] = res;
2122 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2123 only an optimization. */
2130 * This function returns the last definition of a value. In case
2131 * this value was last defined in a previous block, Phi nodes are
2132 * inserted. If the part of the firm graph containing the definition
2133 * is not yet constructed, a dummy Phi node is returned.
2135 * @param block the current block
2136 * @param pos the value number of the value searched
2137 * @param mode the mode of this value (needed for Phi construction)
/* Returns the last definition of value pos in block, inserting (dummy)
   Phi nodes as needed — the core lookup of on-the-fly SSA construction.
   NOTE(review): the return type line, the declaration of `res`/`nin`, the
   "case 2" early return and the final `return res;` are missing from this
   excerpt — restore from the original source. */
2140 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2142 /* There are 4 cases to treat.
2144 1. The block is not mature and we visit it the first time. We can not
2145 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2146 predecessors is returned. This node is added to the linked list (block
2147 attribute "phis") of the containing block to be completed when this block is
2148 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2151 2. The value is already known in this block, graph_arr[pos] is set and we
2152 visit the block the first time. We can return the value without
2153 creating any new nodes.
2155 3. The block is mature and we visit it the first time. A Phi node needs
2156 to be created (phi_merge). If the Phi is not needed, as all it's
2157 operands are the same value reaching the block through different
2158 paths, it's optimized away and the value itself is returned.
2160 4. The block is mature, and we visit it the second time. Now two
2161 subcases are possible:
2162 * The value was computed completely the last time we were here. This
2163 is the case if there is no loop. We can return the proper value.
2164 * The recursion that visited this node and set the flag did not
2165 return yet. We are computing a value in a loop and need to
2166 break the recursion. This case only happens if we visited
2167 the same block with phi_merge before, which inserted a Phi0.
2168 So we return the Phi0.
2171 /* case 4 -- already visited. */
2172 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2173 /* As phi_merge allocates a Phi0 this value is always defined. Here
2174 is the critical difference of the two algorithms. */
2175 assert(block->attr.block.graph_arr[pos]);
2176 return block->attr.block.graph_arr[pos];
2179 /* visited the first time */
2180 set_irn_visited(block, get_irg_visited(current_ir_graph));
2182 /* Get the local valid value */
2183 res = block->attr.block.graph_arr[pos];
2185 /* case 2 -- If the value is actually computed, return it. */
2189 if (block->attr.block.is_matured) { /* case 3 */
2191 /* The Phi has the same amount of ins as the corresponding block. */
2192 int ins = get_irn_arity(block);
2194 NEW_ARR_A(ir_node *, nin, ins);
2196 /* Phi merge collects the predecessors and then creates a node. */
2197 res = phi_merge(block, pos, mode, nin, ins);
2199 } else { /* case 1 */
2200 /* The block is not mature, we don't know how many in's are needed. A Phi
2201 with zero predecessors is created. Such a Phi node is called Phi0
2202 node. The Phi0 is then added to the list of Phi0 nodes in this block
2203 to be matured by mature_immBlock later.
2204 The Phi0 has to remember the pos of it's internal value. If the real
2205 Phi is computed, pos is used to update the array with the local
2207 res = new_rd_Phi0(current_ir_graph, block, mode);
2208 res->attr.phi.u.pos = pos;
2209 res->attr.phi.next = block->attr.block.phis;
2210 block->attr.block.phis = res;
2213 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2215 /* The local valid value is available now. */
2216 block->attr.block.graph_arr[pos] = res;
2219 } /* get_r_value_internal */
2221 /* ************************************************************************** */
2224 * Finalize a Block node, when all control flows are known.
2225 * Acceptable parameters are only Block nodes.
/* Finalizes a Block node once all control flow predecessors are known:
   fixes the in-array, matures all attached Phi0 nodes via phi_merge(), and
   optimizes the block in place.
   NOTE(review): the return type line and the declarations of
   `ins`, `n`, `next` and `nin` are missing from this excerpt. */
2228 mature_immBlock(ir_node *block) {
2233 assert(is_Block(block));
2234 if (!get_Block_matured(block)) {
2235 ir_graph *irg = current_ir_graph;
2237 ins = ARR_LEN(block->in) - 1;
2238 /* Fix block parameters */
2239 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2241 /* An array for building the Phi nodes. */
2242 NEW_ARR_A(ir_node *, nin, ins);
2244 /* Traverse a chain of Phi nodes attached to this block and mature
2246 for (n = block->attr.block.phis; n; n = next) {
2247 inc_irg_visited(irg);
2248 next = n->attr.phi.next;
2249 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2252 block->attr.block.is_matured = 1;
2254 /* Now, as the block is a finished Firm node, we can optimize it.
2255 Since other nodes have been allocated since the block was created
2256 we can not free the node on the obstack. Therefore we have to call
2257 optimize_in_place().
2258 Unfortunately the optimization does not change a lot, as all allocated
2259 nodes refer to the unoptimized node.
2260 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2261 block = optimize_in_place_2(block);
2262 IRN_VRFY_IRG(block, irg);
2264 } /* mature_immBlock */
2267 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2268 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2272 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2273 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2277 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2278 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2279 } /* new_d_Const_long */
2282 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2283 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2284 } /* new_d_Const_type */
2288 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2289 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2293 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2294 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2298 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2301 assert(arg->op == op_Cond);
2302 arg->attr.cond.kind = fragmentary;
2303 arg->attr.cond.default_proj = max_proj;
2304 res = new_Proj(arg, mode_X, max_proj);
2306 } /* new_d_defaultProj */
2309 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2310 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2314 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2315 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2316 } /* new_d_strictConv */
2319 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2320 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2324 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2325 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2335 * Allocate a frag array for a node if the current graph state is phase_building.
2337 * @param irn the node for which the frag array should be allocated
2338 * @param op the opcode of the (original) node, if does not match opcode of irn,
2340 * @param frag_store the address of the frag store in irn attributes, if this
2341 * address contains a value != NULL, does nothing
2343 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2344 if (get_opt_precise_exc_context()) {
2345 if ((current_ir_graph->phase_state == phase_building) &&
2346 (get_irn_op(irn) == op) && /* Could be optimized away. */
2347 !*frag_store) /* Could be a cse where the arr is already set. */ {
2348 *frag_store = new_frag_arr(irn);
2351 } /* firm_alloc_frag_arr */
/* Constructors for the division-family nodes (Quot, DivMod, Div, DivRL,
 * Mod) in the current block. Each allocates a frag array when precise
 * exception contexts are enabled, so the memory state at the (possible)
 * exception edge can be reconstructed.
 * NOTE(review): the view is missing lines (return statements, '#endif's,
 * closing braces); only comments added. */
2354 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2356 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2357 #if PRECISE_EXC_CONTEXT
2358 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2365 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2367 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2368 #if PRECISE_EXC_CONTEXT
2369 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2373 } /* new_d_DivMod */
2376 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2378 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2379 #if PRECISE_EXC_CONTEXT
2380 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
/* Div remainderless: note op_Div is passed below, not a DivRL opcode —
 * presumably new_bd_DivRL builds a Div node with a no-remainder flag;
 * TODO(review): confirm against new_bd_DivRL. */
2387 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2389 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2390 #if PRECISE_EXC_CONTEXT
2391 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2398 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2400 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2401 #if PRECISE_EXC_CONTEXT
2402 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
/* Constructs a Cmp node comparing op1 and op2 in the current block. */
2421 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2422 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
/* Constructs an unconditional Jmp out of the current block. */
2426 new_d_Jmp(dbg_info *db) {
2427 return new_bd_Jmp(db, current_ir_graph->current_block);
/* Constructs an indirect jump (IJmp) to the address node tgt. */
2431 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2432 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
/* Constructs a Cond node on condition c in the current block. */
2436 new_d_Cond(dbg_info *db, ir_node *c) {
2437 return new_bd_Cond(db, current_ir_graph->current_block, c);
/* Constructs a Call node (store, callee address, arguments, call type) in
 * the current block; allocates a frag array for precise exception
 * contexts. NOTE(review): lines missing in this view (parameter list
 * continuation, return); only comments added. */
2441 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2444 res = new_bd_Call(db, current_ir_graph->current_block,
2445 store, callee, arity, in, tp);
2446 #if PRECISE_EXC_CONTEXT
2447 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
/* Constructs a Return node with 'arity' result values. */
2454 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2455 return new_bd_Return(db, current_ir_graph->current_block,
2457 } /* new_d_Return */
/* Constructors for the memory-accessing nodes Load, Store and Alloc in
 * the current block. Each allocates a frag array when precise exception
 * contexts are enabled. NOTE(review): lines missing in this view
 * (argument continuations, returns, '#endif's); only comments added. */
2460 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2462 res = new_bd_Load(db, current_ir_graph->current_block,
2464 #if PRECISE_EXC_CONTEXT
2465 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2472 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2474 res = new_bd_Store(db, current_ir_graph->current_block,
2476 #if PRECISE_EXC_CONTEXT
2477 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2484 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2485 ir_where_alloc where) {
2487 res = new_bd_Alloc(db, current_ir_graph->current_block,
2488 store, size, alloc_type, where);
2489 #if PRECISE_EXC_CONTEXT
2490 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
/* Constructs a Free node (deallocates 'size' of type free_type at ptr). */
2497 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2498 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2499 return new_bd_Free(db, current_ir_graph->current_block,
2500 store, ptr, size, free_type, where);
/* Constructs a Sel node without array indices (simple entity select). */
2504 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2505 /* GL: objptr was called frame before. Frame was a bad choice for the name
2506 as the operand could as well be a pointer to a dynamic object. */
2508 return new_bd_Sel(db, current_ir_graph->current_block,
2509 store, objptr, 0, NULL, ent);
2510 } /* new_d_simpleSel */
/* Constructs a Sel node with n_index array indices. */
2513 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2514 return new_bd_Sel(db, current_ir_graph->current_block,
2515 store, objptr, n_index, index, sel);
/* Constructs a SymConst of given kind with an attached type. SymConsts
 * are placed in the start block, not the current block. */
2519 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2520 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2522 } /* new_d_SymConst_type */
/* Constructs a SymConst with the unknown type attached. */
2525 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2526 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2527 value, kind, firm_unknown_type);
2528 } /* new_d_SymConst */
/* Constructs a Sync node joining 'arity' memory values. */
2531 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2532 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
/* (fragment of new_d_Bad — its header is missing from this view) */
2538 return _new_d_Bad();
/* Constructs a Confirm node: asserts that val 'cmp' bound holds. */
2542 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2543 return new_bd_Confirm(db, current_ir_graph->current_block,
2545 } /* new_d_Confirm */
/* Constructs the (block-less) Unknown node of mode m. */
2548 new_d_Unknown(ir_mode *m) {
2549 return new_bd_Unknown(m);
2550 } /* new_d_Unknown */
/* Interprocedural-view and exception-region constructors, all built in
 * the current block. */
2553 new_d_CallBegin(dbg_info *db, ir_node *call) {
2554 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2555 } /* new_d_CallBegin */
2558 new_d_EndReg(dbg_info *db) {
2559 return new_bd_EndReg(db, current_ir_graph->current_block);
2560 } /* new_d_EndReg */
2563 new_d_EndExcept(dbg_info *db) {
2564 return new_bd_EndExcept(db, current_ir_graph->current_block);
2565 } /* new_d_EndExcept */
2568 new_d_Break(dbg_info *db) {
2569 return new_bd_Break(db, current_ir_graph->current_block);
/* Constructs a Filter node (interprocedural Proj replacement). */
2573 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2574 return new_bd_Filter(db, current_ir_graph->current_block,
2576 } /* new_d_Filter */
/* Returns the singleton NoMem node (parentheses defeat the macro). */
2579 (new_d_NoMem)(void) {
2580 return _new_d_NoMem();
/* Constructs a Mux node: selects ir_false or ir_true depending on sel. */
2584 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2585 ir_node *ir_true, ir_mode *mode) {
2586 return new_bd_Mux(db, current_ir_graph->current_block,
2587 sel, ir_false, ir_true, mode);
/* Constructs a CopyB (block copy) node; allocates a frag array for
 * precise exception contexts. */
2590 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2591 ir_node *dst, ir_node *src, ir_type *data_type) {
2593 res = new_bd_CopyB(db, current_ir_graph->current_block,
2594 store, dst, src, data_type);
2595 #if PRECISE_EXC_CONTEXT
2596 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
/* Constructs an InstOf node (dynamic type test of objptr against type). */
2602 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2603 return new_bd_InstOf(db, current_ir_graph->current_block,
2604 store, objptr, type);
2605 } /* new_d_InstOf */
/* Constructs a Raise node (throws exception object obj). */
2608 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2609 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
/* Constructs a Bound node (checks lower <= idx < upper); allocates a
 * frag array for precise exception contexts. */
2612 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2613 ir_node *idx, ir_node *lower, ir_node *upper) {
2615 res = new_bd_Bound(db, current_ir_graph->current_block,
2616 store, idx, lower, upper);
2617 #if PRECISE_EXC_CONTEXT
2618 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
/* Constructs a Pin node (pins the value of node to the current block). */
2624 new_d_Pin(dbg_info *db, ir_node *node) {
2625 return new_bd_Pin(db, current_ir_graph->current_block, node);
/* Constructs an ASM node (inline assembler) in the current block. */
2629 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2630 int n_outs, ir_asm_constraint *outputs,
2631 int n_clobber, ident *clobber[], ident *asm_text) {
2632 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2635 /* ********************************************************************* */
2636 /* Comfortable interface with automatic Phi node construction. */
2637 /* (Uses also constructors of ?? interface, except new_Block. */
2638 /* ********************************************************************* */
2640 /* Block construction */
2641 /* immature Block without predecessors */
/* Creates an immature Block (no predecessors yet; in-array has dynamic
 * length -1 so predecessors can be appended with add_immBlock_pred) and
 * makes it the current block. The graph_arr of length n_loc is allocated
 * and zeroed for the automatic Phi construction. The block must later be
 * matured; until then it may not be optimized. */
2643 new_d_immBlock(dbg_info *db) {
2646 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2647 /* creates a new dynamic in-array as length of in is -1 */
2648 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2649 current_ir_graph->current_block = res;
2651 /* macroblock head */
2654 res->attr.block.is_matured = 0;
2655 res->attr.block.is_dead = 0;
2656 res->attr.block.is_mb_head = 1;
2657 res->attr.block.has_label = 0;
2658 res->attr.block.irg = current_ir_graph;
2659 res->attr.block.backedge = NULL;
2660 res->attr.block.in_cg = NULL;
2661 res->attr.block.cg_backedge = NULL;
2662 res->attr.block.extblk = NULL;
2663 res->attr.block.region = NULL;
2664 res->attr.block.mb_depth = 0;
2665 res->attr.block.label = 0;
2667 set_Block_block_visited(res, 0);
2669 /* Create and initialize array for Phi-node construction. */
2670 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2671 current_ir_graph->n_loc);
2672 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2674 /* Immature block may not be optimized! */
2675 IRN_VRFY_IRG(res, current_ir_graph);
2678 } /* new_d_immBlock */
/* Convenience wrapper: immature Block without debug info. */
2681 new_immBlock(void) {
2682 return new_d_immBlock(NULL);
2683 } /* new_immBlock */
2685 /* immature PartBlock with its predecessors */
/* Creates an immature partial block chained behind the block of pred_jmp:
 * it shares that block's macroblock head (in[0]), is not itself a
 * macroblock head, and sits one mb_depth level deeper. */
2687 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2688 ir_node *res = new_d_immBlock(db);
2689 ir_node *blk = get_nodes_block(pred_jmp);
/* in[0] is the macroblock header — inherit it from the predecessor block */
2691 res->in[0] = blk->in[0];
2692 assert(res->in[0] != NULL);
2693 add_immBlock_pred(res, pred_jmp);
2695 res->attr.block.is_mb_head = 0;
2696 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2699 } /* new_d_immPartBlock */
/* Convenience wrapper: immature PartBlock without debug info. */
2702 new_immPartBlock(ir_node *pred_jmp) {
2703 return new_d_immPartBlock(NULL, pred_jmp);
2704 } /* new_immPartBlock */
2706 /* add an edge to a jmp/control flow node */
/* Appends control-flow predecessor jmp to an immature block. Only legal
 * before the block is matured, and only on macroblock heads. */
2708 add_immBlock_pred(ir_node *block, ir_node *jmp) {
/* ARR_LEN counts in[0] (the macroblock header), hence the -1 to get the
   index of the new predecessor edge */
2709 int n = ARR_LEN(block->in) - 1;
2711 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2712 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2713 assert(is_ir_node(jmp));
2715 ARR_APP1(ir_node *, block->in, jmp);
2717 hook_set_irn_n(block, n, jmp, NULL);
2718 } /* add_immBlock_pred */
2720 /* changing the current block */
/* Makes target the current block of the current graph; subsequent
 * constructors place nodes there. */
2722 set_cur_block(ir_node *target) {
2723 current_ir_graph->current_block = target;
2724 } /* set_cur_block */
2726 /* ************************ */
2727 /* parameter administration */
2729 /* get a value from the parameter array from the current block by its index */
/* Looks up value number 'pos' (offset by 1: slot 0 is the memory state)
 * in the current block, possibly constructing Phi nodes on the way. */
2731 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2732 ir_graph *irg = current_ir_graph;
2733 assert(get_irg_phase_state(irg) == phase_building);
/* fresh visited epoch for the recursive walk in get_r_value_internal */
2734 inc_irg_visited(irg);
2737 return get_r_value_internal(irg->current_block, pos + 1, mode);
2740 /* get a value from the parameter array from the current block by its index */
2742 get_value(int pos, ir_mode *mode) {
2743 return get_d_value(NULL, pos, mode);
2746 /* set a value at position pos in the parameter array from the current block */
/* Stores value as value number 'pos' in the current block's graph_arr
 * (slot pos+1; slot 0 holds the memory state). */
2748 set_value(int pos, ir_node *value) {
2749 ir_graph *irg = current_ir_graph;
2750 assert(get_irg_phase_state(irg) == phase_building);
2751 assert(pos+1 < irg->n_loc);
2752 assert(is_ir_node(value));
2753 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2756 /* Find the value number for a node in the current block.*/
/* Linear search over graph_arr, skipping slot 0 (the memory state).
 * NOTE(review): lines missing in this view (declaration of i, returns). */
2758 find_value(ir_node *value) {
2760 ir_node *bl = current_ir_graph->current_block;
2762 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2763 if (bl->attr.block.graph_arr[i] == value)
2768 /* get the current store */
/* Returns the current memory state (value slot 0, mode_M) of the current
 * block, constructing Phis if required.
 * NOTE(review): the function header line is missing from this view. */
2771 ir_graph *irg = current_ir_graph;
2773 assert(get_irg_phase_state(irg) == phase_building);
2774 /* GL: one could call get_value instead */
2775 inc_irg_visited(irg);
2776 return get_r_value_internal(irg->current_block, 0, mode_M);
2779 /* set the current store: handles automatic Sync construction for Load nodes */
2781 set_store(ir_node *store) {
2782 ir_node *load, *pload, *pred, *in[2];
2784 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2785 /* Beware: due to dead code elimination, a store might become a Bad node even in
2786 the construction phase. */
2787 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2789 if (get_opt_auto_create_sync()) {
2790 /* handle non-volatile Load nodes by automatically creating Sync's */
2791 load = skip_Proj(store);
2792 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2793 pred = get_Load_mem(load);
2795 if (is_Sync(pred)) {
2796 /* a Load after a Sync: move it up */
2797 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2799 set_Load_mem(load, get_memop_mem(mem));
2800 add_Sync_pred(pred, store);
/* else-branch: predecessor is not a Sync */
2803 pload = skip_Proj(pred);
2804 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2805 /* a Load after a Load: create a new Sync */
2806 set_Load_mem(load, get_Load_mem(pload));
/* NOTE(review): the lines filling in[0]/in[1] are missing from this view */
2810 store = new_Sync(2, in);
/* finally publish 'store' as the current memory state (slot 0) */
2815 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Adds ka to the keep-alive edges of the End node so optimizations do not
 * remove it as dead code. */
2819 keep_alive(ir_node *ka) {
2820 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2823 /* --- Useful access routines --- */
2824 /* Returns the current block of the current graph. To set the current
2825 block use set_cur_block. */
2826 ir_node *get_cur_block(void) {
2827 return get_irg_current_block(current_ir_graph);
2828 } /* get_cur_block */
2830 /* Returns the frame type of the current graph */
2831 ir_type *get_cur_frame_type(void) {
2832 return get_irg_frame_type(current_ir_graph);
2833 } /* get_cur_frame_type */
2836 /* ********************************************************************* */
2839 /* call once for each run of the library */
/* Installs the language-dependent callback used to initialize local
 * variables that are read before being set. */
2841 firm_init_cons(uninitialized_local_variable_func_t *func) {
2842 default_initialize_local_variable = func;
2843 } /* firm_init_cons */
/* Finalizes construction of every graph in the program and advances the
 * program's phase state to phase_high. */
2846 irp_finalize_cons(void) {
2848 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2849 irg_finalize_cons(get_irp_irg(i));
2851 irp->phase_state = phase_high;
2852 } /* irp_finalize_cons */
/* --- Convenience constructors without debug info: each forwards to the
 * corresponding new_d_* constructor with db == NULL. --- */
2855 ir_node *new_Block(int arity, ir_node **in) {
2856 return new_d_Block(NULL, arity, in);
2858 ir_node *new_Start(void) {
2859 return new_d_Start(NULL);
2861 ir_node *new_End(void) {
2862 return new_d_End(NULL);
2864 ir_node *new_Jmp(void) {
2865 return new_d_Jmp(NULL);
2867 ir_node *new_IJmp(ir_node *tgt) {
2868 return new_d_IJmp(NULL, tgt);
2870 ir_node *new_Cond(ir_node *c) {
2871 return new_d_Cond(NULL, c);
2873 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2874 return new_d_Return(NULL, store, arity, in);
2876 ir_node *new_Const(ir_mode *mode, tarval *con) {
2877 return new_d_Const(NULL, mode, con);
2880 ir_node *new_Const_long(ir_mode *mode, long value) {
2881 return new_d_Const_long(NULL, mode, value);
/* derives the mode from the attached type */
2884 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2885 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2888 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2889 return new_d_SymConst_type(NULL, mode, value, kind, type);
2891 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2892 return new_d_SymConst(NULL, mode, value, kind);
2894 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2895 return new_d_simpleSel(NULL, store, objptr, ent);
2897 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2899 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2901 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2903 return new_d_Call(NULL, store, callee, arity, in, tp);
/* --- Arithmetic/logic convenience constructors (NULL debug info). --- */
2905 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2906 return new_d_Add(NULL, op1, op2, mode);
2908 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2909 return new_d_Sub(NULL, op1, op2, mode);
2911 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2912 return new_d_Minus(NULL, op, mode);
2914 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2915 return new_d_Mul(NULL, op1, op2, mode);
2917 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2918 return new_d_Mulh(NULL, op1, op2, mode);
2920 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2921 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2923 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2924 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2926 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2927 return new_d_Div(NULL, memop, op1, op2, mode, state);
2929 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2930 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
2932 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2933 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2935 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2936 return new_d_Abs(NULL, op, mode);
2938 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2939 return new_d_And(NULL, op1, op2, mode);
2941 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
2942 return new_d_Or(NULL, op1, op2, mode);
2944 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
2945 return new_d_Eor(NULL, op1, op2, mode);
2947 ir_node *new_Not(ir_node *op, ir_mode *mode) {
2948 return new_d_Not(NULL, op, mode);
2950 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
2951 return new_d_Shl(NULL, op, k, mode);
2953 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
2954 return new_d_Shr(NULL, op, k, mode);
2956 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
2957 return new_d_Shrs(NULL, op, k, mode);
2959 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
2960 return new_d_Rotl(NULL, op, k, mode);
2962 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
2963 return new_d_Carry(NULL, op1, op2, mode);
2965 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
2966 return new_d_Borrow(NULL, op1, op2, mode);
2968 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
2969 return new_d_Cmp(NULL, op1, op2);
/* --- Conversion, memory and tuple convenience constructors (NULL debug
 * info). --- */
2971 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
2972 return new_d_Conv(NULL, op, mode);
2974 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
2975 return new_d_strictConv(NULL, op, mode);
2977 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
2978 return new_d_Cast(NULL, op, to_tp);
2980 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
2981 return new_d_Phi(NULL, arity, in, mode);
2983 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
2984 return new_d_Load(NULL, store, addr, mode);
2986 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
2987 return new_d_Store(NULL, store, addr, val);
2989 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
2990 ir_where_alloc where) {
2991 return new_d_Alloc(NULL, store, size, alloc_type, where);
2993 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
2994 ir_type *free_type, ir_where_alloc where) {
2995 return new_d_Free(NULL, store, ptr, size, free_type, where);
2997 ir_node *new_Sync(int arity, ir_node *in[]) {
2998 return new_d_Sync(NULL, arity, in);
3000 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3001 return new_d_Proj(NULL, arg, mode, proj);
3003 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3004 return new_d_defaultProj(NULL, arg, max_proj);
3006 ir_node *new_Tuple(int arity, ir_node **in) {
3007 return new_d_Tuple(NULL, arity, in);
3009 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3010 return new_d_Id(NULL, val, mode);
/* --- Remaining convenience constructors (NULL debug info).
 * NOTE(review): the body of new_Bad is missing from this view. --- */
3012 ir_node *new_Bad(void) {
3015 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3016 return new_d_Confirm(NULL, val, bound, cmp);
3018 ir_node *new_Unknown(ir_mode *m) {
3019 return new_d_Unknown(m);
3021 ir_node *new_CallBegin(ir_node *callee) {
3022 return new_d_CallBegin(NULL, callee);
3024 ir_node *new_EndReg(void) {
3025 return new_d_EndReg(NULL);
3027 ir_node *new_EndExcept(void) {
3028 return new_d_EndExcept(NULL);
3030 ir_node *new_Break(void) {
3031 return new_d_Break(NULL);
3033 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3034 return new_d_Filter(NULL, arg, mode, proj);
3036 ir_node *new_NoMem(void) {
3037 return new_d_NoMem();
3039 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3040 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3042 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3043 return new_d_CopyB(NULL, store, dst, src, data_type);
3045 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3046 return new_d_InstOf(NULL, store, objptr, ent);
3048 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3049 return new_d_Raise(NULL, store, obj);
3051 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3052 return new_d_Bound(NULL, store, idx, lower, upper);
3054 ir_node *new_Pin(ir_node *node) {
3055 return new_d_Pin(NULL, node);
3057 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3058 int n_outs, ir_asm_constraint *outputs,
3059 int n_clobber, ident *clobber[], ident *asm_text) {
3060 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3063 /* create a new anchor node */
/* The Anchor is block-less (NULL block) and starts with all of its
 * anchor_last inputs zeroed; they are filled in later. */
3064 ir_node *new_Anchor(ir_graph *irg) {
3065 ir_node *in[anchor_last];
3066 memset(in, 0, sizeof(in));
3067 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);