2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
/*
 * NOTE(review): this dump is missing many lines of the original file; the
 * leading number on each line is a remnant of its original line numbering,
 * so the fragments below are not compilable as shown.
 * The two IRN_VRFY_IRG definitions presumably sit on the two arms of an
 * elided #ifdef NDEBUG / #else -- TODO confirm against the complete source.
 */
49 #if USE_EXPLICIT_PHI_IN_STACK
50 /* A stack needed for the automatic Phi node construction in constructor
51 Phi_in. Redefinition in irgraph.c!! */
56 typedef struct Phi_in_stack Phi_in_stack;
59 /* when we need verifying */
61 # define IRN_VRFY_IRG(res, irg)
63 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
/* Frontend-installable callback for initializing local variables that are
   read before being written; NULL until a frontend sets it. */
67 * Language dependent variable initialization callback.
69 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/*
 * Macro generators for node constructors:
 *   NEW_BD_*  -- build the node in a given block on current_ir_graph
 *                ("bd" = block + debug info), then locally optimize and
 *                verify it;
 *   NEW_RD_*  -- like bd, but temporarily switch current_ir_graph to the
 *                caller-supplied irg and restore it afterwards;
 *   NEW_D_*   -- build in current_ir_graph->current_block.
 * NOTE(review): many lines of each macro body are elided in this dump; no
 * comments are inserted inside the bodies because every surviving line
 * still ends with a '\' continuation.
 */
71 /* creates a bd constructor for a binop */
72 #define NEW_BD_BINOP(instr) \
74 new_bd_##instr(dbg_info *db, ir_node *block, \
75 ir_node *op1, ir_node *op2, ir_mode *mode) \
79 ir_graph *irg = current_ir_graph; \
82 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
83 res = optimize_node(res); \
84 IRN_VRFY_IRG(res, irg); \
88 /* creates a bd constructor for an unop */
89 #define NEW_BD_UNOP(instr) \
91 new_bd_##instr(dbg_info *db, ir_node *block, \
92 ir_node *op, ir_mode *mode) \
95 ir_graph *irg = current_ir_graph; \
96 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
97 res = optimize_node(res); \
98 IRN_VRFY_IRG(res, irg); \
102 /* creates a bd constructor for an divop */
103 #define NEW_BD_DIVOP(instr) \
105 new_bd_##instr(dbg_info *db, ir_node *block, \
106 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
110 ir_graph *irg = current_ir_graph; \
114 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
115 res->attr.divmod.exc.pin_state = state; \
116 res->attr.divmod.res_mode = mode; \
117 res->attr.divmod.no_remainder = 0; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for an divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
161 current_ir_graph = rem; \
165 /* creates a d constructor for an binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
/* --- bd-level control-flow constructors: Block, Start, End, Phi --- */
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Cannot be used with automatic
183 * Phi node construction.
/* Blocks are created with a NULL block link and all block attributes
   zero-initialized / defaulted; the block is marked matured immediately. */
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.has_label = 0;
198 res->attr.block.irg = irg;
199 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
200 res->attr.block.in_cg = NULL;
201 res->attr.block.cg_backedge = NULL;
202 res->attr.block.extblk = NULL;
203 res->attr.block.mb_depth = 0;
204 res->attr.block.label = 0;
206 set_Block_matured(res, 1);
207 set_Block_block_visited(res, 0);
209 IRN_VRFY_IRG(res, irg);
/* Start node: mode_T, no predecessors. */
214 new_bd_Start(dbg_info *db, ir_node *block) {
216 ir_graph *irg = current_ir_graph;
218 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
220 IRN_VRFY_IRG(res, irg);
/* End node: arity -1 -- presumably a dynamic predecessor array, so
   keep-alive edges can be appended later (see add_End_keepalive below). */
225 new_bd_End(dbg_info *db, ir_node *block) {
227 ir_graph *irg = current_ir_graph;
229 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
231 IRN_VRFY_IRG(res, irg);
236 * Creates a Phi node with all predecessors. Calling this constructor
237 * is only allowed if the corresponding block is mature.
240 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
242 ir_graph *irg = current_ir_graph;
246 /* Don't assert that block matured: the use of this constructor is strongly
248 if (get_Block_matured(block))
249 assert(get_irn_arity(block) == arity);
251 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
253 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
/* Scan for Unknown inputs; Phis with Unknown inputs are not optimized. */
255 for (i = arity - 1; i >= 0; --i)
256 if (is_Unknown(in[i])) {
261 if (!has_unknown) res = optimize_node(res);
262 IRN_VRFY_IRG(res, irg);
264 /* Memory Phis in endless loops must be kept alive.
265 As we can't distinguish these easily we keep all of them alive. */
266 if (is_Phi(res) && mode == mode_M)
267 add_End_keepalive(get_irg_end(irg), res);
/* Const: note the node is placed in the START block of the graph, not in
   the caller's `block` argument (the parameter is ignored here). */
272 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
274 ir_graph *irg = current_ir_graph;
277 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
278 res->attr.con.tv = con;
279 set_Const_type(res, tp); /* Call method because of complex assertion. */
280 res = optimize_node (res);
281 assert(get_Const_type(res) == tp);
282 IRN_VRFY_IRG(res, irg);
285 } /* new_bd_Const_type */
/* Const with the unknown type: delegates to the rd-level typed variant. */
288 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
289 ir_graph *irg = current_ir_graph;
291 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Const from a host `long`, converted to a tarval of the given mode. */
295 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
296 ir_graph *irg = current_ir_graph;
298 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
299 } /* new_bd_Const_long */
/* Id node: forwards `val`; usually optimized away by optimize_node. */
302 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
304 ir_graph *irg = current_ir_graph;
306 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
307 res = optimize_node(res);
308 IRN_VRFY_IRG(res, irg);
/* Proj node; the projection-number parameter is on a line elided from
   this dump. */
313 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
316 ir_graph *irg = current_ir_graph;
318 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
319 res->attr.proj = proj;
322 assert(get_Proj_pred(res));
323 assert(get_nodes_block(get_Proj_pred(res)));
325 res = optimize_node(res);
327 IRN_VRFY_IRG(res, irg);
/* Default Proj of a Cond: marks the Cond fragmentary and records the
   default projection number before building a mode_X Proj. */
332 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
335 ir_graph *irg = current_ir_graph;
337 assert(arg->op == op_Cond);
338 arg->attr.cond.kind = fragmentary;
339 arg->attr.cond.default_proj = max_proj;
340 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
342 } /* new_bd_defaultProj */
/* Conv node; strict_flag is stored in attr.conv.strict. */
345 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
347 ir_graph *irg = current_ir_graph;
349 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
350 res->attr.conv.strict = strict_flag;
351 res = optimize_node(res);
352 IRN_VRFY_IRG(res, irg);
/* Cast node: keeps the operand's mode, only the (atomic) type changes. */
357 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
359 ir_graph *irg = current_ir_graph;
361 assert(is_atomic_type(to_tp));
363 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
364 res->attr.cast.totype = to_tp;
365 res = optimize_node(res);
366 IRN_VRFY_IRG(res, irg);
/* Tuple node: mode_T aggregate of `arity` values. */
371 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
373 ir_graph *irg = current_ir_graph;
375 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
376 res = optimize_node (res);
377 IRN_VRFY_IRG(res, irg);
402 /** Creates a remainderless Div node. */
/* Same shape as the NEW_BD_DIVOP expansion but with no_remainder = 1. */
403 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
404 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
408 ir_graph *irg = current_ir_graph;
412 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
413 res->attr.divmod.exc.pin_state = state;
414 res->attr.divmod.res_mode = mode;
415 res->attr.divmod.no_remainder = 1;
416 res = optimize_node(res);
417 IRN_VRFY_IRG(res, irg);
/* Cmp node: mode_T; results are selected via Projs. */
422 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
425 ir_graph *irg = current_ir_graph;
428 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
429 res = optimize_node(res);
430 IRN_VRFY_IRG(res, irg);
/* Unconditional jump out of `block`. */
435 new_bd_Jmp(dbg_info *db, ir_node *block) {
437 ir_graph *irg = current_ir_graph;
439 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
440 res = optimize_node(res);
441 IRN_VRFY_IRG(res, irg);
/* Indirect jump to the address computed by `tgt`. */
446 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
448 ir_graph *irg = current_ir_graph;
450 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
451 res = optimize_node(res);
452 IRN_VRFY_IRG(res, irg);
/* Cond node on selector `c`; starts as a dense jump table with no
   default projection and no branch prediction. */
457 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
459 ir_graph *irg = current_ir_graph;
461 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
462 res->attr.cond.kind = dense;
463 res->attr.cond.default_proj = 0;
464 res->attr.cond.pred = COND_JMP_PRED_NONE;
465 res = optimize_node(res);
466 IRN_VRFY_IRG(res, irg);
/* Call node: inputs are [memory, callee, arguments...]; the argument
   array is copied behind the two fixed inputs (hence &r_in[2]). */
471 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
472 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
476 ir_graph *irg = current_ir_graph;
479 NEW_ARR_A(ir_node *, r_in, r_arity);
482 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
484 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
486 assert((get_unknown_type() == tp) || is_Method_type(tp));
487 set_Call_type(res, tp);
488 res->attr.call.exc.pin_state = op_pin_state_pinned;
489 res->attr.call.callee_arr = NULL;
490 res = optimize_node(res);
491 IRN_VRFY_IRG(res, irg);
/* Return node: inputs are [memory, result values...] (&r_in[1]). */
496 new_bd_Return(dbg_info *db, ir_node *block,
497 ir_node *store, int arity, ir_node **in) {
501 ir_graph *irg = current_ir_graph;
504 NEW_ARR_A (ir_node *, r_in, r_arity);
506 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
507 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
508 res = optimize_node(res);
509 IRN_VRFY_IRG(res, irg);
511 } /* new_bd_Return */
/* Load: pinned, non-volatile, aligned by default; loaded mode recorded
   in attr.load.load_mode. */
514 new_bd_Load(dbg_info *db, ir_node *block,
515 ir_node *store, ir_node *adr, ir_mode *mode) {
518 ir_graph *irg = current_ir_graph;
522 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
523 res->attr.load.exc.pin_state = op_pin_state_pinned;
524 res->attr.load.load_mode = mode;
525 res->attr.load.volatility = volatility_non_volatile;
526 res->attr.load.aligned = align_is_aligned;
527 res = optimize_node(res);
528 IRN_VRFY_IRG(res, irg);
/* Store: pinned, non-volatile, aligned by default. */
533 new_bd_Store(dbg_info *db, ir_node *block,
534 ir_node *store, ir_node *adr, ir_node *val) {
537 ir_graph *irg = current_ir_graph;
542 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
543 res->attr.store.exc.pin_state = op_pin_state_pinned;
544 res->attr.store.volatility = volatility_non_volatile;
545 res->attr.store.aligned = align_is_aligned;
546 res = optimize_node(res);
547 IRN_VRFY_IRG(res, irg);
/* Alloc: allocates `size` bytes of `alloc_type` on heap or stack
   (`where`); pinned. */
552 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
553 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
556 ir_graph *irg = current_ir_graph;
560 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
561 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
562 res->attr.alloc.where = where;
563 res->attr.alloc.type = alloc_type;
564 res = optimize_node(res);
565 IRN_VRFY_IRG(res, irg);
/* Free: releases `size` bytes at `ptr`; result is only the memory. */
570 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
571 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
574 ir_graph *irg = current_ir_graph;
579 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
580 res->attr.free.where = where;
581 res->attr.free.type = free_type;
582 res = optimize_node(res);
583 IRN_VRFY_IRG(res, irg);
/* Sel: entity selection; result mode is mode_P_code for method entities,
   mode_P_data otherwise.  Index inputs are copied behind [memory, objptr]. */
588 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
589 int arity, ir_node **in, ir_entity *ent) {
593 ir_graph *irg = current_ir_graph;
594 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
596 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
599 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
602 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
604 * Sel's can select functions which should be of mode mode_P_code.
606 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
607 res->attr.sel.ent = ent;
608 res = optimize_node(res);
609 IRN_VRFY_IRG(res, irg);
/* SymConst: symbolic constant (entity address, type tag/size/align, ...)
   selected by `symkind`; carries an optional source type `tp`. */
614 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
615 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
616 ir_graph *irg = current_ir_graph;
617 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
619 res->attr.symc.kind = symkind;
620 res->attr.symc.sym = value;
621 res->attr.symc.tp = tp;
623 res = optimize_node(res);
624 IRN_VRFY_IRG(res, irg);
626 } /* new_bd_SymConst_type */
/* Sync: mode_M join of memory values; built empty (arity -1, dynamic
   predecessor array) and filled via add_Sync_pred by the rd wrapper. */
629 new_bd_Sync(dbg_info *db, ir_node *block) {
631 ir_graph *irg = current_ir_graph;
633 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
634 /* no need to call optimize node here, Sync are always created with no predecessors */
635 IRN_VRFY_IRG(res, irg);
/* Confirm: asserts `val` <cmp> `bound`; has the mode of `val`. */
640 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
641 ir_node *in[2], *res;
642 ir_graph *irg = current_ir_graph;
646 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
647 res->attr.confirm.cmp = cmp;
648 res = optimize_node(res);
649 IRN_VRFY_IRG(res, irg);
651 } /* new_bd_Confirm */
/* Unknown: placed in the start block; no debug info. */
654 new_bd_Unknown(ir_mode *m) {
656 ir_graph *irg = current_ir_graph;
658 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
659 res = optimize_node(res);
661 } /* new_bd_Unknown */
/* CallBegin (interprocedural view): references the Call's callee pointer. */
664 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
667 ir_graph *irg = current_ir_graph;
669 in[0] = get_Call_ptr(call);
670 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
671 /* res->attr.callbegin.irg = irg; */
672 res->attr.callbegin.call = call;
673 res = optimize_node(res);
674 IRN_VRFY_IRG(res, irg);
676 } /* new_bd_CallBegin */
/* EndReg: registered on the graph as the regular interprocedural end. */
679 new_bd_EndReg(dbg_info *db, ir_node *block) {
681 ir_graph *irg = current_ir_graph;
683 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
684 set_irg_end_reg(irg, res);
685 IRN_VRFY_IRG(res, irg);
687 } /* new_bd_EndReg */
/* EndExcept: registered as the exceptional interprocedural end. */
690 new_bd_EndExcept(dbg_info *db, ir_node *block) {
692 ir_graph *irg = current_ir_graph;
694 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
695 set_irg_end_except(irg, res);
696 IRN_VRFY_IRG (res, irg);
698 } /* new_bd_EndExcept */
/* Break: mode_X control flow node with no predecessors. */
701 new_bd_Break(dbg_info *db, ir_node *block) {
703 ir_graph *irg = current_ir_graph;
705 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
706 res = optimize_node(res);
707 IRN_VRFY_IRG(res, irg);
/* Filter: interprocedural Proj-like node; in_cg/backedge start NULL. */
712 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
715 ir_graph *irg = current_ir_graph;
717 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
718 res->attr.filter.proj = proj;
719 res->attr.filter.in_cg = NULL;
720 res->attr.filter.backedge = NULL;
723 assert(get_Proj_pred(res));
724 assert(get_nodes_block(get_Proj_pred(res)));
726 res = optimize_node(res);
727 IRN_VRFY_IRG(res, irg);
729 } /* new_bd_Filter */
/* Mux: selects ir_false or ir_true depending on `sel`. */
732 new_bd_Mux(dbg_info *db, ir_node *block,
733 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
736 ir_graph *irg = current_ir_graph;
742 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
745 res = optimize_node(res);
746 IRN_VRFY_IRG(res, irg);
/* CopyB: block copy of `data_type` from src to dst; pinned. */
751 new_bd_CopyB(dbg_info *db, ir_node *block,
752 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
755 ir_graph *irg = current_ir_graph;
761 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
763 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
764 res->attr.copyb.data_type = data_type;
765 res = optimize_node(res);
766 IRN_VRFY_IRG(res, irg);
/*
 * Constructs an InstOf node: dynamic type test of `objptr` against `type`.
 * BUG FIX (review): the node was created with op_Sel, but the line below
 * writes res->attr.instof.type -- the attribute union member that belongs
 * to InstOf nodes.  new_bd_Sel above writes attr.sel.ent for op_Sel nodes,
 * so creating a Sel here would alias the wrong union member.  Create an
 * op_InstOf node so the opcode matches the attribute being set.
 */
771 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
772 ir_node *objptr, ir_type *type) {
775 ir_graph *irg = current_ir_graph;
779 res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
780 res->attr.instof.type = type;
781 res = optimize_node(res);
782 IRN_VRFY_IRG(res, irg);
784 } /* new_bd_InstOf */
/* Raise: throws exception object `obj`; inputs [memory, obj]. */
787 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
790 ir_graph *irg = current_ir_graph;
794 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
795 res = optimize_node(res);
796 IRN_VRFY_IRG(res, irg);
/* Bound: checks lower <= idx < upper; pinned. */
801 new_bd_Bound(dbg_info *db, ir_node *block,
802 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
805 ir_graph *irg = current_ir_graph;
811 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
812 res->attr.bound.exc.pin_state = op_pin_state_pinned;
813 res = optimize_node(res);
814 IRN_VRFY_IRG(res, irg);
/* Pin: fixes `node` to `block`; keeps the operand's mode. */
819 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
821 ir_graph *irg = current_ir_graph;
823 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
824 res = optimize_node(res);
825 IRN_VRFY_IRG(res, irg);
/* ASM: inline assembler node; constraint/clobber arrays are copied onto
   the graph's obstack so callers may pass stack-allocated arrays. */
830 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
831 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
833 ir_graph *irg = current_ir_graph;
836 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
837 res->attr.assem.pin_state = op_pin_state_pinned;
838 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
839 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
840 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
841 res->attr.assem.asm_text = asm_text;
843 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
844 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
845 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
847 res = optimize_node(res);
848 IRN_VRFY_IRG(res, irg);
852 /* --------------------------------------------- */
853 /* private interfaces, for professional use only */
854 /* --------------------------------------------- */
856 /* Constructs a Block with a fixed number of predecessors.
857 Does not set current_block. Can not be used with automatic
858 Phi node construction. */
/*
 * rd-level wrappers: each saves current_ir_graph, switches to the
 * caller-supplied irg, delegates to the matching bd-level constructor,
 * and restores the previous graph.  (Declarations of `res` and the return
 * statements are among the lines elided from this dump.)
 */
860 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
861 ir_graph *rem = current_ir_graph;
864 current_ir_graph = irg;
865 res = new_bd_Block(db, arity, in);
866 current_ir_graph = rem;
872 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
873 ir_graph *rem = current_ir_graph;
876 current_ir_graph = irg;
877 res = new_bd_Start(db, block);
878 current_ir_graph = rem;
884 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
886 ir_graph *rem = current_ir_graph;
888 current_ir_graph = irg;
889 res = new_bd_End(db, block);
890 current_ir_graph = rem;
895 /* Creates a Phi node with all predecessors. Calling this constructor
896 is only allowed if the corresponding block is mature. */
898 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
900 ir_graph *rem = current_ir_graph;
902 current_ir_graph = irg;
903 res = new_bd_Phi(db, block,arity, in, mode);
904 current_ir_graph = rem;
910 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
912 ir_graph *rem = current_ir_graph;
914 current_ir_graph = irg;
915 res = new_bd_Const_type(db, block, mode, con, tp);
916 current_ir_graph = rem;
919 } /* new_rd_Const_type */
922 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
924 ir_graph *rem = current_ir_graph;
926 current_ir_graph = irg;
927 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
928 current_ir_graph = rem;
934 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
935 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
936 } /* new_rd_Const_long */
939 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
941 ir_graph *rem = current_ir_graph;
943 current_ir_graph = irg;
944 res = new_bd_Id(db, block, val, mode);
945 current_ir_graph = rem;
951 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
954 ir_graph *rem = current_ir_graph;
956 current_ir_graph = irg;
957 res = new_bd_Proj(db, block, arg, mode, proj);
958 current_ir_graph = rem;
964 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
967 ir_graph *rem = current_ir_graph;
969 current_ir_graph = irg;
970 res = new_bd_defaultProj(db, block, arg, max_proj);
971 current_ir_graph = rem;
974 } /* new_rd_defaultProj */
/* Conv at the rd level is always non-strict (strict_flag = 0). */
977 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
979 ir_graph *rem = current_ir_graph;
981 current_ir_graph = irg;
982 res = new_bd_Conv(db, block, op, mode, 0);
983 current_ir_graph = rem;
989 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
991 ir_graph *rem = current_ir_graph;
993 current_ir_graph = irg;
994 res = new_bd_Cast(db, block, op, to_tp);
995 current_ir_graph = rem;
1001 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1003 ir_graph *rem = current_ir_graph;
1005 current_ir_graph = irg;
1006 res = new_bd_Tuple(db, block, arity, in);
1007 current_ir_graph = rem;
1010 } /* new_rd_Tuple */
/* Instantiations of the NEW_RD_* macros (most of the list is elided in
   this dump), followed by hand-written rd wrappers. */
1018 NEW_RD_DIVOP(DivMod)
1031 NEW_RD_BINOP(Borrow)
1033 /* creates a rd constructor for an divRL */
1034 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1035 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1038 ir_graph *rem = current_ir_graph;
1039 current_ir_graph = irg;
1040 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1041 current_ir_graph = rem;
1046 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1047 ir_node *op1, ir_node *op2) {
1049 ir_graph *rem = current_ir_graph;
1051 current_ir_graph = irg;
1052 res = new_bd_Cmp(db, block, op1, op2);
1053 current_ir_graph = rem;
1059 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1061 ir_graph *rem = current_ir_graph;
1063 current_ir_graph = irg;
1064 res = new_bd_Jmp(db, block);
1065 current_ir_graph = rem;
1071 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1073 ir_graph *rem = current_ir_graph;
1075 current_ir_graph = irg;
1076 res = new_bd_IJmp(db, block, tgt);
1077 current_ir_graph = rem;
1083 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1085 ir_graph *rem = current_ir_graph;
1087 current_ir_graph = irg;
1088 res = new_bd_Cond(db, block, c);
1089 current_ir_graph = rem;
1095 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1096 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1098 ir_graph *rem = current_ir_graph;
1100 current_ir_graph = irg;
1101 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1102 current_ir_graph = rem;
1108 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1109 ir_node *store, int arity, ir_node **in) {
1111 ir_graph *rem = current_ir_graph;
1113 current_ir_graph = irg;
1114 res = new_bd_Return(db, block, store, arity, in);
1115 current_ir_graph = rem;
1118 } /* new_rd_Return */
/* rd wrappers for memory operations; same save/switch/restore pattern. */
1121 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1122 ir_node *store, ir_node *adr, ir_mode *mode) {
1124 ir_graph *rem = current_ir_graph;
1126 current_ir_graph = irg;
1127 res = new_bd_Load(db, block, store, adr, mode);
1128 current_ir_graph = rem;
1134 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1135 ir_node *store, ir_node *adr, ir_node *val) {
1137 ir_graph *rem = current_ir_graph;
1139 current_ir_graph = irg;
1140 res = new_bd_Store(db, block, store, adr, val);
1141 current_ir_graph = rem;
1144 } /* new_rd_Store */
1147 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1148 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1150 ir_graph *rem = current_ir_graph;
1152 current_ir_graph = irg;
1153 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1154 current_ir_graph = rem;
1157 } /* new_rd_Alloc */
1160 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1161 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1163 ir_graph *rem = current_ir_graph;
1165 current_ir_graph = irg;
1166 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1167 current_ir_graph = rem;
/* simpleSel: Sel without index inputs (arity 0). */
1173 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1174 ir_node *store, ir_node *objptr, ir_entity *ent) {
1176 ir_graph *rem = current_ir_graph;
1178 current_ir_graph = irg;
1179 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1180 current_ir_graph = rem;
1183 } /* new_rd_simpleSel */
1186 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1187 int arity, ir_node **in, ir_entity *ent) {
1189 ir_graph *rem = current_ir_graph;
1191 current_ir_graph = irg;
1192 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1193 current_ir_graph = rem;
1199 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1200 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1202 ir_graph *rem = current_ir_graph;
1204 current_ir_graph = irg;
1205 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1206 current_ir_graph = rem;
1209 } /* new_rd_SymConst_type */
1212 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1213 symconst_symbol value, symconst_kind symkind) {
1214 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1215 } /* new_rd_SymConst */
/* Typed SymConst convenience constructors: each fills the matching union
   member of symconst_symbol and places the node in the start block. */
1217 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1218 symconst_symbol sym;
1219 sym.entity_p = symbol;
1220 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1221 } /* new_rd_SymConst_addr_ent */
1223 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1224 symconst_symbol sym;
1225 sym.entity_p = symbol;
1226 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1227 } /* new_rd_SymConst_ofs_ent */
1229 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1230 symconst_symbol sym;
1231 sym.ident_p = symbol;
1232 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1233 } /* new_rd_SymConst_addr_name */
1235 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1236 symconst_symbol sym;
1237 sym.type_p = symbol;
1238 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1239 } /* new_rd_SymConst_type_tag */
1241 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1242 symconst_symbol sym;
1243 sym.type_p = symbol;
1244 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1245 } /* new_rd_SymConst_size */
1247 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1248 symconst_symbol sym;
1249 sym.type_p = symbol;
1250 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1251 } /* new_rd_SymConst_align */
/* Sync: built empty by the bd constructor, then the `arity` predecessors
   are appended one by one via add_Sync_pred. */
1254 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1256 ir_graph *rem = current_ir_graph;
1259 current_ir_graph = irg;
1260 res = new_bd_Sync(db, block);
1261 current_ir_graph = rem;
1263 for (i = 0; i < arity; ++i)
1264 add_Sync_pred(res, in[i]);
/* Bad is unique per graph: just return the graph's Bad node. */
1270 new_rd_Bad(ir_graph *irg) {
1271 return get_irg_bad(irg);
1275 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1277 ir_graph *rem = current_ir_graph;
1279 current_ir_graph = irg;
1280 res = new_bd_Confirm(db, block, val, bound, cmp);
1281 current_ir_graph = rem;
1284 } /* new_rd_Confirm */
1287 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1289 ir_graph *rem = current_ir_graph;
1291 current_ir_graph = irg;
1292 res = new_bd_Unknown(m);
1293 current_ir_graph = rem;
1296 } /* new_rd_Unknown */
1299 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1301 ir_graph *rem = current_ir_graph;
1303 current_ir_graph = irg;
1304 res = new_bd_CallBegin(db, block, call);
1305 current_ir_graph = rem;
1308 } /* new_rd_CallBegin */
/* EndReg/EndExcept build the node directly on `irg` (no graph switch
   needed -- new_ir_node takes the graph explicitly). */
1311 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1314 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1315 set_irg_end_reg(irg, res);
1316 IRN_VRFY_IRG(res, irg);
1318 } /* new_rd_EndReg */
1321 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1324 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1325 set_irg_end_except(irg, res);
1326 IRN_VRFY_IRG (res, irg);
1328 } /* new_rd_EndExcept */
1331 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1333 ir_graph *rem = current_ir_graph;
1335 current_ir_graph = irg;
1336 res = new_bd_Break(db, block);
1337 current_ir_graph = rem;
1340 } /* new_rd_Break */
1343 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1346 ir_graph *rem = current_ir_graph;
1348 current_ir_graph = irg;
1349 res = new_bd_Filter(db, block, arg, mode, proj);
1350 current_ir_graph = rem;
1353 } /* new_rd_Filter */
/* NoMem is unique per graph, like Bad. */
1356 new_rd_NoMem(ir_graph *irg) {
1357 return get_irg_no_mem(irg);
1358 } /* new_rd_NoMem */
/* Remaining rd wrappers; same save/switch/restore pattern as above. */
1361 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1362 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1364 ir_graph *rem = current_ir_graph;
1366 current_ir_graph = irg;
1367 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1368 current_ir_graph = rem;
1373 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1374 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1376 ir_graph *rem = current_ir_graph;
1378 current_ir_graph = irg;
1379 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1380 current_ir_graph = rem;
1383 } /* new_rd_CopyB */
1386 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1387 ir_node *objptr, ir_type *type) {
1389 ir_graph *rem = current_ir_graph;
1391 current_ir_graph = irg;
1392 res = new_bd_InstOf(db, block, store, objptr, type);
1393 current_ir_graph = rem;
1396 } /* new_rd_InstOf */
1399 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1401 ir_graph *rem = current_ir_graph;
1403 current_ir_graph = irg;
1404 res = new_bd_Raise(db, block, store, obj);
1405 current_ir_graph = rem;
1408 } /* new_rd_Raise */
1410 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1411 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1413 ir_graph *rem = current_ir_graph;
1415 current_ir_graph = irg;
1416 res = new_bd_Bound(db, block, store, idx, lower, upper);
1417 current_ir_graph = rem;
1420 } /* new_rd_Bound */
1422 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1424 ir_graph *rem = current_ir_graph;
1426 current_ir_graph = irg;
1427 res = new_bd_Pin(db, block, node);
1428 current_ir_graph = rem;
1433 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1434 int arity, ir_node *in[], ir_asm_constraint *inputs,
1435 int n_outs, ir_asm_constraint *outputs,
1436 int n_clobber, ident *clobber[], ident *asm_text) {
1438 ir_graph *rem = current_ir_graph;
1440 current_ir_graph = irg;
1441 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1442 current_ir_graph = rem;
1448 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1449 return new_rd_Block(NULL, irg, arity, in);
1451 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1452 return new_rd_Start(NULL, irg, block);
1454 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1455 return new_rd_End(NULL, irg, block);
1457 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1458 return new_rd_Jmp(NULL, irg, block);
1460 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1461 return new_rd_IJmp(NULL, irg, block, tgt);
1463 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1464 return new_rd_Cond(NULL, irg, block, c);
1466 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1467 ir_node *store, int arity, ir_node **in) {
1468 return new_rd_Return(NULL, irg, block, store, arity, in);
1470 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1471 ir_mode *mode, tarval *con) {
1472 return new_rd_Const(NULL, irg, block, mode, con);
1474 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1475 ir_mode *mode, long value) {
1476 return new_rd_Const_long(NULL, irg, block, mode, value);
1478 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1479 ir_mode *mode, tarval *con, ir_type *tp) {
1480 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1482 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1483 symconst_symbol value, symconst_kind symkind) {
1484 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
1486 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1487 ir_node *objptr, ir_entity *ent) {
1488 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1490 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1491 ir_node *objptr, int n_index, ir_node **index,
1493 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1495 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1496 ir_node *callee, int arity, ir_node **in,
1498 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1500 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1501 ir_node *op1, ir_node *op2, ir_mode *mode) {
1502 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1504 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1505 ir_node *op1, ir_node *op2, ir_mode *mode) {
1506 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1508 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1509 ir_node *op, ir_mode *mode) {
1510 return new_rd_Minus(NULL, irg, block, op, mode);
1512 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1513 ir_node *op1, ir_node *op2, ir_mode *mode) {
1514 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1516 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1517 ir_node *op1, ir_node *op2, ir_mode *mode) {
1518 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1520 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1521 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1522 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1524 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1525 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1526 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1528 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1529 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1530 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1532 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1533 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1534 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1536 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1537 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1538 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1540 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1541 ir_node *op, ir_mode *mode) {
1542 return new_rd_Abs(NULL, irg, block, op, mode);
1544 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1545 ir_node *op1, ir_node *op2, ir_mode *mode) {
1546 return new_rd_And(NULL, irg, block, op1, op2, mode);
1548 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1549 ir_node *op1, ir_node *op2, ir_mode *mode) {
1550 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1552 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1553 ir_node *op1, ir_node *op2, ir_mode *mode) {
1554 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1556 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1557 ir_node *op, ir_mode *mode) {
1558 return new_rd_Not(NULL, irg, block, op, mode);
1560 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1561 ir_node *op, ir_node *k, ir_mode *mode) {
1562 return new_rd_Shl(NULL, irg, block, op, k, mode);
1564 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1565 ir_node *op, ir_node *k, ir_mode *mode) {
1566 return new_rd_Shr(NULL, irg, block, op, k, mode);
1568 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1569 ir_node *op, ir_node *k, ir_mode *mode) {
1570 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1572 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1573 ir_node *op, ir_node *k, ir_mode *mode) {
1574 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1576 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1577 ir_node *op, ir_node *k, ir_mode *mode) {
1578 return new_rd_Carry(NULL, irg, block, op, k, mode);
1580 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1581 ir_node *op, ir_node *k, ir_mode *mode) {
1582 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1584 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1585 ir_node *op1, ir_node *op2) {
1586 return new_rd_Cmp(NULL, irg, block, op1, op2);
1588 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1589 ir_node *op, ir_mode *mode) {
1590 return new_rd_Conv(NULL, irg, block, op, mode);
1592 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1593 return new_rd_Cast(NULL, irg, block, op, to_tp);
1595 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1596 ir_node **in, ir_mode *mode) {
1597 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1599 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1600 ir_node *store, ir_node *adr, ir_mode *mode) {
1601 return new_rd_Load(NULL, irg, block, store, adr, mode);
1603 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1604 ir_node *store, ir_node *adr, ir_node *val) {
1605 return new_rd_Store(NULL, irg, block, store, adr, val);
1607 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1608 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1609 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1611 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1612 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1613 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1615 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1616 return new_rd_Sync(NULL, irg, block, arity, in);
1618 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1619 ir_mode *mode, long proj) {
1620 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1622 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1624 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1626 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1627 int arity, ir_node **in) {
1628 return new_rd_Tuple(NULL, irg, block, arity, in );
1630 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1631 ir_node *val, ir_mode *mode) {
1632 return new_rd_Id(NULL, irg, block, val, mode);
1634 ir_node *new_r_Bad(ir_graph *irg) {
1635 return new_rd_Bad(irg);
1637 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1638 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1640 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1641 return new_rd_Unknown(irg, m);
1643 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1644 return new_rd_CallBegin(NULL, irg, block, callee);
1646 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1647 return new_rd_EndReg(NULL, irg, block);
1649 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1650 return new_rd_EndExcept(NULL, irg, block);
1652 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1653 return new_rd_Break(NULL, irg, block);
1655 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1656 ir_mode *mode, long proj) {
1657 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1659 ir_node *new_r_NoMem(ir_graph *irg) {
1660 return new_rd_NoMem(irg);
1662 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1663 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1664 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1666 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1667 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1668 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1670 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1672 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1674 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1675 ir_node *store, ir_node *obj) {
1676 return new_rd_Raise(NULL, irg, block, store, obj);
1678 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1679 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1680 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1682 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1683 return new_rd_Pin(NULL, irg, block, node);
1685 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1686 int arity, ir_node *in[], ir_asm_constraint *inputs,
1687 int n_outs, ir_asm_constraint *outputs,
1688 int n_clobber, ident *clobber[], ident *asm_text) {
1689 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1692 /** ********************/
1693 /** public interfaces */
1694 /** construction tools */
1698 * - create a new Start node in the current block
1700 * @return s - pointer to the created Start node
1705 new_d_Start(dbg_info *db) {
1708 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1709 op_Start, mode_T, 0, NULL);
1711 res = optimize_node(res);
1712 IRN_VRFY_IRG(res, current_ir_graph);
1717 new_d_End(dbg_info *db) {
1719 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1720 op_End, mode_X, -1, NULL);
1721 res = optimize_node(res);
1722 IRN_VRFY_IRG(res, current_ir_graph);
1727 /* Constructs a Block with a fixed number of predecessors.
1728 Does set current_block. Can be used with automatic Phi
1729 node construction. */
1731 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1734 int has_unknown = 0;
1736 res = new_bd_Block(db, arity, in);
1738 /* Create and initialize array for Phi-node construction. */
1739 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1740 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1741 current_ir_graph->n_loc);
1742 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1745 for (i = arity-1; i >= 0; i--)
1746 if (is_Unknown(in[i])) {
1751 if (!has_unknown) res = optimize_node(res);
1752 current_ir_graph->current_block = res;
1754 IRN_VRFY_IRG(res, current_ir_graph);
1759 /* ***********************************************************************/
1760 /* Methods necessary for automatic Phi node creation */
1762 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1763 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1764 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1765 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1767 Call Graph: ( A ---> B == A "calls" B)
1769 get_value mature_immBlock
1777 get_r_value_internal |
1781 new_rd_Phi0 new_rd_Phi_in
1783 * *************************************************************************** */
1785 /** Creates a Phi node with 0 predecessors. */
1786 static INLINE ir_node *
1787 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1790 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1791 IRN_VRFY_IRG(res, irg);
1797 * Internal constructor of a Phi node by a phi_merge operation.
1799 * @param irg the graph on which the Phi will be constructed
1800 * @param block the block in which the Phi will be constructed
1801 * @param mode the mod eof the Phi node
1802 * @param in the input array of the phi node
1803 * @param ins number of elements in the input array
1804 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1805 * the value for which the new Phi is constructed
1807 static INLINE ir_node *
1808 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1809 ir_node **in, int ins, ir_node *phi0) {
1811 ir_node *res, *known;
1813 /* Allocate a new node on the obstack. The allocation copies the in
1815 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1816 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1818 /* This loop checks whether the Phi has more than one predecessor.
1819 If so, it is a real Phi node and we break the loop. Else the
1820 Phi node merges the same definition on several paths and therefore
1821 is not needed. Don't consider Bad nodes! */
1823 for (i = ins - 1; i >= 0; --i) {
1826 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1828 /* Optimize self referencing Phis: We can't detect them yet properly, as
1829 they still refer to the Phi0 they will replace. So replace right now. */
1830 if (phi0 && in[i] == phi0)
1833 if (in[i] == res || in[i] == known || is_Bad(in[i]))
1842 /* i < 0: there is at most one predecessor, we don't need a phi node. */
1845 edges_node_deleted(res, current_ir_graph);
1846 obstack_free(current_ir_graph->obst, res);
1847 if (is_Phi(known)) {
1848 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1849 order, an enclosing Phi know may get superfluous. */
1850 res = optimize_in_place_2(known);
1852 exchange(known, res);
1857 /* A undefined value, e.g., in unreachable code. */
1861 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1862 IRN_VRFY_IRG(res, irg);
1863 /* Memory Phis in endless loops must be kept alive.
1864 As we can't distinguish these easily we keep all of them alive. */
1865 if (is_Phi(res) && mode == mode_M)
1866 add_End_keepalive(get_irg_end(irg), res);
1870 } /* new_rd_Phi_in */
1873 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1875 #if PRECISE_EXC_CONTEXT
1877 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1880 * Construct a new frag_array for node n.
1881 * Copy the content from the current graph_arr of the corresponding block:
1882 * this is the current state.
1883 * Set ProjM(n) as current memory state.
1884 * Further the last entry in frag_arr of current block points to n. This
1885 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1887 static INLINE ir_node **new_frag_arr(ir_node *n) {
1891 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1892 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1893 sizeof(ir_node *)*current_ir_graph->n_loc);
1895 /* turn off optimization before allocating Proj nodes, as res isn't
1897 opt = get_opt_optimize(); set_optimize(0);
1898 /* Here we rely on the fact that all frag ops have Memory as first result! */
1900 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1901 } else if (is_CopyB(n)) {
1902 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
1904 assert((pn_Quot_M == pn_DivMod_M) &&
1905 (pn_Quot_M == pn_Div_M) &&
1906 (pn_Quot_M == pn_Mod_M) &&
1907 (pn_Quot_M == pn_Load_M) &&
1908 (pn_Quot_M == pn_Store_M) &&
1909 (pn_Quot_M == pn_Alloc_M) &&
1910 (pn_Quot_M == pn_Bound_M));
1911 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1915 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1917 } /* new_frag_arr */
1920 * Returns the frag_arr from a node.
1922 static INLINE ir_node **get_frag_arr(ir_node *n) {
1923 switch (get_irn_opcode(n)) {
1925 return n->attr.call.exc.frag_arr;
1927 return n->attr.alloc.exc.frag_arr;
1929 return n->attr.load.exc.frag_arr;
1931 return n->attr.store.exc.frag_arr;
1933 return n->attr.except.frag_arr;
1935 } /* get_frag_arr */
1938 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1939 #ifdef DEBUG_libfirm
1942 for (i = 1024; i >= 0; --i)
1947 if (frag_arr[pos] == NULL)
1948 frag_arr[pos] = val;
1949 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
1950 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1951 assert(arr != frag_arr && "Endless recursion detected");
1956 assert(!"potential endless recursion in set_frag_value");
1957 } /* set_frag_value */
1960 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1964 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
1966 frag_arr = get_frag_arr(cfOp);
1967 res = frag_arr[pos];
1969 if (block->attr.block.graph_arr[pos] != NULL) {
1970 /* There was a set_value() after the cfOp and no get_value() before that
1971 set_value(). We must build a Phi node now. */
1972 if (block->attr.block.is_matured) {
1973 int ins = get_irn_arity(block);
1975 NEW_ARR_A(ir_node *, nin, ins);
1976 res = phi_merge(block, pos, mode, nin, ins);
1978 res = new_rd_Phi0(current_ir_graph, block, mode);
1979 res->attr.phi.u.pos = pos;
1980 res->attr.phi.next = block->attr.block.phis;
1981 block->attr.block.phis = res;
1983 assert(res != NULL);
1984 /* It's a Phi, we can write this into all graph_arrs with NULL */
1985 set_frag_value(block->attr.block.graph_arr, pos, res);
1987 res = get_r_value_internal(block, pos, mode);
1988 set_frag_value(block->attr.block.graph_arr, pos, res);
1992 } /* get_r_frag_value_internal */
1993 #endif /* PRECISE_EXC_CONTEXT */
1996 * Check whether a control flownode cf_pred represents an exception flow.
1998 * @param cf_pred the control flow node
1999 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2001 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2003 * Note: all projections from a raise are "exceptional control flow" we we handle it
2004 * like a normal Jmp, because there is no "regular" one.
2005 * That's why Raise is no "fragile_op"!
2007 if (is_fragile_op(prev_cf_op)) {
2008 if (is_Proj(cf_pred)) {
2009 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2010 /* the regular control flow, NO exception */
2013 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2016 /* Hmm, exception but not a Proj? */
2017 assert(!"unexpected condition: fragile op without a proj");
2021 } /* is_exception_flow */
2024 * Computes the predecessors for the real phi node, and then
2025 * allocates and returns this node. The routine called to allocate the
2026 * node might optimize it away and return a real value.
2027 * This function must be called with an in-array of proper size.
2030 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2031 ir_node *prevBlock, *res, *phi0, *phi0_all;
2034 /* If this block has no value at pos create a Phi0 and remember it
2035 in graph_arr to break recursions.
2036 Else we may not set graph_arr as there a later value is remembered. */
2038 if (block->attr.block.graph_arr[pos] == NULL) {
2039 ir_graph *irg = current_ir_graph;
2041 if (block == get_irg_start_block(irg)) {
2042 /* Collapsing to Bad tarvals is no good idea.
2043 So we call a user-supplied routine here that deals with this case as
2044 appropriate for the given language. Sorrily the only help we can give
2045 here is the position.
2047 Even if all variables are defined before use, it can happen that
2048 we get to the start block, if a Cond has been replaced by a tuple
2049 (bad, jmp). In this case we call the function needlessly, eventually
2050 generating an non existent error.
2051 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2054 if (default_initialize_local_variable != NULL) {
2055 ir_node *rem = get_cur_block();
2057 set_cur_block(block);
2058 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2062 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2063 /* We don't need to care about exception ops in the start block.
2064 There are none by definition. */
2065 return block->attr.block.graph_arr[pos];
2067 phi0 = new_rd_Phi0(irg, block, mode);
2068 block->attr.block.graph_arr[pos] = phi0;
2069 #if PRECISE_EXC_CONTEXT
2070 if (get_opt_precise_exc_context()) {
2071 /* Set graph_arr for fragile ops. Also here we should break recursion.
2072 We could choose a cyclic path through an cfop. But the recursion would
2073 break at some point. */
2074 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2080 /* This loop goes to all predecessor blocks of the block the Phi node
2081 is in and there finds the operands of the Phi node by calling
2082 get_r_value_internal. */
2083 for (i = 1; i <= ins; ++i) {
2084 ir_node *cf_pred = block->in[i];
2085 ir_node *prevCfOp = skip_Proj(cf_pred);
2087 if (is_Bad(prevCfOp)) {
2088 /* In case a Cond has been optimized we would get right to the start block
2089 with an invalid definition. */
2090 nin[i-1] = new_Bad();
2093 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2095 if (!is_Bad(prevBlock)) {
2096 #if PRECISE_EXC_CONTEXT
2097 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2098 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2099 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2102 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2104 nin[i-1] = new_Bad();
2108 /* We want to pass the Phi0 node to the constructor: this finds additional
2109 optimization possibilities.
2110 The Phi0 node either is allocated in this function, or it comes from
2111 a former call to get_r_value_internal(). In this case we may not yet
2112 exchange phi0, as this is done in mature_immBlock(). */
2114 phi0_all = block->attr.block.graph_arr[pos];
2115 if (!(is_Phi(phi0_all) &&
2116 (get_irn_arity(phi0_all) == 0) &&
2117 (get_nodes_block(phi0_all) == block)))
2123 /* After collecting all predecessors into the array nin a new Phi node
2124 with these predecessors is created. This constructor contains an
2125 optimization: If all predecessors of the Phi node are identical it
2126 returns the only operand instead of a new Phi node. */
2127 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2129 /* In case we allocated a Phi0 node at the beginning of this procedure,
2130 we need to exchange this Phi0 with the real Phi. */
2132 exchange(phi0, res);
2133 block->attr.block.graph_arr[pos] = res;
2134 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2135 only an optimization. */
2142 * This function returns the last definition of a value. In case
2143 * this value was last defined in a previous block, Phi nodes are
2144 * inserted. If the part of the firm graph containing the definition
2145 * is not yet constructed, a dummy Phi node is returned.
2147 * @param block the current block
2148 * @param pos the value number of the value searched
2149 * @param mode the mode of this value (needed for Phi construction)
2152 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2154 /* There are 4 cases to treat.
2156 1. The block is not mature and we visit it the first time. We can not
2157 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2158 predecessors is returned. This node is added to the linked list (block
2159 attribute "phis") of the containing block to be completed when this block is
2160 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2163 2. The value is already known in this block, graph_arr[pos] is set and we
2164 visit the block the first time. We can return the value without
2165 creating any new nodes.
2167 3. The block is mature and we visit it the first time. A Phi node needs
2168 to be created (phi_merge). If the Phi is not needed, as all it's
2169 operands are the same value reaching the block through different
2170 paths, it's optimized away and the value itself is returned.
2172 4. The block is mature, and we visit it the second time. Now two
2173 subcases are possible:
2174 * The value was computed completely the last time we were here. This
2175 is the case if there is no loop. We can return the proper value.
2176 * The recursion that visited this node and set the flag did not
2177 return yet. We are computing a value in a loop and need to
2178 break the recursion. This case only happens if we visited
2179 the same block with phi_merge before, which inserted a Phi0.
2180 So we return the Phi0.
2183 /* case 4 -- already visited. */
2184 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2185 /* As phi_merge allocates a Phi0 this value is always defined. Here
2186 is the critical difference of the two algorithms. */
2187 assert(block->attr.block.graph_arr[pos]);
2188 return block->attr.block.graph_arr[pos];
2191 /* visited the first time */
2192 set_irn_visited(block, get_irg_visited(current_ir_graph));
2194 /* Get the local valid value */
2195 res = block->attr.block.graph_arr[pos];
2197 /* case 2 -- If the value is actually computed, return it. */
2201 if (block->attr.block.is_matured) { /* case 3 */
2203 /* The Phi has the same amount of ins as the corresponding block. */
2204 int ins = get_irn_arity(block);
2206 NEW_ARR_A(ir_node *, nin, ins);
2208 /* Phi merge collects the predecessors and then creates a node. */
2209 res = phi_merge(block, pos, mode, nin, ins);
2211 } else { /* case 1 */
2212 /* The block is not mature, we don't know how many in's are needed. A Phi
2213 with zero predecessors is created. Such a Phi node is called Phi0
2214 node. The Phi0 is then added to the list of Phi0 nodes in this block
2215 to be matured by mature_immBlock later.
2216 The Phi0 has to remember the pos of it's internal value. If the real
2217 Phi is computed, pos is used to update the array with the local
2219 res = new_rd_Phi0(current_ir_graph, block, mode);
2220 res->attr.phi.u.pos = pos;
2221 res->attr.phi.next = block->attr.block.phis;
2222 block->attr.block.phis = res;
2225 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2227 /* The local valid value is available now. */
2228 block->attr.block.graph_arr[pos] = res;
2231 } /* get_r_value_internal */
2233 /* ************************************************************************** */
2236 * Finalize a Block node, when all control flows are known.
2237 * Acceptable parameters are only Block nodes.
2240 mature_immBlock(ir_node *block) {
2245 assert(is_Block(block));
2246 if (!get_Block_matured(block)) {
2247 ir_graph *irg = current_ir_graph;
2249 ins = ARR_LEN(block->in) - 1;
2250 /* Fix block parameters */
2251 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2253 /* An array for building the Phi nodes. */
2254 NEW_ARR_A(ir_node *, nin, ins);
2256 /* Traverse a chain of Phi nodes attached to this block and mature
2258 for (n = block->attr.block.phis; n; n = next) {
2259 inc_irg_visited(irg);
2260 next = n->attr.phi.next;
2261 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2264 block->attr.block.is_matured = 1;
2266 /* Now, as the block is a finished Firm node, we can optimize it.
2267 Since other nodes have been allocated since the block was created
2268 we can not free the node on the obstack. Therefore we have to call
2269 optimize_in_place().
2270 Unfortunately the optimization does not change a lot, as all allocated
2271 nodes refer to the unoptimized node.
2272 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2273 block = optimize_in_place_2(block);
2274 IRN_VRFY_IRG(block, irg);
2276 } /* mature_immBlock */
2279 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2280 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2284 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2285 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2289 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2290 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2291 } /* new_d_Const_long */
2294 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2295 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2296 } /* new_d_Const_type */
2300 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2301 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2305 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2306 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2310 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2313 assert(arg->op == op_Cond);
2314 arg->attr.cond.kind = fragmentary;
2315 arg->attr.cond.default_proj = max_proj;
2316 res = new_Proj(arg, mode_X, max_proj);
2318 } /* new_d_defaultProj */
2321 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2322 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2326 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2327 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2328 } /* new_d_strictConv */
2331 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2332 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2336 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2337 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2347 * Allocate a frag array for a node if the current graph state is phase_building.
2349 * @param irn the node for which the frag array should be allocated
2350 * @param op the opcode of the (original) node, if does not match opcode of irn,
2352 * @param frag_store the address of the frag store in irn attributes, if this
2353 * address contains a value != NULL, does nothing
2355 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2356 if (get_opt_precise_exc_context()) {
2357 if ((current_ir_graph->phase_state == phase_building) &&
2358 (get_irn_op(irn) == op) && /* Could be optimized away. */
2359 !*frag_store) /* Could be a cse where the arr is already set. */ {
2360 *frag_store = new_frag_arr(irn);
2363 } /* firm_alloc_frag_arr */
2366 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2368 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2369 #if PRECISE_EXC_CONTEXT
2370 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2377 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2379 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2380 #if PRECISE_EXC_CONTEXT
2381 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2385 } /* new_d_DivMod */
2388 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2390 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2391 #if PRECISE_EXC_CONTEXT
2392 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2399 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2401 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2402 #if PRECISE_EXC_CONTEXT
2403 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2410 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2412 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2413 #if PRECISE_EXC_CONTEXT
2414 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
2433 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2434 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2438 new_d_Jmp(dbg_info *db) {
2439 return new_bd_Jmp(db, current_ir_graph->current_block);
2443 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2444 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2448 new_d_Cond(dbg_info *db, ir_node *c) {
2449 return new_bd_Cond(db, current_ir_graph->current_block, c);
2453 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2456 res = new_bd_Call(db, current_ir_graph->current_block,
2457 store, callee, arity, in, tp);
2458 #if PRECISE_EXC_CONTEXT
2459 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2466 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2467 return new_bd_Return(db, current_ir_graph->current_block,
2469 } /* new_d_Return */
2472 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2474 res = new_bd_Load(db, current_ir_graph->current_block,
2476 #if PRECISE_EXC_CONTEXT
2477 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2484 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2486 res = new_bd_Store(db, current_ir_graph->current_block,
2488 #if PRECISE_EXC_CONTEXT
2489 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2496 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2497 ir_where_alloc where) {
2499 res = new_bd_Alloc(db, current_ir_graph->current_block,
2500 store, size, alloc_type, where);
2501 #if PRECISE_EXC_CONTEXT
2502 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
2509 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2510 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2511 return new_bd_Free(db, current_ir_graph->current_block,
2512 store, ptr, size, free_type, where);
2516 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2517 /* GL: objptr was called frame before. Frame was a bad choice for the name
2518 as the operand could as well be a pointer to a dynamic object. */
2520 return new_bd_Sel(db, current_ir_graph->current_block,
2521 store, objptr, 0, NULL, ent);
2522 } /* new_d_simpleSel */
2525 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2526 return new_bd_Sel(db, current_ir_graph->current_block,
2527 store, objptr, n_index, index, sel);
2531 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2532 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2534 } /* new_d_SymConst_type */
2537 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2538 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2539 value, kind, firm_unknown_type);
2540 } /* new_d_SymConst */
2543 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2544 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2550 return _new_d_Bad();
2554 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2555 return new_bd_Confirm(db, current_ir_graph->current_block,
2557 } /* new_d_Confirm */
2560 new_d_Unknown(ir_mode *m) {
2561 return new_bd_Unknown(m);
2562 } /* new_d_Unknown */
2565 new_d_CallBegin(dbg_info *db, ir_node *call) {
2566 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2567 } /* new_d_CallBegin */
2570 new_d_EndReg(dbg_info *db) {
2571 return new_bd_EndReg(db, current_ir_graph->current_block);
2572 } /* new_d_EndReg */
2575 new_d_EndExcept(dbg_info *db) {
2576 return new_bd_EndExcept(db, current_ir_graph->current_block);
2577 } /* new_d_EndExcept */
2580 new_d_Break(dbg_info *db) {
2581 return new_bd_Break(db, current_ir_graph->current_block);
2585 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2586 return new_bd_Filter(db, current_ir_graph->current_block,
2588 } /* new_d_Filter */
2591 (new_d_NoMem)(void) {
2592 return _new_d_NoMem();
2596 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2597 ir_node *ir_true, ir_mode *mode) {
2598 return new_bd_Mux(db, current_ir_graph->current_block,
2599 sel, ir_false, ir_true, mode);
2602 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2603 ir_node *dst, ir_node *src, ir_type *data_type) {
2605 res = new_bd_CopyB(db, current_ir_graph->current_block,
2606 store, dst, src, data_type);
2607 #if PRECISE_EXC_CONTEXT
2608 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2614 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2615 return new_bd_InstOf(db, current_ir_graph->current_block,
2616 store, objptr, type);
2617 } /* new_d_InstOf */
2620 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2621 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2624 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2625 ir_node *idx, ir_node *lower, ir_node *upper) {
2627 res = new_bd_Bound(db, current_ir_graph->current_block,
2628 store, idx, lower, upper);
2629 #if PRECISE_EXC_CONTEXT
2630 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2636 new_d_Pin(dbg_info *db, ir_node *node) {
2637 return new_bd_Pin(db, current_ir_graph->current_block, node);
2641 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2642 int n_outs, ir_asm_constraint *outputs,
2643 int n_clobber, ident *clobber[], ident *asm_text) {
2644 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2647 /* ********************************************************************* */
2648 /* Comfortable interface with automatic Phi node construction. */
2649 /* (Uses also constructors of ?? interface, except new_Block. */
2650 /* ********************************************************************* */
2652 /* Block construction */
2653 /* immature Block without predecessors */
2655 new_d_immBlock(dbg_info *db) {
2658 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2659 /* creates a new dynamic in-array as length of in is -1 */
2660 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2661 current_ir_graph->current_block = res;
2663 /* macroblock head */
2666 res->attr.block.is_matured = 0;
2667 res->attr.block.is_dead = 0;
2668 res->attr.block.is_mb_head = 1;
2669 res->attr.block.has_label = 0;
2670 res->attr.block.irg = current_ir_graph;
2671 res->attr.block.backedge = NULL;
2672 res->attr.block.in_cg = NULL;
2673 res->attr.block.cg_backedge = NULL;
2674 res->attr.block.extblk = NULL;
2675 res->attr.block.region = NULL;
2676 res->attr.block.mb_depth = 0;
2677 res->attr.block.label = 0;
2679 set_Block_block_visited(res, 0);
2681 /* Create and initialize array for Phi-node construction. */
2682 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2683 current_ir_graph->n_loc);
2684 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2686 /* Immature block may not be optimized! */
2687 IRN_VRFY_IRG(res, current_ir_graph);
2690 } /* new_d_immBlock */
2693 new_immBlock(void) {
2694 return new_d_immBlock(NULL);
2695 } /* new_immBlock */
2697 /* immature PartBlock with its predecessors */
2699 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2700 ir_node *res = new_d_immBlock(db);
2701 ir_node *blk = get_nodes_block(pred_jmp);
2703 res->in[0] = blk->in[0];
2704 assert(res->in[0] != NULL);
2705 add_immBlock_pred(res, pred_jmp);
2707 res->attr.block.is_mb_head = 0;
2708 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2711 } /* new_d_immPartBlock */
2714 new_immPartBlock(ir_node *pred_jmp) {
2715 return new_d_immPartBlock(NULL, pred_jmp);
2716 } /* new_immPartBlock */
2718 /* add an edge to a jmp/control flow node */
2720 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2721 int n = ARR_LEN(block->in) - 1;
2723 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2724 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2725 assert(is_ir_node(jmp));
2727 ARR_APP1(ir_node *, block->in, jmp);
2729 hook_set_irn_n(block, n, jmp, NULL);
2730 } /* add_immBlock_pred */
2732 /* changing the current block */
2734 set_cur_block(ir_node *target) {
2735 current_ir_graph->current_block = target;
2736 } /* set_cur_block */
2738 /* ************************ */
2739 /* parameter administration */
2741 /* get a value from the parameter array from the current block by its index */
2743 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2744 ir_graph *irg = current_ir_graph;
2745 assert(get_irg_phase_state(irg) == phase_building);
2746 inc_irg_visited(irg);
2749 return get_r_value_internal(irg->current_block, pos + 1, mode);
2752 /* get a value from the parameter array from the current block by its index */
2754 get_value(int pos, ir_mode *mode) {
2755 return get_d_value(NULL, pos, mode);
2758 /* set a value at position pos in the parameter array from the current block */
2760 set_value(int pos, ir_node *value) {
2761 ir_graph *irg = current_ir_graph;
2762 assert(get_irg_phase_state(irg) == phase_building);
2763 assert(pos+1 < irg->n_loc);
2764 assert(is_ir_node(value));
2765 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2768 /* Find the value number for a node in the current block.*/
2770 find_value(ir_node *value) {
2772 ir_node *bl = current_ir_graph->current_block;
2774 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2775 if (bl->attr.block.graph_arr[i] == value)
2780 /* get the current store */
2783 ir_graph *irg = current_ir_graph;
2785 assert(get_irg_phase_state(irg) == phase_building);
2786 /* GL: one could call get_value instead */
2787 inc_irg_visited(irg);
2788 return get_r_value_internal(irg->current_block, 0, mode_M);
2791 /* set the current store: handles automatic Sync construction for Load nodes */
2793 set_store(ir_node *store) {
2794 ir_node *load, *pload, *pred, *in[2];
2796 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2797 /* Beware: due to dead code elimination, a store might become a Bad node even in
2798 the construction phase. */
2799 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2801 if (get_opt_auto_create_sync()) {
2802 /* handle non-volatile Load nodes by automatically creating Sync's */
2803 load = skip_Proj(store);
2804 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2805 pred = get_Load_mem(load);
2807 if (is_Sync(pred)) {
2808 /* a Load after a Sync: move it up */
2809 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2811 set_Load_mem(load, get_memop_mem(mem));
2812 add_Sync_pred(pred, store);
2815 pload = skip_Proj(pred);
2816 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2817 /* a Load after a Load: create a new Sync */
2818 set_Load_mem(load, get_Load_mem(pload));
2822 store = new_Sync(2, in);
2827 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2831 keep_alive(ir_node *ka) {
2832 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2835 /* --- Useful access routines --- */
2836 /* Returns the current block of the current graph. To set the current
2837 block use set_cur_block. */
2838 ir_node *get_cur_block(void) {
2839 return get_irg_current_block(current_ir_graph);
2840 } /* get_cur_block */
2842 /* Returns the frame type of the current graph */
2843 ir_type *get_cur_frame_type(void) {
2844 return get_irg_frame_type(current_ir_graph);
2845 } /* get_cur_frame_type */
2848 /* ********************************************************************* */
2851 /* call once for each run of the library */
2853 firm_init_cons(uninitialized_local_variable_func_t *func) {
2854 default_initialize_local_variable = func;
2855 } /* firm_init_cons */
2858 irp_finalize_cons(void) {
2860 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2861 irg_finalize_cons(get_irp_irg(i));
2863 irp->phase_state = phase_high;
2864 } /* irp_finalize_cons */
2867 ir_node *new_Block(int arity, ir_node **in) {
2868 return new_d_Block(NULL, arity, in);
2870 ir_node *new_Start(void) {
2871 return new_d_Start(NULL);
2873 ir_node *new_End(void) {
2874 return new_d_End(NULL);
2876 ir_node *new_Jmp(void) {
2877 return new_d_Jmp(NULL);
2879 ir_node *new_IJmp(ir_node *tgt) {
2880 return new_d_IJmp(NULL, tgt);
2882 ir_node *new_Cond(ir_node *c) {
2883 return new_d_Cond(NULL, c);
2885 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2886 return new_d_Return(NULL, store, arity, in);
2888 ir_node *new_Const(ir_mode *mode, tarval *con) {
2889 return new_d_Const(NULL, mode, con);
2892 ir_node *new_Const_long(ir_mode *mode, long value) {
2893 return new_d_Const_long(NULL, mode, value);
2896 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2897 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2900 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2901 return new_d_SymConst_type(NULL, mode, value, kind, type);
2903 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2904 return new_d_SymConst(NULL, mode, value, kind);
2906 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2907 return new_d_simpleSel(NULL, store, objptr, ent);
2909 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2911 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2913 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2915 return new_d_Call(NULL, store, callee, arity, in, tp);
2917 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2918 return new_d_Add(NULL, op1, op2, mode);
2920 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2921 return new_d_Sub(NULL, op1, op2, mode);
2923 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2924 return new_d_Minus(NULL, op, mode);
2926 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2927 return new_d_Mul(NULL, op1, op2, mode);
2929 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2930 return new_d_Mulh(NULL, op1, op2, mode);
2932 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2933 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2935 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2936 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2938 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2939 return new_d_Div(NULL, memop, op1, op2, mode, state);
2941 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2942 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
2944 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2945 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2947 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2948 return new_d_Abs(NULL, op, mode);
2950 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2951 return new_d_And(NULL, op1, op2, mode);
2953 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
2954 return new_d_Or(NULL, op1, op2, mode);
2956 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
2957 return new_d_Eor(NULL, op1, op2, mode);
2959 ir_node *new_Not(ir_node *op, ir_mode *mode) {
2960 return new_d_Not(NULL, op, mode);
2962 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
2963 return new_d_Shl(NULL, op, k, mode);
2965 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
2966 return new_d_Shr(NULL, op, k, mode);
2968 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
2969 return new_d_Shrs(NULL, op, k, mode);
2971 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
2972 return new_d_Rotl(NULL, op, k, mode);
2974 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
2975 return new_d_Carry(NULL, op1, op2, mode);
2977 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
2978 return new_d_Borrow(NULL, op1, op2, mode);
2980 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
2981 return new_d_Cmp(NULL, op1, op2);
2983 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
2984 return new_d_Conv(NULL, op, mode);
2986 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
2987 return new_d_strictConv(NULL, op, mode);
2989 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
2990 return new_d_Cast(NULL, op, to_tp);
2992 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
2993 return new_d_Phi(NULL, arity, in, mode);
2995 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
2996 return new_d_Load(NULL, store, addr, mode);
2998 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
2999 return new_d_Store(NULL, store, addr, val);
3001 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3002 ir_where_alloc where) {
3003 return new_d_Alloc(NULL, store, size, alloc_type, where);
3005 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3006 ir_type *free_type, ir_where_alloc where) {
3007 return new_d_Free(NULL, store, ptr, size, free_type, where);
3009 ir_node *new_Sync(int arity, ir_node *in[]) {
3010 return new_d_Sync(NULL, arity, in);
3012 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3013 return new_d_Proj(NULL, arg, mode, proj);
3015 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3016 return new_d_defaultProj(NULL, arg, max_proj);
3018 ir_node *new_Tuple(int arity, ir_node **in) {
3019 return new_d_Tuple(NULL, arity, in);
3021 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3022 return new_d_Id(NULL, val, mode);
3024 ir_node *new_Bad(void) {
3027 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3028 return new_d_Confirm(NULL, val, bound, cmp);
3030 ir_node *new_Unknown(ir_mode *m) {
3031 return new_d_Unknown(m);
3033 ir_node *new_CallBegin(ir_node *callee) {
3034 return new_d_CallBegin(NULL, callee);
3036 ir_node *new_EndReg(void) {
3037 return new_d_EndReg(NULL);
3039 ir_node *new_EndExcept(void) {
3040 return new_d_EndExcept(NULL);
3042 ir_node *new_Break(void) {
3043 return new_d_Break(NULL);
3045 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3046 return new_d_Filter(NULL, arg, mode, proj);
3048 ir_node *new_NoMem(void) {
3049 return new_d_NoMem();
3051 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3052 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3054 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3055 return new_d_CopyB(NULL, store, dst, src, data_type);
3057 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3058 return new_d_InstOf(NULL, store, objptr, ent);
3060 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3061 return new_d_Raise(NULL, store, obj);
3063 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3064 return new_d_Bound(NULL, store, idx, lower, upper);
3066 ir_node *new_Pin(ir_node *node) {
3067 return new_d_Pin(NULL, node);
3069 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3070 int n_outs, ir_asm_constraint *outputs,
3071 int n_clobber, ident *clobber[], ident *asm_text) {
3072 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3075 /* create a new anchor node */
3076 ir_node *new_Anchor(ir_graph *irg) {
3077 ir_node *in[anchor_last];
3078 memset(in, 0, sizeof(in));
3079 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);