2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
#if USE_EXPLICIT_PHI_IN_STACK
/* A stack needed for the automatic Phi node construction in constructor
   Phi_in. Redefinition in irgraph.c!! */
typedef struct Phi_in_stack Phi_in_stack;

/* Node verification hook used after every node construction: empty in
   release builds, forwards to irn_vrfy_irg() when we need verifying.
   NOTE(review): the conditional (#ifdef/#else/#endif) selecting between
   these two definitions is elided in this excerpt. */
# define IRN_VRFY_IRG(res, irg)
# define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)

/* Language dependent variable initialization callback. */
static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* creates a bd constructor for a binop: allocates a 2-input node of
   op_##instr in current_ir_graph, optimizes and verifies it.
   NOTE(review): excerpt — the macro's function head, the declarations of
   'res'/'in', the in[] setup, braces and 'return res;' are elided here. */
#define NEW_BD_BINOP(instr) \
new_bd_##instr(dbg_info *db, ir_node *block, \
               ir_node *op1, ir_node *op2, ir_mode *mode) \
	ir_graph *irg = current_ir_graph; \
	res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
	res = optimize_node(res); \
	IRN_VRFY_IRG(res, irg); \

/* creates a bd constructor for an unop: same scheme with one operand. */
#define NEW_BD_UNOP(instr) \
new_bd_##instr(dbg_info *db, ir_node *block, \
               ir_node *op, ir_mode *mode) \
	ir_graph *irg = current_ir_graph; \
	res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
	res = optimize_node(res); \
	IRN_VRFY_IRG(res, irg); \

/* creates a bd constructor for an divop: a mode_T node with a memory
   operand; records pin state, result mode and remainder flag in the
   divmod attribute before optimizing. */
#define NEW_BD_DIVOP(instr) \
new_bd_##instr(dbg_info *db, ir_node *block, \
               ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
	ir_graph *irg = current_ir_graph; \
	res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
	res->attr.divmod.exc.pin_state = state; \
	res->attr.divmod.res_mode = mode; \
	res->attr.divmod.no_remainder = 0; \
	res = optimize_node(res); \
	IRN_VRFY_IRG(res, irg); \
/* creates a rd constructor for a binop: temporarily switches
   current_ir_graph to the given irg, delegates to the bd constructor,
   then restores the previous graph.
   NOTE(review): excerpt — function heads, local declarations and
   'return res;' of these macros are elided here. */
#define NEW_RD_BINOP(instr) \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
               ir_node *op1, ir_node *op2, ir_mode *mode) \
	ir_graph *rem = current_ir_graph; \
	current_ir_graph = irg; \
	res = new_bd_##instr(db, block, op1, op2, mode); \
	current_ir_graph = rem; \

/* creates a rd constructor for an unop (same graph-switching scheme). */
#define NEW_RD_UNOP(instr) \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
               ir_node *op, ir_mode *mode) \
	ir_graph *rem = current_ir_graph; \
	current_ir_graph = irg; \
	res = new_bd_##instr(db, block, op, mode); \
	current_ir_graph = rem; \

/* creates a rd constructor for an divop (same graph-switching scheme). */
#define NEW_RD_DIVOP(instr) \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
               ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
	ir_graph *rem = current_ir_graph; \
	current_ir_graph = irg; \
	res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
	current_ir_graph = rem; \

/* creates a d constructor for an binop: builds in the current block of
   current_ir_graph. */
#define NEW_D_BINOP(instr) \
new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
	return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \

/* creates a d constructor for an unop (same scheme). */
#define NEW_D_UNOP(instr) \
new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
	return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
/**
 * Constructs a Block with a fixed number of predecessors.
 * Does not set current_block. Cannot be used with automatic
 * Phi node construction.
 * NOTE(review): excerpt — return-type lines, 'res' declarations and
 * 'return res;' of these constructors are elided here.
 */
new_bd_Block(dbg_info *db, int arity, ir_node **in) {
	ir_graph *irg = current_ir_graph;

	/* the block is entered into the graph with no enclosing block (NULL) */
	res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);

	/* macroblock header */
	res->attr.block.is_dead     = 0;
	res->attr.block.is_mb_head  = 1;
	res->attr.block.has_label   = 0;
	res->attr.block.irg         = irg;
	res->attr.block.backedge    = new_backedge_arr(irg->obst, arity);
	res->attr.block.in_cg       = NULL;
	res->attr.block.cg_backedge = NULL;
	res->attr.block.extblk      = NULL;
	res->attr.block.mb_depth    = 0;
	res->attr.block.label       = 0;

	/* a block built with all predecessors known is mature immediately */
	set_Block_matured(res, 1);
	set_Block_block_visited(res, 0);

	IRN_VRFY_IRG(res, irg);

/* Constructs the Start node of the given block: a mode_T node without
   predecessors. */
new_bd_Start(dbg_info *db, ir_node *block) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);

	IRN_VRFY_IRG(res, irg);

/* Constructs the End node: mode_X with dynamic arity (-1), so keep-alive
   edges can be added later. */
new_bd_End(dbg_info *db, ir_node *block) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);

	IRN_VRFY_IRG(res, irg);
/**
 * Creates a Phi node with all predecessors. Calling this constructor
 * is only allowed if the corresponding block is mature.
 */
new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
	ir_graph *irg = current_ir_graph;

	/* Don't assert that block matured: the use of this constructor is strongly
	   restricted ... (comment truncated in this excerpt) */
	if (get_Block_matured(block))
		assert(get_irn_arity(block) == arity);

	res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);

	res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);

	/* Scan for Unknown predecessors; the loop body setting 'has_unknown'
	   is elided in this excerpt. */
	for (i = arity - 1; i >= 0; --i)
		if (is_Unknown(in[i])) {

	/* do not optimize a Phi that still has Unknown inputs */
	if (!has_unknown) res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

	/* Memory Phis in endless loops must be kept alive.
	   As we can't distinguish these easily we keep all of them alive. */
	if (is_Phi(res) && mode == mode_M)
		add_End_keepalive(get_irg_end(irg), res);
/* Constructs a typed Const node. Note: the node is always placed in the
   start block of the graph — the 'block' parameter is not used here. */
new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
	res->attr.con.tv = con;
	set_Const_type(res, tp);  /* Call method because of complex assertion. */
	res = optimize_node (res);
	assert(get_Const_type(res) == tp);
	IRN_VRFY_IRG(res, irg);

} /* new_bd_Const_type */

/* Const with the unknown type.
   NOTE(review): delegates to the *rd* variant although this is a bd
   constructor — looks intentional (irg is passed explicitly); verify
   against the full file. */
new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
	ir_graph *irg = current_ir_graph;

	return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);

/* Const built from a host 'long', converted to a tarval of 'mode'. */
new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
	ir_graph *irg = current_ir_graph;

	return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
} /* new_bd_Const_long */

/* Constructs an Id node (identity of 'val'); usually optimized away. */
new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
/* Constructs a Proj node selecting component 'proj' of tuple 'arg'.
   NOTE(review): excerpt — the tail of the signature (the 'proj'
   parameter) is elided here. */
new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
	ir_graph *irg = current_ir_graph;

	res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
	res->attr.proj = proj;

	assert(get_Proj_pred(res));
	assert(get_nodes_block(get_Proj_pred(res)));

	res = optimize_node(res);

	IRN_VRFY_IRG(res, irg);

/* Constructs the default Proj of a Cond node: marks the Cond as
   fragmentary and records 'max_proj' as its default projection. */
new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
	ir_graph *irg = current_ir_graph;

	assert(arg->op == op_Cond);
	arg->attr.cond.kind = fragmentary;
	arg->attr.cond.default_proj = max_proj;
	res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
} /* new_bd_defaultProj */
/* Constructs a Conv node converting 'op' to 'mode'; 'strict_flag'
   selects strict conversion semantics. */
new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
	res->attr.conv.strict = strict_flag;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

/* Constructs a Cast node: reinterprets 'op' as type 'to_tp' (mode is
   unchanged); only atomic target types are allowed. */
new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
	ir_graph *irg = current_ir_graph;

	assert(is_atomic_type(to_tp));

	res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
	res->attr.cast.totype = to_tp;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

/* Constructs a Tuple node bundling 'arity' values into one mode_T node. */
new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
	res = optimize_node (res);
	IRN_VRFY_IRG(res, irg);
/** Creates a remainderless Div node. Same as the NEW_BD_DIVOP scheme but
    with no_remainder set to 1.
    NOTE(review): excerpt — the 'res'/'in' declarations and the in[]
    setup (memop, op1, op2) are elided here. */
static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
            ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
	res->attr.divmod.exc.pin_state = state;
	res->attr.divmod.res_mode = mode;
	res->attr.divmod.no_remainder = 1;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

/* Constructs a Cmp node comparing op1 and op2; result is a mode_T tuple
   of relation Projs. */
new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
/* Constructs an unconditional Jmp node (mode_X, no predecessors). */
new_bd_Jmp(dbg_info *db, ir_node *block) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

/* Constructs an IJmp node: indirect jump to the address 'tgt'. */
new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

/* Constructs a Cond node on selector 'c'; starts as a dense jump table
   with no default projection and no branch prediction. */
new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
	res->attr.cond.kind         = dense;
	res->attr.cond.default_proj = 0;
	res->attr.cond.pred         = COND_JMP_PRED_NONE;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
/* Constructs a Call node: ins are [store, callee, arguments...]; 'tp' is
   the method type of the callee.
   NOTE(review): excerpt — 'r_arity'/'r_in'/'res' declarations and the
   lines storing 'store'/'callee' into r_in[0..1] are elided here. */
new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
            ir_node *callee, int arity, ir_node **in, ir_type *tp) {
	ir_graph *irg = current_ir_graph;

	NEW_ARR_A(ir_node *, r_in, r_arity);
	/* arguments start after store and callee */
	memcpy(&r_in[2], in, sizeof(ir_node *) * arity);

	res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);

	assert((get_unknown_type() == tp) || is_Method_type(tp));
	set_Call_type(res, tp);
	res->attr.call.exc.pin_state = op_pin_state_pinned;
	res->attr.call.callee_arr    = NULL;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

/* Constructs a Return node: ins are [store, result values...]. */
new_bd_Return(dbg_info *db, ir_node *block,
              ir_node *store, int arity, ir_node **in) {
	ir_graph *irg = current_ir_graph;

	NEW_ARR_A (ir_node *, r_in, r_arity);
	/* results start after the store */
	memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
	res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
} /* new_bd_Return */
/* Constructs a Load node: ins are [store, adr]; loads a value of 'mode'.
   Defaults: pinned, non-volatile, aligned. */
new_bd_Load(dbg_info *db, ir_node *block,
            ir_node *store, ir_node *adr, ir_mode *mode) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
	res->attr.load.exc.pin_state = op_pin_state_pinned;
	res->attr.load.load_mode     = mode;
	res->attr.load.volatility    = volatility_non_volatile;
	res->attr.load.aligned       = align_is_aligned;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

/* Constructs a Store node: ins are [store, adr, val].
   Defaults: pinned, non-volatile, aligned. */
new_bd_Store(dbg_info *db, ir_node *block,
             ir_node *store, ir_node *adr, ir_node *val) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
	res->attr.store.exc.pin_state = op_pin_state_pinned;
	res->attr.store.volatility    = volatility_non_volatile;
	res->attr.store.aligned       = align_is_aligned;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
/* Constructs an Alloc node: allocates 'size' bytes of 'alloc_type' on
   the heap or stack ('where'); ins are [store, size]. */
new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
             ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
	res->attr.alloc.exc.pin_state = op_pin_state_pinned;
	res->attr.alloc.where         = where;
	res->attr.alloc.type          = alloc_type;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

/* Constructs a Free node releasing 'size' bytes at 'ptr'; ins are
   [store, ptr, size]; result is the new memory (mode_M). */
new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
            ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
	res->attr.free.where = where;
	res->attr.free.type  = free_type;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
/* Constructs a Sel node addressing entity 'ent' of the compound pointed
   to by 'objptr'; ins are [store, objptr, indices...]. The result mode
   is mode_P_code for method entities, mode_P_data otherwise. */
new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
           int arity, ir_node **in, ir_entity *ent) {
	ir_graph *irg = current_ir_graph;
	ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;

	assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");

	NEW_ARR_A(ir_node *, r_in, r_arity);	/* uses alloca */
	/* indices start after store and objptr */
	memcpy(&r_in[2], in, sizeof(ir_node *) * arity);

	/* Sel's can select functions which should be of mode mode_P_code. */
	res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
	res->attr.sel.ent = ent;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

/* Constructs a typed SymConst node: a constant symbolic value (address,
   size, alignment, ...) described by 'value' and 'symkind'. */
new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
                     symconst_symbol value,symconst_kind symkind, ir_type *tp) {
	ir_graph *irg = current_ir_graph;
	ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);

	res->attr.symc.kind = symkind;
	res->attr.symc.sym  = value;
	res->attr.symc.tp   = tp;

	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
} /* new_bd_SymConst_type */
/* Constructs an empty Sync node (mode_M, dynamic arity); predecessors
   are added afterwards via add_Sync_pred(). */
new_bd_Sync(dbg_info *db, ir_node *block) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
	/* no need to call optimize node here, Sync are always created with no predecessors */
	IRN_VRFY_IRG(res, irg);

/* Constructs a Confirm node: asserts the relation 'cmp' between 'val'
   and 'bound'; ins are [val, bound] (setup elided in this excerpt). */
new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
	ir_node *in[2], *res;
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
	res->attr.confirm.cmp = cmp;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
} /* new_bd_Confirm */

/* Constructs an Unknown node of mode 'm', placed in the start block. */
new_bd_Unknown(ir_mode *m) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
	res = optimize_node(res);
} /* new_bd_Unknown */
/* Constructs a CallBegin node (interprocedural view) for 'call'; its
   single in is the Call's function pointer. */
new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
	ir_graph *irg = current_ir_graph;

	in[0] = get_Call_ptr(call);
	res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
	/* res->attr.callbegin.irg = irg; */
	res->attr.callbegin.call = call;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
} /* new_bd_CallBegin */

/* Constructs an EndReg node and registers it at the graph. */
new_bd_EndReg(dbg_info *db, ir_node *block) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
	set_irg_end_reg(irg, res);
	IRN_VRFY_IRG(res, irg);
} /* new_bd_EndReg */

/* Constructs an EndExcept node and registers it at the graph. */
new_bd_EndExcept(dbg_info *db, ir_node *block) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
	set_irg_end_except(irg, res);
	IRN_VRFY_IRG (res, irg);
} /* new_bd_EndExcept */

/* Constructs a Break node (mode_X, no predecessors). */
new_bd_Break(dbg_info *db, ir_node *block) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
/* Constructs a Filter node (interprocedural Proj) selecting 'proj' of
   'arg'. NOTE(review): excerpt — the tail of the signature (the 'proj'
   parameter) is elided here. */
new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
	res->attr.filter.proj     = proj;
	res->attr.filter.in_cg    = NULL;
	res->attr.filter.backedge = NULL;

	assert(get_Proj_pred(res));
	assert(get_nodes_block(get_Proj_pred(res)));

	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
} /* new_bd_Filter */

/* Constructs a Mux node: sel ? ir_true : ir_false; in[] setup is elided
   in this excerpt. */
new_bd_Mux(dbg_info *db, ir_node *block,
           ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);

	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

/* Constructs a CopyB node: block copy of 'data_type' from 'src' to
   'dst'; ins are [store, dst, src]. */
new_bd_CopyB(dbg_info *db, ir_node *block,
             ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);

	res->attr.copyb.exc.pin_state = op_pin_state_pinned;
	res->attr.copyb.data_type     = data_type;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
772 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
773 ir_node *objptr, ir_type *type) {
776 ir_graph *irg = current_ir_graph;
780 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
781 res->attr.instof.type = type;
782 res = optimize_node(res);
783 IRN_VRFY_IRG(res, irg);
785 } /* new_bd_InstOf */
/* Constructs a Raise node throwing 'obj'; ins are [store, obj] (setup
   elided in this excerpt). */
new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

/* Constructs a Bound node: checks lower <= idx < upper; ins are
   [store, idx, lower, upper]. */
new_bd_Bound(dbg_info *db, ir_node *block,
             ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
	res->attr.bound.exc.pin_state = op_pin_state_pinned;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

/* Constructs a Pin node fixing 'node' to 'block'. */
new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

/* Constructs an ASM node: inline assembler with the given input/output
   constraints and clobber list; constraint and clobber arrays are copied
   onto the graph's obstack so callers may free theirs. */
new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
           int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
	res->attr.assem.pin_state = op_pin_state_pinned;
	res->attr.assem.inputs    = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
	res->attr.assem.outputs   = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
	res->attr.assem.clobber   = NEW_ARR_D(ident *, irg->obst, n_clobber);
	res->attr.assem.asm_text  = asm_text;

	memcpy(res->attr.assem.inputs,  inputs,  sizeof(inputs[0]) * arity);
	memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
	memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);

	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
/* --------------------------------------------- */
/* private interfaces, for professional use only */
/* --------------------------------------------- */

/* The rd constructors below all follow the same scheme: temporarily make
   'irg' the current graph, delegate to the bd constructor, restore the
   previous graph. NOTE(review): excerpt — return-type lines, 'res'
   declarations and 'return res;' are elided throughout. */

/* Constructs a Block with a fixed number of predecessors.
   Does not set current_block. Can not be used with automatic
   Phi node construction. */
new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Block(db, arity, in);
	current_ir_graph = rem;

new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Start(db, block);
	current_ir_graph = rem;

new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_End(db, block);
	current_ir_graph = rem;

/* Creates a Phi node with all predecessors. Calling this constructor
   is only allowed if the corresponding block is mature. */
new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Phi(db, block,arity, in, mode);
	current_ir_graph = rem;
/* rd variant of new_bd_Const_type (see there). */
new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Const_type(db, block, mode, con, tp);
	current_ir_graph = rem;

} /* new_rd_Const_type */

/* Const with the unknown type. */
new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
	current_ir_graph = rem;

/* Const from a host 'long' value. */
new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
	return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
} /* new_rd_Const_long */

new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Id(db, block, val, mode);
	current_ir_graph = rem;
/* rd variants of the Proj/Conv/Cast/Tuple constructors (see the bd
   versions above for semantics). */
new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Proj(db, block, arg, mode, proj);
	current_ir_graph = rem;

new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_defaultProj(db, block, arg, max_proj);
	current_ir_graph = rem;

} /* new_rd_defaultProj */

/* Non-strict Conv (strict_flag == 0). */
new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Conv(db, block, op, mode, 0);
	current_ir_graph = rem;

new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Cast(db, block, op, to_tp);
	current_ir_graph = rem;

new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Tuple(db, block, arity, in);
	current_ir_graph = rem;

} /* new_rd_Tuple */
/* Macro instantiations of the rd constructors; the rest of the
   NEW_RD_BINOP/UNOP/DIVOP instantiation list is elided in this excerpt. */
NEW_RD_DIVOP(DivMod)
NEW_RD_BINOP(Borrow)

/* creates a rd constructor for an divRL (remainderless Div) */
ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
            ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
	ir_graph *rem = current_ir_graph;
	current_ir_graph = irg;
	res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
	current_ir_graph = rem;
/* rd variants of Cmp/Jmp/IJmp/Cond (see the bd versions for semantics). */
new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
           ir_node *op1, ir_node *op2) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Cmp(db, block, op1, op2);
	current_ir_graph = rem;

new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Jmp(db, block);
	current_ir_graph = rem;

new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_IJmp(db, block, tgt);
	current_ir_graph = rem;

new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Cond(db, block, c);
	current_ir_graph = rem;
/* rd variants of Call/Return/Load/Store (see the bd versions). */
new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
            ir_node *callee, int arity, ir_node **in, ir_type *tp) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Call(db, block, store, callee, arity, in, tp);
	current_ir_graph = rem;

new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
              ir_node *store, int arity, ir_node **in) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Return(db, block, store, arity, in);
	current_ir_graph = rem;

} /* new_rd_Return */

new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
            ir_node *store, ir_node *adr, ir_mode *mode) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Load(db, block, store, adr, mode);
	current_ir_graph = rem;

new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
             ir_node *store, ir_node *adr, ir_node *val) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Store(db, block, store, adr, val);
	current_ir_graph = rem;

} /* new_rd_Store */
/* rd variants of Alloc/Free/Sel (see the bd versions). */
new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
             ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Alloc(db, block, store, size, alloc_type, where);
	current_ir_graph = rem;

} /* new_rd_Alloc */

new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
            ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Free(db, block, store, ptr, size, free_type, where);
	current_ir_graph = rem;

/* Sel without array indices (arity 0). */
new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
                 ir_node *store, ir_node *objptr, ir_entity *ent) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
	current_ir_graph = rem;

} /* new_rd_simpleSel */

new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
           int arity, ir_node **in, ir_entity *ent) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
	current_ir_graph = rem;
/* rd variant of new_bd_SymConst_type (see there). */
new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
                     symconst_symbol value, symconst_kind symkind, ir_type *tp) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
	current_ir_graph = rem;

} /* new_rd_SymConst_type */

/* SymConst with the unknown type. */
new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
                symconst_symbol value, symconst_kind symkind) {
	return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
} /* new_rd_SymConst */
1218 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1219 symconst_symbol sym;
1220 sym.entity_p = symbol;
1221 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1222 } /* new_rd_SymConst_addr_ent */
1224 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1225 symconst_symbol sym;
1226 sym.entity_p = symbol;
1227 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1228 } /* new_rd_SymConst_ofs_ent */
1230 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1231 symconst_symbol sym;
1232 sym.ident_p = symbol;
1233 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1234 } /* new_rd_SymConst_addr_name */
1236 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1237 symconst_symbol sym;
1238 sym.type_p = symbol;
1239 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1240 } /* new_rd_SymConst_type_tag */
1242 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1243 symconst_symbol sym;
1244 sym.type_p = symbol;
1245 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1246 } /* new_rd_SymConst_size */
1248 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1249 symconst_symbol sym;
1250 sym.type_p = symbol;
1251 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1252 } /* new_rd_SymConst_align */
/* rd Sync: builds an empty Sync in 'irg', then adds the given
   predecessors one by one. */
new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Sync(db, block);
	current_ir_graph = rem;

	for (i = 0; i < arity; ++i)
		add_Sync_pred(res, in[i]);

/* There is exactly one Bad node per graph; just return it. */
new_rd_Bad(ir_graph *irg) {
	return get_irg_bad(irg);

new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Confirm(db, block, val, bound, cmp);
	current_ir_graph = rem;

} /* new_rd_Confirm */

new_rd_Unknown(ir_graph *irg, ir_mode *m) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Unknown(m);
	current_ir_graph = rem;

} /* new_rd_Unknown */

new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_CallBegin(db, block, call);
	current_ir_graph = rem;

} /* new_rd_CallBegin */
/* rd EndReg/EndExcept build the node directly (no graph switch needed,
   'irg' is passed straight to new_ir_node). */
new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {

	res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
	set_irg_end_reg(irg, res);
	IRN_VRFY_IRG(res, irg);

} /* new_rd_EndReg */

new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {

	res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
	set_irg_end_except(irg, res);
	IRN_VRFY_IRG (res, irg);

} /* new_rd_EndExcept */

new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Break(db, block);
	current_ir_graph = rem;

} /* new_rd_Break */

/* NOTE(review): excerpt — the tail of this signature (the 'proj'
   parameter) is elided here. */
new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Filter(db, block, arg, mode, proj);
	current_ir_graph = rem;

} /* new_rd_Filter */

/* There is exactly one NoMem node per graph; just return it. */
new_rd_NoMem(ir_graph *irg) {
	return get_irg_no_mem(irg);
} /* new_rd_NoMem */
/* rd variants of Mux/CopyB/InstOf/Raise/Bound/Pin (see the bd versions). */
new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
           ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
	current_ir_graph = rem;

ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
                      ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_CopyB(db, block, store, dst, src, data_type);
	current_ir_graph = rem;

} /* new_rd_CopyB */

new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
              ir_node *objptr, ir_type *type) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_InstOf(db, block, store, objptr, type);
	current_ir_graph = rem;

} /* new_rd_InstOf */

new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Raise(db, block, store, obj);
	current_ir_graph = rem;

} /* new_rd_Raise */

ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
                      ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Bound(db, block, store, idx, lower, upper);
	current_ir_graph = rem;

} /* new_rd_Bound */

ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Pin(db, block, node);
	current_ir_graph = rem;
1434 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1435 int arity, ir_node *in[], ir_asm_constraint *inputs,
1436 int n_outs, ir_asm_constraint *outputs,
1437 int n_clobber, ident *clobber[], ident *asm_text) {
1439 ir_graph *rem = current_ir_graph;
1441 current_ir_graph = irg;
1442 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1443 current_ir_graph = rem;
/* "r" variants: constructors without debug information.  Each wrapper simply
   delegates to the corresponding new_rd_* constructor with db == NULL. */
1449 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1450 return new_rd_Block(NULL, irg, arity, in);
1452 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1453 return new_rd_Start(NULL, irg, block);
1455 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1456 return new_rd_End(NULL, irg, block);
1458 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1459 return new_rd_Jmp(NULL, irg, block);
1461 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1462 return new_rd_IJmp(NULL, irg, block, tgt);
1464 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1465 return new_rd_Cond(NULL, irg, block, c);
1467 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1468 ir_node *store, int arity, ir_node **in) {
1469 return new_rd_Return(NULL, irg, block, store, arity, in);
1471 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1472 ir_mode *mode, tarval *con) {
1473 return new_rd_Const(NULL, irg, block, mode, con);
1475 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1476 ir_mode *mode, long value) {
1477 return new_rd_Const_long(NULL, irg, block, mode, value);
1479 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1480 ir_mode *mode, tarval *con, ir_type *tp) {
1481 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1483 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1484 symconst_symbol value, symconst_kind symkind) {
1485 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
/* simpleSel: a Sel with no index operands (scalar entity selection). */
1487 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1488 ir_node *objptr, ir_entity *ent) {
1489 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1491 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1492 ir_node *objptr, int n_index, ir_node **index,
1494 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1496 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1497 ir_node *callee, int arity, ir_node **in,
1499 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1501 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1502 ir_node *op1, ir_node *op2, ir_mode *mode) {
1503 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1505 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1506 ir_node *op1, ir_node *op2, ir_mode *mode) {
1507 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1509 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1510 ir_node *op, ir_mode *mode) {
1511 return new_rd_Minus(NULL, irg, block, op, mode);
1513 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1514 ir_node *op1, ir_node *op2, ir_mode *mode) {
1515 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1517 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1518 ir_node *op1, ir_node *op2, ir_mode *mode) {
1519 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1521 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1522 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1523 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1525 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1526 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1527 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1529 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1530 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1531 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1533 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1534 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1535 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1537 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1538 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1539 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1541 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1542 ir_node *op, ir_mode *mode) {
1543 return new_rd_Abs(NULL, irg, block, op, mode);
1545 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1546 ir_node *op1, ir_node *op2, ir_mode *mode) {
1547 return new_rd_And(NULL, irg, block, op1, op2, mode);
1549 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1550 ir_node *op1, ir_node *op2, ir_mode *mode) {
1551 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1553 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1554 ir_node *op1, ir_node *op2, ir_mode *mode) {
1555 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1557 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1558 ir_node *op, ir_mode *mode) {
1559 return new_rd_Not(NULL, irg, block, op, mode);
1561 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1562 ir_node *op, ir_node *k, ir_mode *mode) {
1563 return new_rd_Shl(NULL, irg, block, op, k, mode);
1565 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1566 ir_node *op, ir_node *k, ir_mode *mode) {
1567 return new_rd_Shr(NULL, irg, block, op, k, mode);
1569 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1570 ir_node *op, ir_node *k, ir_mode *mode) {
1571 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1573 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1574 ir_node *op, ir_node *k, ir_mode *mode) {
1575 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1577 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1578 ir_node *op, ir_node *k, ir_mode *mode) {
1579 return new_rd_Carry(NULL, irg, block, op, k, mode);
1581 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1582 ir_node *op, ir_node *k, ir_mode *mode) {
1583 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1585 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1586 ir_node *op1, ir_node *op2) {
1587 return new_rd_Cmp(NULL, irg, block, op1, op2);
1589 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1590 ir_node *op, ir_mode *mode) {
1591 return new_rd_Conv(NULL, irg, block, op, mode);
1593 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1594 return new_rd_Cast(NULL, irg, block, op, to_tp);
1596 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1597 ir_node **in, ir_mode *mode) {
1598 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1600 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1601 ir_node *store, ir_node *adr, ir_mode *mode) {
1602 return new_rd_Load(NULL, irg, block, store, adr, mode);
1604 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1605 ir_node *store, ir_node *adr, ir_node *val) {
1606 return new_rd_Store(NULL, irg, block, store, adr, val);
1608 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1609 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1610 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1612 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1613 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1614 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1616 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1617 return new_rd_Sync(NULL, irg, block, arity, in);
1619 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1620 ir_mode *mode, long proj) {
1621 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1623 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1625 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1627 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1628 int arity, ir_node **in) {
1629 return new_rd_Tuple(NULL, irg, block, arity, in );
1631 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1632 ir_node *val, ir_mode *mode) {
1633 return new_rd_Id(NULL, irg, block, val, mode);
1635 ir_node *new_r_Bad(ir_graph *irg) {
1636 return new_rd_Bad(irg);
1638 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1639 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1641 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1642 return new_rd_Unknown(irg, m);
1644 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1645 return new_rd_CallBegin(NULL, irg, block, callee);
1647 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1648 return new_rd_EndReg(NULL, irg, block);
1650 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1651 return new_rd_EndExcept(NULL, irg, block);
1653 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1654 return new_rd_Break(NULL, irg, block);
1656 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1657 ir_mode *mode, long proj) {
1658 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1660 ir_node *new_r_NoMem(ir_graph *irg) {
1661 return new_rd_NoMem(irg);
1663 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1664 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1665 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1667 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1668 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1669 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1671 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1673 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1675 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1676 ir_node *store, ir_node *obj) {
1677 return new_rd_Raise(NULL, irg, block, store, obj);
1679 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1680 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1681 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1683 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1684 return new_rd_Pin(NULL, irg, block, node);
1686 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1687 int arity, ir_node *in[], ir_asm_constraint *inputs,
1688 int n_outs, ir_asm_constraint *outputs,
1689 int n_clobber, ident *clobber[], ident *asm_text) {
1690 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1693 /** ********************/
1694 /** public interfaces */
1695 /** construction tools */
/* "d" variants: constructors with debug info that build in the current
   block of current_ir_graph. */
1699 * - create a new Start node in the current block
1701 * @return s - pointer to the created Start node
1706 new_d_Start(dbg_info *db) {
1709 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1710 op_Start, mode_T, 0, NULL);
1712 res = optimize_node(res);
1713 IRN_VRFY_IRG(res, current_ir_graph);
/* Creates an End node (mode_X, dynamic arity) in the current block. */
1718 new_d_End(dbg_info *db) {
1720 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1721 op_End, mode_X, -1, NULL);
1722 res = optimize_node(res);
1723 IRN_VRFY_IRG(res, current_ir_graph);
1728 /* Constructs a Block with a fixed number of predecessors.
1729 Does set current_block. Can be used with automatic Phi
1730 node construction. */
1732 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1735 int has_unknown = 0;
1737 res = new_bd_Block(db, arity, in);
1739 /* Create and initialize array for Phi-node construction. */
1740 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1741 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1742 current_ir_graph->n_loc);
1743 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* A block with an Unknown control-flow predecessor must not be optimized
   yet: the Unknown may later be replaced by a real predecessor. */
1746 for (i = arity-1; i >= 0; i--)
1747 if (is_Unknown(in[i])) {
1752 if (!has_unknown) res = optimize_node(res);
1753 current_ir_graph->current_block = res;
1755 IRN_VRFY_IRG(res, current_ir_graph);
1760 /* ***********************************************************************/
1761 /* Methods necessary for automatic Phi node creation */
1763 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1764 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1765 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1766 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1768 Call Graph: ( A ---> B == A "calls" B)
1770 get_value mature_immBlock
1778 get_r_value_internal |
1782 new_rd_Phi0 new_rd_Phi_in
1784 * *************************************************************************** */
1786 /** Creates a Phi node with 0 predecessors. */
/* Phi0 is a placeholder used while a block is still immature; it is
   completed (or replaced) later by phi_merge()/mature_immBlock(). */
1787 static INLINE ir_node *
1788 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1791 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1792 IRN_VRFY_IRG(res, irg);
1798 * Internal constructor of a Phi node by a phi_merge operation.
1800 * @param irg the graph on which the Phi will be constructed
1801 * @param block the block in which the Phi will be constructed
1802 * @param mode the mode of the Phi node
1803 * @param in the input array of the phi node
1804 * @param ins number of elements in the input array
1805 * @param phi0 if non-NULL: the Phi0 node in the same block that represents
1806 * the value for which the new Phi is constructed
1808 static INLINE ir_node *
1809 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1810 ir_node **in, int ins, ir_node *phi0) {
1812 ir_node *res, *known;
1814 /* Allocate a new node on the obstack. The allocation copies the in
1816 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1817 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1819 /* This loop checks whether the Phi has more than one predecessor.
1820 If so, it is a real Phi node and we break the loop. Else the
1821 Phi node merges the same definition on several paths and therefore
1822 is not needed. Don't consider Bad nodes! */
1824 for (i = ins - 1; i >= 0; --i) {
1827 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1829 /* Optimize self referencing Phis: We can't detect them yet properly, as
1830 they still refer to the Phi0 they will replace. So replace right now. */
1831 if (phi0 && in[i] == phi0)
1834 if (in[i] == res || in[i] == known || is_Bad(in[i]))
1843 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* The redundant Phi is removed from the obstack (it was the most recent
   allocation) and the single known predecessor is used instead. */
1846 edges_node_deleted(res, current_ir_graph);
1847 obstack_free(current_ir_graph->obst, res);
1848 if (is_Phi(known)) {
1849 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1850 order, an enclosing Phi node may become superfluous. */
1851 res = optimize_in_place_2(known);
1853 exchange(known, res);
1858 /* An undefined value, e.g., in unreachable code. */
1862 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1863 IRN_VRFY_IRG(res, irg);
1864 /* Memory Phis in endless loops must be kept alive.
1865 As we can't distinguish these easily we keep all of them alive. */
1866 if (is_Phi(res) && mode == mode_M)
1867 add_End_keepalive(get_irg_end(irg), res);
1871 } /* new_rd_Phi_in */
/* Forward declarations: get_r_value_internal and phi_merge are mutually
   recursive with the fragile-op helpers below. */
1874 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1876 #if PRECISE_EXC_CONTEXT
1878 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1881 * Construct a new frag_array for node n.
1882 * Copy the content from the current graph_arr of the corresponding block:
1883 * this is the current state.
1884 * Set ProjM(n) as current memory state.
1885 * Further the last entry in frag_arr of current block points to n. This
1886 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1888 static INLINE ir_node **new_frag_arr(ir_node *n) {
1892 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1893 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1894 sizeof(ir_node *)*current_ir_graph->n_loc);
1896 /* turn off optimization before allocating Proj nodes, as res isn't
1898 opt = get_opt_optimize(); set_optimize(0);
1899 /* Here we rely on the fact that all frag ops have Memory as first result! */
1901 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1902 } else if (is_CopyB(n)) {
1903 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops share the same memory Proj number; the
   asserts below document (and check) that invariant. */
1905 assert((pn_Quot_M == pn_DivMod_M) &&
1906 (pn_Quot_M == pn_Div_M) &&
1907 (pn_Quot_M == pn_Mod_M) &&
1908 (pn_Quot_M == pn_Load_M) &&
1909 (pn_Quot_M == pn_Store_M) &&
1910 (pn_Quot_M == pn_Alloc_M) &&
1911 (pn_Quot_M == pn_Bound_M));
1912 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Remember n as the last fragile op of the current block (chain head). */
1916 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1918 } /* new_frag_arr */
1921 * Returns the frag_arr from a node.
/* Dispatch on opcode: each fragile op stores its frag_arr in a different
   member of the attribute union. */
1923 static INLINE ir_node **get_frag_arr(ir_node *n) {
1924 switch (get_irn_opcode(n)) {
1926 return n->attr.call.exc.frag_arr;
1928 return n->attr.alloc.exc.frag_arr;
1930 return n->attr.load.exc.frag_arr;
1932 return n->attr.store.exc.frag_arr;
1934 return n->attr.except.frag_arr;
1936 } /* get_frag_arr */
/* Writes val at position pos into frag_arr if that slot is still empty,
   and follows the chain of fragile ops (stored in the last array slot)
   to propagate the value.  Debug builds bound the walk to detect cycles. */
1939 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1940 #ifdef DEBUG_libfirm
/* Bounded loop instead of unbounded recursion: catches endless chains. */
1943 for (i = 1024; i >= 0; --i)
1948 if (frag_arr[pos] == NULL)
1949 frag_arr[pos] = val;
1950 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
1951 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1952 assert(arr != frag_arr && "Endless recursion detected");
1957 assert(!"potential endless recursion in set_frag_value");
1958 } /* set_frag_value */
1961 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1965 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
1967 frag_arr = get_frag_arr(cfOp);
1968 res = frag_arr[pos];
1970 if (block->attr.block.graph_arr[pos] != NULL) {
1971 /* There was a set_value() after the cfOp and no get_value() before that
1972 set_value(). We must build a Phi node now. */
1973 if (block->attr.block.is_matured) {
1974 int ins = get_irn_arity(block);
1976 NEW_ARR_A(ir_node *, nin, ins);
1977 res = phi_merge(block, pos, mode, nin, ins);
1979 res = new_rd_Phi0(current_ir_graph, block, mode);
1980 res->attr.phi.u.pos = pos;
1981 res->attr.phi.next = block->attr.block.phis;
1982 block->attr.block.phis = res;
1984 assert(res != NULL);
1985 /* It's a Phi, we can write this into all graph_arrs with NULL */
1986 set_frag_value(block->attr.block.graph_arr, pos, res);
1988 res = get_r_value_internal(block, pos, mode);
1989 set_frag_value(block->attr.block.graph_arr, pos, res);
1993 } /* get_r_frag_value_internal */
1994 #endif /* PRECISE_EXC_CONTEXT */
1997 * Check whether a control flow node cf_pred represents an exception flow.
1999 * @param cf_pred the control flow node
2000 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2002 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2004 * Note: all projections from a raise are "exceptional control flow", so we handle it
2005 * like a normal Jmp, because there is no "regular" one.
2006 * That's why Raise is no "fragile_op"!
2008 if (is_fragile_op(prev_cf_op)) {
2009 if (is_Proj(cf_pred)) {
2010 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2011 /* the regular control flow, NO exception */
2014 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2017 /* Hmm, exception but not a Proj? */
2018 assert(!"unexpected condition: fragile op without a proj");
2022 } /* is_exception_flow */
2025 * Computes the predecessors for the real phi node, and then
2026 * allocates and returns this node. The routine called to allocate the
2027 * node might optimize it away and return a real value.
2028 * This function must be called with an in-array of proper size.
2031 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2032 ir_node *prevBlock, *res, *phi0, *phi0_all;
2035 /* If this block has no value at pos create a Phi0 and remember it
2036 in graph_arr to break recursions.
2037 Else we must not set graph_arr, as a later value is already remembered there. */
2039 if (block->attr.block.graph_arr[pos] == NULL) {
2040 ir_graph *irg = current_ir_graph;
2042 if (block == get_irg_start_block(irg)) {
2043 /* Collapsing to Bad tarvals is not a good idea.
2044 So we call a user-supplied routine here that deals with this case as
2045 appropriate for the given language. Unfortunately the only help we can give
2046 here is the position.
2048 Even if all variables are defined before use, it can happen that
2049 we get to the start block, if a Cond has been replaced by a tuple
2050 (bad, jmp). In this case we call the function needlessly, eventually
2051 generating a non-existent error.
2052 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2055 if (default_initialize_local_variable != NULL) {
2056 ir_node *rem = get_cur_block();
2058 set_cur_block(block);
2059 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2063 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2064 /* We don't need to care about exception ops in the start block.
2065 There are none by definition. */
2066 return block->attr.block.graph_arr[pos];
/* Not the start block: place a Phi0 as recursion breaker. */
2068 phi0 = new_rd_Phi0(irg, block, mode);
2069 block->attr.block.graph_arr[pos] = phi0;
2070 #if PRECISE_EXC_CONTEXT
2071 if (get_opt_precise_exc_context()) {
2072 /* Set graph_arr for fragile ops. Also here we should break recursion.
2073 We could choose a cyclic path through a cfop. But the recursion would
2074 break at some point. */
2075 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2081 /* This loop goes to all predecessor blocks of the block the Phi node
2082 is in and there finds the operands of the Phi node by calling
2083 get_r_value_internal. */
2084 for (i = 1; i <= ins; ++i) {
2085 ir_node *cf_pred = block->in[i];
2086 ir_node *prevCfOp = skip_Proj(cf_pred);
2088 if (is_Bad(prevCfOp)) {
2089 /* In case a Cond has been optimized we would get right to the start block
2090 with an invalid definition. */
2091 nin[i-1] = new_Bad();
2094 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2096 if (!is_Bad(prevBlock)) {
2097 #if PRECISE_EXC_CONTEXT
/* On exception edges the value is taken from the fragile op's frag
   array instead of the block's regular graph_arr. */
2098 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2099 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2100 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2103 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2105 nin[i-1] = new_Bad();
2109 /* We want to pass the Phi0 node to the constructor: this finds additional
2110 optimization possibilities.
2111 The Phi0 node either is allocated in this function, or it comes from
2112 a former call to get_r_value_internal(). In this case we may not yet
2113 exchange phi0, as this is done in mature_immBlock(). */
2115 phi0_all = block->attr.block.graph_arr[pos];
2116 if (!(is_Phi(phi0_all) &&
2117 (get_irn_arity(phi0_all) == 0) &&
2118 (get_nodes_block(phi0_all) == block)))
2124 /* After collecting all predecessors into the array nin a new Phi node
2125 with these predecessors is created. This constructor contains an
2126 optimization: If all predecessors of the Phi node are identical it
2127 returns the only operand instead of a new Phi node. */
2128 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2130 /* In case we allocated a Phi0 node at the beginning of this procedure,
2131 we need to exchange this Phi0 with the real Phi. */
2133 exchange(phi0, res);
2134 block->attr.block.graph_arr[pos] = res;
2135 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2136 only an optimization. */
2143 * This function returns the last definition of a value. In case
2144 * this value was last defined in a previous block, Phi nodes are
2145 * inserted. If the part of the firm graph containing the definition
2146 * is not yet constructed, a dummy Phi node is returned.
2148 * @param block the current block
2149 * @param pos the value number of the value searched
2150 * @param mode the mode of this value (needed for Phi construction)
2153 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2155 /* There are 4 cases to treat.
2157 1. The block is not mature and we visit it the first time. We can not
2158 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2159 predecessors is returned. This node is added to the linked list (block
2160 attribute "phis") of the containing block to be completed when this block is
2161 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2164 2. The value is already known in this block, graph_arr[pos] is set and we
2165 visit the block the first time. We can return the value without
2166 creating any new nodes.
2168 3. The block is mature and we visit it the first time. A Phi node needs
2169 to be created (phi_merge). If the Phi is not needed, as all its
2170 operands are the same value reaching the block through different
2171 paths, it's optimized away and the value itself is returned.
2173 4. The block is mature, and we visit it the second time. Now two
2174 subcases are possible:
2175 * The value was computed completely the last time we were here. This
2176 is the case if there is no loop. We can return the proper value.
2177 * The recursion that visited this node and set the flag did not
2178 return yet. We are computing a value in a loop and need to
2179 break the recursion. This case only happens if we visited
2180 the same block with phi_merge before, which inserted a Phi0.
2181 So we return the Phi0.
2184 /* case 4 -- already visited. */
2185 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2186 /* As phi_merge allocates a Phi0 this value is always defined. Here
2187 is the critical difference of the two algorithms. */
2188 assert(block->attr.block.graph_arr[pos]);
2189 return block->attr.block.graph_arr[pos];
2192 /* visited the first time */
2193 set_irn_visited(block, get_irg_visited(current_ir_graph));
2195 /* Get the local valid value */
2196 res = block->attr.block.graph_arr[pos];
2198 /* case 2 -- If the value is actually computed, return it. */
2202 if (block->attr.block.is_matured) { /* case 3 */
2204 /* The Phi has the same amount of ins as the corresponding block. */
2205 int ins = get_irn_arity(block);
2207 NEW_ARR_A(ir_node *, nin, ins);
2209 /* Phi merge collects the predecessors and then creates a node. */
2210 res = phi_merge(block, pos, mode, nin, ins);
2212 } else { /* case 1 */
2213 /* The block is not mature, we don't know how many in's are needed. A Phi
2214 with zero predecessors is created. Such a Phi node is called Phi0
2215 node. The Phi0 is then added to the list of Phi0 nodes in this block
2216 to be matured by mature_immBlock later.
2217 The Phi0 has to remember the pos of its internal value. If the real
2218 Phi is computed, pos is used to update the array with the local
2220 res = new_rd_Phi0(current_ir_graph, block, mode);
2221 res->attr.phi.u.pos = pos;
2222 res->attr.phi.next = block->attr.block.phis;
2223 block->attr.block.phis = res;
2226 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2228 /* The local valid value is available now. */
2229 block->attr.block.graph_arr[pos] = res;
2232 } /* get_r_value_internal */
2234 /* ************************************************************************** */
2237 * Finalize a Block node, when all control flows are known.
2238 * Acceptable parameters are only Block nodes.
/* Matures the block: fixes its in-array, completes every queued Phi0 by
   running phi_merge for its position, then optimizes the block in place. */
2241 mature_immBlock(ir_node *block) {
2246 assert(is_Block(block));
2247 if (!get_Block_matured(block)) {
2248 ir_graph *irg = current_ir_graph;
2250 ins = ARR_LEN(block->in) - 1;
2251 /* Fix block parameters */
2252 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2254 /* An array for building the Phi nodes. */
2255 NEW_ARR_A(ir_node *, nin, ins);
2257 /* Traverse a chain of Phi nodes attached to this block and mature
2259 for (n = block->attr.block.phis; n; n = next) {
2260 inc_irg_visited(irg);
2261 next = n->attr.phi.next;
2262 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2265 block->attr.block.is_matured = 1;
2267 /* Now, as the block is a finished Firm node, we can optimize it.
2268 Since other nodes have been allocated since the block was created
2269 we can not free the node on the obstack. Therefore we have to call
2270 optimize_in_place().
2271 Unfortunately the optimization does not change a lot, as all allocated
2272 nodes refer to the unoptimized node.
2273 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2274 block = optimize_in_place_2(block);
2275 IRN_VRFY_IRG(block, irg);
2277 } /* mature_immBlock */
/* Simple "d" constructors: build in the current block of current_ir_graph
   by delegating to the matching new_bd_* constructor. */
2280 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2281 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
/* Constants are always placed in the start block, not the current block. */
2285 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2286 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2290 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2291 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2292 } /* new_d_Const_long */
2295 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2296 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2297 } /* new_d_Const_type */
2301 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2302 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2306 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2307 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* defaultProj mutates the Cond node: marks it fragmentary and records the
   default Proj number before creating the Proj. */
2311 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2314 assert(arg->op == op_Cond);
2315 arg->attr.cond.kind = fragmentary;
2316 arg->attr.cond.default_proj = max_proj;
2317 res = new_Proj(arg, mode_X, max_proj);
2319 } /* new_d_defaultProj */
/* Conv: last argument 0 = normal, 1 = strict conversion. */
2322 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2323 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2327 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2328 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2329 } /* new_d_strictConv */
2332 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2333 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2337 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2338 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2348 * Allocate a frag array for a node if the current graph state is phase_building.
2350 * @param irn the node for which the frag array should be allocated
2351 * @param op the opcode of the (original) node; if it does not match the opcode of irn,
2353 * @param frag_store the address of the frag store in irn attributes, if this
2354 * address contains a value != NULL, does nothing
2356 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2357 if (get_opt_precise_exc_context()) {
2358 if ((current_ir_graph->phase_state == phase_building) &&
2359 (get_irn_op(irn) == op) && /* Could be optimized away. */
2360 !*frag_store) /* Could be a cse where the arr is already set. */ {
2361 *frag_store = new_frag_arr(irn);
2364 } /* firm_alloc_frag_arr */
/* "d" constructors for fragile arithmetic ops: build the node in the
   current block and, under PRECISE_EXC_CONTEXT, allocate a frag array so
   exception successors see the correct memory state. */
2367 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2369 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2370 #if PRECISE_EXC_CONTEXT
2371 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2378 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2380 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2381 #if PRECISE_EXC_CONTEXT
2382 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2386 } /* new_d_DivMod */
2389 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2391 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2392 #if PRECISE_EXC_CONTEXT
2393 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2400 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2402 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2403 #if PRECISE_EXC_CONTEXT
/* NOTE(review): op_Div (not a distinct DivRL op) is passed here —
   presumably new_bd_DivRL builds an op_Div node; confirm against
   new_bd_DivRL before changing. */
2404 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2411 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2413 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2414 #if PRECISE_EXC_CONTEXT
2415 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
/* Non-fragile comparison and control-flow constructors. */
2434 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2435 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2439 new_d_Jmp(dbg_info *db) {
2440 return new_bd_Jmp(db, current_ir_graph->current_block);
2444 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2445 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2449 new_d_Cond(dbg_info *db, ir_node *c) {
2450 return new_bd_Cond(db, current_ir_graph->current_block, c);
2454 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2457 res = new_bd_Call(db, current_ir_graph->current_block,
2458 store, callee, arity, in, tp);
2459 #if PRECISE_EXC_CONTEXT
2460 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2467 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2468 return new_bd_Return(db, current_ir_graph->current_block,
2470 } /* new_d_Return */
2473 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2475 res = new_bd_Load(db, current_ir_graph->current_block,
2477 #if PRECISE_EXC_CONTEXT
2478 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2485 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2487 res = new_bd_Store(db, current_ir_graph->current_block,
2489 #if PRECISE_EXC_CONTEXT
2490 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2497 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2498 ir_where_alloc where) {
2500 res = new_bd_Alloc(db, current_ir_graph->current_block,
2501 store, size, alloc_type, where);
2502 #if PRECISE_EXC_CONTEXT
2503 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
2510 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2511 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2512 return new_bd_Free(db, current_ir_graph->current_block,
2513 store, ptr, size, free_type, where);
2517 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2518 /* GL: objptr was called frame before. Frame was a bad choice for the name
2519 as the operand could as well be a pointer to a dynamic object. */
2521 return new_bd_Sel(db, current_ir_graph->current_block,
2522 store, objptr, 0, NULL, ent);
2523 } /* new_d_simpleSel */
2526 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2527 return new_bd_Sel(db, current_ir_graph->current_block,
2528 store, objptr, n_index, index, sel);
2532 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2533 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2535 } /* new_d_SymConst_type */
2538 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2539 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2540 value, kind, firm_unknown_type);
2541 } /* new_d_SymConst */
2544 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2545 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2551 return _new_d_Bad();
2555 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2556 return new_bd_Confirm(db, current_ir_graph->current_block,
2558 } /* new_d_Confirm */
2561 new_d_Unknown(ir_mode *m) {
2562 return new_bd_Unknown(m);
2563 } /* new_d_Unknown */
2566 new_d_CallBegin(dbg_info *db, ir_node *call) {
2567 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2568 } /* new_d_CallBegin */
2571 new_d_EndReg(dbg_info *db) {
2572 return new_bd_EndReg(db, current_ir_graph->current_block);
2573 } /* new_d_EndReg */
2576 new_d_EndExcept(dbg_info *db) {
2577 return new_bd_EndExcept(db, current_ir_graph->current_block);
2578 } /* new_d_EndExcept */
2581 new_d_Break(dbg_info *db) {
2582 return new_bd_Break(db, current_ir_graph->current_block);
2586 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2587 return new_bd_Filter(db, current_ir_graph->current_block,
2589 } /* new_d_Filter */
2592 (new_d_NoMem)(void) {
2593 return _new_d_NoMem();
2597 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2598 ir_node *ir_true, ir_mode *mode) {
2599 return new_bd_Mux(db, current_ir_graph->current_block,
2600 sel, ir_false, ir_true, mode);
2603 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2604 ir_node *dst, ir_node *src, ir_type *data_type) {
2606 res = new_bd_CopyB(db, current_ir_graph->current_block,
2607 store, dst, src, data_type);
2608 #if PRECISE_EXC_CONTEXT
2609 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2615 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2616 return new_bd_InstOf(db, current_ir_graph->current_block,
2617 store, objptr, type);
2618 } /* new_d_InstOf */
2621 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2622 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2625 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2626 ir_node *idx, ir_node *lower, ir_node *upper) {
2628 res = new_bd_Bound(db, current_ir_graph->current_block,
2629 store, idx, lower, upper);
2630 #if PRECISE_EXC_CONTEXT
2631 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2637 new_d_Pin(dbg_info *db, ir_node *node) {
2638 return new_bd_Pin(db, current_ir_graph->current_block, node);
2642 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2643 int n_outs, ir_asm_constraint *outputs,
2644 int n_clobber, ident *clobber[], ident *asm_text) {
2645 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2648 /* ********************************************************************* */
2649 /* Comfortable interface with automatic Phi node construction. */
2650 /* (Also uses constructors of the other interfaces, except new_Block.)       */
2651 /* ********************************************************************* */
2653 /* Block construction */
2654 /* immature Block without predecessors */
/* Creates an immature Block (no predecessors yet; they are added later via
   add_immBlock_pred) and makes it the current block of the graph under
   construction.
   NOTE(review): this view is elided — the return-type line, the statement
   under "macroblock head" (presumably res->in[0] = res; — confirm upstream)
   and the final "return res;" are missing here; comments only were added. */
2656 new_d_immBlock(dbg_info *db) {
2659 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2660 /* creates a new dynamic in-array as length of in is -1 */
2661 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2662 current_ir_graph->current_block = res;
2664 /* macroblock head */
2667 res->attr.block.is_matured = 0;
2668 res->attr.block.is_dead = 0;
2669 res->attr.block.is_mb_head = 1;
2670 res->attr.block.has_label = 0;
2671 res->attr.block.irg = current_ir_graph;
2672 res->attr.block.backedge = NULL;
2673 res->attr.block.in_cg = NULL;
2674 res->attr.block.cg_backedge = NULL;
2675 res->attr.block.extblk = NULL;
2676 res->attr.block.region = NULL;
2677 res->attr.block.mb_depth = 0;
2678 res->attr.block.label = 0;
2680 set_Block_block_visited(res, 0);
2682 /* Create and initialize array for Phi-node construction. */
2683 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2684 current_ir_graph->n_loc)
2685 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2687 /* Immature block may not be optimized! */
2688 IRN_VRFY_IRG(res, current_ir_graph);
2691 } /* new_d_immBlock */
2694 new_immBlock(void) {
2695 return new_d_immBlock(NULL);
2696 } /* new_immBlock */
2698 /* immature PartBlock with its predecessors */
2700 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2701 ir_node *res = new_d_immBlock(db);
2702 ir_node *blk = get_nodes_block(pred_jmp);
2704 res->in[0] = blk->in[0];
2705 assert(res->in[0] != NULL);
2706 add_immBlock_pred(res, pred_jmp);
2708 res->attr.block.is_mb_head = 0;
2709 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2712 } /* new_d_immPartBlock */
2715 new_immPartBlock(ir_node *pred_jmp) {
2716 return new_d_immPartBlock(NULL, pred_jmp);
2717 } /* new_immPartBlock */
2719 /* add an edge to a jmp/control flow node */
2721 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2722 int n = ARR_LEN(block->in) - 1;
2724 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2725 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2726 assert(is_ir_node(jmp));
2728 ARR_APP1(ir_node *, block->in, jmp);
2730 hook_set_irn_n(block, n, jmp, NULL);
2731 } /* add_immBlock_pred */
2733 /* changing the current block */
2735 set_cur_block(ir_node *target) {
2736 current_ir_graph->current_block = target;
2737 } /* set_cur_block */
2739 /* ************************ */
2740 /* parameter administration */
2742 /* get a value from the parameter array from the current block by its index */
2744 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2745 ir_graph *irg = current_ir_graph;
2746 assert(get_irg_phase_state(irg) == phase_building);
2747 inc_irg_visited(irg);
2750 return get_r_value_internal(irg->current_block, pos + 1, mode);
2753 /* get a value from the parameter array from the current block by its index */
2755 get_value(int pos, ir_mode *mode) {
2756 return get_d_value(NULL, pos, mode);
2759 /* set a value at position pos in the parameter array from the current block */
2761 set_value(int pos, ir_node *value) {
2762 ir_graph *irg = current_ir_graph;
2763 assert(get_irg_phase_state(irg) == phase_building);
2764 assert(pos+1 < irg->n_loc);
2765 assert(is_ir_node(value));
2766 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2769 /* Find the value number for a node in the current block.*/
2771 find_value(ir_node *value) {
2773 ir_node *bl = current_ir_graph->current_block;
2775 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2776 if (bl->attr.block.graph_arr[i] == value)
2781 /* get the current store */
2784 ir_graph *irg = current_ir_graph;
2786 assert(get_irg_phase_state(irg) == phase_building);
2787 /* GL: one could call get_value instead */
2788 inc_irg_visited(irg);
2789 return get_r_value_internal(irg->current_block, 0, mode_M);
2792 /* set the current store: handles automatic Sync construction for Load nodes */
/* Stores the given memory state in slot 0 of the current block's graph_arr.
   If auto-create-sync is enabled and the new state is the Proj of a
   non-volatile Load, independent Loads are merged under a Sync so they do
   not serialize.
   NOTE(review): this view is elided — the return-type line, several closing
   braces, the statement(s) that update store in the Sync branch and the
   in[0]/in[1] assignments before new_Sync(2, in) are missing; comments only
   were added here. */
2794 set_store(ir_node *store) {
2795 ir_node *load, *pload, *pred, *in[2];
2797 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2798 /* Beware: due to dead code elimination, a store might become a Bad node even in
2799 the construction phase. */
2800 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2802 if (get_opt_auto_create_sync()) {
2803 /* handle non-volatile Load nodes by automatically creating Sync's */
2804 load = skip_Proj(store);
2805 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2806 pred = get_Load_mem(load);
2808 if (is_Sync(pred)) {
2809 /* a Load after a Sync: move it up */
2810 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2812 set_Load_mem(load, get_memop_mem(mem));
2813 add_Sync_pred(pred, store);
2816 pload = skip_Proj(pred);
2817 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2818 /* a Load after a Load: create a new Sync */
2819 set_Load_mem(load, get_Load_mem(pload));
2823 store = new_Sync(2, in);
2828 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2832 keep_alive(ir_node *ka) {
2833 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2836 /* --- Useful access routines --- */
2837 /* Returns the current block of the current graph. To set the current
2838 block use set_cur_block. */
2839 ir_node *get_cur_block(void) {
2840 return get_irg_current_block(current_ir_graph);
2841 } /* get_cur_block */
2843 /* Returns the frame type of the current graph */
2844 ir_type *get_cur_frame_type(void) {
2845 return get_irg_frame_type(current_ir_graph);
2846 } /* get_cur_frame_type */
2849 /* ********************************************************************* */
2852 /* call once for each run of the library */
2854 firm_init_cons(uninitialized_local_variable_func_t *func) {
2855 default_initialize_local_variable = func;
2856 } /* firm_init_cons */
2859 irp_finalize_cons(void) {
2861 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2862 irg_finalize_cons(get_irp_irg(i));
2864 irp->phase_state = phase_high;
2865 } /* irp_finalize_cons */
2868 ir_node *new_Block(int arity, ir_node **in) {
2869 return new_d_Block(NULL, arity, in);
2871 ir_node *new_Start(void) {
2872 return new_d_Start(NULL);
2874 ir_node *new_End(void) {
2875 return new_d_End(NULL);
2877 ir_node *new_Jmp(void) {
2878 return new_d_Jmp(NULL);
2880 ir_node *new_IJmp(ir_node *tgt) {
2881 return new_d_IJmp(NULL, tgt);
2883 ir_node *new_Cond(ir_node *c) {
2884 return new_d_Cond(NULL, c);
2886 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2887 return new_d_Return(NULL, store, arity, in);
2889 ir_node *new_Const(ir_mode *mode, tarval *con) {
2890 return new_d_Const(NULL, mode, con);
2893 ir_node *new_Const_long(ir_mode *mode, long value) {
2894 return new_d_Const_long(NULL, mode, value);
2897 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2898 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2901 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2902 return new_d_SymConst_type(NULL, mode, value, kind, type);
2904 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2905 return new_d_SymConst(NULL, mode, value, kind);
2907 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2908 return new_d_simpleSel(NULL, store, objptr, ent);
2910 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2912 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2914 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2916 return new_d_Call(NULL, store, callee, arity, in, tp);
2918 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2919 return new_d_Add(NULL, op1, op2, mode);
2921 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2922 return new_d_Sub(NULL, op1, op2, mode);
2924 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2925 return new_d_Minus(NULL, op, mode);
2927 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2928 return new_d_Mul(NULL, op1, op2, mode);
2930 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2931 return new_d_Mulh(NULL, op1, op2, mode);
2933 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2934 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2936 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2937 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2939 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2940 return new_d_Div(NULL, memop, op1, op2, mode, state);
2942 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2943 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
2945 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2946 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2948 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2949 return new_d_Abs(NULL, op, mode);
2951 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2952 return new_d_And(NULL, op1, op2, mode);
2954 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
2955 return new_d_Or(NULL, op1, op2, mode);
2957 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
2958 return new_d_Eor(NULL, op1, op2, mode);
2960 ir_node *new_Not(ir_node *op, ir_mode *mode) {
2961 return new_d_Not(NULL, op, mode);
2963 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
2964 return new_d_Shl(NULL, op, k, mode);
2966 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
2967 return new_d_Shr(NULL, op, k, mode);
2969 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
2970 return new_d_Shrs(NULL, op, k, mode);
2972 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
2973 return new_d_Rotl(NULL, op, k, mode);
2975 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
2976 return new_d_Carry(NULL, op1, op2, mode);
2978 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
2979 return new_d_Borrow(NULL, op1, op2, mode);
2981 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
2982 return new_d_Cmp(NULL, op1, op2);
2984 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
2985 return new_d_Conv(NULL, op, mode);
2987 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
2988 return new_d_strictConv(NULL, op, mode);
2990 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
2991 return new_d_Cast(NULL, op, to_tp);
2993 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
2994 return new_d_Phi(NULL, arity, in, mode);
2996 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
2997 return new_d_Load(NULL, store, addr, mode);
2999 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3000 return new_d_Store(NULL, store, addr, val);
3002 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3003 ir_where_alloc where) {
3004 return new_d_Alloc(NULL, store, size, alloc_type, where);
3006 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3007 ir_type *free_type, ir_where_alloc where) {
3008 return new_d_Free(NULL, store, ptr, size, free_type, where);
3010 ir_node *new_Sync(int arity, ir_node *in[]) {
3011 return new_d_Sync(NULL, arity, in);
3013 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3014 return new_d_Proj(NULL, arg, mode, proj);
3016 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3017 return new_d_defaultProj(NULL, arg, max_proj);
3019 ir_node *new_Tuple(int arity, ir_node **in) {
3020 return new_d_Tuple(NULL, arity, in);
3022 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3023 return new_d_Id(NULL, val, mode);
3025 ir_node *new_Bad(void) {
3028 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3029 return new_d_Confirm(NULL, val, bound, cmp);
3031 ir_node *new_Unknown(ir_mode *m) {
3032 return new_d_Unknown(m);
3034 ir_node *new_CallBegin(ir_node *callee) {
3035 return new_d_CallBegin(NULL, callee);
3037 ir_node *new_EndReg(void) {
3038 return new_d_EndReg(NULL);
3040 ir_node *new_EndExcept(void) {
3041 return new_d_EndExcept(NULL);
3043 ir_node *new_Break(void) {
3044 return new_d_Break(NULL);
3046 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3047 return new_d_Filter(NULL, arg, mode, proj);
3049 ir_node *new_NoMem(void) {
3050 return new_d_NoMem();
3052 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3053 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3055 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3056 return new_d_CopyB(NULL, store, dst, src, data_type);
3058 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3059 return new_d_InstOf(NULL, store, objptr, ent);
3061 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3062 return new_d_Raise(NULL, store, obj);
3064 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3065 return new_d_Bound(NULL, store, idx, lower, upper);
3067 ir_node *new_Pin(ir_node *node) {
3068 return new_d_Pin(NULL, node);
3070 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3071 int n_outs, ir_asm_constraint *outputs,
3072 int n_clobber, ident *clobber[], ident *asm_text) {
3073 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3076 /* create a new anchor node */
3077 ir_node *new_Anchor(ir_graph *irg) {
3078 ir_node *in[anchor_last];
3079 memset(in, 0, sizeof(in));
3080 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);