2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
60 /* when we need verifying */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
68 * Language dependent variable initialization callback.
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for a divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = state; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for a divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
161 current_ir_graph = rem; \
165 /* creates a d constructor for a binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
181  * Constructs a Block with a fixed number of predecessors.
182  * Does not set current_block. Cannot be used with automatic
183  * Phi node construction.
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
/* A Block has no enclosing block: the block argument is NULL. */
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
/* Initialize every field of the block attribute explicitly; a fresh
   block is live, its own macroblock head, unlabeled and unvisited. */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.has_label = 0;
198 res->attr.block.irg = irg;
/* One backedge flag per predecessor, allocated on the graph obstack. */
199 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
200 res->attr.block.in_cg = NULL;
201 res->attr.block.cg_backedge = NULL;
202 res->attr.block.extblk = NULL;
203 res->attr.block.mb_depth = 0;
204 res->attr.block.label = 0;
/* All predecessors were given up front, so the block is born mature. */
206 set_Block_matured(res, 1);
207 set_Block_block_visited(res, 0);
209 IRN_VRFY_IRG(res, irg);
214 new_bd_Start(dbg_info *db, ir_node *block) {
216 ir_graph *irg = current_ir_graph;
218 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
220 IRN_VRFY_IRG(res, irg);
225 new_bd_End(dbg_info *db, ir_node *block) {
227 ir_graph *irg = current_ir_graph;
229 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
231 IRN_VRFY_IRG(res, irg);
236  * Creates a Phi node with all predecessors. Calling this constructor
237  * is only allowed if the corresponding block is mature.
240 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
242 ir_graph *irg = current_ir_graph;
246 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block already matured, its in-array is fixed, so the Phi's
   arity must agree with the block's predecessor count. */
248 if (get_Block_matured(block))
249 assert(get_irn_arity(block) == arity);
251 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
253 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the operands for Unknown inputs; such a Phi must not be fed to
   the optimizer (see the has_unknown guard below). */
255 for (i = arity - 1; i >= 0; --i)
256 if (get_irn_op(in[i]) == op_Unknown) {
261 if (!has_unknown) res = optimize_node(res);
262 IRN_VRFY_IRG(res, irg);
264 /* Memory Phis in endless loops must be kept alive.
265 As we can't distinguish these easily we keep all of them alive. */
/* optimize_node may have folded the Phi away, hence the op_Phi re-check. */
266 if ((res->op == op_Phi) && (mode == mode_M))
267 add_End_keepalive(get_irg_end(irg), res);
/* Constructs a typed Const node holding tarval con.
   NOTE(review): the block parameter is deliberately ignored — Const
   nodes are always placed in the graph's start block (line 277). */
272 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
274 ir_graph *irg = current_ir_graph;
277 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
278 res->attr.con.tv = con;
279 set_Const_type(res, tp); /* Call method because of complex assertion. */
280 res = optimize_node (res);
/* CSE in optimize_node may return an existing Const; it must carry the
   same type or something went wrong. */
281 assert(get_Const_type(res) == tp);
282 IRN_VRFY_IRG(res, irg);
285 } /* new_bd_Const_type */
288 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
289 ir_graph *irg = current_ir_graph;
291 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
295 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
296 ir_graph *irg = current_ir_graph;
298 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
299 } /* new_bd_Const_long */
302 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
304 ir_graph *irg = current_ir_graph;
306 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
307 res = optimize_node(res);
308 IRN_VRFY_IRG(res, irg);
313 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
316 ir_graph *irg = current_ir_graph;
318 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
319 res->attr.proj = proj;
322 assert(get_Proj_pred(res));
323 assert(get_nodes_block(get_Proj_pred(res)));
325 res = optimize_node(res);
327 IRN_VRFY_IRG(res, irg);
332 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
335 ir_graph *irg = current_ir_graph;
337 assert(arg->op == op_Cond);
338 arg->attr.cond.kind = fragmentary;
339 arg->attr.cond.default_proj = max_proj;
340 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
342 } /* new_bd_defaultProj */
345 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
347 ir_graph *irg = current_ir_graph;
349 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
350 res->attr.conv.strict = strict_flag;
351 res = optimize_node(res);
352 IRN_VRFY_IRG(res, irg);
357 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
359 ir_graph *irg = current_ir_graph;
361 assert(is_atomic_type(to_tp));
363 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
364 res->attr.cast.totype = to_tp;
365 res = optimize_node(res);
366 IRN_VRFY_IRG(res, irg);
371 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
373 ir_graph *irg = current_ir_graph;
375 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
376 res = optimize_node (res);
377 IRN_VRFY_IRG(res, irg);
403 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
406 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
410 res = optimize_node(res);
411 IRN_VRFY_IRG(res, irg);
416 new_bd_Jmp(dbg_info *db, ir_node *block) {
418 ir_graph *irg = current_ir_graph;
420 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
421 res = optimize_node(res);
422 IRN_VRFY_IRG(res, irg);
427 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
429 ir_graph *irg = current_ir_graph;
431 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
432 res = optimize_node(res);
433 IRN_VRFY_IRG(res, irg);
438 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
443 res->attr.cond.kind = dense;
444 res->attr.cond.default_proj = 0;
445 res->attr.cond.pred = COND_JMP_PRED_NONE;
446 res = optimize_node(res);
447 IRN_VRFY_IRG(res, irg);
/* Constructs a Call node: inputs are [store, callee, arguments...],
   so r_arity = arity + 2 and the user arguments start at index 2. */
452 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
453 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
457 ir_graph *irg = current_ir_graph;
460 NEW_ARR_A(ir_node *, r_in, r_arity);
463 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
465 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* A Call's type must be a method type (or the unknown type). */
467 assert((get_unknown_type() == tp) || is_Method_type(tp));
468 set_Call_type(res, tp);
/* Calls may raise exceptions, so they are pinned by default;
   no callee set is known yet. */
469 res->attr.call.exc.pin_state = op_pin_state_pinned;
470 res->attr.call.callee_arr = NULL;
471 res = optimize_node(res);
472 IRN_VRFY_IRG(res, irg);
477 new_bd_Return(dbg_info *db, ir_node *block,
478 ir_node *store, int arity, ir_node **in) {
482 ir_graph *irg = current_ir_graph;
485 NEW_ARR_A (ir_node *, r_in, r_arity);
487 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
488 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
489 res = optimize_node(res);
490 IRN_VRFY_IRG(res, irg);
492 } /* new_bd_Return */
495 new_bd_Load(dbg_info *db, ir_node *block,
496 ir_node *store, ir_node *adr, ir_mode *mode) {
499 ir_graph *irg = current_ir_graph;
503 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
504 res->attr.load.exc.pin_state = op_pin_state_pinned;
505 res->attr.load.load_mode = mode;
506 res->attr.load.volatility = volatility_non_volatile;
507 res->attr.load.aligned = align_is_aligned;
508 res = optimize_node(res);
509 IRN_VRFY_IRG(res, irg);
514 new_bd_Store(dbg_info *db, ir_node *block,
515 ir_node *store, ir_node *adr, ir_node *val) {
518 ir_graph *irg = current_ir_graph;
523 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
524 res->attr.store.exc.pin_state = op_pin_state_pinned;
525 res->attr.store.volatility = volatility_non_volatile;
526 res->attr.store.aligned = align_is_aligned;
527 res = optimize_node(res);
528 IRN_VRFY_IRG(res, irg);
533 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
534 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
537 ir_graph *irg = current_ir_graph;
541 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
542 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
543 res->attr.alloc.where = where;
544 res->attr.alloc.type = alloc_type;
545 res = optimize_node(res);
546 IRN_VRFY_IRG(res, irg);
551 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
552 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
555 ir_graph *irg = current_ir_graph;
560 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
561 res->attr.free.where = where;
562 res->attr.free.type = free_type;
563 res = optimize_node(res);
564 IRN_VRFY_IRG(res, irg);
/* Constructs a Sel node selecting entity ent from objptr.
   Inputs are [store, objptr, indices...]; indices start at slot 2. */
569 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
570 int arity, ir_node **in, ir_entity *ent) {
574 ir_graph *irg = current_ir_graph;
/* Selecting a method yields a code pointer, anything else a data pointer. */
575 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
577 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
580 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
583 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
585 * Sel's can select functions which should be of mode mode_P_code.
587 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
588 res->attr.sel.ent = ent;
589 res = optimize_node(res);
590 IRN_VRFY_IRG(res, irg);
/* Constructs a SymConst node of the given kind (address, offset, size,
   alignment, ...) carrying symbol value and type tp. No data inputs. */
595 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
596 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
597 ir_graph *irg = current_ir_graph;
598 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
600 res->attr.symc.num = symkind;
601 res->attr.symc.sym = value;
602 res->attr.symc.tp = tp;
604 res = optimize_node(res);
605 IRN_VRFY_IRG(res, irg);
607 } /* new_bd_SymConst_type */
610 new_bd_Sync(dbg_info *db, ir_node *block) {
612 ir_graph *irg = current_ir_graph;
614 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
615 /* no need to call optimize node here, Sync are always created with no predecessors */
616 IRN_VRFY_IRG(res, irg);
621 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
622 ir_node *in[2], *res;
623 ir_graph *irg = current_ir_graph;
627 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
628 res->attr.confirm.cmp = cmp;
629 res = optimize_node(res);
630 IRN_VRFY_IRG(res, irg);
632 } /* new_bd_Confirm */
635 new_bd_Unknown(ir_mode *m) {
637 ir_graph *irg = current_ir_graph;
639 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
640 res = optimize_node(res);
642 } /* new_bd_Unknown */
645 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
648 ir_graph *irg = current_ir_graph;
650 in[0] = get_Call_ptr(call);
651 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
652 /* res->attr.callbegin.irg = irg; */
653 res->attr.callbegin.call = call;
654 res = optimize_node(res);
655 IRN_VRFY_IRG(res, irg);
657 } /* new_bd_CallBegin */
660 new_bd_EndReg(dbg_info *db, ir_node *block) {
662 ir_graph *irg = current_ir_graph;
664 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
665 set_irg_end_reg(irg, res);
666 IRN_VRFY_IRG(res, irg);
668 } /* new_bd_EndReg */
671 new_bd_EndExcept(dbg_info *db, ir_node *block) {
673 ir_graph *irg = current_ir_graph;
675 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
676 set_irg_end_except(irg, res);
677 IRN_VRFY_IRG (res, irg);
679 } /* new_bd_EndExcept */
682 new_bd_Break(dbg_info *db, ir_node *block) {
684 ir_graph *irg = current_ir_graph;
686 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
687 res = optimize_node(res);
688 IRN_VRFY_IRG(res, irg);
693 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
696 ir_graph *irg = current_ir_graph;
698 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
699 res->attr.filter.proj = proj;
700 res->attr.filter.in_cg = NULL;
701 res->attr.filter.backedge = NULL;
704 assert(get_Proj_pred(res));
705 assert(get_nodes_block(get_Proj_pred(res)));
707 res = optimize_node(res);
708 IRN_VRFY_IRG(res, irg);
710 } /* new_bd_Filter */
713 new_bd_Mux(dbg_info *db, ir_node *block,
714 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
717 ir_graph *irg = current_ir_graph;
723 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
726 res = optimize_node(res);
727 IRN_VRFY_IRG(res, irg);
732 new_bd_Psi(dbg_info *db, ir_node *block,
733 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
736 ir_graph *irg = current_ir_graph;
739 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
741 for (i = 0; i < arity; ++i) {
743 in[2 * i + 1] = vals[i];
747 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
750 res = optimize_node(res);
751 IRN_VRFY_IRG(res, irg);
756 new_bd_CopyB(dbg_info *db, ir_node *block,
757 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
760 ir_graph *irg = current_ir_graph;
766 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
768 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
769 res->attr.copyb.data_type = data_type;
770 res = optimize_node(res);
771 IRN_VRFY_IRG(res, irg);
776 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
777 ir_node *objptr, ir_type *type) {
780 ir_graph *irg = current_ir_graph;
784 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
785 res->attr.instof.type = type;
786 res = optimize_node(res);
787 IRN_VRFY_IRG(res, irg);
789 } /* new_bd_InstOf */
792 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
795 ir_graph *irg = current_ir_graph;
799 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
800 res = optimize_node(res);
801 IRN_VRFY_IRG(res, irg);
806 new_bd_Bound(dbg_info *db, ir_node *block,
807 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
810 ir_graph *irg = current_ir_graph;
816 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
817 res->attr.bound.exc.pin_state = op_pin_state_pinned;
818 res = optimize_node(res);
819 IRN_VRFY_IRG(res, irg);
824 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
826 ir_graph *irg = current_ir_graph;
828 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
829 res = optimize_node(res);
830 IRN_VRFY_IRG(res, irg);
/* Constructs an ASM node (inline assembler). Constraint and clobber
   arrays are copied onto the graph obstack, so the caller keeps
   ownership of the argument arrays. */
835 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
836 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
838 ir_graph *irg = current_ir_graph;
841 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
/* Inline assembler may have arbitrary side effects: keep it pinned. */
842 res->attr.assem.pin_state = op_pin_state_pinned;
843 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
844 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
845 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
846 res->attr.assem.asm_text = asm_text;
/* Deep-copy the constraint descriptors into the obstack arrays. */
848 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
849 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
851 res = optimize_node(res);
852 IRN_VRFY_IRG(res, irg);
856 /* --------------------------------------------- */
857 /* private interfaces, for professional use only */
858 /* --------------------------------------------- */
860 /* Constructs a Block with a fixed number of predecessors.
861 Does not set current_block. Can not be used with automatic
862 Phi node construction. */
864 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
865 ir_graph *rem = current_ir_graph;
868 current_ir_graph = irg;
869 res = new_bd_Block(db, arity, in);
870 current_ir_graph = rem;
876 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
877 ir_graph *rem = current_ir_graph;
880 current_ir_graph = irg;
881 res = new_bd_Start(db, block);
882 current_ir_graph = rem;
888 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
890 ir_graph *rem = current_ir_graph;
892 current_ir_graph = irg;
893 res = new_bd_End(db, block);
894 current_ir_graph = rem;
899 /* Creates a Phi node with all predecessors. Calling this constructor
900 is only allowed if the corresponding block is mature. */
902 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
904 ir_graph *rem = current_ir_graph;
906 current_ir_graph = irg;
907 res = new_bd_Phi(db, block,arity, in, mode);
908 current_ir_graph = rem;
914 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
916 ir_graph *rem = current_ir_graph;
918 current_ir_graph = irg;
919 res = new_bd_Const_type(db, block, mode, con, tp);
920 current_ir_graph = rem;
923 } /* new_rd_Const_type */
926 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
928 ir_graph *rem = current_ir_graph;
930 current_ir_graph = irg;
931 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
932 current_ir_graph = rem;
938 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
939 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
940 } /* new_rd_Const_long */
943 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
945 ir_graph *rem = current_ir_graph;
947 current_ir_graph = irg;
948 res = new_bd_Id(db, block, val, mode);
949 current_ir_graph = rem;
955 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
958 ir_graph *rem = current_ir_graph;
960 current_ir_graph = irg;
961 res = new_bd_Proj(db, block, arg, mode, proj);
962 current_ir_graph = rem;
968 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
971 ir_graph *rem = current_ir_graph;
973 current_ir_graph = irg;
974 res = new_bd_defaultProj(db, block, arg, max_proj);
975 current_ir_graph = rem;
978 } /* new_rd_defaultProj */
981 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
983 ir_graph *rem = current_ir_graph;
985 current_ir_graph = irg;
986 res = new_bd_Conv(db, block, op, mode, 0);
987 current_ir_graph = rem;
993 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
995 ir_graph *rem = current_ir_graph;
997 current_ir_graph = irg;
998 res = new_bd_Cast(db, block, op, to_tp);
999 current_ir_graph = rem;
1005 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1007 ir_graph *rem = current_ir_graph;
1009 current_ir_graph = irg;
1010 res = new_bd_Tuple(db, block, arity, in);
1011 current_ir_graph = rem;
1014 } /* new_rd_Tuple */
1022 NEW_RD_DIVOP(DivMod)
1035 NEW_RD_BINOP(Borrow)
1038 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1039 ir_node *op1, ir_node *op2) {
1041 ir_graph *rem = current_ir_graph;
1043 current_ir_graph = irg;
1044 res = new_bd_Cmp(db, block, op1, op2);
1045 current_ir_graph = rem;
1051 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1053 ir_graph *rem = current_ir_graph;
1055 current_ir_graph = irg;
1056 res = new_bd_Jmp(db, block);
1057 current_ir_graph = rem;
1063 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1065 ir_graph *rem = current_ir_graph;
1067 current_ir_graph = irg;
1068 res = new_bd_IJmp(db, block, tgt);
1069 current_ir_graph = rem;
1075 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1077 ir_graph *rem = current_ir_graph;
1079 current_ir_graph = irg;
1080 res = new_bd_Cond(db, block, c);
1081 current_ir_graph = rem;
1087 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1088 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1090 ir_graph *rem = current_ir_graph;
1092 current_ir_graph = irg;
1093 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1094 current_ir_graph = rem;
1100 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1101 ir_node *store, int arity, ir_node **in) {
1103 ir_graph *rem = current_ir_graph;
1105 current_ir_graph = irg;
1106 res = new_bd_Return(db, block, store, arity, in);
1107 current_ir_graph = rem;
1110 } /* new_rd_Return */
1113 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1114 ir_node *store, ir_node *adr, ir_mode *mode) {
1116 ir_graph *rem = current_ir_graph;
1118 current_ir_graph = irg;
1119 res = new_bd_Load(db, block, store, adr, mode);
1120 current_ir_graph = rem;
1126 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1127 ir_node *store, ir_node *adr, ir_node *val) {
1129 ir_graph *rem = current_ir_graph;
1131 current_ir_graph = irg;
1132 res = new_bd_Store(db, block, store, adr, val);
1133 current_ir_graph = rem;
1136 } /* new_rd_Store */
1139 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1140 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1142 ir_graph *rem = current_ir_graph;
1144 current_ir_graph = irg;
1145 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1146 current_ir_graph = rem;
1149 } /* new_rd_Alloc */
1152 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1153 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1155 ir_graph *rem = current_ir_graph;
1157 current_ir_graph = irg;
1158 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1159 current_ir_graph = rem;
1165 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1166 ir_node *store, ir_node *objptr, ir_entity *ent) {
1168 ir_graph *rem = current_ir_graph;
1170 current_ir_graph = irg;
1171 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1172 current_ir_graph = rem;
1175 } /* new_rd_simpleSel */
1178 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1179 int arity, ir_node **in, ir_entity *ent) {
1181 ir_graph *rem = current_ir_graph;
1183 current_ir_graph = irg;
1184 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1185 current_ir_graph = rem;
1191 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1192 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1194 ir_graph *rem = current_ir_graph;
1196 current_ir_graph = irg;
1197 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1198 current_ir_graph = rem;
1201 } /* new_rd_SymConst_type */
1204 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1205 symconst_symbol value, symconst_kind symkind) {
1206 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1207 } /* new_rd_SymConst */
1209 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1210 symconst_symbol sym;
1211 sym.entity_p = symbol;
1212 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1213 } /* new_rd_SymConst_addr_ent */
1215 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1216 symconst_symbol sym;
1217 sym.entity_p = symbol;
1218 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1219 } /* new_rd_SymConst_ofs_ent */
1221 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1222 symconst_symbol sym;
1223 sym.ident_p = symbol;
1224 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1225 } /* new_rd_SymConst_addr_name */
1227 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1228 symconst_symbol sym;
1229 sym.type_p = symbol;
1230 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1231 } /* new_rd_SymConst_type_tag */
1233 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1234 symconst_symbol sym;
1235 sym.type_p = symbol;
1236 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1237 } /* new_rd_SymConst_size */
1239 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1240 symconst_symbol sym;
1241 sym.type_p = symbol;
1242 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1243 } /* new_rd_SymConst_align */
/* Constructs a Sync node in graph irg. The Sync is created empty
   (new_bd_Sync takes no predecessors) and its arity inputs are then
   appended one by one via add_Sync_pred. */
1246 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1248 ir_graph *rem = current_ir_graph;
/* Temporarily switch current_ir_graph, as the bd constructor relies on it. */
1251 current_ir_graph = irg;
1252 res = new_bd_Sync(db, block);
1253 current_ir_graph = rem;
1255 for (i = 0; i < arity; ++i)
1256 add_Sync_pred(res, in[i]);
1262 new_rd_Bad(ir_graph *irg) {
1263 return get_irg_bad(irg);
1267 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1269 ir_graph *rem = current_ir_graph;
1271 current_ir_graph = irg;
1272 res = new_bd_Confirm(db, block, val, bound, cmp);
1273 current_ir_graph = rem;
1276 } /* new_rd_Confirm */
1279 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1281 ir_graph *rem = current_ir_graph;
1283 current_ir_graph = irg;
1284 res = new_bd_Unknown(m);
1285 current_ir_graph = rem;
1288 } /* new_rd_Unknown */
1291 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1293 ir_graph *rem = current_ir_graph;
1295 current_ir_graph = irg;
1296 res = new_bd_CallBegin(db, block, call);
1297 current_ir_graph = rem;
1300 } /* new_rd_CallBegin */
1303 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1306 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1307 set_irg_end_reg(irg, res);
1308 IRN_VRFY_IRG(res, irg);
1310 } /* new_rd_EndReg */
1313 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1316 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1317 set_irg_end_except(irg, res);
1318 IRN_VRFY_IRG (res, irg);
1320 } /* new_rd_EndExcept */
1323 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1325 ir_graph *rem = current_ir_graph;
1327 current_ir_graph = irg;
1328 res = new_bd_Break(db, block);
1329 current_ir_graph = rem;
1332 } /* new_rd_Break */
1335 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1338 ir_graph *rem = current_ir_graph;
1340 current_ir_graph = irg;
1341 res = new_bd_Filter(db, block, arg, mode, proj);
1342 current_ir_graph = rem;
1345 } /* new_rd_Filter */
1348 new_rd_NoMem(ir_graph *irg) {
1349 return get_irg_no_mem(irg);
1350 } /* new_rd_NoMem */
1353 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1354 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1356 ir_graph *rem = current_ir_graph;
1358 current_ir_graph = irg;
1359 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1360 current_ir_graph = rem;
1366 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1367 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1369 ir_graph *rem = current_ir_graph;
1371 current_ir_graph = irg;
1372 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1373 current_ir_graph = rem;
1378 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1379 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1381 ir_graph *rem = current_ir_graph;
1383 current_ir_graph = irg;
1384 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1385 current_ir_graph = rem;
1388 } /* new_rd_CopyB */
1391 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1392 ir_node *objptr, ir_type *type) {
1394 ir_graph *rem = current_ir_graph;
1396 current_ir_graph = irg;
1397 res = new_bd_InstOf(db, block, store, objptr, type);
1398 current_ir_graph = rem;
1401 } /* new_rd_InstOf */
1404 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1406 ir_graph *rem = current_ir_graph;
1408 current_ir_graph = irg;
1409 res = new_bd_Raise(db, block, store, obj);
1410 current_ir_graph = rem;
1413 } /* new_rd_Raise */
1415 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1416 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1418 ir_graph *rem = current_ir_graph;
1420 current_ir_graph = irg;
1421 res = new_bd_Bound(db, block, store, idx, lower, upper);
1422 current_ir_graph = rem;
1425 } /* new_rd_Bound */
1427 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1429 ir_graph *rem = current_ir_graph;
1431 current_ir_graph = irg;
1432 res = new_bd_Pin(db, block, node);
1433 current_ir_graph = rem;
1438 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1439 int arity, ir_node *in[], ir_asm_constraint *inputs,
1440 int n_outs, ir_asm_constraint *outputs,
1441 int n_clobber, ident *clobber[], ident *asm_text) {
1443 ir_graph *rem = current_ir_graph;
1445 current_ir_graph = irg;
1446 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1447 current_ir_graph = rem;
1453 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1454 return new_rd_Block(NULL, irg, arity, in);
1456 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1457 return new_rd_Start(NULL, irg, block);
1459 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1460 return new_rd_End(NULL, irg, block);
1462 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1463 return new_rd_Jmp(NULL, irg, block);
1465 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1466 return new_rd_IJmp(NULL, irg, block, tgt);
1468 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1469 return new_rd_Cond(NULL, irg, block, c);
1471 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1472 ir_node *store, int arity, ir_node **in) {
1473 return new_rd_Return(NULL, irg, block, store, arity, in);
1475 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1476 ir_mode *mode, tarval *con) {
1477 return new_rd_Const(NULL, irg, block, mode, con);
1479 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1480 ir_mode *mode, long value) {
1481 return new_rd_Const_long(NULL, irg, block, mode, value);
1483 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1484 ir_mode *mode, tarval *con, ir_type *tp) {
1485 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1487 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1488 symconst_symbol value, symconst_kind symkind) {
1489 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
1491 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1492 ir_node *objptr, ir_entity *ent) {
1493 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1495 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1496 ir_node *objptr, int n_index, ir_node **index,
1498 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1500 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1501 ir_node *callee, int arity, ir_node **in,
1503 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1505 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1506 ir_node *op1, ir_node *op2, ir_mode *mode) {
1507 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1509 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1510 ir_node *op1, ir_node *op2, ir_mode *mode) {
1511 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1513 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1514 ir_node *op, ir_mode *mode) {
1515 return new_rd_Minus(NULL, irg, block, op, mode);
1517 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1518 ir_node *op1, ir_node *op2, ir_mode *mode) {
1519 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1521 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1522 ir_node *op1, ir_node *op2, ir_mode *mode) {
1523 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1525 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1526 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1527 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1529 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1530 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1531 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1533 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1534 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1535 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1537 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1538 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1539 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1541 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1542 ir_node *op, ir_mode *mode) {
1543 return new_rd_Abs(NULL, irg, block, op, mode);
1545 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1546 ir_node *op1, ir_node *op2, ir_mode *mode) {
1547 return new_rd_And(NULL, irg, block, op1, op2, mode);
1549 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1550 ir_node *op1, ir_node *op2, ir_mode *mode) {
1551 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1553 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1554 ir_node *op1, ir_node *op2, ir_mode *mode) {
1555 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1557 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1558 ir_node *op, ir_mode *mode) {
1559 return new_rd_Not(NULL, irg, block, op, mode);
1561 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1562 ir_node *op, ir_node *k, ir_mode *mode) {
1563 return new_rd_Shl(NULL, irg, block, op, k, mode);
1565 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1566 ir_node *op, ir_node *k, ir_mode *mode) {
1567 return new_rd_Shr(NULL, irg, block, op, k, mode);
1569 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1570 ir_node *op, ir_node *k, ir_mode *mode) {
1571 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1573 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1574 ir_node *op, ir_node *k, ir_mode *mode) {
1575 return new_rd_Rot(NULL, irg, block, op, k, mode);
1577 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1578 ir_node *op, ir_node *k, ir_mode *mode) {
1579 return new_rd_Carry(NULL, irg, block, op, k, mode);
1581 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1582 ir_node *op, ir_node *k, ir_mode *mode) {
1583 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1585 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1586 ir_node *op1, ir_node *op2) {
1587 return new_rd_Cmp(NULL, irg, block, op1, op2);
1589 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1590 ir_node *op, ir_mode *mode) {
1591 return new_rd_Conv(NULL, irg, block, op, mode);
1593 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1594 return new_rd_Cast(NULL, irg, block, op, to_tp);
1596 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1597 ir_node **in, ir_mode *mode) {
1598 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1600 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1601 ir_node *store, ir_node *adr, ir_mode *mode) {
1602 return new_rd_Load(NULL, irg, block, store, adr, mode);
1604 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1605 ir_node *store, ir_node *adr, ir_node *val) {
1606 return new_rd_Store(NULL, irg, block, store, adr, val);
1608 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1609 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1610 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1612 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1613 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1614 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1616 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1617 return new_rd_Sync(NULL, irg, block, arity, in);
1619 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1620 ir_mode *mode, long proj) {
1621 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1623 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1625 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1627 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1628 int arity, ir_node **in) {
1629 return new_rd_Tuple(NULL, irg, block, arity, in );
1631 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1632 ir_node *val, ir_mode *mode) {
1633 return new_rd_Id(NULL, irg, block, val, mode);
1635 ir_node *new_r_Bad(ir_graph *irg) {
1636 return new_rd_Bad(irg);
1638 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1639 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1641 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1642 return new_rd_Unknown(irg, m);
1644 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1645 return new_rd_CallBegin(NULL, irg, block, callee);
1647 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1648 return new_rd_EndReg(NULL, irg, block);
1650 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1651 return new_rd_EndExcept(NULL, irg, block);
1653 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1654 return new_rd_Break(NULL, irg, block);
1656 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1657 ir_mode *mode, long proj) {
1658 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1660 ir_node *new_r_NoMem(ir_graph *irg) {
1661 return new_rd_NoMem(irg);
1663 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1664 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1665 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1667 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1668 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1669 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1671 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1672 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1673 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1675 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1677 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1679 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1680 ir_node *store, ir_node *obj) {
1681 return new_rd_Raise(NULL, irg, block, store, obj);
1683 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1684 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1685 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1687 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1688 return new_rd_Pin(NULL, irg, block, node);
1690 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1691 int arity, ir_node *in[], ir_asm_constraint *inputs,
1692 int n_outs, ir_asm_constraint *outputs,
1693 int n_clobber, ident *clobber[], ident *asm_text) {
1694 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1697 /** ********************/
1698 /** public interfaces */
1699 /** construction tools */
1703 * - create a new Start node in the current block
1705 * @return s - pointer to the created Start node
1710 new_d_Start(dbg_info *db) {
1713 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1714 op_Start, mode_T, 0, NULL);
1716 res = optimize_node(res);
1717 IRN_VRFY_IRG(res, current_ir_graph);
1722 new_d_End(dbg_info *db) {
1724 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1725 op_End, mode_X, -1, NULL);
1726 res = optimize_node(res);
1727 IRN_VRFY_IRG(res, current_ir_graph);
1732 /* Constructs a Block with a fixed number of predecessors.
1733 Does set current_block. Can be used with automatic Phi
1734 node construction. */
1736 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1739 int has_unknown = 0;
1741 res = new_bd_Block(db, arity, in);
1743 /* Create and initialize array for Phi-node construction. */
1744 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1745 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1746 current_ir_graph->n_loc);
1747 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1750 for (i = arity-1; i >= 0; i--)
1751 if (get_irn_op(in[i]) == op_Unknown) {
1756 if (!has_unknown) res = optimize_node(res);
1757 current_ir_graph->current_block = res;
1759 IRN_VRFY_IRG(res, current_ir_graph);
1764 /* ***********************************************************************/
1765 /* Methods necessary for automatic Phi node creation */
1767 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1768 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1769 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1770 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1772 Call Graph: ( A ---> B == A "calls" B)
1774 get_value mature_immBlock
1782 get_r_value_internal |
1786 new_rd_Phi0 new_rd_Phi_in
1788 * *************************************************************************** */
1790 /** Creates a Phi node with 0 predecessors. */
1791 static INLINE ir_node *
1792 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1795 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1796 IRN_VRFY_IRG(res, irg);
/**
 * Creates a Phi node with `ins` predecessors `in` in `block`.  If the node
 * turns out to merge only a single defined value it is discarded again and
 * that value is returned instead (on-the-fly SSA optimization).
 *
 * @param phi0  a 0-predecessor placeholder Phi that this node will replace,
 *              or NULL; used to resolve self-references.
 *
 * NOTE(review): several lines of this function (loop braces, the
 * single-predecessor return path) appear to be missing from this copy of
 * the file; the visible code is preserved unchanged.
 */
static INLINE ir_node *
new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
              ir_node **in, int ins, ir_node *phi0) {
	ir_node *res, *known;

	/* Allocate a new node on the obstack.  The allocation copies the in
	   array into the node. */
	res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
	res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

	/* This loop checks whether the Phi has more than one predecessor.
	   If so, it is a real Phi node and we break the loop.  Else the
	   Phi node merges the same definition on several paths and therefore
	   is not needed.  Don't consider Bad nodes! */
	for (i=0; i < ins; ++i)
		in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */

	/* Optimize self referencing Phis:  We can't detect them yet properly, as
	   they still refer to the Phi0 they will replace.  So replace right now. */
	if (phi0 && in[i] == phi0) in[i] = res;

	/* Predecessors equal to the Phi itself, the already-known value, or Bad
	   do not make this a "real" Phi. */
	if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;

	/* i==ins: there is at most one predecessor, we don't need a phi node. */
	/* Undo the speculative allocation of res above. */
	edges_node_deleted(res, current_ir_graph);
	obstack_free (current_ir_graph->obst, res);

	if (is_Phi(known)) {
		/* If pred is a phi node we want to optimize it: If loops are matured in a bad
		   order, an enclosing Phi node may become superfluous. */
		res = optimize_in_place_2(known);
		exchange(known, res);

	/* An undefined value, e.g., in unreachable code. */

	res = optimize_node (res);  /* This is necessary to add the node to the hash table for cse. */
	IRN_VRFY_IRG(res, irg);
	/* Memory Phis in endless loops must be kept alive.
	   As we can't distinguish these easily we keep all of them alive. */
	if ((res->op == op_Phi) && (mode == mode_M))
		add_End_keepalive(get_irg_end(irg), res);
} /* new_rd_Phi_in */
1867 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1869 #if PRECISE_EXC_CONTEXT
1871 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1874 * Construct a new frag_array for node n.
1875 * Copy the content from the current graph_arr of the corresponding block:
1876 * this is the current state.
1877 * Set ProjM(n) as current memory state.
1878 * Further the last entry in frag_arr of current block points to n. This
1879 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/**
 * Constructs a new frag_arr for node n: a snapshot of the current block's
 * graph_arr (the local-value state) taken at a fragile (possibly
 * exception-raising) operation.  Slot 0 is set to the exception memory Proj
 * of n, and the last graph_arr slot of the current block is pointed at n,
 * chaining block->last_frag_op-> ... ->first_frag_op.
 *
 * NOTE(review): the local declarations, the `else` branch bracing, the
 * re-enabling of optimization, and the final `return arr;` are not visible
 * in this copy of the file; the code is preserved unchanged.
 */
static INLINE ir_node **new_frag_arr(ir_node *n) {
	/* Snapshot the current block's local-value state. */
	arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
	memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
	       sizeof(ir_node *)*current_ir_graph->n_loc);

	/* turn off optimization before allocating Proj nodes, as res isn't
	   finished yet. */
	opt = get_opt_optimize(); set_optimize(0);
	/* Here we rely on the fact that all frag ops have Memory as first result! */
	if (get_irn_op(n) == op_Call)
		arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
	else if (get_irn_op(n) == op_CopyB)
		arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
	/* All remaining fragile ops share the same memory Proj number,
	   so a single Proj constant works for all of them: */
	assert((pn_Quot_M == pn_DivMod_M) &&
	       (pn_Quot_M == pn_Div_M) &&
	       (pn_Quot_M == pn_Mod_M) &&
	       (pn_Quot_M == pn_Load_M) &&
	       (pn_Quot_M == pn_Store_M) &&
	       (pn_Quot_M == pn_Alloc_M) &&
	       (pn_Quot_M == pn_Bound_M));
	arr[0] = new_Proj(n, mode_M, pn_Alloc_M);

	/* Chain: the last graph_arr slot of the block points at the newest frag op. */
	current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
} /* new_frag_arr */
1914 * Returns the frag_arr from a node.
/**
 * Returns the frag_arr stored in the exception attribute of a fragile node.
 *
 * NOTE(review): the `case iro_*:` labels of this switch are missing from
 * this copy of the file; judging by the attribute accesses, the returns
 * below belong to Call, Alloc, Load, Store, and the default/except case
 * respectively — confirm against the full source.
 */
static INLINE ir_node **get_frag_arr(ir_node *n) {
	switch (get_irn_opcode(n)) {
		return n->attr.call.exc.frag_arr;
		return n->attr.alloc.exc.frag_arr;
		return n->attr.load.exc.frag_arr;
		return n->attr.store.exc.frag_arr;
		return n->attr.except.frag_arr;
} /* get_frag_arr */
/**
 * Stores val at position pos in every frag_arr along the chain of fragile
 * ops of the current block, without overwriting slots that are already set.
 * The chain is followed via the last array slot (n_loc - 1).
 *
 * NOTE(review): this copy of the file appears to contain fragments of two
 * variants of the body — a recursive one and an iterative one with a
 * bounded loop — presumably separated by preprocessor lines that are not
 * visible here.  The code is preserved unchanged.
 */
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
	/* Recursive variant: set, then recurse into the next frag_arr. */
	if (!frag_arr[pos]) frag_arr[pos] = val;
	if (frag_arr[current_ir_graph->n_loc - 1]) {
		ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
		assert(arr != frag_arr && "Endless recursion detected");
		set_frag_value(arr, pos, val);

	/* Iterative variant: bounded walk along the frag-op chain. */
	for (i = 0; i < 1000; ++i) {
		if (!frag_arr[pos]) {
			frag_arr[pos] = val;
		if (frag_arr[current_ir_graph->n_loc - 1]) {
			ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
	/* Reached only if the chain is longer than the loop bound. */
	assert(0 && "potential endless recursion");
} /* set_frag_value */
/**
 * Returns the value of local variable pos as visible after the fragile
 * operation cfOp in block.  If a set_value() happened after cfOp, a Phi
 * node must be built (or a Phi0 queued if the block is not matured yet).
 *
 * NOTE(review): the return type line, local declarations, `else` braces and
 * the final return are not visible in this copy of the file; the code is
 * preserved unchanged.
 */
get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
	assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));

	frag_arr = get_frag_arr(cfOp);
	res = frag_arr[pos];
	if (block->attr.block.graph_arr[pos]) {
		/* There was a set_value() after the cfOp and no get_value() before that
		   set_value().  We must build a Phi node now. */
		if (block->attr.block.is_matured) {
			int ins = get_irn_arity(block);
			NEW_ARR_A(ir_node *, nin, ins);
			res = phi_merge(block, pos, mode, nin, ins);
			/* (else branch) block not matured: create a Phi0 placeholder and
			   queue it on the block's link list for later maturing. */
			res = new_rd_Phi0(current_ir_graph, block, mode);
			res->attr.phi0.pos = pos;
			res->link = block->link;

		/* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
		   but this should be better: (remove comment if this works) */
		/* It's a Phi, we can write this into all graph_arrs with NULL */
		set_frag_value(block->attr.block.graph_arr, pos, res);
		/* (else branch) no set_value after cfOp: fall back to the normal lookup. */
		res = get_r_value_internal(block, pos, mode);
		set_frag_value(block->attr.block.graph_arr, pos, res);
} /* get_r_frag_value_internal */
1994 #endif /* PRECISE_EXC_CONTEXT */
1997 * check whether a control flow cf_pred is a exception flow.
1999 * @param cf_pred the control flow node
2000 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2002 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2004 * Note: all projections from a raise are "exceptional control flow" we we handle it
2005 * like a normal Jmp, because there is no "regular" one.
2006 * That's why Raise is no "fragile_op"!
2008 if (is_fragile_op(prev_cf_op)) {
2009 if (is_Proj(cf_pred)) {
2010 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2011 /* the regular control flow, NO exception */
2014 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2017 /* Hmm, exception but not a Proj? */
2018 assert(!"unexpected condition: fragile op without a proj");
2022 } /* is_exception_flow */
/**
 * Computes the predecessors for the real phi node, and then
 * allocates and returns this node.  The routine called to allocate the
 * node might optimize it away and return a real value.
 * This function must be called with an in-array of proper size.
 *
 * NOTE(review): local declarations and a number of closing braces / `else`
 * lines are missing from this copy of the file; the visible code is
 * preserved unchanged.
 */
phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
	ir_node *prevBlock, *res, *phi0, *phi0_all;

	/* If this block has no value at pos create a Phi0 and remember it
	   in graph_arr to break recursions.
	   Else we may not set graph_arr as a later value is remembered there. */
	if (!block->attr.block.graph_arr[pos]) {
		if (block == get_irg_start_block(current_ir_graph)) {
			/* Collapsing to Bad tarvals is no good idea.
			   So we call a user-supplied routine here that deals with this case as
			   appropriate for the given language.  Sadly the only help we can give
			   here is the position.

			   Even if all variables are defined before use, it can happen that
			   we get to the start block, if a Cond has been replaced by a tuple
			   (bad, jmp).  In this case we call the function needlessly, eventually
			   generating a non-existent error.
			   However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
			   before recursion. */
			if (default_initialize_local_variable) {
				ir_node *rem = get_cur_block();

				set_cur_block(block);
				block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
				/* (else branch) no initializer callback registered: fall back
				   to a Bad constant. */
				block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
			/* We don't need to care about exception ops in the start block.
			   There are none by definition. */
			return block->attr.block.graph_arr[pos];
		/* (else) not the start block: place a Phi0 to break recursion. */
		phi0 = new_rd_Phi0(current_ir_graph, block, mode);
		block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
		if (get_opt_precise_exc_context()) {
			/* Set graph_arr for fragile ops.  Also here we should break recursion.
			   We could choose a cyclic path through a cfop.  But the recursion would
			   break at some point. */
			set_frag_value(block->attr.block.graph_arr, pos, phi0);

	/* This loop goes to all predecessor blocks of the block the Phi node
	   is in and there finds the operands of the Phi node by calling
	   get_r_value_internal. */
	for (i = 1; i <= ins; ++i) {
		ir_node *cf_pred = block->in[i];
		ir_node *prevCfOp = skip_Proj(cf_pred);

		if (is_Bad(prevCfOp)) {
			/* In case a Cond has been optimized we would get right to the start block
			   with an invalid definition. */
			nin[i-1] = new_Bad();
		prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
		if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
			if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
				assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
				nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
			/* (else) regular control flow: normal lookup in the predecessor. */
			nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
			/* (else) predecessor block is Bad. */
			nin[i-1] = new_Bad();

	/* We want to pass the Phi0 node to the constructor: this finds additional
	   optimization possibilities.
	   The Phi0 node either is allocated in this function, or it comes from
	   a former call to get_r_value_internal.  In this case we may not yet
	   exchange phi0, as this is done in mature_immBlock. */
	phi0_all = block->attr.block.graph_arr[pos];
	if (!((get_irn_op(phi0_all) == op_Phi) &&
	      (get_irn_arity(phi0_all) == 0) &&
	      (get_nodes_block(phi0_all) == block)))

	/* After collecting all predecessors into the array nin a new Phi node
	   with these predecessors is created.  This constructor contains an
	   optimization: If all predecessors of the Phi node are identical it
	   returns the only operand instead of a new Phi node. */
	res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);

	/* In case we allocated a Phi0 node at the beginning of this procedure,
	   we need to exchange this Phi0 with the real Phi. */
	exchange(phi0, res);
	block->attr.block.graph_arr[pos] = res;
	/* Don't set_frag_value as it does not overwrite.  Doesn't matter, is
	   only an optimization. */
/**
 * This function returns the last definition of a value.  In case
 * this value was last defined in a previous block, Phi nodes are
 * inserted.  If the part of the firm graph containing the definition
 * is not yet constructed, a dummy Phi node is returned.
 *
 * @param block  the current block
 * @param pos    the value number of the value searched
 * @param mode   the mode of this value (needed for Phi construction)
 *
 * NOTE(review): the return type line, local declarations and several
 * closing braces are not visible in this copy of the file; the code is
 * preserved unchanged.
 */
get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
	/* There are 4 cases to treat.

	   1. The block is not mature and we visit it the first time.  We can not
	      create a proper Phi node, therefore a Phi0, i.e., a Phi without
	      predecessors is returned.  This node is added to the linked list (field
	      "link") of the containing block to be completed when this block is
	      matured.  (Completion will add a new Phi and turn the Phi0 into an Id
	      node.)

	   2. The value is already known in this block, graph_arr[pos] is set and we
	      visit the block the first time.  We can return the value without
	      creating any new nodes.

	   3. The block is mature and we visit it the first time.  A Phi node needs
	      to be created (phi_merge).  If the Phi is not needed, as all its
	      operands are the same value reaching the block through different
	      paths, it's optimized away and the value itself is returned.

	   4. The block is mature, and we visit it the second time.  Now two
	      subcases are possible:
	      * The value was computed completely the last time we were here.  This
	        is the case if there is no loop.  We can return the proper value.
	      * The recursion that visited this node and set the flag did not
	        return yet.  We are computing a value in a loop and need to
	        break the recursion.  This case only happens if we visited
	        the same block with phi_merge before, which inserted a Phi0.
	        So we return the Phi0.
	*/

	/* case 4 -- already visited. */
	if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
		/* As phi_merge allocates a Phi0 this value is always defined.  Here
		   is the critical difference of the two algorithms. */
		assert(block->attr.block.graph_arr[pos]);
		return block->attr.block.graph_arr[pos];

	/* visited the first time */
	set_irn_visited(block, get_irg_visited(current_ir_graph));

	/* Get the local valid value */
	res = block->attr.block.graph_arr[pos];

	/* case 2 -- If the value is actually computed, return it. */
	if (res) { return res; };

	if (block->attr.block.is_matured) { /* case 3 */

		/* The Phi has the same amount of ins as the corresponding block. */
		int ins = get_irn_arity(block);
		NEW_ARR_A (ir_node *, nin, ins);

		/* Phi merge collects the predecessors and then creates a node. */
		res = phi_merge (block, pos, mode, nin, ins);

	} else { /* case 1 */
		/* The block is not mature, we don't know how many in's are needed.  A Phi
		   with zero predecessors is created.  Such a Phi node is called Phi0
		   node.  The Phi0 is then added to the list of Phi0 nodes in this block
		   to be matured by mature_immBlock later.
		   The Phi0 has to remember the pos of its internal value.  If the real
		   Phi is computed, pos is used to update the array with the local
		   values. */
		res = new_rd_Phi0(current_ir_graph, block, mode);
		res->attr.phi0.pos = pos;
		res->link = block->link;

	/* If we get here, the frontend missed a use-before-definition error */
	printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
	assert(mode->code >= irm_F && mode->code <= irm_P);
	res = new_rd_Const(NULL, current_ir_graph, block, mode,
	                   get_mode_null(mode));

	/* The local valid value is available now. */
	block->attr.block.graph_arr[pos] = res;

} /* get_r_value_internal */
2238 /* ************************************************************************** */
/**
 * Finalize a Block node, when all control flows are known.
 * Acceptable parameters are only Block nodes.
 *
 * NOTE(review): local declarations and some loop statements (e.g. the line
 * advancing `next`) are not visible in this copy of the file; the code is
 * preserved unchanged.
 */
mature_immBlock(ir_node *block) {
	assert(is_Block(block));
	if (!get_Block_matured(block)) {
		ins = ARR_LEN(block->in)-1;
		/* Fix block parameters */
		block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);

		/* An array for building the Phi nodes. */
		NEW_ARR_A(ir_node *, nin, ins);

		/* Traverse a chain of Phi nodes attached to this block and mature
		   these, too. */
		for (n = block->link; n; n = next) {
			inc_irg_visited(current_ir_graph);
			exchange(n, phi_merge(block, n->attr.phi0.pos, n->mode, nin, ins));

		block->attr.block.is_matured = 1;

		/* Now, as the block is a finished firm node, we can optimize it.
		   Since other nodes have been allocated since the block was created
		   we can not free the node on the obstack.  Therefore we have to call
		   optimize_in_place().
		   Unfortunately the optimization does not change a lot, as all allocated
		   nodes refer to the unoptimized node.
		   We can call _2, as global cse has no effect on blocks. */
		block = optimize_in_place_2(block);
		IRN_VRFY_IRG(block, current_ir_graph);
} /* mature_immBlock */
2282 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2283 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2287 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2288 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2292 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2293 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2294 } /* new_d_Const_long */
2297 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2298 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2299 } /* new_d_Const_type */
2303 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2304 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2308 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2309 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2313 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2316 assert(arg->op == op_Cond);
2317 arg->attr.cond.kind = fragmentary;
2318 arg->attr.cond.default_proj = max_proj;
2319 res = new_Proj(arg, mode_X, max_proj);
2321 } /* new_d_defaultProj */
2324 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2325 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2329 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2330 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2331 } /* new_d_strictConv */
2334 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2335 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2339 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2340 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2350 * Allocate the frag array.
2352 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2353 if (get_opt_precise_exc_context()) {
2354 if ((current_ir_graph->phase_state == phase_building) &&
2355 (get_irn_op(res) == op) && /* Could be optimized away. */
2356 !*frag_store) /* Could be a cse where the arr is already set. */ {
2357 *frag_store = new_frag_arr(res);
2360 } /* allocate_frag_arr */
/* Division-family constructors (Quot, DivMod, Div, Mod).  Each may raise a
   division exception, so under PRECISE_EXC_CONTEXT a fragment array is
   attached to record the memory state along the exceptional path. */
2363 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2365 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2366 #if PRECISE_EXC_CONTEXT
2367 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
/* DivMod: combined integer division and remainder. */
2374 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2376 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2377 #if PRECISE_EXC_CONTEXT
2378 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2382 } /* new_d_DivMod */
/* Div: integer division only. */
2385 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2387 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2388 #if PRECISE_EXC_CONTEXT
2389 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
/* Mod: remainder only. */
2396 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2398 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2399 #if PRECISE_EXC_CONTEXT
2400 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* Debug-info ("d") constructors: each forwards to the corresponding block-
   dependent ("bd") constructor using the graph's current block.  Nodes that
   may raise exceptions additionally get a fragment array when
   PRECISE_EXC_CONTEXT is enabled. */
2419 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2420 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2424 new_d_Jmp(dbg_info *db) {
2425 return new_bd_Jmp(db, current_ir_graph->current_block);
2429 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2430 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2434 new_d_Cond(dbg_info *db, ir_node *c) {
2435 return new_bd_Cond(db, current_ir_graph->current_block, c);
/* Call: may raise, hence the fragment array below. */
2439 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2442 res = new_bd_Call(db, current_ir_graph->current_block,
2443 store, callee, arity, in, tp);
2444 #if PRECISE_EXC_CONTEXT
2445 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2452 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2453 return new_bd_Return(db, current_ir_graph->current_block,
2455 } /* new_d_Return */
/* Load/Store/Alloc: memory operations that may raise -> fragment arrays. */
2458 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2460 res = new_bd_Load(db, current_ir_graph->current_block,
2462 #if PRECISE_EXC_CONTEXT
2463 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2470 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2472 res = new_bd_Store(db, current_ir_graph->current_block,
2474 #if PRECISE_EXC_CONTEXT
2475 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2482 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2483 ir_where_alloc where) {
2485 res = new_bd_Alloc(db, current_ir_graph->current_block,
2486 store, size, alloc_type, where);
2487 #if PRECISE_EXC_CONTEXT
2488 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2495 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2496 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2497 return new_bd_Free(db, current_ir_graph->current_block,
2498 store, ptr, size, free_type, where);
/* simpleSel: entity selection without array indices (0, NULL). */
2502 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2503 /* GL: objptr was called frame before. Frame was a bad choice for the name
2504 as the operand could as well be a pointer to a dynamic object. */
2506 return new_bd_Sel(db, current_ir_graph->current_block,
2507 store, objptr, 0, NULL, ent);
2508 } /* new_d_simpleSel */
2511 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2512 return new_bd_Sel(db, current_ir_graph->current_block,
2513 store, objptr, n_index, index, sel);
/* SymConsts are always placed in the start block, not the current block. */
2517 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2518 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2520 } /* new_d_SymConst_type */
2523 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2524 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2525 value, kind, firm_unknown_type);
2526 } /* new_d_SymConst */
2529 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2530 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
/* Body of new_d_Bad (signature lost in this sampled extract): forwards to
   the inline implementation. */
2536 return _new_d_Bad();
/* Confirm: asserts the relation 'cmp' between 'val' and 'bound'. */
2540 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2541 return new_bd_Confirm(db, current_ir_graph->current_block,
2543 } /* new_d_Confirm */
2546 new_d_Unknown(ir_mode *m) {
2547 return new_bd_Unknown(m);
2548 } /* new_d_Unknown */
/* Interprocedural-view constructors: CallBegin/EndReg/EndExcept/Break/Filter. */
2551 new_d_CallBegin(dbg_info *db, ir_node *call) {
2552 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2553 } /* new_d_CallBegin */
2556 new_d_EndReg(dbg_info *db) {
2557 return new_bd_EndReg(db, current_ir_graph->current_block);
2558 } /* new_d_EndReg */
2561 new_d_EndExcept(dbg_info *db) {
2562 return new_bd_EndExcept(db, current_ir_graph->current_block);
2563 } /* new_d_EndExcept */
2566 new_d_Break(dbg_info *db) {
2567 return new_bd_Break(db, current_ir_graph->current_block);
2571 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2572 return new_bd_Filter(db, current_ir_graph->current_block,
2574 } /* new_d_Filter */
/* Parenthesized name suppresses a possible function-like macro expansion. */
2577 (new_d_NoMem)(void) {
2578 return _new_d_NoMem();
2582 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2583 ir_node *ir_true, ir_mode *mode) {
2584 return new_bd_Mux(db, current_ir_graph->current_block,
2585 sel, ir_false, ir_true, mode);
2589 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2590 return new_bd_Psi(db, current_ir_graph->current_block,
2591 arity, conds, vals, mode);
/* CopyB: block copy; may raise -> fragment array under PRECISE_EXC_CONTEXT. */
2594 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2595 ir_node *dst, ir_node *src, ir_type *data_type) {
2597 res = new_bd_CopyB(db, current_ir_graph->current_block,
2598 store, dst, src, data_type);
2599 #if PRECISE_EXC_CONTEXT
2600 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2606 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2607 return new_bd_InstOf(db, current_ir_graph->current_block,
2608 store, objptr, type);
2609 } /* new_d_InstOf */
2612 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2613 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
/* Bound: array bound check; may raise -> fragment array. */
2616 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2617 ir_node *idx, ir_node *lower, ir_node *upper) {
2619 res = new_bd_Bound(db, current_ir_graph->current_block,
2620 store, idx, lower, upper);
2621 #if PRECISE_EXC_CONTEXT
2622 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2628 new_d_Pin(dbg_info *db, ir_node *node) {
2629 return new_bd_Pin(db, current_ir_graph->current_block, node);
2633 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2634 int n_outs, ir_asm_constraint *outputs,
2635 int n_clobber, ident *clobber[], ident *asm_text) {
2636 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2639 /* ********************************************************************* */
2640 /* Comfortable interface with automatic Phi node construction. */
2641 /* (Also uses constructors of the debug-info ("d") interface, except new_Block.) */
2642 /* ********************************************************************* */
2644 /* Block construction */
2645 /* immature Block without predecessors */
/*
 * Creates an immature block: a Block node with a dynamic (arity -1)
 * predecessor array to which jumps can still be added via
 * add_immBlock_pred.  The new block becomes the current block and gets a
 * graph_arr of n_loc slots for automatic Phi construction.
 * Only valid while the graph is in the building phase.
 */
2647 new_d_immBlock(dbg_info *db) {
2650 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2651 /* creates a new dynamic in-array as length of in is -1 */
2652 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2653 current_ir_graph->current_block = res;
2655 /* macroblock head */
2658 res->attr.block.is_matured = 0;
2659 res->attr.block.is_dead = 0;
2660 res->attr.block.is_mb_head = 1;
2661 res->attr.block.has_label = 0;
2662 res->attr.block.irg = current_ir_graph;
2663 res->attr.block.backedge = NULL;
2664 res->attr.block.in_cg = NULL;
2665 res->attr.block.cg_backedge = NULL;
2666 res->attr.block.extblk = NULL;
2667 res->attr.block.region = NULL;
2668 res->attr.block.mb_depth = 0;
2669 res->attr.block.label = 0;
2671 set_Block_block_visited(res, 0);
2673 /* Create and initialize array for Phi-node construction. */
2674 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2675 current_ir_graph->n_loc);
2676 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2678 /* Immature block may not be optimized! */
2679 IRN_VRFY_IRG(res, current_ir_graph);
2682 } /* new_d_immBlock */
/* Convenience wrapper without debug info. */
2685 new_immBlock(void) {
2686 return new_d_immBlock(NULL);
2687 } /* new_immBlock */
2689 /* immature PartBlock with its predecessors */
/*
 * Creates an immature partial block: a continuation of the macroblock that
 * contains pred_jmp's block.  It shares the macroblock head (in[0]) of the
 * predecessor block, is not itself a macroblock head, and sits one
 * mb_depth level deeper.
 */
2691 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2692 ir_node *res = new_d_immBlock(db);
2693 ir_node *blk = get_nodes_block(pred_jmp);
2695 res->in[0] = blk->in[0];
2696 assert(res->in[0] != NULL);
2697 add_immBlock_pred(res, pred_jmp);
2699 res->attr.block.is_mb_head = 0;
2700 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2703 } /* new_d_immPartBlock */
/* Convenience wrapper without debug info. */
2706 new_immPartBlock(ir_node *pred_jmp) {
2707 return new_d_immPartBlock(NULL, pred_jmp);
2708 } /* new_immPartBlock */
2710 /* add an edge to a jmp/control flow node */
/*
 * Appends control-flow predecessor 'jmp' to an immature block.
 * Only allowed on unmatured macroblock heads; 'n' is the new edge's
 * position (in[0] is the macroblock head, so -1 excludes it).
 */
2712 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2713 int n = ARR_LEN(block->in) - 1;
2715 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2716 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2717 assert(is_ir_node(jmp));
2719 ARR_APP1(ir_node *, block->in, jmp);
/* Notify registered hooks about the new edge. */
2721 hook_set_irn_n(block, n, jmp, NULL);
2722 } /* add_immBlock_pred */
2724 /* changing the current block */
/* Makes 'target' the block into which subsequent constructors place nodes. */
2726 set_cur_block(ir_node *target) {
2727 current_ir_graph->current_block = target;
2728 } /* set_cur_block */
2730 /* ************************ */
2731 /* parameter administration */
2733 /* get a value from the parameter array from the current block by its index */
/*
 * Reads local value 'pos' (in 'mode') from the current block, triggering
 * automatic Phi construction if needed.  Slot 0 of graph_arr holds the
 * memory state, hence the "pos + 1" offset everywhere below.
 */
2735 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2736 ir_graph *irg = current_ir_graph;
2737 assert(get_irg_phase_state(irg) == phase_building);
/* get_r_value_internal walks predecessors; reset the visited marker first. */
2738 inc_irg_visited(irg);
2741 return get_r_value_internal(irg->current_block, pos + 1, mode);
2744 /* get a value from the parameter array from the current block by its index */
2746 get_value(int pos, ir_mode *mode) {
2747 return get_d_value(NULL, pos, mode);
2750 /* set a value at position pos in the parameter array from the current block */
2752 set_value(int pos, ir_node *value) {
2753 ir_graph *irg = current_ir_graph;
2754 assert(get_irg_phase_state(irg) == phase_building);
2755 assert(pos+1 < irg->n_loc);
2756 assert(is_ir_node(value));
2757 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2760 /* Find the value number for a node in the current block.*/
2762 find_value(ir_node *value) {
2764 ir_node *bl = current_ir_graph->current_block;
/* Scan backwards; slot 0 is the memory state and is skipped (i >= 1). */
2766 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2767 if (bl->attr.block.graph_arr[i] == value)
2772 /* get the current store */
2775 ir_graph *irg = current_ir_graph;
2777 assert(get_irg_phase_state(irg) == phase_building);
2778 /* GL: one could call get_value instead */
2779 inc_irg_visited(irg);
/* Memory state lives in slot 0 with mode_M. */
2780 return get_r_value_internal(irg->current_block, 0, mode_M);
2783 /* set the current store: handles automatic Sync construction for Load nodes */
/*
 * Sets the current memory state of the current block (graph_arr slot 0).
 * If auto-Sync creation is enabled and 'store' comes from a non-volatile
 * Load, independent Loads are merged into a Sync so they do not serialize:
 * either the Load is hoisted above an existing Sync, or a fresh 2-input
 * Sync is built from two consecutive Loads.
 */
2785 set_store(ir_node *store) {
2786 ir_node *load, *pload, *pred, *in[2];
2788 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2789 /* Beware: due to dead code elimination, a store might become a Bad node even in
2790 the construction phase. */
2791 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2793 if (get_opt_auto_create_sync()) {
2794 /* handle non-volatile Load nodes by automatically creating Sync's */
2795 load = skip_Proj(store);
2796 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2797 pred = get_Load_mem(load);
2799 if (is_Sync(pred)) {
2800 /* a Load after a Sync: move it up */
2801 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2803 set_Load_mem(load, get_memop_mem(mem));
2804 add_Sync_pred(pred, store);
2807 pload = skip_Proj(pred);
2808 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2809 /* a Load after a Load: create a new Sync */
2810 set_Load_mem(load, get_Load_mem(pload));
2814 store = new_Sync(2, in);
/* Record the (possibly replaced) store as the block's memory state. */
2819 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keeps 'ka' alive by adding it to the End node's keepalive list. */
2823 keep_alive(ir_node *ka) {
2824 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2827 /* --- Useful access routines --- */
2828 /* Returns the current block of the current graph. To set the current
2829 block use set_cur_block. */
2830 ir_node *get_cur_block(void) {
2831 return get_irg_current_block(current_ir_graph);
2832 } /* get_cur_block */
2834 /* Returns the frame type of the current graph */
2835 ir_type *get_cur_frame_type(void) {
2836 return get_irg_frame_type(current_ir_graph);
2837 } /* get_cur_frame_type */
2840 /* ********************************************************************* */
2843 /* call once for each run of the library */
2845 init_cons(uninitialized_local_variable_func_t *func) {
2846 default_initialize_local_variable = func;
2850 irp_finalize_cons(void) {
2852 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2853 irg_finalize_cons(get_irp_irg(i));
2855 irp->phase_state = phase_high;
2856 } /* irp_finalize_cons */
/* --------------------------------------------------------------------- */
/* Plain constructors: thin wrappers that forward to the corresponding    */
/* new_d_* constructor with a NULL debug-info pointer.  They carry no     */
/* logic of their own; see the new_d_* functions for semantics.           */
/* --------------------------------------------------------------------- */
2859 ir_node *new_Block(int arity, ir_node **in) {
2860 return new_d_Block(NULL, arity, in);
2862 ir_node *new_Start(void) {
2863 return new_d_Start(NULL);
2865 ir_node *new_End(void) {
2866 return new_d_End(NULL);
2868 ir_node *new_Jmp(void) {
2869 return new_d_Jmp(NULL);
2871 ir_node *new_IJmp(ir_node *tgt) {
2872 return new_d_IJmp(NULL, tgt);
2874 ir_node *new_Cond(ir_node *c) {
2875 return new_d_Cond(NULL, c);
2877 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2878 return new_d_Return(NULL, store, arity, in);
2880 ir_node *new_Const(ir_mode *mode, tarval *con) {
2881 return new_d_Const(NULL, mode, con);
2884 ir_node *new_Const_long(ir_mode *mode, long value) {
2885 return new_d_Const_long(NULL, mode, value);
2888 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2889 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2892 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2893 return new_d_SymConst_type(NULL, mode, value, kind, type);
2895 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2896 return new_d_SymConst(NULL, mode, value, kind);
2898 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2899 return new_d_simpleSel(NULL, store, objptr, ent);
2901 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2903 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2905 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2907 return new_d_Call(NULL, store, callee, arity, in, tp);
/* Arithmetic and bitwise operations. */
2909 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2910 return new_d_Add(NULL, op1, op2, mode);
2912 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2913 return new_d_Sub(NULL, op1, op2, mode);
2915 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2916 return new_d_Minus(NULL, op, mode);
2918 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2919 return new_d_Mul(NULL, op1, op2, mode);
2921 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2922 return new_d_Mulh(NULL, op1, op2, mode);
2924 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2925 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2927 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2928 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2930 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2931 return new_d_Div(NULL, memop, op1, op2, mode, state);
2933 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2934 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2936 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2937 return new_d_Abs(NULL, op, mode);
2939 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2940 return new_d_And(NULL, op1, op2, mode);
2942 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
2943 return new_d_Or(NULL, op1, op2, mode);
2945 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
2946 return new_d_Eor(NULL, op1, op2, mode);
2948 ir_node *new_Not(ir_node *op, ir_mode *mode) {
2949 return new_d_Not(NULL, op, mode);
2951 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
2952 return new_d_Shl(NULL, op, k, mode);
2954 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
2955 return new_d_Shr(NULL, op, k, mode);
2957 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
2958 return new_d_Shrs(NULL, op, k, mode);
2960 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
2961 return new_d_Rot(NULL, op, k, mode);
2963 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
2964 return new_d_Carry(NULL, op1, op2, mode);
2966 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
2967 return new_d_Borrow(NULL, op1, op2, mode);
2969 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
2970 return new_d_Cmp(NULL, op1, op2);
2972 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
2973 return new_d_Conv(NULL, op, mode);
2975 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
2976 return new_d_strictConv(NULL, op, mode);
2978 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
2979 return new_d_Cast(NULL, op, to_tp);
2981 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
2982 return new_d_Phi(NULL, arity, in, mode);
/* Memory operations. */
2984 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
2985 return new_d_Load(NULL, store, addr, mode);
2987 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
2988 return new_d_Store(NULL, store, addr, val);
2990 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
2991 ir_where_alloc where) {
2992 return new_d_Alloc(NULL, store, size, alloc_type, where);
2994 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
2995 ir_type *free_type, ir_where_alloc where) {
2996 return new_d_Free(NULL, store, ptr, size, free_type, where);
2998 ir_node *new_Sync(int arity, ir_node *in[]) {
2999 return new_d_Sync(NULL, arity, in);
3001 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3002 return new_d_Proj(NULL, arg, mode, proj);
3004 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3005 return new_d_defaultProj(NULL, arg, max_proj);
3007 ir_node *new_Tuple(int arity, ir_node **in) {
3008 return new_d_Tuple(NULL, arity, in);
3010 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3011 return new_d_Id(NULL, val, mode);
3013 ir_node *new_Bad(void) {
3016 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3017 return new_d_Confirm(NULL, val, bound, cmp);
3019 ir_node *new_Unknown(ir_mode *m) {
3020 return new_d_Unknown(m);
3022 ir_node *new_CallBegin(ir_node *callee) {
3023 return new_d_CallBegin(NULL, callee);
3025 ir_node *new_EndReg(void) {
3026 return new_d_EndReg(NULL);
3028 ir_node *new_EndExcept(void) {
3029 return new_d_EndExcept(NULL);
3031 ir_node *new_Break(void) {
3032 return new_d_Break(NULL);
3034 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3035 return new_d_Filter(NULL, arg, mode, proj);
3037 ir_node *new_NoMem(void) {
3038 return new_d_NoMem();
3040 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3041 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3043 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3044 return new_d_Psi(NULL, arity, conds, vals, mode);
3046 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3047 return new_d_CopyB(NULL, store, dst, src, data_type);
3049 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3050 return new_d_InstOf(NULL, store, objptr, ent);
3052 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3053 return new_d_Raise(NULL, store, obj);
3055 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3056 return new_d_Bound(NULL, store, idx, lower, upper);
3058 ir_node *new_Pin(ir_node *node) {
3059 return new_d_Pin(NULL, node);
3061 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3062 int n_outs, ir_asm_constraint *outputs,
3063 int n_clobber, ident *clobber[], ident *asm_text) {
3064 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3067 /* create a new anchor node */
3068 ir_node *new_Anchor(ir_graph *irg) {
3069 ir_node *in[anchor_last];
3070 memset(in, 0, sizeof(in));
3071 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);