 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
60 /* when we need verifying */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
68 * Language dependent variable initialization callback.
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = op_pin_state_pinned; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for an divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode); \
161 current_ir_graph = rem; \
165 /* creates a d constructor for an binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Can not be used with automatic
183 * Phi node construction.
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
191 set_Block_matured(res, 1);
192 set_Block_block_visited(res, 0);
194 res->attr.block.dead = 0;
195 res->attr.block.irg = irg;
196 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
197 res->attr.block.in_cg = NULL;
198 res->attr.block.cg_backedge = NULL;
199 res->attr.block.extblk = NULL;
201 IRN_VRFY_IRG(res, irg);
206 new_bd_Start(dbg_info *db, ir_node *block) {
208 ir_graph *irg = current_ir_graph;
210 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
212 IRN_VRFY_IRG(res, irg);
217 new_bd_End(dbg_info *db, ir_node *block) {
219 ir_graph *irg = current_ir_graph;
221 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
223 IRN_VRFY_IRG(res, irg);
228 * Creates a Phi node with all predecessors. Calling this constructor
229 * is only allowed if the corresponding block is mature.
232 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
234 ir_graph *irg = current_ir_graph;
238 /* Don't assert that block matured: the use of this constructor is strongly
240 if ( get_Block_matured(block) )
241 assert( get_irn_arity(block) == arity );
243 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
245 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
247 for (i = arity-1; i >= 0; i--)
248 if (get_irn_op(in[i]) == op_Unknown) {
253 if (!has_unknown) res = optimize_node (res);
254 IRN_VRFY_IRG(res, irg);
256 /* Memory Phis in endless loops must be kept alive.
257 As we can't distinguish these easily we keep all of them alive. */
258 if ((res->op == op_Phi) && (mode == mode_M))
259 add_End_keepalive(get_irg_end(irg), res);
264 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
266 ir_graph *irg = current_ir_graph;
268 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
269 res->attr.con.tv = con;
270 set_Const_type(res, tp); /* Call method because of complex assertion. */
271 res = optimize_node (res);
272 assert(get_Const_type(res) == tp);
273 IRN_VRFY_IRG(res, irg);
276 } /* new_bd_Const_type */
279 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
280 ir_graph *irg = current_ir_graph;
282 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
286 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
287 ir_graph *irg = current_ir_graph;
289 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
290 } /* new_bd_Const_long */
293 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
295 ir_graph *irg = current_ir_graph;
297 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
298 res = optimize_node(res);
299 IRN_VRFY_IRG(res, irg);
304 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
307 ir_graph *irg = current_ir_graph;
309 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
310 res->attr.proj = proj;
313 assert(get_Proj_pred(res));
314 assert(get_nodes_block(get_Proj_pred(res)));
316 res = optimize_node(res);
318 IRN_VRFY_IRG(res, irg);
323 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
326 ir_graph *irg = current_ir_graph;
328 assert(arg->op == op_Cond);
329 arg->attr.cond.kind = fragmentary;
330 arg->attr.cond.default_proj = max_proj;
331 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
333 } /* new_bd_defaultProj */
336 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
338 ir_graph *irg = current_ir_graph;
340 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
341 res->attr.conv.strict = strict_flag;
342 res = optimize_node(res);
343 IRN_VRFY_IRG(res, irg);
348 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
350 ir_graph *irg = current_ir_graph;
352 assert(is_atomic_type(to_tp));
354 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
355 res->attr.cast.totype = to_tp;
356 res = optimize_node(res);
357 IRN_VRFY_IRG(res, irg);
362 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
364 ir_graph *irg = current_ir_graph;
366 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
367 res = optimize_node (res);
368 IRN_VRFY_IRG(res, irg);
393 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
396 ir_graph *irg = current_ir_graph;
399 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
400 res = optimize_node(res);
401 IRN_VRFY_IRG(res, irg);
406 new_bd_Jmp(dbg_info *db, ir_node *block) {
408 ir_graph *irg = current_ir_graph;
410 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
411 res = optimize_node (res);
412 IRN_VRFY_IRG(res, irg);
417 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
419 ir_graph *irg = current_ir_graph;
421 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
422 res = optimize_node (res);
423 IRN_VRFY_IRG(res, irg);
425 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
431 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
433 ir_graph *irg = current_ir_graph;
435 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
436 res->attr.cond.kind = dense;
437 res->attr.cond.default_proj = 0;
438 res->attr.cond.pred = COND_JMP_PRED_NONE;
439 res = optimize_node (res);
440 IRN_VRFY_IRG(res, irg);
445 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
446 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
450 ir_graph *irg = current_ir_graph;
453 NEW_ARR_A(ir_node *, r_in, r_arity);
456 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
458 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
460 assert((get_unknown_type() == tp) || is_Method_type(tp));
461 set_Call_type(res, tp);
462 res->attr.call.exc.pin_state = op_pin_state_pinned;
463 res->attr.call.callee_arr = NULL;
464 res = optimize_node(res);
465 IRN_VRFY_IRG(res, irg);
470 new_bd_Return(dbg_info *db, ir_node *block,
471 ir_node *store, int arity, ir_node **in) {
475 ir_graph *irg = current_ir_graph;
478 NEW_ARR_A (ir_node *, r_in, r_arity);
480 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
481 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
482 res = optimize_node(res);
483 IRN_VRFY_IRG(res, irg);
485 } /* new_bd_Return */
488 new_bd_Load(dbg_info *db, ir_node *block,
489 ir_node *store, ir_node *adr, ir_mode *mode) {
492 ir_graph *irg = current_ir_graph;
496 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
497 res->attr.load.exc.pin_state = op_pin_state_pinned;
498 res->attr.load.load_mode = mode;
499 res->attr.load.volatility = volatility_non_volatile;
500 res = optimize_node(res);
501 IRN_VRFY_IRG(res, irg);
506 new_bd_Store(dbg_info *db, ir_node *block,
507 ir_node *store, ir_node *adr, ir_node *val) {
510 ir_graph *irg = current_ir_graph;
515 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
516 res->attr.store.exc.pin_state = op_pin_state_pinned;
517 res->attr.store.volatility = volatility_non_volatile;
518 res = optimize_node(res);
519 IRN_VRFY_IRG(res, irg);
524 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
525 ir_node *size, ir_type *alloc_type, where_alloc where) {
528 ir_graph *irg = current_ir_graph;
532 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
533 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
534 res->attr.alloc.where = where;
535 res->attr.alloc.type = alloc_type;
536 res = optimize_node(res);
537 IRN_VRFY_IRG(res, irg);
542 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
543 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
546 ir_graph *irg = current_ir_graph;
551 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
552 res->attr.free.where = where;
553 res->attr.free.type = free_type;
554 res = optimize_node(res);
555 IRN_VRFY_IRG(res, irg);
560 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
561 int arity, ir_node **in, ir_entity *ent) {
565 ir_graph *irg = current_ir_graph;
566 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
568 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
571 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
574 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
576 * Sel's can select functions which should be of mode mode_P_code.
578 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
579 res->attr.sel.ent = ent;
580 res = optimize_node(res);
581 IRN_VRFY_IRG(res, irg);
586 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
587 symconst_kind symkind, ir_type *tp) {
590 ir_graph *irg = current_ir_graph;
592 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
593 mode = mode_P_data; /* FIXME: can be mode_P_code */
597 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
599 res->attr.symc.num = symkind;
600 res->attr.symc.sym = value;
601 res->attr.symc.tp = tp;
603 res = optimize_node(res);
604 IRN_VRFY_IRG(res, irg);
606 } /* new_bd_SymConst_type */
609 new_bd_Sync(dbg_info *db, ir_node *block) {
611 ir_graph *irg = current_ir_graph;
613 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
614 /* no need to call optimize node here, Sync are always created with no predecessors */
615 IRN_VRFY_IRG(res, irg);
620 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
621 ir_node *in[2], *res;
622 ir_graph *irg = current_ir_graph;
626 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
627 res->attr.confirm_cmp = cmp;
628 res = optimize_node (res);
629 IRN_VRFY_IRG(res, irg);
631 } /* new_bd_Confirm */
634 new_bd_Unknown(ir_mode *m) {
636 ir_graph *irg = current_ir_graph;
638 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
639 res = optimize_node(res);
641 } /* new_bd_Unknown */
644 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
647 ir_graph *irg = current_ir_graph;
649 in[0] = get_Call_ptr(call);
650 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
651 /* res->attr.callbegin.irg = irg; */
652 res->attr.callbegin.call = call;
653 res = optimize_node(res);
654 IRN_VRFY_IRG(res, irg);
656 } /* new_bd_CallBegin */
659 new_bd_EndReg(dbg_info *db, ir_node *block) {
661 ir_graph *irg = current_ir_graph;
663 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
664 set_irg_end_reg(irg, res);
665 IRN_VRFY_IRG(res, irg);
667 } /* new_bd_EndReg */
670 new_bd_EndExcept(dbg_info *db, ir_node *block) {
672 ir_graph *irg = current_ir_graph;
674 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
675 set_irg_end_except(irg, res);
676 IRN_VRFY_IRG (res, irg);
678 } /* new_bd_EndExcept */
681 new_bd_Break(dbg_info *db, ir_node *block) {
683 ir_graph *irg = current_ir_graph;
685 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
686 res = optimize_node(res);
687 IRN_VRFY_IRG(res, irg);
692 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
695 ir_graph *irg = current_ir_graph;
697 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
698 res->attr.filter.proj = proj;
699 res->attr.filter.in_cg = NULL;
700 res->attr.filter.backedge = NULL;
703 assert(get_Proj_pred(res));
704 assert(get_nodes_block(get_Proj_pred(res)));
706 res = optimize_node(res);
707 IRN_VRFY_IRG(res, irg);
709 } /* new_bd_Filter */
712 new_bd_Mux(dbg_info *db, ir_node *block,
713 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
716 ir_graph *irg = current_ir_graph;
722 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
725 res = optimize_node(res);
726 IRN_VRFY_IRG(res, irg);
731 new_bd_Psi(dbg_info *db, ir_node *block,
732 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
735 ir_graph *irg = current_ir_graph;
738 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
740 for (i = 0; i < arity; ++i) {
742 in[2 * i + 1] = vals[i];
746 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
749 res = optimize_node(res);
750 IRN_VRFY_IRG(res, irg);
755 new_bd_CopyB(dbg_info *db, ir_node *block,
756 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
759 ir_graph *irg = current_ir_graph;
765 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
767 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
768 res->attr.copyb.data_type = data_type;
769 res = optimize_node(res);
770 IRN_VRFY_IRG(res, irg);
775 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
776 ir_node *objptr, ir_type *type) {
779 ir_graph *irg = current_ir_graph;
783 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
784 res->attr.instof.type = type;
785 res = optimize_node(res);
786 IRN_VRFY_IRG(res, irg);
788 } /* new_bd_InstOf */
791 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
794 ir_graph *irg = current_ir_graph;
798 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
799 res = optimize_node(res);
800 IRN_VRFY_IRG(res, irg);
805 new_bd_Bound(dbg_info *db, ir_node *block,
806 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
809 ir_graph *irg = current_ir_graph;
815 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
816 res->attr.bound.exc.pin_state = op_pin_state_pinned;
817 res = optimize_node(res);
818 IRN_VRFY_IRG(res, irg);
823 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
825 ir_graph *irg = current_ir_graph;
827 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
828 res = optimize_node(res);
829 IRN_VRFY_IRG(res, irg);
833 /* --------------------------------------------- */
834 /* private interfaces, for professional use only */
835 /* --------------------------------------------- */
837 /* Constructs a Block with a fixed number of predecessors.
838 Does not set current_block. Can not be used with automatic
839 Phi node construction. */
841 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
842 ir_graph *rem = current_ir_graph;
845 current_ir_graph = irg;
846 res = new_bd_Block(db, arity, in);
847 current_ir_graph = rem;
853 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
854 ir_graph *rem = current_ir_graph;
857 current_ir_graph = irg;
858 res = new_bd_Start(db, block);
859 current_ir_graph = rem;
865 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
867 ir_graph *rem = current_ir_graph;
869 current_ir_graph = rem;
870 res = new_bd_End(db, block);
871 current_ir_graph = rem;
876 /* Creates a Phi node with all predecessors. Calling this constructor
877 is only allowed if the corresponding block is mature. */
879 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
881 ir_graph *rem = current_ir_graph;
883 current_ir_graph = irg;
884 res = new_bd_Phi(db, block,arity, in, mode);
885 current_ir_graph = rem;
891 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
893 ir_graph *rem = current_ir_graph;
895 current_ir_graph = irg;
896 res = new_bd_Const_type(db, block, mode, con, tp);
897 current_ir_graph = rem;
900 } /* new_rd_Const_type */
903 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
905 ir_graph *rem = current_ir_graph;
907 current_ir_graph = irg;
908 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
909 current_ir_graph = rem;
915 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
916 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
917 } /* new_rd_Const_long */
920 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
922 ir_graph *rem = current_ir_graph;
924 current_ir_graph = irg;
925 res = new_bd_Id(db, block, val, mode);
926 current_ir_graph = rem;
932 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
935 ir_graph *rem = current_ir_graph;
937 current_ir_graph = irg;
938 res = new_bd_Proj(db, block, arg, mode, proj);
939 current_ir_graph = rem;
945 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
948 ir_graph *rem = current_ir_graph;
950 current_ir_graph = irg;
951 res = new_bd_defaultProj(db, block, arg, max_proj);
952 current_ir_graph = rem;
955 } /* new_rd_defaultProj */
958 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
960 ir_graph *rem = current_ir_graph;
962 current_ir_graph = irg;
963 res = new_bd_Conv(db, block, op, mode, 0);
964 current_ir_graph = rem;
970 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
972 ir_graph *rem = current_ir_graph;
974 current_ir_graph = irg;
975 res = new_bd_Cast(db, block, op, to_tp);
976 current_ir_graph = rem;
982 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
984 ir_graph *rem = current_ir_graph;
986 current_ir_graph = irg;
987 res = new_bd_Tuple(db, block, arity, in);
988 current_ir_graph = rem;
1011 NEW_RD_BINOP(Borrow)
1014 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1015 ir_node *op1, ir_node *op2) {
1017 ir_graph *rem = current_ir_graph;
1019 current_ir_graph = irg;
1020 res = new_bd_Cmp(db, block, op1, op2);
1021 current_ir_graph = rem;
1027 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1029 ir_graph *rem = current_ir_graph;
1031 current_ir_graph = irg;
1032 res = new_bd_Jmp(db, block);
1033 current_ir_graph = rem;
1039 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1041 ir_graph *rem = current_ir_graph;
1043 current_ir_graph = irg;
1044 res = new_bd_IJmp(db, block, tgt);
1045 current_ir_graph = rem;
1051 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1053 ir_graph *rem = current_ir_graph;
1055 current_ir_graph = irg;
1056 res = new_bd_Cond(db, block, c);
1057 current_ir_graph = rem;
1063 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1064 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1066 ir_graph *rem = current_ir_graph;
1068 current_ir_graph = irg;
1069 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1070 current_ir_graph = rem;
1076 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1077 ir_node *store, int arity, ir_node **in) {
1079 ir_graph *rem = current_ir_graph;
1081 current_ir_graph = irg;
1082 res = new_bd_Return(db, block, store, arity, in);
1083 current_ir_graph = rem;
1086 } /* new_rd_Return */
1089 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1090 ir_node *store, ir_node *adr, ir_mode *mode) {
1092 ir_graph *rem = current_ir_graph;
1094 current_ir_graph = irg;
1095 res = new_bd_Load(db, block, store, adr, mode);
1096 current_ir_graph = rem;
1102 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1103 ir_node *store, ir_node *adr, ir_node *val) {
1105 ir_graph *rem = current_ir_graph;
1107 current_ir_graph = irg;
1108 res = new_bd_Store(db, block, store, adr, val);
1109 current_ir_graph = rem;
1112 } /* new_rd_Store */
1115 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1116 ir_node *size, ir_type *alloc_type, where_alloc where) {
1118 ir_graph *rem = current_ir_graph;
1120 current_ir_graph = irg;
1121 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1122 current_ir_graph = rem;
1125 } /* new_rd_Alloc */
1128 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1129 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1131 ir_graph *rem = current_ir_graph;
1133 current_ir_graph = irg;
1134 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1135 current_ir_graph = rem;
1141 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1142 ir_node *store, ir_node *objptr, ir_entity *ent) {
1144 ir_graph *rem = current_ir_graph;
1146 current_ir_graph = irg;
1147 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1148 current_ir_graph = rem;
1151 } /* new_rd_simpleSel */
1154 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1155 int arity, ir_node **in, ir_entity *ent) {
1157 ir_graph *rem = current_ir_graph;
1159 current_ir_graph = irg;
1160 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1161 current_ir_graph = rem;
1167 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1168 symconst_kind symkind, ir_type *tp) {
1170 ir_graph *rem = current_ir_graph;
1172 current_ir_graph = irg;
1173 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1174 current_ir_graph = rem;
1177 } /* new_rd_SymConst_type */
1180 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1181 symconst_kind symkind) {
1182 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1184 } /* new_rd_SymConst */
1186 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1187 symconst_symbol sym;
1188 sym.entity_p = symbol;
1189 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1190 } /* new_rd_SymConst_addr_ent */
1192 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1193 symconst_symbol sym;
1194 sym.entity_p = symbol;
1195 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1196 } /* new_rd_SymConst_ofs_ent */
1198 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1199 symconst_symbol sym;
1200 sym.ident_p = symbol;
1201 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1202 } /* new_rd_SymConst_addr_name */
1204 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1205 symconst_symbol sym;
1206 sym.type_p = symbol;
1207 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1208 } /* new_rd_SymConst_type_tag */
1210 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1211 symconst_symbol sym;
1212 sym.type_p = symbol;
1213 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1214 } /* new_rd_SymConst_size */
1216 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1217 symconst_symbol sym;
1218 sym.type_p = symbol;
1219 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1220 } /* new_rd_SymConst_align */
1223 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1225 ir_graph *rem = current_ir_graph;
1228 current_ir_graph = irg;
1229 res = new_bd_Sync(db, block);
1230 current_ir_graph = rem;
1232 for (i = 0; i < arity; ++i)
1233 add_Sync_pred(res, in[i]);
1239 new_rd_Bad(ir_graph *irg) {
1240 return get_irg_bad(irg);
1244 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1246 ir_graph *rem = current_ir_graph;
1248 current_ir_graph = irg;
1249 res = new_bd_Confirm(db, block, val, bound, cmp);
1250 current_ir_graph = rem;
1253 } /* new_rd_Confirm */
1256 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1258 ir_graph *rem = current_ir_graph;
1260 current_ir_graph = irg;
1261 res = new_bd_Unknown(m);
1262 current_ir_graph = rem;
1265 } /* new_rd_Unknown */
1268 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1270 ir_graph *rem = current_ir_graph;
1272 current_ir_graph = irg;
1273 res = new_bd_CallBegin(db, block, call);
1274 current_ir_graph = rem;
1277 } /* new_rd_CallBegin */
1280 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1283 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1284 set_irg_end_reg(irg, res);
1285 IRN_VRFY_IRG(res, irg);
1287 } /* new_rd_EndReg */
1290 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1293 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1294 set_irg_end_except(irg, res);
1295 IRN_VRFY_IRG (res, irg);
1297 } /* new_rd_EndExcept */
1300 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1302 ir_graph *rem = current_ir_graph;
1304 current_ir_graph = irg;
1305 res = new_bd_Break(db, block);
1306 current_ir_graph = rem;
1309 } /* new_rd_Break */
1312 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1315 ir_graph *rem = current_ir_graph;
1317 current_ir_graph = irg;
1318 res = new_bd_Filter(db, block, arg, mode, proj);
1319 current_ir_graph = rem;
1322 } /* new_rd_Filter */
1325 new_rd_NoMem(ir_graph *irg) {
1326 return get_irg_no_mem(irg);
1327 } /* new_rd_NoMem */
1330 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1331 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1333 ir_graph *rem = current_ir_graph;
1335 current_ir_graph = irg;
1336 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1337 current_ir_graph = rem;
1343 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1344 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1346 ir_graph *rem = current_ir_graph;
1348 current_ir_graph = irg;
1349 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1350 current_ir_graph = rem;
1355 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1356 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1358 ir_graph *rem = current_ir_graph;
1360 current_ir_graph = irg;
1361 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1362 current_ir_graph = rem;
1365 } /* new_rd_CopyB */
1368 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1369 ir_node *objptr, ir_type *type) {
1371 ir_graph *rem = current_ir_graph;
1373 current_ir_graph = irg;
1374 res = new_bd_InstOf(db, block, store, objptr, type);
1375 current_ir_graph = rem;
1378 } /* new_rd_InstOf */
1381 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1383 ir_graph *rem = current_ir_graph;
1385 current_ir_graph = irg;
1386 res = new_bd_Raise(db, block, store, obj);
1387 current_ir_graph = rem;
1390 } /* new_rd_Raise */
1392 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1393 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1395 ir_graph *rem = current_ir_graph;
1397 current_ir_graph = irg;
1398 res = new_bd_Bound(db, block, store, idx, lower, upper);
1399 current_ir_graph = rem;
1402 } /* new_rd_Bound */
1404 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1406 ir_graph *rem = current_ir_graph;
1408 current_ir_graph = irg;
1409 res = new_bd_Pin(db, block, node);
1410 current_ir_graph = rem;
/* --- new_r_* convenience wrappers (control flow, constants, Sel, Call) ---
 * Each simply forwards to the corresponding new_rd_* constructor with a
 * NULL dbg_info.
 * NOTE(review): lossy extract -- every wrapper below is missing its closing
 * brace line, and new_r_Sel / new_r_Call are missing the tail of their
 * parameter lists (ir_entity *ent / ir_type *tp). Byte-identical here;
 * restore from the full file. */
1415 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1416 return new_rd_Block(NULL, irg, arity, in);
1418 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1419 return new_rd_Start(NULL, irg, block);
1421 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1422 return new_rd_End(NULL, irg, block);
1424 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1425 return new_rd_Jmp(NULL, irg, block);
1427 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1428 return new_rd_IJmp(NULL, irg, block, tgt);
1430 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1431 return new_rd_Cond(NULL, irg, block, c);
1433 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1434 ir_node *store, int arity, ir_node **in) {
1435 return new_rd_Return(NULL, irg, block, store, arity, in);
1437 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1438 ir_mode *mode, tarval *con) {
1439 return new_rd_Const(NULL, irg, block, mode, con);
1441 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1442 ir_mode *mode, long value) {
1443 return new_rd_Const_long(NULL, irg, block, mode, value);
1445 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1446 ir_mode *mode, tarval *con, ir_type *tp) {
1447 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1449 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1450 symconst_symbol value, symconst_kind symkind) {
1451 return new_rd_SymConst(NULL, irg, block, value, symkind);
1453 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1454 ir_node *objptr, ir_entity *ent) {
1455 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1457 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1458 ir_node *objptr, int n_index, ir_node **index,
1460 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1462 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1463 ir_node *callee, int arity, ir_node **in,
1465 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
/* --- new_r_* convenience wrappers (arithmetic, bitwise, shift, Cmp) ---
 * All forward to new_rd_* with NULL dbg_info. The Quot/DivMod/Div/Mod group
 * additionally threads a memory operand (`memop`).
 * NOTE(review): lossy extract -- every wrapper is missing its closing brace
 * line; otherwise byte-identical to the original. */
1467 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1468 ir_node *op1, ir_node *op2, ir_mode *mode) {
1469 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1471 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1472 ir_node *op1, ir_node *op2, ir_mode *mode) {
1473 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1475 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1476 ir_node *op, ir_mode *mode) {
1477 return new_rd_Minus(NULL, irg, block, op, mode);
1479 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1480 ir_node *op1, ir_node *op2, ir_mode *mode) {
1481 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1483 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1484 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1485 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode);
1487 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1488 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1489 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode);
1491 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1492 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1493 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode);
1495 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1496 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1497 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode);
1499 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1500 ir_node *op, ir_mode *mode) {
1501 return new_rd_Abs(NULL, irg, block, op, mode);
1503 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1504 ir_node *op1, ir_node *op2, ir_mode *mode) {
1505 return new_rd_And(NULL, irg, block, op1, op2, mode);
1507 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1508 ir_node *op1, ir_node *op2, ir_mode *mode) {
1509 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1511 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1512 ir_node *op1, ir_node *op2, ir_mode *mode) {
1513 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1515 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1516 ir_node *op, ir_mode *mode) {
1517 return new_rd_Not(NULL, irg, block, op, mode);
1519 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1520 ir_node *op, ir_node *k, ir_mode *mode) {
1521 return new_rd_Shl(NULL, irg, block, op, k, mode);
1523 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1524 ir_node *op, ir_node *k, ir_mode *mode) {
1525 return new_rd_Shr(NULL, irg, block, op, k, mode);
1527 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1528 ir_node *op, ir_node *k, ir_mode *mode) {
1529 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1531 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1532 ir_node *op, ir_node *k, ir_mode *mode) {
1533 return new_rd_Rot(NULL, irg, block, op, k, mode);
1535 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1536 ir_node *op, ir_node *k, ir_mode *mode) {
1537 return new_rd_Carry(NULL, irg, block, op, k, mode);
1539 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1540 ir_node *op, ir_node *k, ir_mode *mode) {
1541 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1543 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1544 ir_node *op1, ir_node *op2) {
1545 return new_rd_Cmp(NULL, irg, block, op1, op2);
/* --- new_r_* convenience wrappers (conversion, memory, Phi, Proj, misc) ---
 * All forward to new_rd_* with NULL dbg_info; new_r_Bad/new_r_Unknown/
 * new_r_NoMem take no block since those nodes are block-independent.
 * NOTE(review): lossy extract -- closing braces are missing throughout, and
 * new_r_defaultProj / new_r_InstOf are missing the tail of their parameter
 * lists (long max_proj / ir_type *type). Byte-identical here. */
1547 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1548 ir_node *op, ir_mode *mode) {
1549 return new_rd_Conv(NULL, irg, block, op, mode);
1551 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1552 return new_rd_Cast(NULL, irg, block, op, to_tp);
1554 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1555 ir_node **in, ir_mode *mode) {
1556 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1558 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1559 ir_node *store, ir_node *adr, ir_mode *mode) {
1560 return new_rd_Load(NULL, irg, block, store, adr, mode);
1562 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1563 ir_node *store, ir_node *adr, ir_node *val) {
1564 return new_rd_Store(NULL, irg, block, store, adr, val);
1566 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1567 ir_node *size, ir_type *alloc_type, where_alloc where) {
1568 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1570 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1571 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1572 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1574 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1575 return new_rd_Sync(NULL, irg, block, arity, in);
1577 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1578 ir_mode *mode, long proj) {
1579 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1581 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1583 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1585 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1586 int arity, ir_node **in) {
1587 return new_rd_Tuple(NULL, irg, block, arity, in );
1589 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1590 ir_node *val, ir_mode *mode) {
1591 return new_rd_Id(NULL, irg, block, val, mode);
1593 ir_node *new_r_Bad(ir_graph *irg) {
1594 return new_rd_Bad(irg);
1596 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1597 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1599 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1600 return new_rd_Unknown(irg, m);
1602 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1603 return new_rd_CallBegin(NULL, irg, block, callee);
1605 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1606 return new_rd_EndReg(NULL, irg, block);
1608 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1609 return new_rd_EndExcept(NULL, irg, block);
1611 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1612 return new_rd_Break(NULL, irg, block);
1614 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1615 ir_mode *mode, long proj) {
1616 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1618 ir_node *new_r_NoMem(ir_graph *irg) {
1619 return new_rd_NoMem(irg);
1621 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1622 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1623 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1625 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1626 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1627 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1629 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1630 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1631 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1633 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1635 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1637 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1638 ir_node *store, ir_node *obj) {
1639 return new_rd_Raise(NULL, irg, block, store, obj);
1641 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1642 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1643 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1645 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1646 return new_rd_Pin(NULL, irg, block, node);
1649 /** ********************/
1650 /** public interfaces */
1651 /** construction tools */
1655 * - create a new Start node in the current block
1657 * @return s - pointer to the created Start node
/* new_d_Start: builds a Start node (mode_T, no predecessors) in the current
 * block of the current graph, then runs local optimization and verification.
 * NOTE(review): lossy extract -- the `ir_node *res;` declaration, the
 * `return res;` and the closing brace are missing here. */
1662 new_d_Start(dbg_info *db) {
1665 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1666 op_Start, mode_T, 0, NULL);
1668 res = optimize_node(res);
1669 IRN_VRFY_IRG(res, current_ir_graph);
/* new_d_End: builds the End node (mode_X) in the current block.  The -1
 * arity here requests a dynamic in-array (keep-alive edges are appended
 * later).  NOTE(review): lossy extract -- `res` declaration, `return res;`
 * and closing brace are missing. */
1674 new_d_End(dbg_info *db) {
1676 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1677 op_End, mode_X, -1, NULL);
1678 res = optimize_node(res);
1679 IRN_VRFY_IRG(res, current_ir_graph);
1684 /* Constructs a Block with a fixed number of predecessors.
1685 Does set current_block. Can be used with automatic Phi
1686 node construction. */
/* While the graph is still being built, each block carries a graph_arr of
 * n_loc slots used by the SSA/Phi construction (get_value/set_value).
 * Blocks with Unknown predecessors are not optimized yet -- presumably
 * because optimize_node could fold away a block that is still incomplete;
 * confirm against the full file.
 * NOTE(review): lossy extract -- `ir_node *res;`, `int i;`, the body of the
 * Unknown-scan loop (setting has_unknown), `return res;` and the closing
 * brace are missing here. */
1688 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1691 int has_unknown = 0;
1693 res = new_bd_Block(db, arity, in);
1695 /* Create and initialize array for Phi-node construction. */
1696 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1697 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1698 current_ir_graph->n_loc);
1699 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1702 for (i = arity-1; i >= 0; i--)
1703 if (get_irn_op(in[i]) == op_Unknown) {
1708 if (!has_unknown) res = optimize_node(res);
1709 current_ir_graph->current_block = res;
1711 IRN_VRFY_IRG(res, current_ir_graph);
1716 /* ***********************************************************************/
1717 /* Methods necessary for automatic Phi node creation */
1719 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1720 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1721 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1722 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1724 Call Graph: ( A ---> B == A "calls" B)
1726 get_value mature_immBlock
1734 get_r_value_internal |
1738 new_rd_Phi0 new_rd_Phi_in
1740 * *************************************************************************** */
1742 /** Creates a Phi node with 0 predecessors. */
/* Phi0 is a placeholder created when a value is requested in an immature
 * block; mature_immBlock later upgrades it to a real Phi.
 * NOTE(review): lossy extract -- `ir_node *res;`, `return res;` and the
 * closing brace are missing, and the call-graph diagram above lost lines. */
1743 static INLINE ir_node *
1744 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1747 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1748 IRN_VRFY_IRG(res, irg);
1752 /* There are two implementations of the Phi node construction. The first
1753 is faster, but does not work for blocks with more than 2 predecessors.
1754 The second works always but is slower and causes more unnecessary Phi
1756 Select the implementations by the following preprocessor flag set in
1758 #if USE_FAST_PHI_CONSTRUCTION
1760 /* This is a stack used for allocating and deallocating nodes in
1761 new_rd_Phi_in. The original implementation used the obstack
1762 to model this stack, now it is explicit. This reduces side effects.
1764 #if USE_EXPLICIT_PHI_IN_STACK
/* new_Phi_in_stack: heap-allocates a Phi_in_stack whose `stack` member is a
 * flexible array (NEW_ARR_F) starting empty.
 * NOTE(review): lossy extract -- the `Phi_in_stack *res;` declaration, the
 * malloc NULL-check (if any), the `pos` initialization and `return res;`
 * are missing; also note malloc result is used unchecked as visible here. */
1766 new_Phi_in_stack(void) {
1769 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1771 res->stack = NEW_ARR_F (ir_node *, 0);
1775 } /* new_Phi_in_stack */
/* free_Phi_in_stack: releases the flexible array; the struct itself is
 * presumably freed on a missing line (lossy extract -- `free(s);` and the
 * return type line are not visible here). */
1778 free_Phi_in_stack(Phi_in_stack *s) {
1779 DEL_ARR_F(s->stack);
1781 } /* free_Phi_in_stack */
/* free_to_Phi_in_stack: pushes a no-longer-needed Phi node onto the reuse
 * stack.  If pos is at the end of the array, append; otherwise overwrite
 * the free slot.  NOTE(review): lossy extract -- the `else` keyword between
 * the two stores and the declaration line of this function are missing. */
1784 free_to_Phi_in_stack(ir_node *phi) {
1785 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1786 current_ir_graph->Phi_in_stack->pos)
1787 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1789 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1791 (current_ir_graph->Phi_in_stack->pos)++;
1792 } /* free_to_Phi_in_stack */
/* alloc_or_pop_from_Phi_in_stack: either allocates a fresh Phi node or
 * recycles one from the Phi_in_stack, re-initializing its in-array on the
 * obstack.  NOTE(review): lossy extract -- the branch structure (the
 * `if (pos == 0)` / `else` around the two paths), the `ir_node *res;`
 * declaration, the block/in[0] re-initialization and `return res;` are
 * missing.  Also `db` is used at original line 1804 with no visible
 * declaration -- confirm against the full file. */
1794 static INLINE ir_node *
1795 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1796 int arity, ir_node **in) {
1798 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1799 int pos = current_ir_graph->Phi_in_stack->pos;
1803 /* We need to allocate a new node */
1804 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1805 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1807 /* reuse the old node and initialize it again. */
1810 assert(res->kind == k_ir_node);
1811 assert(res->op == op_Phi);
1816 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1817 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
1819 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1821 (current_ir_graph->Phi_in_stack->pos)--;
1824 } /* alloc_or_pop_from_Phi_in_stack */
1825 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1828 * Creates a Phi node with a given, fixed array **in of predecessors.
1829 * If the Phi node is unnecessary, as the same value reaches the block
1830 * through all control flow paths, it is eliminated and the value
1831 * returned directly. This constructor is only intended for use in
1832 * the automatic Phi node generation triggered by get_value or mature.
1833 * The implementation is quite tricky and depends on the fact, that
1834 * the nodes are allocated on a stack:
1835 * The in array contains predecessors and NULLs. The NULLs appear,
1836 * if get_r_value_internal, that computed the predecessors, reached
1837 * the same block on two paths. In this case the same value reaches
1838 * this block on both paths, there is no definition in between. We need
1839 * not allocate a Phi where these path's merge, but we have to communicate
1840 * this fact to the caller. This happens by returning a pointer to the
1841 * node the caller _will_ allocate. (Yes, we predict the address. We can
1842 * do so because the nodes are allocated on the obstack.) The caller then
1843 * finds a pointer to itself and, when this routine is called again,
1844 * eliminates itself.
/* Fast-path (USE_FAST_PHI_CONSTRUCTION) variant of new_rd_Phi_in.
 * NOTE(review): lossy extract -- the `int i;` declaration, the #else/#endif
 * lines of both USE_EXPLICIT_PHI_IN_STACK conditionals, the NULL-replacement
 * loop body, the break/known-assignment inside the scan loop, and the
 * `return res;` lines are missing.  Code kept byte-identical. */
1846 static INLINE ir_node *
1847 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins) {
1849 ir_node *res, *known;
1851 /* Allocate a new node on the obstack. This can return a node to
1852 which some of the pointers in the in-array already point.
1853 Attention: the constructor copies the in array, i.e., the later
1854 changes to the array in this routine do not affect the
1855 constructed node! If the in array contains NULLs, there will be
1856 missing predecessors in the returned node. Is this a possible
1857 internal state of the Phi node generation? */
1858 #if USE_EXPLICIT_PHI_IN_STACK
1859 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1861 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1862 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1865 /* The in-array can contain NULLs. These were returned by
1866 get_r_value_internal if it reached the same block/definition on a
1867 second path. The NULLs are replaced by the node itself to
1868 simplify the test in the next loop. */
1869 for (i = 0; i < ins; ++i) {
1874 /* This loop checks whether the Phi has more than one predecessor.
1875 If so, it is a real Phi node and we break the loop. Else the Phi
1876 node merges the same definition on several paths and therefore is
1878 for (i = 0; i < ins; ++i) {
1879 if (in[i] == res || in[i] == known)
1888 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1890 #if USE_EXPLICIT_PHI_IN_STACK
1891 free_to_Phi_in_stack(res);
1893 edges_node_deleted(res, current_ir_graph);
1894 obstack_free(current_ir_graph->obst, res);
1898 res = optimize_node (res);
1899 IRN_VRFY_IRG(res, irg);
1902 /* return the pointer to the Phi node. This node might be deallocated! */
1904 } /* new_rd_Phi_in */
/* Forward declaration (return type line lost in this extract). */
1907 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1910 * Allocates and returns this node. The routine called to allocate the
1911 * node might optimize it away and return a real value, or even a pointer
1912 * to a deallocated Phi node on top of the obstack!
1913 * This function is called with an in-array of proper size.
/* Fast-path phi_merge: collects, for each CF predecessor of `block`, the
 * reaching definition of local variable `pos` and feeds them to
 * new_rd_Phi_in.  NOTE(review): lossy extract -- the function's return-type
 * line, `int i;`, the else-branch storing into graph_arr, `return res;`
 * and the closing brace are missing. */
1916 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
1917 ir_node *prevBlock, *res;
1920 /* This loop goes to all predecessor blocks of the block the Phi node is in
1921 and there finds the operands of the Phi node by calling
1922 get_r_value_internal. */
1923 for (i = 1; i <= ins; ++i) {
1924 assert (block->in[i]);
1925 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1927 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1930 /* After collecting all predecessors into the array nin a new Phi node
1931 with these predecessors is created. This constructor contains an
1932 optimization: If all predecessors of the Phi node are identical it
1933 returns the only operand instead of a new Phi node. If the value
1934 passes two different control flow edges without being defined, and
1935 this is the second path treated, a pointer to the node that will be
1936 allocated for the first path (recursion) is returned. We already
1937 know the address of this node, as it is the next node to be allocated
1938 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1939 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1941 /* Now we now the value for "pos" and can enter it in the array with
1942 all known local variables. Attention: this might be a pointer to
1943 a node, that later will be allocated!!! See new_rd_Phi_in().
1944 If this is called in mature, after some set_value() in the same block,
1945 the proper value must not be overwritten:
1947 get_value (makes Phi0, put's it into graph_arr)
1948 set_value (overwrites Phi0 in graph_arr)
1949 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1952 if (!block->attr.block.graph_arr[pos]) {
1953 block->attr.block.graph_arr[pos] = res;
1955 /* printf(" value already computed by %s\n",
1956 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1963 * This function returns the last definition of a variable. In case
1964 * this variable was last defined in a previous block, Phi nodes are
1965 * inserted. If the part of the firm graph containing the definition
1966 * is not yet constructed, a dummy Phi node is returned.
/* Fast-path get_r_value_internal: returns the last definition of local
 * variable `pos` (mode `mode`) visible in `block`, creating Phi/Phi0 nodes
 * as needed.  The 4-case comment below documents the algorithm.
 * NOTE(review): lossy extract -- the return-type line, local declarations
 * (`ir_node *res;`, `ir_node **nin;`), the `block->link = res;` tail of
 * case 1, the `} else {` before the undefined-variable fallback, and the
 * final `return res;` are missing.  Code kept byte-identical. */
1969 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
1972 /* There are 4 cases to treat.
1974 1. The block is not mature and we visit it the first time. We can not
1975 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1976 predecessors is returned. This node is added to the linked list (field
1977 "link") of the containing block to be completed when this block is
1978 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1981 2. The value is already known in this block, graph_arr[pos] is set and we
1982 visit the block the first time. We can return the value without
1983 creating any new nodes.
1985 3. The block is mature and we visit it the first time. A Phi node needs
1986 to be created (phi_merge). If the Phi is not needed, as all it's
1987 operands are the same value reaching the block through different
1988 paths, it's optimized away and the value itself is returned.
1990 4. The block is mature, and we visit it the second time. Now two
1991 subcases are possible:
1992 * The value was computed completely the last time we were here. This
1993 is the case if there is no loop. We can return the proper value.
1994 * The recursion that visited this node and set the flag did not
1995 return yet. We are computing a value in a loop and need to
1996 break the recursion without knowing the result yet.
1997 @@@ strange case. Straight forward we would create a Phi before
1998 starting the computation of it's predecessors. In this case we will
1999 find a Phi here in any case. The problem is that this implementation
2000 only creates a Phi after computing the predecessors, so that it is
2001 hard to compute self references of this Phi. @@@
2002 There is no simple check for the second subcase. Therefore we check
2003 for a second visit and treat all such cases as the second subcase.
2004 Anyways, the basic situation is the same: we reached a block
2005 on two paths without finding a definition of the value: No Phi
2006 nodes are needed on both paths.
2007 We return this information "Two paths, no Phi needed" by a very tricky
2008 implementation that relies on the fact that an obstack is a stack and
2009 will return a node with the same address on different allocations.
2010 Look also at phi_merge and new_rd_phi_in to understand this.
2011 @@@ Unfortunately this does not work, see testprogram
2012 three_cfpred_example.
2016 /* case 4 -- already visited. */
2017 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2019 /* visited the first time */
2020 set_irn_visited(block, get_irg_visited(current_ir_graph));
2022 /* Get the local valid value */
2023 res = block->attr.block.graph_arr[pos];
2025 /* case 2 -- If the value is actually computed, return it. */
2026 if (res) return res;
2028 if (block->attr.block.matured) { /* case 3 */
2030 /* The Phi has the same amount of ins as the corresponding block. */
2031 int ins = get_irn_arity(block);
2033 NEW_ARR_A(ir_node *, nin, ins);
2035 /* Phi merge collects the predecessors and then creates a node. */
2036 res = phi_merge(block, pos, mode, nin, ins);
2038 } else { /* case 1 */
2039 /* The block is not mature, we don't know how many in's are needed. A Phi
2040 with zero predecessors is created. Such a Phi node is called Phi0
2041 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2042 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2044 The Phi0 has to remember the pos of it's internal value. If the real
2045 Phi is computed, pos is used to update the array with the local
2048 res = new_rd_Phi0(current_ir_graph, block, mode);
2049 res->attr.phi0_pos = pos;
2050 res->link = block->link;
2054 /* If we get here, the frontend missed a use-before-definition error */
2057 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2058 assert(mode->code >= irm_F && mode->code <= irm_P);
2059 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2060 tarval_mode_null[mode->code]);
2063 /* The local valid value is available now. */
2064 block->attr.block.graph_arr[pos] = res;
2067 } /* get_r_value_internal */
2072 it starts the recursion. This causes an Id at the entry of
2073 every block that has no definition of the value! **/
/* Slow-path (#else of USE_FAST_PHI_CONSTRUCTION, presumably -- the #else
 * line itself is lost in this extract): no-op stubs so callers of the
 * explicit Phi_in_stack API still link when the stack is unused. */
2075 #if USE_EXPLICIT_PHI_IN_STACK
2077 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2078 void free_Phi_in_stack(Phi_in_stack *s) {}
/* Slow-path new_rd_Phi_in: like the fast variant but additionally takes the
 * Phi0 placeholder (`phi0`) that this Phi will replace, so self-references
 * through the Phi0 can be folded immediately; Bad predecessors are skipped.
 * Memory-mode Phis are kept alive via the End node because endless loops
 * would otherwise let them die.
 * NOTE(review): lossy extract -- `int i;`, the `known = res;` scan logic,
 * the `if (i == ins) { ... } else { ... }` frame, the Bad/Unknown fallback
 * branch and the final `return res;` are missing. */
2081 static INLINE ir_node *
2082 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2083 ir_node **in, int ins, ir_node *phi0) {
2085 ir_node *res, *known;
2087 /* Allocate a new node on the obstack. The allocation copies the in
2089 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2090 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2092 /* This loop checks whether the Phi has more than one predecessor.
2093 If so, it is a real Phi node and we break the loop. Else the
2094 Phi node merges the same definition on several paths and therefore
2095 is not needed. Don't consider Bad nodes! */
2097 for (i=0; i < ins; ++i)
2101 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2103 /* Optimize self referencing Phis: We can't detect them yet properly, as
2104 they still refer to the Phi0 they will replace. So replace right now. */
2105 if (phi0 && in[i] == phi0) in[i] = res;
2107 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2115 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2118 edges_node_deleted(res, current_ir_graph);
2119 obstack_free (current_ir_graph->obst, res);
2120 if (is_Phi(known)) {
2121 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2122 order, an enclosing Phi know may get superfluous. */
2123 res = optimize_in_place_2(known);
2125 exchange(known, res);
2131 /* A undefined value, e.g., in unreachable code. */
2135 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2136 IRN_VRFY_IRG(res, irg);
2137 /* Memory Phis in endless loops must be kept alive.
2138 As we can't distinguish these easily we keep all of them alive. */
2139 if ((res->op == op_Phi) && (mode == mode_M))
2140 add_End_keepalive(get_irg_end(irg), res);
2144 } /* new_rd_Phi_in */
/* Forward declarations (return-type lines lost in this extract). */
2147 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2149 #if PRECISE_EXC_CONTEXT
2151 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2154 * Construct a new frag_array for node n.
2155 * Copy the content from the current graph_arr of the corresponding block:
2156 * this is the current state.
2157 * Set ProjM(n) as current memory state.
2158 * Further the last entry in frag_arr of current block points to n. This
2159 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* NOTE(review): lossy extract -- `ir_node **arr;`, the `int opt;` decl,
 * the `else` before the generic pn case, the `set_optimize(opt);` restore
 * and `return arr;` are missing.  The pn_Quot_M asserts guard the shortcut
 * that all fragile ops deliver memory at the same Proj number. */
2161 static INLINE ir_node **new_frag_arr(ir_node *n) {
2165 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2166 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2167 sizeof(ir_node *)*current_ir_graph->n_loc);
2169 /* turn off optimization before allocating Proj nodes, as res isn't
2171 opt = get_opt_optimize(); set_optimize(0);
2172 /* Here we rely on the fact that all frag ops have Memory as first result! */
2173 if (get_irn_op(n) == op_Call)
2174 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2175 else if (get_irn_op(n) == op_CopyB)
2176 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2178 assert((pn_Quot_M == pn_DivMod_M) &&
2179 (pn_Quot_M == pn_Div_M) &&
2180 (pn_Quot_M == pn_Mod_M) &&
2181 (pn_Quot_M == pn_Load_M) &&
2182 (pn_Quot_M == pn_Store_M) &&
2183 (pn_Quot_M == pn_Alloc_M) &&
2184 (pn_Quot_M == pn_Bound_M));
2185 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2189 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2191 } /* new_frag_arr */
2194 * Returns the frag_arr from a node.
/* Dispatches on opcode to find the per-op exception attribute holding the
 * frag_arr.  NOTE(review): lossy extract -- the `case iro_Call:` etc. case
 * labels, the `default:` and the closing braces are missing; only the
 * return statements of each case survived. */
2196 static INLINE ir_node **get_frag_arr(ir_node *n) {
2197 switch (get_irn_opcode(n)) {
2199 return n->attr.call.exc.frag_arr;
2201 return n->attr.alloc.exc.frag_arr;
2203 return n->attr.load.exc.frag_arr;
2205 return n->attr.store.exc.frag_arr;
2207 return n->attr.except.frag_arr;
2209 } /* get_frag_arr */
/* set_frag_value: writes `val` into slot `pos` of each frag_arr along the
 * chain of fragile ops (linked through slot n_loc-1), never overwriting an
 * existing value.  NOTE(review): this extract appears to contain TWO
 * versions interleaved -- a recursive one (lines 2212-2218) and an
 * iterative one with a 1000-step bound (lines 2223-2234); the #if/#else
 * or replacement boundary between them was lost, as were the function's
 * declaration line for the second version, loop-advance statements and
 * closing braces.  Kept byte-identical; reconstruct from the full file. */
2212 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2214 if (!frag_arr[pos]) frag_arr[pos] = val;
2215 if (frag_arr[current_ir_graph->n_loc - 1]) {
2216 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2217 assert(arr != frag_arr && "Endless recursion detected");
2218 set_frag_value(arr, pos, val);
2223 for (i = 0; i < 1000; ++i) {
2224 if (!frag_arr[pos]) {
2225 frag_arr[pos] = val;
2227 if (frag_arr[current_ir_graph->n_loc - 1]) {
2228 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2234 assert(0 && "potential endless recursion");
2236 } /* set_frag_value */
/* get_r_frag_value_internal: like get_r_value_internal but consults the
 * frag_arr of the fragile op `cfOp`, so a set_value() performed after the
 * fragile op (but before the exception edge) is honored.
 * NOTE(review): lossy extract -- the return-type line, local declarations
 * (`ir_node *res;`, `ir_node **frag_arr;`, `ir_node **nin;`), the
 * `if (!res)` frames around both branches, `block->link = res;` and the
 * final `return res;` are missing. */
2239 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2243 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2245 frag_arr = get_frag_arr(cfOp);
2246 res = frag_arr[pos];
2248 if (block->attr.block.graph_arr[pos]) {
2249 /* There was a set_value() after the cfOp and no get_value before that
2250 set_value(). We must build a Phi node now. */
2251 if (block->attr.block.matured) {
2252 int ins = get_irn_arity(block);
2254 NEW_ARR_A (ir_node *, nin, ins);
2255 res = phi_merge(block, pos, mode, nin, ins);
2257 res = new_rd_Phi0 (current_ir_graph, block, mode);
2258 res->attr.phi0_pos = pos;
2259 res->link = block->link;
2263 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2264 but this should be better: (remove comment if this works) */
2265 /* It's a Phi, we can write this into all graph_arrs with NULL */
2266 set_frag_value(block->attr.block.graph_arr, pos, res);
2268 res = get_r_value_internal(block, pos, mode);
2269 set_frag_value(block->attr.block.graph_arr, pos, res);
2273 } /* get_r_frag_value_internal */
2274 #endif /* PRECISE_EXC_CONTEXT */
2277 * Computes the predecessors for the real phi node, and then
2278 * allocates and returns this node. The routine called to allocate the
2279 * node might optimize it away and return a real value.
2280 * This function must be called with an in-array of proper size.
/* Slow-path phi_merge.  Differences from the fast variant: breaks recursion
 * with an explicit Phi0 placeholder stored in graph_arr; consults frag_arrs
 * of fragile predecessor ops when PRECISE_EXC_CONTEXT is on; and handles
 * Bad control-flow predecessors.  NOTE(review): lossy extract -- the
 * return-type line, local declarations (`int i;`), `continue;` in the Bad
 * branch, the else-branches of both #if'd sections, the `phi0_all = NULL;`
 * reset, the `if (phi0 != NULL)` frame around the exchange, and the final
 * `return res;` are missing.  Kept byte-identical. */
2283 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2284 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2287 /* If this block has no value at pos create a Phi0 and remember it
2288 in graph_arr to break recursions.
2289 Else we may not set graph_arr as there a later value is remembered. */
2291 if (!block->attr.block.graph_arr[pos]) {
2292 if (block == get_irg_start_block(current_ir_graph)) {
2293 /* Collapsing to Bad tarvals is no good idea.
2294 So we call a user-supplied routine here that deals with this case as
2295 appropriate for the given language. Sorrily the only help we can give
2296 here is the position.
2298 Even if all variables are defined before use, it can happen that
2299 we get to the start block, if a Cond has been replaced by a tuple
2300 (bad, jmp). In this case we call the function needlessly, eventually
2301 generating an non existent error.
2302 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2305 if (default_initialize_local_variable) {
2306 ir_node *rem = get_cur_block();
2308 set_cur_block(block);
2309 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2313 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2314 /* We don't need to care about exception ops in the start block.
2315 There are none by definition. */
2316 return block->attr.block.graph_arr[pos];
2318 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2319 block->attr.block.graph_arr[pos] = phi0;
2320 #if PRECISE_EXC_CONTEXT
2321 if (get_opt_precise_exc_context()) {
2322 /* Set graph_arr for fragile ops. Also here we should break recursion.
2323 We could choose a cyclic path through an cfop. But the recursion would
2324 break at some point. */
2325 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2331 /* This loop goes to all predecessor blocks of the block the Phi node
2332 is in and there finds the operands of the Phi node by calling
2333 get_r_value_internal. */
2334 for (i = 1; i <= ins; ++i) {
2335 prevCfOp = skip_Proj(block->in[i]);
2337 if (is_Bad(prevCfOp)) {
2338 /* In case a Cond has been optimized we would get right to the start block
2339 with an invalid definition. */
2340 nin[i-1] = new_Bad();
2343 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2345 if (!is_Bad(prevBlock)) {
2346 #if PRECISE_EXC_CONTEXT
2347 if (get_opt_precise_exc_context() &&
2348 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2349 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2350 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2353 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2355 nin[i-1] = new_Bad();
2359 /* We want to pass the Phi0 node to the constructor: this finds additional
2360 optimization possibilities.
2361 The Phi0 node either is allocated in this function, or it comes from
2362 a former call to get_r_value_internal. In this case we may not yet
2363 exchange phi0, as this is done in mature_immBlock. */
2365 phi0_all = block->attr.block.graph_arr[pos];
2366 if (!((get_irn_op(phi0_all) == op_Phi) &&
2367 (get_irn_arity(phi0_all) == 0) &&
2368 (get_nodes_block(phi0_all) == block)))
2374 /* After collecting all predecessors into the array nin a new Phi node
2375 with these predecessors is created. This constructor contains an
2376 optimization: If all predecessors of the Phi node are identical it
2377 returns the only operand instead of a new Phi node. */
2378 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2380 /* In case we allocated a Phi0 node at the beginning of this procedure,
2381 we need to exchange this Phi0 with the real Phi. */
2383 exchange(phi0, res);
2384 block->attr.block.graph_arr[pos] = res;
2385 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2386 only an optimization. */
2393 * This function returns the last definition of a variable. In case
2394 * this variable was last defined in a previous block, Phi nodes are
2395 * inserted. If the part of the firm graph containing the definition
2396 * is not yet constructed, a dummy Phi node is returned.
2399 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2401 /* There are 4 cases to treat.
2403 1. The block is not mature and we visit it the first time. We can not
2404 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2405 predecessors is returned. This node is added to the linked list (field
2406 "link") of the containing block to be completed when this block is
2407 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2410 2. The value is already known in this block, graph_arr[pos] is set and we
2411 visit the block the first time. We can return the value without
2412 creating any new nodes.
2414 3. The block is mature and we visit it the first time. A Phi node needs
2415 to be created (phi_merge). If the Phi is not needed, as all it's
2416 operands are the same value reaching the block through different
2417 paths, it's optimized away and the value itself is returned.
2419 4. The block is mature, and we visit it the second time. Now two
2420 subcases are possible:
2421 * The value was computed completely the last time we were here. This
2422 is the case if there is no loop. We can return the proper value.
2423 * The recursion that visited this node and set the flag did not
2424 return yet. We are computing a value in a loop and need to
2425 break the recursion. This case only happens if we visited
2426 the same block with phi_merge before, which inserted a Phi0.
2427 So we return the Phi0.
2430 /* case 4 -- already visited. */
2431 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2432 /* As phi_merge allocates a Phi0 this value is always defined. Here
2433 is the critical difference of the two algorithms. */
2434 assert(block->attr.block.graph_arr[pos]);
2435 return block->attr.block.graph_arr[pos];
2438 /* visited the first time */
2439 set_irn_visited(block, get_irg_visited(current_ir_graph));
2441 /* Get the local valid value */
2442 res = block->attr.block.graph_arr[pos];
2444 /* case 2 -- If the value is actually computed, return it. */
2445 if (res) { return res; };
2447 if (block->attr.block.matured) { /* case 3 */
2449 /* The Phi has the same amount of ins as the corresponding block. */
2450 int ins = get_irn_arity(block);
2452 NEW_ARR_A (ir_node *, nin, ins);
2454 /* Phi merge collects the predecessors and then creates a node. */
2455 res = phi_merge (block, pos, mode, nin, ins);
2457 } else { /* case 1 */
2458 /* The block is not mature, we don't know how many in's are needed. A Phi
2459 with zero predecessors is created. Such a Phi node is called Phi0
2460 node. The Phi0 is then added to the list of Phi0 nodes in this block
2461 to be matured by mature_immBlock later.
2462 The Phi0 has to remember the pos of it's internal value. If the real
2463 Phi is computed, pos is used to update the array with the local
2465 res = new_rd_Phi0 (current_ir_graph, block, mode);
2466 res->attr.phi0_pos = pos;
2467 res->link = block->link;
2471 /* If we get here, the frontend missed a use-before-definition error */
2474 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2475 assert(mode->code >= irm_F && mode->code <= irm_P);
2476 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2477 get_mode_null(mode));
2480 /* The local valid value is available now. */
2481 block->attr.block.graph_arr[pos] = res;
2484 } /* get_r_value_internal */
2486 #endif /* USE_FAST_PHI_CONSTRUCTION */
2488 /* ************************************************************************** */
2491 * Finalize a Block node, when all control flows are known.
2492 * Acceptable parameters are only Block nodes.
2495 mature_immBlock(ir_node *block) {
2500 assert(get_irn_opcode(block) == iro_Block);
2501 /* @@@ should be commented in
2502 assert (!get_Block_matured(block) && "Block already matured"); */
2504 if (!get_Block_matured(block)) {
2505 ins = ARR_LEN (block->in)-1;
2506 /* Fix block parameters */
2507 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2509 /* An array for building the Phi nodes. */
2510 NEW_ARR_A (ir_node *, nin, ins);
2512 /* Traverse a chain of Phi nodes attached to this block and mature
2514 for (n = block->link; n; n = next) {
2515 inc_irg_visited(current_ir_graph);
2517 exchange(n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2520 block->attr.block.matured = 1;
2522 /* Now, as the block is a finished firm node, we can optimize it.
2523 Since other nodes have been allocated since the block was created
2524 we can not free the node on the obstack. Therefore we have to call
2526 Unfortunately the optimization does not change a lot, as all allocated
2527 nodes refer to the unoptimized node.
2528 We can call _2, as global cse has no effect on blocks. */
2529 block = optimize_in_place_2(block);
2530 IRN_VRFY_IRG(block, current_ir_graph);
2532 } /* mature_immBlock */
2535 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2536 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2540 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2541 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2545 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2546 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2547 } /* new_d_Const_long */
2550 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2551 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2552 } /* new_d_Const_type */
2556 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2557 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2561 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2562 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2566 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2568 assert(arg->op == op_Cond);
2569 arg->attr.cond.kind = fragmentary;
2570 arg->attr.cond.default_proj = max_proj;
2571 res = new_Proj(arg, mode_X, max_proj);
2573 } /* new_d_defaultProj */
2576 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2577 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2581 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2582 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2583 } /* new_d_strictConv */
2586 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2587 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2591 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2592 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2601 * Allocate the frag array.
2603 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2604 if (get_opt_precise_exc_context()) {
2605 if ((current_ir_graph->phase_state == phase_building) &&
2606 (get_irn_op(res) == op) && /* Could be optimized away. */
2607 !*frag_store) /* Could be a cse where the arr is already set. */ {
2608 *frag_store = new_frag_arr(res);
2611 } /* allocate_frag_arr */
2614 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2616 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode);
2617 #if PRECISE_EXC_CONTEXT
2618 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2625 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2627 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode);
2628 #if PRECISE_EXC_CONTEXT
2629 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2633 } /* new_d_DivMod */
2636 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2638 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode);
2639 #if PRECISE_EXC_CONTEXT
2640 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2647 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2649 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode);
2650 #if PRECISE_EXC_CONTEXT
2651 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2670 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2671 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2675 new_d_Jmp(dbg_info *db) {
2676 return new_bd_Jmp(db, current_ir_graph->current_block);
2680 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2681 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2685 new_d_Cond(dbg_info *db, ir_node *c) {
2686 return new_bd_Cond(db, current_ir_graph->current_block, c);
2690 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2693 res = new_bd_Call(db, current_ir_graph->current_block,
2694 store, callee, arity, in, tp);
2695 #if PRECISE_EXC_CONTEXT
2696 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2703 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2704 return new_bd_Return(db, current_ir_graph->current_block,
2706 } /* new_d_Return */
2709 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2711 res = new_bd_Load(db, current_ir_graph->current_block,
2713 #if PRECISE_EXC_CONTEXT
2714 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2721 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2723 res = new_bd_Store(db, current_ir_graph->current_block,
2725 #if PRECISE_EXC_CONTEXT
2726 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2733 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2734 where_alloc where) {
2736 res = new_bd_Alloc(db, current_ir_graph->current_block,
2737 store, size, alloc_type, where);
2738 #if PRECISE_EXC_CONTEXT
2739 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2746 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2747 ir_node *size, ir_type *free_type, where_alloc where) {
2748 return new_bd_Free(db, current_ir_graph->current_block,
2749 store, ptr, size, free_type, where);
2753 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2754 /* GL: objptr was called frame before. Frame was a bad choice for the name
2755 as the operand could as well be a pointer to a dynamic object. */
2757 return new_bd_Sel(db, current_ir_graph->current_block,
2758 store, objptr, 0, NULL, ent);
2759 } /* new_d_simpleSel */
2762 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2763 return new_bd_Sel(db, current_ir_graph->current_block,
2764 store, objptr, n_index, index, sel);
2768 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2769 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2771 } /* new_d_SymConst_type */
2774 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
2775 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2776 value, kind, firm_unknown_type);
2777 } /* new_d_SymConst */
2780 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2781 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2787 return _new_d_Bad();
2791 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2792 return new_bd_Confirm(db, current_ir_graph->current_block,
2794 } /* new_d_Confirm */
2797 new_d_Unknown(ir_mode *m) {
2798 return new_bd_Unknown(m);
2799 } /* new_d_Unknown */
2802 new_d_CallBegin(dbg_info *db, ir_node *call) {
2803 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2804 } /* new_d_CallBegin */
2807 new_d_EndReg(dbg_info *db) {
2808 return new_bd_EndReg(db, current_ir_graph->current_block);
2809 } /* new_d_EndReg */
2812 new_d_EndExcept(dbg_info *db) {
2813 return new_bd_EndExcept(db, current_ir_graph->current_block);
2814 } /* new_d_EndExcept */
2817 new_d_Break(dbg_info *db) {
2818 return new_bd_Break(db, current_ir_graph->current_block);
2822 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2823 return new_bd_Filter(db, current_ir_graph->current_block,
2825 } /* new_d_Filter */
2828 (new_d_NoMem)(void) {
2829 return _new_d_NoMem();
2833 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2834 ir_node *ir_true, ir_mode *mode) {
2835 return new_bd_Mux(db, current_ir_graph->current_block,
2836 sel, ir_false, ir_true, mode);
2840 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2841 return new_bd_Psi(db, current_ir_graph->current_block,
2842 arity, conds, vals, mode);
2845 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2846 ir_node *dst, ir_node *src, ir_type *data_type) {
2848 res = new_bd_CopyB(db, current_ir_graph->current_block,
2849 store, dst, src, data_type);
2850 #if PRECISE_EXC_CONTEXT
2851 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2857 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2858 return new_bd_InstOf(db, current_ir_graph->current_block,
2859 store, objptr, type);
2860 } /* new_d_InstOf */
2863 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2864 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2867 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2868 ir_node *idx, ir_node *lower, ir_node *upper) {
2870 res = new_bd_Bound(db, current_ir_graph->current_block,
2871 store, idx, lower, upper);
2872 #if PRECISE_EXC_CONTEXT
2873 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2879 new_d_Pin(dbg_info *db, ir_node *node) {
2880 return new_bd_Pin(db, current_ir_graph->current_block, node);
2883 /* ********************************************************************* */
2884 /* Comfortable interface with automatic Phi node construction. */
2885 /* (Uses also constructors of the ?? interface, except new_Block.) */
2886 /* ********************************************************************* */
2888 /* Block construction */
2889 /* immature Block without predecessors */
2890 ir_node *new_d_immBlock(dbg_info *db) {
2893 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2894 /* creates a new dynamic in-array as length of in is -1 */
2895 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2896 current_ir_graph->current_block = res;
2897 res->attr.block.matured = 0;
2898 res->attr.block.dead = 0;
2899 res->attr.block.irg = current_ir_graph;
2900 res->attr.block.backedge = NULL;
2901 res->attr.block.in_cg = NULL;
2902 res->attr.block.cg_backedge = NULL;
2903 res->attr.block.extblk = NULL;
2904 res->attr.block.region = NULL;
2905 set_Block_block_visited(res, 0);
2907 /* Create and initialize array for Phi-node construction. */
2908 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2909 current_ir_graph->n_loc);
2910 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2912 /* Immature block may not be optimized! */
2913 IRN_VRFY_IRG(res, current_ir_graph);
2916 } /* new_d_immBlock */
2919 new_immBlock(void) {
2920 return new_d_immBlock(NULL);
2921 } /* new_immBlock */
2923 /* add an edge to a jmp/control flow node */
2925 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2926 if (block->attr.block.matured) {
2927 assert(0 && "Error: Block already matured!\n");
2929 int n = ARR_LEN(block->in) - 1;
2930 assert(jmp != NULL);
2931 ARR_APP1(ir_node *, block->in, jmp);
2933 hook_set_irn_n(block, n, jmp, NULL);
2935 } /* add_immBlock_pred */
2937 /* changing the current block */
2939 set_cur_block(ir_node *target) {
2940 current_ir_graph->current_block = target;
2941 } /* set_cur_block */
2943 /* ************************ */
2944 /* parameter administration */
2946 /* get a value from the parameter array from the current block by its index */
2948 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2949 ir_graph *irg = current_ir_graph;
2950 assert(get_irg_phase_state(irg) == phase_building);
2951 inc_irg_visited(irg);
2953 return get_r_value_internal(irg->current_block, pos + 1, mode);
2956 /* get a value from the parameter array from the current block by its index */
2958 get_value(int pos, ir_mode *mode) {
2959 return get_d_value(NULL, pos, mode);
2962 /* set a value at position pos in the parameter array from the current block */
2964 set_value(int pos, ir_node *value) {
2965 ir_graph *irg = current_ir_graph;
2966 assert(get_irg_phase_state(irg) == phase_building);
2967 assert(pos+1 < irg->n_loc);
2968 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2971 /* Find the value number for a node in the current block.*/
2973 find_value(ir_node *value) {
2975 ir_node *bl = current_ir_graph->current_block;
2977 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2978 if (bl->attr.block.graph_arr[i] == value)
2983 /* get the current store */
2986 ir_graph *irg = current_ir_graph;
2988 assert(get_irg_phase_state(irg) == phase_building);
2989 /* GL: one could call get_value instead */
2990 inc_irg_visited(irg);
2991 return get_r_value_internal(irg->current_block, 0, mode_M);
2994 /* set the current store: handles automatic Sync construction for Load nodes */
2996 set_store(ir_node *store) {
2997 ir_node *load, *pload, *pred, *in[2];
2999 assert(get_irg_phase_state(current_ir_graph) == phase_building);
3000 /* Beware: due to dead code elimination, a store might become a Bad node even in
3001 the construction phase. */
3002 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
3004 if (get_opt_auto_create_sync()) {
3005 /* handle non-volatile Load nodes by automatically creating Sync's */
3006 load = skip_Proj(store);
3007 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3008 pred = get_Load_mem(load);
3010 if (is_Sync(pred)) {
3011 /* a Load after a Sync: move it up */
3012 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3014 set_Load_mem(load, get_memop_mem(mem));
3015 add_Sync_pred(pred, store);
3018 pload = skip_Proj(pred);
3019 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3020 /* a Load after a Load: create a new Sync */
3021 set_Load_mem(load, get_Load_mem(pload));
3025 store = new_Sync(2, in);
3030 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3034 keep_alive(ir_node *ka) {
3035 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3038 /* --- Useful access routines --- */
3039 /* Returns the current block of the current graph. To set the current
3040 block use set_cur_block. */
3041 ir_node *get_cur_block(void) {
3042 return get_irg_current_block(current_ir_graph);
3043 } /* get_cur_block */
3045 /* Returns the frame type of the current graph */
3046 ir_type *get_cur_frame_type(void) {
3047 return get_irg_frame_type(current_ir_graph);
3048 } /* get_cur_frame_type */
3051 /* ********************************************************************* */
3054 /* call once for each run of the library */
3056 init_cons(uninitialized_local_variable_func_t *func) {
3057 default_initialize_local_variable = func;
3061 irp_finalize_cons(void) {
3063 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3064 irg_finalize_cons(get_irp_irg(i));
3066 irp->phase_state = phase_high;
3067 } /* irp_finalize_cons */
3070 ir_node *new_Block(int arity, ir_node **in) {
3071 return new_d_Block(NULL, arity, in);
3073 ir_node *new_Start(void) {
3074 return new_d_Start(NULL);
3076 ir_node *new_End(void) {
3077 return new_d_End(NULL);
3079 ir_node *new_Jmp(void) {
3080 return new_d_Jmp(NULL);
3082 ir_node *new_IJmp(ir_node *tgt) {
3083 return new_d_IJmp(NULL, tgt);
3085 ir_node *new_Cond(ir_node *c) {
3086 return new_d_Cond(NULL, c);
3088 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3089 return new_d_Return(NULL, store, arity, in);
3091 ir_node *new_Const(ir_mode *mode, tarval *con) {
3092 return new_d_Const(NULL, mode, con);
3095 ir_node *new_Const_long(ir_mode *mode, long value) {
3096 return new_d_Const_long(NULL, mode, value);
3099 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3100 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3103 ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
3104 return new_d_SymConst_type(NULL, value, kind, type);
3106 ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
3107 return new_d_SymConst(NULL, value, kind);
3109 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3110 return new_d_simpleSel(NULL, store, objptr, ent);
3112 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3114 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3116 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3118 return new_d_Call(NULL, store, callee, arity, in, tp);
3120 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3121 return new_d_Add(NULL, op1, op2, mode);
3123 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3124 return new_d_Sub(NULL, op1, op2, mode);
3126 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3127 return new_d_Minus(NULL, op, mode);
3129 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3130 return new_d_Mul(NULL, op1, op2, mode);
3132 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3133 return new_d_Quot(NULL, memop, op1, op2, mode);
3135 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3136 return new_d_DivMod(NULL, memop, op1, op2, mode);
3138 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3139 return new_d_Div(NULL, memop, op1, op2, mode);
3141 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3142 return new_d_Mod(NULL, memop, op1, op2, mode);
3144 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3145 return new_d_Abs(NULL, op, mode);
3147 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3148 return new_d_And(NULL, op1, op2, mode);
3150 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3151 return new_d_Or(NULL, op1, op2, mode);
3153 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3154 return new_d_Eor(NULL, op1, op2, mode);
3156 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3157 return new_d_Not(NULL, op, mode);
3159 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3160 return new_d_Shl(NULL, op, k, mode);
3162 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3163 return new_d_Shr(NULL, op, k, mode);
3165 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3166 return new_d_Shrs(NULL, op, k, mode);
3168 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
3169 return new_d_Rot(NULL, op, k, mode);
3171 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3172 return new_d_Carry(NULL, op1, op2, mode);
3174 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3175 return new_d_Borrow(NULL, op1, op2, mode);
3177 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3178 return new_d_Cmp(NULL, op1, op2);
3180 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3181 return new_d_Conv(NULL, op, mode);
3183 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3184 return new_d_strictConv(NULL, op, mode);
3186 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3187 return new_d_Cast(NULL, op, to_tp);
3189 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3190 return new_d_Phi(NULL, arity, in, mode);
3192 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3193 return new_d_Load(NULL, store, addr, mode);
3195 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3196 return new_d_Store(NULL, store, addr, val);
3198 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3199 where_alloc where) {
3200 return new_d_Alloc(NULL, store, size, alloc_type, where);
3202 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3203 ir_type *free_type, where_alloc where) {
3204 return new_d_Free(NULL, store, ptr, size, free_type, where);
3206 ir_node *new_Sync(int arity, ir_node *in[]) {
3207 return new_d_Sync(NULL, arity, in);
3209 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3210 return new_d_Proj(NULL, arg, mode, proj);
3212 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3213 return new_d_defaultProj(NULL, arg, max_proj);
3215 ir_node *new_Tuple(int arity, ir_node **in) {
3216 return new_d_Tuple(NULL, arity, in);
3218 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3219 return new_d_Id(NULL, val, mode);
3221 ir_node *new_Bad(void) {
3224 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3225 return new_d_Confirm (NULL, val, bound, cmp);
3227 ir_node *new_Unknown(ir_mode *m) {
3228 return new_d_Unknown(m);
3230 ir_node *new_CallBegin(ir_node *callee) {
3231 return new_d_CallBegin(NULL, callee);
3233 ir_node *new_EndReg(void) {
3234 return new_d_EndReg(NULL);
3236 ir_node *new_EndExcept(void) {
3237 return new_d_EndExcept(NULL);
3239 ir_node *new_Break(void) {
3240 return new_d_Break(NULL);
3242 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3243 return new_d_Filter(NULL, arg, mode, proj);
3245 ir_node *new_NoMem(void) {
3246 return new_d_NoMem();
3248 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3249 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3251 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3252 return new_d_Psi(NULL, arity, conds, vals, mode);
3254 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3255 return new_d_CopyB(NULL, store, dst, src, data_type);
3257 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3258 return new_d_InstOf(NULL, store, objptr, ent);
3260 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3261 return new_d_Raise(NULL, store, obj);
3263 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3264 return new_d_Bound(NULL, store, idx, lower, upper);
3266 ir_node *new_Pin(ir_node *node) {
3267 return new_d_Pin(NULL, node);