2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE.
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
60 /* when we need verifying */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
68 * Language dependent variable initialization callback.
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res = optimize_node(res); \
117 IRN_VRFY_IRG(res, irg); \
121 /* creates a rd constructor for a binop */
122 #define NEW_RD_BINOP(instr) \
124 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
125 ir_node *op1, ir_node *op2, ir_mode *mode) \
128 ir_graph *rem = current_ir_graph; \
129 current_ir_graph = irg; \
130 res = new_bd_##instr(db, block, op1, op2, mode); \
131 current_ir_graph = rem; \
135 /* creates a rd constructor for an unop */
136 #define NEW_RD_UNOP(instr) \
138 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
139 ir_node *op, ir_mode *mode) \
142 ir_graph *rem = current_ir_graph; \
143 current_ir_graph = irg; \
144 res = new_bd_##instr(db, block, op, mode); \
145 current_ir_graph = rem; \
149 /* creates a rd constructor for an divop */
150 #define NEW_RD_DIVOP(instr) \
152 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
153 ir_node *memop, ir_node *op1, ir_node *op2) \
156 ir_graph *rem = current_ir_graph; \
157 current_ir_graph = irg; \
158 res = new_bd_##instr(db, block, memop, op1, op2); \
159 current_ir_graph = rem; \
163 /* creates a d constructor for an binop */
164 #define NEW_D_BINOP(instr) \
166 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
167 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
170 /* creates a d constructor for an unop */
171 #define NEW_D_UNOP(instr) \
173 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
174 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
179 * Constructs a Block with a fixed number of predecessors.
180 * Does not set current_block. Can not be used with automatic
181 * Phi node construction.
184 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
186 ir_graph *irg = current_ir_graph;
188 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
189 set_Block_matured(res, 1);
190 set_Block_block_visited(res, 0);
192 /* res->attr.block.exc = exc_normal; */
193 /* res->attr.block.handler_entry = 0; */
194 res->attr.block.dead = 0;
195 res->attr.block.irg = irg;
196 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
197 res->attr.block.in_cg = NULL;
198 res->attr.block.cg_backedge = NULL;
199 res->attr.block.extblk = NULL;
201 IRN_VRFY_IRG(res, irg);
206 new_bd_Start(dbg_info *db, ir_node *block) {
208 ir_graph *irg = current_ir_graph;
210 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
211 /* res->attr.start.irg = irg; */
213 IRN_VRFY_IRG(res, irg);
218 new_bd_End(dbg_info *db, ir_node *block) {
220 ir_graph *irg = current_ir_graph;
222 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
224 IRN_VRFY_IRG(res, irg);
229 * Creates a Phi node with all predecessors. Calling this constructor
230 * is only allowed if the corresponding block is mature.
233 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
235 ir_graph *irg = current_ir_graph;
239 /* Don't assert that block matured: the use of this constructor is strongly
241 if ( get_Block_matured(block) )
242 assert( get_irn_arity(block) == arity );
244 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
246 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
248 for (i = arity-1; i >= 0; i--)
249 if (get_irn_op(in[i]) == op_Unknown) {
254 if (!has_unknown) res = optimize_node (res);
255 IRN_VRFY_IRG(res, irg);
257 /* Memory Phis in endless loops must be kept alive.
258 As we can't distinguish these easily we keep all of them alive. */
259 if ((res->op == op_Phi) && (mode == mode_M))
260 add_End_keepalive(get_irg_end(irg), res);
265 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
267 ir_graph *irg = current_ir_graph;
269 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
270 res->attr.con.tv = con;
271 set_Const_type(res, tp); /* Call method because of complex assertion. */
272 res = optimize_node (res);
273 assert(get_Const_type(res) == tp);
274 IRN_VRFY_IRG(res, irg);
277 } /* new_bd_Const_type */
280 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
281 ir_graph *irg = current_ir_graph;
283 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
287 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
288 ir_graph *irg = current_ir_graph;
290 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
291 } /* new_bd_Const_long */
294 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
296 ir_graph *irg = current_ir_graph;
298 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
299 res = optimize_node(res);
300 IRN_VRFY_IRG(res, irg);
305 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
308 ir_graph *irg = current_ir_graph;
310 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
311 res->attr.proj = proj;
314 assert(get_Proj_pred(res));
315 assert(get_nodes_block(get_Proj_pred(res)));
317 res = optimize_node(res);
319 IRN_VRFY_IRG(res, irg);
324 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
327 ir_graph *irg = current_ir_graph;
329 assert(arg->op == op_Cond);
330 arg->attr.cond.kind = fragmentary;
331 arg->attr.cond.default_proj = max_proj;
332 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
334 } /* new_bd_defaultProj */
337 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
339 ir_graph *irg = current_ir_graph;
341 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
342 res->attr.conv.strict = strict_flag;
343 res = optimize_node(res);
344 IRN_VRFY_IRG(res, irg);
349 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
351 ir_graph *irg = current_ir_graph;
353 assert(is_atomic_type(to_tp));
355 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
356 res->attr.cast.totype = to_tp;
357 res = optimize_node(res);
358 IRN_VRFY_IRG(res, irg);
363 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
365 ir_graph *irg = current_ir_graph;
367 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
368 res = optimize_node (res);
369 IRN_VRFY_IRG(res, irg);
394 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
397 ir_graph *irg = current_ir_graph;
400 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
401 res = optimize_node(res);
402 IRN_VRFY_IRG(res, irg);
407 new_bd_Jmp(dbg_info *db, ir_node *block) {
409 ir_graph *irg = current_ir_graph;
411 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
412 res = optimize_node (res);
413 IRN_VRFY_IRG(res, irg);
418 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
420 ir_graph *irg = current_ir_graph;
422 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
423 res = optimize_node (res);
424 IRN_VRFY_IRG(res, irg);
426 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
432 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
434 ir_graph *irg = current_ir_graph;
436 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
437 res->attr.cond.kind = dense;
438 res->attr.cond.default_proj = 0;
439 res->attr.cond.pred = COND_JMP_PRED_NONE;
440 res = optimize_node (res);
441 IRN_VRFY_IRG(res, irg);
446 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
447 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
451 ir_graph *irg = current_ir_graph;
454 NEW_ARR_A(ir_node *, r_in, r_arity);
457 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
459 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
461 assert((get_unknown_type() == tp) || is_Method_type(tp));
462 set_Call_type(res, tp);
463 res->attr.call.exc.pin_state = op_pin_state_pinned;
464 res->attr.call.callee_arr = NULL;
465 res = optimize_node(res);
466 IRN_VRFY_IRG(res, irg);
471 new_bd_Return(dbg_info *db, ir_node *block,
472 ir_node *store, int arity, ir_node **in) {
476 ir_graph *irg = current_ir_graph;
479 NEW_ARR_A (ir_node *, r_in, r_arity);
481 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
482 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
483 res = optimize_node(res);
484 IRN_VRFY_IRG(res, irg);
486 } /* new_bd_Return */
489 new_bd_Load(dbg_info *db, ir_node *block,
490 ir_node *store, ir_node *adr, ir_mode *mode) {
493 ir_graph *irg = current_ir_graph;
497 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
498 res->attr.load.exc.pin_state = op_pin_state_pinned;
499 res->attr.load.load_mode = mode;
500 res->attr.load.volatility = volatility_non_volatile;
501 res = optimize_node(res);
502 IRN_VRFY_IRG(res, irg);
507 new_bd_Store(dbg_info *db, ir_node *block,
508 ir_node *store, ir_node *adr, ir_node *val) {
511 ir_graph *irg = current_ir_graph;
516 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
517 res->attr.store.exc.pin_state = op_pin_state_pinned;
518 res->attr.store.volatility = volatility_non_volatile;
519 res = optimize_node(res);
520 IRN_VRFY_IRG(res, irg);
525 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
526 ir_node *size, ir_type *alloc_type, where_alloc where) {
529 ir_graph *irg = current_ir_graph;
533 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
534 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
535 res->attr.alloc.where = where;
536 res->attr.alloc.type = alloc_type;
537 res = optimize_node(res);
538 IRN_VRFY_IRG(res, irg);
543 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
544 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
547 ir_graph *irg = current_ir_graph;
552 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
553 res->attr.free.where = where;
554 res->attr.free.type = free_type;
555 res = optimize_node(res);
556 IRN_VRFY_IRG(res, irg);
561 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
562 int arity, ir_node **in, ir_entity *ent) {
566 ir_graph *irg = current_ir_graph;
567 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
569 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
572 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
575 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
577 * Sel's can select functions which should be of mode mode_P_code.
579 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
580 res->attr.sel.ent = ent;
581 res = optimize_node(res);
582 IRN_VRFY_IRG(res, irg);
587 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
588 symconst_kind symkind, ir_type *tp) {
591 ir_graph *irg = current_ir_graph;
593 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
594 mode = mode_P_data; /* FIXME: can be mode_P_code */
598 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
600 res->attr.symc.num = symkind;
601 res->attr.symc.sym = value;
602 res->attr.symc.tp = tp;
604 res = optimize_node(res);
605 IRN_VRFY_IRG(res, irg);
607 } /* new_bd_SymConst_type */
610 new_bd_Sync(dbg_info *db, ir_node *block) {
612 ir_graph *irg = current_ir_graph;
614 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
615 /* no need to call optimize node here, Sync are always created with no predecessors */
616 IRN_VRFY_IRG(res, irg);
621 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
622 ir_node *in[2], *res;
623 ir_graph *irg = current_ir_graph;
627 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
628 res->attr.confirm_cmp = cmp;
629 res = optimize_node (res);
630 IRN_VRFY_IRG(res, irg);
632 } /* new_bd_Confirm */
635 new_bd_Unknown(ir_mode *m) {
637 ir_graph *irg = current_ir_graph;
639 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
640 res = optimize_node(res);
642 } /* new_bd_Unknown */
645 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
648 ir_graph *irg = current_ir_graph;
650 in[0] = get_Call_ptr(call);
651 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
652 /* res->attr.callbegin.irg = irg; */
653 res->attr.callbegin.call = call;
654 res = optimize_node(res);
655 IRN_VRFY_IRG(res, irg);
657 } /* new_bd_CallBegin */
660 new_bd_EndReg(dbg_info *db, ir_node *block) {
662 ir_graph *irg = current_ir_graph;
664 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
665 set_irg_end_reg(irg, res);
666 IRN_VRFY_IRG(res, irg);
668 } /* new_bd_EndReg */
671 new_bd_EndExcept(dbg_info *db, ir_node *block) {
673 ir_graph *irg = current_ir_graph;
675 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
676 set_irg_end_except(irg, res);
677 IRN_VRFY_IRG (res, irg);
679 } /* new_bd_EndExcept */
682 new_bd_Break(dbg_info *db, ir_node *block) {
684 ir_graph *irg = current_ir_graph;
686 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
687 res = optimize_node(res);
688 IRN_VRFY_IRG(res, irg);
693 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
696 ir_graph *irg = current_ir_graph;
698 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
699 res->attr.filter.proj = proj;
700 res->attr.filter.in_cg = NULL;
701 res->attr.filter.backedge = NULL;
704 assert(get_Proj_pred(res));
705 assert(get_nodes_block(get_Proj_pred(res)));
707 res = optimize_node(res);
708 IRN_VRFY_IRG(res, irg);
710 } /* new_bd_Filter */
713 new_bd_Mux(dbg_info *db, ir_node *block,
714 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
717 ir_graph *irg = current_ir_graph;
723 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
726 res = optimize_node(res);
727 IRN_VRFY_IRG(res, irg);
732 new_bd_Psi(dbg_info *db, ir_node *block,
733 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
736 ir_graph *irg = current_ir_graph;
739 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
741 for (i = 0; i < arity; ++i) {
743 in[2 * i + 1] = vals[i];
747 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
750 res = optimize_node(res);
751 IRN_VRFY_IRG(res, irg);
756 new_bd_CopyB(dbg_info *db, ir_node *block,
757 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
760 ir_graph *irg = current_ir_graph;
766 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
768 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
769 res->attr.copyb.data_type = data_type;
770 res = optimize_node(res);
771 IRN_VRFY_IRG(res, irg);
776 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
777 ir_node *objptr, ir_type *type) {
780 ir_graph *irg = current_ir_graph;
784 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
785 res->attr.instof.type = type;
786 res = optimize_node(res);
787 IRN_VRFY_IRG(res, irg);
789 } /* new_bd_InstOf */
792 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
795 ir_graph *irg = current_ir_graph;
799 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
800 res = optimize_node(res);
801 IRN_VRFY_IRG(res, irg);
806 new_bd_Bound(dbg_info *db, ir_node *block,
807 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
810 ir_graph *irg = current_ir_graph;
816 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
817 res->attr.bound.exc.pin_state = op_pin_state_pinned;
818 res = optimize_node(res);
819 IRN_VRFY_IRG(res, irg);
824 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
826 ir_graph *irg = current_ir_graph;
828 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
829 res = optimize_node(res);
830 IRN_VRFY_IRG(res, irg);
834 /* --------------------------------------------- */
835 /* private interfaces, for professional use only */
836 /* --------------------------------------------- */
838 /* Constructs a Block with a fixed number of predecessors.
839 Does not set current_block. Can not be used with automatic
840 Phi node construction. */
842 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
843 ir_graph *rem = current_ir_graph;
846 current_ir_graph = irg;
847 res = new_bd_Block(db, arity, in);
848 current_ir_graph = rem;
854 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
855 ir_graph *rem = current_ir_graph;
858 current_ir_graph = irg;
859 res = new_bd_Start(db, block);
860 current_ir_graph = rem;
866 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
868 ir_graph *rem = current_ir_graph;
870 current_ir_graph = rem;
871 res = new_bd_End(db, block);
872 current_ir_graph = rem;
877 /* Creates a Phi node with all predecessors. Calling this constructor
878 is only allowed if the corresponding block is mature. */
880 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
882 ir_graph *rem = current_ir_graph;
884 current_ir_graph = irg;
885 res = new_bd_Phi(db, block,arity, in, mode);
886 current_ir_graph = rem;
892 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
894 ir_graph *rem = current_ir_graph;
896 current_ir_graph = irg;
897 res = new_bd_Const_type(db, block, mode, con, tp);
898 current_ir_graph = rem;
901 } /* new_rd_Const_type */
904 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
906 ir_graph *rem = current_ir_graph;
908 current_ir_graph = irg;
909 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
910 current_ir_graph = rem;
916 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
917 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
918 } /* new_rd_Const_long */
921 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
923 ir_graph *rem = current_ir_graph;
925 current_ir_graph = irg;
926 res = new_bd_Id(db, block, val, mode);
927 current_ir_graph = rem;
933 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
936 ir_graph *rem = current_ir_graph;
938 current_ir_graph = irg;
939 res = new_bd_Proj(db, block, arg, mode, proj);
940 current_ir_graph = rem;
946 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
949 ir_graph *rem = current_ir_graph;
951 current_ir_graph = irg;
952 res = new_bd_defaultProj(db, block, arg, max_proj);
953 current_ir_graph = rem;
956 } /* new_rd_defaultProj */
959 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
961 ir_graph *rem = current_ir_graph;
963 current_ir_graph = irg;
964 res = new_bd_Conv(db, block, op, mode, 0);
965 current_ir_graph = rem;
971 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
973 ir_graph *rem = current_ir_graph;
975 current_ir_graph = irg;
976 res = new_bd_Cast(db, block, op, to_tp);
977 current_ir_graph = rem;
983 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
985 ir_graph *rem = current_ir_graph;
987 current_ir_graph = irg;
988 res = new_bd_Tuple(db, block, arity, in);
989 current_ir_graph = rem;
1012 NEW_RD_BINOP(Borrow)
1015 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1016 ir_node *op1, ir_node *op2) {
1018 ir_graph *rem = current_ir_graph;
1020 current_ir_graph = irg;
1021 res = new_bd_Cmp(db, block, op1, op2);
1022 current_ir_graph = rem;
1028 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1030 ir_graph *rem = current_ir_graph;
1032 current_ir_graph = irg;
1033 res = new_bd_Jmp(db, block);
1034 current_ir_graph = rem;
1040 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1042 ir_graph *rem = current_ir_graph;
1044 current_ir_graph = irg;
1045 res = new_bd_IJmp(db, block, tgt);
1046 current_ir_graph = rem;
1052 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1054 ir_graph *rem = current_ir_graph;
1056 current_ir_graph = irg;
1057 res = new_bd_Cond(db, block, c);
1058 current_ir_graph = rem;
1064 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1065 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1067 ir_graph *rem = current_ir_graph;
1069 current_ir_graph = irg;
1070 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1071 current_ir_graph = rem;
1077 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1078 ir_node *store, int arity, ir_node **in) {
1080 ir_graph *rem = current_ir_graph;
1082 current_ir_graph = irg;
1083 res = new_bd_Return(db, block, store, arity, in);
1084 current_ir_graph = rem;
1087 } /* new_rd_Return */
1090 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1091 ir_node *store, ir_node *adr, ir_mode *mode) {
1093 ir_graph *rem = current_ir_graph;
1095 current_ir_graph = irg;
1096 res = new_bd_Load(db, block, store, adr, mode);
1097 current_ir_graph = rem;
1103 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1104 ir_node *store, ir_node *adr, ir_node *val) {
1106 ir_graph *rem = current_ir_graph;
1108 current_ir_graph = irg;
1109 res = new_bd_Store(db, block, store, adr, val);
1110 current_ir_graph = rem;
1113 } /* new_rd_Store */
1116 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1117 ir_node *size, ir_type *alloc_type, where_alloc where) {
1119 ir_graph *rem = current_ir_graph;
1121 current_ir_graph = irg;
1122 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1123 current_ir_graph = rem;
1126 } /* new_rd_Alloc */
1129 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1130 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1132 ir_graph *rem = current_ir_graph;
1134 current_ir_graph = irg;
1135 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1136 current_ir_graph = rem;
1142 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1143 ir_node *store, ir_node *objptr, ir_entity *ent) {
1145 ir_graph *rem = current_ir_graph;
1147 current_ir_graph = irg;
1148 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1149 current_ir_graph = rem;
1152 } /* new_rd_simpleSel */
1155 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1156 int arity, ir_node **in, ir_entity *ent) {
1158 ir_graph *rem = current_ir_graph;
1160 current_ir_graph = irg;
1161 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1162 current_ir_graph = rem;
1168 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1169 symconst_kind symkind, ir_type *tp) {
1171 ir_graph *rem = current_ir_graph;
1173 current_ir_graph = irg;
1174 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1175 current_ir_graph = rem;
1178 } /* new_rd_SymConst_type */
1181 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1182 symconst_kind symkind) {
1183 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1185 } /* new_rd_SymConst */
1187 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1188 symconst_symbol sym;
1189 sym.entity_p = symbol;
1190 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1191 } /* new_rd_SymConst_addr_ent */
1193 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1194 symconst_symbol sym;
1195 sym.entity_p = symbol;
1196 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1197 } /* new_rd_SymConst_ofs_ent */
1199 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1200 symconst_symbol sym;
1201 sym.ident_p = symbol;
1202 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1203 } /* new_rd_SymConst_addr_name */
1205 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1206 symconst_symbol sym;
1207 sym.type_p = symbol;
1208 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1209 } /* new_rd_SymConst_type_tag */
1211 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1212 symconst_symbol sym;
1213 sym.type_p = symbol;
1214 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1215 } /* new_rd_SymConst_size */
1217 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1218 symconst_symbol sym;
1219 sym.type_p = symbol;
1220 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1221 } /* new_rd_SymConst_align */
1224 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1226 ir_graph *rem = current_ir_graph;
1229 current_ir_graph = irg;
1230 res = new_bd_Sync(db, block);
1231 current_ir_graph = rem;
1233 for (i = 0; i < arity; ++i)
1234 add_Sync_pred(res, in[i]);
1240 new_rd_Bad(ir_graph *irg) {
1241 return get_irg_bad(irg);
1245 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1247 ir_graph *rem = current_ir_graph;
1249 current_ir_graph = irg;
1250 res = new_bd_Confirm(db, block, val, bound, cmp);
1251 current_ir_graph = rem;
1254 } /* new_rd_Confirm */
1257 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1259 ir_graph *rem = current_ir_graph;
1261 current_ir_graph = irg;
1262 res = new_bd_Unknown(m);
1263 current_ir_graph = rem;
1266 } /* new_rd_Unknown */
1269 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1271 ir_graph *rem = current_ir_graph;
1273 current_ir_graph = irg;
1274 res = new_bd_CallBegin(db, block, call);
1275 current_ir_graph = rem;
1278 } /* new_rd_CallBegin */
1281 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1284 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1285 set_irg_end_reg(irg, res);
1286 IRN_VRFY_IRG(res, irg);
1288 } /* new_rd_EndReg */
1291 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1294 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1295 set_irg_end_except(irg, res);
1296 IRN_VRFY_IRG (res, irg);
1298 } /* new_rd_EndExcept */
1301 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1303 ir_graph *rem = current_ir_graph;
1305 current_ir_graph = irg;
1306 res = new_bd_Break(db, block);
1307 current_ir_graph = rem;
1310 } /* new_rd_Break */
1313 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1316 ir_graph *rem = current_ir_graph;
1318 current_ir_graph = irg;
1319 res = new_bd_Filter(db, block, arg, mode, proj);
1320 current_ir_graph = rem;
1323 } /* new_rd_Filter */
1326 new_rd_NoMem(ir_graph *irg) {
1327 return get_irg_no_mem(irg);
1328 } /* new_rd_NoMem */
1331 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1332 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1334 ir_graph *rem = current_ir_graph;
1336 current_ir_graph = irg;
1337 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1338 current_ir_graph = rem;
1344 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1345 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1347 ir_graph *rem = current_ir_graph;
1349 current_ir_graph = irg;
1350 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1351 current_ir_graph = rem;
1356 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1357 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1359 ir_graph *rem = current_ir_graph;
1361 current_ir_graph = irg;
1362 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1363 current_ir_graph = rem;
1366 } /* new_rd_CopyB */
1369 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1370 ir_node *objptr, ir_type *type) {
1372 ir_graph *rem = current_ir_graph;
1374 current_ir_graph = irg;
1375 res = new_bd_InstOf(db, block, store, objptr, type);
1376 current_ir_graph = rem;
1379 } /* new_rd_InstOf */
1382 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1384 ir_graph *rem = current_ir_graph;
1386 current_ir_graph = irg;
1387 res = new_bd_Raise(db, block, store, obj);
1388 current_ir_graph = rem;
1391 } /* new_rd_Raise */
1393 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1394 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1396 ir_graph *rem = current_ir_graph;
1398 current_ir_graph = irg;
1399 res = new_bd_Bound(db, block, store, idx, lower, upper);
1400 current_ir_graph = rem;
1403 } /* new_rd_Bound */
1405 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1407 ir_graph *rem = current_ir_graph;
1409 current_ir_graph = irg;
1410 res = new_bd_Pin(db, block, node);
1411 current_ir_graph = rem;
/* ------------------------------------------------------------------
 * new_r_* constructors: thin convenience wrappers.  Each one simply
 * forwards to the corresponding new_rd_* constructor, passing NULL
 * for the dbg_info argument (i.e. "no debug information").
 * ------------------------------------------------------------------ */
1416 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1417 return new_rd_Block(NULL, irg, arity, in);
1419 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1420 return new_rd_Start(NULL, irg, block);
1422 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1423 return new_rd_End(NULL, irg, block);
1425 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1426 return new_rd_Jmp(NULL, irg, block);
1428 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1429 return new_rd_IJmp(NULL, irg, block, tgt);
1431 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1432 return new_rd_Cond(NULL, irg, block, c);
1434 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1435 ir_node *store, int arity, ir_node **in) {
1436 return new_rd_Return(NULL, irg, block, store, arity, in);
1438 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1439 ir_mode *mode, tarval *con) {
1440 return new_rd_Const(NULL, irg, block, mode, con);
1442 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1443 ir_mode *mode, long value) {
1444 return new_rd_Const_long(NULL, irg, block, mode, value);
1446 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1447 ir_mode *mode, tarval *con, ir_type *tp) {
1448 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1450 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1451 symconst_symbol value, symconst_kind symkind) {
1452 return new_rd_SymConst(NULL, irg, block, value, symkind);
/* simpleSel: entity selection without array indexing (0 indices). */
1454 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1455 ir_node *objptr, ir_entity *ent) {
1456 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1458 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1459 ir_node *objptr, int n_index, ir_node **index,
1461 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1463 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1464 ir_node *callee, int arity, ir_node **in,
1466 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1468 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1469 ir_node *op1, ir_node *op2, ir_mode *mode) {
1470 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1472 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1473 ir_node *op1, ir_node *op2, ir_mode *mode) {
1474 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1476 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1477 ir_node *op, ir_mode *mode) {
1478 return new_rd_Minus(NULL, irg, block, op, mode);
1480 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1481 ir_node *op1, ir_node *op2, ir_mode *mode) {
1482 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1484 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1485 ir_node *memop, ir_node *op1, ir_node *op2) {
1486 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1488 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1489 ir_node *memop, ir_node *op1, ir_node *op2) {
1490 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1492 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1493 ir_node *memop, ir_node *op1, ir_node *op2) {
1494 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1496 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1497 ir_node *memop, ir_node *op1, ir_node *op2) {
1498 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1500 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1501 ir_node *op, ir_mode *mode) {
1502 return new_rd_Abs(NULL, irg, block, op, mode);
1504 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1505 ir_node *op1, ir_node *op2, ir_mode *mode) {
1506 return new_rd_And(NULL, irg, block, op1, op2, mode);
1508 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1509 ir_node *op1, ir_node *op2, ir_mode *mode) {
1510 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1512 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1513 ir_node *op1, ir_node *op2, ir_mode *mode) {
1514 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1516 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1517 ir_node *op, ir_mode *mode) {
1518 return new_rd_Not(NULL, irg, block, op, mode);
1520 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1521 ir_node *op, ir_node *k, ir_mode *mode) {
1522 return new_rd_Shl(NULL, irg, block, op, k, mode);
1524 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1525 ir_node *op, ir_node *k, ir_mode *mode) {
1526 return new_rd_Shr(NULL, irg, block, op, k, mode);
1528 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1529 ir_node *op, ir_node *k, ir_mode *mode) {
1530 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1532 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1533 ir_node *op, ir_node *k, ir_mode *mode) {
1534 return new_rd_Rot(NULL, irg, block, op, k, mode);
1536 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1537 ir_node *op, ir_node *k, ir_mode *mode) {
1538 return new_rd_Carry(NULL, irg, block, op, k, mode);
1540 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1541 ir_node *op, ir_node *k, ir_mode *mode) {
1542 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1544 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1545 ir_node *op1, ir_node *op2) {
1546 return new_rd_Cmp(NULL, irg, block, op1, op2);
1548 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1549 ir_node *op, ir_mode *mode) {
1550 return new_rd_Conv(NULL, irg, block, op, mode);
1552 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1553 return new_rd_Cast(NULL, irg, block, op, to_tp);
1555 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1556 ir_node **in, ir_mode *mode) {
1557 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1559 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1560 ir_node *store, ir_node *adr, ir_mode *mode) {
1561 return new_rd_Load(NULL, irg, block, store, adr, mode);
1563 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1564 ir_node *store, ir_node *adr, ir_node *val) {
1565 return new_rd_Store(NULL, irg, block, store, adr, val);
1567 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1568 ir_node *size, ir_type *alloc_type, where_alloc where) {
1569 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1571 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1572 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1573 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1575 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1576 return new_rd_Sync(NULL, irg, block, arity, in);
1578 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1579 ir_mode *mode, long proj) {
1580 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1582 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1584 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1586 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1587 int arity, ir_node **in) {
1588 return new_rd_Tuple(NULL, irg, block, arity, in );
1590 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1591 ir_node *val, ir_mode *mode) {
1592 return new_rd_Id(NULL, irg, block, val, mode);
1594 ir_node *new_r_Bad(ir_graph *irg) {
1595 return new_rd_Bad(irg);
1597 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1598 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1600 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1601 return new_rd_Unknown(irg, m);
1603 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1604 return new_rd_CallBegin(NULL, irg, block, callee);
1606 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1607 return new_rd_EndReg(NULL, irg, block);
1609 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1610 return new_rd_EndExcept(NULL, irg, block);
1612 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1613 return new_rd_Break(NULL, irg, block);
1615 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1616 ir_mode *mode, long proj) {
1617 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1619 ir_node *new_r_NoMem(ir_graph *irg) {
1620 return new_rd_NoMem(irg);
1622 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1623 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1624 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1626 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1627 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1628 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1630 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1631 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1632 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1634 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1636 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1638 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1639 ir_node *store, ir_node *obj) {
1640 return new_rd_Raise(NULL, irg, block, store, obj);
1642 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1643 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1644 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1646 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1647 return new_rd_Pin(NULL, irg, block, node);
1650 /** ********************/
1651 /** public interfaces */
1652 /** construction tools */
1656 * - create a new Start node in the current block
1658 * @return s - pointer to the created Start node
1663 new_d_Start(dbg_info *db) {
/* Build the Start node (mode_T, no predecessors) directly in the
 * current block of current_ir_graph, then optimize and verify it. */
1666 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1667 op_Start, mode_T, 0, NULL);
1669 res = optimize_node(res);
1670 IRN_VRFY_IRG(res, current_ir_graph);
/* new_d_End
 * Creates the End node (mode_X) in the current block of
 * current_ir_graph.  Arity -1 marks a dynamic in-array: keep-alive
 * edges are added to End later. */
1675 new_d_End(dbg_info *db) {
1677 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1678 op_End, mode_X, -1, NULL);
1679 res = optimize_node(res);
1680 IRN_VRFY_IRG(res, current_ir_graph);
1685 /* Constructs a Block with a fixed number of predecessors.
1686 Does set current_block. Can be used with automatic Phi
1687 node construction. */
1689 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1692 int has_unknown = 0;
1694 res = new_bd_Block(db, arity, in);
1696 /* Create and initialize array for Phi-node construction. */
1697 if (get_irg_phase_state(current_ir_graph) == phase_building) {
/* graph_arr holds the current value for each of the n_loc local
 * variables within this block; start out with all slots empty. */
1698 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1699 current_ir_graph->n_loc);
1700 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan the predecessors for Unknown nodes. */
1703 for (i = arity-1; i >= 0; i--)
1704 if (get_irn_op(in[i]) == op_Unknown) {
/* Blocks with Unknown predecessors are not optimized here. */
1709 if (!has_unknown) res = optimize_node(res);
/* The new block becomes the current block for subsequent construction. */
1710 current_ir_graph->current_block = res;
1712 IRN_VRFY_IRG(res, current_ir_graph);
1717 /* ***********************************************************************/
1718 /* Methods necessary for automatic Phi node creation */
1720 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1721 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1722 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1723 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1725 Call Graph: ( A ---> B == A "calls" B)
1727 get_value mature_immBlock
1735 get_r_value_internal |
1739 new_rd_Phi0 new_rd_Phi_in
1741 * *************************************************************************** */
1743 /** Creates a Phi node with 0 predecessors.
 * Such a "Phi0" is a placeholder created while a block is still
 * immature; it is completed (or replaced) when the block is matured. */
1744 static INLINE ir_node *
1745 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1748 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1749 IRN_VRFY_IRG(res, irg);
1753 /* There are two implementations of the Phi node construction. The first
1754 is faster, but does not work for blocks with more than 2 predecessors.
1755 The second works always but is slower and causes more unnecessary Phi
1757 Select the implementations by the following preprocessor flag set in
1759 #if USE_FAST_PHI_CONSTRUCTION
1761 /* This is a stack used for allocating and deallocating nodes in
1762 new_rd_Phi_in. The original implementation used the obstack
1763 to model this stack, now it is explicit. This reduces side effects.
1765 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in_stack (malloc'ed struct with a
 * flexible array for the node pointers). */
1767 new_Phi_in_stack(void) {
1770 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1772 res->stack = NEW_ARR_F (ir_node *, 0);
1776 } /* new_Phi_in_stack */
/* Releases the array inside a Phi_in_stack (counterpart of
 * new_Phi_in_stack). */
1779 free_Phi_in_stack(Phi_in_stack *s) {
1780 DEL_ARR_F(s->stack);
1782 } /* free_Phi_in_stack */
/* Pushes an abandoned Phi node onto the current graph's Phi_in_stack
 * so its storage can be reused by a later allocation: append if the
 * stack array is full, otherwise overwrite the slot at pos. */
1785 free_to_Phi_in_stack(ir_node *phi) {
1786 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1787 current_ir_graph->Phi_in_stack->pos)
1788 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1790 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1792 (current_ir_graph->Phi_in_stack->pos)++;
1793 } /* free_to_Phi_in_stack */
/* Returns a Phi node with the given arity/mode in `block`: either a
 * freshly allocated one, or a recycled node popped from the current
 * graph's Phi_in_stack whose fields are re-initialized. */
1795 static INLINE ir_node *
1796 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1797 int arity, ir_node **in) {
1799 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1800 int pos = current_ir_graph->Phi_in_stack->pos;
1804 /* We need to allocate a new node */
1805 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1806 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1808 /* reuse the old node and initialize it again. */
1811 assert(res->kind == k_ir_node);
1812 assert(res->op == op_Phi);
1817 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is reserved for the block; the predecessors follow at in[1..]. */
1818 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
1820 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1822 (current_ir_graph->Phi_in_stack->pos)--;
1825 } /* alloc_or_pop_from_Phi_in_stack */
1826 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1829 * Creates a Phi node with a given, fixed array **in of predecessors.
1830 * If the Phi node is unnecessary, as the same value reaches the block
1831 * through all control flow paths, it is eliminated and the value
1832 * returned directly. This constructor is only intended for use in
1833 * the automatic Phi node generation triggered by get_value or mature.
1834 * The implementation is quite tricky and depends on the fact, that
1835 * the nodes are allocated on a stack:
1836 * The in array contains predecessors and NULLs. The NULLs appear,
1837 * if get_r_value_internal, that computed the predecessors, reached
1838 * the same block on two paths. In this case the same value reaches
1839 * this block on both paths, there is no definition in between. We need
1840 * not allocate a Phi where these paths merge, but we have to communicate
1841 * this fact to the caller. This happens by returning a pointer to the
1842 * node the caller _will_ allocate. (Yes, we predict the address. We can
1843 * do so because the nodes are allocated on the obstack.) The caller then
1844 * finds a pointer to itself and, when this routine is called again,
1845 * eliminates itself.
1847 static INLINE ir_node *
1848 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins) {
1850 ir_node *res, *known;
1852 /* Allocate a new node on the obstack. This can return a node to
1853 which some of the pointers in the in-array already point.
1854 Attention: the constructor copies the in array, i.e., the later
1855 changes to the array in this routine do not affect the
1856 constructed node! If the in array contains NULLs, there will be
1857 missing predecessors in the returned node. Is this a possible
1858 internal state of the Phi node generation? */
1859 #if USE_EXPLICIT_PHI_IN_STACK
1860 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1862 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1863 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1866 /* The in-array can contain NULLs. These were returned by
1867 get_r_value_internal if it reached the same block/definition on a
1868 second path. The NULLs are replaced by the node itself to
1869 simplify the test in the next loop. */
1870 for (i = 0; i < ins; ++i) {
1875 /* This loop checks whether the Phi has more than one predecessor.
1876 If so, it is a real Phi node and we break the loop. Else the Phi
1877 node merges the same definition on several paths and therefore is
1879 for (i = 0; i < ins; ++i) {
1880 if (in[i] == res || in[i] == known)
1889 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1891 #if USE_EXPLICIT_PHI_IN_STACK
1892 free_to_Phi_in_stack(res);
/* Without the explicit stack, the superfluous node is freed back to
 * the obstack (valid because it is the most recent allocation). */
1894 edges_node_deleted(res, current_ir_graph);
1895 obstack_free(current_ir_graph->obst, res);
1899 res = optimize_node (res);
1900 IRN_VRFY_IRG(res, irg);
1903 /* return the pointer to the Phi node. This node might be deallocated! */
1905 } /* new_rd_Phi_in */
1908 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1911 * Allocates and returns this node. The routine called to allocate the
1912 * node might optimize it away and return a real value, or even a pointer
1913 * to a deallocated Phi node on top of the obstack!
1914 * This function is called with an in-array of proper size.
1917 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
1918 ir_node *prevBlock, *res;
1921 /* This loop goes to all predecessor blocks of the block the Phi node is in
1922 and there finds the operands of the Phi node by calling
1923 get_r_value_internal. */
1924 for (i = 1; i <= ins; ++i) {
1925 assert (block->in[i]);
1926 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1928 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1931 /* After collecting all predecessors into the array nin a new Phi node
1932 with these predecessors is created. This constructor contains an
1933 optimization: If all predecessors of the Phi node are identical it
1934 returns the only operand instead of a new Phi node. If the value
1935 passes two different control flow edges without being defined, and
1936 this is the second path treated, a pointer to the node that will be
1937 allocated for the first path (recursion) is returned. We already
1938 know the address of this node, as it is the next node to be allocated
1939 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1940 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1942 /* Now we know the value for "pos" and can enter it in the array with
1943 all known local variables. Attention: this might be a pointer to
1944 a node, that later will be allocated!!! See new_rd_Phi_in().
1945 If this is called in mature, after some set_value() in the same block,
1946 the proper value must not be overwritten:
1948 get_value (makes Phi0, puts it into graph_arr)
1949 set_value (overwrites Phi0 in graph_arr)
1950 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1953 if (!block->attr.block.graph_arr[pos]) {
1954 block->attr.block.graph_arr[pos] = res;
1956 /* printf(" value already computed by %s\n",
1957 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1964 * This function returns the last definition of a variable. In case
1965 * this variable was last defined in a previous block, Phi nodes are
1966 * inserted. If the part of the firm graph containing the definition
1967 * is not yet constructed, a dummy Phi node is returned.
1970 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
1973 /* There are 4 cases to treat.
1975 1. The block is not mature and we visit it the first time. We can not
1976 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1977 predecessors is returned. This node is added to the linked list (field
1978 "link") of the containing block to be completed when this block is
1979 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1982 2. The value is already known in this block, graph_arr[pos] is set and we
1983 visit the block the first time. We can return the value without
1984 creating any new nodes.
1986 3. The block is mature and we visit it the first time. A Phi node needs
1987 to be created (phi_merge). If the Phi is not needed, as all its
1988 operands are the same value reaching the block through different
1989 paths, it's optimized away and the value itself is returned.
1991 4. The block is mature, and we visit it the second time. Now two
1992 subcases are possible:
1993 * The value was computed completely the last time we were here. This
1994 is the case if there is no loop. We can return the proper value.
1995 * The recursion that visited this node and set the flag did not
1996 return yet. We are computing a value in a loop and need to
1997 break the recursion without knowing the result yet.
1998 @@@ strange case. Straight forward we would create a Phi before
1999 starting the computation of its predecessors. In this case we will
2000 find a Phi here in any case. The problem is that this implementation
2001 only creates a Phi after computing the predecessors, so that it is
2002 hard to compute self references of this Phi. @@@
2003 There is no simple check for the second subcase. Therefore we check
2004 for a second visit and treat all such cases as the second subcase.
2005 Anyways, the basic situation is the same: we reached a block
2006 on two paths without finding a definition of the value: No Phi
2007 nodes are needed on both paths.
2008 We return this information "Two paths, no Phi needed" by a very tricky
2009 implementation that relies on the fact that an obstack is a stack and
2010 will return a node with the same address on different allocations.
2011 Look also at phi_merge and new_rd_phi_in to understand this.
2012 @@@ Unfortunately this does not work, see testprogram
2013 three_cfpred_example.
2017 /* case 4 -- already visited. */
2018 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2020 /* visited the first time */
2021 set_irn_visited(block, get_irg_visited(current_ir_graph));
2023 /* Get the local valid value */
2024 res = block->attr.block.graph_arr[pos];
2026 /* case 2 -- If the value is actually computed, return it. */
2027 if (res) return res;
2029 if (block->attr.block.matured) { /* case 3 */
2031 /* The Phi has the same amount of ins as the corresponding block. */
2032 int ins = get_irn_arity(block);
2034 NEW_ARR_A(ir_node *, nin, ins);
2036 /* Phi merge collects the predecessors and then creates a node. */
2037 res = phi_merge(block, pos, mode, nin, ins);
2039 } else { /* case 1 */
2040 /* The block is not mature, we don't know how many in's are needed. A Phi
2041 with zero predecessors is created. Such a Phi node is called Phi0
2042 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2043 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2045 The Phi0 has to remember the pos of it's internal value. If the real
2046 Phi is computed, pos is used to update the array with the local
2049 res = new_rd_Phi0(current_ir_graph, block, mode);
2050 res->attr.phi0_pos = pos;
2051 res->link = block->link;
2055 /* If we get here, the frontend missed a use-before-definition error */
2058 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2059 assert(mode->code >= irm_F && mode->code <= irm_P);
/* Fall back to the mode's zero tarval as a defined value. */
2060 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2061 tarval_mode_null[mode->code]);
2064 /* The local valid value is available now. */
2065 block->attr.block.graph_arr[pos] = res;
2068 } /* get_r_value_internal */
2073 it starts the recursion. This causes an Id at the entry of
2074 every block that has no definition of the value! **/
2076 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy stubs: the slow (general) Phi construction does not use the
 * explicit Phi_in_stack, so these do nothing. */
2078 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2079 void free_Phi_in_stack(Phi_in_stack *s) {}
/* General (slow) variant of new_rd_Phi_in: builds a Phi with the given
 * predecessors; if at most one real (non-Bad, non-self) predecessor
 * remains, the Phi is removed again and that predecessor is returned.
 * phi0, if non-NULL, is the placeholder Phi0 this Phi will replace. */
2082 static INLINE ir_node *
2083 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2084 ir_node **in, int ins, ir_node *phi0) {
2086 ir_node *res, *known;
2088 /* Allocate a new node on the obstack. The allocation copies the in
2090 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2091 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2093 /* This loop checks whether the Phi has more than one predecessor.
2094 If so, it is a real Phi node and we break the loop. Else the
2095 Phi node merges the same definition on several paths and therefore
2096 is not needed. Don't consider Bad nodes! */
2098 for (i=0; i < ins; ++i)
2102 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2104 /* Optimize self referencing Phis: We can't detect them yet properly, as
2105 they still refer to the Phi0 they will replace. So replace right now. */
2106 if (phi0 && in[i] == phi0) in[i] = res;
2108 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2116 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Free the superfluous Phi allocation back to the obstack. */
2119 edges_node_deleted(res, current_ir_graph);
2120 obstack_free (current_ir_graph->obst, res);
2121 if (is_Phi(known)) {
2122 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2123 order, an enclosing Phi node may become superfluous. */
2124 res = optimize_in_place_2(known);
2126 exchange(known, res);
2132 /* An undefined value, e.g., in unreachable code. */
2136 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2137 IRN_VRFY_IRG(res, irg);
2138 /* Memory Phis in endless loops must be kept alive.
2139 As we can't distinguish these easily we keep all of them alive. */
2140 if ((res->op == op_Phi) && (mode == mode_M))
2141 add_End_keepalive(get_irg_end(irg), res);
2145 } /* new_rd_Phi_in */
2148 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2150 #if PRECISE_EXC_CONTEXT
2152 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2155 * Construct a new frag_array for node n.
2156 * Copy the content from the current graph_arr of the corresponding block:
2157 * this is the current state.
2158 * Set ProjM(n) as current memory state.
2159 * Further the last entry in frag_arr of current block points to n. This
2160 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2162 static INLINE ir_node **new_frag_arr(ir_node *n) {
2166 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2167 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2168 sizeof(ir_node *)*current_ir_graph->n_loc);
2170 /* turn off optimization before allocating Proj nodes, as res isn't
2172 opt = get_opt_optimize(); set_optimize(0);
2173 /* Here we rely on the fact that all frag ops have Memory as first result! */
2174 if (get_irn_op(n) == op_Call)
2175 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2176 else if (get_irn_op(n) == op_CopyB)
2177 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops share the same memory-Proj number; the
 * asserts document and check that assumption. */
2179 assert((pn_Quot_M == pn_DivMod_M) &&
2180 (pn_Quot_M == pn_Div_M) &&
2181 (pn_Quot_M == pn_Mod_M) &&
2182 (pn_Quot_M == pn_Load_M) &&
2183 (pn_Quot_M == pn_Store_M) &&
2184 (pn_Quot_M == pn_Alloc_M) &&
2185 (pn_Quot_M == pn_Bound_M));
2186 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Link this fragile op into the block's chain of frag ops (slot n_loc-1). */
2190 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2192 } /* new_frag_arr */
2195 * Returns the frag_arr from a node.
 * Dispatches on the opcode because the frag_arr is stored in a
 * different attribute union member per fragile op kind.
2197 static INLINE ir_node **get_frag_arr(ir_node *n) {
2198 switch (get_irn_opcode(n)) {
2200 return n->attr.call.exc.frag_arr;
2202 return n->attr.alloc.exc.frag_arr;
2204 return n->attr.load.exc.frag_arr;
2206 return n->attr.store.exc.frag_arr;
2208 return n->attr.except.frag_arr;
2210 } /* get_frag_arr */
/* set_frag_value
 * Records val for local variable pos in frag_arr (only if the slot is
 * still empty), then propagates along the chain of fragile ops linked
 * through slot n_loc-1.  Both a recursive and an iterative (bounded)
 * form of the walk appear below. */
2213 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2215 if (!frag_arr[pos]) frag_arr[pos] = val;
2216 if (frag_arr[current_ir_graph->n_loc - 1]) {
2217 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2218 assert(arr != frag_arr && "Endless recursion detected");
2219 set_frag_value(arr, pos, val);
/* Iterative variant: bounded walk along the frag-op chain. */
2224 for (i = 0; i < 1000; ++i) {
2225 if (!frag_arr[pos]) {
2226 frag_arr[pos] = val;
2228 if (frag_arr[current_ir_graph->n_loc - 1]) {
2229 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2235 assert(0 && "potential endless recursion");
2237 } /* set_frag_value */
/* get_r_frag_value_internal
 * Returns the value of local variable pos as seen right after the
 * fragile op cfOp, consulting cfOp's frag_arr first and falling back
 * to the block's regular value lookup. */
2240 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2244 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2246 frag_arr = get_frag_arr(cfOp);
2247 res = frag_arr[pos];
2249 if (block->attr.block.graph_arr[pos]) {
2250 /* There was a set_value() after the cfOp and no get_value before that
2251 set_value(). We must build a Phi node now. */
2252 if (block->attr.block.matured) {
2253 int ins = get_irn_arity(block);
2255 NEW_ARR_A (ir_node *, nin, ins);
2256 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: fall back to a Phi0 placeholder, remembered in
 * the block's Phi0 list for later maturation. */
2258 res = new_rd_Phi0 (current_ir_graph, block, mode);
2259 res->attr.phi0_pos = pos;
2260 res->link = block->link;
2264 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2265 but this should be better: (remove comment if this works) */
2266 /* It's a Phi, we can write this into all graph_arrs with NULL */
2267 set_frag_value(block->attr.block.graph_arr, pos, res);
2269 res = get_r_value_internal(block, pos, mode);
2270 set_frag_value(block->attr.block.graph_arr, pos, res);
2274 } /* get_r_frag_value_internal */
2275 #endif /* PRECISE_EXC_CONTEXT */
2278 * Computes the predecessors for the real phi node, and then
2279 * allocates and returns this node. The routine called to allocate the
2280 * node might optimize it away and return a real value.
2281 * This function must be called with an in-array of proper size.
2284 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2285 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2288 /* If this block has no value at pos create a Phi0 and remember it
2289 in graph_arr to break recursions.
2290 Else we may not set graph_arr as there a later value is remembered. */
2292 if (!block->attr.block.graph_arr[pos]) {
2293 if (block == get_irg_start_block(current_ir_graph)) {
2294 /* Collapsing to Bad tarvals is no good idea.
2295 So we call a user-supplied routine here that deals with this case as
2296 appropriate for the given language. Sadly, the only help we can give
2297 here is the position.
2299 Even if all variables are defined before use, it can happen that
2300 we get to the start block, if a Cond has been replaced by a tuple
2301 (bad, jmp). In this case we call the function needlessly, eventually
2302 generating a non-existent error.
2303 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2306 if (default_initialize_local_variable) {
2307 ir_node *rem = get_cur_block();
/* The callback builds nodes in the start block; restore afterwards. */
2309 set_cur_block(block);
2310 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2314 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2315 /* We don't need to care about exception ops in the start block.
2316 There are none by definition. */
2317 return block->attr.block.graph_arr[pos];
2319 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2320 block->attr.block.graph_arr[pos] = phi0;
2321 #if PRECISE_EXC_CONTEXT
2322 if (get_opt_precise_exc_context()) {
2323 /* Set graph_arr for fragile ops. Also here we should break recursion.
2324 We could choose a cyclic path through a cfop. But the recursion would
2325 break at some point. */
2326 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2332 /* This loop goes to all predecessor blocks of the block the Phi node
2333 is in and there finds the operands of the Phi node by calling
2334 get_r_value_internal. */
2335 for (i = 1; i <= ins; ++i) {
2336 prevCfOp = skip_Proj(block->in[i]);
2338 if (is_Bad(prevCfOp)) {
2339 /* In case a Cond has been optimized we would get right to the start block
2340 with an invalid definition. */
2341 nin[i-1] = new_Bad();
2344 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2346 if (!is_Bad(prevBlock)) {
2347 #if PRECISE_EXC_CONTEXT
2348 if (get_opt_precise_exc_context() &&
2349 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2350 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2351 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2354 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2356 nin[i-1] = new_Bad();
2360 /* We want to pass the Phi0 node to the constructor: this finds additional
2361 optimization possibilities.
2362 The Phi0 node either is allocated in this function, or it comes from
2363 a former call to get_r_value_internal. In this case we may not yet
2364 exchange phi0, as this is done in mature_immBlock. */
2366 phi0_all = block->attr.block.graph_arr[pos];
/* Only pass it on if it really is a Phi0 (a Phi with no ins) of this block. */
2367 if (!((get_irn_op(phi0_all) == op_Phi) &&
2368 (get_irn_arity(phi0_all) == 0) &&
2369 (get_nodes_block(phi0_all) == block)))
2375 /* After collecting all predecessors into the array nin a new Phi node
2376 with these predecessors is created. This constructor contains an
2377 optimization: If all predecessors of the Phi node are identical it
2378 returns the only operand instead of a new Phi node. */
2379 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2381 /* In case we allocated a Phi0 node at the beginning of this procedure,
2382 we need to exchange this Phi0 with the real Phi. */
2384 exchange(phi0, res);
2385 block->attr.block.graph_arr[pos] = res;
2386 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2387 only an optimization. */
2394 * This function returns the last definition of a variable. In case
2395 * this variable was last defined in a previous block, Phi nodes are
2396 * inserted. If the part of the firm graph containing the definition
2397 * is not yet constructed, a dummy Phi node is returned.
 *
 * @param block  the block in which the value is requested
 * @param pos    index into the block's graph_arr (slot 0 holds the memory state)
 * @param mode   the mode of the requested value
2400 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2402 /* There are 4 cases to treat.
2404 1. The block is not mature and we visit it the first time. We can not
2405 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2406 predecessors is returned. This node is added to the linked list (field
2407 "link") of the containing block to be completed when this block is
2408 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2411 2. The value is already known in this block, graph_arr[pos] is set and we
2412 visit the block the first time. We can return the value without
2413 creating any new nodes.
2415 3. The block is mature and we visit it the first time. A Phi node needs
2416 to be created (phi_merge). If the Phi is not needed, as all it's
2417 operands are the same value reaching the block through different
2418 paths, it's optimized away and the value itself is returned.
2420 4. The block is mature, and we visit it the second time. Now two
2421 subcases are possible:
2422 * The value was computed completely the last time we were here. This
2423 is the case if there is no loop. We can return the proper value.
2424 * The recursion that visited this node and set the flag did not
2425 return yet. We are computing a value in a loop and need to
2426 break the recursion. This case only happens if we visited
2427 the same block with phi_merge before, which inserted a Phi0.
2428 So we return the Phi0.
2431 /* case 4 -- already visited. */
2432 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2433 /* As phi_merge allocates a Phi0 this value is always defined. Here
2434 is the critical difference of the two algorithms. */
2435 assert(block->attr.block.graph_arr[pos]);
2436 return block->attr.block.graph_arr[pos];
2439 /* visited the first time */
2440 set_irn_visited(block, get_irg_visited(current_ir_graph));
2442 /* Get the local valid value */
2443 res = block->attr.block.graph_arr[pos];
2445 /* case 2 -- If the value is actually computed, return it. */
2446 if (res) { return res; };
2448 if (block->attr.block.matured) { /* case 3 */
2450 /* The Phi has the same amount of ins as the corresponding block. */
2451 int ins = get_irn_arity(block);
2453 NEW_ARR_A (ir_node *, nin, ins);
2455 /* Phi merge collects the predecessors and then creates a node. */
2456 res = phi_merge (block, pos, mode, nin, ins);
2458 } else { /* case 1 */
2459 /* The block is not mature, we don't know how many in's are needed. A Phi
2460 with zero predecessors is created. Such a Phi node is called Phi0
2461 node. The Phi0 is then added to the list of Phi0 nodes in this block
2462 to be matured by mature_immBlock later.
2463 The Phi0 has to remember the pos of it's internal value. If the real
2464 Phi is computed, pos is used to update the array with the local
2466 res = new_rd_Phi0 (current_ir_graph, block, mode);
2467 res->attr.phi0_pos = pos;
2468 res->link = block->link;
2472 /* If we get here, the frontend missed a use-before-definition error */
/* NOTE(review): presumably the default_initialize_local_variable callback
   (see top of file) should be consulted before defaulting to zero --
   confirm against the full source. */
2475 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2476 assert(mode->code >= irm_F && mode->code <= irm_P);
2477 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2478 get_mode_null(mode));
2481 /* The local valid value is available now. */
2482 block->attr.block.graph_arr[pos] = res;
2485 } /* get_r_value_internal */
2487 #endif /* USE_FAST_PHI_CONSTRUCTION */
2489 /* ************************************************************************** */
2492 * Finalize a Block node, when all control flows are known.
2493 * Acceptable parameters are only Block nodes.
 *
 * @param block  the immature Block whose predecessor list is now complete;
 *               all Phi0 nodes chained on block->link are merged into real Phis
2496 mature_immBlock(ir_node *block) {
2501 assert(get_irn_opcode(block) == iro_Block);
2502 /* @@@ should be commented in
2503 assert (!get_Block_matured(block) && "Block already matured"); */
2505 if (!get_Block_matured(block)) {
2506 ins = ARR_LEN (block->in)-1;
2507 /* Fix block parameters */
2508 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2510 /* An array for building the Phi nodes. */
2511 NEW_ARR_A (ir_node *, nin, ins);
2513 /* Traverse a chain of Phi nodes attached to this block and mature
2515 for (n = block->link; n; n = next) {
2516 inc_irg_visited(current_ir_graph);
2518 exchange(n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2521 block->attr.block.matured = 1;
2523 /* Now, as the block is a finished firm node, we can optimize it.
2524 Since other nodes have been allocated since the block was created
2525 we can not free the node on the obstack. Therefore we have to call
2527 Unfortunately the optimization does not change a lot, as all allocated
2528 nodes refer to the unoptimized node.
2529 We can call _2, as global cse has no effect on blocks. */
2530 block = optimize_in_place_2(block);
2531 IRN_VRFY_IRG(block, current_ir_graph);
2533 } /* mature_immBlock */
/* Constructors with explicit debug info. They build the node in the current
   block of current_ir_graph; Const-like nodes are placed in the start block
   instead (see the get_irg_start_block calls below). */
2536 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2537 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2541 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2542 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2546 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2547 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2548 } /* new_d_Const_long */
2551 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2552 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2553 } /* new_d_Const_type */
2557 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2558 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2562 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2563 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* Marks the Cond as fragmentary (having a default case) and creates the
   matching mode_X Proj for the default projection number. */
2567 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2569 assert(arg->op == op_Cond);
2570 arg->attr.cond.kind = fragmentary;
2571 arg->attr.cond.default_proj = max_proj;
2572 res = new_Proj(arg, mode_X, max_proj);
2574 } /* new_d_defaultProj */
2577 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2578 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2582 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2583 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2584 } /* new_d_strictConv */
2587 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2588 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2592 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2593 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2602 * Allocate the frag array.
 *
 * Only done while the graph is still being built and only if the node was
 * not optimized away into a different op; a CSE hit may already carry a
 * frag array (hence the !*frag_store check).
2604 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2605 if (get_opt_precise_exc_context()) {
2606 if ((current_ir_graph->phase_state == phase_building) &&
2607 (get_irn_op(res) == op) && /* Could be optimized away. */
2608 !*frag_store) /* Could be a cse where the arr is already set. */ {
2609 *frag_store = new_frag_arr(res);
2612 } /* allocate_frag_arr */
/* Division-like constructors: the result is pinned and, when precise
   exception context is enabled, gets a fragment array for the memory state. */
2615 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2617 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2);
2618 res->attr.except.pin_state = op_pin_state_pinned;
2619 #if PRECISE_EXC_CONTEXT
2620 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2627 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2629 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2);
2630 res->attr.except.pin_state = op_pin_state_pinned;
2631 #if PRECISE_EXC_CONTEXT
2632 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2636 } /* new_d_DivMod */
2639 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2641 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2);
2642 res->attr.except.pin_state = op_pin_state_pinned;
2643 #if PRECISE_EXC_CONTEXT
2644 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2651 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2653 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2);
2654 res->attr.except.pin_state = op_pin_state_pinned;
2655 #if PRECISE_EXC_CONTEXT
2656 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* Further debug-info constructors. All operate on the current block of
   current_ir_graph; fragile ops (Call, Load, Store, Alloc, CopyB, Bound)
   additionally allocate a frag array under PRECISE_EXC_CONTEXT. */
2675 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2676 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2680 new_d_Jmp(dbg_info *db) {
2681 return new_bd_Jmp(db, current_ir_graph->current_block);
2685 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2686 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2690 new_d_Cond(dbg_info *db, ir_node *c) {
2691 return new_bd_Cond(db, current_ir_graph->current_block, c);
2695 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2698 res = new_bd_Call(db, current_ir_graph->current_block,
2699 store, callee, arity, in, tp);
2700 #if PRECISE_EXC_CONTEXT
2701 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2708 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2709 return new_bd_Return(db, current_ir_graph->current_block,
2711 } /* new_d_Return */
2714 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2716 res = new_bd_Load(db, current_ir_graph->current_block,
2718 #if PRECISE_EXC_CONTEXT
2719 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2726 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2728 res = new_bd_Store(db, current_ir_graph->current_block,
2730 #if PRECISE_EXC_CONTEXT
2731 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2738 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2739 where_alloc where) {
2741 res = new_bd_Alloc(db, current_ir_graph->current_block,
2742 store, size, alloc_type, where);
2743 #if PRECISE_EXC_CONTEXT
2744 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2751 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2752 ir_node *size, ir_type *free_type, where_alloc where) {
2753 return new_bd_Free(db, current_ir_graph->current_block,
2754 store, ptr, size, free_type, where);
/* Sel with no array indices (plain entity selection). */
2758 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2759 /* GL: objptr was called frame before. Frame was a bad choice for the name
2760 as the operand could as well be a pointer to a dynamic object. */
2762 return new_bd_Sel(db, current_ir_graph->current_block,
2763 store, objptr, 0, NULL, ent);
2764 } /* new_d_simpleSel */
2767 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2768 return new_bd_Sel(db, current_ir_graph->current_block,
2769 store, objptr, n_index, index, sel);
2773 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2774 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2776 } /* new_d_SymConst_type */
2779 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
2780 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2781 value, kind, firm_unknown_type);
2782 } /* new_d_SymConst */
2785 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2786 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2792 return _new_d_Bad();
2796 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2797 return new_bd_Confirm(db, current_ir_graph->current_block,
2799 } /* new_d_Confirm */
2802 new_d_Unknown(ir_mode *m) {
2803 return new_bd_Unknown(m);
2804 } /* new_d_Unknown */
2807 new_d_CallBegin(dbg_info *db, ir_node *call) {
2808 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2809 } /* new_d_CallBegin */
2812 new_d_EndReg(dbg_info *db) {
2813 return new_bd_EndReg(db, current_ir_graph->current_block);
2814 } /* new_d_EndReg */
2817 new_d_EndExcept(dbg_info *db) {
2818 return new_bd_EndExcept(db, current_ir_graph->current_block);
2819 } /* new_d_EndExcept */
2822 new_d_Break(dbg_info *db) {
2823 return new_bd_Break(db, current_ir_graph->current_block);
2827 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2828 return new_bd_Filter(db, current_ir_graph->current_block,
2830 } /* new_d_Filter */
2833 (new_d_NoMem)(void) {
2834 return _new_d_NoMem();
2838 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2839 ir_node *ir_true, ir_mode *mode) {
2840 return new_bd_Mux(db, current_ir_graph->current_block,
2841 sel, ir_false, ir_true, mode);
2845 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2846 return new_bd_Psi(db, current_ir_graph->current_block,
2847 arity, conds, vals, mode);
2850 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2851 ir_node *dst, ir_node *src, ir_type *data_type) {
2853 res = new_bd_CopyB(db, current_ir_graph->current_block,
2854 store, dst, src, data_type);
2855 #if PRECISE_EXC_CONTEXT
2856 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2862 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2863 return new_bd_InstOf(db, current_ir_graph->current_block,
2864 store, objptr, type);
2865 } /* new_d_InstOf */
2868 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2869 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2872 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2873 ir_node *idx, ir_node *lower, ir_node *upper) {
2875 res = new_bd_Bound(db, current_ir_graph->current_block,
2876 store, idx, lower, upper);
2877 #if PRECISE_EXC_CONTEXT
2878 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2884 new_d_Pin(dbg_info *db, ir_node *node) {
2885 return new_bd_Pin(db, current_ir_graph->current_block, node);
2888 /* ********************************************************************* */
2889 /* Comfortable interface with automatic Phi node construction. */
2890 /* (Uses also constructors of ?? interface, except new_Block. */
2891 /* ********************************************************************* */
2893 /* Block construction */
2894 /* immature Block without predecessors */
/* @return the new immature Block; it also becomes the current block. */
2895 ir_node *new_d_immBlock(dbg_info *db) {
2898 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2899 /* creates a new dynamic in-array as length of in is -1 */
2900 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2901 current_ir_graph->current_block = res;
2902 res->attr.block.matured = 0;
2903 res->attr.block.dead = 0;
2904 res->attr.block.irg = current_ir_graph;
2905 res->attr.block.backedge = NULL;
2906 res->attr.block.in_cg = NULL;
2907 res->attr.block.cg_backedge = NULL;
2908 res->attr.block.extblk = NULL;
2909 res->attr.block.region = NULL;
2910 set_Block_block_visited(res, 0);
2912 /* Create and initialize array for Phi-node construction. */
2913 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2914 current_ir_graph->n_loc)
2915 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2917 /* Immature block may not be optimized! */
2918 IRN_VRFY_IRG(res, current_ir_graph);
2921 } /* new_d_immBlock */
2924 new_immBlock(void) {
2925 return new_d_immBlock(NULL);
2926 } /* new_immBlock */
2928 /* add an edge to a jmp/control flow node */
/* Predecessors may only be added while the block is still immature. */
2930 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2931 if (block->attr.block.matured) {
2932 assert(0 && "Error: Block already matured!\n");
2934 int n = ARR_LEN(block->in) - 1;
2935 assert(jmp != NULL);
2936 ARR_APP1(ir_node *, block->in, jmp);
2938 hook_set_irn_n(block, n, jmp, NULL);
2940 } /* add_immBlock_pred */
2942 /* changing the current block */
2944 set_cur_block(ir_node *target) {
2945 current_ir_graph->current_block = target;
2946 } /* set_cur_block */
2948 /* ************************ */
2949 /* parameter administration */
2951 /* get a value from the parameter array from the current block by its index */
/* Note: user value number pos maps to graph_arr slot pos+1; slot 0 is the store. */
2953 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2954 ir_graph *irg = current_ir_graph;
2955 assert(get_irg_phase_state(irg) == phase_building);
2956 inc_irg_visited(irg);
2958 return get_r_value_internal(irg->current_block, pos + 1, mode);
2961 /* get a value from the parameter array from the current block by its index */
2963 get_value(int pos, ir_mode *mode) {
2964 return get_d_value(NULL, pos, mode);
2967 /* set a value at position pos in the parameter array from the current block */
2969 set_value(int pos, ir_node *value) {
2970 ir_graph *irg = current_ir_graph;
2971 assert(get_irg_phase_state(irg) == phase_building);
2972 assert(pos+1 < irg->n_loc);
2973 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2976 /* Find the value number for a node in the current block.*/
/* Scans graph_arr from the top down; slot 0 (the store) is excluded. */
2978 find_value(ir_node *value) {
2980 ir_node *bl = current_ir_graph->current_block;
2982 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2983 if (bl->attr.block.graph_arr[i] == value)
2988 /* get the current store */
/* The memory state lives in graph_arr slot 0 of the current block. */
2991 ir_graph *irg = current_ir_graph;
2993 assert(get_irg_phase_state(irg) == phase_building);
2994 /* GL: one could call get_value instead */
2995 inc_irg_visited(irg);
2996 return get_r_value_internal(irg->current_block, 0, mode_M);
2999 /* set the current store: handles automatic Sync construction for Load nodes */
3001 set_store(ir_node *store) {
3002 ir_node *load, *pload, *pred, *in[2];
3004 assert(get_irg_phase_state(current_ir_graph) == phase_building);
3005 /* Beware: due to dead code elimination, a store might become a Bad node even in
3006 the construction phase. */
3007 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
3009 if (get_opt_auto_create_sync()) {
3010 /* handle non-volatile Load nodes by automatically creating Sync's */
3011 load = skip_Proj(store);
3012 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3013 pred = get_Load_mem(load);
3015 if (is_Sync(pred)) {
3016 /* a Load after a Sync: move it up */
3017 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3019 set_Load_mem(load, get_memop_mem(mem));
3020 add_Sync_pred(pred, store);
3023 pload = skip_Proj(pred);
3024 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3025 /* a Load after a Load: create a new Sync */
3026 set_Load_mem(load, get_Load_mem(pload));
3030 store = new_Sync(2, in);
3035 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Register a node with the End node so it survives dead node elimination. */
3039 keep_alive(ir_node *ka) {
3040 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3043 /* --- Useful access routines --- */
3044 /* Returns the current block of the current graph. To set the current
3045 block use set_cur_block. */
3046 ir_node *get_cur_block(void) {
3047 return get_irg_current_block(current_ir_graph);
3048 } /* get_cur_block */
3050 /* Returns the frame type of the current graph */
3051 ir_type *get_cur_frame_type(void) {
3052 return get_irg_frame_type(current_ir_graph);
3053 } /* get_cur_frame_type */
3056 /* ********************************************************************* */
3059 /* call once for each run of the library */
/* Registers the frontend callback used for uninitialized local variables. */
3061 init_cons(uninitialized_local_variable_func_t *func) {
3062 default_initialize_local_variable = func;
/* Finalize construction of every graph in the program and leave the
   building phase (irp enters phase_high). */
3066 irp_finalize_cons(void) {
3068 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3069 irg_finalize_cons(get_irp_irg(i));
3071 irp->phase_state = phase_high;
3072 } /* irp_finalize_cons */
/* Convenience constructors without debug info: thin wrappers that forward
   to the corresponding new_d_* constructor with a NULL dbg_info. */
3075 ir_node *new_Block(int arity, ir_node **in) {
3076 return new_d_Block(NULL, arity, in);
3078 ir_node *new_Start(void) {
3079 return new_d_Start(NULL);
3081 ir_node *new_End(void) {
3082 return new_d_End(NULL);
3084 ir_node *new_Jmp(void) {
3085 return new_d_Jmp(NULL);
3087 ir_node *new_IJmp(ir_node *tgt) {
3088 return new_d_IJmp(NULL, tgt);
3090 ir_node *new_Cond(ir_node *c) {
3091 return new_d_Cond(NULL, c);
3093 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3094 return new_d_Return(NULL, store, arity, in);
3096 ir_node *new_Const(ir_mode *mode, tarval *con) {
3097 return new_d_Const(NULL, mode, con);
3100 ir_node *new_Const_long(ir_mode *mode, long value) {
3101 return new_d_Const_long(NULL, mode, value);
3104 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3105 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3108 ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
3109 return new_d_SymConst_type(NULL, value, kind, type);
3111 ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
3112 return new_d_SymConst(NULL, value, kind);
3114 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3115 return new_d_simpleSel(NULL, store, objptr, ent);
3117 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3119 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3121 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3123 return new_d_Call(NULL, store, callee, arity, in, tp);
3125 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3126 return new_d_Add(NULL, op1, op2, mode);
3128 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3129 return new_d_Sub(NULL, op1, op2, mode);
3131 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3132 return new_d_Minus(NULL, op, mode);
3134 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3135 return new_d_Mul(NULL, op1, op2, mode);
3137 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2) {
3138 return new_d_Quot(NULL, memop, op1, op2);
3140 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2) {
3141 return new_d_DivMod(NULL, memop, op1, op2);
3143 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2) {
3144 return new_d_Div(NULL, memop, op1, op2);
3146 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2) {
3147 return new_d_Mod(NULL, memop, op1, op2);
3149 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3150 return new_d_Abs(NULL, op, mode);
3152 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3153 return new_d_And(NULL, op1, op2, mode);
3155 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3156 return new_d_Or(NULL, op1, op2, mode);
3158 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3159 return new_d_Eor(NULL, op1, op2, mode);
3161 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3162 return new_d_Not(NULL, op, mode);
3164 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3165 return new_d_Shl(NULL, op, k, mode);
3167 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3168 return new_d_Shr(NULL, op, k, mode);
3170 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3171 return new_d_Shrs(NULL, op, k, mode);
3173 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
3174 return new_d_Rot(NULL, op, k, mode);
3176 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3177 return new_d_Carry(NULL, op1, op2, mode);
3179 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3180 return new_d_Borrow(NULL, op1, op2, mode);
3182 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3183 return new_d_Cmp(NULL, op1, op2);
3185 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3186 return new_d_Conv(NULL, op, mode);
3188 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3189 return new_d_strictConv(NULL, op, mode);
3191 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3192 return new_d_Cast(NULL, op, to_tp);
3194 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3195 return new_d_Phi(NULL, arity, in, mode);
3197 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3198 return new_d_Load(NULL, store, addr, mode);
3200 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3201 return new_d_Store(NULL, store, addr, val);
3203 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3204 where_alloc where) {
3205 return new_d_Alloc(NULL, store, size, alloc_type, where);
3207 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3208 ir_type *free_type, where_alloc where) {
3209 return new_d_Free(NULL, store, ptr, size, free_type, where);
3211 ir_node *new_Sync(int arity, ir_node *in[]) {
3212 return new_d_Sync(NULL, arity, in);
3214 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3215 return new_d_Proj(NULL, arg, mode, proj);
3217 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3218 return new_d_defaultProj(NULL, arg, max_proj);
3220 ir_node *new_Tuple(int arity, ir_node **in) {
3221 return new_d_Tuple(NULL, arity, in);
3223 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3224 return new_d_Id(NULL, val, mode);
3226 ir_node *new_Bad(void) {
3229 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3230 return new_d_Confirm (NULL, val, bound, cmp);
3232 ir_node *new_Unknown(ir_mode *m) {
3233 return new_d_Unknown(m);
3235 ir_node *new_CallBegin(ir_node *callee) {
3236 return new_d_CallBegin(NULL, callee);
3238 ir_node *new_EndReg(void) {
3239 return new_d_EndReg(NULL);
3241 ir_node *new_EndExcept(void) {
3242 return new_d_EndExcept(NULL);
3244 ir_node *new_Break(void) {
3245 return new_d_Break(NULL);
3247 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3248 return new_d_Filter(NULL, arg, mode, proj);
3250 ir_node *new_NoMem(void) {
3251 return new_d_NoMem();
3253 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3254 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3256 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3257 return new_d_Psi(NULL, arity, conds, vals, mode);
3259 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3260 return new_d_CopyB(NULL, store, dst, src, data_type);
3262 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3263 return new_d_InstOf(NULL, store, objptr, ent);
3265 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3266 return new_d_Raise(NULL, store, obj);
3268 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3269 return new_d_Bound(NULL, store, idx, lower, upper);
3271 ir_node *new_Pin(ir_node *node) {
3272 return new_d_Pin(NULL, node);