2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * File name: ir/ir/ircons.c
23 * Purpose: Various irnode constructors. Automatic construction
24 * of SSA representation.
25 * Author: Martin Trapp, Christian Schaefer
26 * Modified by: Goetz Lindenmaier, Boris Boesler, Michael Beck
29 * Copyright: (c) 1998-2007 Universität Karlsruhe
37 #include "irgraph_t.h"
41 #include "firm_common_t.h"
48 #include "irbackedge_t.h"
50 #include "iredges_t.h"
54 #if USE_EXPLICIT_PHI_IN_STACK
55 /* A stack needed for the automatic Phi node construction in constructor
56 Phi_in. Redefinition in irgraph.c!! */
61 typedef struct Phi_in_stack Phi_in_stack;
64 /* when we need verifying */
66 # define IRN_VRFY_IRG(res, irg)
68 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
72 * Language dependent variable initialization callback.
74 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
76 /* creates a bd constructor for a binop */
77 #define NEW_BD_BINOP(instr) \
79 new_bd_##instr(dbg_info *db, ir_node *block, \
80 ir_node *op1, ir_node *op2, ir_mode *mode) \
84 ir_graph *irg = current_ir_graph; \
87 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
88 res = optimize_node(res); \
89 IRN_VRFY_IRG(res, irg); \
93 /* creates a bd constructor for an unop */
94 #define NEW_BD_UNOP(instr) \
96 new_bd_##instr(dbg_info *db, ir_node *block, \
97 ir_node *op, ir_mode *mode) \
100 ir_graph *irg = current_ir_graph; \
101 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
102 res = optimize_node(res); \
103 IRN_VRFY_IRG(res, irg); \
107 /* creates a bd constructor for an divop */
108 #define NEW_BD_DIVOP(instr) \
110 new_bd_##instr(dbg_info *db, ir_node *block, \
111 ir_node *memop, ir_node *op1, ir_node *op2) \
115 ir_graph *irg = current_ir_graph; \
119 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
120 res = optimize_node(res); \
121 IRN_VRFY_IRG(res, irg); \
125 /* creates a rd constructor for a binop */
126 #define NEW_RD_BINOP(instr) \
128 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
129 ir_node *op1, ir_node *op2, ir_mode *mode) \
132 ir_graph *rem = current_ir_graph; \
133 current_ir_graph = irg; \
134 res = new_bd_##instr(db, block, op1, op2, mode); \
135 current_ir_graph = rem; \
139 /* creates a rd constructor for an unop */
140 #define NEW_RD_UNOP(instr) \
142 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
143 ir_node *op, ir_mode *mode) \
146 ir_graph *rem = current_ir_graph; \
147 current_ir_graph = irg; \
148 res = new_bd_##instr(db, block, op, mode); \
149 current_ir_graph = rem; \
153 /* creates a rd constructor for an divop */
154 #define NEW_RD_DIVOP(instr) \
156 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
157 ir_node *memop, ir_node *op1, ir_node *op2) \
160 ir_graph *rem = current_ir_graph; \
161 current_ir_graph = irg; \
162 res = new_bd_##instr(db, block, memop, op1, op2); \
163 current_ir_graph = rem; \
167 /* creates a d constructor for an binop */
168 #define NEW_D_BINOP(instr) \
170 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
171 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
174 /* creates a d constructor for an unop */
175 #define NEW_D_UNOP(instr) \
177 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
178 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
183  * Constructs a Block with a fixed number of predecessors.
184  * Does not set current_block. Can not be used with automatic
185  * Phi node construction.
/* NOTE(review): extraction dropped lines of this function (the return
   type, the "ir_node *res;" declaration and the trailing
   "return res; }") -- restore them before compiling. */
188 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
190 ir_graph *irg = current_ir_graph;
/* The block node itself has no enclosing block (NULL) and is created
   already mature: no further predecessors may be added. */
192 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
193 set_Block_matured(res, 1);
194 set_Block_block_visited(res, 0);
196 /* res->attr.block.exc = exc_normal; */
197 /* res->attr.block.handler_entry = 0; */
198 res->attr.block.dead = 0;
199 res->attr.block.irg = irg;
/* Back-edge flags for the loop construction, allocated on the graph's
   obstack so they live exactly as long as the graph does. */
200 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
201 res->attr.block.in_cg = NULL;
202 res->attr.block.cg_backedge = NULL;
203 res->attr.block.extblk = NULL;
205 IRN_VRFY_IRG(res, irg);
210 new_bd_Start(dbg_info *db, ir_node *block) {
212 ir_graph *irg = current_ir_graph;
214 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
215 /* res->attr.start.irg = irg; */
217 IRN_VRFY_IRG(res, irg);
222 new_bd_End(dbg_info *db, ir_node *block) {
224 ir_graph *irg = current_ir_graph;
226 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
228 IRN_VRFY_IRG(res, irg);
233  * Creates a Phi node with all predecessors. Calling this constructor
234  * is only allowed if the corresponding block is mature.
/* NOTE(review): extraction dropped lines here (return type, declarations
   of "res", "i" and "has_unknown", the loop body that sets has_unknown,
   and the final "return res; }").  Comments annotate surviving lines. */
237 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
239 ir_graph *irg = current_ir_graph;
243 /* Don't assert that block matured: the use of this constructor is strongly
245 if ( get_Block_matured(block) )
246 assert( get_irn_arity(block) == arity );
248 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
/* Per-predecessor back-edge flags, needed by the loop construction. */
250 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the operands for Unknown nodes: a Phi with an Unknown input is
   not optimized (see the has_unknown guard below). */
252 for (i = arity-1; i >= 0; i--)
253 if (get_irn_op(in[i]) == op_Unknown) {
258 if (!has_unknown) res = optimize_node (res);
259 IRN_VRFY_IRG(res, irg);
261 /* Memory Phis in endless loops must be kept alive.
262 As we can't distinguish these easily we keep all of them alive. */
263 if ((res->op == op_Phi) && (mode == mode_M))
264 add_End_keepalive(get_irg_end(irg), res);
/* Constructs a Const with an explicit type.  NOTE(review): the "block"
   parameter is ignored -- the node is always placed in the start block
   (see the get_irg_start_block() call below); confirm this is intended. */
269 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
271 ir_graph *irg = current_ir_graph;
273 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
274 res->attr.con.tv = con;
275 set_Const_type(res, tp); /* Call method because of complex assertion. */
276 res = optimize_node (res);
277 assert(get_Const_type(res) == tp);
278 IRN_VRFY_IRG(res, irg);
281 } /* new_bd_Const_type */
/* Convenience wrapper: Const with firm_unknown_type.  NOTE(review): a
   _bd_ constructor delegating to the _rd_ variant; harmless only because
   irg == current_ir_graph here -- worth confirming. */
284 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
285 ir_graph *irg = current_ir_graph;
287 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience wrapper: Const built from a plain long via a tarval. */
291 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
292 ir_graph *irg = current_ir_graph;
294 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
295 } /* new_bd_Const_long */
298 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
300 ir_graph *irg = current_ir_graph;
302 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
303 res = optimize_node(res);
304 IRN_VRFY_IRG(res, irg);
309 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
312 ir_graph *irg = current_ir_graph;
314 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
315 res->attr.proj = proj;
318 assert(get_Proj_pred(res));
319 assert(get_nodes_block(get_Proj_pred(res)));
321 res = optimize_node(res);
323 IRN_VRFY_IRG(res, irg);
/* Constructs the default Proj (mode_X, number max_proj) of a Cond.
   NOTE(review): this mutates the Cond argument -- it marks the Cond as
   "fragmentary" and stores the default proj number -- before creating
   the Proj itself. */
328 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
331 ir_graph *irg = current_ir_graph;
333 assert(arg->op == op_Cond);
334 arg->attr.cond.kind = fragmentary;
335 arg->attr.cond.default_proj = max_proj;
336 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
338 } /* new_bd_defaultProj */
341 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
343 ir_graph *irg = current_ir_graph;
345 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
346 res->attr.conv.strict = strict_flag;
347 res = optimize_node(res);
348 IRN_VRFY_IRG(res, irg);
353 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
355 ir_graph *irg = current_ir_graph;
357 assert(is_atomic_type(to_tp));
359 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
360 res->attr.cast.totype = to_tp;
361 res = optimize_node(res);
362 IRN_VRFY_IRG(res, irg);
367 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
369 ir_graph *irg = current_ir_graph;
371 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
372 res = optimize_node (res);
373 IRN_VRFY_IRG(res, irg);
398 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
401 ir_graph *irg = current_ir_graph;
404 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
405 res = optimize_node(res);
406 IRN_VRFY_IRG(res, irg);
411 new_bd_Jmp(dbg_info *db, ir_node *block) {
413 ir_graph *irg = current_ir_graph;
415 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
416 res = optimize_node (res);
417 IRN_VRFY_IRG(res, irg);
422 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
424 ir_graph *irg = current_ir_graph;
426 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
427 res = optimize_node (res);
428 IRN_VRFY_IRG(res, irg);
430 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
436 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
438 ir_graph *irg = current_ir_graph;
440 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
441 res->attr.cond.kind = dense;
442 res->attr.cond.default_proj = 0;
443 res->attr.cond.pred = COND_JMP_PRED_NONE;
444 res = optimize_node (res);
445 IRN_VRFY_IRG(res, irg);
450 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
451 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
455 ir_graph *irg = current_ir_graph;
458 NEW_ARR_A(ir_node *, r_in, r_arity);
461 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
463 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
465 assert((get_unknown_type() == tp) || is_Method_type(tp));
466 set_Call_type(res, tp);
467 res->attr.call.exc.pin_state = op_pin_state_pinned;
468 res->attr.call.callee_arr = NULL;
469 res = optimize_node(res);
470 IRN_VRFY_IRG(res, irg);
475 new_bd_Return(dbg_info *db, ir_node *block,
476 ir_node *store, int arity, ir_node **in) {
480 ir_graph *irg = current_ir_graph;
483 NEW_ARR_A (ir_node *, r_in, r_arity);
485 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
486 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
487 res = optimize_node(res);
488 IRN_VRFY_IRG(res, irg);
490 } /* new_bd_Return */
493 new_bd_Load(dbg_info *db, ir_node *block,
494 ir_node *store, ir_node *adr, ir_mode *mode) {
497 ir_graph *irg = current_ir_graph;
501 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
502 res->attr.load.exc.pin_state = op_pin_state_pinned;
503 res->attr.load.load_mode = mode;
504 res->attr.load.volatility = volatility_non_volatile;
505 res = optimize_node(res);
506 IRN_VRFY_IRG(res, irg);
511 new_bd_Store(dbg_info *db, ir_node *block,
512 ir_node *store, ir_node *adr, ir_node *val) {
515 ir_graph *irg = current_ir_graph;
520 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
521 res->attr.store.exc.pin_state = op_pin_state_pinned;
522 res->attr.store.volatility = volatility_non_volatile;
523 res = optimize_node(res);
524 IRN_VRFY_IRG(res, irg);
529 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
530 ir_node *size, ir_type *alloc_type, where_alloc where) {
533 ir_graph *irg = current_ir_graph;
537 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
538 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
539 res->attr.alloc.where = where;
540 res->attr.alloc.type = alloc_type;
541 res = optimize_node(res);
542 IRN_VRFY_IRG(res, irg);
547 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
548 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
551 ir_graph *irg = current_ir_graph;
556 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
557 res->attr.free.where = where;
558 res->attr.free.type = free_type;
559 res = optimize_node(res);
560 IRN_VRFY_IRG(res, irg);
565 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
566 int arity, ir_node **in, ir_entity *ent) {
570 ir_graph *irg = current_ir_graph;
571 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
573 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
576 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
579 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
581 * Sel's can select functions which should be of mode mode_P_code.
583 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
584 res->attr.sel.ent = ent;
585 res = optimize_node(res);
586 IRN_VRFY_IRG(res, irg);
591 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
592 symconst_kind symkind, ir_type *tp) {
595 ir_graph *irg = current_ir_graph;
597 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
598 mode = mode_P_data; /* FIXME: can be mode_P_code */
602 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
604 res->attr.symc.num = symkind;
605 res->attr.symc.sym = value;
606 res->attr.symc.tp = tp;
608 res = optimize_node(res);
609 IRN_VRFY_IRG(res, irg);
611 } /* new_bd_SymConst_type */
/* Constructs an empty Sync node (dynamic arity -1); predecessors are
   added afterwards via add_Sync_pred -- see new_rd_Sync. */
614 new_bd_Sync(dbg_info *db, ir_node *block) {
616 ir_graph *irg = current_ir_graph;
618 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
619 /* no need to call optimize node here, Sync are always created with no predecessors */
620 IRN_VRFY_IRG(res, irg);
625 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
626 ir_node *in[2], *res;
627 ir_graph *irg = current_ir_graph;
631 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
632 res->attr.confirm_cmp = cmp;
633 res = optimize_node (res);
634 IRN_VRFY_IRG(res, irg);
636 } /* new_bd_Confirm */
/* Constructs an Unknown node of mode m, placed in the start block.
   No debug info is attached (first argument NULL). */
639 new_bd_Unknown(ir_mode *m) {
641 ir_graph *irg = current_ir_graph;
643 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
/* NOTE(review): unlike the sibling constructors there is no
   IRN_VRFY_IRG call here -- presumably intentional for Unknown; confirm. */
644 res = optimize_node(res);
646 } /* new_bd_Unknown */
649 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
652 ir_graph *irg = current_ir_graph;
654 in[0] = get_Call_ptr(call);
655 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
656 /* res->attr.callbegin.irg = irg; */
657 res->attr.callbegin.call = call;
658 res = optimize_node(res);
659 IRN_VRFY_IRG(res, irg);
661 } /* new_bd_CallBegin */
664 new_bd_EndReg(dbg_info *db, ir_node *block) {
666 ir_graph *irg = current_ir_graph;
668 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
669 set_irg_end_reg(irg, res);
670 IRN_VRFY_IRG(res, irg);
672 } /* new_bd_EndReg */
675 new_bd_EndExcept(dbg_info *db, ir_node *block) {
677 ir_graph *irg = current_ir_graph;
679 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
680 set_irg_end_except(irg, res);
681 IRN_VRFY_IRG (res, irg);
683 } /* new_bd_EndExcept */
686 new_bd_Break(dbg_info *db, ir_node *block) {
688 ir_graph *irg = current_ir_graph;
690 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
691 res = optimize_node(res);
692 IRN_VRFY_IRG(res, irg);
697 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
700 ir_graph *irg = current_ir_graph;
702 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
703 res->attr.filter.proj = proj;
704 res->attr.filter.in_cg = NULL;
705 res->attr.filter.backedge = NULL;
708 assert(get_Proj_pred(res));
709 assert(get_nodes_block(get_Proj_pred(res)));
711 res = optimize_node(res);
712 IRN_VRFY_IRG(res, irg);
714 } /* new_bd_Filter */
717 new_bd_Mux(dbg_info *db, ir_node *block,
718 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
721 ir_graph *irg = current_ir_graph;
727 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
730 res = optimize_node(res);
731 IRN_VRFY_IRG(res, irg);
736 new_bd_Psi(dbg_info *db, ir_node *block,
737 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
740 ir_graph *irg = current_ir_graph;
743 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
745 for (i = 0; i < arity; ++i) {
747 in[2 * i + 1] = vals[i];
751 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
754 res = optimize_node(res);
755 IRN_VRFY_IRG(res, irg);
760 new_bd_CopyB(dbg_info *db, ir_node *block,
761 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
764 ir_graph *irg = current_ir_graph;
770 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
772 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
773 res->attr.copyb.data_type = data_type;
774 res = optimize_node(res);
775 IRN_VRFY_IRG(res, irg);
780 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
781 ir_node *objptr, ir_type *type) {
784 ir_graph *irg = current_ir_graph;
788 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
789 res->attr.instof.type = type;
790 res = optimize_node(res);
791 IRN_VRFY_IRG(res, irg);
793 } /* new_bd_InstOf */
796 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
799 ir_graph *irg = current_ir_graph;
803 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
804 res = optimize_node(res);
805 IRN_VRFY_IRG(res, irg);
810 new_bd_Bound(dbg_info *db, ir_node *block,
811 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
814 ir_graph *irg = current_ir_graph;
820 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
821 res->attr.bound.exc.pin_state = op_pin_state_pinned;
822 res = optimize_node(res);
823 IRN_VRFY_IRG(res, irg);
828 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
830 ir_graph *irg = current_ir_graph;
832 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
833 res = optimize_node(res);
834 IRN_VRFY_IRG(res, irg);
838 /* --------------------------------------------- */
839 /* private interfaces, for professional use only */
840 /* --------------------------------------------- */
842 /* Constructs a Block with a fixed number of predecessors.
843 Does not set current_block. Can not be used with automatic
844 Phi node construction. */
846 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
847 ir_graph *rem = current_ir_graph;
850 current_ir_graph = irg;
851 res = new_bd_Block(db, arity, in);
852 current_ir_graph = rem;
858 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
859 ir_graph *rem = current_ir_graph;
862 current_ir_graph = irg;
863 res = new_bd_Start(db, block);
864 current_ir_graph = rem;
870 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
872 ir_graph *rem = current_ir_graph;
874 current_ir_graph = rem;
875 res = new_bd_End(db, block);
876 current_ir_graph = rem;
881 /* Creates a Phi node with all predecessors. Calling this constructor
882 is only allowed if the corresponding block is mature. */
884 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
886 ir_graph *rem = current_ir_graph;
888 current_ir_graph = irg;
889 res = new_bd_Phi(db, block,arity, in, mode);
890 current_ir_graph = rem;
896 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
898 ir_graph *rem = current_ir_graph;
900 current_ir_graph = irg;
901 res = new_bd_Const_type(db, block, mode, con, tp);
902 current_ir_graph = rem;
905 } /* new_rd_Const_type */
908 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
910 ir_graph *rem = current_ir_graph;
912 current_ir_graph = irg;
913 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
914 current_ir_graph = rem;
920 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
921 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
922 } /* new_rd_Const_long */
925 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
927 ir_graph *rem = current_ir_graph;
929 current_ir_graph = irg;
930 res = new_bd_Id(db, block, val, mode);
931 current_ir_graph = rem;
937 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
940 ir_graph *rem = current_ir_graph;
942 current_ir_graph = irg;
943 res = new_bd_Proj(db, block, arg, mode, proj);
944 current_ir_graph = rem;
950 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
953 ir_graph *rem = current_ir_graph;
955 current_ir_graph = irg;
956 res = new_bd_defaultProj(db, block, arg, max_proj);
957 current_ir_graph = rem;
960 } /* new_rd_defaultProj */
963 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
965 ir_graph *rem = current_ir_graph;
967 current_ir_graph = irg;
968 res = new_bd_Conv(db, block, op, mode, 0);
969 current_ir_graph = rem;
975 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
977 ir_graph *rem = current_ir_graph;
979 current_ir_graph = irg;
980 res = new_bd_Cast(db, block, op, to_tp);
981 current_ir_graph = rem;
987 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
989 ir_graph *rem = current_ir_graph;
991 current_ir_graph = irg;
992 res = new_bd_Tuple(db, block, arity, in);
993 current_ir_graph = rem;
1003 NEW_RD_DIVOP(DivMod)
1016 NEW_RD_BINOP(Borrow)
1019 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1020 ir_node *op1, ir_node *op2) {
1022 ir_graph *rem = current_ir_graph;
1024 current_ir_graph = irg;
1025 res = new_bd_Cmp(db, block, op1, op2);
1026 current_ir_graph = rem;
1032 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1034 ir_graph *rem = current_ir_graph;
1036 current_ir_graph = irg;
1037 res = new_bd_Jmp(db, block);
1038 current_ir_graph = rem;
1044 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1046 ir_graph *rem = current_ir_graph;
1048 current_ir_graph = irg;
1049 res = new_bd_IJmp(db, block, tgt);
1050 current_ir_graph = rem;
1056 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1058 ir_graph *rem = current_ir_graph;
1060 current_ir_graph = irg;
1061 res = new_bd_Cond(db, block, c);
1062 current_ir_graph = rem;
1068 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1069 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1071 ir_graph *rem = current_ir_graph;
1073 current_ir_graph = irg;
1074 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1075 current_ir_graph = rem;
1081 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1082 ir_node *store, int arity, ir_node **in) {
1084 ir_graph *rem = current_ir_graph;
1086 current_ir_graph = irg;
1087 res = new_bd_Return(db, block, store, arity, in);
1088 current_ir_graph = rem;
1091 } /* new_rd_Return */
1094 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1095 ir_node *store, ir_node *adr, ir_mode *mode) {
1097 ir_graph *rem = current_ir_graph;
1099 current_ir_graph = irg;
1100 res = new_bd_Load(db, block, store, adr, mode);
1101 current_ir_graph = rem;
1107 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1108 ir_node *store, ir_node *adr, ir_node *val) {
1110 ir_graph *rem = current_ir_graph;
1112 current_ir_graph = irg;
1113 res = new_bd_Store(db, block, store, adr, val);
1114 current_ir_graph = rem;
1117 } /* new_rd_Store */
1120 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1121 ir_node *size, ir_type *alloc_type, where_alloc where) {
1123 ir_graph *rem = current_ir_graph;
1125 current_ir_graph = irg;
1126 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1127 current_ir_graph = rem;
1130 } /* new_rd_Alloc */
1133 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1134 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1136 ir_graph *rem = current_ir_graph;
1138 current_ir_graph = irg;
1139 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1140 current_ir_graph = rem;
1146 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1147 ir_node *store, ir_node *objptr, ir_entity *ent) {
1149 ir_graph *rem = current_ir_graph;
1151 current_ir_graph = irg;
1152 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1153 current_ir_graph = rem;
1156 } /* new_rd_simpleSel */
1159 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1160 int arity, ir_node **in, ir_entity *ent) {
1162 ir_graph *rem = current_ir_graph;
1164 current_ir_graph = irg;
1165 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1166 current_ir_graph = rem;
1172 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1173 symconst_kind symkind, ir_type *tp) {
1175 ir_graph *rem = current_ir_graph;
1177 current_ir_graph = irg;
1178 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1179 current_ir_graph = rem;
1182 } /* new_rd_SymConst_type */
1185 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1186 symconst_kind symkind) {
1187 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1189 } /* new_rd_SymConst */
1191 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1192 symconst_symbol sym;
1193 sym.entity_p = symbol;
1194 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1195 } /* new_rd_SymConst_addr_ent */
1197 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1198 symconst_symbol sym;
1199 sym.entity_p = symbol;
1200 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1201 } /* new_rd_SymConst_ofs_ent */
1203 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1204 symconst_symbol sym;
1205 sym.ident_p = symbol;
1206 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1207 } /* new_rd_SymConst_addr_name */
1209 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1210 symconst_symbol sym;
1211 sym.type_p = symbol;
1212 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1213 } /* new_rd_SymConst_type_tag */
1215 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1216 symconst_symbol sym;
1217 sym.type_p = symbol;
1218 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1219 } /* new_rd_SymConst_size */
1221 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1222 symconst_symbol sym;
1223 sym.type_p = symbol;
1224 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1225 } /* new_rd_SymConst_align */
/* Remembers current_ir_graph, switches to irg, builds an empty Sync,
   restores the previous graph and then adds the arity predecessors. */
1228 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1230 ir_graph *rem = current_ir_graph;
1233 current_ir_graph = irg;
1234 res = new_bd_Sync(db, block);
1235 current_ir_graph = rem;
/* NOTE(review): predecessors are added after current_ir_graph has been
   restored -- presumably add_Sync_pred works on res's own graph; confirm. */
1237 for (i = 0; i < arity; ++i)
1238 add_Sync_pred(res, in[i]);
/* Bad is a singleton per graph: simply return the graph's Bad node. */
1244 new_rd_Bad(ir_graph *irg) {
1245 return get_irg_bad(irg);
1249 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1251 ir_graph *rem = current_ir_graph;
1253 current_ir_graph = irg;
1254 res = new_bd_Confirm(db, block, val, bound, cmp);
1255 current_ir_graph = rem;
1258 } /* new_rd_Confirm */
1261 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1263 ir_graph *rem = current_ir_graph;
1265 current_ir_graph = irg;
1266 res = new_bd_Unknown(m);
1267 current_ir_graph = rem;
1270 } /* new_rd_Unknown */
1273 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1275 ir_graph *rem = current_ir_graph;
1277 current_ir_graph = irg;
1278 res = new_bd_CallBegin(db, block, call);
1279 current_ir_graph = rem;
1282 } /* new_rd_CallBegin */
1285 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1288 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1289 set_irg_end_reg(irg, res);
1290 IRN_VRFY_IRG(res, irg);
1292 } /* new_rd_EndReg */
1295 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1298 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1299 set_irg_end_except(irg, res);
1300 IRN_VRFY_IRG (res, irg);
1302 } /* new_rd_EndExcept */
1305 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1307 ir_graph *rem = current_ir_graph;
1309 current_ir_graph = irg;
1310 res = new_bd_Break(db, block);
1311 current_ir_graph = rem;
1314 } /* new_rd_Break */
1317 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1320 ir_graph *rem = current_ir_graph;
1322 current_ir_graph = irg;
1323 res = new_bd_Filter(db, block, arg, mode, proj);
1324 current_ir_graph = rem;
1327 } /* new_rd_Filter */
1330 new_rd_NoMem(ir_graph *irg) {
1331 return get_irg_no_mem(irg);
1332 } /* new_rd_NoMem */
1335 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1336 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1338 ir_graph *rem = current_ir_graph;
1340 current_ir_graph = irg;
1341 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1342 current_ir_graph = rem;
1348 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1349 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1351 ir_graph *rem = current_ir_graph;
1353 current_ir_graph = irg;
1354 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1355 current_ir_graph = rem;
1360 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1361 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1363 ir_graph *rem = current_ir_graph;
1365 current_ir_graph = irg;
1366 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1367 current_ir_graph = rem;
1370 } /* new_rd_CopyB */
1373 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1374 ir_node *objptr, ir_type *type) {
1376 ir_graph *rem = current_ir_graph;
1378 current_ir_graph = irg;
1379 res = new_bd_InstOf(db, block, store, objptr, type);
1380 current_ir_graph = rem;
1383 } /* new_rd_InstOf */
1386 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1388 ir_graph *rem = current_ir_graph;
1390 current_ir_graph = irg;
1391 res = new_bd_Raise(db, block, store, obj);
1392 current_ir_graph = rem;
1395 } /* new_rd_Raise */
1397 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1398 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1400 ir_graph *rem = current_ir_graph;
1402 current_ir_graph = irg;
1403 res = new_bd_Bound(db, block, store, idx, lower, upper);
1404 current_ir_graph = rem;
1407 } /* new_rd_Bound */
1409 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1411 ir_graph *rem = current_ir_graph;
1413 current_ir_graph = irg;
1414 res = new_bd_Pin(db, block, node);
1415 current_ir_graph = rem;
/* The new_r_* constructors are thin convenience wrappers: each forwards to
   the matching new_rd_* constructor with NULL debug info. Control-flow,
   constant, Sel and Call constructors follow.
   NOTE(review): closing braces of these one-line wrappers are not visible in
   this excerpt. */
1420 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1421 return new_rd_Block(NULL, irg, arity, in);
1423 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1424 return new_rd_Start(NULL, irg, block);
1426 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1427 return new_rd_End(NULL, irg, block);
1429 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1430 return new_rd_Jmp(NULL, irg, block);
1432 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1433 return new_rd_IJmp(NULL, irg, block, tgt);
1435 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1436 return new_rd_Cond(NULL, irg, block, c);
1438 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1439 ir_node *store, int arity, ir_node **in) {
1440 return new_rd_Return(NULL, irg, block, store, arity, in);
1442 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1443 ir_mode *mode, tarval *con) {
1444 return new_rd_Const(NULL, irg, block, mode, con);
1446 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1447 ir_mode *mode, long value) {
1448 return new_rd_Const_long(NULL, irg, block, mode, value);
1450 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1451 ir_mode *mode, tarval *con, ir_type *tp) {
1452 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1454 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1455 symconst_symbol value, symconst_kind symkind) {
1456 return new_rd_SymConst(NULL, irg, block, value, symkind);
/* simpleSel: entity selection without array indices (n_index = 0). */
1458 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1459 ir_node *objptr, ir_entity *ent) {
1460 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1462 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1463 ir_node *objptr, int n_index, ir_node **index,
1465 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1467 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1468 ir_node *callee, int arity, ir_node **in,
1470 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
/* new_r_* arithmetic, logic, shift and compare wrappers — all delegate to
   the corresponding new_rd_* constructor with NULL debug info. */
1472 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1473 ir_node *op1, ir_node *op2, ir_mode *mode) {
1474 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1476 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1477 ir_node *op1, ir_node *op2, ir_mode *mode) {
1478 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1480 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1481 ir_node *op, ir_mode *mode) {
1482 return new_rd_Minus(NULL, irg, block, op, mode);
1484 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1485 ir_node *op1, ir_node *op2, ir_mode *mode) {
1486 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
/* Quot/DivMod/Div/Mod additionally take a memory operand (memop). */
1488 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1489 ir_node *memop, ir_node *op1, ir_node *op2) {
1490 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1492 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1493 ir_node *memop, ir_node *op1, ir_node *op2) {
1494 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1496 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1497 ir_node *memop, ir_node *op1, ir_node *op2) {
1498 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1500 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1501 ir_node *memop, ir_node *op1, ir_node *op2) {
1502 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1504 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1505 ir_node *op, ir_mode *mode) {
1506 return new_rd_Abs(NULL, irg, block, op, mode);
1508 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1509 ir_node *op1, ir_node *op2, ir_mode *mode) {
1510 return new_rd_And(NULL, irg, block, op1, op2, mode);
1512 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1513 ir_node *op1, ir_node *op2, ir_mode *mode) {
1514 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1516 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1517 ir_node *op1, ir_node *op2, ir_mode *mode) {
1518 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1520 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1521 ir_node *op, ir_mode *mode) {
1522 return new_rd_Not(NULL, irg, block, op, mode);
/* Shift/rotate constructors: k is the shift amount operand. */
1524 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1525 ir_node *op, ir_node *k, ir_mode *mode) {
1526 return new_rd_Shl(NULL, irg, block, op, k, mode);
1528 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1529 ir_node *op, ir_node *k, ir_mode *mode) {
1530 return new_rd_Shr(NULL, irg, block, op, k, mode);
1532 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1533 ir_node *op, ir_node *k, ir_mode *mode) {
1534 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1536 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1537 ir_node *op, ir_node *k, ir_mode *mode) {
1538 return new_rd_Rot(NULL, irg, block, op, k, mode);
1540 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1541 ir_node *op, ir_node *k, ir_mode *mode) {
1542 return new_rd_Carry(NULL, irg, block, op, k, mode);
1544 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1545 ir_node *op, ir_node *k, ir_mode *mode) {
1546 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1548 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1549 ir_node *op1, ir_node *op2) {
1550 return new_rd_Cmp(NULL, irg, block, op1, op2);
/* new_r_* conversion, memory, Proj/Tuple, interprocedural-view and
   miscellaneous wrappers — same NULL-debug-info delegation pattern. */
1552 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1553 ir_node *op, ir_mode *mode) {
1554 return new_rd_Conv(NULL, irg, block, op, mode);
1556 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1557 return new_rd_Cast(NULL, irg, block, op, to_tp);
1559 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1560 ir_node **in, ir_mode *mode) {
1561 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1563 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1564 ir_node *store, ir_node *adr, ir_mode *mode) {
1565 return new_rd_Load(NULL, irg, block, store, adr, mode);
1567 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1568 ir_node *store, ir_node *adr, ir_node *val) {
1569 return new_rd_Store(NULL, irg, block, store, adr, val);
1571 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1572 ir_node *size, ir_type *alloc_type, where_alloc where) {
1573 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1575 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1576 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1577 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1579 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1580 return new_rd_Sync(NULL, irg, block, arity, in);
1582 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1583 ir_mode *mode, long proj) {
1584 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1586 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1588 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1590 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1591 int arity, ir_node **in) {
1592 return new_rd_Tuple(NULL, irg, block, arity, in );
1594 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1595 ir_node *val, ir_mode *mode) {
1596 return new_rd_Id(NULL, irg, block, val, mode);
/* Bad and NoMem take no debug info at all. */
1598 ir_node *new_r_Bad(ir_graph *irg) {
1599 return new_rd_Bad(irg);
1601 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1602 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1604 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1605 return new_rd_Unknown(irg, m);
1607 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1608 return new_rd_CallBegin(NULL, irg, block, callee);
1610 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1611 return new_rd_EndReg(NULL, irg, block);
1613 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1614 return new_rd_EndExcept(NULL, irg, block);
1616 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1617 return new_rd_Break(NULL, irg, block);
1619 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1620 ir_mode *mode, long proj) {
1621 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1623 ir_node *new_r_NoMem(ir_graph *irg) {
1624 return new_rd_NoMem(irg);
1626 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1627 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1628 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1630 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1631 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1632 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1634 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1635 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1636 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1638 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1640 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1642 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1643 ir_node *store, ir_node *obj) {
1644 return new_rd_Raise(NULL, irg, block, store, obj);
1646 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1647 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1648 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1650 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1651 return new_rd_Pin(NULL, irg, block, node);
1654 /** ********************/
1655 /** public interfaces */
1656 /** construction tools */
1660 * - create a new Start node in the current block
1662 * @return s - pointer to the created Start node
/* new_d_Start: creates a Start node (mode_T, no predecessors) in the current
   block of current_ir_graph, optimizes and verifies it. */
1667 new_d_Start(dbg_info *db) {
1670 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1671 op_Start, mode_T, 0, NULL);
1673 res = optimize_node(res);
1674 IRN_VRFY_IRG(res, current_ir_graph);
/* new_d_End: creates the End node (mode_X) in the current block.
   Arity -1 selects a dynamic in-array (keep-alive edges are added later). */
1679 new_d_End(dbg_info *db) {
1681 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1682 op_End, mode_X, -1, NULL);
1683 res = optimize_node(res);
1684 IRN_VRFY_IRG(res, current_ir_graph);
1689 /* Constructs a Block with a fixed number of predecessors.
1690 Does set current_block. Can be used with automatic Phi
1691 node construction. */
/* new_d_Block: constructs a block with a fixed number of predecessors and
   makes it the current block. While the graph is in phase_building it also
   allocates the per-block graph_arr used for automatic Phi construction.
   Blocks with Unknown predecessors are not optimized yet (has_unknown). */
1693 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1696 int has_unknown = 0;
1698 res = new_bd_Block(db, arity, in);
1700 /* Create and initialize array for Phi-node construction. */
1701 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1702 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1703 current_ir_graph->n_loc);
1704 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* scan predecessors for Unknown control flow */
1707 for (i = arity-1; i >= 0; i--)
1708 if (get_irn_op(in[i]) == op_Unknown) {
/* only optimize when all predecessors are known */
1713 if (!has_unknown) res = optimize_node(res);
1714 current_ir_graph->current_block = res;
1716 IRN_VRFY_IRG(res, current_ir_graph);
1721 /* ***********************************************************************/
1722 /* Methods necessary for automatic Phi node creation */
1724 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1725 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1726 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1727 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1729 Call Graph: ( A ---> B == A "calls" B)
1731 get_value mature_immBlock
1739 get_r_value_internal |
1743 new_rd_Phi0 new_rd_Phi_in
1745 * *************************************************************************** */
1747 /** Creates a Phi node with 0 predecessors. */
/* new_rd_Phi0: Phi node with zero predecessors, used as a placeholder while
   the enclosing block is still immature; completed later by mature_immBlock. */
1748 static INLINE ir_node *
1749 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1752 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1753 IRN_VRFY_IRG(res, irg);
1757 /* There are two implementations of the Phi node construction. The first
1758 is faster, but does not work for blocks with more than 2 predecessors.
1759 The second works always but is slower and causes more unnecessary Phi
1761 Select the implementations by the following preprocessor flag set in
1763 #if USE_FAST_PHI_CONSTRUCTION
1765 /* This is a stack used for allocating and deallocating nodes in
1766 new_rd_Phi_in. The original implementation used the obstack
1767 to model this stack, now it is explicit. This reduces side effects.
1769 #if USE_EXPLICIT_PHI_IN_STACK
/* new_Phi_in_stack: allocates the explicit stack used by new_rd_Phi_in to
   recycle speculatively-built Phi nodes (fast construction variant).
   NOTE(review): malloc result is used unchecked in the visible lines. */
1771 new_Phi_in_stack(void) {
1774 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1776 res->stack = NEW_ARR_F (ir_node *, 0);
1780 } /* new_Phi_in_stack */
/* free_Phi_in_stack: releases the flexible array backing the Phi-in stack. */
1783 free_Phi_in_stack(Phi_in_stack *s) {
1784 DEL_ARR_F(s->stack);
1786 } /* free_Phi_in_stack */
/* free_to_Phi_in_stack: pushes a no-longer-needed Phi onto the reuse stack.
   Appends to the array when the stack is full, otherwise overwrites the slot
   at the current position; pos always ends one past the top element. */
1789 free_to_Phi_in_stack(ir_node *phi) {
1790 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1791 current_ir_graph->Phi_in_stack->pos)
1792 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1794 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1796 (current_ir_graph->Phi_in_stack->pos)++;
1797 } /* free_to_Phi_in_stack */
/* alloc_or_pop_from_Phi_in_stack: returns a Phi node for new_rd_Phi_in,
   either freshly allocated (stack empty) or recycled from the reuse stack
   and re-initialized with the given arity and in-array.
   NOTE(review): `db` in the allocation call has no visible declaration in
   this excerpt — confirm against the full source. */
1799 static INLINE ir_node *
1800 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1801 int arity, ir_node **in) {
1803 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1804 int pos = current_ir_graph->Phi_in_stack->pos;
1808 /* We need to allocate a new node */
1809 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1810 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1812 /* reuse the old node and initialize it again. */
1815 assert(res->kind == k_ir_node);
1816 assert(res->op == op_Phi);
1821 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is the block; predecessors are copied to in[1..arity] */
1822 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
1824 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
/* pop the recycled node off the stack */
1826 (current_ir_graph->Phi_in_stack->pos)--;
1829 } /* alloc_or_pop_from_Phi_in_stack */
1830 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1833 * Creates a Phi node with a given, fixed array **in of predecessors.
1834 * If the Phi node is unnecessary, as the same value reaches the block
1835 * through all control flow paths, it is eliminated and the value
1836 * returned directly. This constructor is only intended for use in
1837 * the automatic Phi node generation triggered by get_value or mature.
1838 * The implementation is quite tricky and depends on the fact, that
1839 * the nodes are allocated on a stack:
1840 * The in array contains predecessors and NULLs. The NULLs appear,
1841 * if get_r_value_internal, that computed the predecessors, reached
1842 * the same block on two paths. In this case the same value reaches
1843 * this block on both paths, there is no definition in between. We need
1844 * not allocate a Phi where these path's merge, but we have to communicate
1845 * this fact to the caller. This happens by returning a pointer to the
1846 * node the caller _will_ allocate. (Yes, we predict the address. We can
1847 * do so because the nodes are allocated on the obstack.) The caller then
1848 * finds a pointer to itself and, when this routine is called again,
1849 * eliminates itself.
/* new_rd_Phi_in (fast construction variant): builds a Phi from a possibly
   NULL-containing in-array; collapses to the single real predecessor when
   at most one distinct value reaches the block (see header comment above).
   The returned pointer may refer to a node already freed back to the
   obstack/stack — callers must treat it accordingly. */
1851 static INLINE ir_node *
1852 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins) {
1854 ir_node *res, *known;
1856 /* Allocate a new node on the obstack. This can return a node to
1857 which some of the pointers in the in-array already point.
1858 Attention: the constructor copies the in array, i.e., the later
1859 changes to the array in this routine do not affect the
1860 constructed node! If the in array contains NULLs, there will be
1861 missing predecessors in the returned node. Is this a possible
1862 internal state of the Phi node generation? */
1863 #if USE_EXPLICIT_PHI_IN_STACK
1864 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1866 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1867 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1870 /* The in-array can contain NULLs. These were returned by
1871 get_r_value_internal if it reached the same block/definition on a
1872 second path. The NULLs are replaced by the node itself to
1873 simplify the test in the next loop. */
1874 for (i = 0; i < ins; ++i) {
1879 /* This loop checks whether the Phi has more than one predecessor.
1880 If so, it is a real Phi node and we break the loop. Else the Phi
1881 node merges the same definition on several paths and therefore is
1883 for (i = 0; i < ins; ++i) {
1884 if (in[i] == res || in[i] == known)
1893 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1895 #if USE_EXPLICIT_PHI_IN_STACK
1896 free_to_Phi_in_stack(res);
/* without the explicit stack, give the node back to the obstack */
1898 edges_node_deleted(res, current_ir_graph);
1899 obstack_free(current_ir_graph->obst, res);
1903 res = optimize_node (res);
1904 IRN_VRFY_IRG(res, irg);
1907 /* return the pointer to the Phi node. This node might be deallocated! */
1909 } /* new_rd_Phi_in */
1912 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1915 * Allocates and returns this node. The routine called to allocate the
1916 * node might optimize it away and return a real value, or even a pointer
1917 * to a deallocated Phi node on top of the obstack!
1918 * This function is called with an in-array of proper size.
/* phi_merge (fast construction variant): collects the value at `pos` from
   every predecessor block into nin[] via get_r_value_internal, then builds
   (or collapses) the Phi with new_rd_Phi_in and records it in graph_arr
   unless a later set_value() already stored a value there. */
1921 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
1922 ir_node *prevBlock, *res;
1925 /* This loop goes to all predecessor blocks of the block the Phi node is in
1926 and there finds the operands of the Phi node by calling
1927 get_r_value_internal. */
1928 for (i = 1; i <= ins; ++i) {
1929 assert (block->in[i]);
1930 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1932 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1935 /* After collecting all predecessors into the array nin a new Phi node
1936 with these predecessors is created. This constructor contains an
1937 optimization: If all predecessors of the Phi node are identical it
1938 returns the only operand instead of a new Phi node. If the value
1939 passes two different control flow edges without being defined, and
1940 this is the second path treated, a pointer to the node that will be
1941 allocated for the first path (recursion) is returned. We already
1942 know the address of this node, as it is the next node to be allocated
1943 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1944 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1946 /* Now we know the value for "pos" and can enter it in the array with
1947 all known local variables. Attention: this might be a pointer to
1948 a node, that later will be allocated!!! See new_rd_Phi_in().
1949 If this is called in mature, after some set_value() in the same block,
1950 the proper value must not be overwritten:
1952 get_value (makes Phi0, put's it into graph_arr)
1953 set_value (overwrites Phi0 in graph_arr)
1954 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1957 if (!block->attr.block.graph_arr[pos]) {
1958 block->attr.block.graph_arr[pos] = res;
1960 /* printf(" value already computed by %s\n",
1961 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1968 * This function returns the last definition of a variable. In case
1969 * this variable was last defined in a previous block, Phi nodes are
1970 * inserted. If the part of the firm graph containing the definition
1971 * is not yet constructed, a dummy Phi node is returned.
/* get_r_value_internal (fast construction variant): last definition of local
   variable `pos` in/reaching `block`; inserts Phi/Phi0 nodes as needed.
   The four cases are documented in detail in the comment below. */
1974 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
1977 /* There are 4 cases to treat.
1979 1. The block is not mature and we visit it the first time. We can not
1980 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1981 predecessors is returned. This node is added to the linked list (field
1982 "link") of the containing block to be completed when this block is
1983 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1986 2. The value is already known in this block, graph_arr[pos] is set and we
1987 visit the block the first time. We can return the value without
1988 creating any new nodes.
1990 3. The block is mature and we visit it the first time. A Phi node needs
1991 to be created (phi_merge). If the Phi is not needed, as all it's
1992 operands are the same value reaching the block through different
1993 paths, it's optimized away and the value itself is returned.
1995 4. The block is mature, and we visit it the second time. Now two
1996 subcases are possible:
1997 * The value was computed completely the last time we were here. This
1998 is the case if there is no loop. We can return the proper value.
1999 * The recursion that visited this node and set the flag did not
2000 return yet. We are computing a value in a loop and need to
2001 break the recursion without knowing the result yet.
2002 @@@ strange case. Straight forward we would create a Phi before
2003 starting the computation of it's predecessors. In this case we will
2004 find a Phi here in any case. The problem is that this implementation
2005 only creates a Phi after computing the predecessors, so that it is
2006 hard to compute self references of this Phi. @@@
2007 There is no simple check for the second subcase. Therefore we check
2008 for a second visit and treat all such cases as the second subcase.
2009 Anyways, the basic situation is the same: we reached a block
2010 on two paths without finding a definition of the value: No Phi
2011 nodes are needed on both paths.
2012 We return this information "Two paths, no Phi needed" by a very tricky
2013 implementation that relies on the fact that an obstack is a stack and
2014 will return a node with the same address on different allocations.
2015 Look also at phi_merge and new_rd_phi_in to understand this.
2016 @@@ Unfortunately this does not work, see testprogram
2017 three_cfpred_example.
2021 /* case 4 -- already visited. */
2022 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2024 /* visited the first time */
2025 set_irn_visited(block, get_irg_visited(current_ir_graph));
2027 /* Get the local valid value */
2028 res = block->attr.block.graph_arr[pos];
2030 /* case 2 -- If the value is actually computed, return it. */
2031 if (res) return res;
2033 if (block->attr.block.matured) { /* case 3 */
2035 /* The Phi has the same amount of ins as the corresponding block. */
2036 int ins = get_irn_arity(block);
2038 NEW_ARR_A(ir_node *, nin, ins);
2040 /* Phi merge collects the predecessors and then creates a node. */
2041 res = phi_merge(block, pos, mode, nin, ins);
2043 } else { /* case 1 */
2044 /* The block is not mature, we don't know how many in's are needed. A Phi
2045 with zero predecessors is created. Such a Phi node is called Phi0
2046 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2047 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2049 The Phi0 has to remember the pos of it's internal value. If the real
2050 Phi is computed, pos is used to update the array with the local
2053 res = new_rd_Phi0(current_ir_graph, block, mode);
2054 res->attr.phi0_pos = pos;
2055 res->link = block->link;
2059 /* If we get here, the frontend missed a use-before-definition error */
2062 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2063 assert(mode->code >= irm_F && mode->code <= irm_P);
2064 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2065 tarval_mode_null[mode->code]);
2068 /* The local valid value is available now. */
2069 block->attr.block.graph_arr[pos] = res;
2072 } /* get_r_value_internal */
2077 it starts the recursion. This causes an Id at the entry of
2078 every block that has no definition of the value! **/
2080 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy implementations: the general (slow) Phi construction does not use
   the explicit Phi-in stack, but the interface must still exist.
   NOTE(review): guarded by USE_EXPLICIT_PHI_IN_STACK in the line above —
   presumably inside the !USE_FAST_PHI_CONSTRUCTION branch; confirm. */
2082 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2083 void free_Phi_in_stack(Phi_in_stack *s) {}
/* new_rd_Phi_in (general construction variant): like the fast variant but
   also takes the Phi0 placeholder being replaced, skips Bad predecessors,
   resolves self-references through the Phi0 early, and keeps memory Phis
   alive via End keep-alive edges (endless-loop safety). */
2086 static INLINE ir_node *
2087 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2088 ir_node **in, int ins, ir_node *phi0) {
2090 ir_node *res, *known;
2092 /* Allocate a new node on the obstack. The allocation copies the in
2094 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2095 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2097 /* This loop checks whether the Phi has more than one predecessor.
2098 If so, it is a real Phi node and we break the loop. Else the
2099 Phi node merges the same definition on several paths and therefore
2100 is not needed. Don't consider Bad nodes! */
2102 for (i=0; i < ins; ++i)
2106 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2108 /* Optimize self referencing Phis: We can't detect them yet properly, as
2109 they still refer to the Phi0 they will replace. So replace right now. */
2110 if (phi0 && in[i] == phi0) in[i] = res;
2112 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2120 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* give the speculative Phi back to the obstack */
2123 edges_node_deleted(res, current_ir_graph);
2124 obstack_free (current_ir_graph->obst, res);
2125 if (is_Phi(known)) {
2126 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2127 order, an enclosing Phi node may become superfluous. */
2128 res = optimize_in_place_2(known);
2130 exchange(known, res);
2136 /* An undefined value, e.g., in unreachable code. */
2140 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2141 IRN_VRFY_IRG(res, irg);
2142 /* Memory Phis in endless loops must be kept alive.
2143 As we can't distinguish these easily we keep all of them alive. */
2144 if ((res->op == op_Phi) && (mode == mode_M))
2145 add_End_keepalive(get_irg_end(irg), res);
2149 } /* new_rd_Phi_in */
2152 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2154 #if PRECISE_EXC_CONTEXT
2156 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2159 * Construct a new frag_array for node n.
2160 * Copy the content from the current graph_arr of the corresponding block:
2161 * this is the current state.
2162 * Set ProjM(n) as current memory state.
2163 * Further the last entry in frag_arr of current block points to n. This
2164 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* new_frag_arr: builds the fragment array for fragile op n — a snapshot of
   the current block's graph_arr with arr[0] set to the exception-memory Proj
   of n. Optimization is disabled while creating the Proj (n is not finished).
   The last graph_arr slot chains block->last_frag_op->...->first_frag_op. */
2166 static INLINE ir_node **new_frag_arr(ir_node *n) {
2170 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2171 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2172 sizeof(ir_node *)*current_ir_graph->n_loc);
2174 /* turn off optimization before allocating Proj nodes, as res isn't
2176 opt = get_opt_optimize(); set_optimize(0);
2177 /* Here we rely on the fact that all frag ops have Memory as first result! */
2178 if (get_irn_op(n) == op_Call)
2179 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2180 else if (get_irn_op(n) == op_CopyB)
2181 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* all remaining fragile ops share the same memory Proj number */
2183 assert((pn_Quot_M == pn_DivMod_M) &&
2184 (pn_Quot_M == pn_Div_M) &&
2185 (pn_Quot_M == pn_Mod_M) &&
2186 (pn_Quot_M == pn_Load_M) &&
2187 (pn_Quot_M == pn_Store_M) &&
2188 (pn_Quot_M == pn_Alloc_M) &&
2189 (pn_Quot_M == pn_Bound_M));
2190 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* link n as the block's most recent fragile op */
2194 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2196 } /* new_frag_arr */
2199 * Returns the frag_arr from a node.
/* get_frag_arr: returns the fragment array stored in the op-specific
   attribute of a fragile node (Call/Alloc/Load/Store, else generic except). */
2201 static INLINE ir_node **get_frag_arr(ir_node *n) {
2202 switch (get_irn_opcode(n)) {
2204 return n->attr.call.exc.frag_arr;
2206 return n->attr.alloc.exc.frag_arr;
2208 return n->attr.load.exc.frag_arr;
2210 return n->attr.store.exc.frag_arr;
2212 return n->attr.except.frag_arr;
2214 } /* get_frag_arr */
/* set_frag_value: records val at pos in frag_arr without overwriting an
   existing entry, then propagates along the fragile-op chain stored in the
   last slot (n_loc - 1).
   NOTE(review): the visible lines contain both a recursive form (2219-2223)
   and a bounded iterative form (2228-2239); presumably one is preprocessed
   out in the full source — confirm before editing. */
2217 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2219 if (!frag_arr[pos]) frag_arr[pos] = val;
2220 if (frag_arr[current_ir_graph->n_loc - 1]) {
2221 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2222 assert(arr != frag_arr && "Endless recursion detected");
2223 set_frag_value(arr, pos, val);
/* iterative variant: bounded walk along the fragile-op chain */
2228 for (i = 0; i < 1000; ++i) {
2229 if (!frag_arr[pos]) {
2230 frag_arr[pos] = val;
2232 if (frag_arr[current_ir_graph->n_loc - 1]) {
2233 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2239 assert(0 && "potential endless recursion");
2241 } /* set_frag_value */
/* get_r_frag_value_internal: like get_r_value_internal, but for the state
   after a fragile op cfOp (precise exception context). Reads the value from
   cfOp's fragment array; if the block defined a newer value, a Phi/Phi0 is
   built instead, and the result is propagated into the fragment arrays. */
2244 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2248 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2250 frag_arr = get_frag_arr(cfOp);
2251 res = frag_arr[pos];
2253 if (block->attr.block.graph_arr[pos]) {
2254 /* There was a set_value() after the cfOp and no get_value before that
2255 set_value(). We must build a Phi node now. */
2256 if (block->attr.block.matured) {
2257 int ins = get_irn_arity(block);
2259 NEW_ARR_A (ir_node *, nin, ins);
2260 res = phi_merge(block, pos, mode, nin, ins);
/* immature block: fall back to a Phi0 placeholder */
2262 res = new_rd_Phi0 (current_ir_graph, block, mode);
2263 res->attr.phi0_pos = pos;
2264 res->link = block->link;
2268 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2269 but this should be better: (remove comment if this works) */
2270 /* It's a Phi, we can write this into all graph_arrs with NULL */
2271 set_frag_value(block->attr.block.graph_arr, pos, res);
2273 res = get_r_value_internal(block, pos, mode);
2274 set_frag_value(block->attr.block.graph_arr, pos, res);
2278 } /* get_r_frag_value_internal */
2279 #endif /* PRECISE_EXC_CONTEXT */
2282 * Computes the predecessors for the real phi node, and then
2283 * allocates and returns this node. The routine called to allocate the
2284 * node might optimize it away and return a real value.
2285 * This function must be called with an in-array of proper size.
/* phi_merge (general construction variant): computes the predecessors of the
   Phi for local variable `pos` in `block`. Breaks recursion by installing a
   Phi0 placeholder first; handles the start block (calls the language's
   default_initialize_local_variable or falls back to tarval_bad), Bad
   predecessors, and precise exception contexts via fragment arrays. */
2288 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2289 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2292 /* If this block has no value at pos create a Phi0 and remember it
2293 in graph_arr to break recursions.
2294 Else we may not set graph_arr as there a later value is remembered. */
2296 if (!block->attr.block.graph_arr[pos]) {
2297 if (block == get_irg_start_block(current_ir_graph)) {
2298 /* Collapsing to Bad tarvals is no good idea.
2299 So we call a user-supplied routine here that deals with this case as
2300 appropriate for the given language. Sadly the only help we can give
2301 here is the position.
2303 Even if all variables are defined before use, it can happen that
2304 we get to the start block, if a Cond has been replaced by a tuple
2305 (bad, jmp). In this case we call the function needlessly, eventually
2306 generating a nonexistent error.
2307 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2310 if (default_initialize_local_variable) {
2311 ir_node *rem = get_cur_block();
/* the callback constructs into `block`, so make it current */
2313 set_cur_block(block);
2314 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
/* no callback: initialize with the bad tarval */
2318 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2319 /* We don't need to care about exception ops in the start block.
2320 There are none by definition. */
2321 return block->attr.block.graph_arr[pos];
/* non-start block: install the Phi0 placeholder */
2323 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2324 block->attr.block.graph_arr[pos] = phi0;
2325 #if PRECISE_EXC_CONTEXT
2326 if (get_opt_precise_exc_context()) {
2327 /* Set graph_arr for fragile ops. Also here we should break recursion.
2328 We could choose a cyclic path through an cfop. But the recursion would
2329 break at some point. */
2330 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2336 /* This loop goes to all predecessor blocks of the block the Phi node
2337 is in and there finds the operands of the Phi node by calling
2338 get_r_value_internal. */
2339 for (i = 1; i <= ins; ++i) {
2340 prevCfOp = skip_Proj(block->in[i]);
2342 if (is_Bad(prevCfOp)) {
2343 /* In case a Cond has been optimized we would get right to the start block
2344 with an invalid definition. */
2345 nin[i-1] = new_Bad();
2348 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2350 if (!is_Bad(prevBlock)) {
2351 #if PRECISE_EXC_CONTEXT
2352 if (get_opt_precise_exc_context() &&
2353 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2354 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2355 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2358 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2360 nin[i-1] = new_Bad();
2364 /* We want to pass the Phi0 node to the constructor: this finds additional
2365 optimization possibilities.
2366 The Phi0 node either is allocated in this function, or it comes from
2367 a former call to get_r_value_internal. In this case we may not yet
2368 exchange phi0, as this is done in mature_immBlock. */
2370 phi0_all = block->attr.block.graph_arr[pos];
/* only pass a genuine Phi0 of this block to the constructor */
2371 if (!((get_irn_op(phi0_all) == op_Phi) &&
2372 (get_irn_arity(phi0_all) == 0) &&
2373 (get_nodes_block(phi0_all) == block)))
2379 /* After collecting all predecessors into the array nin a new Phi node
2380 with these predecessors is created. This constructor contains an
2381 optimization: If all predecessors of the Phi node are identical it
2382 returns the only operand instead of a new Phi node. */
2383 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2385 /* In case we allocated a Phi0 node at the beginning of this procedure,
2386 we need to exchange this Phi0 with the real Phi. */
2388 exchange(phi0, res);
2389 block->attr.block.graph_arr[pos] = res;
2390 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2391 only an optimization. */
2398 * This function returns the last definition of a variable. In case
2399 * this variable was last defined in a previous block, Phi nodes are
2400 * inserted. If the part of the firm graph containing the definition
2401 * is not yet constructed, a dummy Phi node is returned.
/* Returns the last definition of value slot 'pos' (with mode 'mode') valid in
   'block', creating Phi/Phi0 nodes on demand.  See the 4-case analysis below. */
2404 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2406 /* There are 4 cases to treat.
2408 1. The block is not mature and we visit it the first time. We can not
2409 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2410 predecessors is returned. This node is added to the linked list (field
2411 "link") of the containing block to be completed when this block is
2412 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2415 2. The value is already known in this block, graph_arr[pos] is set and we
2416 visit the block the first time. We can return the value without
2417 creating any new nodes.
2419 3. The block is mature and we visit it the first time. A Phi node needs
2420 to be created (phi_merge). If the Phi is not needed, as all it's
2421 operands are the same value reaching the block through different
2422 paths, it's optimized away and the value itself is returned.
2424 4. The block is mature, and we visit it the second time. Now two
2425 subcases are possible:
2426 * The value was computed completely the last time we were here. This
2427 is the case if there is no loop. We can return the proper value.
2428 * The recursion that visited this node and set the flag did not
2429 return yet. We are computing a value in a loop and need to
2430 break the recursion. This case only happens if we visited
2431 the same block with phi_merge before, which inserted a Phi0.
2432 So we return the Phi0.
2435 /* case 4 -- already visited. */
2436 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2437 /* As phi_merge allocates a Phi0 this value is always defined. Here
2438 is the critical difference of the two algorithms. */
2439 assert(block->attr.block.graph_arr[pos]);
2440 return block->attr.block.graph_arr[pos];
/* The visited flag doubles as an "in progress" marker: it is set before
   recursing so loops terminate at case 4 above. */
2443 /* visited the first time */
2444 set_irn_visited(block, get_irg_visited(current_ir_graph));
2446 /* Get the local valid value */
2447 res = block->attr.block.graph_arr[pos];
2449 /* case 2 -- If the value is actually computed, return it. */
2450 if (res) { return res; };
2452 if (block->attr.block.matured) { /* case 3 */
2454 /* The Phi has the same amount of ins as the corresponding block. */
2455 int ins = get_irn_arity(block);
2457 NEW_ARR_A (ir_node *, nin, ins);
2459 /* Phi merge collects the predecessors and then creates a node. */
2460 res = phi_merge (block, pos, mode, nin, ins);
2462 } else { /* case 1 */
2463 /* The block is not mature, we don't know how many in's are needed. A Phi
2464 with zero predecessors is created. Such a Phi node is called Phi0
2465 node. The Phi0 is then added to the list of Phi0 nodes in this block
2466 to be matured by mature_immBlock later.
2467 The Phi0 has to remember the pos of it's internal value. If the real
2468 Phi is computed, pos is used to update the array with the local
2470 res = new_rd_Phi0 (current_ir_graph, block, mode);
2471 res->attr.phi0_pos = pos;
/* Thread the Phi0 onto the block's pending-Phi0 list traversed by
   mature_immBlock (classic list push via the link field). */
2472 res->link = block->link;
2476 /* If we get here, the frontend missed a use-before-definition error */
2479 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
/* NOTE(review): zero-initialization is apparently only defined for the mode
   codes in [irm_F, irm_P] -- confirm this range covers all scalar modes. */
2480 assert(mode->code >= irm_F && mode->code <= irm_P);
2481 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2482 get_mode_null(mode));
2485 /* The local valid value is available now. */
2486 block->attr.block.graph_arr[pos] = res;
2489 } /* get_r_value_internal */
2491 #endif /* USE_FAST_PHI_CONSTRUCTION */
2493 /* ************************************************************************** */
2496 * Finalize a Block node, when all control flows are known.
2497 * Acceptable parameters are only Block nodes.
/* Fixes the predecessor count of an immature Block, computes real Phi nodes
   for all Phi0s queued on the block's link list, marks the block matured and
   locally optimizes it.  Only Block nodes are acceptable. */
2500 mature_immBlock(ir_node *block) {
2505 assert(get_irn_opcode(block) == iro_Block);
2506 /* @@@ should be commented in
2507 assert (!get_Block_matured(block) && "Block already matured"); */
2509 if (!get_Block_matured(block)) {
/* Slot in[0] is not a control-flow predecessor, hence the -1
   (cf. the same convention in add_immBlock_pred). */
2510 ins = ARR_LEN (block->in)-1;
2511 /* Fix block parameters */
2512 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2514 /* An array for building the Phi nodes. */
2515 NEW_ARR_A (ir_node *, nin, ins);
2517 /* Traverse a chain of Phi nodes attached to this block and mature
2519 for (n = block->link; n; n = next) {
/* Fresh visited mark for each phi_merge run -- presumably needed by its
   recursive value lookup (see get_r_value_internal). */
2520 inc_irg_visited(current_ir_graph);
2522 exchange(n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2525 block->attr.block.matured = 1;
2527 /* Now, as the block is a finished firm node, we can optimize it.
2528 Since other nodes have been allocated since the block was created
2529 we can not free the node on the obstack. Therefore we have to call
2531 Unfortunately the optimization does not change a lot, as all allocated
2532 nodes refer to the unoptimized node.
2533 We can call _2, as global cse has no effect on blocks. */
2534 block = optimize_in_place_2(block);
2535 IRN_VRFY_IRG(block, current_ir_graph);
2537 } /* mature_immBlock */
2540 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2541 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2545 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2546 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
/** Constructs a Const node from a long value.  Like all Const nodes it is
    placed in the start block of the current graph. */
2550 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2551 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2552 } /* new_d_Const_long */
/** Constructs a Const node with an explicit source type 'tp'.
    Placed in the start block of the current graph. */
2555 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2556 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2557 } /* new_d_Const_type */
2561 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2562 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2566 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2567 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/** Constructs the default Proj (mode_X, number max_proj) of a Cond node.
    Also records the default projection number on the Cond itself. */
2571 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2573 assert(arg->op == op_Cond);
/* NOTE(review): 'fragmentary' presumably flags a Cond whose jump table is
   incomplete, i.e., has a default case -- confirm against cond_kind docs. */
2574 arg->attr.cond.kind = fragmentary;
2575 arg->attr.cond.default_proj = max_proj;
2576 res = new_Proj(arg, mode_X, max_proj);
2578 } /* new_d_defaultProj */
2581 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2582 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
/** Constructs a strict Conv node in the current block
    (final argument 1 = strict-conversion flag; cf. new_d_Conv passing 0). */
2586 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2587 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2588 } /* new_d_strictConv */
2591 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2592 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2596 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2597 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2606 * Allocate the frag array.
/**
 * Lazily allocates the fragile-op value array (*frag_store) used for the
 * precise-exception-context machinery.  Does nothing unless:
 *  - the precise-exc-context option is enabled,
 *  - the graph is still in the building phase,
 *  - res still has opcode 'op' (the constructor's optimize step may have
 *    turned it into something else), and
 *  - no array is present yet (CSE may have returned a node that has one).
 */
2608 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2609 if (get_opt_precise_exc_context()) {
2610 if ((current_ir_graph->phase_state == phase_building) &&
2611 (get_irn_op(res) == op) && /* Could be optimized away. */
2612 !*frag_store) /* Could be a cse where the arr is already set. */ {
2613 *frag_store = new_frag_arr(res);
2616 } /* allocate_frag_arr */
2619 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2621 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2);
2622 res->attr.except.pin_state = op_pin_state_pinned;
2623 #if PRECISE_EXC_CONTEXT
2624 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
/** Constructs a DivMod node in the current block.  The node is created
    pinned (presumably because division may raise an exception); with
    precise exception contexts a fragile-op value array is attached. */
2631 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2633 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2);
2634 res->attr.except.pin_state = op_pin_state_pinned;
2635 #if PRECISE_EXC_CONTEXT
2636 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2640 } /* new_d_DivMod */
2643 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2645 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2);
2646 res->attr.except.pin_state = op_pin_state_pinned;
2647 #if PRECISE_EXC_CONTEXT
2648 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2655 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
2657 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2);
2658 res->attr.except.pin_state = op_pin_state_pinned;
2659 #if PRECISE_EXC_CONTEXT
2660 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2679 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2680 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2684 new_d_Jmp(dbg_info *db) {
2685 return new_bd_Jmp(db, current_ir_graph->current_block);
2689 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2690 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2694 new_d_Cond(dbg_info *db, ir_node *c) {
2695 return new_bd_Cond(db, current_ir_graph->current_block, c);
2699 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2702 res = new_bd_Call(db, current_ir_graph->current_block,
2703 store, callee, arity, in, tp);
2704 #if PRECISE_EXC_CONTEXT
2705 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
/** Constructs a Return node in the current block with memory 'store' and
    'arity' result values from 'in'. */
2712 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2713 return new_bd_Return(db, current_ir_graph->current_block,
2715 } /* new_d_Return */
2718 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2720 res = new_bd_Load(db, current_ir_graph->current_block,
2722 #if PRECISE_EXC_CONTEXT
2723 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2730 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2732 res = new_bd_Store(db, current_ir_graph->current_block,
2734 #if PRECISE_EXC_CONTEXT
2735 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2742 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2743 where_alloc where) {
2745 res = new_bd_Alloc(db, current_ir_graph->current_block,
2746 store, size, alloc_type, where);
2747 #if PRECISE_EXC_CONTEXT
2748 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2755 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2756 ir_node *size, ir_type *free_type, where_alloc where) {
2757 return new_bd_Free(db, current_ir_graph->current_block,
2758 store, ptr, size, free_type, where);
/** Constructs a Sel node for entity 'ent' of a compound object without
    array indices (n_index = 0, index array NULL). */
2762 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2763 /* GL: objptr was called frame before. Frame was a bad choice for the name
2764 as the operand could as well be a pointer to a dynamic object. */
2766 return new_bd_Sel(db, current_ir_graph->current_block,
2767 store, objptr, 0, NULL, ent);
2768 } /* new_d_simpleSel */
2771 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2772 return new_bd_Sel(db, current_ir_graph->current_block,
2773 store, objptr, n_index, index, sel);
2777 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2778 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2780 } /* new_d_SymConst_type */
/** Constructs a SymConst with firm_unknown_type.  Like Const nodes,
    SymConsts live in the start block of the current graph. */
2783 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
2784 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2785 value, kind, firm_unknown_type);
2786 } /* new_d_SymConst */
2789 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2790 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2796 return _new_d_Bad();
2800 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2801 return new_bd_Confirm(db, current_ir_graph->current_block,
2803 } /* new_d_Confirm */
/** Constructs an Unknown node of mode m.  Note: takes neither dbg info nor
    a block argument -- delegates directly to new_bd_Unknown. */
2806 new_d_Unknown(ir_mode *m) {
2807 return new_bd_Unknown(m);
2808 } /* new_d_Unknown */
/** Constructs a CallBegin node for 'call' in the current block
    (used by the interprocedural view -- TODO confirm). */
2811 new_d_CallBegin(dbg_info *db, ir_node *call) {
2812 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2813 } /* new_d_CallBegin */
/** Constructs an EndReg node in the current block. */
2816 new_d_EndReg(dbg_info *db) {
2817 return new_bd_EndReg(db, current_ir_graph->current_block);
2818 } /* new_d_EndReg */
/** Constructs an EndExcept node in the current block. */
2821 new_d_EndExcept(dbg_info *db) {
2822 return new_bd_EndExcept(db, current_ir_graph->current_block);
2823 } /* new_d_EndExcept */
2826 new_d_Break(dbg_info *db) {
2827 return new_bd_Break(db, current_ir_graph->current_block);
2831 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2832 return new_bd_Filter(db, current_ir_graph->current_block,
2834 } /* new_d_Filter */
2837 (new_d_NoMem)(void) {
2838 return _new_d_NoMem();
2842 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2843 ir_node *ir_true, ir_mode *mode) {
2844 return new_bd_Mux(db, current_ir_graph->current_block,
2845 sel, ir_false, ir_true, mode);
2849 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2850 return new_bd_Psi(db, current_ir_graph->current_block,
2851 arity, conds, vals, mode);
2854 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2855 ir_node *dst, ir_node *src, ir_type *data_type) {
2857 res = new_bd_CopyB(db, current_ir_graph->current_block,
2858 store, dst, src, data_type);
2859 #if PRECISE_EXC_CONTEXT
2860 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
/** Constructs an InstOf node in the current block, testing objptr
    against 'type'. */
2866 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2867 return new_bd_InstOf(db, current_ir_graph->current_block,
2868 store, objptr, type);
2869 } /* new_d_InstOf */
2872 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2873 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2876 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2877 ir_node *idx, ir_node *lower, ir_node *upper) {
2879 res = new_bd_Bound(db, current_ir_graph->current_block,
2880 store, idx, lower, upper);
2881 #if PRECISE_EXC_CONTEXT
2882 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2888 new_d_Pin(dbg_info *db, ir_node *node) {
2889 return new_bd_Pin(db, current_ir_graph->current_block, node);
2892 /* ********************************************************************* */
2893 /* Comfortable interface with automatic Phi node construction. */
2894 /* (Uses also constructors of ?? interface, except new_Block. */
2895 /* ********************************************************************* */
2897 /* Block construction */
2898 /* immature Block without predecessors */
/* Constructs an immature Block without predecessors (in-array length -1 =>
   dynamic array, to be filled by add_immBlock_pred and fixed by
   mature_immBlock).  The new block becomes the current block. */
2899 ir_node *new_d_immBlock(dbg_info *db) {
2902 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2903 /* creates a new dynamic in-array as length of in is -1 */
2904 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
/* Subsequent node constructors append to this block. */
2905 current_ir_graph->current_block = res;
2906 res->attr.block.matured = 0;
2907 res->attr.block.dead = 0;
2908 res->attr.block.irg = current_ir_graph;
2909 res->attr.block.backedge = NULL;
2910 res->attr.block.in_cg = NULL;
2911 res->attr.block.cg_backedge = NULL;
2912 res->attr.block.extblk = NULL;
2913 res->attr.block.region = NULL;
2914 set_Block_block_visited(res, 0);
2916 /* Create and initialize array for Phi-node construction. */
2917 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2918 current_ir_graph->n_loc);
/* One slot per local value number; NULL means "no definition yet". */
2919 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2921 /* Immature block may not be optimized! */
2922 IRN_VRFY_IRG(res, current_ir_graph);
2925 } /* new_d_immBlock */
/* Convenience wrapper: immature block without debug info. */
2928 new_immBlock(void) {
2929 return new_d_immBlock(NULL);
2930 } /* new_immBlock */
2932 /* add an edge to a jmp/control flow node */
/* Appends the control-flow node 'jmp' as a new predecessor of an immature
   block.  Asserts if the block has already been matured. */
2934 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2935 if (block->attr.block.matured) {
2936 assert(0 && "Error: Block already matured!\n");
/* Number of CF predecessors so far; slot in[0] is not one of them. */
2938 int n = ARR_LEN(block->in) - 1;
2939 assert(jmp != NULL);
2940 ARR_APP1(ir_node *, block->in, jmp);
/* Notify observers (debugging/statistics hooks) of the new edge. */
2942 hook_set_irn_n(block, n, jmp, NULL);
2944 } /* add_immBlock_pred */
2946 /* changing the current block */
/* Makes 'target' the block that subsequent node constructors append to. */
2948 set_cur_block(ir_node *target) {
2949 current_ir_graph->current_block = target;
2950 } /* set_cur_block */
2952 /* ************************ */
2953 /* parameter administration */
2955 /* get a value from the parameter array from the current block by its index */
2957 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2958 ir_graph *irg = current_ir_graph;
2959 assert(get_irg_phase_state(irg) == phase_building);
2960 inc_irg_visited(irg);
2962 return get_r_value_internal(irg->current_block, pos + 1, mode);
2965 /* get a value from the parameter array from the current block by its index */
2967 get_value(int pos, ir_mode *mode) {
2968 return get_d_value(NULL, pos, mode);
2971 /* set a value at position pos in the parameter array from the current block */
2973 set_value(int pos, ir_node *value) {
2974 ir_graph *irg = current_ir_graph;
2975 assert(get_irg_phase_state(irg) == phase_building);
2976 assert(pos+1 < irg->n_loc);
2977 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2980 /* Find the value number for a node in the current block.*/
2982 find_value(ir_node *value) {
2984 ir_node *bl = current_ir_graph->current_block;
2986 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2987 if (bl->attr.block.graph_arr[i] == value)
2992 /* get the current store */
2995 ir_graph *irg = current_ir_graph;
2997 assert(get_irg_phase_state(irg) == phase_building);
2998 /* GL: one could call get_value instead */
2999 inc_irg_visited(irg);
3000 return get_r_value_internal(irg->current_block, 0, mode_M);
3003 /* set the current store: handles automatic Sync construction for Load nodes */
3005 set_store(ir_node *store) {
3006 ir_node *load, *pload, *pred, *in[2];
3008 assert(get_irg_phase_state(current_ir_graph) == phase_building);
3009 /* Beware: due to dead code elimination, a store might become a Bad node even in
3010 the construction phase. */
3011 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
3013 if (get_opt_auto_create_sync()) {
3014 /* handle non-volatile Load nodes by automatically creating Sync's */
3015 load = skip_Proj(store);
3016 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3017 pred = get_Load_mem(load);
3019 if (is_Sync(pred)) {
3020 /* a Load after a Sync: move it up */
3021 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3023 set_Load_mem(load, get_memop_mem(mem));
3024 add_Sync_pred(pred, store);
3027 pload = skip_Proj(pred);
3028 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3029 /* a Load after a Load: create a new Sync */
3030 set_Load_mem(load, get_Load_mem(pload));
3034 store = new_Sync(2, in);
3039 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3043 keep_alive(ir_node *ka) {
3044 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3047 /* --- Useful access routines --- */
3048 /* Returns the current block of the current graph. To set the current
3049 block use set_cur_block. */
3050 ir_node *get_cur_block(void) {
3051 return get_irg_current_block(current_ir_graph);
3052 } /* get_cur_block */
3054 /* Returns the frame type of the current graph */
3055 ir_type *get_cur_frame_type(void) {
3056 return get_irg_frame_type(current_ir_graph);
3057 } /* get_cur_frame_type */
3060 /* ********************************************************************* */
3063 /* call once for each run of the library */
3065 init_cons(uninitialized_local_variable_func_t *func) {
3066 default_initialize_local_variable = func;
/* Finalizes construction of every graph in the program and advances the
   program's phase state to phase_high. */
3070 irp_finalize_cons(void) {
3072 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3073 irg_finalize_cons(get_irp_irg(i));
3075 irp->phase_state = phase_high;
3076 } /* irp_finalize_cons */
3079 ir_node *new_Block(int arity, ir_node **in) {
3080 return new_d_Block(NULL, arity, in);
3082 ir_node *new_Start(void) {
3083 return new_d_Start(NULL);
3085 ir_node *new_End(void) {
3086 return new_d_End(NULL);
3088 ir_node *new_Jmp(void) {
3089 return new_d_Jmp(NULL);
3091 ir_node *new_IJmp(ir_node *tgt) {
3092 return new_d_IJmp(NULL, tgt);
3094 ir_node *new_Cond(ir_node *c) {
3095 return new_d_Cond(NULL, c);
3097 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3098 return new_d_Return(NULL, store, arity, in);
3100 ir_node *new_Const(ir_mode *mode, tarval *con) {
3101 return new_d_Const(NULL, mode, con);
3104 ir_node *new_Const_long(ir_mode *mode, long value) {
3105 return new_d_Const_long(NULL, mode, value);
3108 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3109 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3112 ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
3113 return new_d_SymConst_type(NULL, value, kind, type);
3115 ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
3116 return new_d_SymConst(NULL, value, kind);
3118 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3119 return new_d_simpleSel(NULL, store, objptr, ent);
3121 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3123 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3125 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3127 return new_d_Call(NULL, store, callee, arity, in, tp);
3129 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3130 return new_d_Add(NULL, op1, op2, mode);
3132 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3133 return new_d_Sub(NULL, op1, op2, mode);
3135 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3136 return new_d_Minus(NULL, op, mode);
3138 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3139 return new_d_Mul(NULL, op1, op2, mode);
3141 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2) {
3142 return new_d_Quot(NULL, memop, op1, op2);
3144 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2) {
3145 return new_d_DivMod(NULL, memop, op1, op2);
3147 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2) {
3148 return new_d_Div(NULL, memop, op1, op2);
3150 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2) {
3151 return new_d_Mod(NULL, memop, op1, op2);
3153 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3154 return new_d_Abs(NULL, op, mode);
3156 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3157 return new_d_And(NULL, op1, op2, mode);
3159 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3160 return new_d_Or(NULL, op1, op2, mode);
3162 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3163 return new_d_Eor(NULL, op1, op2, mode);
3165 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3166 return new_d_Not(NULL, op, mode);
3168 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3169 return new_d_Shl(NULL, op, k, mode);
3171 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3172 return new_d_Shr(NULL, op, k, mode);
3174 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3175 return new_d_Shrs(NULL, op, k, mode);
3177 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
3178 return new_d_Rot(NULL, op, k, mode);
3180 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3181 return new_d_Carry(NULL, op1, op2, mode);
3183 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3184 return new_d_Borrow(NULL, op1, op2, mode);
3186 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3187 return new_d_Cmp(NULL, op1, op2);
3189 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3190 return new_d_Conv(NULL, op, mode);
3192 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3193 return new_d_strictConv(NULL, op, mode);
3195 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3196 return new_d_Cast(NULL, op, to_tp);
3198 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3199 return new_d_Phi(NULL, arity, in, mode);
3201 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3202 return new_d_Load(NULL, store, addr, mode);
3204 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3205 return new_d_Store(NULL, store, addr, val);
3207 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3208 where_alloc where) {
3209 return new_d_Alloc(NULL, store, size, alloc_type, where);
3211 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3212 ir_type *free_type, where_alloc where) {
3213 return new_d_Free(NULL, store, ptr, size, free_type, where);
3215 ir_node *new_Sync(int arity, ir_node *in[]) {
3216 return new_d_Sync(NULL, arity, in);
3218 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3219 return new_d_Proj(NULL, arg, mode, proj);
3221 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3222 return new_d_defaultProj(NULL, arg, max_proj);
3224 ir_node *new_Tuple(int arity, ir_node **in) {
3225 return new_d_Tuple(NULL, arity, in);
3227 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3228 return new_d_Id(NULL, val, mode);
3230 ir_node *new_Bad(void) {
3233 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3234 return new_d_Confirm (NULL, val, bound, cmp);
3236 ir_node *new_Unknown(ir_mode *m) {
3237 return new_d_Unknown(m);
3239 ir_node *new_CallBegin(ir_node *callee) {
3240 return new_d_CallBegin(NULL, callee);
3242 ir_node *new_EndReg(void) {
3243 return new_d_EndReg(NULL);
3245 ir_node *new_EndExcept(void) {
3246 return new_d_EndExcept(NULL);
3248 ir_node *new_Break(void) {
3249 return new_d_Break(NULL);
3251 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3252 return new_d_Filter(NULL, arg, mode, proj);
3254 ir_node *new_NoMem(void) {
3255 return new_d_NoMem();
3257 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3258 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3260 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3261 return new_d_Psi(NULL, arity, conds, vals, mode);
3263 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3264 return new_d_CopyB(NULL, store, dst, src, data_type);
3266 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3267 return new_d_InstOf(NULL, store, objptr, ent);
3269 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3270 return new_d_Raise(NULL, store, obj);
3272 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3273 return new_d_Bound(NULL, store, idx, lower, upper);
3275 ir_node *new_Pin(ir_node *node) {
3276 return new_d_Pin(NULL, node);