 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
60 /* when we need verifying */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
68 * Language dependent variable initialization callback.
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
/* creates a bd constructor for a divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = state; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
/* creates a rd constructor for a divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
161 current_ir_graph = rem; \
/* creates a d constructor for a binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Cannot be used with automatic
183 * Phi node construction.
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.has_label = 0;
198 res->attr.block.irg = irg;
199 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
200 res->attr.block.in_cg = NULL;
201 res->attr.block.cg_backedge = NULL;
202 res->attr.block.extblk = NULL;
203 res->attr.block.mb_depth = 0;
204 res->attr.block.label = 0;
206 set_Block_matured(res, 1);
207 set_Block_block_visited(res, 0);
209 IRN_VRFY_IRG(res, irg);
214 new_bd_Start(dbg_info *db, ir_node *block) {
216 ir_graph *irg = current_ir_graph;
218 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
220 IRN_VRFY_IRG(res, irg);
225 new_bd_End(dbg_info *db, ir_node *block) {
227 ir_graph *irg = current_ir_graph;
229 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
231 IRN_VRFY_IRG(res, irg);
236 * Creates a Phi node with all predecessors. Calling this constructor
237 * is only allowed if the corresponding block is mature.
240 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
242 ir_graph *irg = current_ir_graph;
246 /* Don't assert that block matured: the use of this constructor is strongly
248 if ( get_Block_matured(block) )
249 assert( get_irn_arity(block) == arity );
251 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
253 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
255 for (i = arity-1; i >= 0; i--)
256 if (get_irn_op(in[i]) == op_Unknown) {
261 if (!has_unknown) res = optimize_node(res);
262 IRN_VRFY_IRG(res, irg);
264 /* Memory Phis in endless loops must be kept alive.
265 As we can't distinguish these easily we keep all of them alive. */
266 if ((res->op == op_Phi) && (mode == mode_M))
267 add_End_keepalive(get_irg_end(irg), res);
272 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
274 ir_graph *irg = current_ir_graph;
277 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
278 res->attr.con.tv = con;
279 set_Const_type(res, tp); /* Call method because of complex assertion. */
280 res = optimize_node (res);
281 assert(get_Const_type(res) == tp);
282 IRN_VRFY_IRG(res, irg);
285 } /* new_bd_Const_type */
288 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
289 ir_graph *irg = current_ir_graph;
291 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
295 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
296 ir_graph *irg = current_ir_graph;
298 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
299 } /* new_bd_Const_long */
302 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
304 ir_graph *irg = current_ir_graph;
306 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
307 res = optimize_node(res);
308 IRN_VRFY_IRG(res, irg);
313 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
316 ir_graph *irg = current_ir_graph;
318 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
319 res->attr.proj = proj;
322 assert(get_Proj_pred(res));
323 assert(get_nodes_block(get_Proj_pred(res)));
325 res = optimize_node(res);
327 IRN_VRFY_IRG(res, irg);
332 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
335 ir_graph *irg = current_ir_graph;
337 assert(arg->op == op_Cond);
338 arg->attr.cond.kind = fragmentary;
339 arg->attr.cond.default_proj = max_proj;
340 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
342 } /* new_bd_defaultProj */
345 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
347 ir_graph *irg = current_ir_graph;
349 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
350 res->attr.conv.strict = strict_flag;
351 res = optimize_node(res);
352 IRN_VRFY_IRG(res, irg);
357 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
359 ir_graph *irg = current_ir_graph;
361 assert(is_atomic_type(to_tp));
363 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
364 res->attr.cast.totype = to_tp;
365 res = optimize_node(res);
366 IRN_VRFY_IRG(res, irg);
371 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
373 ir_graph *irg = current_ir_graph;
375 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
376 res = optimize_node (res);
377 IRN_VRFY_IRG(res, irg);
402 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
405 ir_graph *irg = current_ir_graph;
408 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
409 res = optimize_node(res);
410 IRN_VRFY_IRG(res, irg);
415 new_bd_Jmp(dbg_info *db, ir_node *block) {
417 ir_graph *irg = current_ir_graph;
419 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
420 res = optimize_node (res);
421 IRN_VRFY_IRG(res, irg);
426 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
428 ir_graph *irg = current_ir_graph;
430 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
431 res = optimize_node (res);
432 IRN_VRFY_IRG(res, irg);
434 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
440 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
442 ir_graph *irg = current_ir_graph;
444 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
445 res->attr.cond.kind = dense;
446 res->attr.cond.default_proj = 0;
447 res->attr.cond.pred = COND_JMP_PRED_NONE;
448 res = optimize_node (res);
449 IRN_VRFY_IRG(res, irg);
454 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
455 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
459 ir_graph *irg = current_ir_graph;
462 NEW_ARR_A(ir_node *, r_in, r_arity);
465 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
467 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
469 assert((get_unknown_type() == tp) || is_Method_type(tp));
470 set_Call_type(res, tp);
471 res->attr.call.exc.pin_state = op_pin_state_pinned;
472 res->attr.call.callee_arr = NULL;
473 res = optimize_node(res);
474 IRN_VRFY_IRG(res, irg);
479 new_bd_Return(dbg_info *db, ir_node *block,
480 ir_node *store, int arity, ir_node **in) {
484 ir_graph *irg = current_ir_graph;
487 NEW_ARR_A (ir_node *, r_in, r_arity);
489 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
490 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
491 res = optimize_node(res);
492 IRN_VRFY_IRG(res, irg);
494 } /* new_bd_Return */
497 new_bd_Load(dbg_info *db, ir_node *block,
498 ir_node *store, ir_node *adr, ir_mode *mode) {
501 ir_graph *irg = current_ir_graph;
505 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
506 res->attr.load.exc.pin_state = op_pin_state_pinned;
507 res->attr.load.load_mode = mode;
508 res->attr.load.volatility = volatility_non_volatile;
509 res = optimize_node(res);
510 IRN_VRFY_IRG(res, irg);
515 new_bd_Store(dbg_info *db, ir_node *block,
516 ir_node *store, ir_node *adr, ir_node *val) {
519 ir_graph *irg = current_ir_graph;
524 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
525 res->attr.store.exc.pin_state = op_pin_state_pinned;
526 res->attr.store.volatility = volatility_non_volatile;
527 res = optimize_node(res);
528 IRN_VRFY_IRG(res, irg);
533 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
534 ir_node *size, ir_type *alloc_type, where_alloc where) {
537 ir_graph *irg = current_ir_graph;
541 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
542 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
543 res->attr.alloc.where = where;
544 res->attr.alloc.type = alloc_type;
545 res = optimize_node(res);
546 IRN_VRFY_IRG(res, irg);
551 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
552 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
555 ir_graph *irg = current_ir_graph;
560 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
561 res->attr.free.where = where;
562 res->attr.free.type = free_type;
563 res = optimize_node(res);
564 IRN_VRFY_IRG(res, irg);
569 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
570 int arity, ir_node **in, ir_entity *ent) {
574 ir_graph *irg = current_ir_graph;
575 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
577 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
580 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
583 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
585 * Sel's can select functions which should be of mode mode_P_code.
587 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
588 res->attr.sel.ent = ent;
589 res = optimize_node(res);
590 IRN_VRFY_IRG(res, irg);
595 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
596 symconst_kind symkind, ir_type *tp) {
599 ir_graph *irg = current_ir_graph;
601 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
602 mode = mode_P_data; /* FIXME: can be mode_P_code */
606 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
608 res->attr.symc.num = symkind;
609 res->attr.symc.sym = value;
610 res->attr.symc.tp = tp;
612 res = optimize_node(res);
613 IRN_VRFY_IRG(res, irg);
615 } /* new_bd_SymConst_type */
618 new_bd_Sync(dbg_info *db, ir_node *block) {
620 ir_graph *irg = current_ir_graph;
622 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
623 /* no need to call optimize node here, Sync are always created with no predecessors */
624 IRN_VRFY_IRG(res, irg);
629 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
630 ir_node *in[2], *res;
631 ir_graph *irg = current_ir_graph;
635 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
636 res->attr.confirm.cmp = cmp;
637 res = optimize_node(res);
638 IRN_VRFY_IRG(res, irg);
640 } /* new_bd_Confirm */
643 new_bd_Unknown(ir_mode *m) {
645 ir_graph *irg = current_ir_graph;
647 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
648 res = optimize_node(res);
650 } /* new_bd_Unknown */
653 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
656 ir_graph *irg = current_ir_graph;
658 in[0] = get_Call_ptr(call);
659 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
660 /* res->attr.callbegin.irg = irg; */
661 res->attr.callbegin.call = call;
662 res = optimize_node(res);
663 IRN_VRFY_IRG(res, irg);
665 } /* new_bd_CallBegin */
668 new_bd_EndReg(dbg_info *db, ir_node *block) {
670 ir_graph *irg = current_ir_graph;
672 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
673 set_irg_end_reg(irg, res);
674 IRN_VRFY_IRG(res, irg);
676 } /* new_bd_EndReg */
679 new_bd_EndExcept(dbg_info *db, ir_node *block) {
681 ir_graph *irg = current_ir_graph;
683 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
684 set_irg_end_except(irg, res);
685 IRN_VRFY_IRG (res, irg);
687 } /* new_bd_EndExcept */
690 new_bd_Break(dbg_info *db, ir_node *block) {
692 ir_graph *irg = current_ir_graph;
694 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
695 res = optimize_node(res);
696 IRN_VRFY_IRG(res, irg);
701 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
704 ir_graph *irg = current_ir_graph;
706 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
707 res->attr.filter.proj = proj;
708 res->attr.filter.in_cg = NULL;
709 res->attr.filter.backedge = NULL;
712 assert(get_Proj_pred(res));
713 assert(get_nodes_block(get_Proj_pred(res)));
715 res = optimize_node(res);
716 IRN_VRFY_IRG(res, irg);
718 } /* new_bd_Filter */
721 new_bd_Mux(dbg_info *db, ir_node *block,
722 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
725 ir_graph *irg = current_ir_graph;
731 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
734 res = optimize_node(res);
735 IRN_VRFY_IRG(res, irg);
740 new_bd_Psi(dbg_info *db, ir_node *block,
741 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
744 ir_graph *irg = current_ir_graph;
747 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
749 for (i = 0; i < arity; ++i) {
751 in[2 * i + 1] = vals[i];
755 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
758 res = optimize_node(res);
759 IRN_VRFY_IRG(res, irg);
764 new_bd_CopyB(dbg_info *db, ir_node *block,
765 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
768 ir_graph *irg = current_ir_graph;
774 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
776 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
777 res->attr.copyb.data_type = data_type;
778 res = optimize_node(res);
779 IRN_VRFY_IRG(res, irg);
784 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
785 ir_node *objptr, ir_type *type) {
788 ir_graph *irg = current_ir_graph;
792 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
793 res->attr.instof.type = type;
794 res = optimize_node(res);
795 IRN_VRFY_IRG(res, irg);
797 } /* new_bd_InstOf */
800 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
803 ir_graph *irg = current_ir_graph;
807 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
808 res = optimize_node(res);
809 IRN_VRFY_IRG(res, irg);
814 new_bd_Bound(dbg_info *db, ir_node *block,
815 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
818 ir_graph *irg = current_ir_graph;
824 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
825 res->attr.bound.exc.pin_state = op_pin_state_pinned;
826 res = optimize_node(res);
827 IRN_VRFY_IRG(res, irg);
832 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
834 ir_graph *irg = current_ir_graph;
836 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
837 res = optimize_node(res);
838 IRN_VRFY_IRG(res, irg);
843 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
844 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
846 ir_graph *irg = current_ir_graph;
849 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
850 res->attr.assem.pin_state = op_pin_state_pinned;
851 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
852 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
853 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
854 res->attr.assem.asm_text = asm_text;
856 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
857 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
859 res = optimize_node(res);
860 IRN_VRFY_IRG(res, irg);
864 /* --------------------------------------------- */
865 /* private interfaces, for professional use only */
866 /* --------------------------------------------- */
868 /* Constructs a Block with a fixed number of predecessors.
   Does not set current_block. Cannot be used with automatic
870 Phi node construction. */
872 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
873 ir_graph *rem = current_ir_graph;
876 current_ir_graph = irg;
877 res = new_bd_Block(db, arity, in);
878 current_ir_graph = rem;
884 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
885 ir_graph *rem = current_ir_graph;
888 current_ir_graph = irg;
889 res = new_bd_Start(db, block);
890 current_ir_graph = rem;
896 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
898 ir_graph *rem = current_ir_graph;
900 current_ir_graph = irg;
901 res = new_bd_End(db, block);
902 current_ir_graph = rem;
907 /* Creates a Phi node with all predecessors. Calling this constructor
908 is only allowed if the corresponding block is mature. */
910 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
912 ir_graph *rem = current_ir_graph;
914 current_ir_graph = irg;
915 res = new_bd_Phi(db, block,arity, in, mode);
916 current_ir_graph = rem;
922 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
924 ir_graph *rem = current_ir_graph;
926 current_ir_graph = irg;
927 res = new_bd_Const_type(db, block, mode, con, tp);
928 current_ir_graph = rem;
931 } /* new_rd_Const_type */
934 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
936 ir_graph *rem = current_ir_graph;
938 current_ir_graph = irg;
939 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
940 current_ir_graph = rem;
946 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
947 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
948 } /* new_rd_Const_long */
951 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
953 ir_graph *rem = current_ir_graph;
955 current_ir_graph = irg;
956 res = new_bd_Id(db, block, val, mode);
957 current_ir_graph = rem;
963 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
966 ir_graph *rem = current_ir_graph;
968 current_ir_graph = irg;
969 res = new_bd_Proj(db, block, arg, mode, proj);
970 current_ir_graph = rem;
976 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
979 ir_graph *rem = current_ir_graph;
981 current_ir_graph = irg;
982 res = new_bd_defaultProj(db, block, arg, max_proj);
983 current_ir_graph = rem;
986 } /* new_rd_defaultProj */
989 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
991 ir_graph *rem = current_ir_graph;
993 current_ir_graph = irg;
994 res = new_bd_Conv(db, block, op, mode, 0);
995 current_ir_graph = rem;
1001 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1003 ir_graph *rem = current_ir_graph;
1005 current_ir_graph = irg;
1006 res = new_bd_Cast(db, block, op, to_tp);
1007 current_ir_graph = rem;
1013 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1015 ir_graph *rem = current_ir_graph;
1017 current_ir_graph = irg;
1018 res = new_bd_Tuple(db, block, arity, in);
1019 current_ir_graph = rem;
1022 } /* new_rd_Tuple */
1029 NEW_RD_DIVOP(DivMod)
1042 NEW_RD_BINOP(Borrow)
1045 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1046 ir_node *op1, ir_node *op2) {
1048 ir_graph *rem = current_ir_graph;
1050 current_ir_graph = irg;
1051 res = new_bd_Cmp(db, block, op1, op2);
1052 current_ir_graph = rem;
1058 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1060 ir_graph *rem = current_ir_graph;
1062 current_ir_graph = irg;
1063 res = new_bd_Jmp(db, block);
1064 current_ir_graph = rem;
1070 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1072 ir_graph *rem = current_ir_graph;
1074 current_ir_graph = irg;
1075 res = new_bd_IJmp(db, block, tgt);
1076 current_ir_graph = rem;
1082 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1084 ir_graph *rem = current_ir_graph;
1086 current_ir_graph = irg;
1087 res = new_bd_Cond(db, block, c);
1088 current_ir_graph = rem;
1094 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1095 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1097 ir_graph *rem = current_ir_graph;
1099 current_ir_graph = irg;
1100 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1101 current_ir_graph = rem;
1107 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1108 ir_node *store, int arity, ir_node **in) {
1110 ir_graph *rem = current_ir_graph;
1112 current_ir_graph = irg;
1113 res = new_bd_Return(db, block, store, arity, in);
1114 current_ir_graph = rem;
1117 } /* new_rd_Return */
1120 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1121 ir_node *store, ir_node *adr, ir_mode *mode) {
1123 ir_graph *rem = current_ir_graph;
1125 current_ir_graph = irg;
1126 res = new_bd_Load(db, block, store, adr, mode);
1127 current_ir_graph = rem;
1133 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1134 ir_node *store, ir_node *adr, ir_node *val) {
1136 ir_graph *rem = current_ir_graph;
1138 current_ir_graph = irg;
1139 res = new_bd_Store(db, block, store, adr, val);
1140 current_ir_graph = rem;
1143 } /* new_rd_Store */
1146 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1147 ir_node *size, ir_type *alloc_type, where_alloc where) {
1149 ir_graph *rem = current_ir_graph;
1151 current_ir_graph = irg;
1152 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1153 current_ir_graph = rem;
1156 } /* new_rd_Alloc */
1159 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1160 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1162 ir_graph *rem = current_ir_graph;
1164 current_ir_graph = irg;
1165 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1166 current_ir_graph = rem;
1172 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1173 ir_node *store, ir_node *objptr, ir_entity *ent) {
1175 ir_graph *rem = current_ir_graph;
1177 current_ir_graph = irg;
1178 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1179 current_ir_graph = rem;
1182 } /* new_rd_simpleSel */
1185 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1186 int arity, ir_node **in, ir_entity *ent) {
1188 ir_graph *rem = current_ir_graph;
1190 current_ir_graph = irg;
1191 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1192 current_ir_graph = rem;
1198 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1199 symconst_kind symkind, ir_type *tp) {
1201 ir_graph *rem = current_ir_graph;
1203 current_ir_graph = irg;
1204 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1205 current_ir_graph = rem;
1208 } /* new_rd_SymConst_type */
1211 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1212 symconst_kind symkind) {
1213 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1215 } /* new_rd_SymConst */
1217 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1218 symconst_symbol sym;
1219 sym.entity_p = symbol;
1220 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1221 } /* new_rd_SymConst_addr_ent */
1223 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1224 symconst_symbol sym;
1225 sym.entity_p = symbol;
1226 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1227 } /* new_rd_SymConst_ofs_ent */
1229 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1230 symconst_symbol sym;
1231 sym.ident_p = symbol;
1232 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1233 } /* new_rd_SymConst_addr_name */
1235 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1236 symconst_symbol sym;
1237 sym.type_p = symbol;
1238 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1239 } /* new_rd_SymConst_type_tag */
1241 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1242 symconst_symbol sym;
1243 sym.type_p = symbol;
1244 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1245 } /* new_rd_SymConst_size */
1247 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1248 symconst_symbol sym;
1249 sym.type_p = symbol;
1250 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1251 } /* new_rd_SymConst_align */
1254 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1256 ir_graph *rem = current_ir_graph;
1259 current_ir_graph = irg;
1260 res = new_bd_Sync(db, block);
1261 current_ir_graph = rem;
1263 for (i = 0; i < arity; ++i)
1264 add_Sync_pred(res, in[i]);
1270 new_rd_Bad(ir_graph *irg) {
1271 return get_irg_bad(irg);
1275 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1277 ir_graph *rem = current_ir_graph;
1279 current_ir_graph = irg;
1280 res = new_bd_Confirm(db, block, val, bound, cmp);
1281 current_ir_graph = rem;
1284 } /* new_rd_Confirm */
1287 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1289 ir_graph *rem = current_ir_graph;
1291 current_ir_graph = irg;
1292 res = new_bd_Unknown(m);
1293 current_ir_graph = rem;
1296 } /* new_rd_Unknown */
1299 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1301 ir_graph *rem = current_ir_graph;
1303 current_ir_graph = irg;
1304 res = new_bd_CallBegin(db, block, call);
1305 current_ir_graph = rem;
1308 } /* new_rd_CallBegin */
1311 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1314 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1315 set_irg_end_reg(irg, res);
1316 IRN_VRFY_IRG(res, irg);
1318 } /* new_rd_EndReg */
1321 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1324 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1325 set_irg_end_except(irg, res);
1326 IRN_VRFY_IRG (res, irg);
1328 } /* new_rd_EndExcept */
1331 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1333 ir_graph *rem = current_ir_graph;
1335 current_ir_graph = irg;
1336 res = new_bd_Break(db, block);
1337 current_ir_graph = rem;
1340 } /* new_rd_Break */
1343 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1346 ir_graph *rem = current_ir_graph;
1348 current_ir_graph = irg;
1349 res = new_bd_Filter(db, block, arg, mode, proj);
1350 current_ir_graph = rem;
1353 } /* new_rd_Filter */
1356 new_rd_NoMem(ir_graph *irg) {
1357 return get_irg_no_mem(irg);
1358 } /* new_rd_NoMem */
1361 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1362 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1364 ir_graph *rem = current_ir_graph;
1366 current_ir_graph = irg;
1367 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1368 current_ir_graph = rem;
1374 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1375 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1377 ir_graph *rem = current_ir_graph;
1379 current_ir_graph = irg;
1380 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1381 current_ir_graph = rem;
1386 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1387 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1389 ir_graph *rem = current_ir_graph;
1391 current_ir_graph = irg;
1392 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1393 current_ir_graph = rem;
1396 } /* new_rd_CopyB */
1399 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1400 ir_node *objptr, ir_type *type) {
1402 ir_graph *rem = current_ir_graph;
1404 current_ir_graph = irg;
1405 res = new_bd_InstOf(db, block, store, objptr, type);
1406 current_ir_graph = rem;
1409 } /* new_rd_InstOf */
1412 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1414 ir_graph *rem = current_ir_graph;
1416 current_ir_graph = irg;
1417 res = new_bd_Raise(db, block, store, obj);
1418 current_ir_graph = rem;
1421 } /* new_rd_Raise */
1423 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1424 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1426 ir_graph *rem = current_ir_graph;
1428 current_ir_graph = irg;
1429 res = new_bd_Bound(db, block, store, idx, lower, upper);
1430 current_ir_graph = rem;
1433 } /* new_rd_Bound */
1435 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1437 ir_graph *rem = current_ir_graph;
1439 current_ir_graph = irg;
1440 res = new_bd_Pin(db, block, node);
1441 current_ir_graph = rem;
1446 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1447 int arity, ir_node *in[], ir_asm_constraint *inputs,
1448 int n_outs, ir_asm_constraint *outputs,
1449 int n_clobber, ident *clobber[], ident *asm_text) {
1451 ir_graph *rem = current_ir_graph;
1453 current_ir_graph = irg;
1454 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1455 current_ir_graph = rem;
1461 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1462 return new_rd_Block(NULL, irg, arity, in);
1464 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1465 return new_rd_Start(NULL, irg, block);
1467 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1468 return new_rd_End(NULL, irg, block);
1470 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1471 return new_rd_Jmp(NULL, irg, block);
1473 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1474 return new_rd_IJmp(NULL, irg, block, tgt);
1476 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1477 return new_rd_Cond(NULL, irg, block, c);
1479 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1480 ir_node *store, int arity, ir_node **in) {
1481 return new_rd_Return(NULL, irg, block, store, arity, in);
1483 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1484 ir_mode *mode, tarval *con) {
1485 return new_rd_Const(NULL, irg, block, mode, con);
1487 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1488 ir_mode *mode, long value) {
1489 return new_rd_Const_long(NULL, irg, block, mode, value);
1491 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1492 ir_mode *mode, tarval *con, ir_type *tp) {
1493 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1495 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1496 symconst_symbol value, symconst_kind symkind) {
1497 return new_rd_SymConst(NULL, irg, block, value, symkind);
1499 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1500 ir_node *objptr, ir_entity *ent) {
1501 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1503 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1504 ir_node *objptr, int n_index, ir_node **index,
1506 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1508 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1509 ir_node *callee, int arity, ir_node **in,
1511 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1513 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1514 ir_node *op1, ir_node *op2, ir_mode *mode) {
1515 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1517 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1518 ir_node *op1, ir_node *op2, ir_mode *mode) {
1519 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1521 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1522 ir_node *op, ir_mode *mode) {
1523 return new_rd_Minus(NULL, irg, block, op, mode);
1525 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1526 ir_node *op1, ir_node *op2, ir_mode *mode) {
1527 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1529 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1530 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1531 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1533 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1534 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1535 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1537 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1538 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1539 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1541 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1542 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1543 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1545 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1546 ir_node *op, ir_mode *mode) {
1547 return new_rd_Abs(NULL, irg, block, op, mode);
1549 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1550 ir_node *op1, ir_node *op2, ir_mode *mode) {
1551 return new_rd_And(NULL, irg, block, op1, op2, mode);
1553 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1554 ir_node *op1, ir_node *op2, ir_mode *mode) {
1555 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1557 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1558 ir_node *op1, ir_node *op2, ir_mode *mode) {
1559 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1561 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1562 ir_node *op, ir_mode *mode) {
1563 return new_rd_Not(NULL, irg, block, op, mode);
1565 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1566 ir_node *op, ir_node *k, ir_mode *mode) {
1567 return new_rd_Shl(NULL, irg, block, op, k, mode);
1569 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1570 ir_node *op, ir_node *k, ir_mode *mode) {
1571 return new_rd_Shr(NULL, irg, block, op, k, mode);
1573 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1574 ir_node *op, ir_node *k, ir_mode *mode) {
1575 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1577 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1578 ir_node *op, ir_node *k, ir_mode *mode) {
1579 return new_rd_Rot(NULL, irg, block, op, k, mode);
1581 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1582 ir_node *op, ir_node *k, ir_mode *mode) {
1583 return new_rd_Carry(NULL, irg, block, op, k, mode);
1585 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1586 ir_node *op, ir_node *k, ir_mode *mode) {
1587 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1589 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1590 ir_node *op1, ir_node *op2) {
1591 return new_rd_Cmp(NULL, irg, block, op1, op2);
1593 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1594 ir_node *op, ir_mode *mode) {
1595 return new_rd_Conv(NULL, irg, block, op, mode);
1597 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1598 return new_rd_Cast(NULL, irg, block, op, to_tp);
1600 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1601 ir_node **in, ir_mode *mode) {
1602 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1604 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1605 ir_node *store, ir_node *adr, ir_mode *mode) {
1606 return new_rd_Load(NULL, irg, block, store, adr, mode);
1608 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1609 ir_node *store, ir_node *adr, ir_node *val) {
1610 return new_rd_Store(NULL, irg, block, store, adr, val);
1612 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1613 ir_node *size, ir_type *alloc_type, where_alloc where) {
1614 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1616 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1617 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1618 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1620 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1621 return new_rd_Sync(NULL, irg, block, arity, in);
1623 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1624 ir_mode *mode, long proj) {
1625 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1627 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1629 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1631 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1632 int arity, ir_node **in) {
1633 return new_rd_Tuple(NULL, irg, block, arity, in );
1635 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1636 ir_node *val, ir_mode *mode) {
1637 return new_rd_Id(NULL, irg, block, val, mode);
1639 ir_node *new_r_Bad(ir_graph *irg) {
1640 return new_rd_Bad(irg);
1642 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1643 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1645 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1646 return new_rd_Unknown(irg, m);
1648 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1649 return new_rd_CallBegin(NULL, irg, block, callee);
1651 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1652 return new_rd_EndReg(NULL, irg, block);
1654 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1655 return new_rd_EndExcept(NULL, irg, block);
1657 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1658 return new_rd_Break(NULL, irg, block);
1660 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1661 ir_mode *mode, long proj) {
1662 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1664 ir_node *new_r_NoMem(ir_graph *irg) {
1665 return new_rd_NoMem(irg);
1667 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1668 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1669 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1671 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1672 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1673 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1675 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1676 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1677 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1679 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1681 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1683 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1684 ir_node *store, ir_node *obj) {
1685 return new_rd_Raise(NULL, irg, block, store, obj);
1687 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1688 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1689 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1691 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1692 return new_rd_Pin(NULL, irg, block, node);
1694 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1695 int arity, ir_node *in[], ir_asm_constraint *inputs,
1696 int n_outs, ir_asm_constraint *outputs,
1697 int n_clobber, ident *clobber[], ident *asm_text) {
1698 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1701 /** ********************/
1702 /** public interfaces */
1703 /** construction tools */
1707 * - create a new Start node in the current block
1709 * @return s - pointer to the created Start node
1714 new_d_Start(dbg_info *db) {
1717 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1718 op_Start, mode_T, 0, NULL);
1720 res = optimize_node(res);
1721 IRN_VRFY_IRG(res, current_ir_graph);
1726 new_d_End(dbg_info *db) {
1728 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1729 op_End, mode_X, -1, NULL);
1730 res = optimize_node(res);
1731 IRN_VRFY_IRG(res, current_ir_graph);
1736 /* Constructs a Block with a fixed number of predecessors.
1737 Does set current_block. Can be used with automatic Phi
1738 node construction. */
1740 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1743 int has_unknown = 0;
1745 res = new_bd_Block(db, arity, in);
1747 /* Create and initialize array for Phi-node construction. */
1748 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1749 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1750 current_ir_graph->n_loc);
1751 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1754 for (i = arity-1; i >= 0; i--)
1755 if (get_irn_op(in[i]) == op_Unknown) {
1760 if (!has_unknown) res = optimize_node(res);
1761 current_ir_graph->current_block = res;
1763 IRN_VRFY_IRG(res, current_ir_graph);
1768 /* ***********************************************************************/
1769 /* Methods necessary for automatic Phi node creation */
1771 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1772 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1773 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1774 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1776 Call Graph: ( A ---> B == A "calls" B)
1778 get_value mature_immBlock
1786 get_r_value_internal |
1790 new_rd_Phi0 new_rd_Phi_in
1792 * *************************************************************************** */
1794 /** Creates a Phi node with 0 predecessors. */
1795 static INLINE ir_node *
1796 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1799 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1800 IRN_VRFY_IRG(res, irg);
1804 /* There are two implementations of the Phi node construction. The first
1805 is faster, but does not work for blocks with more than 2 predecessors.
1806 The second works always but is slower and causes more unnecessary Phi
1808 Select the implementations by the following preprocessor flag set in
1810 #if USE_FAST_PHI_CONSTRUCTION
1812 /* This is a stack used for allocating and deallocating nodes in
1813 new_rd_Phi_in. The original implementation used the obstack
1814 to model this stack, now it is explicit. This reduces side effects.
1816 #if USE_EXPLICIT_PHI_IN_STACK
1818 new_Phi_in_stack(void) {
1821 res = (Phi_in_stack *) xmalloc(sizeof(Phi_in_stack));
1823 res->stack = NEW_ARR_F(ir_node *, 0);
1827 } /* new_Phi_in_stack */
1830 free_Phi_in_stack(Phi_in_stack *s) {
1831 DEL_ARR_F(s->stack);
1833 } /* free_Phi_in_stack */
1836 free_to_Phi_in_stack(ir_node *phi) {
1837 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1838 current_ir_graph->Phi_in_stack->pos)
1839 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1841 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1843 (current_ir_graph->Phi_in_stack->pos)++;
1844 } /* free_to_Phi_in_stack */
1846 static INLINE ir_node *
1847 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1848 int arity, ir_node **in) {
1850 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1851 int pos = current_ir_graph->Phi_in_stack->pos;
1855 /* We need to allocate a new node */
1856 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1857 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1859 /* reuse the old node and initialize it again. */
1862 assert(res->kind == k_ir_node);
1863 assert(res->op == op_Phi);
1868 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1869 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
1871 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1873 (current_ir_graph->Phi_in_stack->pos)--;
1876 } /* alloc_or_pop_from_Phi_in_stack */
1877 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1880 * Creates a Phi node with a given, fixed array **in of predecessors.
1881 * If the Phi node is unnecessary, as the same value reaches the block
1882 * through all control flow paths, it is eliminated and the value
1883 * returned directly. This constructor is only intended for use in
1884 * the automatic Phi node generation triggered by get_value or mature.
1885 * The implementation is quite tricky and depends on the fact, that
1886 * the nodes are allocated on a stack:
1887 * The in array contains predecessors and NULLs. The NULLs appear,
1888 * if get_r_value_internal, that computed the predecessors, reached
1889 * the same block on two paths. In this case the same value reaches
1890 * this block on both paths, there is no definition in between. We need
1891 * not allocate a Phi where these path's merge, but we have to communicate
1892 * this fact to the caller. This happens by returning a pointer to the
1893 * node the caller _will_ allocate. (Yes, we predict the address. We can
1894 * do so because the nodes are allocated on the obstack.) The caller then
1895 * finds a pointer to itself and, when this routine is called again,
1896 * eliminates itself.
1898 static INLINE ir_node *
1899 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins) {
1901 ir_node *res, *known;
1903 /* Allocate a new node on the obstack. This can return a node to
1904 which some of the pointers in the in-array already point.
1905 Attention: the constructor copies the in array, i.e., the later
1906 changes to the array in this routine do not affect the
1907 constructed node! If the in array contains NULLs, there will be
1908 missing predecessors in the returned node. Is this a possible
1909 internal state of the Phi node generation? */
1910 #if USE_EXPLICIT_PHI_IN_STACK
1911 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1913 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1914 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1917 /* The in-array can contain NULLs. These were returned by
1918 get_r_value_internal if it reached the same block/definition on a
1919 second path. The NULLs are replaced by the node itself to
1920 simplify the test in the next loop. */
1921 for (i = 0; i < ins; ++i) {
1926 /* This loop checks whether the Phi has more than one predecessor.
1927 If so, it is a real Phi node and we break the loop. Else the Phi
1928 node merges the same definition on several paths and therefore is
1930 for (i = 0; i < ins; ++i) {
1931 if (in[i] == res || in[i] == known)
1940 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1942 #if USE_EXPLICIT_PHI_IN_STACK
1943 free_to_Phi_in_stack(res);
1945 edges_node_deleted(res, current_ir_graph);
1946 obstack_free(current_ir_graph->obst, res);
1950 res = optimize_node (res);
1951 IRN_VRFY_IRG(res, irg);
1954 /* return the pointer to the Phi node. This node might be deallocated! */
1956 } /* new_rd_Phi_in */
1959 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1962 * Allocates and returns this node. The routine called to allocate the
1963 * node might optimize it away and return a real value, or even a pointer
1964 * to a deallocated Phi node on top of the obstack!
1965 * This function is called with an in-array of proper size.
1968 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
1969 ir_node *prevBlock, *res;
1972 /* This loop goes to all predecessor blocks of the block the Phi node is in
1973 and there finds the operands of the Phi node by calling
1974 get_r_value_internal. */
1975 for (i = 1; i <= ins; ++i) {
1976 assert (block->in[i]);
1977 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1979 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1982 /* After collecting all predecessors into the array nin a new Phi node
1983 with these predecessors is created. This constructor contains an
1984 optimization: If all predecessors of the Phi node are identical it
1985 returns the only operand instead of a new Phi node. If the value
1986 passes two different control flow edges without being defined, and
1987 this is the second path treated, a pointer to the node that will be
1988 allocated for the first path (recursion) is returned. We already
1989 know the address of this node, as it is the next node to be allocated
1990 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1991 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1993 /* Now we now the value for "pos" and can enter it in the array with
1994 all known local variables. Attention: this might be a pointer to
1995 a node, that later will be allocated!!! See new_rd_Phi_in().
1996 If this is called in mature, after some set_value() in the same block,
1997 the proper value must not be overwritten:
1999 get_value (makes Phi0, put's it into graph_arr)
2000 set_value (overwrites Phi0 in graph_arr)
2001 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2004 if (!block->attr.block.graph_arr[pos]) {
2005 block->attr.block.graph_arr[pos] = res;
2007 /* printf(" value already computed by %s\n",
2008 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2015 * This function returns the last definition of a variable. In case
2016 * this variable was last defined in a previous block, Phi nodes are
2017 * inserted. If the part of the firm graph containing the definition
2018 * is not yet constructed, a dummy Phi node is returned.
2021 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2024 /* There are 4 cases to treat.
2026 1. The block is not mature and we visit it the first time. We can not
2027 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2028 predecessors is returned. This node is added to the linked list (field
2029 "link") of the containing block to be completed when this block is
2030 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2033 2. The value is already known in this block, graph_arr[pos] is set and we
2034 visit the block the first time. We can return the value without
2035 creating any new nodes.
2037 3. The block is mature and we visit it the first time. A Phi node needs
2038 to be created (phi_merge). If the Phi is not needed, as all it's
2039 operands are the same value reaching the block through different
2040 paths, it's optimized away and the value itself is returned.
2042 4. The block is mature, and we visit it the second time. Now two
2043 subcases are possible:
2044 * The value was computed completely the last time we were here. This
2045 is the case if there is no loop. We can return the proper value.
2046 * The recursion that visited this node and set the flag did not
2047 return yet. We are computing a value in a loop and need to
2048 break the recursion without knowing the result yet.
2049 @@@ strange case. Straight forward we would create a Phi before
2050 starting the computation of it's predecessors. In this case we will
2051 find a Phi here in any case. The problem is that this implementation
2052 only creates a Phi after computing the predecessors, so that it is
2053 hard to compute self references of this Phi. @@@
2054 There is no simple check for the second subcase. Therefore we check
2055 for a second visit and treat all such cases as the second subcase.
2056 Anyways, the basic situation is the same: we reached a block
2057 on two paths without finding a definition of the value: No Phi
2058 nodes are needed on both paths.
2059 We return this information "Two paths, no Phi needed" by a very tricky
2060 implementation that relies on the fact that an obstack is a stack and
2061 will return a node with the same address on different allocations.
2062 Look also at phi_merge and new_rd_phi_in to understand this.
2063 @@@ Unfortunately this does not work, see testprogram
2064 three_cfpred_example.
2068 /* case 4 -- already visited. */
2069 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2071 /* visited the first time */
2072 set_irn_visited(block, get_irg_visited(current_ir_graph));
2074 /* Get the local valid value */
2075 res = block->attr.block.graph_arr[pos];
2077 /* case 2 -- If the value is actually computed, return it. */
2078 if (res) return res;
2080 if (block->attr.block.is_matured) { /* case 3 */
2082 /* The Phi has the same amount of ins as the corresponding block. */
2083 int ins = get_irn_arity(block);
2085 NEW_ARR_A(ir_node *, nin, ins);
2087 /* Phi merge collects the predecessors and then creates a node. */
2088 res = phi_merge(block, pos, mode, nin, ins);
2090 } else { /* case 1 */
2091 /* The block is not mature, we don't know how many in's are needed. A Phi
2092 with zero predecessors is created. Such a Phi node is called Phi0
2093 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2094 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2096 The Phi0 has to remember the pos of it's internal value. If the real
2097 Phi is computed, pos is used to update the array with the local
2100 res = new_rd_Phi0(current_ir_graph, block, mode);
2101 res->attr.phi0_pos = pos;
2102 res->link = block->link;
2106 /* If we get here, the frontend missed a use-before-definition error */
2109 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2110 assert(mode->code >= irm_F && mode->code <= irm_P);
2111 res = new_rd_Const(NULL, current_ir_graph, block, mode, tarval_mode_null[mode->code]);
2114 /* The local valid value is available now. */
2115 block->attr.block.graph_arr[pos] = res;
2118 } /* get_r_value_internal */
2123 it starts the recursion. This causes an Id at the entry of
2124 every block that has no definition of the value! **/
2126 #if USE_EXPLICIT_PHI_IN_STACK
2128 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2129 void free_Phi_in_stack(Phi_in_stack *s) {}
2132 static INLINE ir_node *
2133 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2134 ir_node **in, int ins, ir_node *phi0) {
2136 ir_node *res, *known;
2138 /* Allocate a new node on the obstack. The allocation copies the in
2140 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2141 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2143 /* This loop checks whether the Phi has more than one predecessor.
2144 If so, it is a real Phi node and we break the loop. Else the
2145 Phi node merges the same definition on several paths and therefore
2146 is not needed. Don't consider Bad nodes! */
2148 for (i=0; i < ins; ++i)
2152 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2154 /* Optimize self referencing Phis: We can't detect them yet properly, as
2155 they still refer to the Phi0 they will replace. So replace right now. */
2156 if (phi0 && in[i] == phi0) in[i] = res;
2158 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2166 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2169 edges_node_deleted(res, current_ir_graph);
2170 obstack_free (current_ir_graph->obst, res);
2171 if (is_Phi(known)) {
2172 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2173 order, an enclosing Phi know may get superfluous. */
2174 res = optimize_in_place_2(known);
2176 exchange(known, res);
2182 /* A undefined value, e.g., in unreachable code. */
2186 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2187 IRN_VRFY_IRG(res, irg);
2188 /* Memory Phis in endless loops must be kept alive.
2189 As we can't distinguish these easily we keep all of them alive. */
2190 if ((res->op == op_Phi) && (mode == mode_M))
2191 add_End_keepalive(get_irg_end(irg), res);
2195 } /* new_rd_Phi_in */
2198 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2200 #if PRECISE_EXC_CONTEXT
2202 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2205 * Construct a new frag_array for node n.
2206 * Copy the content from the current graph_arr of the corresponding block:
2207 * this is the current state.
2208 * Set ProjM(n) as current memory state.
2209 * Further the last entry in frag_arr of current block points to n. This
2210 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2212 static INLINE ir_node **new_frag_arr(ir_node *n) {
2216 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2217 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2218 sizeof(ir_node *)*current_ir_graph->n_loc);
2220 /* turn off optimization before allocating Proj nodes, as res isn't
2222 opt = get_opt_optimize(); set_optimize(0);
2223 /* Here we rely on the fact that all frag ops have Memory as first result! */
2224 if (get_irn_op(n) == op_Call)
2225 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2226 else if (get_irn_op(n) == op_CopyB)
2227 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2229 assert((pn_Quot_M == pn_DivMod_M) &&
2230 (pn_Quot_M == pn_Div_M) &&
2231 (pn_Quot_M == pn_Mod_M) &&
2232 (pn_Quot_M == pn_Load_M) &&
2233 (pn_Quot_M == pn_Store_M) &&
2234 (pn_Quot_M == pn_Alloc_M) &&
2235 (pn_Quot_M == pn_Bound_M));
2236 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2240 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2242 } /* new_frag_arr */
2245 * Returns the frag_arr from a node.
2247 static INLINE ir_node **get_frag_arr(ir_node *n) {
2248 switch (get_irn_opcode(n)) {
2250 return n->attr.call.exc.frag_arr;
2252 return n->attr.alloc.exc.frag_arr;
2254 return n->attr.load.exc.frag_arr;
2256 return n->attr.store.exc.frag_arr;
2258 return n->attr.except.frag_arr;
2260 } /* get_frag_arr */
2263 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2265 if (!frag_arr[pos]) frag_arr[pos] = val;
2266 if (frag_arr[current_ir_graph->n_loc - 1]) {
2267 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2268 assert(arr != frag_arr && "Endless recursion detected");
2269 set_frag_value(arr, pos, val);
2274 for (i = 0; i < 1000; ++i) {
2275 if (!frag_arr[pos]) {
2276 frag_arr[pos] = val;
2278 if (frag_arr[current_ir_graph->n_loc - 1]) {
2279 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2285 assert(0 && "potential endless recursion");
2287 } /* set_frag_value */
2290 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2294 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2296 frag_arr = get_frag_arr(cfOp);
2297 res = frag_arr[pos];
2299 if (block->attr.block.graph_arr[pos]) {
2300 /* There was a set_value() after the cfOp and no get_value before that
2301 set_value(). We must build a Phi node now. */
2302 if (block->attr.block.is_matured) {
2303 int ins = get_irn_arity(block);
2305 NEW_ARR_A(ir_node *, nin, ins);
2306 res = phi_merge(block, pos, mode, nin, ins);
2308 res = new_rd_Phi0(current_ir_graph, block, mode);
2309 res->attr.phi0.pos = pos;
2310 res->link = block->link;
2314 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2315 but this should be better: (remove comment if this works) */
2316 /* It's a Phi, we can write this into all graph_arrs with NULL */
2317 set_frag_value(block->attr.block.graph_arr, pos, res);
2319 res = get_r_value_internal(block, pos, mode);
2320 set_frag_value(block->attr.block.graph_arr, pos, res);
2324 } /* get_r_frag_value_internal */
2325 #endif /* PRECISE_EXC_CONTEXT */
2328 * Computes the predecessors for the real phi node, and then
2329 * allocates and returns this node. The routine called to allocate the
2330 * node might optimize it away and return a real value.
2331 * This function must be called with an in-array of proper size.
2334 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2335 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2338 /* If this block has no value at pos create a Phi0 and remember it
2339 in graph_arr to break recursions.
2340 Else we may not set graph_arr as there a later value is remembered. */
2342 if (!block->attr.block.graph_arr[pos]) {
2343 if (block == get_irg_start_block(current_ir_graph)) {
2344 /* Collapsing to Bad tarvals is no good idea.
2345 So we call a user-supplied routine here that deals with this case as
2346 appropriate for the given language. Sorrily the only help we can give
2347 here is the position.
2349 Even if all variables are defined before use, it can happen that
2350 we get to the start block, if a Cond has been replaced by a tuple
2351 (bad, jmp). In this case we call the function needlessly, eventually
2352 generating a nonexistent error.
2353 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2356 if (default_initialize_local_variable) {
2357 ir_node *rem = get_cur_block();
2359 set_cur_block(block);
2360 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2364 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2365 /* We don't need to care about exception ops in the start block.
2366 There are none by definition. */
2367 return block->attr.block.graph_arr[pos];
2369 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2370 block->attr.block.graph_arr[pos] = phi0;
2371 #if PRECISE_EXC_CONTEXT
2372 if (get_opt_precise_exc_context()) {
2373 /* Set graph_arr for fragile ops. Also here we should break recursion.
2374 We could choose a cyclic path through an cfop. But the recursion would
2375 break at some point. */
2376 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2382 /* This loop goes to all predecessor blocks of the block the Phi node
2383 is in and there finds the operands of the Phi node by calling
2384 get_r_value_internal. */
2385 for (i = 1; i <= ins; ++i) {
2386 prevCfOp = skip_Proj(block->in[i]);
2388 if (is_Bad(prevCfOp)) {
2389 /* In case a Cond has been optimized we would get right to the start block
2390 with an invalid definition. */
2391 nin[i-1] = new_Bad();
2394 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2396 if (!is_Bad(prevBlock)) {
2397 #if PRECISE_EXC_CONTEXT
2398 if (get_opt_precise_exc_context() &&
2399 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2400 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2401 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2404 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2406 nin[i-1] = new_Bad();
2410 /* We want to pass the Phi0 node to the constructor: this finds additional
2411 optimization possibilities.
2412 The Phi0 node either is allocated in this function, or it comes from
2413 a former call to get_r_value_internal. In this case we may not yet
2414 exchange phi0, as this is done in mature_immBlock. */
2416 phi0_all = block->attr.block.graph_arr[pos];
2417 if (!((get_irn_op(phi0_all) == op_Phi) &&
2418 (get_irn_arity(phi0_all) == 0) &&
2419 (get_nodes_block(phi0_all) == block)))
2425 /* After collecting all predecessors into the array nin a new Phi node
2426 with these predecessors is created. This constructor contains an
2427 optimization: If all predecessors of the Phi node are identical it
2428 returns the only operand instead of a new Phi node. */
2429 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2431 /* In case we allocated a Phi0 node at the beginning of this procedure,
2432 we need to exchange this Phi0 with the real Phi. */
2434 exchange(phi0, res);
2435 block->attr.block.graph_arr[pos] = res;
2436 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2437 only an optimization. */
/* get_r_value_internal -- core of on-the-fly SSA construction.
 *
 * @param block  block in which the definition is sought
 * @param pos    index of the local value in the block's graph_arr
 * @param mode   mode of the value
 * @return       the reaching definition, a newly built Phi, or a Phi0 dummy
 *
 * NOTE(review): some interior lines are missing from this extraction;
 * all code lines below are kept verbatim.
 */
2444 * This function returns the last definition of a variable. In case
2445 * this variable was last defined in a previous block, Phi nodes are
2446 * inserted. If the part of the firm graph containing the definition
2447 * is not yet constructed, a dummy Phi node is returned.
2450 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2452 /* There are 4 cases to treat.
2454 1. The block is not mature and we visit it the first time. We can not
2455 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2456 predecessors is returned. This node is added to the linked list (field
2457 "link") of the containing block to be completed when this block is
2458 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2461 2. The value is already known in this block, graph_arr[pos] is set and we
2462 visit the block the first time. We can return the value without
2463 creating any new nodes.
2465 3. The block is mature and we visit it the first time. A Phi node needs
2466 to be created (phi_merge). If the Phi is not needed, as all its
2467 operands are the same value reaching the block through different
2468 paths, it's optimized away and the value itself is returned.
2470 4. The block is mature, and we visit it the second time. Now two
2471 subcases are possible:
2472 * The value was computed completely the last time we were here. This
2473 is the case if there is no loop. We can return the proper value.
2474 * The recursion that visited this node and set the flag did not
2475 return yet. We are computing a value in a loop and need to
2476 break the recursion. This case only happens if we visited
2477 the same block with phi_merge before, which inserted a Phi0.
2478 So we return the Phi0.
2481 /* case 4 -- already visited. */
2482 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2483 /* As phi_merge allocates a Phi0 this value is always defined. Here
2484 is the critical difference of the two algorithms. */
2485 assert(block->attr.block.graph_arr[pos]);
2486 return block->attr.block.graph_arr[pos];
2489 /* visited the first time */
2490 set_irn_visited(block, get_irg_visited(current_ir_graph));
2492 /* Get the local valid value */
2493 res = block->attr.block.graph_arr[pos];
2495 /* case 2 -- If the value is actually computed, return it. */
2496 if (res) { return res; };
2498 if (block->attr.block.is_matured) { /* case 3 */
2500 /* The Phi has the same amount of ins as the corresponding block. */
2501 int ins = get_irn_arity(block);
2503 NEW_ARR_A (ir_node *, nin, ins);
2505 /* Phi merge collects the predecessors and then creates a node. */
2506 res = phi_merge (block, pos, mode, nin, ins);
2508 } else { /* case 1 */
2509 /* The block is not mature, we don't know how many in's are needed. A Phi
2510 with zero predecessors is created. Such a Phi node is called Phi0
2511 node. The Phi0 is then added to the list of Phi0 nodes in this block
2512 to be matured by mature_immBlock later.
2513 The Phi0 has to remember the pos of its internal value. If the real
2514 Phi is computed, pos is used to update the array with the local
2516 res = new_rd_Phi0(current_ir_graph, block, mode);
2517 res->attr.phi0.pos = pos;
/* Chain the Phi0 into the block's "link" list for mature_immBlock. */
2518 res->link = block->link;
2522 /* If we get here, the frontend missed a use-before-definition error */
2525 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2526 assert(mode->code >= irm_F && mode->code <= irm_P);
2527 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2528 get_mode_null(mode));
2531 /* The local valid value is available now. */
2532 block->attr.block.graph_arr[pos] = res;
2535 } /* get_r_value_internal */
2537 #endif /* USE_FAST_PHI_CONSTRUCTION */
2539 /* ************************************************************************** */
/* mature_immBlock -- finalize an immature block once all CF edges exist:
 * fix the backedge array, replace every queued Phi0 by a real Phi via
 * phi_merge, mark the block matured and optimize it in place.
 *
 * NOTE(review): some interior lines are missing from this extraction;
 * all code lines below are kept verbatim.
 */
2542 * Finalize a Block node, when all control flows are known.
2543 * Acceptable parameters are only Block nodes.
2546 mature_immBlock(ir_node *block) {
2551 assert(get_irn_opcode(block) == iro_Block);
2552 /* @@@ should be commented in
2553 assert (!get_Block_matured(block) && "Block already matured"); */
2555 if (!get_Block_matured(block)) {
/* in[0] is not a CF predecessor (cf. add_immBlock_pred) -- hence the -1. */
2556 ins = ARR_LEN(block->in)-1;
2557 /* Fix block parameters */
2558 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2560 /* An array for building the Phi nodes. */
2561 NEW_ARR_A(ir_node *, nin, ins);
2563 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each list entry is a Phi0 queued by get_r_value_internal (via "link"). */
2565 for (n = block->link; n; n = next) {
2566 inc_irg_visited(current_ir_graph);
2568 exchange(n, phi_merge(block, n->attr.phi0.pos, n->mode, nin, ins));
2571 block->attr.block.is_matured = 1;
2573 /* Now, as the block is a finished firm node, we can optimize it.
2574 Since other nodes have been allocated since the block was created
2575 we can not free the node on the obstack. Therefore we have to call
2577 Unfortunately the optimization does not change a lot, as all allocated
2578 nodes refer to the unoptimized node.
2579 We can call _2, as global cse has no effect on blocks. */
2580 block = optimize_in_place_2(block);
2581 IRN_VRFY_IRG(block, current_ir_graph);
2583 } /* mature_immBlock */
/* new_d_* constructors with debug info: thin wrappers that forward to the
 * new_bd_* constructors, building in current_ir_graph->current_block.
 * Exceptions: Const/Const_long/Const_type are placed in the start block.
 * NOTE(review): return-type lines and some closing braces are missing from
 * this extraction; all code lines below are kept verbatim. */
2586 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2587 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2591 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2592 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2596 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2597 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2598 } /* new_d_Const_long */
2601 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2602 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2603 } /* new_d_Const_type */
2607 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2608 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2612 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2613 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* defaultProj: marks the Cond fragmentary and records its default branch. */
2617 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2620 assert(arg->op == op_Cond);
2621 arg->attr.cond.kind = fragmentary;
2622 arg->attr.cond.default_proj = max_proj;
2623 res = new_Proj(arg, mode_X, max_proj);
2625 } /* new_d_defaultProj */
/* Last new_bd_Conv argument: 0 = normal conversion, 1 = strict. */
2628 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2629 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2633 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2634 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2635 } /* new_d_strictConv */
2638 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2639 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2643 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2644 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
/* allocate_frag_arr -- lazily attach a "fragment value" array to a fragile
 * (exception-raising) node for precise exception context tracking.
 *
 * @param res         the freshly built node (may have been optimized away)
 * @param op          expected opcode; skip if optimization replaced it
 * @param frag_store  address of the node's frag_arr slot; only set if NULL
 *                    (a CSE hit may already carry one)
 */
2653 * Allocate the frag array.
2655 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2656 if (get_opt_precise_exc_context()) {
2657 if ((current_ir_graph->phase_state == phase_building) &&
2658 (get_irn_op(res) == op) && /* Could be optimized away. */
2659 !*frag_store) /* Could be a cse where the arr is already set. */ {
2660 *frag_store = new_frag_arr(res);
2663 } /* allocate_frag_arr */
/* new_d_* constructors, continued.  Fragile operations (Quot, DivMod, Div,
 * Mod, Call, Load, Store, Alloc, CopyB, Bound) additionally get a frag
 * array via allocate_frag_arr when PRECISE_EXC_CONTEXT is enabled.
 * NOTE(review): return-type lines, declarations and some closing braces are
 * missing from this extraction; all code lines below are kept verbatim. */
2666 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2668 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2669 #if PRECISE_EXC_CONTEXT
2670 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2677 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2679 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2680 #if PRECISE_EXC_CONTEXT
2681 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2685 } /* new_d_DivMod */
2688 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2690 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2691 #if PRECISE_EXC_CONTEXT
2692 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2699 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2701 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2702 #if PRECISE_EXC_CONTEXT
2703 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2722 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2723 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2727 new_d_Jmp(dbg_info *db) {
2728 return new_bd_Jmp(db, current_ir_graph->current_block);
2732 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2733 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2737 new_d_Cond(dbg_info *db, ir_node *c) {
2738 return new_bd_Cond(db, current_ir_graph->current_block, c);
2742 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2745 res = new_bd_Call(db, current_ir_graph->current_block,
2746 store, callee, arity, in, tp);
2747 #if PRECISE_EXC_CONTEXT
2748 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2755 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2756 return new_bd_Return(db, current_ir_graph->current_block,
2758 } /* new_d_Return */
2761 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2763 res = new_bd_Load(db, current_ir_graph->current_block,
2765 #if PRECISE_EXC_CONTEXT
2766 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2773 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2775 res = new_bd_Store(db, current_ir_graph->current_block,
2777 #if PRECISE_EXC_CONTEXT
2778 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2785 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2786 where_alloc where) {
2788 res = new_bd_Alloc(db, current_ir_graph->current_block,
2789 store, size, alloc_type, where);
2790 #if PRECISE_EXC_CONTEXT
2791 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2798 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2799 ir_node *size, ir_type *free_type, where_alloc where) {
2800 return new_bd_Free(db, current_ir_graph->current_block,
2801 store, ptr, size, free_type, where);
/* simpleSel: a Sel with no index operands (n_index = 0). */
2805 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2806 /* GL: objptr was called frame before. Frame was a bad choice for the name
2807 as the operand could as well be a pointer to a dynamic object. */
2809 return new_bd_Sel(db, current_ir_graph->current_block,
2810 store, objptr, 0, NULL, ent);
2811 } /* new_d_simpleSel */
2814 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2815 return new_bd_Sel(db, current_ir_graph->current_block,
2816 store, objptr, n_index, index, sel);
/* SymConst nodes are placed in the start block, like Const. */
2820 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2821 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2823 } /* new_d_SymConst_type */
2826 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
2827 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2828 value, kind, firm_unknown_type);
2829 } /* new_d_SymConst */
2832 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2833 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2839 return _new_d_Bad();
2843 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2844 return new_bd_Confirm(db, current_ir_graph->current_block,
2846 } /* new_d_Confirm */
2849 new_d_Unknown(ir_mode *m) {
2850 return new_bd_Unknown(m);
2851 } /* new_d_Unknown */
2854 new_d_CallBegin(dbg_info *db, ir_node *call) {
2855 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2856 } /* new_d_CallBegin */
2859 new_d_EndReg(dbg_info *db) {
2860 return new_bd_EndReg(db, current_ir_graph->current_block);
2861 } /* new_d_EndReg */
2864 new_d_EndExcept(dbg_info *db) {
2865 return new_bd_EndExcept(db, current_ir_graph->current_block);
2866 } /* new_d_EndExcept */
2869 new_d_Break(dbg_info *db) {
2870 return new_bd_Break(db, current_ir_graph->current_block);
2874 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2875 return new_bd_Filter(db, current_ir_graph->current_block,
2877 } /* new_d_Filter */
/* Parentheses suppress a same-named function-like macro expansion. */
2880 (new_d_NoMem)(void) {
2881 return _new_d_NoMem();
2885 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2886 ir_node *ir_true, ir_mode *mode) {
2887 return new_bd_Mux(db, current_ir_graph->current_block,
2888 sel, ir_false, ir_true, mode);
2892 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2893 return new_bd_Psi(db, current_ir_graph->current_block,
2894 arity, conds, vals, mode);
2897 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2898 ir_node *dst, ir_node *src, ir_type *data_type) {
2900 res = new_bd_CopyB(db, current_ir_graph->current_block,
2901 store, dst, src, data_type);
2902 #if PRECISE_EXC_CONTEXT
2903 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2909 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2910 return new_bd_InstOf(db, current_ir_graph->current_block,
2911 store, objptr, type);
2912 } /* new_d_InstOf */
2915 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2916 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2919 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2920 ir_node *idx, ir_node *lower, ir_node *upper) {
2922 res = new_bd_Bound(db, current_ir_graph->current_block,
2923 store, idx, lower, upper);
2924 #if PRECISE_EXC_CONTEXT
2925 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2931 new_d_Pin(dbg_info *db, ir_node *node) {
2932 return new_bd_Pin(db, current_ir_graph->current_block, node);
2936 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2937 int n_outs, ir_asm_constraint *outputs,
2938 int n_clobber, ident *clobber[], ident *asm_text) {
2939 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2942 /* ********************************************************************* */
2943 /* Comfortable interface with automatic Phi node construction. */
2944 /* (Uses also constructors of ?? interface, except new_Block.) */
2945 /* ********************************************************************* */
2947 /* Block construction */
2948 /* immature Block without predecessors */
/* new_d_immBlock -- create an immature block (no CF preds yet), make it
 * the current block, and set up its graph_arr for Phi construction.
 * Preds are added later with add_immBlock_pred; mature_immBlock finishes it.
 * NOTE(review): some interior lines are missing from this extraction;
 * all code lines below are kept verbatim. */
2950 new_d_immBlock(dbg_info *db) {
2953 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2954 /* creates a new dynamic in-array as length of in is -1 */
2955 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2956 current_ir_graph->current_block = res;
2958 /* macroblock head */
2961 res->attr.block.is_matured = 0;
2962 res->attr.block.is_dead = 0;
2963 res->attr.block.is_mb_head = 1;
2964 res->attr.block.has_label = 0;
2965 res->attr.block.irg = current_ir_graph;
2966 res->attr.block.backedge = NULL;
2967 res->attr.block.in_cg = NULL;
2968 res->attr.block.cg_backedge = NULL;
2969 res->attr.block.extblk = NULL;
2970 res->attr.block.region = NULL;
2971 res->attr.block.mb_depth = 0;
2972 res->attr.block.label = 0;
2974 set_Block_block_visited(res, 0);
2976 /* Create and initialize array for Phi-node construction. */
2977 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2978 current_ir_graph->n_loc);
2979 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2981 /* Immature block may not be optimized! */
2982 IRN_VRFY_IRG(res, current_ir_graph);
2985 } /* new_d_immBlock */
/* Immature-block helpers.  NOTE(review): return-type lines and some closing
 * braces are missing from this extraction; code lines kept verbatim. */
2988 new_immBlock(void) {
2989 return new_d_immBlock(NULL);
2990 } /* new_immBlock */
2992 /* immature PartBlock with its predecessors */
/* A PartBlock belongs to the macroblock of pred_jmp's block: it shares that
 * block's in[0] entry, is not a macroblock head, and nests one level deeper. */
2994 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2995 ir_node *res = new_d_immBlock(db);
2996 ir_node *blk = get_nodes_block(pred_jmp);
2998 res->in[0] = blk->in[0];
2999 add_immBlock_pred(res, pred_jmp);
3001 res->attr.block.is_mb_head = 0;
3002 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
3005 } /* new_d_immPartBlock */
3008 new_immPartBlock(ir_node *pred_jmp) {
3009 return new_d_immPartBlock(NULL, pred_jmp);
3010 } /* new_immPartBlock */
3012 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new CF predecessor of an immature macroblock head.
 * n counts existing CF preds: in[0] is not one, hence the -1. */
3014 add_immBlock_pred(ir_node *block, ir_node *jmp) {
3015 int n = ARR_LEN(block->in) - 1;
3017 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
3018 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
3019 assert(jmp != NULL);
3021 ARR_APP1(ir_node *, block->in, jmp);
3023 hook_set_irn_n(block, n, jmp, NULL);
3024 } /* add_immBlock_pred */
3026 /* changing the current block */
3028 set_cur_block(ir_node *target) {
3029 current_ir_graph->current_block = target;
3030 } /* set_cur_block */
3032 /* ************************ */
3033 /* parameter administration */
/* Value numbering convention visible below: graph_arr slot 0 holds the
 * current memory/store value; user value 'pos' lives at graph_arr[pos+1].
 * NOTE(review): return-type lines and some closing braces are missing from
 * this extraction; all code lines below are kept verbatim. */
3035 /* get a value from the parameter array from the current block by its index */
3037 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
3038 ir_graph *irg = current_ir_graph;
3039 assert(get_irg_phase_state(irg) == phase_building);
3040 inc_irg_visited(irg);
3043 return get_r_value_internal(irg->current_block, pos + 1, mode);
3046 /* get a value from the parameter array from the current block by its index */
3048 get_value(int pos, ir_mode *mode) {
3049 return get_d_value(NULL, pos, mode);
3052 /* set a value at position pos in the parameter array from the current block */
3054 set_value(int pos, ir_node *value) {
3055 ir_graph *irg = current_ir_graph;
3056 assert(get_irg_phase_state(irg) == phase_building);
3057 assert(pos+1 < irg->n_loc);
3058 irg->current_block->attr.block.graph_arr[pos + 1] = value;
3061 /* Find the value number for a node in the current block.*/
/* Linear scan backwards; slot 0 (the store) is deliberately excluded. */
3063 find_value(ir_node *value) {
3065 ir_node *bl = current_ir_graph->current_block;
3067 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
3068 if (bl->attr.block.graph_arr[i] == value)
3073 /* get the current store */
3076 ir_graph *irg = current_ir_graph;
3078 assert(get_irg_phase_state(irg) == phase_building);
3079 /* GL: one could call get_value instead */
3080 inc_irg_visited(irg);
3081 return get_r_value_internal(irg->current_block, 0, mode_M);
3084 /* set the current store: handles automatic Sync construction for Load nodes */
3086 set_store(ir_node *store) {
3087 ir_node *load, *pload, *pred, *in[2];
3089 assert(get_irg_phase_state(current_ir_graph) == phase_building);
3090 /* Beware: due to dead code elimination, a store might become a Bad node even in
3091 the construction phase. */
3092 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
3094 if (get_opt_auto_create_sync()) {
3095 /* handle non-volatile Load nodes by automatically creating Sync's */
3096 load = skip_Proj(store);
3097 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3098 pred = get_Load_mem(load);
3100 if (is_Sync(pred)) {
3101 /* a Load after a Sync: move it up */
3102 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3104 set_Load_mem(load, get_memop_mem(mem));
3105 add_Sync_pred(pred, store);
3108 pload = skip_Proj(pred);
3109 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3110 /* a Load after a Load: create a new Sync */
3111 set_Load_mem(load, get_Load_mem(pload));
3115 store = new_Sync(2, in);
/* Slot 0 of the current block's graph_arr is the store value. */
3120 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* keep_alive -- attach ka to the End node so it survives dead node removal. */
3124 keep_alive(ir_node *ka) {
3125 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3128 /* --- Useful access routines --- */
3129 /* Returns the current block of the current graph. To set the current
3130 block use set_cur_block. */
3131 ir_node *get_cur_block(void) {
3132 return get_irg_current_block(current_ir_graph);
3133 } /* get_cur_block */
3135 /* Returns the frame type of the current graph */
3136 ir_type *get_cur_frame_type(void) {
3137 return get_irg_frame_type(current_ir_graph);
3138 } /* get_cur_frame_type */
3141 /* ********************************************************************* */
3144 /* call once for each run of the library */
/* init_cons -- registers the frontend callback used by phi_merge when an
 * undefined local variable is read in the start block. */
3146 init_cons(uninitialized_local_variable_func_t *func) {
3147 default_initialize_local_variable = func;
/* irp_finalize_cons -- finalize construction of every graph in the program
 * and advance the program's phase state. */
3151 irp_finalize_cons(void) {
3153 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3154 irg_finalize_cons(get_irp_irg(i));
3156 irp->phase_state = phase_high;
3157 } /* irp_finalize_cons */
/* Convenience constructors without debug info: each new_X() simply forwards
 * to new_d_X(NULL, ...).  NOTE(review): some closing braces are missing from
 * this extraction; all code lines below are kept verbatim. */
3160 ir_node *new_Block(int arity, ir_node **in) {
3161 return new_d_Block(NULL, arity, in);
3163 ir_node *new_Start(void) {
3164 return new_d_Start(NULL);
3166 ir_node *new_End(void) {
3167 return new_d_End(NULL);
3169 ir_node *new_Jmp(void) {
3170 return new_d_Jmp(NULL);
3172 ir_node *new_IJmp(ir_node *tgt) {
3173 return new_d_IJmp(NULL, tgt);
3175 ir_node *new_Cond(ir_node *c) {
3176 return new_d_Cond(NULL, c);
3178 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3179 return new_d_Return(NULL, store, arity, in);
3181 ir_node *new_Const(ir_mode *mode, tarval *con) {
3182 return new_d_Const(NULL, mode, con);
3185 ir_node *new_Const_long(ir_mode *mode, long value) {
3186 return new_d_Const_long(NULL, mode, value);
/* Derives the mode from the given type. */
3189 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3190 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3193 ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
3194 return new_d_SymConst_type(NULL, value, kind, type);
3196 ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
3197 return new_d_SymConst(NULL, value, kind);
3199 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3200 return new_d_simpleSel(NULL, store, objptr, ent);
3202 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3204 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3206 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3208 return new_d_Call(NULL, store, callee, arity, in, tp);
3210 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3211 return new_d_Add(NULL, op1, op2, mode);
3213 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3214 return new_d_Sub(NULL, op1, op2, mode);
3216 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3217 return new_d_Minus(NULL, op, mode);
3219 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3220 return new_d_Mul(NULL, op1, op2, mode);
3222 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3223 return new_d_Quot(NULL, memop, op1, op2, mode, state);
3225 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3226 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
3228 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3229 return new_d_Div(NULL, memop, op1, op2, mode, state);
3231 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3232 return new_d_Mod(NULL, memop, op1, op2, mode, state);
3234 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3235 return new_d_Abs(NULL, op, mode);
3237 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3238 return new_d_And(NULL, op1, op2, mode);
3240 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3241 return new_d_Or(NULL, op1, op2, mode);
3243 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3244 return new_d_Eor(NULL, op1, op2, mode);
3246 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3247 return new_d_Not(NULL, op, mode);
3249 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3250 return new_d_Shl(NULL, op, k, mode);
3252 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3253 return new_d_Shr(NULL, op, k, mode);
3255 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3256 return new_d_Shrs(NULL, op, k, mode);
3258 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
3259 return new_d_Rot(NULL, op, k, mode);
3261 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3262 return new_d_Carry(NULL, op1, op2, mode);
3264 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3265 return new_d_Borrow(NULL, op1, op2, mode);
3267 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3268 return new_d_Cmp(NULL, op1, op2);
3270 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3271 return new_d_Conv(NULL, op, mode);
3273 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3274 return new_d_strictConv(NULL, op, mode);
3276 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3277 return new_d_Cast(NULL, op, to_tp);
3279 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3280 return new_d_Phi(NULL, arity, in, mode);
3282 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3283 return new_d_Load(NULL, store, addr, mode);
3285 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3286 return new_d_Store(NULL, store, addr, val);
3288 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3289 where_alloc where) {
3290 return new_d_Alloc(NULL, store, size, alloc_type, where);
3292 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3293 ir_type *free_type, where_alloc where) {
3294 return new_d_Free(NULL, store, ptr, size, free_type, where);
3296 ir_node *new_Sync(int arity, ir_node *in[]) {
3297 return new_d_Sync(NULL, arity, in);
3299 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3300 return new_d_Proj(NULL, arg, mode, proj);
3302 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3303 return new_d_defaultProj(NULL, arg, max_proj);
3305 ir_node *new_Tuple(int arity, ir_node **in) {
3306 return new_d_Tuple(NULL, arity, in);
3308 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3309 return new_d_Id(NULL, val, mode);
3311 ir_node *new_Bad(void) {
3314 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3315 return new_d_Confirm(NULL, val, bound, cmp);
3317 ir_node *new_Unknown(ir_mode *m) {
3318 return new_d_Unknown(m);
3320 ir_node *new_CallBegin(ir_node *callee) {
3321 return new_d_CallBegin(NULL, callee);
3323 ir_node *new_EndReg(void) {
3324 return new_d_EndReg(NULL);
3326 ir_node *new_EndExcept(void) {
3327 return new_d_EndExcept(NULL);
3329 ir_node *new_Break(void) {
3330 return new_d_Break(NULL);
3332 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3333 return new_d_Filter(NULL, arg, mode, proj);
3335 ir_node *new_NoMem(void) {
3336 return new_d_NoMem();
3338 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3339 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3341 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3342 return new_d_Psi(NULL, arity, conds, vals, mode);
3344 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3345 return new_d_CopyB(NULL, store, dst, src, data_type);
3347 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3348 return new_d_InstOf(NULL, store, objptr, ent);
3350 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3351 return new_d_Raise(NULL, store, obj);
3353 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3354 return new_d_Bound(NULL, store, idx, lower, upper);
3356 ir_node *new_Pin(ir_node *node) {
3357 return new_d_Pin(NULL, node);
3359 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3360 int n_outs, ir_asm_constraint *outputs,
3361 int n_clobber, ident *clobber[], ident *asm_text) {
3362 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3365 /* create a new anchor node */
3366 ir_node *new_Anchor(ir_graph *irg) {
3367 ir_node *in[anchor_last];
3368 memset(in, 0, sizeof(in));
3369 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);