2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
/* NOTE(review): this file is a sampled extraction of libFirm's ircons.c.
   Many interior lines (local `res` declarations, `return res;`, closing
   braces, #else/#endif) are missing from the visible text; the comments
   added below annotate only what the fragments themselves show. */
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
60 /* when we need verifying */
/* Two alternative expansions of IRN_VRFY_IRG: a no-op and one that calls
   irn_vrfy_irg().  The selecting #ifdef/#else lines are not visible here;
   presumably keyed on a debug/verify build flag -- TODO confirm. */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
68 * Language dependent variable initialization callback.
/* Frontend-supplied callback used to produce a value for a local variable
   that is read before being written; NULL means no frontend hook installed. */
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* The NEW_BD_* macros stamp out "block, debug-info" (bd) constructors:
   each builds one node of op_<instr> in the given block of
   current_ir_graph, runs local optimization, and verifies the result. */
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = state; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
/* The NEW_RD_* macros build "raw, debug-info" (rd) constructors: they
   temporarily switch current_ir_graph to the caller-supplied irg, delegate
   to the matching new_bd_* constructor, then restore the previous graph.
   The NEW_D_* macros build "debug-info" constructors that use
   current_ir_graph->current_block implicitly. */
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for an divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
161 current_ir_graph = rem; \
165 /* creates a d constructor for an binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
/* new_bd_Block: builds a Block node with `arity` control-flow predecessors
   in current_ir_graph.  Initializes all block attributes (macroblock
   header state, backedge array, inter-procedural fields) and marks the
   block matured and unvisited. */
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Cannot be used with automatic
183 * Phi node construction.
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.has_label = 0;
198 res->attr.block.irg = irg;
/* backedge flags sized to the predecessor count, allocated on the graph's
   obstack so they live as long as the graph. */
199 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
200 res->attr.block.in_cg = NULL;
201 res->attr.block.cg_backedge = NULL;
202 res->attr.block.extblk = NULL;
203 res->attr.block.mb_depth = 0;
204 res->attr.block.label = 0;
206 set_Block_matured(res, 1);
207 set_Block_block_visited(res, 0);
209 IRN_VRFY_IRG(res, irg);
/* new_bd_Start: builds the Start node (mode_T, no predecessors) in `block`. */
214 new_bd_Start(dbg_info *db, ir_node *block) {
216 ir_graph *irg = current_ir_graph;
218 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
220 IRN_VRFY_IRG(res, irg);
/* new_bd_End: builds the End node; arity -1 requests a dynamic (growable)
   predecessor array for keep-alive edges. */
225 new_bd_End(dbg_info *db, ir_node *block) {
227 ir_graph *irg = current_ir_graph;
229 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
231 IRN_VRFY_IRG(res, irg);
/* new_bd_Phi: builds a Phi with `arity` data predecessors in `block`.
   Optimization is skipped if any input is still Unknown, and memory Phis
   are conservatively kept alive via the End node. */
236 * Creates a Phi node with all predecessors. Calling this constructor
237 * is only allowed if the corresponding block is mature.
240 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
242 ir_graph *irg = current_ir_graph;
246 /* Don't assert that block matured: the use of this constructor is strongly
248 if ( get_Block_matured(block) )
249 assert( get_irn_arity(block) == arity );
251 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
253 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan inputs for Unknown operands; the loop body setting `has_unknown`
   is not visible in this extraction -- TODO confirm against upstream. */
255 for (i = arity-1; i >= 0; i--)
256 if (get_irn_op(in[i]) == op_Unknown) {
261 if (!has_unknown) res = optimize_node(res);
262 IRN_VRFY_IRG(res, irg);
264 /* Memory Phis in endless loops must be kept alive.
265 As we can't distinguish these easily we keep all of them alive. */
266 if ((res->op == op_Phi) && (mode == mode_M))
267 add_End_keepalive(get_irg_end(irg), res);
/* new_bd_Const_type: builds a Const with tarval `con` and type `tp`.
   Note: the node is deliberately placed in the start block (constants are
   block-independent); the `block` parameter is ignored here. */
272 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
274 ir_graph *irg = current_ir_graph;
277 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
278 res->attr.con.tv = con;
279 set_Const_type(res, tp); /* Call method because of complex assertion. */
280 res = optimize_node (res);
281 assert(get_Const_type(res) == tp);
282 IRN_VRFY_IRG(res, irg);
285 } /* new_bd_Const_type */
/* new_bd_Const: Const with the unknown type; delegates through the rd
   variant so current_ir_graph handling stays in one place. */
288 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
289 ir_graph *irg = current_ir_graph;
291 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* new_bd_Const_long: convenience wrapper building the tarval from a long. */
295 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
296 ir_graph *irg = current_ir_graph;
298 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
299 } /* new_bd_Const_long */
/* new_bd_Id: builds an Id node forwarding `val` (usually optimized away). */
302 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
304 ir_graph *irg = current_ir_graph;
306 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
307 res = optimize_node(res);
308 IRN_VRFY_IRG(res, irg);
/* new_bd_Proj: builds a Proj selecting result `proj` from tuple `arg`. */
313 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
316 ir_graph *irg = current_ir_graph;
318 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
319 res->attr.proj = proj;
322 assert(get_Proj_pred(res));
323 assert(get_nodes_block(get_Proj_pred(res)));
325 res = optimize_node(res);
327 IRN_VRFY_IRG(res, irg);
/* new_bd_defaultProj: builds the default Proj of a Cond; as a side effect
   it mutates the Cond, marking it `fragmentary` and recording max_proj. */
332 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
335 ir_graph *irg = current_ir_graph;
337 assert(arg->op == op_Cond);
338 arg->attr.cond.kind = fragmentary;
339 arg->attr.cond.default_proj = max_proj;
340 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
342 } /* new_bd_defaultProj */
/* new_bd_Conv: mode conversion of `op`; strict_flag marks strict Convs. */
345 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
347 ir_graph *irg = current_ir_graph;
349 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
350 res->attr.conv.strict = strict_flag;
351 res = optimize_node(res);
352 IRN_VRFY_IRG(res, irg);
/* new_bd_Cast: type cast keeping the operand's mode; target type must be
   atomic. */
357 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
359 ir_graph *irg = current_ir_graph;
361 assert(is_atomic_type(to_tp));
363 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
364 res->attr.cast.totype = to_tp;
365 res = optimize_node(res);
366 IRN_VRFY_IRG(res, irg);
/* new_bd_Tuple: groups `arity` values into one mode_T node. */
371 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
373 ir_graph *irg = current_ir_graph;
375 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
376 res = optimize_node (res);
377 IRN_VRFY_IRG(res, irg);
/* new_bd_Cmp: compares op1/op2; the result is a mode_T tuple of relation
   Projs.  (The in[] initialization lines are missing from this extraction.) */
402 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
405 ir_graph *irg = current_ir_graph;
408 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
409 res = optimize_node(res);
410 IRN_VRFY_IRG(res, irg);
/* new_bd_Jmp: unconditional jump (mode_X) out of `block`. */
415 new_bd_Jmp(dbg_info *db, ir_node *block) {
417 ir_graph *irg = current_ir_graph;
419 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
420 res = optimize_node(res);
421 IRN_VRFY_IRG(res, irg);
/* new_bd_IJmp: indirect jump to computed target `tgt`. */
426 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
428 ir_graph *irg = current_ir_graph;
430 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
431 res = optimize_node(res);
432 IRN_VRFY_IRG(res, irg);
/* new_bd_Cond: conditional branch on selector `c`; starts as a dense
   jump table with no default Proj and no branch prediction. */
437 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
439 ir_graph *irg = current_ir_graph;
441 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
442 res->attr.cond.kind = dense;
443 res->attr.cond.default_proj = 0;
444 res->attr.cond.pred = COND_JMP_PRED_NONE;
445 res = optimize_node(res);
446 IRN_VRFY_IRG(res, irg);
/* new_bd_Call: builds a Call node.  The real in-array r_in is assembled on
   the stack (NEW_ARR_A / alloca): slots 0..1 presumably hold store and
   callee, followed by the `arity` arguments copied in at &r_in[2]. */
451 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
452 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
456 ir_graph *irg = current_ir_graph;
459 NEW_ARR_A(ir_node *, r_in, r_arity);
462 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
464 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
466 assert((get_unknown_type() == tp) || is_Method_type(tp));
467 set_Call_type(res, tp);
468 res->attr.call.exc.pin_state = op_pin_state_pinned;
469 res->attr.call.callee_arr = NULL;
470 res = optimize_node(res);
471 IRN_VRFY_IRG(res, irg);
/* new_bd_Return: builds a Return; slot 0 holds the memory `store`, the
   returned values follow at &r_in[1]. */
476 new_bd_Return(dbg_info *db, ir_node *block,
477 ir_node *store, int arity, ir_node **in) {
481 ir_graph *irg = current_ir_graph;
484 NEW_ARR_A (ir_node *, r_in, r_arity);
486 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
487 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
491 } /* new_bd_Return */
/* new_bd_Load: memory load of mode `mode` from address `adr`; defaults to
   pinned, non-volatile and aligned.  Result is a mode_T tuple. */
494 new_bd_Load(dbg_info *db, ir_node *block,
495 ir_node *store, ir_node *adr, ir_mode *mode) {
498 ir_graph *irg = current_ir_graph;
502 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
503 res->attr.load.exc.pin_state = op_pin_state_pinned;
504 res->attr.load.load_mode = mode;
505 res->attr.load.volatility = volatility_non_volatile;
506 res->attr.load.aligned = align_is_aligned;
507 res = optimize_node(res);
508 IRN_VRFY_IRG(res, irg);
/* new_bd_Store: memory store of `val` to `adr`; same default attributes
   as Load. */
513 new_bd_Store(dbg_info *db, ir_node *block,
514 ir_node *store, ir_node *adr, ir_node *val) {
517 ir_graph *irg = current_ir_graph;
522 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
523 res->attr.store.exc.pin_state = op_pin_state_pinned;
524 res->attr.store.volatility = volatility_non_volatile;
525 res->attr.store.aligned = align_is_aligned;
526 res = optimize_node(res);
527 IRN_VRFY_IRG(res, irg);
/* new_bd_Alloc: allocates `size` bytes of `alloc_type` on stack or heap
   (`where`); result is a mode_T tuple (memory, pointer, exception). */
532 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
533 ir_node *size, ir_type *alloc_type, where_alloc where) {
536 ir_graph *irg = current_ir_graph;
540 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
541 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
542 res->attr.alloc.where = where;
543 res->attr.alloc.type = alloc_type;
544 res = optimize_node(res);
545 IRN_VRFY_IRG(res, irg);
/* new_bd_Free: frees `size` bytes at `ptr`; produces only a memory value
   (mode_M), hence no exception tuple. */
550 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
551 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
554 ir_graph *irg = current_ir_graph;
559 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
560 res->attr.free.where = where;
561 res->attr.free.type = free_type;
562 res = optimize_node(res);
563 IRN_VRFY_IRG(res, irg);
/* new_bd_Sel: selects entity `ent` from compound `objptr`, with `arity`
   array indices copied in at &r_in[2].  Result mode is mode_P_code for
   method entities, mode_P_data otherwise. */
568 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
569 int arity, ir_node **in, ir_entity *ent) {
573 ir_graph *irg = current_ir_graph;
574 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
576 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
579 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
582 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
584 * Sel's can select functions which should be of mode mode_P_code.
586 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
587 res->attr.sel.ent = ent;
588 res = optimize_node(res);
589 IRN_VRFY_IRG(res, irg);
/* new_bd_SymConst_type: symbolic constant (address, size, alignment, type
   tag...).  Address kinds get a pointer mode; the mode for the other
   kinds is set on a line not visible here -- TODO confirm. */
594 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
595 symconst_kind symkind, ir_type *tp) {
598 ir_graph *irg = current_ir_graph;
600 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
601 mode = mode_P_data; /* FIXME: can be mode_P_code */
605 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
607 res->attr.symc.num = symkind;
608 res->attr.symc.sym = value;
609 res->attr.symc.tp = tp;
611 res = optimize_node(res);
612 IRN_VRFY_IRG(res, irg);
614 } /* new_bd_SymConst_type */
/* new_bd_Sync: memory join node; created empty (dynamic arity -1),
   predecessors are added later via add_Sync_pred. */
617 new_bd_Sync(dbg_info *db, ir_node *block) {
619 ir_graph *irg = current_ir_graph;
621 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
622 /* no need to call optimize node here, Sync are always created with no predecessors */
623 IRN_VRFY_IRG(res, irg);
/* new_bd_Confirm: asserts the relation `cmp` between `val` and `bound`;
   carries val's mode.  (in[] initialization lines not visible here.) */
628 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
629 ir_node *in[2], *res;
630 ir_graph *irg = current_ir_graph;
634 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
635 res->attr.confirm.cmp = cmp;
636 res = optimize_node(res);
637 IRN_VRFY_IRG(res, irg);
639 } /* new_bd_Confirm */
/* new_bd_Unknown: placeholder value of mode m, always placed in the start
   block; no dbg info. */
642 new_bd_Unknown(ir_mode *m) {
644 ir_graph *irg = current_ir_graph;
646 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
647 res = optimize_node(res);
649 } /* new_bd_Unknown */
/* Inter-procedural view constructors: CallBegin / EndReg / EndExcept /
   Break are only meaningful when the call graph is made explicit. */
/* new_bd_CallBegin: marks the begin of an interprocedurally viewed call;
   its single input is the callee pointer of `call`. */
652 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
655 ir_graph *irg = current_ir_graph;
657 in[0] = get_Call_ptr(call);
658 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
659 /* res->attr.callbegin.irg = irg; */
660 res->attr.callbegin.call = call;
661 res = optimize_node(res);
662 IRN_VRFY_IRG(res, irg);
664 } /* new_bd_CallBegin */
/* new_bd_EndReg: regular-control end node; registered on the graph. */
667 new_bd_EndReg(dbg_info *db, ir_node *block) {
669 ir_graph *irg = current_ir_graph;
671 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
672 set_irg_end_reg(irg, res);
673 IRN_VRFY_IRG(res, irg);
675 } /* new_bd_EndReg */
/* new_bd_EndExcept: exceptional-control end node; registered on the graph. */
678 new_bd_EndExcept(dbg_info *db, ir_node *block) {
680 ir_graph *irg = current_ir_graph;
682 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
683 set_irg_end_except(irg, res);
684 IRN_VRFY_IRG (res, irg);
686 } /* new_bd_EndExcept */
/* new_bd_Break: jump terminating an interprocedural call region. */
689 new_bd_Break(dbg_info *db, ir_node *block) {
691 ir_graph *irg = current_ir_graph;
693 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
694 res = optimize_node(res);
695 IRN_VRFY_IRG(res, irg);
/* new_bd_Filter: interprocedural counterpart of Proj; selects result
   `proj` from `arg` and carries call-graph bookkeeping fields. */
700 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
703 ir_graph *irg = current_ir_graph;
705 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
706 res->attr.filter.proj = proj;
707 res->attr.filter.in_cg = NULL;
708 res->attr.filter.backedge = NULL;
711 assert(get_Proj_pred(res));
712 assert(get_nodes_block(get_Proj_pred(res)));
714 res = optimize_node(res);
715 IRN_VRFY_IRG(res, irg);
717 } /* new_bd_Filter */
/* new_bd_Mux: conditional select -- sel ? ir_true : ir_false.  (in[]
   assignment lines are not visible in this extraction.) */
720 new_bd_Mux(dbg_info *db, ir_node *block,
721 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
724 ir_graph *irg = current_ir_graph;
730 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
733 res = optimize_node(res);
734 IRN_VRFY_IRG(res, irg);
/* new_bd_Psi: generalized Mux over `arity` condition/value pairs plus a
   trailing default; inputs interleave as cond[i], vals[i], ..., default. */
739 new_bd_Psi(dbg_info *db, ir_node *block,
740 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
743 ir_graph *irg = current_ir_graph;
746 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
748 for (i = 0; i < arity; ++i) {
750 in[2 * i + 1] = vals[i];
754 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
757 res = optimize_node(res);
758 IRN_VRFY_IRG(res, irg);
/* new_bd_CopyB: block copy of `data_type` from `src` to `dst`; pinned,
   mode_T result (memory plus exception projections). */
763 new_bd_CopyB(dbg_info *db, ir_node *block,
764 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
767 ir_graph *irg = current_ir_graph;
773 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
775 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
776 res->attr.copyb.data_type = data_type;
777 res = optimize_node(res);
778 IRN_VRFY_IRG(res, irg);
783 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
784 ir_node *objptr, ir_type *type) {
787 ir_graph *irg = current_ir_graph;
791 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
792 res->attr.instof.type = type;
793 res = optimize_node(res);
794 IRN_VRFY_IRG(res, irg);
796 } /* new_bd_InstOf */
/* new_bd_Raise: raises exception object `obj`; mode_T result.  (in[]
   assignment lines are not visible in this extraction.) */
799 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
802 ir_graph *irg = current_ir_graph;
806 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
807 res = optimize_node(res);
808 IRN_VRFY_IRG(res, irg);
/* new_bd_Bound: bounds check -- asserts lower <= idx < upper; pinned,
   mode_T result. */
813 new_bd_Bound(dbg_info *db, ir_node *block,
814 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
817 ir_graph *irg = current_ir_graph;
823 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
824 res->attr.bound.exc.pin_state = op_pin_state_pinned;
825 res = optimize_node(res);
826 IRN_VRFY_IRG(res, irg);
/* new_bd_Pin: pins `node` to `block`, keeping its mode. */
831 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
833 ir_graph *irg = current_ir_graph;
835 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
836 res = optimize_node(res);
837 IRN_VRFY_IRG(res, irg);
/* new_bd_ASM: inline assembler node.  Constraint and clobber arrays are
   copied onto the graph's obstack so the node owns stable copies
   independent of the caller's buffers. */
842 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
843 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
845 ir_graph *irg = current_ir_graph;
848 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
849 res->attr.assem.pin_state = op_pin_state_pinned;
850 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
851 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
852 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
853 res->attr.assem.asm_text = asm_text;
855 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
856 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
/* NOTE(review): the clobber array copy (memcpy of clobber) is not visible
   in this extraction -- confirm it exists upstream. */
858 res = optimize_node(res);
859 IRN_VRFY_IRG(res, irg);
863 /* --------------------------------------------- */
864 /* private interfaces, for professional use only */
865 /* --------------------------------------------- */
/* The new_rd_* family: identical to new_bd_* but takes an explicit irg.
   Each wrapper saves current_ir_graph, switches to irg, delegates to the
   bd constructor, and restores the previous graph. */
867 /* Constructs a Block with a fixed number of predecessors.
868 Does not set current_block. Can not be used with automatic
869 Phi node construction. */
871 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
872 ir_graph *rem = current_ir_graph;
875 current_ir_graph = irg;
876 res = new_bd_Block(db, arity, in);
877 current_ir_graph = rem;
883 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
884 ir_graph *rem = current_ir_graph;
887 current_ir_graph = irg;
888 res = new_bd_Start(db, block);
889 current_ir_graph = rem;
895 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
897 ir_graph *rem = current_ir_graph;
899 current_ir_graph = irg;
900 res = new_bd_End(db, block);
901 current_ir_graph = rem;
/* rd wrappers for Phi, Const family, Id, Proj, Conv, Cast and Tuple --
   all follow the save/switch/delegate/restore pattern described above. */
906 /* Creates a Phi node with all predecessors. Calling this constructor
907 is only allowed if the corresponding block is mature. */
909 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
911 ir_graph *rem = current_ir_graph;
913 current_ir_graph = irg;
914 res = new_bd_Phi(db, block,arity, in, mode);
915 current_ir_graph = rem;
921 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
923 ir_graph *rem = current_ir_graph;
925 current_ir_graph = irg;
926 res = new_bd_Const_type(db, block, mode, con, tp);
927 current_ir_graph = rem;
930 } /* new_rd_Const_type */
/* new_rd_Const: shorthand for a Const with the unknown type. */
933 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
935 ir_graph *rem = current_ir_graph;
937 current_ir_graph = irg;
938 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
939 current_ir_graph = rem;
945 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
946 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
947 } /* new_rd_Const_long */
950 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
952 ir_graph *rem = current_ir_graph;
954 current_ir_graph = irg;
955 res = new_bd_Id(db, block, val, mode);
956 current_ir_graph = rem;
962 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
965 ir_graph *rem = current_ir_graph;
967 current_ir_graph = irg;
968 res = new_bd_Proj(db, block, arg, mode, proj);
969 current_ir_graph = rem;
975 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
978 ir_graph *rem = current_ir_graph;
980 current_ir_graph = irg;
981 res = new_bd_defaultProj(db, block, arg, max_proj);
982 current_ir_graph = rem;
985 } /* new_rd_defaultProj */
/* new_rd_Conv: always builds a non-strict Conv (strict_flag = 0). */
988 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
990 ir_graph *rem = current_ir_graph;
992 current_ir_graph = irg;
993 res = new_bd_Conv(db, block, op, mode, 0);
994 current_ir_graph = rem;
1000 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1002 ir_graph *rem = current_ir_graph;
1004 current_ir_graph = irg;
1005 res = new_bd_Cast(db, block, op, to_tp);
1006 current_ir_graph = rem;
1012 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1014 ir_graph *rem = current_ir_graph;
1016 current_ir_graph = irg;
1017 res = new_bd_Tuple(db, block, arity, in);
1018 current_ir_graph = rem;
1021 } /* new_rd_Tuple */
/* Macro instantiations stamping out the arithmetic rd constructors
   (most NEW_RD_BINOP/UNOP lines are missing from this extraction). */
1028 NEW_RD_DIVOP(DivMod)
1041 NEW_RD_BINOP(Borrow)
/* Hand-written rd wrappers for Cmp and the control-flow constructors. */
1044 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1045 ir_node *op1, ir_node *op2) {
1047 ir_graph *rem = current_ir_graph;
1049 current_ir_graph = irg;
1050 res = new_bd_Cmp(db, block, op1, op2);
1051 current_ir_graph = rem;
1057 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1059 ir_graph *rem = current_ir_graph;
1061 current_ir_graph = irg;
1062 res = new_bd_Jmp(db, block);
1063 current_ir_graph = rem;
1069 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1071 ir_graph *rem = current_ir_graph;
1073 current_ir_graph = irg;
1074 res = new_bd_IJmp(db, block, tgt);
1075 current_ir_graph = rem;
1081 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1083 ir_graph *rem = current_ir_graph;
1085 current_ir_graph = irg;
1086 res = new_bd_Cond(db, block, c);
1087 current_ir_graph = rem;
/* rd wrappers for the memory-related constructors: Call, Return, Load,
   Store, Alloc, Free and Sel -- same delegate pattern throughout. */
1093 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1094 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1096 ir_graph *rem = current_ir_graph;
1098 current_ir_graph = irg;
1099 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1100 current_ir_graph = rem;
1106 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1107 ir_node *store, int arity, ir_node **in) {
1109 ir_graph *rem = current_ir_graph;
1111 current_ir_graph = irg;
1112 res = new_bd_Return(db, block, store, arity, in);
1113 current_ir_graph = rem;
1116 } /* new_rd_Return */
1119 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1120 ir_node *store, ir_node *adr, ir_mode *mode) {
1122 ir_graph *rem = current_ir_graph;
1124 current_ir_graph = irg;
1125 res = new_bd_Load(db, block, store, adr, mode);
1126 current_ir_graph = rem;
1132 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1133 ir_node *store, ir_node *adr, ir_node *val) {
1135 ir_graph *rem = current_ir_graph;
1137 current_ir_graph = irg;
1138 res = new_bd_Store(db, block, store, adr, val);
1139 current_ir_graph = rem;
1142 } /* new_rd_Store */
1145 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1146 ir_node *size, ir_type *alloc_type, where_alloc where) {
1148 ir_graph *rem = current_ir_graph;
1150 current_ir_graph = irg;
1151 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1152 current_ir_graph = rem;
1155 } /* new_rd_Alloc */
1158 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1159 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1161 ir_graph *rem = current_ir_graph;
1163 current_ir_graph = irg;
1164 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1165 current_ir_graph = rem;
/* new_rd_simpleSel: Sel of a non-array entity -- zero index inputs. */
1171 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1172 ir_node *store, ir_node *objptr, ir_entity *ent) {
1174 ir_graph *rem = current_ir_graph;
1176 current_ir_graph = irg;
1177 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1178 current_ir_graph = rem;
1181 } /* new_rd_simpleSel */
1184 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1185 int arity, ir_node **in, ir_entity *ent) {
1187 ir_graph *rem = current_ir_graph;
1189 current_ir_graph = irg;
1190 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1191 current_ir_graph = rem;
1197 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1198 symconst_kind symkind, ir_type *tp) {
1200 ir_graph *rem = current_ir_graph;
1202 current_ir_graph = irg;
1203 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1204 current_ir_graph = rem;
1207 } /* new_rd_SymConst_type */
1210 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1211 symconst_kind symkind) {
1212 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1214 } /* new_rd_SymConst */
/* Typed SymConst convenience constructors: each fills the matching union
   member of symconst_symbol and builds the node in the start block. */
1216 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1217 symconst_symbol sym;
1218 sym.entity_p = symbol;
1219 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1220 } /* new_rd_SymConst_addr_ent */
1222 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1223 symconst_symbol sym;
1224 sym.entity_p = symbol;
1225 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1226 } /* new_rd_SymConst_ofs_ent */
1228 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1229 symconst_symbol sym;
1230 sym.ident_p = symbol;
1231 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1232 } /* new_rd_SymConst_addr_name */
1234 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1235 symconst_symbol sym;
1236 sym.type_p = symbol;
1237 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1238 } /* new_rd_SymConst_type_tag */
1240 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1241 symconst_symbol sym;
1242 sym.type_p = symbol;
1243 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1244 } /* new_rd_SymConst_size */
1246 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1247 symconst_symbol sym;
1248 sym.type_p = symbol;
1249 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1250 } /* new_rd_SymConst_align */
/* new_rd_Sync: builds an empty Sync, then appends the `arity` given
   memory predecessors one by one. */
1253 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1255 ir_graph *rem = current_ir_graph;
1258 current_ir_graph = irg;
1259 res = new_bd_Sync(db, block);
1260 current_ir_graph = rem;
1262 for (i = 0; i < arity; ++i)
1263 add_Sync_pred(res, in[i]);
/* new_rd_Bad: Bad is unique per graph -- just return the cached node. */
1269 new_rd_Bad(ir_graph *irg) {
1270 return get_irg_bad(irg);
1274 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1276 ir_graph *rem = current_ir_graph;
1278 current_ir_graph = irg;
1279 res = new_bd_Confirm(db, block, val, bound, cmp);
1280 current_ir_graph = rem;
1283 } /* new_rd_Confirm */
1286 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1288 ir_graph *rem = current_ir_graph;
1290 current_ir_graph = irg;
1291 res = new_bd_Unknown(m);
1292 current_ir_graph = rem;
1295 } /* new_rd_Unknown */
1298 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1300 ir_graph *rem = current_ir_graph;
1302 current_ir_graph = irg;
1303 res = new_bd_CallBegin(db, block, call);
1304 current_ir_graph = rem;
1307 } /* new_rd_CallBegin */
/* new_rd_EndReg / new_rd_EndExcept: unlike the other rd wrappers these
   build the node directly with the explicit irg instead of delegating,
   so no current_ir_graph save/restore is needed. */
1310 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1313 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1314 set_irg_end_reg(irg, res);
1315 IRN_VRFY_IRG(res, irg);
1317 } /* new_rd_EndReg */
1320 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1323 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1324 set_irg_end_except(irg, res);
1325 IRN_VRFY_IRG (res, irg);
1327 } /* new_rd_EndExcept */
1330 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1332 ir_graph *rem = current_ir_graph;
1334 current_ir_graph = irg;
1335 res = new_bd_Break(db, block);
1336 current_ir_graph = rem;
1339 } /* new_rd_Break */
1342 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1345 ir_graph *rem = current_ir_graph;
1347 current_ir_graph = irg;
1348 res = new_bd_Filter(db, block, arg, mode, proj);
1349 current_ir_graph = rem;
1352 } /* new_rd_Filter */
/* new_rd_NoMem: NoMem is unique per graph -- return the cached node. */
1355 new_rd_NoMem(ir_graph *irg) {
1356 return get_irg_no_mem(irg);
1357 } /* new_rd_NoMem */
/* rd wrappers for Mux, Psi, CopyB, InstOf, Raise, Bound, Pin and ASM --
   all follow the save/switch/delegate/restore pattern. */
1360 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1361 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1363 ir_graph *rem = current_ir_graph;
1365 current_ir_graph = irg;
1366 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1367 current_ir_graph = rem;
1373 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1374 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1376 ir_graph *rem = current_ir_graph;
1378 current_ir_graph = irg;
1379 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1380 current_ir_graph = rem;
1385 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1386 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1388 ir_graph *rem = current_ir_graph;
1390 current_ir_graph = irg;
1391 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1392 current_ir_graph = rem;
1395 } /* new_rd_CopyB */
1398 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1399 ir_node *objptr, ir_type *type) {
1401 ir_graph *rem = current_ir_graph;
1403 current_ir_graph = irg;
1404 res = new_bd_InstOf(db, block, store, objptr, type);
1405 current_ir_graph = rem;
1408 } /* new_rd_InstOf */
1411 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1413 ir_graph *rem = current_ir_graph;
1415 current_ir_graph = irg;
1416 res = new_bd_Raise(db, block, store, obj);
1417 current_ir_graph = rem;
1420 } /* new_rd_Raise */
1422 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1423 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1425 ir_graph *rem = current_ir_graph;
1427 current_ir_graph = irg;
1428 res = new_bd_Bound(db, block, store, idx, lower, upper);
1429 current_ir_graph = rem;
1432 } /* new_rd_Bound */
1434 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1436 ir_graph *rem = current_ir_graph;
1438 current_ir_graph = irg;
1439 res = new_bd_Pin(db, block, node);
1440 current_ir_graph = rem;
1445 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1446 int arity, ir_node *in[], ir_asm_constraint *inputs,
1447 int n_outs, ir_asm_constraint *outputs,
1448 int n_clobber, ident *clobber[], ident *asm_text) {
1450 ir_graph *rem = current_ir_graph;
1452 current_ir_graph = irg;
1453 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1454 current_ir_graph = rem;
/* -- "r" constructor family: convenience wrappers without debug information.
   Each function simply forwards to the matching new_rd_* constructor, passing
   NULL as the dbg_info argument.  (The closing braces of these one-line
   wrappers fall on lines elided from this excerpt.) */
1460 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1461 return new_rd_Block(NULL, irg, arity, in);
1463 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1464 return new_rd_Start(NULL, irg, block);
1466 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1467 return new_rd_End(NULL, irg, block);
1469 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1470 return new_rd_Jmp(NULL, irg, block);
1472 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1473 return new_rd_IJmp(NULL, irg, block, tgt);
1475 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1476 return new_rd_Cond(NULL, irg, block, c);
1478 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1479 ir_node *store, int arity, ir_node **in) {
1480 return new_rd_Return(NULL, irg, block, store, arity, in);
1482 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1483 ir_mode *mode, tarval *con) {
1484 return new_rd_Const(NULL, irg, block, mode, con);
1486 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1487 ir_mode *mode, long value) {
1488 return new_rd_Const_long(NULL, irg, block, mode, value);
1490 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1491 ir_mode *mode, tarval *con, ir_type *tp) {
1492 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1494 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1495 symconst_symbol value, symconst_kind symkind) {
1496 return new_rd_SymConst(NULL, irg, block, value, symkind);
/* simpleSel: Sel without array indices (n_index == 0). */
1498 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1499 ir_node *objptr, ir_entity *ent) {
1500 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1502 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1503 ir_node *objptr, int n_index, ir_node **index,
1505 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1507 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1508 ir_node *callee, int arity, ir_node **in,
1510 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1512 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1513 ir_node *op1, ir_node *op2, ir_mode *mode) {
1514 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1516 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1517 ir_node *op1, ir_node *op2, ir_mode *mode) {
1518 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1520 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1521 ir_node *op, ir_mode *mode) {
1522 return new_rd_Minus(NULL, irg, block, op, mode);
1524 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1525 ir_node *op1, ir_node *op2, ir_mode *mode) {
1526 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1528 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1529 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1530 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1532 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1533 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1534 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1536 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1537 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1538 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1540 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1541 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1542 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1544 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1545 ir_node *op, ir_mode *mode) {
1546 return new_rd_Abs(NULL, irg, block, op, mode);
1548 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1549 ir_node *op1, ir_node *op2, ir_mode *mode) {
1550 return new_rd_And(NULL, irg, block, op1, op2, mode);
1552 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1553 ir_node *op1, ir_node *op2, ir_mode *mode) {
1554 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1556 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1557 ir_node *op1, ir_node *op2, ir_mode *mode) {
1558 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1560 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1561 ir_node *op, ir_mode *mode) {
1562 return new_rd_Not(NULL, irg, block, op, mode);
1564 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1565 ir_node *op, ir_node *k, ir_mode *mode) {
1566 return new_rd_Shl(NULL, irg, block, op, k, mode);
1568 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1569 ir_node *op, ir_node *k, ir_mode *mode) {
1570 return new_rd_Shr(NULL, irg, block, op, k, mode);
1572 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1573 ir_node *op, ir_node *k, ir_mode *mode) {
1574 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1576 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1577 ir_node *op, ir_node *k, ir_mode *mode) {
1578 return new_rd_Rot(NULL, irg, block, op, k, mode);
1580 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1581 ir_node *op, ir_node *k, ir_mode *mode) {
1582 return new_rd_Carry(NULL, irg, block, op, k, mode);
1584 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1585 ir_node *op, ir_node *k, ir_mode *mode) {
1586 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1588 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1589 ir_node *op1, ir_node *op2) {
1590 return new_rd_Cmp(NULL, irg, block, op1, op2);
1592 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1593 ir_node *op, ir_mode *mode) {
1594 return new_rd_Conv(NULL, irg, block, op, mode);
1596 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1597 return new_rd_Cast(NULL, irg, block, op, to_tp);
1599 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1600 ir_node **in, ir_mode *mode) {
1601 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1603 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1604 ir_node *store, ir_node *adr, ir_mode *mode) {
1605 return new_rd_Load(NULL, irg, block, store, adr, mode);
1607 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1608 ir_node *store, ir_node *adr, ir_node *val) {
1609 return new_rd_Store(NULL, irg, block, store, adr, val);
1611 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1612 ir_node *size, ir_type *alloc_type, where_alloc where) {
1613 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1615 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1616 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1617 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1619 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1620 return new_rd_Sync(NULL, irg, block, arity, in);
1622 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1623 ir_mode *mode, long proj) {
1624 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1626 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1628 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1630 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1631 int arity, ir_node **in) {
1632 return new_rd_Tuple(NULL, irg, block, arity, in );
1634 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1635 ir_node *val, ir_mode *mode) {
1636 return new_rd_Id(NULL, irg, block, val, mode);
1638 ir_node *new_r_Bad(ir_graph *irg) {
1639 return new_rd_Bad(irg);
1641 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1642 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1644 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1645 return new_rd_Unknown(irg, m);
1647 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1648 return new_rd_CallBegin(NULL, irg, block, callee);
1650 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1651 return new_rd_EndReg(NULL, irg, block);
1653 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1654 return new_rd_EndExcept(NULL, irg, block);
1656 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1657 return new_rd_Break(NULL, irg, block);
1659 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1660 ir_mode *mode, long proj) {
1661 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1663 ir_node *new_r_NoMem(ir_graph *irg) {
1664 return new_rd_NoMem(irg);
1666 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1667 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1668 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1670 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1671 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1672 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1674 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1675 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1676 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1678 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1680 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1682 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1683 ir_node *store, ir_node *obj) {
1684 return new_rd_Raise(NULL, irg, block, store, obj);
1686 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1687 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1688 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1690 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1691 return new_rd_Pin(NULL, irg, block, node);
1693 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1694 int arity, ir_node *in[], ir_asm_constraint *inputs,
1695 int n_outs, ir_asm_constraint *outputs,
1696 int n_clobber, ident *clobber[], ident *asm_text) {
1697 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1700 /** ********************/
1701 /** public interfaces */
1702 /** construction tools */
1706 * - create a new Start node in the current block
1708 * @return s - pointer to the created Start node
/* new_d_Start: creates a Start node (mode_T, no predecessors) in the current
   block of current_ir_graph, then optimizes and verifies it. */
1713 new_d_Start(dbg_info *db) {
1716 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1717 op_Start, mode_T, 0, NULL);
1719 res = optimize_node(res);
1720 IRN_VRFY_IRG(res, current_ir_graph);
/* new_d_End: creates the End node (mode_X, dynamic arity -1) in the current
   block of current_ir_graph, then optimizes and verifies it. */
1725 new_d_End(dbg_info *db) {
1727 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1728 op_End, mode_X, -1, NULL);
1729 res = optimize_node(res);
1730 IRN_VRFY_IRG(res, current_ir_graph);
1735 /* Constructs a Block with a fixed number of predecessors.
1736 Does set current_block. Can be used with automatic Phi
1737 node construction. */
1739 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1742 int has_unknown = 0;
1744 res = new_bd_Block(db, arity, in);
1746 /* Create and initialize array for Phi-node construction. */
1747 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1748 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1749 current_ir_graph->n_loc);
1750 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan the predecessors for Unknown nodes; a block with Unknown
   predecessors must not be optimized away (see has_unknown below). */
1753 for (i = arity-1; i >= 0; i--)
1754 if (get_irn_op(in[i]) == op_Unknown) {
1759 if (!has_unknown) res = optimize_node(res);
1760 current_ir_graph->current_block = res;
1762 IRN_VRFY_IRG(res, current_ir_graph);
1767 /* ***********************************************************************/
1768 /* Methods necessary for automatic Phi node creation */
1770 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1771 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1772 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1773 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1775 Call Graph: ( A ---> B == A "calls" B)
1777 get_value mature_immBlock
1785 get_r_value_internal |
1789 new_rd_Phi0 new_rd_Phi_in
1791 * *************************************************************************** */
1793 /** Creates a Phi node with 0 predecessors. */
1794 static INLINE ir_node *
1795 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
/* Phi0 nodes are placeholders in immature blocks; they are completed
   (given real predecessors) by mature_immBlock. */
1798 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1799 IRN_VRFY_IRG(res, irg);
1803 /* There are two implementations of the Phi node construction. The first
1804 is faster, but does not work for blocks with more than 2 predecessors.
1805 The second works always but is slower and causes more unnecessary Phi
1807 Select the implementations by the following preprocessor flag set in
1809 #if USE_FAST_PHI_CONSTRUCTION
1811 /* This is a stack used for allocating and deallocating nodes in
1812 new_rd_Phi_in. The original implementation used the obstack
1813 to model this stack, now it is explicit. This reduces side effects.
1815 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in stack (flexible array of ir_node*). */
1817 new_Phi_in_stack(void) {
1820 res = (Phi_in_stack *) xmalloc(sizeof(Phi_in_stack));
1822 res->stack = NEW_ARR_F(ir_node *, 0);
1826 } /* new_Phi_in_stack */
/* Releases the stack's array (and, presumably, s itself on an elided line). */
1829 free_Phi_in_stack(Phi_in_stack *s) {
1830 DEL_ARR_F(s->stack);
1832 } /* free_Phi_in_stack */
/* Returns a discarded Phi node to the per-graph Phi_in stack so it can be
   reused by alloc_or_pop_from_Phi_in_stack. Appends if the stack array is
   full, otherwise overwrites the slot at the current position. */
1835 free_to_Phi_in_stack(ir_node *phi) {
1836 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1837 current_ir_graph->Phi_in_stack->pos)
1838 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1840 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1842 (current_ir_graph->Phi_in_stack->pos)++;
1843 } /* free_to_Phi_in_stack */
/* Either pops a previously freed Phi node from the per-graph Phi_in stack and
   re-initializes it, or allocates a fresh Phi node when the stack is empty. */
1845 static INLINE ir_node *
1846 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1847 int arity, ir_node **in) {
1849 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1850 int pos = current_ir_graph->Phi_in_stack->pos;
1854 /* We need to allocate a new node */
/* NOTE(review): 'db' is not declared in this function's visible parameters —
   probably NULL or declared on an elided line; verify against the full source. */
1855 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1856 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1858 /* reuse the old node and initialize it again. */
1861 assert(res->kind == k_ir_node);
1862 assert(res->op == op_Phi);
1867 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* in[0] is the block predecessor slot, hence arity+1 entries. */
1868 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
1870 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1872 (current_ir_graph->Phi_in_stack->pos)--;
1875 } /* alloc_or_pop_from_Phi_in_stack */
1876 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1879 * Creates a Phi node with a given, fixed array **in of predecessors.
1880 * If the Phi node is unnecessary, as the same value reaches the block
1881 * through all control flow paths, it is eliminated and the value
1882 * returned directly. This constructor is only intended for use in
1883 * the automatic Phi node generation triggered by get_value or mature.
1884 * The implementation is quite tricky and depends on the fact, that
1885 * the nodes are allocated on a stack:
1886 * The in array contains predecessors and NULLs. The NULLs appear,
1887 * if get_r_value_internal, that computed the predecessors, reached
1888 * the same block on two paths. In this case the same value reaches
1889 * this block on both paths, there is no definition in between. We need
1890 not allocate a Phi where these paths merge, but we have to communicate
1891 * this fact to the caller. This happens by returning a pointer to the
1892 * node the caller _will_ allocate. (Yes, we predict the address. We can
1893 * do so because the nodes are allocated on the obstack.) The caller then
1894 * finds a pointer to itself and, when this routine is called again,
1895 * eliminates itself.
/* Fast-construction variant of new_rd_Phi_in (see block comment above):
   builds a Phi from a possibly NULL-containing in-array and eliminates it
   when at most one distinct predecessor remains. */
1897 static INLINE ir_node *
1898 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins) {
1900 ir_node *res, *known;
1902 /* Allocate a new node on the obstack. This can return a node to
1903 which some of the pointers in the in-array already point.
1904 Attention: the constructor copies the in array, i.e., the later
1905 changes to the array in this routine do not affect the
1906 constructed node! If the in array contains NULLs, there will be
1907 missing predecessors in the returned node. Is this a possible
1908 internal state of the Phi node generation? */
1909 #if USE_EXPLICIT_PHI_IN_STACK
1910 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1912 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1913 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1916 /* The in-array can contain NULLs. These were returned by
1917 get_r_value_internal if it reached the same block/definition on a
1918 second path. The NULLs are replaced by the node itself to
1919 simplify the test in the next loop. */
1920 for (i = 0; i < ins; ++i) {
1925 /* This loop checks whether the Phi has more than one predecessor.
1926 If so, it is a real Phi node and we break the loop. Else the Phi
1927 node merges the same definition on several paths and therefore is
1929 for (i = 0; i < ins; ++i) {
1930 if (in[i] == res || in[i] == known)
1939 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1941 #if USE_EXPLICIT_PHI_IN_STACK
1942 free_to_Phi_in_stack(res);
/* Discard the speculatively allocated node again; it sits on top of the
   obstack, so obstack_free gives the memory back. */
1944 edges_node_deleted(res, current_ir_graph);
1945 obstack_free(current_ir_graph->obst, res);
1949 res = optimize_node (res);
1950 IRN_VRFY_IRG(res, irg);
1953 /* return the pointer to the Phi node. This node might be deallocated! */
1955 } /* new_rd_Phi_in */
1958 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1961 * Allocates and returns this node. The routine called to allocate the
1962 * node might optimize it away and return a real value, or even a pointer
1963 * to a deallocated Phi node on top of the obstack!
1964 * This function is called with an in-array of proper size.
/* Collects the operands of a Phi for (block, pos) from all predecessor blocks
   into nin and creates the node via new_rd_Phi_in.  nin must have ins slots. */
1967 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
1968 ir_node *prevBlock, *res;
1971 /* This loop goes to all predecessor blocks of the block the Phi node is in
1972 and there finds the operands of the Phi node by calling
1973 get_r_value_internal. */
1974 for (i = 1; i <= ins; ++i) {
1975 assert (block->in[i]);
1976 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1978 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1981 /* After collecting all predecessors into the array nin a new Phi node
1982 with these predecessors is created. This constructor contains an
1983 optimization: If all predecessors of the Phi node are identical it
1984 returns the only operand instead of a new Phi node. If the value
1985 passes two different control flow edges without being defined, and
1986 this is the second path treated, a pointer to the node that will be
1987 allocated for the first path (recursion) is returned. We already
1988 know the address of this node, as it is the next node to be allocated
1989 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1990 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1992 /* Now we know the value for "pos" and can enter it in the array with
1993 all known local variables. Attention: this might be a pointer to
1994 a node, that later will be allocated!!! See new_rd_Phi_in().
1995 If this is called in mature, after some set_value() in the same block,
1996 the proper value must not be overwritten:
1998 get_value (makes Phi0, puts it into graph_arr)
1999 set_value (overwrites Phi0 in graph_arr)
2000 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2003 if (!block->attr.block.graph_arr[pos]) {
2004 block->attr.block.graph_arr[pos] = res;
2006 /* printf(" value already computed by %s\n",
2007 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2014 * This function returns the last definition of a variable. In case
2015 * this variable was last defined in a previous block, Phi nodes are
2016 * inserted. If the part of the firm graph containing the definition
2017 * is not yet constructed, a dummy Phi node is returned.
2020 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2023 /* There are 4 cases to treat.
2025 1. The block is not mature and we visit it the first time. We can not
2026 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2027 predecessors is returned. This node is added to the linked list (field
2028 "link") of the containing block to be completed when this block is
2029 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2032 2. The value is already known in this block, graph_arr[pos] is set and we
2033 visit the block the first time. We can return the value without
2034 creating any new nodes.
2036 3. The block is mature and we visit it the first time. A Phi node needs
2037 to be created (phi_merge). If the Phi is not needed, as all its
2038 operands are the same value reaching the block through different
2039 paths, it's optimized away and the value itself is returned.
2041 4. The block is mature, and we visit it the second time. Now two
2042 subcases are possible:
2043 * The value was computed completely the last time we were here. This
2044 is the case if there is no loop. We can return the proper value.
2045 * The recursion that visited this node and set the flag did not
2046 return yet. We are computing a value in a loop and need to
2047 break the recursion without knowing the result yet.
2048 @@@ strange case. Straight forward we would create a Phi before
2049 starting the computation of its predecessors. In this case we will
2050 find a Phi here in any case. The problem is that this implementation
2051 only creates a Phi after computing the predecessors, so that it is
2052 hard to compute self references of this Phi. @@@
2053 There is no simple check for the second subcase. Therefore we check
2054 for a second visit and treat all such cases as the second subcase.
2055 Anyways, the basic situation is the same: we reached a block
2056 on two paths without finding a definition of the value: No Phi
2057 nodes are needed on both paths.
2058 We return this information "Two paths, no Phi needed" by a very tricky
2059 implementation that relies on the fact that an obstack is a stack and
2060 will return a node with the same address on different allocations.
2061 Look also at phi_merge and new_rd_phi_in to understand this.
2062 @@@ Unfortunately this does not work, see testprogram
2063 three_cfpred_example.
2067 /* case 4 -- already visited. */
2068 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2070 /* visited the first time */
2071 set_irn_visited(block, get_irg_visited(current_ir_graph));
2073 /* Get the local valid value */
2074 res = block->attr.block.graph_arr[pos];
2076 /* case 2 -- If the value is actually computed, return it. */
2077 if (res) return res;
2079 if (block->attr.block.is_matured) { /* case 3 */
2081 /* The Phi has the same amount of ins as the corresponding block. */
2082 int ins = get_irn_arity(block);
2084 NEW_ARR_A(ir_node *, nin, ins);
2086 /* Phi merge collects the predecessors and then creates a node. */
2087 res = phi_merge(block, pos, mode, nin, ins);
2089 } else { /* case 1 */
2090 /* The block is not mature, we don't know how many in's are needed. A Phi
2091 with zero predecessors is created. Such a Phi node is called Phi0
2092 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2093 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2095 The Phi0 has to remember the pos of its internal value. If the real
2096 Phi is computed, pos is used to update the array with the local
2099 res = new_rd_Phi0(current_ir_graph, block, mode);
2100 res->attr.phi0_pos = pos;
2101 res->link = block->link;
2105 /* If we get here, the frontend missed a use-before-definition error */
2108 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2109 assert(mode->code >= irm_F && mode->code <= irm_P);
2110 res = new_rd_Const(NULL, current_ir_graph, block, mode, tarval_mode_null[mode->code]);
2113 /* The local valid value is available now. */
2114 block->attr.block.graph_arr[pos] = res;
2117 } /* get_r_value_internal */
2122 it starts the recursion. This causes an Id at the entry of
2123 every block that has no definition of the value! **/
2125 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy stack management for the slow Phi construction variant: the explicit
   Phi_in stack is not used on this path, so these are no-ops. */
2127 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2128 void free_Phi_in_stack(Phi_in_stack *s) {}
/* Slow-construction variant of new_rd_Phi_in: additionally receives the Phi0
   placeholder (phi0) this Phi will replace, skips Id and Bad predecessors, and
   keeps memory Phis alive via the End node. */
2131 static INLINE ir_node *
2132 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2133 ir_node **in, int ins, ir_node *phi0) {
2135 ir_node *res, *known;
2137 /* Allocate a new node on the obstack. The allocation copies the in
2139 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2140 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2142 /* This loop checks whether the Phi has more than one predecessor.
2143 If so, it is a real Phi node and we break the loop. Else the
2144 Phi node merges the same definition on several paths and therefore
2145 is not needed. Don't consider Bad nodes! */
2147 for (i=0; i < ins; ++i)
2151 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2153 /* Optimize self referencing Phis: We can't detect them yet properly, as
2154 they still refer to the Phi0 they will replace. So replace right now. */
2155 if (phi0 && in[i] == phi0) in[i] = res;
2157 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2165 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Free the speculatively allocated Phi (it is on top of the obstack). */
2168 edges_node_deleted(res, current_ir_graph);
2169 obstack_free (current_ir_graph->obst, res);
2170 if (is_Phi(known)) {
2171 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2172 order, an enclosing Phi node may become superfluous. */
2173 res = optimize_in_place_2(known);
2175 exchange(known, res);
2181 /* An undefined value, e.g., in unreachable code. */
2185 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2186 IRN_VRFY_IRG(res, irg);
2187 /* Memory Phis in endless loops must be kept alive.
2188 As we can't distinguish these easily we keep all of them alive. */
2189 if ((res->op == op_Phi) && (mode == mode_M))
2190 add_End_keepalive(get_irg_end(irg), res);
2194 } /* new_rd_Phi_in */
2197 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2199 #if PRECISE_EXC_CONTEXT
2201 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2204 * Construct a new frag_array for node n.
2205 * Copy the content from the current graph_arr of the corresponding block:
2206 * this is the current state.
2207 * Set ProjM(n) as current memory state.
2208 * Further the last entry in frag_arr of current block points to n. This
2209 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2211 static INLINE ir_node **new_frag_arr(ir_node *n) {
2215 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2216 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2217 sizeof(ir_node *)*current_ir_graph->n_loc);
2219 /* turn off optimization before allocating Proj nodes, as res isn't
2221 opt = get_opt_optimize(); set_optimize(0);
2222 /* Here we rely on the fact that all frag ops have Memory as first result! */
2223 if (get_irn_op(n) == op_Call)
2224 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2225 else if (get_irn_op(n) == op_CopyB)
2226 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops share the same memory proj number; the assert
   documents (and checks) that assumption before using pn_Alloc_M for all. */
2228 assert((pn_Quot_M == pn_DivMod_M) &&
2229 (pn_Quot_M == pn_Div_M) &&
2230 (pn_Quot_M == pn_Mod_M) &&
2231 (pn_Quot_M == pn_Load_M) &&
2232 (pn_Quot_M == pn_Store_M) &&
2233 (pn_Quot_M == pn_Alloc_M) &&
2234 (pn_Quot_M == pn_Bound_M));
2235 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Remember n as the newest fragile op of the current block (chain head). */
2239 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2241 } /* new_frag_arr */
2244 * Returns the frag_arr from a node.
/* Dispatches on the opcode to the attribute union member that stores the
   frag_arr.  NOTE(review): the switch's case labels fall on lines elided
   from this excerpt; the returns below correspond to Call, Alloc, Load,
   Store and a default/except case respectively — verify against full source. */
2246 static INLINE ir_node **get_frag_arr(ir_node *n) {
2247 switch (get_irn_opcode(n)) {
2249 return n->attr.call.exc.frag_arr;
2251 return n->attr.alloc.exc.frag_arr;
2253 return n->attr.load.exc.frag_arr;
2255 return n->attr.store.exc.frag_arr;
2257 return n->attr.except.frag_arr;
2259 } /* get_frag_arr */
/* Writes val into slot pos of frag_arr (only if still unset) and propagates it
   along the chain of fragile ops linked through the n_loc-1 sentinel slot.
   Two variants are present (a recursive one and a bounded iterative one);
   the #if/#else lines selecting between them are elided from this excerpt. */
2262 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2264 if (!frag_arr[pos]) frag_arr[pos] = val;
2265 if (frag_arr[current_ir_graph->n_loc - 1]) {
2266 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2267 assert(arr != frag_arr && "Endless recursion detected");
2268 set_frag_value(arr, pos, val);
/* Iterative variant: follow the frag-op chain with a hard iteration cap
   instead of recursing. */
2273 for (i = 0; i < 1000; ++i) {
2274 if (!frag_arr[pos]) {
2275 frag_arr[pos] = val;
2277 if (frag_arr[current_ir_graph->n_loc - 1]) {
2278 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2284 assert(0 && "potential endless recursion");
2286 } /* set_frag_value */
/* Like get_r_value_internal, but looks the value up in the frag_arr of the
   fragile control-flow op cfOp first (precise exception context). */
2289 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2293 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2295 frag_arr = get_frag_arr(cfOp);
2296 res = frag_arr[pos];
2298 if (block->attr.block.graph_arr[pos]) {
2299 /* There was a set_value() after the cfOp and no get_value before that
2300 set_value(). We must build a Phi node now. */
2301 if (block->attr.block.is_matured) {
2302 int ins = get_irn_arity(block);
2304 NEW_ARR_A(ir_node *, nin, ins);
2305 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: create a Phi0 placeholder and queue it on the block. */
2307 res = new_rd_Phi0(current_ir_graph, block, mode);
/* NOTE(review): this writes attr.phi0.pos while get_r_value_internal uses
   attr.phi0_pos — one of the two spellings is likely stale; verify. */
2308 res->attr.phi0.pos = pos;
2309 res->link = block->link;
2313 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2314 but this should be better: (remove comment if this works) */
2315 /* It's a Phi, we can write this into all graph_arrs with NULL */
2316 set_frag_value(block->attr.block.graph_arr, pos, res);
2318 res = get_r_value_internal(block, pos, mode);
2319 set_frag_value(block->attr.block.graph_arr, pos, res);
2323 } /* get_r_frag_value_internal */
2324 #endif /* PRECISE_EXC_CONTEXT */
2327 * Computes the predecessors for the real phi node, and then
2328 * allocates and returns this node. The routine called to allocate the
2329 * node might optimize it away and return a real value.
2330 * This function must be called with an in-array of proper size.
/**
 * Collect the predecessor values of value position `pos` for a Phi node in
 * `block` into `nin` (size `ins` == arity of the block), then build the Phi
 * via new_rd_Phi_in(), which may fold it to a single value.  A Phi0
 * placeholder is recorded in graph_arr first to break cyclic recursion.
 * NOTE(review): this listing is elided in places (line numbers jump), so
 * some declarations/braces are not visible; comments describe only the
 * visible code.
 */
2333 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2334 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2337 /* If this block has no value at pos create a Phi0 and remember it
2338 in graph_arr to break recursions.
2339 Else we may not set graph_arr as there a later value is remembered. */
2341 if (!block->attr.block.graph_arr[pos]) {
2342 if (block == get_irg_start_block(current_ir_graph)) {
2343 /* Collapsing to Bad tarvals is no good idea.
2344 So we call a user-supplied routine here that deals with this case as
2345 appropriate for the given language. Sorrily the only help we can give
2346 here is the position.
2348 Even if all variables are defined before use, it can happen that
2349 we get to the start block, if a Cond has been replaced by a tuple
2350 (bad, jmp). In this case we call the function needlessly, eventually
2351 generating an non existent error.
2352 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2355 if (default_initialize_local_variable) {
2356 ir_node *rem = get_cur_block();
2358 set_cur_block(block);
2359 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2363 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2364 /* We don't need to care about exception ops in the start block.
2365 There are none by definition. */
2366 return block->attr.block.graph_arr[pos];
2368 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2369 block->attr.block.graph_arr[pos] = phi0;
2370 #if PRECISE_EXC_CONTEXT
2371 if (get_opt_precise_exc_context()) {
2372 /* Set graph_arr for fragile ops. Also here we should break recursion.
2373 We could choose a cyclic path through an cfop. But the recursion would
2374 break at some point. */
2375 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2381 /* This loop goes to all predecessor blocks of the block the Phi node
2382 is in and there finds the operands of the Phi node by calling
2383 get_r_value_internal. */
2384 for (i = 1; i <= ins; ++i) {
2385 prevCfOp = skip_Proj(block->in[i]);
2387 if (is_Bad(prevCfOp)) {
2388 /* In case a Cond has been optimized we would get right to the start block
2389 with an invalid definition. */
2390 nin[i-1] = new_Bad();
2393 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2395 if (!is_Bad(prevBlock)) {
2396 #if PRECISE_EXC_CONTEXT
2397 if (get_opt_precise_exc_context() &&
2398 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2399 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2400 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2403 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2405 nin[i-1] = new_Bad();
2409 /* We want to pass the Phi0 node to the constructor: this finds additional
2410 optimization possibilities.
2411 The Phi0 node either is allocated in this function, or it comes from
2412 a former call to get_r_value_internal. In this case we may not yet
2413 exchange phi0, as this is done in mature_immBlock. */
2415 phi0_all = block->attr.block.graph_arr[pos];
2416 if (!((get_irn_op(phi0_all) == op_Phi) &&
2417 (get_irn_arity(phi0_all) == 0) &&
2418 (get_nodes_block(phi0_all) == block)))
2424 /* After collecting all predecessors into the array nin a new Phi node
2425 with these predecessors is created. This constructor contains an
2426 optimization: If all predecessors of the Phi node are identical it
2427 returns the only operand instead of a new Phi node. */
2428 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all)
2430 /* In case we allocated a Phi0 node at the beginning of this procedure,
2431 we need to exchange this Phi0 with the real Phi. */
2433 exchange(phi0, res);
2434 block->attr.block.graph_arr[pos] = res;
2435 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2436 only an optimization. */
2443 * This function returns the last definition of a variable. In case
2444 * this variable was last defined in a previous block, Phi nodes are
2445 * inserted. If the part of the firm graph containing the definition
2446 * is not yet constructed, a dummy Phi node is returned.
/**
 * Return the last definition of value `pos` in `block`, inserting Phi
 * nodes where the definition lies in a predecessor block.  While the
 * block is still immature, a Phi0 placeholder is returned instead.
 * NOTE(review): the listing is elided in places; comments describe only
 * the visible code.
 */
2449 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2451 /* There are 4 cases to treat.
2453 1. The block is not mature and we visit it the first time. We can not
2454 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2455 predecessors is returned. This node is added to the linked list (field
2456 "link") of the containing block to be completed when this block is
2457 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2460 2. The value is already known in this block, graph_arr[pos] is set and we
2461 visit the block the first time. We can return the value without
2462 creating any new nodes.
2464 3. The block is mature and we visit it the first time. A Phi node needs
2465 to be created (phi_merge). If the Phi is not needed, as all it's
2466 operands are the same value reaching the block through different
2467 paths, it's optimized away and the value itself is returned.
2469 4. The block is mature, and we visit it the second time. Now two
2470 subcases are possible:
2471 * The value was computed completely the last time we were here. This
2472 is the case if there is no loop. We can return the proper value.
2473 * The recursion that visited this node and set the flag did not
2474 return yet. We are computing a value in a loop and need to
2475 break the recursion. This case only happens if we visited
2476 the same block with phi_merge before, which inserted a Phi0.
2477 So we return the Phi0.
2480 /* case 4 -- already visited. */
2481 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2482 /* As phi_merge allocates a Phi0 this value is always defined. Here
2483 is the critical difference of the two algorithms. */
2484 assert(block->attr.block.graph_arr[pos]);
2485 return block->attr.block.graph_arr[pos];
2488 /* visited the first time */
2489 set_irn_visited(block, get_irg_visited(current_ir_graph));
2491 /* Get the local valid value */
2492 res = block->attr.block.graph_arr[pos];
2494 /* case 2 -- If the value is actually computed, return it. */
2495 if (res) { return res; };
2497 if (block->attr.block.is_matured) { /* case 3 */
2499 /* The Phi has the same amount of ins as the corresponding block. */
2500 int ins = get_irn_arity(block);
2502 NEW_ARR_A (ir_node *, nin, ins);
2504 /* Phi merge collects the predecessors and then creates a node. */
2505 res = phi_merge (block, pos, mode, nin, ins);
2507 } else { /* case 1 */
2508 /* The block is not mature, we don't know how many in's are needed. A Phi
2509 with zero predecessors is created. Such a Phi node is called Phi0
2510 node. The Phi0 is then added to the list of Phi0 nodes in this block
2511 to be matured by mature_immBlock later.
2512 The Phi0 has to remember the pos of it's internal value. If the real
2513 Phi is computed, pos is used to update the array with the local
2515 res = new_rd_Phi0(current_ir_graph, block, mode);
2516 res->attr.phi0.pos = pos;
2517 res->link = block->link;
2521 /* If we get here, the frontend missed a use-before-definition error */
2524 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2525 assert(mode->code >= irm_F && mode->code <= irm_P);
2526 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2527 get_mode_null(mode));
2530 /* The local valid value is available now. */
2531 block->attr.block.graph_arr[pos] = res;
2534 } /* get_r_value_internal */
2536 #endif /* USE_FAST_PHI_CONSTRUCTION */
2538 /* ************************************************************************** */
2541 * Finalize a Block node, when all control flows are known.
2542 * Acceptable parameters are only Block nodes.
/**
 * Finalize an immature block once all control-flow predecessors are known:
 * allocate the backedge array, mature all pending Phi0 nodes hanging on the
 * block's link list (each is exchanged for a real Phi via phi_merge), mark
 * the block matured and optimize it in place.
 * NOTE(review): local declarations (ins, nin, n, next) are on elided lines.
 */
2545 mature_immBlock(ir_node *block) {
2550 assert(get_irn_opcode(block) == iro_Block);
2551 /* @@@ should be commented in
2552 assert (!get_Block_matured(block) && "Block already matured"); */
2554 if (!get_Block_matured(block)) {
2555 ins = ARR_LEN(block->in)-1;
2556 /* Fix block parameters */
2557 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2559 /* An array for building the Phi nodes. */
2560 NEW_ARR_A(ir_node *, nin, ins);
2562 /* Traverse a chain of Phi nodes attached to this block and mature
2564 for (n = block->link; n; n = next) {
2565 inc_irg_visited(current_ir_graph);
2567 exchange(n, phi_merge(block, n->attr.phi0.pos, n->mode, nin, ins));
2570 block->attr.block.is_matured = 1;
2572 /* Now, as the block is a finished firm node, we can optimize it.
2573 Since other nodes have been allocated since the block was created
2574 we can not free the node on the obstack. Therefore we have to call
2576 Unfortunately the optimization does not change a lot, as all allocated
2577 nodes refer to the unoptimized node.
2578 We can call _2, as global cse has no effect on blocks. */
2579 block = optimize_in_place_2(block);
2580 IRN_VRFY_IRG(block, current_ir_graph);
2582 } /* mature_immBlock */
/* --- Debug-info constructors: thin wrappers that build nodes in the
 * current block of current_ir_graph.  Constants are placed in the start
 * block.  new_d_defaultProj additionally marks the Cond as fragmentary
 * and records the default projection number. --- */
2585 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2586 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2590 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2591 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2595 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2596 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2597 } /* new_d_Const_long */
2600 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2601 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2602 } /* new_d_Const_type */
2606 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2607 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2611 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2612 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2616 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2619 assert(arg->op == op_Cond);
2620 arg->attr.cond.kind = fragmentary;
2621 arg->attr.cond.default_proj = max_proj;
2622 res = new_Proj(arg, mode_X, max_proj);
2624 } /* new_d_defaultProj */
2627 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2628 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2632 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2633 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2634 } /* new_d_strictConv */
2637 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2638 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2642 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2643 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2652 * Allocate the frag array.
/**
 * Lazily allocate the fragment array used for precise exception contexts.
 * Skipped when construction is over, when the node was optimized away to a
 * different op, or when CSE already attached an array.
 */
2654 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2655 if (get_opt_precise_exc_context()) {
2656 if ((current_ir_graph->phase_state == phase_building) &&
2657 (get_irn_op(res) == op) && /* Could be optimized away. */
2658 !*frag_store) /* Could be a cse where the arr is already set. */ {
2659 *frag_store = new_frag_arr(res);
2662 } /* allocate_frag_arr */
/* --- new_d_* constructors for operations built in the current block.
 * Fragile ops (Quot, DivMod, Div, Mod, Call, Load, Store, Alloc)
 * additionally get a frag array for precise exception contexts via
 * allocate_frag_arr().  SymConsts are placed in the start block. --- */
2665 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2667 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2668 #if PRECISE_EXC_CONTEXT
2669 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2676 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2678 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2679 #if PRECISE_EXC_CONTEXT
2680 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2684 } /* new_d_DivMod */
2687 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2689 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2690 #if PRECISE_EXC_CONTEXT
2691 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2698 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2700 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2701 #if PRECISE_EXC_CONTEXT
2702 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2721 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2722 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2726 new_d_Jmp(dbg_info *db) {
2727 return new_bd_Jmp(db, current_ir_graph->current_block);
2731 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2732 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2736 new_d_Cond(dbg_info *db, ir_node *c) {
2737 return new_bd_Cond(db, current_ir_graph->current_block, c);
2741 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2744 res = new_bd_Call(db, current_ir_graph->current_block,
2745 store, callee, arity, in, tp);
2746 #if PRECISE_EXC_CONTEXT
2747 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2754 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2755 return new_bd_Return(db, current_ir_graph->current_block,
2757 } /* new_d_Return */
2760 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2762 res = new_bd_Load(db, current_ir_graph->current_block,
2764 #if PRECISE_EXC_CONTEXT
2765 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2772 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2774 res = new_bd_Store(db, current_ir_graph->current_block,
2776 #if PRECISE_EXC_CONTEXT
2777 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2784 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2785 where_alloc where) {
2787 res = new_bd_Alloc(db, current_ir_graph->current_block,
2788 store, size, alloc_type, where);
2789 #if PRECISE_EXC_CONTEXT
2790 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2797 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2798 ir_node *size, ir_type *free_type, where_alloc where) {
2799 return new_bd_Free(db, current_ir_graph->current_block,
2800 store, ptr, size, free_type, where);
2804 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2805 /* GL: objptr was called frame before. Frame was a bad choice for the name
2806 as the operand could as well be a pointer to a dynamic object. */
2808 return new_bd_Sel(db, current_ir_graph->current_block,
2809 store, objptr, 0, NULL, ent);
2810 } /* new_d_simpleSel */
2813 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2814 return new_bd_Sel(db, current_ir_graph->current_block,
2815 store, objptr, n_index, index, sel);
2819 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2820 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2822 } /* new_d_SymConst_type */
2825 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
2826 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2827 value, kind, firm_unknown_type);
2828 } /* new_d_SymConst */
2831 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2832 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
/* --- Further new_d_* wrappers.  (new_d_Bad)/(new_d_NoMem) delegate to
 * their inline macro counterparts; the interprocedural constructors
 * (CallBegin, EndReg, EndExcept, Break, Filter) build in the current
 * block like the others. --- */
2838 return _new_d_Bad();
2842 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2843 return new_bd_Confirm(db, current_ir_graph->current_block,
2845 } /* new_d_Confirm */
2848 new_d_Unknown(ir_mode *m) {
2849 return new_bd_Unknown(m);
2850 } /* new_d_Unknown */
2853 new_d_CallBegin(dbg_info *db, ir_node *call) {
2854 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2855 } /* new_d_CallBegin */
2858 new_d_EndReg(dbg_info *db) {
2859 return new_bd_EndReg(db, current_ir_graph->current_block);
2860 } /* new_d_EndReg */
2863 new_d_EndExcept(dbg_info *db) {
2864 return new_bd_EndExcept(db, current_ir_graph->current_block);
2865 } /* new_d_EndExcept */
2868 new_d_Break(dbg_info *db) {
2869 return new_bd_Break(db, current_ir_graph->current_block);
2873 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2874 return new_bd_Filter(db, current_ir_graph->current_block,
2876 } /* new_d_Filter */
2879 (new_d_NoMem)(void) {
2880 return _new_d_NoMem();
2884 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2885 ir_node *ir_true, ir_mode *mode) {
2886 return new_bd_Mux(db, current_ir_graph->current_block,
2887 sel, ir_false, ir_true, mode);
2891 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2892 return new_bd_Psi(db, current_ir_graph->current_block,
2893 arity, conds, vals, mode);
/* --- new_d_* wrappers for memory/check operations.  CopyB and Bound are
 * fragile and get a frag array for precise exception contexts. --- */
2896 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2897 ir_node *dst, ir_node *src, ir_type *data_type) {
2899 res = new_bd_CopyB(db, current_ir_graph->current_block,
2900 store, dst, src, data_type);
2901 #if PRECISE_EXC_CONTEXT
2902 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2908 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2909 return new_bd_InstOf(db, current_ir_graph->current_block,
2910 store, objptr, type);
2911 } /* new_d_InstOf */
2914 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2915 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2918 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2919 ir_node *idx, ir_node *lower, ir_node *upper) {
2921 res = new_bd_Bound(db, current_ir_graph->current_block,
2922 store, idx, lower, upper);
2923 #if PRECISE_EXC_CONTEXT
2924 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2930 new_d_Pin(dbg_info *db, ir_node *node) {
2931 return new_bd_Pin(db, current_ir_graph->current_block, node);
2935 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2936 int n_outs, ir_asm_constraint *outputs,
2937 int n_clobber, ident *clobber[], ident *asm_text) {
2938 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2941 /* ********************************************************************* */
2942 /* Comfortable interface with automatic Phi node construction. */
2943 /* (Uses also constructors of ?? interface, except new_Block. */
2944 /* ********************************************************************* */
2946 /* Block construction */
2947 /* immature Block without predecessors */
/**
 * Create a new immature block (dynamic in-array, arity -1), make it the
 * current block and initialize its block attributes plus the graph_arr
 * used for automatic Phi construction.  The block must later be finished
 * with mature_immBlock().
 */
2949 new_d_immBlock(dbg_info *db) {
2952 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2953 /* creates a new dynamic in-array as length of in is -1 */
2954 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2955 current_ir_graph->current_block = res;
2957 /* macroblock head */
2960 res->attr.block.is_matured = 0;
2961 res->attr.block.is_dead = 0;
2962 res->attr.block.is_mb_head = 1;
2963 res->attr.block.has_label = 0;
2964 res->attr.block.irg = current_ir_graph;
2965 res->attr.block.backedge = NULL;
2966 res->attr.block.in_cg = NULL;
2967 res->attr.block.cg_backedge = NULL;
2968 res->attr.block.extblk = NULL;
2969 res->attr.block.region = NULL;
2970 res->attr.block.mb_depth = 0;
2971 res->attr.block.label = 0;
2973 set_Block_block_visited(res, 0);
2975 /* Create and initialize array for Phi-node construction. */
2976 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2977 current_ir_graph->n_loc);
2978 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2980 /* Immature block may not be optimized! */
2981 IRN_VRFY_IRG(res, current_ir_graph);
2984 } /* new_d_immBlock */
2987 new_immBlock(void) {
2988 return new_d_immBlock(NULL);
2989 } /* new_immBlock */
2991 /* immature PartBlock with its predecessors */
2993 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2994 ir_node *res = new_d_immBlock(db);
2995 ir_node *blk = get_nodes_block(pred_jmp);
2997 res->in[0] = blk->in[0];
2998 add_immBlock_pred(res, pred_jmp);
3000 res->attr.block.is_mb_head = 0;
3001 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
3004 } /* new_d_immPartBlock */
3007 new_immPartBlock(ir_node *pred_jmp) {
3008 return new_d_immPartBlock(NULL, pred_jmp);
3009 } /* new_immPartBlock */
/* add an edge to a jmp/control flow node */
/**
 * Append control-flow predecessor `jmp` to the (still immature) `block`.
 * Only allowed on macroblock heads before maturing.
 */
3013 add_immBlock_pred(ir_node *block, ir_node *jmp) {
3014 int n = ARR_LEN(block->in) - 1;
3016 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
3017 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
3018 assert(jmp != NULL);
3020 ARR_APP1(ir_node *, block->in, jmp);
3022 hook_set_irn_n(block, n, jmp, NULL);
3023 } /* add_immBlock_pred */
3025 /* changing the current block */
/* Make `target` the block new nodes are appended to. */
3027 set_cur_block(ir_node *target) {
3028 current_ir_graph->current_block = target;
3029 } /* set_cur_block */
3031 /* ************************ */
3032 /* parameter administration */
3034 /* get a value from the parameter array from the current block by its index */
/* Note: slot 0 of graph_arr holds the memory state, so user value `pos`
 * maps to graph_arr[pos + 1]. */
3036 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
3037 ir_graph *irg = current_ir_graph;
3038 assert(get_irg_phase_state(irg) == phase_building);
3039 inc_irg_visited(irg);
3042 return get_r_value_internal(irg->current_block, pos + 1, mode);
3045 /* get a value from the parameter array from the current block by its index */
3047 get_value(int pos, ir_mode *mode) {
3048 return get_d_value(NULL, pos, mode);
3051 /* set a value at position pos in the parameter array from the current block */
3053 set_value(int pos, ir_node *value) {
3054 ir_graph *irg = current_ir_graph;
3055 assert(get_irg_phase_state(irg) == phase_building);
3056 assert(pos+1 < irg->n_loc);
3057 irg->current_block->attr.block.graph_arr[pos + 1] = value;
3060 /* Find the value number for a node in the current block.*/
/* Linear search over the current block's graph_arr (index 0 is the
 * memory slot and is skipped). */
3062 find_value(ir_node *value) {
3064 ir_node *bl = current_ir_graph->current_block;
3066 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
3067 if (bl->attr.block.graph_arr[i] == value)
3072 /* get the current store */
/* The memory state lives in graph_arr slot 0. */
3075 ir_graph *irg = current_ir_graph;
3077 assert(get_irg_phase_state(irg) == phase_building);
3078 /* GL: one could call get_value instead */
3079 inc_irg_visited(irg);
3080 return get_r_value_internal(irg->current_block, 0, mode_M);
3083 /* set the current store: handles automatic Sync construction for Load nodes */
3085 set_store(ir_node *store) {
3086 ir_node *load, *pload, *pred, *in[2];
3088 assert(get_irg_phase_state(current_ir_graph) == phase_building);
3089 /* Beware: due to dead code elimination, a store might become a Bad node even in
3090 the construction phase. */
3091 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
3093 if (get_opt_auto_create_sync()) {
3094 /* handle non-volatile Load nodes by automatically creating Sync's */
3095 load = skip_Proj(store);
3096 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3097 pred = get_Load_mem(load);
3099 if (is_Sync(pred)) {
3100 /* a Load after a Sync: move it up */
3101 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3103 set_Load_mem(load, get_memop_mem(mem));
3104 add_Sync_pred(pred, store);
3107 pload = skip_Proj(pred);
3108 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3109 /* a Load after a Load: create a new Sync */
3110 set_Load_mem(load, get_Load_mem(pload));
3114 store = new_Sync(2, in);
3119 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keep `ka` alive by attaching it to the End node of the current graph. */
3123 keep_alive(ir_node *ka) {
3124 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3127 /* --- Useful access routines --- */
3128 /* Returns the current block of the current graph. To set the current
3129 block use set_cur_block. */
3130 ir_node *get_cur_block(void) {
3131 return get_irg_current_block(current_ir_graph);
3132 } /* get_cur_block */
3134 /* Returns the frame type of the current graph */
3135 ir_type *get_cur_frame_type(void) {
3136 return get_irg_frame_type(current_ir_graph);
3137 } /* get_cur_frame_type */
3140 /* ********************************************************************* */
3143 /* call once for each run of the library */
/* Register the frontend callback used for uninitialized local variables
 * (see default_initialize_local_variable in phi_merge). */
3145 init_cons(uninitialized_local_variable_func_t *func) {
3146 default_initialize_local_variable = func;
/* Finalize construction of every graph in the program and raise the
 * program's phase state. */
3150 irp_finalize_cons(void) {
3152 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3153 irg_finalize_cons(get_irp_irg(i));
3155 irp->phase_state = phase_high;
3156 } /* irp_finalize_cons */
/* --- Raw interface: convenience wrappers that forward to the new_d_*
 * constructors with a NULL dbg_info. --- */
3159 ir_node *new_Block(int arity, ir_node **in) {
3160 return new_d_Block(NULL, arity, in);
3162 ir_node *new_Start(void) {
3163 return new_d_Start(NULL);
3165 ir_node *new_End(void) {
3166 return new_d_End(NULL);
3168 ir_node *new_Jmp(void) {
3169 return new_d_Jmp(NULL);
3171 ir_node *new_IJmp(ir_node *tgt) {
3172 return new_d_IJmp(NULL, tgt);
3174 ir_node *new_Cond(ir_node *c) {
3175 return new_d_Cond(NULL, c);
3177 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3178 return new_d_Return(NULL, store, arity, in);
3180 ir_node *new_Const(ir_mode *mode, tarval *con) {
3181 return new_d_Const(NULL, mode, con);
3184 ir_node *new_Const_long(ir_mode *mode, long value) {
3185 return new_d_Const_long(NULL, mode, value);
/* Note: the mode is derived from the given type here. */
3188 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3189 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3192 ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
3193 return new_d_SymConst_type(NULL, value, kind, type);
3195 ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
3196 return new_d_SymConst(NULL, value, kind);
3198 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3199 return new_d_simpleSel(NULL, store, objptr, ent);
3201 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3203 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3205 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3207 return new_d_Call(NULL, store, callee, arity, in, tp);
3209 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3210 return new_d_Add(NULL, op1, op2, mode);
3212 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3213 return new_d_Sub(NULL, op1, op2, mode);
3215 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3216 return new_d_Minus(NULL, op, mode);
3218 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3219 return new_d_Mul(NULL, op1, op2, mode);
3221 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3222 return new_d_Quot(NULL, memop, op1, op2, mode, state);
3224 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3225 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
3227 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3228 return new_d_Div(NULL, memop, op1, op2, mode, state);
3230 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3231 return new_d_Mod(NULL, memop, op1, op2, mode, state);
3233 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3234 return new_d_Abs(NULL, op, mode);
3236 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3237 return new_d_And(NULL, op1, op2, mode);
3239 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3240 return new_d_Or(NULL, op1, op2, mode);
3242 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3243 return new_d_Eor(NULL, op1, op2, mode);
3245 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3246 return new_d_Not(NULL, op, mode);
3248 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3249 return new_d_Shl(NULL, op, k, mode);
3251 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3252 return new_d_Shr(NULL, op, k, mode);
3254 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3255 return new_d_Shrs(NULL, op, k, mode);
3257 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
3258 return new_d_Rot(NULL, op, k, mode);
3260 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3261 return new_d_Carry(NULL, op1, op2, mode);
3263 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3264 return new_d_Borrow(NULL, op1, op2, mode);
3266 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3267 return new_d_Cmp(NULL, op1, op2);
3269 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3270 return new_d_Conv(NULL, op, mode);
3272 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3273 return new_d_strictConv(NULL, op, mode);
3275 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3276 return new_d_Cast(NULL, op, to_tp);
3278 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3279 return new_d_Phi(NULL, arity, in, mode);
3281 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3282 return new_d_Load(NULL, store, addr, mode);
3284 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3285 return new_d_Store(NULL, store, addr, val);
3287 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3288 where_alloc where) {
3289 return new_d_Alloc(NULL, store, size, alloc_type, where);
3291 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3292 ir_type *free_type, where_alloc where) {
3293 return new_d_Free(NULL, store, ptr, size, free_type, where);
3295 ir_node *new_Sync(int arity, ir_node *in[]) {
3296 return new_d_Sync(NULL, arity, in);
3298 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3299 return new_d_Proj(NULL, arg, mode, proj);
3301 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3302 return new_d_defaultProj(NULL, arg, max_proj);
3304 ir_node *new_Tuple(int arity, ir_node **in) {
3305 return new_d_Tuple(NULL, arity, in);
3307 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3308 return new_d_Id(NULL, val, mode);
3310 ir_node *new_Bad(void) {
3313 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3314 return new_d_Confirm(NULL, val, bound, cmp);
3316 ir_node *new_Unknown(ir_mode *m) {
3317 return new_d_Unknown(m);
3319 ir_node *new_CallBegin(ir_node *callee) {
3320 return new_d_CallBegin(NULL, callee);
3322 ir_node *new_EndReg(void) {
3323 return new_d_EndReg(NULL);
3325 ir_node *new_EndExcept(void) {
3326 return new_d_EndExcept(NULL);
3328 ir_node *new_Break(void) {
3329 return new_d_Break(NULL);
3331 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3332 return new_d_Filter(NULL, arg, mode, proj);
3334 ir_node *new_NoMem(void) {
3335 return new_d_NoMem();
3337 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3338 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3340 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3341 return new_d_Psi(NULL, arity, conds, vals, mode);
3343 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3344 return new_d_CopyB(NULL, store, dst, src, data_type);
3346 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3347 return new_d_InstOf(NULL, store, objptr, ent);
3349 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3350 return new_d_Raise(NULL, store, obj);
3352 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3353 return new_d_Bound(NULL, store, idx, lower, upper);
3355 ir_node *new_Pin(ir_node *node) {
3356 return new_d_Pin(NULL, node);
3358 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3359 int n_outs, ir_asm_constraint *outputs,
3360 int n_clobber, ident *clobber[], ident *asm_text) {
3361 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3364 /* create a new anchor node */
3365 ir_node *new_Anchor(ir_graph *irg) {
3366 ir_node *in[anchor_last];
3367 memset(in, 0, sizeof(in));
3368 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);