2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
/* NOTE(review): this dump is elided — each line carries its original file
   line number and some interior lines are missing. Comments below describe
   only what the visible code shows. */
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
/* IRN_VRFY_IRG: one branch is a no-op, the other calls irn_vrfy_irg();
   which branch applies is selected by an (elided) preprocessor condition. */
60 /* when we need verifying */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
/* Callback used to initialize otherwise-uninitialized local variables;
   NULL until the frontend installs one. */
68 * Language dependent variable initialization callback.
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* Macro factories for "bd" (block + debug-info) constructors.
   Each expands to a function new_bd_<instr> that builds a node of
   op_<instr> in the current_ir_graph, runs optimize_node() on it and
   verifies it. The divop variant additionally pins the node and records
   the result mode in attr.divmod. */
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = op_pin_state_pinned; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
/* Macro factories for "rd" (explicit-graph) and "d" (current-block)
   constructors. The rd variants save current_ir_graph, switch it to the
   given irg, delegate to the matching new_bd_* constructor, then restore
   the saved graph. The d variants delegate using
   current_ir_graph->current_block as the target block. */
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for an divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode); \
161 current_ir_graph = rem; \
165 /* creates a d constructor for an binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Cannot be used with automatic
183 * Phi node construction.
/* Builds the Block in current_ir_graph; note the block argument to
   new_ir_node is NULL — a Block has no enclosing block. The block starts
   life as its own macroblock header (is_mb_head = 1, mb_depth = 0) and is
   immediately marked mature. */
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.irg = irg;
198 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
199 res->attr.block.in_cg = NULL;
200 res->attr.block.cg_backedge = NULL;
201 res->attr.block.extblk = NULL;
202 res->attr.block.mb_depth = 0;
204 set_Block_matured(res, 1);
205 set_Block_block_visited(res, 0);
207 IRN_VRFY_IRG(res, irg);
/* Start node: mode_T tuple with no predecessors. Not optimized — a graph
   has exactly one Start. */
212 new_bd_Start(dbg_info *db, ir_node *block) {
214 ir_graph *irg = current_ir_graph;
216 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
218 IRN_VRFY_IRG(res, irg);
/* End node: arity -1 means a dynamic predecessor array (keep-alives are
   added later). */
223 new_bd_End(dbg_info *db, ir_node *block) {
225 ir_graph *irg = current_ir_graph;
227 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
229 IRN_VRFY_IRG(res, irg);
234 * Creates a Phi node with all predecessors. Calling this constructor
235 * is only allowed if the corresponding block is mature.
238 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
240 ir_graph *irg = current_ir_graph;
244 /* Don't assert that block matured: the use of this constructor is strongly
246 if ( get_Block_matured(block) )
247 assert( get_irn_arity(block) == arity );
249 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
251 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan predecessors for Unknown inputs; optimization is skipped in that
   case (the elided loop body presumably sets has_unknown — TODO confirm). */
253 for (i = arity-1; i >= 0; i--)
254 if (get_irn_op(in[i]) == op_Unknown) {
259 if (!has_unknown) res = optimize_node(res);
260 IRN_VRFY_IRG(res, irg);
262 /* Memory Phis in endless loops must be kept alive.
263 As we can't distinguish these easily we keep all of them alive. */
264 if ((res->op == op_Phi) && (mode == mode_M))
265 add_End_keepalive(get_irg_end(irg), res);
/* Const constructor. Note: the block parameter is ignored — the node is
   deliberately placed in the graph's start block. */
270 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
272 ir_graph *irg = current_ir_graph;
274 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
275 res->attr.con.tv = con;
276 set_Const_type(res, tp); /* Call method because of complex assertion. */
277 res = optimize_node (res);
278 assert(get_Const_type(res) == tp);
279 IRN_VRFY_IRG(res, irg);
282 } /* new_bd_Const_type */
/* Convenience wrapper: Const with the unknown type. Delegates to the rd
   variant with the current graph. */
285 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
286 ir_graph *irg = current_ir_graph;
288 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience wrapper: Const from a host long, converted via tarval. */
292 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
293 ir_graph *irg = current_ir_graph;
295 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
296 } /* new_bd_Const_long */
/* Id node: single-input identity; optimize_node() normally folds it away. */
299 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
301 ir_graph *irg = current_ir_graph;
303 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
/* Proj node: selects component `proj` from the tuple node `arg`. */
310 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
313 ir_graph *irg = current_ir_graph;
315 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
316 res->attr.proj = proj;
319 assert(get_Proj_pred(res));
320 assert(get_nodes_block(get_Proj_pred(res)));
322 res = optimize_node(res);
324 IRN_VRFY_IRG(res, irg);
/* Default Proj of a Cond (the "otherwise" branch). Side effect: mutates
   the Cond itself — marks it fragmentary and records max_proj as its
   default projection number. */
329 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
332 ir_graph *irg = current_ir_graph;
334 assert(arg->op == op_Cond);
335 arg->attr.cond.kind = fragmentary;
336 arg->attr.cond.default_proj = max_proj;
337 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
339 } /* new_bd_defaultProj */
/* Conv node: mode conversion; strict_flag is stored in attr.conv.strict. */
342 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
344 ir_graph *irg = current_ir_graph;
346 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
347 res->attr.conv.strict = strict_flag;
348 res = optimize_node(res);
349 IRN_VRFY_IRG(res, irg);
/* Cast node: type cast that keeps the operand's mode; target type must
   be atomic. */
354 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
356 ir_graph *irg = current_ir_graph;
358 assert(is_atomic_type(to_tp));
360 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
361 res->attr.cast.totype = to_tp;
362 res = optimize_node(res);
363 IRN_VRFY_IRG(res, irg);
/* Tuple node: groups arity values under mode_T. */
368 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
370 ir_graph *irg = current_ir_graph;
372 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
373 res = optimize_node (res);
374 IRN_VRFY_IRG(res, irg);
/* Cmp node: mode_T tuple of comparison results for op1 ? op2. */
399 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
402 ir_graph *irg = current_ir_graph;
405 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
406 res = optimize_node(res);
407 IRN_VRFY_IRG(res, irg);
/* Jmp node: unconditional branch, no predecessors. */
412 new_bd_Jmp(dbg_info *db, ir_node *block) {
414 ir_graph *irg = current_ir_graph;
416 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
417 res = optimize_node (res);
418 IRN_VRFY_IRG(res, irg);
/* IJmp node: indirect branch to computed target tgt. optimize_node may
   replace it, hence the op check afterwards. */
423 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
425 ir_graph *irg = current_ir_graph;
427 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
428 res = optimize_node (res);
429 IRN_VRFY_IRG(res, irg);
431 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
/* Cond node: conditional branch on c; starts as a dense jump table with
   default projection 0 and no branch prediction. */
437 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
439 ir_graph *irg = current_ir_graph;
441 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
442 res->attr.cond.kind = dense;
443 res->attr.cond.default_proj = 0;
444 res->attr.cond.pred = COND_JMP_PRED_NONE;
445 res = optimize_node (res);
446 IRN_VRFY_IRG(res, irg);
/* Call node: predecessors are [store, callee, args...]; the in array is
   copied into r_in starting at index 2 (store/callee presumably occupy
   slots 0 and 1 in elided lines — TODO confirm). */
451 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
452 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
456 ir_graph *irg = current_ir_graph;
459 NEW_ARR_A(ir_node *, r_in, r_arity);
462 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
464 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
466 assert((get_unknown_type() == tp) || is_Method_type(tp));
467 set_Call_type(res, tp);
468 res->attr.call.exc.pin_state = op_pin_state_pinned;
469 res->attr.call.callee_arr = NULL;
470 res = optimize_node(res);
471 IRN_VRFY_IRG(res, irg);
/* Return node: predecessors are [store, results...]; results copied in
   starting at index 1. */
476 new_bd_Return(dbg_info *db, ir_node *block,
477 ir_node *store, int arity, ir_node **in) {
481 ir_graph *irg = current_ir_graph;
484 NEW_ARR_A (ir_node *, r_in, r_arity);
486 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
487 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
491 } /* new_bd_Return */
/* Load node: pinned, non-volatile; loaded mode recorded in attr.load. */
494 new_bd_Load(dbg_info *db, ir_node *block,
495 ir_node *store, ir_node *adr, ir_mode *mode) {
498 ir_graph *irg = current_ir_graph;
502 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
503 res->attr.load.exc.pin_state = op_pin_state_pinned;
504 res->attr.load.load_mode = mode;
505 res->attr.load.volatility = volatility_non_volatile;
506 res = optimize_node(res);
507 IRN_VRFY_IRG(res, irg);
/* Store node: pinned, non-volatile; 3 predecessors (memory, address,
   value — order set in elided lines). */
512 new_bd_Store(dbg_info *db, ir_node *block,
513 ir_node *store, ir_node *adr, ir_node *val) {
516 ir_graph *irg = current_ir_graph;
521 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
522 res->attr.store.exc.pin_state = op_pin_state_pinned;
523 res->attr.store.volatility = volatility_non_volatile;
524 res = optimize_node(res);
525 IRN_VRFY_IRG(res, irg);
/* Alloc node: pinned; records allocation site kind (where) and type. */
530 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
531 ir_node *size, ir_type *alloc_type, where_alloc where) {
534 ir_graph *irg = current_ir_graph;
538 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
539 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
540 res->attr.alloc.where = where;
541 res->attr.alloc.type = alloc_type;
542 res = optimize_node(res);
543 IRN_VRFY_IRG(res, irg);
/* Free node: mode_M result; records where and the freed type. */
548 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
549 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
552 ir_graph *irg = current_ir_graph;
557 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
558 res->attr.free.where = where;
559 res->attr.free.type = free_type;
560 res = optimize_node(res);
561 IRN_VRFY_IRG(res, irg);
/* Sel node: entity selection. Result mode depends on the entity's type:
   method entities yield mode_P_code, everything else mode_P_data. Index
   inputs are copied in starting at slot 2. */
566 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
567 int arity, ir_node **in, ir_entity *ent) {
571 ir_graph *irg = current_ir_graph;
572 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
574 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
577 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
580 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
582 * Sel's can select functions which should be of mode mode_P_code.
584 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
585 res->attr.sel.ent = ent;
586 res = optimize_node(res);
587 IRN_VRFY_IRG(res, irg);
/* SymConst node: address-kind symbols get a pointer mode (mode_P_data);
   the mode for the remaining kinds is set in an elided else branch. */
592 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
593 symconst_kind symkind, ir_type *tp) {
596 ir_graph *irg = current_ir_graph;
598 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
599 mode = mode_P_data; /* FIXME: can be mode_P_code */
603 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
605 res->attr.symc.num = symkind;
606 res->attr.symc.sym = value;
607 res->attr.symc.tp = tp;
609 res = optimize_node(res);
610 IRN_VRFY_IRG(res, irg);
612 } /* new_bd_SymConst_type */
/* Sync node: dynamic predecessor array (arity -1), predecessors added
   later via add_Sync_pred. */
615 new_bd_Sync(dbg_info *db, ir_node *block) {
617 ir_graph *irg = current_ir_graph;
619 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
620 /* no need to call optimize node here, Sync are always created with no predecessors */
621 IRN_VRFY_IRG(res, irg);
/* Confirm node: asserts `val cmp bound` holds from here on; keeps val's
   mode. */
626 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
627 ir_node *in[2], *res;
628 ir_graph *irg = current_ir_graph;
632 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
633 res->attr.confirm_cmp = cmp;
634 res = optimize_node (res);
635 IRN_VRFY_IRG(res, irg);
637 } /* new_bd_Confirm */
/* Unknown node: placed in the start block, no debug info. */
640 new_bd_Unknown(ir_mode *m) {
642 ir_graph *irg = current_ir_graph;
644 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
645 res = optimize_node(res);
647 } /* new_bd_Unknown */
/* CallBegin node (interprocedural view): predecessor is the callee
   pointer of the associated Call. */
650 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
653 ir_graph *irg = current_ir_graph;
655 in[0] = get_Call_ptr(call);
656 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
657 /* res->attr.callbegin.irg = irg; */
658 res->attr.callbegin.call = call;
659 res = optimize_node(res);
660 IRN_VRFY_IRG(res, irg);
662 } /* new_bd_CallBegin */
/* EndReg node: dynamic arity; registered as the graph's regular end. */
665 new_bd_EndReg(dbg_info *db, ir_node *block) {
667 ir_graph *irg = current_ir_graph;
669 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
670 set_irg_end_reg(irg, res);
671 IRN_VRFY_IRG(res, irg);
673 } /* new_bd_EndReg */
/* EndExcept node: dynamic arity; registered as the graph's exceptional
   end. */
676 new_bd_EndExcept(dbg_info *db, ir_node *block) {
678 ir_graph *irg = current_ir_graph;
680 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
681 set_irg_end_except(irg, res);
682 IRN_VRFY_IRG (res, irg);
684 } /* new_bd_EndExcept */
/* Break node: control flow successor of an interprocedural block. */
687 new_bd_Break(dbg_info *db, ir_node *block) {
689 ir_graph *irg = current_ir_graph;
691 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
692 res = optimize_node(res);
693 IRN_VRFY_IRG(res, irg);
/* Filter node: interprocedural analogue of Proj; in_cg/backedge start
   empty. */
698 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
701 ir_graph *irg = current_ir_graph;
703 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
704 res->attr.filter.proj = proj;
705 res->attr.filter.in_cg = NULL;
706 res->attr.filter.backedge = NULL;
709 assert(get_Proj_pred(res));
710 assert(get_nodes_block(get_Proj_pred(res)));
712 res = optimize_node(res);
713 IRN_VRFY_IRG(res, irg);
715 } /* new_bd_Filter */
/* Mux node: 3 predecessors (selector and the two values — exact slot
   assignment is in elided lines). */
718 new_bd_Mux(dbg_info *db, ir_node *block,
719 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
722 ir_graph *irg = current_ir_graph;
728 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
731 res = optimize_node(res);
732 IRN_VRFY_IRG(res, irg);
/* Psi node: generalized Mux with arity condition/value pairs; the in
   array interleaves conditions and values (vals at odd indices), plus one
   trailing slot, giving 2*arity+1 predecessors. */
737 new_bd_Psi(dbg_info *db, ir_node *block,
738 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
741 ir_graph *irg = current_ir_graph;
744 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
746 for (i = 0; i < arity; ++i) {
748 in[2 * i + 1] = vals[i];
752 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
755 res = optimize_node(res);
756 IRN_VRFY_IRG(res, irg);
/* CopyB node: block copy of data_type from src to dst; pinned. */
761 new_bd_CopyB(dbg_info *db, ir_node *block,
762 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
765 ir_graph *irg = current_ir_graph;
771 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
773 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
774 res->attr.copyb.data_type = data_type;
775 res = optimize_node(res);
776 IRN_VRFY_IRG(res, irg);
781 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
782 ir_node *objptr, ir_type *type) {
785 ir_graph *irg = current_ir_graph;
789 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
790 res->attr.instof.type = type;
791 res = optimize_node(res);
792 IRN_VRFY_IRG(res, irg);
794 } /* new_bd_InstOf */
/* Raise node: raises an exception carrying obj; 2 predecessors. */
797 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
800 ir_graph *irg = current_ir_graph;
804 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
805 res = optimize_node(res);
806 IRN_VRFY_IRG(res, irg);
/* Bound node: checks lower <= idx < upper; pinned, 4 predecessors. */
811 new_bd_Bound(dbg_info *db, ir_node *block,
812 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
815 ir_graph *irg = current_ir_graph;
821 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
822 res->attr.bound.exc.pin_state = op_pin_state_pinned;
823 res = optimize_node(res);
824 IRN_VRFY_IRG(res, irg);
/* Pin node: fixes node to this block, keeping node's mode. */
829 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
831 ir_graph *irg = current_ir_graph;
833 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
834 res = optimize_node(res);
835 IRN_VRFY_IRG(res, irg);
/* ASM node: inline assembler. Constraint and clobber arrays are copied
   onto the graph's obstack so the node owns its attribute storage. */
840 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
841 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
843 ir_graph *irg = current_ir_graph;
845 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
846 res->attr.assem.pin_state = op_pin_state_pinned;
847 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
848 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
849 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
850 res->attr.assem.asm_text = asm_text;
852 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
853 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
855 res = optimize_node(res);
856 IRN_VRFY_IRG(res, irg);
860 /* --------------------------------------------- */
861 /* private interfaces, for professional use only */
862 /* --------------------------------------------- */
864 /* Constructs a Block with a fixed number of predecessors.
865 Does not set current_block. Can not be used with automatic
866 Phi node construction. */
/* rd-constructors: switch current_ir_graph to the given irg, delegate to
   the bd-constructor, then restore the previous graph. */
868 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
869 ir_graph *rem = current_ir_graph;
872 current_ir_graph = irg;
873 res = new_bd_Block(db, arity, in);
874 current_ir_graph = rem;
880 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
881 ir_graph *rem = current_ir_graph;
884 current_ir_graph = irg;
885 res = new_bd_Start(db, block);
886 current_ir_graph = rem;
892 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
894 ir_graph *rem = current_ir_graph;
896 current_ir_graph = rem;
897 res = new_bd_End(db, block);
898 current_ir_graph = rem;
903 /* Creates a Phi node with all predecessors. Calling this constructor
904 is only allowed if the corresponding block is mature. */
906 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
908 ir_graph *rem = current_ir_graph;
910 current_ir_graph = irg;
911 res = new_bd_Phi(db, block,arity, in, mode);
912 current_ir_graph = rem;
/* rd wrapper: Const with explicit type. */
918 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
920 ir_graph *rem = current_ir_graph;
922 current_ir_graph = irg;
923 res = new_bd_Const_type(db, block, mode, con, tp);
924 current_ir_graph = rem;
927 } /* new_rd_Const_type */
/* rd wrapper: Const with the unknown type. */
930 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
932 ir_graph *rem = current_ir_graph;
934 current_ir_graph = irg;
935 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
936 current_ir_graph = rem;
/* rd wrapper: Const from a host long via tarval conversion. */
942 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
943 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
944 } /* new_rd_Const_long */
947 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
949 ir_graph *rem = current_ir_graph;
951 current_ir_graph = irg;
952 res = new_bd_Id(db, block, val, mode);
953 current_ir_graph = rem;
959 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
962 ir_graph *rem = current_ir_graph;
964 current_ir_graph = irg;
965 res = new_bd_Proj(db, block, arg, mode, proj);
966 current_ir_graph = rem;
972 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
975 ir_graph *rem = current_ir_graph;
977 current_ir_graph = irg;
978 res = new_bd_defaultProj(db, block, arg, max_proj);
979 current_ir_graph = rem;
982 } /* new_rd_defaultProj */
/* rd wrapper: non-strict Conv (strict_flag hard-wired to 0). */
985 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
987 ir_graph *rem = current_ir_graph;
989 current_ir_graph = irg;
990 res = new_bd_Conv(db, block, op, mode, 0);
991 current_ir_graph = rem;
997 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
999 ir_graph *rem = current_ir_graph;
1001 current_ir_graph = irg;
1002 res = new_bd_Cast(db, block, op, to_tp);
1003 current_ir_graph = rem;
1009 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1011 ir_graph *rem = current_ir_graph;
1013 current_ir_graph = irg;
1014 res = new_bd_Tuple(db, block, arity, in);
1015 current_ir_graph = rem;
1018 } /* new_rd_Tuple */
/* Macro-generated rd constructors (the remaining instantiations are in
   elided lines). */
1025 NEW_RD_DIVOP(DivMod)
1038 NEW_RD_BINOP(Borrow)
1041 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1042 ir_node *op1, ir_node *op2) {
1044 ir_graph *rem = current_ir_graph;
1046 current_ir_graph = irg;
1047 res = new_bd_Cmp(db, block, op1, op2);
1048 current_ir_graph = rem;
1054 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1056 ir_graph *rem = current_ir_graph;
1058 current_ir_graph = irg;
1059 res = new_bd_Jmp(db, block);
1060 current_ir_graph = rem;
1066 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1068 ir_graph *rem = current_ir_graph;
1070 current_ir_graph = irg;
1071 res = new_bd_IJmp(db, block, tgt);
1072 current_ir_graph = rem;
1078 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1080 ir_graph *rem = current_ir_graph;
1082 current_ir_graph = irg;
1083 res = new_bd_Cond(db, block, c);
1084 current_ir_graph = rem;
1090 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1091 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1093 ir_graph *rem = current_ir_graph;
1095 current_ir_graph = irg;
1096 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1097 current_ir_graph = rem;
/* rd wrappers: save/switch/restore current_ir_graph around the matching
   bd constructor. */
1103 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1104 ir_node *store, int arity, ir_node **in) {
1106 ir_graph *rem = current_ir_graph;
1108 current_ir_graph = irg;
1109 res = new_bd_Return(db, block, store, arity, in);
1110 current_ir_graph = rem;
1113 } /* new_rd_Return */
1116 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1117 ir_node *store, ir_node *adr, ir_mode *mode) {
1119 ir_graph *rem = current_ir_graph;
1121 current_ir_graph = irg;
1122 res = new_bd_Load(db, block, store, adr, mode);
1123 current_ir_graph = rem;
1129 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1130 ir_node *store, ir_node *adr, ir_node *val) {
1132 ir_graph *rem = current_ir_graph;
1134 current_ir_graph = irg;
1135 res = new_bd_Store(db, block, store, adr, val);
1136 current_ir_graph = rem;
1139 } /* new_rd_Store */
1142 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1143 ir_node *size, ir_type *alloc_type, where_alloc where) {
1145 ir_graph *rem = current_ir_graph;
1147 current_ir_graph = irg;
1148 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1149 current_ir_graph = rem;
1152 } /* new_rd_Alloc */
1155 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1156 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1158 ir_graph *rem = current_ir_graph;
1160 current_ir_graph = irg;
1161 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1162 current_ir_graph = rem;
/* simpleSel: Sel with no index inputs (arity 0). */
1168 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1169 ir_node *store, ir_node *objptr, ir_entity *ent) {
1171 ir_graph *rem = current_ir_graph;
1173 current_ir_graph = irg;
1174 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1175 current_ir_graph = rem;
1178 } /* new_rd_simpleSel */
1181 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1182 int arity, ir_node **in, ir_entity *ent) {
1184 ir_graph *rem = current_ir_graph;
1186 current_ir_graph = irg;
1187 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1188 current_ir_graph = rem;
1194 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1195 symconst_kind symkind, ir_type *tp) {
1197 ir_graph *rem = current_ir_graph;
1199 current_ir_graph = irg;
1200 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1201 current_ir_graph = rem;
1204 } /* new_rd_SymConst_type */
/* SymConst convenience constructors: each fills the appropriate member of
   the symconst_symbol union and delegates with the matching kind. All of
   them place the node in the graph's start block. */
1207 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1208 symconst_kind symkind) {
1209 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1211 } /* new_rd_SymConst */
1213 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1214 symconst_symbol sym;
1215 sym.entity_p = symbol;
1216 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1217 } /* new_rd_SymConst_addr_ent */
1219 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1220 symconst_symbol sym;
1221 sym.entity_p = symbol;
1222 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1223 } /* new_rd_SymConst_ofs_ent */
1225 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1226 symconst_symbol sym;
1227 sym.ident_p = symbol;
1228 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1229 } /* new_rd_SymConst_addr_name */
1231 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1232 symconst_symbol sym;
1233 sym.type_p = symbol;
1234 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1235 } /* new_rd_SymConst_type_tag */
1237 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1238 symconst_symbol sym;
1239 sym.type_p = symbol;
1240 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1241 } /* new_rd_SymConst_size */
1243 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1244 symconst_symbol sym;
1245 sym.type_p = symbol;
1246 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1247 } /* new_rd_SymConst_align */
/* rd Sync: builds an empty Sync, then appends the arity predecessors one
   by one via add_Sync_pred. */
1250 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1252 ir_graph *rem = current_ir_graph;
1255 current_ir_graph = irg;
1256 res = new_bd_Sync(db, block);
1257 current_ir_graph = rem;
1259 for (i = 0; i < arity; ++i)
1260 add_Sync_pred(res, in[i]);
/* Bad is a singleton per graph — just return the cached node. */
1266 new_rd_Bad(ir_graph *irg) {
1267 return get_irg_bad(irg);
1271 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1273 ir_graph *rem = current_ir_graph;
1275 current_ir_graph = irg;
1276 res = new_bd_Confirm(db, block, val, bound, cmp);
1277 current_ir_graph = rem;
1280 } /* new_rd_Confirm */
/* rd wrappers for Unknown/CallBegin/Break/Filter; EndReg and EndExcept
   construct directly with the explicit irg (no graph switch needed), and
   NoMem/Bad return per-graph singletons. */
1283 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1285 ir_graph *rem = current_ir_graph;
1287 current_ir_graph = irg;
1288 res = new_bd_Unknown(m);
1289 current_ir_graph = rem;
1292 } /* new_rd_Unknown */
1295 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1297 ir_graph *rem = current_ir_graph;
1299 current_ir_graph = irg;
1300 res = new_bd_CallBegin(db, block, call);
1301 current_ir_graph = rem;
1304 } /* new_rd_CallBegin */
1307 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1310 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1311 set_irg_end_reg(irg, res);
1312 IRN_VRFY_IRG(res, irg);
1314 } /* new_rd_EndReg */
1317 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1320 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1321 set_irg_end_except(irg, res);
1322 IRN_VRFY_IRG (res, irg);
1324 } /* new_rd_EndExcept */
1327 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1329 ir_graph *rem = current_ir_graph;
1331 current_ir_graph = irg;
1332 res = new_bd_Break(db, block);
1333 current_ir_graph = rem;
1336 } /* new_rd_Break */
1339 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1342 ir_graph *rem = current_ir_graph;
1344 current_ir_graph = irg;
1345 res = new_bd_Filter(db, block, arg, mode, proj);
1346 current_ir_graph = rem;
1349 } /* new_rd_Filter */
1352 new_rd_NoMem(ir_graph *irg) {
1353 return get_irg_no_mem(irg);
1354 } /* new_rd_NoMem */
1357 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1358 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1360 ir_graph *rem = current_ir_graph;
1362 current_ir_graph = irg;
1363 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1364 current_ir_graph = rem;
1370 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1371 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1373 ir_graph *rem = current_ir_graph;
1375 current_ir_graph = irg;
1376 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1377 current_ir_graph = rem;
1382 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1383 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1385 ir_graph *rem = current_ir_graph;
1387 current_ir_graph = irg;
1388 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1389 current_ir_graph = rem;
1392 } /* new_rd_CopyB */
1395 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1396 ir_node *objptr, ir_type *type) {
1398 ir_graph *rem = current_ir_graph;
1400 current_ir_graph = irg;
1401 res = new_bd_InstOf(db, block, store, objptr, type);
1402 current_ir_graph = rem;
1405 } /* new_rd_InstOf */
1408 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1410 ir_graph *rem = current_ir_graph;
1412 current_ir_graph = irg;
1413 res = new_bd_Raise(db, block, store, obj);
1414 current_ir_graph = rem;
1417 } /* new_rd_Raise */
1419 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1420 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1422 ir_graph *rem = current_ir_graph;
1424 current_ir_graph = irg;
1425 res = new_bd_Bound(db, block, store, idx, lower, upper);
1426 current_ir_graph = rem;
1429 } /* new_rd_Bound */
/* new_rd_Pin: builds a Pin node for `node` in `block` of `irg`.
   NOTE(review): `res` declaration and return are on missing lines. */
1431 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1433 ir_graph *rem = current_ir_graph;
1435 current_ir_graph = irg;
1436 res = new_bd_Pin(db, block, node);
1437 current_ir_graph = rem;
/* new_rd_ASM: builds an inline-assembler node with `arity` inputs,
   `n_outs` outputs, `n_clobber` clobbered idents and text `asm_text`,
   in `block` of `irg`.  Same current_ir_graph switch pattern.
   NOTE(review): `res` declaration and return are on missing lines. */
1442 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1443 int arity, ir_node *in[], ir_asm_constraint *inputs,
1444 int n_outs, ir_asm_constraint *outputs,
1445 int n_clobber, ident *clobber[], ident *asm_text) {
1447 ir_graph *rem = current_ir_graph;
1449 current_ir_graph = irg;
1450 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1451 current_ir_graph = rem;
/* ------------------------------------------------------------------ */
/* new_r_*(...) convenience wrappers: each forwards to the matching
   new_rd_*(...) constructor with a NULL dbg_info (no debug info).
   NOTE(review): this listing is gapped -- the closing brace of every
   one-line wrapper below falls on a line missing from the excerpt,
   and a few parameter lines (e.g. new_r_Sel's `ir_entity *ent`,
   new_r_Call's `ir_type *tp`, new_r_defaultProj's `long max_proj`,
   new_r_InstOf's `ir_type *type`) are likewise missing. */
/* ------------------------------------------------------------------ */
1457 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1458 return new_rd_Block(NULL, irg, arity, in);
1460 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1461 return new_rd_Start(NULL, irg, block);
1463 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1464 return new_rd_End(NULL, irg, block);
1466 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1467 return new_rd_Jmp(NULL, irg, block);
1469 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1470 return new_rd_IJmp(NULL, irg, block, tgt);
1472 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1473 return new_rd_Cond(NULL, irg, block, c);
1475 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1476 ir_node *store, int arity, ir_node **in) {
1477 return new_rd_Return(NULL, irg, block, store, arity, in);
1479 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1480 ir_mode *mode, tarval *con) {
1481 return new_rd_Const(NULL, irg, block, mode, con);
1483 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1484 ir_mode *mode, long value) {
1485 return new_rd_Const_long(NULL, irg, block, mode, value);
1487 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1488 ir_mode *mode, tarval *con, ir_type *tp) {
1489 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1491 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1492 symconst_symbol value, symconst_kind symkind) {
1493 return new_rd_SymConst(NULL, irg, block, value, symkind);
/* simpleSel: a Sel with zero array indices (plain member access). */
1495 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1496 ir_node *objptr, ir_entity *ent) {
1497 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1499 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1500 ir_node *objptr, int n_index, ir_node **index,
1502 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1504 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1505 ir_node *callee, int arity, ir_node **in,
1507 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1509 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1510 ir_node *op1, ir_node *op2, ir_mode *mode) {
1511 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1513 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1514 ir_node *op1, ir_node *op2, ir_mode *mode) {
1515 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1517 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1518 ir_node *op, ir_mode *mode) {
1519 return new_rd_Minus(NULL, irg, block, op, mode);
1521 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1522 ir_node *op1, ir_node *op2, ir_mode *mode) {
1523 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1525 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1526 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1527 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode);
1529 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1530 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1531 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode);
1533 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1534 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1535 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode);
1537 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1538 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1539 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode);
1541 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1542 ir_node *op, ir_mode *mode) {
1543 return new_rd_Abs(NULL, irg, block, op, mode);
1545 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1546 ir_node *op1, ir_node *op2, ir_mode *mode) {
1547 return new_rd_And(NULL, irg, block, op1, op2, mode);
1549 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1550 ir_node *op1, ir_node *op2, ir_mode *mode) {
1551 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1553 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1554 ir_node *op1, ir_node *op2, ir_mode *mode) {
1555 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1557 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1558 ir_node *op, ir_mode *mode) {
1559 return new_rd_Not(NULL, irg, block, op, mode);
1561 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1562 ir_node *op, ir_node *k, ir_mode *mode) {
1563 return new_rd_Shl(NULL, irg, block, op, k, mode);
1565 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1566 ir_node *op, ir_node *k, ir_mode *mode) {
1567 return new_rd_Shr(NULL, irg, block, op, k, mode);
1569 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1570 ir_node *op, ir_node *k, ir_mode *mode) {
1571 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1573 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1574 ir_node *op, ir_node *k, ir_mode *mode) {
1575 return new_rd_Rot(NULL, irg, block, op, k, mode);
1577 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1578 ir_node *op, ir_node *k, ir_mode *mode) {
1579 return new_rd_Carry(NULL, irg, block, op, k, mode);
1581 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1582 ir_node *op, ir_node *k, ir_mode *mode) {
1583 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1585 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1586 ir_node *op1, ir_node *op2) {
1587 return new_rd_Cmp(NULL, irg, block, op1, op2);
1589 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1590 ir_node *op, ir_mode *mode) {
1591 return new_rd_Conv(NULL, irg, block, op, mode);
1593 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1594 return new_rd_Cast(NULL, irg, block, op, to_tp);
1596 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1597 ir_node **in, ir_mode *mode) {
1598 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1600 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1601 ir_node *store, ir_node *adr, ir_mode *mode) {
1602 return new_rd_Load(NULL, irg, block, store, adr, mode);
1604 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1605 ir_node *store, ir_node *adr, ir_node *val) {
1606 return new_rd_Store(NULL, irg, block, store, adr, val);
1608 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1609 ir_node *size, ir_type *alloc_type, where_alloc where) {
1610 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1612 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1613 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1614 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1616 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1617 return new_rd_Sync(NULL, irg, block, arity, in);
1619 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1620 ir_mode *mode, long proj) {
1621 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1623 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1625 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1627 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1628 int arity, ir_node **in) {
1629 return new_rd_Tuple(NULL, irg, block, arity, in );
1631 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1632 ir_node *val, ir_mode *mode) {
1633 return new_rd_Id(NULL, irg, block, val, mode);
1635 ir_node *new_r_Bad(ir_graph *irg) {
1636 return new_rd_Bad(irg);
1638 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1639 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1641 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1642 return new_rd_Unknown(irg, m);
1644 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1645 return new_rd_CallBegin(NULL, irg, block, callee);
1647 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1648 return new_rd_EndReg(NULL, irg, block);
1650 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1651 return new_rd_EndExcept(NULL, irg, block);
1653 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1654 return new_rd_Break(NULL, irg, block);
1656 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1657 ir_mode *mode, long proj) {
1658 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1660 ir_node *new_r_NoMem(ir_graph *irg) {
1661 return new_rd_NoMem(irg);
1663 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1664 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1665 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1667 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1668 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1669 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1671 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1672 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1673 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1675 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1677 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1679 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1680 ir_node *store, ir_node *obj) {
1681 return new_rd_Raise(NULL, irg, block, store, obj);
1683 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1684 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1685 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1687 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1688 return new_rd_Pin(NULL, irg, block, node);
1690 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1691 int arity, ir_node *in[], ir_asm_constraint *inputs,
1692 int n_outs, ir_asm_constraint *outputs,
1693 int n_clobber, ident *clobber[], ident *asm_text) {
1694 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1697 /** ********************/
1698 /** public interfaces */
1699 /** construction tools */
/* new_d_Start / new_d_End: debug-info variants that build Start (mode_T,
   0 predecessors) and End (mode_X, dynamic/-1 in-array) directly in the
   current block of current_ir_graph, then optimize and verify the node.
   NOTE(review): the `ir_node *res;` declarations and `return res;`
   statements are on lines missing from this gapped listing. */
1703 * - create a new Start node in the current block
1705 * @return s - pointer to the created Start node
1710 new_d_Start(dbg_info *db) {
1713 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1714 op_Start, mode_T, 0, NULL);
1716 res = optimize_node(res);
1717 IRN_VRFY_IRG(res, current_ir_graph);
1722 new_d_End(dbg_info *db) {
1724 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1725 op_End, mode_X, -1, NULL);
1726 res = optimize_node(res);
1727 IRN_VRFY_IRG(res, current_ir_graph);
1732 /* Constructs a Block with a fixed number of predecessors.
1733 Does set current_block. Can be used with automatic Phi
1734 node construction. */
/* new_d_Block: creates the block via new_bd_Block, then -- while the
   graph is still in phase_building -- allocates and zeroes graph_arr,
   the per-block array of current local-variable values used by the
   automatic Phi construction.  Blocks with Unknown predecessors are not
   optimized (has_unknown flag); the new block becomes current_block.
   NOTE(review): gapped listing -- the Unknown-loop body that sets
   has_unknown and the closing brace/return are on missing lines. */
1736 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1739 int has_unknown = 0;
1741 res = new_bd_Block(db, arity, in);
1743 /* Create and initialize array for Phi-node construction. */
1744 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1745 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1746 current_ir_graph->n_loc);
1747 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1750 for (i = arity-1; i >= 0; i--)
1751 if (get_irn_op(in[i]) == op_Unknown) {
1756 if (!has_unknown) res = optimize_node(res);
1757 current_ir_graph->current_block = res;
1759 IRN_VRFY_IRG(res, current_ir_graph);
1764 /* ***********************************************************************/
1765 /* Methods necessary for automatic Phi node creation */
1767 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1768 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1769 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1770 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1772 Call Graph: ( A ---> B == A "calls" B)
1774 get_value mature_immBlock
1782 get_r_value_internal |
1786 new_rd_Phi0 new_rd_Phi_in
1788 * *************************************************************************** */
1790 /** Creates a Phi node with 0 predecessors. */
/* new_rd_Phi0: a placeholder Phi (arity 0) used to break recursion while
   a block is immature; it is upgraded/exchanged when the block matures.
   NOTE(review): `res` declaration and return are on missing lines. */
1791 static INLINE ir_node *
1792 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1795 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1796 IRN_VRFY_IRG(res, irg);
1800 /* There are two implementations of the Phi node construction. The first
1801 is faster, but does not work for blocks with more than 2 predecessors.
1802 The second works always but is slower and causes more unnecessary Phi
1804 Select the implementations by the following preprocessor flag set in
1806 #if USE_FAST_PHI_CONSTRUCTION
1808 /* This is a stack used for allocating and deallocating nodes in
1809 new_rd_Phi_in. The original implementation used the obstack
1810 to model this stack, now it is explicit. This reduces side effects.
1812 #if USE_EXPLICIT_PHI_IN_STACK
/* new_Phi_in_stack: malloc a Phi_in_stack and give it an empty flexible
   array (NEW_ARR_F) for recycled Phi nodes.
   NOTE(review): gapped listing -- malloc result is not visibly checked
   here; the missing lines may or may not handle OOM. */
1814 new_Phi_in_stack(void) {
1817 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1819 res->stack = NEW_ARR_F (ir_node *, 0);
1823 } /* new_Phi_in_stack */
/* free_Phi_in_stack: releases the flexible array; the struct free() is
   presumably on a missing line -- confirm. */
1826 free_Phi_in_stack(Phi_in_stack *s) {
1827 DEL_ARR_F(s->stack);
1829 } /* free_Phi_in_stack */
/* free_to_Phi_in_stack: push `phi` onto the recycle stack, growing the
   array only when pos has reached its current length. */
1832 free_to_Phi_in_stack(ir_node *phi) {
1833 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1834 current_ir_graph->Phi_in_stack->pos)
1835 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1837 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1839 (current_ir_graph->Phi_in_stack->pos)++;
1840 } /* free_to_Phi_in_stack */
/* alloc_or_pop_from_Phi_in_stack: if the recycle stack is empty, build a
   fresh Phi via new_ir_node; otherwise pop a recycled Phi, re-allocate
   its in-array on the obstack (old one cannot be freed there), copy the
   new predecessors in, and decrement pos.
   NOTE(review): the branch conditions and the `db`/`res` declarations
   are on missing lines; `db` is referenced but not visibly declared. */
1842 static INLINE ir_node *
1843 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1844 int arity, ir_node **in) {
1846 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1847 int pos = current_ir_graph->Phi_in_stack->pos;
1851 /* We need to allocate a new node */
1852 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1853 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1855 /* reuse the old node and initialize it again. */
1858 assert(res->kind == k_ir_node);
1859 assert(res->op == op_Phi);
1864 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1865 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
1867 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1869 (current_ir_graph->Phi_in_stack->pos)--;
1872 } /* alloc_or_pop_from_Phi_in_stack */
1873 #endif /* USE_EXPLICIT_PHI_IN_STACK */
/* (fast-construction variant, compiled under USE_FAST_PHI_CONSTRUCTION) */
1876 * Creates a Phi node with a given, fixed array **in of predecessors.
1877 * If the Phi node is unnecessary, as the same value reaches the block
1878 * through all control flow paths, it is eliminated and the value
1879 * returned directly. This constructor is only intended for use in
1880 * the automatic Phi node generation triggered by get_value or mature.
1881 * The implementation is quite tricky and depends on the fact, that
1882 * the nodes are allocated on a stack:
1883 * The in array contains predecessors and NULLs. The NULLs appear,
1884 * if get_r_value_internal, that computed the predecessors, reached
1885 * the same block on two paths. In this case the same value reaches
1886 * this block on both paths, there is no definition in between. We need
1887 * not allocate a Phi where these path's merge, but we have to communicate
1888 * this fact to the caller. This happens by returning a pointer to the
1889 * node the caller _will_ allocate. (Yes, we predict the address. We can
1890 * do so because the nodes are allocated on the obstack.) The caller then
1891 * finds a pointer to itself and, when this routine is called again,
1892 * eliminates itself.
1894 static INLINE ir_node *
1895 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins) {
1897 ir_node *res, *known;
1899 /* Allocate a new node on the obstack. This can return a node to
1900 which some of the pointers in the in-array already point.
1901 Attention: the constructor copies the in array, i.e., the later
1902 changes to the array in this routine do not affect the
1903 constructed node! If the in array contains NULLs, there will be
1904 missing predecessors in the returned node. Is this a possible
1905 internal state of the Phi node generation? */
1906 #if USE_EXPLICIT_PHI_IN_STACK
1907 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1909 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1910 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1913 /* The in-array can contain NULLs. These were returned by
1914 get_r_value_internal if it reached the same block/definition on a
1915 second path. The NULLs are replaced by the node itself to
1916 simplify the test in the next loop. */
1917 for (i = 0; i < ins; ++i) {
/* NOTE(review): loop bodies and the known/one-predecessor bookkeeping
   are on lines missing from this gapped listing. */
1922 /* This loop checks whether the Phi has more than one predecessor.
1923 If so, it is a real Phi node and we break the loop. Else the Phi
1924 node merges the same definition on several paths and therefore is
1926 for (i = 0; i < ins; ++i) {
1927 if (in[i] == res || in[i] == known)
1936 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1938 #if USE_EXPLICIT_PHI_IN_STACK
1939 free_to_Phi_in_stack(res);
/* else: remove the unneeded Phi from edge bookkeeping and pop it off
   the obstack again (it was the top allocation). */
1941 edges_node_deleted(res, current_ir_graph);
1942 obstack_free(current_ir_graph->obst, res);
1946 res = optimize_node (res);
1947 IRN_VRFY_IRG(res, irg);
1950 /* return the pointer to the Phi node. This node might be deallocated! */
1952 } /* new_rd_Phi_in */
/* Forward declaration needed by the mutually-recursive Phi machinery. */
1955 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
/* phi_merge (fast variant): collects, for every control predecessor of
   `block`, the reaching value of local variable `pos` into nin[] and
   builds the Phi via new_rd_Phi_in.  Stores the result in graph_arr[pos]
   only if no later set_value() already put a value there. */
1958 * Allocates and returns this node. The routine called to allocate the
1959 * node might optimize it away and return a real value, or even a pointer
1960 * to a deallocated Phi node on top of the obstack!
1961 * This function is called with an in-array of proper size.
1964 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
1965 ir_node *prevBlock, *res;
1968 /* This loop goes to all predecessor blocks of the block the Phi node is in
1969 and there finds the operands of the Phi node by calling
1970 get_r_value_internal. */
1971 for (i = 1; i <= ins; ++i) {
1972 assert (block->in[i]);
1973 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1975 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1978 /* After collecting all predecessors into the array nin a new Phi node
1979 with these predecessors is created. This constructor contains an
1980 optimization: If all predecessors of the Phi node are identical it
1981 returns the only operand instead of a new Phi node. If the value
1982 passes two different control flow edges without being defined, and
1983 this is the second path treated, a pointer to the node that will be
1984 allocated for the first path (recursion) is returned. We already
1985 know the address of this node, as it is the next node to be allocated
1986 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1987 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1989 /* Now we now the value for "pos" and can enter it in the array with
1990 all known local variables. Attention: this might be a pointer to
1991 a node, that later will be allocated!!! See new_rd_Phi_in().
1992 If this is called in mature, after some set_value() in the same block,
1993 the proper value must not be overwritten:
1995 get_value (makes Phi0, put's it into graph_arr)
1996 set_value (overwrites Phi0 in graph_arr)
1997 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2000 if (!block->attr.block.graph_arr[pos]) {
2001 block->attr.block.graph_arr[pos] = res;
2003 /* printf(" value already computed by %s\n",
2004 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2011 * This function returns the last definition of a variable. In case
2012 * this variable was last defined in a previous block, Phi nodes are
2013 * inserted. If the part of the firm graph containing the definition
2014 * is not yet constructed, a dummy Phi node is returned.
/* NOTE(review): gapped listing -- the `ir_node *res;`/`ir_node **nin;`
   declarations and the final `return res;` are on missing lines. */
2017 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2020 /* There are 4 cases to treat.
2022 1. The block is not mature and we visit it the first time. We can not
2023 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2024 predecessors is returned. This node is added to the linked list (field
2025 "link") of the containing block to be completed when this block is
2026 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2029 2. The value is already known in this block, graph_arr[pos] is set and we
2030 visit the block the first time. We can return the value without
2031 creating any new nodes.
2033 3. The block is mature and we visit it the first time. A Phi node needs
2034 to be created (phi_merge). If the Phi is not needed, as all it's
2035 operands are the same value reaching the block through different
2036 paths, it's optimized away and the value itself is returned.
2038 4. The block is mature, and we visit it the second time. Now two
2039 subcases are possible:
2040 * The value was computed completely the last time we were here. This
2041 is the case if there is no loop. We can return the proper value.
2042 * The recursion that visited this node and set the flag did not
2043 return yet. We are computing a value in a loop and need to
2044 break the recursion without knowing the result yet.
2045 @@@ strange case. Straight forward we would create a Phi before
2046 starting the computation of it's predecessors. In this case we will
2047 find a Phi here in any case. The problem is that this implementation
2048 only creates a Phi after computing the predecessors, so that it is
2049 hard to compute self references of this Phi. @@@
2050 There is no simple check for the second subcase. Therefore we check
2051 for a second visit and treat all such cases as the second subcase.
2052 Anyways, the basic situation is the same: we reached a block
2053 on two paths without finding a definition of the value: No Phi
2054 nodes are needed on both paths.
2055 We return this information "Two paths, no Phi needed" by a very tricky
2056 implementation that relies on the fact that an obstack is a stack and
2057 will return a node with the same address on different allocations.
2058 Look also at phi_merge and new_rd_phi_in to understand this.
2059 @@@ Unfortunately this does not work, see testprogram
2060 three_cfpred_example.
2064 /* case 4 -- already visited. */
2065 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2067 /* visited the first time */
2068 set_irn_visited(block, get_irg_visited(current_ir_graph));
2070 /* Get the local valid value */
2071 res = block->attr.block.graph_arr[pos];
2073 /* case 2 -- If the value is actually computed, return it. */
2074 if (res) return res;
2076 if (block->attr.block.is_matured) { /* case 3 */
2078 /* The Phi has the same amount of ins as the corresponding block. */
2079 int ins = get_irn_arity(block);
2081 NEW_ARR_A(ir_node *, nin, ins);
2083 /* Phi merge collects the predecessors and then creates a node. */
2084 res = phi_merge(block, pos, mode, nin, ins);
2086 } else { /* case 1 */
2087 /* The block is not mature, we don't know how many in's are needed. A Phi
2088 with zero predecessors is created. Such a Phi node is called Phi0
2089 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2090 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2092 The Phi0 has to remember the pos of it's internal value. If the real
2093 Phi is computed, pos is used to update the array with the local
2096 res = new_rd_Phi0(current_ir_graph, block, mode);
2097 res->attr.phi0_pos = pos;
2098 res->link = block->link;
2102 /* If we get here, the frontend missed a use-before-definition error */
2105 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2106 assert(mode->code >= irm_F && mode->code <= irm_P);
2107 res = new_rd_Const(NULL, current_ir_graph, block, mode, tarval_mode_null[mode->code]);
2110 /* The local valid value is available now. */
2111 block->attr.block.graph_arr[pos] = res;
2114 } /* get_r_value_internal */
2119 it starts the recursion. This causes an Id at the entry of
2120 every block that has no definition of the value! **/
2122 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy stack API for the general (slow) construction path: the explicit
   Phi_in_stack is only used by the fast variant. */
2124 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2125 void free_Phi_in_stack(Phi_in_stack *s) {}
/* new_rd_Phi_in (general variant): builds a Phi over in[]/ins in `block`.
   Extra parameter phi0 is the placeholder Phi0 this Phi will replace;
   self-references through phi0 are rewritten to the new node.  If at
   most one real (non-Bad, non-self) predecessor `known` remains, the
   freshly allocated Phi is popped off the obstack again and `known`
   (possibly optimized/exchanged) is returned instead.  Memory Phis are
   kept alive via the End node so endless loops don't lose them.
   NOTE(review): gapped listing -- the `known` initialization, several
   branch lines and the final return are on missing lines. */
2128 static INLINE ir_node *
2129 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2130 ir_node **in, int ins, ir_node *phi0) {
2132 ir_node *res, *known;
2134 /* Allocate a new node on the obstack. The allocation copies the in
2136 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2137 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2139 /* This loop checks whether the Phi has more than one predecessor.
2140 If so, it is a real Phi node and we break the loop. Else the
2141 Phi node merges the same definition on several paths and therefore
2142 is not needed. Don't consider Bad nodes! */
2144 for (i=0; i < ins; ++i)
2148 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2150 /* Optimize self referencing Phis: We can't detect them yet properly, as
2151 they still refer to the Phi0 they will replace. So replace right now. */
2152 if (phi0 && in[i] == phi0) in[i] = res;
2154 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2162 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2165 edges_node_deleted(res, current_ir_graph);
2166 obstack_free (current_ir_graph->obst, res);
2167 if (is_Phi(known)) {
2168 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2169 order, an enclosing Phi know may get superfluous. */
2170 res = optimize_in_place_2(known);
2172 exchange(known, res);
2178 /* A undefined value, e.g., in unreachable code. */
2182 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2183 IRN_VRFY_IRG(res, irg);
2184 /* Memory Phis in endless loops must be kept alive.
2185 As we can't distinguish these easily we keep all of them alive. */
2186 if ((res->op == op_Phi) && (mode == mode_M))
2187 add_End_keepalive(get_irg_end(irg), res);
2191 } /* new_rd_Phi_in */
/* Forward declarations for the mutually-recursive helpers below. */
2194 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2196 #if PRECISE_EXC_CONTEXT
2198 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2201 * Construct a new frag_array for node n.
2202 * Copy the content from the current graph_arr of the corresponding block:
2203 * this is the current state.
2204 * Set ProjM(n) as current memory state.
2205 * Further the last entry in frag_arr of current block points to n. This
2206 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* NOTE(review): gapped listing -- the arr/opt declarations, the
   set_optimize(opt) restore and the return are on missing lines. */
2208 static INLINE ir_node **new_frag_arr(ir_node *n) {
/* Snapshot the current block's local-value array into a fresh obstack
   array; slot 0 will be overwritten with n's exception-memory Proj. */
2212 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2213 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2214 sizeof(ir_node *)*current_ir_graph->n_loc);
2216 /* turn off optimization before allocating Proj nodes, as res isn't
2218 opt = get_opt_optimize(); set_optimize(0);
2219 /* Here we rely on the fact that all frag ops have Memory as first result! */
2220 if (get_irn_op(n) == op_Call)
2221 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2222 else if (get_irn_op(n) == op_CopyB)
2223 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* else-branch: all remaining fragile ops share one M proj number; the
   asserts pin that assumption at compile^Wrun time. */
2225 assert((pn_Quot_M == pn_DivMod_M) &&
2226 (pn_Quot_M == pn_Div_M) &&
2227 (pn_Quot_M == pn_Mod_M) &&
2228 (pn_Quot_M == pn_Load_M) &&
2229 (pn_Quot_M == pn_Store_M) &&
2230 (pn_Quot_M == pn_Alloc_M) &&
2231 (pn_Quot_M == pn_Bound_M));
2232 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain: the block's last graph_arr slot remembers the newest frag op. */
2236 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2238 } /* new_frag_arr */
2241 * Returns the frag_arr from a node.
/* get_frag_arr: fetch the per-op frag_arr from the attribute union,
   dispatching on the opcode (Call/Alloc/Load/Store each store it in a
   different attr member; the last case is the shared `except` attr).
   NOTE(review): the `case iro_*:` labels sit on lines missing from this
   gapped listing -- only the return statements are visible. */
2243 static INLINE ir_node **get_frag_arr(ir_node *n) {
2244 switch (get_irn_opcode(n)) {
2246 return n->attr.call.exc.frag_arr;
2248 return n->attr.alloc.exc.frag_arr;
2250 return n->attr.load.exc.frag_arr;
2252 return n->attr.store.exc.frag_arr;
2254 return n->attr.except.frag_arr;
2256 } /* get_frag_arr */
/* set_frag_value: record `val` for local `pos` in frag_arr if the slot
   is still empty, then propagate along the frag-op chain (slot
   n_loc-1 links to the next fragile op's frag_arr).
   NOTE(review): two versions appear interleaved in this gapped listing --
   a recursive one and an iterative rewrite bounded at 1000 steps with an
   "endless recursion" assert; the surrounding #if/#else lines that pick
   one of them are missing. */
2259 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2261 if (!frag_arr[pos]) frag_arr[pos] = val;
2262 if (frag_arr[current_ir_graph->n_loc - 1]) {
2263 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2264 assert(arr != frag_arr && "Endless recursion detected");
2265 set_frag_value(arr, pos, val);
2270 for (i = 0; i < 1000; ++i) {
2271 if (!frag_arr[pos]) {
2272 frag_arr[pos] = val;
2274 if (frag_arr[current_ir_graph->n_loc - 1]) {
2275 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2281 assert(0 && "potential endless recursion");
2283 } /* set_frag_value */
/* get_r_frag_value_internal: like get_r_value_internal, but reads the
   value of local `pos` as it was at the fragile (exception-raising) op
   `cfOp`, using cfOp's frag_arr snapshot.  If the snapshot slot is
   empty, the value is computed (phi_merge / Phi0 for immature blocks,
   else get_r_value_internal) and written back through set_frag_value.
   NOTE(review): gapped listing -- the frag_arr/res declarations, the
   if (!res) branch structure and the final return are on missing lines. */
2286 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2290 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2292 frag_arr = get_frag_arr(cfOp);
2293 res = frag_arr[pos];
2295 if (block->attr.block.graph_arr[pos]) {
2296 /* There was a set_value() after the cfOp and no get_value before that
2297 set_value(). We must build a Phi node now. */
2298 if (block->attr.block.is_matured) {
2299 int ins = get_irn_arity(block);
2301 NEW_ARR_A(ir_node *, nin, ins);
2302 res = phi_merge(block, pos, mode, nin, ins);
2304 res = new_rd_Phi0(current_ir_graph, block, mode);
2305 res->attr.phi0.pos = pos;
2306 res->link = block->link;
2310 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2311 but this should be better: (remove comment if this works) */
2312 /* It's a Phi, we can write this into all graph_arrs with NULL */
2313 set_frag_value(block->attr.block.graph_arr, pos, res);
2315 res = get_r_value_internal(block, pos, mode);
2316 set_frag_value(block->attr.block.graph_arr, pos, res);
2320 } /* get_r_frag_value_internal */
2321 #endif /* PRECISE_EXC_CONTEXT */
/*
 * NOTE(review): this excerpt carries original line numbers fused into the
 * text and is missing several lines (declarations, closing braces, the
 * restore of the remembered current block).  Comments added below describe
 * only the logic that is visible here — confirm against the full file.
 */
2324 * Computes the predecessors for the real phi node, and then
2325 * allocates and returns this node. The routine called to allocate the
2326 * node might optimize it away and return a real value.
2327 * This function must be called with an in-array of proper size.
/* phi_merge: build the real Phi for value slot `pos` of `block`; `nin` is
   caller-provided scratch space for the `ins` predecessor values. */
2330 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2331 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2334 /* If this block has no value at pos create a Phi0 and remember it
2335 in graph_arr to break recursions.
2336 Else we may not set graph_arr as there a later value is remembered. */
2338 if (!block->attr.block.graph_arr[pos]) {
2339 if (block == get_irg_start_block(current_ir_graph)) {
2340 /* Collapsing to Bad tarvals is no good idea.
2341 So we call a user-supplied routine here that deals with this case as
2342 appropriate for the given language. Sorrily the only help we can give
2343 here is the position.
2345 Even if all variables are defined before use, it can happen that
2346 we get to the start block, if a Cond has been replaced by a tuple
2347 (bad, jmp). In this case we call the function needlessly, eventually
2348 generating an non existent error.
2349 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2352 if (default_initialize_local_variable) {
2353 ir_node *rem = get_cur_block();
2355 set_cur_block(block);
/* pos - 1: slot 0 is the memory state, so user values are shifted by one */
2356 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
/* NOTE(review): the restore `set_cur_block(rem)` and the else-branch lines
   are elided here; the line below is the fallback without a callback. */
2360 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2361 /* We don't need to care about exception ops in the start block.
2362 There are none by definition. */
2363 return block->attr.block.graph_arr[pos];
/* Not the start block: break recursion with a 0-in placeholder Phi. */
2365 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2366 block->attr.block.graph_arr[pos] = phi0;
2367 #if PRECISE_EXC_CONTEXT
2368 if (get_opt_precise_exc_context()) {
2369 /* Set graph_arr for fragile ops. Also here we should break recursion.
2370 We could choose a cyclic path through an cfop. But the recursion would
2371 break at some point. */
2372 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2378 /* This loop goes to all predecessor blocks of the block the Phi node
2379 is in and there finds the operands of the Phi node by calling
2380 get_r_value_internal. */
2381 for (i = 1; i <= ins; ++i) {
2382 prevCfOp = skip_Proj(block->in[i]);
2384 if (is_Bad(prevCfOp)) {
2385 /* In case a Cond has been optimized we would get right to the start block
2386 with an invalid definition. */
2387 nin[i-1] = new_Bad();
2390 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2392 if (!is_Bad(prevBlock)) {
2393 #if PRECISE_EXC_CONTEXT
/* Fragile predecessor op: take the value from its frag array instead. */
2394 if (get_opt_precise_exc_context() &&
2395 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2396 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2397 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2400 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2402 nin[i-1] = new_Bad();
2406 /* We want to pass the Phi0 node to the constructor: this finds additional
2407 optimization possibilities.
2408 The Phi0 node either is allocated in this function, or it comes from
2409 a former call to get_r_value_internal. In this case we may not yet
2410 exchange phi0, as this is done in mature_immBlock. */
2412 phi0_all = block->attr.block.graph_arr[pos];
/* Only pass a genuine Phi0 of this very block on; anything else means a
   real value has been recorded meanwhile. */
2413 if (!((get_irn_op(phi0_all) == op_Phi) &&
2414 (get_irn_arity(phi0_all) == 0) &&
2415 (get_nodes_block(phi0_all) == block)))
2421 /* After collecting all predecessors into the array nin a new Phi node
2422 with these predecessors is created. This constructor contains an
2423 optimization: If all predecessors of the Phi node are identical it
2424 returns the only operand instead of a new Phi node. */
2425 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2427 /* In case we allocated a Phi0 node at the beginning of this procedure,
2428 we need to exchange this Phi0 with the real Phi. */
2430 exchange(phi0, res);
2431 block->attr.block.graph_arr[pos] = res;
2432 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2433 only an optimization. */
/* NOTE(review): extraction artifact — original line numbers are fused into
   the text and some lines (closing braces, a `block->link = res;`) are
   elided; comments describe the visible logic only. */
2440 * This function returns the last definition of a variable. In case
2441 * this variable was last defined in a previous block, Phi nodes are
2442 * inserted. If the part of the firm graph containing the definition
2443 * is not yet constructed, a dummy Phi node is returned.
/* get_r_value_internal: SSA value lookup for slot `pos` in `block`,
   creating Phi/Phi0 nodes on demand.  Requires a fresh visited epoch
   (callers do inc_irg_visited first). */
2446 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2448 /* There are 4 cases to treat.
2450 1. The block is not mature and we visit it the first time. We can not
2451 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2452 predecessors is returned. This node is added to the linked list (field
2453 "link") of the containing block to be completed when this block is
2454 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2457 2. The value is already known in this block, graph_arr[pos] is set and we
2458 visit the block the first time. We can return the value without
2459 creating any new nodes.
2461 3. The block is mature and we visit it the first time. A Phi node needs
2462 to be created (phi_merge). If the Phi is not needed, as all it's
2463 operands are the same value reaching the block through different
2464 paths, it's optimized away and the value itself is returned.
2466 4. The block is mature, and we visit it the second time. Now two
2467 subcases are possible:
2468 * The value was computed completely the last time we were here. This
2469 is the case if there is no loop. We can return the proper value.
2470 * The recursion that visited this node and set the flag did not
2471 return yet. We are computing a value in a loop and need to
2472 break the recursion. This case only happens if we visited
2473 the same block with phi_merge before, which inserted a Phi0.
2474 So we return the Phi0.
2477 /* case 4 -- already visited. */
2478 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2479 /* As phi_merge allocates a Phi0 this value is always defined. Here
2480 is the critical difference of the two algorithms. */
2481 assert(block->attr.block.graph_arr[pos]);
2482 return block->attr.block.graph_arr[pos];
2485 /* visited the first time */
2486 set_irn_visited(block, get_irg_visited(current_ir_graph));
2488 /* Get the local valid value */
2489 res = block->attr.block.graph_arr[pos];
2491 /* case 2 -- If the value is actually computed, return it. */
2492 if (res) { return res; };
2494 if (block->attr.block.is_matured) { /* case 3 */
2496 /* The Phi has the same amount of ins as the corresponding block. */
2497 int ins = get_irn_arity(block);
2499 NEW_ARR_A (ir_node *, nin, ins);
2501 /* Phi merge collects the predecessors and then creates a node. */
2502 res = phi_merge (block, pos, mode, nin, ins);
2504 } else { /* case 1 */
2505 /* The block is not mature, we don't know how many in's are needed. A Phi
2506 with zero predecessors is created. Such a Phi node is called Phi0
2507 node. The Phi0 is then added to the list of Phi0 nodes in this block
2508 to be matured by mature_immBlock later.
2509 The Phi0 has to remember the pos of it's internal value. If the real
2510 Phi is computed, pos is used to update the array with the local
2512 res = new_rd_Phi0(current_ir_graph, block, mode);
2513 res->attr.phi0.pos = pos;
/* Chain the Phi0 into the block's Phi0 list via the link field;
   NOTE(review): the `block->link = res;` line appears to be elided here. */
2514 res->link = block->link;
2518 /* If we get here, the frontend missed a use-before-definition error */
2521 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2522 assert(mode->code >= irm_F && mode->code <= irm_P);
2523 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2524 get_mode_null(mode));
2527 /* The local valid value is available now. */
2528 block->attr.block.graph_arr[pos] = res;
2531 } /* get_r_value_internal */
2533 #endif /* USE_FAST_PHI_CONSTRUCTION */
2535 /* ************************************************************************** */
/* NOTE(review): extraction artifact — declarations (ins, n, next, nin) and
   some loop-body lines are elided; comments describe visible logic only. */
2538 * Finalize a Block node, when all control flows are known.
2539 * Acceptable parameters are only Block nodes.
/* mature_immBlock: fix the predecessor count of an immature block and turn
   every pending Phi0 on its link list into a real Phi via phi_merge. */
2542 mature_immBlock(ir_node *block) {
2547 assert(get_irn_opcode(block) == iro_Block);
2548 /* @@@ should be commented in
2549 assert (!get_Block_matured(block) && "Block already matured"); */
2551 if (!get_Block_matured(block)) {
/* in[0] is the block's own slot, so the predecessor count is len-1 */
2552 ins = ARR_LEN(block->in)-1;
2553 /* Fix block parameters */
2554 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2556 /* An array for building the Phi nodes. */
2557 NEW_ARR_A(ir_node *, nin, ins);
2559 /* Traverse a chain of Phi nodes attached to this block and mature
2561 for (n = block->link; n; n = next) {
/* each phi_merge walk needs its own visited epoch */
2562 inc_irg_visited(current_ir_graph);
2564 exchange(n, phi_merge(block, n->attr.phi0.pos, n->mode, nin, ins));
2567 block->attr.block.is_matured = 1;
2569 /* Now, as the block is a finished firm node, we can optimize it.
2570 Since other nodes have been allocated since the block was created
2571 we can not free the node on the obstack. Therefore we have to call
2573 Unfortunately the optimization does not change a lot, as all allocated
2574 nodes refer to the unoptimized node.
2575 We can call _2, as global cse has no effect on blocks. */
2576 block = optimize_in_place_2(block);
2577 IRN_VRFY_IRG(block, current_ir_graph);
2579 } /* mature_immBlock */
/*
 * Debug-info ("new_d_") constructors: each forwards to the corresponding
 * new_bd_* constructor in the current block of current_ir_graph.
 * Const-like nodes are placed in the start block so they dominate all uses.
 * NOTE(review): return-type lines and some closing braces are elided by the
 * extraction; original line numbers are fused into the text.
 */
2582 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2583 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2587 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2588 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2592 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2593 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2594 } /* new_d_Const_long */
2597 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2598 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2599 } /* new_d_Const_type */
2603 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2604 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2608 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2609 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2613 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2615 assert(arg->op == op_Cond);
2616 arg->attr.cond.kind = fragmentary;
2617 arg->attr.cond.default_proj = max_proj;
2618 res = new_Proj(arg, mode_X, max_proj);
2620 } /* new_d_defaultProj */
/* Conv/Cast/Tuple constructors in the current block.  new_d_Conv and
   new_d_strictConv share new_bd_Conv; the trailing 0/1 is the strict flag.
   NOTE(review): return-type lines are elided by the extraction. */
2623 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2624 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2628 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2629 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2630 } /* new_d_strictConv */
2633 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2634 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2638 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2639 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2648 * Allocate the frag array.
2650 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2651 if (get_opt_precise_exc_context()) {
2652 if ((current_ir_graph->phase_state == phase_building) &&
2653 (get_irn_op(res) == op) && /* Could be optimized away. */
2654 !*frag_store) /* Could be a cse where the arr is already set. */ {
2655 *frag_store = new_frag_arr(res);
2658 } /* allocate_frag_arr */
/*
 * "new_d_" constructors for the remaining node kinds.  Fragile operations
 * (Quot, DivMod, Div, Mod, Call, Load, Store, Alloc, CopyB, Bound) also get
 * a frag array via allocate_frag_arr when PRECISE_EXC_CONTEXT is enabled.
 * NOTE(review): the extraction elides return-type lines, `ir_node *res;`
 * declarations, `#endif` / `return res;` pairs and some closing braces;
 * original line numbers are fused into the text.
 */
2661 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2663 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode);
2664 #if PRECISE_EXC_CONTEXT
2665 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2672 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2674 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode);
2675 #if PRECISE_EXC_CONTEXT
2676 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2680 } /* new_d_DivMod */
2683 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2685 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode);
2686 #if PRECISE_EXC_CONTEXT
2687 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2694 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2696 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode);
2697 #if PRECISE_EXC_CONTEXT
2698 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2717 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2718 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2722 new_d_Jmp(dbg_info *db) {
2723 return new_bd_Jmp(db, current_ir_graph->current_block);
2727 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2728 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2732 new_d_Cond(dbg_info *db, ir_node *c) {
2733 return new_bd_Cond(db, current_ir_graph->current_block, c);
2737 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2740 res = new_bd_Call(db, current_ir_graph->current_block,
2741 store, callee, arity, in, tp);
2742 #if PRECISE_EXC_CONTEXT
2743 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2750 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2751 return new_bd_Return(db, current_ir_graph->current_block,
2753 } /* new_d_Return */
2756 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2758 res = new_bd_Load(db, current_ir_graph->current_block,
2760 #if PRECISE_EXC_CONTEXT
2761 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2768 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2770 res = new_bd_Store(db, current_ir_graph->current_block,
2772 #if PRECISE_EXC_CONTEXT
2773 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2780 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2781 where_alloc where) {
2783 res = new_bd_Alloc(db, current_ir_graph->current_block,
2784 store, size, alloc_type, where);
2785 #if PRECISE_EXC_CONTEXT
2786 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2793 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2794 ir_node *size, ir_type *free_type, where_alloc where) {
2795 return new_bd_Free(db, current_ir_graph->current_block,
2796 store, ptr, size, free_type, where);
2800 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2801 /* GL: objptr was called frame before. Frame was a bad choice for the name
2802 as the operand could as well be a pointer to a dynamic object. */
2804 return new_bd_Sel(db, current_ir_graph->current_block,
2805 store, objptr, 0, NULL, ent);
2806 } /* new_d_simpleSel */
2809 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2810 return new_bd_Sel(db, current_ir_graph->current_block,
2811 store, objptr, n_index, index, sel);
/* SymConsts are placed in the start block, like Consts. */
2815 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2816 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2818 } /* new_d_SymConst_type */
2821 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
2822 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2823 value, kind, firm_unknown_type);
2824 } /* new_d_SymConst */
2827 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2828 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2834 return _new_d_Bad();
2838 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2839 return new_bd_Confirm(db, current_ir_graph->current_block,
2841 } /* new_d_Confirm */
2844 new_d_Unknown(ir_mode *m) {
2845 return new_bd_Unknown(m);
2846 } /* new_d_Unknown */
2849 new_d_CallBegin(dbg_info *db, ir_node *call) {
2850 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2851 } /* new_d_CallBegin */
2854 new_d_EndReg(dbg_info *db) {
2855 return new_bd_EndReg(db, current_ir_graph->current_block);
2856 } /* new_d_EndReg */
2859 new_d_EndExcept(dbg_info *db) {
2860 return new_bd_EndExcept(db, current_ir_graph->current_block);
2861 } /* new_d_EndExcept */
2864 new_d_Break(dbg_info *db) {
2865 return new_bd_Break(db, current_ir_graph->current_block);
2869 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2870 return new_bd_Filter(db, current_ir_graph->current_block,
2872 } /* new_d_Filter */
/* Parenthesized name suppresses a same-named macro from the header. */
2875 (new_d_NoMem)(void) {
2876 return _new_d_NoMem();
2880 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2881 ir_node *ir_true, ir_mode *mode) {
2882 return new_bd_Mux(db, current_ir_graph->current_block,
2883 sel, ir_false, ir_true, mode);
2887 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2888 return new_bd_Psi(db, current_ir_graph->current_block,
2889 arity, conds, vals, mode);
2892 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2893 ir_node *dst, ir_node *src, ir_type *data_type) {
2895 res = new_bd_CopyB(db, current_ir_graph->current_block,
2896 store, dst, src, data_type);
2897 #if PRECISE_EXC_CONTEXT
2898 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2904 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2905 return new_bd_InstOf(db, current_ir_graph->current_block,
2906 store, objptr, type);
2907 } /* new_d_InstOf */
2910 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2911 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2914 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2915 ir_node *idx, ir_node *lower, ir_node *upper) {
2917 res = new_bd_Bound(db, current_ir_graph->current_block,
2918 store, idx, lower, upper);
2919 #if PRECISE_EXC_CONTEXT
2920 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2926 new_d_Pin(dbg_info *db, ir_node *node) {
2927 return new_bd_Pin(db, current_ir_graph->current_block, node);
2931 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2932 int n_outs, ir_asm_constraint *outputs,
2933 int n_clobber, ident *clobber[], ident *asm_text) {
2934 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2937 /* ********************************************************************* */
2938 /* Comfortable interface with automatic Phi node construction. */
2939 /* (Uses also constructors of ?? interface, except new_Block. */
2940 /* ********************************************************************* */
2942 /* Block construction */
2943 /* immature Block without predecessors */
/* new_d_immBlock: create an immature block (predecessors unknown, added
   later via add_immBlock_pred) and make it the current block.
   NOTE(review): the return-type line and the `ir_node *res;` declaration
   are elided by the extraction. */
2945 new_d_immBlock(dbg_info *db) {
2948 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2949 /* creates a new dynamic in-array as length of in is -1 */
2950 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2951 current_ir_graph->current_block = res;
2953 /* macroblock head */
2956 res->attr.block.is_matured = 0;
2957 res->attr.block.is_dead = 0;
2958 res->attr.block.is_mb_head = 1;
2959 res->attr.block.irg = current_ir_graph;
2960 res->attr.block.backedge = NULL;
2961 res->attr.block.in_cg = NULL;
2962 res->attr.block.cg_backedge = NULL;
2963 res->attr.block.extblk = NULL;
2964 res->attr.block.region = NULL;
2965 res->attr.block.mb_depth = 0;
2967 set_Block_block_visited(res, 0);
2969 /* Create and initialize array for Phi-node construction. */
/* one slot per local value; slot 0 is the memory state */
2970 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2971 current_ir_graph->n_loc);
2972 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2974 /* Immature block may not be optimized! */
2975 IRN_VRFY_IRG(res, current_ir_graph);
2978 } /* new_d_immBlock */
2981 new_immBlock(void) {
2982 return new_d_immBlock(NULL);
2983 } /* new_immBlock */
2985 /* immature PartBlock with its predecessors */
/* new_d_immPartBlock: immature partial block continuing the macroblock of
   pred_jmp's block; inherits the macroblock head and nests one level deeper. */
2987 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2988 ir_node *res = new_d_immBlock(db);
2989 ir_node *blk = get_nodes_block(pred_jmp);
/* share the macroblock header (in[0]) with the predecessor's block */
2991 res->in[0] = blk->in[0];
2992 add_immBlock_pred(res, pred_jmp);
2994 res->attr.block.is_mb_head = 0;
2995 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2998 } /* new_d_immPartBlock */
3001 new_immPartBlock(ir_node *pred_jmp) {
3002 return new_d_immPartBlock(NULL, pred_jmp);
3003 } /* new_immPartBlock */
3005 /* add an edge to a jmp/control flow node */
/* add_immBlock_pred: append control-flow predecessor jmp to an immature
   macroblock head.  Only legal before the block is matured. */
3007 add_immBlock_pred(ir_node *block, ir_node *jmp) {
/* in[0] is the block's own slot; n is the index of the new predecessor */
3008 int n = ARR_LEN(block->in) - 1;
3010 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
3011 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
3012 assert(jmp != NULL);
3014 ARR_APP1(ir_node *, block->in, jmp);
3016 hook_set_irn_n(block, n, jmp, NULL);
3017 } /* add_immBlock_pred */
3019 /* changing the current block */
3021 set_cur_block(ir_node *target) {
3022 current_ir_graph->current_block = target;
3023 } /* set_cur_block */
3025 /* ************************ */
3026 /* parameter administration */
3028 /* get a value from the parameter array from the current block by its index */
3030 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
3031 ir_graph *irg = current_ir_graph;
3032 assert(get_irg_phase_state(irg) == phase_building);
3033 inc_irg_visited(irg);
3035 return get_r_value_internal(irg->current_block, pos + 1, mode);
3038 /* get a value from the parameter array from the current block by its index */
3040 get_value(int pos, ir_mode *mode) {
3041 return get_d_value(NULL, pos, mode);
3044 /* set a value at position pos in the parameter array from the current block */
3046 set_value(int pos, ir_node *value) {
3047 ir_graph *irg = current_ir_graph;
3048 assert(get_irg_phase_state(irg) == phase_building);
3049 assert(pos+1 < irg->n_loc);
3050 irg->current_block->attr.block.graph_arr[pos + 1] = value;
3053 /* Find the value number for a node in the current block.*/
3055 find_value(ir_node *value) {
3057 ir_node *bl = current_ir_graph->current_block;
3059 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
3060 if (bl->attr.block.graph_arr[i] == value)
3065 /* get the current store */
3068 ir_graph *irg = current_ir_graph;
3070 assert(get_irg_phase_state(irg) == phase_building);
3071 /* GL: one could call get_value instead */
3072 inc_irg_visited(irg);
3073 return get_r_value_internal(irg->current_block, 0, mode_M);
3076 /* set the current store: handles automatic Sync construction for Load nodes */
/* NOTE(review): the extraction elides several lines here (else branches,
   the in[] assignments before new_Sync, closing braces); comments describe
   visible logic only. */
3078 set_store(ir_node *store) {
3079 ir_node *load, *pload, *pred, *in[2];
3081 assert(get_irg_phase_state(current_ir_graph) == phase_building);
3082 /* Beware: due to dead code elimination, a store might become a Bad node even in
3083 the construction phase. */
3084 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
3086 if (get_opt_auto_create_sync()) {
3087 /* handle non-volatile Load nodes by automatically creating Sync's */
3088 load = skip_Proj(store);
3089 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3090 pred = get_Load_mem(load);
3092 if (is_Sync(pred)) {
3093 /* a Load after a Sync: move it up */
3094 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3096 set_Load_mem(load, get_memop_mem(mem));
3097 add_Sync_pred(pred, store);
3100 pload = skip_Proj(pred);
3101 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3102 /* a Load after a Load: create a new Sync */
3103 set_Load_mem(load, get_Load_mem(pload));
/* NOTE(review): the assignments filling in[0]/in[1] are elided here */
3107 store = new_Sync(2, in);
/* finally record the (possibly rewritten) store as memory slot 0 */
3112 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3116 keep_alive(ir_node *ka) {
3117 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3120 /* --- Useful access routines --- */
3121 /* Returns the current block of the current graph. To set the current
3122 block use set_cur_block. */
3123 ir_node *get_cur_block(void) {
3124 return get_irg_current_block(current_ir_graph);
3125 } /* get_cur_block */
3127 /* Returns the frame type of the current graph */
3128 ir_type *get_cur_frame_type(void) {
3129 return get_irg_frame_type(current_ir_graph);
3130 } /* get_cur_frame_type */
3133 /* ********************************************************************* */
3136 /* call once for each run of the library */
3138 init_cons(uninitialized_local_variable_func_t *func) {
3139 default_initialize_local_variable = func;
3143 irp_finalize_cons(void) {
3145 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3146 irg_finalize_cons(get_irp_irg(i));
3148 irp->phase_state = phase_high;
3149 } /* irp_finalize_cons */
/*
 * Convenience constructors without debug information: each forwards to its
 * new_d_* counterpart with db == NULL.
 * NOTE(review): extraction artifact — original line numbers are fused into
 * the text and most closing braces / some continuation lines are elided.
 */
3152 ir_node *new_Block(int arity, ir_node **in) {
3153 return new_d_Block(NULL, arity, in);
3155 ir_node *new_Start(void) {
3156 return new_d_Start(NULL);
3158 ir_node *new_End(void) {
3159 return new_d_End(NULL);
3161 ir_node *new_Jmp(void) {
3162 return new_d_Jmp(NULL);
3164 ir_node *new_IJmp(ir_node *tgt) {
3165 return new_d_IJmp(NULL, tgt);
3167 ir_node *new_Cond(ir_node *c) {
3168 return new_d_Cond(NULL, c);
3170 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3171 return new_d_Return(NULL, store, arity, in);
3173 ir_node *new_Const(ir_mode *mode, tarval *con) {
3174 return new_d_Const(NULL, mode, con);
3177 ir_node *new_Const_long(ir_mode *mode, long value) {
3178 return new_d_Const_long(NULL, mode, value);
/* derives the mode from the given type */
3181 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3182 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3185 ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
3186 return new_d_SymConst_type(NULL, value, kind, type);
3188 ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
3189 return new_d_SymConst(NULL, value, kind);
3191 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3192 return new_d_simpleSel(NULL, store, objptr, ent);
3194 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3196 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3198 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3200 return new_d_Call(NULL, store, callee, arity, in, tp);
3202 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3203 return new_d_Add(NULL, op1, op2, mode);
3205 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3206 return new_d_Sub(NULL, op1, op2, mode);
3208 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3209 return new_d_Minus(NULL, op, mode);
3211 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3212 return new_d_Mul(NULL, op1, op2, mode);
3214 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3215 return new_d_Quot(NULL, memop, op1, op2, mode);
3217 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3218 return new_d_DivMod(NULL, memop, op1, op2, mode);
3220 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3221 return new_d_Div(NULL, memop, op1, op2, mode);
3223 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3224 return new_d_Mod(NULL, memop, op1, op2, mode);
3226 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3227 return new_d_Abs(NULL, op, mode);
3229 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3230 return new_d_And(NULL, op1, op2, mode);
3232 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3233 return new_d_Or(NULL, op1, op2, mode);
3235 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3236 return new_d_Eor(NULL, op1, op2, mode);
3238 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3239 return new_d_Not(NULL, op, mode);
3241 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3242 return new_d_Shl(NULL, op, k, mode);
3244 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3245 return new_d_Shr(NULL, op, k, mode);
3247 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3248 return new_d_Shrs(NULL, op, k, mode);
3250 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
3251 return new_d_Rot(NULL, op, k, mode);
3253 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3254 return new_d_Carry(NULL, op1, op2, mode);
3256 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3257 return new_d_Borrow(NULL, op1, op2, mode);
3259 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3260 return new_d_Cmp(NULL, op1, op2);
3262 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3263 return new_d_Conv(NULL, op, mode);
3265 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3266 return new_d_strictConv(NULL, op, mode);
3268 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3269 return new_d_Cast(NULL, op, to_tp);
3271 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3272 return new_d_Phi(NULL, arity, in, mode);
3274 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3275 return new_d_Load(NULL, store, addr, mode);
3277 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3278 return new_d_Store(NULL, store, addr, val);
3280 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3281 where_alloc where) {
3282 return new_d_Alloc(NULL, store, size, alloc_type, where);
3284 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3285 ir_type *free_type, where_alloc where) {
3286 return new_d_Free(NULL, store, ptr, size, free_type, where);
3288 ir_node *new_Sync(int arity, ir_node *in[]) {
3289 return new_d_Sync(NULL, arity, in);
3291 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3292 return new_d_Proj(NULL, arg, mode, proj);
3294 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3295 return new_d_defaultProj(NULL, arg, max_proj);
3297 ir_node *new_Tuple(int arity, ir_node **in) {
3298 return new_d_Tuple(NULL, arity, in);
3300 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3301 return new_d_Id(NULL, val, mode);
/* NOTE(review): the body of new_Bad (return new_d_Bad();) is elided here */
3303 ir_node *new_Bad(void) {
3306 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3307 return new_d_Confirm (NULL, val, bound, cmp);
3309 ir_node *new_Unknown(ir_mode *m) {
3310 return new_d_Unknown(m);
3312 ir_node *new_CallBegin(ir_node *callee) {
3313 return new_d_CallBegin(NULL, callee);
3315 ir_node *new_EndReg(void) {
3316 return new_d_EndReg(NULL);
3318 ir_node *new_EndExcept(void) {
3319 return new_d_EndExcept(NULL);
3321 ir_node *new_Break(void) {
3322 return new_d_Break(NULL);
3324 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3325 return new_d_Filter(NULL, arg, mode, proj);
3327 ir_node *new_NoMem(void) {
3328 return new_d_NoMem();
3330 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3331 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3333 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3334 return new_d_Psi(NULL, arity, conds, vals, mode);
3336 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3337 return new_d_CopyB(NULL, store, dst, src, data_type);
3339 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3340 return new_d_InstOf(NULL, store, objptr, ent);
3342 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3343 return new_d_Raise(NULL, store, obj);
3345 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3346 return new_d_Bound(NULL, store, idx, lower, upper);
3348 ir_node *new_Pin(ir_node *node) {
3349 return new_d_Pin(NULL, node);
3351 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3352 int n_outs, ir_asm_constraint *outputs,
3353 int n_clobber, ident *clobber[], ident *asm_text) {
3354 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);