2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
60 /* when we need verifying */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
68 * Language dependent variable initialization callback.
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = op_pin_state_pinned; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for an divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode); \
161 current_ir_graph = rem; \
165 /* creates a d constructor for an binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Cannot be used with automatic
183 * Phi node construction.
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.irg = irg;
198 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
199 res->attr.block.in_cg = NULL;
200 res->attr.block.cg_backedge = NULL;
201 res->attr.block.extblk = NULL;
202 res->attr.block.mb_depth = 0;
204 set_Block_matured(res, 1);
205 set_Block_block_visited(res, 0);
207 IRN_VRFY_IRG(res, irg);
212 new_bd_Start(dbg_info *db, ir_node *block) {
214 ir_graph *irg = current_ir_graph;
216 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
218 IRN_VRFY_IRG(res, irg);
223 new_bd_End(dbg_info *db, ir_node *block) {
225 ir_graph *irg = current_ir_graph;
227 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
229 IRN_VRFY_IRG(res, irg);
234 * Creates a Phi node with all predecessors. Calling this constructor
235 * is only allowed if the corresponding block is mature.
238 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
240 ir_graph *irg = current_ir_graph;
244 /* Don't assert that block matured: the use of this constructor is strongly
246 if ( get_Block_matured(block) )
247 assert( get_irn_arity(block) == arity );
249 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
251 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
253 for (i = arity-1; i >= 0; i--)
254 if (get_irn_op(in[i]) == op_Unknown) {
259 if (!has_unknown) res = optimize_node(res);
260 IRN_VRFY_IRG(res, irg);
262 /* Memory Phis in endless loops must be kept alive.
263 As we can't distinguish these easily we keep all of them alive. */
264 if ((res->op == op_Phi) && (mode == mode_M))
265 add_End_keepalive(get_irg_end(irg), res);
270 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
272 ir_graph *irg = current_ir_graph;
274 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
275 res->attr.con.tv = con;
276 set_Const_type(res, tp); /* Call method because of complex assertion. */
277 res = optimize_node (res);
278 assert(get_Const_type(res) == tp);
279 IRN_VRFY_IRG(res, irg);
282 } /* new_bd_Const_type */
285 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
286 ir_graph *irg = current_ir_graph;
288 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
292 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
293 ir_graph *irg = current_ir_graph;
295 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
296 } /* new_bd_Const_long */
299 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
301 ir_graph *irg = current_ir_graph;
303 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
310 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
313 ir_graph *irg = current_ir_graph;
315 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
316 res->attr.proj = proj;
319 assert(get_Proj_pred(res));
320 assert(get_nodes_block(get_Proj_pred(res)));
322 res = optimize_node(res);
324 IRN_VRFY_IRG(res, irg);
329 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
332 ir_graph *irg = current_ir_graph;
334 assert(arg->op == op_Cond);
335 arg->attr.cond.kind = fragmentary;
336 arg->attr.cond.default_proj = max_proj;
337 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
339 } /* new_bd_defaultProj */
342 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
344 ir_graph *irg = current_ir_graph;
346 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
347 res->attr.conv.strict = strict_flag;
348 res = optimize_node(res);
349 IRN_VRFY_IRG(res, irg);
354 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
356 ir_graph *irg = current_ir_graph;
358 assert(is_atomic_type(to_tp));
360 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
361 res->attr.cast.totype = to_tp;
362 res = optimize_node(res);
363 IRN_VRFY_IRG(res, irg);
368 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
370 ir_graph *irg = current_ir_graph;
372 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
373 res = optimize_node (res);
374 IRN_VRFY_IRG(res, irg);
399 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
402 ir_graph *irg = current_ir_graph;
405 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
406 res = optimize_node(res);
407 IRN_VRFY_IRG(res, irg);
412 new_bd_Jmp(dbg_info *db, ir_node *block) {
414 ir_graph *irg = current_ir_graph;
416 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
417 res = optimize_node (res);
418 IRN_VRFY_IRG(res, irg);
423 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
425 ir_graph *irg = current_ir_graph;
427 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
428 res = optimize_node (res);
429 IRN_VRFY_IRG(res, irg);
431 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
437 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
439 ir_graph *irg = current_ir_graph;
441 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
442 res->attr.cond.kind = dense;
443 res->attr.cond.default_proj = 0;
444 res->attr.cond.pred = COND_JMP_PRED_NONE;
445 res = optimize_node (res);
446 IRN_VRFY_IRG(res, irg);
451 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
452 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
456 ir_graph *irg = current_ir_graph;
459 NEW_ARR_A(ir_node *, r_in, r_arity);
462 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
464 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
466 assert((get_unknown_type() == tp) || is_Method_type(tp));
467 set_Call_type(res, tp);
468 res->attr.call.exc.pin_state = op_pin_state_pinned;
469 res->attr.call.callee_arr = NULL;
470 res = optimize_node(res);
471 IRN_VRFY_IRG(res, irg);
476 new_bd_Return(dbg_info *db, ir_node *block,
477 ir_node *store, int arity, ir_node **in) {
481 ir_graph *irg = current_ir_graph;
484 NEW_ARR_A (ir_node *, r_in, r_arity);
486 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
487 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
491 } /* new_bd_Return */
494 new_bd_Load(dbg_info *db, ir_node *block,
495 ir_node *store, ir_node *adr, ir_mode *mode) {
498 ir_graph *irg = current_ir_graph;
502 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
503 res->attr.load.exc.pin_state = op_pin_state_pinned;
504 res->attr.load.load_mode = mode;
505 res->attr.load.volatility = volatility_non_volatile;
506 res = optimize_node(res);
507 IRN_VRFY_IRG(res, irg);
512 new_bd_Store(dbg_info *db, ir_node *block,
513 ir_node *store, ir_node *adr, ir_node *val) {
516 ir_graph *irg = current_ir_graph;
521 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
522 res->attr.store.exc.pin_state = op_pin_state_pinned;
523 res->attr.store.volatility = volatility_non_volatile;
524 res = optimize_node(res);
525 IRN_VRFY_IRG(res, irg);
530 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
531 ir_node *size, ir_type *alloc_type, where_alloc where) {
534 ir_graph *irg = current_ir_graph;
538 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
539 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
540 res->attr.alloc.where = where;
541 res->attr.alloc.type = alloc_type;
542 res = optimize_node(res);
543 IRN_VRFY_IRG(res, irg);
548 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
549 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
552 ir_graph *irg = current_ir_graph;
557 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
558 res->attr.free.where = where;
559 res->attr.free.type = free_type;
560 res = optimize_node(res);
561 IRN_VRFY_IRG(res, irg);
566 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
567 int arity, ir_node **in, ir_entity *ent) {
571 ir_graph *irg = current_ir_graph;
572 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
574 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
577 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
580 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
582 * Sel's can select functions which should be of mode mode_P_code.
584 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
585 res->attr.sel.ent = ent;
586 res = optimize_node(res);
587 IRN_VRFY_IRG(res, irg);
592 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
593 symconst_kind symkind, ir_type *tp) {
596 ir_graph *irg = current_ir_graph;
598 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
599 mode = mode_P_data; /* FIXME: can be mode_P_code */
603 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
605 res->attr.symc.num = symkind;
606 res->attr.symc.sym = value;
607 res->attr.symc.tp = tp;
609 res = optimize_node(res);
610 IRN_VRFY_IRG(res, irg);
612 } /* new_bd_SymConst_type */
615 new_bd_Sync(dbg_info *db, ir_node *block) {
617 ir_graph *irg = current_ir_graph;
619 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
620 /* no need to call optimize node here, Sync are always created with no predecessors */
621 IRN_VRFY_IRG(res, irg);
626 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
627 ir_node *in[2], *res;
628 ir_graph *irg = current_ir_graph;
632 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
633 res->attr.confirm_cmp = cmp;
634 res = optimize_node (res);
635 IRN_VRFY_IRG(res, irg);
637 } /* new_bd_Confirm */
640 new_bd_Unknown(ir_mode *m) {
642 ir_graph *irg = current_ir_graph;
644 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
645 res = optimize_node(res);
647 } /* new_bd_Unknown */
650 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
653 ir_graph *irg = current_ir_graph;
655 in[0] = get_Call_ptr(call);
656 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
657 /* res->attr.callbegin.irg = irg; */
658 res->attr.callbegin.call = call;
659 res = optimize_node(res);
660 IRN_VRFY_IRG(res, irg);
662 } /* new_bd_CallBegin */
665 new_bd_EndReg(dbg_info *db, ir_node *block) {
667 ir_graph *irg = current_ir_graph;
669 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
670 set_irg_end_reg(irg, res);
671 IRN_VRFY_IRG(res, irg);
673 } /* new_bd_EndReg */
676 new_bd_EndExcept(dbg_info *db, ir_node *block) {
678 ir_graph *irg = current_ir_graph;
680 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
681 set_irg_end_except(irg, res);
682 IRN_VRFY_IRG (res, irg);
684 } /* new_bd_EndExcept */
687 new_bd_Break(dbg_info *db, ir_node *block) {
689 ir_graph *irg = current_ir_graph;
691 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
692 res = optimize_node(res);
693 IRN_VRFY_IRG(res, irg);
698 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
701 ir_graph *irg = current_ir_graph;
703 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
704 res->attr.filter.proj = proj;
705 res->attr.filter.in_cg = NULL;
706 res->attr.filter.backedge = NULL;
709 assert(get_Proj_pred(res));
710 assert(get_nodes_block(get_Proj_pred(res)));
712 res = optimize_node(res);
713 IRN_VRFY_IRG(res, irg);
715 } /* new_bd_Filter */
718 new_bd_Mux(dbg_info *db, ir_node *block,
719 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
722 ir_graph *irg = current_ir_graph;
728 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
731 res = optimize_node(res);
732 IRN_VRFY_IRG(res, irg);
737 new_bd_Psi(dbg_info *db, ir_node *block,
738 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
741 ir_graph *irg = current_ir_graph;
744 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
746 for (i = 0; i < arity; ++i) {
748 in[2 * i + 1] = vals[i];
752 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
755 res = optimize_node(res);
756 IRN_VRFY_IRG(res, irg);
761 new_bd_CopyB(dbg_info *db, ir_node *block,
762 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
765 ir_graph *irg = current_ir_graph;
771 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
773 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
774 res->attr.copyb.data_type = data_type;
775 res = optimize_node(res);
776 IRN_VRFY_IRG(res, irg);
781 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
782 ir_node *objptr, ir_type *type) {
785 ir_graph *irg = current_ir_graph;
789 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
790 res->attr.instof.type = type;
791 res = optimize_node(res);
792 IRN_VRFY_IRG(res, irg);
794 } /* new_bd_InstOf */
797 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
800 ir_graph *irg = current_ir_graph;
804 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
805 res = optimize_node(res);
806 IRN_VRFY_IRG(res, irg);
811 new_bd_Bound(dbg_info *db, ir_node *block,
812 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
815 ir_graph *irg = current_ir_graph;
821 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
822 res->attr.bound.exc.pin_state = op_pin_state_pinned;
823 res = optimize_node(res);
824 IRN_VRFY_IRG(res, irg);
829 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
831 ir_graph *irg = current_ir_graph;
833 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
834 res = optimize_node(res);
835 IRN_VRFY_IRG(res, irg);
840 new_bd_ASM(dbg_info *db, ir_node *block, ir_node *store, int arity, ir_node *inputs[], ident *asm_text) {
842 ir_graph *irg = current_ir_graph;
845 NEW_ARR_A(ir_node *, in, arity + 1);
848 for (i = 0; i < arity; ++i)
849 in[i + 1] = inputs[i];
851 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity + 1, in);
852 res->attr.asm.asm_text = asm_text;
854 res = optimize_node(res);
855 IRN_VRFY_IRG(res, irg);
859 /* --------------------------------------------- */
860 /* private interfaces, for professional use only */
861 /* --------------------------------------------- */
863 /* Constructs a Block with a fixed number of predecessors.
864 Does not set current_block. Can not be used with automatic
865 Phi node construction. */
867 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
868 ir_graph *rem = current_ir_graph;
871 current_ir_graph = irg;
872 res = new_bd_Block(db, arity, in);
873 current_ir_graph = rem;
879 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
880 ir_graph *rem = current_ir_graph;
883 current_ir_graph = irg;
884 res = new_bd_Start(db, block);
885 current_ir_graph = rem;
891 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
893 ir_graph *rem = current_ir_graph;
895 current_ir_graph = rem;
896 res = new_bd_End(db, block);
897 current_ir_graph = rem;
902 /* Creates a Phi node with all predecessors. Calling this constructor
903 is only allowed if the corresponding block is mature. */
905 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
907 ir_graph *rem = current_ir_graph;
909 current_ir_graph = irg;
910 res = new_bd_Phi(db, block,arity, in, mode);
911 current_ir_graph = rem;
917 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
919 ir_graph *rem = current_ir_graph;
921 current_ir_graph = irg;
922 res = new_bd_Const_type(db, block, mode, con, tp);
923 current_ir_graph = rem;
926 } /* new_rd_Const_type */
929 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
931 ir_graph *rem = current_ir_graph;
933 current_ir_graph = irg;
934 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
935 current_ir_graph = rem;
941 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
942 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
943 } /* new_rd_Const_long */
946 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
948 ir_graph *rem = current_ir_graph;
950 current_ir_graph = irg;
951 res = new_bd_Id(db, block, val, mode);
952 current_ir_graph = rem;
958 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
961 ir_graph *rem = current_ir_graph;
963 current_ir_graph = irg;
964 res = new_bd_Proj(db, block, arg, mode, proj);
965 current_ir_graph = rem;
971 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
974 ir_graph *rem = current_ir_graph;
976 current_ir_graph = irg;
977 res = new_bd_defaultProj(db, block, arg, max_proj);
978 current_ir_graph = rem;
981 } /* new_rd_defaultProj */
984 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
986 ir_graph *rem = current_ir_graph;
988 current_ir_graph = irg;
989 res = new_bd_Conv(db, block, op, mode, 0);
990 current_ir_graph = rem;
996 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
998 ir_graph *rem = current_ir_graph;
1000 current_ir_graph = irg;
1001 res = new_bd_Cast(db, block, op, to_tp);
1002 current_ir_graph = rem;
1008 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1010 ir_graph *rem = current_ir_graph;
1012 current_ir_graph = irg;
1013 res = new_bd_Tuple(db, block, arity, in);
1014 current_ir_graph = rem;
1017 } /* new_rd_Tuple */
1024 NEW_RD_DIVOP(DivMod)
1037 NEW_RD_BINOP(Borrow)
1040 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1041 ir_node *op1, ir_node *op2) {
1043 ir_graph *rem = current_ir_graph;
1045 current_ir_graph = irg;
1046 res = new_bd_Cmp(db, block, op1, op2);
1047 current_ir_graph = rem;
1053 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1055 ir_graph *rem = current_ir_graph;
1057 current_ir_graph = irg;
1058 res = new_bd_Jmp(db, block);
1059 current_ir_graph = rem;
1065 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1067 ir_graph *rem = current_ir_graph;
1069 current_ir_graph = irg;
1070 res = new_bd_IJmp(db, block, tgt);
1071 current_ir_graph = rem;
1077 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1079 ir_graph *rem = current_ir_graph;
1081 current_ir_graph = irg;
1082 res = new_bd_Cond(db, block, c);
1083 current_ir_graph = rem;
1089 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1090 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1092 ir_graph *rem = current_ir_graph;
1094 current_ir_graph = irg;
1095 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1096 current_ir_graph = rem;
1102 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1103 ir_node *store, int arity, ir_node **in) {
1105 ir_graph *rem = current_ir_graph;
1107 current_ir_graph = irg;
1108 res = new_bd_Return(db, block, store, arity, in);
1109 current_ir_graph = rem;
1112 } /* new_rd_Return */
1115 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1116 ir_node *store, ir_node *adr, ir_mode *mode) {
1118 ir_graph *rem = current_ir_graph;
1120 current_ir_graph = irg;
1121 res = new_bd_Load(db, block, store, adr, mode);
1122 current_ir_graph = rem;
1128 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1129 ir_node *store, ir_node *adr, ir_node *val) {
1131 ir_graph *rem = current_ir_graph;
1133 current_ir_graph = irg;
1134 res = new_bd_Store(db, block, store, adr, val);
1135 current_ir_graph = rem;
1138 } /* new_rd_Store */
1141 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1142 ir_node *size, ir_type *alloc_type, where_alloc where) {
1144 ir_graph *rem = current_ir_graph;
1146 current_ir_graph = irg;
1147 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1148 current_ir_graph = rem;
1151 } /* new_rd_Alloc */
1154 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1155 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1157 ir_graph *rem = current_ir_graph;
1159 current_ir_graph = irg;
1160 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1161 current_ir_graph = rem;
1167 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1168 ir_node *store, ir_node *objptr, ir_entity *ent) {
1170 ir_graph *rem = current_ir_graph;
1172 current_ir_graph = irg;
1173 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1174 current_ir_graph = rem;
1177 } /* new_rd_simpleSel */
1180 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1181 int arity, ir_node **in, ir_entity *ent) {
1183 ir_graph *rem = current_ir_graph;
1185 current_ir_graph = irg;
1186 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1187 current_ir_graph = rem;
1193 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1194 symconst_kind symkind, ir_type *tp) {
1196 ir_graph *rem = current_ir_graph;
1198 current_ir_graph = irg;
1199 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1200 current_ir_graph = rem;
1203 } /* new_rd_SymConst_type */
1206 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1207 symconst_kind symkind) {
1208 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1210 } /* new_rd_SymConst */
1212 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1213 symconst_symbol sym;
1214 sym.entity_p = symbol;
1215 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1216 } /* new_rd_SymConst_addr_ent */
1218 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1219 symconst_symbol sym;
1220 sym.entity_p = symbol;
1221 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1222 } /* new_rd_SymConst_ofs_ent */
1224 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1225 symconst_symbol sym;
1226 sym.ident_p = symbol;
1227 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1228 } /* new_rd_SymConst_addr_name */
1230 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1231 symconst_symbol sym;
1232 sym.type_p = symbol;
1233 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1234 } /* new_rd_SymConst_type_tag */
1236 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1237 symconst_symbol sym;
1238 sym.type_p = symbol;
1239 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1240 } /* new_rd_SymConst_size */
1242 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1243 symconst_symbol sym;
1244 sym.type_p = symbol;
1245 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1246 } /* new_rd_SymConst_align */
1249 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1251 ir_graph *rem = current_ir_graph;
1254 current_ir_graph = irg;
1255 res = new_bd_Sync(db, block);
1256 current_ir_graph = rem;
1258 for (i = 0; i < arity; ++i)
1259 add_Sync_pred(res, in[i]);
1265 new_rd_Bad(ir_graph *irg) {
1266 return get_irg_bad(irg);
1270 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1272 ir_graph *rem = current_ir_graph;
1274 current_ir_graph = irg;
1275 res = new_bd_Confirm(db, block, val, bound, cmp);
1276 current_ir_graph = rem;
1279 } /* new_rd_Confirm */
1282 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1284 ir_graph *rem = current_ir_graph;
1286 current_ir_graph = irg;
1287 res = new_bd_Unknown(m);
1288 current_ir_graph = rem;
1291 } /* new_rd_Unknown */
1294 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1296 ir_graph *rem = current_ir_graph;
1298 current_ir_graph = irg;
1299 res = new_bd_CallBegin(db, block, call);
1300 current_ir_graph = rem;
1303 } /* new_rd_CallBegin */
1306 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1309 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1310 set_irg_end_reg(irg, res);
1311 IRN_VRFY_IRG(res, irg);
1313 } /* new_rd_EndReg */
1316 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1319 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1320 set_irg_end_except(irg, res);
1321 IRN_VRFY_IRG (res, irg);
1323 } /* new_rd_EndExcept */
1326 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1328 ir_graph *rem = current_ir_graph;
1330 current_ir_graph = irg;
1331 res = new_bd_Break(db, block);
1332 current_ir_graph = rem;
1335 } /* new_rd_Break */
1338 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1341 ir_graph *rem = current_ir_graph;
1343 current_ir_graph = irg;
1344 res = new_bd_Filter(db, block, arg, mode, proj);
1345 current_ir_graph = rem;
1348 } /* new_rd_Filter */
1351 new_rd_NoMem(ir_graph *irg) {
1352 return get_irg_no_mem(irg);
1353 } /* new_rd_NoMem */
1356 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1357 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1359 ir_graph *rem = current_ir_graph;
1361 current_ir_graph = irg;
1362 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1363 current_ir_graph = rem;
1369 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1370 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1372 ir_graph *rem = current_ir_graph;
1374 current_ir_graph = irg;
1375 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1376 current_ir_graph = rem;
1381 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1382 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1384 ir_graph *rem = current_ir_graph;
1386 current_ir_graph = irg;
1387 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1388 current_ir_graph = rem;
1391 } /* new_rd_CopyB */
1394 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1395 ir_node *objptr, ir_type *type) {
1397 ir_graph *rem = current_ir_graph;
1399 current_ir_graph = irg;
1400 res = new_bd_InstOf(db, block, store, objptr, type);
1401 current_ir_graph = rem;
1404 } /* new_rd_InstOf */
1407 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1409 ir_graph *rem = current_ir_graph;
1411 current_ir_graph = irg;
1412 res = new_bd_Raise(db, block, store, obj);
1413 current_ir_graph = rem;
1416 } /* new_rd_Raise */
1418 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1419 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1421 ir_graph *rem = current_ir_graph;
1423 current_ir_graph = irg;
1424 res = new_bd_Bound(db, block, store, idx, lower, upper);
1425 current_ir_graph = rem;
1428 } /* new_rd_Bound */
1430 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1432 ir_graph *rem = current_ir_graph;
1434 current_ir_graph = irg;
1435 res = new_bd_Pin(db, block, node);
1436 current_ir_graph = rem;
1441 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1442 int arity, ir_node *inputs[], ident *asm_text) {
1444 ir_graph *rem = current_ir_graph;
1446 current_ir_graph = irg;
1447 res = new_bd_ASM(db, block, store, arity, inputs, asm_text);
1448 current_ir_graph = rem;
1454 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1455 return new_rd_Block(NULL, irg, arity, in);
1457 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1458 return new_rd_Start(NULL, irg, block);
1460 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1461 return new_rd_End(NULL, irg, block);
1463 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1464 return new_rd_Jmp(NULL, irg, block);
1466 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1467 return new_rd_IJmp(NULL, irg, block, tgt);
1469 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1470 return new_rd_Cond(NULL, irg, block, c);
1472 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1473 ir_node *store, int arity, ir_node **in) {
1474 return new_rd_Return(NULL, irg, block, store, arity, in);
1476 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1477 ir_mode *mode, tarval *con) {
1478 return new_rd_Const(NULL, irg, block, mode, con);
1480 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1481 ir_mode *mode, long value) {
1482 return new_rd_Const_long(NULL, irg, block, mode, value);
1484 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1485 ir_mode *mode, tarval *con, ir_type *tp) {
1486 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1488 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1489 symconst_symbol value, symconst_kind symkind) {
1490 return new_rd_SymConst(NULL, irg, block, value, symkind);
1492 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1493 ir_node *objptr, ir_entity *ent) {
1494 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1496 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1497 ir_node *objptr, int n_index, ir_node **index,
1499 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1501 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1502 ir_node *callee, int arity, ir_node **in,
1504 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1506 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1507 ir_node *op1, ir_node *op2, ir_mode *mode) {
1508 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1510 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1511 ir_node *op1, ir_node *op2, ir_mode *mode) {
1512 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1514 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1515 ir_node *op, ir_mode *mode) {
1516 return new_rd_Minus(NULL, irg, block, op, mode);
1518 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1519 ir_node *op1, ir_node *op2, ir_mode *mode) {
1520 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1522 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1523 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1524 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode);
1526 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1527 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1528 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode);
1530 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1531 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1532 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode);
1534 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1535 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1536 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode);
1538 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1539 ir_node *op, ir_mode *mode) {
1540 return new_rd_Abs(NULL, irg, block, op, mode);
1542 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1543 ir_node *op1, ir_node *op2, ir_mode *mode) {
1544 return new_rd_And(NULL, irg, block, op1, op2, mode);
1546 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1547 ir_node *op1, ir_node *op2, ir_mode *mode) {
1548 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1550 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1551 ir_node *op1, ir_node *op2, ir_mode *mode) {
1552 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1554 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1555 ir_node *op, ir_mode *mode) {
1556 return new_rd_Not(NULL, irg, block, op, mode);
1558 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1559 ir_node *op, ir_node *k, ir_mode *mode) {
1560 return new_rd_Shl(NULL, irg, block, op, k, mode);
1562 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1563 ir_node *op, ir_node *k, ir_mode *mode) {
1564 return new_rd_Shr(NULL, irg, block, op, k, mode);
1566 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1567 ir_node *op, ir_node *k, ir_mode *mode) {
1568 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1570 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1571 ir_node *op, ir_node *k, ir_mode *mode) {
1572 return new_rd_Rot(NULL, irg, block, op, k, mode);
1574 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1575 ir_node *op, ir_node *k, ir_mode *mode) {
1576 return new_rd_Carry(NULL, irg, block, op, k, mode);
1578 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1579 ir_node *op, ir_node *k, ir_mode *mode) {
1580 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1582 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1583 ir_node *op1, ir_node *op2) {
1584 return new_rd_Cmp(NULL, irg, block, op1, op2);
1586 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1587 ir_node *op, ir_mode *mode) {
1588 return new_rd_Conv(NULL, irg, block, op, mode);
1590 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1591 return new_rd_Cast(NULL, irg, block, op, to_tp);
1593 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1594 ir_node **in, ir_mode *mode) {
1595 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1597 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1598 ir_node *store, ir_node *adr, ir_mode *mode) {
1599 return new_rd_Load(NULL, irg, block, store, adr, mode);
1601 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1602 ir_node *store, ir_node *adr, ir_node *val) {
1603 return new_rd_Store(NULL, irg, block, store, adr, val);
1605 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1606 ir_node *size, ir_type *alloc_type, where_alloc where) {
1607 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1609 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1610 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1611 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1613 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1614 return new_rd_Sync(NULL, irg, block, arity, in);
1616 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1617 ir_mode *mode, long proj) {
1618 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1620 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1622 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1624 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1625 int arity, ir_node **in) {
1626 return new_rd_Tuple(NULL, irg, block, arity, in );
1628 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1629 ir_node *val, ir_mode *mode) {
1630 return new_rd_Id(NULL, irg, block, val, mode);
1632 ir_node *new_r_Bad(ir_graph *irg) {
1633 return new_rd_Bad(irg);
1635 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1636 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1638 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1639 return new_rd_Unknown(irg, m);
1641 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1642 return new_rd_CallBegin(NULL, irg, block, callee);
1644 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1645 return new_rd_EndReg(NULL, irg, block);
1647 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1648 return new_rd_EndExcept(NULL, irg, block);
1650 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1651 return new_rd_Break(NULL, irg, block);
1653 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1654 ir_mode *mode, long proj) {
1655 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1657 ir_node *new_r_NoMem(ir_graph *irg) {
1658 return new_rd_NoMem(irg);
1660 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1661 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1662 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1664 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1665 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1666 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1668 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1669 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1670 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1672 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1674 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1676 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1677 ir_node *store, ir_node *obj) {
1678 return new_rd_Raise(NULL, irg, block, store, obj);
1680 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1681 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1682 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1684 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1685 return new_rd_Pin(NULL, irg, block, node);
1687 ir_node *new_r_ASM(ir_graph *irg, ir_node *block, ir_node *store,
1688 int arity, ir_node *inputs[], ident *asm_text) {
1689 return new_rd_ASM(NULL, irg, block, store, arity, inputs, asm_text);
1692 /** ********************/
1693 /** public interfaces */
1694 /** construction tools */
/**
 * Creates a new Start node in the current block.
 *
 * @return pointer to the created Start node
 */
1705 new_d_Start(dbg_info *db) {
1708 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1709 op_Start, mode_T, 0, NULL);
1711 res = optimize_node(res);
1712 IRN_VRFY_IRG(res, current_ir_graph);
1717 new_d_End(dbg_info *db) {
1719 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1720 op_End, mode_X, -1, NULL);
1721 res = optimize_node(res);
1722 IRN_VRFY_IRG(res, current_ir_graph);
1727 /* Constructs a Block with a fixed number of predecessors.
1728 Does set current_block. Can be used with automatic Phi
1729 node construction. */
1731 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1734 int has_unknown = 0;
1736 res = new_bd_Block(db, arity, in);
1738 /* Create and initialize array for Phi-node construction. */
1739 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1740 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1741 current_ir_graph->n_loc);
1742 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1745 for (i = arity-1; i >= 0; i--)
1746 if (get_irn_op(in[i]) == op_Unknown) {
1751 if (!has_unknown) res = optimize_node(res);
1752 current_ir_graph->current_block = res;
1754 IRN_VRFY_IRG(res, current_ir_graph);
1759 /* ***********************************************************************/
1760 /* Methods necessary for automatic Phi node creation */
1762 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1763 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1764 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1765 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1767 Call Graph: ( A ---> B == A "calls" B)
1769 get_value mature_immBlock
1777 get_r_value_internal |
1781 new_rd_Phi0 new_rd_Phi_in
1783 * *************************************************************************** */
1785 /** Creates a Phi node with 0 predecessors. */
1786 static INLINE ir_node *
1787 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1790 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1791 IRN_VRFY_IRG(res, irg);
1795 /* There are two implementations of the Phi node construction. The first
1796 is faster, but does not work for blocks with more than 2 predecessors.
1797 The second works always but is slower and causes more unnecessary Phi
1799 Select the implementations by the following preprocessor flag set in
1801 #if USE_FAST_PHI_CONSTRUCTION
1803 /* This is a stack used for allocating and deallocating nodes in
1804 new_rd_Phi_in. The original implementation used the obstack
1805 to model this stack, now it is explicit. This reduces side effects.
1807 #if USE_EXPLICIT_PHI_IN_STACK
1809 new_Phi_in_stack(void) {
1812 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1814 res->stack = NEW_ARR_F (ir_node *, 0);
1818 } /* new_Phi_in_stack */
1821 free_Phi_in_stack(Phi_in_stack *s) {
1822 DEL_ARR_F(s->stack);
1824 } /* free_Phi_in_stack */
1827 free_to_Phi_in_stack(ir_node *phi) {
1828 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1829 current_ir_graph->Phi_in_stack->pos)
1830 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1832 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1834 (current_ir_graph->Phi_in_stack->pos)++;
1835 } /* free_to_Phi_in_stack */
1837 static INLINE ir_node *
1838 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1839 int arity, ir_node **in) {
1841 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1842 int pos = current_ir_graph->Phi_in_stack->pos;
1846 /* We need to allocate a new node */
1847 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1848 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1850 /* reuse the old node and initialize it again. */
1853 assert(res->kind == k_ir_node);
1854 assert(res->op == op_Phi);
1859 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1860 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
1862 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1864 (current_ir_graph->Phi_in_stack->pos)--;
1867 } /* alloc_or_pop_from_Phi_in_stack */
1868 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1871 * Creates a Phi node with a given, fixed array **in of predecessors.
1872 * If the Phi node is unnecessary, as the same value reaches the block
1873 * through all control flow paths, it is eliminated and the value
1874 * returned directly. This constructor is only intended for use in
1875 * the automatic Phi node generation triggered by get_value or mature.
1876 * The implementation is quite tricky and depends on the fact, that
1877 * the nodes are allocated on a stack:
1878 * The in array contains predecessors and NULLs. The NULLs appear,
1879 * if get_r_value_internal, that computed the predecessors, reached
1880 * the same block on two paths. In this case the same value reaches
1881 * this block on both paths, there is no definition in between. We need
1882 * not allocate a Phi where these path's merge, but we have to communicate
1883 * this fact to the caller. This happens by returning a pointer to the
1884 * node the caller _will_ allocate. (Yes, we predict the address. We can
1885 * do so because the nodes are allocated on the obstack.) The caller then
1886 * finds a pointer to itself and, when this routine is called again,
1887 * eliminates itself.
/* Fast-construction variant of new_rd_Phi_in.
   NOTE(review): this excerpt appears truncated — the declaration of the
   loop index `i`, several #else/#endif lines, loop bodies and closing
   braces are missing.  Restore from version control before compiling
   with USE_FAST_PHI_CONSTRUCTION enabled. */
static INLINE ir_node *
new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins) {
  ir_node *res, *known;

  /* Allocate a new node on the obstack. This can return a node to
     which some of the pointers in the in-array already point.
     Attention: the constructor copies the in array, i.e., the later
     changes to the array in this routine do not affect the
     constructed node! If the in array contains NULLs, there will be
     missing predecessors in the returned node. Is this a possible
     internal state of the Phi node generation? */
#if USE_EXPLICIT_PHI_IN_STACK
  res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
  res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

  /* The in-array can contain NULLs. These were returned by
     get_r_value_internal if it reached the same block/definition on a
     second path. The NULLs are replaced by the node itself to
     simplify the test in the next loop. */
  for (i = 0; i < ins; ++i) {

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop. Else the Phi
     node merges the same definition on several paths and therefore is
     not needed. */
  for (i = 0; i < ins; ++i) {
    if (in[i] == res || in[i] == known)

  /* i==ins: there is at most one predecessor, we don't need a phi node. */
#if USE_EXPLICIT_PHI_IN_STACK
    free_to_Phi_in_stack(res);
    /* Not using the explicit stack: undo the obstack allocation. */
    edges_node_deleted(res, current_ir_graph);
    obstack_free(current_ir_graph->obst, res);

    res = optimize_node (res);
    IRN_VRFY_IRG(res, irg);

  /* return the pointer to the Phi node. This node might be deallocated! */
} /* new_rd_Phi_in */
1950 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1953 * Allocates and returns this node. The routine called to allocate the
1954 * node might optimize it away and return a real value, or even a pointer
1955 * to a deallocated Phi node on top of the obstack!
1956 * This function is called with an in-array of proper size.
/* Fast-construction variant of phi_merge: collects the Phi operands
   from all predecessor blocks and builds the Phi.
   NOTE(review): the return type line, the declaration of `i` and several
   closing braces appear to be missing from this excerpt. */
phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
  ir_node *prevBlock, *res;

  /* This loop goes to all predecessor blocks of the block the Phi node is in
     and there finds the operands of the Phi node by calling
     get_r_value_internal. */
  for (i = 1; i <= ins; ++i) {
    assert (block->in[i]);
    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
    nin[i-1] = get_r_value_internal (prevBlock, pos, mode);

  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created. This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node. If the value
     passes two different control flow edges without being defined, and
     this is the second path treated, a pointer to the node that will be
     allocated for the first path (recursion) is returned. We already
     know the address of this node, as it is the next node to be allocated
     and will be placed on top of the obstack. (The obstack is a _stack_!) */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);

  /* Now we know the value for "pos" and can enter it in the array with
     all known local variables. Attention: this might be a pointer to
     a node, that later will be allocated!!! See new_rd_Phi_in().
     If this is called in mature, after some set_value() in the same block,
     the proper value must not be overwritten:
     The call order
       get_value    (makes Phi0, put's it into graph_arr)
       set_value    (overwrites Phi0 in graph_arr)
       mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
                        the proper value.) */
  if (!block->attr.block.graph_arr[pos]) {
    block->attr.block.graph_arr[pos] = res;
  /* printf(" value already computed by %s\n",
            get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2006 * This function returns the last definition of a variable. In case
2007 * this variable was last defined in a previous block, Phi nodes are
2008 * inserted. If the part of the firm graph containing the definition
2009 * is not yet constructed, a dummy Phi node is returned.
/* Returns the last definition of the value at position `pos` visible in
   `block`, creating (possibly dummy) Phi nodes as needed.
   NOTE(review): the return type line, declarations of `res`/`nin`, and
   several closing braces appear to be missing from this excerpt. */
get_r_value_internal(ir_node *block, int pos, ir_mode *mode)

  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time.  We can not
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned.  This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured. (Completion will add a new Phi and turn the Phi0 into an Id
        node.)

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time.  We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time.  A Phi node needs
        to be created (phi_merge).  If the Phi is not needed, as all it's
        operands are the same value reaching the block through different
        paths, it's optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time.  Now two
        subcases are possible:
        * The value was computed completely the last time we were here.  This
          is the case if there is no loop.  We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet.  We are computing a value in a loop and need to
          break the recursion without knowing the result yet.
          @@@ strange case.  Straight forward we would create a Phi before
          starting the computation of it's predecessors.  In this case we will
          find a Phi here in any case.  The problem is that this implementation
          only creates a Phi after computing the predecessors, so that it is
          hard to compute self references of this Phi.  @@@
        There is no simple check for the second subcase.  Therefore we check
        for a second visit and treat all such cases as the second subcase.
        Anyways, the basic situation is the same:  we reached a block
        on two paths without finding a definition of the value:  No Phi
        nodes are needed on both paths.
        We return this information "Two paths, no Phi needed" by a very tricky
        implementation that relies on the fact that an obstack is a stack and
        will return a node with the same address on different allocations.
        Look also at phi_merge and new_rd_phi_in to understand this.
        @@@ Unfortunately this does not work, see testprogram
        three_cfpred_example.
  */

  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) return res;

  if (block->attr.block.is_matured) { /* case 3 */

    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);
    /* nin is allocated on the C stack (alloca) — valid only in this call. */
    NEW_ARR_A(ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge(block, pos, mode, nin, ins);

  } else {  /* case 1 */
    /* The block is not mature, we don't know how many in's are needed.  A Phi
       with zero predecessors is created.  Such a Phi node is called Phi0
       node.  (There is also an obsolete Phi0 opcode.) The Phi0 is then added
       to the list of Phi0 nodes in this block to be matured by mature_immBlock
       later.
       The Phi0 has to remember the pos of it's internal value.  If the real
       Phi is computed, pos is used to update the array with the local
       values. */
    res = new_rd_Phi0(current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;

  /* If we get here, the frontend missed a use-before-definition error */
  printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
  /* NOTE(review): only float/int/pointer modes have a null tarval here. */
  assert(mode->code >= irm_F && mode->code <= irm_P);
  res = new_rd_Const(NULL, current_ir_graph, block, mode, tarval_mode_null[mode->code]);

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;

} /* get_r_value_internal */
2114 it starts the recursion. This causes an Id at the entry of
2115 every block that has no definition of the value! **/
2117 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy stubs: the explicit Phi_in_stack is only used by the fast Phi
   construction; in this configuration no stack is ever allocated. */
Phi_in_stack * new_Phi_in_stack() { return NULL; }
void free_Phi_in_stack(Phi_in_stack *s) {}
/* General (always-correct) variant of new_rd_Phi_in: builds a Phi from
   `in`, eliminating it if at most one real predecessor remains.  `phi0`
   is the placeholder Phi0 this Phi will replace, or NULL.
   NOTE(review): this excerpt appears truncated — the declaration of `i`,
   some branch skeletons and closing braces are missing. */
static INLINE ir_node *
new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
              ir_node **in, int ins, ir_node *phi0) {
  ir_node *res, *known;

  /* Allocate a new node on the obstack.  The allocation copies the in
     array. */
  res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop.  Else the
     Phi node merges the same definition on several paths and therefore
     is not needed.  Don't consider Bad nodes! */
  for (i=0; i < ins; ++i)
    in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */

    /* Optimize self referencing Phis:  We can't detect them yet properly, as
       they still refer to the Phi0 they will replace.  So replace right now. */
    if (phi0 && in[i] == phi0) in[i] = res;

    if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;

  /* i==ins: there is at most one predecessor, we don't need a phi node. */
    /* Undo the speculative allocation of res. */
    edges_node_deleted(res, current_ir_graph);
    obstack_free (current_ir_graph->obst, res);
    if (is_Phi(known)) {
      /* If pred is a phi node we want to optimize it: If loops are matured in a bad
         order, an enclosing Phi know may get superfluous. */
      res = optimize_in_place_2(known);
        exchange(known, res);
      /* A undefined value, e.g., in unreachable code. */

    res = optimize_node (res);  /* This is necessary to add the node to the hash table for cse. */
    IRN_VRFY_IRG(res, irg);
    /* Memory Phis in endless loops must be kept alive.
       As we can't distinguish these easily we keep all of them alive. */
    if ((res->op == op_Phi) && (mode == mode_M))
      add_End_keepalive(get_irg_end(irg), res);
} /* new_rd_Phi_in */
2189 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2191 #if PRECISE_EXC_CONTEXT
2193 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2196 * Construct a new frag_array for node n.
2197 * Copy the content from the current graph_arr of the corresponding block:
2198 * this is the current state.
2199 * Set ProjM(n) as current memory state.
2200 * Further the last entry in frag_arr of current block points to n. This
2201 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Construct a new frag_array for a fragile op `n`: copy the current
   block's graph_arr state, set the exception-memory Proj of `n` as
   slot 0, and chain `n` into the block's last_frag_op list (slot
   n_loc-1 of graph_arr).
   NOTE(review): this excerpt appears truncated — declarations of
   `arr`/`opt`, the restore of the optimize flag and `return arr;` are
   missing. */
static INLINE ir_node **new_frag_arr(ir_node *n) {

  arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
  memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
         sizeof(ir_node *)*current_ir_graph->n_loc);

  /* turn off optimization before allocating Proj nodes, as res isn't
     finished yet. */
  opt = get_opt_optimize(); set_optimize(0);
  /* Here we rely on the fact that all frag ops have Memory as first result! */
  if (get_irn_op(n) == op_Call)
    arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
  else if (get_irn_op(n) == op_CopyB)
    arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
  else {
    /* All remaining fragile ops share the same memory Proj number. */
    assert((pn_Quot_M == pn_DivMod_M) &&
           (pn_Quot_M == pn_Div_M)    &&
           (pn_Quot_M == pn_Mod_M)    &&
           (pn_Quot_M == pn_Load_M)   &&
           (pn_Quot_M == pn_Store_M)  &&
           (pn_Quot_M == pn_Alloc_M)  &&
           (pn_Quot_M == pn_Bound_M));
    arr[0] = new_Proj(n, mode_M, pn_Alloc_M);

  /* chain: block->last_frag_op -> ... -> first_frag_op */
  current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;

} /* new_frag_arr */
/**
 * Returns the frag_arr from a fragile node, selected by opcode.
 * NOTE(review): the case labels of this switch are missing from this
 * excerpt; judging from the attribute unions accessed these are
 * presumably iro_Call, iro_Alloc, iro_Load, iro_Store and a default
 * for the remaining fragile ops — confirm against version control.
 */
static INLINE ir_node **get_frag_arr(ir_node *n) {
  switch (get_irn_opcode(n)) {
    return n->attr.call.exc.frag_arr;    /* Call attribute */
    return n->attr.alloc.exc.frag_arr;   /* Alloc attribute */
    return n->attr.load.exc.frag_arr;    /* Load attribute */
    return n->attr.store.exc.frag_arr;   /* Store attribute */
    return n->attr.except.frag_arr;      /* generic except attribute */
} /* get_frag_arr */
/* Records `val` at position `pos` in `frag_arr` unless a value is
   already present, then follows the chain of frag arrays (slot
   n_loc-1 links to the next fragile op) to propagate the value.
   NOTE(review): this excerpt seems to interleave two variants of the
   function — a recursive one and a bounded-iteration one (`for (i = 0;
   i < 1000; ...)`); declarations and braces are missing.  Restore the
   intended single variant from version control. */
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {

  if (!frag_arr[pos]) frag_arr[pos] = val;
  if (frag_arr[current_ir_graph->n_loc - 1]) {
    ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
    assert(arr != frag_arr && "Endless recursion detected");
    set_frag_value(arr, pos, val);

  /* iterative variant: the 1000-iteration bound guards against cyclic
     frag-array chains. */
  for (i = 0; i < 1000; ++i) {
    if (!frag_arr[pos]) {
      frag_arr[pos] = val;
    if (frag_arr[current_ir_graph->n_loc - 1]) {
      ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);

  assert(0 && "potential endless recursion");
} /* set_frag_value */
/* Returns the value at `pos` as seen after the fragile op `cfOp`,
   consulting the op's frag_arr and falling back to the block's
   graph_arr / a freshly merged Phi.
   NOTE(review): this excerpt appears truncated — the return type line,
   declarations of `res`/`frag_arr`/`nin`, some if/else skeletons and
   `return res;` are missing. */
get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {

  assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
  frag_arr = get_frag_arr(cfOp);
  res = frag_arr[pos];

    if (block->attr.block.graph_arr[pos]) {
      /* There was a set_value() after the cfOp and no get_value() before that
         set_value().  We must build a Phi node now. */
      if (block->attr.block.is_matured) {
        int ins = get_irn_arity(block);
        /* nin lives on the C stack (alloca). */
        NEW_ARR_A(ir_node *, nin, ins);
        res = phi_merge(block, pos, mode, nin, ins);
        /* Block not yet mature: create a placeholder Phi0 instead. */
        res = new_rd_Phi0(current_ir_graph, block, mode);
        res->attr.phi0.pos = pos;
        res->link = block->link;

      /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
         but this should be better: (remove comment if this works) */
      /* It's a Phi, we can write this into all graph_arrs with NULL */
      set_frag_value(block->attr.block.graph_arr, pos, res);
      res = get_r_value_internal(block, pos, mode);
      set_frag_value(block->attr.block.graph_arr, pos, res);

} /* get_r_frag_value_internal */
2316 #endif /* PRECISE_EXC_CONTEXT */
2319 * Computes the predecessors for the real phi node, and then
2320 * allocates and returns this node. The routine called to allocate the
2321 * node might optimize it away and return a real value.
2322 * This function must be called with an in-array of proper size.
/* General phi_merge: computes the predecessors for the real Phi node,
   then allocates and returns it; handles Bad predecessors, the start
   block, and (with PRECISE_EXC_CONTEXT) fragile control-flow ops.
   NOTE(review): this excerpt appears truncated — the return type line,
   the declaration of `i`, several if/else skeletons, closing braces and
   the final `return res;` are missing. */
phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
  ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;

  /* If this block has no value at pos create a Phi0 and remember it
     in graph_arr to break recursions.
     Else we may not set graph_arr as there a later value is remembered. */
  phi0 = NULL;
  if (!block->attr.block.graph_arr[pos]) {
    if (block == get_irg_start_block(current_ir_graph)) {
      /* Collapsing to Bad tarvals is no good idea.
         So we call a user-supplied routine here that deals with this case as
         appropriate for the given language.  Sorrily the only help we can give
         here is the position.

         Even if all variables are defined before use, it can happen that
         we get to the start block, if a Cond has been replaced by a tuple
         (bad, jmp).  In this case we call the function needlessly, eventually
         generating an non existent error.
         However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
         before recurring. */
      if (default_initialize_local_variable) {
        ir_node *rem = get_cur_block();

        set_cur_block(block);
        /* Let the frontend decide what an uninitialized variable means. */
        block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
        /* No callback installed: fall back to a Bad constant. */
        block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
      /* We don't need to care about exception ops in the start block.
         There are none by definition. */
      return block->attr.block.graph_arr[pos];
      /* Not the start block: create a Phi0 placeholder to break recursion. */
      phi0 = new_rd_Phi0(current_ir_graph, block, mode);
      block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
      if (get_opt_precise_exc_context()) {
        /* Set graph_arr for fragile ops.  Also here we should break recursion.
           We could choose a cyclic path through an cfop.  But the recursion would
           break at some point. */
        set_frag_value(block->attr.block.graph_arr, pos, phi0);

  /* This loop goes to all predecessor blocks of the block the Phi node
     is in and there finds the operands of the Phi node by calling
     get_r_value_internal. */
  for (i = 1; i <= ins; ++i) {
    prevCfOp = skip_Proj(block->in[i]);
    if (is_Bad(prevCfOp)) {
      /* In case a Cond has been optimized we would get right to the start block
         with an invalid definition. */
      nin[i-1] = new_Bad();
    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
    if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
      if (get_opt_precise_exc_context() &&
          is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
        assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
        nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
        nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
      nin[i-1] = new_Bad();

  /* We want to pass the Phi0 node to the constructor: this finds additional
     optimization possibilities.
     The Phi0 node either is allocated in this function, or it comes from
     a former call to get_r_value_internal.  In this case we may not yet
     exchange phi0, as this is done in mature_immBlock. */
  phi0_all = block->attr.block.graph_arr[pos];
  if (!((get_irn_op(phi0_all) == op_Phi) &&
        (get_irn_arity(phi0_all) == 0)   &&
        (get_nodes_block(phi0_all) == block)))
    phi0_all = NULL;

  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created.  This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node. */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);

  /* In case we allocated a Phi0 node at the beginning of this procedure,
     we need to exchange this Phi0 with the real Phi. */
    exchange(phi0, res);
    block->attr.block.graph_arr[pos] = res;
    /* Don't set_frag_value as it does not overwrite.  Doesn't matter, is
       only an optimization. */
2435 * This function returns the last definition of a variable. In case
2436 * this variable was last defined in a previous block, Phi nodes are
2437 * inserted. If the part of the firm graph containing the definition
2438 * is not yet constructed, a dummy Phi node is returned.
2441 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2443 /* There are 4 cases to treat.
2445 1. The block is not mature and we visit it the first time. We can not
2446 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2447 predecessors is returned. This node is added to the linked list (field
2448 "link") of the containing block to be completed when this block is
2449 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2452 2. The value is already known in this block, graph_arr[pos] is set and we
2453 visit the block the first time. We can return the value without
2454 creating any new nodes.
2456 3. The block is mature and we visit it the first time. A Phi node needs
2457 to be created (phi_merge). If the Phi is not needed, as all it's
2458 operands are the same value reaching the block through different
2459 paths, it's optimized away and the value itself is returned.
2461 4. The block is mature, and we visit it the second time. Now two
2462 subcases are possible:
2463 * The value was computed completely the last time we were here. This
2464 is the case if there is no loop. We can return the proper value.
2465 * The recursion that visited this node and set the flag did not
2466 return yet. We are computing a value in a loop and need to
2467 break the recursion. This case only happens if we visited
2468 the same block with phi_merge before, which inserted a Phi0.
2469 So we return the Phi0.
2472 /* case 4 -- already visited. */
2473 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2474 /* As phi_merge allocates a Phi0 this value is always defined. Here
2475 is the critical difference of the two algorithms. */
2476 assert(block->attr.block.graph_arr[pos]);
2477 return block->attr.block.graph_arr[pos];
2480 /* visited the first time */
2481 set_irn_visited(block, get_irg_visited(current_ir_graph));
2483 /* Get the local valid value */
2484 res = block->attr.block.graph_arr[pos];
2486 /* case 2 -- If the value is actually computed, return it. */
2487 if (res) { return res; };
2489 if (block->attr.block.is_matured) { /* case 3 */
2491 /* The Phi has the same amount of ins as the corresponding block. */
2492 int ins = get_irn_arity(block);
2494 NEW_ARR_A (ir_node *, nin, ins);
2496 /* Phi merge collects the predecessors and then creates a node. */
2497 res = phi_merge (block, pos, mode, nin, ins);
2499 } else { /* case 1 */
2500 /* The block is not mature, we don't know how many in's are needed. A Phi
2501 with zero predecessors is created. Such a Phi node is called Phi0
2502 node. The Phi0 is then added to the list of Phi0 nodes in this block
2503 to be matured by mature_immBlock later.
2504 The Phi0 has to remember the pos of it's internal value. If the real
2505 Phi is computed, pos is used to update the array with the local
2507 res = new_rd_Phi0(current_ir_graph, block, mode);
2508 res->attr.phi0.pos = pos;
2509 res->link = block->link;
2513 /* If we get here, the frontend missed a use-before-definition error */
2516 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2517 assert(mode->code >= irm_F && mode->code <= irm_P);
2518 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2519 get_mode_null(mode));
2522 /* The local valid value is available now. */
2523 block->attr.block.graph_arr[pos] = res;
2526 } /* get_r_value_internal */
2528 #endif /* USE_FAST_PHI_CONSTRUCTION */
2530 /* ************************************************************************** */
2533 * Finalize a Block node, when all control flows are known.
2534 * Acceptable parameters are only Block nodes.
2537 mature_immBlock(ir_node *block) {
2542 assert(get_irn_opcode(block) == iro_Block);
2543 /* @@@ should be commented in
2544 assert (!get_Block_matured(block) && "Block already matured"); */
2546 if (!get_Block_matured(block)) {
2547 ins = ARR_LEN(block->in)-1;
2548 /* Fix block parameters */
2549 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2551 /* An array for building the Phi nodes. */
2552 NEW_ARR_A(ir_node *, nin, ins);
2554 /* Traverse a chain of Phi nodes attached to this block and mature
2556 for (n = block->link; n; n = next) {
2557 inc_irg_visited(current_ir_graph);
2559 exchange(n, phi_merge(block, n->attr.phi0.pos, n->mode, nin, ins));
2562 block->attr.block.is_matured = 1;
2564 /* Now, as the block is a finished firm node, we can optimize it.
2565 Since other nodes have been allocated since the block was created
2566 we can not free the node on the obstack. Therefore we have to call
2568 Unfortunately the optimization does not change a lot, as all allocated
2569 nodes refer to the unoptimized node.
2570 We can call _2, as global cse has no effect on blocks. */
2571 block = optimize_in_place_2(block);
2572 IRN_VRFY_IRG(block, current_ir_graph);
2574 } /* mature_immBlock */
2577 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2578 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2582 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2583 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2587 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2588 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2589 } /* new_d_Const_long */
2592 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2593 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2594 } /* new_d_Const_type */
2598 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2599 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2603 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2604 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2608 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2610 assert(arg->op == op_Cond);
2611 arg->attr.cond.kind = fragmentary;
2612 arg->attr.cond.default_proj = max_proj;
2613 res = new_Proj(arg, mode_X, max_proj);
2615 } /* new_d_defaultProj */
2618 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2619 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2623 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2624 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2625 } /* new_d_strictConv */
2628 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2629 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2633 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2634 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2643 * Allocate the frag array.
2645 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2646 if (get_opt_precise_exc_context()) {
2647 if ((current_ir_graph->phase_state == phase_building) &&
2648 (get_irn_op(res) == op) && /* Could be optimized away. */
2649 !*frag_store) /* Could be a cse where the arr is already set. */ {
2650 *frag_store = new_frag_arr(res);
2653 } /* allocate_frag_arr */
2656 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2658 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode);
2659 #if PRECISE_EXC_CONTEXT
2660 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2667 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2669 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode);
2670 #if PRECISE_EXC_CONTEXT
2671 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2675 } /* new_d_DivMod */
2678 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2680 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode);
2681 #if PRECISE_EXC_CONTEXT
2682 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2689 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2691 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode);
2692 #if PRECISE_EXC_CONTEXT
2693 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2712 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2713 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2717 new_d_Jmp(dbg_info *db) {
2718 return new_bd_Jmp(db, current_ir_graph->current_block);
2722 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2723 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2727 new_d_Cond(dbg_info *db, ir_node *c) {
2728 return new_bd_Cond(db, current_ir_graph->current_block, c);
2732 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2735 res = new_bd_Call(db, current_ir_graph->current_block,
2736 store, callee, arity, in, tp);
2737 #if PRECISE_EXC_CONTEXT
2738 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2745 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2746 return new_bd_Return(db, current_ir_graph->current_block,
2748 } /* new_d_Return */
2751 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2753 res = new_bd_Load(db, current_ir_graph->current_block,
2755 #if PRECISE_EXC_CONTEXT
2756 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2763 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2765 res = new_bd_Store(db, current_ir_graph->current_block,
2767 #if PRECISE_EXC_CONTEXT
2768 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2775 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2776 where_alloc where) {
2778 res = new_bd_Alloc(db, current_ir_graph->current_block,
2779 store, size, alloc_type, where);
2780 #if PRECISE_EXC_CONTEXT
2781 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2788 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2789 ir_node *size, ir_type *free_type, where_alloc where) {
2790 return new_bd_Free(db, current_ir_graph->current_block,
2791 store, ptr, size, free_type, where);
2795 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2796 /* GL: objptr was called frame before. Frame was a bad choice for the name
2797 as the operand could as well be a pointer to a dynamic object. */
2799 return new_bd_Sel(db, current_ir_graph->current_block,
2800 store, objptr, 0, NULL, ent);
2801 } /* new_d_simpleSel */
2804 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2805 return new_bd_Sel(db, current_ir_graph->current_block,
2806 store, objptr, n_index, index, sel);
2810 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2811 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2813 } /* new_d_SymConst_type */
2816 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
2817 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2818 value, kind, firm_unknown_type);
2819 } /* new_d_SymConst */
2822 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2823 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2829 return _new_d_Bad();
2833 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2834 return new_bd_Confirm(db, current_ir_graph->current_block,
2836 } /* new_d_Confirm */
2839 new_d_Unknown(ir_mode *m) {
2840 return new_bd_Unknown(m);
2841 } /* new_d_Unknown */
2844 new_d_CallBegin(dbg_info *db, ir_node *call) {
2845 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2846 } /* new_d_CallBegin */
2849 new_d_EndReg(dbg_info *db) {
2850 return new_bd_EndReg(db, current_ir_graph->current_block);
2851 } /* new_d_EndReg */
2854 new_d_EndExcept(dbg_info *db) {
2855 return new_bd_EndExcept(db, current_ir_graph->current_block);
2856 } /* new_d_EndExcept */
2859 new_d_Break(dbg_info *db) {
2860 return new_bd_Break(db, current_ir_graph->current_block);
2864 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2865 return new_bd_Filter(db, current_ir_graph->current_block,
2867 } /* new_d_Filter */
2870 (new_d_NoMem)(void) {
2871 return _new_d_NoMem();
2875 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2876 ir_node *ir_true, ir_mode *mode) {
2877 return new_bd_Mux(db, current_ir_graph->current_block,
2878 sel, ir_false, ir_true, mode);
2882 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2883 return new_bd_Psi(db, current_ir_graph->current_block,
2884 arity, conds, vals, mode);
2887 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2888 ir_node *dst, ir_node *src, ir_type *data_type) {
2890 res = new_bd_CopyB(db, current_ir_graph->current_block,
2891 store, dst, src, data_type);
2892 #if PRECISE_EXC_CONTEXT
2893 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2899 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2900 return new_bd_InstOf(db, current_ir_graph->current_block,
2901 store, objptr, type);
2902 } /* new_d_InstOf */
2905 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2906 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2909 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2910 ir_node *idx, ir_node *lower, ir_node *upper) {
2912 res = new_bd_Bound(db, current_ir_graph->current_block,
2913 store, idx, lower, upper);
2914 #if PRECISE_EXC_CONTEXT
2915 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2921 new_d_Pin(dbg_info *db, ir_node *node) {
2922 return new_bd_Pin(db, current_ir_graph->current_block, node);
2926 new_d_ASM(dbg_info *db, ir_node *store, int arity, ir_node *inputs[], ident *asm_text) {
2927 return new_bd_ASM(db, current_ir_graph->current_block, store, arity, inputs, asm_text);
2930 /* ********************************************************************* */
2931 /* Comfortable interface with automatic Phi node construction. */
2932 /* (Uses also constructors of ?? interface, except new_Block. */
2933 /* ********************************************************************* */
2935 /* Block construction */
2936 /* immature Block without predecessors */
2938 new_d_immBlock(dbg_info *db) {
2941 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2942 /* creates a new dynamic in-array as length of in is -1 */
2943 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2944 current_ir_graph->current_block = res;
2946 /* macroblock head */
2949 res->attr.block.is_matured = 0;
2950 res->attr.block.is_dead = 0;
2951 res->attr.block.is_mb_head = 1;
2952 res->attr.block.irg = current_ir_graph;
2953 res->attr.block.backedge = NULL;
2954 res->attr.block.in_cg = NULL;
2955 res->attr.block.cg_backedge = NULL;
2956 res->attr.block.extblk = NULL;
2957 res->attr.block.region = NULL;
2958 res->attr.block.mb_depth = 0;
2960 set_Block_block_visited(res, 0);
2962 /* Create and initialize array for Phi-node construction. */
2963 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2964 current_ir_graph->n_loc);
2965 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2967 /* Immature block may not be optimized! */
2968 IRN_VRFY_IRG(res, current_ir_graph);
2971 } /* new_d_immBlock */
2974 new_immBlock(void) {
2975 return new_d_immBlock(NULL);
2976 } /* new_immBlock */
2978 /* immature PartBlock with its predecessors */
2980 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2981 ir_node *res = new_d_immBlock(db);
2982 ir_node *blk = get_nodes_block(pred_jmp);
2984 res->in[0] = blk->in[0];
2985 res->attr.block.is_mb_head = 0;
2986 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2988 add_immBlock_pred(res, pred_jmp);
2991 } /* new_d_immPartBlock */
2994 new_immPartBlock(ir_node *pred_jmp) {
2995 return new_d_immPartBlock(NULL, pred_jmp);
2996 } /* new_immPartBlock */
2998 /* add an edge to a jmp/control flow node */
3000 add_immBlock_pred(ir_node *block, ir_node *jmp) {
3001 int n = ARR_LEN(block->in) - 1;
3003 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
3004 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
3005 assert(jmp != NULL);
3007 ARR_APP1(ir_node *, block->in, jmp);
3009 hook_set_irn_n(block, n, jmp, NULL);
3010 } /* add_immBlock_pred */
3012 /* changing the current block */
3014 set_cur_block(ir_node *target) {
3015 current_ir_graph->current_block = target;
3016 } /* set_cur_block */
3018 /* ************************ */
3019 /* parameter administration */
3021 /* get a value from the parameter array from the current block by its index */
3023 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
3024 ir_graph *irg = current_ir_graph;
3025 assert(get_irg_phase_state(irg) == phase_building);
3026 inc_irg_visited(irg);
3028 return get_r_value_internal(irg->current_block, pos + 1, mode);
3031 /* get a value from the parameter array from the current block by its index */
3033 get_value(int pos, ir_mode *mode) {
3034 return get_d_value(NULL, pos, mode);
3037 /* set a value at position pos in the parameter array from the current block */
3039 set_value(int pos, ir_node *value) {
3040 ir_graph *irg = current_ir_graph;
3041 assert(get_irg_phase_state(irg) == phase_building);
3042 assert(pos+1 < irg->n_loc);
3043 irg->current_block->attr.block.graph_arr[pos + 1] = value;
3046 /* Find the value number for a node in the current block.*/
3048 find_value(ir_node *value) {
3050 ir_node *bl = current_ir_graph->current_block;
3052 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
3053 if (bl->attr.block.graph_arr[i] == value)
3058 /* get the current store */
3061 ir_graph *irg = current_ir_graph;
3063 assert(get_irg_phase_state(irg) == phase_building);
3064 /* GL: one could call get_value instead */
3065 inc_irg_visited(irg);
3066 return get_r_value_internal(irg->current_block, 0, mode_M);
3069 /* set the current store: handles automatic Sync construction for Load nodes */
3071 set_store(ir_node *store) {
3072 ir_node *load, *pload, *pred, *in[2];
3074 assert(get_irg_phase_state(current_ir_graph) == phase_building);
3075 /* Beware: due to dead code elimination, a store might become a Bad node even in
3076 the construction phase. */
3077 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
3079 if (get_opt_auto_create_sync()) {
3080 /* handle non-volatile Load nodes by automatically creating Sync's */
3081 load = skip_Proj(store);
3082 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3083 pred = get_Load_mem(load);
3085 if (is_Sync(pred)) {
3086 /* a Load after a Sync: move it up */
3087 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3089 set_Load_mem(load, get_memop_mem(mem));
3090 add_Sync_pred(pred, store);
3093 pload = skip_Proj(pred);
3094 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3095 /* a Load after a Load: create a new Sync */
3096 set_Load_mem(load, get_Load_mem(pload));
3100 store = new_Sync(2, in);
3105 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3109 keep_alive(ir_node *ka) {
3110 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3113 /* --- Useful access routines --- */
3114 /* Returns the current block of the current graph. To set the current
3115 block use set_cur_block. */
3116 ir_node *get_cur_block(void) {
3117 return get_irg_current_block(current_ir_graph);
3118 } /* get_cur_block */
3120 /* Returns the frame type of the current graph */
3121 ir_type *get_cur_frame_type(void) {
3122 return get_irg_frame_type(current_ir_graph);
3123 } /* get_cur_frame_type */
3126 /* ********************************************************************* */
3129 /* call once for each run of the library */
3131 init_cons(uninitialized_local_variable_func_t *func) {
3132 default_initialize_local_variable = func;
3136 irp_finalize_cons(void) {
3138 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3139 irg_finalize_cons(get_irp_irg(i));
3141 irp->phase_state = phase_high;
3142 } /* irp_finalize_cons */
3145 ir_node *new_Block(int arity, ir_node **in) {
3146 return new_d_Block(NULL, arity, in);
3148 ir_node *new_Start(void) {
3149 return new_d_Start(NULL);
3151 ir_node *new_End(void) {
3152 return new_d_End(NULL);
3154 ir_node *new_Jmp(void) {
3155 return new_d_Jmp(NULL);
3157 ir_node *new_IJmp(ir_node *tgt) {
3158 return new_d_IJmp(NULL, tgt);
3160 ir_node *new_Cond(ir_node *c) {
3161 return new_d_Cond(NULL, c);
3163 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3164 return new_d_Return(NULL, store, arity, in);
3166 ir_node *new_Const(ir_mode *mode, tarval *con) {
3167 return new_d_Const(NULL, mode, con);
3170 ir_node *new_Const_long(ir_mode *mode, long value) {
3171 return new_d_Const_long(NULL, mode, value);
3174 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3175 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3178 ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
3179 return new_d_SymConst_type(NULL, value, kind, type);
3181 ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
3182 return new_d_SymConst(NULL, value, kind);
3184 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3185 return new_d_simpleSel(NULL, store, objptr, ent);
3187 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3189 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3191 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3193 return new_d_Call(NULL, store, callee, arity, in, tp);
3195 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3196 return new_d_Add(NULL, op1, op2, mode);
3198 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3199 return new_d_Sub(NULL, op1, op2, mode);
3201 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3202 return new_d_Minus(NULL, op, mode);
3204 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3205 return new_d_Mul(NULL, op1, op2, mode);
3207 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3208 return new_d_Quot(NULL, memop, op1, op2, mode);
3210 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3211 return new_d_DivMod(NULL, memop, op1, op2, mode);
3213 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3214 return new_d_Div(NULL, memop, op1, op2, mode);
3216 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3217 return new_d_Mod(NULL, memop, op1, op2, mode);
3219 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3220 return new_d_Abs(NULL, op, mode);
3222 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3223 return new_d_And(NULL, op1, op2, mode);
3225 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3226 return new_d_Or(NULL, op1, op2, mode);
3228 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3229 return new_d_Eor(NULL, op1, op2, mode);
3231 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3232 return new_d_Not(NULL, op, mode);
3234 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3235 return new_d_Shl(NULL, op, k, mode);
3237 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3238 return new_d_Shr(NULL, op, k, mode);
3240 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3241 return new_d_Shrs(NULL, op, k, mode);
3243 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
3244 return new_d_Rot(NULL, op, k, mode);
3246 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3247 return new_d_Carry(NULL, op1, op2, mode);
3249 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3250 return new_d_Borrow(NULL, op1, op2, mode);
3252 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3253 return new_d_Cmp(NULL, op1, op2);
3255 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3256 return new_d_Conv(NULL, op, mode);
3258 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3259 return new_d_strictConv(NULL, op, mode);
3261 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3262 return new_d_Cast(NULL, op, to_tp);
3264 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3265 return new_d_Phi(NULL, arity, in, mode);
3267 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3268 return new_d_Load(NULL, store, addr, mode);
3270 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3271 return new_d_Store(NULL, store, addr, val);
3273 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3274 where_alloc where) {
3275 return new_d_Alloc(NULL, store, size, alloc_type, where);
3277 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3278 ir_type *free_type, where_alloc where) {
3279 return new_d_Free(NULL, store, ptr, size, free_type, where);
3281 ir_node *new_Sync(int arity, ir_node *in[]) {
3282 return new_d_Sync(NULL, arity, in);
3284 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3285 return new_d_Proj(NULL, arg, mode, proj);
3287 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3288 return new_d_defaultProj(NULL, arg, max_proj);
3290 ir_node *new_Tuple(int arity, ir_node **in) {
3291 return new_d_Tuple(NULL, arity, in);
3293 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3294 return new_d_Id(NULL, val, mode);
3296 ir_node *new_Bad(void) {
3299 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3300 return new_d_Confirm (NULL, val, bound, cmp);
3302 ir_node *new_Unknown(ir_mode *m) {
3303 return new_d_Unknown(m);
3305 ir_node *new_CallBegin(ir_node *callee) {
3306 return new_d_CallBegin(NULL, callee);
3308 ir_node *new_EndReg(void) {
3309 return new_d_EndReg(NULL);
3311 ir_node *new_EndExcept(void) {
3312 return new_d_EndExcept(NULL);
3314 ir_node *new_Break(void) {
3315 return new_d_Break(NULL);
3317 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3318 return new_d_Filter(NULL, arg, mode, proj);
3320 ir_node *new_NoMem(void) {
3321 return new_d_NoMem();
3323 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3324 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3326 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3327 return new_d_Psi(NULL, arity, conds, vals, mode);
3329 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3330 return new_d_CopyB(NULL, store, dst, src, data_type);
3332 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3333 return new_d_InstOf(NULL, store, objptr, ent);
3335 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3336 return new_d_Raise(NULL, store, obj);
3338 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3339 return new_d_Bound(NULL, store, idx, lower, upper);
3341 ir_node *new_Pin(ir_node *node) {
3342 return new_d_Pin(NULL, node);
3344 ir_node *new_ASM(ir_node *store, int arity, ir_node *inputs[], ident *asm_text) {
3345 return new_d_ASM(NULL, store, arity, inputs, asm_text);