2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52    Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
/* IRN_VRFY_IRG(res, irg): in verifying builds this expands to
   irn_vrfy_irg(res, irg); otherwise it expands to nothing, so the
   constructors below can call it unconditionally at zero cost. */
60 /* when we need verifying */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
/* Frontend-installable callback that supplies a value for a local
   variable read before any write.  NOTE(review): the call sites are not
   visible in this chunk -- confirm usage before changing the default. */
68 * Language dependent variable initialization callback.
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = op_pin_state_pinned; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for an divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode); \
161 current_ir_graph = rem; \
165 /* creates a d constructor for an binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Cannot be used with automatic
183 * Phi node construction.
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
/* A Block is not nested inside another block, hence the NULL block. */
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
/* Initialize block attributes: alive, its own macroblock head, owning
   graph, and fresh backedge bookkeeping sized for `arity` preds. */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.irg = irg;
198 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
199 res->attr.block.in_cg = NULL;
200 res->attr.block.cg_backedge = NULL;
201 res->attr.block.extblk = NULL;
202 res->attr.block.mb_depth = 0;
/* All predecessors were supplied up front, so the block is born mature. */
204 set_Block_matured(res, 1);
205 set_Block_block_visited(res, 0);
207 IRN_VRFY_IRG(res, irg);
212 new_bd_Start(dbg_info *db, ir_node *block) {
214 ir_graph *irg = current_ir_graph;
216 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
218 IRN_VRFY_IRG(res, irg);
223 new_bd_End(dbg_info *db, ir_node *block) {
225 ir_graph *irg = current_ir_graph;
227 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
229 IRN_VRFY_IRG(res, irg);
234 * Creates a Phi node with all predecessors. Calling this constructor
235 * is only allowed if the corresponding block is mature.
238 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
240 ir_graph *irg = current_ir_graph;
244 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature, its arity must match the Phi's. */
246 if ( get_Block_matured(block) )
247 assert( get_irn_arity(block) == arity );
249 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
251 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan operands for Unknown inputs; with any Unknown present the node is
   not optimized below (has_unknown is set on a line elided here). */
253 for (i = arity-1; i >= 0; i--)
254 if (get_irn_op(in[i]) == op_Unknown) {
259 if (!has_unknown) res = optimize_node(res);
260 IRN_VRFY_IRG(res, irg);
262 /* Memory Phis in endless loops must be kept alive.
263    As we can't distinguish these easily we keep all of them alive. */
264 if ((res->op == op_Phi) && (mode == mode_M))
265 add_End_keepalive(get_irg_end(irg), res);
270 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
272 ir_graph *irg = current_ir_graph;
/* NOTE(review): the block parameter is ignored here -- Const nodes are
   always placed in the start block of the graph. */
274 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
275 res->attr.con.tv = con;
276 set_Const_type(res, tp); /* Call method because of complex assertion. */
277 res = optimize_node (res);
/* optimize_node (CSE) must not have exchanged the attached type. */
278 assert(get_Const_type(res) == tp);
279 IRN_VRFY_IRG(res, irg);
282 } /* new_bd_Const_type */
285 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
286 ir_graph *irg = current_ir_graph;
288 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
292 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
293 ir_graph *irg = current_ir_graph;
295 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
296 } /* new_bd_Const_long */
299 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
301 ir_graph *irg = current_ir_graph;
303 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
310 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
313 ir_graph *irg = current_ir_graph;
315 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
316 res->attr.proj = proj;
319 assert(get_Proj_pred(res));
320 assert(get_nodes_block(get_Proj_pred(res)));
322 res = optimize_node(res);
324 IRN_VRFY_IRG(res, irg);
329 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
332 ir_graph *irg = current_ir_graph;
334 assert(arg->op == op_Cond);
335 arg->attr.cond.kind = fragmentary;
336 arg->attr.cond.default_proj = max_proj;
337 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
339 } /* new_bd_defaultProj */
342 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
344 ir_graph *irg = current_ir_graph;
346 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
347 res->attr.conv.strict = strict_flag;
348 res = optimize_node(res);
349 IRN_VRFY_IRG(res, irg);
354 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
356 ir_graph *irg = current_ir_graph;
358 assert(is_atomic_type(to_tp));
360 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
361 res->attr.cast.totype = to_tp;
362 res = optimize_node(res);
363 IRN_VRFY_IRG(res, irg);
368 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
370 ir_graph *irg = current_ir_graph;
372 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
373 res = optimize_node (res);
374 IRN_VRFY_IRG(res, irg);
399 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
402 ir_graph *irg = current_ir_graph;
405 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
406 res = optimize_node(res);
407 IRN_VRFY_IRG(res, irg);
412 new_bd_Jmp(dbg_info *db, ir_node *block) {
414 ir_graph *irg = current_ir_graph;
416 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
417 res = optimize_node (res);
418 IRN_VRFY_IRG(res, irg);
423 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
425 ir_graph *irg = current_ir_graph;
427 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
428 res = optimize_node (res);
429 IRN_VRFY_IRG(res, irg);
431 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
437 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
439 ir_graph *irg = current_ir_graph;
441 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
442 res->attr.cond.kind = dense;
443 res->attr.cond.default_proj = 0;
444 res->attr.cond.pred = COND_JMP_PRED_NONE;
445 res = optimize_node (res);
446 IRN_VRFY_IRG(res, irg);
451 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
452 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
456 ir_graph *irg = current_ir_graph;
/* Operand layout: r_in[0] = memory, r_in[1] = callee (assigned on lines
   elided here), r_in[2..] = the actual call arguments. */
459 NEW_ARR_A(ir_node *, r_in, r_arity);
462 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
464 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type (or the unknown type). */
466 assert((get_unknown_type() == tp) || is_Method_type(tp));
467 set_Call_type(res, tp);
/* Calls may raise exceptions, so they start out pinned. */
468 res->attr.call.exc.pin_state = op_pin_state_pinned;
469 res->attr.call.callee_arr = NULL;
470 res = optimize_node(res);
471 IRN_VRFY_IRG(res, irg);
476 new_bd_Return(dbg_info *db, ir_node *block,
477 ir_node *store, int arity, ir_node **in) {
481 ir_graph *irg = current_ir_graph;
484 NEW_ARR_A (ir_node *, r_in, r_arity);
486 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
487 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
491 } /* new_bd_Return */
494 new_bd_Load(dbg_info *db, ir_node *block,
495 ir_node *store, ir_node *adr, ir_mode *mode) {
498 ir_graph *irg = current_ir_graph;
502 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
503 res->attr.load.exc.pin_state = op_pin_state_pinned;
504 res->attr.load.load_mode = mode;
505 res->attr.load.volatility = volatility_non_volatile;
506 res = optimize_node(res);
507 IRN_VRFY_IRG(res, irg);
512 new_bd_Store(dbg_info *db, ir_node *block,
513 ir_node *store, ir_node *adr, ir_node *val) {
516 ir_graph *irg = current_ir_graph;
521 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
522 res->attr.store.exc.pin_state = op_pin_state_pinned;
523 res->attr.store.volatility = volatility_non_volatile;
524 res = optimize_node(res);
525 IRN_VRFY_IRG(res, irg);
530 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
531 ir_node *size, ir_type *alloc_type, where_alloc where) {
534 ir_graph *irg = current_ir_graph;
538 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
539 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
540 res->attr.alloc.where = where;
541 res->attr.alloc.type = alloc_type;
542 res = optimize_node(res);
543 IRN_VRFY_IRG(res, irg);
548 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
549 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
552 ir_graph *irg = current_ir_graph;
557 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
558 res->attr.free.where = where;
559 res->attr.free.type = free_type;
560 res = optimize_node(res);
561 IRN_VRFY_IRG(res, irg);
566 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
567 int arity, ir_node **in, ir_entity *ent) {
571 ir_graph *irg = current_ir_graph;
/* Method entities are selected with code-pointer mode, everything else
   with data-pointer mode. */
572 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
574 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
/* Operand layout: r_in[0] = memory, r_in[1] = object pointer (assigned
   on lines elided here), r_in[2..] = the index operands. */
577 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
580 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
582 * Sel's can select functions which should be of mode mode_P_code.
584 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
585 res->attr.sel.ent = ent;
586 res = optimize_node(res);
587 IRN_VRFY_IRG(res, irg);
592 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
593 symconst_kind symkind, ir_type *tp) {
596 ir_graph *irg = current_ir_graph;
/* Address symbols get data-pointer mode; for other kinds `mode` keeps the
   value chosen on a line elided here -- verify upstream before relying
   on it. */
598 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
599 mode = mode_P_data; /* FIXME: can be mode_P_code */
603 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
/* Record which kind of symbolic constant this is, its symbol and type. */
605 res->attr.symc.num = symkind;
606 res->attr.symc.sym = value;
607 res->attr.symc.tp = tp;
609 res = optimize_node(res);
610 IRN_VRFY_IRG(res, irg);
612 } /* new_bd_SymConst_type */
615 new_bd_Sync(dbg_info *db, ir_node *block) {
617 ir_graph *irg = current_ir_graph;
619 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
620 /* no need to call optimize node here, Sync are always created with no predecessors */
621 IRN_VRFY_IRG(res, irg);
626 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
627 ir_node *in[2], *res;
628 ir_graph *irg = current_ir_graph;
632 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
633 res->attr.confirm_cmp = cmp;
634 res = optimize_node (res);
635 IRN_VRFY_IRG(res, irg);
637 } /* new_bd_Confirm */
640 new_bd_Unknown(ir_mode *m) {
642 ir_graph *irg = current_ir_graph;
644 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
645 res = optimize_node(res);
647 } /* new_bd_Unknown */
650 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
653 ir_graph *irg = current_ir_graph;
655 in[0] = get_Call_ptr(call);
656 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
657 /* res->attr.callbegin.irg = irg; */
658 res->attr.callbegin.call = call;
659 res = optimize_node(res);
660 IRN_VRFY_IRG(res, irg);
662 } /* new_bd_CallBegin */
665 new_bd_EndReg(dbg_info *db, ir_node *block) {
667 ir_graph *irg = current_ir_graph;
669 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
670 set_irg_end_reg(irg, res);
671 IRN_VRFY_IRG(res, irg);
673 } /* new_bd_EndReg */
676 new_bd_EndExcept(dbg_info *db, ir_node *block) {
678 ir_graph *irg = current_ir_graph;
680 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
681 set_irg_end_except(irg, res);
682 IRN_VRFY_IRG (res, irg);
684 } /* new_bd_EndExcept */
687 new_bd_Break(dbg_info *db, ir_node *block) {
689 ir_graph *irg = current_ir_graph;
691 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
692 res = optimize_node(res);
693 IRN_VRFY_IRG(res, irg);
698 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
701 ir_graph *irg = current_ir_graph;
703 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
704 res->attr.filter.proj = proj;
705 res->attr.filter.in_cg = NULL;
706 res->attr.filter.backedge = NULL;
709 assert(get_Proj_pred(res));
710 assert(get_nodes_block(get_Proj_pred(res)));
712 res = optimize_node(res);
713 IRN_VRFY_IRG(res, irg);
715 } /* new_bd_Filter */
718 new_bd_Mux(dbg_info *db, ir_node *block,
719 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
722 ir_graph *irg = current_ir_graph;
728 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
731 res = optimize_node(res);
732 IRN_VRFY_IRG(res, irg);
737 new_bd_Psi(dbg_info *db, ir_node *block,
738 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
741 ir_graph *irg = current_ir_graph;
744 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
746 for (i = 0; i < arity; ++i) {
748 in[2 * i + 1] = vals[i];
752 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
755 res = optimize_node(res);
756 IRN_VRFY_IRG(res, irg);
761 new_bd_CopyB(dbg_info *db, ir_node *block,
762 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
765 ir_graph *irg = current_ir_graph;
771 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
773 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
774 res->attr.copyb.data_type = data_type;
775 res = optimize_node(res);
776 IRN_VRFY_IRG(res, irg);
781 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
782 ir_node *objptr, ir_type *type) {
785 ir_graph *irg = current_ir_graph;
789 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
790 res->attr.instof.type = type;
791 res = optimize_node(res);
792 IRN_VRFY_IRG(res, irg);
794 } /* new_bd_InstOf */
797 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
800 ir_graph *irg = current_ir_graph;
804 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
805 res = optimize_node(res);
806 IRN_VRFY_IRG(res, irg);
811 new_bd_Bound(dbg_info *db, ir_node *block,
812 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
815 ir_graph *irg = current_ir_graph;
821 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
822 res->attr.bound.exc.pin_state = op_pin_state_pinned;
823 res = optimize_node(res);
824 IRN_VRFY_IRG(res, irg);
829 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
831 ir_graph *irg = current_ir_graph;
833 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
834 res = optimize_node(res);
835 IRN_VRFY_IRG(res, irg);
840 new_bd_ASM(dbg_info *db, ir_node *block, ir_node *store, int arity, ir_node *inputs[], ident *asm_text) {
842 ir_graph *irg = current_ir_graph;
/* in[0] is the memory operand (assigned on a line elided here); the
   explicit inputs follow at offsets 1..arity. */
845 NEW_ARR_A(ir_node *, in, arity + 1);
848 for (i = 0; i < arity; ++i)
849 in[i + 1] = inputs[i];
851 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity + 1, in);
/* Inline assembler must not be moved around: pin it. */
852 res->attr.assem.pin_state = op_pin_state_pinned;
853 res->attr.assem.asm_text = asm_text;
855 res = optimize_node(res);
856 IRN_VRFY_IRG(res, irg);
860 /* --------------------------------------------- */
861 /* private interfaces, for professional use only */
862 /* --------------------------------------------- */
864 /* Constructs a Block with a fixed number of predecessors.
865 Does not set current_block. Can not be used with automatic
866 Phi node construction. */
868 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
869 ir_graph *rem = current_ir_graph;
872 current_ir_graph = irg;
873 res = new_bd_Block(db, arity, in);
874 current_ir_graph = rem;
880 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
881 ir_graph *rem = current_ir_graph;
884 current_ir_graph = irg;
885 res = new_bd_Start(db, block);
886 current_ir_graph = rem;
892 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
894 ir_graph *rem = current_ir_graph;
896 current_ir_graph = rem;
897 res = new_bd_End(db, block);
898 current_ir_graph = rem;
903 /* Creates a Phi node with all predecessors. Calling this constructor
904 is only allowed if the corresponding block is mature. */
906 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
908 ir_graph *rem = current_ir_graph;
910 current_ir_graph = irg;
911 res = new_bd_Phi(db, block,arity, in, mode);
912 current_ir_graph = rem;
918 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
920 ir_graph *rem = current_ir_graph;
922 current_ir_graph = irg;
923 res = new_bd_Const_type(db, block, mode, con, tp);
924 current_ir_graph = rem;
927 } /* new_rd_Const_type */
930 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
932 ir_graph *rem = current_ir_graph;
934 current_ir_graph = irg;
935 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
936 current_ir_graph = rem;
942 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
943 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
944 } /* new_rd_Const_long */
947 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
949 ir_graph *rem = current_ir_graph;
951 current_ir_graph = irg;
952 res = new_bd_Id(db, block, val, mode);
953 current_ir_graph = rem;
959 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
962 ir_graph *rem = current_ir_graph;
964 current_ir_graph = irg;
965 res = new_bd_Proj(db, block, arg, mode, proj);
966 current_ir_graph = rem;
972 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
975 ir_graph *rem = current_ir_graph;
977 current_ir_graph = irg;
978 res = new_bd_defaultProj(db, block, arg, max_proj);
979 current_ir_graph = rem;
982 } /* new_rd_defaultProj */
985 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
987 ir_graph *rem = current_ir_graph;
989 current_ir_graph = irg;
990 res = new_bd_Conv(db, block, op, mode, 0);
991 current_ir_graph = rem;
997 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
999 ir_graph *rem = current_ir_graph;
1001 current_ir_graph = irg;
1002 res = new_bd_Cast(db, block, op, to_tp);
1003 current_ir_graph = rem;
1009 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1011 ir_graph *rem = current_ir_graph;
1013 current_ir_graph = irg;
1014 res = new_bd_Tuple(db, block, arity, in);
1015 current_ir_graph = rem;
1018 } /* new_rd_Tuple */
1025 NEW_RD_DIVOP(DivMod)
1038 NEW_RD_BINOP(Borrow)
1041 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1042 ir_node *op1, ir_node *op2) {
1044 ir_graph *rem = current_ir_graph;
1046 current_ir_graph = irg;
1047 res = new_bd_Cmp(db, block, op1, op2);
1048 current_ir_graph = rem;
1054 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1056 ir_graph *rem = current_ir_graph;
1058 current_ir_graph = irg;
1059 res = new_bd_Jmp(db, block);
1060 current_ir_graph = rem;
1066 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1068 ir_graph *rem = current_ir_graph;
1070 current_ir_graph = irg;
1071 res = new_bd_IJmp(db, block, tgt);
1072 current_ir_graph = rem;
1078 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1080 ir_graph *rem = current_ir_graph;
1082 current_ir_graph = irg;
1083 res = new_bd_Cond(db, block, c);
1084 current_ir_graph = rem;
1090 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1091 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1093 ir_graph *rem = current_ir_graph;
1095 current_ir_graph = irg;
1096 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1097 current_ir_graph = rem;
1103 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1104 ir_node *store, int arity, ir_node **in) {
1106 ir_graph *rem = current_ir_graph;
1108 current_ir_graph = irg;
1109 res = new_bd_Return(db, block, store, arity, in);
1110 current_ir_graph = rem;
1113 } /* new_rd_Return */
1116 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1117 ir_node *store, ir_node *adr, ir_mode *mode) {
1119 ir_graph *rem = current_ir_graph;
1121 current_ir_graph = irg;
1122 res = new_bd_Load(db, block, store, adr, mode);
1123 current_ir_graph = rem;
1129 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1130 ir_node *store, ir_node *adr, ir_node *val) {
1132 ir_graph *rem = current_ir_graph;
1134 current_ir_graph = irg;
1135 res = new_bd_Store(db, block, store, adr, val);
1136 current_ir_graph = rem;
1139 } /* new_rd_Store */
1142 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1143 ir_node *size, ir_type *alloc_type, where_alloc where) {
1145 ir_graph *rem = current_ir_graph;
1147 current_ir_graph = irg;
1148 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1149 current_ir_graph = rem;
1152 } /* new_rd_Alloc */
1155 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1156 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1158 ir_graph *rem = current_ir_graph;
1160 current_ir_graph = irg;
1161 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1162 current_ir_graph = rem;
1168 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1169 ir_node *store, ir_node *objptr, ir_entity *ent) {
1171 ir_graph *rem = current_ir_graph;
1173 current_ir_graph = irg;
1174 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1175 current_ir_graph = rem;
1178 } /* new_rd_simpleSel */
1181 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1182 int arity, ir_node **in, ir_entity *ent) {
1184 ir_graph *rem = current_ir_graph;
1186 current_ir_graph = irg;
1187 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1188 current_ir_graph = rem;
1194 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1195 symconst_kind symkind, ir_type *tp) {
1197 ir_graph *rem = current_ir_graph;
1199 current_ir_graph = irg;
1200 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1201 current_ir_graph = rem;
1204 } /* new_rd_SymConst_type */
1207 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1208 symconst_kind symkind) {
1209 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1211 } /* new_rd_SymConst */
1213 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1214 symconst_symbol sym;
1215 sym.entity_p = symbol;
1216 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1217 } /* new_rd_SymConst_addr_ent */
1219 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1220 symconst_symbol sym;
1221 sym.entity_p = symbol;
1222 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1223 } /* new_rd_SymConst_ofs_ent */
1225 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1226 symconst_symbol sym;
1227 sym.ident_p = symbol;
1228 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1229 } /* new_rd_SymConst_addr_name */
1231 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1232 symconst_symbol sym;
1233 sym.type_p = symbol;
1234 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1235 } /* new_rd_SymConst_type_tag */
1237 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1238 symconst_symbol sym;
1239 sym.type_p = symbol;
1240 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1241 } /* new_rd_SymConst_size */
1243 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1244 symconst_symbol sym;
1245 sym.type_p = symbol;
1246 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1247 } /* new_rd_SymConst_align */
1250 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1252 ir_graph *rem = current_ir_graph;
1255 current_ir_graph = irg;
/* The Sync is created without predecessors; they are added one by one
   below (Sync has dynamic arity, see new_bd_Sync). */
1256 res = new_bd_Sync(db, block);
1257 current_ir_graph = rem;
1259 for (i = 0; i < arity; ++i)
1260 add_Sync_pred(res, in[i]);
1266 new_rd_Bad(ir_graph *irg) {
1267 return get_irg_bad(irg);
1271 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1273 ir_graph *rem = current_ir_graph;
1275 current_ir_graph = irg;
1276 res = new_bd_Confirm(db, block, val, bound, cmp);
1277 current_ir_graph = rem;
1280 } /* new_rd_Confirm */
1283 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1285 ir_graph *rem = current_ir_graph;
1287 current_ir_graph = irg;
1288 res = new_bd_Unknown(m);
1289 current_ir_graph = rem;
1292 } /* new_rd_Unknown */
1295 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1297 ir_graph *rem = current_ir_graph;
1299 current_ir_graph = irg;
1300 res = new_bd_CallBegin(db, block, call);
1301 current_ir_graph = rem;
1304 } /* new_rd_CallBegin */
1307 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1310 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1311 set_irg_end_reg(irg, res);
1312 IRN_VRFY_IRG(res, irg);
1314 } /* new_rd_EndReg */
1317 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1320 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1321 set_irg_end_except(irg, res);
1322 IRN_VRFY_IRG (res, irg);
1324 } /* new_rd_EndExcept */
1327 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1329 ir_graph *rem = current_ir_graph;
1331 current_ir_graph = irg;
1332 res = new_bd_Break(db, block);
1333 current_ir_graph = rem;
1336 } /* new_rd_Break */
1339 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1342 ir_graph *rem = current_ir_graph;
1344 current_ir_graph = irg;
1345 res = new_bd_Filter(db, block, arg, mode, proj);
1346 current_ir_graph = rem;
1349 } /* new_rd_Filter */
1352 new_rd_NoMem(ir_graph *irg) {
1353 return get_irg_no_mem(irg);
1354 } /* new_rd_NoMem */
1357 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1358 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1360 ir_graph *rem = current_ir_graph;
1362 current_ir_graph = irg;
1363 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1364 current_ir_graph = rem;
1370 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1371 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1373 ir_graph *rem = current_ir_graph;
1375 current_ir_graph = irg;
1376 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1377 current_ir_graph = rem;
1382 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1383 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1385 ir_graph *rem = current_ir_graph;
1387 current_ir_graph = irg;
1388 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1389 current_ir_graph = rem;
1392 } /* new_rd_CopyB */
1395 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1396 ir_node *objptr, ir_type *type) {
1398 ir_graph *rem = current_ir_graph;
1400 current_ir_graph = irg;
1401 res = new_bd_InstOf(db, block, store, objptr, type);
1402 current_ir_graph = rem;
1405 } /* new_rd_InstOf */
1408 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1410 ir_graph *rem = current_ir_graph;
1412 current_ir_graph = irg;
1413 res = new_bd_Raise(db, block, store, obj);
1414 current_ir_graph = rem;
1417 } /* new_rd_Raise */
1419 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1420 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1422 ir_graph *rem = current_ir_graph;
1424 current_ir_graph = irg;
1425 res = new_bd_Bound(db, block, store, idx, lower, upper);
1426 current_ir_graph = rem;
1429 } /* new_rd_Bound */
1431 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1433 ir_graph *rem = current_ir_graph;
1435 current_ir_graph = irg;
1436 res = new_bd_Pin(db, block, node);
1437 current_ir_graph = rem;
/* new_rd_ASM: raw constructor for an ASM (inline assembler) node with the
   given inputs and assembler text. Usual current_ir_graph switch/restore.
   NOTE(review): sampled fragment — `ir_node *res;`, `return res;` and the
   closing brace are elided in this chunk. */
1442 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1443 int arity, ir_node *inputs[], ident *asm_text) {
1445 ir_graph *rem = current_ir_graph;
1447 current_ir_graph = irg;
1448 res = new_bd_ASM(db, block, store, arity, inputs, asm_text);
1449 current_ir_graph = rem;
/* ------------------------------------------------------------------------
 * new_r_* convenience wrappers: each forwards to the matching new_rd_*
 * constructor with a NULL dbg_info.  They carry no logic of their own.
 * NOTE(review): sampled fragment — every wrapper's closing brace (and for
 * some, the tail of the parameter list) is elided in this chunk.
 * ------------------------------------------------------------------------ */
1455 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1456 return new_rd_Block(NULL, irg, arity, in);
1458 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1459 return new_rd_Start(NULL, irg, block);
1461 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1462 return new_rd_End(NULL, irg, block);
1464 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1465 return new_rd_Jmp(NULL, irg, block);
1467 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1468 return new_rd_IJmp(NULL, irg, block, tgt);
1470 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1471 return new_rd_Cond(NULL, irg, block, c);
1473 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1474 ir_node *store, int arity, ir_node **in) {
1475 return new_rd_Return(NULL, irg, block, store, arity, in);
/* Constant constructors. */
1477 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1478 ir_mode *mode, tarval *con) {
1479 return new_rd_Const(NULL, irg, block, mode, con);
1481 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1482 ir_mode *mode, long value) {
1483 return new_rd_Const_long(NULL, irg, block, mode, value);
1485 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1486 ir_mode *mode, tarval *con, ir_type *tp) {
1487 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1489 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1490 symconst_symbol value, symconst_kind symkind) {
1491 return new_rd_SymConst(NULL, irg, block, value, symkind);
/* new_r_simpleSel: Sel without array indices (n_index = 0, index = NULL). */
1493 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1494 ir_node *objptr, ir_entity *ent) {
1495 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1497 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1498 ir_node *objptr, int n_index, ir_node **index,
1500 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1502 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1503 ir_node *callee, int arity, ir_node **in,
1505 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
/* Arithmetic and bitwise operations. */
1507 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1508 ir_node *op1, ir_node *op2, ir_mode *mode) {
1509 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1511 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1512 ir_node *op1, ir_node *op2, ir_mode *mode) {
1513 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1515 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1516 ir_node *op, ir_mode *mode) {
1517 return new_rd_Minus(NULL, irg, block, op, mode);
1519 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1520 ir_node *op1, ir_node *op2, ir_mode *mode) {
1521 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1523 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1524 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1525 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode);
1527 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1528 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1529 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode);
1531 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1532 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1533 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode);
1535 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1536 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1537 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode);
1539 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1540 ir_node *op, ir_mode *mode) {
1541 return new_rd_Abs(NULL, irg, block, op, mode);
1543 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1544 ir_node *op1, ir_node *op2, ir_mode *mode) {
1545 return new_rd_And(NULL, irg, block, op1, op2, mode);
1547 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1548 ir_node *op1, ir_node *op2, ir_mode *mode) {
1549 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1551 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1552 ir_node *op1, ir_node *op2, ir_mode *mode) {
1553 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1555 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1556 ir_node *op, ir_mode *mode) {
1557 return new_rd_Not(NULL, irg, block, op, mode);
/* Shift / rotate / carry operations: k is the shift amount. */
1559 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1560 ir_node *op, ir_node *k, ir_mode *mode) {
1561 return new_rd_Shl(NULL, irg, block, op, k, mode);
1563 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1564 ir_node *op, ir_node *k, ir_mode *mode) {
1565 return new_rd_Shr(NULL, irg, block, op, k, mode);
1567 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1568 ir_node *op, ir_node *k, ir_mode *mode) {
1569 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1571 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1572 ir_node *op, ir_node *k, ir_mode *mode) {
1573 return new_rd_Rot(NULL, irg, block, op, k, mode);
1575 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1576 ir_node *op, ir_node *k, ir_mode *mode) {
1577 return new_rd_Carry(NULL, irg, block, op, k, mode);
1579 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1580 ir_node *op, ir_node *k, ir_mode *mode) {
1581 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1583 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1584 ir_node *op1, ir_node *op2) {
1585 return new_rd_Cmp(NULL, irg, block, op1, op2);
1587 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1588 ir_node *op, ir_mode *mode) {
1589 return new_rd_Conv(NULL, irg, block, op, mode);
1591 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1592 return new_rd_Cast(NULL, irg, block, op, to_tp);
1594 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1595 ir_node **in, ir_mode *mode) {
1596 return new_rd_Phi(NULL, irg, block, arity, in, mode);
/* Memory operations. */
1598 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1599 ir_node *store, ir_node *adr, ir_mode *mode) {
1600 return new_rd_Load(NULL, irg, block, store, adr, mode);
1602 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1603 ir_node *store, ir_node *adr, ir_node *val) {
1604 return new_rd_Store(NULL, irg, block, store, adr, val);
1606 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1607 ir_node *size, ir_type *alloc_type, where_alloc where) {
1608 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1610 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1611 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1612 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1614 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1615 return new_rd_Sync(NULL, irg, block, arity, in);
1617 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1618 ir_mode *mode, long proj) {
1619 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1621 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1623 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1625 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1626 int arity, ir_node **in) {
1627 return new_rd_Tuple(NULL, irg, block, arity, in );
1629 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1630 ir_node *val, ir_mode *mode) {
1631 return new_rd_Id(NULL, irg, block, val, mode);
1633 ir_node *new_r_Bad(ir_graph *irg) {
1634 return new_rd_Bad(irg);
1636 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1637 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1639 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1640 return new_rd_Unknown(irg, m);
/* Interprocedural-view constructors. */
1642 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1643 return new_rd_CallBegin(NULL, irg, block, callee);
1645 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1646 return new_rd_EndReg(NULL, irg, block);
1648 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1649 return new_rd_EndExcept(NULL, irg, block);
1651 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1652 return new_rd_Break(NULL, irg, block);
1654 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1655 ir_mode *mode, long proj) {
1656 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1658 ir_node *new_r_NoMem(ir_graph *irg) {
1659 return new_rd_NoMem(irg);
1661 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1662 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1663 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1665 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1666 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1667 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1669 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1670 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1671 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1673 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1675 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1677 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1678 ir_node *store, ir_node *obj) {
1679 return new_rd_Raise(NULL, irg, block, store, obj);
1681 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1682 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1683 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1685 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1686 return new_rd_Pin(NULL, irg, block, node);
1688 ir_node *new_r_ASM(ir_graph *irg, ir_node *block, ir_node *store,
1689 int arity, ir_node *inputs[], ident *asm_text) {
1690 return new_rd_ASM(NULL, irg, block, store, arity, inputs, asm_text);
1693 /** ********************/
1694 /** public interfaces */
1695 /** construction tools */
1699  * - create a new Start node in the current block
1701  * @return s - pointer to the created Start node
/* new_d_Start: builds a Start node (mode_T, no predecessors) in the current
   block of the current graph, optimizes and verifies it.
   NOTE(review): sampled fragment — `ir_node *res;` and the trailing
   `return res; }` lines are elided in this chunk. */
1706 new_d_Start(dbg_info *db) {
1709 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1710 op_Start, mode_T, 0, NULL);
1712 res = optimize_node(res);
1713 IRN_VRFY_IRG(res, current_ir_graph);
/* new_d_End: builds an End node (mode_X) in the current block. The arity of
   -1 marks a node with a dynamic (growable) in-array for keep-alive edges.
   NOTE(review): sampled fragment — return type line, `ir_node *res;` and
   `return res; }` are elided in this chunk. */
1718 new_d_End(dbg_info *db) {
1720 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1721 op_End, mode_X, -1, NULL);
1722 res = optimize_node(res);
1723 IRN_VRFY_IRG(res, current_ir_graph);
1728 /* Constructs a Block with a fixed number of predecessors.
1729    Does set current_block. Can be used with automatic Phi
1730    node construction. */
/* new_d_Block: during phase_building it also allocates the per-block
   graph_arr (one slot per local variable) used by the SSA construction,
   and skips optimize_node() if any predecessor is an Unknown.
   NOTE(review): sampled fragment — declarations of `res`, `i` and
   `has_unknown = 1`, plus some closing braces and `return res;`, are elided. */
1732 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1735 int has_unknown = 0;
1737 res = new_bd_Block(db, arity, in);
1739 /* Create and initialize array for Phi-node construction. */
1740 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1741 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1742 current_ir_graph->n_loc);
1743 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1746 for (i = arity-1; i >= 0; i--)
1747 if (get_irn_op(in[i]) == op_Unknown) {
/* An Unknown predecessor disables optimization: the block shape is not final. */
1752 if (!has_unknown) res = optimize_node(res);
1753 current_ir_graph->current_block = res;
1755 IRN_VRFY_IRG(res, current_ir_graph);
1760 /* ***********************************************************************/
1761 /* Methods necessary for automatic Phi node creation */
1763 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1764 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1765 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1766 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1768 Call Graph: ( A ---> B == A "calls" B)
1770 get_value mature_immBlock
1778 get_r_value_internal |
1782 new_rd_Phi0 new_rd_Phi_in
1784 * *************************************************************************** */
1786 /** Creates a Phi node with 0 predecessors. */
/* Used as a placeholder for a value in a block that is not yet mature;
   mature_immBlock later replaces it with a real Phi.
   NOTE(review): sampled fragment — `ir_node *res;` and `return res; }`
   lines are elided in this chunk. */
1787 static INLINE ir_node *
1788 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1791 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1792 IRN_VRFY_IRG(res, irg);
1796 /* There are two implementations of the Phi node construction. The first
1797 is faster, but does not work for blocks with more than 2 predecessors.
1798 The second works always but is slower and causes more unnecessary Phi
1800 Select the implementations by the following preprocessor flag set in
1802 #if USE_FAST_PHI_CONSTRUCTION
1804 /* This is a stack used for allocating and deallocating nodes in
1805 new_rd_Phi_in. The original implementation used the obstack
1806 to model this stack, now it is explicit. This reduces side effects.
1808 #if USE_EXPLICIT_PHI_IN_STACK
/* new_Phi_in_stack: allocates the explicit stack used to recycle Phi nodes
   during fast Phi construction (USE_EXPLICIT_PHI_IN_STACK build only).
   NOTE(review): sampled fragment — return type lines, `pos` initialization
   and `return res;` are elided in this chunk. */
1810 new_Phi_in_stack(void) {
1813 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1815 res->stack = NEW_ARR_F (ir_node *, 0);
1819 } /* new_Phi_in_stack */
/* free_Phi_in_stack: releases the dynamic array (the struct free is elided
   here — TODO confirm `free(s)` follows in the full source). */
1822 free_Phi_in_stack(Phi_in_stack *s) {
1823 DEL_ARR_F(s->stack);
1825 } /* free_Phi_in_stack */
/* free_to_Phi_in_stack: pushes a discarded Phi onto the recycle stack;
   appends when the stack is full, otherwise overwrites the slot at pos. */
1828 free_to_Phi_in_stack(ir_node *phi) {
1829 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1830 current_ir_graph->Phi_in_stack->pos)
1831 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1833 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1835 (current_ir_graph->Phi_in_stack->pos)++;
1836 } /* free_to_Phi_in_stack */
/* alloc_or_pop_from_Phi_in_stack: returns a Phi node for the fast SSA
   construction — either freshly allocated (empty stack) or recycled from
   the explicit Phi stack with its in-array rebuilt on the obstack.
   NOTE(review): sampled fragment — the `if (pos == 0)`/`else` structure,
   `res` declaration and `return res;` are elided. Also `db` on line 1848
   is not a parameter of this function in the visible text — looks like a
   latent compile error in this #if'd code path; TODO confirm against the
   full source. */
1838 static INLINE ir_node *
1839 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1840 int arity, ir_node **in) {
1842 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1843 int pos = current_ir_graph->Phi_in_stack->pos;
1847 /* We need to allocate a new node */
1848 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1849 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1851 /* reuse the old node and initialize it again. */
1854 assert(res->kind == k_ir_node);
1855 assert(res->op == op_Phi);
1860 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1861 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
1863 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1865 (current_ir_graph->Phi_in_stack->pos)--;
1868 } /* alloc_or_pop_from_Phi_in_stack */
1869 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1872  * Creates a Phi node with a given, fixed array **in of predecessors.
1873  * If the Phi node is unnecessary, as the same value reaches the block
1874  * through all control flow paths, it is eliminated and the value
1875  * returned directly. This constructor is only intended for use in
1876  * the automatic Phi node generation triggered by get_value or mature.
1877  * The implementation is quite tricky and depends on the fact, that
1878  * the nodes are allocated on a stack:
1879  * The in array contains predecessors and NULLs. The NULLs appear,
1880  * if get_r_value_internal, that computed the predecessors, reached
1881  * the same block on two paths. In this case the same value reaches
1882  * this block on both paths, there is no definition in between. We need
1883  * not allocate a Phi where these path's merge, but we have to communicate
1884  * this fact to the caller. This happens by returning a pointer to the
1885  * node the caller _will_ allocate. (Yes, we predict the address. We can
1886  * do so because the nodes are allocated on the obstack.) The caller then
1887  * finds a pointer to itself and, when this routine is called again,
1888  * eliminates itself.
/* NOTE(review): sampled fragment — several interior lines (loop bodies,
   the `known = in[i];`/`break` logic, the i==ins branch tail and
   `return res;`) are elided in this chunk. */
1890 static INLINE ir_node *
1891 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins) {
1893 ir_node *res, *known;
1895 /* Allocate a new node on the obstack. This can return a node to
1896 which some of the pointers in the in-array already point.
1897 Attention: the constructor copies the in array, i.e., the later
1898 changes to the array in this routine do not affect the
1899 constructed node! If the in array contains NULLs, there will be
1900 missing predecessors in the returned node. Is this a possible
1901 internal state of the Phi node generation? */
1902 #if USE_EXPLICIT_PHI_IN_STACK
1903 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1905 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1906 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1909 /* The in-array can contain NULLs. These were returned by
1910 get_r_value_internal if it reached the same block/definition on a
1911 second path. The NULLs are replaced by the node itself to
1912 simplify the test in the next loop. */
1913 for (i = 0; i < ins; ++i) {
1918 /* This loop checks whether the Phi has more than one predecessor.
1919 If so, it is a real Phi node and we break the loop. Else the Phi
1920 node merges the same definition on several paths and therefore is
1922 for (i = 0; i < ins; ++i) {
1923 if (in[i] == res || in[i] == known)
1932 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1934 #if USE_EXPLICIT_PHI_IN_STACK
1935 free_to_Phi_in_stack(res);
1937 edges_node_deleted(res, current_ir_graph);
1938 obstack_free(current_ir_graph->obst, res);
1942 res = optimize_node (res);
1943 IRN_VRFY_IRG(res, irg);
1946 /* return the pointer to the Phi node. This node might be deallocated! */
1948 } /* new_rd_Phi_in */
1951 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1954  * Allocates and returns this node. The routine called to allocate the
1955  * node might optimize it away and return a real value, or even a pointer
1956  * to a deallocated Phi node on top of the obstack!
1957  * This function is called with an in-array of proper size.
/* phi_merge (fast-construction variant): collects the value of local
   variable `pos` from every predecessor block into nin[], then builds the
   Phi via new_rd_Phi_in and records the result in graph_arr — unless a
   later set_value() already stored a value there.
   NOTE(review): sampled fragment — the return type line, the `int i;`
   declaration, some closing braces and `return res;` are elided. */
1960 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
1961 ir_node *prevBlock, *res;
1964 /* This loop goes to all predecessor blocks of the block the Phi node is in
1965 and there finds the operands of the Phi node by calling
1966 get_r_value_internal. */
1967 for (i = 1; i <= ins; ++i) {
1968 assert (block->in[i]);
1969 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1971 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1974 /* After collecting all predecessors into the array nin a new Phi node
1975 with these predecessors is created. This constructor contains an
1976 optimization: If all predecessors of the Phi node are identical it
1977 returns the only operand instead of a new Phi node. If the value
1978 passes two different control flow edges without being defined, and
1979 this is the second path treated, a pointer to the node that will be
1980 allocated for the first path (recursion) is returned. We already
1981 know the address of this node, as it is the next node to be allocated
1982 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1983 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1985 /* Now we know the value for "pos" and can enter it in the array with
1986 all known local variables. Attention: this might be a pointer to
1987 a node, that later will be allocated!!! See new_rd_Phi_in().
1988 If this is called in mature, after some set_value() in the same block,
1989 the proper value must not be overwritten:
1991 get_value (makes Phi0, put's it into graph_arr)
1992 set_value (overwrites Phi0 in graph_arr)
1993 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1996 if (!block->attr.block.graph_arr[pos]) {
1997 block->attr.block.graph_arr[pos] = res;
1999 /* printf(" value already computed by %s\n",
2000 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2007  * This function returns the last definition of a variable. In case
2008  * this variable was last defined in a previous block, Phi nodes are
2009  * inserted. If the part of the firm graph containing the definition
2010  * is not yet constructed, a dummy Phi node is returned.
/* NOTE(review): sampled fragment — the return type line, local declarations
   (`res`, `nin`), some braces and the final `return res;` are elided. */
2013 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2016 /* There are 4 cases to treat.
2018 1. The block is not mature and we visit it the first time. We can not
2019 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2020 predecessors is returned. This node is added to the linked list (field
2021 "link") of the containing block to be completed when this block is
2022 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2025 2. The value is already known in this block, graph_arr[pos] is set and we
2026 visit the block the first time. We can return the value without
2027 creating any new nodes.
2029 3. The block is mature and we visit it the first time. A Phi node needs
2030 to be created (phi_merge). If the Phi is not needed, as all it's
2031 operands are the same value reaching the block through different
2032 paths, it's optimized away and the value itself is returned.
2034 4. The block is mature, and we visit it the second time. Now two
2035 subcases are possible:
2036 * The value was computed completely the last time we were here. This
2037 is the case if there is no loop. We can return the proper value.
2038 * The recursion that visited this node and set the flag did not
2039 return yet. We are computing a value in a loop and need to
2040 break the recursion without knowing the result yet.
2041 @@@ strange case. Straight forward we would create a Phi before
2042 starting the computation of it's predecessors. In this case we will
2043 find a Phi here in any case. The problem is that this implementation
2044 only creates a Phi after computing the predecessors, so that it is
2045 hard to compute self references of this Phi. @@@
2046 There is no simple check for the second subcase. Therefore we check
2047 for a second visit and treat all such cases as the second subcase.
2048 Anyways, the basic situation is the same: we reached a block
2049 on two paths without finding a definition of the value: No Phi
2050 nodes are needed on both paths.
2051 We return this information "Two paths, no Phi needed" by a very tricky
2052 implementation that relies on the fact that an obstack is a stack and
2053 will return a node with the same address on different allocations.
2054 Look also at phi_merge and new_rd_phi_in to understand this.
2055 @@@ Unfortunately this does not work, see testprogram
2056 three_cfpred_example.
2060 /* case 4 -- already visited. */
2061 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2063 /* visited the first time */
2064 set_irn_visited(block, get_irg_visited(current_ir_graph));
2066 /* Get the local valid value */
2067 res = block->attr.block.graph_arr[pos];
2069 /* case 2 -- If the value is actually computed, return it. */
2070 if (res) return res;
2072 if (block->attr.block.is_matured) { /* case 3 */
2074 /* The Phi has the same amount of ins as the corresponding block. */
2075 int ins = get_irn_arity(block);
2077 NEW_ARR_A(ir_node *, nin, ins);
2079 /* Phi merge collects the predecessors and then creates a node. */
2080 res = phi_merge(block, pos, mode, nin, ins);
2082 } else { /* case 1 */
2083 /* The block is not mature, we don't know how many in's are needed. A Phi
2084 with zero predecessors is created. Such a Phi node is called Phi0
2085 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2086 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2088 The Phi0 has to remember the pos of it's internal value. If the real
2089 Phi is computed, pos is used to update the array with the local
2092 res = new_rd_Phi0(current_ir_graph, block, mode);
2093 res->attr.phi0_pos = pos;
2094 res->link = block->link;
2098 /* If we get here, the frontend missed a use-before-definition error */
2101 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2102 assert(mode->code >= irm_F && mode->code <= irm_P);
2103 res = new_rd_Const(NULL, current_ir_graph, block, mode, tarval_mode_null[mode->code]);
2106 /* The local valid value is available now. */
2107 block->attr.block.graph_arr[pos] = res;
2110 } /* get_r_value_internal */
2115 it starts the recursion. This causes an Id at the entry of
2116 every block that has no definition of the value! **/
2118 #if USE_EXPLICIT_PHI_IN_STACK
/* No-op stubs: when the explicit Phi stack is compiled out, the API still
   exists so callers need no #ifdefs. */
2120 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2121 void free_Phi_in_stack(Phi_in_stack *s) {}
/* new_rd_Phi_in (general variant): builds a Phi from in[], eliminating it
   when at most one distinct non-Bad predecessor remains. phi0, if non-NULL,
   is the placeholder Phi0 this node will replace; self-references through
   phi0 are rewritten to the new node before the redundancy check.
   Memory Phis (mode_M) are added to the End keep-alives so endless loops
   keep them live.
   NOTE(review): sampled fragment — `int i;`, the `known` bookkeeping and
   break, several branch tails and the final `return res;` are elided. */
2124 static INLINE ir_node *
2125 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2126 ir_node **in, int ins, ir_node *phi0) {
2128 ir_node *res, *known;
2130 /* Allocate a new node on the obstack. The allocation copies the in
2132 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2133 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2135 /* This loop checks whether the Phi has more than one predecessor.
2136 If so, it is a real Phi node and we break the loop. Else the
2137 Phi node merges the same definition on several paths and therefore
2138 is not needed. Don't consider Bad nodes! */
2140 for (i=0; i < ins; ++i)
2144 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2146 /* Optimize self referencing Phis: We can't detect them yet properly, as
2147 they still refer to the Phi0 they will replace. So replace right now. */
2148 if (phi0 && in[i] == phi0) in[i] = res;
2150 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2158 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2161 edges_node_deleted(res, current_ir_graph);
2162 obstack_free (current_ir_graph->obst, res);
2163 if (is_Phi(known)) {
2164 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2165 order, an enclosing Phi node may become superfluous. */
2166 res = optimize_in_place_2(known);
2168 exchange(known, res);
2174 /* A undefined value, e.g., in unreachable code. */
2178 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2179 IRN_VRFY_IRG(res, irg);
2180 /* Memory Phis in endless loops must be kept alive.
2181 As we can't distinguish these easily we keep all of them alive. */
2182 if ((res->op == op_Phi) && (mode == mode_M))
2183 add_End_keepalive(get_irg_end(irg), res);
2187 } /* new_rd_Phi_in */
2190 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2192 #if PRECISE_EXC_CONTEXT
2194 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2197  * Construct a new frag_array for node n.
2198  * Copy the content from the current graph_arr of the corresponding block:
2199  * this is the current state.
2200  * Set ProjM(n) as current memory state.
2201  * Further the last entry in frag_arr of current block points to n. This
2202  * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* NOTE(review): sampled fragment — declarations of `arr`/`opt`, the
   `set_optimize(opt);` restore and `return arr;` are elided here. */
2204 static INLINE ir_node **new_frag_arr(ir_node *n) {
2208 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2209 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2210 sizeof(ir_node *)*current_ir_graph->n_loc);
2212 /* turn off optimization before allocating Proj nodes, as res isn't
2214 opt = get_opt_optimize(); set_optimize(0);
2215 /* Here we rely on the fact that all frag ops have Memory as first result! */
2216 if (get_irn_op(n) == op_Call)
2217 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2218 else if (get_irn_op(n) == op_CopyB)
2219 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2221 assert((pn_Quot_M == pn_DivMod_M) &&
2222 (pn_Quot_M == pn_Div_M) &&
2223 (pn_Quot_M == pn_Mod_M) &&
2224 (pn_Quot_M == pn_Load_M) &&
2225 (pn_Quot_M == pn_Store_M) &&
2226 (pn_Quot_M == pn_Alloc_M) &&
2227 (pn_Quot_M == pn_Bound_M));
2228 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2232 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2234 } /* new_frag_arr */
2237  * Returns the frag_arr from a node.
/* Dispatches on the opcode to the union member holding the fragile-op
   exception attributes; the last case is the generic except attribute.
   NOTE(review): sampled fragment — the `case iro_*:` labels, default case
   and closing braces are elided in this chunk. */
2239 static INLINE ir_node **get_frag_arr(ir_node *n) {
2240 switch (get_irn_opcode(n)) {
2242 return n->attr.call.exc.frag_arr;
2244 return n->attr.alloc.exc.frag_arr;
2246 return n->attr.load.exc.frag_arr;
2248 return n->attr.store.exc.frag_arr;
2250 return n->attr.except.frag_arr;
2252 } /* get_frag_arr */
/* set_frag_value: writes val into slot pos of a frag_arr (only if empty)
   and propagates along the chain of fragile ops stored at index n_loc-1.
   Two variants are visible here: a recursive one and an iterative rewrite
   bounded at 1000 steps (guarding against a cyclic chain).
   NOTE(review): sampled fragment — the return type line, the iterative
   loop's `break`/pointer-advance lines and closing braces are elided. */
2255 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2257 if (!frag_arr[pos]) frag_arr[pos] = val;
2258 if (frag_arr[current_ir_graph->n_loc - 1]) {
2259 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2260 assert(arr != frag_arr && "Endless recursion detected");
2261 set_frag_value(arr, pos, val);
2266 for (i = 0; i < 1000; ++i) {
2267 if (!frag_arr[pos]) {
2268 frag_arr[pos] = val;
2270 if (frag_arr[current_ir_graph->n_loc - 1]) {
2271 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2277 assert(0 && "potential endless recursion");
2279 } /* set_frag_value */
/* get_r_frag_value_internal: like get_r_value_internal, but reads the value
   of local `pos` as seen after the fragile op cfOp (precise exception
   context). Falls back to Phi construction when a set_value() intervened.
   NOTE(review): sampled fragment — the return type line, `res`/`frag_arr`
   declarations, the `if (!res)` structure and `return res;` are elided. */
2282 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2286 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2288 frag_arr = get_frag_arr(cfOp);
2289 res = frag_arr[pos];
2291 if (block->attr.block.graph_arr[pos]) {
2292 /* There was a set_value() after the cfOp and no get_value before that
2293 set_value(). We must build a Phi node now. */
2294 if (block->attr.block.is_matured) {
2295 int ins = get_irn_arity(block);
2297 NEW_ARR_A(ir_node *, nin, ins);
2298 res = phi_merge(block, pos, mode, nin, ins);
2300 res = new_rd_Phi0(current_ir_graph, block, mode);
2301 res->attr.phi0.pos = pos;
2302 res->link = block->link;
2306 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2307 but this should be better: (remove comment if this works) */
2308 /* It's a Phi, we can write this into all graph_arrs with NULL */
2309 set_frag_value(block->attr.block.graph_arr, pos, res);
2311 res = get_r_value_internal(block, pos, mode);
2312 set_frag_value(block->attr.block.graph_arr, pos, res);
2316 } /* get_r_frag_value_internal */
2317 #endif /* PRECISE_EXC_CONTEXT */
2320  * Computes the predecessors for the real phi node, and then
2321  * allocates and returns this node. The routine called to allocate the
2322  * node might optimize it away and return a real value.
2323  * This function must be called with an in-array of proper size.
/* phi_merge (general variant): places a Phi0 placeholder to break recursion
   (or, in the start block, asks the frontend callback / tarval_bad for an
   initial value), collects each predecessor's value — via the fragile-op
   frag_arr when precise exception context is on — and finally builds the
   real Phi with new_rd_Phi_in, exchanging the Phi0 for it.
   NOTE(review): sampled fragment — the return type line, `int i;`, the
   `phi0 = NULL;` initialization, `continue` statements, some closing
   braces and `return res;` are elided in this chunk. */
2326 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2327 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2330 /* If this block has no value at pos create a Phi0 and remember it
2331 in graph_arr to break recursions.
2332 Else we may not set graph_arr as there a later value is remembered. */
2334 if (!block->attr.block.graph_arr[pos]) {
2335 if (block == get_irg_start_block(current_ir_graph)) {
2336 /* Collapsing to Bad tarvals is no good idea.
2337 So we call a user-supplied routine here that deals with this case as
2338 appropriate for the given language. Sorrily the only help we can give
2339 here is the position.
2341 Even if all variables are defined before use, it can happen that
2342 we get to the start block, if a Cond has been replaced by a tuple
2343 (bad, jmp). In this case we call the function needlessly, eventually
2344 generating an non existent error.
2345 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2348 if (default_initialize_local_variable) {
2349 ir_node *rem = get_cur_block();
2351 set_cur_block(block);
2352 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2356 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2357 /* We don't need to care about exception ops in the start block.
2358 There are none by definition. */
2359 return block->attr.block.graph_arr[pos];
2361 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2362 block->attr.block.graph_arr[pos] = phi0;
2363 #if PRECISE_EXC_CONTEXT
2364 if (get_opt_precise_exc_context()) {
2365 /* Set graph_arr for fragile ops. Also here we should break recursion.
2366 We could choose a cyclic path through an cfop. But the recursion would
2367 break at some point. */
2368 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2374 /* This loop goes to all predecessor blocks of the block the Phi node
2375 is in and there finds the operands of the Phi node by calling
2376 get_r_value_internal. */
2377 for (i = 1; i <= ins; ++i) {
2378 prevCfOp = skip_Proj(block->in[i]);
2380 if (is_Bad(prevCfOp)) {
2381 /* In case a Cond has been optimized we would get right to the start block
2382 with an invalid definition. */
2383 nin[i-1] = new_Bad();
2386 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2388 if (!is_Bad(prevBlock)) {
2389 #if PRECISE_EXC_CONTEXT
2390 if (get_opt_precise_exc_context() &&
2391 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2392 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2393 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2396 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2398 nin[i-1] = new_Bad();
2402 /* We want to pass the Phi0 node to the constructor: this finds additional
2403 optimization possibilities.
2404 The Phi0 node either is allocated in this function, or it comes from
2405 a former call to get_r_value_internal. In this case we may not yet
2406 exchange phi0, as this is done in mature_immBlock. */
2408 phi0_all = block->attr.block.graph_arr[pos];
2409 if (!((get_irn_op(phi0_all) == op_Phi) &&
2410 (get_irn_arity(phi0_all) == 0) &&
2411 (get_nodes_block(phi0_all) == block)))
2417 /* After collecting all predecessors into the array nin a new Phi node
2418 with these predecessors is created. This constructor contains an
2419 optimization: If all predecessors of the Phi node are identical it
2420 returns the only operand instead of a new Phi node. */
2421 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2423 /* In case we allocated a Phi0 node at the beginning of this procedure,
2424 we need to exchange this Phi0 with the real Phi. */
2426 exchange(phi0, res);
2427 block->attr.block.graph_arr[pos] = res;
2428 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2429 only an optimization. */
2436 * This function returns the last definition of a variable. In case
2437 * this variable was last defined in a previous block, Phi nodes are
2438 * inserted. If the part of the firm graph containing the definition
2439 * is not yet constructed, a dummy Phi node is returned.
/*
 * @param block  block to start the backward search in
 * @param pos    index into the block's graph_arr value array
 * @param mode   mode of the requested value
 *
 * Uses the graph's visited flag to detect a second visit of the same block
 * (case 4 below); callers must bump the visited counter before the initial
 * call (see get_d_value / get_store).
 */
2442 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2444 /* There are 4 cases to treat.
2446 1. The block is not mature and we visit it the first time. We can not
2447 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2448 predecessors is returned. This node is added to the linked list (field
2449 "link") of the containing block to be completed when this block is
2450 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2453 2. The value is already known in this block, graph_arr[pos] is set and we
2454 visit the block the first time. We can return the value without
2455 creating any new nodes.
2457 3. The block is mature and we visit it the first time. A Phi node needs
2458 to be created (phi_merge). If the Phi is not needed, as all it's
2459 operands are the same value reaching the block through different
2460 paths, it's optimized away and the value itself is returned.
2462 4. The block is mature, and we visit it the second time. Now two
2463 subcases are possible:
2464 * The value was computed completely the last time we were here. This
2465 is the case if there is no loop. We can return the proper value.
2466 * The recursion that visited this node and set the flag did not
2467 return yet. We are computing a value in a loop and need to
2468 break the recursion. This case only happens if we visited
2469 the same block with phi_merge before, which inserted a Phi0.
2470 So we return the Phi0.
2473 /* case 4 -- already visited. */
2474 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2475 /* As phi_merge allocates a Phi0 this value is always defined. Here
2476 is the critical difference of the two algorithms. */
2477 assert(block->attr.block.graph_arr[pos]);
2478 return block->attr.block.graph_arr[pos];
2481 /* visited the first time */
2482 set_irn_visited(block, get_irg_visited(current_ir_graph));
2484 /* Get the local valid value */
2485 res = block->attr.block.graph_arr[pos];
2487 /* case 2 -- If the value is actually computed, return it. */
2488 if (res) { return res; };
2490 if (block->attr.block.is_matured) { /* case 3 */
2492 /* The Phi has the same amount of ins as the corresponding block. */
2493 int ins = get_irn_arity(block);
2495 NEW_ARR_A (ir_node *, nin, ins);
2497 /* Phi merge collects the predecessors and then creates a node. */
2498 res = phi_merge (block, pos, mode, nin, ins);
2500 } else { /* case 1 */
2501 /* The block is not mature, we don't know how many in's are needed. A Phi
2502 with zero predecessors is created. Such a Phi node is called Phi0
2503 node. The Phi0 is then added to the list of Phi0 nodes in this block
2504 to be matured by mature_immBlock later.
2505 The Phi0 has to remember the pos of it's internal value. If the real
2506 Phi is computed, pos is used to update the array with the local
2508 res = new_rd_Phi0(current_ir_graph, block, mode);
2509 res->attr.phi0.pos = pos;
/* Prepend res to the block's pending Phi0 list threaded through ->link;
   mature_immBlock walks this list later. */
2510 res->link = block->link;
2514 /* If we get here, the frontend missed a use-before-definition error */
2517 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2518 assert(mode->code >= irm_F && mode->code <= irm_P);
2519 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2520 get_mode_null(mode));
2523 /* The local valid value is available now. */
2524 block->attr.block.graph_arr[pos] = res;
2527 } /* get_r_value_internal */
2529 #endif /* USE_FAST_PHI_CONSTRUCTION */
2531 /* ************************************************************************** */
2534 * Finalize a Block node, when all control flows are known.
2535 * Acceptable parameters are only Block nodes.
/*
 * Finalizes an immature block once all control-flow predecessors are known:
 * fixes the backedge array, matures every pending Phi0 hanging off the
 * block's link list via phi_merge, and optimizes the finished block in place.
 */
2538 mature_immBlock(ir_node *block) {
2543 assert(get_irn_opcode(block) == iro_Block);
2544 /* @@@ should be commented in
2545 assert (!get_Block_matured(block) && "Block already matured"); */
2547 if (!get_Block_matured(block)) {
/* -1: in[0] is the block's own head entry, not a CF predecessor. */
2548 ins = ARR_LEN(block->in)-1;
2549 /* Fix block parameters */
2550 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2552 /* An array for building the Phi nodes. */
2553 NEW_ARR_A(ir_node *, nin, ins);
2555 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each phi_merge needs a fresh visited counter for its recursive search. */
2557 for (n = block->link; n; n = next) {
2558 inc_irg_visited(current_ir_graph);
2560 exchange(n, phi_merge(block, n->attr.phi0.pos, n->mode, nin, ins));
2563 block->attr.block.is_matured = 1;
2565 /* Now, as the block is a finished firm node, we can optimize it.
2566 Since other nodes have been allocated since the block was created
2567 we can not free the node on the obstack. Therefore we have to call
2569 Unfortunately the optimization does not change a lot, as all allocated
2570 nodes refer to the unoptimized node.
2571 We can call _2, as global cse has no effect on blocks. */
2572 block = optimize_in_place_2(block);
2573 IRN_VRFY_IRG(block, current_ir_graph);
2575 } /* mature_immBlock */
2578 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2579 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2583 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2584 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2588 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2589 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2590 } /* new_d_Const_long */
2593 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2594 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2595 } /* new_d_Const_type */
2599 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2600 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2604 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2605 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2609 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2611 assert(arg->op == op_Cond);
2612 arg->attr.cond.kind = fragmentary;
2613 arg->attr.cond.default_proj = max_proj;
2614 res = new_Proj(arg, mode_X, max_proj);
2616 } /* new_d_defaultProj */
2619 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2620 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2624 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2625 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2626 } /* new_d_strictConv */
2629 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2630 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2634 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2635 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2644 * Allocate the frag array.
/*
 * Lazily attaches a fragment-value array to a fragile op `res` so that
 * PRECISE_EXC_CONTEXT can track values along exception edges.
 *
 * @param res         the freshly constructed node
 * @param op          the expected opcode; res may differ if it was optimized away
 * @param frag_store  address of the node's frag_arr attribute slot
 */
2646 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2647 if (get_opt_precise_exc_context()) {
2648 if ((current_ir_graph->phase_state == phase_building) &&
2649 (get_irn_op(res) == op) && /* Could be optimized away. */
2650 !*frag_store) /* Could be a cse where the arr is already set. */ {
2651 *frag_store = new_frag_arr(res);
2654 } /* allocate_frag_arr */
2657 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2659 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode);
2660 #if PRECISE_EXC_CONTEXT
2661 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2668 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2670 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode);
2671 #if PRECISE_EXC_CONTEXT
2672 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2676 } /* new_d_DivMod */
2679 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2681 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode);
2682 #if PRECISE_EXC_CONTEXT
2683 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2690 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2692 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode);
2693 #if PRECISE_EXC_CONTEXT
2694 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2713 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2714 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2718 new_d_Jmp(dbg_info *db) {
2719 return new_bd_Jmp(db, current_ir_graph->current_block);
2723 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2724 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2728 new_d_Cond(dbg_info *db, ir_node *c) {
2729 return new_bd_Cond(db, current_ir_graph->current_block, c);
2733 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2736 res = new_bd_Call(db, current_ir_graph->current_block,
2737 store, callee, arity, in, tp);
2738 #if PRECISE_EXC_CONTEXT
2739 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2746 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2747 return new_bd_Return(db, current_ir_graph->current_block,
2749 } /* new_d_Return */
2752 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2754 res = new_bd_Load(db, current_ir_graph->current_block,
2756 #if PRECISE_EXC_CONTEXT
2757 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2764 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2766 res = new_bd_Store(db, current_ir_graph->current_block,
2768 #if PRECISE_EXC_CONTEXT
2769 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2776 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2777 where_alloc where) {
2779 res = new_bd_Alloc(db, current_ir_graph->current_block,
2780 store, size, alloc_type, where);
2781 #if PRECISE_EXC_CONTEXT
2782 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2789 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2790 ir_node *size, ir_type *free_type, where_alloc where) {
2791 return new_bd_Free(db, current_ir_graph->current_block,
2792 store, ptr, size, free_type, where);
2796 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2797 /* GL: objptr was called frame before. Frame was a bad choice for the name
2798 as the operand could as well be a pointer to a dynamic object. */
2800 return new_bd_Sel(db, current_ir_graph->current_block,
2801 store, objptr, 0, NULL, ent);
2802 } /* new_d_simpleSel */
2805 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2806 return new_bd_Sel(db, current_ir_graph->current_block,
2807 store, objptr, n_index, index, sel);
2811 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2812 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2814 } /* new_d_SymConst_type */
2817 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
2818 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2819 value, kind, firm_unknown_type);
2820 } /* new_d_SymConst */
2823 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2824 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2830 return _new_d_Bad();
2834 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2835 return new_bd_Confirm(db, current_ir_graph->current_block,
2837 } /* new_d_Confirm */
2840 new_d_Unknown(ir_mode *m) {
2841 return new_bd_Unknown(m);
2842 } /* new_d_Unknown */
2845 new_d_CallBegin(dbg_info *db, ir_node *call) {
2846 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2847 } /* new_d_CallBegin */
2850 new_d_EndReg(dbg_info *db) {
2851 return new_bd_EndReg(db, current_ir_graph->current_block);
2852 } /* new_d_EndReg */
2855 new_d_EndExcept(dbg_info *db) {
2856 return new_bd_EndExcept(db, current_ir_graph->current_block);
2857 } /* new_d_EndExcept */
2860 new_d_Break(dbg_info *db) {
2861 return new_bd_Break(db, current_ir_graph->current_block);
2865 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2866 return new_bd_Filter(db, current_ir_graph->current_block,
2868 } /* new_d_Filter */
2871 (new_d_NoMem)(void) {
2872 return _new_d_NoMem();
2876 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2877 ir_node *ir_true, ir_mode *mode) {
2878 return new_bd_Mux(db, current_ir_graph->current_block,
2879 sel, ir_false, ir_true, mode);
2883 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2884 return new_bd_Psi(db, current_ir_graph->current_block,
2885 arity, conds, vals, mode);
2888 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2889 ir_node *dst, ir_node *src, ir_type *data_type) {
2891 res = new_bd_CopyB(db, current_ir_graph->current_block,
2892 store, dst, src, data_type);
2893 #if PRECISE_EXC_CONTEXT
2894 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2900 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2901 return new_bd_InstOf(db, current_ir_graph->current_block,
2902 store, objptr, type);
2903 } /* new_d_InstOf */
2906 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2907 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2910 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2911 ir_node *idx, ir_node *lower, ir_node *upper) {
2913 res = new_bd_Bound(db, current_ir_graph->current_block,
2914 store, idx, lower, upper);
2915 #if PRECISE_EXC_CONTEXT
2916 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2922 new_d_Pin(dbg_info *db, ir_node *node) {
2923 return new_bd_Pin(db, current_ir_graph->current_block, node);
2927 new_d_ASM(dbg_info *db, ir_node *store, int arity, ir_node *inputs[], ident *asm_text) {
2928 return new_bd_ASM(db, current_ir_graph->current_block, store, arity, inputs, asm_text);
2931 /* ********************************************************************* */
2932 /* Comfortable interface with automatic Phi node construction. */
2933 /* (Uses also constructors of ?? interface, except new_Block. */
2934 /* ********************************************************************* */
2936 /* Block construction */
2937 /* immature Block without predecessors */
/*
 * Creates a new immature block (its in-array is still dynamic, created with
 * length -1) and makes it the current block.  The per-block graph_arr for
 * SSA construction is allocated and zeroed; the block must be finished with
 * mature_immBlock once all predecessors have been added.
 */
2939 new_d_immBlock(dbg_info *db) {
2942 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2943 /* creates a new dynamic in-array as length of in is -1 */
2944 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2945 current_ir_graph->current_block = res;
2947 /* macroblock head */
2950 res->attr.block.is_matured = 0;
2951 res->attr.block.is_dead = 0;
2952 res->attr.block.is_mb_head = 1;
2953 res->attr.block.irg = current_ir_graph;
2954 res->attr.block.backedge = NULL;
2955 res->attr.block.in_cg = NULL;
2956 res->attr.block.cg_backedge = NULL;
2957 res->attr.block.extblk = NULL;
2958 res->attr.block.region = NULL;
2959 res->attr.block.mb_depth = 0;
2961 set_Block_block_visited(res, 0);
2963 /* Create and initialize array for Phi-node construction. */
/* One slot per local value (n_loc); slot 0 carries the memory state. */
2964 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2965 current_ir_graph->n_loc);
2966 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2968 /* Immature block may not be optimized! */
2969 IRN_VRFY_IRG(res, current_ir_graph);
2972 } /* new_d_immBlock */
2975 new_immBlock(void) {
2976 return new_d_immBlock(NULL);
2977 } /* new_immBlock */
2979 /* immature PartBlock with its predecessors */
/* A part block continues the macroblock of pred_jmp's block: it copies the
 * macroblock head entry (in[0]), is not itself a head, and sits one level
 * deeper in mb_depth. */
2981 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2982 ir_node *res = new_d_immBlock(db);
2983 ir_node *blk = get_nodes_block(pred_jmp);
2985 res->in[0] = blk->in[0];
2986 add_immBlock_pred(res, pred_jmp);
2988 res->attr.block.is_mb_head = 0;
2989 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2992 } /* new_d_immPartBlock */
2995 new_immPartBlock(ir_node *pred_jmp) {
2996 return new_d_immPartBlock(NULL, pred_jmp);
2997 } /* new_immPartBlock */
2999 /* add an edge to a jmp/control flow node */
/* Appends `jmp` as a new control-flow predecessor of the still-immature,
 * macroblock-head block `block` and notifies the edge hook. */
3001 add_immBlock_pred(ir_node *block, ir_node *jmp) {
/* Current predecessor count; in[0] is the block's own head entry. */
3002 int n = ARR_LEN(block->in) - 1;
3004 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
3005 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
3006 assert(jmp != NULL);
3008 ARR_APP1(ir_node *, block->in, jmp);
3010 hook_set_irn_n(block, n, jmp, NULL);
3011 } /* add_immBlock_pred */
3013 /* changing the current block */
/* Makes `target` the block implicitly used by the comfortable (new_*)
 * constructors of the current graph. */
3015 set_cur_block(ir_node *target) {
3016 current_ir_graph->current_block = target;
3017 } /* set_cur_block */
3019 /* ************************ */
3020 /* parameter administration */
3022 /* get a value from the parameter array from the current block by its index */
/* `pos` is the frontend-visible variable number; internally slot pos + 1 is
 * used because graph_arr slot 0 holds the memory state (see get_store). */
3024 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
3025 ir_graph *irg = current_ir_graph;
3026 assert(get_irg_phase_state(irg) == phase_building);
/* Fresh visited counter for the recursive search in get_r_value_internal. */
3027 inc_irg_visited(irg);
3029 return get_r_value_internal(irg->current_block, pos + 1, mode);
3032 /* get a value from the parameter array from the current block by its index */
3034 get_value(int pos, ir_mode *mode) {
3035 return get_d_value(NULL, pos, mode);
3038 /* set a value at position pos in the parameter array from the current block */
3040 set_value(int pos, ir_node *value) {
3041 ir_graph *irg = current_ir_graph;
3042 assert(get_irg_phase_state(irg) == phase_building);
3043 assert(pos+1 < irg->n_loc);
/* +1: graph_arr slot 0 is reserved for the memory state. */
3044 irg->current_block->attr.block.graph_arr[pos + 1] = value;
3047 /* Find the value number for a node in the current block.*/
3049 find_value(ir_node *value) {
3051 ir_node *bl = current_ir_graph->current_block;
/* Scan backwards over the value slots; slot 0 (memory state) is excluded. */
3053 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
3054 if (bl->attr.block.graph_arr[i] == value)
3059 /* get the current store */
/* The memory state lives in graph_arr slot 0 with mode_M; this performs the
 * same Phi-inserting search as get_value for ordinary variables. */
3062 ir_graph *irg = current_ir_graph;
3064 assert(get_irg_phase_state(irg) == phase_building);
3065 /* GL: one could call get_value instead */
3066 inc_irg_visited(irg);
3067 return get_r_value_internal(irg->current_block, 0, mode_M);
3070 /* set the current store: handles automatic Sync construction for Load nodes */
3072 set_store(ir_node *store) {
3073 ir_node *load, *pload, *pred, *in[2];
3075 assert(get_irg_phase_state(current_ir_graph) == phase_building);
3076 /* Beware: due to dead code elimination, a store might become a Bad node even in
3077 the construction phase. */
3078 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
3080 if (get_opt_auto_create_sync()) {
3081 /* handle non-volatile Load nodes by automatically creating Sync's */
3082 load = skip_Proj(store);
3083 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3084 pred = get_Load_mem(load);
3086 if (is_Sync(pred)) {
3087 /* a Load after a Sync: move it up */
3088 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3090 set_Load_mem(load, get_memop_mem(mem));
3091 add_Sync_pred(pred, store);
3094 pload = skip_Proj(pred);
3095 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3096 /* a Load after a Load: create a new Sync */
3097 set_Load_mem(load, get_Load_mem(pload));
3101 store = new_Sync(2, in);
/* The current memory state is kept in graph_arr slot 0 of the current block. */
3106 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Adds `ka` to the End node's keepalive list.  NOTE(review): presumably this
 * protects the node from removal as unreachable/dead — confirm against the
 * End-node keepalive semantics in irnode docs. */
3110 keep_alive(ir_node *ka) {
3111 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3114 /* --- Useful access routines --- */
3115 /* Returns the current block of the current graph. To set the current
3116 block use set_cur_block. */
3117 ir_node *get_cur_block(void) {
3118 return get_irg_current_block(current_ir_graph);
3119 } /* get_cur_block */
3121 /* Returns the frame type of the current graph */
3122 ir_type *get_cur_frame_type(void) {
3123 return get_irg_frame_type(current_ir_graph);
3124 } /* get_cur_frame_type */
3127 /* ********************************************************************* */
3130 /* call once for each run of the library */
/* Registers the frontend callback that phi_merge invokes when a variable is
 * read before any definition reaches the start block. */
3132 init_cons(uninitialized_local_variable_func_t *func) {
3133 default_initialize_local_variable = func;
/* Finalizes construction of every graph in the program and marks the whole
 * program representation as past the construction phase. */
3137 irp_finalize_cons(void) {
3139 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3140 irg_finalize_cons(get_irp_irg(i));
3142 irp->phase_state = phase_high;
3143 } /* irp_finalize_cons */
3146 ir_node *new_Block(int arity, ir_node **in) {
3147 return new_d_Block(NULL, arity, in);
3149 ir_node *new_Start(void) {
3150 return new_d_Start(NULL);
3152 ir_node *new_End(void) {
3153 return new_d_End(NULL);
3155 ir_node *new_Jmp(void) {
3156 return new_d_Jmp(NULL);
3158 ir_node *new_IJmp(ir_node *tgt) {
3159 return new_d_IJmp(NULL, tgt);
3161 ir_node *new_Cond(ir_node *c) {
3162 return new_d_Cond(NULL, c);
3164 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3165 return new_d_Return(NULL, store, arity, in);
3167 ir_node *new_Const(ir_mode *mode, tarval *con) {
3168 return new_d_Const(NULL, mode, con);
3171 ir_node *new_Const_long(ir_mode *mode, long value) {
3172 return new_d_Const_long(NULL, mode, value);
3175 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3176 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3179 ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
3180 return new_d_SymConst_type(NULL, value, kind, type);
3182 ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
3183 return new_d_SymConst(NULL, value, kind);
3185 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3186 return new_d_simpleSel(NULL, store, objptr, ent);
3188 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3190 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3192 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3194 return new_d_Call(NULL, store, callee, arity, in, tp);
3196 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3197 return new_d_Add(NULL, op1, op2, mode);
3199 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3200 return new_d_Sub(NULL, op1, op2, mode);
3202 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3203 return new_d_Minus(NULL, op, mode);
3205 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3206 return new_d_Mul(NULL, op1, op2, mode);
3208 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3209 return new_d_Quot(NULL, memop, op1, op2, mode);
3211 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3212 return new_d_DivMod(NULL, memop, op1, op2, mode);
3214 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3215 return new_d_Div(NULL, memop, op1, op2, mode);
3217 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3218 return new_d_Mod(NULL, memop, op1, op2, mode);
3220 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3221 return new_d_Abs(NULL, op, mode);
3223 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3224 return new_d_And(NULL, op1, op2, mode);
3226 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3227 return new_d_Or(NULL, op1, op2, mode);
3229 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3230 return new_d_Eor(NULL, op1, op2, mode);
3232 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3233 return new_d_Not(NULL, op, mode);
3235 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3236 return new_d_Shl(NULL, op, k, mode);
3238 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3239 return new_d_Shr(NULL, op, k, mode);
3241 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3242 return new_d_Shrs(NULL, op, k, mode);
3244 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
3245 return new_d_Rot(NULL, op, k, mode);
3247 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3248 return new_d_Carry(NULL, op1, op2, mode);
3250 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3251 return new_d_Borrow(NULL, op1, op2, mode);
3253 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3254 return new_d_Cmp(NULL, op1, op2);
3256 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3257 return new_d_Conv(NULL, op, mode);
3259 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3260 return new_d_strictConv(NULL, op, mode);
3262 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3263 return new_d_Cast(NULL, op, to_tp);
3265 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3266 return new_d_Phi(NULL, arity, in, mode);
3268 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3269 return new_d_Load(NULL, store, addr, mode);
3271 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3272 return new_d_Store(NULL, store, addr, val);
3274 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3275 where_alloc where) {
3276 return new_d_Alloc(NULL, store, size, alloc_type, where);
3278 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3279 ir_type *free_type, where_alloc where) {
3280 return new_d_Free(NULL, store, ptr, size, free_type, where);
3282 ir_node *new_Sync(int arity, ir_node *in[]) {
3283 return new_d_Sync(NULL, arity, in);
3285 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3286 return new_d_Proj(NULL, arg, mode, proj);
3288 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3289 return new_d_defaultProj(NULL, arg, max_proj);
3291 ir_node *new_Tuple(int arity, ir_node **in) {
3292 return new_d_Tuple(NULL, arity, in);
3294 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3295 return new_d_Id(NULL, val, mode);
3297 ir_node *new_Bad(void) {
3300 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3301 return new_d_Confirm (NULL, val, bound, cmp);
3303 ir_node *new_Unknown(ir_mode *m) {
3304 return new_d_Unknown(m);
3306 ir_node *new_CallBegin(ir_node *callee) {
3307 return new_d_CallBegin(NULL, callee);
3309 ir_node *new_EndReg(void) {
3310 return new_d_EndReg(NULL);
3312 ir_node *new_EndExcept(void) {
3313 return new_d_EndExcept(NULL);
3315 ir_node *new_Break(void) {
3316 return new_d_Break(NULL);
3318 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3319 return new_d_Filter(NULL, arg, mode, proj);
3321 ir_node *new_NoMem(void) {
3322 return new_d_NoMem();
3324 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3325 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3327 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3328 return new_d_Psi(NULL, arity, conds, vals, mode);
3330 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3331 return new_d_CopyB(NULL, store, dst, src, data_type);
3333 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3334 return new_d_InstOf(NULL, store, objptr, ent);
3336 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3337 return new_d_Raise(NULL, store, obj);
3339 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3340 return new_d_Bound(NULL, store, idx, lower, upper);
3342 ir_node *new_Pin(ir_node *node) {
3343 return new_d_Pin(NULL, node);
3345 ir_node *new_ASM(ir_node *store, int arity, ir_node *inputs[], ident *asm_text) {
3346 return new_d_ASM(NULL, store, arity, inputs, asm_text);