2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
/* NOTE(review): this chunk is a sampled extraction — intervening original
   lines (including the matching #else/#endif) appear to be missing. */
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
/* IRN_VRFY_IRG: per-node verification hook; compiled to a no-op in one
   configuration and to irn_vrfy_irg() in the other (the #if/#else lines
   selecting between the two definitions are not visible here). */
60 /* when we need verifying */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
/* Callback invoked for uninitialized local variables; NULL means no
   language-specific initialization is installed. */
68 * Language dependent variable initialization callback.
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* NOTE(review): macro generators for the repetitive node-constructor
   families. Each expands to a constructor body: build the node via
   new_ir_node(), run the local optimizer, verify. Several continuation
   lines of each macro appear to be missing from this extraction. */
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = op_pin_state_pinned; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for an divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode); \
161 current_ir_graph = rem; \
165 /* creates a d constructor for an binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
/* new_bd_Block: builds a Block node on current_ir_graph with `arity`
   control-flow predecessors `in`, initializes all block attributes,
   marks the block mature and unvisited, then verifies it.
   NOTE(review): local declarations and the return appear to be missing
   from this extraction. */
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Cannot be used with automatic
183 * Phi node construction.
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.irg = irg;
198 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
199 res->attr.block.in_cg = NULL;
200 res->attr.block.cg_backedge = NULL;
201 res->attr.block.extblk = NULL;
202 res->attr.block.mb_depth = 0;
204 set_Block_matured(res, 1);
205 set_Block_block_visited(res, 0);
207 IRN_VRFY_IRG(res, irg);
/* new_bd_Start: Start node (mode_T, no predecessors) in `block`. */
212 new_bd_Start(dbg_info *db, ir_node *block) {
214 ir_graph *irg = current_ir_graph;
216 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
218 IRN_VRFY_IRG(res, irg);
/* new_bd_End: End node (mode_X); arity -1 means a dynamic in-array. */
223 new_bd_End(dbg_info *db, ir_node *block) {
225 ir_graph *irg = current_ir_graph;
227 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
229 IRN_VRFY_IRG(res, irg);
/* new_bd_Phi: Phi with all `arity` predecessors given up front. If any
   input is Unknown the node is NOT optimized (see has_unknown below);
   memory Phis are kept alive via the End node because memory Phis in
   endless loops would otherwise be lost. */
234 * Creates a Phi node with all predecessors. Calling this constructor
235 * is only allowed if the corresponding block is mature.
238 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
240 ir_graph *irg = current_ir_graph;
244 /* Don't assert that block matured: the use of this constructor is strongly
246 if ( get_Block_matured(block) )
247 assert( get_irn_arity(block) == arity );
249 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
251 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* scan inputs for Unknown nodes; body setting has_unknown appears to be
   missing from this extraction — TODO confirm against full source */
253 for (i = arity-1; i >= 0; i--)
254 if (get_irn_op(in[i]) == op_Unknown) {
259 if (!has_unknown) res = optimize_node(res);
260 IRN_VRFY_IRG(res, irg);
262 /* Memory Phis in endless loops must be kept alive.
263 As we can't distinguish these easily we keep all of them alive. */
264 if ((res->op == op_Phi) && (mode == mode_M))
265 add_End_keepalive(get_irg_end(irg), res);
/* new_bd_Const_type: Const with tarval `con` and type `tp`. Note the
   `block` parameter is ignored — Consts are always placed in the start
   block (see get_irg_start_block below). */
270 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
272 ir_graph *irg = current_ir_graph;
274 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
275 res->attr.con.tv = con;
276 set_Const_type(res, tp); /* Call method because of complex assertion. */
277 res = optimize_node (res);
278 assert(get_Const_type(res) == tp);
279 IRN_VRFY_IRG(res, irg);
282 } /* new_bd_Const_type */
/* new_bd_Const: Const with the unknown type; delegates to the rd variant. */
285 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
286 ir_graph *irg = current_ir_graph;
288 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* new_bd_Const_long: convenience wrapper converting a C long to a tarval. */
292 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
293 ir_graph *irg = current_ir_graph;
295 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
296 } /* new_bd_Const_long */
/* new_bd_Id: identity node forwarding `val`. */
299 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
301 ir_graph *irg = current_ir_graph;
303 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
/* new_bd_Proj: projection `proj` out of the mode_T node `arg`. */
310 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
313 ir_graph *irg = current_ir_graph;
315 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
316 res->attr.proj = proj;
319 assert(get_Proj_pred(res));
320 assert(get_nodes_block(get_Proj_pred(res)));
322 res = optimize_node(res);
324 IRN_VRFY_IRG(res, irg);
/* new_bd_defaultProj: default Proj of a Cond; marks the Cond fragmentary
   and records max_proj as its default projection number. */
329 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
332 ir_graph *irg = current_ir_graph;
334 assert(arg->op == op_Cond);
335 arg->attr.cond.kind = fragmentary;
336 arg->attr.cond.default_proj = max_proj;
337 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
339 } /* new_bd_defaultProj */
/* new_bd_Conv: mode conversion of `op`; strict_flag selects a strict Conv. */
342 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
344 ir_graph *irg = current_ir_graph;
346 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
347 res->attr.conv.strict = strict_flag;
348 res = optimize_node(res);
349 IRN_VRFY_IRG(res, irg);
/* new_bd_Cast: high-level type cast; keeps the operand's mode, only the
   atomic target type `to_tp` is attached. */
354 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
356 ir_graph *irg = current_ir_graph;
358 assert(is_atomic_type(to_tp));
360 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
361 res->attr.cast.totype = to_tp;
362 res = optimize_node(res);
363 IRN_VRFY_IRG(res, irg);
/* new_bd_Tuple: mode_T tuple of `arity` values. */
368 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
370 ir_graph *irg = current_ir_graph;
372 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
373 res = optimize_node (res);
374 IRN_VRFY_IRG(res, irg);
/* new_bd_Cmp: comparison of op1 and op2; results are selected via Proj. */
399 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
402 ir_graph *irg = current_ir_graph;
405 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
406 res = optimize_node(res);
407 IRN_VRFY_IRG(res, irg);
/* new_bd_Jmp: unconditional jump out of `block`. */
412 new_bd_Jmp(dbg_info *db, ir_node *block) {
414 ir_graph *irg = current_ir_graph;
416 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
417 res = optimize_node (res);
418 IRN_VRFY_IRG(res, irg);
/* new_bd_IJmp: computed jump to address `tgt`; optimize_node may replace
   it, hence the op check afterwards. */
423 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
425 ir_graph *irg = current_ir_graph;
427 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
428 res = optimize_node (res);
429 IRN_VRFY_IRG(res, irg);
431 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
/* new_bd_Cond: conditional branch on `c`, initially a dense selector with
   default projection 0 and no jump prediction. */
437 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
439 ir_graph *irg = current_ir_graph;
441 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
442 res->attr.cond.kind = dense;
443 res->attr.cond.default_proj = 0;
444 res->attr.cond.pred = COND_JMP_PRED_NONE;
445 res = optimize_node (res);
446 IRN_VRFY_IRG(res, irg);
/* new_bd_Call: call of `callee` with `arity` arguments. Inputs are
   [store, callee, args...], hence the copy into r_in starting at index 2;
   `tp` must be a method type (or the unknown type). */
451 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
452 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
456 ir_graph *irg = current_ir_graph;
459 NEW_ARR_A(ir_node *, r_in, r_arity);
462 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
464 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
466 assert((get_unknown_type() == tp) || is_Method_type(tp));
467 set_Call_type(res, tp);
468 res->attr.call.exc.pin_state = op_pin_state_pinned;
469 res->attr.call.callee_arr = NULL;
470 res = optimize_node(res);
471 IRN_VRFY_IRG(res, irg);
/* new_bd_Return: return with memory `store` and `arity` result values;
   inputs are [store, results...], copied starting at index 1. */
476 new_bd_Return(dbg_info *db, ir_node *block,
477 ir_node *store, int arity, ir_node **in) {
481 ir_graph *irg = current_ir_graph;
484 NEW_ARR_A (ir_node *, r_in, r_arity);
486 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
487 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
491 } /* new_bd_Return */
/* new_bd_Load: non-volatile, pinned load of `mode` from address `adr`. */
494 new_bd_Load(dbg_info *db, ir_node *block,
495 ir_node *store, ir_node *adr, ir_mode *mode) {
498 ir_graph *irg = current_ir_graph;
502 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
503 res->attr.load.exc.pin_state = op_pin_state_pinned;
504 res->attr.load.load_mode = mode;
505 res->attr.load.volatility = volatility_non_volatile;
506 res = optimize_node(res);
507 IRN_VRFY_IRG(res, irg);
/* new_bd_Store: non-volatile, pinned store of `val` to address `adr`. */
512 new_bd_Store(dbg_info *db, ir_node *block,
513 ir_node *store, ir_node *adr, ir_node *val) {
516 ir_graph *irg = current_ir_graph;
521 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
522 res->attr.store.exc.pin_state = op_pin_state_pinned;
523 res->attr.store.volatility = volatility_non_volatile;
524 res = optimize_node(res);
525 IRN_VRFY_IRG(res, irg);
/* new_bd_Alloc: allocate `size` bytes of `alloc_type` on heap or stack
   (`where`); pinned because it may raise an exception. */
530 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
531 ir_node *size, ir_type *alloc_type, where_alloc where) {
534 ir_graph *irg = current_ir_graph;
538 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
539 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
540 res->attr.alloc.where = where;
541 res->attr.alloc.type = alloc_type;
542 res = optimize_node(res);
543 IRN_VRFY_IRG(res, irg);
/* new_bd_Free: free `size` bytes at `ptr` of `free_type`; yields the new
   memory state (mode_M). */
548 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
549 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
552 ir_graph *irg = current_ir_graph;
557 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
558 res->attr.free.where = where;
559 res->attr.free.type = free_type;
560 res = optimize_node(res);
561 IRN_VRFY_IRG(res, irg);
/* new_bd_Sel: select entity `ent` from compound `objptr` with `arity`
   index expressions. Method entities get mode_P_code, data entities
   mode_P_data; inputs are [store, objptr, indices...]. */
566 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
567 int arity, ir_node **in, ir_entity *ent) {
571 ir_graph *irg = current_ir_graph;
572 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
574 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
577 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
580 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
582 * Sel's can select functions which should be of mode mode_P_code.
584 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
585 res->attr.sel.ent = ent;
586 res = optimize_node(res);
587 IRN_VRFY_IRG(res, irg);
/* new_bd_SymConst_type: symbolic constant of kind `symkind` (address,
   size, alignment, type tag, ...); address kinds force a pointer mode. */
592 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
593 symconst_kind symkind, ir_type *tp) {
596 ir_graph *irg = current_ir_graph;
598 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
599 mode = mode_P_data; /* FIXME: can be mode_P_code */
603 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
605 res->attr.symc.num = symkind;
606 res->attr.symc.sym = value;
607 res->attr.symc.tp = tp;
609 res = optimize_node(res);
610 IRN_VRFY_IRG(res, irg);
612 } /* new_bd_SymConst_type */
/* new_bd_Sync: memory synchronization node; starts with a dynamic
   (arity -1) predecessor array, predecessors are added later. */
615 new_bd_Sync(dbg_info *db, ir_node *block) {
617 ir_graph *irg = current_ir_graph;
619 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
620 /* no need to call optimize node here, Sync are always created with no predecessors */
621 IRN_VRFY_IRG(res, irg);
/* new_bd_Confirm: asserts that `val` relates to `bound` by `cmp`;
   mode is inherited from `val`. */
626 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
627 ir_node *in[2], *res;
628 ir_graph *irg = current_ir_graph;
632 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
633 res->attr.confirm_cmp = cmp;
634 res = optimize_node (res);
635 IRN_VRFY_IRG(res, irg);
637 } /* new_bd_Confirm */
/* new_bd_Unknown: placeholder of mode `m`, placed in the start block. */
640 new_bd_Unknown(ir_mode *m) {
642 ir_graph *irg = current_ir_graph;
644 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
645 res = optimize_node(res);
647 } /* new_bd_Unknown */
/* new_bd_CallBegin: interprocedural-view helper marking the start of a
   call; its single input is the callee pointer of `call`. */
650 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
653 ir_graph *irg = current_ir_graph;
655 in[0] = get_Call_ptr(call);
656 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
657 /* res->attr.callbegin.irg = irg; */
658 res->attr.callbegin.call = call;
659 res = optimize_node(res);
660 IRN_VRFY_IRG(res, irg);
662 } /* new_bd_CallBegin */
/* new_bd_EndReg: interprocedural regular end; registered on the graph. */
665 new_bd_EndReg(dbg_info *db, ir_node *block) {
667 ir_graph *irg = current_ir_graph;
669 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
670 set_irg_end_reg(irg, res);
671 IRN_VRFY_IRG(res, irg);
673 } /* new_bd_EndReg */
/* new_bd_EndExcept: interprocedural exceptional end; registered on the graph. */
676 new_bd_EndExcept(dbg_info *db, ir_node *block) {
678 ir_graph *irg = current_ir_graph;
680 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
681 set_irg_end_except(irg, res);
682 IRN_VRFY_IRG (res, irg);
684 } /* new_bd_EndExcept */
/* new_bd_Break: interprocedural-view jump (like Jmp). */
687 new_bd_Break(dbg_info *db, ir_node *block) {
689 ir_graph *irg = current_ir_graph;
691 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
692 res = optimize_node(res);
693 IRN_VRFY_IRG(res, irg);
/* new_bd_Filter: interprocedural analogue of Proj; selects `proj` from
   `arg` and carries call-graph predecessor bookkeeping. */
698 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
701 ir_graph *irg = current_ir_graph;
703 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
704 res->attr.filter.proj = proj;
705 res->attr.filter.in_cg = NULL;
706 res->attr.filter.backedge = NULL;
709 assert(get_Proj_pred(res));
710 assert(get_nodes_block(get_Proj_pred(res)));
712 res = optimize_node(res);
713 IRN_VRFY_IRG(res, irg);
715 } /* new_bd_Filter */
/* new_bd_Mux: selects ir_false/ir_true depending on `sel` (3 inputs). */
718 new_bd_Mux(dbg_info *db, ir_node *block,
719 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
722 ir_graph *irg = current_ir_graph;
728 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
731 res = optimize_node(res);
732 IRN_VRFY_IRG(res, irg);
/* new_bd_Psi: generalized Mux over `arity` (cond, value) pairs plus a
   default value; inputs are interleaved cond/value with the default last
   (2*arity+1 inputs total). */
737 new_bd_Psi(dbg_info *db, ir_node *block,
738 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
741 ir_graph *irg = current_ir_graph;
744 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
746 for (i = 0; i < arity; ++i) {
748 in[2 * i + 1] = vals[i];
752 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
755 res = optimize_node(res);
756 IRN_VRFY_IRG(res, irg);
/* new_bd_CopyB: block copy of `data_type` from `src` to `dst`; pinned
   because it may raise an exception. */
761 new_bd_CopyB(dbg_info *db, ir_node *block,
762 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
765 ir_graph *irg = current_ir_graph;
771 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
773 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
774 res->attr.copyb.data_type = data_type;
775 res = optimize_node(res);
776 IRN_VRFY_IRG(res, irg);
/* new_bd_InstOf: runtime type-check of `objptr` against `type`.
   BUGFIX(review): the node was constructed with op_Sel, but the very next
   line writes res->attr.instof.type — i.e. the InstOf member of the
   attribute union. Building a Sel node and then writing InstOf attributes
   corrupts the Sel attribute state; the opcode must be op_InstOf to match
   the attributes (and the function's name/contract). */
781 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
782 ir_node *objptr, ir_type *type) {
785 ir_graph *irg = current_ir_graph;
789 res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
790 res->attr.instof.type = type;
791 res = optimize_node(res);
792 IRN_VRFY_IRG(res, irg);
794 } /* new_bd_InstOf */
/* new_bd_Raise: raises exception object `obj` (inputs: store, obj). */
797 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
800 ir_graph *irg = current_ir_graph;
804 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
805 res = optimize_node(res);
806 IRN_VRFY_IRG(res, irg);
/* new_bd_Bound: bounds check lower <= idx < upper; pinned (may raise). */
811 new_bd_Bound(dbg_info *db, ir_node *block,
812 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
815 ir_graph *irg = current_ir_graph;
821 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
822 res->attr.bound.exc.pin_state = op_pin_state_pinned;
823 res = optimize_node(res);
824 IRN_VRFY_IRG(res, irg);
/* new_bd_Pin: pins `node` to `block`; mode is inherited from `node`. */
829 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
831 ir_graph *irg = current_ir_graph;
833 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
834 res = optimize_node(res);
835 IRN_VRFY_IRG(res, irg);
839 /* --------------------------------------------- */
840 /* private interfaces, for professional use only */
841 /* --------------------------------------------- */
843 /* Constructs a Block with a fixed number of predecessors.
844 Does not set current_block. Can not be used with automatic
845 Phi node construction. */
/* new_rd_* wrappers: temporarily switch current_ir_graph to `irg`,
   delegate to the matching new_bd_* constructor, then restore. */
847 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
848 ir_graph *rem = current_ir_graph;
851 current_ir_graph = irg;
852 res = new_bd_Block(db, arity, in);
853 current_ir_graph = rem;
859 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
860 ir_graph *rem = current_ir_graph;
863 current_ir_graph = irg;
864 res = new_bd_Start(db, block);
865 current_ir_graph = rem;
/* new_rd_End: build an End node for graph `irg` in `block`.
   BUGFIX(review): the line before the new_bd_End() call assigned
   `current_ir_graph = rem;` — a no-op self-assignment that left
   current_ir_graph pointing at the CALLER's graph, so the node was built
   in the wrong graph. Every sibling new_rd_* wrapper switches to `irg`
   here; fixed to do the same. */
871 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
873 ir_graph *rem = current_ir_graph;
875 current_ir_graph = irg;
876 res = new_bd_End(db, block);
877 current_ir_graph = rem;
/* new_rd_* wrappers (Phi .. Tuple): each saves current_ir_graph,
   switches to `irg`, delegates to the new_bd_* constructor, restores. */
882 /* Creates a Phi node with all predecessors. Calling this constructor
883 is only allowed if the corresponding block is mature. */
885 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
887 ir_graph *rem = current_ir_graph;
889 current_ir_graph = irg;
890 res = new_bd_Phi(db, block,arity, in, mode);
891 current_ir_graph = rem;
897 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
899 ir_graph *rem = current_ir_graph;
901 current_ir_graph = irg;
902 res = new_bd_Const_type(db, block, mode, con, tp);
903 current_ir_graph = rem;
906 } /* new_rd_Const_type */
/* new_rd_Const: Const with the unknown type. */
909 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
911 ir_graph *rem = current_ir_graph;
913 current_ir_graph = irg;
914 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
915 current_ir_graph = rem;
921 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
922 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
923 } /* new_rd_Const_long */
926 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
928 ir_graph *rem = current_ir_graph;
930 current_ir_graph = irg;
931 res = new_bd_Id(db, block, val, mode);
932 current_ir_graph = rem;
938 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
941 ir_graph *rem = current_ir_graph;
943 current_ir_graph = irg;
944 res = new_bd_Proj(db, block, arg, mode, proj);
945 current_ir_graph = rem;
951 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
954 ir_graph *rem = current_ir_graph;
956 current_ir_graph = irg;
957 res = new_bd_defaultProj(db, block, arg, max_proj);
958 current_ir_graph = rem;
961 } /* new_rd_defaultProj */
/* new_rd_Conv: always constructs a non-strict Conv (strict_flag 0). */
964 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
966 ir_graph *rem = current_ir_graph;
968 current_ir_graph = irg;
969 res = new_bd_Conv(db, block, op, mode, 0);
970 current_ir_graph = rem;
976 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
978 ir_graph *rem = current_ir_graph;
980 current_ir_graph = irg;
981 res = new_bd_Cast(db, block, op, to_tp);
982 current_ir_graph = rem;
988 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
990 ir_graph *rem = current_ir_graph;
992 current_ir_graph = irg;
993 res = new_bd_Tuple(db, block, arity, in);
994 current_ir_graph = rem;
/* Macro-expanded rd constructors (only two instantiations visible in this
   extraction) followed by the hand-written rd wrappers Cmp .. Sel. */
1004 NEW_RD_DIVOP(DivMod)
1017 NEW_RD_BINOP(Borrow)
1020 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1021 ir_node *op1, ir_node *op2) {
1023 ir_graph *rem = current_ir_graph;
1025 current_ir_graph = irg;
1026 res = new_bd_Cmp(db, block, op1, op2);
1027 current_ir_graph = rem;
1033 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1035 ir_graph *rem = current_ir_graph;
1037 current_ir_graph = irg;
1038 res = new_bd_Jmp(db, block);
1039 current_ir_graph = rem;
1045 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1047 ir_graph *rem = current_ir_graph;
1049 current_ir_graph = irg;
1050 res = new_bd_IJmp(db, block, tgt);
1051 current_ir_graph = rem;
1057 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1059 ir_graph *rem = current_ir_graph;
1061 current_ir_graph = irg;
1062 res = new_bd_Cond(db, block, c);
1063 current_ir_graph = rem;
1069 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1070 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1072 ir_graph *rem = current_ir_graph;
1074 current_ir_graph = irg;
1075 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1076 current_ir_graph = rem;
1082 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1083 ir_node *store, int arity, ir_node **in) {
1085 ir_graph *rem = current_ir_graph;
1087 current_ir_graph = irg;
1088 res = new_bd_Return(db, block, store, arity, in);
1089 current_ir_graph = rem;
1092 } /* new_rd_Return */
1095 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1096 ir_node *store, ir_node *adr, ir_mode *mode) {
1098 ir_graph *rem = current_ir_graph;
1100 current_ir_graph = irg;
1101 res = new_bd_Load(db, block, store, adr, mode);
1102 current_ir_graph = rem;
1108 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1109 ir_node *store, ir_node *adr, ir_node *val) {
1111 ir_graph *rem = current_ir_graph;
1113 current_ir_graph = irg;
1114 res = new_bd_Store(db, block, store, adr, val);
1115 current_ir_graph = rem;
1118 } /* new_rd_Store */
1121 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1122 ir_node *size, ir_type *alloc_type, where_alloc where) {
1124 ir_graph *rem = current_ir_graph;
1126 current_ir_graph = irg;
1127 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1128 current_ir_graph = rem;
1131 } /* new_rd_Alloc */
1134 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1135 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1137 ir_graph *rem = current_ir_graph;
1139 current_ir_graph = irg;
1140 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1141 current_ir_graph = rem;
/* new_rd_simpleSel: Sel without index expressions (arity 0). */
1147 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1148 ir_node *store, ir_node *objptr, ir_entity *ent) {
1150 ir_graph *rem = current_ir_graph;
1152 current_ir_graph = irg;
1153 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1154 current_ir_graph = rem;
1157 } /* new_rd_simpleSel */
1160 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1161 int arity, ir_node **in, ir_entity *ent) {
1163 ir_graph *rem = current_ir_graph;
1165 current_ir_graph = irg;
1166 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1167 current_ir_graph = rem;
/* SymConst rd family: the _type wrapper delegates to the bd constructor;
   the kind-specific helpers below fill the symconst_symbol union with the
   matching member (entity_p / ident_p / type_p) and always place the node
   in the start block of `irg`. */
1173 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1174 symconst_kind symkind, ir_type *tp) {
1176 ir_graph *rem = current_ir_graph;
1178 current_ir_graph = irg;
1179 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1180 current_ir_graph = rem;
1183 } /* new_rd_SymConst_type */
/* new_rd_SymConst: SymConst with the unknown type. */
1186 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1187 symconst_kind symkind) {
1188 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1190 } /* new_rd_SymConst */
1192 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1193 symconst_symbol sym;
1194 sym.entity_p = symbol;
1195 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1196 } /* new_rd_SymConst_addr_ent */
1198 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1199 symconst_symbol sym;
1200 sym.entity_p = symbol;
1201 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1202 } /* new_rd_SymConst_ofs_ent */
1204 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1205 symconst_symbol sym;
1206 sym.ident_p = symbol;
1207 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1208 } /* new_rd_SymConst_addr_name */
1210 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1211 symconst_symbol sym;
1212 sym.type_p = symbol;
1213 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1214 } /* new_rd_SymConst_type_tag */
1216 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1217 symconst_symbol sym;
1218 sym.type_p = symbol;
1219 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1220 } /* new_rd_SymConst_size */
1222 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1223 symconst_symbol sym;
1224 sym.type_p = symbol;
1225 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1226 } /* new_rd_SymConst_align */
/* new_rd_Sync: builds an empty Sync then adds the `arity` predecessors
   one by one via add_Sync_pred (Sync uses a dynamic in-array). */
1229 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1231 ir_graph *rem = current_ir_graph;
1234 current_ir_graph = irg;
1235 res = new_bd_Sync(db, block);
1236 current_ir_graph = rem;
1238 for (i = 0; i < arity; ++i)
1239 add_Sync_pred(res, in[i]);
/* new_rd_Bad: returns the graph's unique Bad node (no construction). */
1245 new_rd_Bad(ir_graph *irg) {
1246 return get_irg_bad(irg);
1250 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1252 ir_graph *rem = current_ir_graph;
1254 current_ir_graph = irg;
1255 res = new_bd_Confirm(db, block, val, bound, cmp);
1256 current_ir_graph = rem;
1259 } /* new_rd_Confirm */
1262 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1264 ir_graph *rem = current_ir_graph;
1266 current_ir_graph = irg;
1267 res = new_bd_Unknown(m);
1268 current_ir_graph = rem;
1271 } /* new_rd_Unknown */
1274 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1276 ir_graph *rem = current_ir_graph;
1278 current_ir_graph = irg;
1279 res = new_bd_CallBegin(db, block, call);
1280 current_ir_graph = rem;
1283 } /* new_rd_CallBegin */
/* new_rd_EndReg / new_rd_EndExcept: construct directly with the explicit
   `irg` (no current_ir_graph switch needed) and register on the graph. */
1286 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1289 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1290 set_irg_end_reg(irg, res);
1291 IRN_VRFY_IRG(res, irg);
1293 } /* new_rd_EndReg */
1296 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1299 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1300 set_irg_end_except(irg, res);
1301 IRN_VRFY_IRG (res, irg);
1303 } /* new_rd_EndExcept */
1306 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1308 ir_graph *rem = current_ir_graph;
1310 current_ir_graph = irg;
1311 res = new_bd_Break(db, block);
1312 current_ir_graph = rem;
1315 } /* new_rd_Break */
1318 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1321 ir_graph *rem = current_ir_graph;
1323 current_ir_graph = irg;
1324 res = new_bd_Filter(db, block, arg, mode, proj);
1325 current_ir_graph = rem;
1328 } /* new_rd_Filter */
/* new_rd_NoMem: returns the graph's unique NoMem node (no construction). */
1331 new_rd_NoMem(ir_graph *irg) {
1332 return get_irg_no_mem(irg);
1333 } /* new_rd_NoMem */
/* new_rd_* wrappers (Mux .. Pin): save/switch/restore current_ir_graph
   around the matching new_bd_* constructor. */
1336 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1337 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1339 ir_graph *rem = current_ir_graph;
1341 current_ir_graph = irg;
1342 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1343 current_ir_graph = rem;
1349 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1350 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1352 ir_graph *rem = current_ir_graph;
1354 current_ir_graph = irg;
1355 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1356 current_ir_graph = rem;
1361 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1362 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1364 ir_graph *rem = current_ir_graph;
1366 current_ir_graph = irg;
1367 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1368 current_ir_graph = rem;
1371 } /* new_rd_CopyB */
1374 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1375 ir_node *objptr, ir_type *type) {
1377 ir_graph *rem = current_ir_graph;
1379 current_ir_graph = irg;
1380 res = new_bd_InstOf(db, block, store, objptr, type);
1381 current_ir_graph = rem;
1384 } /* new_rd_InstOf */
1387 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1389 ir_graph *rem = current_ir_graph;
1391 current_ir_graph = irg;
1392 res = new_bd_Raise(db, block, store, obj);
1393 current_ir_graph = rem;
1396 } /* new_rd_Raise */
1398 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1399 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1401 ir_graph *rem = current_ir_graph;
1403 current_ir_graph = irg;
1404 res = new_bd_Bound(db, block, store, idx, lower, upper);
1405 current_ir_graph = rem;
1408 } /* new_rd_Bound */
1410 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1412 ir_graph *rem = current_ir_graph;
1414 current_ir_graph = irg;
1415 res = new_bd_Pin(db, block, node);
1416 current_ir_graph = rem;
/* ---------------------------------------------------------------------
 * new_r_* wrappers (control flow, constants, Sel/Call):
 * construct a node in an explicit graph without debug information by
 * delegating to the matching new_rd_* constructor with db == NULL.
 * NOTE(review): the closing brace of each wrapper falls on a line
 * elided from this excerpt.
 * ------------------------------------------------------------------- */
1421 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1422 return new_rd_Block(NULL, irg, arity, in);
1424 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1425 return new_rd_Start(NULL, irg, block);
1427 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1428 return new_rd_End(NULL, irg, block);
1430 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1431 return new_rd_Jmp(NULL, irg, block);
1433 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1434 return new_rd_IJmp(NULL, irg, block, tgt);
1436 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1437 return new_rd_Cond(NULL, irg, block, c);
1439 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1440 ir_node *store, int arity, ir_node **in) {
1441 return new_rd_Return(NULL, irg, block, store, arity, in);
1443 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1444 ir_mode *mode, tarval *con) {
1445 return new_rd_Const(NULL, irg, block, mode, con);
1447 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1448 ir_mode *mode, long value) {
1449 return new_rd_Const_long(NULL, irg, block, mode, value);
1451 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1452 ir_mode *mode, tarval *con, ir_type *tp) {
1453 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1455 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1456 symconst_symbol value, symconst_kind symkind) {
1457 return new_rd_SymConst(NULL, irg, block, value, symkind);
/* simpleSel: Sel with no array indices (n_index == 0, index == NULL). */
1459 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1460 ir_node *objptr, ir_entity *ent) {
1461 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1463 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1464 ir_node *objptr, int n_index, ir_node **index,
1466 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1468 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1469 ir_node *callee, int arity, ir_node **in,
1471 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
/* ---------------------------------------------------------------------
 * new_r_* wrappers (arithmetic, bitwise, shift, compare, convert):
 * delegate to new_rd_* with db == NULL. Closing braces elided in this
 * excerpt (see note above the first wrapper group).
 * ------------------------------------------------------------------- */
1473 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1474 ir_node *op1, ir_node *op2, ir_mode *mode) {
1475 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1477 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1478 ir_node *op1, ir_node *op2, ir_mode *mode) {
1479 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1481 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1482 ir_node *op, ir_mode *mode) {
1483 return new_rd_Minus(NULL, irg, block, op, mode);
1485 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1486 ir_node *op1, ir_node *op2, ir_mode *mode) {
1487 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1489 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1490 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1491 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode);
1493 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1494 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1495 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode);
1497 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1498 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1499 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode);
1501 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1502 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1503 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode);
1505 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1506 ir_node *op, ir_mode *mode) {
1507 return new_rd_Abs(NULL, irg, block, op, mode);
1509 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1510 ir_node *op1, ir_node *op2, ir_mode *mode) {
1511 return new_rd_And(NULL, irg, block, op1, op2, mode);
1513 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1514 ir_node *op1, ir_node *op2, ir_mode *mode) {
1515 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1517 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1518 ir_node *op1, ir_node *op2, ir_mode *mode) {
1519 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1521 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1522 ir_node *op, ir_mode *mode) {
1523 return new_rd_Not(NULL, irg, block, op, mode);
1525 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1526 ir_node *op, ir_node *k, ir_mode *mode) {
1527 return new_rd_Shl(NULL, irg, block, op, k, mode);
1529 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1530 ir_node *op, ir_node *k, ir_mode *mode) {
1531 return new_rd_Shr(NULL, irg, block, op, k, mode);
1533 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1534 ir_node *op, ir_node *k, ir_mode *mode) {
1535 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1537 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1538 ir_node *op, ir_node *k, ir_mode *mode) {
1539 return new_rd_Rot(NULL, irg, block, op, k, mode);
1541 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1542 ir_node *op, ir_node *k, ir_mode *mode) {
1543 return new_rd_Carry(NULL, irg, block, op, k, mode);
1545 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1546 ir_node *op, ir_node *k, ir_mode *mode) {
1547 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1549 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1550 ir_node *op1, ir_node *op2) {
1551 return new_rd_Cmp(NULL, irg, block, op1, op2);
1553 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1554 ir_node *op, ir_mode *mode) {
1555 return new_rd_Conv(NULL, irg, block, op, mode);
/* ---------------------------------------------------------------------
 * new_r_* wrappers (type cast, Phi, memory operations, Proj, Tuple):
 * delegate to new_rd_* with db == NULL. Closing braces elided in this
 * excerpt.
 * ------------------------------------------------------------------- */
1557 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1558 return new_rd_Cast(NULL, irg, block, op, to_tp);
1560 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1561 ir_node **in, ir_mode *mode) {
1562 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1564 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1565 ir_node *store, ir_node *adr, ir_mode *mode) {
1566 return new_rd_Load(NULL, irg, block, store, adr, mode);
1568 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1569 ir_node *store, ir_node *adr, ir_node *val) {
1570 return new_rd_Store(NULL, irg, block, store, adr, val);
1572 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1573 ir_node *size, ir_type *alloc_type, where_alloc where) {
1574 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1576 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1577 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1578 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1580 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1581 return new_rd_Sync(NULL, irg, block, arity, in);
1583 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1584 ir_mode *mode, long proj) {
1585 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1587 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1589 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1591 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1592 int arity, ir_node **in) {
1593 return new_rd_Tuple(NULL, irg, block, arity, in );
1595 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1596 ir_node *val, ir_mode *mode) {
1597 return new_rd_Id(NULL, irg, block, val, mode);
/* ---------------------------------------------------------------------
 * new_r_* wrappers (special nodes, interprocedural view, exceptions):
 * delegate to new_rd_* with db == NULL. Closing braces elided in this
 * excerpt.
 * ------------------------------------------------------------------- */
1599 ir_node *new_r_Bad(ir_graph *irg) {
1600 return new_rd_Bad(irg);
1602 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1603 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1605 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1606 return new_rd_Unknown(irg, m);
1608 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1609 return new_rd_CallBegin(NULL, irg, block, callee);
1611 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1612 return new_rd_EndReg(NULL, irg, block);
1614 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1615 return new_rd_EndExcept(NULL, irg, block);
1617 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1618 return new_rd_Break(NULL, irg, block);
1620 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1621 ir_mode *mode, long proj) {
1622 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1624 ir_node *new_r_NoMem(ir_graph *irg) {
1625 return new_rd_NoMem(irg);
1627 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1628 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1629 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1631 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1632 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1633 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1635 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1636 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1637 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1639 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1641 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1643 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1644 ir_node *store, ir_node *obj) {
1645 return new_rd_Raise(NULL, irg, block, store, obj);
1647 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1648 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1649 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1651 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1652 return new_rd_Pin(NULL, irg, block, node);
1655 /** ********************/
1656 /** public interfaces */
1657 /** construction tools */
/* Creates a Start node (mode_T, no predecessors) in the current block
   of current_ir_graph, optimizes and verifies it.
   NOTE(review): `res` declaration and `return res;` are elided here. */
1661 * - create a new Start node in the current block
1663 * @return s - pointer to the created Start node
1668 new_d_Start(dbg_info *db) {
1671 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1672 op_Start, mode_T, 0, NULL);
1674 res = optimize_node(res);
1675 IRN_VRFY_IRG(res, current_ir_graph);
/* Creates the End node (mode_X) in the current block of
   current_ir_graph. Arity -1 marks a dynamic in-array: keep-alive
   edges are appended later.
   NOTE(review): `res` declaration and `return res;` are elided here. */
1680 new_d_End(dbg_info *db) {
1682 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1683 op_End, mode_X, -1, NULL);
1684 res = optimize_node(res);
1685 IRN_VRFY_IRG(res, current_ir_graph);
1690 /* Constructs a Block with a fixed number of predecessors.
1691 Does set current_block. Can be used with automatic Phi
1692 node construction. */
1694 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1697 int has_unknown = 0;
1699 res = new_bd_Block(db, arity, in);
/* While still building the graph, allocate the per-block array that
   maps local variable positions to their current SSA values; it is the
   backbone of the automatic Phi construction below. */
1701 /* Create and initialize array for Phi-node construction. */
1702 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1703 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1704 current_ir_graph->n_loc);
1705 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Blocks with Unknown predecessors must not be optimized yet.
   NOTE(review): the statement setting has_unknown inside this loop is
   elided from this excerpt. */
1708 for (i = arity-1; i >= 0; i--)
1709 if (get_irn_op(in[i]) == op_Unknown) {
1714 if (!has_unknown) res = optimize_node(res);
1715 current_ir_graph->current_block = res;
1717 IRN_VRFY_IRG(res, current_ir_graph);
1722 /* ***********************************************************************/
1723 /* Methods necessary for automatic Phi node creation */
1725 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1726 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1727 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1728 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1730 Call Graph: ( A ---> B == A "calls" B)
1732 get_value mature_immBlock
1740 get_r_value_internal |
1744 new_rd_Phi0 new_rd_Phi_in
1746 * *************************************************************************** */
1748 /** Creates a Phi node with 0 predecessors. Used as a placeholder in
     immature blocks; it is upgraded to a real Phi (or an Id) when the
     block is matured. NOTE(review): `res` declaration and `return res;`
     are elided from this excerpt. */
1749 static INLINE ir_node *
1750 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1753 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1754 IRN_VRFY_IRG(res, irg);
1758 /* There are two implementations of the Phi node construction. The first
1759 is faster, but does not work for blocks with more than 2 predecessors.
1760 The second works always but is slower and causes more unnecessary Phi
1762 Select the implementations by the following preprocessor flag set in
1764 #if USE_FAST_PHI_CONSTRUCTION
1766 /* This is a stack used for allocating and deallocating nodes in
1767 new_rd_Phi_in. The original implementation used the obstack
1768 to model this stack, now it is explicit. This reduces side effects.
1770 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates the explicit stack used to recycle Phi nodes in
   new_rd_Phi_in (only compiled with USE_EXPLICIT_PHI_IN_STACK).
   NOTE(review): the malloc result is used without a NULL check. */
1772 new_Phi_in_stack(void) {
1775 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1777 res->stack = NEW_ARR_F (ir_node *, 0);
1781 } /* new_Phi_in_stack */
/* Releases the flexible array inside a Phi_in stack; the struct itself
   is freed on a line elided from this excerpt. */
1784 free_Phi_in_stack(Phi_in_stack *s) {
1785 DEL_ARR_F(s->stack);
1787 } /* free_Phi_in_stack */
/* Pushes a discarded Phi node onto the current graph's Phi_in stack so
   alloc_or_pop_from_Phi_in_stack can reuse it. Grows the backing array
   only when the stack is full; otherwise overwrites the free slot. */
1790 free_to_Phi_in_stack(ir_node *phi) {
1791 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1792 current_ir_graph->Phi_in_stack->pos)
1793 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1795 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1797 (current_ir_graph->Phi_in_stack->pos)++;
1798 } /* free_to_Phi_in_stack */
/* Returns a Phi node with the given arity: a fresh obstack allocation
   when the explicit Phi_in stack is empty, otherwise a recycled node
   popped from the stack and re-initialized in place. */
1800 static INLINE ir_node *
1801 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1802 int arity, ir_node **in) {
1804 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1805 int pos = current_ir_graph->Phi_in_stack->pos;
1809 /* We need to allocate a new node */
/* NOTE(review): `db` is not declared anywhere in this scope. This code
   is dead while USE_EXPLICIT_PHI_IN_STACK is disabled, but it would not
   compile if the flag were enabled -- presumably NULL was intended. */
1810 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1811 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1813 /* reuse the old node and initialize it again. */
1816 assert(res->kind == k_ir_node);
1817 assert(res->op == op_Phi);
/* The old in-array stays on the obstack; a new one (slot 0 holds the
   block, slots 1..arity the predecessors) is allocated instead. */
1822 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1823 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
1825 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1827 (current_ir_graph->Phi_in_stack->pos)--;
1830 } /* alloc_or_pop_from_Phi_in_stack */
1831 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1834 * Creates a Phi node with a given, fixed array **in of predecessors.
1835 * If the Phi node is unnecessary, as the same value reaches the block
1836 * through all control flow paths, it is eliminated and the value
1837 * returned directly. This constructor is only intended for use in
1838 * the automatic Phi node generation triggered by get_value or mature.
1839 * The implementation is quite tricky and depends on the fact, that
1840 * the nodes are allocated on a stack:
1841 * The in array contains predecessors and NULLs. The NULLs appear,
1842 * if get_r_value_internal, that computed the predecessors, reached
1843 * the same block on two paths. In this case the same value reaches
1844 * this block on both paths, there is no definition in between. We need
1845 * not allocate a Phi where these path's merge, but we have to communicate
1846 * this fact to the caller. This happens by returning a pointer to the
1847 * node the caller _will_ allocate. (Yes, we predict the address. We can
1848 * do so because the nodes are allocated on the obstack.) The caller then
1849 * finds a pointer to itself and, when this routine is called again,
1850 * eliminates itself.
1852 static INLINE ir_node *
1853 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins) {
1855 ir_node *res, *known;
1857 /* Allocate a new node on the obstack. This can return a node to
1858 which some of the pointers in the in-array already point.
1859 Attention: the constructor copies the in array, i.e., the later
1860 changes to the array in this routine do not affect the
1861 constructed node! If the in array contains NULLs, there will be
1862 missing predecessors in the returned node. Is this a possible
1863 internal state of the Phi node generation? */
1864 #if USE_EXPLICIT_PHI_IN_STACK
1865 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1867 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1868 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1871 /* The in-array can contain NULLs. These were returned by
1872 get_r_value_internal if it reached the same block/definition on a
1873 second path. The NULLs are replaced by the node itself to
1874 simplify the test in the next loop. */
1875 for (i = 0; i < ins; ++i) {
1880 /* This loop checks whether the Phi has more than one predecessor.
1881 If so, it is a real Phi node and we break the loop. Else the Phi
1882 node merges the same definition on several paths and therefore is
1884 for (i = 0; i < ins; ++i) {
1885 if (in[i] == res || in[i] == known)
/* Superfluous Phi: give the node back (to the explicit stack or the
   obstack top) and return the single known value instead. */
1894 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1896 #if USE_EXPLICIT_PHI_IN_STACK
1897 free_to_Phi_in_stack(res);
1899 edges_node_deleted(res, current_ir_graph);
1900 obstack_free(current_ir_graph->obst, res);
1904 res = optimize_node (res);
1905 IRN_VRFY_IRG(res, irg);
1908 /* return the pointer to the Phi node. This node might be deallocated! */
1910 } /* new_rd_Phi_in */
1913 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1916 * Allocates and returns this node. The routine called to allocate the
1917 * node might optimize it away and return a real value, or even a pointer
1918 * to a deallocated Phi node on top of the obstack!
1919 * This function is called with an in-array of proper size.
1922 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
1923 ir_node *prevBlock, *res;
1926 /* This loop goes to all predecessor blocks of the block the Phi node is in
1927 and there finds the operands of the Phi node by calling
1928 get_r_value_internal. */
1929 for (i = 1; i <= ins; ++i) {
1930 assert (block->in[i]);
1931 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1933 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1936 /* After collecting all predecessors into the array nin a new Phi node
1937 with these predecessors is created. This constructor contains an
1938 optimization: If all predecessors of the Phi node are identical it
1939 returns the only operand instead of a new Phi node. If the value
1940 passes two different control flow edges without being defined, and
1941 this is the second path treated, a pointer to the node that will be
1942 allocated for the first path (recursion) is returned. We already
1943 know the address of this node, as it is the next node to be allocated
1944 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1945 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1947 /* Now we know the value for "pos" and can enter it in the array with
1948 all known local variables. Attention: this might be a pointer to
1949 a node, that later will be allocated!!! See new_rd_Phi_in().
1950 If this is called in mature, after some set_value() in the same block,
1951 the proper value must not be overwritten:
1953 get_value (makes Phi0, put's it into graph_arr)
1954 set_value (overwrites Phi0 in graph_arr)
1955 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1958 if (!block->attr.block.graph_arr[pos]) {
1959 block->attr.block.graph_arr[pos] = res;
1961 /* printf(" value already computed by %s\n",
1962 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
1969 * This function returns the last definition of a variable. In case
1970 * this variable was last defined in a previous block, Phi nodes are
1971 * inserted. If the part of the firm graph containing the definition
1972 * is not yet constructed, a dummy Phi node is returned.
1975 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
1978 /* There are 4 cases to treat.
1980 1. The block is not mature and we visit it the first time. We can not
1981 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1982 predecessors is returned. This node is added to the linked list (field
1983 "link") of the containing block to be completed when this block is
1984 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1987 2. The value is already known in this block, graph_arr[pos] is set and we
1988 visit the block the first time. We can return the value without
1989 creating any new nodes.
1991 3. The block is mature and we visit it the first time. A Phi node needs
1992 to be created (phi_merge). If the Phi is not needed, as all it's
1993 operands are the same value reaching the block through different
1994 paths, it's optimized away and the value itself is returned.
1996 4. The block is mature, and we visit it the second time. Now two
1997 subcases are possible:
1998 * The value was computed completely the last time we were here. This
1999 is the case if there is no loop. We can return the proper value.
2000 * The recursion that visited this node and set the flag did not
2001 return yet. We are computing a value in a loop and need to
2002 break the recursion without knowing the result yet.
2003 @@@ strange case. Straight forward we would create a Phi before
2004 starting the computation of it's predecessors. In this case we will
2005 find a Phi here in any case. The problem is that this implementation
2006 only creates a Phi after computing the predecessors, so that it is
2007 hard to compute self references of this Phi. @@@
2008 There is no simple check for the second subcase. Therefore we check
2009 for a second visit and treat all such cases as the second subcase.
2010 Anyways, the basic situation is the same: we reached a block
2011 on two paths without finding a definition of the value: No Phi
2012 nodes are needed on both paths.
2013 We return this information "Two paths, no Phi needed" by a very tricky
2014 implementation that relies on the fact that an obstack is a stack and
2015 will return a node with the same address on different allocations.
2016 Look also at phi_merge and new_rd_phi_in to understand this.
2017 @@@ Unfortunately this does not work, see testprogram
2018 three_cfpred_example.
2022 /* case 4 -- already visited. */
2023 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2025 /* visited the first time */
2026 set_irn_visited(block, get_irg_visited(current_ir_graph));
2028 /* Get the local valid value */
2029 res = block->attr.block.graph_arr[pos];
2031 /* case 2 -- If the value is actually computed, return it. */
2032 if (res) return res;
2034 if (block->attr.block.is_matured) { /* case 3 */
2036 /* The Phi has the same amount of ins as the corresponding block. */
2037 int ins = get_irn_arity(block);
2039 NEW_ARR_A(ir_node *, nin, ins);
2041 /* Phi merge collects the predecessors and then creates a node. */
2042 res = phi_merge(block, pos, mode, nin, ins);
2044 } else { /* case 1 */
2045 /* The block is not mature, we don't know how many in's are needed. A Phi
2046 with zero predecessors is created. Such a Phi node is called Phi0
2047 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2048 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2050 The Phi0 has to remember the pos of it's internal value. If the real
2051 Phi is computed, pos is used to update the array with the local
2054 res = new_rd_Phi0(current_ir_graph, block, mode);
2055 res->attr.phi0_pos = pos;
2056 res->link = block->link;
/* Fallback: no definition reached the start block -- a frontend bug.
   Complain and substitute a zero constant of the requested mode. */
2060 /* If we get here, the frontend missed a use-before-definition error */
2063 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2064 assert(mode->code >= irm_F && mode->code <= irm_P);
2065 res = new_rd_Const(NULL, current_ir_graph, block, mode, tarval_mode_null[mode->code]);
2068 /* The local valid value is available now. */
2069 block->attr.block.graph_arr[pos] = res;
2072 } /* get_r_value_internal */
2077 it starts the recursion. This causes an Id at the entry of
2078 every block that has no definition of the value! **/
2080 #if USE_EXPLICIT_PHI_IN_STACK
2082 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2083 void free_Phi_in_stack(Phi_in_stack *s) {}
/* General Phi constructor for automatic SSA construction: builds a Phi
   from a fixed predecessor array, eliminating it when all non-Bad
   predecessors collapse to a single value. phi0 is the placeholder this
   Phi will replace (or NULL); self-references through it are rewritten
   to the new node before the redundancy check. */
2086 static INLINE ir_node *
2087 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2088 ir_node **in, int ins, ir_node *phi0) {
2090 ir_node *res, *known;
2092 /* Allocate a new node on the obstack. The allocation copies the in
2094 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2095 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2097 /* This loop checks whether the Phi has more than one predecessor.
2098 If so, it is a real Phi node and we break the loop. Else the
2099 Phi node merges the same definition on several paths and therefore
2100 is not needed. Don't consider Bad nodes! */
2102 for (i=0; i < ins; ++i)
2106 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2108 /* Optimize self referencing Phis: We can't detect them yet properly, as
2109 they still refer to the Phi0 they will replace. So replace right now. */
2110 if (phi0 && in[i] == phi0) in[i] = res;
2112 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
/* Superfluous Phi: discard the freshly allocated node from the obstack
   top and fall back to the single known predecessor. */
2120 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2123 edges_node_deleted(res, current_ir_graph);
2124 obstack_free (current_ir_graph->obst, res);
2125 if (is_Phi(known)) {
2126 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2127 order, an enclosing Phi may become superfluous. */
2128 res = optimize_in_place_2(known);
2130 exchange(known, res);
2136 /* An undefined value, e.g., in unreachable code. */
2140 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2141 IRN_VRFY_IRG(res, irg);
2142 /* Memory Phis in endless loops must be kept alive.
2143 As we can't distinguish these easily we keep all of them alive. */
2144 if ((res->op == op_Phi) && (mode == mode_M))
2145 add_End_keepalive(get_irg_end(irg), res);
2149 } /* new_rd_Phi_in */
2152 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2154 #if PRECISE_EXC_CONTEXT
2156 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2159 * Construct a new frag_array for node n.
2160 * Copy the content from the current graph_arr of the corresponding block:
2161 * this is the current state.
2162 * Set ProjM(n) as current memory state.
2163 * Further the last entry in frag_arr of current block points to n. This
2164 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2166 static INLINE ir_node **new_frag_arr(ir_node *n) {
2170 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2171 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2172 sizeof(ir_node *)*current_ir_graph->n_loc);
2174 /* turn off optimization before allocating Proj nodes, as res isn't
2176 opt = get_opt_optimize(); set_optimize(0);
2177 /* Here we rely on the fact that all frag ops have Memory as first result! */
2178 if (get_irn_op(n) == op_Call)
2179 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2180 else if (get_irn_op(n) == op_CopyB)
2181 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops share one memory Proj number; the asserts
   document (and enforce) that assumption so pn_Alloc_M works for all. */
2183 assert((pn_Quot_M == pn_DivMod_M) &&
2184 (pn_Quot_M == pn_Div_M) &&
2185 (pn_Quot_M == pn_Mod_M) &&
2186 (pn_Quot_M == pn_Load_M) &&
2187 (pn_Quot_M == pn_Store_M) &&
2188 (pn_Quot_M == pn_Alloc_M) &&
2189 (pn_Quot_M == pn_Bound_M));
2190 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain this fragile op into the block (last slot of graph_arr). */
2194 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2196 } /* new_frag_arr */
2199 * Returns the frag_arr from a node.
/* NOTE(review): the case labels of this switch (presumably iro_Call,
   iro_Alloc, iro_Load, iro_Store and a default) fall on lines elided
   from this excerpt -- confirm against the full source. */
2201 static INLINE ir_node **get_frag_arr(ir_node *n) {
2202 switch (get_irn_opcode(n)) {
2204 return n->attr.call.exc.frag_arr;
2206 return n->attr.alloc.exc.frag_arr;
2208 return n->attr.load.exc.frag_arr;
2210 return n->attr.store.exc.frag_arr;
2212 return n->attr.except.frag_arr;
2214 } /* get_frag_arr */
/* Records val at position pos in frag_arr and in every frag_arr further
   down the chain of fragile ops (reached via the sentinel entry at
   n_loc-1), without overwriting values already set.
   NOTE(review): both a recursive variant and a bounded-loop variant of
   the chain walk are visible here; the lines selecting between them are
   elided from this excerpt. */
2217 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2219 if (!frag_arr[pos]) frag_arr[pos] = val;
2220 if (frag_arr[current_ir_graph->n_loc - 1]) {
2221 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2222 assert(arr != frag_arr && "Endless recursion detected");
2223 set_frag_value(arr, pos, val);
/* Iterative variant: 1000 is an arbitrary safety bound on chain length. */
2228 for (i = 0; i < 1000; ++i) {
2229 if (!frag_arr[pos]) {
2230 frag_arr[pos] = val;
2232 if (frag_arr[current_ir_graph->n_loc - 1]) {
2233 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2239 assert(0 && "potential endless recursion");
2241 } /* set_frag_value */
/* Like get_r_value_internal, but looks the value up in the frag_arr of
   the fragile control-flow op cfOp, so each exception edge sees the
   memory/value state at the point of the op rather than at block end. */
2244 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2248 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2250 frag_arr = get_frag_arr(cfOp);
2251 res = frag_arr[pos];
2253 if (block->attr.block.graph_arr[pos]) {
2254 /* There was a set_value() after the cfOp and no get_value before that
2255 set_value(). We must build a Phi node now. */
2256 if (block->attr.block.is_matured) {
2257 int ins = get_irn_arity(block);
2259 NEW_ARR_A(ir_node *, nin, ins);
2260 res = phi_merge(block, pos, mode, nin, ins);
2262 res = new_rd_Phi0(current_ir_graph, block, mode);
/* NOTE(review): this writes attr.phi0.pos while get_r_value_internal
   above writes attr.phi0_pos -- confirm which attribute layout is
   current; the two spellings look inconsistent. */
2263 res->attr.phi0.pos = pos;
2264 res->link = block->link;
2268 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2269 but this should be better: (remove comment if this works) */
2270 /* It's a Phi, we can write this into all graph_arrs with NULL */
2271 set_frag_value(block->attr.block.graph_arr, pos, res);
2273 res = get_r_value_internal(block, pos, mode);
2274 set_frag_value(block->attr.block.graph_arr, pos, res);
2278 } /* get_r_frag_value_internal */
2279 #endif /* PRECISE_EXC_CONTEXT */
2282 * Computes the predecessors for the real phi node, and then
2283 * allocates and returns this node. The routine called to allocate the
2284 * node might optimize it away and return a real value.
2285 * This function must be called with an in-array of proper size.
2288 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2289 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2292 /* If this block has no value at pos create a Phi0 and remember it
2293 in graph_arr to break recursions.
2294 Else we may not set graph_arr as a later value is remembered there. */
2296 if (!block->attr.block.graph_arr[pos]) {
2297 if (block == get_irg_start_block(current_ir_graph)) {
2298 /* Collapsing to Bad tarvals is no good idea.
2299 So we call a user-supplied routine here that deals with this case as
2300 appropriate for the given language. Unfortunately the only help we can give
2301 here is the position.
2303 Even if all variables are defined before use, it can happen that
2304 we get to the start block, if a Cond has been replaced by a tuple
2305 (bad, jmp). In this case we call the function needlessly, eventually
2306 generating a non-existent error.
2307 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2310 if (default_initialize_local_variable) {
2311 ir_node *rem = get_cur_block();
2313 set_cur_block(block);
2314 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2318 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2319 /* We don't need to care about exception ops in the start block.
2320 There are none by definition. */
2321 return block->attr.block.graph_arr[pos];
2323 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2324 block->attr.block.graph_arr[pos] = phi0;
2325 #if PRECISE_EXC_CONTEXT
2326 if (get_opt_precise_exc_context()) {
2327 /* Set graph_arr for fragile ops. Also here we should break recursion.
2328 We could choose a cyclic path through a cfop. But the recursion would
2329 break at some point. */
2330 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2336 /* This loop goes to all predecessor blocks of the block the Phi node
2337 is in and there finds the operands of the Phi node by calling
2338 get_r_value_internal. */
2339 for (i = 1; i <= ins; ++i) {
2340 prevCfOp = skip_Proj(block->in[i]);
2342 if (is_Bad(prevCfOp)) {
2343 /* In case a Cond has been optimized we would get right to the start block
2344 with an invalid definition. */
2345 nin[i-1] = new_Bad();
2348 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2350 if (!is_Bad(prevBlock)) {
2351 #if PRECISE_EXC_CONTEXT
/* Fragile predecessor op: take the value from its frag_arr so the
   exception edge sees the state at the op, not at block end. */
2352 if (get_opt_precise_exc_context() &&
2353 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2354 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2355 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2358 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2360 nin[i-1] = new_Bad();
2364 /* We want to pass the Phi0 node to the constructor: this finds additional
2365 optimization possibilities.
2366 The Phi0 node either is allocated in this function, or it comes from
2367 a former call to get_r_value_internal. In this case we may not yet
2368 exchange phi0, as this is done in mature_immBlock. */
2370 phi0_all = block->attr.block.graph_arr[pos];
2371 if (!((get_irn_op(phi0_all) == op_Phi) &&
2372 (get_irn_arity(phi0_all) == 0) &&
2373 (get_nodes_block(phi0_all) == block)))
2379 /* After collecting all predecessors into the array nin a new Phi node
2380 with these predecessors is created. This constructor contains an
2381 optimization: If all predecessors of the Phi node are identical it
2382 returns the only operand instead of a new Phi node. */
2383 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2385 /* In case we allocated a Phi0 node at the beginning of this procedure,
2386 we need to exchange this Phi0 with the real Phi. */
2388 exchange(phi0, res);
2389 block->attr.block.graph_arr[pos] = res;
2390 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2391 only an optimization. */
2398 * This function returns the last definition of a variable. In case
2399 * this variable was last defined in a previous block, Phi nodes are
2400 * inserted. If the part of the firm graph containing the definition
2401 * is not yet constructed, a dummy Phi node is returned.
2404 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
/* Looks up the current definition of local value `pos` in `block`,
   creating Phi/Phi0 nodes as required.  The four cases are described
   in the comment below; the visited flag of the graph distinguishes
   first visits from recursive re-entries. */
2406 /* There are 4 cases to treat.
2408 1. The block is not mature and we visit it the first time. We can not
2409 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2410 predecessors is returned. This node is added to the linked list (field
2411 "link") of the containing block to be completed when this block is
2412 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2415 2. The value is already known in this block, graph_arr[pos] is set and we
2416 visit the block the first time. We can return the value without
2417 creating any new nodes.
2419 3. The block is mature and we visit it the first time. A Phi node needs
2420 to be created (phi_merge). If the Phi is not needed, as all it's
2421 operands are the same value reaching the block through different
2422 paths, it's optimized away and the value itself is returned.
2424 4. The block is mature, and we visit it the second time. Now two
2425 subcases are possible:
2426 * The value was computed completely the last time we were here. This
2427 is the case if there is no loop. We can return the proper value.
2428 * The recursion that visited this node and set the flag did not
2429 return yet. We are computing a value in a loop and need to
2430 break the recursion. This case only happens if we visited
2431 the same block with phi_merge before, which inserted a Phi0.
2432 So we return the Phi0.
2435 /* case 4 -- already visited. */
2436 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2437 /* As phi_merge allocates a Phi0 this value is always defined. Here
2438 is the critical difference of the two algorithms. */
2439 assert(block->attr.block.graph_arr[pos]);
2440 return block->attr.block.graph_arr[pos];
2443 /* visited the first time */
2444 set_irn_visited(block, get_irg_visited(current_ir_graph));
2446 /* Get the local valid value */
2447 res = block->attr.block.graph_arr[pos];
2449 /* case 2 -- If the value is actually computed, return it. */
2450 if (res) { return res; };
2452 if (block->attr.block.is_matured) { /* case 3 */
2454 /* The Phi has the same amount of ins as the corresponding block. */
2455 int ins = get_irn_arity(block);
2457 NEW_ARR_A (ir_node *, nin, ins);
2459 /* Phi merge collects the predecessors and then creates a node. */
2460 res = phi_merge (block, pos, mode, nin, ins);
2462 } else { /* case 1 */
2463 /* The block is not mature, we don't know how many in's are needed. A Phi
2464 with zero predecessors is created. Such a Phi node is called Phi0
2465 node. The Phi0 is then added to the list of Phi0 nodes in this block
2466 to be matured by mature_immBlock later.
2467 The Phi0 has to remember the pos of it's internal value. If the real
2468 Phi is computed, pos is used to update the array with the local
2470 res = new_rd_Phi0(current_ir_graph, block, mode);
2471 res->attr.phi0.pos = pos;
2472 res->link = block->link;
2476 /* If we get here, the frontend missed a use-before-definition error */
2479 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
/* NOTE(review): the zero-initialization fallback is only valid for modes
   with a null value (irm_F .. irm_P, per the assert) -- confirm for
   other mode classes. */
2480 assert(mode->code >= irm_F && mode->code <= irm_P);
2481 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2482 get_mode_null(mode));
2485 /* The local valid value is available now. */
2486 block->attr.block.graph_arr[pos] = res;
2489 } /* get_r_value_internal */
2491 #endif /* USE_FAST_PHI_CONSTRUCTION */
2493 /* ************************************************************************** */
2496 * Finalize a Block node, when all control flows are known.
2497 * Acceptable parameters are only Block nodes.
2500 mature_immBlock(ir_node *block) {
2505 assert(get_irn_opcode(block) == iro_Block);
2506 /* @@@ should be commented in
2507 assert (!get_Block_matured(block) && "Block already matured"); */
2509 if (!get_Block_matured(block)) {
2510 ins = ARR_LEN(block->in)-1;
2511 /* Fix block parameters */
2512 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2514 /* An array for building the Phi nodes. */
2515 NEW_ARR_A(ir_node *, nin, ins);
2517 /* Traverse a chain of Phi nodes attached to this block and mature
2519 for (n = block->link; n; n = next) {
/* Fresh visited mark so each phi_merge walk starts clean. */
2520 inc_irg_visited(current_ir_graph);
2522 exchange(n, phi_merge(block, n->attr.phi0.pos, n->mode, nin, ins));
2525 block->attr.block.is_matured = 1;
2527 /* Now, as the block is a finished firm node, we can optimize it.
2528 Since other nodes have been allocated since the block was created
2529 we can not free the node on the obstack. Therefore we have to call
2531 Unfortunately the optimization does not change a lot, as all allocated
2532 nodes refer to the unoptimized node.
2533 We can call _2, as global cse has no effect on blocks. */
2534 block = optimize_in_place_2(block);
2535 IRN_VRFY_IRG(block, current_ir_graph);
2537 } /* mature_immBlock */
/* new_d_* constructors: thin wrappers that build a node in the current
   block of current_ir_graph, attaching the debug info `db`.  Const-like
   nodes (Const, Const_long, Const_type) are always placed in the start
   block. */
2540 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2541 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2545 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2546 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2550 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2551 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2552 } /* new_d_Const_long */
2555 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2556 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2557 } /* new_d_Const_type */
2561 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2562 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2566 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2567 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2571 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
/* Only Cond nodes may receive a default Proj; the Cond is marked
   fragmentary and remembers the default proj number. */
2573 assert(arg->op == op_Cond);
2574 arg->attr.cond.kind = fragmentary;
2575 arg->attr.cond.default_proj = max_proj;
2576 res = new_Proj(arg, mode_X, max_proj);
2578 } /* new_d_defaultProj */
2581 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2582 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2586 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
/* Same as new_d_Conv but with the strict flag set (last argument 1). */
2587 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2588 } /* new_d_strictConv */
2591 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2592 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2596 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2597 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2606 * Allocate the frag array.
2608 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
/* Lazily allocates the fragile-op value array for `res`, but only while
   the graph is still being built, only if `res` still has the expected
   opcode (it may have been optimized away), and only if the array was
   not already set by CSE. */
2609 if (get_opt_precise_exc_context()) {
2610 if ((current_ir_graph->phase_state == phase_building) &&
2611 (get_irn_op(res) == op) && /* Could be optimized away. */
2612 !*frag_store) /* Could be a cse where the arr is already set. */ {
2613 *frag_store = new_frag_arr(res);
2616 } /* allocate_frag_arr */
/* Constructors for fragile (exception-raising) operations: after building
   the node, allocate_frag_arr() attaches the frag array when precise
   exception context tracking is enabled (PRECISE_EXC_CONTEXT). */
2619 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2621 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode);
2622 #if PRECISE_EXC_CONTEXT
2623 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2630 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2632 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode);
2633 #if PRECISE_EXC_CONTEXT
2634 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2638 } /* new_d_DivMod */
2641 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2643 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode);
2644 #if PRECISE_EXC_CONTEXT
2645 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2652 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
2654 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode);
2655 #if PRECISE_EXC_CONTEXT
2656 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2675 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2676 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2680 new_d_Jmp(dbg_info *db) {
2681 return new_bd_Jmp(db, current_ir_graph->current_block);
2685 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2686 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2690 new_d_Cond(dbg_info *db, ir_node *c) {
2691 return new_bd_Cond(db, current_ir_graph->current_block, c);
2695 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2698 res = new_bd_Call(db, current_ir_graph->current_block,
2699 store, callee, arity, in, tp);
2700 #if PRECISE_EXC_CONTEXT
2701 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2708 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2709 return new_bd_Return(db, current_ir_graph->current_block,
2711 } /* new_d_Return */
2714 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2716 res = new_bd_Load(db, current_ir_graph->current_block,
2718 #if PRECISE_EXC_CONTEXT
2719 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2726 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2728 res = new_bd_Store(db, current_ir_graph->current_block,
2730 #if PRECISE_EXC_CONTEXT
2731 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2738 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2739 where_alloc where) {
2741 res = new_bd_Alloc(db, current_ir_graph->current_block,
2742 store, size, alloc_type, where);
2743 #if PRECISE_EXC_CONTEXT
2744 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2751 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2752 ir_node *size, ir_type *free_type, where_alloc where) {
2753 return new_bd_Free(db, current_ir_graph->current_block,
2754 store, ptr, size, free_type, where);
2758 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2759 /* GL: objptr was called frame before. Frame was a bad choice for the name
2760 as the operand could as well be a pointer to a dynamic object. */
/* A Sel with no index operands (n_index == 0, index array NULL). */
2762 return new_bd_Sel(db, current_ir_graph->current_block,
2763 store, objptr, 0, NULL, ent);
2764 } /* new_d_simpleSel */
2767 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2768 return new_bd_Sel(db, current_ir_graph->current_block,
2769 store, objptr, n_index, index, sel);
/* SymConsts are placed in the start block, like Consts. */
2773 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2774 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2776 } /* new_d_SymConst_type */
2779 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
/* Convenience variant: same as new_d_SymConst_type with firm_unknown_type. */
2780 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2781 value, kind, firm_unknown_type);
2782 } /* new_d_SymConst */
2785 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2786 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2792 return _new_d_Bad();
2796 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2797 return new_bd_Confirm(db, current_ir_graph->current_block,
2799 } /* new_d_Confirm */
2802 new_d_Unknown(ir_mode *m) {
2803 return new_bd_Unknown(m);
2804 } /* new_d_Unknown */
2807 new_d_CallBegin(dbg_info *db, ir_node *call) {
2808 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2809 } /* new_d_CallBegin */
2812 new_d_EndReg(dbg_info *db) {
2813 return new_bd_EndReg(db, current_ir_graph->current_block);
2814 } /* new_d_EndReg */
2817 new_d_EndExcept(dbg_info *db) {
2818 return new_bd_EndExcept(db, current_ir_graph->current_block);
2819 } /* new_d_EndExcept */
2822 new_d_Break(dbg_info *db) {
2823 return new_bd_Break(db, current_ir_graph->current_block);
2827 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2828 return new_bd_Filter(db, current_ir_graph->current_block,
2830 } /* new_d_Filter */
2833 (new_d_NoMem)(void) {
2834 return _new_d_NoMem();
2838 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2839 ir_node *ir_true, ir_mode *mode) {
2840 return new_bd_Mux(db, current_ir_graph->current_block,
2841 sel, ir_false, ir_true, mode);
2845 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2846 return new_bd_Psi(db, current_ir_graph->current_block,
2847 arity, conds, vals, mode);
2850 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2851 ir_node *dst, ir_node *src, ir_type *data_type) {
2853 res = new_bd_CopyB(db, current_ir_graph->current_block,
2854 store, dst, src, data_type);
2855 #if PRECISE_EXC_CONTEXT
2856 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2862 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2863 return new_bd_InstOf(db, current_ir_graph->current_block,
2864 store, objptr, type);
2865 } /* new_d_InstOf */
2868 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2869 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2872 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2873 ir_node *idx, ir_node *lower, ir_node *upper) {
2875 res = new_bd_Bound(db, current_ir_graph->current_block,
2876 store, idx, lower, upper);
2877 #if PRECISE_EXC_CONTEXT
2878 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2884 new_d_Pin(dbg_info *db, ir_node *node) {
2885 return new_bd_Pin(db, current_ir_graph->current_block, node);
2888 /* ********************************************************************* */
2889 /* Comfortable interface with automatic Phi node construction. */
2890 /* (Uses also constructors of ?? interface, except new_Block. */
2891 /* ********************************************************************* */
2893 /* Block construction */
2894 /* immature Block without predecessors */
2896 new_d_immBlock(dbg_info *db) {
/* Creates a new immature block (arity -1, predecessors added later via
   add_immBlock_pred) and makes it the current block. */
2899 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2900 /* creates a new dynamic in-array as length of in is -1 */
2901 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2902 current_ir_graph->current_block = res;
2904 /* macroblock head */
/* Block attributes: fresh immature block, its own macroblock head. */
2907 res->attr.block.is_matured = 0;
2908 res->attr.block.is_dead = 0;
2909 res->attr.block.is_mb_head = 1;
2910 res->attr.block.irg = current_ir_graph;
2911 res->attr.block.backedge = NULL;
2912 res->attr.block.in_cg = NULL;
2913 res->attr.block.cg_backedge = NULL;
2914 res->attr.block.extblk = NULL;
2915 res->attr.block.region = NULL;
2916 res->attr.block.mb_depth = 0;
2918 set_Block_block_visited(res, 0);
2920 /* Create and initialize array for Phi-node construction. */
2921 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2922 current_ir_graph->n_loc);
2923 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2925 /* Immature block may not be optimized! */
2926 IRN_VRFY_IRG(res, current_ir_graph);
2929 } /* new_d_immBlock */
2932 new_immBlock(void) {
/* Debug-info-free variant of new_d_immBlock. */
2933 return new_d_immBlock(NULL);
2934 } /* new_immBlock */
2936 /* immature PartBlock with its predecessors */
2938 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2939 ir_node *res = new_d_immBlock(db);
2940 ir_node *blk = get_nodes_block(pred_jmp);
/* A part block continues the macroblock of pred_jmp's block: it shares
   the macroblock head and sits one level deeper. */
2942 res->in[0] = blk->in[0];
2943 res->attr.block.is_mb_head = 0;
2944 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2946 add_immBlock_pred(res, pred_jmp);
2949 } /* new_d_immPartBlock */
2952 new_immPartBlock(ir_node *pred_jmp) {
2953 return new_d_immPartBlock(NULL, pred_jmp);
2954 } /* new_immPartBlock */
2956 /* add an edge to a jmp/control flow node */
2958 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2959 int n = ARR_LEN(block->in) - 1;
2961 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2962 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2963 assert(jmp != NULL);
/* Append jmp to the dynamic in-array and notify the edge hook. */
2965 ARR_APP1(ir_node *, block->in, jmp);
2967 hook_set_irn_n(block, n, jmp, NULL);
2968 } /* add_immBlock_pred */
2970 /* changing the current block */
2972 set_cur_block(ir_node *target) {
2973 current_ir_graph->current_block = target;
2974 } /* set_cur_block */
2976 /* ************************ */
2977 /* parameter administration */
2979 /* get a value from the parameter array from the current block by its index */
2981 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2982 ir_graph *irg = current_ir_graph;
2983 assert(get_irg_phase_state(irg) == phase_building);
2984 inc_irg_visited(irg);
/* NOTE: user positions are shifted by one -- slot 0 of graph_arr holds
   the memory state (see get_store/set_store below). */
2986 return get_r_value_internal(irg->current_block, pos + 1, mode);
2989 /* get a value from the parameter array from the current block by its index */
2991 get_value(int pos, ir_mode *mode) {
2992 return get_d_value(NULL, pos, mode);
2995 /* set a value at position pos in the parameter array from the current block */
2997 set_value(int pos, ir_node *value) {
2998 ir_graph *irg = current_ir_graph;
2999 assert(get_irg_phase_state(irg) == phase_building);
3000 assert(pos+1 < irg->n_loc);
3001 irg->current_block->attr.block.graph_arr[pos + 1] = value;
3004 /* Find the value number for a node in the current block.*/
3006 find_value(ir_node *value) {
3008 ir_node *bl = current_ir_graph->current_block;
/* Linear scan over the local-value array; slot 0 (memory) is excluded. */
3010 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
3011 if (bl->attr.block.graph_arr[i] == value)
3016 /* get the current store */
/* The current memory state lives in slot 0 of graph_arr with mode_M. */
3019 ir_graph *irg = current_ir_graph;
3021 assert(get_irg_phase_state(irg) == phase_building);
3022 /* GL: one could call get_value instead */
3023 inc_irg_visited(irg);
3024 return get_r_value_internal(irg->current_block, 0, mode_M);
3027 /* set the current store: handles automatic Sync construction for Load nodes */
3029 set_store(ir_node *store) {
3030 ir_node *load, *pload, *pred, *in[2];
3032 assert(get_irg_phase_state(current_ir_graph) == phase_building);
3033 /* Beware: due to dead code elimination, a store might become a Bad node even in
3034 the construction phase. */
3035 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
3037 if (get_opt_auto_create_sync()) {
3038 /* handle non-volatile Load nodes by automatically creating Sync's */
3039 load = skip_Proj(store);
3040 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3041 pred = get_Load_mem(load);
3043 if (is_Sync(pred)) {
3044 /* a Load after a Sync: move it up */
3045 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3047 set_Load_mem(load, get_memop_mem(mem));
3048 add_Sync_pred(pred, store);
3051 pload = skip_Proj(pred);
3052 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3053 /* a Load after a Load: create a new Sync */
3054 set_Load_mem(load, get_Load_mem(pload));
3058 store = new_Sync(2, in);
/* Finally record the (possibly rewritten) store in slot 0 of the
   current block's local-value array. */
3063 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3067 keep_alive(ir_node *ka) {
/* Attach ka to the End node's keep-alive list so it survives dead node
   elimination. */
3068 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3071 /* --- Useful access routines --- */
3072 /* Returns the current block of the current graph. To set the current
3073 block use set_cur_block. */
3074 ir_node *get_cur_block(void) {
3075 return get_irg_current_block(current_ir_graph);
3076 } /* get_cur_block */
3078 /* Returns the frame type of the current graph */
3079 ir_type *get_cur_frame_type(void) {
3080 return get_irg_frame_type(current_ir_graph);
3081 } /* get_cur_frame_type */
3084 /* ********************************************************************* */
3087 /* call once for each run of the library */
3089 init_cons(uninitialized_local_variable_func_t *func) {
/* Registers the frontend callback used to initialize local variables
   that are read before being defined. */
3090 default_initialize_local_variable = func;
3094 irp_finalize_cons(void) {
/* Finalize construction of every graph in the program and advance the
   program's phase to phase_high. */
3096 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3097 irg_finalize_cons(get_irp_irg(i));
3099 irp->phase_state = phase_high;
3100 } /* irp_finalize_cons */
/* Convenience constructors without debug info: each simply forwards to
   its new_d_* counterpart with db == NULL. */
3103 ir_node *new_Block(int arity, ir_node **in) {
3104 return new_d_Block(NULL, arity, in);
3106 ir_node *new_Start(void) {
3107 return new_d_Start(NULL);
3109 ir_node *new_End(void) {
3110 return new_d_End(NULL);
3112 ir_node *new_Jmp(void) {
3113 return new_d_Jmp(NULL);
3115 ir_node *new_IJmp(ir_node *tgt) {
3116 return new_d_IJmp(NULL, tgt);
3118 ir_node *new_Cond(ir_node *c) {
3119 return new_d_Cond(NULL, c);
3121 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3122 return new_d_Return(NULL, store, arity, in);
3124 ir_node *new_Const(ir_mode *mode, tarval *con) {
3125 return new_d_Const(NULL, mode, con);
3128 ir_node *new_Const_long(ir_mode *mode, long value) {
3129 return new_d_Const_long(NULL, mode, value);
3132 ir_node *new_Const_type(tarval *con, ir_type *tp) {
/* The mode is derived from the given type here. */
3133 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3136 ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
3137 return new_d_SymConst_type(NULL, value, kind, type);
3139 ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
3140 return new_d_SymConst(NULL, value, kind);
3142 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3143 return new_d_simpleSel(NULL, store, objptr, ent);
3145 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3147 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3149 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3151 return new_d_Call(NULL, store, callee, arity, in, tp);
3153 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3154 return new_d_Add(NULL, op1, op2, mode);
3156 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3157 return new_d_Sub(NULL, op1, op2, mode);
3159 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3160 return new_d_Minus(NULL, op, mode);
3162 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3163 return new_d_Mul(NULL, op1, op2, mode);
3165 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3166 return new_d_Quot(NULL, memop, op1, op2, mode);
3168 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3169 return new_d_DivMod(NULL, memop, op1, op2, mode);
3171 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3172 return new_d_Div(NULL, memop, op1, op2, mode);
3174 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
3175 return new_d_Mod(NULL, memop, op1, op2, mode);
3177 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3178 return new_d_Abs(NULL, op, mode);
3180 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3181 return new_d_And(NULL, op1, op2, mode);
3183 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3184 return new_d_Or(NULL, op1, op2, mode);
3186 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3187 return new_d_Eor(NULL, op1, op2, mode);
3189 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3190 return new_d_Not(NULL, op, mode);
3192 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3193 return new_d_Shl(NULL, op, k, mode);
3195 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3196 return new_d_Shr(NULL, op, k, mode);
3198 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3199 return new_d_Shrs(NULL, op, k, mode);
3201 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
3202 return new_d_Rot(NULL, op, k, mode);
3204 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3205 return new_d_Carry(NULL, op1, op2, mode);
3207 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3208 return new_d_Borrow(NULL, op1, op2, mode);
3210 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3211 return new_d_Cmp(NULL, op1, op2);
3213 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3214 return new_d_Conv(NULL, op, mode);
3216 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3217 return new_d_strictConv(NULL, op, mode);
3219 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3220 return new_d_Cast(NULL, op, to_tp);
3222 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3223 return new_d_Phi(NULL, arity, in, mode);
3225 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3226 return new_d_Load(NULL, store, addr, mode);
3228 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3229 return new_d_Store(NULL, store, addr, val);
3231 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3232 where_alloc where) {
3233 return new_d_Alloc(NULL, store, size, alloc_type, where);
3235 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3236 ir_type *free_type, where_alloc where) {
3237 return new_d_Free(NULL, store, ptr, size, free_type, where);
3239 ir_node *new_Sync(int arity, ir_node *in[]) {
3240 return new_d_Sync(NULL, arity, in);
3242 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3243 return new_d_Proj(NULL, arg, mode, proj);
3245 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3246 return new_d_defaultProj(NULL, arg, max_proj);
3248 ir_node *new_Tuple(int arity, ir_node **in) {
3249 return new_d_Tuple(NULL, arity, in);
3251 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3252 return new_d_Id(NULL, val, mode);
3254 ir_node *new_Bad(void) {
3257 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3258 return new_d_Confirm (NULL, val, bound, cmp);
3260 ir_node *new_Unknown(ir_mode *m) {
3261 return new_d_Unknown(m);
3263 ir_node *new_CallBegin(ir_node *callee) {
3264 return new_d_CallBegin(NULL, callee);
3266 ir_node *new_EndReg(void) {
3267 return new_d_EndReg(NULL);
3269 ir_node *new_EndExcept(void) {
3270 return new_d_EndExcept(NULL);
3272 ir_node *new_Break(void) {
3273 return new_d_Break(NULL);
3275 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3276 return new_d_Filter(NULL, arg, mode, proj);
3278 ir_node *new_NoMem(void) {
3279 return new_d_NoMem();
3281 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3282 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3284 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3285 return new_d_Psi(NULL, arity, conds, vals, mode);
3287 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3288 return new_d_CopyB(NULL, store, dst, src, data_type);
3290 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3291 return new_d_InstOf(NULL, store, objptr, ent);
3293 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3294 return new_d_Raise(NULL, store, obj);
3296 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3297 return new_d_Bound(NULL, store, idx, lower, upper);
3299 ir_node *new_Pin(ir_node *node) {
3300 return new_d_Pin(NULL, node);