2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
/* NOTE(review): this excerpt is a sampled listing -- individual lines
   (declarations, returns, #else/#endif lines) are missing throughout. */
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
60 /* when we need verifying */
/* NOTE(review): two alternative definitions of IRN_VRFY_IRG appear here;
   presumably the empty one is for builds without node verification and the
   second enables irn_vrfy_irg() -- the selecting #if/#else lines are not
   visible in this excerpt, confirm against the full file. */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
/* Callback used to give uninitialized local variables a value; NULL until
   the frontend installs a language-specific handler. */
68 * Language dependent variable initialization callback.
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* Generator macros for the "bd" (block + debug info) constructor family.
   Each expands to a function new_bd_<instr>() that builds the node on
   current_ir_graph, runs local optimization and verifies the result. */
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
/* Division ops take a memory operand and produce a mode_T tuple; the
   requested result mode is stored in attr.divmod.res_mode. */
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = op_pin_state_pinned; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
/* Generator macros for the "rd" (explicit graph + debug info) family.
   Each wrapper temporarily switches current_ir_graph to the given irg,
   delegates to the corresponding new_bd_* constructor, then restores it. */
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for an divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode); \
161 current_ir_graph = rem; \
/* "d" family: builds in current_ir_graph->current_block (the block the
   frontend is currently filling). */
165 /* creates a d constructor for an binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Cannot be used with automatic
183 * Phi node construction.
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
/* The block itself has no enclosing block, hence NULL. */
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.irg = irg;
/* Back-edge flags, one per predecessor, allocated on the graph obstack. */
198 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
199 res->attr.block.in_cg = NULL;
200 res->attr.block.cg_backedge = NULL;
201 res->attr.block.extblk = NULL;
202 res->attr.block.mb_depth = 0;
/* All predecessors are given up front, so the block is born mature. */
204 set_Block_matured(res, 1);
205 set_Block_block_visited(res, 0);
207 IRN_VRFY_IRG(res, irg);
/* Start node: 0-ary, produces the mode_T start tuple. */
212 new_bd_Start(dbg_info *db, ir_node *block) {
214 ir_graph *irg = current_ir_graph;
216 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
218 IRN_VRFY_IRG(res, irg);
/* End node: arity -1 means a dynamic predecessor array (keep-alives). */
223 new_bd_End(dbg_info *db, ir_node *block) {
225 ir_graph *irg = current_ir_graph;
227 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
229 IRN_VRFY_IRG(res, irg);
234 * Creates a Phi node with all predecessors. Calling this constructor
235 * is only allowed if the corresponding block is mature.
238 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
240 ir_graph *irg = current_ir_graph;
244 /* Don't assert that block matured: the use of this constructor is strongly
/* Sanity: a mature block must have exactly one in[] entry per CFG pred. */
246 if ( get_Block_matured(block) )
247 assert( get_irn_arity(block) == arity );
249 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
251 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan the operands for Unknown inputs; with Unknown operands the node
   must not be handed to the optimizer (see has_unknown below). */
253 for (i = arity-1; i >= 0; i--)
254 if (get_irn_op(in[i]) == op_Unknown) {
259 if (!has_unknown) res = optimize_node(res);
260 IRN_VRFY_IRG(res, irg);
262 /* Memory Phis in endless loops must be kept alive.
263 As we can't distinguish these easily we keep all of them alive. */
264 if ((res->op == op_Phi) && (mode == mode_M))
265 add_End_keepalive(get_irg_end(irg), res);
/* Const: note the node is placed in the start block, deliberately
   ignoring the `block` parameter -- constants float in the start block. */
270 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
272 ir_graph *irg = current_ir_graph;
275 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
276 res->attr.con.tv = con;
277 set_Const_type(res, tp); /* Call method because of complex assertion. */
278 res = optimize_node (res);
279 assert(get_Const_type(res) == tp);
280 IRN_VRFY_IRG(res, irg);
283 } /* new_bd_Const_type */
/* Const without an explicit type: delegates with firm_unknown_type. */
286 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
287 ir_graph *irg = current_ir_graph;
289 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Const from a host `long`, converted to a tarval of the given mode. */
293 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
294 ir_graph *irg = current_ir_graph;
296 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
297 } /* new_bd_Const_long */
/* Id: identity node forwarding val; normally folded away by optimize_node. */
300 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
302 ir_graph *irg = current_ir_graph;
304 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
305 res = optimize_node(res);
306 IRN_VRFY_IRG(res, irg);
/* Proj: selects one result out of a mode_T tuple node `arg`. */
311 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
314 ir_graph *irg = current_ir_graph;
316 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
317 res->attr.proj = proj;
320 assert(get_Proj_pred(res));
321 assert(get_nodes_block(get_Proj_pred(res)));
323 res = optimize_node(res);
325 IRN_VRFY_IRG(res, irg);
/* defaultProj: the "otherwise" exit of a Cond; marks the Cond fragmentary
   and records max_proj as its default projection number. */
330 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
333 ir_graph *irg = current_ir_graph;
335 assert(arg->op == op_Cond);
336 arg->attr.cond.kind = fragmentary;
337 arg->attr.cond.default_proj = max_proj;
338 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
340 } /* new_bd_defaultProj */
/* Conv: mode conversion of op; strict_flag is stored in attr.conv.strict. */
343 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
345 ir_graph *irg = current_ir_graph;
347 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
348 res->attr.conv.strict = strict_flag;
349 res = optimize_node(res);
350 IRN_VRFY_IRG(res, irg);
/* Cast: retypes op to to_tp without changing its mode. */
355 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
357 ir_graph *irg = current_ir_graph;
359 assert(is_atomic_type(to_tp));
361 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
362 res->attr.cast.totype = to_tp;
363 res = optimize_node(res);
364 IRN_VRFY_IRG(res, irg);
/* Tuple: groups arity nodes into one mode_T value. */
369 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
371 ir_graph *irg = current_ir_graph;
373 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
374 res = optimize_node (res);
375 IRN_VRFY_IRG(res, irg);
/* Cmp: compares op1 and op2; result is a mode_T tuple of relation Projs. */
400 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
403 ir_graph *irg = current_ir_graph;
406 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
407 res = optimize_node(res);
408 IRN_VRFY_IRG(res, irg);
/* Jmp: unconditional control flow out of block. */
413 new_bd_Jmp(dbg_info *db, ir_node *block) {
415 ir_graph *irg = current_ir_graph;
417 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
418 res = optimize_node (res);
419 IRN_VRFY_IRG(res, irg);
/* IJmp: computed jump to address tgt; may be optimized into another op. */
424 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
426 ir_graph *irg = current_ir_graph;
428 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
429 res = optimize_node (res);
430 IRN_VRFY_IRG(res, irg);
432 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
/* Cond: conditional branch on c; Projs select the successor. */
438 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
443 res->attr.cond.kind = dense;
444 res->attr.cond.default_proj = 0;
445 res->attr.cond.pred = COND_JMP_PRED_NONE;
446 res = optimize_node (res);
447 IRN_VRFY_IRG(res, irg);
/* Call: inputs are [store, callee, arg0..argN-1]; result is mode_T. */
452 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
453 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
457 ir_graph *irg = current_ir_graph;
/* r_in holds store and callee followed by the actual arguments. */
460 NEW_ARR_A(ir_node *, r_in, r_arity);
463 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
465 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
467 assert((get_unknown_type() == tp) || is_Method_type(tp));
468 set_Call_type(res, tp);
469 res->attr.call.exc.pin_state = op_pin_state_pinned;
470 res->attr.call.callee_arr = NULL;
471 res = optimize_node(res);
472 IRN_VRFY_IRG(res, irg);
/* Return: inputs are [store, res0..resN-1]. */
477 new_bd_Return(dbg_info *db, ir_node *block,
478 ir_node *store, int arity, ir_node **in) {
482 ir_graph *irg = current_ir_graph;
485 NEW_ARR_A (ir_node *, r_in, r_arity);
487 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
488 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
489 res = optimize_node(res);
490 IRN_VRFY_IRG(res, irg);
492 } /* new_bd_Return */
/* Load: reads mode `mode` from adr; defaults to pinned, non-volatile. */
495 new_bd_Load(dbg_info *db, ir_node *block,
496 ir_node *store, ir_node *adr, ir_mode *mode) {
499 ir_graph *irg = current_ir_graph;
503 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
504 res->attr.load.exc.pin_state = op_pin_state_pinned;
505 res->attr.load.load_mode = mode;
506 res->attr.load.volatility = volatility_non_volatile;
507 res = optimize_node(res);
508 IRN_VRFY_IRG(res, irg);
/* Store: writes val to adr; defaults to pinned, non-volatile. */
513 new_bd_Store(dbg_info *db, ir_node *block,
514 ir_node *store, ir_node *adr, ir_node *val) {
517 ir_graph *irg = current_ir_graph;
522 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
523 res->attr.store.exc.pin_state = op_pin_state_pinned;
524 res->attr.store.volatility = volatility_non_volatile;
525 res = optimize_node(res);
526 IRN_VRFY_IRG(res, irg);
/* Alloc: allocates `size` bytes of alloc_type on stack or heap (`where`). */
531 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
532 ir_node *size, ir_type *alloc_type, where_alloc where) {
535 ir_graph *irg = current_ir_graph;
539 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
540 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
541 res->attr.alloc.where = where;
542 res->attr.alloc.type = alloc_type;
543 res = optimize_node(res);
544 IRN_VRFY_IRG(res, irg);
/* Free: releases the allocation at ptr; result is only the memory state. */
549 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
550 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
553 ir_graph *irg = current_ir_graph;
558 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
559 res->attr.free.where = where;
560 res->attr.free.type = free_type;
561 res = optimize_node(res);
562 IRN_VRFY_IRG(res, irg);
/* Sel: selects entity ent relative to objptr; inputs are
   [store, objptr, index0..indexN-1] for array-typed entities. */
567 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
568 int arity, ir_node **in, ir_entity *ent) {
572 ir_graph *irg = current_ir_graph;
/* Method entities yield code pointers, everything else data pointers. */
573 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
575 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
578 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
581 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
583 * Sel's can select functions which should be of mode mode_P_code.
585 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
586 res->attr.sel.ent = ent;
587 res = optimize_node(res);
588 IRN_VRFY_IRG(res, irg);
/* SymConst: symbolic constant (address, size, alignment, ...). */
593 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
594 symconst_kind symkind, ir_type *tp) {
597 ir_graph *irg = current_ir_graph;
599 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
600 mode = mode_P_data; /* FIXME: can be mode_P_code */
604 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
606 res->attr.symc.num = symkind;
607 res->attr.symc.sym = value;
608 res->attr.symc.tp = tp;
610 res = optimize_node(res);
611 IRN_VRFY_IRG(res, irg);
613 } /* new_bd_SymConst_type */
/* Sync: joins several memory states; starts empty (arity -1 = dynamic). */
616 new_bd_Sync(dbg_info *db, ir_node *block) {
618 ir_graph *irg = current_ir_graph;
620 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
621 /* no need to call optimize node here, Sync are always created with no predecessors */
622 IRN_VRFY_IRG(res, irg);
/* Confirm: asserts `val cmp bound` holds from here on; value of val. */
627 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
628 ir_node *in[2], *res;
629 ir_graph *irg = current_ir_graph;
633 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
634 res->attr.confirm.cmp = cmp;
635 res = optimize_node(res);
636 IRN_VRFY_IRG(res, irg);
638 } /* new_bd_Confirm */
/* Unknown: placeholder value of mode m, placed in the start block. */
641 new_bd_Unknown(ir_mode *m) {
643 ir_graph *irg = current_ir_graph;
645 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
646 res = optimize_node(res);
648 } /* new_bd_Unknown */
/* CallBegin: interprocedural-view begin of a call; keeps the call's
   function pointer as its only input. */
651 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
654 ir_graph *irg = current_ir_graph;
656 in[0] = get_Call_ptr(call);
657 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
658 /* res->attr.callbegin.irg = irg; */
659 res->attr.callbegin.call = call;
660 res = optimize_node(res);
661 IRN_VRFY_IRG(res, irg);
663 } /* new_bd_CallBegin */
/* EndReg: interprocedural end of regular control flow; registered on irg. */
666 new_bd_EndReg(dbg_info *db, ir_node *block) {
668 ir_graph *irg = current_ir_graph;
670 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
671 set_irg_end_reg(irg, res);
672 IRN_VRFY_IRG(res, irg);
674 } /* new_bd_EndReg */
/* EndExcept: interprocedural end of exceptional control flow. */
677 new_bd_EndExcept(dbg_info *db, ir_node *block) {
679 ir_graph *irg = current_ir_graph;
681 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
682 set_irg_end_except(irg, res);
683 IRN_VRFY_IRG (res, irg);
685 } /* new_bd_EndExcept */
/* Break: control flow edge used in the interprocedural view. */
688 new_bd_Break(dbg_info *db, ir_node *block) {
690 ir_graph *irg = current_ir_graph;
692 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
693 res = optimize_node(res);
694 IRN_VRFY_IRG(res, irg);
/* Filter: interprocedural analogue of Proj on arg. */
699 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
702 ir_graph *irg = current_ir_graph;
704 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
705 res->attr.filter.proj = proj;
706 res->attr.filter.in_cg = NULL;
707 res->attr.filter.backedge = NULL;
710 assert(get_Proj_pred(res));
711 assert(get_nodes_block(get_Proj_pred(res)));
713 res = optimize_node(res);
714 IRN_VRFY_IRG(res, irg);
716 } /* new_bd_Filter */
/* Mux: sel ? ir_true : ir_false, all data inputs, no control flow. */
719 new_bd_Mux(dbg_info *db, ir_node *block,
720 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
723 ir_graph *irg = current_ir_graph;
729 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
732 res = optimize_node(res);
733 IRN_VRFY_IRG(res, irg);
/* Psi: generalized Mux with `arity` condition/value pairs followed by a
   final default value; inputs are interleaved [c0, v0, c1, v1, ..., dflt]. */
738 new_bd_Psi(dbg_info *db, ir_node *block,
739 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
742 ir_graph *irg = current_ir_graph;
745 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
747 for (i = 0; i < arity; ++i) {
749 in[2 * i + 1] = vals[i];
753 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
756 res = optimize_node(res);
757 IRN_VRFY_IRG(res, irg);
/* CopyB: block copy of data_type from src to dst, threading the store. */
762 new_bd_CopyB(dbg_info *db, ir_node *block,
763 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
766 ir_graph *irg = current_ir_graph;
772 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
774 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
775 res->attr.copyb.data_type = data_type;
776 res = optimize_node(res);
777 IRN_VRFY_IRG(res, irg);
/*
 * InstOf: runtime type test of the object at objptr against `type`,
 * threading the memory state `store`; result is a mode_T tuple.
 *
 * BUG(review): the node is created with op_Sel, yet its attributes are
 * written through attr.instof and the function is named new_bd_InstOf --
 * this strongly looks like it should pass op_InstOf to new_ir_node.
 * A node carrying the Sel opcode with instof attributes will mislead every
 * consumer that dispatches on the opcode. Confirm against irop.h/upstream
 * history before changing.
 */
782 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
783 ir_node *objptr, ir_type *type) {
786 ir_graph *irg = current_ir_graph;
790 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
791 res->attr.instof.type = type;
792 res = optimize_node(res);
793 IRN_VRFY_IRG(res, irg);
795 } /* new_bd_InstOf */
/* Raise: raises the exception object obj; inputs [store, obj]. */
798 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
801 ir_graph *irg = current_ir_graph;
805 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
806 res = optimize_node(res);
807 IRN_VRFY_IRG(res, irg);
/* Bound: checks lower <= idx < upper; pinned because it may raise. */
812 new_bd_Bound(dbg_info *db, ir_node *block,
813 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
816 ir_graph *irg = current_ir_graph;
822 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
823 res->attr.bound.exc.pin_state = op_pin_state_pinned;
824 res = optimize_node(res);
825 IRN_VRFY_IRG(res, irg);
/* Pin: pins `node` to this block; same mode as its operand. */
830 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
832 ir_graph *irg = current_ir_graph;
834 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
835 res = optimize_node(res);
836 IRN_VRFY_IRG(res, irg);
/* ASM: inline assembler; constraint/clobber arrays are copied onto the
   graph obstack so callers may pass stack-allocated arrays. */
841 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
842 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
844 ir_graph *irg = current_ir_graph;
847 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
848 res->attr.assem.pin_state = op_pin_state_pinned;
849 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
850 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
851 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
852 res->attr.assem.asm_text = asm_text;
854 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
855 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
857 res = optimize_node(res);
858 IRN_VRFY_IRG(res, irg);
862 /* --------------------------------------------- */
863 /* private interfaces, for professional use only */
864 /* --------------------------------------------- */
/* The new_rd_* functions below all follow one pattern: save
   current_ir_graph, switch to the caller-supplied irg, delegate to the
   matching new_bd_* constructor, restore the saved graph. */
866 /* Constructs a Block with a fixed number of predecessors.
867 Does not set current_block. Can not be used with automatic
868 Phi node construction. */
870 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
871 ir_graph *rem = current_ir_graph;
874 current_ir_graph = irg;
875 res = new_bd_Block(db, arity, in);
876 current_ir_graph = rem;
882 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
883 ir_graph *rem = current_ir_graph;
886 current_ir_graph = irg;
887 res = new_bd_Start(db, block);
888 current_ir_graph = rem;
894 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
896 ir_graph *rem = current_ir_graph;
898 current_ir_graph = irg;
899 res = new_bd_End(db, block);
900 current_ir_graph = rem;
905 /* Creates a Phi node with all predecessors. Calling this constructor
906 is only allowed if the corresponding block is mature. */
908 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
910 ir_graph *rem = current_ir_graph;
912 current_ir_graph = irg;
913 res = new_bd_Phi(db, block,arity, in, mode);
914 current_ir_graph = rem;
920 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
922 ir_graph *rem = current_ir_graph;
924 current_ir_graph = irg;
925 res = new_bd_Const_type(db, block, mode, con, tp);
926 current_ir_graph = rem;
929 } /* new_rd_Const_type */
932 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
934 ir_graph *rem = current_ir_graph;
936 current_ir_graph = irg;
937 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
938 current_ir_graph = rem;
/* Const_long needs no graph switch: it only builds a tarval and delegates. */
944 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
945 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
946 } /* new_rd_Const_long */
949 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
951 ir_graph *rem = current_ir_graph;
953 current_ir_graph = irg;
954 res = new_bd_Id(db, block, val, mode);
955 current_ir_graph = rem;
961 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
964 ir_graph *rem = current_ir_graph;
966 current_ir_graph = irg;
967 res = new_bd_Proj(db, block, arg, mode, proj);
968 current_ir_graph = rem;
974 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
977 ir_graph *rem = current_ir_graph;
979 current_ir_graph = irg;
980 res = new_bd_defaultProj(db, block, arg, max_proj);
981 current_ir_graph = rem;
984 } /* new_rd_defaultProj */
/* Conv via the rd interface is always non-strict (strict_flag = 0). */
987 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
989 ir_graph *rem = current_ir_graph;
991 current_ir_graph = irg;
992 res = new_bd_Conv(db, block, op, mode, 0);
993 current_ir_graph = rem;
999 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1001 ir_graph *rem = current_ir_graph;
1003 current_ir_graph = irg;
1004 res = new_bd_Cast(db, block, op, to_tp);
1005 current_ir_graph = rem;
1011 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1013 ir_graph *rem = current_ir_graph;
1015 current_ir_graph = irg;
1016 res = new_bd_Tuple(db, block, arity, in);
1017 current_ir_graph = rem;
1020 } /* new_rd_Tuple */
/* Instantiations of the rd constructor-generator macros. */
1027 NEW_RD_DIVOP(DivMod)
1040 NEW_RD_BINOP(Borrow)
/* Remaining new_rd_* wrappers: switch current_ir_graph to irg, delegate
   to the matching new_bd_* constructor, restore the previous graph. */
1043 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1044 ir_node *op1, ir_node *op2) {
1046 ir_graph *rem = current_ir_graph;
1048 current_ir_graph = irg;
1049 res = new_bd_Cmp(db, block, op1, op2);
1050 current_ir_graph = rem;
1056 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1058 ir_graph *rem = current_ir_graph;
1060 current_ir_graph = irg;
1061 res = new_bd_Jmp(db, block);
1062 current_ir_graph = rem;
1068 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1070 ir_graph *rem = current_ir_graph;
1072 current_ir_graph = irg;
1073 res = new_bd_IJmp(db, block, tgt);
1074 current_ir_graph = rem;
1080 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1082 ir_graph *rem = current_ir_graph;
1084 current_ir_graph = irg;
1085 res = new_bd_Cond(db, block, c);
1086 current_ir_graph = rem;
1092 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1093 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1095 ir_graph *rem = current_ir_graph;
1097 current_ir_graph = irg;
1098 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1099 current_ir_graph = rem;
1105 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1106 ir_node *store, int arity, ir_node **in) {
1108 ir_graph *rem = current_ir_graph;
1110 current_ir_graph = irg;
1111 res = new_bd_Return(db, block, store, arity, in);
1112 current_ir_graph = rem;
1115 } /* new_rd_Return */
1118 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1119 ir_node *store, ir_node *adr, ir_mode *mode) {
1121 ir_graph *rem = current_ir_graph;
1123 current_ir_graph = irg;
1124 res = new_bd_Load(db, block, store, adr, mode);
1125 current_ir_graph = rem;
1131 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1132 ir_node *store, ir_node *adr, ir_node *val) {
1134 ir_graph *rem = current_ir_graph;
1136 current_ir_graph = irg;
1137 res = new_bd_Store(db, block, store, adr, val);
1138 current_ir_graph = rem;
1141 } /* new_rd_Store */
1144 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1145 ir_node *size, ir_type *alloc_type, where_alloc where) {
1147 ir_graph *rem = current_ir_graph;
1149 current_ir_graph = irg;
1150 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1151 current_ir_graph = rem;
1154 } /* new_rd_Alloc */
1157 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1158 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1160 ir_graph *rem = current_ir_graph;
1162 current_ir_graph = irg;
1163 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1164 current_ir_graph = rem;
/* simpleSel: Sel of a scalar (non-array) entity -- no index inputs. */
1170 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1171 ir_node *store, ir_node *objptr, ir_entity *ent) {
1173 ir_graph *rem = current_ir_graph;
1175 current_ir_graph = irg;
1176 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1177 current_ir_graph = rem;
1180 } /* new_rd_simpleSel */
1183 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1184 int arity, ir_node **in, ir_entity *ent) {
1186 ir_graph *rem = current_ir_graph;
1188 current_ir_graph = irg;
1189 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1190 current_ir_graph = rem;
1196 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1197 symconst_kind symkind, ir_type *tp) {
1199 ir_graph *rem = current_ir_graph;
1201 current_ir_graph = irg;
1202 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1203 current_ir_graph = rem;
1206 } /* new_rd_SymConst_type */
/* SymConst without an explicit type: uses firm_unknown_type. */
1209 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1210 symconst_kind symkind) {
1211 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1213 } /* new_rd_SymConst */
1215 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1216 symconst_symbol sym;
1217 sym.entity_p = symbol;
1218 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1219 } /* new_rd_SymConst_addr_ent */
1221 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1222 symconst_symbol sym;
1223 sym.entity_p = symbol;
1224 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1225 } /* new_rd_SymConst_ofs_ent */
1227 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1228 symconst_symbol sym;
1229 sym.ident_p = symbol;
1230 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1231 } /* new_rd_SymConst_addr_name */
1233 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1234 symconst_symbol sym;
1235 sym.type_p = symbol;
1236 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1237 } /* new_rd_SymConst_type_tag */
1239 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1240 symconst_symbol sym;
1241 sym.type_p = symbol;
1242 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1243 } /* new_rd_SymConst_size */
1245 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1246 symconst_symbol sym;
1247 sym.type_p = symbol;
1248 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1249 } /* new_rd_SymConst_align */
/* Sync: builds an empty Sync on irg, then appends the arity given
   memory predecessors one by one. */
1252 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1254 ir_graph *rem = current_ir_graph;
1257 current_ir_graph = irg;
1258 res = new_bd_Sync(db, block);
1259 current_ir_graph = rem;
1261 for (i = 0; i < arity; ++i)
1262 add_Sync_pred(res, in[i]);
/* Bad: one shared Bad node per graph; no construction needed. */
1268 new_rd_Bad(ir_graph *irg) {
1269 return get_irg_bad(irg);
1273 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1275 ir_graph *rem = current_ir_graph;
1277 current_ir_graph = irg;
1278 res = new_bd_Confirm(db, block, val, bound, cmp);
1279 current_ir_graph = rem;
1282 } /* new_rd_Confirm */
1285 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1287 ir_graph *rem = current_ir_graph;
1289 current_ir_graph = irg;
1290 res = new_bd_Unknown(m);
1291 current_ir_graph = rem;
1294 } /* new_rd_Unknown */
1297 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1299 ir_graph *rem = current_ir_graph;
1301 current_ir_graph = irg;
1302 res = new_bd_CallBegin(db, block, call);
1303 current_ir_graph = rem;
1306 } /* new_rd_CallBegin */
/* EndReg/EndExcept build directly on irg (no current_ir_graph switch). */
1309 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1312 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1313 set_irg_end_reg(irg, res);
1314 IRN_VRFY_IRG(res, irg);
1316 } /* new_rd_EndReg */
1319 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1322 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1323 set_irg_end_except(irg, res);
1324 IRN_VRFY_IRG (res, irg);
1326 } /* new_rd_EndExcept */
1329 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1331 ir_graph *rem = current_ir_graph;
1333 current_ir_graph = irg;
1334 res = new_bd_Break(db, block);
1335 current_ir_graph = rem;
1338 } /* new_rd_Break */
1341 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1344 ir_graph *rem = current_ir_graph;
1346 current_ir_graph = irg;
1347 res = new_bd_Filter(db, block, arg, mode, proj);
1348 current_ir_graph = rem;
1351 } /* new_rd_Filter */
/* NoMem: one shared NoMem node per graph; no construction needed. */
1354 new_rd_NoMem(ir_graph *irg) {
1355 return get_irg_no_mem(irg);
1356 } /* new_rd_NoMem */
1359 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1360 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1362 ir_graph *rem = current_ir_graph;
1364 current_ir_graph = irg;
1365 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1366 current_ir_graph = rem;
1372 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1373 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1375 ir_graph *rem = current_ir_graph;
1377 current_ir_graph = irg;
1378 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1379 current_ir_graph = rem;
1384 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1385 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1387 ir_graph *rem = current_ir_graph;
1389 current_ir_graph = irg;
1390 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1391 current_ir_graph = rem;
1394 } /* new_rd_CopyB */
1397 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1398 ir_node *objptr, ir_type *type) {
1400 ir_graph *rem = current_ir_graph;
1402 current_ir_graph = irg;
1403 res = new_bd_InstOf(db, block, store, objptr, type);
1404 current_ir_graph = rem;
1407 } /* new_rd_InstOf */
1410 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1412 ir_graph *rem = current_ir_graph;
1414 current_ir_graph = irg;
1415 res = new_bd_Raise(db, block, store, obj);
1416 current_ir_graph = rem;
1419 } /* new_rd_Raise */
1421 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1422 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1424 ir_graph *rem = current_ir_graph;
1426 current_ir_graph = irg;
1427 res = new_bd_Bound(db, block, store, idx, lower, upper);
1428 current_ir_graph = rem;
1431 } /* new_rd_Bound */
/* new_rd_Pin: constructs a Pin node for `node` in graph irg.
   Saves/restores current_ir_graph around the block-based constructor. */
1433 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1435 ir_graph *rem = current_ir_graph;
1437 current_ir_graph = irg;
1438 res = new_bd_Pin(db, block, node);
1439 current_ir_graph = rem;
/* new_rd_ASM: constructs an inline-assembler node with `arity` inputs,
   n_outs outputs, clobber list and assembler text, in graph irg.
   Saves/restores current_ir_graph around the block-based constructor. */
1444 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1445 int arity, ir_node *in[], ir_asm_constraint *inputs,
1446 int n_outs, ir_asm_constraint *outputs,
1447 int n_clobber, ident *clobber[], ident *asm_text) {
1449 ir_graph *rem = current_ir_graph;
1451 current_ir_graph = irg;
1452 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1453 current_ir_graph = rem;
/* --------------------------------------------------------------------- */
/* The new_r_* constructors: convenience wrappers that forward to the    */
/* corresponding new_rd_* constructor, passing NULL debug info.          */
/* NOTE(review): the closing brace of each one-line wrapper is not       */
/* visible in this listing (truncated extraction).                       */
/* --------------------------------------------------------------------- */
1459 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1460 return new_rd_Block(NULL, irg, arity, in);
1462 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1463 return new_rd_Start(NULL, irg, block);
1465 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1466 return new_rd_End(NULL, irg, block);
1468 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1469 return new_rd_Jmp(NULL, irg, block);
1471 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1472 return new_rd_IJmp(NULL, irg, block, tgt);
1474 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1475 return new_rd_Cond(NULL, irg, block, c);
1477 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1478 ir_node *store, int arity, ir_node **in) {
1479 return new_rd_Return(NULL, irg, block, store, arity, in);
1481 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1482 ir_mode *mode, tarval *con) {
1483 return new_rd_Const(NULL, irg, block, mode, con);
1485 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1486 ir_mode *mode, long value) {
1487 return new_rd_Const_long(NULL, irg, block, mode, value);
1489 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1490 ir_mode *mode, tarval *con, ir_type *tp) {
1491 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1493 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1494 symconst_symbol value, symconst_kind symkind) {
1495 return new_rd_SymConst(NULL, irg, block, value, symkind);
1497 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1498 ir_node *objptr, ir_entity *ent) {
1499 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1501 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1502 ir_node *objptr, int n_index, ir_node **index,
1504 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1506 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1507 ir_node *callee, int arity, ir_node **in,
1509 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1511 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1512 ir_node *op1, ir_node *op2, ir_mode *mode) {
1513 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1515 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1516 ir_node *op1, ir_node *op2, ir_mode *mode) {
1517 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1519 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1520 ir_node *op, ir_mode *mode) {
1521 return new_rd_Minus(NULL, irg, block, op, mode);
1523 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1524 ir_node *op1, ir_node *op2, ir_mode *mode) {
1525 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1527 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1528 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1529 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode);
1531 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1532 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1533 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode);
1535 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1536 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1537 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode);
1539 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1540 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
1541 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode);
1543 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1544 ir_node *op, ir_mode *mode) {
1545 return new_rd_Abs(NULL, irg, block, op, mode);
1547 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1548 ir_node *op1, ir_node *op2, ir_mode *mode) {
1549 return new_rd_And(NULL, irg, block, op1, op2, mode);
1551 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1552 ir_node *op1, ir_node *op2, ir_mode *mode) {
1553 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1555 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1556 ir_node *op1, ir_node *op2, ir_mode *mode) {
1557 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1559 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1560 ir_node *op, ir_mode *mode) {
1561 return new_rd_Not(NULL, irg, block, op, mode);
1563 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1564 ir_node *op, ir_node *k, ir_mode *mode) {
1565 return new_rd_Shl(NULL, irg, block, op, k, mode);
1567 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1568 ir_node *op, ir_node *k, ir_mode *mode) {
1569 return new_rd_Shr(NULL, irg, block, op, k, mode);
1571 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1572 ir_node *op, ir_node *k, ir_mode *mode) {
1573 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1575 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1576 ir_node *op, ir_node *k, ir_mode *mode) {
1577 return new_rd_Rot(NULL, irg, block, op, k, mode);
1579 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1580 ir_node *op, ir_node *k, ir_mode *mode) {
1581 return new_rd_Carry(NULL, irg, block, op, k, mode);
1583 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1584 ir_node *op, ir_node *k, ir_mode *mode) {
1585 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1587 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1588 ir_node *op1, ir_node *op2) {
1589 return new_rd_Cmp(NULL, irg, block, op1, op2);
1591 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1592 ir_node *op, ir_mode *mode) {
1593 return new_rd_Conv(NULL, irg, block, op, mode);
1595 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1596 return new_rd_Cast(NULL, irg, block, op, to_tp);
1598 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1599 ir_node **in, ir_mode *mode) {
1600 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1602 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1603 ir_node *store, ir_node *adr, ir_mode *mode) {
1604 return new_rd_Load(NULL, irg, block, store, adr, mode);
1606 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1607 ir_node *store, ir_node *adr, ir_node *val) {
1608 return new_rd_Store(NULL, irg, block, store, adr, val);
1610 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1611 ir_node *size, ir_type *alloc_type, where_alloc where) {
1612 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1614 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1615 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1616 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1618 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1619 return new_rd_Sync(NULL, irg, block, arity, in);
1621 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1622 ir_mode *mode, long proj) {
1623 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1625 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1627 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1629 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1630 int arity, ir_node **in) {
1631 return new_rd_Tuple(NULL, irg, block, arity, in );
1633 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1634 ir_node *val, ir_mode *mode) {
1635 return new_rd_Id(NULL, irg, block, val, mode);
1637 ir_node *new_r_Bad(ir_graph *irg) {
1638 return new_rd_Bad(irg);
1640 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1641 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1643 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1644 return new_rd_Unknown(irg, m);
1646 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1647 return new_rd_CallBegin(NULL, irg, block, callee);
1649 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1650 return new_rd_EndReg(NULL, irg, block);
1652 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1653 return new_rd_EndExcept(NULL, irg, block);
1655 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1656 return new_rd_Break(NULL, irg, block);
1658 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1659 ir_mode *mode, long proj) {
1660 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1662 ir_node *new_r_NoMem(ir_graph *irg) {
1663 return new_rd_NoMem(irg);
1665 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1666 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1667 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1669 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1670 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1671 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1673 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1674 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1675 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1677 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1679 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1681 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1682 ir_node *store, ir_node *obj) {
1683 return new_rd_Raise(NULL, irg, block, store, obj);
1685 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1686 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1687 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1689 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1690 return new_rd_Pin(NULL, irg, block, node);
1692 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1693 int arity, ir_node *in[], ir_asm_constraint *inputs,
1694 int n_outs, ir_asm_constraint *outputs,
1695 int n_clobber, ident *clobber[], ident *asm_text) {
1696 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1699 /** ********************/
1700 /** public interfaces */
1701 /** construction tools */
1705 * - create a new Start node in the current block
1707 * @return s - pointer to the created Start node
/* new_d_Start: constructs a Start node (mode_T, no predecessors) in the
   current block of current_ir_graph, then runs the node optimizer and the
   verifier on it. */
1712 new_d_Start(dbg_info *db) {
1715 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1716 op_Start, mode_T, 0, NULL);
1718 res = optimize_node(res);
1719 IRN_VRFY_IRG(res, current_ir_graph);
/* new_d_End: constructs an End node (mode_X) in the current block of
   current_ir_graph.  Arity -1 requests a dynamic in-array (presumably for
   keep-alive edges added later -- confirm against new_ir_node). */
1724 new_d_End(dbg_info *db) {
1726 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1727 op_End, mode_X, -1, NULL);
1728 res = optimize_node(res);
1729 IRN_VRFY_IRG(res, current_ir_graph);
1734 /* Constructs a Block with a fixed number of predecessors.
1735 Does set current_block. Can be used with automatic Phi
1736 node construction. */
/* new_d_Block: constructs a Block with `arity` fixed predecessors and makes
   it the current block.  While the graph is in phase_building it allocates
   and zeroes the per-block graph_arr used by the automatic Phi-node
   construction (one slot per local variable, n_loc entries).  The node is
   only optimized when no predecessor is an Unknown node. */
1738 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1741 int has_unknown = 0;
1743 res = new_bd_Block(db, arity, in);
1745 /* Create and initialize array for Phi-node construction. */
1746 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1747 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1748 current_ir_graph->n_loc);
1749 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1752 for (i = arity-1; i >= 0; i--)
1753 if (get_irn_op(in[i]) == op_Unknown) {
1758 if (!has_unknown) res = optimize_node(res);
1759 current_ir_graph->current_block = res;
1761 IRN_VRFY_IRG(res, current_ir_graph);
1766 /* ***********************************************************************/
1767 /* Methods necessary for automatic Phi node creation */
1769 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1770 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1771 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1772 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1774 Call Graph: ( A ---> B == A "calls" B)
1776 get_value mature_immBlock
1784 get_r_value_internal |
1788 new_rd_Phi0 new_rd_Phi_in
1790 * *************************************************************************** */
1792 /** Creates a Phi node with 0 predecessors: a "Phi0" placeholder used by
     the SSA construction for values requested in immature blocks; it is
     completed (given its real predecessors) when the block is matured. */
1793 static INLINE ir_node *
1794 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1797 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1798 IRN_VRFY_IRG(res, irg);
1802 /* There are two implementations of the Phi node construction. The first
1803 is faster, but does not work for blocks with more than 2 predecessors.
1804 The second works always but is slower and causes more unnecessary Phi
1806 Select the implementations by the following preprocessor flag set in
1808 #if USE_FAST_PHI_CONSTRUCTION
1810 /* This is a stack used for allocating and deallocating nodes in
1811 new_rd_Phi_in. The original implementation used the obstack
1812 to model this stack, now it is explicit. This reduces side effects.
1814 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in_stack (malloc'ed head, flexible array
   for the node pointers).
   NOTE(review): the malloc result is used without a NULL check; an
   allocation failure would dereference NULL in the next line. */
1816 new_Phi_in_stack(void) {
1819 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1821 res->stack = NEW_ARR_F (ir_node *, 0);
1825 } /* new_Phi_in_stack */
/* Releases a Phi_in_stack: frees the flexible array of node pointers
   (the head itself is presumably freed on a truncated line -- confirm). */
1828 free_Phi_in_stack(Phi_in_stack *s) {
1829 DEL_ARR_F(s->stack);
1831 } /* free_Phi_in_stack */
/* Pushes a no-longer-needed Phi node onto the reuse stack of the current
   graph.  If the write position is at the end of the array the array is
   grown (ARR_APP1), otherwise an existing slot is overwritten; the stack
   position is advanced either way. */
1834 free_to_Phi_in_stack(ir_node *phi) {
1835 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1836 current_ir_graph->Phi_in_stack->pos)
1837 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1839 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1841 (current_ir_graph->Phi_in_stack->pos)++;
1842 } /* free_to_Phi_in_stack */
1844 static INLINE ir_node *
1845 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1846 int arity, ir_node **in) {
1848 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1849 int pos = current_ir_graph->Phi_in_stack->pos;
1853 /* We need to allocate a new node */
1854 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1855 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1857 /* reuse the old node and initialize it again. */
1860 assert(res->kind == k_ir_node);
1861 assert(res->op == op_Phi);
1866 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1867 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
1869 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1871 (current_ir_graph->Phi_in_stack->pos)--;
1874 } /* alloc_or_pop_from_Phi_in_stack */
1875 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1878 * Creates a Phi node with a given, fixed array **in of predecessors.
1879 * If the Phi node is unnecessary, as the same value reaches the block
1880 * through all control flow paths, it is eliminated and the value
1881 * returned directly. This constructor is only intended for use in
1882 * the automatic Phi node generation triggered by get_value or mature.
1883 * The implementation is quite tricky and depends on the fact, that
1884 * the nodes are allocated on a stack:
1885 * The in array contains predecessors and NULLs. The NULLs appear,
1886 * if get_r_value_internal, that computed the predecessors, reached
1887 * the same block on two paths. In this case the same value reaches
1888 * this block on both paths, there is no definition in between. We need
1889 not allocate a Phi where these paths merge, but we have to communicate
1890 * this fact to the caller. This happens by returning a pointer to the
1891 * node the caller _will_ allocate. (Yes, we predict the address. We can
1892 * do so because the nodes are allocated on the obstack.) The caller then
1893 * finds a pointer to itself and, when this routine is called again,
1894 * eliminates itself.
/* Fast-construction variant of new_rd_Phi_in (see the explanatory comment
   above): builds a Phi from `in`, replaces NULL entries by the node itself,
   and frees/returns the single real predecessor when the Phi turns out to
   be unnecessary.  Relies on the obstack behaving as a stack so a caller
   can predict the address of the node it will allocate. */
1896 static INLINE ir_node *
1897 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins) {
1899 ir_node *res, *known;
1901 /* Allocate a new node on the obstack. This can return a node to
1902 which some of the pointers in the in-array already point.
1903 Attention: the constructor copies the in array, i.e., the later
1904 changes to the array in this routine do not affect the
1905 constructed node! If the in array contains NULLs, there will be
1906 missing predecessors in the returned node. Is this a possible
1907 internal state of the Phi node generation? */
1908 #if USE_EXPLICIT_PHI_IN_STACK
1909 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1911 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1912 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1915 /* The in-array can contain NULLs. These were returned by
1916 get_r_value_internal if it reached the same block/definition on a
1917 second path. The NULLs are replaced by the node itself to
1918 simplify the test in the next loop. */
1919 for (i = 0; i < ins; ++i) {
1924 /* This loop checks whether the Phi has more than one predecessor.
1925 If so, it is a real Phi node and we break the loop. Else the Phi
1926 node merges the same definition on several paths and therefore is
1928 for (i = 0; i < ins; ++i) {
1929 if (in[i] == res || in[i] == known)
1938 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1940 #if USE_EXPLICIT_PHI_IN_STACK
1941 free_to_Phi_in_stack(res);
1943 edges_node_deleted(res, current_ir_graph);
1944 obstack_free(current_ir_graph->obst, res);
1948 res = optimize_node (res);
1949 IRN_VRFY_IRG(res, irg);
1952 /* return the pointer to the Phi node. This node might be deallocated! */
1954 } /* new_rd_Phi_in */
1957 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1960 * Allocates and returns this node. The routine called to allocate the
1961 * node might optimize it away and return a real value, or even a pointer
1962 * to a deallocated Phi node on top of the obstack!
1963 * This function is called with an in-array of proper size.
/* Fast-construction variant of phi_merge: collects, for local variable
   `pos`, the reaching definition from every predecessor block into `nin`
   and builds the Phi via new_rd_Phi_in.  The result may be a predicted
   (not yet allocated) node; see new_rd_Phi_in. */
1966 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
1967 ir_node *prevBlock, *res;
1970 /* This loop goes to all predecessor blocks of the block the Phi node is in
1971 and there finds the operands of the Phi node by calling
1972 get_r_value_internal. */
1973 for (i = 1; i <= ins; ++i) {
1974 assert (block->in[i]);
1975 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1977 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1980 /* After collecting all predecessors into the array nin a new Phi node
1981 with these predecessors is created. This constructor contains an
1982 optimization: If all predecessors of the Phi node are identical it
1983 returns the only operand instead of a new Phi node. If the value
1984 passes two different control flow edges without being defined, and
1985 this is the second path treated, a pointer to the node that will be
1986 allocated for the first path (recursion) is returned. We already
1987 know the address of this node, as it is the next node to be allocated
1988 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1989 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1991 /* Now we know the value for "pos" and can enter it in the array with
1992 all known local variables. Attention: this might be a pointer to
1993 a node, that later will be allocated!!! See new_rd_Phi_in().
1994 If this is called in mature, after some set_value() in the same block,
1995 the proper value must not be overwritten:
1997 get_value (makes Phi0, put's it into graph_arr)
1998 set_value (overwrites Phi0 in graph_arr)
1999 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2002 if (!block->attr.block.graph_arr[pos]) {
2003 block->attr.block.graph_arr[pos] = res;
2005 /* printf(" value already computed by %s\n",
2006 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2013 * This function returns the last definition of a variable. In case
2014 * this variable was last defined in a previous block, Phi nodes are
2015 * inserted. If the part of the firm graph containing the definition
2016 * is not yet constructed, a dummy Phi node is returned.
/* Fast-construction variant of get_r_value_internal: returns the reaching
   definition of local variable `pos` in `block`, creating Phi/Phi0 nodes on
   demand (cases enumerated in the comment below).  Uses the visited flag to
   detect the second visit on a cycle. */
2019 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2022 /* There are 4 cases to treat.
2024 1. The block is not mature and we visit it the first time. We can not
2025 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2026 predecessors is returned. This node is added to the linked list (field
2027 "link") of the containing block to be completed when this block is
2028 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2031 2. The value is already known in this block, graph_arr[pos] is set and we
2032 visit the block the first time. We can return the value without
2033 creating any new nodes.
2035 3. The block is mature and we visit it the first time. A Phi node needs
2036 to be created (phi_merge). If the Phi is not needed, as all it's
2037 operands are the same value reaching the block through different
2038 paths, it's optimized away and the value itself is returned.
2040 4. The block is mature, and we visit it the second time. Now two
2041 subcases are possible:
2042 * The value was computed completely the last time we were here. This
2043 is the case if there is no loop. We can return the proper value.
2044 * The recursion that visited this node and set the flag did not
2045 return yet. We are computing a value in a loop and need to
2046 break the recursion without knowing the result yet.
2047 @@@ strange case. Straight forward we would create a Phi before
2048 starting the computation of it's predecessors. In this case we will
2049 find a Phi here in any case. The problem is that this implementation
2050 only creates a Phi after computing the predecessors, so that it is
2051 hard to compute self references of this Phi. @@@
2052 There is no simple check for the second subcase. Therefore we check
2053 for a second visit and treat all such cases as the second subcase.
2054 Anyways, the basic situation is the same: we reached a block
2055 on two paths without finding a definition of the value: No Phi
2056 nodes are needed on both paths.
2057 We return this information "Two paths, no Phi needed" by a very tricky
2058 implementation that relies on the fact that an obstack is a stack and
2059 will return a node with the same address on different allocations.
2060 Look also at phi_merge and new_rd_phi_in to understand this.
2061 @@@ Unfortunately this does not work, see testprogram
2062 three_cfpred_example.
2066 /* case 4 -- already visited. */
2067 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2069 /* visited the first time */
2070 set_irn_visited(block, get_irg_visited(current_ir_graph));
2072 /* Get the local valid value */
2073 res = block->attr.block.graph_arr[pos];
2075 /* case 2 -- If the value is actually computed, return it. */
2076 if (res) return res;
2078 if (block->attr.block.is_matured) { /* case 3 */
2080 /* The Phi has the same amount of ins as the corresponding block. */
2081 int ins = get_irn_arity(block);
2083 NEW_ARR_A(ir_node *, nin, ins);
2085 /* Phi merge collects the predecessors and then creates a node. */
2086 res = phi_merge(block, pos, mode, nin, ins);
2088 } else { /* case 1 */
2089 /* The block is not mature, we don't know how many in's are needed. A Phi
2090 with zero predecessors is created. Such a Phi node is called Phi0
2091 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2092 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2094 The Phi0 has to remember the pos of it's internal value. If the real
2095 Phi is computed, pos is used to update the array with the local
2098 res = new_rd_Phi0(current_ir_graph, block, mode);
2099 res->attr.phi0_pos = pos;
2100 res->link = block->link;
2104 /* If we get here, the frontend missed a use-before-definition error */
2107 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2108 assert(mode->code >= irm_F && mode->code <= irm_P);
2109 res = new_rd_Const(NULL, current_ir_graph, block, mode, tarval_mode_null[mode->code]);
2112 /* The local valid value is available now. */
2113 block->attr.block.graph_arr[pos] = res;
2116 } /* get_r_value_internal */
2121 it starts the recursion. This causes an Id at the entry of
2122 every block that has no definition of the value! **/
2124 #if USE_EXPLICIT_PHI_IN_STACK
2126 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2127 void free_Phi_in_stack(Phi_in_stack *s) {}
/* General variant of new_rd_Phi_in: builds a Phi from `in`, skipping Id and
   Bad predecessors and resolving references to the Phi0 placeholder
   (`phi0`) that this node will replace.  If at most one real predecessor
   remains the Phi is freed again and that predecessor (possibly
   re-optimized) is returned instead.  Memory Phis are kept alive via the
   End node so endless loops do not lose them. */
2130 static INLINE ir_node *
2131 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2132 ir_node **in, int ins, ir_node *phi0) {
2134 ir_node *res, *known;
2136 /* Allocate a new node on the obstack. The allocation copies the in
2138 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2139 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2141 /* This loop checks whether the Phi has more than one predecessor.
2142 If so, it is a real Phi node and we break the loop. Else the
2143 Phi node merges the same definition on several paths and therefore
2144 is not needed. Don't consider Bad nodes! */
2146 for (i=0; i < ins; ++i)
2150 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2152 /* Optimize self referencing Phis: We can't detect them yet properly, as
2153 they still refer to the Phi0 they will replace. So replace right now. */
2154 if (phi0 && in[i] == phi0) in[i] = res;
2156 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2164 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2167 edges_node_deleted(res, current_ir_graph);
2168 obstack_free (current_ir_graph->obst, res);
2169 if (is_Phi(known)) {
2170 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2171 order, an enclosing Phi node may become superfluous. */
2172 res = optimize_in_place_2(known);
2174 exchange(known, res);
2180 /* An undefined value, e.g., in unreachable code. */
2184 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2185 IRN_VRFY_IRG(res, irg);
2186 /* Memory Phis in endless loops must be kept alive.
2187 As we can't distinguish these easily we keep all of them alive. */
2188 if ((res->op == op_Phi) && (mode == mode_M))
2189 add_End_keepalive(get_irg_end(irg), res);
2193 } /* new_rd_Phi_in */
2196 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2198 #if PRECISE_EXC_CONTEXT
2200 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2203 * Construct a new frag_array for node n.
2204 * Copy the content from the current graph_arr of the corresponding block:
2205 * this is the current state.
2206 * Set ProjM(n) as current memory state.
2207 * Further the last entry in frag_arr of current block points to n. This
2208 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Builds a frag_arr for fragile op `n` (see the comment above): copies the
   current block's graph_arr, installs a Proj(n, M_except) as the memory
   state, and records n in the last graph_arr slot to chain the block's
   fragile ops.  Node optimization is disabled while the Proj is built
   (restore via set_optimize(opt) is on a truncated line -- confirm). */
2210 static INLINE ir_node **new_frag_arr(ir_node *n) {
2214 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2215 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2216 sizeof(ir_node *)*current_ir_graph->n_loc);
2218 /* turn off optimization before allocating Proj nodes, as res isn't
2220 opt = get_opt_optimize(); set_optimize(0);
2221 /* Here we rely on the fact that all frag ops have Memory as first result! */
2222 if (get_irn_op(n) == op_Call)
2223 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2224 else if (get_irn_op(n) == op_CopyB)
2225 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2227 assert((pn_Quot_M == pn_DivMod_M) &&
2228 (pn_Quot_M == pn_Div_M) &&
2229 (pn_Quot_M == pn_Mod_M) &&
2230 (pn_Quot_M == pn_Load_M) &&
2231 (pn_Quot_M == pn_Store_M) &&
2232 (pn_Quot_M == pn_Alloc_M) &&
2233 (pn_Quot_M == pn_Bound_M));
2234 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2238 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2240 } /* new_frag_arr */
2243 * Returns the frag_arr from a node.
/* Returns the frag_arr stored in a fragile node's attributes, dispatching
   on the opcode (the `case` labels for Call/Alloc/Load/Store/default are
   on truncated lines of this listing -- confirm against the attribute
   union layout). */
2245 static INLINE ir_node **get_frag_arr(ir_node *n) {
2246 switch (get_irn_opcode(n)) {
2248 return n->attr.call.exc.frag_arr;
2250 return n->attr.alloc.exc.frag_arr;
2252 return n->attr.load.exc.frag_arr;
2254 return n->attr.store.exc.frag_arr;
2256 return n->attr.except.frag_arr;
2258 } /* get_frag_arr */
/* Enters `val` at slot `pos` of a frag_arr (only if still unset) and
   propagates it along the chain of fragile ops recorded in the last slot.
   NOTE(review): this listing shows both a recursive version (lines
   2263-2267) and a bounded iterative version (lines 2272-2283); in the
   original these are almost certainly alternatives selected by a
   preprocessor conditional that was truncated here -- confirm. */
2261 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2263 if (!frag_arr[pos]) frag_arr[pos] = val;
2264 if (frag_arr[current_ir_graph->n_loc - 1]) {
2265 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2266 assert(arr != frag_arr && "Endless recursion detected");
2267 set_frag_value(arr, pos, val);
2272 for (i = 0; i < 1000; ++i) {
2273 if (!frag_arr[pos]) {
2274 frag_arr[pos] = val;
2276 if (frag_arr[current_ir_graph->n_loc - 1]) {
2277 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2283 assert(0 && "potential endless recursion");
2285 } /* set_frag_value */
/* Returns the value of local variable `pos` valid after the fragile op
   `cfOp`, consulting cfOp's frag_arr first and falling back to normal
   get_r_value_internal / Phi construction when a later set_value()
   shadowed it. */
2288 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2292 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2294 frag_arr = get_frag_arr(cfOp);
2295 res = frag_arr[pos];
2297 if (block->attr.block.graph_arr[pos]) {
2298 /* There was a set_value() after the cfOp and no get_value before that
2299 set_value(). We must build a Phi node now. */
2300 if (block->attr.block.is_matured) {
2301 int ins = get_irn_arity(block);
2303 NEW_ARR_A(ir_node *, nin, ins);
2304 res = phi_merge(block, pos, mode, nin, ins);
2306 res = new_rd_Phi0(current_ir_graph, block, mode);
2307 res->attr.phi0.pos = pos;
2308 res->link = block->link;
2312 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2313 but this should be better: (remove comment if this works) */
2314 /* It's a Phi, we can write this into all graph_arrs with NULL */
2315 set_frag_value(block->attr.block.graph_arr, pos, res);
2317 res = get_r_value_internal(block, pos, mode);
2318 set_frag_value(block->attr.block.graph_arr, pos, res);
2322 } /* get_r_frag_value_internal */
2323 #endif /* PRECISE_EXC_CONTEXT */
2326 * Computes the predecessors for the real phi node, and then
2327 * allocates and returns this node. The routine called to allocate the
2328 * node might optimize it away and return a real value.
2329 * This function must be called with an in-array of proper size.
2332 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2333 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2336 /* If this block has no value at pos create a Phi0 and remember it
2337 in graph_arr to break recursions.
2338 Else we may not set graph_arr as there a later value is remembered. */
2340 if (!block->attr.block.graph_arr[pos]) {
2341 if (block == get_irg_start_block(current_ir_graph)) {
2342 /* Collapsing to Bad tarvals is no good idea.
2343 So we call a user-supplied routine here that deals with this case as
2344 appropriate for the given language. Sorrily the only help we can give
2345 here is the position.
2347 Even if all variables are defined before use, it can happen that
2348 we get to the start block, if a Cond has been replaced by a tuple
2349 (bad, jmp). In this case we call the function needlessly, eventually
2350 generating an non existent error.
2351 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2354 if (default_initialize_local_variable) {
2355 ir_node *rem = get_cur_block();
2357 set_cur_block(block);
2358 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2362 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2363 /* We don't need to care about exception ops in the start block.
2364 There are none by definition. */
2365 return block->attr.block.graph_arr[pos];
2367 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2368 block->attr.block.graph_arr[pos] = phi0;
2369 #if PRECISE_EXC_CONTEXT
2370 if (get_opt_precise_exc_context()) {
2371 /* Set graph_arr for fragile ops. Also here we should break recursion.
2372 We could choose a cyclic path through an cfop. But the recursion would
2373 break at some point. */
2374 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2380 /* This loop goes to all predecessor blocks of the block the Phi node
2381 is in and there finds the operands of the Phi node by calling
2382 get_r_value_internal. */
2383 for (i = 1; i <= ins; ++i) {
2384 prevCfOp = skip_Proj(block->in[i]);
2386 if (is_Bad(prevCfOp)) {
2387 /* In case a Cond has been optimized we would get right to the start block
2388 with an invalid definition. */
2389 nin[i-1] = new_Bad();
2392 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2394 if (!is_Bad(prevBlock)) {
2395 #if PRECISE_EXC_CONTEXT
2396 if (get_opt_precise_exc_context() &&
2397 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2398 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2399 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2402 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2404 nin[i-1] = new_Bad();
2408 /* We want to pass the Phi0 node to the constructor: this finds additional
2409 optimization possibilities.
2410 The Phi0 node either is allocated in this function, or it comes from
2411 a former call to get_r_value_internal. In this case we may not yet
2412 exchange phi0, as this is done in mature_immBlock. */
2414 phi0_all = block->attr.block.graph_arr[pos];
2415 if (!((get_irn_op(phi0_all) == op_Phi) &&
2416 (get_irn_arity(phi0_all) == 0) &&
2417 (get_nodes_block(phi0_all) == block)))
2423 /* After collecting all predecessors into the array nin a new Phi node
2424 with these predecessors is created. This constructor contains an
2425 optimization: If all predecessors of the Phi node are identical it
2426 returns the only operand instead of a new Phi node. */
2427 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2429 /* In case we allocated a Phi0 node at the beginning of this procedure,
2430 we need to exchange this Phi0 with the real Phi. */
2432 exchange(phi0, res);
2433 block->attr.block.graph_arr[pos] = res;
2434 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2435 only an optimization. */
/**
 * This function returns the last definition of a variable.  In case
 * this variable was last defined in a previous block, Phi nodes are
 * inserted.  If the part of the firm graph containing the definition
 * is not yet constructed, a dummy Phi node is returned.
 */
get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time.  We can not
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned.  This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured.  (Completion will add a new Phi and turn the Phi0 into an Id
        node.)

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time.  We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time.  A Phi node needs
        to be created (phi_merge).  If the Phi is not needed, as all its
        operands are the same value reaching the block through different
        paths, it's optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time.  Now two
        subcases are possible:
        * The value was computed completely the last time we were here.  This
          is the case if there is no loop.  We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet.  We are computing a value in a loop and need to
          break the recursion.  This case only happens if we visited
          the same block with phi_merge before, which inserted a Phi0.
          So we return the Phi0.
  */

  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
    /* As phi_merge allocates a Phi0 this value is always defined.  Here
       is the critical difference of the two algorithms. */
    assert(block->attr.block.graph_arr[pos]);
    return block->attr.block.graph_arr[pos];

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) { return res; };

  if (block->attr.block.is_matured) { /* case 3 */

    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);
    NEW_ARR_A (ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge (block, pos, mode, nin, ins);

  } else { /* case 1 */
    /* The block is not mature, we don't know how many in's are needed.  A Phi
       with zero predecessors is created.  Such a Phi node is called Phi0
       node.  The Phi0 is then added to the list of Phi0 nodes in this block
       to be matured by mature_immBlock later.
       The Phi0 has to remember the pos of its internal value.  If the real
       Phi is computed, pos is used to update the array with the local
       value. */
    res = new_rd_Phi0(current_ir_graph, block, mode);
    res->attr.phi0.pos = pos;
    res->link = block->link;

  /* If we get here, the frontend missed a use-before-definition error */
  printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
  assert(mode->code >= irm_F && mode->code <= irm_P);
  /* Fall back to the mode's zero value so construction can continue. */
  res = new_rd_Const(NULL, current_ir_graph, block, mode,
                     get_mode_null(mode));

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;

} /* get_r_value_internal */
2535 #endif /* USE_FAST_PHI_CONSTRUCTION */
2537 /* ************************************************************************** */
/**
 * Finalize a Block node, when all control flows are known.
 * Acceptable parameters are only Block nodes.
 * Fixes the in-array, matures all pending Phi0 nodes attached to the
 * block via its link field, and optimizes the block in place.
 */
mature_immBlock(ir_node *block) {

  assert(get_irn_opcode(block) == iro_Block);
  /* @@@ should be commented in
     assert (!get_Block_matured(block) && "Block already matured"); */

  if (!get_Block_matured(block)) {
    /* in[0] is reserved; the remaining entries are the CF predecessors. */
    ins = ARR_LEN(block->in)-1;
    /* Fix block parameters */
    block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);

    /* An array for building the Phi nodes. */
    NEW_ARR_A(ir_node *, nin, ins);

    /* Traverse a chain of Phi nodes attached to this block and mature
       these, too. */
    for (n = block->link; n; n = next) {
      inc_irg_visited(current_ir_graph);
      /* phi_merge computes the real predecessors; the Phi0 becomes an Id. */
      exchange(n, phi_merge(block, n->attr.phi0.pos, n->mode, nin, ins));

    block->attr.block.is_matured = 1;

    /* Now, as the block is a finished firm node, we can optimize it.
       Since other nodes have been allocated since the block was created
       we can not free the node on the obstack.  Therefore we have to call
       optimize_in_place().
       Unfortunately the optimization does not change a lot, as all allocated
       nodes refer to the unoptimized node.
       We can call _2, as global cse has no effect on blocks. */
    block = optimize_in_place_2(block);
    IRN_VRFY_IRG(block, current_ir_graph);

} /* mature_immBlock */
/* Constructors with explicit debug info (new_d_*) that place the node in
   the current block of the current graph; constant-like nodes are placed
   in the start block instead.  Each delegates to the new_bd_* worker. */
new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
  return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
  /* Constants always live in the start block. */
  return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
  return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
} /* new_d_Const_long */
new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
  return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
} /* new_d_Const_type */
new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
  return new_bd_Id(db, current_ir_graph->current_block, val, mode);
new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
  return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
  /* Marks the Cond as fragmentary and selects max_proj as default case. */
  assert(arg->op == op_Cond);
  arg->attr.cond.kind = fragmentary;
  arg->attr.cond.default_proj = max_proj;
  res = new_Proj(arg, mode_X, max_proj);
} /* new_d_defaultProj */
new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
  /* last argument 0: no strict conversion */
  return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
  /* last argument 1: strict conversion */
  return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
} /* new_d_strictConv */
new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
  return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
  return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
/**
 * Allocate the frag array for a fragile op (one that may raise an
 * exception), so precise exception contexts can track the memory state.
 * Only done during graph construction and if the node was neither
 * optimized away nor CSE'd (frag array already present).
 */
static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
  if (get_opt_precise_exc_context()) {
    if ((current_ir_graph->phase_state == phase_building) &&
        (get_irn_op(res) == op) && /* Could be optimized away. */
        !*frag_store) /* Could be a cse where the arr is already set. */ {
      *frag_store = new_frag_arr(res);
} /* allocate_frag_arr */
/* Constructors for operations in the current block.  Fragile ops (those
   that may raise exceptions: Quot, DivMod, Div, Mod, Call, Load, Store,
   Alloc, CopyB, Bound) additionally allocate a frag array when precise
   exception contexts are enabled. */
new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
  res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
  res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
} /* new_d_DivMod */
new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
  res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
  res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
  return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
new_d_Jmp(dbg_info *db) {
  return new_bd_Jmp(db, current_ir_graph->current_block);
new_d_IJmp(dbg_info *db, ir_node *tgt) {
  return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
new_d_Cond(dbg_info *db, ir_node *c) {
  return new_bd_Cond(db, current_ir_graph->current_block, c);
new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
  res = new_bd_Call(db, current_ir_graph->current_block,
                    store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
  return new_bd_Return(db, current_ir_graph->current_block,
} /* new_d_Return */
new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
  res = new_bd_Load(db, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
  res = new_bd_Store(db, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
            where_alloc where) {
  res = new_bd_Alloc(db, current_ir_graph->current_block,
                     store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
           ir_node *size, ir_type *free_type, where_alloc where) {
  return new_bd_Free(db, current_ir_graph->current_block,
                     store, ptr, size, free_type, where);
new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
/* GL: objptr was called frame before.  Frame was a bad choice for the name
   as the operand could as well be a pointer to a dynamic object. */
{
  /* A Sel without array indices (arity 0, NULL index array). */
  return new_bd_Sel(db, current_ir_graph->current_block,
                    store, objptr, 0, NULL, ent);
} /* new_d_simpleSel */
new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
  return new_bd_Sel(db, current_ir_graph->current_block,
                    store, objptr, n_index, index, sel);
new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
  /* SymConsts, like Consts, live in the start block. */
  return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
} /* new_d_SymConst_type */
new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
  return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
                              value, kind, firm_unknown_type);
} /* new_d_SymConst */
new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
  return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
  return _new_d_Bad();
new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_bd_Confirm(db, current_ir_graph->current_block,
} /* new_d_Confirm */
new_d_Unknown(ir_mode *m) {
  return new_bd_Unknown(m);
} /* new_d_Unknown */
new_d_CallBegin(dbg_info *db, ir_node *call) {
  return new_bd_CallBegin(db, current_ir_graph->current_block, call);
} /* new_d_CallBegin */
new_d_EndReg(dbg_info *db) {
  return new_bd_EndReg(db, current_ir_graph->current_block);
} /* new_d_EndReg */
new_d_EndExcept(dbg_info *db) {
  return new_bd_EndExcept(db, current_ir_graph->current_block);
} /* new_d_EndExcept */
new_d_Break(dbg_info *db) {
  return new_bd_Break(db, current_ir_graph->current_block);
new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
  return new_bd_Filter(db, current_ir_graph->current_block,
} /* new_d_Filter */
(new_d_NoMem)(void) {
  return _new_d_NoMem();
new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
          ir_node *ir_true, ir_mode *mode) {
  return new_bd_Mux(db, current_ir_graph->current_block,
                    sel, ir_false, ir_true, mode);
new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
  return new_bd_Psi(db, current_ir_graph->current_block,
                    arity, conds, vals, mode);
ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
                     ir_node *dst, ir_node *src, ir_type *data_type) {
  res = new_bd_CopyB(db, current_ir_graph->current_block,
                     store, dst, src, data_type);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
  return new_bd_InstOf(db, current_ir_graph->current_block,
                       store, objptr, type);
} /* new_d_InstOf */
new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
  return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
ir_node *new_d_Bound(dbg_info *db,ir_node *store,
                     ir_node *idx, ir_node *lower, ir_node *upper) {
  res = new_bd_Bound(db, current_ir_graph->current_block,
                     store, idx, lower, upper);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
new_d_Pin(dbg_info *db, ir_node *node) {
  return new_bd_Pin(db, current_ir_graph->current_block, node);
new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
          int n_outs, ir_asm_constraint *outputs,
          int n_clobber, ident *clobber[], ident *asm_text) {
  return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2940 /* ********************************************************************* */
2941 /* Comfortable interface with automatic Phi node construction. */
/* (Also uses constructors of the ?? interface, except new_Block.) */
2943 /* ********************************************************************* */
2945 /* Block construction */
2946 /* immature Block without predecessors */
/**
 * Creates a new immature Block with no predecessors and makes it the
 * current block.  Predecessors are added with add_immBlock_pred() and the
 * block is finished with mature_immBlock().
 */
new_d_immBlock(dbg_info *db) {

  assert(get_irg_phase_state(current_ir_graph) == phase_building);
  /* creates a new dynamic in-array as length of in is -1 */
  res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
  current_ir_graph->current_block = res;

  /* macroblock head */

  res->attr.block.is_matured  = 0;
  res->attr.block.is_dead     = 0;
  res->attr.block.is_mb_head  = 1;
  res->attr.block.irg         = current_ir_graph;
  res->attr.block.backedge    = NULL;
  res->attr.block.in_cg       = NULL;
  res->attr.block.cg_backedge = NULL;
  res->attr.block.extblk      = NULL;
  res->attr.block.region      = NULL;
  res->attr.block.mb_depth    = 0;

  set_Block_block_visited(res, 0);

  /* Create and initialize array for Phi-node construction. */
  res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
                                        current_ir_graph->n_loc);
  memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

  /* Immature block may not be optimized! */
  IRN_VRFY_IRG(res, current_ir_graph);

} /* new_d_immBlock */

/* Same as new_d_immBlock() without debug info. */
new_immBlock(void) {
  return new_d_immBlock(NULL);
} /* new_immBlock */
/* immature PartBlock with its predecessors.  A PartBlock belongs to the
   macroblock of the block its predecessor jump originates from; it is not
   a macroblock head itself. */
new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
  ir_node *res = new_d_immBlock(db);
  ir_node *blk = get_nodes_block(pred_jmp);

  /* Inherit the macroblock header from the predecessor's block. */
  res->in[0] = blk->in[0];
  add_immBlock_pred(res, pred_jmp);

  res->attr.block.is_mb_head = 0;
  res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;

} /* new_d_immPartBlock */

/* Same as new_d_immPartBlock() without debug info. */
new_immPartBlock(ir_node *pred_jmp) {
  return new_d_immPartBlock(NULL, pred_jmp);
} /* new_immPartBlock */
/* add an edge to a jmp/control flow node.  Only allowed on immature
   macroblock heads; appends jmp to the block's dynamic in-array. */
add_immBlock_pred(ir_node *block, ir_node *jmp) {
  /* index of the slot the new predecessor will occupy (in[0] is reserved) */
  int n = ARR_LEN(block->in) - 1;

  assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
  assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
  assert(jmp != NULL);

  ARR_APP1(ir_node *, block->in, jmp);
  /* Call hook to update analysis/debug data about the new edge. */
  hook_set_irn_n(block, n, jmp, NULL);
} /* add_immBlock_pred */
/* changing the current block: all subsequently constructed nodes are
   placed into target. */
set_cur_block(ir_node *target) {
  current_ir_graph->current_block = target;
} /* set_cur_block */
3028 /* ************************ */
3029 /* parameter administration */
/* get a value from the parameter array from the current block by its index.
   pos + 1 accounts for the internal shift: slot 0 holds the memory state. */
get_d_value(dbg_info *db, int pos, ir_mode *mode) {
  ir_graph *irg = current_ir_graph;
  assert(get_irg_phase_state(irg) == phase_building);
  /* Fresh visited epoch so the recursive Phi lookup terminates. */
  inc_irg_visited(irg);

  return get_r_value_internal(irg->current_block, pos + 1, mode);

/* get a value from the parameter array from the current block by its index */
get_value(int pos, ir_mode *mode) {
  return get_d_value(NULL, pos, mode);
/* set a value at position pos in the parameter array from the current block.
   pos + 1 accounts for the internal shift: slot 0 holds the memory state. */
set_value(int pos, ir_node *value) {
  ir_graph *irg = current_ir_graph;
  assert(get_irg_phase_state(irg) == phase_building);
  assert(pos+1 < irg->n_loc);
  irg->current_block->attr.block.graph_arr[pos + 1] = value;
/* Find the value number for a node in the current block.
   Scans the value array backwards, skipping slot 0 (memory state). */
find_value(ir_node *value) {

  ir_node *bl = current_ir_graph->current_block;

  for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
    if (bl->attr.block.graph_arr[i] == value)
/* get the current store: the memory state lives in value slot 0. */
  ir_graph *irg = current_ir_graph;

  assert(get_irg_phase_state(irg) == phase_building);
  /* GL: one could call get_value instead */
  inc_irg_visited(irg);
  return get_r_value_internal(irg->current_block, 0, mode_M);
/* set the current store: handles automatic Sync construction for Load nodes.
   With auto-create-sync enabled, consecutive non-volatile Loads are made to
   depend on the same memory state, joined by a Sync, instead of being
   serialized through each other. */
set_store(ir_node *store) {
  ir_node *load, *pload, *pred, *in[2];

  assert(get_irg_phase_state(current_ir_graph) == phase_building);
  /* Beware: due to dead code elimination, a store might become a Bad node even in
     the construction phase. */
  assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");

  if (get_opt_auto_create_sync()) {
    /* handle non-volatile Load nodes by automatically creating Sync's */
    load = skip_Proj(store);
    if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
      pred = get_Load_mem(load);

      if (is_Sync(pred)) {
        /* a Load after a Sync: move it up */
        ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));

        set_Load_mem(load, get_memop_mem(mem));
        add_Sync_pred(pred, store);
        pload = skip_Proj(pred);
        if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
          /* a Load after a Load: create a new Sync */
          set_Load_mem(load, get_Load_mem(pload));

          store = new_Sync(2, in);

  current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Marks ka so it is not removed as dead code: attaches it as a keep-alive
   edge to the graph's End node. */
keep_alive(ir_node *ka) {
  add_End_keepalive(get_irg_end(current_ir_graph), ka);
3124 /* --- Useful access routines --- */
3125 /* Returns the current block of the current graph. To set the current
3126 block use set_cur_block. */
3127 ir_node *get_cur_block(void) {
3128 return get_irg_current_block(current_ir_graph);
3129 } /* get_cur_block */
3131 /* Returns the frame type of the current graph */
3132 ir_type *get_cur_frame_type(void) {
3133 return get_irg_frame_type(current_ir_graph);
3134 } /* get_cur_frame_type */
3137 /* ********************************************************************* */
/* call once for each run of the library.  Registers the language-dependent
   callback used to initialize variables that are read before being set
   (see phi_merge). */
init_cons(uninitialized_local_variable_func_t *func) {
  default_initialize_local_variable = func;
/* Finalizes construction of every graph in the program and moves the
   whole program to the phase_high state. */
irp_finalize_cons(void) {
  for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
    irg_finalize_cons(get_irp_irg(i));
  irp->phase_state = phase_high;
} /* irp_finalize_cons */
/* Convenience constructors without debug info: each delegates to the
   corresponding new_d_* constructor with a NULL dbg_info. */
ir_node *new_Block(int arity, ir_node **in) {
  return new_d_Block(NULL, arity, in);
ir_node *new_Start(void) {
  return new_d_Start(NULL);
ir_node *new_End(void) {
  return new_d_End(NULL);
ir_node *new_Jmp(void) {
  return new_d_Jmp(NULL);
ir_node *new_IJmp(ir_node *tgt) {
  return new_d_IJmp(NULL, tgt);
ir_node *new_Cond(ir_node *c) {
  return new_d_Cond(NULL, c);
ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
  return new_d_Return(NULL, store, arity, in);
ir_node *new_Const(ir_mode *mode, tarval *con) {
  return new_d_Const(NULL, mode, con);
ir_node *new_Const_long(ir_mode *mode, long value) {
  return new_d_Const_long(NULL, mode, value);
ir_node *new_Const_type(tarval *con, ir_type *tp) {
  /* The mode is derived from the given type. */
  return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
  return new_d_SymConst_type(NULL, value, kind, type);
ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
  return new_d_SymConst(NULL, value, kind);
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
  return new_d_simpleSel(NULL, store, objptr, ent);
ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
  return new_d_Sel(NULL, store, objptr, arity, in, ent);
ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
  return new_d_Call(NULL, store, callee, arity, in, tp);
ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Add(NULL, op1, op2, mode);
ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Sub(NULL, op1, op2, mode);
ir_node *new_Minus(ir_node *op, ir_mode *mode) {
  return new_d_Minus(NULL, op, mode);
ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Mul(NULL, op1, op2, mode);
ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Quot(NULL, memop, op1, op2, mode);
ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_DivMod(NULL, memop, op1, op2, mode);
ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Div(NULL, memop, op1, op2, mode);
ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Mod(NULL, memop, op1, op2, mode);
ir_node *new_Abs(ir_node *op, ir_mode *mode) {
  return new_d_Abs(NULL, op, mode);
ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_And(NULL, op1, op2, mode);
ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Or(NULL, op1, op2, mode);
ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Eor(NULL, op1, op2, mode);
ir_node *new_Not(ir_node *op, ir_mode *mode) {
  return new_d_Not(NULL, op, mode);
ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shl(NULL, op, k, mode);
ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shr(NULL, op, k, mode);
ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shrs(NULL, op, k, mode);
ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Rot(NULL, op, k, mode);
ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Carry(NULL, op1, op2, mode);
ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Borrow(NULL, op1, op2, mode);
ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
  return new_d_Cmp(NULL, op1, op2);
ir_node *new_Conv(ir_node *op, ir_mode *mode) {
  return new_d_Conv(NULL, op, mode);
ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
  return new_d_strictConv(NULL, op, mode);
ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
  return new_d_Cast(NULL, op, to_tp);
ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
  return new_d_Phi(NULL, arity, in, mode);
ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
  return new_d_Load(NULL, store, addr, mode);
ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
  return new_d_Store(NULL, store, addr, val);
ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
                   where_alloc where) {
  return new_d_Alloc(NULL, store, size, alloc_type, where);
ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
                  ir_type *free_type, where_alloc where) {
  return new_d_Free(NULL, store, ptr, size, free_type, where);
ir_node *new_Sync(int arity, ir_node *in[]) {
  return new_d_Sync(NULL, arity, in);
ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Proj(NULL, arg, mode, proj);
ir_node *new_defaultProj(ir_node *arg, long max_proj) {
  return new_d_defaultProj(NULL, arg, max_proj);
ir_node *new_Tuple(int arity, ir_node **in) {
  return new_d_Tuple(NULL, arity, in);
ir_node *new_Id(ir_node *val, ir_mode *mode) {
  return new_d_Id(NULL, val, mode);
ir_node *new_Bad(void) {
ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_d_Confirm(NULL, val, bound, cmp);
ir_node *new_Unknown(ir_mode *m) {
  return new_d_Unknown(m);
ir_node *new_CallBegin(ir_node *callee) {
  return new_d_CallBegin(NULL, callee);
ir_node *new_EndReg(void) {
  return new_d_EndReg(NULL);
ir_node *new_EndExcept(void) {
  return new_d_EndExcept(NULL);
ir_node *new_Break(void) {
  return new_d_Break(NULL);
ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Filter(NULL, arg, mode, proj);
ir_node *new_NoMem(void) {
  return new_d_NoMem();
ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
  return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
  return new_d_Psi(NULL, arity, conds, vals, mode);
ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
  return new_d_CopyB(NULL, store, dst, src, data_type);
ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
  return new_d_InstOf(NULL, store, objptr, ent);
ir_node *new_Raise(ir_node *store, ir_node *obj) {
  return new_d_Raise(NULL, store, obj);
ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
  return new_d_Bound(NULL, store, idx, lower, upper);
ir_node *new_Pin(ir_node *node) {
  return new_d_Pin(NULL, node);
ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
                 int n_outs, ir_asm_constraint *outputs,
                 int n_clobber, ident *clobber[], ident *asm_text) {
  return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);