2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
/* NOTE(review): this extraction is missing lines (the #ifdef that selects
   between the two IRN_VRFY_IRG variants is not visible -- presumably a
   DEBUG_libfirm guard; TODO confirm). Code left byte-identical. */
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
60 /* when we need verifying */
/* Non-verifying variant expands to nothing; verifying variant forwards to
   irn_vrfy_irg(). */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
68 * Language dependent variable initialization callback.
/* Frontend-supplied callback producing a value for reads of uninitialized
   local variables; NULL means no language-specific initializer installed. */
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* NOTE(review): the macro bodies below are missing lines in this extraction
   (result-type lines, in[] setup, braces and returns are not all visible);
   they are left byte-identical. These macros generate the constructor
   families for binary, unary and division-like ops:
   - NEW_BD_*: build the node in current_ir_graph ("bd" = block + dbg);
   - NEW_RD_*: save/restore current_ir_graph around the bd call so an
     explicit irg argument can be used ("rd" = raw + dbg);
   - NEW_D_*:  build in current_ir_graph->current_block ("d" = dbg only).
   Div-like ops produce a mode_T tuple, store the requested result mode in
   attr.divmod.res_mode and the exception pin state in attr.divmod.exc. */
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = state; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for an divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
161 current_ir_graph = rem; \
165 /* creates a d constructor for an binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Cannot be used with automatic
183 * Phi node construction.
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
/* NOTE(review): declaration of `res` and the return statement are not
   visible in this extraction; code left untouched. */
188 ir_graph *irg = current_ir_graph;
/* A Block has no enclosing block (NULL) and carries mode_BB; its attribute
   record is initialized field by field below. */
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.has_label = 0;
198 res->attr.block.irg = irg;
/* Backedge flags are allocated on the graph's obstack, one per predecessor. */
199 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
200 res->attr.block.in_cg = NULL;
201 res->attr.block.cg_backedge = NULL;
202 res->attr.block.extblk = NULL;
203 res->attr.block.mb_depth = 0;
204 res->attr.block.label = 0;
/* All predecessors are given up-front, so the block is immediately mature. */
206 set_Block_matured(res, 1);
207 set_Block_block_visited(res, 0);
209 IRN_VRFY_IRG(res, irg);
/* Creates the Start node of a graph: mode_T, no operands.
   NOTE(review): `res` declaration and return not visible in this extraction. */
214 new_bd_Start(dbg_info *db, ir_node *block) {
216 ir_graph *irg = current_ir_graph;
218 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
220 IRN_VRFY_IRG(res, irg);
/* Creates the End node: mode_X with arity -1 (extendable predecessor list,
   filled later via add_End_keepalive and friends). */
225 new_bd_End(dbg_info *db, ir_node *block) {
227 ir_graph *irg = current_ir_graph;
229 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
231 IRN_VRFY_IRG(res, irg);
236 * Creates a Phi node with all predecessors. Calling this constructor
237 * is only allowed if the corresponding block is mature.
240 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
/* NOTE(review): declarations of `res`, `i`, `has_unknown` and the loop body
   that sets has_unknown are missing from this extraction; left untouched. */
242 ir_graph *irg = current_ir_graph;
246 /* Don't assert that block matured: the use of this constructor is strongly
/* Sanity check: a mature block must have as many CFG preds as the Phi has
   data preds. */
248 if ( get_Block_matured(block) )
249 assert( get_irn_arity(block) == arity );
251 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
253 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan operands for Unknown; optimization is skipped in that case
   (the Unknown may still be replaced later). */
255 for (i = arity-1; i >= 0; i--)
256 if (get_irn_op(in[i]) == op_Unknown) {
261 if (!has_unknown) res = optimize_node(res);
262 IRN_VRFY_IRG(res, irg);
264 /* Memory Phis in endless loops must be kept alive.
265 As we can't distinguish these easily we keep all of them alive. */
266 if ((res->op == op_Phi) && (mode == mode_M))
267 add_End_keepalive(get_irg_end(irg), res);
/* Creates a Const node carrying tarval `con` with attached type `tp`.
   Note: the node is always placed in the start block -- the `block`
   parameter is ignored here (see use of get_irg_start_block below). */
272 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
274 ir_graph *irg = current_ir_graph;
277 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
278 res->attr.con.tv = con;
279 set_Const_type(res, tp); /* Call method because of complex assertion. */
280 res = optimize_node (res);
281 assert(get_Const_type(res) == tp);
282 IRN_VRFY_IRG(res, irg);
285 } /* new_bd_Const_type */
/* Convenience wrapper: Const with the unknown type. NOTE(review): delegates
   through the rd variant rather than new_bd_Const_type directly. */
288 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
289 ir_graph *irg = current_ir_graph;
291 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience wrapper: Const from a plain long, converted via
   new_tarval_from_long() in the given mode. */
295 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
296 ir_graph *irg = current_ir_graph;
298 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
299 } /* new_bd_Const_long */
/* Creates an Id node forwarding `val` (usually optimized away immediately). */
302 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
304 ir_graph *irg = current_ir_graph;
306 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
307 res = optimize_node(res);
308 IRN_VRFY_IRG(res, irg);
/* Creates a Proj selecting one result of the (mode_T) node `arg`.
   NOTE(review): the `proj` parameter line is missing from this extraction. */
313 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
316 ir_graph *irg = current_ir_graph;
318 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
319 res->attr.proj = proj;
322 assert(get_Proj_pred(res));
323 assert(get_nodes_block(get_Proj_pred(res)));
325 res = optimize_node(res);
327 IRN_VRFY_IRG(res, irg);
/* Creates the default Proj of a Cond. Side effect: mutates the Cond
   argument itself (marks it `fragmentary` and records max_proj as the
   default projection number) before building a mode_X Proj. */
332 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
335 ir_graph *irg = current_ir_graph;
337 assert(arg->op == op_Cond);
338 arg->attr.cond.kind = fragmentary;
339 arg->attr.cond.default_proj = max_proj;
340 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
342 } /* new_bd_defaultProj */
/* Creates a Conv of `op` to `mode`; strict_flag marks a strict conversion
   (stored in attr.conv.strict). */
345 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
347 ir_graph *irg = current_ir_graph;
349 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
350 res->attr.conv.strict = strict_flag;
351 res = optimize_node(res);
352 IRN_VRFY_IRG(res, irg);
/* Creates a Cast: same mode as `op`, only the attached type changes.
   Target type must be atomic (asserted). */
357 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
359 ir_graph *irg = current_ir_graph;
361 assert(is_atomic_type(to_tp));
363 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
364 res->attr.cast.totype = to_tp;
365 res = optimize_node(res);
366 IRN_VRFY_IRG(res, irg);
/* Creates a Tuple (mode_T) from `arity` operands. */
371 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
373 ir_graph *irg = current_ir_graph;
375 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
376 res = optimize_node (res);
377 IRN_VRFY_IRG(res, irg);
/* Creates a Cmp of op1 and op2, yielding a mode_T tuple of pn_Cmp results.
   NOTE(review): the in[] initialization lines are missing from this
   extraction; left untouched. */
403 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
406 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
410 res = optimize_node(res);
411 IRN_VRFY_IRG(res, irg);
/* Creates an unconditional Jmp (mode_X, no operands). */
416 new_bd_Jmp(dbg_info *db, ir_node *block) {
418 ir_graph *irg = current_ir_graph;
420 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
421 res = optimize_node(res);
422 IRN_VRFY_IRG(res, irg);
/* Creates an indirect jump IJmp to the computed target `tgt`. */
427 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
429 ir_graph *irg = current_ir_graph;
431 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
432 res = optimize_node(res);
433 IRN_VRFY_IRG(res, irg);
/* Creates a Cond on selector `c`; starts with dense projection numbering,
   default proj 0 and no jump prediction. */
438 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
443 res->attr.cond.kind = dense;
444 res->attr.cond.default_proj = 0;
445 res->attr.cond.pred = COND_JMP_PRED_NONE;
446 res = optimize_node(res);
447 IRN_VRFY_IRG(res, irg);
/* Creates a Call: memory `store`, `callee` and `arity` arguments are packed
   into one r_in array (arguments copied starting at r_in[2]; the lines
   assigning r_in[0]/r_in[1] -- presumably store and callee -- are not
   visible in this extraction). `tp` must be a method type or unknown. */
452 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
453 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
457 ir_graph *irg = current_ir_graph;
460 NEW_ARR_A(ir_node *, r_in, r_arity);
463 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
465 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
467 assert((get_unknown_type() == tp) || is_Method_type(tp));
468 set_Call_type(res, tp);
469 res->attr.call.exc.pin_state = op_pin_state_pinned;
470 res->attr.call.callee_arr = NULL;
471 res = optimize_node(res);
472 IRN_VRFY_IRG(res, irg);
/* Creates a Return: memory plus `arity` result values packed into r_in
   (results copied starting at r_in[1]; the r_in[0] = store line is not
   visible here). Produces a control-flow (mode_X) node. */
477 new_bd_Return(dbg_info *db, ir_node *block,
478 ir_node *store, int arity, ir_node **in) {
482 ir_graph *irg = current_ir_graph;
485 NEW_ARR_A (ir_node *, r_in, r_arity);
487 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
488 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
489 res = optimize_node(res);
490 IRN_VRFY_IRG(res, irg);
492 } /* new_bd_Return */
/* Creates a Load (mode_T tuple) from address `adr` with memory `store`;
   result mode, volatility and alignment are recorded in the attributes. */
495 new_bd_Load(dbg_info *db, ir_node *block,
496 ir_node *store, ir_node *adr, ir_mode *mode) {
499 ir_graph *irg = current_ir_graph;
503 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
504 res->attr.load.exc.pin_state = op_pin_state_pinned;
505 res->attr.load.load_mode = mode;
506 res->attr.load.volatility = volatility_non_volatile;
507 res->attr.load.aligned = align_is_aligned;
508 res = optimize_node(res);
509 IRN_VRFY_IRG(res, irg);
/* Creates a Store of `val` to `adr` with memory `store` (mode_T tuple). */
514 new_bd_Store(dbg_info *db, ir_node *block,
515 ir_node *store, ir_node *adr, ir_node *val) {
518 ir_graph *irg = current_ir_graph;
523 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
524 res->attr.store.exc.pin_state = op_pin_state_pinned;
525 res->attr.store.volatility = volatility_non_volatile;
526 res->attr.store.aligned = align_is_aligned;
527 res = optimize_node(res);
528 IRN_VRFY_IRG(res, irg);
/* Creates an Alloc of `size` bytes/elements of `alloc_type`, allocated on
   stack or heap according to `where` (mode_T tuple). */
533 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
534 ir_node *size, ir_type *alloc_type, where_alloc where) {
537 ir_graph *irg = current_ir_graph;
541 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
542 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
543 res->attr.alloc.where = where;
544 res->attr.alloc.type = alloc_type;
545 res = optimize_node(res);
546 IRN_VRFY_IRG(res, irg);
/* Creates a Free of `ptr` (with size and type info); result is memory
   (mode_M), not a tuple. */
551 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
552 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
555 ir_graph *irg = current_ir_graph;
560 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
561 res->attr.free.where = where;
562 res->attr.free.type = free_type;
563 res = optimize_node(res);
564 IRN_VRFY_IRG(res, irg);
/* Creates a Sel addressing entity `ent` of `objptr` with `arity` index
   operands (copied to r_in starting at index 2; the r_in[0]/r_in[1]
   assignments are not visible in this extraction). Result mode is
   mode_P_code for method entities, mode_P_data otherwise. */
569 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
570 int arity, ir_node **in, ir_entity *ent) {
574 ir_graph *irg = current_ir_graph;
575 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
577 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
580 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
583 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
585 * Sel's can select functions which should be of mode mode_P_code.
587 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
588 res->attr.sel.ent = ent;
589 res = optimize_node(res);
590 IRN_VRFY_IRG(res, irg);
/* Creates a SymConst of kind `symkind` carrying symbol `value` and type
   `tp`. Address kinds get pointer mode; the default-mode branch for other
   kinds is not visible in this extraction. */
595 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
596 symconst_kind symkind, ir_type *tp) {
599 ir_graph *irg = current_ir_graph;
601 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
602 mode = mode_P_data; /* FIXME: can be mode_P_code */
606 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
608 res->attr.symc.num = symkind;
609 res->attr.symc.sym = value;
610 res->attr.symc.tp = tp;
612 res = optimize_node(res);
613 IRN_VRFY_IRG(res, irg);
615 } /* new_bd_SymConst_type */
/* Creates an empty Sync (mode_M, arity -1); predecessors are added later
   via add_Sync_pred, hence no optimize_node call. */
618 new_bd_Sync(dbg_info *db, ir_node *block) {
620 ir_graph *irg = current_ir_graph;
622 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
623 /* no need to call optimize node here, Sync are always created with no predecessors */
624 IRN_VRFY_IRG(res, irg);
/* Creates a Confirm: `val` restricted by relation `cmp` against `bound`;
   same mode as `val`. */
629 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
630 ir_node *in[2], *res;
631 ir_graph *irg = current_ir_graph;
635 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
636 res->attr.confirm.cmp = cmp;
637 res = optimize_node(res);
638 IRN_VRFY_IRG(res, irg);
640 } /* new_bd_Confirm */
/* Creates an Unknown of mode `m`, always placed in the start block;
   no dbg info, no verification call. */
643 new_bd_Unknown(ir_mode *m) {
645 ir_graph *irg = current_ir_graph;
647 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
648 res = optimize_node(res);
650 } /* new_bd_Unknown */
/* Creates a CallBegin for interprocedural view: single operand is the
   Call's function pointer; the Call itself is stored in the attributes. */
653 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
656 ir_graph *irg = current_ir_graph;
658 in[0] = get_Call_ptr(call);
659 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
660 /* res->attr.callbegin.irg = irg; */
661 res->attr.callbegin.call = call;
662 res = optimize_node(res);
663 IRN_VRFY_IRG(res, irg);
665 } /* new_bd_CallBegin */
/* Creates an EndReg (interprocedural regular end); registered on the graph. */
668 new_bd_EndReg(dbg_info *db, ir_node *block) {
670 ir_graph *irg = current_ir_graph;
672 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
673 set_irg_end_reg(irg, res);
674 IRN_VRFY_IRG(res, irg);
676 } /* new_bd_EndReg */
/* Creates an EndExcept (interprocedural exceptional end); registered on the
   graph. */
679 new_bd_EndExcept(dbg_info *db, ir_node *block) {
681 ir_graph *irg = current_ir_graph;
683 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
684 set_irg_end_except(irg, res);
685 IRN_VRFY_IRG (res, irg);
687 } /* new_bd_EndExcept */
/* Creates a Break (mode_X, no operands) for interprocedural control flow. */
690 new_bd_Break(dbg_info *db, ir_node *block) {
692 ir_graph *irg = current_ir_graph;
694 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
695 res = optimize_node(res);
696 IRN_VRFY_IRG(res, irg);
/* Creates a Filter (interprocedural Proj variant) selecting `proj` of
   `arg`. NOTE(review): the `proj` parameter line is missing from this
   extraction. */
701 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
704 ir_graph *irg = current_ir_graph;
706 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
707 res->attr.filter.proj = proj;
708 res->attr.filter.in_cg = NULL;
709 res->attr.filter.backedge = NULL;
712 assert(get_Proj_pred(res));
713 assert(get_nodes_block(get_Proj_pred(res)));
715 res = optimize_node(res);
716 IRN_VRFY_IRG(res, irg);
718 } /* new_bd_Filter */
/* Creates a Mux: selects ir_true or ir_false depending on `sel`.
   NOTE(review): the in[] initialization lines are not visible here. */
721 new_bd_Mux(dbg_info *db, ir_node *block,
722 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
725 ir_graph *irg = current_ir_graph;
731 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
734 res = optimize_node(res);
735 IRN_VRFY_IRG(res, irg);
/* Creates a Psi: generalized Mux over `arity` condition/value pairs plus a
   default value, interleaved into one in[] array of length 2*arity+1. */
740 new_bd_Psi(dbg_info *db, ir_node *block,
741 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
744 ir_graph *irg = current_ir_graph;
747 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
749 for (i = 0; i < arity; ++i) {
751 in[2 * i + 1] = vals[i];
755 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
758 res = optimize_node(res);
759 IRN_VRFY_IRG(res, irg);
/* Creates a CopyB: block copy of `data_type` from `src` to `dst` with
   memory `store` (mode_T tuple). */
764 new_bd_CopyB(dbg_info *db, ir_node *block,
765 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
768 ir_graph *irg = current_ir_graph;
774 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
776 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
777 res->attr.copyb.data_type = data_type;
778 res = optimize_node(res);
779 IRN_VRFY_IRG(res, irg);
/* Creates an InstOf type test of `objptr` against `type`.
   NOTE(review): this passes op_Sel to new_ir_node while setting
   attr.instof -- op_InstOf looks intended; confirm against upstream. */
784 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
785 ir_node *objptr, ir_type *type) {
788 ir_graph *irg = current_ir_graph;
792 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
793 res->attr.instof.type = type;
794 res = optimize_node(res);
795 IRN_VRFY_IRG(res, irg);
797 } /* new_bd_InstOf */
/* Creates a Raise throwing exception object `obj` (mode_T tuple). */
800 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
803 ir_graph *irg = current_ir_graph;
807 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
808 res = optimize_node(res);
809 IRN_VRFY_IRG(res, irg);
/* Creates a Bound check: lower <= idx < upper (mode_T tuple). */
814 new_bd_Bound(dbg_info *db, ir_node *block,
815 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
818 ir_graph *irg = current_ir_graph;
824 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
825 res->attr.bound.exc.pin_state = op_pin_state_pinned;
826 res = optimize_node(res);
827 IRN_VRFY_IRG(res, irg);
/* Creates a Pin fixing `node` to a block; same mode as the operand. */
832 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
834 ir_graph *irg = current_ir_graph;
836 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
837 res = optimize_node(res);
838 IRN_VRFY_IRG(res, irg);
/* Creates an ASM node: the constraint/clobber arrays are duplicated onto
   the graph's obstack so callers may pass stack-allocated arrays. */
843 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
844 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
846 ir_graph *irg = current_ir_graph;
849 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
850 res->attr.assem.pin_state = op_pin_state_pinned;
851 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
852 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
853 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
854 res->attr.assem.asm_text = asm_text;
856 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
857 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
859 res = optimize_node(res);
860 IRN_VRFY_IRG(res, irg);
864 /* --------------------------------------------- */
865 /* private interfaces, for professional use only */
866 /* --------------------------------------------- */
/* The new_rd_* constructors below all follow one pattern: save
   current_ir_graph, switch to the explicit `irg`, delegate to the matching
   new_bd_* constructor, restore current_ir_graph. NOTE(review): the
   `ir_node *res;` declarations and `return res;` lines are missing from
   this extraction throughout; code left byte-identical. */
868 /* Constructs a Block with a fixed number of predecessors.
869 Does not set current_block. Can not be used with automatic
870 Phi node construction. */
872 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
873 ir_graph *rem = current_ir_graph;
876 current_ir_graph = irg;
877 res = new_bd_Block(db, arity, in);
878 current_ir_graph = rem;
884 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
885 ir_graph *rem = current_ir_graph;
888 current_ir_graph = irg;
889 res = new_bd_Start(db, block);
890 current_ir_graph = rem;
896 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
898 ir_graph *rem = current_ir_graph;
900 current_ir_graph = irg;
901 res = new_bd_End(db, block);
902 current_ir_graph = rem;
/* rd-wrappers (save/restore current_ir_graph around the bd constructor)
   for Phi, Const variants, Id, Proj, defaultProj, Conv, Cast and Tuple. */
907 /* Creates a Phi node with all predecessors. Calling this constructor
908 is only allowed if the corresponding block is mature. */
910 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
912 ir_graph *rem = current_ir_graph;
914 current_ir_graph = irg;
915 res = new_bd_Phi(db, block,arity, in, mode);
916 current_ir_graph = rem;
922 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
924 ir_graph *rem = current_ir_graph;
926 current_ir_graph = irg;
927 res = new_bd_Const_type(db, block, mode, con, tp);
928 current_ir_graph = rem;
931 } /* new_rd_Const_type */
934 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
936 ir_graph *rem = current_ir_graph;
938 current_ir_graph = irg;
939 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
940 current_ir_graph = rem;
946 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
947 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
948 } /* new_rd_Const_long */
951 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
953 ir_graph *rem = current_ir_graph;
955 current_ir_graph = irg;
956 res = new_bd_Id(db, block, val, mode);
957 current_ir_graph = rem;
963 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
966 ir_graph *rem = current_ir_graph;
968 current_ir_graph = irg;
969 res = new_bd_Proj(db, block, arg, mode, proj);
970 current_ir_graph = rem;
976 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
979 ir_graph *rem = current_ir_graph;
981 current_ir_graph = irg;
982 res = new_bd_defaultProj(db, block, arg, max_proj);
983 current_ir_graph = rem;
986 } /* new_rd_defaultProj */
/* Note: the rd variant always builds a non-strict Conv (strict_flag 0). */
989 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
991 ir_graph *rem = current_ir_graph;
993 current_ir_graph = irg;
994 res = new_bd_Conv(db, block, op, mode, 0);
995 current_ir_graph = rem;
1001 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1003 ir_graph *rem = current_ir_graph;
1005 current_ir_graph = irg;
1006 res = new_bd_Cast(db, block, op, to_tp);
1007 current_ir_graph = rem;
1013 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1015 ir_graph *rem = current_ir_graph;
1017 current_ir_graph = irg;
1018 res = new_bd_Tuple(db, block, arity, in);
1019 current_ir_graph = rem;
1022 } /* new_rd_Tuple */
/* Macro-generated rd constructors (most instantiations are missing from
   this extraction; only DivMod and Borrow are visible). */
1030 NEW_RD_DIVOP(DivMod)
1043 NEW_RD_BINOP(Borrow)
1046 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1047 ir_node *op1, ir_node *op2) {
1049 ir_graph *rem = current_ir_graph;
1051 current_ir_graph = irg;
1052 res = new_bd_Cmp(db, block, op1, op2);
1053 current_ir_graph = rem;
1059 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1061 ir_graph *rem = current_ir_graph;
1063 current_ir_graph = irg;
1064 res = new_bd_Jmp(db, block);
1065 current_ir_graph = rem;
1071 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1073 ir_graph *rem = current_ir_graph;
1075 current_ir_graph = irg;
1076 res = new_bd_IJmp(db, block, tgt);
1077 current_ir_graph = rem;
1083 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1085 ir_graph *rem = current_ir_graph;
1087 current_ir_graph = irg;
1088 res = new_bd_Cond(db, block, c);
1089 current_ir_graph = rem;
/* rd-wrappers for Call, Return, Load, Store, Alloc, Free and simpleSel;
   each saves/restores current_ir_graph around the bd constructor. */
1095 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1096 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1098 ir_graph *rem = current_ir_graph;
1100 current_ir_graph = irg;
1101 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1102 current_ir_graph = rem;
1108 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1109 ir_node *store, int arity, ir_node **in) {
1111 ir_graph *rem = current_ir_graph;
1113 current_ir_graph = irg;
1114 res = new_bd_Return(db, block, store, arity, in);
1115 current_ir_graph = rem;
1118 } /* new_rd_Return */
1121 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1122 ir_node *store, ir_node *adr, ir_mode *mode) {
1124 ir_graph *rem = current_ir_graph;
1126 current_ir_graph = irg;
1127 res = new_bd_Load(db, block, store, adr, mode);
1128 current_ir_graph = rem;
1134 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1135 ir_node *store, ir_node *adr, ir_node *val) {
1137 ir_graph *rem = current_ir_graph;
1139 current_ir_graph = irg;
1140 res = new_bd_Store(db, block, store, adr, val);
1141 current_ir_graph = rem;
1144 } /* new_rd_Store */
1147 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1148 ir_node *size, ir_type *alloc_type, where_alloc where) {
1150 ir_graph *rem = current_ir_graph;
1152 current_ir_graph = irg;
1153 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1154 current_ir_graph = rem;
1157 } /* new_rd_Alloc */
1160 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1161 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1163 ir_graph *rem = current_ir_graph;
1165 current_ir_graph = irg;
1166 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1167 current_ir_graph = rem;
/* simpleSel is a Sel with no index operands (arity 0, in NULL). */
1173 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1174 ir_node *store, ir_node *objptr, ir_entity *ent) {
1176 ir_graph *rem = current_ir_graph;
1178 current_ir_graph = irg;
1179 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1180 current_ir_graph = rem;
1183 } /* new_rd_simpleSel */
1186 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1187 int arity, ir_node **in, ir_entity *ent) {
1189 ir_graph *rem = current_ir_graph;
1191 current_ir_graph = irg;
1192 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1193 current_ir_graph = rem;
1199 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1200 symconst_kind symkind, ir_type *tp) {
1202 ir_graph *rem = current_ir_graph;
1204 current_ir_graph = irg;
1205 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1206 current_ir_graph = rem;
1209 } /* new_rd_SymConst_type */
1212 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1213 symconst_kind symkind) {
1214 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1216 } /* new_rd_SymConst */
/* The SymConst convenience constructors below each fill the matching union
   member of symconst_symbol and place the node in the start block. */
1218 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1219 symconst_symbol sym;
1220 sym.entity_p = symbol;
1221 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1222 } /* new_rd_SymConst_addr_ent */
1224 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1225 symconst_symbol sym;
1226 sym.entity_p = symbol;
1227 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1228 } /* new_rd_SymConst_ofs_ent */
1230 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1231 symconst_symbol sym;
1232 sym.ident_p = symbol;
1233 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1234 } /* new_rd_SymConst_addr_name */
1236 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1237 symconst_symbol sym;
1238 sym.type_p = symbol;
1239 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1240 } /* new_rd_SymConst_type_tag */
1242 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1243 symconst_symbol sym;
1244 sym.type_p = symbol;
1245 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1246 } /* new_rd_SymConst_size */
1248 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1249 symconst_symbol sym;
1250 sym.type_p = symbol;
1251 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1252 } /* new_rd_SymConst_align */
/* Builds an empty Sync in `irg`, then appends the `arity` given
   predecessors one by one with add_Sync_pred (after restoring the graph). */
1255 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1257 ir_graph *rem = current_ir_graph;
1260 current_ir_graph = irg;
1261 res = new_bd_Sync(db, block);
1262 current_ir_graph = rem;
1264 for (i = 0; i < arity; ++i)
1265 add_Sync_pred(res, in[i]);
/* Bad is a singleton per graph; simply return the cached node. */
1271 new_rd_Bad(ir_graph *irg) {
1272 return get_irg_bad(irg);
1276 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1278 ir_graph *rem = current_ir_graph;
1280 current_ir_graph = irg;
1281 res = new_bd_Confirm(db, block, val, bound, cmp);
1282 current_ir_graph = rem;
1285 } /* new_rd_Confirm */
1288 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1290 ir_graph *rem = current_ir_graph;
1292 current_ir_graph = irg;
1293 res = new_bd_Unknown(m);
1294 current_ir_graph = rem;
1297 } /* new_rd_Unknown */
1300 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1302 ir_graph *rem = current_ir_graph;
1304 current_ir_graph = irg;
1305 res = new_bd_CallBegin(db, block, call);
1306 current_ir_graph = rem;
1309 } /* new_rd_CallBegin */
/* NOTE(review): unlike the other rd constructors, EndReg/EndExcept build
   the node directly instead of delegating to the bd variant. */
1312 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1315 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1316 set_irg_end_reg(irg, res);
1317 IRN_VRFY_IRG(res, irg);
1319 } /* new_rd_EndReg */
1322 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1325 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1326 set_irg_end_except(irg, res);
1327 IRN_VRFY_IRG (res, irg);
1329 } /* new_rd_EndExcept */
1332 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1334 ir_graph *rem = current_ir_graph;
1336 current_ir_graph = irg;
1337 res = new_bd_Break(db, block);
1338 current_ir_graph = rem;
1341 } /* new_rd_Break */
/* rd-wrappers for Filter, Mux, Psi and CopyB; NoMem is a per-graph
   singleton like Bad. */
1344 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1347 ir_graph *rem = current_ir_graph;
1349 current_ir_graph = irg;
1350 res = new_bd_Filter(db, block, arg, mode, proj);
1351 current_ir_graph = rem;
1354 } /* new_rd_Filter */
1357 new_rd_NoMem(ir_graph *irg) {
1358 return get_irg_no_mem(irg);
1359 } /* new_rd_NoMem */
1362 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1363 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1365 ir_graph *rem = current_ir_graph;
1367 current_ir_graph = irg;
1368 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1369 current_ir_graph = rem;
1375 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1376 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1378 ir_graph *rem = current_ir_graph;
1380 current_ir_graph = irg;
1381 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1382 current_ir_graph = rem;
1387 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1388 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1390 ir_graph *rem = current_ir_graph;
1392 current_ir_graph = irg;
1393 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1394 current_ir_graph = rem;
1397 } /* new_rd_CopyB */
/* rd-wrappers for InstOf, Raise, Bound, Pin and ASM. */
1400 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1401 ir_node *objptr, ir_type *type) {
1403 ir_graph *rem = current_ir_graph;
1405 current_ir_graph = irg;
1406 res = new_bd_InstOf(db, block, store, objptr, type);
1407 current_ir_graph = rem;
1410 } /* new_rd_InstOf */
1413 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1415 ir_graph *rem = current_ir_graph;
1417 current_ir_graph = irg;
1418 res = new_bd_Raise(db, block, store, obj);
1419 current_ir_graph = rem;
1422 } /* new_rd_Raise */
1424 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1425 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1427 ir_graph *rem = current_ir_graph;
1429 current_ir_graph = irg;
1430 res = new_bd_Bound(db, block, store, idx, lower, upper);
1431 current_ir_graph = rem;
1434 } /* new_rd_Bound */
1436 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1438 ir_graph *rem = current_ir_graph;
1440 current_ir_graph = irg;
1441 res = new_bd_Pin(db, block, node);
1442 current_ir_graph = rem;
1447 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1448 int arity, ir_node *in[], ir_asm_constraint *inputs,
1449 int n_outs, ir_asm_constraint *outputs,
1450 int n_clobber, ident *clobber[], ident *asm_text) {
1452 ir_graph *rem = current_ir_graph;
1454 current_ir_graph = irg;
1455 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1456 current_ir_graph = rem;
1462 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1463 return new_rd_Block(NULL, irg, arity, in);
1465 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1466 return new_rd_Start(NULL, irg, block);
1468 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1469 return new_rd_End(NULL, irg, block);
1471 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1472 return new_rd_Jmp(NULL, irg, block);
1474 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1475 return new_rd_IJmp(NULL, irg, block, tgt);
1477 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1478 return new_rd_Cond(NULL, irg, block, c);
1480 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1481 ir_node *store, int arity, ir_node **in) {
1482 return new_rd_Return(NULL, irg, block, store, arity, in);
1484 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1485 ir_mode *mode, tarval *con) {
1486 return new_rd_Const(NULL, irg, block, mode, con);
1488 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1489 ir_mode *mode, long value) {
1490 return new_rd_Const_long(NULL, irg, block, mode, value);
1492 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1493 ir_mode *mode, tarval *con, ir_type *tp) {
1494 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1496 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1497 symconst_symbol value, symconst_kind symkind) {
1498 return new_rd_SymConst(NULL, irg, block, value, symkind);
1500 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1501 ir_node *objptr, ir_entity *ent) {
1502 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1504 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1505 ir_node *objptr, int n_index, ir_node **index,
1507 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1509 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1510 ir_node *callee, int arity, ir_node **in,
1512 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1514 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1515 ir_node *op1, ir_node *op2, ir_mode *mode) {
1516 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1518 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1519 ir_node *op1, ir_node *op2, ir_mode *mode) {
1520 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1522 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1523 ir_node *op, ir_mode *mode) {
1524 return new_rd_Minus(NULL, irg, block, op, mode);
1526 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1527 ir_node *op1, ir_node *op2, ir_mode *mode) {
1528 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1530 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1531 ir_node *op1, ir_node *op2, ir_mode *mode) {
1532 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1534 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1535 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1536 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1538 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1539 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1540 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1542 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1543 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1544 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1546 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1547 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1548 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1550 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1551 ir_node *op, ir_mode *mode) {
1552 return new_rd_Abs(NULL, irg, block, op, mode);
1554 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1555 ir_node *op1, ir_node *op2, ir_mode *mode) {
1556 return new_rd_And(NULL, irg, block, op1, op2, mode);
1558 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1559 ir_node *op1, ir_node *op2, ir_mode *mode) {
1560 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1562 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1563 ir_node *op1, ir_node *op2, ir_mode *mode) {
1564 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1566 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1567 ir_node *op, ir_mode *mode) {
1568 return new_rd_Not(NULL, irg, block, op, mode);
1570 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1571 ir_node *op, ir_node *k, ir_mode *mode) {
1572 return new_rd_Shl(NULL, irg, block, op, k, mode);
1574 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1575 ir_node *op, ir_node *k, ir_mode *mode) {
1576 return new_rd_Shr(NULL, irg, block, op, k, mode);
1578 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1579 ir_node *op, ir_node *k, ir_mode *mode) {
1580 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1582 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1583 ir_node *op, ir_node *k, ir_mode *mode) {
1584 return new_rd_Rot(NULL, irg, block, op, k, mode);
1586 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1587 ir_node *op, ir_node *k, ir_mode *mode) {
1588 return new_rd_Carry(NULL, irg, block, op, k, mode);
1590 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1591 ir_node *op, ir_node *k, ir_mode *mode) {
1592 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1594 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1595 ir_node *op1, ir_node *op2) {
1596 return new_rd_Cmp(NULL, irg, block, op1, op2);
1598 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1599 ir_node *op, ir_mode *mode) {
1600 return new_rd_Conv(NULL, irg, block, op, mode);
1602 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1603 return new_rd_Cast(NULL, irg, block, op, to_tp);
1605 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1606 ir_node **in, ir_mode *mode) {
1607 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1609 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1610 ir_node *store, ir_node *adr, ir_mode *mode) {
1611 return new_rd_Load(NULL, irg, block, store, adr, mode);
1613 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1614 ir_node *store, ir_node *adr, ir_node *val) {
1615 return new_rd_Store(NULL, irg, block, store, adr, val);
1617 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1618 ir_node *size, ir_type *alloc_type, where_alloc where) {
1619 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1621 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1622 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1623 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1625 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1626 return new_rd_Sync(NULL, irg, block, arity, in);
1628 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1629 ir_mode *mode, long proj) {
1630 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1632 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1634 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1636 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1637 int arity, ir_node **in) {
1638 return new_rd_Tuple(NULL, irg, block, arity, in );
1640 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1641 ir_node *val, ir_mode *mode) {
1642 return new_rd_Id(NULL, irg, block, val, mode);
1644 ir_node *new_r_Bad(ir_graph *irg) {
1645 return new_rd_Bad(irg);
1647 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1648 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1650 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1651 return new_rd_Unknown(irg, m);
1653 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1654 return new_rd_CallBegin(NULL, irg, block, callee);
1656 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1657 return new_rd_EndReg(NULL, irg, block);
1659 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1660 return new_rd_EndExcept(NULL, irg, block);
1662 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1663 return new_rd_Break(NULL, irg, block);
1665 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1666 ir_mode *mode, long proj) {
1667 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1669 ir_node *new_r_NoMem(ir_graph *irg) {
1670 return new_rd_NoMem(irg);
1672 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1673 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1674 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1676 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1677 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1678 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1680 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1681 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1682 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1684 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1686 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1688 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1689 ir_node *store, ir_node *obj) {
1690 return new_rd_Raise(NULL, irg, block, store, obj);
1692 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1693 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1694 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1696 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1697 return new_rd_Pin(NULL, irg, block, node);
1699 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1700 int arity, ir_node *in[], ir_asm_constraint *inputs,
1701 int n_outs, ir_asm_constraint *outputs,
1702 int n_clobber, ident *clobber[], ident *asm_text) {
1703 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1706 /** ********************/
1707 /** public interfaces */
1708 /** construction tools */
1712 * - create a new Start node in the current block
1714 * @return s - pointer to the created Start node
1719 new_d_Start(dbg_info *db) {
1722 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1723 op_Start, mode_T, 0, NULL);
1725 res = optimize_node(res);
1726 IRN_VRFY_IRG(res, current_ir_graph);
1731 new_d_End(dbg_info *db) {
1733 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1734 op_End, mode_X, -1, NULL);
1735 res = optimize_node(res);
1736 IRN_VRFY_IRG(res, current_ir_graph);
1741 /* Constructs a Block with a fixed number of predecessors.
1742 Does set current_block. Can be used with automatic Phi
1743 node construction. */
1745 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1748 int has_unknown = 0;
1750 res = new_bd_Block(db, arity, in);
1752 /* Create and initialize array for Phi-node construction. */
1753 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1754 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1755 current_ir_graph->n_loc);
1756 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1759 for (i = arity-1; i >= 0; i--)
1760 if (get_irn_op(in[i]) == op_Unknown) {
1765 if (!has_unknown) res = optimize_node(res);
1766 current_ir_graph->current_block = res;
1768 IRN_VRFY_IRG(res, current_ir_graph);
1773 /* ***********************************************************************/
1774 /* Methods necessary for automatic Phi node creation */
1776 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1777 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1778 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1779 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1781 Call Graph: ( A ---> B == A "calls" B)
1783 get_value mature_immBlock
1791 get_r_value_internal |
1795 new_rd_Phi0 new_rd_Phi_in
1797 * *************************************************************************** */
1799 /** Creates a Phi node with 0 predecessors. */
1800 static INLINE ir_node *
1801 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1804 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1805 IRN_VRFY_IRG(res, irg);
1809 /* There are two implementations of the Phi node construction. The first
1810 is faster, but does not work for blocks with more than 2 predecessors.
1811 The second works always but is slower and causes more unnecessary Phi
1813 Select the implementations by the following preprocessor flag set in
1815 #if USE_FAST_PHI_CONSTRUCTION
1817 /* This is a stack used for allocating and deallocating nodes in
1818 new_rd_Phi_in. The original implementation used the obstack
1819 to model this stack, now it is explicit. This reduces side effects.
1821 #if USE_EXPLICIT_PHI_IN_STACK
1823 new_Phi_in_stack(void) {
1826 res = (Phi_in_stack *) xmalloc(sizeof(Phi_in_stack));
1828 res->stack = NEW_ARR_F(ir_node *, 0);
1832 } /* new_Phi_in_stack */
1835 free_Phi_in_stack(Phi_in_stack *s) {
1836 DEL_ARR_F(s->stack);
1838 } /* free_Phi_in_stack */
1841 free_to_Phi_in_stack(ir_node *phi) {
1842 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1843 current_ir_graph->Phi_in_stack->pos)
1844 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1846 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1848 (current_ir_graph->Phi_in_stack->pos)++;
1849 } /* free_to_Phi_in_stack */
1851 static INLINE ir_node *
1852 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1853 int arity, ir_node **in) {
1855 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1856 int pos = current_ir_graph->Phi_in_stack->pos;
1860 /* We need to allocate a new node */
1861 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1862 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1864 /* reuse the old node and initialize it again. */
1867 assert(res->kind == k_ir_node);
1868 assert(res->op == op_Phi);
1873 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1874 res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
1876 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1878 (current_ir_graph->Phi_in_stack->pos)--;
1881 } /* alloc_or_pop_from_Phi_in_stack */
1882 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1885 * Creates a Phi node with a given, fixed array **in of predecessors.
1886 * If the Phi node is unnecessary, as the same value reaches the block
1887 * through all control flow paths, it is eliminated and the value
1888 * returned directly. This constructor is only intended for use in
1889 * the automatic Phi node generation triggered by get_value or mature.
1890 * The implementation is quite tricky and depends on the fact, that
1891 * the nodes are allocated on a stack:
1892 * The in array contains predecessors and NULLs. The NULLs appear,
1893 * if get_r_value_internal, that computed the predecessors, reached
1894 * the same block on two paths. In this case the same value reaches
1895 * this block on both paths, there is no definition in between. We need
1896 * not allocate a Phi where these paths merge, but we have to communicate
1897 * this fact to the caller. This happens by returning a pointer to the
1898 * node the caller _will_ allocate. (Yes, we predict the address. We can
1899 * do so because the nodes are allocated on the obstack.) The caller then
1900 * finds a pointer to itself and, when this routine is called again,
1901 * eliminates itself.
1903 static INLINE ir_node *
1904 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins) {
1906 ir_node *res, *known;
1908 /* Allocate a new node on the obstack. This can return a node to
1909 which some of the pointers in the in-array already point.
1910 Attention: the constructor copies the in array, i.e., the later
1911 changes to the array in this routine do not affect the
1912 constructed node! If the in array contains NULLs, there will be
1913 missing predecessors in the returned node. Is this a possible
1914 internal state of the Phi node generation? */
1915 #if USE_EXPLICIT_PHI_IN_STACK
1916 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1918 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1919 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1922 /* The in-array can contain NULLs. These were returned by
1923 get_r_value_internal if it reached the same block/definition on a
1924 second path. The NULLs are replaced by the node itself to
1925 simplify the test in the next loop. */
1926 for (i = 0; i < ins; ++i) {
1931 /* This loop checks whether the Phi has more than one predecessor.
1932 If so, it is a real Phi node and we break the loop. Else the Phi
1933 node merges the same definition on several paths and therefore is
1935 for (i = 0; i < ins; ++i) {
1936 if (in[i] == res || in[i] == known)
1945 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1947 #if USE_EXPLICIT_PHI_IN_STACK
1948 free_to_Phi_in_stack(res);
1950 edges_node_deleted(res, current_ir_graph);
1951 obstack_free(current_ir_graph->obst, res);
1955 res = optimize_node (res);
1956 IRN_VRFY_IRG(res, irg);
1959 /* return the pointer to the Phi node. This node might be deallocated! */
1961 } /* new_rd_Phi_in */
1964 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1967 * Allocates and returns this node. The routine called to allocate the
1968 * node might optimize it away and return a real value, or even a pointer
1969 * to a deallocated Phi node on top of the obstack!
1970 * This function is called with an in-array of proper size.
1973 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
1974 ir_node *prevBlock, *res;
1977 /* This loop goes to all predecessor blocks of the block the Phi node is in
1978 and there finds the operands of the Phi node by calling
1979 get_r_value_internal. */
1980 for (i = 1; i <= ins; ++i) {
1981 assert (block->in[i]);
1982 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1984 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1987 /* After collecting all predecessors into the array nin a new Phi node
1988 with these predecessors is created. This constructor contains an
1989 optimization: If all predecessors of the Phi node are identical it
1990 returns the only operand instead of a new Phi node. If the value
1991 passes two different control flow edges without being defined, and
1992 this is the second path treated, a pointer to the node that will be
1993 allocated for the first path (recursion) is returned. We already
1994 know the address of this node, as it is the next node to be allocated
1995 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1996 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1998 /* Now we now the value for "pos" and can enter it in the array with
1999 all known local variables. Attention: this might be a pointer to
2000 a node, that later will be allocated!!! See new_rd_Phi_in().
2001 If this is called in mature, after some set_value() in the same block,
2002 the proper value must not be overwritten:
2004 get_value (makes Phi0, put's it into graph_arr)
2005 set_value (overwrites Phi0 in graph_arr)
2006 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2009 if (!block->attr.block.graph_arr[pos]) {
2010 block->attr.block.graph_arr[pos] = res;
2012 /* printf(" value already computed by %s\n",
2013 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2020 * This function returns the last definition of a variable. In case
2021 * this variable was last defined in a previous block, Phi nodes are
2022 * inserted. If the part of the firm graph containing the definition
2023 * is not yet constructed, a dummy Phi node is returned.
2026 get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
2029 /* There are 4 cases to treat.
2031 1. The block is not mature and we visit it the first time. We can not
2032 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2033 predecessors is returned. This node is added to the linked list (field
2034 "link") of the containing block to be completed when this block is
2035 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2038 2. The value is already known in this block, graph_arr[pos] is set and we
2039 visit the block the first time. We can return the value without
2040 creating any new nodes.
2042 3. The block is mature and we visit it the first time. A Phi node needs
2043 to be created (phi_merge). If the Phi is not needed, as all it's
2044 operands are the same value reaching the block through different
2045 paths, it's optimized away and the value itself is returned.
2047 4. The block is mature, and we visit it the second time. Now two
2048 subcases are possible:
2049 * The value was computed completely the last time we were here. This
2050 is the case if there is no loop. We can return the proper value.
2051 * The recursion that visited this node and set the flag did not
2052 return yet. We are computing a value in a loop and need to
2053 break the recursion without knowing the result yet.
2054 @@@ strange case. Straight forward we would create a Phi before
2055 starting the computation of it's predecessors. In this case we will
2056 find a Phi here in any case. The problem is that this implementation
2057 only creates a Phi after computing the predecessors, so that it is
2058 hard to compute self references of this Phi. @@@
2059 There is no simple check for the second subcase. Therefore we check
2060 for a second visit and treat all such cases as the second subcase.
2061 Anyways, the basic situation is the same: we reached a block
2062 on two paths without finding a definition of the value: No Phi
2063 nodes are needed on both paths.
2064 We return this information "Two paths, no Phi needed" by a very tricky
2065 implementation that relies on the fact that an obstack is a stack and
2066 will return a node with the same address on different allocations.
2067 Look also at phi_merge and new_rd_phi_in to understand this.
2068 @@@ Unfortunately this does not work, see testprogram
2069 three_cfpred_example.
2073 /* case 4 -- already visited. */
2074 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2076 /* visited the first time */
2077 set_irn_visited(block, get_irg_visited(current_ir_graph));
2079 /* Get the local valid value */
2080 res = block->attr.block.graph_arr[pos];
2082 /* case 2 -- If the value is actually computed, return it. */
2083 if (res) return res;
2085 if (block->attr.block.is_matured) { /* case 3 */
2087 /* The Phi has the same amount of ins as the corresponding block. */
2088 int ins = get_irn_arity(block);
2090 NEW_ARR_A(ir_node *, nin, ins);
2092 /* Phi merge collects the predecessors and then creates a node. */
2093 res = phi_merge(block, pos, mode, nin, ins);
2095 } else { /* case 1 */
2096 /* The block is not mature, we don't know how many in's are needed. A Phi
2097 with zero predecessors is created. Such a Phi node is called Phi0
2098 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2099 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2101 The Phi0 has to remember the pos of it's internal value. If the real
2102 Phi is computed, pos is used to update the array with the local
2105 res = new_rd_Phi0(current_ir_graph, block, mode);
2106 res->attr.phi0_pos = pos;
2107 res->link = block->link;
2111 /* If we get here, the frontend missed a use-before-definition error */
2114 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2115 assert(mode->code >= irm_F && mode->code <= irm_P);
2116 res = new_rd_Const(NULL, current_ir_graph, block, mode, tarval_mode_null[mode->code]);
2119 /* The local valid value is available now. */
2120 block->attr.block.graph_arr[pos] = res;
2123 } /* get_r_value_internal */
2128 it starts the recursion. This causes an Id at the entry of
2129 every block that has no definition of the value! **/
2131 #if USE_EXPLICIT_PHI_IN_STACK
2133 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2134 void free_Phi_in_stack(Phi_in_stack *s) {}
2137 static INLINE ir_node *
2138 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
2139 ir_node **in, int ins, ir_node *phi0) {
2141 ir_node *res, *known;
2143 /* Allocate a new node on the obstack. The allocation copies the in
2145 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2146 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2148 /* This loop checks whether the Phi has more than one predecessor.
2149 If so, it is a real Phi node and we break the loop. Else the
2150 Phi node merges the same definition on several paths and therefore
2151 is not needed. Don't consider Bad nodes! */
2153 for (i=0; i < ins; ++i)
2157 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2159 /* Optimize self referencing Phis: We can't detect them yet properly, as
2160 they still refer to the Phi0 they will replace. So replace right now. */
2161 if (phi0 && in[i] == phi0) in[i] = res;
2163 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2171 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2174 edges_node_deleted(res, current_ir_graph);
2175 obstack_free (current_ir_graph->obst, res);
2176 if (is_Phi(known)) {
2177 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2178 order, an enclosing Phi know may get superfluous. */
2179 res = optimize_in_place_2(known);
2181 exchange(known, res);
2187 /* A undefined value, e.g., in unreachable code. */
2191 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2192 IRN_VRFY_IRG(res, irg);
2193 /* Memory Phis in endless loops must be kept alive.
2194 As we can't distinguish these easily we keep all of them alive. */
2195 if ((res->op == op_Phi) && (mode == mode_M))
2196 add_End_keepalive(get_irg_end(irg), res);
2200 } /* new_rd_Phi_in */
2203 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2205 #if PRECISE_EXC_CONTEXT
2207 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2210 * Construct a new frag_array for node n.
2211 * Copy the content from the current graph_arr of the corresponding block:
2212 * this is the current state.
2213 * Set ProjM(n) as current memory state.
2214 * Further the last entry in frag_arr of current block points to n. This
2215 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2217 static INLINE ir_node **new_frag_arr(ir_node *n) {
2221 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2222 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2223 sizeof(ir_node *)*current_ir_graph->n_loc);
2225 /* turn off optimization before allocating Proj nodes, as res isn't
2227 opt = get_opt_optimize(); set_optimize(0);
2228 /* Here we rely on the fact that all frag ops have Memory as first result! */
2229 if (get_irn_op(n) == op_Call)
2230 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2231 else if (get_irn_op(n) == op_CopyB)
2232 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2234 assert((pn_Quot_M == pn_DivMod_M) &&
2235 (pn_Quot_M == pn_Div_M) &&
2236 (pn_Quot_M == pn_Mod_M) &&
2237 (pn_Quot_M == pn_Load_M) &&
2238 (pn_Quot_M == pn_Store_M) &&
2239 (pn_Quot_M == pn_Alloc_M) &&
2240 (pn_Quot_M == pn_Bound_M));
2241 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2245 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2247 } /* new_frag_arr */
2250 * Returns the frag_arr from a node.
2252 static INLINE ir_node **get_frag_arr(ir_node *n) {
2253 switch (get_irn_opcode(n)) {
2255 return n->attr.call.exc.frag_arr;
2257 return n->attr.alloc.exc.frag_arr;
2259 return n->attr.load.exc.frag_arr;
2261 return n->attr.store.exc.frag_arr;
2263 return n->attr.except.frag_arr;
2265 } /* get_frag_arr */
2268 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2270 if (!frag_arr[pos]) frag_arr[pos] = val;
2271 if (frag_arr[current_ir_graph->n_loc - 1]) {
2272 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2273 assert(arr != frag_arr && "Endless recursion detected");
2274 set_frag_value(arr, pos, val);
2279 for (i = 0; i < 1000; ++i) {
2280 if (!frag_arr[pos]) {
2281 frag_arr[pos] = val;
2283 if (frag_arr[current_ir_graph->n_loc - 1]) {
2284 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2290 assert(0 && "potential endless recursion");
2292 } /* set_frag_value */
2295 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2299 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2301 frag_arr = get_frag_arr(cfOp);
2302 res = frag_arr[pos];
2304 if (block->attr.block.graph_arr[pos]) {
2305 /* There was a set_value() after the cfOp and no get_value before that
2306 set_value(). We must build a Phi node now. */
2307 if (block->attr.block.is_matured) {
2308 int ins = get_irn_arity(block);
2310 NEW_ARR_A(ir_node *, nin, ins);
2311 res = phi_merge(block, pos, mode, nin, ins);
2313 res = new_rd_Phi0(current_ir_graph, block, mode);
2314 res->attr.phi0.pos = pos;
2315 res->link = block->link;
2319 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2320 but this should be better: (remove comment if this works) */
2321 /* It's a Phi, we can write this into all graph_arrs with NULL */
2322 set_frag_value(block->attr.block.graph_arr, pos, res);
2324 res = get_r_value_internal(block, pos, mode);
2325 set_frag_value(block->attr.block.graph_arr, pos, res);
2329 } /* get_r_frag_value_internal */
2330 #endif /* PRECISE_EXC_CONTEXT */
2333 * Computes the predecessors for the real phi node, and then
2334 * allocates and returns this node. The routine called to allocate the
2335 * node might optimize it away and return a real value.
2336 * This function must be called with an in-array of proper size.
2339 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2340 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2343 /* If this block has no value at pos create a Phi0 and remember it
2344 in graph_arr to break recursions.
2345 Else we may not set graph_arr as there a later value is remembered. */
2347 if (!block->attr.block.graph_arr[pos]) {
2348 if (block == get_irg_start_block(current_ir_graph)) {
2349 /* Collapsing to Bad tarvals is no good idea.
2350 So we call a user-supplied routine here that deals with this case as
2351 appropriate for the given language. Sorrily the only help we can give
2352 here is the position.
2354 Even if all variables are defined before use, it can happen that
2355 we get to the start block, if a Cond has been replaced by a tuple
2356 (bad, jmp). In this case we call the function needlessly, eventually
2357 generating an non existent error.
2358 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2361 if (default_initialize_local_variable) {
2362 ir_node *rem = get_cur_block();
2364 set_cur_block(block);
2365 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2369 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2370 /* We don't need to care about exception ops in the start block.
2371 There are none by definition. */
2372 return block->attr.block.graph_arr[pos];
2374 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2375 block->attr.block.graph_arr[pos] = phi0;
2376 #if PRECISE_EXC_CONTEXT
2377 if (get_opt_precise_exc_context()) {
2378 /* Set graph_arr for fragile ops. Also here we should break recursion.
2379 We could choose a cyclic path through an cfop. But the recursion would
2380 break at some point. */
2381 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2387 /* This loop goes to all predecessor blocks of the block the Phi node
2388 is in and there finds the operands of the Phi node by calling
2389 get_r_value_internal. */
2390 for (i = 1; i <= ins; ++i) {
2391 prevCfOp = skip_Proj(block->in[i]);
2393 if (is_Bad(prevCfOp)) {
2394 /* In case a Cond has been optimized we would get right to the start block
2395 with an invalid definition. */
2396 nin[i-1] = new_Bad();
2399 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2401 if (!is_Bad(prevBlock)) {
2402 #if PRECISE_EXC_CONTEXT
2403 if (get_opt_precise_exc_context() &&
2404 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2405 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2406 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2409 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2411 nin[i-1] = new_Bad();
2415 /* We want to pass the Phi0 node to the constructor: this finds additional
2416 optimization possibilities.
2417 The Phi0 node either is allocated in this function, or it comes from
2418 a former call to get_r_value_internal. In this case we may not yet
2419 exchange phi0, as this is done in mature_immBlock. */
2421 phi0_all = block->attr.block.graph_arr[pos];
2422 if (!((get_irn_op(phi0_all) == op_Phi) &&
2423 (get_irn_arity(phi0_all) == 0) &&
2424 (get_nodes_block(phi0_all) == block)))
2430 /* After collecting all predecessors into the array nin a new Phi node
2431 with these predecessors is created. This constructor contains an
2432 optimization: If all predecessors of the Phi node are identical it
2433 returns the only operand instead of a new Phi node. */
2434 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2436 /* In case we allocated a Phi0 node at the beginning of this procedure,
2437 we need to exchange this Phi0 with the real Phi. */
2439 exchange(phi0, res);
2440 block->attr.block.graph_arr[pos] = res;
2441 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2442 only an optimization. */
2449 * This function returns the last definition of a variable. In case
2450 * this variable was last defined in a previous block, Phi nodes are
2451 * inserted. If the part of the firm graph containing the definition
2452 * is not yet constructed, a dummy Phi node is returned.
2455 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2457 /* There are 4 cases to treat.
2459 1. The block is not mature and we visit it the first time. We can not
2460 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2461 predecessors is returned. This node is added to the linked list (field
2462 "link") of the containing block to be completed when this block is
2463 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2466 2. The value is already known in this block, graph_arr[pos] is set and we
2467 visit the block the first time. We can return the value without
2468 creating any new nodes.
2470 3. The block is mature and we visit it the first time. A Phi node needs
2471 to be created (phi_merge). If the Phi is not needed, as all it's
2472 operands are the same value reaching the block through different
2473 paths, it's optimized away and the value itself is returned.
2475 4. The block is mature, and we visit it the second time. Now two
2476 subcases are possible:
2477 * The value was computed completely the last time we were here. This
2478 is the case if there is no loop. We can return the proper value.
2479 * The recursion that visited this node and set the flag did not
2480 return yet. We are computing a value in a loop and need to
2481 break the recursion. This case only happens if we visited
2482 the same block with phi_merge before, which inserted a Phi0.
2483 So we return the Phi0.
2486 /* case 4 -- already visited. */
2487 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2488 /* As phi_merge allocates a Phi0 this value is always defined. Here
2489 is the critical difference of the two algorithms. */
2490 assert(block->attr.block.graph_arr[pos]);
2491 return block->attr.block.graph_arr[pos];
2494 /* visited the first time */
2495 set_irn_visited(block, get_irg_visited(current_ir_graph));
2497 /* Get the local valid value */
2498 res = block->attr.block.graph_arr[pos];
2500 /* case 2 -- If the value is actually computed, return it. */
2501 if (res) { return res; };
2503 if (block->attr.block.is_matured) { /* case 3 */
2505 /* The Phi has the same amount of ins as the corresponding block. */
2506 int ins = get_irn_arity(block);
2508 NEW_ARR_A (ir_node *, nin, ins);
2510 /* Phi merge collects the predecessors and then creates a node. */
2511 res = phi_merge (block, pos, mode, nin, ins);
2513 } else { /* case 1 */
2514 /* The block is not mature, we don't know how many in's are needed. A Phi
2515 with zero predecessors is created. Such a Phi node is called Phi0
2516 node. The Phi0 is then added to the list of Phi0 nodes in this block
2517 to be matured by mature_immBlock later.
2518 The Phi0 has to remember the pos of it's internal value. If the real
2519 Phi is computed, pos is used to update the array with the local
2521 res = new_rd_Phi0(current_ir_graph, block, mode);
2522 res->attr.phi0.pos = pos;
2523 res->link = block->link;
2527 /* If we get here, the frontend missed a use-before-definition error */
2530 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2531 assert(mode->code >= irm_F && mode->code <= irm_P);
2532 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2533 get_mode_null(mode));
2536 /* The local valid value is available now. */
2537 block->attr.block.graph_arr[pos] = res;
2540 } /* get_r_value_internal */
2542 #endif /* USE_FAST_PHI_CONSTRUCTION */
2544 /* ************************************************************************** */
2547 * Finalize a Block node, when all control flows are known.
2548 * Acceptable parameters are only Block nodes.
/* mature_immBlock -- finalize an immature Block once all of its control-flow
   predecessors have been added via add_immBlock_pred().  Every Phi0
   placeholder queued on block->link is replaced by a real Phi (phi_merge),
   the block is marked matured, and the finished node is optimized in place.
   NOTE(review): this listing elides interior source lines (the embedded
   numbering is non-contiguous); comments annotate only the visible lines. */
2551 mature_immBlock(ir_node *block) {
2556 assert(get_irn_opcode(block) == iro_Block);
2557 /* @@@ should be commented in
2558 assert (!get_Block_matured(block) && "Block already matured"); */
/* Maturing an already-matured block is a no-op: all work is guarded here. */
2560 if (!get_Block_matured(block)) {
/* in[0] is reserved (predecessors start at index 1, cf. phi_merge's loop),
   so the predecessor count is the array length minus one. */
2561 ins = ARR_LEN(block->in)-1;
2562 /* Fix block parameters */
2563 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2565 /* An array for building the Phi nodes. */
2566 NEW_ARR_A(ir_node *, nin, ins);
2568 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each queued Phi0 remembers its value slot in attr.phi0.pos; a fresh
   visited counter per Phi0 lets get_r_value_internal detect cycles anew. */
2570 for (n = block->link; n; n = next) {
2571 inc_irg_visited(current_ir_graph);
2573 exchange(n, phi_merge(block, n->attr.phi0.pos, n->mode, nin, ins));
2576 block->attr.block.is_matured = 1;
2578 /* Now, as the block is a finished firm node, we can optimize it.
2579 Since other nodes have been allocated since the block was created
2580 we can not free the node on the obstack. Therefore we have to call
2582 Unfortunately the optimization does not change a lot, as all allocated
2583 nodes refer to the unoptimized node.
2584 We can call _2, as global cse has no effect on blocks. */
2585 block = optimize_in_place_2(block);
2586 IRN_VRFY_IRG(block, current_ir_graph);
2588 } /* mature_immBlock */
2591 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2592 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2596 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2597 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2601 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2602 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2603 } /* new_d_Const_long */
2606 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2607 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2608 } /* new_d_Const_type */
2612 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2613 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2617 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2618 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2622 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2625 assert(arg->op == op_Cond);
2626 arg->attr.cond.kind = fragmentary;
2627 arg->attr.cond.default_proj = max_proj;
2628 res = new_Proj(arg, mode_X, max_proj);
2630 } /* new_d_defaultProj */
2633 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2634 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2638 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2639 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2640 } /* new_d_strictConv */
2643 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2644 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2648 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2649 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2659 * Allocate the frag array.
/* Lazily attach a "frag array" (a per-fragile-op snapshot of the value
   array, used when PRECISE_EXC_CONTEXT is active) to a freshly built node.
   Done only while the graph is still under construction, when res was not
   optimized away into a different op, and when no array exists yet
   (e.g. the node was a CSE hit that already carries one). */
2661 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2662 if (get_opt_precise_exc_context()) {
2663 if ((current_ir_graph->phase_state == phase_building) &&
2664 (get_irn_op(res) == op) && /* Could be optimized away. */
2665 !*frag_store) /* Could be a cse where the arr is already set. */ {
2666 *frag_store = new_frag_arr(res);
2669 } /* allocate_frag_arr */
2672 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2674 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2675 #if PRECISE_EXC_CONTEXT
2676 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2683 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2685 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2686 #if PRECISE_EXC_CONTEXT
2687 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2691 } /* new_d_DivMod */
2694 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2696 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2697 #if PRECISE_EXC_CONTEXT
2698 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2705 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2707 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2708 #if PRECISE_EXC_CONTEXT
2709 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
2728 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2729 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2733 new_d_Jmp(dbg_info *db) {
2734 return new_bd_Jmp(db, current_ir_graph->current_block);
2738 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2739 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2743 new_d_Cond(dbg_info *db, ir_node *c) {
2744 return new_bd_Cond(db, current_ir_graph->current_block, c);
2748 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2751 res = new_bd_Call(db, current_ir_graph->current_block,
2752 store, callee, arity, in, tp);
2753 #if PRECISE_EXC_CONTEXT
2754 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2761 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2762 return new_bd_Return(db, current_ir_graph->current_block,
2764 } /* new_d_Return */
2767 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2769 res = new_bd_Load(db, current_ir_graph->current_block,
2771 #if PRECISE_EXC_CONTEXT
2772 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2779 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2781 res = new_bd_Store(db, current_ir_graph->current_block,
2783 #if PRECISE_EXC_CONTEXT
2784 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2791 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2792 where_alloc where) {
2794 res = new_bd_Alloc(db, current_ir_graph->current_block,
2795 store, size, alloc_type, where);
2796 #if PRECISE_EXC_CONTEXT
2797 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
2804 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2805 ir_node *size, ir_type *free_type, where_alloc where) {
2806 return new_bd_Free(db, current_ir_graph->current_block,
2807 store, ptr, size, free_type, where);
2811 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2812 /* GL: objptr was called frame before. Frame was a bad choice for the name
2813 as the operand could as well be a pointer to a dynamic object. */
2815 return new_bd_Sel(db, current_ir_graph->current_block,
2816 store, objptr, 0, NULL, ent);
2817 } /* new_d_simpleSel */
2820 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2821 return new_bd_Sel(db, current_ir_graph->current_block,
2822 store, objptr, n_index, index, sel);
2826 new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2827 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2829 } /* new_d_SymConst_type */
2832 new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
2833 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
2834 value, kind, firm_unknown_type);
2835 } /* new_d_SymConst */
2838 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2839 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2845 return _new_d_Bad();
2849 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2850 return new_bd_Confirm(db, current_ir_graph->current_block,
2852 } /* new_d_Confirm */
2855 new_d_Unknown(ir_mode *m) {
2856 return new_bd_Unknown(m);
2857 } /* new_d_Unknown */
2860 new_d_CallBegin(dbg_info *db, ir_node *call) {
2861 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2862 } /* new_d_CallBegin */
2865 new_d_EndReg(dbg_info *db) {
2866 return new_bd_EndReg(db, current_ir_graph->current_block);
2867 } /* new_d_EndReg */
2870 new_d_EndExcept(dbg_info *db) {
2871 return new_bd_EndExcept(db, current_ir_graph->current_block);
2872 } /* new_d_EndExcept */
2875 new_d_Break(dbg_info *db) {
2876 return new_bd_Break(db, current_ir_graph->current_block);
2880 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2881 return new_bd_Filter(db, current_ir_graph->current_block,
2883 } /* new_d_Filter */
2886 (new_d_NoMem)(void) {
2887 return _new_d_NoMem();
2891 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2892 ir_node *ir_true, ir_mode *mode) {
2893 return new_bd_Mux(db, current_ir_graph->current_block,
2894 sel, ir_false, ir_true, mode);
2898 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2899 return new_bd_Psi(db, current_ir_graph->current_block,
2900 arity, conds, vals, mode);
2903 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2904 ir_node *dst, ir_node *src, ir_type *data_type) {
2906 res = new_bd_CopyB(db, current_ir_graph->current_block,
2907 store, dst, src, data_type);
2908 #if PRECISE_EXC_CONTEXT
2909 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2915 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2916 return new_bd_InstOf(db, current_ir_graph->current_block,
2917 store, objptr, type);
2918 } /* new_d_InstOf */
2921 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2922 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2925 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2926 ir_node *idx, ir_node *lower, ir_node *upper) {
2928 res = new_bd_Bound(db, current_ir_graph->current_block,
2929 store, idx, lower, upper);
2930 #if PRECISE_EXC_CONTEXT
2931 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2937 new_d_Pin(dbg_info *db, ir_node *node) {
2938 return new_bd_Pin(db, current_ir_graph->current_block, node);
2942 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2943 int n_outs, ir_asm_constraint *outputs,
2944 int n_clobber, ident *clobber[], ident *asm_text) {
2945 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2948 /* ********************************************************************* */
2949 /* Comfortable interface with automatic Phi node construction. */
2950 /* (Uses also constructors of ?? interface, except new_Block. */
2951 /* ********************************************************************* */
2953 /* Block construction */
2954 /* immature Block without predecessors */
/* Creates a new immature Block (dynamic in-array, no predecessors yet),
   makes it the current block, and sets up the per-block value array used
   for automatic Phi construction.  Predecessors are added later with
   add_immBlock_pred(); the block is finished with mature_immBlock().
   NOTE(review): interior lines of this listing are elided; comments
   annotate only the visible lines. */
2956 new_d_immBlock(dbg_info *db) {
2959 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2960 /* creates a new dynamic in-array as length of in is -1 */
2961 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
/* Subsequent implicit constructors place their nodes into this block. */
2962 current_ir_graph->current_block = res;
2964 /* macroblock head */
2967 res->attr.block.is_matured = 0;
2968 res->attr.block.is_dead = 0;
2969 res->attr.block.is_mb_head = 1;
2970 res->attr.block.has_label = 0;
2971 res->attr.block.irg = current_ir_graph;
2972 res->attr.block.backedge = NULL;
2973 res->attr.block.in_cg = NULL;
2974 res->attr.block.cg_backedge = NULL;
2975 res->attr.block.extblk = NULL;
2976 res->attr.block.region = NULL;
2977 res->attr.block.mb_depth = 0;
2978 res->attr.block.label = 0;
2980 set_Block_block_visited(res, 0);
2982 /* Create and initialize array for Phi-node construction. */
/* One slot per local value (n_loc), all cleared: NULL means "no local
   definition yet", which drives Phi construction in get_r_value_internal. */
2983 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2984 current_ir_graph->n_loc);
2985 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2987 /* Immature block may not be optimized! */
2988 IRN_VRFY_IRG(res, current_ir_graph);
2991 } /* new_d_immBlock */
2994 new_immBlock(void) {
2995 return new_d_immBlock(NULL);
2996 } /* new_immBlock */
2998 /* immature PartBlock with its predecessors */
3000 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
3001 ir_node *res = new_d_immBlock(db);
3002 ir_node *blk = get_nodes_block(pred_jmp);
3004 res->in[0] = blk->in[0];
3005 add_immBlock_pred(res, pred_jmp);
3007 res->attr.block.is_mb_head = 0;
3008 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
3011 } /* new_d_immPartBlock */
3014 new_immPartBlock(ir_node *pred_jmp) {
3015 return new_d_immPartBlock(NULL, pred_jmp);
3016 } /* new_immPartBlock */
3018 /* add an edge to a jmp/control flow node */
/* Appends jmp as the next control-flow predecessor of the (still immature)
   block.  Only legal on macroblock heads that have not been matured yet. */
3020 add_immBlock_pred(ir_node *block, ir_node *jmp) {
/* Index of the slot being filled: in[0] is reserved, so the current
   predecessor count equals ARR_LEN(block->in) - 1. */
3021 int n = ARR_LEN(block->in) - 1;
3023 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
3024 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
3025 assert(jmp != NULL);
3027 ARR_APP1(ir_node *, block->in, jmp);
/* Notify edge/debug hooks about the newly created predecessor edge. */
3029 hook_set_irn_n(block, n, jmp, NULL);
3030 } /* add_immBlock_pred */
3032 /* changing the current block */
/* Makes target the block into which subsequent implicit constructors
   (those without an explicit block argument) place their nodes. */
3034 set_cur_block(ir_node *target) {
3035 current_ir_graph->current_block = target;
3036 } /* set_cur_block */
3038 /* ************************ */
3039 /* parameter administration */
3041 /* get a value from the parameter array from the current block by its index */
/* Returns the current definition of local value 'pos', constructing Phi
   nodes on demand.  Slot 0 of graph_arr holds the memory state (see
   get_store/set_store), hence the pos + 1 shift. */
3043 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
3044 ir_graph *irg = current_ir_graph;
3045 assert(get_irg_phase_state(irg) == phase_building);
/* Fresh visited counter: get_r_value_internal uses it to detect when it
   re-enters a block during the recursive Phi search. */
3046 inc_irg_visited(irg);
3049 return get_r_value_internal(irg->current_block, pos + 1, mode);
3052 /* get a value from the parameter array from the current block by its index */
3054 get_value(int pos, ir_mode *mode) {
3055 return get_d_value(NULL, pos, mode);
3058 /* set a value at position pos in the parameter array from the current block */
/* Records value as the current definition of local variable 'pos' in the
   current block.  Stored at slot pos + 1: slot 0 is reserved for the
   memory state (see set_store). */
3060 set_value(int pos, ir_node *value) {
3061 ir_graph *irg = current_ir_graph;
3062 assert(get_irg_phase_state(irg) == phase_building);
/* n_loc counts all slots including the memory slot 0, so pos + 1 must
   stay strictly below it. */
3063 assert(pos+1 < irg->n_loc);
3064 irg->current_block->attr.block.graph_arr[pos + 1] = value;
3067 /* Find the value number for a node in the current block.*/
/* Linear scan of the current block's value array for 'value', searching
   from the top slot down to 1 (slot 0 is the memory state and is skipped).
   NOTE(review): the return statements are elided in this listing;
   presumably the matching index i is returned, else a not-found sentinel
   -- confirm against the full source. */
3069 find_value(ir_node *value) {
3071 ir_node *bl = current_ir_graph->current_block;
3073 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
3074 if (bl->attr.block.graph_arr[i] == value)
3079 /* get the current store */
3082 ir_graph *irg = current_ir_graph;
3084 assert(get_irg_phase_state(irg) == phase_building);
3085 /* GL: one could call get_value instead */
3086 inc_irg_visited(irg);
3087 return get_r_value_internal(irg->current_block, 0, mode_M);
3090 /* set the current store: handles automatic Sync construction for Load nodes */
3092 set_store(ir_node *store) {
3093 ir_node *load, *pload, *pred, *in[2];
3095 assert(get_irg_phase_state(current_ir_graph) == phase_building);
3096 /* Beware: due to dead code elimination, a store might become a Bad node even in
3097 the construction phase. */
3098 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
3100 if (get_opt_auto_create_sync()) {
3101 /* handle non-volatile Load nodes by automatically creating Sync's */
3102 load = skip_Proj(store);
3103 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3104 pred = get_Load_mem(load);
3106 if (is_Sync(pred)) {
3107 /* a Load after a Sync: move it up */
3108 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
3110 set_Load_mem(load, get_memop_mem(mem));
3111 add_Sync_pred(pred, store);
3114 pload = skip_Proj(pred);
3115 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3116 /* a Load after a Load: create a new Sync */
3117 set_Load_mem(load, get_Load_mem(pload));
3121 store = new_Sync(2, in);
3126 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
3130 keep_alive(ir_node *ka) {
3131 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3134 /* --- Useful access routines --- */
3135 /* Returns the current block of the current graph. To set the current
3136 block use set_cur_block. */
3137 ir_node *get_cur_block(void) {
3138 return get_irg_current_block(current_ir_graph);
3139 } /* get_cur_block */
3141 /* Returns the frame type of the current graph */
3142 ir_type *get_cur_frame_type(void) {
3143 return get_irg_frame_type(current_ir_graph);
3144 } /* get_cur_frame_type */
3147 /* ********************************************************************* */
3150 /* call once for each run of the library */
3152 init_cons(uninitialized_local_variable_func_t *func) {
3153 default_initialize_local_variable = func;
3157 irp_finalize_cons(void) {
3159 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3160 irg_finalize_cons(get_irp_irg(i));
3162 irp->phase_state = phase_high;
3163 } /* irp_finalize_cons */
3166 ir_node *new_Block(int arity, ir_node **in) {
3167 return new_d_Block(NULL, arity, in);
3169 ir_node *new_Start(void) {
3170 return new_d_Start(NULL);
3172 ir_node *new_End(void) {
3173 return new_d_End(NULL);
3175 ir_node *new_Jmp(void) {
3176 return new_d_Jmp(NULL);
3178 ir_node *new_IJmp(ir_node *tgt) {
3179 return new_d_IJmp(NULL, tgt);
3181 ir_node *new_Cond(ir_node *c) {
3182 return new_d_Cond(NULL, c);
3184 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3185 return new_d_Return(NULL, store, arity, in);
3187 ir_node *new_Const(ir_mode *mode, tarval *con) {
3188 return new_d_Const(NULL, mode, con);
3191 ir_node *new_Const_long(ir_mode *mode, long value) {
3192 return new_d_Const_long(NULL, mode, value);
3195 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3196 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3199 ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
3200 return new_d_SymConst_type(NULL, value, kind, type);
3202 ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
3203 return new_d_SymConst(NULL, value, kind);
3205 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3206 return new_d_simpleSel(NULL, store, objptr, ent);
3208 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3210 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3212 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3214 return new_d_Call(NULL, store, callee, arity, in, tp);
3216 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3217 return new_d_Add(NULL, op1, op2, mode);
3219 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3220 return new_d_Sub(NULL, op1, op2, mode);
3222 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3223 return new_d_Minus(NULL, op, mode);
3225 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3226 return new_d_Mul(NULL, op1, op2, mode);
3228 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
3229 return new_d_Mulh(NULL, op1, op2, mode);
3231 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3232 return new_d_Quot(NULL, memop, op1, op2, mode, state);
3234 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3235 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
3237 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3238 return new_d_Div(NULL, memop, op1, op2, mode, state);
3240 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3241 return new_d_Mod(NULL, memop, op1, op2, mode, state);
3243 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3244 return new_d_Abs(NULL, op, mode);
3246 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3247 return new_d_And(NULL, op1, op2, mode);
3249 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3250 return new_d_Or(NULL, op1, op2, mode);
3252 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3253 return new_d_Eor(NULL, op1, op2, mode);
3255 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3256 return new_d_Not(NULL, op, mode);
3258 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3259 return new_d_Shl(NULL, op, k, mode);
3261 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3262 return new_d_Shr(NULL, op, k, mode);
3264 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3265 return new_d_Shrs(NULL, op, k, mode);
3267 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
3268 return new_d_Rot(NULL, op, k, mode);
3270 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3271 return new_d_Carry(NULL, op1, op2, mode);
3273 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3274 return new_d_Borrow(NULL, op1, op2, mode);
3276 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3277 return new_d_Cmp(NULL, op1, op2);
3279 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3280 return new_d_Conv(NULL, op, mode);
3282 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3283 return new_d_strictConv(NULL, op, mode);
3285 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3286 return new_d_Cast(NULL, op, to_tp);
3288 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3289 return new_d_Phi(NULL, arity, in, mode);
3291 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3292 return new_d_Load(NULL, store, addr, mode);
3294 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3295 return new_d_Store(NULL, store, addr, val);
3297 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3298 where_alloc where) {
3299 return new_d_Alloc(NULL, store, size, alloc_type, where);
3301 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3302 ir_type *free_type, where_alloc where) {
3303 return new_d_Free(NULL, store, ptr, size, free_type, where);
3305 ir_node *new_Sync(int arity, ir_node *in[]) {
3306 return new_d_Sync(NULL, arity, in);
3308 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3309 return new_d_Proj(NULL, arg, mode, proj);
3311 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3312 return new_d_defaultProj(NULL, arg, max_proj);
3314 ir_node *new_Tuple(int arity, ir_node **in) {
3315 return new_d_Tuple(NULL, arity, in);
3317 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3318 return new_d_Id(NULL, val, mode);
3320 ir_node *new_Bad(void) {
3323 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3324 return new_d_Confirm(NULL, val, bound, cmp);
3326 ir_node *new_Unknown(ir_mode *m) {
3327 return new_d_Unknown(m);
3329 ir_node *new_CallBegin(ir_node *callee) {
3330 return new_d_CallBegin(NULL, callee);
3332 ir_node *new_EndReg(void) {
3333 return new_d_EndReg(NULL);
3335 ir_node *new_EndExcept(void) {
3336 return new_d_EndExcept(NULL);
3338 ir_node *new_Break(void) {
3339 return new_d_Break(NULL);
3341 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3342 return new_d_Filter(NULL, arg, mode, proj);
3344 ir_node *new_NoMem(void) {
3345 return new_d_NoMem();
3347 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3348 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3350 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3351 return new_d_Psi(NULL, arity, conds, vals, mode);
3353 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3354 return new_d_CopyB(NULL, store, dst, src, data_type);
3356 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3357 return new_d_InstOf(NULL, store, objptr, ent);
3359 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3360 return new_d_Raise(NULL, store, obj);
3362 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3363 return new_d_Bound(NULL, store, idx, lower, upper);
3365 ir_node *new_Pin(ir_node *node) {
3366 return new_d_Pin(NULL, node);
3368 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3369 int n_outs, ir_asm_constraint *outputs,
3370 int n_clobber, ident *clobber[], ident *asm_text) {
3371 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3374 /* create a new anchor node */
3375 ir_node *new_Anchor(ir_graph *irg) {
3376 ir_node *in[anchor_last];
3377 memset(in, 0, sizeof(in));
3378 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);