2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
60 /* when we need verifying */
/* NOTE(review): the two IRN_VRFY_IRG definitions below are the two arms of an
 * #ifdef that was dropped by the line sampling of this listing: the empty
 * variant disables per-node verification, the second runs irn_vrfy_irg() on
 * every freshly built node (debug builds). Confirm against the full file. */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
68 * Language dependent variable initialization callback.
/* Frontend-installed hook used to produce a value for a local variable that is
 * read before it is written; NULL means no language-specific initializer. */
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* NOTE(review): the macros below stamp out the node-constructor families.
 * Naming convention (established by libFirm):
 *   new_bd_*  - "block, debug": build in a given block of current_ir_graph;
 *   new_rd_*  - "raw, debug":  like bd but takes an explicit ir_graph and
 *               temporarily swaps current_ir_graph around the bd call;
 *   new_d_*   - build in current_ir_graph->current_block.
 * Several backslash-continued lines (result declaration, `in[]` setup and the
 * final return) were dropped by the line sampling of this listing, so the
 * macro bodies shown here are incomplete. No comments are inserted inside the
 * macro bodies to keep the continuation lines byte-identical. */
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = state; \
117 res->attr.divmod.res_mode = mode; \
118 res = optimize_node(res); \
119 IRN_VRFY_IRG(res, irg); \
123 /* creates a rd constructor for a binop */
124 #define NEW_RD_BINOP(instr) \
126 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
127 ir_node *op1, ir_node *op2, ir_mode *mode) \
130 ir_graph *rem = current_ir_graph; \
131 current_ir_graph = irg; \
132 res = new_bd_##instr(db, block, op1, op2, mode); \
133 current_ir_graph = rem; \
137 /* creates a rd constructor for an unop */
138 #define NEW_RD_UNOP(instr) \
140 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
141 ir_node *op, ir_mode *mode) \
144 ir_graph *rem = current_ir_graph; \
145 current_ir_graph = irg; \
146 res = new_bd_##instr(db, block, op, mode); \
147 current_ir_graph = rem; \
151 /* creates a rd constructor for an divop */
152 #define NEW_RD_DIVOP(instr) \
154 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
155 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
158 ir_graph *rem = current_ir_graph; \
159 current_ir_graph = irg; \
160 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
161 current_ir_graph = rem; \
165 /* creates a d constructor for an binop */
166 #define NEW_D_BINOP(instr) \
168 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
169 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
172 /* creates a d constructor for an unop */
173 #define NEW_D_UNOP(instr) \
175 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
176 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
181 * Constructs a Block with a fixed number of predecessors.
182 * Does not set current_block. Cannot be used with automatic
183 * Phi node construction.
/* Builds the Block node, initializes all block attributes (alive, macroblock
 * header of itself, no label, owning graph, fresh backedge array, no
 * interprocedural ins) and marks the block matured since its predecessor
 * count is fixed at construction time. */
186 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
188 ir_graph *irg = current_ir_graph;
190 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
192 /* macroblock header */
195 res->attr.block.is_dead = 0;
196 res->attr.block.is_mb_head = 1;
197 res->attr.block.has_label = 0;
198 res->attr.block.irg = irg;
199 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
200 res->attr.block.in_cg = NULL;
201 res->attr.block.cg_backedge = NULL;
202 res->attr.block.extblk = NULL;
203 res->attr.block.mb_depth = 0;
204 res->attr.block.label = 0;
206 set_Block_matured(res, 1);
207 set_Block_block_visited(res, 0);
209 IRN_VRFY_IRG(res, irg);
/* Start node: mode_T tuple producing initial memory, frame, args, ... */
214 new_bd_Start(dbg_info *db, ir_node *block) {
216 ir_graph *irg = current_ir_graph;
218 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
220 IRN_VRFY_IRG(res, irg);
/* End node: arity -1 requests a dynamic in-array; predecessors (keepalives,
 * returns) are added later. */
225 new_bd_End(dbg_info *db, ir_node *block) {
227 ir_graph *irg = current_ir_graph;
229 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
231 IRN_VRFY_IRG(res, irg);
236 * Creates a Phi node with all predecessors. Calling this constructor
237 * is only allowed if the corresponding block is mature.
240 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
242 ir_graph *irg = current_ir_graph;
246 /* Don't assert that block matured: the use of this constructor is strongly
248 if ( get_Block_matured(block) )
249 assert( get_irn_arity(block) == arity );
251 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
253 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan operands for Unknown: a Phi with Unknown inputs must not be optimized
 * away, since the Unknowns will be replaced later. */
255 for (i = arity-1; i >= 0; i--)
256 if (get_irn_op(in[i]) == op_Unknown) {
261 if (!has_unknown) res = optimize_node(res);
262 IRN_VRFY_IRG(res, irg);
264 /* Memory Phis in endless loops must be kept alive.
265 As we can't distinguish these easily we keep all of them alive. */
266 if ((res->op == op_Phi) && (mode == mode_M))
267 add_End_keepalive(get_irg_end(irg), res);
/* Const with an explicit type. Note: the `block` parameter is ignored on
 * purpose - constants are always placed in the start block of the graph. */
272 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
274 ir_graph *irg = current_ir_graph;
277 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
278 res->attr.con.tv = con;
279 set_Const_type(res, tp); /* Call method because of complex assertion. */
280 res = optimize_node (res);
281 assert(get_Const_type(res) == tp);
282 IRN_VRFY_IRG(res, irg);
285 } /* new_bd_Const_type */
/* Untyped Const: delegates with firm_unknown_type.
 * NOTE(review): delegates to the rd variant although this is a bd
 * constructor - harmless since irg == current_ir_graph here, but
 * inconsistent with its siblings; confirm against the full file. */
288 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
289 ir_graph *irg = current_ir_graph;
291 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Const from a host `long`, converted to a tarval of the given mode. */
295 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
296 ir_graph *irg = current_ir_graph;
298 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
299 } /* new_bd_Const_long */
/* Id node: a transparent copy of `val`; optimize_node normally folds it. */
302 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
304 ir_graph *irg = current_ir_graph;
306 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
307 res = optimize_node(res);
308 IRN_VRFY_IRG(res, irg);
/* Proj: selects result `proj` out of the tuple produced by `arg`. */
313 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
316 ir_graph *irg = current_ir_graph;
318 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
319 res->attr.proj = proj;
322 assert(get_Proj_pred(res));
323 assert(get_nodes_block(get_Proj_pred(res)));
325 res = optimize_node(res);
327 IRN_VRFY_IRG(res, irg);
/* Default Proj of a Cond: marks the Cond as fragmentary switch and records
 * max_proj as the default projection number. */
332 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
335 ir_graph *irg = current_ir_graph;
337 assert(arg->op == op_Cond);
338 arg->attr.cond.kind = fragmentary;
339 arg->attr.cond.default_proj = max_proj;
340 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
342 } /* new_bd_defaultProj */
/* Conv: mode conversion of `op`; strict_flag marks strict conversions that
 * must not be removed even if source and target mode agree. */
345 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
347 ir_graph *irg = current_ir_graph;
349 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
350 res->attr.conv.strict = strict_flag;
351 res = optimize_node(res);
352 IRN_VRFY_IRG(res, irg);
/* Cast: high-level type cast; keeps the operand's mode, only the ir_type
 * changes. Only atomic types may be cast. */
357 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
359 ir_graph *irg = current_ir_graph;
361 assert(is_atomic_type(to_tp));
363 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
364 res->attr.cast.totype = to_tp;
365 res = optimize_node(res);
366 IRN_VRFY_IRG(res, irg);
/* Tuple: groups `arity` nodes into one mode_T value, accessed via Proj. */
371 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
373 ir_graph *irg = current_ir_graph;
375 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
376 res = optimize_node (res);
377 IRN_VRFY_IRG(res, irg);
/* Cmp: compares op1 and op2; mode_T result projected by pn_Cmp relation. */
402 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
405 ir_graph *irg = current_ir_graph;
408 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
409 res = optimize_node(res);
410 IRN_VRFY_IRG(res, irg);
/* Jmp: unconditional control flow out of `block`. */
415 new_bd_Jmp(dbg_info *db, ir_node *block) {
417 ir_graph *irg = current_ir_graph;
419 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
420 res = optimize_node(res);
421 IRN_VRFY_IRG(res, irg);
/* IJmp: computed (indirect) jump to the address value `tgt`. */
426 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
428 ir_graph *irg = current_ir_graph;
430 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
431 res = optimize_node(res);
432 IRN_VRFY_IRG(res, irg);
/* Cond: conditional branch / switch on `c`; starts as a dense jump table
 * with default projection 0 and no branch prediction. */
437 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
439 ir_graph *irg = current_ir_graph;
441 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
442 res->attr.cond.kind = dense;
443 res->attr.cond.default_proj = 0;
444 res->attr.cond.pred = COND_JMP_PRED_NONE;
445 res = optimize_node(res);
446 IRN_VRFY_IRG(res, irg);
/* Call: ins are [store, callee, arg0..argN-1]; r_in is built on the stack
 * via NEW_ARR_A and the user args copied in starting at index 2. The call
 * type must be a method type (or the unknown type). Pinned by default. */
451 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
452 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
456 ir_graph *irg = current_ir_graph;
459 NEW_ARR_A(ir_node *, r_in, r_arity);
462 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
464 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
466 assert((get_unknown_type() == tp) || is_Method_type(tp));
467 set_Call_type(res, tp);
468 res->attr.call.exc.pin_state = op_pin_state_pinned;
469 res->attr.call.callee_arr = NULL;
470 res = optimize_node(res);
471 IRN_VRFY_IRG(res, irg);
/* Return: ins are [store, res0..resN-1]; result values copied in at index 1. */
476 new_bd_Return(dbg_info *db, ir_node *block,
477 ir_node *store, int arity, ir_node **in) {
481 ir_graph *irg = current_ir_graph;
484 NEW_ARR_A (ir_node *, r_in, r_arity);
486 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
487 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
491 } /* new_bd_Return */
/* Load: ins [store, adr]; loaded mode is stored in the attribute, the node
 * itself is mode_T (memory / result / exception projs). Pinned, non-volatile. */
494 new_bd_Load(dbg_info *db, ir_node *block,
495 ir_node *store, ir_node *adr, ir_mode *mode) {
498 ir_graph *irg = current_ir_graph;
502 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
503 res->attr.load.exc.pin_state = op_pin_state_pinned;
504 res->attr.load.load_mode = mode;
505 res->attr.load.volatility = volatility_non_volatile;
506 res = optimize_node(res);
507 IRN_VRFY_IRG(res, irg);
/* Store: ins [store, adr, val]. Pinned, non-volatile. */
512 new_bd_Store(dbg_info *db, ir_node *block,
513 ir_node *store, ir_node *adr, ir_node *val) {
516 ir_graph *irg = current_ir_graph;
521 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
522 res->attr.store.exc.pin_state = op_pin_state_pinned;
523 res->attr.store.volatility = volatility_non_volatile;
524 res = optimize_node(res);
525 IRN_VRFY_IRG(res, irg);
/* Alloc: ins [store, size]; `where` selects stack or heap allocation. */
530 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
531 ir_node *size, ir_type *alloc_type, where_alloc where) {
534 ir_graph *irg = current_ir_graph;
538 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
539 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
540 res->attr.alloc.where = where;
541 res->attr.alloc.type = alloc_type;
542 res = optimize_node(res);
543 IRN_VRFY_IRG(res, irg);
/* Free: ins [store, ptr, size]; yields only the new memory (mode_M). */
548 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
549 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
552 ir_graph *irg = current_ir_graph;
557 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
558 res->attr.free.where = where;
559 res->attr.free.type = free_type;
560 res = optimize_node(res);
561 IRN_VRFY_IRG(res, irg);
/* Sel: entity selection from objptr with `arity` index operands; ins are
 * [store, objptr, index0..]. Result mode is mode_P_code when the entity is
 * a method, mode_P_data otherwise. */
566 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
567 int arity, ir_node **in, ir_entity *ent) {
571 ir_graph *irg = current_ir_graph;
572 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
574 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
577 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
580 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
582 * Sel's can select functions which should be of mode mode_P_code.
584 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
585 res->attr.sel.ent = ent;
586 res = optimize_node(res);
587 IRN_VRFY_IRG(res, irg);
/* SymConst with explicit type: address-kind symbols get a pointer mode;
 * other kinds (size, align, tag) presumably keep an integer mode set on a
 * line dropped from this listing - confirm against the full file. */
592 new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
593 symconst_kind symkind, ir_type *tp) {
596 ir_graph *irg = current_ir_graph;
598 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
599 mode = mode_P_data; /* FIXME: can be mode_P_code */
603 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
605 res->attr.symc.num = symkind;
606 res->attr.symc.sym = value;
607 res->attr.symc.tp = tp;
609 res = optimize_node(res);
610 IRN_VRFY_IRG(res, irg);
612 } /* new_bd_SymConst_type */
/* Sync: memory join; built with dynamic arity, predecessors added later. */
615 new_bd_Sync(dbg_info *db, ir_node *block) {
617 ir_graph *irg = current_ir_graph;
619 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
620 /* no need to call optimize node here, Sync are always created with no predecessors */
621 IRN_VRFY_IRG(res, irg);
/* Confirm: asserts that `val` `cmp` `bound` holds on this path; value
 * passes through with the mode of val. */
626 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
627 ir_node *in[2], *res;
628 ir_graph *irg = current_ir_graph;
632 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
633 res->attr.confirm.cmp = cmp;
634 res = optimize_node(res);
635 IRN_VRFY_IRG(res, irg);
637 } /* new_bd_Confirm */
/* Unknown: placeholder for an undefined value of mode m; always placed in
 * the start block, no debug info. */
640 new_bd_Unknown(ir_mode *m) {
642 ir_graph *irg = current_ir_graph;
644 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
645 res = optimize_node(res);
647 } /* new_bd_Unknown */
/* --- interprocedural-view constructors (CallBegin/EndReg/EndExcept/Break/
 * Filter) follow; they are only meaningful in the interprocedural graph. --- */
/* CallBegin: marks the begin of an interprocedural call; single input is
 * the called address taken from the Call node. */
650 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
653 ir_graph *irg = current_ir_graph;
655 in[0] = get_Call_ptr(call);
656 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
657 /* res->attr.callbegin.irg = irg; */
658 res->attr.callbegin.call = call;
659 res = optimize_node(res);
660 IRN_VRFY_IRG(res, irg);
662 } /* new_bd_CallBegin */
/* EndReg: regular-control-flow end for the interprocedural view; registered
 * on the graph. Dynamic arity. */
665 new_bd_EndReg(dbg_info *db, ir_node *block) {
667 ir_graph *irg = current_ir_graph;
669 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
670 set_irg_end_reg(irg, res);
671 IRN_VRFY_IRG(res, irg);
673 } /* new_bd_EndReg */
/* EndExcept: exceptional-control-flow counterpart of EndReg. */
676 new_bd_EndExcept(dbg_info *db, ir_node *block) {
678 ir_graph *irg = current_ir_graph;
680 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
681 set_irg_end_except(irg, res);
682 IRN_VRFY_IRG (res, irg);
684 } /* new_bd_EndExcept */
/* Break: jump out of a called graph back to the caller (interproc view). */
687 new_bd_Break(dbg_info *db, ir_node *block) {
689 ir_graph *irg = current_ir_graph;
691 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
692 res = optimize_node(res);
693 IRN_VRFY_IRG(res, irg);
/* Filter: interprocedural analogue of Proj; selects `proj` from `arg`. */
698 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
701 ir_graph *irg = current_ir_graph;
703 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
704 res->attr.filter.proj = proj;
705 res->attr.filter.in_cg = NULL;
706 res->attr.filter.backedge = NULL;
709 assert(get_Proj_pred(res));
710 assert(get_nodes_block(get_Proj_pred(res)));
712 res = optimize_node(res);
713 IRN_VRFY_IRG(res, irg);
715 } /* new_bd_Filter */
/* Mux: value select - sel ? ir_true : ir_false; ins [sel, false, true]. */
718 new_bd_Mux(dbg_info *db, ir_node *block,
719 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
722 ir_graph *irg = current_ir_graph;
728 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
731 res = optimize_node(res);
732 IRN_VRFY_IRG(res, irg);
/* Psi: generalized Mux with `arity` (cond, val) pairs plus a final default
 * value; in[] is interleaved cond0,val0,cond1,val1,...,default. */
737 new_bd_Psi(dbg_info *db, ir_node *block,
738 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
741 ir_graph *irg = current_ir_graph;
744 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
746 for (i = 0; i < arity; ++i) {
748 in[2 * i + 1] = vals[i];
752 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
755 res = optimize_node(res);
756 IRN_VRFY_IRG(res, irg);
/* CopyB: block copy of data_type from src to dst; ins [store, dst, src]. */
761 new_bd_CopyB(dbg_info *db, ir_node *block,
762 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
765 ir_graph *irg = current_ir_graph;
771 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
773 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
774 res->attr.copyb.data_type = data_type;
775 res = optimize_node(res);
776 IRN_VRFY_IRG(res, irg);
/* InstOf: runtime type test of objptr against `type`; ins [store, objptr].
 * NOTE(review): the opcode passed to new_ir_node below is op_Sel although
 * this constructs an InstOf node - this looks like a copy/paste slip from
 * new_bd_Sel; expected op_InstOf. Confirm against the full file before
 * changing, the listing is truncated. */
781 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
782 ir_node *objptr, ir_type *type) {
785 ir_graph *irg = current_ir_graph;
789 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
790 res->attr.instof.type = type;
791 res = optimize_node(res);
792 IRN_VRFY_IRG(res, irg);
794 } /* new_bd_InstOf */
/* Raise: throws exception object `obj`; ins [store, obj]. */
797 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
800 ir_graph *irg = current_ir_graph;
804 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
805 res = optimize_node(res);
806 IRN_VRFY_IRG(res, irg);
/* Bound: checks lower <= idx < upper; ins [store, idx, lower, upper]. */
811 new_bd_Bound(dbg_info *db, ir_node *block,
812 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
815 ir_graph *irg = current_ir_graph;
821 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
822 res->attr.bound.exc.pin_state = op_pin_state_pinned;
823 res = optimize_node(res);
824 IRN_VRFY_IRG(res, irg);
/* Pin: fixes a floating value to this block; keeps the node's mode. */
829 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
831 ir_graph *irg = current_ir_graph;
833 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
834 res = optimize_node(res);
835 IRN_VRFY_IRG(res, irg);
/* ASM: inline assembler with `arity` inputs, n_outs outputs and clobber
 * list; constraint/clobber arrays are copied onto the graph's obstack so
 * the node owns them. Pinned. */
840 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
841 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
843 ir_graph *irg = current_ir_graph;
846 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
847 res->attr.assem.pin_state = op_pin_state_pinned;
848 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
849 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
850 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
851 res->attr.assem.asm_text = asm_text;
853 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
854 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
856 res = optimize_node(res);
857 IRN_VRFY_IRG(res, irg);
861 /* --------------------------------------------- */
862 /* private interfaces, for professional use only */
863 /* --------------------------------------------- */
/* The new_rd_* family below all follow the same pattern: save
 * current_ir_graph, switch it to the supplied irg, delegate to the
 * corresponding new_bd_* constructor, then restore current_ir_graph. */
865 /* Constructs a Block with a fixed number of predecessors.
866 Does not set current_block. Can not be used with automatic
867 Phi node construction. */
869 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
870 ir_graph *rem = current_ir_graph;
873 current_ir_graph = irg;
874 res = new_bd_Block(db, arity, in);
875 current_ir_graph = rem;
881 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
882 ir_graph *rem = current_ir_graph;
885 current_ir_graph = irg;
886 res = new_bd_Start(db, block);
887 current_ir_graph = rem;
893 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
895 ir_graph *rem = current_ir_graph;
897 current_ir_graph = irg;
898 res = new_bd_End(db, block);
899 current_ir_graph = rem;
904 /* Creates a Phi node with all predecessors. Calling this constructor
905 is only allowed if the corresponding block is mature. */
907 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
909 ir_graph *rem = current_ir_graph;
911 current_ir_graph = irg;
912 res = new_bd_Phi(db, block,arity, in, mode);
913 current_ir_graph = rem;
919 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
921 ir_graph *rem = current_ir_graph;
923 current_ir_graph = irg;
924 res = new_bd_Const_type(db, block, mode, con, tp);
925 current_ir_graph = rem;
928 } /* new_rd_Const_type */
931 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
933 ir_graph *rem = current_ir_graph;
935 current_ir_graph = irg;
936 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
937 current_ir_graph = rem;
943 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
944 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
945 } /* new_rd_Const_long */
948 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
950 ir_graph *rem = current_ir_graph;
952 current_ir_graph = irg;
953 res = new_bd_Id(db, block, val, mode);
954 current_ir_graph = rem;
960 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
963 ir_graph *rem = current_ir_graph;
965 current_ir_graph = irg;
966 res = new_bd_Proj(db, block, arg, mode, proj);
967 current_ir_graph = rem;
973 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
976 ir_graph *rem = current_ir_graph;
978 current_ir_graph = irg;
979 res = new_bd_defaultProj(db, block, arg, max_proj);
980 current_ir_graph = rem;
983 } /* new_rd_defaultProj */
/* new_rd_Conv: public rd variant always builds a non-strict Conv (last
 * argument 0 to new_bd_Conv). */
986 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
988 ir_graph *rem = current_ir_graph;
990 current_ir_graph = irg;
991 res = new_bd_Conv(db, block, op, mode, 0);
992 current_ir_graph = rem;
998 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1000 ir_graph *rem = current_ir_graph;
1002 current_ir_graph = irg;
1003 res = new_bd_Cast(db, block, op, to_tp);
1004 current_ir_graph = rem;
1010 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1012 ir_graph *rem = current_ir_graph;
1014 current_ir_graph = irg;
1015 res = new_bd_Tuple(db, block, arity, in);
1016 current_ir_graph = rem;
1019 } /* new_rd_Tuple */
/* Macro instantiations of the arithmetic rd constructors (most of the list
 * was dropped by the line sampling; only DivMod and Borrow are visible). */
1026 NEW_RD_DIVOP(DivMod)
1039 NEW_RD_BINOP(Borrow)
1042 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1043 ir_node *op1, ir_node *op2) {
1045 ir_graph *rem = current_ir_graph;
1047 current_ir_graph = irg;
1048 res = new_bd_Cmp(db, block, op1, op2);
1049 current_ir_graph = rem;
1055 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1057 ir_graph *rem = current_ir_graph;
1059 current_ir_graph = irg;
1060 res = new_bd_Jmp(db, block);
1061 current_ir_graph = rem;
1067 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1069 ir_graph *rem = current_ir_graph;
1071 current_ir_graph = irg;
1072 res = new_bd_IJmp(db, block, tgt);
1073 current_ir_graph = rem;
1079 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1081 ir_graph *rem = current_ir_graph;
1083 current_ir_graph = irg;
1084 res = new_bd_Cond(db, block, c);
1085 current_ir_graph = rem;
1091 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1092 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1094 ir_graph *rem = current_ir_graph;
1096 current_ir_graph = irg;
1097 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1098 current_ir_graph = rem;
1104 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1105 ir_node *store, int arity, ir_node **in) {
1107 ir_graph *rem = current_ir_graph;
1109 current_ir_graph = irg;
1110 res = new_bd_Return(db, block, store, arity, in);
1111 current_ir_graph = rem;
1114 } /* new_rd_Return */
1117 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1118 ir_node *store, ir_node *adr, ir_mode *mode) {
1120 ir_graph *rem = current_ir_graph;
1122 current_ir_graph = irg;
1123 res = new_bd_Load(db, block, store, adr, mode);
1124 current_ir_graph = rem;
1130 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1131 ir_node *store, ir_node *adr, ir_node *val) {
1133 ir_graph *rem = current_ir_graph;
1135 current_ir_graph = irg;
1136 res = new_bd_Store(db, block, store, adr, val);
1137 current_ir_graph = rem;
1140 } /* new_rd_Store */
1143 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1144 ir_node *size, ir_type *alloc_type, where_alloc where) {
1146 ir_graph *rem = current_ir_graph;
1148 current_ir_graph = irg;
1149 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1150 current_ir_graph = rem;
1153 } /* new_rd_Alloc */
1156 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1157 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1159 ir_graph *rem = current_ir_graph;
1161 current_ir_graph = irg;
1162 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1163 current_ir_graph = rem;
/* simpleSel: Sel with no array indices (arity 0, in == NULL). */
1169 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1170 ir_node *store, ir_node *objptr, ir_entity *ent) {
1172 ir_graph *rem = current_ir_graph;
1174 current_ir_graph = irg;
1175 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1176 current_ir_graph = rem;
1179 } /* new_rd_simpleSel */
1182 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1183 int arity, ir_node **in, ir_entity *ent) {
1185 ir_graph *rem = current_ir_graph;
1187 current_ir_graph = irg;
1188 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1189 current_ir_graph = rem;
1195 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1196 symconst_kind symkind, ir_type *tp) {
1198 ir_graph *rem = current_ir_graph;
1200 current_ir_graph = irg;
1201 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1202 current_ir_graph = rem;
1205 } /* new_rd_SymConst_type */
/* SymConst without explicit type: uses firm_unknown_type. */
1208 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1209 symconst_kind symkind) {
1210 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1212 } /* new_rd_SymConst */
/* Convenience SymConst constructors: wrap the symbol in a symconst_symbol
 * union and place the node in the start block of irg. */
1214 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1215 symconst_symbol sym;
1216 sym.entity_p = symbol;
1217 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1218 } /* new_rd_SymConst_addr_ent */
1220 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
1221 symconst_symbol sym;
1222 sym.entity_p = symbol;
1223 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
1224 } /* new_rd_SymConst_ofs_ent */
1226 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1227 symconst_symbol sym;
1228 sym.ident_p = symbol;
1229 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1230 } /* new_rd_SymConst_addr_name */
1232 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1233 symconst_symbol sym;
1234 sym.type_p = symbol;
1235 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1236 } /* new_rd_SymConst_type_tag */
1238 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1239 symconst_symbol sym;
1240 sym.type_p = symbol;
1241 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1242 } /* new_rd_SymConst_size */
1244 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1245 symconst_symbol sym;
1246 sym.type_p = symbol;
1247 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1248 } /* new_rd_SymConst_align */
/* rd_Sync takes the predecessor array up front: builds an empty Sync, then
 * appends each predecessor via add_Sync_pred (the bd variant has none). */
1251 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1253 ir_graph *rem = current_ir_graph;
1256 current_ir_graph = irg;
1257 res = new_bd_Sync(db, block);
1258 current_ir_graph = rem;
1260 for (i = 0; i < arity; ++i)
1261 add_Sync_pred(res, in[i]);
/* Bad is unique per graph: just hand out the graph's singleton. */
1267 new_rd_Bad(ir_graph *irg) {
1268 return get_irg_bad(irg);
1272 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1274 ir_graph *rem = current_ir_graph;
1276 current_ir_graph = irg;
1277 res = new_bd_Confirm(db, block, val, bound, cmp);
1278 current_ir_graph = rem;
1281 } /* new_rd_Confirm */
1284 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1286 ir_graph *rem = current_ir_graph;
1288 current_ir_graph = irg;
1289 res = new_bd_Unknown(m);
1290 current_ir_graph = rem;
1293 } /* new_rd_Unknown */
1296 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1298 ir_graph *rem = current_ir_graph;
1300 current_ir_graph = irg;
1301 res = new_bd_CallBegin(db, block, call);
1302 current_ir_graph = rem;
1305 } /* new_rd_CallBegin */
/* NOTE(review): unlike their siblings, rd_EndReg and rd_EndExcept build the
 * node directly with new_ir_node instead of delegating to the bd variants -
 * duplicating the bd bodies. Works because new_ir_node takes irg explicitly,
 * but inconsistent with the rest of this family. */
1308 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1311 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1312 set_irg_end_reg(irg, res);
1313 IRN_VRFY_IRG(res, irg);
1315 } /* new_rd_EndReg */
1318 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1321 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1322 set_irg_end_except(irg, res);
1323 IRN_VRFY_IRG (res, irg);
1325 } /* new_rd_EndExcept */
1328 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1330 ir_graph *rem = current_ir_graph;
1332 current_ir_graph = irg;
1333 res = new_bd_Break(db, block);
1334 current_ir_graph = rem;
1337 } /* new_rd_Break */
1340 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1343 ir_graph *rem = current_ir_graph;
1345 current_ir_graph = irg;
1346 res = new_bd_Filter(db, block, arg, mode, proj);
1347 current_ir_graph = rem;
1350 } /* new_rd_Filter */
/* NoMem is unique per graph, like Bad: return the graph's singleton. */
1353 new_rd_NoMem(ir_graph *irg) {
1354 return get_irg_no_mem(irg);
1355 } /* new_rd_NoMem */
1358 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1359 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1361 ir_graph *rem = current_ir_graph;
1363 current_ir_graph = irg;
1364 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1365 current_ir_graph = rem;
1371 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1372 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1374 ir_graph *rem = current_ir_graph;
1376 current_ir_graph = irg;
1377 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1378 current_ir_graph = rem;
1383 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1384 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1386 ir_graph *rem = current_ir_graph;
1388 current_ir_graph = irg;
1389 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1390 current_ir_graph = rem;
1393 } /* new_rd_CopyB */
1396 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1397 ir_node *objptr, ir_type *type) {
1399 ir_graph *rem = current_ir_graph;
1401 current_ir_graph = irg;
1402 res = new_bd_InstOf(db, block, store, objptr, type);
1403 current_ir_graph = rem;
1406 } /* new_rd_InstOf */
1409 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1411 ir_graph *rem = current_ir_graph;
1413 current_ir_graph = irg;
1414 res = new_bd_Raise(db, block, store, obj);
1415 current_ir_graph = rem;
1418 } /* new_rd_Raise */
1420 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1421 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1423 ir_graph *rem = current_ir_graph;
1425 current_ir_graph = irg;
1426 res = new_bd_Bound(db, block, store, idx, lower, upper);
1427 current_ir_graph = rem;
1430 } /* new_rd_Bound */
1432 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1434 ir_graph *rem = current_ir_graph;
1436 current_ir_graph = irg;
1437 res = new_bd_Pin(db, block, node);
1438 current_ir_graph = rem;
1443 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1444 int arity, ir_node *in[], ir_asm_constraint *inputs,
1445 int n_outs, ir_asm_constraint *outputs,
1446 int n_clobber, ident *clobber[], ident *asm_text) {
1448 ir_graph *rem = current_ir_graph;
1450 current_ir_graph = irg;
1451 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1452 current_ir_graph = rem;
1458 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1459 return new_rd_Block(NULL, irg, arity, in);
1461 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1462 return new_rd_Start(NULL, irg, block);
1464 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1465 return new_rd_End(NULL, irg, block);
1467 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1468 return new_rd_Jmp(NULL, irg, block);
1470 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1471 return new_rd_IJmp(NULL, irg, block, tgt);
1473 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1474 return new_rd_Cond(NULL, irg, block, c);
1476 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1477 ir_node *store, int arity, ir_node **in) {
1478 return new_rd_Return(NULL, irg, block, store, arity, in);
1480 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1481 ir_mode *mode, tarval *con) {
1482 return new_rd_Const(NULL, irg, block, mode, con);
1484 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1485 ir_mode *mode, long value) {
1486 return new_rd_Const_long(NULL, irg, block, mode, value);
1488 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1489 ir_mode *mode, tarval *con, ir_type *tp) {
1490 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1492 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
1493 symconst_symbol value, symconst_kind symkind) {
1494 return new_rd_SymConst(NULL, irg, block, value, symkind);
1496 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1497 ir_node *objptr, ir_entity *ent) {
1498 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1500 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1501 ir_node *objptr, int n_index, ir_node **index,
1503 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1505 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1506 ir_node *callee, int arity, ir_node **in,
1508 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1510 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1511 ir_node *op1, ir_node *op2, ir_mode *mode) {
1512 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1514 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1515 ir_node *op1, ir_node *op2, ir_mode *mode) {
1516 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1518 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1519 ir_node *op, ir_mode *mode) {
1520 return new_rd_Minus(NULL, irg, block, op, mode);
1522 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1523 ir_node *op1, ir_node *op2, ir_mode *mode) {
1524 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1526 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1527 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1528 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1530 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1531 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1532 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1534 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1535 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1536 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1538 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1539 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1540 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1542 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1543 ir_node *op, ir_mode *mode) {
1544 return new_rd_Abs(NULL, irg, block, op, mode);
1546 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1547 ir_node *op1, ir_node *op2, ir_mode *mode) {
1548 return new_rd_And(NULL, irg, block, op1, op2, mode);
1550 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1551 ir_node *op1, ir_node *op2, ir_mode *mode) {
1552 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1554 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1555 ir_node *op1, ir_node *op2, ir_mode *mode) {
1556 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1558 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1559 ir_node *op, ir_mode *mode) {
1560 return new_rd_Not(NULL, irg, block, op, mode);
1562 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1563 ir_node *op, ir_node *k, ir_mode *mode) {
1564 return new_rd_Shl(NULL, irg, block, op, k, mode);
1566 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1567 ir_node *op, ir_node *k, ir_mode *mode) {
1568 return new_rd_Shr(NULL, irg, block, op, k, mode);
1570 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1571 ir_node *op, ir_node *k, ir_mode *mode) {
1572 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1574 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1575 ir_node *op, ir_node *k, ir_mode *mode) {
1576 return new_rd_Rot(NULL, irg, block, op, k, mode);
1578 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1579 ir_node *op, ir_node *k, ir_mode *mode) {
1580 return new_rd_Carry(NULL, irg, block, op, k, mode);
1582 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1583 ir_node *op, ir_node *k, ir_mode *mode) {
1584 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1586 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1587 ir_node *op1, ir_node *op2) {
1588 return new_rd_Cmp(NULL, irg, block, op1, op2);
1590 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1591 ir_node *op, ir_mode *mode) {
1592 return new_rd_Conv(NULL, irg, block, op, mode);
1594 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1595 return new_rd_Cast(NULL, irg, block, op, to_tp);
1597 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1598 ir_node **in, ir_mode *mode) {
1599 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1601 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1602 ir_node *store, ir_node *adr, ir_mode *mode) {
1603 return new_rd_Load(NULL, irg, block, store, adr, mode);
1605 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1606 ir_node *store, ir_node *adr, ir_node *val) {
1607 return new_rd_Store(NULL, irg, block, store, adr, val);
1609 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1610 ir_node *size, ir_type *alloc_type, where_alloc where) {
1611 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1613 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1614 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1615 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1617 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1618 return new_rd_Sync(NULL, irg, block, arity, in);
1620 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1621 ir_mode *mode, long proj) {
1622 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1624 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1626 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1628 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1629 int arity, ir_node **in) {
1630 return new_rd_Tuple(NULL, irg, block, arity, in );
1632 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1633 ir_node *val, ir_mode *mode) {
1634 return new_rd_Id(NULL, irg, block, val, mode);
1636 ir_node *new_r_Bad(ir_graph *irg) {
1637 return new_rd_Bad(irg);
1639 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1640 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1642 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1643 return new_rd_Unknown(irg, m);
1645 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1646 return new_rd_CallBegin(NULL, irg, block, callee);
1648 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1649 return new_rd_EndReg(NULL, irg, block);
1651 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1652 return new_rd_EndExcept(NULL, irg, block);
1654 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1655 return new_rd_Break(NULL, irg, block);
1657 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1658 ir_mode *mode, long proj) {
1659 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1661 ir_node *new_r_NoMem(ir_graph *irg) {
1662 return new_rd_NoMem(irg);
1664 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1665 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1666 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1668 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1669 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1670 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1672 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1673 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1674 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1676 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1678 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1680 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1681 ir_node *store, ir_node *obj) {
1682 return new_rd_Raise(NULL, irg, block, store, obj);
1684 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1685 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1686 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1688 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1689 return new_rd_Pin(NULL, irg, block, node);
1691 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1692 int arity, ir_node *in[], ir_asm_constraint *inputs,
1693 int n_outs, ir_asm_constraint *outputs,
1694 int n_clobber, ident *clobber[], ident *asm_text) {
1695 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1698 /** ********************/
1699 /** public interfaces */
1700 /** construction tools */
1704 * - create a new Start node in the current block
1706 * @return s - pointer to the created Start node
1711 new_d_Start(dbg_info *db) {
1714 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1715 op_Start, mode_T, 0, NULL);
1717 res = optimize_node(res);
1718 IRN_VRFY_IRG(res, current_ir_graph);
1723 new_d_End(dbg_info *db) {
1725 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1726 op_End, mode_X, -1, NULL);
1727 res = optimize_node(res);
1728 IRN_VRFY_IRG(res, current_ir_graph);
1733 /* Constructs a Block with a fixed number of predecessors.
1734 Does set current_block. Can be used with automatic Phi
1735 node construction. */
1737 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1740 int has_unknown = 0;
1742 res = new_bd_Block(db, arity, in);
1744 /* Create and initialize array for Phi-node construction. */
1745 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1746 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1747 current_ir_graph->n_loc);
1748 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1751 for (i = arity-1; i >= 0; i--)
1752 if (get_irn_op(in[i]) == op_Unknown) {
1757 if (!has_unknown) res = optimize_node(res);
1758 current_ir_graph->current_block = res;
1760 IRN_VRFY_IRG(res, current_ir_graph);
1765 /* ***********************************************************************/
1766 /* Methods necessary for automatic Phi node creation */
1768 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1769 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1770 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1771 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1773 Call Graph: ( A ---> B == A "calls" B)
1775 get_value mature_immBlock
1783 get_r_value_internal |
1787 new_rd_Phi0 new_rd_Phi_in
1789 * *************************************************************************** */
1791 /** Creates a Phi node with 0 predecessors. */
1792 static INLINE ir_node *
1793 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1796 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1797 IRN_VRFY_IRG(res, irg);
1801 /* There are two implementations of the Phi node construction. The first
1802 is faster, but does not work for blocks with more than 2 predecessors.
1803 The second works always but is slower and causes more unnecessary Phi
1805 Select the implementations by the following preprocessor flag set in
1807 #if USE_FAST_PHI_CONSTRUCTION
1809 /* This is a stack used for allocating and deallocating nodes in
1810 new_rd_Phi_in. The original implementation used the obstack
1811 to model this stack, now it is explicit. This reduces side effects.
1813 #if USE_EXPLICIT_PHI_IN_STACK
1815 new_Phi_in_stack(void) {
1818 res = (Phi_in_stack *) xmalloc(sizeof(Phi_in_stack));
1820 res->stack = NEW_ARR_F(ir_node *, 0);
1824 } /* new_Phi_in_stack */
1827 free_Phi_in_stack(Phi_in_stack *s) {
1828 DEL_ARR_F(s->stack);
1830 } /* free_Phi_in_stack */
1833 free_to_Phi_in_stack(ir_node *phi) {
1834 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1835 current_ir_graph->Phi_in_stack->pos)
1836 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1838 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1840 (current_ir_graph->Phi_in_stack->pos)++;
1841 } /* free_to_Phi_in_stack */
/* Either allocates a fresh Phi node or reuses one popped from the
   graph's Phi-in stack.
   NOTE(review): this copy of the file appears truncated -- the
   `ir_node *res;` declaration, the `if (pos == 0) ... else ...`
   branching between the two paths below, `res = stack[pos-1];` and the
   final `return res;` seem to be missing.  Compare against the
   repository version before editing.  `db` on the allocation line is
   also not declared in the visible text -- TODO confirm. */
static INLINE ir_node *
alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
                               int arity, ir_node **in) {
  ir_node **stack = current_ir_graph->Phi_in_stack->stack;
  int pos = current_ir_graph->Phi_in_stack->pos;

  /* We need to allocate a new node */
  res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);

  /* reuse the old node and initialize it again. */
  assert(res->kind == k_ir_node);
  assert(res->op == op_Phi);
  /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
  res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
  (current_ir_graph->Phi_in_stack->pos)--;
} /* alloc_or_pop_from_Phi_in_stack */
1874 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1877 * Creates a Phi node with a given, fixed array **in of predecessors.
1878 * If the Phi node is unnecessary, as the same value reaches the block
1879 * through all control flow paths, it is eliminated and the value
1880 * returned directly. This constructor is only intended for use in
1881 * the automatic Phi node generation triggered by get_value or mature.
1882 * The implementation is quite tricky and depends on the fact, that
1883 * the nodes are allocated on a stack:
1884 * The in array contains predecessors and NULLs. The NULLs appear,
1885 * if get_r_value_internal, that computed the predecessors, reached
1886 * the same block on two paths. In this case the same value reaches
1887 * this block on both paths, there is no definition in between. We need
1888 * not allocate a Phi where these path's merge, but we have to communicate
1889 * this fact to the caller. This happens by returning a pointer to the
1890 * node the caller _will_ allocate. (Yes, we predict the address. We can
1891 * do so because the nodes are allocated on the obstack.) The caller then
1892 * finds a pointer to itself and, when this routine is called again,
1893 * eliminates itself.
/* Fast-construction variant of new_rd_Phi_in (see the comment block above).
   NOTE(review): lines appear missing from this copy -- the loop index
   declaration, the #else/#endif lines of both USE_EXPLICIT_PHI_IN_STACK
   conditionals, the loop bodies, the `if (i == ins)` test and the final
   `return res;`.  Verify against the repository version before editing. */
static INLINE ir_node *
new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins) {
  ir_node *res, *known;

  /* Allocate a new node on the obstack.  This can return a node to
     which some of the pointers in the in-array already point.
     Attention: the constructor copies the in array, i.e., the later
     changes to the array in this routine do not affect the
     constructed node!  If the in array contains NULLs, there will be
     missing predecessors in the returned node.  Is this a possible
     internal state of the Phi node generation? */
#if USE_EXPLICIT_PHI_IN_STACK
  res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
  res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

  /* The in-array can contain NULLs.  These were returned by
     get_r_value_internal if it reached the same block/definition on a
     second path.  The NULLs are replaced by the node itself to
     simplify the test in the next loop. */
  for (i = 0; i < ins; ++i) {

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop.  Else the Phi
     node merges the same definition on several paths and therefore is
     not needed. */
  for (i = 0; i < ins; ++i) {
    if (in[i] == res || in[i] == known)

  /* i==ins: there is at most one predecessor, we don't need a phi node. */
#if USE_EXPLICIT_PHI_IN_STACK
    free_to_Phi_in_stack(res);
    edges_node_deleted(res, current_ir_graph);
    obstack_free(current_ir_graph->obst, res);
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);
  /* return the pointer to the Phi node.  This node might be deallocated! */
} /* new_rd_Phi_in */
1956 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1959 * Allocates and returns this node. The routine called to allocate the
1960 * node might optimize it away and return a real value, or even a pointer
1961 * to a deallocated Phi node on top of the obstack!
1962 * This function is called with an in-array of proper size.
/* Collects the Phi operands from all predecessor blocks into nin and
   builds the Phi via new_rd_Phi_in (fast-construction variant).
   NOTE(review): the `static` return-type line, the loop index
   declaration, closing braces and the final `return res;` appear
   missing from this copy -- verify against the repository version. */
phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
  ir_node *prevBlock, *res;

  /* This loop goes to all predecessor blocks of the block the Phi node is in
     and there finds the operands of the Phi node by calling
     get_r_value_internal. */
  for (i = 1; i <= ins; ++i) {
    assert (block->in[i]);
    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
    nin[i-1] = get_r_value_internal (prevBlock, pos, mode);

  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created.  This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node.  If the value
     passes two different control flow edges without being defined, and
     this is the second path treated, a pointer to the node that will be
     allocated for the first path (recursion) is returned.  We already
     know the address of this node, as it is the next node to be allocated
     and will be placed on top of the obstack. (The obstack is a _stack_!) */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);

  /* Now we know the value for "pos" and can enter it in the array with
     all known local variables.  Attention: this might be a pointer to
     a node, that later will be allocated!!! See new_rd_Phi_in().
     If this is called in mature, after some set_value() in the same block,
     the proper value must not be overwritten:
     The call order
       get_value    (makes Phi0, put's it into graph_arr)
       set_value    (overwrites Phi0 in graph_arr)
       mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
   */
  if (!block->attr.block.graph_arr[pos]) {
    block->attr.block.graph_arr[pos] = res;
  /* printf(" value already computed by %s\n",
     get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2012 * This function returns the last definition of a variable. In case
2013 * this variable was last defined in a previous block, Phi nodes are
2014 * inserted. If the part of the firm graph containing the definition
2015 * is not yet constructed, a dummy Phi node is returned.
/* Returns the last definition of value number pos in mode mode, seen
   from block; inserts Phi (or placeholder Phi0) nodes on demand
   (fast-construction variant).
   NOTE(review): the return-type line, the `ir_node *res;` / `ir_node **nin;`
   declarations, several closing braces and the final `return res;` appear
   missing from this copy -- verify against the repository version. */
get_r_value_internal(ir_node *block, int pos, ir_mode *mode)

  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time.  We can not
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned.  This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured. (Completion will add a new Phi and turn the Phi0 into an Id

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time.  We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time.  A Phi node needs
        to be created (phi_merge).  If the Phi is not needed, as all it's
        operands are the same value reaching the block through different
        paths, it's optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time.  Now two
        subcases are possible:
        * The value was computed completely the last time we were here.  This
          is the case if there is no loop.  We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet.  We are computing a value in a loop and need to
          break the recursion without knowing the result yet.
          @@@ strange case.  Straight forward we would create a Phi before
          starting the computation of it's predecessors.  In this case we will
          find a Phi here in any case.  The problem is that this implementation
          only creates a Phi after computing the predecessors, so that it is
          hard to compute self references of this Phi.  @@@
        There is no simple check for the second subcase.  Therefore we check
        for a second visit and treat all such cases as the second subcase.
        Anyways, the basic situation is the same:  we reached a block
        on two paths without finding a definition of the value:  No Phi
        nodes are needed on both paths.
        We return this information "Two paths, no Phi needed" by a very tricky
        implementation that relies on the fact that an obstack is a stack and
        will return a node with the same address on different allocations.
        Look also at phi_merge and new_rd_phi_in to understand this.
        @@@ Unfortunately this does not work, see testprogram
        three_cfpred_example.
  */

  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) return res;

  if (block->attr.block.is_matured) { /* case 3 */

    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);
    NEW_ARR_A(ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge(block, pos, mode, nin, ins);

  } else { /* case 1 */
    /* The block is not mature, we don't know how many in's are needed.  A Phi
       with zero predecessors is created.  Such a Phi node is called Phi0
       node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
       to the list of Phi0 nodes in this block to be matured by mature_immBlock
       later.
       The Phi0 has to remember the pos of it's internal value.  If the real
       Phi is computed, pos is used to update the array with the local
       values. */
    res = new_rd_Phi0(current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;

  /* If we get here, the frontend missed a use-before-definition error */
  printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
  assert(mode->code >= irm_F && mode->code <= irm_P);
  res = new_rd_Const(NULL, current_ir_graph, block, mode, tarval_mode_null[mode->code]);

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;

} /* get_r_value_internal */
2120 it starts the recursion. This causes an Id at the entry of
2121 every block that has no definition of the value! **/
2123 #if USE_EXPLICIT_PHI_IN_STACK
/* Stubs used when the explicit Phi-in stack is compiled in but the slow
   construction path is active: no stack is needed, so these do nothing. */
Phi_in_stack * new_Phi_in_stack() { return NULL; }
void free_Phi_in_stack(Phi_in_stack *s) {}
/* General (slow) variant of new_rd_Phi_in: builds a Phi for in[0..ins-1],
   eliminating it when all non-Bad predecessors agree.  phi0 is the
   placeholder this Phi will replace (may be NULL).
   NOTE(review): lines appear missing from this copy -- the loop index
   declaration, `known = res;`-style bookkeeping, several braces/else
   branches, the `if (i == ins)` test and the final `return res;`.
   Verify against the repository version before editing. */
static INLINE ir_node *
new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
              ir_node **in, int ins, ir_node *phi0) {
  ir_node *res, *known;

  /* Allocate a new node on the obstack.  The allocation copies the in
     array. */
  res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop.  Else the
     Phi node merges the same definition on several paths and therefore
     is not needed.  Don't consider Bad nodes! */
  for (i=0; i < ins; ++i)
    in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */

    /* Optimize self referencing Phis:  We can't detect them yet properly, as
       they still refer to the Phi0 they will replace.  So replace right now. */
    if (phi0 && in[i] == phi0) in[i] = res;

    if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;

  /* i==ins: there is at most one predecessor, we don't need a phi node. */
    edges_node_deleted(res, current_ir_graph);
    obstack_free (current_ir_graph->obst, res);
    if (is_Phi(known)) {
      /* If pred is a phi node we want to optimize it: If loops are matured in a bad
         order, an enclosing Phi node may get superfluous. */
      res = optimize_in_place_2(known);
        exchange(known, res);

      /* An undefined value, e.g., in unreachable code. */
  res = optimize_node (res);  /* This is necessary to add the node to the hash table for cse. */
  IRN_VRFY_IRG(res, irg);
  /* Memory Phis in endless loops must be kept alive.
     As we can't distinguish these easily we keep all of them alive. */
  if ((res->op == op_Phi) && (mode == mode_M))
    add_End_keepalive(get_irg_end(irg), res);
} /* new_rd_Phi_in */
2195 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
2197 #if PRECISE_EXC_CONTEXT
2199 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2202 * Construct a new frag_array for node n.
2203 * Copy the content from the current graph_arr of the corresponding block:
2204 * this is the current state.
2205 * Set ProjM(n) as current memory state.
2206 * Further the last entry in frag_arr of current block points to n. This
2207 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Builds the frag_arr for fragile op n: a snapshot of the current block's
   graph_arr with arr[0] replaced by the exception memory Proj of n.
   NOTE(review): the `ir_node **arr;` / `int opt;` declarations, the
   `else` chain around the Proj selection, `set_optimize(opt);` and the
   final `return arr;` appear missing from this copy -- verify against
   the repository version. */
static INLINE ir_node **new_frag_arr(ir_node *n) {
  arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
  memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
         sizeof(ir_node *)*current_ir_graph->n_loc);

  /* turn off optimization before allocating Proj nodes, as res isn't
     finished yet. */
  opt = get_opt_optimize(); set_optimize(0);
  /* Here we rely on the fact that all frag ops have Memory as first result! */
  if (get_irn_op(n) == op_Call)
    arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
  else if (get_irn_op(n) == op_CopyB)
    arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
    /* All remaining fragile ops share the same memory Proj number. */
    assert((pn_Quot_M == pn_DivMod_M) &&
           (pn_Quot_M == pn_Div_M)    &&
           (pn_Quot_M == pn_Mod_M)    &&
           (pn_Quot_M == pn_Load_M)   &&
           (pn_Quot_M == pn_Store_M)  &&
           (pn_Quot_M == pn_Alloc_M)  &&
           (pn_Quot_M == pn_Bound_M));
    arr[0] = new_Proj(n, mode_M, pn_Alloc_M);

  /* Chain block->last_frag_op -> ... -> first_frag_op via slot n_loc-1. */
  current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
} /* new_frag_arr */
2242 * Returns the frag_arr from a node.
/* Returns the frag_arr stored in the attributes of fragile op n.
   NOTE(review): the `case iro_Call:` / `case iro_Alloc:` / `case iro_Load:`
   / `case iro_Store:` / `default:` labels and closing braces of this
   switch appear missing from this copy -- verify against the repository
   version before editing. */
static INLINE ir_node **get_frag_arr(ir_node *n) {
  switch (get_irn_opcode(n)) {
    return n->attr.call.exc.frag_arr;
    return n->attr.alloc.exc.frag_arr;
    return n->attr.load.exc.frag_arr;
    return n->attr.store.exc.frag_arr;
    return n->attr.except.frag_arr;
} /* get_frag_arr */
/* Records val as the value for slot pos in frag_arr and in every frag_arr
   chained after it (via the last slot, which links to the next fragile op).
   NOTE(review): this copy appears to contain BOTH an older recursive
   variant and a newer bounded-loop variant mashed together -- presumably
   one of them was disabled with #if 0 in the original; verify against the
   repository version before editing. */
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
  if (!frag_arr[pos]) frag_arr[pos] = val;
  if (frag_arr[current_ir_graph->n_loc - 1]) {
    ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
    assert(arr != frag_arr && "Endless recursion detected");
    set_frag_value(arr, pos, val);

  /* Iterative variant: walk the chain with a hard bound instead of recursing. */
  for (i = 0; i < 1000; ++i) {
    if (!frag_arr[pos]) {
      frag_arr[pos] = val;
    if (frag_arr[current_ir_graph->n_loc - 1]) {
      ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
  assert(0 && "potential endless recursion");
} /* set_frag_value */
/* Like get_r_value_internal, but looks the value up in the frag_arr of
   fragile op cfOp first, so definitions made before a possibly-excepting
   op are seen on the exception path.
   NOTE(review): the return-type line, the `ir_node *res;` /
   `ir_node **frag_arr;` / `ir_node **nin;` declarations, several
   braces/else keywords and the final `return res;` appear missing from
   this copy -- verify against the repository version before editing. */
get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {

  assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));

  frag_arr = get_frag_arr(cfOp);
  res = frag_arr[pos];

  if (block->attr.block.graph_arr[pos]) {
    /* There was a set_value() after the cfOp and no get_value() before that
       set_value().  We must build a Phi node now. */
    if (block->attr.block.is_matured) {
      int ins = get_irn_arity(block);
      NEW_ARR_A(ir_node *, nin, ins);
      res = phi_merge(block, pos, mode, nin, ins);
      res = new_rd_Phi0(current_ir_graph, block, mode);
      res->attr.phi0.pos = pos;
      res->link = block->link;

    /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
       but this should be better: (remove comment if this works) */
    /* It's a Phi, we can write this into all graph_arrs with NULL */
    set_frag_value(block->attr.block.graph_arr, pos, res);
    res = get_r_value_internal(block, pos, mode);
    set_frag_value(block->attr.block.graph_arr, pos, res);
} /* get_r_frag_value_internal */
2322 #endif /* PRECISE_EXC_CONTEXT */
2325 * Computes the predecessors for the real phi node, and then
2326 * allocates and returns this node. The routine called to allocate the
2327 * node might optimize it away and return a real value.
2328 * This function must be called with an in-array of proper size.
/* Computes the predecessors of the real Phi node at local-variable slot pos
   and allocates it; the constructor may optimize the node away and return a
   plain value.  nin must be an in-array of size ins.
   NOTE(review): sampled extract -- several original lines (else branches,
   closing braces, loop-variable declaration) are not visible here; the code
   below is reproduced verbatim, only comments were added/edited. */
phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
	ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;

	/* If this block has no value at pos create a Phi0 and remember it
	   in graph_arr to break recursions.
	   Else we may not set graph_arr as there a later value is remembered. */
	if (!block->attr.block.graph_arr[pos]) {
		if (block == get_irg_start_block(current_ir_graph)) {
			/* Collapsing to Bad tarvals is no good idea.
			   So we call a user-supplied routine here that deals with this case as
			   appropriate for the given language.  Sadly the only help we can give
			   here is the position.

			   Even if all variables are defined before use, it can happen that
			   we get to the start block, if a Cond has been replaced by a tuple
			   (bad, jmp).  In this case we call the function needlessly, eventually
			   generating a non-existent error.
			   However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted. */
			/* Let the frontend initialize the variable; runs with the start
			   block as current block.  (pos - 1 undoes the store slot shift.) */
			if (default_initialize_local_variable) {
				ir_node *rem = get_cur_block();

				set_cur_block(block);
				block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
				/* NOTE(review): the restoring set_cur_block(rem) and the else
				   branch separating the next assignment are not visible in
				   this extract. */
				block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
			/* We don't need to care about exception ops in the start block.
			   There are none by definition. */
			return block->attr.block.graph_arr[pos];

		/* Non-start block: create a Phi0 placeholder to break the recursion. */
		phi0 = new_rd_Phi0(current_ir_graph, block, mode);
		block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
		if (get_opt_precise_exc_context()) {
			/* Set graph_arr for fragile ops.  Also here we should break recursion.
			   We could choose a cyclic path through an cfop.  But the recursion would
			   break at some point. */
			set_frag_value(block->attr.block.graph_arr, pos, phi0);

	/* This loop goes to all predecessor blocks of the block the Phi node
	   is in and there finds the operands of the Phi node by calling
	   get_r_value_internal. */
	for (i = 1; i <= ins; ++i) {
		prevCfOp = skip_Proj(block->in[i]);

		if (is_Bad(prevCfOp)) {
			/* In case a Cond has been optimized we would get right to the start block
			   with an invalid definition. */
			nin[i-1] = new_Bad();
		prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */

		if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
			if (get_opt_precise_exc_context() &&
			    is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
				assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
				nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
				nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
			nin[i-1] = new_Bad();

	/* We want to pass the Phi0 node to the constructor: this finds additional
	   optimization possibilities.
	   The Phi0 node either is allocated in this function, or it comes from
	   a former call to get_r_value_internal.  In this case we may not yet
	   exchange phi0, as this is done in mature_immBlock. */
	phi0_all = block->attr.block.graph_arr[pos];
	if (!((get_irn_op(phi0_all) == op_Phi) &&
	      (get_irn_arity(phi0_all) == 0)   &&
	      (get_nodes_block(phi0_all) == block)))

	/* After collecting all predecessors into the array nin a new Phi node
	   with these predecessors is created.  This constructor contains an
	   optimization: If all predecessors of the Phi node are identical it
	   returns the only operand instead of a new Phi node. */
	res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);

	/* In case we allocated a Phi0 node at the beginning of this procedure,
	   we need to exchange this Phi0 with the real Phi. */
		exchange(phi0, res);
		block->attr.block.graph_arr[pos] = res;
		/* Don't set_frag_value as it does not overwrite.  Doesn't matter, is
		   only an optimization. */
2441 * This function returns the last definition of a variable. In case
2442 * this variable was last defined in a previous block, Phi nodes are
2443 * inserted. If the part of the firm graph containing the definition
2444 * is not yet constructed, a dummy Phi node is returned.
/* Returns the last definition of variable slot pos reaching block, inserting
   Phi nodes where the definition lies in a predecessor block.  If the part of
   the graph containing the definition is not yet constructed, a dummy Phi0 is
   returned.
   NOTE(review): sampled extract -- local declarations, some closing braces
   and the final return are not visible; code reproduced verbatim. */
get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {

	/* There are 4 cases to treat.

	   1. The block is not mature and we visit it the first time.  We can not
	      create a proper Phi node, therefore a Phi0, i.e., a Phi without
	      predecessors is returned.  This node is added to the linked list (field
	      "link") of the containing block to be completed when this block is
	      matured.  (Completion will add a new Phi and turn the Phi0 into an Id.)

	   2. The value is already known in this block, graph_arr[pos] is set and we
	      visit the block the first time.  We can return the value without
	      creating any new nodes.

	   3. The block is mature and we visit it the first time.  A Phi node needs
	      to be created (phi_merge).  If the Phi is not needed, as all it's
	      operands are the same value reaching the block through different
	      paths, it's optimized away and the value itself is returned.

	   4. The block is mature, and we visit it the second time.  Now two
	      subcases are possible:
	      * The value was computed completely the last time we were here.  This
	        is the case if there is no loop.  We can return the proper value.
	      * The recursion that visited this node and set the flag did not
	        return yet.  We are computing a value in a loop and need to
	        break the recursion.  This case only happens if we visited
	        the same block with phi_merge before, which inserted a Phi0.
	        So we return the Phi0. */

	/* case 4 -- already visited. */
	if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
		/* As phi_merge allocates a Phi0 this value is always defined.  Here
		   is the critical difference of the two algorithms. */
		assert(block->attr.block.graph_arr[pos]);
		return block->attr.block.graph_arr[pos];

	/* visited the first time */
	set_irn_visited(block, get_irg_visited(current_ir_graph));

	/* Get the local valid value */
	res = block->attr.block.graph_arr[pos];

	/* case 2 -- If the value is actually computed, return it. */
	if (res) { return res; };

	if (block->attr.block.is_matured) { /* case 3 */

		/* The Phi has the same amount of ins as the corresponding block. */
		int ins = get_irn_arity(block);
		NEW_ARR_A (ir_node *, nin, ins);

		/* Phi merge collects the predecessors and then creates a node. */
		res = phi_merge (block, pos, mode, nin, ins);

	} else { /* case 1 */
		/* The block is not mature, we don't know how many in's are needed.  A Phi
		   with zero predecessors is created.  Such a Phi node is called Phi0
		   node.  The Phi0 is then added to the list of Phi0 nodes in this block
		   to be matured by mature_immBlock later.
		   The Phi0 has to remember the pos of it's internal value.  If the real
		   Phi is computed, pos is used to update the array with the local
		   values. */
		res = new_rd_Phi0(current_ir_graph, block, mode);
		res->attr.phi0.pos = pos;
		res->link = block->link;

	/* If we get here, the frontend missed a use-before-definition error */
	printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
	assert(mode->code >= irm_F && mode->code <= irm_P);
	res = new_rd_Const(NULL, current_ir_graph, block, mode,
	                   get_mode_null(mode));

	/* The local valid value is available now. */
	block->attr.block.graph_arr[pos] = res;

} /* get_r_value_internal */
2534 #endif /* USE_FAST_PHI_CONSTRUCTION */
2536 /* ************************************************************************** */
2539 * Finalize a Block node, when all control flows are known.
2540 * Acceptable parameters are only Block nodes.
/* Finalizes a Block node once all control-flow predecessors are known:
   matures all pending Phi0 nodes attached via block->link, marks the block
   matured and optimizes it in place.  Accepts only Block nodes.
   NOTE(review): sampled extract -- local declarations (ins, nin, n, next),
   the next = n->link step and some closing braces are not visible here;
   code reproduced verbatim. */
mature_immBlock(ir_node *block) {

	assert(get_irn_opcode(block) == iro_Block);
	/* @@@ should be commented in
	   assert (!get_Block_matured(block) && "Block already matured"); */

	if (!get_Block_matured(block)) {
		ins = ARR_LEN(block->in)-1;
		/* Fix block parameters */
		block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);

		/* An array for building the Phi nodes. */
		NEW_ARR_A(ir_node *, nin, ins);

		/* Traverse a chain of Phi nodes attached to this block and mature
		   these, too. */
		for (n = block->link; n; n = next) {
			inc_irg_visited(current_ir_graph);
			exchange(n, phi_merge(block, n->attr.phi0.pos, n->mode, nin, ins));

		block->attr.block.is_matured = 1;

		/* Now, as the block is a finished firm node, we can optimize it.
		   Since other nodes have been allocated since the block was created
		   we can not free the node on the obstack.  Therefore we have to call
		   optimize_in_place.
		   Unfortunately the optimization does not change a lot, as all allocated
		   nodes refer to the unoptimized node.
		   We can call _2, as global cse has no effect on blocks. */
		block = optimize_in_place_2(block);
		IRN_VRFY_IRG(block, current_ir_graph);

} /* mature_immBlock */
/* Debug-info ("d") variants of the comfortable constructors: each builds its
   node in the current block of current_ir_graph; constants are always placed
   in the start block.
   NOTE(review): sampled extract -- some closing braces and local
   declarations are not visible; lines reproduced verbatim. */
new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
	return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);

new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
	return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);

new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
	return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
} /* new_d_Const_long */

new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
	return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
} /* new_d_Const_type */

new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
	return new_bd_Id(db, current_ir_graph->current_block, val, mode);

new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
	return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);

/* Default Proj of a Cond: marks the Cond's kind as 'fragmentary' and records
   max_proj as the default projection number. */
new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {

	assert(arg->op == op_Cond);
	arg->attr.cond.kind = fragmentary;
	arg->attr.cond.default_proj = max_proj;
	res = new_Proj(arg, mode_X, max_proj);

} /* new_d_defaultProj */

new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
	return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);

/* Same as new_d_Conv, but the trailing 1 marks the conversion as strict. */
new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
	return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
} /* new_d_strictConv */

new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
	return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);

new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
	return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2650 * Allocate the frag array.
/**
 * Allocate the frag array for a fragile op, but only while the graph is
 * still being built, the node was not optimized into a different op, and
 * the array is not already present (CSE may have set it).
 * NOTE(review): the closing braces of the two ifs are not visible in this
 * sampled extract; code reproduced verbatim.
 */
static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
	if (get_opt_precise_exc_context()) {
		if ((current_ir_graph->phase_state == phase_building) &&
		    (get_irn_op(res) == op) && /* Could be optimized away. */
		    !*frag_store) /* Could be a cse where the arr is already set. */ {
			*frag_store = new_frag_arr(res);
} /* allocate_frag_arr */
/* Fragile arithmetic constructors (Quot/DivMod/Div/Mod): build the node in
   the current block, then -- with precise exception context -- attach a frag
   array via allocate_frag_arr.
   NOTE(review): sampled extract -- #endif lines, returns and some closers
   are not visible; lines reproduced verbatim. */
new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
	res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr);  /* Could be optimized away. */

new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
	res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);  /* Could be optimized away. */

} /* new_d_DivMod */

new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
	res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr);  /* Could be optimized away. */

new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
	res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr);  /* Could be optimized away. */
/* Debug-info constructors placing their node in the current block; fragile
   ops (Call/Load/Store/Alloc/CopyB/Bound) additionally receive a frag array
   when precise exception context is enabled.
   NOTE(review): sampled extract -- #endif lines, returns, signature
   continuations and several closing braces of the original are not visible;
   lines reproduced verbatim, comments only added/edited. */
new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
	return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);

new_d_Jmp(dbg_info *db) {
	return new_bd_Jmp(db, current_ir_graph->current_block);

new_d_IJmp(dbg_info *db, ir_node *tgt) {
	return new_bd_IJmp(db, current_ir_graph->current_block, tgt);

new_d_Cond(dbg_info *db, ir_node *c) {
	return new_bd_Cond(db, current_ir_graph->current_block, c);

new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,

	res = new_bd_Call(db, current_ir_graph->current_block,
	                  store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);  /* Could be optimized away. */

new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
	return new_bd_Return(db, current_ir_graph->current_block,
} /* new_d_Return */

new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
	res = new_bd_Load(db, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);  /* Could be optimized away. */

new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
	res = new_bd_Store(db, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);  /* Could be optimized away. */

new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
            where_alloc where) {
	res = new_bd_Alloc(db, current_ir_graph->current_block,
	                   store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);  /* Could be optimized away. */

new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
           ir_node *size, ir_type *free_type, where_alloc where) {
	return new_bd_Free(db, current_ir_graph->current_block,
	                   store, ptr, size, free_type, where);

new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
/* GL: objptr was called frame before.  Frame was a bad choice for the name
   as the operand could as well be a pointer to a dynamic object. */
	return new_bd_Sel(db, current_ir_graph->current_block,
	                  store, objptr, 0, NULL, ent);
} /* new_d_simpleSel */

new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
	return new_bd_Sel(db, current_ir_graph->current_block,
	                  store, objptr, n_index, index, sel);

/* SymConsts, like Consts, live in the start block. */
new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
	return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
} /* new_d_SymConst_type */

new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
	return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
	                            value, kind, firm_unknown_type);
} /* new_d_SymConst */

new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
	return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);

	return _new_d_Bad();

new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
	return new_bd_Confirm(db, current_ir_graph->current_block,
} /* new_d_Confirm */

new_d_Unknown(ir_mode *m) {
	return new_bd_Unknown(m);
} /* new_d_Unknown */

new_d_CallBegin(dbg_info *db, ir_node *call) {
	return new_bd_CallBegin(db, current_ir_graph->current_block, call);
} /* new_d_CallBegin */

new_d_EndReg(dbg_info *db) {
	return new_bd_EndReg(db, current_ir_graph->current_block);
} /* new_d_EndReg */

new_d_EndExcept(dbg_info *db) {
	return new_bd_EndExcept(db, current_ir_graph->current_block);
} /* new_d_EndExcept */

new_d_Break(dbg_info *db) {
	return new_bd_Break(db, current_ir_graph->current_block);

new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
	return new_bd_Filter(db, current_ir_graph->current_block,
} /* new_d_Filter */

(new_d_NoMem)(void) {
	return _new_d_NoMem();

new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
          ir_node *ir_true, ir_mode *mode) {
	return new_bd_Mux(db, current_ir_graph->current_block,
	                  sel, ir_false, ir_true, mode);

new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
	return new_bd_Psi(db, current_ir_graph->current_block,
	                  arity, conds, vals, mode);

ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
                     ir_node *dst, ir_node *src, ir_type *data_type) {
	res = new_bd_CopyB(db, current_ir_graph->current_block,
	                   store, dst, src, data_type);
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);

new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
	return new_bd_InstOf(db, current_ir_graph->current_block,
	                     store, objptr, type);
} /* new_d_InstOf */

new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
	return new_bd_Raise(db, current_ir_graph->current_block, store, obj);

ir_node *new_d_Bound(dbg_info *db,ir_node *store,
                     ir_node *idx, ir_node *lower, ir_node *upper) {
	res = new_bd_Bound(db, current_ir_graph->current_block,
	                   store, idx, lower, upper);
#if PRECISE_EXC_CONTEXT
	allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);

new_d_Pin(dbg_info *db, ir_node *node) {
	return new_bd_Pin(db, current_ir_graph->current_block, node);

new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
          int n_outs, ir_asm_constraint *outputs,
          int n_clobber, ident *clobber[], ident *asm_text) {
	return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2939 /* ********************************************************************* */
2940 /* Comfortable interface with automatic Phi node construction. */
2941 /* (Uses also constructors of ?? interface, except new_Block. */
2942 /* ********************************************************************* */
2944 /* Block construction */
2945 /* immature Block without predecessors */
/* Creates an immature Block without predecessors (in-array length -1 makes
   it dynamically growable), makes it the current block and initializes the
   per-slot value array used for automatic Phi construction.
   NOTE(review): sampled extract -- the res declaration and return res are
   not visible; code reproduced verbatim. */
new_d_immBlock(dbg_info *db) {

	assert(get_irg_phase_state(current_ir_graph) == phase_building);
	/* creates a new dynamic in-array as length of in is -1 */
	res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
	current_ir_graph->current_block = res;

	/* macroblock head */

	res->attr.block.is_matured  = 0;
	res->attr.block.is_dead     = 0;
	res->attr.block.is_mb_head  = 1;   /* a fresh immature block heads its own macroblock */
	res->attr.block.has_label   = 0;
	res->attr.block.irg         = current_ir_graph;
	res->attr.block.backedge    = NULL;
	res->attr.block.in_cg       = NULL;
	res->attr.block.cg_backedge = NULL;
	res->attr.block.extblk      = NULL;
	res->attr.block.region      = NULL;
	res->attr.block.mb_depth    = 0;
	res->attr.block.label       = 0;

	set_Block_block_visited(res, 0);

	/* Create and initialize array for Phi-node construction. */
	res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
	                                      current_ir_graph->n_loc);
	memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

	/* Immature block may not be optimized! */
	IRN_VRFY_IRG(res, current_ir_graph);

} /* new_d_immBlock */
new_immBlock(void) {
	return new_d_immBlock(NULL);
} /* new_immBlock */

/* Immature PartBlock: a block that continues the macroblock of pred_jmp's
   block (shares its macroblock head, depth + 1).
   NOTE(review): sampled extract -- return res is not visible. */
new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
	ir_node *res = new_d_immBlock(db);
	ir_node *blk = get_nodes_block(pred_jmp);

	res->in[0] = blk->in[0];   /* inherit the macroblock head */
	add_immBlock_pred(res, pred_jmp);

	res->attr.block.is_mb_head = 0;
	res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
} /* new_d_immPartBlock */

new_immPartBlock(ir_node *pred_jmp) {
	return new_d_immPartBlock(NULL, pred_jmp);
} /* new_immPartBlock */
3009 /* add an edge to a jmp/control flow node */
3011 add_immBlock_pred(ir_node *block, ir_node *jmp) {
3012 int n = ARR_LEN(block->in) - 1;
3014 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
3015 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
3016 assert(jmp != NULL);
3018 ARR_APP1(ir_node *, block->in, jmp);
3020 hook_set_irn_n(block, n, jmp, NULL);
3021 } /* add_immBlock_pred */
3023 /* changing the current block */
3025 set_cur_block(ir_node *target) {
3026 current_ir_graph->current_block = target;
3027 } /* set_cur_block */
3029 /* ************************ */
3030 /* parameter administration */
/* Get the current value of local-variable slot pos in the current block.
   pos is shifted by one because graph_arr[0] holds the current store. */
get_d_value(dbg_info *db, int pos, ir_mode *mode) {
	ir_graph *irg = current_ir_graph;
	assert(get_irg_phase_state(irg) == phase_building);
	/* fresh visited counter for the recursive Phi construction walk */
	inc_irg_visited(irg);

	return get_r_value_internal(irg->current_block, pos + 1, mode);
/* get a value from the parameter array from the current block by its index */
get_value(int pos, ir_mode *mode) {
	return get_d_value(NULL, pos, mode);

/* Set a value at position pos in the parameter array of the current block.
   Slot 0 is reserved for the store, hence pos + 1. */
set_value(int pos, ir_node *value) {
	ir_graph *irg = current_ir_graph;
	assert(get_irg_phase_state(irg) == phase_building);
	assert(pos+1 < irg->n_loc);
	irg->current_block->attr.block.graph_arr[pos + 1] = value;

/* Find the value number for a node in the current block: scans graph_arr
   backwards, skipping slot 0 (the store).
   NOTE(review): sampled extract -- the i declaration and the return
   statements are not visible here. */
find_value(ir_node *value) {
	ir_node *bl = current_ir_graph->current_block;

	for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
		if (bl->attr.block.graph_arr[i] == value)

/* get the current store (memory state; graph_arr slot 0)
   NOTE(review): the signature line of get_store is not visible here. */
	ir_graph *irg = current_ir_graph;

	assert(get_irg_phase_state(irg) == phase_building);
	/* GL: one could call get_value instead */
	inc_irg_visited(irg);
	return get_r_value_internal(irg->current_block, 0, mode_M);
/* Set the current store: handles automatic Sync construction for Load nodes.
   NOTE(review): sampled extract -- the assignments filling in[0]/in[1],
   else branches and closing braces are not visible; code reproduced
   verbatim. */
set_store(ir_node *store) {
	ir_node *load, *pload, *pred, *in[2];

	assert(get_irg_phase_state(current_ir_graph) == phase_building);
	/* Beware: due to dead code elimination, a store might become a Bad node even in
	   the construction phase. */
	assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");

	if (get_opt_auto_create_sync()) {
		/* handle non-volatile Load nodes by automatically creating Sync's */
		load = skip_Proj(store);
		if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
			pred = get_Load_mem(load);

			if (is_Sync(pred)) {
				/* a Load after a Sync: move it up */
				ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));

				set_Load_mem(load, get_memop_mem(mem));
				add_Sync_pred(pred, store);
				pload = skip_Proj(pred);
				if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
					/* a Load after a Load: create a new Sync */
					set_Load_mem(load, get_Load_mem(pload));

					store = new_Sync(2, in);

	/* graph_arr slot 0 holds the current memory state */
	current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Mark a node so it is not removed as dead code: attach it as a keep-alive
   edge to the graph's End node. */
keep_alive(ir_node *ka) {
	add_End_keepalive(get_irg_end(current_ir_graph), ka);
3125 /* --- Useful access routines --- */
3126 /* Returns the current block of the current graph. To set the current
3127 block use set_cur_block. */
3128 ir_node *get_cur_block(void) {
3129 return get_irg_current_block(current_ir_graph);
3130 } /* get_cur_block */
3132 /* Returns the frame type of the current graph */
3133 ir_type *get_cur_frame_type(void) {
3134 return get_irg_frame_type(current_ir_graph);
3135 } /* get_cur_frame_type */
3138 /* ********************************************************************* */
/* Call once for each run of the library: installs the language-dependent
   callback used to initialize otherwise uninitialized local variables. */
init_cons(uninitialized_local_variable_func_t *func) {
	default_initialize_local_variable = func;
/* Finalize construction of every graph in the program, then lift the whole
   program representation to phase_high.
   NOTE(review): the i declaration and the loop's closing brace are not
   visible in this sampled extract. */
irp_finalize_cons(void) {

	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		irg_finalize_cons(get_irp_irg(i));

	irp->phase_state = phase_high;
} /* irp_finalize_cons */
/* Convenience constructors without debug information: each simply delegates
   to its new_d_* counterpart with a NULL dbg_info.
   NOTE(review): sampled extract -- most closing braces and some signature
   continuations are not visible; lines reproduced verbatim. */
ir_node *new_Block(int arity, ir_node **in) {
	return new_d_Block(NULL, arity, in);
ir_node *new_Start(void) {
	return new_d_Start(NULL);
ir_node *new_End(void) {
	return new_d_End(NULL);
ir_node *new_Jmp(void) {
	return new_d_Jmp(NULL);
ir_node *new_IJmp(ir_node *tgt) {
	return new_d_IJmp(NULL, tgt);
ir_node *new_Cond(ir_node *c) {
	return new_d_Cond(NULL, c);
ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
	return new_d_Return(NULL, store, arity, in);
ir_node *new_Const(ir_mode *mode, tarval *con) {
	return new_d_Const(NULL, mode, con);
ir_node *new_Const_long(ir_mode *mode, long value) {
	return new_d_Const_long(NULL, mode, value);
/* derives the mode from the given type */
ir_node *new_Const_type(tarval *con, ir_type *tp) {
	return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
	return new_d_SymConst_type(NULL, value, kind, type);
ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
	return new_d_SymConst(NULL, value, kind);
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
	return new_d_simpleSel(NULL, store, objptr, ent);
ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
	return new_d_Sel(NULL, store, objptr, arity, in, ent);
ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
	return new_d_Call(NULL, store, callee, arity, in, tp);
ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Add(NULL, op1, op2, mode);
ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Sub(NULL, op1, op2, mode);
ir_node *new_Minus(ir_node *op, ir_mode *mode) {
	return new_d_Minus(NULL, op, mode);
ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Mul(NULL, op1, op2, mode);
ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
	return new_d_Quot(NULL, memop, op1, op2, mode, state);
ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
	return new_d_DivMod(NULL, memop, op1, op2, mode, state);
ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
	return new_d_Div(NULL, memop, op1, op2, mode, state);
ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
	return new_d_Mod(NULL, memop, op1, op2, mode, state);
ir_node *new_Abs(ir_node *op, ir_mode *mode) {
	return new_d_Abs(NULL, op, mode);
ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_And(NULL, op1, op2, mode);
ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Or(NULL, op1, op2, mode);
ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Eor(NULL, op1, op2, mode);
ir_node *new_Not(ir_node *op, ir_mode *mode) {
	return new_d_Not(NULL, op, mode);
ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
	return new_d_Shl(NULL, op, k, mode);
ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
	return new_d_Shr(NULL, op, k, mode);
ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
	return new_d_Shrs(NULL, op, k, mode);
ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
	return new_d_Rot(NULL, op, k, mode);
ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Carry(NULL, op1, op2, mode);
ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
	return new_d_Borrow(NULL, op1, op2, mode);
ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
	return new_d_Cmp(NULL, op1, op2);
ir_node *new_Conv(ir_node *op, ir_mode *mode) {
	return new_d_Conv(NULL, op, mode);
ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
	return new_d_strictConv(NULL, op, mode);
ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
	return new_d_Cast(NULL, op, to_tp);
ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
	return new_d_Phi(NULL, arity, in, mode);
ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
	return new_d_Load(NULL, store, addr, mode);
ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
	return new_d_Store(NULL, store, addr, val);
ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
                   where_alloc where) {
	return new_d_Alloc(NULL, store, size, alloc_type, where);
ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
                  ir_type *free_type, where_alloc where) {
	return new_d_Free(NULL, store, ptr, size, free_type, where);
ir_node *new_Sync(int arity, ir_node *in[]) {
	return new_d_Sync(NULL, arity, in);
ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
	return new_d_Proj(NULL, arg, mode, proj);
ir_node *new_defaultProj(ir_node *arg, long max_proj) {
	return new_d_defaultProj(NULL, arg, max_proj);
ir_node *new_Tuple(int arity, ir_node **in) {
	return new_d_Tuple(NULL, arity, in);
ir_node *new_Id(ir_node *val, ir_mode *mode) {
	return new_d_Id(NULL, val, mode);
ir_node *new_Bad(void) {
ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
	return new_d_Confirm(NULL, val, bound, cmp);
ir_node *new_Unknown(ir_mode *m) {
	return new_d_Unknown(m);
ir_node *new_CallBegin(ir_node *callee) {
	return new_d_CallBegin(NULL, callee);
ir_node *new_EndReg(void) {
	return new_d_EndReg(NULL);
ir_node *new_EndExcept(void) {
	return new_d_EndExcept(NULL);
ir_node *new_Break(void) {
	return new_d_Break(NULL);
ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
	return new_d_Filter(NULL, arg, mode, proj);
ir_node *new_NoMem(void) {
	return new_d_NoMem();
ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
	return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
	return new_d_Psi(NULL, arity, conds, vals, mode);
ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
	return new_d_CopyB(NULL, store, dst, src, data_type);
ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
	return new_d_InstOf(NULL, store, objptr, ent);
ir_node *new_Raise(ir_node *store, ir_node *obj) {
	return new_d_Raise(NULL, store, obj);
ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
	return new_d_Bound(NULL, store, idx, lower, upper);
ir_node *new_Pin(ir_node *node) {
	return new_d_Pin(NULL, node);
ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
                 int n_outs, ir_asm_constraint *outputs,
                 int n_clobber, ident *clobber[], ident *asm_text) {
	return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3362 /* create a new anchor node */
3363 ir_node *new_Anchor(ir_graph *irg) {
3364 ir_node *in[anchor_last];
3365 memset(in, 0, sizeof(in));
3366 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);