2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
60 /* when we need verifying */
/* NOTE(review): the preprocessor conditional (presumably on NDEBUG) that
   selects between the empty and the checking definition of IRN_VRFY_IRG
   is elided in this excerpt; the second arm forwards to irn_vrfy_irg().
   Confirm against the full file. */
62 # define IRN_VRFY_IRG(res, irg)
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
68 * Language dependent variable initialization callback.
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/*
 * Constructor-generator macros.  Three layers of node constructors exist:
 *   new_bd_*: build a node in current_ir_graph, given an explicit block;
 *   new_rd_*: save current_ir_graph, install the explicit irg argument,
 *             delegate to the bd variant, then restore the old graph;
 *   new_d_* : build in current_ir_graph->current_block.
 * Each macro below is instantiated once per binary / unary / div-like
 * opcode.  All constructed nodes are run through optimize_node() and the
 * (possibly empty) IRN_VRFY_IRG check.
 */
72 /* creates a bd constructor for a binop */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = state; \
117 res->attr.divmod.res_mode = mode; \
118 res->attr.divmod.no_remainder = 0; \
119 res = optimize_node(res); \
120 IRN_VRFY_IRG(res, irg); \
124 /* creates a rd constructor for a binop */
125 #define NEW_RD_BINOP(instr) \
127 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
128 ir_node *op1, ir_node *op2, ir_mode *mode) \
131 ir_graph *rem = current_ir_graph; \
132 current_ir_graph = irg; \
133 res = new_bd_##instr(db, block, op1, op2, mode); \
134 current_ir_graph = rem; \
138 /* creates a rd constructor for an unop */
139 #define NEW_RD_UNOP(instr) \
141 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
142 ir_node *op, ir_mode *mode) \
145 ir_graph *rem = current_ir_graph; \
146 current_ir_graph = irg; \
147 res = new_bd_##instr(db, block, op, mode); \
148 current_ir_graph = rem; \
152 /* creates a rd constructor for an divop */
153 #define NEW_RD_DIVOP(instr) \
155 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
156 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
159 ir_graph *rem = current_ir_graph; \
160 current_ir_graph = irg; \
161 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
162 current_ir_graph = rem; \
166 /* creates a d constructor for an binop */
167 #define NEW_D_BINOP(instr) \
169 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
170 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
173 /* creates a d constructor for an unop */
174 #define NEW_D_UNOP(instr) \
176 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
177 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
182 * Constructs a Block with a fixed number of predecessors.
183 * Does not set current_block. Cannot be used with automatic
184 * Phi node construction.
187 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
189 ir_graph *irg = current_ir_graph;
191 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
193 /* macroblock header */
196 res->attr.block.is_dead = 0;
197 res->attr.block.is_mb_head = 1;
198 res->attr.block.has_label = 0;
199 res->attr.block.irg = irg;
200 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
201 res->attr.block.in_cg = NULL;
202 res->attr.block.cg_backedge = NULL;
203 res->attr.block.extblk = NULL;
204 res->attr.block.mb_depth = 0;
205 res->attr.block.label = 0;
207 set_Block_matured(res, 1);
208 set_Block_block_visited(res, 0);
210 IRN_VRFY_IRG(res, irg);
/* Build a Start node: mode_T, no predecessors, in the given block. */
215 new_bd_Start(dbg_info *db, ir_node *block) {
217 ir_graph *irg = current_ir_graph;
219 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
221 IRN_VRFY_IRG(res, irg);
/* Build an End node: mode_X with dynamic arity (-1); keep-alives are
   added later via add_End_keepalive(). */
226 new_bd_End(dbg_info *db, ir_node *block) {
228 ir_graph *irg = current_ir_graph;
230 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
232 IRN_VRFY_IRG(res, irg);
237 * Creates a Phi node with all predecessors. Calling this constructor
238 * is only allowed if the corresponding block is mature.
241 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
243 ir_graph *irg = current_ir_graph;
247 /* Don't assert that block matured: the use of this constructor is strongly
249 if (get_Block_matured(block))
250 assert(get_irn_arity(block) == arity);
252 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
254 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
/* Scan predecessors for Unknown inputs; a Phi with Unknown inputs must
   not be optimized away yet. */
256 for (i = arity - 1; i >= 0; --i)
257 if (get_irn_op(in[i]) == op_Unknown) {
262 if (!has_unknown) res = optimize_node(res);
263 IRN_VRFY_IRG(res, irg);
265 /* Memory Phis in endless loops must be kept alive.
266 As we can't distinguish these easily we keep all of them alive. */
267 if (is_Phi(res) && mode == mode_M)
268 add_End_keepalive(get_irg_end(irg), res);
/* Build a Const node with an explicit type.  Note: the node is always
   placed in the start block of the current graph -- the 'block' parameter
   is not used for placement (constants are graph-global). */
273 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
275 ir_graph *irg = current_ir_graph;
278 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
279 res->attr.con.tv = con;
280 set_Const_type(res, tp); /* Call method because of complex assertion. */
281 res = optimize_node (res);
282 assert(get_Const_type(res) == tp);
283 IRN_VRFY_IRG(res, irg);
286 } /* new_bd_Const_type */
/* Constructs a Const with type firm_unknown_type.
 * Fixed to delegate directly to new_bd_Const_type: a bd-level constructor
 * previously detoured through the rd-level new_rd_Const_type, which only
 * saves/restores current_ir_graph around a call back into the bd layer --
 * redundant work and an inconsistent layering. */
289 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
292 return new_bd_Const_type(db, block, mode, con, firm_unknown_type);
/* Constructs a Const from a long value (converted via new_tarval_from_long).
 * Fixed to stay in the bd layer: it previously called new_rd_Const, which
 * merely swaps current_ir_graph and calls back down into the bd layer. */
296 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
299 return new_bd_Const(db, block, mode, new_tarval_from_long(value, mode));
300 } /* new_bd_Const_long */
/* Build an Id node: forwards 'val' unchanged (normally optimized away). */
303 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
305 ir_graph *irg = current_ir_graph;
307 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
308 res = optimize_node(res);
309 IRN_VRFY_IRG(res, irg);
/* Build a Proj node selecting result 'proj' of the mode_T node 'arg'. */
314 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
317 ir_graph *irg = current_ir_graph;
319 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
320 res->attr.proj = proj;
323 assert(get_Proj_pred(res));
324 assert(get_nodes_block(get_Proj_pred(res)));
326 res = optimize_node(res);
328 IRN_VRFY_IRG(res, irg);
/* Build the default Proj of a Cond node; also marks the Cond as
   'fragmentary' and records max_proj as its default projection. */
333 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
336 ir_graph *irg = current_ir_graph;
338 assert(arg->op == op_Cond);
339 arg->attr.cond.kind = fragmentary;
340 arg->attr.cond.default_proj = max_proj;
341 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
343 } /* new_bd_defaultProj */
/* Build a Conv node; strict_flag marks a strict (non-removable) Conv. */
346 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
348 ir_graph *irg = current_ir_graph;
350 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
351 res->attr.conv.strict = strict_flag;
352 res = optimize_node(res);
353 IRN_VRFY_IRG(res, irg);
/* Build a Cast node to an atomic type; keeps the operand's mode. */
358 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
360 ir_graph *irg = current_ir_graph;
362 assert(is_atomic_type(to_tp));
364 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
365 res->attr.cast.totype = to_tp;
366 res = optimize_node(res);
367 IRN_VRFY_IRG(res, irg);
/* Build a Tuple node (mode_T) from 'arity' inputs. */
372 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
374 ir_graph *irg = current_ir_graph;
376 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
377 res = optimize_node (res);
378 IRN_VRFY_IRG(res, irg);
403 /** Creates a remainderless Div node. */
404 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
405 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
409 ir_graph *irg = current_ir_graph;
413 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
414 res->attr.divmod.exc.pin_state = state;
415 res->attr.divmod.res_mode = mode;
416 res->attr.divmod.no_remainder = 1;
417 res = optimize_node(res);
418 IRN_VRFY_IRG(res, irg);
/* Build a Cmp node (mode_T; results selected via Proj with a pn_Cmp). */
423 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
426 ir_graph *irg = current_ir_graph;
429 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
430 res = optimize_node(res);
431 IRN_VRFY_IRG(res, irg);
/* Build an unconditional Jmp (mode_X, no inputs). */
436 new_bd_Jmp(dbg_info *db, ir_node *block) {
438 ir_graph *irg = current_ir_graph;
440 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
441 res = optimize_node(res);
442 IRN_VRFY_IRG(res, irg);
/* Build an indirect jump to the address computed by 'tgt'. */
447 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
449 ir_graph *irg = current_ir_graph;
451 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
452 res = optimize_node(res);
453 IRN_VRFY_IRG(res, irg);
/* Build a Cond node on selector 'c'; starts in 'dense' kind with no
   default projection and no jump prediction. */
458 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
460 ir_graph *irg = current_ir_graph;
462 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
463 res->attr.cond.kind = dense;
464 res->attr.cond.default_proj = 0;
465 res->attr.cond.pred = COND_JMP_PRED_NONE;
466 res = optimize_node(res);
467 IRN_VRFY_IRG(res, irg);
/* Build a Call node: inputs are [store, callee, arg0..argN-1]; the call
   type must be a method type (or the unknown type). */
472 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
473 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
477 ir_graph *irg = current_ir_graph;
480 NEW_ARR_A(ir_node *, r_in, r_arity);
483 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
485 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
487 assert((get_unknown_type() == tp) || is_Method_type(tp));
488 set_Call_type(res, tp);
489 res->attr.call.exc.pin_state = op_pin_state_pinned;
490 res->attr.call.callee_arr = NULL;
491 res = optimize_node(res);
492 IRN_VRFY_IRG(res, irg);
/* Build a Return node: inputs are [store, res0..resN-1]. */
497 new_bd_Return(dbg_info *db, ir_node *block,
498 ir_node *store, int arity, ir_node **in) {
502 ir_graph *irg = current_ir_graph;
505 NEW_ARR_A (ir_node *, r_in, r_arity);
507 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
508 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
509 res = optimize_node(res);
510 IRN_VRFY_IRG(res, irg);
512 } /* new_bd_Return */
/* Build a Load of 'mode' from address 'adr'; defaults to non-volatile,
   aligned, pinned. */
515 new_bd_Load(dbg_info *db, ir_node *block,
516 ir_node *store, ir_node *adr, ir_mode *mode) {
519 ir_graph *irg = current_ir_graph;
523 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
524 res->attr.load.exc.pin_state = op_pin_state_pinned;
525 res->attr.load.load_mode = mode;
526 res->attr.load.volatility = volatility_non_volatile;
527 res->attr.load.aligned = align_is_aligned;
528 res = optimize_node(res);
529 IRN_VRFY_IRG(res, irg);
/* Build a Store of 'val' to address 'adr'; same defaults as Load. */
534 new_bd_Store(dbg_info *db, ir_node *block,
535 ir_node *store, ir_node *adr, ir_node *val) {
538 ir_graph *irg = current_ir_graph;
543 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
544 res->attr.store.exc.pin_state = op_pin_state_pinned;
545 res->attr.store.volatility = volatility_non_volatile;
546 res->attr.store.aligned = align_is_aligned;
547 res = optimize_node(res);
548 IRN_VRFY_IRG(res, irg);
/* Build an Alloc node allocating 'size' bytes of alloc_type on
   stack or heap, as selected by 'where'. */
553 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
554 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
557 ir_graph *irg = current_ir_graph;
561 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
562 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
563 res->attr.alloc.where = where;
564 res->attr.alloc.type = alloc_type;
565 res = optimize_node(res);
566 IRN_VRFY_IRG(res, irg);
/* Build a Free node releasing 'ptr' (mode_M result). */
571 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
572 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
575 ir_graph *irg = current_ir_graph;
580 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
581 res->attr.free.where = where;
582 res->attr.free.type = free_type;
583 res = optimize_node(res);
584 IRN_VRFY_IRG(res, irg);
/* Build a Sel node selecting entity 'ent' relative to objptr; result mode
   is mode_P_code for method entities, mode_P_data otherwise. */
589 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
590 int arity, ir_node **in, ir_entity *ent) {
594 ir_graph *irg = current_ir_graph;
595 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
597 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
600 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
603 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
605 * Sel's can select functions which should be of mode mode_P_code.
607 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
608 res->attr.sel.ent = ent;
609 res = optimize_node(res);
610 IRN_VRFY_IRG(res, irg);
/* Build a SymConst of the given kind/symbol with an explicit type. */
615 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
616 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
617 ir_graph *irg = current_ir_graph;
618 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
620 res->attr.symc.num = symkind;
621 res->attr.symc.sym = value;
622 res->attr.symc.tp = tp;
624 res = optimize_node(res);
625 IRN_VRFY_IRG(res, irg);
627 } /* new_bd_SymConst_type */
/* Build an empty Sync node (mode_M, dynamic arity); predecessors are
   attached afterwards via add_Sync_pred(). */
630 new_bd_Sync(dbg_info *db, ir_node *block) {
632 ir_graph *irg = current_ir_graph;
634 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
635 /* no need to call optimize node here, Sync are always created with no predecessors */
636 IRN_VRFY_IRG(res, irg);
/* Build a Confirm node asserting 'val cmp bound'; keeps val's mode. */
641 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
642 ir_node *in[2], *res;
643 ir_graph *irg = current_ir_graph;
647 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
648 res->attr.confirm.cmp = cmp;
649 res = optimize_node(res);
650 IRN_VRFY_IRG(res, irg);
652 } /* new_bd_Confirm */
/* Build an Unknown node of mode m; placed in the start block. */
655 new_bd_Unknown(ir_mode *m) {
657 ir_graph *irg = current_ir_graph;
659 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
660 res = optimize_node(res);
662 } /* new_bd_Unknown */
/* Build a CallBegin for interprocedural view; input is the Call's pointer. */
665 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
668 ir_graph *irg = current_ir_graph;
670 in[0] = get_Call_ptr(call);
671 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
672 /* res->attr.callbegin.irg = irg; */
673 res->attr.callbegin.call = call;
674 res = optimize_node(res);
675 IRN_VRFY_IRG(res, irg);
677 } /* new_bd_CallBegin */
/* Build an EndReg node and register it with the graph. */
680 new_bd_EndReg(dbg_info *db, ir_node *block) {
682 ir_graph *irg = current_ir_graph;
684 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
685 set_irg_end_reg(irg, res);
686 IRN_VRFY_IRG(res, irg);
688 } /* new_bd_EndReg */
/* Build an EndExcept node and register it with the graph. */
691 new_bd_EndExcept(dbg_info *db, ir_node *block) {
693 ir_graph *irg = current_ir_graph;
695 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
696 set_irg_end_except(irg, res);
697 IRN_VRFY_IRG (res, irg);
699 } /* new_bd_EndExcept */
/* Build a Break node (mode_X, no inputs). */
702 new_bd_Break(dbg_info *db, ir_node *block) {
704 ir_graph *irg = current_ir_graph;
706 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
707 res = optimize_node(res);
708 IRN_VRFY_IRG(res, irg);
/* Build a Filter node (interprocedural Proj analogue) selecting 'proj'
   from 'arg'. */
713 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
716 ir_graph *irg = current_ir_graph;
718 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
719 res->attr.filter.proj = proj;
720 res->attr.filter.in_cg = NULL;
721 res->attr.filter.backedge = NULL;
724 assert(get_Proj_pred(res));
725 assert(get_nodes_block(get_Proj_pred(res)));
727 res = optimize_node(res);
728 IRN_VRFY_IRG(res, irg);
730 } /* new_bd_Filter */
/* Build a Mux node: inputs are [sel, ir_false, ir_true]. */
733 new_bd_Mux(dbg_info *db, ir_node *block,
734 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
737 ir_graph *irg = current_ir_graph;
743 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
746 res = optimize_node(res);
747 IRN_VRFY_IRG(res, irg);
/* Build a Psi node: 'arity' condition/value pairs interleaved, followed
   by one default value (2*arity+1 inputs total). */
752 new_bd_Psi(dbg_info *db, ir_node *block,
753 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
756 ir_graph *irg = current_ir_graph;
759 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
761 for (i = 0; i < arity; ++i) {
763 in[2 * i + 1] = vals[i];
767 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
770 res = optimize_node(res);
771 IRN_VRFY_IRG(res, irg);
/* Build a CopyB node copying data_type from src to dst. */
776 new_bd_CopyB(dbg_info *db, ir_node *block,
777 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
780 ir_graph *irg = current_ir_graph;
786 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
788 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
789 res->attr.copyb.data_type = data_type;
790 res = optimize_node(res);
791 IRN_VRFY_IRG(res, irg);
/* Constructs an InstOf node (runtime type test of objptr against type).
 * Bug fix: the node was created with op_Sel, yet its attributes are
 * written through attr.instof -- opcode and attribute union member did
 * not match. Create it with op_InstOf so they agree. */
796 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
797 ir_node *objptr, ir_type *type) {
800 ir_graph *irg = current_ir_graph;
804 res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
805 res->attr.instof.type = type;
806 res = optimize_node(res);
807 IRN_VRFY_IRG(res, irg);
809 } /* new_bd_InstOf */
/* Build a Raise node throwing 'obj'. */
812 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
815 ir_graph *irg = current_ir_graph;
819 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
820 res = optimize_node(res);
821 IRN_VRFY_IRG(res, irg);
/* Build a Bound node checking lower <= idx < upper. */
826 new_bd_Bound(dbg_info *db, ir_node *block,
827 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
830 ir_graph *irg = current_ir_graph;
836 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
837 res->attr.bound.exc.pin_state = op_pin_state_pinned;
838 res = optimize_node(res);
839 IRN_VRFY_IRG(res, irg);
/* Build a Pin node fixing 'node' to this block; keeps node's mode. */
844 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
846 ir_graph *irg = current_ir_graph;
848 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
849 res = optimize_node(res);
850 IRN_VRFY_IRG(res, irg);
/* Build an ASM node; input/output constraints and clobber list are
   copied onto the graph's obstack. */
855 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
856 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
858 ir_graph *irg = current_ir_graph;
861 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
862 res->attr.assem.pin_state = op_pin_state_pinned;
863 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
864 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
865 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
866 res->attr.assem.asm_text = asm_text;
868 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
869 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
870 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
872 res = optimize_node(res);
873 IRN_VRFY_IRG(res, irg);
877 /* --------------------------------------------- */
878 /* private interfaces, for professional use only */
879 /* --------------------------------------------- */
881 /* Constructs a Block with a fixed number of predecessors.
882 Does not set current_block. Can not be used with automatic
883 Phi node construction. */
/* rd-layer constructors: each installs 'irg' as current_ir_graph,
   delegates to the matching bd constructor, then restores the old graph. */
885 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
886 ir_graph *rem = current_ir_graph;
889 current_ir_graph = irg;
890 res = new_bd_Block(db, arity, in);
891 current_ir_graph = rem;
/* Build a Start node in graph irg. */
897 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
898 ir_graph *rem = current_ir_graph;
901 current_ir_graph = irg;
902 res = new_bd_Start(db, block);
903 current_ir_graph = rem;
/* Build an End node in graph irg. */
909 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
911 ir_graph *rem = current_ir_graph;
913 current_ir_graph = irg;
914 res = new_bd_End(db, block);
915 current_ir_graph = rem;
920 /* Creates a Phi node with all predecessors. Calling this constructor
921 is only allowed if the corresponding block is mature. */
923 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
925 ir_graph *rem = current_ir_graph;
927 current_ir_graph = irg;
928 res = new_bd_Phi(db, block,arity, in, mode);
929 current_ir_graph = rem;
/* Build a typed Const in graph irg. */
935 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
937 ir_graph *rem = current_ir_graph;
939 current_ir_graph = irg;
940 res = new_bd_Const_type(db, block, mode, con, tp);
941 current_ir_graph = rem;
944 } /* new_rd_Const_type */
/* Build a Const with the unknown type in graph irg. */
947 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
949 ir_graph *rem = current_ir_graph;
951 current_ir_graph = irg;
952 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
953 current_ir_graph = rem;
/* Build a Const from a long value in graph irg. */
959 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
960 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
961 } /* new_rd_Const_long */
/* Build an Id node in graph irg. */
964 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
966 ir_graph *rem = current_ir_graph;
968 current_ir_graph = irg;
969 res = new_bd_Id(db, block, val, mode);
970 current_ir_graph = rem;
/* Build a Proj node in graph irg. */
976 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
979 ir_graph *rem = current_ir_graph;
981 current_ir_graph = irg;
982 res = new_bd_Proj(db, block, arg, mode, proj);
983 current_ir_graph = rem;
/* Build the default Proj of a Cond in graph irg. */
989 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
992 ir_graph *rem = current_ir_graph;
994 current_ir_graph = irg;
995 res = new_bd_defaultProj(db, block, arg, max_proj);
996 current_ir_graph = rem;
999 } /* new_rd_defaultProj */
/* Build a (non-strict) Conv in graph irg. */
1002 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1004 ir_graph *rem = current_ir_graph;
1006 current_ir_graph = irg;
1007 res = new_bd_Conv(db, block, op, mode, 0);
1008 current_ir_graph = rem;
/* Build a Cast in graph irg. */
1014 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1016 ir_graph *rem = current_ir_graph;
1018 current_ir_graph = irg;
1019 res = new_bd_Cast(db, block, op, to_tp);
1020 current_ir_graph = rem;
/* Build a Tuple in graph irg. */
1026 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1028 ir_graph *rem = current_ir_graph;
1030 current_ir_graph = irg;
1031 res = new_bd_Tuple(db, block, arity, in);
1032 current_ir_graph = rem;
1035 } /* new_rd_Tuple */
/* Macro-generated rd constructors for the remaining arithmetic opcodes. */
1043 NEW_RD_DIVOP(DivMod)
1056 NEW_RD_BINOP(Borrow)
1058 /* creates a rd constructor for an divRL */
1059 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1060 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1063 ir_graph *rem = current_ir_graph;
1064 current_ir_graph = irg;
1065 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1066 current_ir_graph = rem;
/* Build a Cmp in graph irg. */
1071 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1072 ir_node *op1, ir_node *op2) {
1074 ir_graph *rem = current_ir_graph;
1076 current_ir_graph = irg;
1077 res = new_bd_Cmp(db, block, op1, op2);
1078 current_ir_graph = rem;
/* Build a Jmp in graph irg. */
1084 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1086 ir_graph *rem = current_ir_graph;
1088 current_ir_graph = irg;
1089 res = new_bd_Jmp(db, block);
1090 current_ir_graph = rem;
/* Build an IJmp in graph irg. */
1096 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1098 ir_graph *rem = current_ir_graph;
1100 current_ir_graph = irg;
1101 res = new_bd_IJmp(db, block, tgt);
1102 current_ir_graph = rem;
/* Build a Cond in graph irg. */
1108 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1110 ir_graph *rem = current_ir_graph;
1112 current_ir_graph = irg;
1113 res = new_bd_Cond(db, block, c);
1114 current_ir_graph = rem;
/* Build a Call in graph irg. */
1120 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1121 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1123 ir_graph *rem = current_ir_graph;
1125 current_ir_graph = irg;
1126 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1127 current_ir_graph = rem;
/* Build a Return in graph irg. */
1133 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1134 ir_node *store, int arity, ir_node **in) {
1136 ir_graph *rem = current_ir_graph;
1138 current_ir_graph = irg;
1139 res = new_bd_Return(db, block, store, arity, in);
1140 current_ir_graph = rem;
1143 } /* new_rd_Return */
/* Build a Load in graph irg. */
1146 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1147 ir_node *store, ir_node *adr, ir_mode *mode) {
1149 ir_graph *rem = current_ir_graph;
1151 current_ir_graph = irg;
1152 res = new_bd_Load(db, block, store, adr, mode);
1153 current_ir_graph = rem;
/* Build a Store in graph irg. */
1159 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1160 ir_node *store, ir_node *adr, ir_node *val) {
1162 ir_graph *rem = current_ir_graph;
1164 current_ir_graph = irg;
1165 res = new_bd_Store(db, block, store, adr, val);
1166 current_ir_graph = rem;
1169 } /* new_rd_Store */
/* Build an Alloc in graph irg. */
1172 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1173 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1175 ir_graph *rem = current_ir_graph;
1177 current_ir_graph = irg;
1178 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1179 current_ir_graph = rem;
1182 } /* new_rd_Alloc */
/* Build a Free in graph irg. */
1185 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1186 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1188 ir_graph *rem = current_ir_graph;
1190 current_ir_graph = irg;
1191 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1192 current_ir_graph = rem;
/* Build a Sel with no index inputs (simple entity selection). */
1198 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1199 ir_node *store, ir_node *objptr, ir_entity *ent) {
1201 ir_graph *rem = current_ir_graph;
1203 current_ir_graph = irg;
1204 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1205 current_ir_graph = rem;
1208 } /* new_rd_simpleSel */
/* Build a Sel with index inputs in graph irg. */
1211 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1212 int arity, ir_node **in, ir_entity *ent) {
1214 ir_graph *rem = current_ir_graph;
1216 current_ir_graph = irg;
1217 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1218 current_ir_graph = rem;
/* Build a typed SymConst in graph irg. */
1224 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1225 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1227 ir_graph *rem = current_ir_graph;
1229 current_ir_graph = irg;
1230 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1231 current_ir_graph = rem;
1234 } /* new_rd_SymConst_type */
/* Convenience wrappers: each fills a symconst_symbol union and delegates
   to new_rd_SymConst_type, placing the node in the start block. */
1237 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1238 symconst_symbol value, symconst_kind symkind) {
1239 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1240 } /* new_rd_SymConst */
1242 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1243 symconst_symbol sym;
1244 sym.entity_p = symbol;
1245 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1246 } /* new_rd_SymConst_addr_ent */
1248 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1249 symconst_symbol sym;
1250 sym.entity_p = symbol;
1251 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1252 } /* new_rd_SymConst_ofs_ent */
1254 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1255 symconst_symbol sym;
1256 sym.ident_p = symbol;
1257 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1258 } /* new_rd_SymConst_addr_name */
1260 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1261 symconst_symbol sym;
1262 sym.type_p = symbol;
1263 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1264 } /* new_rd_SymConst_type_tag */
1266 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1267 symconst_symbol sym;
1268 sym.type_p = symbol;
1269 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1270 } /* new_rd_SymConst_size */
1272 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1273 symconst_symbol sym;
1274 sym.type_p = symbol;
1275 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1276 } /* new_rd_SymConst_align */
/* Build a Sync in graph irg; predecessors are attached one by one after
   construction via add_Sync_pred(). */
1279 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1281 ir_graph *rem = current_ir_graph;
1284 current_ir_graph = irg;
1285 res = new_bd_Sync(db, block);
1286 current_ir_graph = rem;
1288 for (i = 0; i < arity; ++i)
1289 add_Sync_pred(res, in[i]);
/* Return the graph's singleton Bad node. */
1295 new_rd_Bad(ir_graph *irg) {
1296 return get_irg_bad(irg);
/* Build a Confirm in graph irg. */
1300 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1302 ir_graph *rem = current_ir_graph;
1304 current_ir_graph = irg;
1305 res = new_bd_Confirm(db, block, val, bound, cmp);
1306 current_ir_graph = rem;
1309 } /* new_rd_Confirm */
/* Build an Unknown of mode m in graph irg. */
1312 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1314 ir_graph *rem = current_ir_graph;
1316 current_ir_graph = irg;
1317 res = new_bd_Unknown(m);
1318 current_ir_graph = rem;
1321 } /* new_rd_Unknown */
/* Build a CallBegin in graph irg. */
1324 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1326 ir_graph *rem = current_ir_graph;
1328 current_ir_graph = irg;
1329 res = new_bd_CallBegin(db, block, call);
1330 current_ir_graph = rem;
1333 } /* new_rd_CallBegin */
/* EndReg/EndExcept are built directly here (not via the bd layer) since
   the explicit irg argument suffices. */
1336 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1339 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1340 set_irg_end_reg(irg, res);
1341 IRN_VRFY_IRG(res, irg);
1343 } /* new_rd_EndReg */
1346 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1349 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1350 set_irg_end_except(irg, res);
1351 IRN_VRFY_IRG (res, irg);
1353 } /* new_rd_EndExcept */
/* Build a Break in graph irg. */
1356 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1358 ir_graph *rem = current_ir_graph;
1360 current_ir_graph = irg;
1361 res = new_bd_Break(db, block);
1362 current_ir_graph = rem;
1365 } /* new_rd_Break */
/* Build a Filter in graph irg. */
1368 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1371 ir_graph *rem = current_ir_graph;
1373 current_ir_graph = irg;
1374 res = new_bd_Filter(db, block, arg, mode, proj);
1375 current_ir_graph = rem;
1378 } /* new_rd_Filter */
/* Return the graph's singleton NoMem node. */
1381 new_rd_NoMem(ir_graph *irg) {
1382 return get_irg_no_mem(irg);
1383 } /* new_rd_NoMem */
/* Build a Mux in graph irg. */
1386 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1387 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1389 ir_graph *rem = current_ir_graph;
1391 current_ir_graph = irg;
1392 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1393 current_ir_graph = rem;
/* Build a Psi in graph irg. */
1399 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1400 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1402 ir_graph *rem = current_ir_graph;
1404 current_ir_graph = irg;
1405 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1406 current_ir_graph = rem;
/* Build a CopyB in graph irg. */
1411 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1412 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1414 ir_graph *rem = current_ir_graph;
1416 current_ir_graph = irg;
1417 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1418 current_ir_graph = rem;
1421 } /* new_rd_CopyB */
/* Build an InstOf in graph irg. */
1424 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1425 ir_node *objptr, ir_type *type) {
1427 ir_graph *rem = current_ir_graph;
1429 current_ir_graph = irg;
1430 res = new_bd_InstOf(db, block, store, objptr, type);
1431 current_ir_graph = rem;
1434 } /* new_rd_InstOf */
/* Build a Raise in graph irg. */
1437 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1439 ir_graph *rem = current_ir_graph;
1441 current_ir_graph = irg;
1442 res = new_bd_Raise(db, block, store, obj);
1443 current_ir_graph = rem;
1446 } /* new_rd_Raise */
/* Build a Bound in graph irg. */
1448 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1449 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1451 ir_graph *rem = current_ir_graph;
1453 current_ir_graph = irg;
1454 res = new_bd_Bound(db, block, store, idx, lower, upper);
1455 current_ir_graph = rem;
1458 } /* new_rd_Bound */
/* Build a Pin in graph irg. */
1460 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1462 ir_graph *rem = current_ir_graph;
1464 current_ir_graph = irg;
1465 res = new_bd_Pin(db, block, node);
1466 current_ir_graph = rem;
/* Build an ASM node in graph irg. */
1471 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1472 int arity, ir_node *in[], ir_asm_constraint *inputs,
1473 int n_outs, ir_asm_constraint *outputs,
1474 int n_clobber, ident *clobber[], ident *asm_text) {
1476 ir_graph *rem = current_ir_graph;
1478 current_ir_graph = irg;
1479 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1480 current_ir_graph = rem;
1486 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1487 return new_rd_Block(NULL, irg, arity, in);
1489 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1490 return new_rd_Start(NULL, irg, block);
1492 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1493 return new_rd_End(NULL, irg, block);
1495 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1496 return new_rd_Jmp(NULL, irg, block);
1498 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1499 return new_rd_IJmp(NULL, irg, block, tgt);
1501 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1502 return new_rd_Cond(NULL, irg, block, c);
1504 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1505 ir_node *store, int arity, ir_node **in) {
1506 return new_rd_Return(NULL, irg, block, store, arity, in);
1508 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1509 ir_mode *mode, tarval *con) {
1510 return new_rd_Const(NULL, irg, block, mode, con);
1512 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1513 ir_mode *mode, long value) {
1514 return new_rd_Const_long(NULL, irg, block, mode, value);
1516 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1517 ir_mode *mode, tarval *con, ir_type *tp) {
1518 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1520 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1521 symconst_symbol value, symconst_kind symkind) {
1522 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
1524 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1525 ir_node *objptr, ir_entity *ent) {
1526 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1528 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1529 ir_node *objptr, int n_index, ir_node **index,
1531 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1533 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1534 ir_node *callee, int arity, ir_node **in,
1536 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1538 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1539 ir_node *op1, ir_node *op2, ir_mode *mode) {
1540 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1542 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1543 ir_node *op1, ir_node *op2, ir_mode *mode) {
1544 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1546 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1547 ir_node *op, ir_mode *mode) {
1548 return new_rd_Minus(NULL, irg, block, op, mode);
1550 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1551 ir_node *op1, ir_node *op2, ir_mode *mode) {
1552 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1554 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1555 ir_node *op1, ir_node *op2, ir_mode *mode) {
1556 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1558 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1559 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1560 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1562 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1563 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1564 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1566 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1567 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1568 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1570 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1571 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1572 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1574 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1575 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1576 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1578 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1579 ir_node *op, ir_mode *mode) {
1580 return new_rd_Abs(NULL, irg, block, op, mode);
1582 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1583 ir_node *op1, ir_node *op2, ir_mode *mode) {
1584 return new_rd_And(NULL, irg, block, op1, op2, mode);
1586 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1587 ir_node *op1, ir_node *op2, ir_mode *mode) {
1588 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1590 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1591 ir_node *op1, ir_node *op2, ir_mode *mode) {
1592 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1594 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1595 ir_node *op, ir_mode *mode) {
1596 return new_rd_Not(NULL, irg, block, op, mode);
1598 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1599 ir_node *op, ir_node *k, ir_mode *mode) {
1600 return new_rd_Shl(NULL, irg, block, op, k, mode);
1602 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1603 ir_node *op, ir_node *k, ir_mode *mode) {
1604 return new_rd_Shr(NULL, irg, block, op, k, mode);
1606 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1607 ir_node *op, ir_node *k, ir_mode *mode) {
1608 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1610 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1611 ir_node *op, ir_node *k, ir_mode *mode) {
1612 return new_rd_Rot(NULL, irg, block, op, k, mode);
1614 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1615 ir_node *op, ir_node *k, ir_mode *mode) {
1616 return new_rd_Carry(NULL, irg, block, op, k, mode);
1618 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1619 ir_node *op, ir_node *k, ir_mode *mode) {
1620 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1622 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1623 ir_node *op1, ir_node *op2) {
1624 return new_rd_Cmp(NULL, irg, block, op1, op2);
1626 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1627 ir_node *op, ir_mode *mode) {
1628 return new_rd_Conv(NULL, irg, block, op, mode);
1630 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1631 return new_rd_Cast(NULL, irg, block, op, to_tp);
1633 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1634 ir_node **in, ir_mode *mode) {
1635 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1637 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1638 ir_node *store, ir_node *adr, ir_mode *mode) {
1639 return new_rd_Load(NULL, irg, block, store, adr, mode);
1641 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1642 ir_node *store, ir_node *adr, ir_node *val) {
1643 return new_rd_Store(NULL, irg, block, store, adr, val);
1645 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1646 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1647 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1649 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1650 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1651 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1653 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1654 return new_rd_Sync(NULL, irg, block, arity, in);
1656 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1657 ir_mode *mode, long proj) {
1658 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1660 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1662 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1664 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1665 int arity, ir_node **in) {
1666 return new_rd_Tuple(NULL, irg, block, arity, in );
1668 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1669 ir_node *val, ir_mode *mode) {
1670 return new_rd_Id(NULL, irg, block, val, mode);
1672 ir_node *new_r_Bad(ir_graph *irg) {
1673 return new_rd_Bad(irg);
1675 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1676 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1678 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1679 return new_rd_Unknown(irg, m);
1681 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1682 return new_rd_CallBegin(NULL, irg, block, callee);
1684 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1685 return new_rd_EndReg(NULL, irg, block);
1687 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1688 return new_rd_EndExcept(NULL, irg, block);
1690 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1691 return new_rd_Break(NULL, irg, block);
1693 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1694 ir_mode *mode, long proj) {
1695 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1697 ir_node *new_r_NoMem(ir_graph *irg) {
1698 return new_rd_NoMem(irg);
1700 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1701 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1702 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1704 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1705 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1706 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1708 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1709 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1710 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1712 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1714 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1716 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1717 ir_node *store, ir_node *obj) {
1718 return new_rd_Raise(NULL, irg, block, store, obj);
1720 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1721 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1722 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1724 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1725 return new_rd_Pin(NULL, irg, block, node);
1727 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1728 int arity, ir_node *in[], ir_asm_constraint *inputs,
1729 int n_outs, ir_asm_constraint *outputs,
1730 int n_clobber, ident *clobber[], ident *asm_text) {
1731 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1734 /** ********************/
1735 /** public interfaces */
1736 /** construction tools */
1740 * - create a new Start node in the current block
1742 * @return s - pointer to the created Start node
1747 new_d_Start(dbg_info *db) {
1750 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1751 op_Start, mode_T, 0, NULL);
1753 res = optimize_node(res);
1754 IRN_VRFY_IRG(res, current_ir_graph);
1759 new_d_End(dbg_info *db) {
1761 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1762 op_End, mode_X, -1, NULL);
1763 res = optimize_node(res);
1764 IRN_VRFY_IRG(res, current_ir_graph);
1769 /* Constructs a Block with a fixed number of predecessors.
1770 Does set current_block. Can be used with automatic Phi
1771 node construction. */
1773 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1776 int has_unknown = 0;
1778 res = new_bd_Block(db, arity, in);
1780 /* Create and initialize array for Phi-node construction. */
1781 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1782 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1783 current_ir_graph->n_loc);
1784 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1787 for (i = arity-1; i >= 0; i--)
1788 if (get_irn_op(in[i]) == op_Unknown) {
1793 if (!has_unknown) res = optimize_node(res);
1794 current_ir_graph->current_block = res;
1796 IRN_VRFY_IRG(res, current_ir_graph);
1801 /* ***********************************************************************/
1802 /* Methods necessary for automatic Phi node creation */
1804 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1805 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1806 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1807 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1809 Call Graph: ( A ---> B == A "calls" B)
1811 get_value mature_immBlock
1819 get_r_value_internal |
1823 new_rd_Phi0 new_rd_Phi_in
1825 * *************************************************************************** */
1827 /** Creates a Phi node with 0 predecessors.
      *
      * Such a Phi0 is a placeholder used during SSA construction in blocks
      * that are not yet matured; it is later completed or replaced (see
      * phi_merge() and mature_immBlock() below).
      *
      * @param irg   the graph the node is constructed in
      * @param block the block the node is placed in
      * @param mode  the mode of the Phi node
      */
1828 static INLINE ir_node *
1829 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1832 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL); /* 0 predecessors by construction */
1833 IRN_VRFY_IRG(res, irg);
1839 * Internal constructor of a Phi node by a phi_merge operation.
1841 * @param irg the graph on which the Phi will be constructed
1842 * @param block the block in which the Phi will be constructed
1843 * @param mode the mode of the Phi node
1844 * @param in the input array of the phi node
1845 * @param ins number of elements in the input array
1846 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1847 * the value for which the new Phi is constructed
1849 static INLINE ir_node *
1850 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1851 ir_node **in, int ins, ir_node *phi0) {
1853 ir_node *res, *known;
1855 /* Allocate a new node on the obstack. The allocation copies the in
1857 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1858 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1860 /* This loop checks whether the Phi has more than one predecessor.
1861 If so, it is a real Phi node and we break the loop. Else the
1862 Phi node merges the same definition on several paths and therefore
1863 is not needed. Don't consider Bad nodes! */
1865 for (i = ins - 1; i >= 0; --i) {
1868 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1870 /* Optimize self referencing Phis: We can't detect them yet properly, as
1871 they still refer to the Phi0 they will replace. So replace right now. */
1872 if (phi0 && in[i] == phi0)
1875 if (in[i] == res || in[i] == known || is_Bad(in[i]))
1884 /* i < 0: there is at most one predecessor, we don't need a phi node. */
1887 edges_node_deleted(res, current_ir_graph);
1888 obstack_free(current_ir_graph->obst, res);
1889 if (is_Phi(known)) {
1890 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1891 order, an enclosing Phi node may become superfluous. */
1892 res = optimize_in_place_2(known);
1894 exchange(known, res);
1899 /* An undefined value, e.g., in unreachable code. */
1903 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1904 IRN_VRFY_IRG(res, irg);
1905 /* Memory Phis in endless loops must be kept alive.
1906 As we can't distinguish these easily we keep all of them alive. */
1907 if (is_Phi(res) && mode == mode_M)
1908 add_End_keepalive(get_irg_end(irg), res);
1912 } /* new_rd_Phi_in */
1915 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1917 #if PRECISE_EXC_CONTEXT
1919 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1922 * Construct a new frag_array for node n.
1923 * Copy the content from the current graph_arr of the corresponding block:
1924 * this is the current state.
1925 * Set ProjM(n) as current memory state.
1926 * Further the last entry in frag_arr of current block points to n. This
1927 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1929 static INLINE ir_node **new_frag_arr(ir_node *n) {
1933 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1934 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1935 sizeof(ir_node *)*current_ir_graph->n_loc);
1937 /* turn off optimization before allocating Proj nodes, as res isn't
1939 opt = get_opt_optimize(); set_optimize(0);
1940 /* Here we rely on the fact that all frag ops have Memory as first result! */
1941 if (get_irn_op(n) == op_Call)
1942 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1943 else if (get_irn_op(n) == op_CopyB)
1944 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
1946 assert((pn_Quot_M == pn_DivMod_M) &&
1947 (pn_Quot_M == pn_Div_M) &&
1948 (pn_Quot_M == pn_Mod_M) &&
1949 (pn_Quot_M == pn_Load_M) &&
1950 (pn_Quot_M == pn_Store_M) &&
1951 (pn_Quot_M == pn_Alloc_M) &&
1952 (pn_Quot_M == pn_Bound_M));
1953 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1957 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1959 } /* new_frag_arr */
1962 * Returns the frag_arr from a node.
1964 static INLINE ir_node **get_frag_arr(ir_node *n) {
1965 switch (get_irn_opcode(n)) {
1967 return n->attr.call.exc.frag_arr;
1969 return n->attr.alloc.exc.frag_arr;
1971 return n->attr.load.exc.frag_arr;
1973 return n->attr.store.exc.frag_arr;
1975 return n->attr.except.frag_arr;
1977 } /* get_frag_arr */
1980 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1981 #ifdef DEBUG_libfirm
1984 for (i = 1024; i >= 0; --i)
1989 if (frag_arr[pos] == NULL)
1990 frag_arr[pos] = val;
1991 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
1992 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1993 assert(arr != frag_arr && "Endless recursion detected");
1998 assert(!"potential endless recursion in set_frag_value");
1999 } /* set_frag_value */
2002 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2006 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
2008 frag_arr = get_frag_arr(cfOp);
2009 res = frag_arr[pos];
2011 if (block->attr.block.graph_arr[pos] != NULL) {
2012 /* There was a set_value() after the cfOp and no get_value() before that
2013 set_value(). We must build a Phi node now. */
2014 if (block->attr.block.is_matured) {
2015 int ins = get_irn_arity(block);
2017 NEW_ARR_A(ir_node *, nin, ins);
2018 res = phi_merge(block, pos, mode, nin, ins);
2020 res = new_rd_Phi0(current_ir_graph, block, mode);
2021 res->attr.phi.u.pos = pos;
2022 res->attr.phi.next = block->attr.block.phis;
2023 block->attr.block.phis = res;
2025 assert(res != NULL);
2026 /* It's a Phi, we can write this into all graph_arrs with NULL */
2027 set_frag_value(block->attr.block.graph_arr, pos, res);
2029 res = get_r_value_internal(block, pos, mode);
2030 set_frag_value(block->attr.block.graph_arr, pos, res);
2034 } /* get_r_frag_value_internal */
2035 #endif /* PRECISE_EXC_CONTEXT */
2038 * Check whether a control flownode cf_pred represents an exception flow.
2040 * @param cf_pred the control flow node
2041 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2043 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2045 * Note: all projections from a raise are "exceptional control flow", so we handle it
2046 * like a normal Jmp, because there is no "regular" one.
2047 * That's why Raise is no "fragile_op"!
2049 if (is_fragile_op(prev_cf_op)) {
2050 if (is_Proj(cf_pred)) {
2051 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2052 /* the regular control flow, NO exception */
2055 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2058 /* Hmm, exception but not a Proj? */
2059 assert(!"unexpected condition: fragile op without a proj");
2063 } /* is_exception_flow */
2066 * Computes the predecessors for the real phi node, and then
2067 * allocates and returns this node. The routine called to allocate the
2068 * node might optimize it away and return a real value.
2069 * This function must be called with an in-array of proper size.
2072 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2073 ir_node *prevBlock, *res, *phi0, *phi0_all;
2076 /* If this block has no value at pos create a Phi0 and remember it
2077 in graph_arr to break recursions.
2078 Else we may not set graph_arr as there a later value is remembered. */
2080 if (block->attr.block.graph_arr[pos] == NULL) {
2081 ir_graph *irg = current_ir_graph;
2083 if (block == get_irg_start_block(irg)) {
2084 /* Collapsing to Bad tarvals is no good idea.
2085 So we call a user-supplied routine here that deals with this case as
2086 appropriate for the given language. Sorrily the only help we can give
2087 here is the position.
2089 Even if all variables are defined before use, it can happen that
2090 we get to the start block, if a Cond has been replaced by a tuple
2091 (bad, jmp). In this case we call the function needlessly, eventually
2092 generating a non-existent error.
2093 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2096 if (default_initialize_local_variable != NULL) {
2097 ir_node *rem = get_cur_block();
2099 set_cur_block(block);
2100 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2104 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2105 /* We don't need to care about exception ops in the start block.
2106 There are none by definition. */
2107 return block->attr.block.graph_arr[pos];
2109 phi0 = new_rd_Phi0(irg, block, mode);
2110 block->attr.block.graph_arr[pos] = phi0;
2111 #if PRECISE_EXC_CONTEXT
2112 if (get_opt_precise_exc_context()) {
2113 /* Set graph_arr for fragile ops. Also here we should break recursion.
2114 We could choose a cyclic path through a cfop. But the recursion would
2115 break at some point. */
2116 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2122 /* This loop goes to all predecessor blocks of the block the Phi node
2123 is in and there finds the operands of the Phi node by calling
2124 get_r_value_internal. */
2125 for (i = 1; i <= ins; ++i) {
2126 ir_node *cf_pred = block->in[i];
2127 ir_node *prevCfOp = skip_Proj(cf_pred);
2129 if (is_Bad(prevCfOp)) {
2130 /* In case a Cond has been optimized we would get right to the start block
2131 with an invalid definition. */
2132 nin[i-1] = new_Bad();
2135 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2137 if (!is_Bad(prevBlock)) {
2138 #if PRECISE_EXC_CONTEXT
2139 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2140 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2141 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2144 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2146 nin[i-1] = new_Bad();
2150 /* We want to pass the Phi0 node to the constructor: this finds additional
2151 optimization possibilities.
2152 The Phi0 node either is allocated in this function, or it comes from
2153 a former call to get_r_value_internal(). In this case we may not yet
2154 exchange phi0, as this is done in mature_immBlock(). */
2156 phi0_all = block->attr.block.graph_arr[pos];
2157 if (!(is_Phi(phi0_all) &&
2158 (get_irn_arity(phi0_all) == 0) &&
2159 (get_nodes_block(phi0_all) == block)))
2165 /* After collecting all predecessors into the array nin a new Phi node
2166 with these predecessors is created. This constructor contains an
2167 optimization: If all predecessors of the Phi node are identical it
2168 returns the only operand instead of a new Phi node. */
2169 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2171 /* In case we allocated a Phi0 node at the beginning of this procedure,
2172 we need to exchange this Phi0 with the real Phi. */
2174 exchange(phi0, res);
2175 block->attr.block.graph_arr[pos] = res;
2176 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2177 only an optimization. */
2184 * This function returns the last definition of a value. In case
2185 * this value was last defined in a previous block, Phi nodes are
2186 * inserted. If the part of the firm graph containing the definition
2187 * is not yet constructed, a dummy Phi node is returned.
2189 * @param block the current block
2190 * @param pos the value number of the value searched
2191 * @param mode the mode of this value (needed for Phi construction)
2194 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2196 /* There are 4 cases to treat.
2198 1. The block is not mature and we visit it the first time. We can not
2199 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2200 predecessors is returned. This node is added to the linked list (block
2201 attribute "phis") of the containing block to be completed when this block is
2202 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2205 2. The value is already known in this block, graph_arr[pos] is set and we
2206 visit the block the first time. We can return the value without
2207 creating any new nodes.
2209 3. The block is mature and we visit it the first time. A Phi node needs
2210 to be created (phi_merge). If the Phi is not needed, as all its
2211 operands are the same value reaching the block through different
2212 paths, it's optimized away and the value itself is returned.
2214 4. The block is mature, and we visit it the second time. Now two
2215 subcases are possible:
2216 * The value was computed completely the last time we were here. This
2217 is the case if there is no loop. We can return the proper value.
2218 * The recursion that visited this node and set the flag did not
2219 return yet. We are computing a value in a loop and need to
2220 break the recursion. This case only happens if we visited
2221 the same block with phi_merge before, which inserted a Phi0.
2222 So we return the Phi0.
2225 /* case 4 -- already visited. */
2226 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2227 /* As phi_merge allocates a Phi0 this value is always defined. Here
2228 is the critical difference of the two algorithms. */
2229 assert(block->attr.block.graph_arr[pos]);
2230 return block->attr.block.graph_arr[pos];
2233 /* visited the first time */
2234 set_irn_visited(block, get_irg_visited(current_ir_graph));
2236 /* Get the local valid value */
2237 res = block->attr.block.graph_arr[pos];
2239 /* case 2 -- If the value is actually computed, return it. */
2243 if (block->attr.block.is_matured) { /* case 3 */
2245 /* The Phi has the same amount of ins as the corresponding block. */
2246 int ins = get_irn_arity(block);
2248 NEW_ARR_A(ir_node *, nin, ins);
2250 /* Phi merge collects the predecessors and then creates a node. */
2251 res = phi_merge(block, pos, mode, nin, ins);
2253 } else { /* case 1 */
2254 /* The block is not mature, we don't know how many in's are needed. A Phi
2255 with zero predecessors is created. Such a Phi node is called Phi0
2256 node. The Phi0 is then added to the list of Phi0 nodes in this block
2257 to be matured by mature_immBlock later.
2258 The Phi0 has to remember the pos of its internal value. If the real
2259 Phi is computed, pos is used to update the array with the local
2261 res = new_rd_Phi0(current_ir_graph, block, mode);
2262 res->attr.phi.u.pos = pos;
2263 res->attr.phi.next = block->attr.block.phis;
2264 block->attr.block.phis = res;
2267 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2269 /* The local valid value is available now. */
2270 block->attr.block.graph_arr[pos] = res;
2273 } /* get_r_value_internal */
2275 /* ************************************************************************** */
2278 * Finalize a Block node, when all control flows are known.
2279 * Acceptable parameters are only Block nodes.
2282 mature_immBlock(ir_node *block) {
2287 assert(is_Block(block));
2288 if (!get_Block_matured(block)) {
2289 ir_graph *irg = current_ir_graph;
2291 ins = ARR_LEN(block->in) - 1;
2292 /* Fix block parameters */
2293 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2295 /* An array for building the Phi nodes. */
2296 NEW_ARR_A(ir_node *, nin, ins);
2298 /* Traverse a chain of Phi nodes attached to this block and mature
2300 for (n = block->attr.block.phis; n; n = next) {
2301 inc_irg_visited(irg);
2302 next = n->attr.phi.next;
2303 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2306 block->attr.block.is_matured = 1;
2308 /* Now, as the block is a finished Firm node, we can optimize it.
2309 Since other nodes have been allocated since the block was created
2310 we can not free the node on the obstack. Therefore we have to call
2311 optimize_in_place().
2312 Unfortunately the optimization does not change a lot, as all allocated
2313 nodes refer to the unoptimized node.
2314 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2315 block = optimize_in_place_2(block);
2316 IRN_VRFY_IRG(block, irg);
2318 } /* mature_immBlock */
2321 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2322 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2326 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2327 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2331 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2332 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2333 } /* new_d_Const_long */
2336 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2337 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2338 } /* new_d_Const_type */
2342 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2343 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2347 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2348 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2352 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2355 assert(arg->op == op_Cond);
2356 arg->attr.cond.kind = fragmentary;
2357 arg->attr.cond.default_proj = max_proj;
2358 res = new_Proj(arg, mode_X, max_proj);
2360 } /* new_d_defaultProj */
2363 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2364 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2368 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2369 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2370 } /* new_d_strictConv */
2373 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2374 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2378 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2379 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2389 * Allocate a frag array for a node if the current graph state is phase_building.
2391 * @param irn the node for which the frag array should be allocated
2392 * @param op the opcode of the (original) node, if does not match opcode of irn,
2394 * @param frag_store the address of the frag store in irn attributes, if this
2395 * address contains a value != NULL, does nothing
/* All three guards below protect against allocating an array twice or for
 * the wrong node: the option must be on, the graph still under construction,
 * the node not optimized into a different opcode, and no array set yet. */
2397 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2398 if (get_opt_precise_exc_context()) {
2399 if ((current_ir_graph->phase_state == phase_building) &&
2400 (get_irn_op(irn) == op) && /* Could be optimized away. */
2401 !*frag_store) /* Could be a cse where the arr is already set. */ {
2402 *frag_store = new_frag_arr(irn);
2405 } /* firm_alloc_frag_arr */
/* Arithmetic nodes carrying a memory operand.  When PRECISE_EXC_CONTEXT is
 * enabled a fragment array is attached so the memory state can be tracked
 * per exception edge (see firm_alloc_frag_arr). */
2408 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2410 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2411 #if PRECISE_EXC_CONTEXT
2412 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2419 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2421 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2422 #if PRECISE_EXC_CONTEXT
2423 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2427 } /* new_d_DivMod */
2430 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2432 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2433 #if PRECISE_EXC_CONTEXT
2434 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2441 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2443 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2444 #if PRECISE_EXC_CONTEXT
/* NOTE(review): op_Div (not a distinct DivRL opcode) is passed here —
 * presumably DivRL is represented by an op_Div node; confirm against
 * new_bd_DivRL before "fixing" this. */
2445 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2452 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2454 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2455 #if PRECISE_EXC_CONTEXT
2456 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
/* Comparison and control-flow constructors for the current block. */
2475 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2476 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2480 new_d_Jmp(dbg_info *db) {
2481 return new_bd_Jmp(db, current_ir_graph->current_block);
2485 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2486 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2490 new_d_Cond(dbg_info *db, ir_node *c) {
2491 return new_bd_Cond(db, current_ir_graph->current_block, c);
/* Call may raise: attach a fragment array under PRECISE_EXC_CONTEXT. */
2495 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2498 res = new_bd_Call(db, current_ir_graph->current_block,
2499 store, callee, arity, in, tp);
2500 #if PRECISE_EXC_CONTEXT
2501 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2508 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2509 return new_bd_Return(db, current_ir_graph->current_block,
2511 } /* new_d_Return */
/* Memory operations; Load/Store/Alloc may raise, so each attaches a
 * fragment array when PRECISE_EXC_CONTEXT is enabled. */
2514 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2516 res = new_bd_Load(db, current_ir_graph->current_block,
2518 #if PRECISE_EXC_CONTEXT
2519 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2526 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2528 res = new_bd_Store(db, current_ir_graph->current_block,
2530 #if PRECISE_EXC_CONTEXT
2531 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2538 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2539 ir_where_alloc where) {
2541 res = new_bd_Alloc(db, current_ir_graph->current_block,
2542 store, size, alloc_type, where);
2543 #if PRECISE_EXC_CONTEXT
2544 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
/* Free does not attach a fragment array. */
2551 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2552 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2553 return new_bd_Free(db, current_ir_graph->current_block,
2554 store, ptr, size, free_type, where);
/* simpleSel: entity selection without array indices — a Sel with zero
 * index operands. */
2558 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2559 /* GL: objptr was called frame before. Frame was a bad choice for the name
2560 as the operand could as well be a pointer to a dynamic object. */
2562 return new_bd_Sel(db, current_ir_graph->current_block,
2563 store, objptr, 0, NULL, ent);
2564 } /* new_d_simpleSel */
2567 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2568 return new_bd_Sel(db, current_ir_graph->current_block,
2569 store, objptr, n_index, index, sel);
/* SymConsts are block-invariant and live in the start block. */
2573 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2574 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2576 } /* new_d_SymConst_type */
/* Untyped SymConst: same as above with firm_unknown_type. */
2579 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2580 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2581 value, kind, firm_unknown_type);
2582 } /* new_d_SymConst */
/* NOTE(review): forwards to new_rd_Sync (rd-interface) while siblings use
 * new_bd_* — presumably no bd-level Sync constructor exists; verify. */
2585 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2586 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
/* Body of new_d_Bad: delegates to the inline/macro variant. */
2592 return _new_d_Bad();
2596 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2597 return new_bd_Confirm(db, current_ir_graph->current_block,
2599 } /* new_d_Confirm */
2602 new_d_Unknown(ir_mode *m) {
2603 return new_bd_Unknown(m);
2604 } /* new_d_Unknown */
/* Interprocedural-view constructors (CallBegin/EndReg/EndExcept/Break/Filter). */
2607 new_d_CallBegin(dbg_info *db, ir_node *call) {
2608 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2609 } /* new_d_CallBegin */
2612 new_d_EndReg(dbg_info *db) {
2613 return new_bd_EndReg(db, current_ir_graph->current_block);
2614 } /* new_d_EndReg */
2617 new_d_EndExcept(dbg_info *db) {
2618 return new_bd_EndExcept(db, current_ir_graph->current_block);
2619 } /* new_d_EndExcept */
2622 new_d_Break(dbg_info *db) {
2623 return new_bd_Break(db, current_ir_graph->current_block);
2627 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2628 return new_bd_Filter(db, current_ir_graph->current_block,
2630 } /* new_d_Filter */
/* Parenthesized name suppresses a same-named function-like macro. */
2633 (new_d_NoMem)(void) {
2634 return _new_d_NoMem();
2638 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2639 ir_node *ir_true, ir_mode *mode) {
2640 return new_bd_Mux(db, current_ir_graph->current_block,
2641 sel, ir_false, ir_true, mode);
2645 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2646 return new_bd_Psi(db, current_ir_graph->current_block,
2647 arity, conds, vals, mode);
/* CopyB and Bound may raise: fragment arrays under PRECISE_EXC_CONTEXT. */
2650 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2651 ir_node *dst, ir_node *src, ir_type *data_type) {
2653 res = new_bd_CopyB(db, current_ir_graph->current_block,
2654 store, dst, src, data_type);
2655 #if PRECISE_EXC_CONTEXT
2656 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2662 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2663 return new_bd_InstOf(db, current_ir_graph->current_block,
2664 store, objptr, type);
2665 } /* new_d_InstOf */
2668 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2669 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2672 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2673 ir_node *idx, ir_node *lower, ir_node *upper) {
2675 res = new_bd_Bound(db, current_ir_graph->current_block,
2676 store, idx, lower, upper);
2677 #if PRECISE_EXC_CONTEXT
2678 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2684 new_d_Pin(dbg_info *db, ir_node *node) {
2685 return new_bd_Pin(db, current_ir_graph->current_block, node);
2689 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2690 int n_outs, ir_asm_constraint *outputs,
2691 int n_clobber, ident *clobber[], ident *asm_text) {
2692 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2695 /* ********************************************************************* */
2696 /* Comfortable interface with automatic Phi node construction. */
2697 /* (Uses also constructors of the other interfaces, except new_Block.) */
2698 /* ********************************************************************* */
2700 /* Block construction */
2701 /* immature Block without predecessors */
/* Create an immature block: no predecessors yet (dynamic in-array of
 * length -1), becomes the current block, and gets a zeroed graph_arr for
 * the automatic Phi-node construction.  Must be matured later. */
2703 new_d_immBlock(dbg_info *db) {
2706 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2707 /* creates a new dynamic in-array as length of in is -1 */
2708 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2709 current_ir_graph->current_block = res;
2711 /* macroblock head */
2714 res->attr.block.is_matured = 0;
2715 res->attr.block.is_dead = 0;
2716 res->attr.block.is_mb_head = 1;
2717 res->attr.block.has_label = 0;
2718 res->attr.block.irg = current_ir_graph;
2719 res->attr.block.backedge = NULL;
2720 res->attr.block.in_cg = NULL;
2721 res->attr.block.cg_backedge = NULL;
2722 res->attr.block.extblk = NULL;
2723 res->attr.block.region = NULL;
2724 res->attr.block.mb_depth = 0;
2725 res->attr.block.label = 0;
2727 set_Block_block_visited(res, 0);
2729 /* Create and initialize array for Phi-node construction. */
2730 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2731 current_ir_graph->n_loc);
2732 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2734 /* Immature block may not be optimized! */
2735 IRN_VRFY_IRG(res, current_ir_graph);
2738 } /* new_d_immBlock */
/* Convenience wrapper without debug info. */
2741 new_immBlock(void) {
2742 return new_d_immBlock(NULL);
2743 } /* new_immBlock */
2745 /* immature PartBlock with its predecessors */
/* A partBlock continues the macroblock of pred_jmp's block: it inherits
 * that block's macroblock header (in[0]), clears is_mb_head and sits one
 * level deeper in the macroblock nesting. */
2747 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2748 ir_node *res = new_d_immBlock(db);
2749 ir_node *blk = get_nodes_block(pred_jmp);
2751 res->in[0] = blk->in[0];
2752 assert(res->in[0] != NULL);
2753 add_immBlock_pred(res, pred_jmp);
2755 res->attr.block.is_mb_head = 0;
2756 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2759 } /* new_d_immPartBlock */
2762 new_immPartBlock(ir_node *pred_jmp) {
2763 return new_d_immPartBlock(NULL, pred_jmp);
2764 } /* new_immPartBlock */
2766 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new control-flow predecessor of an immature block.
 * n is the predecessor count before the append (in[0] is the macroblock
 * header slot, hence the -1).  Only valid on an unmatured mb-head block. */
2768 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2769 int n = ARR_LEN(block->in) - 1;
2771 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2772 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2773 assert(is_ir_node(jmp));
2775 ARR_APP1(ir_node *, block->in, jmp);
2777 hook_set_irn_n(block, n, jmp, NULL);
2778 } /* add_immBlock_pred */
2780 /* changing the current block */
/* Makes target the block to which subsequently constructed nodes are added. */
2782 set_cur_block(ir_node *target) {
2783 current_ir_graph->current_block = target;
2784 } /* set_cur_block */
2786 /* ************************ */
2787 /* parameter administration */
2789 /* get a value from the parameter array from the current block by its index */
/* Slot 0 of graph_arr holds the memory state, so local variable pos maps
 * to array index pos + 1.  inc_irg_visited marks a fresh walk for the
 * recursive SSA lookup in get_r_value_internal. */
2791 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2792 ir_graph *irg = current_ir_graph;
2793 assert(get_irg_phase_state(irg) == phase_building);
2794 inc_irg_visited(irg);
2797 return get_r_value_internal(irg->current_block, pos + 1, mode);
2800 /* get a value from the parameter array from the current block by its index */
2802 get_value(int pos, ir_mode *mode) {
2803 return get_d_value(NULL, pos, mode);
2806 /* set a value at position pos in the parameter array from the current block */
2808 set_value(int pos, ir_node *value) {
2809 ir_graph *irg = current_ir_graph;
2810 assert(get_irg_phase_state(irg) == phase_building);
2811 assert(pos+1 < irg->n_loc);
2812 assert(is_ir_node(value));
2813 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2816 /* Find the value number for a node in the current block.*/
/* Linear scan over the current block's graph_arr, skipping slot 0 (memory). */
2818 find_value(ir_node *value) {
2820 ir_node *bl = current_ir_graph->current_block;
2822 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2823 if (bl->attr.block.graph_arr[i] == value)
2828 /* get the current store */
/* The memory state lives in graph_arr slot 0 (mode_M). */
2831 ir_graph *irg = current_ir_graph;
2833 assert(get_irg_phase_state(irg) == phase_building);
2834 /* GL: one could call get_value instead */
2835 inc_irg_visited(irg);
2836 return get_r_value_internal(irg->current_block, 0, mode_M);
2839 /* set the current store: handles automatic Sync construction for Load nodes */
/* When auto-Sync creation is enabled, consecutive non-volatile Loads are
 * decoupled: a Load following a Sync is hoisted and added as another Sync
 * predecessor; a Load directly following another Load spawns a fresh
 * two-input Sync.  The resulting store becomes graph_arr slot 0. */
2841 set_store(ir_node *store) {
2842 ir_node *load, *pload, *pred, *in[2];
2844 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2845 /* Beware: due to dead code elimination, a store might become a Bad node even in
2846 the construction phase. */
2847 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2849 if (get_opt_auto_create_sync()) {
2850 /* handle non-volatile Load nodes by automatically creating Sync's */
2851 load = skip_Proj(store);
2852 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2853 pred = get_Load_mem(load);
2855 if (is_Sync(pred)) {
2856 /* a Load after a Sync: move it up */
2857 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2859 set_Load_mem(load, get_memop_mem(mem));
2860 add_Sync_pred(pred, store);
2863 pload = skip_Proj(pred);
2864 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2865 /* a Load after a Load: create a new Sync */
2866 set_Load_mem(load, get_Load_mem(pload));
2870 store = new_Sync(2, in);
2875 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keeps ka alive by adding it to the keep-alive list of the graph's End node. */
2879 keep_alive(ir_node *ka) {
2880 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2883 /* --- Useful access routines --- */
2884 /* Returns the current block of the current graph. To set the current
2885 block use set_cur_block. */
2886 ir_node *get_cur_block(void) {
2887 return get_irg_current_block(current_ir_graph);
2888 } /* get_cur_block */
2890 /* Returns the frame type of the current graph */
2891 ir_type *get_cur_frame_type(void) {
2892 return get_irg_frame_type(current_ir_graph);
2893 } /* get_cur_frame_type */
2896 /* ********************************************************************* */
2899 /* call once for each run of the library */
/* Installs the language-dependent callback used to initialize local
 * variables that are read before being written. */
2901 firm_init_cons(uninitialized_local_variable_func_t *func) {
2902 default_initialize_local_variable = func;
2903 } /* firm_init_cons */
/* Finalizes construction of every graph in the program and moves the
 * whole program representation to phase_high. */
2906 irp_finalize_cons(void) {
2908 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2909 irg_finalize_cons(get_irp_irg(i));
2911 irp->phase_state = phase_high;
2912 } /* irp_finalize_cons */
/* --- Plain constructors -----------------------------------------------
 * Convenience wrappers around the d-interface with NULL debug info.
 * NOTE(review): the closing braces of these one-line wrappers are not
 * visible in this line-sampled excerpt. */
2915 ir_node *new_Block(int arity, ir_node **in) {
2916 return new_d_Block(NULL, arity, in);
2918 ir_node *new_Start(void) {
2919 return new_d_Start(NULL);
2921 ir_node *new_End(void) {
2922 return new_d_End(NULL);
2924 ir_node *new_Jmp(void) {
2925 return new_d_Jmp(NULL);
2927 ir_node *new_IJmp(ir_node *tgt) {
2928 return new_d_IJmp(NULL, tgt);
2930 ir_node *new_Cond(ir_node *c) {
2931 return new_d_Cond(NULL, c);
2933 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2934 return new_d_Return(NULL, store, arity, in);
2936 ir_node *new_Const(ir_mode *mode, tarval *con) {
2937 return new_d_Const(NULL, mode, con);
2940 ir_node *new_Const_long(ir_mode *mode, long value) {
2941 return new_d_Const_long(NULL, mode, value);
/* Derives the mode from the given type. */
2944 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2945 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2948 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2949 return new_d_SymConst_type(NULL, mode, value, kind, type);
2951 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2952 return new_d_SymConst(NULL, mode, value, kind);
2954 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2955 return new_d_simpleSel(NULL, store, objptr, ent);
2957 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2959 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2961 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2963 return new_d_Call(NULL, store, callee, arity, in, tp);
/* Arithmetic and bitwise wrappers. */
2965 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2966 return new_d_Add(NULL, op1, op2, mode);
2968 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2969 return new_d_Sub(NULL, op1, op2, mode);
2971 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2972 return new_d_Minus(NULL, op, mode);
2974 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2975 return new_d_Mul(NULL, op1, op2, mode);
2977 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2978 return new_d_Mulh(NULL, op1, op2, mode);
2980 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2981 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2983 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2984 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2986 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2987 return new_d_Div(NULL, memop, op1, op2, mode, state);
2989 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2990 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
2992 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2993 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2995 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2996 return new_d_Abs(NULL, op, mode);
2998 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2999 return new_d_And(NULL, op1, op2, mode);
3001 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3002 return new_d_Or(NULL, op1, op2, mode);
3004 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3005 return new_d_Eor(NULL, op1, op2, mode);
3007 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3008 return new_d_Not(NULL, op, mode);
3010 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3011 return new_d_Shl(NULL, op, k, mode);
3013 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3014 return new_d_Shr(NULL, op, k, mode);
3016 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3017 return new_d_Shrs(NULL, op, k, mode);
3019 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
3020 return new_d_Rot(NULL, op, k, mode);
3022 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3023 return new_d_Carry(NULL, op1, op2, mode);
3025 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3026 return new_d_Borrow(NULL, op1, op2, mode);
3028 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3029 return new_d_Cmp(NULL, op1, op2);
/* Conversion, memory and miscellaneous wrappers. */
3031 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3032 return new_d_Conv(NULL, op, mode);
3034 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3035 return new_d_strictConv(NULL, op, mode);
3037 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3038 return new_d_Cast(NULL, op, to_tp);
3040 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3041 return new_d_Phi(NULL, arity, in, mode);
3043 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3044 return new_d_Load(NULL, store, addr, mode);
3046 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3047 return new_d_Store(NULL, store, addr, val);
3049 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3050 ir_where_alloc where) {
3051 return new_d_Alloc(NULL, store, size, alloc_type, where);
3053 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3054 ir_type *free_type, ir_where_alloc where) {
3055 return new_d_Free(NULL, store, ptr, size, free_type, where);
3057 ir_node *new_Sync(int arity, ir_node *in[]) {
3058 return new_d_Sync(NULL, arity, in);
3060 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3061 return new_d_Proj(NULL, arg, mode, proj);
3063 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3064 return new_d_defaultProj(NULL, arg, max_proj);
3066 ir_node *new_Tuple(int arity, ir_node **in) {
3067 return new_d_Tuple(NULL, arity, in);
3069 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3070 return new_d_Id(NULL, val, mode);
/* NOTE(review): the body of new_Bad is not visible in this excerpt. */
3072 ir_node *new_Bad(void) {
3075 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3076 return new_d_Confirm(NULL, val, bound, cmp);
3078 ir_node *new_Unknown(ir_mode *m) {
3079 return new_d_Unknown(m);
3081 ir_node *new_CallBegin(ir_node *callee) {
3082 return new_d_CallBegin(NULL, callee);
3084 ir_node *new_EndReg(void) {
3085 return new_d_EndReg(NULL);
3087 ir_node *new_EndExcept(void) {
3088 return new_d_EndExcept(NULL);
3090 ir_node *new_Break(void) {
3091 return new_d_Break(NULL);
3093 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3094 return new_d_Filter(NULL, arg, mode, proj);
3096 ir_node *new_NoMem(void) {
3097 return new_d_NoMem();
3099 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3100 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3102 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3103 return new_d_Psi(NULL, arity, conds, vals, mode);
3105 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3106 return new_d_CopyB(NULL, store, dst, src, data_type);
3108 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3109 return new_d_InstOf(NULL, store, objptr, ent);
3111 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3112 return new_d_Raise(NULL, store, obj);
3114 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3115 return new_d_Bound(NULL, store, idx, lower, upper);
3117 ir_node *new_Pin(ir_node *node) {
3118 return new_d_Pin(NULL, node);
3120 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3121 int n_outs, ir_asm_constraint *outputs,
3122 int n_clobber, ident *clobber[], ident *asm_text) {
3123 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3126 /* create a new anchor node */
/* The Anchor is blockless (NULL block) with anchor_last all-NULL inputs
 * that are filled in later; it carries mode_ANY. */
3127 ir_node *new_Anchor(ir_graph *irg) {
3128 ir_node *in[anchor_last];
3129 memset(in, 0, sizeof(in));
3130 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);