2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
33 #include "irgraph_t.h"
37 #include "firm_common_t.h"
44 #include "irbackedge_t.h"
46 #include "iredges_t.h"
/* NOTE(review): extraction appears to have dropped lines here (e.g. the
   #ifdef that selects between the two IRN_VRFY_IRG definitions below) —
   confirm against the full file. */
50 #if USE_EXPLICIT_PHI_IN_STACK
51 /* A stack needed for the automatic Phi node construction in constructor
52 Phi_in. Redefinition in irgraph.c!! */
57 typedef struct Phi_in_stack Phi_in_stack;
60 /* when we need verifying */
/* Empty variant: verification compiled out. */
62 # define IRN_VRFY_IRG(res, irg)
/* Debug variant: forwards to the graph verifier. */
64 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
68 * Language dependent variable initialization callback.
/* Frontend-installable hook used when a local variable is read before
   being set; NULL until a frontend registers one. */
70 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/*
 * Macro generators for the "bd" (block + dbg_info) level constructors.
 * Each expands to a new_bd_<instr>() function that builds the node in
 * current_ir_graph, runs local optimization and verification.
 * NOTE(review): several continuation lines (res declaration, in[] setup,
 * return, closing brace) were lost in extraction — see the full file.
 */
73 #define NEW_BD_BINOP(instr) \
75 new_bd_##instr(dbg_info *db, ir_node *block, \
76 ir_node *op1, ir_node *op2, ir_mode *mode) \
80 ir_graph *irg = current_ir_graph; \
83 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
84 res = optimize_node(res); \
85 IRN_VRFY_IRG(res, irg); \
89 /* creates a bd constructor for an unop */ \
90 #define NEW_BD_UNOP(instr) \
92 new_bd_##instr(dbg_info *db, ir_node *block, \
93 ir_node *op, ir_mode *mode) \
96 ir_graph *irg = current_ir_graph; \
97 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
98 res = optimize_node(res); \
99 IRN_VRFY_IRG(res, irg); \
103 /* creates a bd constructor for an divop */ \
104 #define NEW_BD_DIVOP(instr) \
106 new_bd_##instr(dbg_info *db, ir_node *block, \
107 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
111 ir_graph *irg = current_ir_graph; \
115 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
116 res->attr.divmod.exc.pin_state = state; \
117 res->attr.divmod.res_mode = mode; \
118 res->attr.divmod.no_remainder = 0; \
119 res = optimize_node(res); \
120 IRN_VRFY_IRG(res, irg); \
/*
 * Macro generators for the "rd" (graph + block + dbg_info) and "d"
 * (dbg_info only, uses current block) constructor levels.  The rd
 * variants temporarily switch current_ir_graph to the given irg,
 * delegate to the bd constructor, then restore the previous graph.
 */
125 #define NEW_RD_BINOP(instr) \
127 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
128 ir_node *op1, ir_node *op2, ir_mode *mode) \
131 ir_graph *rem = current_ir_graph; \
132 current_ir_graph = irg; \
133 res = new_bd_##instr(db, block, op1, op2, mode); \
134 current_ir_graph = rem; \
138 /* creates a rd constructor for an unop */ \
139 #define NEW_RD_UNOP(instr) \
141 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
142 ir_node *op, ir_mode *mode) \
145 ir_graph *rem = current_ir_graph; \
146 current_ir_graph = irg; \
147 res = new_bd_##instr(db, block, op, mode); \
148 current_ir_graph = rem; \
152 /* creates a rd constructor for an divop */ \
153 #define NEW_RD_DIVOP(instr) \
155 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
156 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
159 ir_graph *rem = current_ir_graph; \
160 current_ir_graph = irg; \
161 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
162 current_ir_graph = rem; \
166 /* creates a d constructor for an binop */ \
167 #define NEW_D_BINOP(instr) \
169 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
170 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
173 /* creates a d constructor for an unop */ \
174 #define NEW_D_UNOP(instr) \
176 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
177 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
182 * Constructs a Block with a fixed number of predecessors.
183 * Does not set current_block. Cannot be used with automatic
184 * Phi node construction.
/* The Block is its own scope: note the NULL block argument below.
   NOTE(review): res declaration and return were dropped by extraction. */
187 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
189 ir_graph *irg = current_ir_graph;
191 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
193 /* macroblock header */
/* Initialize every field of the block attribute explicitly. */
196 res->attr.block.is_dead = 0;
197 res->attr.block.is_mb_head = 1;
198 res->attr.block.has_label = 0;
199 res->attr.block.irg = irg;
200 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
201 res->attr.block.in_cg = NULL;
202 res->attr.block.cg_backedge = NULL;
203 res->attr.block.extblk = NULL;
204 res->attr.block.mb_depth = 0;
205 res->attr.block.label = 0;
/* All predecessors are already known, so the block is mature. */
207 set_Block_matured(res, 1);
208 set_Block_block_visited(res, 0);
210 IRN_VRFY_IRG(res, irg);
/* Start node: tuple (mode_T) with no inputs. */
215 new_bd_Start(dbg_info *db, ir_node *block) {
217 ir_graph *irg = current_ir_graph;
219 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
221 IRN_VRFY_IRG(res, irg);
/* End node: dynamic arity (-1); keep-alive edges are added later. */
226 new_bd_End(dbg_info *db, ir_node *block) {
228 ir_graph *irg = current_ir_graph;
230 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
232 IRN_VRFY_IRG(res, irg);
237 * Creates a Phi node with all predecessors. Calling this constructor
238 * is only allowed if the corresponding block is mature.
241 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
243 ir_graph *irg = current_ir_graph;
247 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block IS mature, the Phi arity must match the block arity. */
249 if (get_Block_matured(block))
250 assert(get_irn_arity(block) == arity);
252 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
254 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
/* Scan inputs for Unknown predecessors; such Phis must not be optimized
   away yet (has_unknown is presumably set in a dropped line). */
256 for (i = arity - 1; i >= 0; --i)
257 if (get_irn_op(in[i]) == op_Unknown) {
262 if (!has_unknown) res = optimize_node(res);
263 IRN_VRFY_IRG(res, irg);
265 /* Memory Phis in endless loops must be kept alive.
266 As we can't distinguish these easily we keep all of them alive. */
267 if ((res->op == op_Phi) && (mode == mode_M))
268 add_End_keepalive(get_irg_end(irg), res);
/* Const with explicit type.  Note: the 'block' parameter is ignored —
   constants are always placed in the start block (see call below). */
273 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
275 ir_graph *irg = current_ir_graph;
278 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
279 res->attr.con.tv = con;
280 set_Const_type(res, tp); /* Call method because of complex assertion. */
281 res = optimize_node (res);
282 assert(get_Const_type(res) == tp);
283 IRN_VRFY_IRG(res, irg);
286 } /* new_bd_Const_type */
/* Const without a type: delegate with firm_unknown_type. */
289 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
290 ir_graph *irg = current_ir_graph;
292 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
/* Convenience: Const from a host 'long' via tarval conversion. */
296 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
297 ir_graph *irg = current_ir_graph;
299 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
300 } /* new_bd_Const_long */
/* Id node: single-input identity, normally folded by optimize_node. */
303 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
305 ir_graph *irg = current_ir_graph;
307 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
308 res = optimize_node(res);
309 IRN_VRFY_IRG(res, irg);
/* Proj: project one result out of a tuple-producing predecessor. */
314 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
317 ir_graph *irg = current_ir_graph;
319 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
320 res->attr.proj = proj;
323 assert(get_Proj_pred(res));
324 assert(get_nodes_block(get_Proj_pred(res)));
326 res = optimize_node(res);
328 IRN_VRFY_IRG(res, irg);
/* Default Proj of a Cond: marks the Cond fragmentary and records the
   default projection number before building the Proj. */
333 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
336 ir_graph *irg = current_ir_graph;
338 assert(arg->op == op_Cond);
339 arg->attr.cond.kind = fragmentary;
340 arg->attr.cond.default_proj = max_proj;
341 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
343 } /* new_bd_defaultProj */
/* Conv: mode conversion; strict_flag marks strict (no-float-truncate) Convs. */
346 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
348 ir_graph *irg = current_ir_graph;
350 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
351 res->attr.conv.strict = strict_flag;
352 res = optimize_node(res);
353 IRN_VRFY_IRG(res, irg);
/* Cast: type cast, mode is preserved; only atomic target types allowed. */
358 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
360 ir_graph *irg = current_ir_graph;
362 assert(is_atomic_type(to_tp));
364 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
365 res->attr.cast.totype = to_tp;
366 res = optimize_node(res);
367 IRN_VRFY_IRG(res, irg);
/* Tuple: groups arity values into one mode_T node. */
372 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
374 ir_graph *irg = current_ir_graph;
376 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
377 res = optimize_node (res);
378 IRN_VRFY_IRG(res, irg);
403 /** Creates a remainderless Div node. */
404 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
405 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
409 ir_graph *irg = current_ir_graph;
413 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
414 res->attr.divmod.exc.pin_state = state;
415 res->attr.divmod.res_mode = mode;
/* no_remainder = 1 is what distinguishes DivRL from a plain Div. */
416 res->attr.divmod.no_remainder = 1;
417 res = optimize_node(res);
418 IRN_VRFY_IRG(res, irg);
/* Cmp: comparison, results are selected via Proj (hence mode_T). */
423 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
426 ir_graph *irg = current_ir_graph;
429 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
430 res = optimize_node(res);
431 IRN_VRFY_IRG(res, irg);
/* Jmp: unconditional branch, no inputs, mode_X. */
436 new_bd_Jmp(dbg_info *db, ir_node *block) {
438 ir_graph *irg = current_ir_graph;
440 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
441 res = optimize_node(res);
442 IRN_VRFY_IRG(res, irg);
/* IJmp: indirect branch to a computed target. */
447 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
449 ir_graph *irg = current_ir_graph;
451 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
452 res = optimize_node(res);
453 IRN_VRFY_IRG(res, irg);
/* Cond: conditional branch / switch; starts as a dense jump table. */
458 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
460 ir_graph *irg = current_ir_graph;
462 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
463 res->attr.cond.kind = dense;
464 res->attr.cond.default_proj = 0;
465 res->attr.cond.pred = COND_JMP_PRED_NONE;
466 res = optimize_node(res);
467 IRN_VRFY_IRG(res, irg);
/* Call: inputs are [store, callee, arg0..argN-1]; r_in assembled below. */
472 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
473 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
477 ir_graph *irg = current_ir_graph;
480 NEW_ARR_A(ir_node *, r_in, r_arity);
483 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
485 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
487 assert((get_unknown_type() == tp) || is_Method_type(tp));
488 set_Call_type(res, tp);
489 res->attr.call.exc.pin_state = op_pin_state_pinned;
490 res->attr.call.callee_arr = NULL;
491 res = optimize_node(res);
492 IRN_VRFY_IRG(res, irg);
/* Return: inputs are [store, res0..resN-1]; mode_X control flow node. */
497 new_bd_Return(dbg_info *db, ir_node *block,
498 ir_node *store, int arity, ir_node **in) {
502 ir_graph *irg = current_ir_graph;
505 NEW_ARR_A (ir_node *, r_in, r_arity);
507 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
508 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
509 res = optimize_node(res);
510 IRN_VRFY_IRG(res, irg);
512 } /* new_bd_Return */
/* Load: [store, adr] -> mode_T; defaults: pinned, non-volatile, aligned. */
515 new_bd_Load(dbg_info *db, ir_node *block,
516 ir_node *store, ir_node *adr, ir_mode *mode) {
519 ir_graph *irg = current_ir_graph;
523 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
524 res->attr.load.exc.pin_state = op_pin_state_pinned;
525 res->attr.load.load_mode = mode;
526 res->attr.load.volatility = volatility_non_volatile;
527 res->attr.load.aligned = align_is_aligned;
528 res = optimize_node(res);
529 IRN_VRFY_IRG(res, irg);
/* Store: [store, adr, val] -> mode_T; same default attributes as Load. */
534 new_bd_Store(dbg_info *db, ir_node *block,
535 ir_node *store, ir_node *adr, ir_node *val) {
538 ir_graph *irg = current_ir_graph;
543 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
544 res->attr.store.exc.pin_state = op_pin_state_pinned;
545 res->attr.store.volatility = volatility_non_volatile;
546 res->attr.store.aligned = align_is_aligned;
547 res = optimize_node(res);
548 IRN_VRFY_IRG(res, irg);
/* Alloc: allocate 'size' bytes of alloc_type on stack or heap ('where'). */
553 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
554 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
557 ir_graph *irg = current_ir_graph;
561 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
562 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
563 res->attr.alloc.where = where;
564 res->attr.alloc.type = alloc_type;
565 res = optimize_node(res);
566 IRN_VRFY_IRG(res, irg);
/* Free: release memory; produces only the new memory state (mode_M). */
571 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
572 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
575 ir_graph *irg = current_ir_graph;
580 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
581 res->attr.free.where = where;
582 res->attr.free.type = free_type;
583 res = optimize_node(res);
584 IRN_VRFY_IRG(res, irg);
/* Sel: select entity 'ent' from a compound at objptr; index inputs in 'in'.
   Result mode depends on whether the entity is a method (code pointer). */
589 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
590 int arity, ir_node **in, ir_entity *ent) {
594 ir_graph *irg = current_ir_graph;
595 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
597 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
600 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
603 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
605 * Sel's can select functions which should be of mode mode_P_code.
607 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
608 res->attr.sel.ent = ent;
609 res = optimize_node(res);
610 IRN_VRFY_IRG(res, irg);
/* SymConst with explicit type: symbolic constant (address, size, ...). */
615 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
616 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
617 ir_graph *irg = current_ir_graph;
618 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
620 res->attr.symc.num = symkind;
621 res->attr.symc.sym = value;
622 res->attr.symc.tp = tp;
624 res = optimize_node(res);
625 IRN_VRFY_IRG(res, irg);
627 } /* new_bd_SymConst_type */
/* Sync: merges memory states; built empty, preds added by callers. */
630 new_bd_Sync(dbg_info *db, ir_node *block) {
632 ir_graph *irg = current_ir_graph;
634 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
635 /* no need to call optimize node here, Sync are always created with no predecessors */
636 IRN_VRFY_IRG(res, irg);
/* Confirm: asserts 'val' relates to 'bound' by comparison 'cmp'. */
641 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
642 ir_node *in[2], *res;
643 ir_graph *irg = current_ir_graph;
647 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
648 res->attr.confirm.cmp = cmp;
649 res = optimize_node(res);
650 IRN_VRFY_IRG(res, irg);
652 } /* new_bd_Confirm */
/* Unknown: placeholder value of mode m, placed in the start block. */
655 new_bd_Unknown(ir_mode *m) {
657 ir_graph *irg = current_ir_graph;
659 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
660 res = optimize_node(res);
662 } /* new_bd_Unknown */
/* CallBegin: interprocedural-view marker for a Call; input is the callee ptr. */
665 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
668 ir_graph *irg = current_ir_graph;
670 in[0] = get_Call_ptr(call);
671 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
672 /* res->attr.callbegin.irg = irg; */
673 res->attr.callbegin.call = call;
674 res = optimize_node(res);
675 IRN_VRFY_IRG(res, irg);
677 } /* new_bd_CallBegin */
/* EndReg: regular interprocedural end; registered on the graph. */
680 new_bd_EndReg(dbg_info *db, ir_node *block) {
682 ir_graph *irg = current_ir_graph;
684 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
685 set_irg_end_reg(irg, res);
686 IRN_VRFY_IRG(res, irg);
688 } /* new_bd_EndReg */
/* EndExcept: exceptional interprocedural end; registered on the graph. */
691 new_bd_EndExcept(dbg_info *db, ir_node *block) {
693 ir_graph *irg = current_ir_graph;
695 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
696 set_irg_end_except(irg, res);
697 IRN_VRFY_IRG (res, irg);
699 } /* new_bd_EndExcept */
/* Break: control-flow break node (interprocedural view), mode_X. */
702 new_bd_Break(dbg_info *db, ir_node *block) {
704 ir_graph *irg = current_ir_graph;
706 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
707 res = optimize_node(res);
708 IRN_VRFY_IRG(res, irg);
/* Filter: interprocedural analogue of Proj; cg edges filled in later. */
713 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
716 ir_graph *irg = current_ir_graph;
718 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
719 res->attr.filter.proj = proj;
720 res->attr.filter.in_cg = NULL;
721 res->attr.filter.backedge = NULL;
724 assert(get_Proj_pred(res));
725 assert(get_nodes_block(get_Proj_pred(res)));
727 res = optimize_node(res);
728 IRN_VRFY_IRG(res, irg);
730 } /* new_bd_Filter */
/* Mux: value select — sel ? ir_true : ir_false (inputs set in dropped lines). */
733 new_bd_Mux(dbg_info *db, ir_node *block,
734 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
737 ir_graph *irg = current_ir_graph;
743 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
746 res = optimize_node(res);
747 IRN_VRFY_IRG(res, irg);
/* Psi: generalized Mux with 'arity' condition/value pairs plus a default;
   inputs are interleaved [cond0, val0, cond1, val1, ..., default]. */
752 new_bd_Psi(dbg_info *db, ir_node *block,
753 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
756 ir_graph *irg = current_ir_graph;
759 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
761 for (i = 0; i < arity; ++i) {
763 in[2 * i + 1] = vals[i];
767 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
770 res = optimize_node(res);
771 IRN_VRFY_IRG(res, irg);
/* CopyB: block copy of data_type from src to dst, threading the store. */
776 new_bd_CopyB(dbg_info *db, ir_node *block,
777 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
780 ir_graph *irg = current_ir_graph;
786 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
788 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
789 res->attr.copyb.data_type = data_type;
790 res = optimize_node(res);
791 IRN_VRFY_IRG(res, irg);
796 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
797 ir_node *objptr, ir_type *type) {
800 ir_graph *irg = current_ir_graph;
804 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
805 res->attr.instof.type = type;
806 res = optimize_node(res);
807 IRN_VRFY_IRG(res, irg);
809 } /* new_bd_InstOf */
/* Raise: throw exception object 'obj'; inputs [store, obj]. */
812 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
815 ir_graph *irg = current_ir_graph;
819 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
820 res = optimize_node(res);
821 IRN_VRFY_IRG(res, irg);
/* Bound: checks lower <= idx < upper; inputs [store, idx, lower, upper]. */
826 new_bd_Bound(dbg_info *db, ir_node *block,
827 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
830 ir_graph *irg = current_ir_graph;
836 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
837 res->attr.bound.exc.pin_state = op_pin_state_pinned;
838 res = optimize_node(res);
839 IRN_VRFY_IRG(res, irg);
/* Pin: pins a value to this block; same mode as the pinned node. */
844 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
846 ir_graph *irg = current_ir_graph;
848 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
849 res = optimize_node(res);
850 IRN_VRFY_IRG(res, irg);
/* ASM: inline assembler; constraint/clobber arrays are copied onto the
   graph's obstack so the node owns its own storage. */
855 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
856 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
858 ir_graph *irg = current_ir_graph;
861 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
862 res->attr.assem.pin_state = op_pin_state_pinned;
863 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
864 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
865 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
866 res->attr.assem.asm_text = asm_text;
868 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
869 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
870 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
872 res = optimize_node(res);
873 IRN_VRFY_IRG(res, irg);
877 /* --------------------------------------------- */
878 /* private interfaces, for professional use only */
879 /* --------------------------------------------- */
881 /* Constructs a Block with a fixed number of predecessors.
882 Does not set current_block. Can not be used with automatic
883 Phi node construction. */
/* rd-level wrappers: switch current_ir_graph to 'irg', delegate to the
   bd constructor, restore the previous graph.  Same pattern throughout. */
885 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
886 ir_graph *rem = current_ir_graph;
889 current_ir_graph = irg;
890 res = new_bd_Block(db, arity, in);
891 current_ir_graph = rem;
897 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
898 ir_graph *rem = current_ir_graph;
901 current_ir_graph = irg;
902 res = new_bd_Start(db, block);
903 current_ir_graph = rem;
909 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
911 ir_graph *rem = current_ir_graph;
913 current_ir_graph = irg;
914 res = new_bd_End(db, block);
915 current_ir_graph = rem;
920 /* Creates a Phi node with all predecessors. Calling this constructor
921 is only allowed if the corresponding block is mature. */
923 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
925 ir_graph *rem = current_ir_graph;
927 current_ir_graph = irg;
928 res = new_bd_Phi(db, block,arity, in, mode);
929 current_ir_graph = rem;
935 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
937 ir_graph *rem = current_ir_graph;
939 current_ir_graph = irg;
940 res = new_bd_Const_type(db, block, mode, con, tp);
941 current_ir_graph = rem;
944 } /* new_rd_Const_type */
947 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
949 ir_graph *rem = current_ir_graph;
951 current_ir_graph = irg;
952 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
953 current_ir_graph = rem;
959 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
960 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
961 } /* new_rd_Const_long */
/* rd wrappers for Id/Proj/defaultProj/Conv/Cast/Tuple — graph-switch pattern. */
964 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
966 ir_graph *rem = current_ir_graph;
968 current_ir_graph = irg;
969 res = new_bd_Id(db, block, val, mode);
970 current_ir_graph = rem;
976 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
979 ir_graph *rem = current_ir_graph;
981 current_ir_graph = irg;
982 res = new_bd_Proj(db, block, arg, mode, proj);
983 current_ir_graph = rem;
989 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
992 ir_graph *rem = current_ir_graph;
994 current_ir_graph = irg;
995 res = new_bd_defaultProj(db, block, arg, max_proj);
996 current_ir_graph = rem;
999 } /* new_rd_defaultProj */
/* Conv at rd level is always non-strict (strict_flag = 0). */
1002 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1004 ir_graph *rem = current_ir_graph;
1006 current_ir_graph = irg;
1007 res = new_bd_Conv(db, block, op, mode, 0);
1008 current_ir_graph = rem;
1014 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1016 ir_graph *rem = current_ir_graph;
1018 current_ir_graph = irg;
1019 res = new_bd_Cast(db, block, op, to_tp);
1020 current_ir_graph = rem;
1026 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1028 ir_graph *rem = current_ir_graph;
1030 current_ir_graph = irg;
1031 res = new_bd_Tuple(db, block, arity, in);
1032 current_ir_graph = rem;
1035 } /* new_rd_Tuple */
/* Instantiate the generated rd constructors for arithmetic ops. */
1043 NEW_RD_DIVOP(DivMod)
1056 NEW_RD_BINOP(Borrow)
1058 /* creates a rd constructor for an divRL */
1059 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1060 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1063 ir_graph *rem = current_ir_graph;
1064 current_ir_graph = irg;
1065 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1066 current_ir_graph = rem;
/* rd wrappers for Cmp/Jmp/IJmp/Cond/Call/Return/Load/Store/Alloc/Free —
   all follow the save/switch/delegate/restore graph pattern. */
1071 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1072 ir_node *op1, ir_node *op2) {
1074 ir_graph *rem = current_ir_graph;
1076 current_ir_graph = irg;
1077 res = new_bd_Cmp(db, block, op1, op2);
1078 current_ir_graph = rem;
1084 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1086 ir_graph *rem = current_ir_graph;
1088 current_ir_graph = irg;
1089 res = new_bd_Jmp(db, block);
1090 current_ir_graph = rem;
1096 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1098 ir_graph *rem = current_ir_graph;
1100 current_ir_graph = irg;
1101 res = new_bd_IJmp(db, block, tgt);
1102 current_ir_graph = rem;
1108 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1110 ir_graph *rem = current_ir_graph;
1112 current_ir_graph = irg;
1113 res = new_bd_Cond(db, block, c);
1114 current_ir_graph = rem;
1120 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1121 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1123 ir_graph *rem = current_ir_graph;
1125 current_ir_graph = irg;
1126 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1127 current_ir_graph = rem;
1133 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1134 ir_node *store, int arity, ir_node **in) {
1136 ir_graph *rem = current_ir_graph;
1138 current_ir_graph = irg;
1139 res = new_bd_Return(db, block, store, arity, in);
1140 current_ir_graph = rem;
1143 } /* new_rd_Return */
1146 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1147 ir_node *store, ir_node *adr, ir_mode *mode) {
1149 ir_graph *rem = current_ir_graph;
1151 current_ir_graph = irg;
1152 res = new_bd_Load(db, block, store, adr, mode);
1153 current_ir_graph = rem;
1159 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1160 ir_node *store, ir_node *adr, ir_node *val) {
1162 ir_graph *rem = current_ir_graph;
1164 current_ir_graph = irg;
1165 res = new_bd_Store(db, block, store, adr, val);
1166 current_ir_graph = rem;
1169 } /* new_rd_Store */
1172 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1173 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1175 ir_graph *rem = current_ir_graph;
1177 current_ir_graph = irg;
1178 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1179 current_ir_graph = rem;
1182 } /* new_rd_Alloc */
1185 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1186 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1188 ir_graph *rem = current_ir_graph;
1190 current_ir_graph = irg;
1191 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1192 current_ir_graph = rem;
/* simpleSel: Sel of a non-array entity — no index inputs (arity 0). */
1198 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1199 ir_node *store, ir_node *objptr, ir_entity *ent) {
1201 ir_graph *rem = current_ir_graph;
1203 current_ir_graph = irg;
1204 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1205 current_ir_graph = rem;
1208 } /* new_rd_simpleSel */
1211 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1212 int arity, ir_node **in, ir_entity *ent) {
1214 ir_graph *rem = current_ir_graph;
1216 current_ir_graph = irg;
1217 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1218 current_ir_graph = rem;
1224 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1225 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1227 ir_graph *rem = current_ir_graph;
1229 current_ir_graph = irg;
1230 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1231 current_ir_graph = rem;
1234 } /* new_rd_SymConst_type */
/* SymConst without explicit type: delegate with firm_unknown_type. */
1237 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1238 symconst_symbol value, symconst_kind symkind) {
1239 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1240 } /* new_rd_SymConst */
1242 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1243 symconst_symbol sym;
1244 sym.entity_p = symbol;
1245 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1246 } /* new_rd_SymConst_addr_ent */
1248 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1249 symconst_symbol sym;
1250 sym.entity_p = symbol;
1251 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1252 } /* new_rd_SymConst_ofs_ent */
1254 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1255 symconst_symbol sym;
1256 sym.ident_p = symbol;
1257 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1258 } /* new_rd_SymConst_addr_name */
1260 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1261 symconst_symbol sym;
1262 sym.type_p = symbol;
1263 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1264 } /* new_rd_SymConst_type_tag */
1266 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1267 symconst_symbol sym;
1268 sym.type_p = symbol;
1269 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1270 } /* new_rd_SymConst_size */
1272 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1273 symconst_symbol sym;
1274 sym.type_p = symbol;
1275 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1276 } /* new_rd_SymConst_align */
/* rd Sync: creates the empty Sync under 'irg', then adds the arity
   predecessors one by one (Sync has dynamic arity). */
1279 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1281 ir_graph *rem = current_ir_graph;
1284 current_ir_graph = irg;
1285 res = new_bd_Sync(db, block);
1286 current_ir_graph = rem;
1288 for (i = 0; i < arity; ++i)
1289 add_Sync_pred(res, in[i]);
/* Bad is a singleton per graph — just return the cached node. */
1295 new_rd_Bad(ir_graph *irg) {
1296 return get_irg_bad(irg);
1300 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1302 ir_graph *rem = current_ir_graph;
1304 current_ir_graph = irg;
1305 res = new_bd_Confirm(db, block, val, bound, cmp);
1306 current_ir_graph = rem;
1309 } /* new_rd_Confirm */
/* rd wrappers for Unknown/CallBegin/Break/Filter — graph-switch pattern.
   EndReg/EndExcept are built directly (no bd delegation) and register
   themselves on the graph. */
1312 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1314 ir_graph *rem = current_ir_graph;
1316 current_ir_graph = irg;
1317 res = new_bd_Unknown(m);
1318 current_ir_graph = rem;
1321 } /* new_rd_Unknown */
1324 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1326 ir_graph *rem = current_ir_graph;
1328 current_ir_graph = irg;
1329 res = new_bd_CallBegin(db, block, call);
1330 current_ir_graph = rem;
1333 } /* new_rd_CallBegin */
1336 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1339 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1340 set_irg_end_reg(irg, res);
1341 IRN_VRFY_IRG(res, irg);
1343 } /* new_rd_EndReg */
1346 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1349 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1350 set_irg_end_except(irg, res);
1351 IRN_VRFY_IRG (res, irg);
1353 } /* new_rd_EndExcept */
1356 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1358 ir_graph *rem = current_ir_graph;
1360 current_ir_graph = irg;
1361 res = new_bd_Break(db, block);
1362 current_ir_graph = rem;
1365 } /* new_rd_Break */
1368 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1371 ir_graph *rem = current_ir_graph;
1373 current_ir_graph = irg;
1374 res = new_bd_Filter(db, block, arg, mode, proj);
1375 current_ir_graph = rem;
1378 } /* new_rd_Filter */
/* NoMem is a singleton per graph — return the cached node. */
1381 new_rd_NoMem(ir_graph *irg) {
1382 return get_irg_no_mem(irg);
1383 } /* new_rd_NoMem */
/* rd wrappers for Mux/Psi/CopyB/InstOf/Raise/Bound/Pin/ASM —
   save/switch/delegate/restore graph pattern throughout. */
1386 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1387 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1389 ir_graph *rem = current_ir_graph;
1391 current_ir_graph = irg;
1392 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1393 current_ir_graph = rem;
1399 new_rd_Psi(dbg_info *db, ir_graph *irg, ir_node *block,
1400 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode) {
1402 ir_graph *rem = current_ir_graph;
1404 current_ir_graph = irg;
1405 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1406 current_ir_graph = rem;
1411 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1412 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1414 ir_graph *rem = current_ir_graph;
1416 current_ir_graph = irg;
1417 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1418 current_ir_graph = rem;
1421 } /* new_rd_CopyB */
1424 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1425 ir_node *objptr, ir_type *type) {
1427 ir_graph *rem = current_ir_graph;
1429 current_ir_graph = irg;
1430 res = new_bd_InstOf(db, block, store, objptr, type);
1431 current_ir_graph = rem;
1434 } /* new_rd_InstOf */
1437 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1439 ir_graph *rem = current_ir_graph;
1441 current_ir_graph = irg;
1442 res = new_bd_Raise(db, block, store, obj);
1443 current_ir_graph = rem;
1446 } /* new_rd_Raise */
1448 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1449 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1451 ir_graph *rem = current_ir_graph;
1453 current_ir_graph = irg;
1454 res = new_bd_Bound(db, block, store, idx, lower, upper);
1455 current_ir_graph = rem;
1458 } /* new_rd_Bound */
1460 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1462 ir_graph *rem = current_ir_graph;
1464 current_ir_graph = irg;
1465 res = new_bd_Pin(db, block, node);
1466 current_ir_graph = rem;
1471 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1472 int arity, ir_node *in[], ir_asm_constraint *inputs,
1473 int n_outs, ir_asm_constraint *outputs,
1474 int n_clobber, ident *clobber[], ident *asm_text) {
1476 ir_graph *rem = current_ir_graph;
1478 current_ir_graph = irg;
1479 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1480 current_ir_graph = rem;
/*
 * Convenience constructors without debug information: each new_r_* wrapper
 * simply forwards to the corresponding new_rd_* constructor passing NULL
 * as the dbg_info.
 * NOTE(review): the closing braces (and some trailing parameters) of these
 * one-line wrappers fall on lines elided from this chunk.
 */
1486 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1487 return new_rd_Block(NULL, irg, arity, in);
1489 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1490 return new_rd_Start(NULL, irg, block);
1492 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1493 return new_rd_End(NULL, irg, block);
1495 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1496 return new_rd_Jmp(NULL, irg, block);
1498 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1499 return new_rd_IJmp(NULL, irg, block, tgt);
1501 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1502 return new_rd_Cond(NULL, irg, block, c);
1504 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1505 ir_node *store, int arity, ir_node **in) {
1506 return new_rd_Return(NULL, irg, block, store, arity, in);
/* Constant constructors. */
1508 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1509 ir_mode *mode, tarval *con) {
1510 return new_rd_Const(NULL, irg, block, mode, con);
1512 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1513 ir_mode *mode, long value) {
1514 return new_rd_Const_long(NULL, irg, block, mode, value);
1516 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1517 ir_mode *mode, tarval *con, ir_type *tp) {
1518 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1520 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1521 symconst_symbol value, symconst_kind symkind) {
1522 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
/* simpleSel is a Sel without array indices. */
1524 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1525 ir_node *objptr, ir_entity *ent) {
1526 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1528 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1529 ir_node *objptr, int n_index, ir_node **index,
1531 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1533 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1534 ir_node *callee, int arity, ir_node **in,
1536 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
/* Arithmetic and logical operations. */
1538 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1539 ir_node *op1, ir_node *op2, ir_mode *mode) {
1540 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1542 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1543 ir_node *op1, ir_node *op2, ir_mode *mode) {
1544 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1546 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1547 ir_node *op, ir_mode *mode) {
1548 return new_rd_Minus(NULL, irg, block, op, mode);
1550 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1551 ir_node *op1, ir_node *op2, ir_mode *mode) {
1552 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1554 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1555 ir_node *op1, ir_node *op2, ir_mode *mode) {
1556 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
/* Division-like operations additionally take a memory operand and pin state. */
1558 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1559 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1560 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1562 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1563 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1564 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1566 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1567 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1568 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1570 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1571 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1572 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1574 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1575 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1576 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1578 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1579 ir_node *op, ir_mode *mode) {
1580 return new_rd_Abs(NULL, irg, block, op, mode);
1582 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1583 ir_node *op1, ir_node *op2, ir_mode *mode) {
1584 return new_rd_And(NULL, irg, block, op1, op2, mode);
1586 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1587 ir_node *op1, ir_node *op2, ir_mode *mode) {
1588 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1590 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1591 ir_node *op1, ir_node *op2, ir_mode *mode) {
1592 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1594 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1595 ir_node *op, ir_mode *mode) {
1596 return new_rd_Not(NULL, irg, block, op, mode);
/* Shift/rotate operations: k is the shift amount. */
1598 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1599 ir_node *op, ir_node *k, ir_mode *mode) {
1600 return new_rd_Shl(NULL, irg, block, op, k, mode);
1602 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1603 ir_node *op, ir_node *k, ir_mode *mode) {
1604 return new_rd_Shr(NULL, irg, block, op, k, mode);
1606 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1607 ir_node *op, ir_node *k, ir_mode *mode) {
1608 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1610 ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
1611 ir_node *op, ir_node *k, ir_mode *mode) {
1612 return new_rd_Rot(NULL, irg, block, op, k, mode);
1614 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1615 ir_node *op, ir_node *k, ir_mode *mode) {
1616 return new_rd_Carry(NULL, irg, block, op, k, mode);
1618 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1619 ir_node *op, ir_node *k, ir_mode *mode) {
1620 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1622 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1623 ir_node *op1, ir_node *op2) {
1624 return new_rd_Cmp(NULL, irg, block, op1, op2);
1626 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1627 ir_node *op, ir_mode *mode) {
1628 return new_rd_Conv(NULL, irg, block, op, mode);
1630 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1631 return new_rd_Cast(NULL, irg, block, op, to_tp);
1633 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1634 ir_node **in, ir_mode *mode) {
1635 return new_rd_Phi(NULL, irg, block, arity, in, mode);
/* Memory operations. */
1637 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1638 ir_node *store, ir_node *adr, ir_mode *mode) {
1639 return new_rd_Load(NULL, irg, block, store, adr, mode);
1641 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1642 ir_node *store, ir_node *adr, ir_node *val) {
1643 return new_rd_Store(NULL, irg, block, store, adr, val);
1645 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1646 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1647 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1649 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1650 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1651 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1653 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1654 return new_rd_Sync(NULL, irg, block, arity, in);
1656 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1657 ir_mode *mode, long proj) {
1658 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1660 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1662 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1664 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1665 int arity, ir_node **in) {
1666 return new_rd_Tuple(NULL, irg, block, arity, in );
1668 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1669 ir_node *val, ir_mode *mode) {
1670 return new_rd_Id(NULL, irg, block, val, mode);
1672 ir_node *new_r_Bad(ir_graph *irg) {
1673 return new_rd_Bad(irg);
1675 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1676 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1678 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1679 return new_rd_Unknown(irg, m);
/* Interprocedural-view constructors. */
1681 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1682 return new_rd_CallBegin(NULL, irg, block, callee);
1684 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1685 return new_rd_EndReg(NULL, irg, block);
1687 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1688 return new_rd_EndExcept(NULL, irg, block);
1690 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1691 return new_rd_Break(NULL, irg, block);
1693 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1694 ir_mode *mode, long proj) {
1695 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1697 ir_node *new_r_NoMem(ir_graph *irg) {
1698 return new_rd_NoMem(irg);
1700 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1701 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1702 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1704 ir_node *new_r_Psi(ir_graph *irg, ir_node *block,
1705 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1706 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1708 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1709 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1710 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1712 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1714 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1716 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1717 ir_node *store, ir_node *obj) {
1718 return new_rd_Raise(NULL, irg, block, store, obj);
1720 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1721 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1722 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1724 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1725 return new_rd_Pin(NULL, irg, block, node);
1727 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1728 int arity, ir_node *in[], ir_asm_constraint *inputs,
1729 int n_outs, ir_asm_constraint *outputs,
1730 int n_clobber, ident *clobber[], ident *asm_text) {
1731 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1734 /** ********************/
1735 /** public interfaces */
1736 /** construction tools */
1740 * - create a new Start node in the current block
1742 * @return s - pointer to the created Start node
1747 new_d_Start(dbg_info *db) {
1750 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1751 op_Start, mode_T, 0, NULL);
1753 res = optimize_node(res);
1754 IRN_VRFY_IRG(res, current_ir_graph);
1759 new_d_End(dbg_info *db) {
1761 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1762 op_End, mode_X, -1, NULL);
1763 res = optimize_node(res);
1764 IRN_VRFY_IRG(res, current_ir_graph);
1769 /* Constructs a Block with a fixed number of predecessors.
1770 Does set current_block. Can be used with automatic Phi
1771 node construction. */
1773 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1776 int has_unknown = 0;
1778 res = new_bd_Block(db, arity, in);
1780 /* Create and initialize array for Phi-node construction. */
1781 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1782 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1783 current_ir_graph->n_loc);
1784 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1787 for (i = arity-1; i >= 0; i--)
1788 if (get_irn_op(in[i]) == op_Unknown) {
1793 if (!has_unknown) res = optimize_node(res);
1794 current_ir_graph->current_block = res;
1796 IRN_VRFY_IRG(res, current_ir_graph);
1801 /* ***********************************************************************/
1802 /* Methods necessary for automatic Phi node creation */
1804 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1805 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1806 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1807 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1809 Call Graph: ( A ---> B == A "calls" B)
1811 get_value mature_immBlock
1819 get_r_value_internal |
1823 new_rd_Phi0 new_rd_Phi_in
1825 * *************************************************************************** */
1827 /** Creates a Phi node with 0 predecessors. */
1828 static INLINE ir_node *
1829 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1832 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1833 IRN_VRFY_IRG(res, irg);
/**
 * Creates a Phi node with the given predecessors, eliminating it again if
 * it turns out to merge only a single definition (then the known value is
 * returned and the freshly allocated node is freed from the obstack).
 *
 * @param phi0  the Phi0 placeholder this Phi will replace, or NULL;
 *              self-references through phi0 are rewritten to the new node.
 * NOTE(review): several interior lines (loop control, the single-predecessor
 * exit path and the final return) are elided from this chunk; comments below
 * describe only what is visible.
 */
1838 static INLINE ir_node *
1839 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1840 ir_node **in, int ins, ir_node *phi0) {
1842 ir_node *res, *known;
1844 /* Allocate a new node on the obstack. The allocation copies the in
1846 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1847 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1849 /* This loop checks whether the Phi has more than one predecessor.
1850 If so, it is a real Phi node and we break the loop. Else the
1851 Phi node merges the same definition on several paths and therefore
1852 is not needed. Don't consider Bad nodes! */
1854 for (i=0; i < ins; ++i)
1858 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1860 /* Optimize self referencing Phis: We can't detect them yet properly, as
1861 they still refer to the Phi0 they will replace. So replace right now. */
1862 if (phi0 && in[i] == phi0) in[i] = res;
1864 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1872 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* The speculatively allocated node is removed from the obstack again. */
1875 edges_node_deleted(res, current_ir_graph);
1876 obstack_free (current_ir_graph->obst, res);
1877 if (is_Phi(known)) {
1878 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1879 order, an enclosing Phi know may get superfluous. */
1880 res = optimize_in_place_2(known);
1882 exchange(known, res);
1888 /* A undefined value, e.g., in unreachable code. */
1892 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
1893 IRN_VRFY_IRG(res, irg);
1894 /* Memory Phis in endless loops must be kept alive.
1895 As we can't distinguish these easily we keep all of them alive. */
1896 if ((res->op == op_Phi) && (mode == mode_M))
1897 add_End_keepalive(get_irg_end(irg), res);
1901 } /* new_rd_Phi_in */
1904 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1906 #if PRECISE_EXC_CONTEXT
1908 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1911 * Construct a new frag_array for node n.
1912 * Copy the content from the current graph_arr of the corresponding block:
1913 * this is the current state.
1914 * Set ProjM(n) as current memory state.
1915 * Further the last entry in frag_arr of current block points to n. This
1916 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1918 static INLINE ir_node **new_frag_arr(ir_node *n) {
1922 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1923 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1924 sizeof(ir_node *)*current_ir_graph->n_loc);
1926 /* turn off optimization before allocating Proj nodes, as res isn't
1928 opt = get_opt_optimize(); set_optimize(0);
1929 /* Here we rely on the fact that all frag ops have Memory as first result! */
1930 if (get_irn_op(n) == op_Call)
1931 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1932 else if (get_irn_op(n) == op_CopyB)
1933 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
1935 assert((pn_Quot_M == pn_DivMod_M) &&
1936 (pn_Quot_M == pn_Div_M) &&
1937 (pn_Quot_M == pn_Mod_M) &&
1938 (pn_Quot_M == pn_Load_M) &&
1939 (pn_Quot_M == pn_Store_M) &&
1940 (pn_Quot_M == pn_Alloc_M) &&
1941 (pn_Quot_M == pn_Bound_M));
1942 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1946 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1948 } /* new_frag_arr */
1951 * Returns the frag_arr from a node.
1953 static INLINE ir_node **get_frag_arr(ir_node *n) {
1954 switch (get_irn_opcode(n)) {
1956 return n->attr.call.exc.frag_arr;
1958 return n->attr.alloc.exc.frag_arr;
1960 return n->attr.load.exc.frag_arr;
1962 return n->attr.store.exc.frag_arr;
1964 return n->attr.except.frag_arr;
1966 } /* get_frag_arr */
/*
 * Stores val at position pos in frag_arr (only if the slot is still empty)
 * and propagates it along the chain of frag arrays reachable through the
 * last slot (n_loc - 1), which links the fragile ops of a block.
 * NOTE(review): two variants appear interleaved here -- a recursive one and
 * a bounded-iterative one; presumably they are separated by elided
 * preprocessor conditionals in the full source.  TODO confirm.
 */
1969 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1971 if (!frag_arr[pos]) frag_arr[pos] = val;
1972 if (frag_arr[current_ir_graph->n_loc - 1]) {
1973 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1974 assert(arr != frag_arr && "Endless recursion detected");
1975 set_frag_value(arr, pos, val);
/* Iterative variant: bounded walk instead of recursion. */
1980 for (i = 0; i < 1000; ++i) {
1981 if (!frag_arr[pos]) {
1982 frag_arr[pos] = val;
1984 if (frag_arr[current_ir_graph->n_loc - 1]) {
1985 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1991 assert(0 && "potential endless recursion");
1993 } /* set_frag_value */
/*
 * Returns the value at position pos valid after the fragile op cfOp,
 * looking it up in cfOp's frag array.  If a later set_value() overwrote
 * the block-local slot, a Phi (or Phi0 for immature blocks) is built so
 * the pre-set_value state is preserved.
 * NOTE(review): several control-flow lines (else branches, declarations,
 * return) are elided from this chunk.
 */
1996 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2000 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2002 frag_arr = get_frag_arr(cfOp);
2003 res = frag_arr[pos];
2005 if (block->attr.block.graph_arr[pos]) {
2006 /* There was a set_value() after the cfOp and no get_value before that
2007 set_value(). We must build a Phi node now. */
2008 if (block->attr.block.is_matured) {
2009 int ins = get_irn_arity(block);
2011 NEW_ARR_A(ir_node *, nin, ins);
2012 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: create a Phi0 placeholder and queue it on the block. */
2014 res = new_rd_Phi0(current_ir_graph, block, mode);
2015 res->attr.phi.u.pos = pos;
2016 res->attr.phi.next = block->attr.block.phis;
2017 block->attr.block.phis = res;
2020 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2021 but this should be better: (remove comment if this works) */
2022 /* It's a Phi, we can write this into all graph_arrs with NULL */
2023 set_frag_value(block->attr.block.graph_arr, pos, res);
/* Slot empty: fall back to the normal SSA lookup and cache the result. */
2025 res = get_r_value_internal(block, pos, mode);
2026 set_frag_value(block->attr.block.graph_arr, pos, res);
2030 } /* get_r_frag_value_internal */
2031 #endif /* PRECISE_EXC_CONTEXT */
2034 * check whether a control flow cf_pred is a exception flow.
2036 * @param cf_pred the control flow node
2037 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2039 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2041 * Note: all projections from a raise are "exceptional control flow" we we handle it
2042 * like a normal Jmp, because there is no "regular" one.
2043 * That's why Raise is no "fragile_op"!
2045 if (is_fragile_op(prev_cf_op)) {
2046 if (is_Proj(cf_pred)) {
2047 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2048 /* the regular control flow, NO exception */
2051 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2054 /* Hmm, exception but not a Proj? */
2055 assert(!"unexpected condition: fragile op without a proj");
2059 } /* is_exception_flow */
/**
 * phi_merge -- collects the Phi predecessors for value <pos> and builds
 * the node via new_rd_Phi_in (which may fold it to a single value).
 * NOTE(review): loop/branch closers, declarations (e.g. `i`) and the final
 * return are on lines elided from this chunk.
 */
2062 * Computes the predecessors for the real phi node, and then
2063 * allocates and returns this node. The routine called to allocate the
2064 * node might optimize it away and return a real value.
2065 * This function must be called with an in-array of proper size.
2068 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2069 ir_node *prevBlock, *res, *phi0, *phi0_all;
2072 /* If this block has no value at pos create a Phi0 and remember it
2073 in graph_arr to break recursions.
2074 Else we may not set graph_arr as there a later value is remembered. */
2076 if (!block->attr.block.graph_arr[pos]) {
2077 if (block == get_irg_start_block(current_ir_graph)) {
2078 /* Collapsing to Bad tarvals is no good idea.
2079 So we call a user-supplied routine here that deals with this case as
2080 appropriate for the given language. Sorrily the only help we can give
2081 here is the position.
2083 Even if all variables are defined before use, it can happen that
2084 we get to the start block, if a Cond has been replaced by a tuple
2085 (bad, jmp). In this case we call the function needlessly, eventually
2086 generating an non existent error.
2087 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2090 if (default_initialize_local_variable) {
2091 ir_node *rem = get_cur_block();
2093 set_cur_block(block);
2094 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
/* No initializer callback registered: fall back to a Bad constant. */
2098 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2099 /* We don't need to care about exception ops in the start block.
2100 There are none by definition. */
2101 return block->attr.block.graph_arr[pos];
/* Not the start block: plant a Phi0 placeholder to break recursion. */
2103 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2104 block->attr.block.graph_arr[pos] = phi0;
2105 #if PRECISE_EXC_CONTEXT
2106 if (get_opt_precise_exc_context()) {
2107 /* Set graph_arr for fragile ops. Also here we should break recursion.
2108 We could choose a cyclic path through an cfop. But the recursion would
2109 break at some point. */
2110 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2116 /* This loop goes to all predecessor blocks of the block the Phi node
2117 is in and there finds the operands of the Phi node by calling
2118 get_r_value_internal. */
2119 for (i = 1; i <= ins; ++i) {
2120 ir_node *cf_pred = block->in[i];
2121 ir_node *prevCfOp = skip_Proj(cf_pred);
2123 if (is_Bad(prevCfOp)) {
2124 /* In case a Cond has been optimized we would get right to the start block
2125 with an invalid definition. */
2126 nin[i-1] = new_Bad();
2129 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2131 if (!is_Bad(prevBlock)) {
2132 #if PRECISE_EXC_CONTEXT
2133 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2134 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2135 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2138 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2140 nin[i-1] = new_Bad();
2144 /* We want to pass the Phi0 node to the constructor: this finds additional
2145 optimization possibilities.
2146 The Phi0 node either is allocated in this function, or it comes from
2147 a former call to get_r_value_internal. In this case we may not yet
2148 exchange phi0, as this is done in mature_immBlock. */
2150 phi0_all = block->attr.block.graph_arr[pos];
2151 if (!((get_irn_op(phi0_all) == op_Phi) &&
2152 (get_irn_arity(phi0_all) == 0) &&
2153 (get_nodes_block(phi0_all) == block)))
2159 /* After collecting all predecessors into the array nin a new Phi node
2160 with these predecessors is created. This constructor contains an
2161 optimization: If all predecessors of the Phi node are identical it
2162 returns the only operand instead of a new Phi node. */
2163 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2165 /* In case we allocated a Phi0 node at the beginning of this procedure,
2166 we need to exchange this Phi0 with the real Phi. */
2168 exchange(phi0, res);
2169 block->attr.block.graph_arr[pos] = res;
2170 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2171 only an optimization. */
/*
 * get_r_value_internal -- SSA lookup of value <pos> in <block>; the full
 * case analysis is documented inside the function body below.
 * NOTE(review): some closers and declarations (res, nin) are on lines
 * elided from this chunk.
 */
2178 * This function returns the last definition of a value. In case
2179 * this value was last defined in a previous block, Phi nodes are
2180 * inserted. If the part of the firm graph containing the definition
2181 * is not yet constructed, a dummy Phi node is returned.
2183 * @param block the current block
2184 * @param pos the value number of the value searched
2185 * @param mode the mode of this value (needed for Phi construction)
2188 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2190 /* There are 4 cases to treat.
2192 1. The block is not mature and we visit it the first time. We can not
2193 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2194 predecessors is returned. This node is added to the linked list (field
2195 "link") of the containing block to be completed when this block is
2196 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2199 2. The value is already known in this block, graph_arr[pos] is set and we
2200 visit the block the first time. We can return the value without
2201 creating any new nodes.
2203 3. The block is mature and we visit it the first time. A Phi node needs
2204 to be created (phi_merge). If the Phi is not needed, as all it's
2205 operands are the same value reaching the block through different
2206 paths, it's optimized away and the value itself is returned.
2208 4. The block is mature, and we visit it the second time. Now two
2209 subcases are possible:
2210 * The value was computed completely the last time we were here. This
2211 is the case if there is no loop. We can return the proper value.
2212 * The recursion that visited this node and set the flag did not
2213 return yet. We are computing a value in a loop and need to
2214 break the recursion. This case only happens if we visited
2215 the same block with phi_merge before, which inserted a Phi0.
2216 So we return the Phi0.
2219 /* case 4 -- already visited. */
2220 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2221 /* As phi_merge allocates a Phi0 this value is always defined. Here
2222 is the critical difference of the two algorithms. */
2223 assert(block->attr.block.graph_arr[pos]);
2224 return block->attr.block.graph_arr[pos];
2227 /* visited the first time */
2228 set_irn_visited(block, get_irg_visited(current_ir_graph));
2230 /* Get the local valid value */
2231 res = block->attr.block.graph_arr[pos];
2233 /* case 2 -- If the value is actually computed, return it. */
2234 if (res) { return res; };
2236 if (block->attr.block.is_matured) { /* case 3 */
2238 /* The Phi has the same amount of ins as the corresponding block. */
2239 int ins = get_irn_arity(block);
2241 NEW_ARR_A (ir_node *, nin, ins);
2243 /* Phi merge collects the predecessors and then creates a node. */
2244 res = phi_merge (block, pos, mode, nin, ins);
2246 } else { /* case 1 */
2247 /* The block is not mature, we don't know how many in's are needed. A Phi
2248 with zero predecessors is created. Such a Phi node is called Phi0
2249 node. The Phi0 is then added to the list of Phi0 nodes in this block
2250 to be matured by mature_immBlock later.
2251 The Phi0 has to remember the pos of it's internal value. If the real
2252 Phi is computed, pos is used to update the array with the local
2254 res = new_rd_Phi0(current_ir_graph, block, mode);
2255 res->attr.phi.u.pos = pos;
2256 res->attr.phi.next = block->attr.block.phis;
2257 block->attr.block.phis = res;
2260 /* If we get here, the frontend missed a use-before-definition error */
2263 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2264 assert(mode->code >= irm_F && mode->code <= irm_P);
2265 res = new_rd_Const(NULL, current_ir_graph, block, mode,
2266 get_mode_null(mode));
2269 /* The local valid value is available now. */
2270 block->attr.block.graph_arr[pos] = res;
2273 } /* get_r_value_internal */
2275 /* ************************************************************************** */
2278 * Finalize a Block node, when all control flows are known.
2279 * Acceptable parameters are only Block nodes.
2282 mature_immBlock(ir_node *block) {
2287 assert(is_Block(block));
2288 if (!get_Block_matured(block)) {
2289 ins = ARR_LEN(block->in)-1;
2290 /* Fix block parameters */
2291 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2293 /* An array for building the Phi nodes. */
2294 NEW_ARR_A(ir_node *, nin, ins);
2296 /* Traverse a chain of Phi nodes attached to this block and mature
2298 for (n = block->attr.block.phis; n; n = next) {
2299 inc_irg_visited(current_ir_graph);
2300 next = n->attr.phi.next;
2301 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2304 block->attr.block.is_matured = 1;
2306 /* Now, as the block is a finished firm node, we can optimize it.
2307 Since other nodes have been allocated since the block was created
2308 we can not free the node on the obstack. Therefore we have to call
2310 Unfortunately the optimization does not change a lot, as all allocated
2311 nodes refer to the unoptimized node.
2312 We can call _2, as global cse has no effect on blocks. */
2313 block = optimize_in_place_2(block);
2314 IRN_VRFY_IRG(block, current_ir_graph);
2316 } /* mature_immBlock */
/*
 * Debug-info constructors operating on the current block of
 * current_ir_graph: each new_d_* wrapper forwards to the corresponding
 * new_bd_* constructor.  Const nodes are placed in the start block.
 * NOTE(review): return types and closing braces of these wrappers fall on
 * lines elided from this chunk.
 */
2319 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2320 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2324 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2325 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2329 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2330 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2331 } /* new_d_Const_long */
2334 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2335 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2336 } /* new_d_Const_type */
2340 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2341 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2345 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2346 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2350 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2353 assert(arg->op == op_Cond);
2354 arg->attr.cond.kind = fragmentary;
2355 arg->attr.cond.default_proj = max_proj;
2356 res = new_Proj(arg, mode_X, max_proj);
2358 } /* new_d_defaultProj */
/*
 * More current-block wrappers.  new_bd_Conv's trailing flag selects strict
 * conversion: 0 = normal Conv, 1 = strict Conv.
 * NOTE(review): return types and closing braces fall on elided lines.
 */
2361 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2362 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2366 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2367 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2368 } /* new_d_strictConv */
2371 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2372 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2376 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2377 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
/* allocate_frag_arr: lazily attach a "fragment array" (per-exception memory
   state, see PRECISE_EXC_CONTEXT) to a freshly built node.
   Guards: the option must be on, construction must still be in progress,
   the node must really have opcode `op` (optimize_node may have replaced
   it), and the array must not already exist (CSE may return an old node). */
2387 * Allocate the frag array.
2389 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2390 if (get_opt_precise_exc_context()) {
2391 if ((current_ir_graph->phase_state == phase_building) &&
2392 (get_irn_op(res) == op) && /* Could be optimized away. */
2393 !*frag_store) /* Could be a cse where the arr is already set. */ {
2394 *frag_store = new_frag_arr(res);
2397 } /* allocate_frag_arr */
/* Division-family constructors.  Each builds the node in the current block
   and, when PRECISE_EXC_CONTEXT is compiled in, attaches a frag array so the
   exact memory state at a possible exception can be reconstructed.
   (The `#endif` / `return res;` lines fall in the sampling gaps.) */

2400 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2402 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2403 #if PRECISE_EXC_CONTEXT
2404 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */

2411 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2413 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2414 #if PRECISE_EXC_CONTEXT
2415 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2419 } /* new_d_DivMod */

2422 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2424 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2425 #if PRECISE_EXC_CONTEXT
2426 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */

/* NOTE(review): op_Div (not op_DivRL) below looks intentional -- in this
   libFirm version a remainder-less DivRL is represented as an op_Div node
   with a no-remainder attribute; confirm against new_bd_DivRL. */
2433 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2435 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2436 #if PRECISE_EXC_CONTEXT
2437 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */

2444 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2446 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2447 #if PRECISE_EXC_CONTEXT
2448 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* Comparison and control-flow constructors for the current block. */

/* new_d_Cmp: compare op1 against op2. */
2467 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2468 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);

/* new_d_Jmp: unconditional jump out of the current block. */
2472 new_d_Jmp(dbg_info *db) {
2473 return new_bd_Jmp(db, current_ir_graph->current_block);

/* new_d_IJmp: indirect jump to computed target `tgt`. */
2477 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2478 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);

/* new_d_Cond: conditional branch on selector `c`. */
2482 new_d_Cond(dbg_info *db, ir_node *c) {
2483 return new_bd_Cond(db, current_ir_graph->current_block, c);

/* new_d_Call: call `callee` with `arity` arguments; attaches a frag array
   under PRECISE_EXC_CONTEXT since a Call may raise. */
2487 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2490 res = new_bd_Call(db, current_ir_graph->current_block,
2491 store, callee, arity, in, tp);
2492 #if PRECISE_EXC_CONTEXT
2493 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */

/* new_d_Return: function return with `arity` result values. */
2500 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2501 return new_bd_Return(db, current_ir_graph->current_block,
2503 } /* new_d_Return */
/* Memory-operation constructors; Load/Store/Alloc may raise exceptions, so
   they get a frag array under PRECISE_EXC_CONTEXT. */

/* new_d_Load: load of mode `mode` from `addr`, threaded through `store`. */
2506 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2508 res = new_bd_Load(db, current_ir_graph->current_block,
2510 #if PRECISE_EXC_CONTEXT
2511 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */

/* new_d_Store: store `val` to `addr`, threaded through `store`. */
2518 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2520 res = new_bd_Store(db, current_ir_graph->current_block,
2522 #if PRECISE_EXC_CONTEXT
2523 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */

/* new_d_Alloc: allocate `size` bytes of `alloc_type` on stack or heap
   (`where`). */
2530 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2531 ir_where_alloc where) {
2533 res = new_bd_Alloc(db, current_ir_graph->current_block,
2534 store, size, alloc_type, where);
2535 #if PRECISE_EXC_CONTEXT
2536 allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */

/* new_d_Free: release a previously allocated object; no frag array needed. */
2543 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2544 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2545 return new_bd_Free(db, current_ir_graph->current_block,
2546 store, ptr, size, free_type, where);
/* new_d_simpleSel: entity selection without array indices -- a Sel with an
   empty index list. */
2550 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2551 /* GL: objptr was called frame before. Frame was a bad choice for the name
2552 as the operand could as well be a pointer to a dynamic object. */
2554 return new_bd_Sel(db, current_ir_graph->current_block,
2555 store, objptr, 0, NULL, ent);
2556 } /* new_d_simpleSel */

/* new_d_Sel: general entity selection with `n_index` array indices. */
2559 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2560 return new_bd_Sel(db, current_ir_graph->current_block,
2561 store, objptr, n_index, index, sel);

/* new_d_SymConst_type: symbolic constant with attached type; like Const,
   always placed in the start block. */
2565 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2566 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2568 } /* new_d_SymConst_type */

/* new_d_SymConst: SymConst without a specific type (firm_unknown_type). */
2571 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2572 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2573 value, kind, firm_unknown_type);
2574 } /* new_d_SymConst */

/* new_d_Sync: memory Sync over `arity` predecessors.  Note: unlike its
   siblings this forwards to the rd (graph-explicit) constructor. */
2577 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2578 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
/* new_d_Bad (body fragment): forwards to the inline implementation. */
2584 return _new_d_Bad();

/* new_d_Confirm: assert that `val` relates to `bound` via comparison `cmp`. */
2588 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2589 return new_bd_Confirm(db, current_ir_graph->current_block,
2591 } /* new_d_Confirm */

/* new_d_Unknown: the graph-wide Unknown value of mode m (blockless). */
2594 new_d_Unknown(ir_mode *m) {
2595 return new_bd_Unknown(m);
2596 } /* new_d_Unknown */

/* Interprocedural-view constructors (CallBegin/EndReg/EndExcept/Break/
   Filter), all in the current block. */
2599 new_d_CallBegin(dbg_info *db, ir_node *call) {
2600 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2601 } /* new_d_CallBegin */

2604 new_d_EndReg(dbg_info *db) {
2605 return new_bd_EndReg(db, current_ir_graph->current_block);
2606 } /* new_d_EndReg */

2609 new_d_EndExcept(dbg_info *db) {
2610 return new_bd_EndExcept(db, current_ir_graph->current_block);
2611 } /* new_d_EndExcept */

2614 new_d_Break(dbg_info *db) {
2615 return new_bd_Break(db, current_ir_graph->current_block);

2619 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2620 return new_bd_Filter(db, current_ir_graph->current_block,
2622 } /* new_d_Filter */

/* new_d_NoMem: the unique NoMem node.  Parenthesized name presumably
   suppresses expansion of a same-named function-like macro -- verify in
   the header. */
2625 (new_d_NoMem)(void) {
2626 return _new_d_NoMem();

/* new_d_Mux: select ir_false/ir_true depending on `sel`. */
2630 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2631 ir_node *ir_true, ir_mode *mode) {
2632 return new_bd_Mux(db, current_ir_graph->current_block,
2633 sel, ir_false, ir_true, mode);

/* new_d_Psi: generalized Mux over `arity` condition/value pairs. */
2637 new_d_Psi(dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2638 return new_bd_Psi(db, current_ir_graph->current_block,
2639 arity, conds, vals, mode);
/* new_d_CopyB: block copy of `data_type` from `src` to `dst`; may raise,
   so a frag array is attached under PRECISE_EXC_CONTEXT. */
2642 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2643 ir_node *dst, ir_node *src, ir_type *data_type) {
2645 res = new_bd_CopyB(db, current_ir_graph->current_block,
2646 store, dst, src, data_type);
2647 #if PRECISE_EXC_CONTEXT
2648 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);

/* new_d_InstOf: dynamic type test of `objptr` against `type`. */
2654 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2655 return new_bd_InstOf(db, current_ir_graph->current_block,
2656 store, objptr, type);
2657 } /* new_d_InstOf */

/* new_d_Raise: raise exception object `obj`. */
2660 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2661 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);

/* new_d_Bound: bounds check lower <= idx < upper; may raise, so it gets a
   frag array under PRECISE_EXC_CONTEXT. */
2664 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2665 ir_node *idx, ir_node *lower, ir_node *upper) {
2667 res = new_bd_Bound(db, current_ir_graph->current_block,
2668 store, idx, lower, upper);
2669 #if PRECISE_EXC_CONTEXT
2670 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);

/* new_d_Pin: pin `node` to the current block. */
2676 new_d_Pin(dbg_info *db, ir_node *node) {
2677 return new_bd_Pin(db, current_ir_graph->current_block, node);

/* new_d_ASM: inline-assembler node with input/output constraints and
   clobber list. */
2681 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2682 int n_outs, ir_asm_constraint *outputs,
2683 int n_clobber, ident *clobber[], ident *asm_text) {
2684 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2687 /* ********************************************************************* */
2688 /* Comfortable interface with automatic Phi node construction. */
2689 /* (Uses also the new_d_* constructors, except new_Block.) */
2690 /* ********************************************************************* */
2692 /* Block construction */
2693 /* immature Block without predecessors */
/* new_d_immBlock: create an immature block -- a Block with a dynamic,
   initially empty predecessor array (arity -1) that is filled later via
   add_immBlock_pred() and finalized by mature_immBlock().  The new block
   becomes the current block, is marked as a macroblock head, and receives
   a zeroed graph_arr of n_loc slots for Phi construction. */
2695 new_d_immBlock(dbg_info *db) {
2698 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2699 /* creates a new dynamic in-array as length of in is -1 */
2700 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2701 current_ir_graph->current_block = res;
2703 /* macroblock head */
2706 res->attr.block.is_matured = 0;
2707 res->attr.block.is_dead = 0;
2708 res->attr.block.is_mb_head = 1;
2709 res->attr.block.has_label = 0;
2710 res->attr.block.irg = current_ir_graph;
2711 res->attr.block.backedge = NULL;
2712 res->attr.block.in_cg = NULL;
2713 res->attr.block.cg_backedge = NULL;
2714 res->attr.block.extblk = NULL;
2715 res->attr.block.region = NULL;
2716 res->attr.block.mb_depth = 0;
2717 res->attr.block.label = 0;
2719 set_Block_block_visited(res, 0);
2721 /* Create and initialize array for Phi-node construction. */
2722 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2723 current_ir_graph->n_loc)2724 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2726 /* Immature block may not be optimized! */
2727 IRN_VRFY_IRG(res, current_ir_graph);
2730 } /* new_d_immBlock */
/* new_immBlock: debug-info-less convenience wrapper. */
2733 new_immBlock(void) {
2734 return new_d_immBlock(NULL);
2735 } /* new_immBlock */

/* new_d_immPartBlock: immature partial block inside the macroblock of
   pred_jmp's block -- it inherits the macroblock header (in[0]), takes
   pred_jmp as its control-flow predecessor, is NOT a macroblock head, and
   sits one mb_depth level below its parent. */
2737 /* immature PartBlock with its predecessors */
2739 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2740 ir_node *res = new_d_immBlock(db);
2741 ir_node *blk = get_nodes_block(pred_jmp);
2743 res->in[0] = blk->in[0];
2744 assert(res->in[0] != NULL);
2745 add_immBlock_pred(res, pred_jmp);
2747 res->attr.block.is_mb_head = 0;
2748 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2751 } /* new_d_immPartBlock */

/* new_immPartBlock: debug-info-less convenience wrapper. */
2754 new_immPartBlock(ir_node *pred_jmp) {
2755 return new_d_immPartBlock(NULL, pred_jmp);
2756 } /* new_immPartBlock */
/* add_immBlock_pred: append control-flow predecessor `jmp` to an immature
   block.  Only legal while the block is unmatured and is a macroblock head.
   `n` is the predecessor index before the append (in[0] is the macroblock
   header, hence the -1). */
2758 /* add an edge to a jmp/control flow node */
2760 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2761 int n = ARR_LEN(block->in) - 1;
2763 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2764 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2765 assert(is_ir_node(jmp));
2767 ARR_APP1(ir_node *, block->in, jmp);
2769 hook_set_irn_n(block, n, jmp, NULL);
2770 } /* add_immBlock_pred */

/* set_cur_block: make `target` the block new nodes are placed into. */
2772 /* changing the current block */
2774 set_cur_block(ir_node *target) {
2775 current_ir_graph->current_block = target;
2776 } /* set_cur_block */
2778 /* ************************ */
2779 /* parameter administration */
/* get_d_value: read local variable `pos` in the current block, triggering
   automatic Phi construction via get_r_value_internal.  Slot 0 is reserved
   for the memory state, hence the pos + 1 offset.  inc_irg_visited resets
   the traversal marker used by the internal lookup. */
2781 /* get a value from the parameter array from the current block by its index */
2783 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2784 ir_graph *irg = current_ir_graph;
2785 assert(get_irg_phase_state(irg) == phase_building);
2786 inc_irg_visited(irg);
2789 return get_r_value_internal(irg->current_block, pos + 1, mode);

/* get_value: debug-info-less wrapper around get_d_value. */
2792 /* get a value from the parameter array from the current block by its index */
2794 get_value(int pos, ir_mode *mode) {
2795 return get_d_value(NULL, pos, mode);

/* set_value: define local variable `pos` in the current block (slot pos+1
   of graph_arr; slot 0 is the memory state). */
2798 /* set a value at position pos in the parameter array from the current block */
2800 set_value(int pos, ir_node *value) {
2801 ir_graph *irg = current_ir_graph;
2802 assert(get_irg_phase_state(irg) == phase_building);
2803 assert(pos+1 < irg->n_loc);
2804 assert(is_ir_node(value));
2805 irg->current_block->attr.block.graph_arr[pos + 1] = value;

/* find_value: reverse lookup -- scan the current block's graph_arr (skipping
   the memory slot 0) for `value`.  (The return statements fall on lines
   missing from this view.) */
2808 /* Find the value number for a node in the current block.*/
2810 find_value(ir_node *value) {
2812 ir_node *bl = current_ir_graph->current_block;
2814 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2815 if (bl->attr.block.graph_arr[i] == value)
/* get_store: read the current memory state (variable slot 0, mode_M) of the
   current block, with automatic Phi construction. */
2820 /* get the current store */
2823 ir_graph *irg = current_ir_graph;
2825 assert(get_irg_phase_state(irg) == phase_building);
2826 /* GL: one could call get_value instead */
2827 inc_irg_visited(irg);
2828 return get_r_value_internal(irg->current_block, 0, mode_M);

/* set_store: write the current memory state.  When auto-sync is enabled,
   consecutive non-volatile Loads are rewired to run in parallel: either the
   new Load is hoisted above an existing Sync (and added as another Sync
   predecessor), or two back-to-back Loads are joined under a fresh 2-input
   Sync.  (Several lines of the in[]-setup fall in sampling gaps.) */
2831 /* set the current store: handles automatic Sync construction for Load nodes */
2833 set_store(ir_node *store) {
2834 ir_node *load, *pload, *pred, *in[2];
2836 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2837 /* Beware: due to dead code elimination, a store might become a Bad node even in
2838 the construction phase. */
2839 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2841 if (get_opt_auto_create_sync()) {
2842 /* handle non-volatile Load nodes by automatically creating Sync's */
2843 load = skip_Proj(store);
2844 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2845 pred = get_Load_mem(load);
2847 if (is_Sync(pred)) {
2848 /* a Load after a Sync: move it up */
2849 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2851 set_Load_mem(load, get_memop_mem(mem));
2852 add_Sync_pred(pred, store);
2855 pload = skip_Proj(pred);
2856 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2857 /* a Load after a Load: create a new Sync */
2858 set_Load_mem(load, get_Load_mem(pload));
2862 store = new_Sync(2, in);
/* Finally publish the (possibly rewritten) store as memory slot 0. */
2867 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* keep_alive: anchor `ka` at the End node so optimizations cannot remove
   it even when it is otherwise unreachable. */
2871 keep_alive(ir_node *ka) {
2872 add_End_keepalive(get_irg_end(current_ir_graph), ka);

2875 /* --- Useful access routines --- */
/* get_cur_block: accessor for the current graph's current block. */
2876 /* Returns the current block of the current graph. To set the current
2877 block use set_cur_block. */
2878 ir_node *get_cur_block(void) {
2879 return get_irg_current_block(current_ir_graph);
2880 } /* get_cur_block */

/* get_cur_frame_type: accessor for the current graph's frame type. */
2882 /* Returns the frame type of the current graph */
2883 ir_type *get_cur_frame_type(void) {
2884 return get_irg_frame_type(current_ir_graph);
2885 } /* get_cur_frame_type */
2888 /* ********************************************************************* */

/* init_cons: install the frontend callback used to initialize local
   variables that are read before being written.  Call once per library run. */
2891 /* call once for each run of the library */
2893 init_cons(uninitialized_local_variable_func_t *func) {
2894 default_initialize_local_variable = func;

/* irp_finalize_cons: finalize construction of every graph in the program
   and advance the whole program to phase_high. */
2898 irp_finalize_cons(void) {
2900 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2901 irg_finalize_cons(get_irp_irg(i));
2903 irp->phase_state = phase_high;
2904 } /* irp_finalize_cons */
/* Comfortable constructors, part 1: each new_X() is a thin wrapper that
   forwards to new_d_X(NULL, ...), i.e. the debug-info variant without
   debug information.  (Closing braces fall on lines missing from view.) */
2907 ir_node *new_Block(int arity, ir_node **in) {
2908 return new_d_Block(NULL, arity, in);
2910 ir_node *new_Start(void) {
2911 return new_d_Start(NULL);
2913 ir_node *new_End(void) {
2914 return new_d_End(NULL);
2916 ir_node *new_Jmp(void) {
2917 return new_d_Jmp(NULL);
2919 ir_node *new_IJmp(ir_node *tgt) {
2920 return new_d_IJmp(NULL, tgt);
2922 ir_node *new_Cond(ir_node *c) {
2923 return new_d_Cond(NULL, c);
2925 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2926 return new_d_Return(NULL, store, arity, in);
2928 ir_node *new_Const(ir_mode *mode, tarval *con) {
2929 return new_d_Const(NULL, mode, con);
2932 ir_node *new_Const_long(ir_mode *mode, long value) {
2933 return new_d_Const_long(NULL, mode, value);
/* new_Const_type derives the mode from the type instead of taking it. */
2936 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2937 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2940 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2941 return new_d_SymConst_type(NULL, mode, value, kind, type);
2943 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2944 return new_d_SymConst(NULL, mode, value, kind);
2946 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2947 return new_d_simpleSel(NULL, store, objptr, ent);
2949 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2951 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2953 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2955 return new_d_Call(NULL, store, callee, arity, in, tp);
2957 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2958 return new_d_Add(NULL, op1, op2, mode);
2960 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2961 return new_d_Sub(NULL, op1, op2, mode);
2963 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2964 return new_d_Minus(NULL, op, mode);
2966 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2967 return new_d_Mul(NULL, op1, op2, mode);
2969 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2970 return new_d_Mulh(NULL, op1, op2, mode);
2972 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2973 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2975 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2976 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2978 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2979 return new_d_Div(NULL, memop, op1, op2, mode, state);
2981 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2982 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
2984 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2985 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2987 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2988 return new_d_Abs(NULL, op, mode);
2990 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2991 return new_d_And(NULL, op1, op2, mode);
2993 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
2994 return new_d_Or(NULL, op1, op2, mode);
2996 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
2997 return new_d_Eor(NULL, op1, op2, mode);
2999 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3000 return new_d_Not(NULL, op, mode);
3002 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3003 return new_d_Shl(NULL, op, k, mode);
3005 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3006 return new_d_Shr(NULL, op, k, mode);
3008 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3009 return new_d_Shrs(NULL, op, k, mode);
3011 ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
3012 return new_d_Rot(NULL, op, k, mode);
3014 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3015 return new_d_Carry(NULL, op1, op2, mode);
3017 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3018 return new_d_Borrow(NULL, op1, op2, mode);
3020 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3021 return new_d_Cmp(NULL, op1, op2);
/* Comfortable constructors, part 2: same pattern -- new_X() forwards to
   new_d_X(NULL, ...) (no debug info). */
3023 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3024 return new_d_Conv(NULL, op, mode);
3026 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3027 return new_d_strictConv(NULL, op, mode);
3029 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3030 return new_d_Cast(NULL, op, to_tp);
3032 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3033 return new_d_Phi(NULL, arity, in, mode);
3035 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
3036 return new_d_Load(NULL, store, addr, mode);
3038 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
3039 return new_d_Store(NULL, store, addr, val);
3041 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3042 ir_where_alloc where) {
3043 return new_d_Alloc(NULL, store, size, alloc_type, where);
3045 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3046 ir_type *free_type, ir_where_alloc where) {
3047 return new_d_Free(NULL, store, ptr, size, free_type, where);
3049 ir_node *new_Sync(int arity, ir_node *in[]) {
3050 return new_d_Sync(NULL, arity, in);
3052 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3053 return new_d_Proj(NULL, arg, mode, proj);
3055 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3056 return new_d_defaultProj(NULL, arg, max_proj);
3058 ir_node *new_Tuple(int arity, ir_node **in) {
3059 return new_d_Tuple(NULL, arity, in);
3061 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3062 return new_d_Id(NULL, val, mode);
/* new_Bad's body line falls in a sampling gap. */
3064 ir_node *new_Bad(void) {
3067 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3068 return new_d_Confirm(NULL, val, bound, cmp);
3070 ir_node *new_Unknown(ir_mode *m) {
3071 return new_d_Unknown(m);
3073 ir_node *new_CallBegin(ir_node *callee) {
3074 return new_d_CallBegin(NULL, callee);
3076 ir_node *new_EndReg(void) {
3077 return new_d_EndReg(NULL);
3079 ir_node *new_EndExcept(void) {
3080 return new_d_EndExcept(NULL);
3082 ir_node *new_Break(void) {
3083 return new_d_Break(NULL);
3085 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3086 return new_d_Filter(NULL, arg, mode, proj);
3088 ir_node *new_NoMem(void) {
3089 return new_d_NoMem();
3091 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3092 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3094 ir_node *new_Psi(int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3095 return new_d_Psi(NULL, arity, conds, vals, mode);
3097 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3098 return new_d_CopyB(NULL, store, dst, src, data_type);
3100 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3101 return new_d_InstOf(NULL, store, objptr, ent);
3103 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3104 return new_d_Raise(NULL, store, obj);
3106 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3107 return new_d_Bound(NULL, store, idx, lower, upper);
3109 ir_node *new_Pin(ir_node *node) {
3110 return new_d_Pin(NULL, node);
3112 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3113 int n_outs, ir_asm_constraint *outputs,
3114 int n_clobber, ident *clobber[], ident *asm_text) {
3115 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3118 /* create a new anchor node */
3119 ir_node *new_Anchor(ir_graph *irg) {
3120 ir_node *in[anchor_last];
3121 memset(in, 0, sizeof(in));
3122 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);