2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
47 /* Uncomment to use original code instead of generated one */
48 // #define USE_ORIGINAL
50 /* when we need verifying */
/* NOTE(review): the two definitions below are presumably the two branches of
 * an (elided) conditional-compilation pair — confirm against the full file. */
52 # define IRN_VRFY_IRG(res, irg)
/* Verifying variant: run the per-node IR verifier on each fresh node. */
54 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
58 * Language dependent variable initialization callback.
/* Frontend-installed hook producing values for uninitialized locals;
 * NULL means no language-specific initialization is registered. */
60 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/*
 * Constructor-generating macros.  Each NEW_BD_* expands to a new_bd_<op>
 * function building a node in current_ir_graph, running optimize_node() and
 * the (debug-only) verifier; each NEW_RD_* expands to a new_rd_<op> wrapper
 * that temporarily switches current_ir_graph to the explicit irg and
 * delegates to the bd constructor; each NEW_D_* expands to a new_d_<op>
 * wrapper using current_ir_graph->current_block.
 * NOTE(review): parts of each expansion (return type, in[] setup, return
 * statement, closing brace) are not visible in this excerpt — no comments
 * are inserted inside the backslash-continued macro bodies, as that would
 * terminate the continuation.
 */
62 /* creates a bd constructor for a binop */
63 #define NEW_BD_BINOP(instr) \
65 new_bd_##instr(dbg_info *db, ir_node *block, \
66 ir_node *op1, ir_node *op2, ir_mode *mode) \
70 ir_graph *irg = current_ir_graph; \
73 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
74 res = optimize_node(res); \
75 IRN_VRFY_IRG(res, irg); \
79 /* creates a bd constructor for an unop */
80 #define NEW_BD_UNOP(instr) \
82 new_bd_##instr(dbg_info *db, ir_node *block, \
83 ir_node *op, ir_mode *mode) \
86 ir_graph *irg = current_ir_graph; \
87 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
88 res = optimize_node(res); \
89 IRN_VRFY_IRG(res, irg); \
93 /* creates a bd constructor for an divop */
94 #define NEW_BD_DIVOP(instr) \
96 new_bd_##instr(dbg_info *db, ir_node *block, \
97 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
101 ir_graph *irg = current_ir_graph; \
105 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
106 res->attr.divmod.exc.pin_state = state; \
107 res->attr.divmod.res_mode = mode; \
108 res->attr.divmod.no_remainder = 0; \
109 res = optimize_node(res); \
110 IRN_VRFY_IRG(res, irg); \
114 /* creates a rd constructor for a binop */
115 #define NEW_RD_BINOP(instr) \
117 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
118 ir_node *op1, ir_node *op2, ir_mode *mode) \
121 ir_graph *rem = current_ir_graph; \
122 current_ir_graph = irg; \
123 res = new_bd_##instr(db, block, op1, op2, mode); \
124 current_ir_graph = rem; \
128 /* creates a rd constructor for an unop */
129 #define NEW_RD_UNOP(instr) \
131 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
132 ir_node *op, ir_mode *mode) \
135 ir_graph *rem = current_ir_graph; \
136 current_ir_graph = irg; \
137 res = new_bd_##instr(db, block, op, mode); \
138 current_ir_graph = rem; \
142 /* creates a rd constructor for an divop */
143 #define NEW_RD_DIVOP(instr) \
145 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
146 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
149 ir_graph *rem = current_ir_graph; \
150 current_ir_graph = irg; \
151 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
152 current_ir_graph = rem; \
156 /* creates a d constructor for an binop */
157 #define NEW_D_BINOP(instr) \
159 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
160 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
163 /* creates a d constructor for an unop */
164 #define NEW_D_UNOP(instr) \
166 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
167 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
171 #include "gen_ir_cons_py.c.inl"
175 * Constructs a Block with a fixed number of predecessors.
176 * Does not set current_block. Cannot be used with automatic
177 * Phi node construction.
/* Block-local (bd) constructor for a Block node; uses current_ir_graph. */
180 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
182 ir_graph *irg = current_ir_graph;
/* A Block has no enclosing block: NULL is passed as its block argument. */
184 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
186 /* macroblock header */
/* Fresh block: alive, its own macroblock head, unlabeled, depth 0. */
189 res->attr.block.is_dead = 0;
190 res->attr.block.is_mb_head = 1;
191 res->attr.block.has_label = 0;
192 res->attr.block.irg = irg;
/* One backedge flag per predecessor, allocated on the graph's obstack. */
193 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
194 res->attr.block.in_cg = NULL;
195 res->attr.block.cg_backedge = NULL;
196 res->attr.block.extblk = NULL;
197 res->attr.block.mb_depth = 0;
198 res->attr.block.label = 0;
/* All predecessors are supplied up front, so the block is born mature. */
200 set_Block_matured(res, 1);
201 set_Block_block_visited(res, 0);
203 IRN_VRFY_IRG(res, irg);
/* Start node: mode_T tuple; individual values are selected via Proj. */
208 new_bd_Start(dbg_info *db, ir_node *block) {
210 ir_graph *irg = current_ir_graph;
212 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
214 IRN_VRFY_IRG(res, irg);
/* End node: dynamic arity (-1); keepalive edges are added later. */
219 new_bd_End(dbg_info *db, ir_node *block) {
221 ir_graph *irg = current_ir_graph;
223 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
225 IRN_VRFY_IRG(res, irg);
231 * Creates a Phi node with all predecessors. Calling this constructor
232 * is only allowed if the corresponding block is mature.
235 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
237 ir_graph *irg = current_ir_graph;
241 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block already matured, its arity must match the Phi's arity. */
243 if (get_Block_matured(block))
244 assert(get_irn_arity(block) == arity);
246 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
248 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
/* Scan predecessors for Unknown inputs; the flag is presumably set in the
 * elided loop body — confirm against the full file. */
250 for (i = arity - 1; i >= 0; --i)
251 if (is_Unknown(in[i])) {
/* Phis fed by Unknown inputs are not optimized yet. */
256 if (!has_unknown) res = optimize_node(res);
257 IRN_VRFY_IRG(res, irg);
259 /* Memory Phis in endless loops must be kept alive.
260 As we can't distinguish these easily we keep all of them alive. */
261 if (is_Phi(res) && mode == mode_M)
262 add_End_keepalive(get_irg_end(irg), res);
/* Const with an explicit type; always placed in the start block and takes
 * its mode from the tarval. */
267 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
269 ir_graph *irg = current_ir_graph;
271 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
272 res->attr.con.tv = con;
273 set_Const_type(res, tp); /* Call method because of complex assertion. */
274 res = optimize_node (res);
275 assert(get_Const_type(res) == tp);
276 IRN_VRFY_IRG(res, irg);
279 } /* new_bd_Const_type */
/* Convenience wrapper: Const with the unknown type.
 * NOTE(review): delegates to the rd-level constructor from a bd-level
 * function — presumably intentional layering; confirm against full file. */
282 new_bd_Const(dbg_info *db, tarval *con) {
283 ir_graph *irg = current_ir_graph;
285 return new_rd_Const_type (db, irg, con, firm_unknown_type);
/* Convenience wrapper: Const from a host 'long' in the given mode. */
289 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
290 ir_graph *irg = current_ir_graph;
292 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
293 } /* new_bd_Const_long */
/* Id node: forwards val unchanged; normally folded away by optimize_node. */
297 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
299 ir_graph *irg = current_ir_graph;
301 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
302 res = optimize_node(res);
303 IRN_VRFY_IRG(res, irg);
/* Proj node: selects result number 'proj' from a mode_T producer 'arg'. */
308 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
311 ir_graph *irg = current_ir_graph;
313 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
314 res->attr.proj = proj;
317 assert(get_Proj_pred(res));
318 assert(get_nodes_block(get_Proj_pred(res)));
320 res = optimize_node(res);
322 IRN_VRFY_IRG(res, irg);
/* Default Proj of a Cond: marks the Cond fragmentary, records the default
 * projection number, then builds a mode_X Proj for it. */
328 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
331 ir_graph *irg = current_ir_graph;
333 assert(arg->op == op_Cond);
334 arg->attr.cond.kind = fragmentary;
335 arg->attr.cond.default_proj = max_proj;
336 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
338 } /* new_bd_defaultProj */
/* Conv node: converts op to 'mode'; strict_flag marks strict conversions. */
341 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
343 ir_graph *irg = current_ir_graph;
345 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
346 res->attr.conv.strict = strict_flag;
347 res = optimize_node(res);
348 IRN_VRFY_IRG(res, irg);
/* Cast node: high-level type cast to to_tp; the operand's mode is kept. */
353 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
355 ir_graph *irg = current_ir_graph;
357 assert(is_atomic_type(to_tp));
359 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
360 res->attr.cast.totype = to_tp;
361 res = optimize_node(res);
362 IRN_VRFY_IRG(res, irg);
/* Tuple node: groups 'arity' values into one mode_T node. */
368 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
370 ir_graph *irg = current_ir_graph;
372 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
373 res = optimize_node (res);
374 IRN_VRFY_IRG(res, irg);
402 /** Creates a remainderless Div node. */
403 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
404 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
408 ir_graph *irg = current_ir_graph;
/* Same layout as a regular Div, but no_remainder is set to 1. */
412 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
413 res->attr.divmod.exc.pin_state = state;
414 res->attr.divmod.res_mode = mode;
415 res->attr.divmod.no_remainder = 1;
416 res = optimize_node(res);
417 IRN_VRFY_IRG(res, irg);
/* Cmp node: mode_T result; individual relations are selected via Proj. */
423 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
426 ir_graph *irg = current_ir_graph;
429 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
430 res = optimize_node(res);
431 IRN_VRFY_IRG(res, irg);
/* Jmp: unconditional control flow leaving 'block'. */
436 new_bd_Jmp(dbg_info *db, ir_node *block) {
438 ir_graph *irg = current_ir_graph;
440 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
441 res = optimize_node(res);
442 IRN_VRFY_IRG(res, irg);
/* IJmp: computed jump to the address value tgt. */
447 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
449 ir_graph *irg = current_ir_graph;
451 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
452 res = optimize_node(res);
453 IRN_VRFY_IRG(res, irg);
/* Cond: conditional branch on c; starts as a dense selector with no
 * default projection and no branch prediction. */
458 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
460 ir_graph *irg = current_ir_graph;
462 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
463 res->attr.cond.kind = dense;
464 res->attr.cond.default_proj = 0;
465 res->attr.cond.pred = COND_JMP_PRED_NONE;
466 res = optimize_node(res);
467 IRN_VRFY_IRG(res, irg);
/* Call: ins are [store, callee, args...]; args are copied to index 2 on.
 * NOTE(review): r_arity is set on an elided line (presumably arity + 2). */
473 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
474 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
478 ir_graph *irg = current_ir_graph;
481 NEW_ARR_A(ir_node *, r_in, r_arity);
484 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
486 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
488 assert((get_unknown_type() == tp) || is_Method_type(tp));
489 set_Call_type(res, tp);
490 res->attr.call.exc.pin_state = op_pin_state_pinned;
491 res->attr.call.callee_arr = NULL;
492 res = optimize_node(res);
493 IRN_VRFY_IRG(res, irg);
/* Builtin: like Call but for compiler builtins; ins are [store, args...]. */
498 new_bd_Builtin(dbg_info *db, ir_node *block, ir_node *store,
499 ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
503 ir_graph *irg = current_ir_graph;
506 NEW_ARR_A(ir_node *, r_in, r_arity);
508 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
510 res = new_ir_node(db, irg, block, op_Builtin, mode_T, r_arity, r_in);
512 assert((get_unknown_type() == tp) || is_Method_type(tp));
513 res->attr.builtin.exc.pin_state = op_pin_state_pinned;
514 res->attr.builtin.kind = kind;
515 res->attr.builtin.builtin_tp = tp;
516 res = optimize_node(res);
517 IRN_VRFY_IRG(res, irg);
519 } /* new_bd_Builtin */
/* Return: ins are [store, results...]; produces control flow (mode_X). */
523 new_bd_Return(dbg_info *db, ir_node *block,
524 ir_node *store, int arity, ir_node **in) {
528 ir_graph *irg = current_ir_graph;
531 NEW_ARR_A (ir_node *, r_in, r_arity);
533 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
534 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
535 res = optimize_node(res);
536 IRN_VRFY_IRG(res, irg);
538 } /* new_bd_Return */
/* Load: ins [store, adr]; ir_cons_flags map to pin state, volatility and
 * alignment attributes. */
541 new_bd_Load(dbg_info *db, ir_node *block,
542 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
545 ir_graph *irg = current_ir_graph;
549 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
550 res->attr.load.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
551 res->attr.load.mode = mode;
552 res->attr.load.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
553 res->attr.load.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
554 res = optimize_node(res);
555 IRN_VRFY_IRG(res, irg);
/* Store: ins [store, adr, val]; same flag handling as Load. */
560 new_bd_Store(dbg_info *db, ir_node *block,
561 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
564 ir_graph *irg = current_ir_graph;
569 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
570 res->attr.store.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
571 res->attr.store.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
572 res->attr.store.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
574 res = optimize_node(res);
575 IRN_VRFY_IRG(res, irg);
/* Alloc: ins [store, size]; 'where' selects stack vs. heap allocation. */
581 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
582 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
585 ir_graph *irg = current_ir_graph;
589 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
590 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
591 res->attr.alloc.where = where;
592 res->attr.alloc.type = alloc_type;
593 res = optimize_node(res);
594 IRN_VRFY_IRG(res, irg);
/* Free: ins [store, ptr, size]; result is the new memory state (mode_M). */
600 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
601 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
604 ir_graph *irg = current_ir_graph;
609 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
610 res->attr.free.where = where;
611 res->attr.free.type = free_type;
612 res = optimize_node(res);
613 IRN_VRFY_IRG(res, irg);
/* Sel: selects entity 'ent' relative to objptr; ins [store, objptr, idx...].
 * Result mode is mode_P_code for method entities, mode_P_data otherwise. */
619 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
620 int arity, ir_node **in, ir_entity *ent) {
624 ir_graph *irg = current_ir_graph;
625 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
627 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
630 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
633 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
635 * Sel's can select functions which should be of mode mode_P_code.
637 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
638 res->attr.sel.entity = ent;
639 res = optimize_node(res);
640 IRN_VRFY_IRG(res, irg);
/* SymConst: symbolic constant (entity address/offset, type tag/size/...). */
645 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
646 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
647 ir_graph *irg = current_ir_graph;
648 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
650 res->attr.symc.kind = symkind;
651 res->attr.symc.sym = value;
652 res->attr.symc.tp = tp;
654 res = optimize_node(res);
655 IRN_VRFY_IRG(res, irg);
657 } /* new_bd_SymConst_type */
/* Sync: created with dynamic arity; predecessors are appended later. */
660 new_bd_Sync(dbg_info *db, ir_node *block) {
662 ir_graph *irg = current_ir_graph;
664 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
665 /* no need to call optimize node here, Sync are always created with no predecessors */
666 IRN_VRFY_IRG(res, irg);
/* Confirm: asserts the relation 'val cmp bound' holds for val. */
672 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
673 ir_node *in[2], *res;
674 ir_graph *irg = current_ir_graph;
678 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
679 res->attr.confirm.cmp = cmp;
680 res = optimize_node(res);
681 IRN_VRFY_IRG(res, irg);
683 } /* new_bd_Confirm */
/* Unknown: placeholder value of mode m, placed in the start block. */
686 new_bd_Unknown(ir_mode *m) {
688 ir_graph *irg = current_ir_graph;
690 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
691 res = optimize_node(res);
693 } /* new_bd_Unknown */
/* CallBegin: interprocedural-view node; single input is the Call's pointer. */
697 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
700 ir_graph *irg = current_ir_graph;
702 in[0] = get_Call_ptr(call);
703 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
704 /* res->attr.callbegin.irg = irg; */
705 res->attr.callbegin.call = call;
706 res = optimize_node(res);
707 IRN_VRFY_IRG(res, irg);
709 } /* new_bd_CallBegin */
/* EndReg: interprocedural end of regular control flow; registered in irg. */
712 new_bd_EndReg(dbg_info *db, ir_node *block) {
714 ir_graph *irg = current_ir_graph;
716 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
717 set_irg_end_reg(irg, res);
718 IRN_VRFY_IRG(res, irg);
720 } /* new_bd_EndReg */
/* EndExcept: interprocedural end of exceptional control flow. */
723 new_bd_EndExcept(dbg_info *db, ir_node *block) {
725 ir_graph *irg = current_ir_graph;
727 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
728 set_irg_end_except(irg, res);
729 IRN_VRFY_IRG (res, irg);
731 } /* new_bd_EndExcept */
/* Break: control flow node of the interprocedural view. */
734 new_bd_Break(dbg_info *db, ir_node *block) {
736 ir_graph *irg = current_ir_graph;
738 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
739 res = optimize_node(res);
740 IRN_VRFY_IRG(res, irg);
/* Filter: interprocedural analogue of Proj selecting 'proj' from arg. */
745 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
748 ir_graph *irg = current_ir_graph;
750 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
751 res->attr.filter.proj = proj;
752 res->attr.filter.in_cg = NULL;
753 res->attr.filter.backedge = NULL;
756 assert(get_Proj_pred(res));
757 assert(get_nodes_block(get_Proj_pred(res)));
759 res = optimize_node(res);
760 IRN_VRFY_IRG(res, irg);
762 } /* new_bd_Filter */
/* Mux: conditional value select; ins [sel, ir_false, ir_true]. */
766 new_bd_Mux(dbg_info *db, ir_node *block,
767 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
770 ir_graph *irg = current_ir_graph;
776 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
779 res = optimize_node(res);
780 IRN_VRFY_IRG(res, irg);
/* CopyB: block copy of 'data_type' from src to dst; ins [store, dst, src]. */
786 new_bd_CopyB(dbg_info *db, ir_node *block,
787 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
790 ir_graph *irg = current_ir_graph;
796 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
798 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
799 res->attr.copyb.data_type = data_type;
800 res = optimize_node(res);
801 IRN_VRFY_IRG(res, irg);
/* InstOf: dynamic type test of objptr against 'type'; ins [store, objptr]. */
806 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
807 ir_node *objptr, ir_type *type) {
810 ir_graph *irg = current_ir_graph;
814 res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
815 res->attr.instof.exc.pin_state = op_pin_state_floats;
816 res->attr.instof.type = type;
817 res = optimize_node(res);
818 IRN_VRFY_IRG(res, irg);
820 } /* new_bd_InstOf */
/* Raise: raises exception object obj; ins [store, obj]. */
823 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
826 ir_graph *irg = current_ir_graph;
830 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
831 res = optimize_node(res);
832 IRN_VRFY_IRG(res, irg);
/* Bound: bounds check for idx; ins [store, idx, lower, upper]. */
837 new_bd_Bound(dbg_info *db, ir_node *block,
838 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
841 ir_graph *irg = current_ir_graph;
847 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
848 res->attr.bound.exc.pin_state = op_pin_state_pinned;
849 res = optimize_node(res);
850 IRN_VRFY_IRG(res, irg);
/* Pin: pins 'node' to this block; result keeps the operand's mode. */
856 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
858 ir_graph *irg = current_ir_graph;
860 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
861 res = optimize_node(res);
862 IRN_VRFY_IRG(res, irg);
/* ASM: inline assembler node. Constraint and clobber arrays are copied onto
 * the graph's obstack so the node owns its own storage. */
868 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
869 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
871 ir_graph *irg = current_ir_graph;
873 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
874 res->attr.assem.pin_state = op_pin_state_pinned;
875 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
876 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
877 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
878 res->attr.assem.asm_text = asm_text;
880 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
881 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
882 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
884 res = optimize_node(res);
885 IRN_VRFY_IRG(res, irg);
889 /* --------------------------------------------- */
890 /* private interfaces, for professional use only */
891 /* --------------------------------------------- */
894 /* Constructs a Block with a fixed number of predecessors.
895 Does not set current_block. Can not be used with automatic
896 Phi node construction. */
/* rd-level constructors: each saves current_ir_graph, switches to the
 * explicit irg, delegates to the bd-level constructor, and restores. */
898 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
899 ir_graph *rem = current_ir_graph;
902 current_ir_graph = irg;
903 res = new_bd_Block(db, arity, in);
904 current_ir_graph = rem;
910 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
911 ir_graph *rem = current_ir_graph;
914 current_ir_graph = irg;
915 res = new_bd_Start(db, block);
916 current_ir_graph = rem;
922 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
924 ir_graph *rem = current_ir_graph;
926 current_ir_graph = irg;
927 res = new_bd_End(db, block);
928 current_ir_graph = rem;
934 /* Creates a Phi node with all predecessors. Calling this constructor
935 is only allowed if the corresponding block is mature. */
937 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
939 ir_graph *rem = current_ir_graph;
941 current_ir_graph = irg;
942 res = new_bd_Phi(db, block,arity, in, mode);
943 current_ir_graph = rem;
949 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
951 ir_graph *rem = current_ir_graph;
953 current_ir_graph = irg;
954 res = new_bd_Const_type(db, con, tp);
955 current_ir_graph = rem;
958 } /* new_rd_Const_type */
/* Const with the unknown type; the commented-out line is the alternative
 * delegating implementation kept under USE_ORIGINAL. */
961 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
963 //#ifdef USE_ORIGINAL
964 ir_graph *rem = current_ir_graph;
966 current_ir_graph = irg;
967 res = new_bd_Const_type(db, con, firm_unknown_type);
968 current_ir_graph = rem;
970 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
/* Const from a host 'long' in the given mode. */
977 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
978 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
979 } /* new_rd_Const_long */
/* rd-level wrappers: switch current_ir_graph to irg, delegate, restore. */
983 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
985 ir_graph *rem = current_ir_graph;
987 current_ir_graph = irg;
988 res = new_bd_Id(db, block, val, mode);
989 current_ir_graph = rem;
995 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
998 ir_graph *rem = current_ir_graph;
1000 current_ir_graph = irg;
1001 res = new_bd_Proj(db, block, arg, mode, proj);
1002 current_ir_graph = rem;
1009 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1012 ir_graph *rem = current_ir_graph;
1014 current_ir_graph = irg;
1015 res = new_bd_defaultProj(db, block, arg, max_proj);
1016 current_ir_graph = rem;
1019 } /* new_rd_defaultProj */
/* Non-strict conversion: strict_flag = 0. */
1023 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1025 ir_graph *rem = current_ir_graph;
1027 current_ir_graph = irg;
1028 res = new_bd_Conv(db, block, op, mode, 0);
1029 current_ir_graph = rem;
/* Strict conversion: strict_flag = 1. */
1036 new_rd_strictConv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1038 ir_graph *rem = current_ir_graph;
1040 current_ir_graph = irg;
1041 res = new_bd_Conv(db, block, op, mode, 1);
1042 current_ir_graph = rem;
1045 } /* new_rd_strictConv */
1048 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1050 ir_graph *rem = current_ir_graph;
1052 current_ir_graph = irg;
1053 res = new_bd_Cast(db, block, op, to_tp);
1054 current_ir_graph = rem;
1061 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1063 ir_graph *rem = current_ir_graph;
1065 current_ir_graph = irg;
1066 res = new_bd_Tuple(db, block, arity, in);
1067 current_ir_graph = rem;
1070 } /* new_rd_Tuple */
/* Instantiate macro-generated rd-level constructors for these ops. */
1079 NEW_RD_DIVOP(DivMod)
1093 NEW_RD_BINOP(Borrow)
1096 /* creates a rd constructor for an divRL */
1097 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1098 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1101 ir_graph *rem = current_ir_graph;
1102 current_ir_graph = irg;
1103 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1104 current_ir_graph = rem;
/* rd-level wrappers (continued): switch graph, delegate to bd, restore. */
1110 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1111 ir_node *op1, ir_node *op2) {
1113 ir_graph *rem = current_ir_graph;
1115 current_ir_graph = irg;
1116 res = new_bd_Cmp(db, block, op1, op2);
1117 current_ir_graph = rem;
1123 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1125 ir_graph *rem = current_ir_graph;
1127 current_ir_graph = irg;
1128 res = new_bd_Jmp(db, block);
1129 current_ir_graph = rem;
1135 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1137 ir_graph *rem = current_ir_graph;
1139 current_ir_graph = irg;
1140 res = new_bd_IJmp(db, block, tgt);
1141 current_ir_graph = rem;
1147 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1149 ir_graph *rem = current_ir_graph;
1151 current_ir_graph = irg;
1152 res = new_bd_Cond(db, block, c);
1153 current_ir_graph = rem;
1160 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1161 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1163 ir_graph *rem = current_ir_graph;
1165 current_ir_graph = irg;
1166 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1167 current_ir_graph = rem;
1173 new_rd_Builtin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1174 ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
1176 ir_graph *rem = current_ir_graph;
1178 current_ir_graph = irg;
1179 res = new_bd_Builtin(db, block, store, kind, arity, in, tp);
1180 current_ir_graph = rem;
1183 } /* new_rd_Builtin */
1187 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1188 ir_node *store, int arity, ir_node **in) {
1190 ir_graph *rem = current_ir_graph;
1192 current_ir_graph = irg;
1193 res = new_bd_Return(db, block, store, arity, in);
1194 current_ir_graph = rem;
1197 } /* new_rd_Return */
1200 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1201 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1203 ir_graph *rem = current_ir_graph;
1205 current_ir_graph = irg;
1206 res = new_bd_Load(db, block, store, adr, mode, flags);
1207 current_ir_graph = rem;
1213 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1214 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1216 ir_graph *rem = current_ir_graph;
1218 current_ir_graph = irg;
1219 res = new_bd_Store(db, block, store, adr, val, flags);
1220 current_ir_graph = rem;
1223 } /* new_rd_Store */
/* rd-level wrappers (continued). */
1227 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1228 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1230 ir_graph *rem = current_ir_graph;
1232 current_ir_graph = irg;
1233 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1234 current_ir_graph = rem;
1237 } /* new_rd_Alloc */
1241 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1242 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1244 ir_graph *rem = current_ir_graph;
1246 current_ir_graph = irg;
1247 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1248 current_ir_graph = rem;
/* Sel with no index inputs (arity 0): plain member selection. */
1255 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1256 ir_node *store, ir_node *objptr, ir_entity *ent) {
1258 ir_graph *rem = current_ir_graph;
1260 current_ir_graph = irg;
1261 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1262 current_ir_graph = rem;
1265 } /* new_rd_simpleSel */
1269 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1270 int arity, ir_node **in, ir_entity *ent) {
1272 ir_graph *rem = current_ir_graph;
1274 current_ir_graph = irg;
1275 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1276 current_ir_graph = rem;
1283 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1284 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1286 ir_graph *rem = current_ir_graph;
1288 current_ir_graph = irg;
1289 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1290 current_ir_graph = rem;
1293 } /* new_rd_SymConst_type */
/* SymConst with the unknown type. */
1296 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1297 symconst_symbol value, symconst_kind symkind) {
1298 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1299 } /* new_rd_SymConst */
1301 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1302 symconst_symbol sym;
1303 sym.entity_p = symbol;
1304 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1305 } /* new_rd_SymConst_addr_ent */
1307 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1308 symconst_symbol sym;
1309 sym.entity_p = symbol;
1310 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1311 } /* new_rd_SymConst_ofs_ent */
1313 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1314 symconst_symbol sym;
1315 sym.ident_p = symbol;
1316 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1317 } /* new_rd_SymConst_addr_name */
/* SymConst for the runtime type tag of type 'symbol'. */
1319 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1320 symconst_symbol sym;
1321 sym.type_p = symbol;
1322 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1323 } /* new_rd_SymConst_type_tag */
/* SymConst for the size of type 'symbol'. */
1325 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1326 symconst_symbol sym;
1327 sym.type_p = symbol;
1328 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1329 } /* new_rd_SymConst_size */
/* SymConst for the alignment of type 'symbol'. */
1331 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1332 symconst_symbol sym;
1333 sym.type_p = symbol;
1334 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1335 } /* new_rd_SymConst_align */
/* rd Sync: builds an empty Sync in irg, then appends all predecessors. */
1338 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1340 ir_graph *rem = current_ir_graph;
1343 current_ir_graph = irg;
1344 res = new_bd_Sync(db, block);
1345 current_ir_graph = rem;
1347 for (i = 0; i < arity; ++i)
1348 add_Sync_pred(res, in[i]);
1355 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1357 ir_graph *rem = current_ir_graph;
1359 current_ir_graph = irg;
1360 res = new_bd_Confirm(db, block, val, bound, cmp);
1361 current_ir_graph = rem;
1364 } /* new_rd_Confirm */
1367 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1369 ir_graph *rem = current_ir_graph;
1371 current_ir_graph = irg;
1372 res = new_bd_Unknown(m);
1373 current_ir_graph = rem;
1376 } /* new_rd_Unknown */
1380 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1382 ir_graph *rem = current_ir_graph;
1384 current_ir_graph = irg;
1385 res = new_bd_CallBegin(db, block, call);
1386 current_ir_graph = rem;
1389 } /* new_rd_CallBegin */
/* NOTE(review): unlike the other rd wrappers, EndReg/EndExcept build the
 * node directly with the explicit irg instead of delegating to a bd
 * constructor — presumably intentional; confirm against the full file. */
1392 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1395 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1396 set_irg_end_reg(irg, res);
1397 IRN_VRFY_IRG(res, irg);
1399 } /* new_rd_EndReg */
1402 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1405 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1406 set_irg_end_except(irg, res);
1407 IRN_VRFY_IRG (res, irg);
1409 } /* new_rd_EndExcept */
1412 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1414 ir_graph *rem = current_ir_graph;
1416 current_ir_graph = irg;
1417 res = new_bd_Break(db, block);
1418 current_ir_graph = rem;
1421 } /* new_rd_Break */
/* Remaining rd-level wrappers: switch current_ir_graph, delegate, restore. */
1424 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1427 ir_graph *rem = current_ir_graph;
1429 current_ir_graph = irg;
1430 res = new_bd_Filter(db, block, arg, mode, proj);
1431 current_ir_graph = rem;
1434 } /* new_rd_Filter */
1438 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1439 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1441 ir_graph *rem = current_ir_graph;
1443 current_ir_graph = irg;
1444 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1445 current_ir_graph = rem;
1451 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1452 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1454 ir_graph *rem = current_ir_graph;
1456 current_ir_graph = irg;
1457 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1458 current_ir_graph = rem;
1461 } /* new_rd_CopyB */
1464 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1465 ir_node *objptr, ir_type *type) {
1467 ir_graph *rem = current_ir_graph;
1469 current_ir_graph = irg;
1470 res = new_bd_InstOf(db, block, store, objptr, type);
1471 current_ir_graph = rem;
1474 } /* new_rd_InstOf */
1477 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1479 ir_graph *rem = current_ir_graph;
1481 current_ir_graph = irg;
1482 res = new_bd_Raise(db, block, store, obj);
1483 current_ir_graph = rem;
1486 } /* new_rd_Raise */
1488 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1489 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1491 ir_graph *rem = current_ir_graph;
1493 current_ir_graph = irg;
1494 res = new_bd_Bound(db, block, store, idx, lower, upper);
1495 current_ir_graph = rem;
1498 } /* new_rd_Bound */
1501 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1503 ir_graph *rem = current_ir_graph;
1505 current_ir_graph = irg;
1506 res = new_bd_Pin(db, block, node);
1507 current_ir_graph = rem;
1513 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1514 int arity, ir_node *in[], ir_asm_constraint *inputs,
1515 int n_outs, ir_asm_constraint *outputs,
1516 int n_clobber, ident *clobber[], ident *asm_text) {
1518 ir_graph *rem = current_ir_graph;
1520 current_ir_graph = irg;
1521 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1522 current_ir_graph = rem;
/* Convenience constructors without debug info: each new_r_* function
   simply forwards to its new_rd_* counterpart, passing NULL as the
   dbg_info.  Exceptions: new_r_Bad and new_r_NoMem return the graph's
   pre-built singleton Bad / NoMem node, new_r_simpleSel forwards to
   new_rd_Sel with an empty index array, and new_r_Unknown takes no
   dbg_info parameter in its rd variant either. */
1529 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1530 return new_rd_Block(NULL, irg, arity, in);
1532 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1533 return new_rd_Start(NULL, irg, block);
1535 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1536 return new_rd_End(NULL, irg, block);
1538 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1539 return new_rd_Jmp(NULL, irg, block);
1541 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1542 return new_rd_IJmp(NULL, irg, block, tgt);
1544 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1545 return new_rd_Cond(NULL, irg, block, c);
1547 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1548 ir_node *store, int arity, ir_node **in) {
1549 return new_rd_Return(NULL, irg, block, store, arity, in);
1552 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
1553 return new_rd_Const(NULL, irg, con);
1555 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
1556 return new_rd_Const_long(NULL, irg, mode, value);
1558 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
1559 return new_rd_Const_type(NULL, irg, con, tp);
1561 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1562 symconst_symbol value, symconst_kind symkind) {
1563 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
1565 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1566 ir_node *objptr, ir_entity *ent) {
1567 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1570 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1571 ir_node *objptr, int n_index, ir_node **index,
1573 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1576 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1577 ir_node *callee, int arity, ir_node **in,
1579 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1581 ir_node *new_r_Builtin(ir_graph *irg, ir_node *block, ir_node *store,
1582 ir_builtin_kind kind, int arity, ir_node **in,
1584 return new_rd_Builtin(NULL, irg, block, store, kind, arity, in, tp);
1587 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1588 ir_node *op1, ir_node *op2, ir_mode *mode) {
1589 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1591 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1592 ir_node *op1, ir_node *op2, ir_mode *mode) {
1593 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1595 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1596 ir_node *op, ir_mode *mode) {
1597 return new_rd_Minus(NULL, irg, block, op, mode);
1599 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1600 ir_node *op1, ir_node *op2, ir_mode *mode) {
1601 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1603 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1604 ir_node *op1, ir_node *op2, ir_mode *mode) {
1605 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1608 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1609 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1610 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1612 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1613 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1614 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1616 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1617 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1618 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1620 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1621 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1622 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1624 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1625 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1626 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1629 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1630 ir_node *op, ir_mode *mode) {
1631 return new_rd_Abs(NULL, irg, block, op, mode);
1633 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1634 ir_node *op1, ir_node *op2, ir_mode *mode) {
1635 return new_rd_And(NULL, irg, block, op1, op2, mode);
1637 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1638 ir_node *op1, ir_node *op2, ir_mode *mode) {
1639 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1641 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1642 ir_node *op1, ir_node *op2, ir_mode *mode) {
1643 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1645 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1646 ir_node *op, ir_mode *mode) {
1647 return new_rd_Not(NULL, irg, block, op, mode);
1649 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1650 ir_node *op, ir_node *k, ir_mode *mode) {
1651 return new_rd_Shl(NULL, irg, block, op, k, mode);
1653 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1654 ir_node *op, ir_node *k, ir_mode *mode) {
1655 return new_rd_Shr(NULL, irg, block, op, k, mode);
1657 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1658 ir_node *op, ir_node *k, ir_mode *mode) {
1659 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1661 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1662 ir_node *op, ir_node *k, ir_mode *mode) {
1663 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1665 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1666 ir_node *op, ir_node *k, ir_mode *mode) {
1667 return new_rd_Carry(NULL, irg, block, op, k, mode);
1669 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1670 ir_node *op, ir_node *k, ir_mode *mode) {
1671 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1673 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1674 ir_node *op1, ir_node *op2) {
1675 return new_rd_Cmp(NULL, irg, block, op1, op2);
1677 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1678 ir_node *op, ir_mode *mode) {
1679 return new_rd_Conv(NULL, irg, block, op, mode);
1682 ir_node *new_r_strictConv(ir_graph *irg, ir_node *block,
1683 ir_node *op, ir_mode *mode) {
1684 return new_rd_strictConv(NULL, irg, block, op, mode);
1686 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1687 return new_rd_Cast(NULL, irg, block, op, to_tp);
1689 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1690 ir_node **in, ir_mode *mode) {
1691 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1694 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1695 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1696 return new_rd_Load(NULL, irg, block, store, adr, mode, flags);
1698 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1699 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1700 return new_rd_Store(NULL, irg, block, store, adr, val, flags);
1703 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1704 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1705 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1708 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1709 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1710 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1713 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1714 return new_rd_Sync(NULL, irg, block, arity, in);
1717 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1718 ir_mode *mode, long proj) {
1719 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1722 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1724 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1727 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1728 int arity, ir_node **in) {
1729 return new_rd_Tuple(NULL, irg, block, arity, in );
1731 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1732 ir_node *val, ir_mode *mode) {
1733 return new_rd_Id(NULL, irg, block, val, mode);
/* Bad is a singleton per graph — no construction needed. */
1736 ir_node *new_r_Bad(ir_graph *irg) {
1737 return get_irg_bad(irg);
1740 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1741 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1743 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1744 return new_rd_Unknown(irg, m);
1747 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1748 return new_rd_CallBegin(NULL, irg, block, callee);
1750 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1751 return new_rd_EndReg(NULL, irg, block);
1753 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1754 return new_rd_EndExcept(NULL, irg, block);
1756 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1757 return new_rd_Break(NULL, irg, block);
1759 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1760 ir_mode *mode, long proj) {
1761 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
/* NoMem is a singleton per graph — no construction needed. */
1763 ir_node *new_r_NoMem(ir_graph *irg) {
1764 return get_irg_no_mem(irg);
1767 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1768 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1769 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1772 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1773 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1774 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1776 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1778 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1780 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1781 ir_node *store, ir_node *obj) {
1782 return new_rd_Raise(NULL, irg, block, store, obj);
1784 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1785 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1786 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1789 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1790 return new_rd_Pin(NULL, irg, block, node);
1793 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1794 int arity, ir_node *in[], ir_asm_constraint *inputs,
1795 int n_outs, ir_asm_constraint *outputs,
1796 int n_clobber, ident *clobber[], ident *asm_text) {
1797 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1800 /** ********************/
1801 /** public interfaces */
1802 /** construction tools */
1808 * - create a new Start node in the current block
1810 * @return s - pointer to the created Start node
/* new_d_Start: builds the Start node directly in the current block of
   current_ir_graph (mode_T, no predecessors), then optimizes and
   verifies it. */
1815 new_d_Start(dbg_info *db) {
1818 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1819 op_Start, mode_T, 0, NULL);
1821 res = optimize_node(res);
1822 IRN_VRFY_IRG(res, current_ir_graph);
/* new_d_End: builds the End node (mode_X, dynamic arity -1) in the
   current block, optimizes and verifies it. */
1827 new_d_End(dbg_info *db) {
1829 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1830 op_End, mode_X, -1, NULL);
1831 res = optimize_node(res);
1832 IRN_VRFY_IRG(res, current_ir_graph);
1837 /* Constructs a Block with a fixed number of predecessors.
1838 Does set current_block. Can be used with automatic Phi
1839 node construction. */
1841 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1844 int has_unknown = 0;
/* One of the two constructor paths is selected by USE_ORIGINAL (see top
   of file); both build the Block from the given predecessors. */
1847 res = new_bd_Block(db, arity, in);
1849 res = new_rd_Block(db, current_ir_graph, arity, in);
1852 /* Create and initialize array for Phi-node construction. */
1853 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1854 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1855 current_ir_graph->n_loc);
1856 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan predecessors for Unknown nodes: if any is present the block is
   not optimized yet (see has_unknown below). */
1859 for (i = arity-1; i >= 0; i--)
1860 if (is_Unknown(in[i])) {
1865 if (!has_unknown) res = optimize_node(res);
/* The freshly built block becomes the current block for subsequent
   new_d_* constructors. */
1867 current_ir_graph->current_block = res;
1869 IRN_VRFY_IRG(res, current_ir_graph);
1875 /* ***********************************************************************/
1876 /* Methods necessary for automatic Phi node creation */
1878 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1879 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1880 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1881 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1883 Call Graph: ( A ---> B == A "calls" B)
1885 get_value mature_immBlock
1893 get_r_value_internal |
1897 new_rd_Phi0 new_rd_Phi_in
1899 * *************************************************************************** */
1901 /** Creates a Phi node with 0 predecessors. */
1902 static inline ir_node *
1903 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1906 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1907 IRN_VRFY_IRG(res, irg);
1913 * Internal constructor of a Phi node by a phi_merge operation.
1915 * @param irg the graph on which the Phi will be constructed
1916 * @param block the block in which the Phi will be constructed
1917 * @param mode the mode of the Phi node
1918 * @param in the input array of the phi node
1919 * @param ins number of elements in the input array
1920 * @param phi0 if non-NULL: the Phi0 node in the same block that represents
1921 * the value for which the new Phi is constructed
1923 static inline ir_node *
1924 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1925 ir_node **in, int ins, ir_node *phi0) {
1927 ir_node *res, *known;
1929 /* Allocate a new node on the obstack. The allocation copies the in
1931 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1932 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1934 /* This loop checks whether the Phi has more than one predecessor.
1935 If so, it is a real Phi node and we break the loop. Else the
1936 Phi node merges the same definition on several paths and therefore
1938 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
1940 for (i = ins - 1; i >= 0; --i) {
1943 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1945 /* Optimize self referencing Phis: We can't detect them yet properly, as
1946 they still refer to the Phi0 they will replace. So replace right now. */
1947 if (phi0 && in[i] == phi0)
1950 if (in[i] == res || in[i] == known)
1959 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* The speculatively allocated Phi is discarded again (it is the newest
   node on the obstack, so it can simply be freed). */
1962 edges_node_deleted(res, current_ir_graph);
1963 obstack_free(current_ir_graph->obst, res);
1964 if (is_Phi(known)) {
1965 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1966 order, an enclosing Phi node may become superfluous. */
1967 res = optimize_in_place_2(known);
1969 exchange(known, res);
1974 /* An undefined value, e.g., in unreachable code. */
1978 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1979 IRN_VRFY_IRG(res, irg);
1980 /* Memory Phis in endless loops must be kept alive.
1981 As we can't distinguish these easily we keep all of them alive. */
1982 if (is_Phi(res) && mode == mode_M)
1983 add_End_keepalive(get_irg_end(irg), res);
1987 } /* new_rd_Phi_in */
1990 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1992 #if PRECISE_EXC_CONTEXT
1994 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1997 * Construct a new frag_array for node n.
1998 * Copy the content from the current graph_arr of the corresponding block:
1999 * this is the current state.
2000 * Set ProjM(n) as current memory state.
2001 * Further the last entry in frag_arr of current block points to n. This
2002 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2004 static inline ir_node **new_frag_arr(ir_node *n) {
2008 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2009 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2010 sizeof(ir_node *)*current_ir_graph->n_loc);
2012 /* turn off optimization before allocating Proj nodes, as res isn't
2014 opt = get_opt_optimize(); set_optimize(0);
2015 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* Slot 0 holds the memory state after n: the exception-memory Proj for
   Call/CopyB, the regular memory Proj otherwise.  The asserts verify
   that all remaining fragile ops share the same memory Proj number. */
2017 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2018 } else if (is_CopyB(n)) {
2019 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2021 assert((pn_Quot_M == pn_DivMod_M) &&
2022 (pn_Quot_M == pn_Div_M) &&
2023 (pn_Quot_M == pn_Mod_M) &&
2024 (pn_Quot_M == pn_Load_M) &&
2025 (pn_Quot_M == pn_Store_M) &&
2026 (pn_Quot_M == pn_Alloc_M) &&
2027 (pn_Quot_M == pn_Bound_M));
2028 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Link n into the per-block chain of fragile ops via the last slot. */
2032 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2034 } /* new_frag_arr */
2037 * Returns the frag_arr from a node.
2039 static inline ir_node **get_frag_arr(ir_node *n) {
/* The frag_arr is stored in an opcode-specific attribute slot; dispatch
   on the opcode to find it. */
2040 switch (get_irn_opcode(n)) {
2042 return n->attr.call.exc.frag_arr;
2044 return n->attr.alloc.exc.frag_arr;
2046 return n->attr.load.exc.frag_arr;
2048 return n->attr.store.exc.frag_arr;
2050 return n->attr.except.frag_arr;
2052 } /* get_frag_arr */
/* Writes val into frag_arr[pos] and propagates it along the chain of
   frag arrays (following the last-slot link) as long as the slot is
   still empty — set_frag_value never overwrites an existing entry. */
2055 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2056 #ifdef DEBUG_libfirm
/* Debug builds bound the chain walk to catch endless recursion. */
2059 for (i = 1024; i >= 0; --i)
2064 if (frag_arr[pos] == NULL)
2065 frag_arr[pos] = val;
2066 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
2067 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2068 assert(arr != frag_arr && "Endless recursion detected");
2073 assert(!"potential endless recursion in set_frag_value");
2074 } /* set_frag_value */
/* Looks up the value at pos as seen after the fragile op cfOp.  If the
   frag array has no entry, falls back to the block's graph_arr (building
   a Phi if needed) or to the generic get_r_value_internal(). */
2077 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2081 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
2083 frag_arr = get_frag_arr(cfOp);
2084 res = frag_arr[pos];
2086 if (block->attr.block.graph_arr[pos] != NULL) {
2087 /* There was a set_value() after the cfOp and no get_value() before that
2088 set_value(). We must build a Phi node now. */
2089 if (block->attr.block.is_matured) {
2090 int ins = get_irn_arity(block);
2092 NEW_ARR_A(ir_node *, nin, ins);
2093 res = phi_merge(block, pos, mode, nin, ins);
/* Block not matured yet: create a Phi0 and queue it on the block's
   phi list, to be completed by mature_immBlock(). */
2095 res = new_rd_Phi0(current_ir_graph, block, mode);
2096 res->attr.phi.u.pos = pos;
2097 res->attr.phi.next = block->attr.block.phis;
2098 block->attr.block.phis = res;
2100 assert(res != NULL);
2101 /* It's a Phi, we can write this into all graph_arrs with NULL */
2102 set_frag_value(block->attr.block.graph_arr, pos, res);
2104 res = get_r_value_internal(block, pos, mode);
2105 set_frag_value(block->attr.block.graph_arr, pos, res);
2109 } /* get_r_frag_value_internal */
2110 #endif /* PRECISE_EXC_CONTEXT */
2113 * Check whether a control flownode cf_pred represents an exception flow.
2115 * @param cf_pred the control flow node
2116 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2118 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2120 * Note: all projections from a raise are "exceptional control flow" so we handle it
2121 * like a normal Jmp, because there is no "regular" one.
2122 * That's why Raise is no "fragile_op"!
2124 if (is_fragile_op(prev_cf_op)) {
2125 if (is_Proj(cf_pred)) {
2126 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2127 /* the regular control flow, NO exception */
2130 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2133 /* Hmm, exception but not a Proj? */
2134 assert(!"unexpected condition: fragile op without a proj");
2138 } /* is_exception_flow */
2141 * Computes the predecessors for the real phi node, and then
2142 * allocates and returns this node. The routine called to allocate the
2143 * node might optimize it away and return a real value.
2144 * This function must be called with an in-array of proper size.
2147 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2148 ir_node *prevBlock, *res, *phi0, *phi0_all;
2151 /* If this block has no value at pos create a Phi0 and remember it
2152 in graph_arr to break recursions.
2153 Else we may not set graph_arr as there a later value is remembered. */
2155 if (block->attr.block.graph_arr[pos] == NULL) {
2156 ir_graph *irg = current_ir_graph;
2158 if (block == get_irg_start_block(irg)) {
2159 /* Collapsing to Bad tarvals is no good idea.
2160 So we call a user-supplied routine here that deals with this case as
2161 appropriate for the given language. Sadly the only help we can give
2162 here is the position.
2164 Even if all variables are defined before use, it can happen that
2165 we get to the start block, if a Cond has been replaced by a tuple
2166 (bad, jmp). In this case we call the function needlessly, eventually
2167 generating a non-existent error.
2168 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2171 if (default_initialize_local_variable != NULL) {
2172 ir_node *rem = get_cur_block();
2174 set_cur_block(block);
/* pos - 1: graph_arr slot 0 is reserved, user-visible variable
   numbers start at 0. */
2175 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2179 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2180 /* We don't need to care about exception ops in the start block.
2181 There are none by definition. */
2182 return block->attr.block.graph_arr[pos];
/* Not the start block: break the recursion with a Phi0 placeholder. */
2184 phi0 = new_rd_Phi0(irg, block, mode);
2185 block->attr.block.graph_arr[pos] = phi0;
2186 #if PRECISE_EXC_CONTEXT
2187 if (get_opt_precise_exc_context()) {
2188 /* Set graph_arr for fragile ops. Also here we should break recursion.
2189 We could choose a cyclic path through a cfop. But the recursion would
2190 break at some point. */
2191 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2197 /* This loop goes to all predecessor blocks of the block the Phi node
2198 is in and there finds the operands of the Phi node by calling
2199 get_r_value_internal. */
2200 for (i = 1; i <= ins; ++i) {
2201 ir_node *cf_pred = block->in[i];
2202 ir_node *prevCfOp = skip_Proj(cf_pred);
2204 if (is_Bad(prevCfOp)) {
2205 /* In case a Cond has been optimized we would get right to the start block
2206 with an invalid definition. */
2207 nin[i-1] = new_Bad();
2210 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2212 if (!is_Bad(prevBlock)) {
2213 #if PRECISE_EXC_CONTEXT
/* On an exception edge the value after the fragile op differs from
   the block-end value — read it from the frag array instead. */
2214 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2215 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2216 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2219 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2221 nin[i-1] = new_Bad();
2225 /* We want to pass the Phi0 node to the constructor: this finds additional
2226 optimization possibilities.
2227 The Phi0 node either is allocated in this function, or it comes from
2228 a former call to get_r_value_internal(). In this case we may not yet
2229 exchange phi0, as this is done in mature_immBlock(). */
2231 phi0_all = block->attr.block.graph_arr[pos];
2232 if (! is_Phi0(phi0_all) ||
2233 get_irn_arity(phi0_all) != 0 ||
2234 get_nodes_block(phi0_all) != block)
2240 /* After collecting all predecessors into the array nin a new Phi node
2241 with these predecessors is created. This constructor contains an
2242 optimization: If all predecessors of the Phi node are identical it
2243 returns the only operand instead of a new Phi node. */
2244 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2246 /* In case we allocated a Phi0 node at the beginning of this procedure,
2247 we need to exchange this Phi0 with the real Phi. */
2249 exchange(phi0, res);
2250 block->attr.block.graph_arr[pos] = res;
2251 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2252 only an optimization. */
2259 * This function returns the last definition of a value. In case
2260 * this value was last defined in a previous block, Phi nodes are
2261 * inserted. If the part of the firm graph containing the definition
2262 * is not yet constructed, a dummy Phi node is returned.
2264 * @param block the current block
2265 * @param pos the value number of the value searched
2266 * @param mode the mode of this value (needed for Phi construction)
2269 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2271 /* There are 4 cases to treat.
2273 1. The block is not mature and we visit it the first time. We can not
2274 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2275 predecessors is returned. This node is added to the linked list (block
2276 attribute "phis") of the containing block to be completed when this block is
2277 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2280 2. The value is already known in this block, graph_arr[pos] is set and we
2281 visit the block the first time. We can return the value without
2282 creating any new nodes.
2284 3. The block is mature and we visit it the first time. A Phi node needs
2285 to be created (phi_merge). If the Phi is not needed, as all it's
2286 operands are the same value reaching the block through different
2287 paths, it's optimized away and the value itself is returned.
2289 4. The block is mature, and we visit it the second time. Now two
2290 subcases are possible:
2291 * The value was computed completely the last time we were here. This
2292 is the case if there is no loop. We can return the proper value.
2293 * The recursion that visited this node and set the flag did not
2294 return yet. We are computing a value in a loop and need to
2295 break the recursion. This case only happens if we visited
2296 the same block with phi_merge before, which inserted a Phi0.
2297 So we return the Phi0.
2300 /* case 4 -- already visited. */
2301 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2302 /* As phi_merge allocates a Phi0 this value is always defined. Here
2303 is the critical difference of the two algorithms. */
2304 assert(block->attr.block.graph_arr[pos]);
2305 return block->attr.block.graph_arr[pos];
2308 /* visited the first time */
2309 set_irn_visited(block, get_irg_visited(current_ir_graph));
2311 /* Get the local valid value */
2312 res = block->attr.block.graph_arr[pos];
2314 /* case 2 -- If the value is actually computed, return it. */
2318 if (block->attr.block.is_matured) { /* case 3 */
2320 /* The Phi has the same amount of ins as the corresponding block. */
2321 int ins = get_irn_arity(block);
2323 NEW_ARR_A(ir_node *, nin, ins);
2325 /* Phi merge collects the predecessors and then creates a node. */
2326 res = phi_merge(block, pos, mode, nin, ins);
2328 } else { /* case 1 */
2329 /* The block is not mature, we don't know how many in's are needed. A Phi
2330 with zero predecessors is created. Such a Phi node is called Phi0
2331 node. The Phi0 is then added to the list of Phi0 nodes in this block
2332 to be matured by mature_immBlock later.
2333 The Phi0 has to remember the pos of it's internal value. If the real
2334 Phi is computed, pos is used to update the array with the local
2336 res = new_rd_Phi0(current_ir_graph, block, mode);
2337 res->attr.phi.u.pos = pos;
2338 res->attr.phi.next = block->attr.block.phis;
2339 block->attr.block.phis = res;
2342 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2344 /* The local valid value is available now. */
2345 block->attr.block.graph_arr[pos] = res;
2348 } /* get_r_value_internal */
2350 /* ************************************************************************** */
2353 * Finalize a Block node, when all control flows are known.
2354 * Acceptable parameters are only Block nodes.
2357 mature_immBlock(ir_node *block) {
2362 assert(is_Block(block));
2363 if (!get_Block_matured(block)) {
2364 ir_graph *irg = current_ir_graph;
/* in[0] is the block's own entry; the real predecessors follow it,
   hence arity = ARR_LEN(in) - 1. */
2366 ins = ARR_LEN(block->in) - 1;
2367 /* Fix block parameters */
2368 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2370 /* An array for building the Phi nodes. */
2371 NEW_ARR_A(ir_node *, nin, ins);
2373 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each pending Phi0 is replaced by the real Phi built by phi_merge;
   the visited counter is bumped per Phi so recursion detection in
   get_r_value_internal works for each one independently. */
2375 for (n = block->attr.block.phis; n; n = next) {
2376 inc_irg_visited(irg);
2377 next = n->attr.phi.next;
2378 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2381 block->attr.block.is_matured = 1;
2383 /* Now, as the block is a finished Firm node, we can optimize it.
2384 Since other nodes have been allocated since the block was created
2385 we can not free the node on the obstack. Therefore we have to call
2386 optimize_in_place().
2387 Unfortunately the optimization does not change a lot, as all allocated
2388 nodes refer to the unoptimized node.
2389 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2390 block = optimize_in_place_2(block);
2391 IRN_VRFY_IRG(block, irg);
2393 } /* mature_immBlock */
/* The new_d_* constructors below build nodes in the current block of
   current_ir_graph by forwarding to the corresponding new_bd_*
   constructor; new_d_defaultProj additionally mutates the Cond. */
2396 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2397 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2401 new_d_Const(dbg_info *db, tarval *con) {
2402 return new_bd_Const(db, con);
2406 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2407 return new_bd_Const_long(db, mode, value);
2408 } /* new_d_Const_long */
2411 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
2412 return new_bd_Const_type(db, con, tp);
2413 } /* new_d_Const_type */
2418 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2419 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2423 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2424 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* defaultProj: marks the Cond as fragmentary, records max_proj as its
   default projection number, then builds the mode_X Proj for it. */
2429 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2431 assert(arg->op == op_Cond);
2432 arg->attr.cond.kind = fragmentary;
2433 arg->attr.cond.default_proj = max_proj;
2434 res = new_d_Proj(db, arg, mode_X, max_proj);
2436 } /* new_d_defaultProj */
/* Conv vs strictConv: same bd constructor, distinguished only by the
   trailing strict flag (0 = normal, 1 = strict). */
2440 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2441 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2446 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2447 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2448 } /* new_d_strictConv */
2451 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2452 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2457 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2458 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2469 * Allocate a frag array for a node if the current graph state is phase_building.
2471 * @param irn the node for which the frag array should be allocated
2472 * @param op the opcode of the (original) node, if does not match opcode of irn,
2474 * @param frag_store the address of the frag store in irn attributes, if this
2475 * address contains a value != NULL, does nothing
2477 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2478 if (get_opt_precise_exc_context()) {
2479 if ((current_ir_graph->phase_state == phase_building) &&
2480 (get_irn_op(irn) == op) && /* Could be optimized away. */
2481 !*frag_store) /* Could be a cse where the arr is already set. */ {
2482 *frag_store = new_frag_arr(irn);
2485 } /* firm_alloc_frag_arr */
2488 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2490 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2491 #if PRECISE_EXC_CONTEXT
2492 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2499 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2501 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2502 #if PRECISE_EXC_CONTEXT
2503 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2507 } /* new_d_DivMod */
2510 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2512 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2513 #if PRECISE_EXC_CONTEXT
2514 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2521 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2523 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2524 #if PRECISE_EXC_CONTEXT
2525 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2532 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2534 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2535 #if PRECISE_EXC_CONTEXT
2536 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
2556 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2557 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2561 new_d_Jmp(dbg_info *db) {
2562 return new_bd_Jmp(db, current_ir_graph->current_block);
2566 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2567 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2571 new_d_Cond(dbg_info *db, ir_node *c) {
2572 return new_bd_Cond(db, current_ir_graph->current_block, c);
2577 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2580 res = new_bd_Call(db, current_ir_graph->current_block,
2581 store, callee, arity, in, tp);
2582 #if PRECISE_EXC_CONTEXT
2583 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2590 new_d_Builtin(dbg_info *db, ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
2592 return new_bd_Builtin(db, current_ir_graph->current_block, store, kind, arity, in, tp);
2593 } /* new_d_Builtin */
2597 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2598 return new_bd_Return(db, current_ir_graph->current_block,
2600 } /* new_d_Return */
2603 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
2605 res = new_bd_Load(db, current_ir_graph->current_block,
2606 store, addr, mode, flags);
2607 #if PRECISE_EXC_CONTEXT
2608 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2615 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
2617 res = new_bd_Store(db, current_ir_graph->current_block,
2618 store, addr, val, flags);
2619 #if PRECISE_EXC_CONTEXT
2620 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2628 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2629 ir_where_alloc where) {
2631 res = new_bd_Alloc(db, current_ir_graph->current_block,
2632 store, size, alloc_type, where);
2633 #if PRECISE_EXC_CONTEXT
2634 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
2642 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2643 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2644 return new_bd_Free(db, current_ir_graph->current_block,
2645 store, ptr, size, free_type, where);
2650 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2651 /* GL: objptr was called frame before. Frame was a bad choice for the name
2652 as the operand could as well be a pointer to a dynamic object. */
2654 return new_bd_Sel(db, current_ir_graph->current_block,
2655 store, objptr, 0, NULL, ent);
2656 } /* new_d_simpleSel */
2660 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2661 return new_bd_Sel(db, current_ir_graph->current_block,
2662 store, objptr, n_index, index, sel);
2667 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2668 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2670 } /* new_d_SymConst_type */
2673 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2674 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2675 value, kind, firm_unknown_type);
2676 } /* new_d_SymConst */
2679 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2680 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2685 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2686 return new_bd_Confirm(db, current_ir_graph->current_block,
2688 } /* new_d_Confirm */
2691 new_d_Unknown(ir_mode *m) {
2692 return new_bd_Unknown(m);
2693 } /* new_d_Unknown */
2697 new_d_CallBegin(dbg_info *db, ir_node *call) {
2698 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2699 } /* new_d_CallBegin */
2702 new_d_EndReg(dbg_info *db) {
2703 return new_bd_EndReg(db, current_ir_graph->current_block);
2704 } /* new_d_EndReg */
2707 new_d_EndExcept(dbg_info *db) {
2708 return new_bd_EndExcept(db, current_ir_graph->current_block);
2709 } /* new_d_EndExcept */
2712 new_d_Break(dbg_info *db) {
2713 return new_bd_Break(db, current_ir_graph->current_block);
2717 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2718 return new_bd_Filter(db, current_ir_graph->current_block,
2720 } /* new_d_Filter */
2724 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2725 ir_node *ir_true, ir_mode *mode) {
2726 return new_bd_Mux(db, current_ir_graph->current_block,
2727 sel, ir_false, ir_true, mode);
2731 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2732 ir_node *dst, ir_node *src, ir_type *data_type) {
2734 res = new_bd_CopyB(db, current_ir_graph->current_block,
2735 store, dst, src, data_type);
2736 #if PRECISE_EXC_CONTEXT
2737 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2743 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2744 return new_bd_InstOf(db, current_ir_graph->current_block,
2745 store, objptr, type);
2746 } /* new_d_InstOf */
2749 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2750 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2753 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2754 ir_node *idx, ir_node *lower, ir_node *upper) {
2756 res = new_bd_Bound(db, current_ir_graph->current_block,
2757 store, idx, lower, upper);
2758 #if PRECISE_EXC_CONTEXT
2759 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2766 new_d_Pin(dbg_info *db, ir_node *node) {
2767 return new_bd_Pin(db, current_ir_graph->current_block, node);
2772 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2773 int n_outs, ir_asm_constraint *outputs,
2774 int n_clobber, ident *clobber[], ident *asm_text) {
2775 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
/* ********************************************************************* */
/* Comfortable interface with automatic Phi node construction.           */
/* (Also uses the constructors of the interface above, except new_Block.)*/
/* ********************************************************************* */
2783 /* Block construction */
2784 /* immature Block without predecessors */
2786 new_d_immBlock(dbg_info *db) {
2789 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2790 /* creates a new dynamic in-array as length of in is -1 */
2791 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2793 /* macroblock head */
2796 res->attr.block.is_matured = 0;
2797 res->attr.block.is_dead = 0;
2798 res->attr.block.is_mb_head = 1;
2799 res->attr.block.has_label = 0;
2800 res->attr.block.irg = current_ir_graph;
2801 res->attr.block.backedge = NULL;
2802 res->attr.block.in_cg = NULL;
2803 res->attr.block.cg_backedge = NULL;
2804 res->attr.block.extblk = NULL;
2805 res->attr.block.region = NULL;
2806 res->attr.block.mb_depth = 0;
2807 res->attr.block.label = 0;
2809 set_Block_block_visited(res, 0);
2811 /* Create and initialize array for Phi-node construction. */
2812 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2813 current_ir_graph->n_loc);
2814 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2816 /* Immature block may not be optimized! */
2817 IRN_VRFY_IRG(res, current_ir_graph);
2820 } /* new_d_immBlock */
2823 new_immBlock(void) {
2824 return new_d_immBlock(NULL);
2825 } /* new_immBlock */
2827 /* immature PartBlock with its predecessors */
2829 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2830 ir_node *res = new_d_immBlock(db);
2831 ir_node *blk = get_nodes_block(pred_jmp);
2833 res->in[0] = blk->in[0];
2834 assert(res->in[0] != NULL);
2835 add_immBlock_pred(res, pred_jmp);
2837 res->attr.block.is_mb_head = 0;
2838 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2841 } /* new_d_immPartBlock */
2844 new_immPartBlock(ir_node *pred_jmp) {
2845 return new_d_immPartBlock(NULL, pred_jmp);
2846 } /* new_immPartBlock */
2848 /* add an edge to a jmp/control flow node */
2850 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2851 int n = ARR_LEN(block->in) - 1;
2853 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2854 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2855 assert(is_ir_node(jmp));
2857 ARR_APP1(ir_node *, block->in, jmp);
2859 hook_set_irn_n(block, n, jmp, NULL);
2860 } /* add_immBlock_pred */
2862 /* changing the current block */
2864 set_cur_block(ir_node *target) {
2865 current_ir_graph->current_block = target;
2866 } /* set_cur_block */
2868 /* ************************ */
2869 /* parameter administration */
2871 /* get a value from the parameter array from the current block by its index */
2873 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2874 ir_graph *irg = current_ir_graph;
2875 assert(get_irg_phase_state(irg) == phase_building);
2876 inc_irg_visited(irg);
2881 return get_r_value_internal(irg->current_block, pos + 1, mode);
2884 /* get a value from the parameter array from the current block by its index */
2886 get_value(int pos, ir_mode *mode) {
2887 return get_d_value(NULL, pos, mode);
2890 /* set a value at position pos in the parameter array from the current block */
2892 set_value(int pos, ir_node *value) {
2893 ir_graph *irg = current_ir_graph;
2894 assert(get_irg_phase_state(irg) == phase_building);
2896 assert(pos+1 < irg->n_loc);
2897 assert(is_ir_node(value));
2898 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2901 /* Find the value number for a node in the current block.*/
2903 find_value(ir_node *value) {
2905 ir_node *bl = current_ir_graph->current_block;
2907 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2908 if (bl->attr.block.graph_arr[i] == value)
2913 /* get the current store */
2916 ir_graph *irg = current_ir_graph;
2918 assert(get_irg_phase_state(irg) == phase_building);
2919 /* GL: one could call get_value instead */
2920 inc_irg_visited(irg);
2921 return get_r_value_internal(irg->current_block, 0, mode_M);
2924 /* set the current store: handles automatic Sync construction for Load nodes */
2926 set_store(ir_node *store) {
2927 ir_node *load, *pload, *pred, *in[2];
2929 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2930 /* Beware: due to dead code elimination, a store might become a Bad node even in
2931 the construction phase. */
2932 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2934 if (get_opt_auto_create_sync()) {
2935 /* handle non-volatile Load nodes by automatically creating Sync's */
2936 load = skip_Proj(store);
2937 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2938 pred = get_Load_mem(load);
2940 if (is_Sync(pred)) {
2941 /* a Load after a Sync: move it up */
2942 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2944 set_Load_mem(load, get_memop_mem(mem));
2945 add_Sync_pred(pred, store);
2948 pload = skip_Proj(pred);
2949 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2950 /* a Load after a Load: create a new Sync */
2951 set_Load_mem(load, get_Load_mem(pload));
2955 store = new_Sync(2, in);
2960 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2964 keep_alive(ir_node *ka) {
2965 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2968 /* --- Useful access routines --- */
2969 /* Returns the current block of the current graph. To set the current
2970 block use set_cur_block. */
2971 ir_node *get_cur_block(void) {
2972 return get_irg_current_block(current_ir_graph);
2973 } /* get_cur_block */
2975 /* Returns the frame type of the current graph */
2976 ir_type *get_cur_frame_type(void) {
2977 return get_irg_frame_type(current_ir_graph);
2978 } /* get_cur_frame_type */
2981 /* ********************************************************************* */
2984 /* call once for each run of the library */
2986 firm_init_cons(uninitialized_local_variable_func_t *func) {
2987 default_initialize_local_variable = func;
2988 } /* firm_init_cons */
2991 irp_finalize_cons(void) {
2993 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2994 irg_finalize_cons(get_irp_irg(i));
2996 irp->phase_state = phase_high;
2997 } /* irp_finalize_cons */
3000 ir_node *new_Block(int arity, ir_node **in) {
3001 return new_d_Block(NULL, arity, in);
3003 ir_node *new_Start(void) {
3004 return new_d_Start(NULL);
3006 ir_node *new_End(void) {
3007 return new_d_End(NULL);
3009 ir_node *new_Jmp(void) {
3010 return new_d_Jmp(NULL);
3012 ir_node *new_IJmp(ir_node *tgt) {
3013 return new_d_IJmp(NULL, tgt);
3015 ir_node *new_Cond(ir_node *c) {
3016 return new_d_Cond(NULL, c);
3018 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3019 return new_d_Return(NULL, store, arity, in);
3022 ir_node *new_Const(tarval *con) {
3023 return new_d_Const(NULL, con);
3026 ir_node *new_Const_long(ir_mode *mode, long value) {
3027 return new_d_Const_long(NULL, mode, value);
3030 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3031 return new_d_Const_type(NULL, con, tp);
3034 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
3035 return new_d_SymConst_type(NULL, mode, value, kind, type);
3037 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
3038 return new_d_SymConst(NULL, mode, value, kind);
3040 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3041 return new_d_simpleSel(NULL, store, objptr, ent);
3044 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3046 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3049 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3051 return new_d_Call(NULL, store, callee, arity, in, tp);
3053 ir_node *new_Builtin(ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
3055 return new_d_Builtin(NULL, store, kind, arity, in, tp);
3058 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3059 return new_d_Add(NULL, op1, op2, mode);
3061 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3062 return new_d_Sub(NULL, op1, op2, mode);
3064 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3065 return new_d_Minus(NULL, op, mode);
3067 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3068 return new_d_Mul(NULL, op1, op2, mode);
3070 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
3071 return new_d_Mulh(NULL, op1, op2, mode);
3074 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3075 return new_d_Quot(NULL, memop, op1, op2, mode, state);
3077 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3078 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
3080 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3081 return new_d_Div(NULL, memop, op1, op2, mode, state);
3083 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3084 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
3086 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3087 return new_d_Mod(NULL, memop, op1, op2, mode, state);
3090 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3091 return new_d_Abs(NULL, op, mode);
3093 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3094 return new_d_And(NULL, op1, op2, mode);
3096 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3097 return new_d_Or(NULL, op1, op2, mode);
3099 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3100 return new_d_Eor(NULL, op1, op2, mode);
3102 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3103 return new_d_Not(NULL, op, mode);
3105 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3106 return new_d_Shl(NULL, op, k, mode);
3108 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3109 return new_d_Shr(NULL, op, k, mode);
3111 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3112 return new_d_Shrs(NULL, op, k, mode);
3114 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
3115 return new_d_Rotl(NULL, op, k, mode);
3117 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3118 return new_d_Carry(NULL, op1, op2, mode);
3120 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3121 return new_d_Borrow(NULL, op1, op2, mode);
3123 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3124 return new_d_Cmp(NULL, op1, op2);
3126 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3127 return new_d_Conv(NULL, op, mode);
3130 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3131 return new_d_strictConv(NULL, op, mode);
3133 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3134 return new_d_Cast(NULL, op, to_tp);
3136 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3137 return new_d_Phi(NULL, arity, in, mode);
3140 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
3141 return new_d_Load(NULL, store, addr, mode, flags);
3143 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
3144 return new_d_Store(NULL, store, addr, val, flags);
3147 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3148 ir_where_alloc where) {
3149 return new_d_Alloc(NULL, store, size, alloc_type, where);
3152 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3153 ir_type *free_type, ir_where_alloc where) {
3154 return new_d_Free(NULL, store, ptr, size, free_type, where);
3157 ir_node *new_Sync(int arity, ir_node *in[]) {
3158 return new_d_Sync(NULL, arity, in);
3161 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3162 return new_d_Proj(NULL, arg, mode, proj);
3165 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3166 return new_d_defaultProj(NULL, arg, max_proj);
3169 ir_node *new_Tuple(int arity, ir_node **in) {
3170 return new_d_Tuple(NULL, arity, in);
3172 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3173 return new_d_Id(NULL, val, mode);
3176 ir_node *new_Bad(void) {
3177 return get_irg_bad(current_ir_graph);
3180 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3181 return new_d_Confirm(NULL, val, bound, cmp);
3183 ir_node *new_Unknown(ir_mode *m) {
3184 return new_d_Unknown(m);
3187 ir_node *new_CallBegin(ir_node *callee) {
3188 return new_d_CallBegin(NULL, callee);
3190 ir_node *new_EndReg(void) {
3191 return new_d_EndReg(NULL);
3193 ir_node *new_EndExcept(void) {
3194 return new_d_EndExcept(NULL);
3196 ir_node *new_Break(void) {
3197 return new_d_Break(NULL);
3199 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3200 return new_d_Filter(NULL, arg, mode, proj);
3202 ir_node *new_NoMem(void) {
3203 return get_irg_no_mem(current_ir_graph);
3206 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3207 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3210 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3211 return new_d_CopyB(NULL, store, dst, src, data_type);
3213 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3214 return new_d_InstOf(NULL, store, objptr, ent);
3216 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3217 return new_d_Raise(NULL, store, obj);
3219 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3220 return new_d_Bound(NULL, store, idx, lower, upper);
3223 ir_node *new_Pin(ir_node *node) {
3224 return new_d_Pin(NULL, node);
3227 ir_node *new_Dummy(ir_mode *m) {
3228 ir_graph *irg = current_ir_graph;
3229 return new_ir_node(NULL, irg, get_irg_start_block(irg), op_Dummy, m, 0, NULL);
3231 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3232 int n_outs, ir_asm_constraint *outputs,
3233 int n_clobber, ident *clobber[], ident *asm_text) {
3234 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3237 /* create a new anchor node */
3238 ir_node *new_Anchor(ir_graph *irg) {
3239 ir_node *in[anchor_last];
3240 memset(in, 0, sizeof(in));
3241 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);