2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
47 /* Uncomment to use original code instead of generated one */
48 // #define USE_ORIGINAL
50 /* when we need verifying */
/* NOTE(review): two definitions of IRN_VRFY_IRG appear back to back: a no-op
 * variant and one that calls irn_vrfy_irg().  The #ifdef/#else/#endif guards
 * that should select between them seem to be missing from this copy of the
 * file -- confirm against the pristine source. */
52 # define IRN_VRFY_IRG(res, irg)
54 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
58 * Language dependent variable initialization callback.
/* Frontend-installed callback producing a value for a local variable that is
 * read before being written; NULL means no callback is registered. */
60 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* Constructor-generating macros.  Naming scheme used throughout this file:
 *   new_bd_*  -- "block, debug": builds the node in a given block of
 *                current_ir_graph (sets attributes, runs optimize_node and
 *                the verifier);
 *   new_rd_*  -- "raw, debug": temporarily switches current_ir_graph to the
 *                given irg, delegates to the bd constructor, then restores it;
 *   new_d_*   -- "debug": builds in current_ir_graph->current_block.
 * NOTE(review): several continuation lines of these macro bodies (result
 * declarations, in[] setup, returns, closing braces) are missing from this
 * copy -- do not edit the macros without consulting the pristine source. */
62 /* creates a bd constructor for a binop */
63 #define NEW_BD_BINOP(instr) \
65 new_bd_##instr(dbg_info *db, ir_node *block, \
66 ir_node *op1, ir_node *op2, ir_mode *mode) \
70 ir_graph *irg = current_ir_graph; \
73 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
74 res = optimize_node(res); \
75 IRN_VRFY_IRG(res, irg); \
79 /* creates a bd constructor for an unop */
80 #define NEW_BD_UNOP(instr) \
82 new_bd_##instr(dbg_info *db, ir_node *block, \
83 ir_node *op, ir_mode *mode) \
86 ir_graph *irg = current_ir_graph; \
87 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
88 res = optimize_node(res); \
89 IRN_VRFY_IRG(res, irg); \
93 /* creates a bd constructor for an divop */
94 #define NEW_BD_DIVOP(instr) \
96 new_bd_##instr(dbg_info *db, ir_node *block, \
97 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
101 ir_graph *irg = current_ir_graph; \
105 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
106 res->attr.divmod.exc.pin_state = state; \
107 res->attr.divmod.resmode = mode; \
108 res->attr.divmod.no_remainder = 0; \
109 res = optimize_node(res); \
110 IRN_VRFY_IRG(res, irg); \
114 /* creates a rd constructor for a binop */
115 #define NEW_RD_BINOP(instr) \
117 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
118 ir_node *op1, ir_node *op2, ir_mode *mode) \
121 ir_graph *rem = current_ir_graph; \
122 current_ir_graph = irg; \
123 res = new_bd_##instr(db, block, op1, op2, mode); \
124 current_ir_graph = rem; \
128 /* creates a rd constructor for an unop */
129 #define NEW_RD_UNOP(instr) \
131 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
132 ir_node *op, ir_mode *mode) \
135 ir_graph *rem = current_ir_graph; \
136 current_ir_graph = irg; \
137 res = new_bd_##instr(db, block, op, mode); \
138 current_ir_graph = rem; \
142 /* creates a rd constructor for an divop */
143 #define NEW_RD_DIVOP(instr) \
145 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
146 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
149 ir_graph *rem = current_ir_graph; \
150 current_ir_graph = irg; \
151 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
152 current_ir_graph = rem; \
156 /* creates a d constructor for an binop */
157 #define NEW_D_BINOP(instr) \
159 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
160 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
163 /* creates a d constructor for an unop */
164 #define NEW_D_UNOP(instr) \
166 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
167 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
171 #include "gen_ir_cons.c.inl"
/* NOTE(review): this copy has lost lines throughout (result declarations,
 * in[] initializations, returns, closing braces).  Comments below describe
 * only what the remaining lines show. */
175 * Constructs a Block with a fixed number of predecessors.
176 * Does not set current_block. Cannot be used with automatic
177 * Phi node construction.
180 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
182 ir_graph *irg = current_ir_graph;
184 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
186 /* macroblock header */
189 res->attr.block.is_dead = 0;
190 res->attr.block.is_mb_head = 1;
191 res->attr.block.has_label = 0;
192 res->attr.block.irg = irg;
193 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
194 res->attr.block.in_cg = NULL;
195 res->attr.block.cg_backedge = NULL;
196 res->attr.block.extblk = NULL;
197 res->attr.block.mb_depth = 0;
198 res->attr.block.label = 0;
200 set_Block_matured(res, 1);
201 set_Block_block_visited(res, 0);
203 IRN_VRFY_IRG(res, irg);
/* Start node: mode_T, no predecessors. */
208 new_bd_Start(dbg_info *db, ir_node *block) {
210 ir_graph *irg = current_ir_graph;
212 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
214 IRN_VRFY_IRG(res, irg);
/* End node: mode_X, dynamic arity (-1) so keep-alive edges can be added. */
219 new_bd_End(dbg_info *db, ir_node *block) {
221 ir_graph *irg = current_ir_graph;
223 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
225 IRN_VRFY_IRG(res, irg);
231 * Creates a Phi node with all predecessors. Calling this constructor
232 * is only allowed if the corresponding block is mature.
235 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
237 ir_graph *irg = current_ir_graph;
241 /* Don't assert that block matured: the use of this constructor is strongly
243 if (get_Block_matured(block))
244 assert(get_irn_arity(block) == arity);
246 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
248 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
/* Scan predecessors for Unknown inputs; a Phi with Unknown operands is not
 * optimized (see use of has_unknown below). */
250 for (i = arity - 1; i >= 0; --i)
251 if (is_Unknown(in[i])) {
256 if (!has_unknown) res = optimize_node(res);
257 IRN_VRFY_IRG(res, irg);
259 /* Memory Phis in endless loops must be kept alive.
260 As we can't distinguish these easily we keep all of them alive. */
261 if (is_Phi(res) && mode == mode_M)
262 add_End_keepalive(get_irg_end(irg), res);
/* Const nodes are always placed in the start block of the graph. */
267 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
269 ir_graph *irg = current_ir_graph;
271 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
272 res->attr.con.tv = con;
273 set_Const_type(res, tp); /* Call method because of complex assertion. */
274 res = optimize_node (res);
275 assert(get_Const_type(res) == tp);
276 IRN_VRFY_IRG(res, irg);
279 } /* new_bd_Const_type */
/* NOTE(review): this bd constructor delegates to the rd variant, unlike its
 * siblings -- presumably deliberate, but verify against pristine source. */
282 new_bd_Const(dbg_info *db, tarval *con) {
283 ir_graph *irg = current_ir_graph;
285 return new_rd_Const_type (db, irg, con, firm_unknown_type);
289 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
290 ir_graph *irg = current_ir_graph;
292 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
293 } /* new_bd_Const_long */
297 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
299 ir_graph *irg = current_ir_graph;
301 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
302 res = optimize_node(res);
303 IRN_VRFY_IRG(res, irg);
308 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
311 ir_graph *irg = current_ir_graph;
313 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
314 res->attr.proj = proj;
317 assert(get_Proj_pred(res));
318 assert(get_nodes_block(get_Proj_pred(res)));
320 res = optimize_node(res);
322 IRN_VRFY_IRG(res, irg);
/* Default Proj of a Cond: marks the Cond as fragmentary and records the
 * default projection number before building the mode_X Proj. */
328 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
331 ir_graph *irg = current_ir_graph;
333 assert(arg->op == op_Cond);
334 arg->attr.cond.kind = fragmentary;
335 arg->attr.cond.default_proj = max_proj;
336 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
338 } /* new_bd_defaultProj */
/* Conv: strict_flag selects a strict conversion (see new_rd_strictConv). */
342 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
344 ir_graph *irg = current_ir_graph;
346 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
347 res->attr.conv.strict = strict_flag;
348 res = optimize_node(res);
349 IRN_VRFY_IRG(res, irg);
/* Cast: changes the type of op to an atomic type; the mode is unchanged. */
354 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
356 ir_graph *irg = current_ir_graph;
358 assert(is_atomic_type(to_tp));
360 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
361 res->attr.cast.type = to_tp;
362 res = optimize_node(res);
363 IRN_VRFY_IRG(res, irg);
368 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
370 ir_graph *irg = current_ir_graph;
372 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
373 res = optimize_node (res);
374 IRN_VRFY_IRG(res, irg);
399 /** Creates a remainderless Div node. */
400 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
401 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
405 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
410 res->attr.divmod.exc.pin_state = state;
411 res->attr.divmod.resmode = mode;
412 res->attr.divmod.no_remainder = 1;
413 res = optimize_node(res);
414 IRN_VRFY_IRG(res, irg);
419 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
422 ir_graph *irg = current_ir_graph;
425 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
426 res = optimize_node(res);
427 IRN_VRFY_IRG(res, irg);
432 new_bd_Jmp(dbg_info *db, ir_node *block) {
434 ir_graph *irg = current_ir_graph;
436 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
437 res = optimize_node(res);
438 IRN_VRFY_IRG(res, irg);
/* IJmp: indirect jump to the address computed by tgt. */
443 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
445 ir_graph *irg = current_ir_graph;
447 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
448 res = optimize_node(res);
449 IRN_VRFY_IRG(res, irg);
/* Cond: starts as a dense selector with no jump prediction. */
454 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
456 ir_graph *irg = current_ir_graph;
458 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
459 res->attr.cond.kind = dense;
460 res->attr.cond.default_proj = 0;
461 res->attr.cond.pred = COND_JMP_PRED_NONE;
462 res = optimize_node(res);
463 IRN_VRFY_IRG(res, irg);
/* Call: inputs are [store, callee, arguments...]; copies the arguments into
 * r_in starting at index 2.  tp must be a Method type (or unknown). */
468 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
469 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
473 ir_graph *irg = current_ir_graph;
476 NEW_ARR_A(ir_node *, r_in, r_arity);
479 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
481 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
483 assert((get_unknown_type() == tp) || is_Method_type(tp));
484 set_Call_type(res, tp);
485 res->attr.call.exc.pin_state = op_pin_state_pinned;
486 res->attr.call.callee_arr = NULL;
487 res = optimize_node(res);
488 IRN_VRFY_IRG(res, irg);
/* Builtin: inputs are [store, arguments...]; arguments copied from index 1. */
494 new_bd_Builtin(dbg_info *db, ir_node *block, ir_node *store,
495 ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
499 ir_graph *irg = current_ir_graph;
502 NEW_ARR_A(ir_node *, r_in, r_arity);
504 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
506 res = new_ir_node(db, irg, block, op_Builtin, mode_T, r_arity, r_in);
508 assert((get_unknown_type() == tp) || is_Method_type(tp));
509 res->attr.builtin.exc.pin_state = op_pin_state_pinned;
510 res->attr.builtin.kind = kind;
511 res->attr.builtin.builtin_tp = tp;
512 res = optimize_node(res);
513 IRN_VRFY_IRG(res, irg);
515 } /* new_bd_Builtin */
/* Return: inputs are [store, result values...]. */
519 new_bd_Return(dbg_info *db, ir_node *block,
520 ir_node *store, int arity, ir_node **in) {
524 ir_graph *irg = current_ir_graph;
527 NEW_ARR_A (ir_node *, r_in, r_arity);
529 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
530 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
531 res = optimize_node(res);
532 IRN_VRFY_IRG(res, irg);
534 } /* new_bd_Return */
/* Load: pinning, volatility and alignment are decoded from ir_cons_flags. */
537 new_bd_Load(dbg_info *db, ir_node *block,
538 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
541 ir_graph *irg = current_ir_graph;
545 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
546 res->attr.load.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
547 res->attr.load.mode = mode;
548 res->attr.load.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
549 res->attr.load.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
550 res = optimize_node(res);
551 IRN_VRFY_IRG(res, irg);
/* Store: same flag decoding as Load. */
556 new_bd_Store(dbg_info *db, ir_node *block,
557 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
560 ir_graph *irg = current_ir_graph;
565 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
566 res->attr.store.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
567 res->attr.store.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
568 res->attr.store.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
570 res = optimize_node(res);
571 IRN_VRFY_IRG(res, irg);
577 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
578 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
581 ir_graph *irg = current_ir_graph;
585 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
586 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
587 res->attr.alloc.where = where;
588 res->attr.alloc.type = alloc_type;
589 res = optimize_node(res);
590 IRN_VRFY_IRG(res, irg);
596 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
597 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
600 ir_graph *irg = current_ir_graph;
605 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
606 res->attr.free.where = where;
607 res->attr.free.type = free_type;
608 res = optimize_node(res);
609 IRN_VRFY_IRG(res, irg);
/* Sel: entity selection; result mode is mode_P_code for method entities,
 * mode_P_data otherwise.  Indices copied into r_in starting at 2. */
615 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
616 int arity, ir_node **in, ir_entity *ent) {
620 ir_graph *irg = current_ir_graph;
621 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
623 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
626 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
629 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
631 * Sel's can select functions which should be of mode mode_P_code.
633 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
634 res->attr.sel.entity = ent;
635 res = optimize_node(res);
636 IRN_VRFY_IRG(res, irg);
641 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
642 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
643 ir_graph *irg = current_ir_graph;
644 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
646 res->attr.symc.kind = symkind;
647 res->attr.symc.sym = value;
648 res->attr.symc.tp = tp;
650 res = optimize_node(res);
651 IRN_VRFY_IRG(res, irg);
653 } /* new_bd_SymConst_type */
/* Sync: dynamic arity (-1); predecessors are added later (see new_rd_Sync). */
656 new_bd_Sync(dbg_info *db, ir_node *block) {
658 ir_graph *irg = current_ir_graph;
660 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
661 /* no need to call optimize node here, Sync are always created with no predecessors */
662 IRN_VRFY_IRG(res, irg);
/* Confirm: asserts the relation (cmp) between val and bound; result has the
 * mode of val. */
668 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
669 ir_node *in[2], *res;
670 ir_graph *irg = current_ir_graph;
674 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
675 res->attr.confirm.cmp = cmp;
676 res = optimize_node(res);
677 IRN_VRFY_IRG(res, irg);
679 } /* new_bd_Confirm */
/* Unknown: placed in the start block; no debug info is attached. */
682 new_bd_Unknown(ir_mode *m) {
684 ir_graph *irg = current_ir_graph;
686 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
687 res = optimize_node(res);
689 } /* new_bd_Unknown */
/* CallBegin (interprocedural view): single input is the Call's callee ptr. */
693 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
696 ir_graph *irg = current_ir_graph;
698 in[0] = get_Call_ptr(call);
699 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
700 /* res->attr.callbegin.irg = irg; */
701 res->attr.callbegin.call = call;
702 res = optimize_node(res);
703 IRN_VRFY_IRG(res, irg);
705 } /* new_bd_CallBegin */
708 new_bd_EndReg(dbg_info *db, ir_node *block) {
710 ir_graph *irg = current_ir_graph;
712 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
713 set_irg_end_reg(irg, res);
714 IRN_VRFY_IRG(res, irg);
716 } /* new_bd_EndReg */
719 new_bd_EndExcept(dbg_info *db, ir_node *block) {
721 ir_graph *irg = current_ir_graph;
723 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
724 set_irg_end_except(irg, res);
725 IRN_VRFY_IRG (res, irg);
727 } /* new_bd_EndExcept */
730 new_bd_Break(dbg_info *db, ir_node *block) {
732 ir_graph *irg = current_ir_graph;
734 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
735 res = optimize_node(res);
736 IRN_VRFY_IRG(res, irg);
/* Filter: interprocedural Proj-like node with call-graph edges. */
741 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
744 ir_graph *irg = current_ir_graph;
746 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
747 res->attr.filter.proj = proj;
748 res->attr.filter.in_cg = NULL;
749 res->attr.filter.backedge = NULL;
752 assert(get_Proj_pred(res));
753 assert(get_nodes_block(get_Proj_pred(res)));
755 res = optimize_node(res);
756 IRN_VRFY_IRG(res, irg);
758 } /* new_bd_Filter */
/* Mux: conditional select; inputs are [sel, ir_false, ir_true]. */
762 new_bd_Mux(dbg_info *db, ir_node *block,
763 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
766 ir_graph *irg = current_ir_graph;
772 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
775 res = optimize_node(res);
776 IRN_VRFY_IRG(res, irg);
/* CopyB: block copy of data_type from src to dst; always pinned. */
782 new_bd_CopyB(dbg_info *db, ir_node *block,
783 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
786 ir_graph *irg = current_ir_graph;
792 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
794 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
795 res->attr.copyb.data_type = data_type;
796 res = optimize_node(res);
797 IRN_VRFY_IRG(res, irg);
802 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
803 ir_node *objptr, ir_type *type) {
806 ir_graph *irg = current_ir_graph;
810 res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
811 res->attr.instof.exc.pin_state = op_pin_state_floats;
812 res->attr.instof.type = type;
813 res = optimize_node(res);
814 IRN_VRFY_IRG(res, irg);
816 } /* new_bd_InstOf */
819 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
822 ir_graph *irg = current_ir_graph;
826 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
827 res = optimize_node(res);
828 IRN_VRFY_IRG(res, irg);
/* Bound: checks lower <= idx < upper; always pinned. */
833 new_bd_Bound(dbg_info *db, ir_node *block,
834 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
837 ir_graph *irg = current_ir_graph;
843 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
844 res->attr.bound.exc.pin_state = op_pin_state_pinned;
845 res = optimize_node(res);
846 IRN_VRFY_IRG(res, irg);
852 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
854 ir_graph *irg = current_ir_graph;
856 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
857 res = optimize_node(res);
858 IRN_VRFY_IRG(res, irg);
/* ASM: inline assembler node; constraint and clobber arrays are copied onto
 * the graph's obstack so the caller's arrays need not outlive the node. */
864 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
865 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
867 ir_graph *irg = current_ir_graph;
869 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
870 res->attr.assem.pin_state = op_pin_state_pinned;
871 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
872 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
873 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
874 res->attr.assem.asm_text = asm_text;
876 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
877 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
878 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
880 res = optimize_node(res);
881 IRN_VRFY_IRG(res, irg);
885 /* --------------------------------------------- */
886 /* private interfaces, for professional use only */
887 /* --------------------------------------------- */
/* Every new_rd_* wrapper below follows one pattern: save current_ir_graph,
 * switch it to the caller-supplied irg, delegate to the matching new_bd_*
 * constructor, then restore the saved graph. */
890 /* Constructs a Block with a fixed number of predecessors.
891 Does not set current_block. Cannot be used with automatic
892 Phi node construction. */
894 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
895 ir_graph *rem = current_ir_graph;
898 current_ir_graph = irg;
899 res = new_bd_Block(db, arity, in);
900 current_ir_graph = rem;
906 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
907 ir_graph *rem = current_ir_graph;
910 current_ir_graph = irg;
911 res = new_bd_Start(db, block);
912 current_ir_graph = rem;
918 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
920 ir_graph *rem = current_ir_graph;
922 current_ir_graph = irg;
923 res = new_bd_End(db, block);
924 current_ir_graph = rem;
930 /* Creates a Phi node with all predecessors. Calling this constructor
931 is only allowed if the corresponding block is mature. */
933 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
935 ir_graph *rem = current_ir_graph;
937 current_ir_graph = irg;
938 res = new_bd_Phi(db, block,arity, in, mode);
939 current_ir_graph = rem;
945 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
947 ir_graph *rem = current_ir_graph;
949 current_ir_graph = irg;
950 res = new_bd_Const_type(db, con, tp);
951 current_ir_graph = rem;
954 } /* new_rd_Const_type */
/* NOTE(review): the commented-out lines below are the generated-code
 * alternative kept behind USE_ORIGINAL; do not delete without checking. */
957 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
959 //#ifdef USE_ORIGINAL
960 ir_graph *rem = current_ir_graph;
962 current_ir_graph = irg;
963 res = new_bd_Const_type(db, con, firm_unknown_type);
964 current_ir_graph = rem;
966 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
973 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
974 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
975 } /* new_rd_Const_long */
979 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
981 ir_graph *rem = current_ir_graph;
983 current_ir_graph = irg;
984 res = new_bd_Id(db, block, val, mode);
985 current_ir_graph = rem;
991 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
994 ir_graph *rem = current_ir_graph;
996 current_ir_graph = irg;
997 res = new_bd_Proj(db, block, arg, mode, proj);
998 current_ir_graph = rem;
1005 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1008 ir_graph *rem = current_ir_graph;
1010 current_ir_graph = irg;
1011 res = new_bd_defaultProj(db, block, arg, max_proj);
1012 current_ir_graph = rem;
1015 } /* new_rd_defaultProj */
/* Non-strict conversion (strict_flag = 0). */
1019 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1021 ir_graph *rem = current_ir_graph;
1023 current_ir_graph = irg;
1024 res = new_bd_Conv(db, block, op, mode, 0);
1025 current_ir_graph = rem;
/* Strict conversion (strict_flag = 1). */
1031 new_rd_strictConv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1033 ir_graph *rem = current_ir_graph;
1035 current_ir_graph = irg;
1036 res = new_bd_Conv(db, block, op, mode, 1);
1037 current_ir_graph = rem;
1040 } /* new_rd_strictConv */
1043 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1045 ir_graph *rem = current_ir_graph;
1047 current_ir_graph = irg;
1048 res = new_bd_Cast(db, block, op, to_tp);
1049 current_ir_graph = rem;
1055 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1057 ir_graph *rem = current_ir_graph;
1059 current_ir_graph = irg;
1060 res = new_bd_Tuple(db, block, arity, in);
1061 current_ir_graph = rem;
1064 } /* new_rd_Tuple */
/* Macro-generated rd constructors (see NEW_RD_* macro definitions above). */
1072 NEW_RD_DIVOP(DivMod)
1085 NEW_RD_BINOP(Borrow)
1087 /* creates a rd constructor for an divRL */
1088 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1089 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1092 ir_graph *rem = current_ir_graph;
1093 current_ir_graph = irg;
1094 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1095 current_ir_graph = rem;
1100 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1101 ir_node *op1, ir_node *op2) {
1103 ir_graph *rem = current_ir_graph;
1105 current_ir_graph = irg;
1106 res = new_bd_Cmp(db, block, op1, op2);
1107 current_ir_graph = rem;
1113 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1115 ir_graph *rem = current_ir_graph;
1117 current_ir_graph = irg;
1118 res = new_bd_Jmp(db, block);
1119 current_ir_graph = rem;
1125 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1127 ir_graph *rem = current_ir_graph;
1129 current_ir_graph = irg;
1130 res = new_bd_IJmp(db, block, tgt);
1131 current_ir_graph = rem;
1137 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1139 ir_graph *rem = current_ir_graph;
1141 current_ir_graph = irg;
1142 res = new_bd_Cond(db, block, c);
1143 current_ir_graph = rem;
1149 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1150 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1152 ir_graph *rem = current_ir_graph;
1154 current_ir_graph = irg;
1155 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1156 current_ir_graph = rem;
1163 new_rd_Builtin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1164 ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
1166 ir_graph *rem = current_ir_graph;
1168 current_ir_graph = irg;
1169 res = new_bd_Builtin(db, block, store, kind, arity, in, tp);
1170 current_ir_graph = rem;
1173 } /* new_rd_Builtin */
1177 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1178 ir_node *store, int arity, ir_node **in) {
1180 ir_graph *rem = current_ir_graph;
1182 current_ir_graph = irg;
1183 res = new_bd_Return(db, block, store, arity, in);
1184 current_ir_graph = rem;
1187 } /* new_rd_Return */
1190 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1191 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1193 ir_graph *rem = current_ir_graph;
1195 current_ir_graph = irg;
1196 res = new_bd_Load(db, block, store, adr, mode, flags);
1197 current_ir_graph = rem;
1203 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1204 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1206 ir_graph *rem = current_ir_graph;
1208 current_ir_graph = irg;
1209 res = new_bd_Store(db, block, store, adr, val, flags);
1210 current_ir_graph = rem;
1213 } /* new_rd_Store */
1217 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1218 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1220 ir_graph *rem = current_ir_graph;
1222 current_ir_graph = irg;
1223 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1224 current_ir_graph = rem;
1227 } /* new_rd_Alloc */
1231 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1232 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1234 ir_graph *rem = current_ir_graph;
1236 current_ir_graph = irg;
1237 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1238 current_ir_graph = rem;
/* simpleSel: Sel with no array indices (arity 0, in = NULL). */
1245 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1246 ir_node *store, ir_node *objptr, ir_entity *ent) {
1248 ir_graph *rem = current_ir_graph;
1250 current_ir_graph = irg;
1251 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1252 current_ir_graph = rem;
1255 } /* new_rd_simpleSel */
1259 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1260 int arity, ir_node **in, ir_entity *ent) {
1262 ir_graph *rem = current_ir_graph;
1264 current_ir_graph = irg;
1265 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1266 current_ir_graph = rem;
1273 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1274 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1276 ir_graph *rem = current_ir_graph;
1278 current_ir_graph = irg;
1279 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1280 current_ir_graph = rem;
1283 } /* new_rd_SymConst_type */
/* SymConst with unknown type attached. */
1286 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1287 symconst_symbol value, symconst_kind symkind) {
1288 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1289 } /* new_rd_SymConst */
1291 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1292 symconst_symbol sym;
1293 sym.entity_p = symbol;
1294 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1295 } /* new_rd_SymConst_addr_ent */
1297 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1298 symconst_symbol sym;
1299 sym.entity_p = symbol;
1300 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1301 } /* new_rd_SymConst_ofs_ent */
1303 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1304 symconst_symbol sym;
1305 sym.ident_p = symbol;
1306 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1307 } /* new_rd_SymConst_addr_name */
1309 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1310 symconst_symbol sym;
1311 sym.type_p = symbol;
1312 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1313 } /* new_rd_SymConst_type_tag */
1315 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1316 symconst_symbol sym;
1317 sym.type_p = symbol;
1318 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1319 } /* new_rd_SymConst_size */
1321 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1322 symconst_symbol sym;
1323 sym.type_p = symbol;
1324 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1325 } /* new_rd_SymConst_align */
1328 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1330 ir_graph *rem = current_ir_graph;
1333 current_ir_graph = irg;
1334 res = new_bd_Sync(db, block);
1335 current_ir_graph = rem;
1337 for (i = 0; i < arity; ++i)
1338 add_Sync_pred(res, in[i]);
1345 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1347 ir_graph *rem = current_ir_graph;
1349 current_ir_graph = irg;
1350 res = new_bd_Confirm(db, block, val, bound, cmp);
1351 current_ir_graph = rem;
1354 } /* new_rd_Confirm */
1357 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1359 ir_graph *rem = current_ir_graph;
1361 current_ir_graph = irg;
1362 res = new_bd_Unknown(m);
1363 current_ir_graph = rem;
1366 } /* new_rd_Unknown */
1370 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1372 ir_graph *rem = current_ir_graph;
1374 current_ir_graph = irg;
1375 res = new_bd_CallBegin(db, block, call);
1376 current_ir_graph = rem;
1379 } /* new_rd_CallBegin */
1382 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1385 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1386 set_irg_end_reg(irg, res);
1387 IRN_VRFY_IRG(res, irg);
1389 } /* new_rd_EndReg */
1392 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1395 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1396 set_irg_end_except(irg, res);
1397 IRN_VRFY_IRG (res, irg);
1399 } /* new_rd_EndExcept */
1402 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1404 ir_graph *rem = current_ir_graph;
1406 current_ir_graph = irg;
1407 res = new_bd_Break(db, block);
1408 current_ir_graph = rem;
1411 } /* new_rd_Break */
/* Constructs a Filter node: projection 'proj' of 'arg' with the given mode
   (interprocedural counterpart of Proj). */
1414 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1417 ir_graph *rem = current_ir_graph;
1419 current_ir_graph = irg;
1420 res = new_bd_Filter(db, block, arg, mode, proj);
1421 current_ir_graph = rem;
1424 } /* new_rd_Filter */
/* Constructs a Mux node: yields ir_true or ir_false depending on 'sel'. */
1428 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1429 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1431 ir_graph *rem = current_ir_graph;
1433 current_ir_graph = irg;
1434 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1435 current_ir_graph = rem;
/* Constructs a CopyB node: copies an object of type 'data_type' from 'src'
   to 'dst', threading the memory state 'store'. */
1441 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1442 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1444 ir_graph *rem = current_ir_graph;
1446 current_ir_graph = irg;
1447 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1448 current_ir_graph = rem;
1451 } /* new_rd_CopyB */
/* Constructs an InstOf node: dynamic type test of 'objptr' against 'type'. */
1454 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1455 ir_node *objptr, ir_type *type) {
1457 ir_graph *rem = current_ir_graph;
1459 current_ir_graph = irg;
1460 res = new_bd_InstOf(db, block, store, objptr, type);
1461 current_ir_graph = rem;
1464 } /* new_rd_InstOf */
/* Constructs a Raise node: raises the exception object 'obj'. */
1467 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1469 ir_graph *rem = current_ir_graph;
1471 current_ir_graph = irg;
1472 res = new_bd_Raise(db, block, store, obj);
1473 current_ir_graph = rem;
1476 } /* new_rd_Raise */
/* Constructs a Bound node: checks that lower <= idx < upper. */
1478 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1479 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1481 ir_graph *rem = current_ir_graph;
1483 current_ir_graph = irg;
1484 res = new_bd_Bound(db, block, store, idx, lower, upper);
1485 current_ir_graph = rem;
1488 } /* new_rd_Bound */
/* Constructs a Pin node: pins 'node' into 'block'. */
1491 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1493 ir_graph *rem = current_ir_graph;
1495 current_ir_graph = irg;
1496 res = new_bd_Pin(db, block, node);
1497 current_ir_graph = rem;
/* Constructs an ASM node (inline assembler) with the given input/output
   constraints, clobber list and assembler text. */
1503 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1504 int arity, ir_node *in[], ir_asm_constraint *inputs,
1505 int n_outs, ir_asm_constraint *outputs,
1506 int n_clobber, ident *clobber[], ident *asm_text) {
1508 ir_graph *rem = current_ir_graph;
1510 current_ir_graph = irg;
1511 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1512 current_ir_graph = rem;
/* new_r_* constructors: thin convenience wrappers around the corresponding
   new_rd_* constructors, passing NULL debug info. */
1519 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1520 return new_rd_Block(NULL, irg, arity, in);
1522 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1523 return new_rd_Start(NULL, irg, block);
1525 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1526 return new_rd_End(NULL, irg, block);
1528 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1529 return new_rd_Jmp(NULL, irg, block);
1531 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1532 return new_rd_IJmp(NULL, irg, block, tgt);
1534 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1535 return new_rd_Cond(NULL, irg, block, c);
1537 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1538 ir_node *store, int arity, ir_node **in) {
1539 return new_rd_Return(NULL, irg, block, store, arity, in);
1542 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
1543 return new_rd_Const(NULL, irg, con);
1545 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
1546 return new_rd_Const_long(NULL, irg, mode, value);
1548 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
1549 return new_rd_Const_type(NULL, irg, con, tp);
1551 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1552 symconst_symbol value, symconst_kind symkind) {
1553 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
/* simpleSel: Sel of an entity with no array indices. */
1555 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1556 ir_node *objptr, ir_entity *ent) {
1557 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1560 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1561 ir_node *objptr, int n_index, ir_node **index,
1563 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1565 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1566 ir_node *callee, int arity, ir_node **in,
1568 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1571 ir_node *new_r_Builtin(ir_graph *irg, ir_node *block, ir_node *store,
1572 ir_builtin_kind kind, int arity, ir_node **in,
1574 return new_rd_Builtin(NULL, irg, block, store, kind, arity, in, tp);
/* new_r_* wrappers for the arithmetic, logic, shift, compare and conversion
   operations; each forwards to its new_rd_* counterpart with NULL dbg_info. */
1577 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1578 ir_node *op1, ir_node *op2, ir_mode *mode) {
1579 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1581 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1582 ir_node *op1, ir_node *op2, ir_mode *mode) {
1583 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1585 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1586 ir_node *op, ir_mode *mode) {
1587 return new_rd_Minus(NULL, irg, block, op, mode);
1589 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1590 ir_node *op1, ir_node *op2, ir_mode *mode) {
1591 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1593 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1594 ir_node *op1, ir_node *op2, ir_mode *mode) {
1595 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1597 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1598 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1599 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1601 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1602 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1603 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1605 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1606 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1607 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1609 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1610 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1611 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1613 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1614 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1615 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1617 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1618 ir_node *op, ir_mode *mode) {
1619 return new_rd_Abs(NULL, irg, block, op, mode);
1621 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1622 ir_node *op1, ir_node *op2, ir_mode *mode) {
1623 return new_rd_And(NULL, irg, block, op1, op2, mode);
1625 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1626 ir_node *op1, ir_node *op2, ir_mode *mode) {
1627 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1629 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1630 ir_node *op1, ir_node *op2, ir_mode *mode) {
1631 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1633 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1634 ir_node *op, ir_mode *mode) {
1635 return new_rd_Not(NULL, irg, block, op, mode);
1637 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1638 ir_node *op, ir_node *k, ir_mode *mode) {
1639 return new_rd_Shl(NULL, irg, block, op, k, mode);
1641 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1642 ir_node *op, ir_node *k, ir_mode *mode) {
1643 return new_rd_Shr(NULL, irg, block, op, k, mode);
1645 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1646 ir_node *op, ir_node *k, ir_mode *mode) {
1647 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1649 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1650 ir_node *op, ir_node *k, ir_mode *mode) {
1651 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1653 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1654 ir_node *op, ir_node *k, ir_mode *mode) {
1655 return new_rd_Carry(NULL, irg, block, op, k, mode);
1657 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1658 ir_node *op, ir_node *k, ir_mode *mode) {
1659 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1661 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1662 ir_node *op1, ir_node *op2) {
1663 return new_rd_Cmp(NULL, irg, block, op1, op2);
1665 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1666 ir_node *op, ir_mode *mode) {
1667 return new_rd_Conv(NULL, irg, block, op, mode);
1669 ir_node *new_r_strictConv(ir_graph *irg, ir_node *block,
1670 ir_node *op, ir_mode *mode) {
1671 return new_rd_strictConv(NULL, irg, block, op, mode);
/* Remaining new_r_* wrappers (Phi, memory ops, interprocedural and misc
   nodes); each forwards to its new_rd_* counterpart with NULL dbg_info.
   new_r_Bad and new_r_NoMem return the graph's unique singleton nodes. */
1674 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1675 ir_node **in, ir_mode *mode) {
1676 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1679 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1680 return new_rd_Cast(NULL, irg, block, op, to_tp);
1682 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1683 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1684 return new_rd_Load(NULL, irg, block, store, adr, mode, flags);
1686 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1687 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1688 return new_rd_Store(NULL, irg, block, store, adr, val, flags);
1691 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1692 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1693 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1696 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1697 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1698 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1701 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1702 return new_rd_Sync(NULL, irg, block, arity, in);
1705 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1706 ir_mode *mode, long proj) {
1707 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1710 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1712 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1715 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1716 int arity, ir_node **in) {
1717 return new_rd_Tuple(NULL, irg, block, arity, in );
1719 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1720 ir_node *val, ir_mode *mode) {
1721 return new_rd_Id(NULL, irg, block, val, mode);
/* Bad is unique per graph: return the existing singleton. */
1724 ir_node *new_r_Bad(ir_graph *irg) {
1725 return get_irg_bad(irg);
1728 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1729 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1731 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1732 return new_rd_Unknown(irg, m);
1735 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1736 return new_rd_CallBegin(NULL, irg, block, callee);
1738 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1739 return new_rd_EndReg(NULL, irg, block);
1741 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1742 return new_rd_EndExcept(NULL, irg, block);
1744 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1745 return new_rd_Break(NULL, irg, block);
1747 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1748 ir_mode *mode, long proj) {
1749 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
/* NoMem is unique per graph: return the existing singleton. */
1751 ir_node *new_r_NoMem(ir_graph *irg) {
1752 return get_irg_no_mem(irg);
1755 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1756 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1757 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1760 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1761 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1762 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1764 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1766 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1768 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1769 ir_node *store, ir_node *obj) {
1770 return new_rd_Raise(NULL, irg, block, store, obj);
1772 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1773 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1774 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1777 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1778 return new_rd_Pin(NULL, irg, block, node);
1781 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1782 int arity, ir_node *in[], ir_asm_constraint *inputs,
1783 int n_outs, ir_asm_constraint *outputs,
1784 int n_clobber, ident *clobber[], ident *asm_text) {
1785 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1788 /** ********************/
1789 /** public interfaces */
1790 /** construction tools */
1796 * - create a new Start node in the current block
1798 * @return s - pointer to the created Start node
/* Constructs a Start node in the current block of current_ir_graph. */
1803 new_d_Start(dbg_info *db) {
1806 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1807 op_Start, mode_T, 0, NULL);
1809 res = optimize_node(res);
1810 IRN_VRFY_IRG(res, current_ir_graph);
/* Constructs an End node in the current block.
   Arity -1: the keep-alive predecessor list grows dynamically. */
1815 new_d_End(dbg_info *db) {
1817 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1818 op_End, mode_X, -1, NULL);
1819 res = optimize_node(res);
1820 IRN_VRFY_IRG(res, current_ir_graph);
1825 /* Constructs a Block with a fixed number of predecessors.
1826 Does set current_block. Can be used with automatic Phi
1827 node construction. */
/* Constructs a Block with a fixed number of predecessors, makes it the
   current block, and prepares the graph_arr used for automatic Phi-node
   construction while the graph is in phase_building. */
1829 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1832 int has_unknown = 0;
/* NOTE(review): the two constructor calls below appear to be the
   USE_ORIGINAL alternatives (see switch at top of file) -- confirm. */
1835 res = new_bd_Block(db, arity, in);
1837 res = new_rd_Block(db, current_ir_graph, arity, in);
1840 /* Create and initialize array for Phi-node construction. */
1841 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1842 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1843 current_ir_graph->n_loc);
1844 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Blocks with an Unknown predecessor must not be optimized away yet. */
1847 for (i = arity-1; i >= 0; i--)
1848 if (is_Unknown(in[i])) {
1853 if (!has_unknown) res = optimize_node(res);
1855 current_ir_graph->current_block = res;
1857 IRN_VRFY_IRG(res, current_ir_graph);
1863 /* ***********************************************************************/
1864 /* Methods necessary for automatic Phi node creation */
1866 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1867 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1868 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1869 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1871 Call Graph: ( A ---> B == A "calls" B)
1873 get_value mature_immBlock
1881 get_r_value_internal |
1885 new_rd_Phi0 new_rd_Phi_in
1887 * *************************************************************************** */
1889 /** Creates a Phi node with 0 predecessors. */
1890 static inline ir_node *
1891 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
/* A Phi0 is a placeholder Phi with zero predecessors; it is completed
   (or replaced) once the containing block is matured. */
1894 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1895 IRN_VRFY_IRG(res, irg);
1901 * Internal constructor of a Phi node by a phi_merge operation.
1903 * @param irg the graph on which the Phi will be constructed
1904 * @param block the block in which the Phi will be constructed
1905 * @param mode the mode of the Phi node
1906 * @param in the input array of the phi node
1907 * @param ins number of elements in the input array
1908 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1909 * the value for which the new Phi is constructed
1911 static inline ir_node *
1912 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1913 ir_node **in, int ins, ir_node *phi0) {
1915 ir_node *res, *known;
1917 /* Allocate a new node on the obstack. The allocation copies the in
1919 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1920 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1922 /* This loop checks whether the Phi has more than one predecessor.
1923 If so, it is a real Phi node and we break the loop. Else the
1924 Phi node merges the same definition on several paths and therefore
1926 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
1928 for (i = ins - 1; i >= 0; --i) {
1931 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1933 /* Optimize self referencing Phis: We can't detect them yet properly, as
1934 they still refer to the Phi0 they will replace. So replace right now. */
1935 if (phi0 && in[i] == phi0)
1938 if (in[i] == res || in[i] == known)
1947 /* i < 0: there is at most one predecessor, we don't need a phi node. */
1950 edges_node_deleted(res, current_ir_graph);
1951 obstack_free(current_ir_graph->obst, res);
1952 if (is_Phi(known)) {
1953 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1954 order, an enclosing Phi node may become superfluous. */
1955 res = optimize_in_place_2(known);
1957 exchange(known, res);
1962 /* A undefined value, e.g., in unreachable code. */
1966 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1967 IRN_VRFY_IRG(res, irg);
1968 /* Memory Phis in endless loops must be kept alive.
1969 As we can't distinguish these easily we keep all of them alive. */
1970 if (is_Phi(res) && mode == mode_M)
1971 add_End_keepalive(get_irg_end(irg), res);
1975 } /* new_rd_Phi_in */
1978 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1980 #if PRECISE_EXC_CONTEXT
1982 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1985 * Construct a new frag_array for node n.
1986 * Copy the content from the current graph_arr of the corresponding block:
1987 * this is the current state.
1988 * Set ProjM(n) as current memory state.
1989 * Further the last entry in frag_arr of current block points to n. This
1990 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Builds a frag array for fragile op 'n': copies the current block's
   graph_arr (the current SSA state), stores ProjM(n) as the new memory
   state in slot 0, and links n into the block's chain of fragile ops
   (last slot of the block's graph_arr). */
1992 static inline ir_node **new_frag_arr(ir_node *n) {
1996 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1997 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1998 sizeof(ir_node *)*current_ir_graph->n_loc);
2000 /* turn off optimization before allocating Proj nodes, as res isn't
2002 opt = get_opt_optimize(); set_optimize(0);
2003 /* Here we rely on the fact that all frag ops have Memory as first result! */
2005 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2006 } else if (is_CopyB(n)) {
2007 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops share the same memory Proj number; the
   asserts document that invariant. */
2009 assert((pn_Quot_M == pn_DivMod_M) &&
2010 (pn_Quot_M == pn_Div_M) &&
2011 (pn_Quot_M == pn_Mod_M) &&
2012 (pn_Quot_M == pn_Load_M) &&
2013 (pn_Quot_M == pn_Store_M) &&
2014 (pn_Quot_M == pn_Alloc_M) &&
2015 (pn_Quot_M == pn_Bound_M));
2016 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2020 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2022 } /* new_frag_arr */
2025 * Returns the frag_arr from a node.
/* Returns the frag array stored in the opcode-specific attribute of a
   fragile op 'n' (Call/Alloc/Load/Store/other fragile ops). */
2027 static inline ir_node **get_frag_arr(ir_node *n) {
2028 switch (get_irn_opcode(n)) {
2030 return n->attr.call.exc.frag_arr;
2032 return n->attr.alloc.exc.frag_arr;
2034 return n->attr.load.exc.frag_arr;
2036 return n->attr.store.exc.frag_arr;
2038 return n->attr.except.frag_arr;
2040 } /* get_frag_arr */
/* Writes 'val' at position 'pos' into the frag array chain: fills empty
   slots and follows the chain of fragile ops linked in the last slot.
   The debug loop bound (1024) guards against an endless chain. */
2043 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2044 #ifdef DEBUG_libfirm
2047 for (i = 1024; i >= 0; --i)
/* Only fill the slot if it is still empty: never overwrite. */
2052 if (frag_arr[pos] == NULL)
2053 frag_arr[pos] = val;
2054 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
2055 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2056 assert(arr != frag_arr && "Endless recursion detected");
2061 assert(!"potential endless recursion in set_frag_value");
2062 } /* set_frag_value */
/* Returns the value at 'pos' as seen after the fragile op 'cfOp':
   first consults cfOp's frag array; if a later set_value() stored a value
   in the block, a Phi (or Phi0 for immature blocks) must be built instead;
   otherwise falls back to the normal SSA lookup and caches the result in
   the frag array. */
2065 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2069 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
2071 frag_arr = get_frag_arr(cfOp);
2072 res = frag_arr[pos];
2074 if (block->attr.block.graph_arr[pos] != NULL) {
2075 /* There was a set_value() after the cfOp and no get_value() before that
2076 set_value(). We must build a Phi node now. */
2077 if (block->attr.block.is_matured) {
2078 int ins = get_irn_arity(block);
2080 NEW_ARR_A(ir_node *, nin, ins);
2081 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: queue a Phi0 on the block's phi list; it is
   completed in mature_immBlock(). */
2083 res = new_rd_Phi0(current_ir_graph, block, mode);
2084 res->attr.phi.u.pos = pos;
2085 res->attr.phi.next = block->attr.block.phis;
2086 block->attr.block.phis = res;
2088 assert(res != NULL);
2089 /* It's a Phi, we can write this into all graph_arrs with NULL */
2090 set_frag_value(block->attr.block.graph_arr, pos, res);
2092 res = get_r_value_internal(block, pos, mode);
2093 set_frag_value(block->attr.block.graph_arr, pos, res);
2097 } /* get_r_frag_value_internal */
2098 #endif /* PRECISE_EXC_CONTEXT */
2101 * Check whether a control flownode cf_pred represents an exception flow.
2103 * @param cf_pred the control flow node
2104 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2106 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2108 * Note: all projections from a raise are "exceptional control flow" so we handle it
2109 * like a normal Jmp, because there is no "regular" one.
2110 * That's why Raise is no "fragile_op"!
2112 if (is_fragile_op(prev_cf_op)) {
2113 if (is_Proj(cf_pred)) {
2114 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2115 /* the regular control flow, NO exception */
2118 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2121 /* Hmm, exception but not a Proj? */
2122 assert(!"unexpected condition: fragile op without a proj");
2126 } /* is_exception_flow */
2129 * Computes the predecessors for the real phi node, and then
2130 * allocates and returns this node. The routine called to allocate the
2131 * node might optimize it away and return a real value.
2132 * This function must be called with an in-array of proper size.
/* Collects the predecessors of a Phi for value 'pos' in 'block' into 'nin'
   and constructs the Phi via new_rd_Phi_in(); may return a plain value if
   the Phi is optimized away. 'nin' must have room for 'ins' entries. */
2135 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2136 ir_node *prevBlock, *res, *phi0, *phi0_all;
2139 /* If this block has no value at pos create a Phi0 and remember it
2140 in graph_arr to break recursions.
2141 Else we may not set graph_arr as a later value is remembered there. */
2143 if (block->attr.block.graph_arr[pos] == NULL) {
2144 ir_graph *irg = current_ir_graph;
2146 if (block == get_irg_start_block(irg)) {
2147 /* Collapsing to Bad tarvals is no good idea.
2148 So we call a user-supplied routine here that deals with this case as
2149 appropriate for the given language. Sorrily the only help we can give
2150 here is the position.
2152 Even if all variables are defined before use, it can happen that
2153 we get to the start block, if a Cond has been replaced by a tuple
2154 (bad, jmp). In this case we call the function needlessly, eventually
2155 generating a non-existent error.
2156 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2159 if (default_initialize_local_variable != NULL) {
2160 ir_node *rem = get_cur_block();
2162 set_cur_block(block);
2163 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2167 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2168 /* We don't need to care about exception ops in the start block.
2169 There are none by definition. */
2170 return block->attr.block.graph_arr[pos];
2172 phi0 = new_rd_Phi0(irg, block, mode);
2173 block->attr.block.graph_arr[pos] = phi0;
2174 #if PRECISE_EXC_CONTEXT
2175 if (get_opt_precise_exc_context()) {
2176 /* Set graph_arr for fragile ops. Also here we should break recursion.
2177 We could choose a cyclic path through a cfop. But the recursion would
2178 break at some point. */
2179 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2185 /* This loop goes to all predecessor blocks of the block the Phi node
2186 is in and there finds the operands of the Phi node by calling
2187 get_r_value_internal. */
2188 for (i = 1; i <= ins; ++i) {
2189 ir_node *cf_pred = block->in[i];
2190 ir_node *prevCfOp = skip_Proj(cf_pred);
2192 if (is_Bad(prevCfOp)) {
2193 /* In case a Cond has been optimized we would get right to the start block
2194 with an invalid definition. */
2195 nin[i-1] = new_Bad();
2198 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2200 if (!is_Bad(prevBlock)) {
2201 #if PRECISE_EXC_CONTEXT
2202 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2203 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2204 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2207 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2209 nin[i-1] = new_Bad();
2213 /* We want to pass the Phi0 node to the constructor: this finds additional
2214 optimization possibilities.
2215 The Phi0 node either is allocated in this function, or it comes from
2216 a former call to get_r_value_internal(). In this case we may not yet
2217 exchange phi0, as this is done in mature_immBlock(). */
2219 phi0_all = block->attr.block.graph_arr[pos];
2220 if (! is_Phi0(phi0_all) ||
2221 get_irn_arity(phi0_all) != 0 ||
2222 get_nodes_block(phi0_all) != block)
2228 /* After collecting all predecessors into the array nin a new Phi node
2229 with these predecessors is created. This constructor contains an
2230 optimization: If all predecessors of the Phi node are identical it
2231 returns the only operand instead of a new Phi node. */
2232 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2234 /* In case we allocated a Phi0 node at the beginning of this procedure,
2235 we need to exchange this Phi0 with the real Phi. */
2237 exchange(phi0, res);
2238 block->attr.block.graph_arr[pos] = res;
2239 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2240 only an optimization. */
2247 * This function returns the last definition of a value. In case
2248 * this value was last defined in a previous block, Phi nodes are
2249 * inserted. If the part of the firm graph containing the definition
2250 * is not yet constructed, a dummy Phi node is returned.
2252 * @param block the current block
2253 * @param pos the value number of the value searched
2254 * @param mode the mode of this value (needed for Phi construction)
/* Returns the last definition of value 'pos' reaching 'block', inserting
   Phi nodes where needed; returns a Phi0 placeholder when the defining
   region is not yet constructed (see the 4-case discussion below). */
2257 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2259 /* There are 4 cases to treat.
2261 1. The block is not mature and we visit it the first time. We can not
2262 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2263 predecessors is returned. This node is added to the linked list (block
2264 attribute "phis") of the containing block to be completed when this block is
2265 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2268 2. The value is already known in this block, graph_arr[pos] is set and we
2269 visit the block the first time. We can return the value without
2270 creating any new nodes.
2272 3. The block is mature and we visit it the first time. A Phi node needs
2273 to be created (phi_merge). If the Phi is not needed, as all it's
2274 operands are the same value reaching the block through different
2275 paths, it's optimized away and the value itself is returned.
2277 4. The block is mature, and we visit it the second time. Now two
2278 subcases are possible:
2279 * The value was computed completely the last time we were here. This
2280 is the case if there is no loop. We can return the proper value.
2281 * The recursion that visited this node and set the flag did not
2282 return yet. We are computing a value in a loop and need to
2283 break the recursion. This case only happens if we visited
2284 the same block with phi_merge before, which inserted a Phi0.
2285 So we return the Phi0.
2288 /* case 4 -- already visited. */
2289 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2290 /* As phi_merge allocates a Phi0 this value is always defined. Here
2291 is the critical difference of the two algorithms. */
2292 assert(block->attr.block.graph_arr[pos]);
2293 return block->attr.block.graph_arr[pos];
2296 /* visited the first time */
2297 set_irn_visited(block, get_irg_visited(current_ir_graph));
2299 /* Get the local valid value */
2300 res = block->attr.block.graph_arr[pos];
2302 /* case 2 -- If the value is actually computed, return it. */
2306 if (block->attr.block.is_matured) { /* case 3 */
2308 /* The Phi has the same amount of ins as the corresponding block. */
2309 int ins = get_irn_arity(block);
2311 NEW_ARR_A(ir_node *, nin, ins);
2313 /* Phi merge collects the predecessors and then creates a node. */
2314 res = phi_merge(block, pos, mode, nin, ins);
2316 } else { /* case 1 */
2317 /* The block is not mature, we don't know how many in's are needed. A Phi
2318 with zero predecessors is created. Such a Phi node is called Phi0
2319 node. The Phi0 is then added to the list of Phi0 nodes in this block
2320 to be matured by mature_immBlock later.
2321 The Phi0 has to remember the pos of it's internal value. If the real
2322 Phi is computed, pos is used to update the array with the local
2324 res = new_rd_Phi0(current_ir_graph, block, mode);
2325 res->attr.phi.u.pos = pos;
2326 res->attr.phi.next = block->attr.block.phis;
2327 block->attr.block.phis = res;
2330 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2332 /* The local valid value is available now. */
2333 block->attr.block.graph_arr[pos] = res;
2336 } /* get_r_value_internal */
2338 /* ************************************************************************** */
2341 * Finalize a Block node, when all control flows are known.
2342 * Acceptable parameters are only Block nodes.
/* Finalizes an immature Block once all control flow predecessors are known:
   fixes the backedge array, completes all queued Phi0 nodes via phi_merge(),
   marks the block matured and optimizes it in place. */
2345 mature_immBlock(ir_node *block) {
2350 assert(is_Block(block));
2351 if (!get_Block_matured(block)) {
2352 ir_graph *irg = current_ir_graph;
2354 ins = ARR_LEN(block->in) - 1;
2355 /* Fix block parameters */
2356 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2358 /* An array for building the Phi nodes. */
2359 NEW_ARR_A(ir_node *, nin, ins);
2361 /* Traverse a chain of Phi nodes attached to this block and mature
2363 for (n = block->attr.block.phis; n; n = next) {
2364 inc_irg_visited(irg);
2365 next = n->attr.phi.next;
2366 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2369 block->attr.block.is_matured = 1;
2371 /* Now, as the block is a finished Firm node, we can optimize it.
2372 Since other nodes have been allocated since the block was created
2373 we can not free the node on the obstack. Therefore we have to call
2374 optimize_in_place().
2375 Unfortunately the optimization does not change a lot, as all allocated
2376 nodes refer to the unoptimized node.
2377 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2378 block = optimize_in_place_2(block);
2379 IRN_VRFY_IRG(block, irg);
2381 } /* mature_immBlock */
/* -- Debug-info constructors (new_d_*): each builds a node in the current
   block of current_ir_graph by delegating to the matching new_bd_* routine. -- */

2384 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2385 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);

/* Note: Const takes no block argument here; placement is left to new_bd_Const. */
2389 new_d_Const(dbg_info *db, tarval *con) {
2390 return new_bd_Const(db, con);

2394 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2395 return new_bd_Const_long(db, mode, value);
2396 } /* new_d_Const_long */

2399 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
2400 return new_bd_Const_type(db, con, tp);
2401 } /* new_d_Const_type */

2406 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2407 return new_bd_Id(db, current_ir_graph->current_block, val, mode);

2411 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2412 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);

/* Default projection of a Cond: mark the Cond as 'fragmentary', record the
   default projection number, then build the mode_X Proj for it. */
2417 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2419 assert(arg->op == op_Cond);
2420 arg->attr.cond.kind = fragmentary;
2421 arg->attr.cond.default_proj = max_proj;
2422 res = new_d_Proj(db, arg, mode_X, max_proj);
2424 } /* new_d_defaultProj */
/* Conv and strictConv share new_bd_Conv; the trailing flag selects strict
   conversion semantics (0 = normal Conv, 1 = strict Conv). */
2428 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2429 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);

2433 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2434 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2435 } /* new_d_strictConv */

2438 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2439 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);

2443 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2444 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2455 * Allocate a frag array for a node if the current graph state is phase_building.
2457 * @param irn the node for which the frag array should be allocated
2471 *
 * @param op the opcode of the (original) node; if it does not match the
 * opcode of irn (i.e. the node was optimized away / replaced),
 * nothing is done
2460 * @param frag_store the address of the frag store in irn attributes, if this
2461 * address contains a value != NULL, does nothing
 *
 * Only active when the precise-exception-context option is set.
2463 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2464 if (get_opt_precise_exc_context()) {
2465 if ((current_ir_graph->phase_state == phase_building) &&
2466 (get_irn_op(irn) == op) && /* Could be optimized away. */
2467 !*frag_store) /* Could be a cse where the arr is already set. */ {
2468 *frag_store = new_frag_arr(irn);
2471 } /* firm_alloc_frag_arr */
/* -- Division-like constructors with a memory operand: after building the
   node, a fragment array for precise exception contexts is allocated when
   PRECISE_EXC_CONTEXT is enabled. -- */

2475 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2477 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2478 #if PRECISE_EXC_CONTEXT
2479 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);

2486 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2488 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2489 #if PRECISE_EXC_CONTEXT
2490 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2494 } /* new_d_DivMod */

2497 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2499 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2500 #if PRECISE_EXC_CONTEXT
2501 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);

2508 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2510 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2511 #if PRECISE_EXC_CONTEXT
/* NOTE(review): op_Div (not a DivRL-specific opcode) is passed here. If DivRL
   has its own ir_op, the opcode guard in firm_alloc_frag_arr never matches and
   no frag array is allocated -- verify whether DivRL shares op_Div. */
2512 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);

2519 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2521 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2522 #if PRECISE_EXC_CONTEXT
2523 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
2542 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2543 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);

2547 new_d_Jmp(dbg_info *db) {
2548 return new_bd_Jmp(db, current_ir_graph->current_block);

2552 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2553 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);

2557 new_d_Cond(dbg_info *db, ir_node *c) {
2558 return new_bd_Cond(db, current_ir_graph->current_block, c);

/* Call may raise an exception: allocate a frag array for the precise
   exception context when enabled. */
2562 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2565 res = new_bd_Call(db, current_ir_graph->current_block,
2566 store, callee, arity, in, tp);
2567 #if PRECISE_EXC_CONTEXT
2568 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);

2576 new_d_Builtin(dbg_info *db, ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
2578 return new_bd_Builtin(db, current_ir_graph->current_block, store, kind, arity, in, tp);
2579 } /* new_d_Builtin */

2583 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2584 return new_bd_Return(db, current_ir_graph->current_block,
2586 } /* new_d_Return */
/* -- Memory operations; Load, Store and Alloc may raise exceptions and thus
   get a frag array under PRECISE_EXC_CONTEXT. -- */

2589 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
2591 res = new_bd_Load(db, current_ir_graph->current_block,
2592 store, addr, mode, flags);
2593 #if PRECISE_EXC_CONTEXT
2594 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);

2601 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
2603 res = new_bd_Store(db, current_ir_graph->current_block,
2604 store, addr, val, flags);
2605 #if PRECISE_EXC_CONTEXT
2606 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);

2614 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2615 ir_where_alloc where) {
2617 res = new_bd_Alloc(db, current_ir_graph->current_block,
2618 store, size, alloc_type, where);
2619 #if PRECISE_EXC_CONTEXT
2620 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);

/* Free cannot fail here: no frag array is allocated. */
2628 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2629 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2630 return new_bd_Free(db, current_ir_graph->current_block,
2631 store, ptr, size, free_type, where);
/* simpleSel: a Sel without an index array (arity 0, NULL) for selecting a
   compound entity member. */
2636 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2637 /* GL: objptr was called frame before. Frame was a bad choice for the name
2638 as the operand could as well be a pointer to a dynamic object. */
2640 return new_bd_Sel(db, current_ir_graph->current_block,
2641 store, objptr, 0, NULL, ent);
2642 } /* new_d_simpleSel */

2646 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2647 return new_bd_Sel(db, current_ir_graph->current_block,
2648 store, objptr, n_index, index, sel);

/* SymConst nodes are placed in the start block of the graph, not in the
   current block. */
2653 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2654 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2656 } /* new_d_SymConst_type */

/* Same as new_d_SymConst_type with the unknown type. */
2659 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2660 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2661 value, kind, firm_unknown_type);
2662 } /* new_d_SymConst */
/* NOTE(review): unlike the other wrappers here, Sync delegates to new_rd_Sync
   (passing the graph explicitly) rather than a new_bd_* routine. */
2665 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2666 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);

2671 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2672 return new_bd_Confirm(db, current_ir_graph->current_block,
2674 } /* new_d_Confirm */

/* Unknown is block-independent: no block argument. */
2677 new_d_Unknown(ir_mode *m) {
2678 return new_bd_Unknown(m);
2679 } /* new_d_Unknown */

/* -- Interprocedural view constructors -- */
2683 new_d_CallBegin(dbg_info *db, ir_node *call) {
2684 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2685 } /* new_d_CallBegin */

2688 new_d_EndReg(dbg_info *db) {
2689 return new_bd_EndReg(db, current_ir_graph->current_block);
2690 } /* new_d_EndReg */

2693 new_d_EndExcept(dbg_info *db) {
2694 return new_bd_EndExcept(db, current_ir_graph->current_block);
2695 } /* new_d_EndExcept */

2698 new_d_Break(dbg_info *db) {
2699 return new_bd_Break(db, current_ir_graph->current_block);

2703 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2704 return new_bd_Filter(db, current_ir_graph->current_block,
2706 } /* new_d_Filter */

2710 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2711 ir_node *ir_true, ir_mode *mode) {
2712 return new_bd_Mux(db, current_ir_graph->current_block,
2713 sel, ir_false, ir_true, mode);
/* CopyB may raise an exception: allocate a frag array when precise
   exception contexts are enabled. */
2717 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2718 ir_node *dst, ir_node *src, ir_type *data_type) {
2720 res = new_bd_CopyB(db, current_ir_graph->current_block,
2721 store, dst, src, data_type);
2722 #if PRECISE_EXC_CONTEXT
2723 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);

2729 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2730 return new_bd_InstOf(db, current_ir_graph->current_block,
2731 store, objptr, type);
2732 } /* new_d_InstOf */

2735 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2736 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);

/* Bound check may fail: frag array under PRECISE_EXC_CONTEXT. */
2739 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2740 ir_node *idx, ir_node *lower, ir_node *upper) {
2742 res = new_bd_Bound(db, current_ir_graph->current_block,
2743 store, idx, lower, upper);
2744 #if PRECISE_EXC_CONTEXT
2745 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);

2752 new_d_Pin(dbg_info *db, ir_node *node) {
2753 return new_bd_Pin(db, current_ir_graph->current_block, node);

2758 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2759 int n_outs, ir_asm_constraint *outputs,
2760 int n_clobber, ident *clobber[], ident *asm_text) {
2761 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2764 /* ********************************************************************* */
2765 /* Comfortable interface with automatic Phi node construction. */
2766 /* (Also uses constructors of the other interfaces, except new_Block.) */
2767 /* ********************************************************************* */
2769 /* Block construction */
2770 /* immature Block without predecessors */
/* Creates an immature (not yet matured) Block with a dynamic, initially
   empty predecessor array; the block starts life as a macroblock head. */
2772 new_d_immBlock(dbg_info *db) {
2775 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2776 /* creates a new dynamic in-array as length of in is -1 */
2777 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);

2779 /* macroblock head */

/* Initialize all block attributes; predecessors may still be added, so the
   block is explicitly marked as not matured. */
2782 res->attr.block.is_matured = 0;
2783 res->attr.block.is_dead = 0;
2784 res->attr.block.is_mb_head = 1;
2785 res->attr.block.has_label = 0;
2786 res->attr.block.irg = current_ir_graph;
2787 res->attr.block.backedge = NULL;
2788 res->attr.block.in_cg = NULL;
2789 res->attr.block.cg_backedge = NULL;
2790 res->attr.block.extblk = NULL;
2791 res->attr.block.region = NULL;
2792 res->attr.block.mb_depth = 0;
2793 res->attr.block.label = 0;

2795 set_Block_block_visited(res, 0);

2797 /* Create and initialize array for Phi-node construction. */
2798 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2799 current_ir_graph->n_loc);
2800 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

2802 /* Immature block may not be optimized! */
2803 IRN_VRFY_IRG(res, current_ir_graph);
2806 } /* new_d_immBlock */

/* Convenience wrapper without debug info. */
2809 new_immBlock(void) {
2810 return new_d_immBlock(NULL);
2811 } /* new_immBlock */
2813 /* immature PartBlock with its predecessors */
/* A PartBlock is not a macroblock head: it inherits in[0] (the macroblock
   reference) from the block of pred_jmp and lies one level deeper. */
2815 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2816 ir_node *res = new_d_immBlock(db);
2817 ir_node *blk = get_nodes_block(pred_jmp);

/* Share the macroblock header entry of the predecessor's block. */
2819 res->in[0] = blk->in[0];
2820 assert(res->in[0] != NULL);
2821 add_immBlock_pred(res, pred_jmp);

2823 res->attr.block.is_mb_head = 0;
2824 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2827 } /* new_d_immPartBlock */

/* Convenience wrapper without debug info. */
2830 new_immPartBlock(ir_node *pred_jmp) {
2831 return new_d_immPartBlock(NULL, pred_jmp);
2832 } /* new_immPartBlock */
2834 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new control-flow predecessor of an immature block.
   Only allowed before the block is matured and only on macroblock heads. */
2836 add_immBlock_pred(ir_node *block, ir_node *jmp) {
/* Index of the edge about to be appended; computed before ARR_APP1.
   NOTE(review): in[0] appears reserved for the macroblock header (see
   new_d_immPartBlock), which would make this the 1-based edge count --
   confirm against the in-array layout. */
2837 int n = ARR_LEN(block->in) - 1;

2839 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2840 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2841 assert(is_ir_node(jmp));

2843 ARR_APP1(ir_node *, block->in, jmp);
2845 hook_set_irn_n(block, n, jmp, NULL);
2846 } /* add_immBlock_pred */
2848 /* changing the current block */
/* Makes target the block that subsequent new_d_*/new_* constructors use. */
2850 set_cur_block(ir_node *target) {
2851 current_ir_graph->current_block = target;
2852 } /* set_cur_block */
2854 /* ************************ */
2855 /* parameter administration */

2857 /* get a value from the parameter array from the current block by its index */
/* Slot 0 of graph_arr holds the current store (see get_store below), hence
   the pos + 1 offset used for ordinary local values. */
2859 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2860 ir_graph *irg = current_ir_graph;
2861 assert(get_irg_phase_state(irg) == phase_building);
2862 inc_irg_visited(irg);
2867 return get_r_value_internal(irg->current_block, pos + 1, mode);

2870 /* get a value from the parameter array from the current block by its index */
2872 get_value(int pos, ir_mode *mode) {
2873 return get_d_value(NULL, pos, mode);

2876 /* set a value at position pos in the parameter array from the current block */
2878 set_value(int pos, ir_node *value) {
2879 ir_graph *irg = current_ir_graph;
2880 assert(get_irg_phase_state(irg) == phase_building);
2882 assert(pos+1 < irg->n_loc);
2883 assert(is_ir_node(value));
2884 irg->current_block->attr.block.graph_arr[pos + 1] = value;

2887 /* Find the value number for a node in the current block.*/
/* Searches graph_arr back-to-front, skipping slot 0 (the store slot). */
2889 find_value(ir_node *value) {
2891 ir_node *bl = current_ir_graph->current_block;

2893 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2894 if (bl->attr.block.graph_arr[i] == value)

2899 /* get the current store */
/* Reads slot 0 of the current block's graph_arr (the memory state). */
2902 ir_graph *irg = current_ir_graph;
2904 assert(get_irg_phase_state(irg) == phase_building);
2905 /* GL: one could call get_value instead */
2906 inc_irg_visited(irg);
2907 return get_r_value_internal(irg->current_block, 0, mode_M);
2910 /* set the current store: handles automatic Sync construction for Load nodes */
2912 set_store(ir_node *store) {
2913 ir_node *load, *pload, *pred, *in[2];

2915 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2916 /* Beware: due to dead code elimination, a store might become a Bad node even in
2917 the construction phase. */
2918 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");

2920 if (get_opt_auto_create_sync()) {
2921 /* handle non-volatile Load nodes by automatically creating Sync's */
2922 load = skip_Proj(store);
2923 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2924 pred = get_Load_mem(load);

2926 if (is_Sync(pred)) {
2927 /* a Load after a Sync: move it up */
2928 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));

/* Rewire the Load to the Sync's first memory op and add this Load's
   memory result to the existing Sync instead of chaining after it. */
2930 set_Load_mem(load, get_memop_mem(mem));
2931 add_Sync_pred(pred, store);

2934 pload = skip_Proj(pred);
2935 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2936 /* a Load after a Load: create a new Sync */
2937 set_Load_mem(load, get_Load_mem(pload));

/* The two memory results are merged into a fresh 2-input Sync which
   becomes the new current store. */
2941 store = new_Sync(2, in);

/* Slot 0 of graph_arr is the store slot (see get_store). */
2946 current_ir_graph->current_block->attr.block.graph_arr[0] = store;

/* Registers ka as a keep-alive edge on the graph's End node. */
2950 keep_alive(ir_node *ka) {
2951 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2954 /* --- Useful access routines --- */

2955 /* Returns the current block of the current graph. To set the current
2956 block use set_cur_block. */
2957 ir_node *get_cur_block(void) {
2958 return get_irg_current_block(current_ir_graph);
2959 } /* get_cur_block */

2961 /* Returns the frame type of the current graph */
2962 ir_type *get_cur_frame_type(void) {
2963 return get_irg_frame_type(current_ir_graph);
2964 } /* get_cur_frame_type */
2967 /* ********************************************************************* */

2970 /* call once for each run of the library */
/* Installs the language-dependent callback used to initialize
   uninitialized local variables during SSA construction. */
2972 firm_init_cons(uninitialized_local_variable_func_t *func) {
2973 default_initialize_local_variable = func;
2974 } /* firm_init_cons */

/* Finalizes construction of every graph in the program and advances the
   program's phase state to phase_high. */
2977 irp_finalize_cons(void) {
2979 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2980 irg_finalize_cons(get_irp_irg(i));
2982 irp->phase_state = phase_high;
2983 } /* irp_finalize_cons */
/* -- Convenience constructors without debug info: each forwards to the
   corresponding new_d_* constructor with db == NULL. -- */

2986 ir_node *new_Block(int arity, ir_node **in) {
2987 return new_d_Block(NULL, arity, in);

2989 ir_node *new_Start(void) {
2990 return new_d_Start(NULL);

2992 ir_node *new_End(void) {
2993 return new_d_End(NULL);

2995 ir_node *new_Jmp(void) {
2996 return new_d_Jmp(NULL);

2998 ir_node *new_IJmp(ir_node *tgt) {
2999 return new_d_IJmp(NULL, tgt);

3001 ir_node *new_Cond(ir_node *c) {
3002 return new_d_Cond(NULL, c);

3004 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3005 return new_d_Return(NULL, store, arity, in);

3008 ir_node *new_Const(tarval *con) {
3009 return new_d_Const(NULL, con);

3012 ir_node *new_Const_long(ir_mode *mode, long value) {
3013 return new_d_Const_long(NULL, mode, value);

3016 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3017 return new_d_Const_type(NULL, con, tp);

3020 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
3021 return new_d_SymConst_type(NULL, mode, value, kind, type);

3023 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
3024 return new_d_SymConst(NULL, mode, value, kind);

3026 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3027 return new_d_simpleSel(NULL, store, objptr, ent);

3030 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3032 return new_d_Sel(NULL, store, objptr, arity, in, ent);

3034 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3036 return new_d_Call(NULL, store, callee, arity, in, tp);

3039 ir_node *new_Builtin(ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
3041 return new_d_Builtin(NULL, store, kind, arity, in, tp);
/* -- Arithmetic/logic convenience constructors (db == NULL). -- */

3044 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3045 return new_d_Add(NULL, op1, op2, mode);

3047 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3048 return new_d_Sub(NULL, op1, op2, mode);

3050 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3051 return new_d_Minus(NULL, op, mode);

3053 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3054 return new_d_Mul(NULL, op1, op2, mode);

3056 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
3057 return new_d_Mulh(NULL, op1, op2, mode);

3059 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3060 return new_d_Quot(NULL, memop, op1, op2, mode, state);

3062 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3063 return new_d_DivMod(NULL, memop, op1, op2, mode, state);

3065 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3066 return new_d_Div(NULL, memop, op1, op2, mode, state);

3068 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3069 return new_d_DivRL(NULL, memop, op1, op2, mode, state);

3071 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3072 return new_d_Mod(NULL, memop, op1, op2, mode, state);

3074 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3075 return new_d_Abs(NULL, op, mode);

3077 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3078 return new_d_And(NULL, op1, op2, mode);

3080 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3081 return new_d_Or(NULL, op1, op2, mode);

3083 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3084 return new_d_Eor(NULL, op1, op2, mode);

3086 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3087 return new_d_Not(NULL, op, mode);

3089 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3090 return new_d_Shl(NULL, op, k, mode);

3092 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3093 return new_d_Shr(NULL, op, k, mode);

3095 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3096 return new_d_Shrs(NULL, op, k, mode);

3098 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
3099 return new_d_Rotl(NULL, op, k, mode);

3101 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3102 return new_d_Carry(NULL, op1, op2, mode);

3104 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3105 return new_d_Borrow(NULL, op1, op2, mode);

3107 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3108 return new_d_Cmp(NULL, op1, op2);

3110 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3111 return new_d_Conv(NULL, op, mode);

3113 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3114 return new_d_strictConv(NULL, op, mode);
/* -- Memory/control convenience constructors (db == NULL). -- */

3117 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3118 return new_d_Phi(NULL, arity, in, mode);

3121 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3122 return new_d_Cast(NULL, op, to_tp);

3124 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
3125 return new_d_Load(NULL, store, addr, mode, flags);

3127 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
3128 return new_d_Store(NULL, store, addr, val, flags);

3131 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3132 ir_where_alloc where) {
3133 return new_d_Alloc(NULL, store, size, alloc_type, where);

3136 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3137 ir_type *free_type, ir_where_alloc where) {
3138 return new_d_Free(NULL, store, ptr, size, free_type, where);

3141 ir_node *new_Sync(int arity, ir_node *in[]) {
3142 return new_d_Sync(NULL, arity, in);

3145 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3146 return new_d_Proj(NULL, arg, mode, proj);

3149 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3150 return new_d_defaultProj(NULL, arg, max_proj);

3153 ir_node *new_Tuple(int arity, ir_node **in) {
3154 return new_d_Tuple(NULL, arity, in);

3156 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3157 return new_d_Id(NULL, val, mode);

/* Bad is a singleton per graph: return the graph's Bad node. */
3160 ir_node *new_Bad(void) {
3161 return get_irg_bad(current_ir_graph);

3164 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3165 return new_d_Confirm(NULL, val, bound, cmp);

3167 ir_node *new_Unknown(ir_mode *m) {
3168 return new_d_Unknown(m);

3171 ir_node *new_CallBegin(ir_node *callee) {
3172 return new_d_CallBegin(NULL, callee);

3174 ir_node *new_EndReg(void) {
3175 return new_d_EndReg(NULL);

3177 ir_node *new_EndExcept(void) {
3178 return new_d_EndExcept(NULL);

3180 ir_node *new_Break(void) {
3181 return new_d_Break(NULL);

3183 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3184 return new_d_Filter(NULL, arg, mode, proj);

/* NoMem is a singleton per graph: return the graph's NoMem node. */
3186 ir_node *new_NoMem(void) {
3187 return get_irg_no_mem(current_ir_graph);
3190 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3191 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);

3194 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3195 return new_d_CopyB(NULL, store, dst, src, data_type);

3197 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3198 return new_d_InstOf(NULL, store, objptr, ent);

3200 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3201 return new_d_Raise(NULL, store, obj);

3203 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3204 return new_d_Bound(NULL, store, idx, lower, upper);

3207 ir_node *new_Pin(ir_node *node) {
3208 return new_d_Pin(NULL, node);

/* A Dummy node is placed directly in the start block of the graph. */
3211 ir_node *new_Dummy(ir_mode *m) {
3212 ir_graph *irg = current_ir_graph;
3213 return new_ir_node(NULL, irg, get_irg_start_block(irg), op_Dummy, m, 0, NULL);

3215 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3216 int n_outs, ir_asm_constraint *outputs,
3217 int n_clobber, ident *clobber[], ident *asm_text) {
3218 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);

3221 /* create a new anchor node */
/* The Anchor has anchor_last inputs, all initially NULL, and no block. */
3222 ir_node *new_Anchor(ir_graph *irg) {
3223 ir_node *in[anchor_last];
3224 memset(in, 0, sizeof(in));
3225 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);