2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
/* Uncomment to use original code instead of generated one */
// #define USE_ORIGINAL
/* when we need verifying */
/* NOTE(review): the #if/#else guard selecting between these two definitions
   is not visible in this excerpt; the first variant is a no-op, the second
   runs the IR node verifier after each construction — confirm the guard. */
# define IRN_VRFY_IRG(res, irg)
# define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
/**
 * Language dependent variable initialization callback.
 */
static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* creates a bd constructor for a binop: builds the node in the given block
   of current_ir_graph, runs local optimization and verification */
#define NEW_BD_BINOP(instr)                                     \
new_bd_##instr(dbg_info *db, ir_node *block,                    \
       ir_node *op1, ir_node *op2, ir_mode *mode)               \
  ir_graph *irg = current_ir_graph;                             \
  res = new_ir_node(db, irg, block, op_##instr, mode, 2, in);   \
  res = optimize_node(res);                                     \
  IRN_VRFY_IRG(res, irg);                                       \
/* creates a bd constructor for an unop */
#define NEW_BD_UNOP(instr)                                      \
new_bd_##instr(dbg_info *db, ir_node *block,                    \
       ir_node *op, ir_mode *mode)                              \
  ir_graph *irg = current_ir_graph;                             \
  res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op);  \
  res = optimize_node(res);                                     \
  IRN_VRFY_IRG(res, irg);                                       \
/* creates a bd constructor for a divop: takes a memory operand and a pin
   state; the node is mode_T, the result mode is stored in the attributes */
#define NEW_BD_DIVOP(instr)                                     \
new_bd_##instr(dbg_info *db, ir_node *block,                    \
       ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
  ir_graph *irg = current_ir_graph;                             \
  res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
  res->attr.divmod.exc.pin_state = state;                       \
  res->attr.divmod.resmode = mode;                              \
  res->attr.divmod.no_remainder = 0;                            \
  res = optimize_node(res);                                     \
  IRN_VRFY_IRG(res, irg);                                       \
/* creates a rd constructor for a binop: temporarily switches current_ir_graph
   to irg, delegates to the bd constructor, then restores the previous graph */
#define NEW_RD_BINOP(instr)                                     \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,     \
       ir_node *op1, ir_node *op2, ir_mode *mode)               \
  ir_graph *rem = current_ir_graph;                             \
  current_ir_graph = irg;                                       \
  res = new_bd_##instr(db, block, op1, op2, mode);              \
  current_ir_graph = rem;                                       \
/* creates a rd constructor for an unop */
#define NEW_RD_UNOP(instr)                                      \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,     \
       ir_node *op, ir_mode *mode)                              \
  ir_graph *rem = current_ir_graph;                             \
  current_ir_graph = irg;                                       \
  res = new_bd_##instr(db, block, op, mode);                    \
  current_ir_graph = rem;                                       \
/* creates a rd constructor for a divop */
#define NEW_RD_DIVOP(instr)                                     \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,     \
       ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
  ir_graph *rem = current_ir_graph;                             \
  current_ir_graph = irg;                                       \
  res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
  current_ir_graph = rem;                                       \
/* creates a d constructor for a binop: builds in current_ir_graph's
   current_block (implicit-block, debug-info variant) */
#define NEW_D_BINOP(instr)                                                    \
new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) {      \
  return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
/* creates a d constructor for an unop */
#define NEW_D_UNOP(instr)                                               \
new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) {               \
  return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
171 #include "gen_ir_cons.c.inl"
 * Constructs a Block with a fixed number of predecessors.
 * Does not set current_block. Cannot be used with automatic
 * Phi node construction.
new_bd_Block(dbg_info *db, int arity, ir_node **in) {
    ir_graph *irg = current_ir_graph;
    /* a Block has no enclosing block of its own: pass NULL */
    res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
    /* macroblock header */
    res->attr.block.is_dead = 0;
    res->attr.block.is_mb_head = 1;   /* every fresh block heads its own macroblock */
    res->attr.block.has_label = 0;
    res->attr.block.irg = irg;
    res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
    res->attr.block.in_cg = NULL;     /* interprocedural (call graph) inputs unset */
    res->attr.block.cg_backedge = NULL;
    res->attr.block.extblk = NULL;    /* no extended basic block assigned yet */
    res->attr.block.mb_depth = 0;
    res->attr.block.label = 0;
    /* all predecessors are given up front, so the block is mature */
    set_Block_matured(res, 1);
    set_Block_block_visited(res, 0);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Start node in the given block; the node is a mode_T tuple. */
new_bd_Start(dbg_info *db, ir_node *block) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
    IRN_VRFY_IRG(res, irg);
/** Constructs an End node; arity -1: predecessors (keep-alive edges) are
    added later via add_End_keepalive. */
new_bd_End(dbg_info *db, ir_node *block) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
    IRN_VRFY_IRG(res, irg);
 * Creates a Phi node with all predecessors. Calling this constructor
 * is only allowed if the corresponding block is mature.
new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
    ir_graph *irg = current_ir_graph;
    /* Don't assert that block matured: the use of this constructor is strongly
    if (get_Block_matured(block))
        assert(get_irn_arity(block) == arity);
    res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
    res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
    /* scan the operands for Unknown inputs; optimization is skipped below
       when any are present */
    for (i = arity - 1; i >= 0; --i)
        if (is_Unknown(in[i])) {
    if (!has_unknown) res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
    /* Memory Phis in endless loops must be kept alive.
    As we can't distinguish these easily we keep all of them alive. */
    if (is_Phi(res) && mode == mode_M)
        add_End_keepalive(get_irg_end(irg), res);
/** Constructs a Const from tarval con with type tp; Consts always live in
    the start block. */
new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
    res->attr.con.tv = con;
    set_Const_type(res, tp); /* Call method because of complex assertion. */
    res = optimize_node (res);
    assert(get_Const_type(res) == tp);
    IRN_VRFY_IRG(res, irg);
} /* new_bd_Const_type */
/** Constructs a Const with the default (unknown) type attached. */
new_bd_Const(dbg_info *db, tarval *con) {
    ir_graph *irg = current_ir_graph;
    /* NOTE(review): delegates to the rd variant rather than new_bd_Const_type */
    return new_rd_Const_type (db, irg, con, firm_unknown_type);
/** Constructs a Const from a plain long value in the given mode. */
new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
    ir_graph *irg = current_ir_graph;
    return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
} /* new_bd_Const_long */
/** Constructs an Id node (identity of val) in the given mode. */
new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Proj selecting one result from the tuple node arg. */
new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
    ir_graph *irg = current_ir_graph;
    res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
    res->attr.proj = proj;
    assert(get_Proj_pred(res));
    assert(get_nodes_block(get_Proj_pred(res)));
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs the default Proj (mode_X) of a Cond: marks the Cond as
    fragmentary and records max_proj as its default projection number. */
new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
    ir_graph *irg = current_ir_graph;
    assert(arg->op == op_Cond);
    /* side effect on the Cond node itself, not only on the new Proj */
    arg->attr.cond.kind = fragmentary;
    arg->attr.cond.default_proj = max_proj;
    res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
} /* new_bd_defaultProj */
/** Constructs a Conv of op to mode; strict_flag selects a strict conversion. */
new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
    res->attr.conv.strict = strict_flag;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Cast of op to type to_tp; the mode stays that of op. */
new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
    ir_graph *irg = current_ir_graph;
    assert(is_atomic_type(to_tp));
    res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
    res->attr.cast.type = to_tp;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Tuple node (mode_T) from arity inputs. */
new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
    res = optimize_node (res);
    IRN_VRFY_IRG(res, irg);
/** Creates a remainderless Div node. */
static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
       ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
    res->attr.divmod.exc.pin_state = state;
    res->attr.divmod.resmode = mode;
    /* only difference from the NEW_BD_DIVOP-generated Div constructor */
    res->attr.divmod.no_remainder = 1;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Cmp of op1 and op2; the node is mode_T. */
new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Jmp (unconditional branch, mode_X). */
new_bd_Jmp(dbg_info *db, ir_node *block) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs an IJmp (indirect branch to the computed target tgt). */
new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Cond on selector c; initialized as a dense selector with
    default projection 0 and no jump prediction. */
new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
    res->attr.cond.kind = dense;
    res->attr.cond.default_proj = 0;
    res->attr.cond.pred = COND_JMP_PRED_NONE;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Call. Inputs are [store, callee, arguments...]; tp is the
    (method) type of the callee. The node is mode_T and pinned. */
new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
    ir_node *callee, int arity, ir_node **in, ir_type *tp) {
    ir_graph *irg = current_ir_graph;
    NEW_ARR_A(ir_node *, r_in, r_arity);
    /* arguments start at index 2, after store and callee */
    memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
    res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
    assert((get_unknown_type() == tp) || is_Method_type(tp));
    set_Call_type(res, tp);
    res->attr.call.exc.pin_state = op_pin_state_pinned;
    res->attr.call.callee_arr = NULL; /* no callee analysis information yet */
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Builtin. Inputs are [store, arguments...]; kind selects the
    builtin, tp is its method type. */
new_bd_Builtin(dbg_info *db, ir_node *block, ir_node *store,
    ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
    ir_graph *irg = current_ir_graph;
    NEW_ARR_A(ir_node *, r_in, r_arity);
    /* arguments start at index 1, after the store */
    memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
    res = new_ir_node(db, irg, block, op_Builtin, mode_T, r_arity, r_in);
    assert((get_unknown_type() == tp) || is_Method_type(tp));
    res->attr.builtin.exc.pin_state = op_pin_state_pinned;
    res->attr.builtin.kind = kind;
    res->attr.builtin.builtin_tp = tp;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
} /* new_bd_Builtin */
/** Constructs a Return (mode_X). Inputs are [store, result values...]. */
new_bd_Return(dbg_info *db, ir_node *block,
    ir_node *store, int arity, ir_node **in) {
    ir_graph *irg = current_ir_graph;
    NEW_ARR_A (ir_node *, r_in, r_arity);
    memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
    res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
} /* new_bd_Return */
/** Constructs a Load of adr in the given mode; flags control pin state,
    volatility and alignment. The node is mode_T. */
new_bd_Load(dbg_info *db, ir_node *block,
    ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
    res->attr.load.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
    res->attr.load.mode = mode;
    res->attr.load.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
    res->attr.load.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Store of val to adr; flags as for Load. The node is mode_T. */
new_bd_Store(dbg_info *db, ir_node *block,
    ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
    res->attr.store.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
    res->attr.store.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
    res->attr.store.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs an Alloc of size bytes/elements of alloc_type; where selects
    stack or heap allocation. Pinned, mode_T. */
new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
    ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
    res->attr.alloc.exc.pin_state = op_pin_state_pinned;
    res->attr.alloc.where = where;
    res->attr.alloc.type = alloc_type;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Free of ptr (size, free_type, where mirror Alloc).
    The node is mode_M: it only transforms memory state. */
new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
    ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
    res->attr.free.where = where;
    res->attr.free.type = free_type;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Sel of entity ent relative to objptr, with arity index
    operands. Mode is mode_P_code for method entities, mode_P_data otherwise. */
new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
    int arity, ir_node **in, ir_entity *ent) {
    ir_graph *irg = current_ir_graph;
    ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
    assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
    NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
    /* index operands start at index 2, after store and objptr */
    memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
    * Sel's can select functions which should be of mode mode_P_code.
    res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
    res->attr.sel.entity = ent;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a SymConst of the given kind/symbol in the given mode,
    with type tp attached. */
new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
    symconst_symbol value,symconst_kind symkind, ir_type *tp) {
    ir_graph *irg = current_ir_graph;
    ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
    res->attr.symc.kind = symkind;
    res->attr.symc.sym = value;
    res->attr.symc.tp = tp;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
} /* new_bd_SymConst_type */
/** Constructs an empty Sync (mode_M); arity -1: predecessors are added
    afterwards via add_Sync_pred. */
new_bd_Sync(dbg_info *db, ir_node *block) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
    /* no need to call optimize node here, Sync are always created with no predecessors */
    IRN_VRFY_IRG(res, irg);
/** Constructs a Confirm: asserts that val is in relation cmp to bound.
    Mode is that of val. */
new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
    ir_node *in[2], *res;
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
    res->attr.confirm.cmp = cmp;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
} /* new_bd_Confirm */
/** Constructs an Unknown of mode m; placed in the start block, no debug info. */
new_bd_Unknown(ir_mode *m) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
    res = optimize_node(res);
} /* new_bd_Unknown */
/** Constructs a CallBegin for the interprocedural view; its single input is
    the callee pointer of the given Call. */
new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
    ir_graph *irg = current_ir_graph;
    in[0] = get_Call_ptr(call);
    res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
    /* res->attr.callbegin.irg = irg; */
    res->attr.callbegin.call = call;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
} /* new_bd_CallBegin */
/** Constructs an EndReg (interprocedural regular end); registered with the
    graph, arity -1 (dynamic predecessors). Not optimized. */
new_bd_EndReg(dbg_info *db, ir_node *block) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
    set_irg_end_reg(irg, res);
    IRN_VRFY_IRG(res, irg);
} /* new_bd_EndReg */
/** Constructs an EndExcept (interprocedural exceptional end); registered
    with the graph, arity -1. Not optimized. */
new_bd_EndExcept(dbg_info *db, ir_node *block) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
    set_irg_end_except(irg, res);
    IRN_VRFY_IRG (res, irg);
} /* new_bd_EndExcept */
/** Constructs a Break (interprocedural jump, mode_X). */
new_bd_Break(dbg_info *db, ir_node *block) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Filter (interprocedural Proj variant) selecting projection
    proj from arg. */
new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
    res->attr.filter.proj = proj;
    res->attr.filter.in_cg = NULL;
    res->attr.filter.backedge = NULL;
    assert(get_Proj_pred(res));
    assert(get_nodes_block(get_Proj_pred(res)));
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
} /* new_bd_Filter */
/** Constructs a Mux: selects ir_false or ir_true depending on sel. */
new_bd_Mux(dbg_info *db, ir_node *block,
    ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a CopyB copying a data_type object from src to dst.
    Pinned, mode_T. */
new_bd_CopyB(dbg_info *db, ir_node *block,
    ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
    res->attr.copyb.exc.pin_state = op_pin_state_pinned;
    res->attr.copyb.data_type = data_type;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs an InstOf (dynamic type test of objptr against type), mode_T.
    NOTE(review): starts out floating, unlike the other exception-capable
    constructors in this file which pin — confirm this is intentional. */
new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
    ir_node *objptr, ir_type *type) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
    res->attr.instof.exc.pin_state = op_pin_state_floats;
    res->attr.instof.type = type;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
} /* new_bd_InstOf */
/** Constructs a Raise throwing obj; inputs are [store, obj], mode_T. */
new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Bound check: asserts lower <= idx < upper. Pinned, mode_T. */
new_bd_Bound(dbg_info *db, ir_node *block,
    ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
    res->attr.bound.exc.pin_state = op_pin_state_pinned;
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs a Pin fixing node to this block; mode is that of node. */
new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/** Constructs an ASM node (inline assembler, pinned, mode_T). The constraint
    and clobber arrays are copied onto the graph's obstack so the caller may
    reuse its buffers. */
new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
    int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
    ir_graph *irg = current_ir_graph;
    res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
    res->attr.assem.pin_state = op_pin_state_pinned;
    res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
    res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
    res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
    res->attr.assem.asm_text = asm_text;
    memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
    memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
    memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
    res = optimize_node(res);
    IRN_VRFY_IRG(res, irg);
/* --------------------------------------------- */
/* private interfaces, for professional use only */
/* --------------------------------------------- */
/* The new_rd_* constructors below all follow one pattern: save and switch
   current_ir_graph to the given irg, delegate to the matching new_bd_*
   constructor, then restore the previous graph. */
/* Constructs a Block with a fixed number of predecessors.
Does not set current_block. Can not be used with automatic
Phi node construction. */
new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Block(db, arity, in);
    current_ir_graph = rem;
/** Constructs a Start node in irg. */
new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Start(db, block);
    current_ir_graph = rem;
/** Constructs an End node in irg. */
new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_End(db, block);
    current_ir_graph = rem;
/* Creates a Phi node with all predecessors. Calling this constructor
is only allowed if the corresponding block is mature. */
new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Phi(db, block,arity, in, mode);
    current_ir_graph = rem;
/** Constructs a Const in irg from tarval con with type tp. */
new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Const_type(db, con, tp);
    current_ir_graph = rem;
} /* new_rd_Const_type */
/** Constructs a Const in irg with the default (unknown) type.
    NOTE(review): inlines new_rd_Const_type rather than calling it — the
    commented-out lines below preserve the alternative. */
new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
//#ifdef USE_ORIGINAL
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Const_type(db, con, firm_unknown_type);
    current_ir_graph = rem;
//  res = new_rd_Const_type(db, irg, con, firm_unknown_type);
/** Constructs a Const in irg from a long value in the given mode. */
new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
    return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
} /* new_rd_Const_long */
/** Constructs an Id node in irg. */
new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Id(db, block, val, mode);
    current_ir_graph = rem;
/** Constructs a Proj node in irg. */
new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Proj(db, block, arg, mode, proj);
    current_ir_graph = rem;
/** Constructs the default Proj of a Cond in irg. */
new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_defaultProj(db, block, arg, max_proj);
    current_ir_graph = rem;
} /* new_rd_defaultProj */
/** Constructs a non-strict Conv in irg (strict_flag = 0). */
new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Conv(db, block, op, mode, 0);
    current_ir_graph = rem;
/** Constructs a strict Conv in irg (strict_flag = 1). */
new_rd_strictConv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Conv(db, block, op, mode, 1);
    current_ir_graph = rem;
} /* new_rd_strictConv */
/** Constructs a Cast in irg. */
new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Cast(db, block, op, to_tp);
    current_ir_graph = rem;
/** Constructs a Tuple in irg. */
new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Tuple(db, block, arity, in);
    current_ir_graph = rem;
} /* new_rd_Tuple */
/* instantiate the generated rd constructors for the div/binop families */
NEW_RD_DIVOP(DivMod)
NEW_RD_BINOP(Borrow)
/* creates a rd constructor for an divRL */
ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
       ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
    current_ir_graph = rem;
/** Constructs a Cmp in irg. */
new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
    ir_node *op1, ir_node *op2) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Cmp(db, block, op1, op2);
    current_ir_graph = rem;
/** Constructs a Jmp in irg. */
new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Jmp(db, block);
    current_ir_graph = rem;
/** Constructs an IJmp in irg. */
new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_IJmp(db, block, tgt);
    current_ir_graph = rem;
/** Constructs a Cond in irg. */
new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Cond(db, block, c);
    current_ir_graph = rem;
/** Constructs a Call in irg. */
new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
    ir_node *callee, int arity, ir_node **in, ir_type *tp) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Call(db, block, store, callee, arity, in, tp);
    current_ir_graph = rem;
/** Constructs a Builtin in irg. */
new_rd_Builtin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
    ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Builtin(db, block, store, kind, arity, in, tp);
    current_ir_graph = rem;
} /* new_rd_Builtin */
/** Constructs a Return in irg. */
new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
    ir_node *store, int arity, ir_node **in) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Return(db, block, store, arity, in);
    current_ir_graph = rem;
} /* new_rd_Return */
/** Constructs a Load in irg. */
new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
    ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Load(db, block, store, adr, mode, flags);
    current_ir_graph = rem;
/** Constructs a Store in irg. */
new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
    ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Store(db, block, store, adr, val, flags);
    current_ir_graph = rem;
} /* new_rd_Store */
/** Constructs an Alloc in irg. */
new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
    ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Alloc(db, block, store, size, alloc_type, where);
    current_ir_graph = rem;
} /* new_rd_Alloc */
/** Constructs a Free in irg. */
new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
    ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Free(db, block, store, ptr, size, free_type, where);
    current_ir_graph = rem;
/** Constructs a Sel without index operands (arity 0) in irg. */
new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
    ir_node *store, ir_node *objptr, ir_entity *ent) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
    current_ir_graph = rem;
} /* new_rd_simpleSel */
/** Constructs a Sel with arity index operands in irg. */
new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
    int arity, ir_node **in, ir_entity *ent) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
    current_ir_graph = rem;
/** Constructs a SymConst with type tp in irg. */
new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
    symconst_symbol value, symconst_kind symkind, ir_type *tp) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
    current_ir_graph = rem;
} /* new_rd_SymConst_type */
/** Constructs a SymConst with the default (unknown) type in irg. */
new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
    symconst_symbol value, symconst_kind symkind) {
    return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
} /* new_rd_SymConst */
/* Convenience wrappers: each builds the symconst_symbol union for one
   SymConst kind and places the node in irg's start block. */

/** SymConst for the address of entity symbol. */
ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
    symconst_symbol sym;
    sym.entity_p = symbol;
    return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
} /* new_rd_SymConst_addr_ent */
/** SymConst for the offset of entity symbol. */
ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
    symconst_symbol sym;
    sym.entity_p = symbol;
    return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
} /* new_rd_SymConst_ofs_ent */
/** SymConst for the address of a by-name symbol (ident). */
ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
    symconst_symbol sym;
    sym.ident_p = symbol;
    return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
} /* new_rd_SymConst_addr_name */
/** SymConst for the type tag of type symbol. */
ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
    symconst_symbol sym;
    sym.type_p = symbol;
    return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
} /* new_rd_SymConst_type_tag */
/** SymConst for the size of type symbol. */
ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
    symconst_symbol sym;
    sym.type_p = symbol;
    return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
} /* new_rd_SymConst_size */
/** SymConst for the alignment of type symbol. */
ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
    symconst_symbol sym;
    sym.type_p = symbol;
    return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
} /* new_rd_SymConst_align */
/** Constructs a Sync in irg; the arity predecessors are added one by one
    after the (empty) node is built. */
new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Sync(db, block);
    current_ir_graph = rem;
    for (i = 0; i < arity; ++i)
        add_Sync_pred(res, in[i]);
/** Constructs a Confirm in irg. */
new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Confirm(db, block, val, bound, cmp);
    current_ir_graph = rem;
} /* new_rd_Confirm */
/** Constructs an Unknown of mode m in irg. */
new_rd_Unknown(ir_graph *irg, ir_mode *m) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Unknown(m);
    current_ir_graph = rem;
} /* new_rd_Unknown */
/** Constructs a CallBegin in irg. */
new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_CallBegin(db, block, call);
    current_ir_graph = rem;
} /* new_rd_CallBegin */
/** Constructs an EndReg in irg.
    NOTE(review): builds the node directly instead of delegating to
    new_bd_EndReg like its siblings do — confirm the duplication is wanted. */
new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
    res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
    set_irg_end_reg(irg, res);
    IRN_VRFY_IRG(res, irg);
} /* new_rd_EndReg */
/** Constructs an EndExcept in irg (built directly, see note above on EndReg). */
new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
    res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
    set_irg_end_except(irg, res);
    IRN_VRFY_IRG (res, irg);
} /* new_rd_EndExcept */
/** Constructs a Break in irg. */
new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
    ir_graph *rem = current_ir_graph;
    current_ir_graph = irg;
    res = new_bd_Break(db, block);
    current_ir_graph = rem;
} /* new_rd_Break */
/* Builds a Filter node for arg with the given mode and projection number
   in the explicit graph irg (save/switch/restore of current_ir_graph). */
1420 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1423 ir_graph *rem = current_ir_graph;
1425 current_ir_graph = irg;
1426 res = new_bd_Filter(db, block, arg, mode, proj);
1427 current_ir_graph = rem;
1430 } /* new_rd_Filter */
/* Builds a Mux node (select ir_false/ir_true by sel, result mode) in the
   explicit graph irg (save/switch/restore of current_ir_graph). */
1434 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1435 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1437 ir_graph *rem = current_ir_graph;
1439 current_ir_graph = irg;
1440 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1441 current_ir_graph = rem;
/* Builds a CopyB node (block copy from src to dst of data_type, threaded
   through memory state store) in the explicit graph irg. */
1447 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1448 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1450 ir_graph *rem = current_ir_graph;
1452 current_ir_graph = irg;
1453 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1454 current_ir_graph = rem;
1457 } /* new_rd_CopyB */
/* Builds an InstOf node (objptr tested against type, threaded through
   memory state store) in the explicit graph irg. */
1460 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1461 ir_node *objptr, ir_type *type) {
1463 ir_graph *rem = current_ir_graph;
1465 current_ir_graph = irg;
1466 res = new_bd_InstOf(db, block, store, objptr, type);
1467 current_ir_graph = rem;
1470 } /* new_rd_InstOf */
/* Builds a Raise node (exception object obj, memory state store) in the
   explicit graph irg (save/switch/restore of current_ir_graph). */
1473 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1475 ir_graph *rem = current_ir_graph;
1477 current_ir_graph = irg;
1478 res = new_bd_Raise(db, block, store, obj);
1479 current_ir_graph = rem;
1482 } /* new_rd_Raise */
/* Builds a Bound node (checks idx against [lower, upper], threaded
   through memory state store) in the explicit graph irg. */
1484 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1485 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1487 ir_graph *rem = current_ir_graph;
1489 current_ir_graph = irg;
1490 res = new_bd_Bound(db, block, store, idx, lower, upper);
1491 current_ir_graph = rem;
1494 } /* new_rd_Bound */
/* Builds a Pin node for the given node in the explicit graph irg
   (save/switch/restore of current_ir_graph). */
1497 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1499 ir_graph *rem = current_ir_graph;
1501 current_ir_graph = irg;
1502 res = new_bd_Pin(db, block, node);
1503 current_ir_graph = rem;
/* Builds an ASM node (inline assembler: arity inputs with constraints,
   n_outs output constraints, n_clobber clobbered registers, asm_text)
   in the explicit graph irg (save/switch/restore of current_ir_graph). */
1509 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1510 int arity, ir_node *in[], ir_asm_constraint *inputs,
1511 int n_outs, ir_asm_constraint *outputs,
1512 int n_clobber, ident *clobber[], ident *asm_text) {
1514 ir_graph *rem = current_ir_graph;
1516 current_ir_graph = irg;
1517 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1518 current_ir_graph = rem;
/* ------------------------------------------------------------------ */
/* new_r_* convenience constructors: thin wrappers around the new_rd_* */
/* family that simply pass NULL debug info. new_r_Bad and new_r_NoMem  */
/* instead return the graph's singleton Bad / NoMem node.              */
/* ------------------------------------------------------------------ */
1525 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1526 return new_rd_Block(NULL, irg, arity, in);
1528 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1529 return new_rd_Start(NULL, irg, block);
1531 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1532 return new_rd_End(NULL, irg, block);
1534 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1535 return new_rd_Jmp(NULL, irg, block);
1537 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1538 return new_rd_IJmp(NULL, irg, block, tgt);
1540 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1541 return new_rd_Cond(NULL, irg, block, c);
1543 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1544 ir_node *store, int arity, ir_node **in) {
1545 return new_rd_Return(NULL, irg, block, store, arity, in);
1548 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
1549 return new_rd_Const(NULL, irg, con);
1551 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
1552 return new_rd_Const_long(NULL, irg, mode, value);
1554 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
1555 return new_rd_Const_type(NULL, irg, con, tp);
1557 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1558 symconst_symbol value, symconst_kind symkind) {
1559 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
/* simpleSel: a Sel with no index expressions (n_index = 0, index = NULL). */
1561 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1562 ir_node *objptr, ir_entity *ent) {
1563 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1566 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1567 ir_node *objptr, int n_index, ir_node **index,
1569 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1572 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1573 ir_node *callee, int arity, ir_node **in,
1575 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1577 ir_node *new_r_Builtin(ir_graph *irg, ir_node *block, ir_node *store,
1578 ir_builtin_kind kind, int arity, ir_node **in,
1580 return new_rd_Builtin(NULL, irg, block, store, kind, arity, in, tp);
/* Arithmetic and logic operations. */
1583 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1584 ir_node *op1, ir_node *op2, ir_mode *mode) {
1585 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1587 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1588 ir_node *op1, ir_node *op2, ir_mode *mode) {
1589 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1591 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1592 ir_node *op, ir_mode *mode) {
1593 return new_rd_Minus(NULL, irg, block, op, mode);
1595 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1596 ir_node *op1, ir_node *op2, ir_mode *mode) {
1597 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1599 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1600 ir_node *op1, ir_node *op2, ir_mode *mode) {
1601 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1603 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1604 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1605 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1607 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1608 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1609 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1611 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1612 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1613 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1616 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1617 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1618 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1621 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1622 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1623 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1625 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1626 ir_node *op, ir_mode *mode) {
1627 return new_rd_Abs(NULL, irg, block, op, mode);
1629 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1630 ir_node *op1, ir_node *op2, ir_mode *mode) {
1631 return new_rd_And(NULL, irg, block, op1, op2, mode);
1633 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1634 ir_node *op1, ir_node *op2, ir_mode *mode) {
1635 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1637 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1638 ir_node *op1, ir_node *op2, ir_mode *mode) {
1639 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1641 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1642 ir_node *op, ir_mode *mode) {
1643 return new_rd_Not(NULL, irg, block, op, mode);
/* Shift/rotate: op shifted/rotated by k. */
1645 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1646 ir_node *op, ir_node *k, ir_mode *mode) {
1647 return new_rd_Shl(NULL, irg, block, op, k, mode);
1649 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1650 ir_node *op, ir_node *k, ir_mode *mode) {
1651 return new_rd_Shr(NULL, irg, block, op, k, mode);
1653 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1654 ir_node *op, ir_node *k, ir_mode *mode) {
1655 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1657 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1658 ir_node *op, ir_node *k, ir_mode *mode) {
1659 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1661 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1662 ir_node *op, ir_node *k, ir_mode *mode) {
1663 return new_rd_Carry(NULL, irg, block, op, k, mode);
1665 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1666 ir_node *op, ir_node *k, ir_mode *mode) {
1667 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1669 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1670 ir_node *op1, ir_node *op2) {
1671 return new_rd_Cmp(NULL, irg, block, op1, op2);
1673 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1674 ir_node *op, ir_mode *mode) {
1675 return new_rd_Conv(NULL, irg, block, op, mode);
1678 ir_node *new_r_strictConv(ir_graph *irg, ir_node *block,
1679 ir_node *op, ir_mode *mode) {
1680 return new_rd_strictConv(NULL, irg, block, op, mode);
1682 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1683 ir_node **in, ir_mode *mode) {
1684 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1687 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1688 return new_rd_Cast(NULL, irg, block, op, to_tp);
/* Memory operations. */
1690 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1691 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1692 return new_rd_Load(NULL, irg, block, store, adr, mode, flags);
1694 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1695 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1696 return new_rd_Store(NULL, irg, block, store, adr, val, flags);
1699 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1700 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1701 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1704 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1705 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1706 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1709 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1710 return new_rd_Sync(NULL, irg, block, arity, in);
1713 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1714 ir_mode *mode, long proj) {
1715 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1718 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1720 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1723 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1724 int arity, ir_node **in) {
1725 return new_rd_Tuple(NULL, irg, block, arity, in );
1727 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1728 ir_node *val, ir_mode *mode) {
1729 return new_rd_Id(NULL, irg, block, val, mode);
/* Bad is a singleton per graph: just hand out the existing node. */
1732 ir_node *new_r_Bad(ir_graph *irg) {
1733 return get_irg_bad(irg);
1736 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1737 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1739 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1740 return new_rd_Unknown(irg, m);
1743 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1744 return new_rd_CallBegin(NULL, irg, block, callee);
1746 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1747 return new_rd_EndReg(NULL, irg, block);
1749 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1750 return new_rd_EndExcept(NULL, irg, block);
1752 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1753 return new_rd_Break(NULL, irg, block);
1755 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1756 ir_mode *mode, long proj) {
1757 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
/* NoMem is a singleton per graph: just hand out the existing node. */
1759 ir_node *new_r_NoMem(ir_graph *irg) {
1760 return get_irg_no_mem(irg);
1763 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1764 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1765 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1768 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1769 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1770 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1772 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1774 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1776 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1777 ir_node *store, ir_node *obj) {
1778 return new_rd_Raise(NULL, irg, block, store, obj);
1780 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1781 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1782 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1785 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1786 return new_rd_Pin(NULL, irg, block, node);
1789 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1790 int arity, ir_node *in[], ir_asm_constraint *inputs,
1791 int n_outs, ir_asm_constraint *outputs,
1792 int n_clobber, ident *clobber[], ident *asm_text) {
1793 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1796 /** ********************/
1797 /** public interfaces */
1798 /** construction tools */
1804 * - create a new Start node in the current block
1806 * @return s - pointer to the created Start node
/* Creates a Start node (mode_T, zero predecessors) in the current block
   of current_ir_graph, then runs local optimization and verification. */
1811 new_d_Start(dbg_info *db) {
1814 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1815 op_Start, mode_T, 0, NULL);
1817 res = optimize_node(res);
1818 IRN_VRFY_IRG(res, current_ir_graph);
/* Creates an End node (mode_X, dynamic in-array: arity -1) in the
   current block of current_ir_graph, optimizes and verifies it. */
1823 new_d_End(dbg_info *db) {
1825 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1826 op_End, mode_X, -1, NULL);
1827 res = optimize_node(res);
1828 IRN_VRFY_IRG(res, current_ir_graph);
1833 /* Constructs a Block with a fixed number of predecessors.
1834 Does set current_block. Can be used with automatic Phi
1835 node construction. */
/* Creates a Block with a fixed number of predecessors and makes it the
   current block, setting up the graph_arr used by automatic Phi
   construction while the graph is in phase_building. */
1837 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1840 int has_unknown = 0;
/* NOTE(review): the two construction calls below are presumably the
   USE_ORIGINAL / generated alternatives of an #ifdef — confirm against
   the full file. */
1843 res = new_bd_Block(db, arity, in);
1845 res = new_rd_Block(db, current_ir_graph, arity, in);
1848 /* Create and initialize array for Phi-node construction. */
1849 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1850 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1851 current_ir_graph->n_loc);
1852 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan the predecessors for Unknown nodes; such blocks are not optimized. */
1855 for (i = arity-1; i >= 0; i--)
1856 if (is_Unknown(in[i])) {
1861 if (!has_unknown) res = optimize_node(res);
1863 current_ir_graph->current_block = res;  /* make this the construction block */
1865 IRN_VRFY_IRG(res, current_ir_graph);
1871 /* ***********************************************************************/
1872 /* Methods necessary for automatic Phi node creation */
1874 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1875 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1876 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1877 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1879 Call Graph: ( A ---> B == A "calls" B)
1881 get_value mature_immBlock
1889 get_r_value_internal |
1893 new_rd_Phi0 new_rd_Phi_in
1895 * *************************************************************************** */
1897 /** Creates a Phi node with 0 predecessors. */
/* Creates a Phi node with 0 predecessors: a placeholder used while a
   block is immature; it is completed/exchanged when the block matures. */
1898 static inline ir_node *
1899 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1902 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1903 IRN_VRFY_IRG(res, irg);
1909 * Internal constructor of a Phi node by a phi_merge operation.
1911 * @param irg the graph on which the Phi will be constructed
1912 * @param block the block in which the Phi will be constructed
1913 * @param mode the mode of the Phi node
1914 * @param in the input array of the phi node
1915 * @param ins number of elements in the input array
1916 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1917 * the value for which the new Phi is constructed
/* Internal Phi constructor used by phi_merge: builds a Phi over in[]/ins
   but collapses it to the single distinct predecessor when all inputs
   (ignoring Ids, self references and the pending phi0) agree. */
1919 static inline ir_node *
1920 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1921 ir_node **in, int ins, ir_node *phi0) {
1923 ir_node *res, *known;
1925 /* Allocate a new node on the obstack. The allocation copies the in
1927 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1928 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1930 /* This loop checks whether the Phi has more than one predecessor.
1931 If so, it is a real Phi node and we break the loop. Else the
1932 Phi node merges the same definition on several paths and therefore
1934 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
1936 for (i = ins - 1; i >= 0; --i) {
1939 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1941 /* Optimize self referencing Phis: We can't detect them yet properly, as
1942 they still refer to the Phi0 they will replace. So replace right now. */
1943 if (phi0 && in[i] == phi0)
1946 if (in[i] == res || in[i] == known)
1955 /* i < 0: there is at most one predecessor, we don't need a phi node. */
1958 edges_node_deleted(res, current_ir_graph);
1959 obstack_free(current_ir_graph->obst, res); /* discard the speculative Phi */
1960 if (is_Phi(known)) {
1961 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1962 order, an enclosing Phi node may become superfluous. */
1963 res = optimize_in_place_2(known);
1965 exchange(known, res);
1970 /* An undefined value, e.g., in unreachable code. */
1974 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1975 IRN_VRFY_IRG(res, irg);
1976 /* Memory Phis in endless loops must be kept alive.
1977 As we can't distinguish these easily we keep all of them alive. */
1978 if (is_Phi(res) && mode == mode_M)
1979 add_End_keepalive(get_irg_end(irg), res);
1983 } /* new_rd_Phi_in */
1986 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1988 #if PRECISE_EXC_CONTEXT
1990 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1993 * Construct a new frag_array for node n.
1994 * Copy the content from the current graph_arr of the corresponding block:
1995 * this is the current state.
1996 * Set ProjM(n) as current memory state.
1997 * Further the last entry in frag_arr of current block points to n. This
1998 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Builds a new frag_arr for fragile op n: a snapshot of the current
   block's graph_arr with arr[0] replaced by n's exception-memory Proj.
   The block's last graph_arr slot is pointed at n, chaining the frag
   ops of the block (block->last_frag_op-> ... ->first_frag_op). */
2000 static inline ir_node **new_frag_arr(ir_node *n) {
2004 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2005 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2006 sizeof(ir_node *)*current_ir_graph->n_loc);
2008 /* turn off optimization before allocating Proj nodes, as res isn't
2010 opt = get_opt_optimize(); set_optimize(0);
2011 /* Here we rely on the fact that all frag ops have Memory as first result! */
2013 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2014 } else if (is_CopyB(n)) {
2015 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops share the same memory Proj number; the
   asserts document (and check) that assumption. */
2017 assert((pn_Quot_M == pn_DivMod_M) &&
2018 (pn_Quot_M == pn_Div_M) &&
2019 (pn_Quot_M == pn_Mod_M) &&
2020 (pn_Quot_M == pn_Load_M) &&
2021 (pn_Quot_M == pn_Store_M) &&
2022 (pn_Quot_M == pn_Alloc_M) &&
2023 (pn_Quot_M == pn_Bound_M));
2024 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
2028 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2030 } /* new_frag_arr */
2033 * Returns the frag_arr from a node.
/* Returns the frag_arr stored in n's opcode-specific attribute
   (the attribute location differs per fragile opcode). */
2035 static inline ir_node **get_frag_arr(ir_node *n) {
2036 switch (get_irn_opcode(n)) {
2038 return n->attr.call.exc.frag_arr;
2040 return n->attr.alloc.exc.frag_arr;
2042 return n->attr.load.exc.frag_arr;
2044 return n->attr.store.exc.frag_arr;
2046 return n->attr.except.frag_arr;  /* generic exception attribute */
2048 } /* get_frag_arr */
/* Writes val into position pos of every frag_arr along the chain of
   fragile ops, but only into empty (NULL) slots — it never overwrites.
   The debug build bounds the walk to detect endless recursion. */
2051 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2052 #ifdef DEBUG_libfirm
2055 for (i = 1024; i >= 0; --i)  /* bounded loop instead of recursion */
2060 if (frag_arr[pos] == NULL)
2061 frag_arr[pos] = val;
2062 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
/* Follow the chain to the next fragile op's frag_arr. */
2063 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2064 assert(arr != frag_arr && "Endless recursion detected");
2069 assert(!"potential endless recursion in set_frag_value");
2070 } /* set_frag_value */
/* Looks up the value at pos as seen after fragile op cfOp: reads it from
   cfOp's frag_arr, and when the block holds a newer definition builds a
   Phi (or Phi0 for immature blocks) to merge the states. */
2073 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2077 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
2079 frag_arr = get_frag_arr(cfOp);
2080 res = frag_arr[pos];
2082 if (block->attr.block.graph_arr[pos] != NULL) {
2083 /* There was a set_value() after the cfOp and no get_value() before that
2084 set_value(). We must build a Phi node now. */
2085 if (block->attr.block.is_matured) {
2086 int ins = get_irn_arity(block);
2088 NEW_ARR_A(ir_node *, nin, ins);
2089 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: queue a Phi0 on the block's phi list for later maturing. */
2091 res = new_rd_Phi0(current_ir_graph, block, mode);
2092 res->attr.phi.u.pos = pos;
2093 res->attr.phi.next = block->attr.block.phis;
2094 block->attr.block.phis = res;
2096 assert(res != NULL);
2097 /* It's a Phi, we can write this into all graph_arrs with NULL */
2098 set_frag_value(block->attr.block.graph_arr, pos, res);
2100 res = get_r_value_internal(block, pos, mode);
2101 set_frag_value(block->attr.block.graph_arr, pos, res);
2105 } /* get_r_frag_value_internal */
2106 #endif /* PRECISE_EXC_CONTEXT */
2109 * Check whether a control flownode cf_pred represents an exception flow.
2111 * @param cf_pred the control flow node
2112 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
/* Returns non-zero iff control flow edge cf_pred out of prev_cf_op is
   exceptional: prev_cf_op must be a fragile op and cf_pred its
   pn_Generic_X_except Proj. */
2114 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2116 * Note: all projections from a raise are "exceptional control flow" so we handle it
2117 * like a normal Jmp, because there is no "regular" one.
2118 * That's why Raise is no "fragile_op"!
2120 if (is_fragile_op(prev_cf_op)) {
2121 if (is_Proj(cf_pred)) {
2122 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2123 /* the regular control flow, NO exception */
/* anything else must be the exception projection */
2126 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2129 /* Hmm, exception but not a Proj? */
2130 assert(!"unexpected condition: fragile op without a proj");
2134 } /* is_exception_flow */
2137 * Computes the predecessors for the real phi node, and then
2138 * allocates and returns this node. The routine called to allocate the
2139 * node might optimize it away and return a real value.
2140 * This function must be called with an in-array of proper size.
/* Collects the Phi predecessors for value pos in block (recursing into
   predecessor blocks via get_r_value_internal), then builds the Phi via
   new_rd_Phi_in — which may fold it to a plain value. nin must be an
   array of ins elements supplied by the caller. */
2143 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2144 ir_node *prevBlock, *res, *phi0, *phi0_all;
2147 /* If this block has no value at pos create a Phi0 and remember it
2148 in graph_arr to break recursions.
2149 Else we may not set graph_arr as a later value is remembered there. */
2151 if (block->attr.block.graph_arr[pos] == NULL) {
2152 ir_graph *irg = current_ir_graph;
2154 if (block == get_irg_start_block(irg)) {
2155 /* Collapsing to Bad tarvals is no good idea.
2156 So we call a user-supplied routine here that deals with this case as
2157 appropriate for the given language. Sorrily the only help we can give
2158 here is the position.
2160 Even if all variables are defined before use, it can happen that
2161 we get to the start block, if a Cond has been replaced by a tuple
2162 (bad, jmp). In this case we call the function needlessly, eventually
2163 generating a non-existent error.
2164 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2167 if (default_initialize_local_variable != NULL) {
2168 ir_node *rem = get_cur_block();
/* run the language callback with block as the current block */
2170 set_cur_block(block);
2171 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2175 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2176 /* We don't need to care about exception ops in the start block.
2177 There are none by definition. */
2178 return block->attr.block.graph_arr[pos];
/* Not the start block: plant a Phi0 placeholder to break recursion. */
2180 phi0 = new_rd_Phi0(irg, block, mode);
2181 block->attr.block.graph_arr[pos] = phi0;
2182 #if PRECISE_EXC_CONTEXT
2183 if (get_opt_precise_exc_context()) {
2184 /* Set graph_arr for fragile ops. Also here we should break recursion.
2185 We could choose a cyclic path through an cfop. But the recursion would
2186 break at some point. */
2187 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2193 /* This loop goes to all predecessor blocks of the block the Phi node
2194 is in and there finds the operands of the Phi node by calling
2195 get_r_value_internal. */
2196 for (i = 1; i <= ins; ++i) {
2197 ir_node *cf_pred = block->in[i];
2198 ir_node *prevCfOp = skip_Proj(cf_pred);
2200 if (is_Bad(prevCfOp)) {
2201 /* In case a Cond has been optimized we would get right to the start block
2202 with an invalid definition. */
2203 nin[i-1] = new_Bad();
2206 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2208 if (!is_Bad(prevBlock)) {
2209 #if PRECISE_EXC_CONTEXT
/* On exception edges take the value as seen after the fragile op. */
2210 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2211 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2212 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2215 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2217 nin[i-1] = new_Bad();
2221 /* We want to pass the Phi0 node to the constructor: this finds additional
2222 optimization possibilities.
2223 The Phi0 node either is allocated in this function, or it comes from
2224 a former call to get_r_value_internal(). In this case we may not yet
2225 exchange phi0, as this is done in mature_immBlock(). */
2227 phi0_all = block->attr.block.graph_arr[pos];
2228 if (! is_Phi0(phi0_all) ||
2229 get_irn_arity(phi0_all) != 0 ||
2230 get_nodes_block(phi0_all) != block)
2236 /* After collecting all predecessors into the array nin a new Phi node
2237 with these predecessors is created. This constructor contains an
2238 optimization: If all predecessors of the Phi node are identical it
2239 returns the only operand instead of a new Phi node. */
2240 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2242 /* In case we allocated a Phi0 node at the beginning of this procedure,
2243 we need to exchange this Phi0 with the real Phi. */
2245 exchange(phi0, res);
2246 block->attr.block.graph_arr[pos] = res;
2247 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2248 only an optimization. */
2255 * This function returns the last definition of a value. In case
2256 * this value was last defined in a previous block, Phi nodes are
2257 * inserted. If the part of the firm graph containing the definition
2258 * is not yet constructed, a dummy Phi node is returned.
2260 * @param block the current block
2261 * @param pos the value number of the value searched
2262 * @param mode the mode of this value (needed for Phi construction)
/* Returns the last definition of value pos in block, inserting Phi
   nodes (via phi_merge) or Phi0 placeholders (for immature blocks) as
   needed; uses the graph's visited flag to break recursion on loops. */
2265 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2267 /* There are 4 cases to treat.
2269 1. The block is not mature and we visit it the first time. We can not
2270 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2271 predecessors is returned. This node is added to the linked list (block
2272 attribute "phis") of the containing block to be completed when this block is
2273 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2276 2. The value is already known in this block, graph_arr[pos] is set and we
2277 visit the block the first time. We can return the value without
2278 creating any new nodes.
2280 3. The block is mature and we visit it the first time. A Phi node needs
2281 to be created (phi_merge). If the Phi is not needed, as all it's
2282 operands are the same value reaching the block through different
2283 paths, it's optimized away and the value itself is returned.
2285 4. The block is mature, and we visit it the second time. Now two
2286 subcases are possible:
2287 * The value was computed completely the last time we were here. This
2288 is the case if there is no loop. We can return the proper value.
2289 * The recursion that visited this node and set the flag did not
2290 return yet. We are computing a value in a loop and need to
2291 break the recursion. This case only happens if we visited
2292 the same block with phi_merge before, which inserted a Phi0.
2293 So we return the Phi0.
2296 /* case 4 -- already visited. */
2297 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2298 /* As phi_merge allocates a Phi0 this value is always defined. Here
2299 is the critical difference of the two algorithms. */
2300 assert(block->attr.block.graph_arr[pos]);
2301 return block->attr.block.graph_arr[pos];
2304 /* visited the first time */
2305 set_irn_visited(block, get_irg_visited(current_ir_graph));
2307 /* Get the local valid value */
2308 res = block->attr.block.graph_arr[pos];
2310 /* case 2 -- If the value is actually computed, return it. */
2314 if (block->attr.block.is_matured) { /* case 3 */
2316 /* The Phi has the same amount of ins as the corresponding block. */
2317 int ins = get_irn_arity(block);
2319 NEW_ARR_A(ir_node *, nin, ins);
2321 /* Phi merge collects the predecessors and then creates a node. */
2322 res = phi_merge(block, pos, mode, nin, ins);
2324 } else { /* case 1 */
2325 /* The block is not mature, we don't know how many in's are needed. A Phi
2326 with zero predecessors is created. Such a Phi node is called Phi0
2327 node. The Phi0 is then added to the list of Phi0 nodes in this block
2328 to be matured by mature_immBlock later.
2329 The Phi0 has to remember the pos of it's internal value. If the real
2330 Phi is computed, pos is used to update the array with the local
2332 res = new_rd_Phi0(current_ir_graph, block, mode);
2333 res->attr.phi.u.pos = pos;
2334 res->attr.phi.next = block->attr.block.phis;
2335 block->attr.block.phis = res;
2338 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2340 /* The local valid value is available now. */
2341 block->attr.block.graph_arr[pos] = res;
2344 } /* get_r_value_internal */
2346 /* ************************************************************************** */
2349 * Finalize a Block node, when all control flows are known.
2350 * Acceptable parameters are only Block nodes.
/* Finalizes an immature Block once all control-flow predecessors are
   known: fixes the backedge array, matures every queued Phi0 on the
   block's phi list via phi_merge/exchange, marks the block matured and
   optimizes it in place. */
2353 mature_immBlock(ir_node *block) {
2358 assert(is_Block(block));
2359 if (!get_Block_matured(block)) {
2360 ir_graph *irg = current_ir_graph;
2362 ins = ARR_LEN(block->in) - 1;  /* in[0] is not a control predecessor */
2363 /* Fix block parameters */
2364 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2366 /* An array for building the Phi nodes. */
2367 NEW_ARR_A(ir_node *, nin, ins);
2369 /* Traverse a chain of Phi nodes attached to this block and mature
2371 for (n = block->attr.block.phis; n; n = next) {
2372 inc_irg_visited(irg);  /* fresh visited counter per Phi */
2373 next = n->attr.phi.next;  /* save: exchange() invalidates n */
2374 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2377 block->attr.block.is_matured = 1;
2379 /* Now, as the block is a finished Firm node, we can optimize it.
2380 Since other nodes have been allocated since the block was created
2381 we can not free the node on the obstack. Therefore we have to call
2382 optimize_in_place().
2383 Unfortunately the optimization does not change a lot, as all allocated
2384 nodes refer to the unoptimized node.
2385 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2386 block = optimize_in_place_2(block);
2387 IRN_VRFY_IRG(block, irg);
2389 } /* mature_immBlock */
/* ------------------------------------------------------------------ */
/* new_d_* constructors: build the node in the current block of       */
/* current_ir_graph by delegating to the corresponding new_bd_*       */
/* constructor with the given debug info.                             */
/* ------------------------------------------------------------------ */
2392 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2393 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2397 new_d_Const(dbg_info *db, tarval *con) {
2398 return new_bd_Const(db, con);
2402 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2403 return new_bd_Const_long(db, mode, value);
2404 } /* new_d_Const_long */
2407 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
2408 return new_bd_Const_type(db, con, tp);
2409 } /* new_d_Const_type */
2414 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2415 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2419 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2420 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* defaultProj: marks the Cond arg as fragmentary with max_proj as its
   default projection, then builds the corresponding mode_X Proj. */
2425 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2427 assert(arg->op == op_Cond);
2428 arg->attr.cond.kind = fragmentary;
2429 arg->attr.cond.default_proj = max_proj;
2430 res = new_d_Proj(db, arg, mode_X, max_proj);
2432 } /* new_d_defaultProj */
/* Conv vs. strictConv differ only in the trailing strict flag (0 / 1). */
2436 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2437 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2442 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2443 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2444 } /* new_d_strictConv */
2448 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2449 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2453 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2454 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2465 * Allocate a frag array for a node if the current graph state is phase_building.
2467 * @param irn the node for which the frag array should be allocated
2468 * @param op the opcode of the (original) node, if does not match opcode of irn,
2470 * @param frag_store the address of the frag store in irn attributes, if this
2471 * address contains a value != NULL, does nothing
2473 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2474 if (get_opt_precise_exc_context()) {
2475 if ((current_ir_graph->phase_state == phase_building) &&
2476 (get_irn_op(irn) == op) && /* Could be optimized away. */
2477 !*frag_store) /* Could be a cse where the arr is already set. */ {
2478 *frag_store = new_frag_arr(irn);
2481 } /* firm_alloc_frag_arr */
2485 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2487 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2488 #if PRECISE_EXC_CONTEXT
2489 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2496 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2498 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2499 #if PRECISE_EXC_CONTEXT
2500 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2504 } /* new_d_DivMod */
2507 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2509 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2510 #if PRECISE_EXC_CONTEXT
2511 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2519 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2521 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2522 #if PRECISE_EXC_CONTEXT
2523 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2531 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2533 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2534 #if PRECISE_EXC_CONTEXT
2535 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
2554 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2555 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2559 new_d_Jmp(dbg_info *db) {
2560 return new_bd_Jmp(db, current_ir_graph->current_block);
2564 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2565 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2569 new_d_Cond(dbg_info *db, ir_node *c) {
2570 return new_bd_Cond(db, current_ir_graph->current_block, c);
2575 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2578 res = new_bd_Call(db, current_ir_graph->current_block,
2579 store, callee, arity, in, tp);
2580 #if PRECISE_EXC_CONTEXT
2581 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2588 new_d_Builtin(dbg_info *db, ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
2590 return new_bd_Builtin(db, current_ir_graph->current_block, store, kind, arity, in, tp);
2591 } /* new_d_Builtin */
2595 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2596 return new_bd_Return(db, current_ir_graph->current_block,
2598 } /* new_d_Return */
2601 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
2603 res = new_bd_Load(db, current_ir_graph->current_block,
2604 store, addr, mode, flags);
2605 #if PRECISE_EXC_CONTEXT
2606 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2613 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
2615 res = new_bd_Store(db, current_ir_graph->current_block,
2616 store, addr, val, flags);
2617 #if PRECISE_EXC_CONTEXT
2618 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2626 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2627 ir_where_alloc where) {
2629 res = new_bd_Alloc(db, current_ir_graph->current_block,
2630 store, size, alloc_type, where);
2631 #if PRECISE_EXC_CONTEXT
2632 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
2640 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2641 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2642 return new_bd_Free(db, current_ir_graph->current_block,
2643 store, ptr, size, free_type, where);
2648 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2649 /* GL: objptr was called frame before. Frame was a bad choice for the name
2650 as the operand could as well be a pointer to a dynamic object. */
2652 return new_bd_Sel(db, current_ir_graph->current_block,
2653 store, objptr, 0, NULL, ent);
2654 } /* new_d_simpleSel */
2658 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2659 return new_bd_Sel(db, current_ir_graph->current_block,
2660 store, objptr, n_index, index, sel);
2665 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2666 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2668 } /* new_d_SymConst_type */
2671 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2672 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2673 value, kind, firm_unknown_type);
2674 } /* new_d_SymConst */
2677 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2678 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2683 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2684 return new_bd_Confirm(db, current_ir_graph->current_block,
2686 } /* new_d_Confirm */
2689 new_d_Unknown(ir_mode *m) {
2690 return new_bd_Unknown(m);
2691 } /* new_d_Unknown */
2695 new_d_CallBegin(dbg_info *db, ir_node *call) {
2696 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2697 } /* new_d_CallBegin */
2700 new_d_EndReg(dbg_info *db) {
2701 return new_bd_EndReg(db, current_ir_graph->current_block);
2702 } /* new_d_EndReg */
2705 new_d_EndExcept(dbg_info *db) {
2706 return new_bd_EndExcept(db, current_ir_graph->current_block);
2707 } /* new_d_EndExcept */
2710 new_d_Break(dbg_info *db) {
2711 return new_bd_Break(db, current_ir_graph->current_block);
2715 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2716 return new_bd_Filter(db, current_ir_graph->current_block,
2718 } /* new_d_Filter */
2722 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2723 ir_node *ir_true, ir_mode *mode) {
2724 return new_bd_Mux(db, current_ir_graph->current_block,
2725 sel, ir_false, ir_true, mode);
2729 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2730 ir_node *dst, ir_node *src, ir_type *data_type) {
2732 res = new_bd_CopyB(db, current_ir_graph->current_block,
2733 store, dst, src, data_type);
2734 #if PRECISE_EXC_CONTEXT
2735 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2741 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2742 return new_bd_InstOf(db, current_ir_graph->current_block,
2743 store, objptr, type);
2744 } /* new_d_InstOf */
2747 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2748 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2751 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2752 ir_node *idx, ir_node *lower, ir_node *upper) {
2754 res = new_bd_Bound(db, current_ir_graph->current_block,
2755 store, idx, lower, upper);
2756 #if PRECISE_EXC_CONTEXT
2757 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2764 new_d_Pin(dbg_info *db, ir_node *node) {
2765 return new_bd_Pin(db, current_ir_graph->current_block, node);
2770 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2771 int n_outs, ir_asm_constraint *outputs,
2772 int n_clobber, ident *clobber[], ident *asm_text) {
2773 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
/* ********************************************************************* */
/* Comfortable interface with automatic Phi node construction.           */
/* (Uses also constructors of the ?? interface, except new_Block.)       */
/* ********************************************************************* */

/* Block construction */
/* immature Block without predecessors */
2784 new_d_immBlock(dbg_info *db) {
2787 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2788 /* creates a new dynamic in-array as length of in is -1 */
2789 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2791 /* macroblock head */
2794 res->attr.block.is_matured = 0;
2795 res->attr.block.is_dead = 0;
2796 res->attr.block.is_mb_head = 1;
2797 res->attr.block.has_label = 0;
2798 res->attr.block.irg = current_ir_graph;
2799 res->attr.block.backedge = NULL;
2800 res->attr.block.in_cg = NULL;
2801 res->attr.block.cg_backedge = NULL;
2802 res->attr.block.extblk = NULL;
2803 res->attr.block.region = NULL;
2804 res->attr.block.mb_depth = 0;
2805 res->attr.block.label = 0;
2807 set_Block_block_visited(res, 0);
2809 /* Create and initialize array for Phi-node construction. */
2810 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2811 current_ir_graph->n_loc);
2812 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2814 /* Immature block may not be optimized! */
2815 IRN_VRFY_IRG(res, current_ir_graph);
2818 } /* new_d_immBlock */
2821 new_immBlock(void) {
2822 return new_d_immBlock(NULL);
2823 } /* new_immBlock */
2825 /* immature PartBlock with its predecessors */
2827 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2828 ir_node *res = new_d_immBlock(db);
2829 ir_node *blk = get_nodes_block(pred_jmp);
2831 res->in[0] = blk->in[0];
2832 assert(res->in[0] != NULL);
2833 add_immBlock_pred(res, pred_jmp);
2835 res->attr.block.is_mb_head = 0;
2836 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2839 } /* new_d_immPartBlock */
2842 new_immPartBlock(ir_node *pred_jmp) {
2843 return new_d_immPartBlock(NULL, pred_jmp);
2844 } /* new_immPartBlock */
2846 /* add an edge to a jmp/control flow node */
/* NOTE(review): garbled listing — every line carries a stray source-line
   number, and original lines 2847 (return type), 2850, 2854 and 2856 are
   missing from this extract.  Line 2856 may have updated edge information
   between the array append and the hook call — confirm against the
   original source before relying on this function. */
2848 add_immBlock_pred(ir_node *block, ir_node *jmp) {
/* n is the index the new predecessor will occupy (in[0] is the MB head). */
2849 int n = ARR_LEN(block->in) - 1;
2851 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2852 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2853 assert(is_ir_node(jmp));
/* append jmp to the dynamic in-array and notify registered hooks */
2855 ARR_APP1(ir_node *, block->in, jmp);
2857 hook_set_irn_n(block, n, jmp, NULL);
2858 } /* add_immBlock_pred */
2860 /* changing the current block */
2862 set_cur_block(ir_node *target) {
2863 current_ir_graph->current_block = target;
2864 } /* set_cur_block */
/* ************************ */
/* parameter administration */
2869 /* get a value from the parameter array from the current block by its index */
/* NOTE(review): garbled listing — original lines 2870 (return type) and
   2875-2878 are missing from this extract; the gap likely handled
   uninitialized variables via default_initialize_local_variable, but this
   cannot be confirmed here. */
2871 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2872 ir_graph *irg = current_ir_graph;
2873 assert(get_irg_phase_state(irg) == phase_building);
/* visited flags are used by the SSA value lookup below */
2874 inc_irg_visited(irg);
/* pos + 1 because slot 0 of the graph array holds the current store */
2879 return get_r_value_internal(irg->current_block, pos + 1, mode);
2882 /* get a value from the parameter array from the current block by its index */
2884 get_value(int pos, ir_mode *mode) {
2885 return get_d_value(NULL, pos, mode);
2888 /* set a value at position pos in the parameter array from the current block */
2890 set_value(int pos, ir_node *value) {
2891 ir_graph *irg = current_ir_graph;
2892 assert(get_irg_phase_state(irg) == phase_building);
2894 assert(pos+1 < irg->n_loc);
2895 assert(is_ir_node(value));
2896 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2899 /* Find the value number for a node in the current block.*/
2901 find_value(ir_node *value) {
2903 ir_node *bl = current_ir_graph->current_block;
2905 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2906 if (bl->attr.block.graph_arr[i] == value)
2911 /* get the current store */
2914 ir_graph *irg = current_ir_graph;
2916 assert(get_irg_phase_state(irg) == phase_building);
2917 /* GL: one could call get_value instead */
2918 inc_irg_visited(irg);
2919 return get_r_value_internal(irg->current_block, 0, mode_M);
2922 /* set the current store: handles automatic Sync construction for Load nodes */
/* NOTE(review): garbled listing — several original lines are missing from
   this extract (2944-2945, 2950-2952, 2954-2957), including the else-branch
   plumbing and the closing braces.  The visible logic: when auto-sync is
   enabled and the stored value is a non-volatile Load, either append it to
   an existing Sync or (after a plain Load) build a new 2-input Sync.
   Reconstruct the missing assignments from the original before editing. */
2924 set_store(ir_node *store) {
2925 ir_node *load, *pload, *pred, *in[2];
2927 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2928 /* Beware: due to dead code elimination, a store might become a Bad node even in
2929 the construction phase. */
2930 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2932 if (get_opt_auto_create_sync()) {
2933 /* handle non-volatile Load nodes by automatically creating Sync's */
2934 load = skip_Proj(store);
2935 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2936 pred = get_Load_mem(load);
2938 if (is_Sync(pred)) {
2939 /* a Load after a Sync: move it up */
2940 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2942 set_Load_mem(load, get_memop_mem(mem));
2943 add_Sync_pred(pred, store);
2946 pload = skip_Proj(pred);
2947 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2948 /* a Load after a Load: create a new Sync */
2949 set_Load_mem(load, get_Load_mem(pload));
2953 store = new_Sync(2, in);
/* slot 0 of the graph array holds the current store */
2958 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2962 keep_alive(ir_node *ka) {
2963 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2966 /* --- Useful access routines --- */
2967 /* Returns the current block of the current graph. To set the current
2968 block use set_cur_block. */
2969 ir_node *get_cur_block(void) {
2970 return get_irg_current_block(current_ir_graph);
2971 } /* get_cur_block */
2973 /* Returns the frame type of the current graph */
2974 ir_type *get_cur_frame_type(void) {
2975 return get_irg_frame_type(current_ir_graph);
2976 } /* get_cur_frame_type */
2979 /* ********************************************************************* */
2982 /* call once for each run of the library */
2984 firm_init_cons(uninitialized_local_variable_func_t *func) {
2985 default_initialize_local_variable = func;
2986 } /* firm_init_cons */
2989 irp_finalize_cons(void) {
2991 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2992 irg_finalize_cons(get_irp_irg(i));
2994 irp->phase_state = phase_high;
2995 } /* irp_finalize_cons */
2998 ir_node *new_Block(int arity, ir_node **in) {
2999 return new_d_Block(NULL, arity, in);
3001 ir_node *new_Start(void) {
3002 return new_d_Start(NULL);
3004 ir_node *new_End(void) {
3005 return new_d_End(NULL);
3007 ir_node *new_Jmp(void) {
3008 return new_d_Jmp(NULL);
3010 ir_node *new_IJmp(ir_node *tgt) {
3011 return new_d_IJmp(NULL, tgt);
3013 ir_node *new_Cond(ir_node *c) {
3014 return new_d_Cond(NULL, c);
3016 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
3017 return new_d_Return(NULL, store, arity, in);
3020 ir_node *new_Const(tarval *con) {
3021 return new_d_Const(NULL, con);
3024 ir_node *new_Const_long(ir_mode *mode, long value) {
3025 return new_d_Const_long(NULL, mode, value);
3028 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3029 return new_d_Const_type(NULL, con, tp);
3032 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
3033 return new_d_SymConst_type(NULL, mode, value, kind, type);
3035 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
3036 return new_d_SymConst(NULL, mode, value, kind);
3038 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3039 return new_d_simpleSel(NULL, store, objptr, ent);
3042 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3044 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3047 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3049 return new_d_Call(NULL, store, callee, arity, in, tp);
3051 ir_node *new_Builtin(ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
3053 return new_d_Builtin(NULL, store, kind, arity, in, tp);
3056 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3057 return new_d_Add(NULL, op1, op2, mode);
3059 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3060 return new_d_Sub(NULL, op1, op2, mode);
3062 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3063 return new_d_Minus(NULL, op, mode);
3065 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3066 return new_d_Mul(NULL, op1, op2, mode);
3068 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
3069 return new_d_Mulh(NULL, op1, op2, mode);
3071 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3072 return new_d_Quot(NULL, memop, op1, op2, mode, state);
3074 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3075 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
3077 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3078 return new_d_Div(NULL, memop, op1, op2, mode, state);
3081 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3082 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
3085 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3086 return new_d_Mod(NULL, memop, op1, op2, mode, state);
3088 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3089 return new_d_Abs(NULL, op, mode);
3091 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3092 return new_d_And(NULL, op1, op2, mode);
3094 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3095 return new_d_Or(NULL, op1, op2, mode);
3097 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3098 return new_d_Eor(NULL, op1, op2, mode);
3100 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3101 return new_d_Not(NULL, op, mode);
3103 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3104 return new_d_Shl(NULL, op, k, mode);
3106 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3107 return new_d_Shr(NULL, op, k, mode);
3109 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3110 return new_d_Shrs(NULL, op, k, mode);
3112 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
3113 return new_d_Rotl(NULL, op, k, mode);
3115 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3116 return new_d_Carry(NULL, op1, op2, mode);
3118 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3119 return new_d_Borrow(NULL, op1, op2, mode);
3121 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3122 return new_d_Cmp(NULL, op1, op2);
3124 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3125 return new_d_Conv(NULL, op, mode);
3128 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3129 return new_d_strictConv(NULL, op, mode);
3131 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3132 return new_d_Phi(NULL, arity, in, mode);
3135 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3136 return new_d_Cast(NULL, op, to_tp);
3138 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
3139 return new_d_Load(NULL, store, addr, mode, flags);
3141 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
3142 return new_d_Store(NULL, store, addr, val, flags);
3145 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3146 ir_where_alloc where) {
3147 return new_d_Alloc(NULL, store, size, alloc_type, where);
3150 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3151 ir_type *free_type, ir_where_alloc where) {
3152 return new_d_Free(NULL, store, ptr, size, free_type, where);
3155 ir_node *new_Sync(int arity, ir_node *in[]) {
3156 return new_d_Sync(NULL, arity, in);
3159 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3160 return new_d_Proj(NULL, arg, mode, proj);
3163 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3164 return new_d_defaultProj(NULL, arg, max_proj);
3167 ir_node *new_Tuple(int arity, ir_node **in) {
3168 return new_d_Tuple(NULL, arity, in);
3170 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3171 return new_d_Id(NULL, val, mode);
3174 ir_node *new_Bad(void) {
3175 return get_irg_bad(current_ir_graph);
3178 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3179 return new_d_Confirm(NULL, val, bound, cmp);
3181 ir_node *new_Unknown(ir_mode *m) {
3182 return new_d_Unknown(m);
3185 ir_node *new_CallBegin(ir_node *callee) {
3186 return new_d_CallBegin(NULL, callee);
3188 ir_node *new_EndReg(void) {
3189 return new_d_EndReg(NULL);
3191 ir_node *new_EndExcept(void) {
3192 return new_d_EndExcept(NULL);
3194 ir_node *new_Break(void) {
3195 return new_d_Break(NULL);
3197 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3198 return new_d_Filter(NULL, arg, mode, proj);
3200 ir_node *new_NoMem(void) {
3201 return get_irg_no_mem(current_ir_graph);
3204 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3205 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3208 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3209 return new_d_CopyB(NULL, store, dst, src, data_type);
3211 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3212 return new_d_InstOf(NULL, store, objptr, ent);
3214 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3215 return new_d_Raise(NULL, store, obj);
3217 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3218 return new_d_Bound(NULL, store, idx, lower, upper);
3221 ir_node *new_Pin(ir_node *node) {
3222 return new_d_Pin(NULL, node);
3225 ir_node *new_Dummy(ir_mode *m) {
3226 ir_graph *irg = current_ir_graph;
3227 return new_ir_node(NULL, irg, get_irg_start_block(irg), op_Dummy, m, 0, NULL);
3229 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3230 int n_outs, ir_asm_constraint *outputs,
3231 int n_clobber, ident *clobber[], ident *asm_text) {
3232 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3235 /* create a new anchor node */
3236 ir_node *new_Anchor(ir_graph *irg) {
3237 ir_node *in[anchor_last];
3238 memset(in, 0, sizeof(in));
3239 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);