2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
47 /* Uncomment to use original code instead of generated one */
48 // #define USE_ORIGINAL
50 /* when we need verifying */
52 # define IRN_VRFY_IRG(res, irg)
54 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
58 * Language dependent variable initialization callback.
60 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
62 /* creates a bd constructor for a binop */
63 #define NEW_BD_BINOP(instr) \
65 new_bd_##instr(dbg_info *db, ir_node *block, \
66 ir_node *op1, ir_node *op2, ir_mode *mode) \
70 ir_graph *irg = current_ir_graph; \
73 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
74 res = optimize_node(res); \
75 IRN_VRFY_IRG(res, irg); \
79 /* creates a bd constructor for an unop */
80 #define NEW_BD_UNOP(instr) \
82 new_bd_##instr(dbg_info *db, ir_node *block, \
83 ir_node *op, ir_mode *mode) \
86 ir_graph *irg = current_ir_graph; \
87 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
88 res = optimize_node(res); \
89 IRN_VRFY_IRG(res, irg); \
93 /* creates a bd constructor for an divop */
94 #define NEW_BD_DIVOP(instr) \
96 new_bd_##instr(dbg_info *db, ir_node *block, \
97 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
101 ir_graph *irg = current_ir_graph; \
105 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
106 res->attr.divmod.exc.pin_state = state; \
107 res->attr.divmod.resmode = mode; \
108 res->attr.divmod.no_remainder = 0; \
109 res = optimize_node(res); \
110 IRN_VRFY_IRG(res, irg); \
114 /* creates a rd constructor for a binop */
115 #define NEW_RD_BINOP(instr) \
117 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
118 ir_node *op1, ir_node *op2, ir_mode *mode) \
121 ir_graph *rem = current_ir_graph; \
122 current_ir_graph = irg; \
123 res = new_bd_##instr(db, block, op1, op2, mode); \
124 current_ir_graph = rem; \
128 /* creates a rd constructor for an unop */
129 #define NEW_RD_UNOP(instr) \
131 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
132 ir_node *op, ir_mode *mode) \
135 ir_graph *rem = current_ir_graph; \
136 current_ir_graph = irg; \
137 res = new_bd_##instr(db, block, op, mode); \
138 current_ir_graph = rem; \
142 /* creates a rd constructor for an divop */
143 #define NEW_RD_DIVOP(instr) \
145 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
146 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
149 ir_graph *rem = current_ir_graph; \
150 current_ir_graph = irg; \
151 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
152 current_ir_graph = rem; \
156 /* creates a d constructor for an binop */
157 #define NEW_D_BINOP(instr) \
159 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
160 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
163 /* creates a d constructor for an unop */
164 #define NEW_D_UNOP(instr) \
166 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
167 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
171 #include "gen_ir_cons.c.inl"
175 * Constructs a Block with a fixed number of predecessors.
176 * Does not set current_block. Cannot be used with automatic
177 * Phi node construction.
180 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
182 ir_graph *irg = current_ir_graph;
184 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
186 /* macroblock header */
189 res->attr.block.is_dead = 0;
190 res->attr.block.is_mb_head = 1;
191 res->attr.block.has_label = 0;
192 res->attr.block.irg = irg;
193 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
194 res->attr.block.in_cg = NULL;
195 res->attr.block.cg_backedge = NULL;
196 res->attr.block.extblk = NULL;
197 res->attr.block.mb_depth = 0;
198 res->attr.block.label = 0;
200 set_Block_matured(res, 1);
201 set_Block_block_visited(res, 0);
203 IRN_VRFY_IRG(res, irg);
208 new_bd_Start(dbg_info *db, ir_node *block) {
210 ir_graph *irg = current_ir_graph;
212 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
214 IRN_VRFY_IRG(res, irg);
219 new_bd_End(dbg_info *db, ir_node *block) {
221 ir_graph *irg = current_ir_graph;
223 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
225 IRN_VRFY_IRG(res, irg);
231 * Creates a Phi node with all predecessors. Calling this constructor
232 * is only allowed if the corresponding block is mature.
235 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
237 ir_graph *irg = current_ir_graph;
241 /* Don't assert that block matured: the use of this constructor is strongly
243 if (get_Block_matured(block))
244 assert(get_irn_arity(block) == arity);
246 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
248 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
250 for (i = arity - 1; i >= 0; --i)
251 if (is_Unknown(in[i])) {
256 if (!has_unknown) res = optimize_node(res);
257 IRN_VRFY_IRG(res, irg);
259 /* Memory Phis in endless loops must be kept alive.
260 As we can't distinguish these easily we keep all of them alive. */
261 if (is_Phi(res) && mode == mode_M)
262 add_End_keepalive(get_irg_end(irg), res);
267 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
269 ir_graph *irg = current_ir_graph;
271 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
272 res->attr.con.tv = con;
273 set_Const_type(res, tp); /* Call method because of complex assertion. */
274 res = optimize_node (res);
275 assert(get_Const_type(res) == tp);
276 IRN_VRFY_IRG(res, irg);
279 } /* new_bd_Const_type */
282 new_bd_Const(dbg_info *db, tarval *con) {
283 ir_graph *irg = current_ir_graph;
285 return new_rd_Const_type (db, irg, con, firm_unknown_type);
289 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
290 ir_graph *irg = current_ir_graph;
292 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
293 } /* new_bd_Const_long */
297 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
299 ir_graph *irg = current_ir_graph;
301 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
302 res = optimize_node(res);
303 IRN_VRFY_IRG(res, irg);
308 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
311 ir_graph *irg = current_ir_graph;
313 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
314 res->attr.proj = proj;
317 assert(get_Proj_pred(res));
318 assert(get_nodes_block(get_Proj_pred(res)));
320 res = optimize_node(res);
322 IRN_VRFY_IRG(res, irg);
328 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
331 ir_graph *irg = current_ir_graph;
333 assert(arg->op == op_Cond);
334 arg->attr.cond.kind = fragmentary;
335 arg->attr.cond.default_proj = max_proj;
336 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
338 } /* new_bd_defaultProj */
342 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
344 ir_graph *irg = current_ir_graph;
346 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
347 res->attr.conv.strict = strict_flag;
348 res = optimize_node(res);
349 IRN_VRFY_IRG(res, irg);
354 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
356 ir_graph *irg = current_ir_graph;
358 assert(is_atomic_type(to_tp));
360 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
361 res->attr.cast.type = to_tp;
362 res = optimize_node(res);
363 IRN_VRFY_IRG(res, irg);
368 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
370 ir_graph *irg = current_ir_graph;
372 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
373 res = optimize_node (res);
374 IRN_VRFY_IRG(res, irg);
399 /** Creates a remainderless Div node. */
400 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
401 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
405 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
410 res->attr.divmod.exc.pin_state = state;
411 res->attr.divmod.resmode = mode;
412 res->attr.divmod.no_remainder = 1;
413 res = optimize_node(res);
414 IRN_VRFY_IRG(res, irg);
419 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
422 ir_graph *irg = current_ir_graph;
425 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
426 res = optimize_node(res);
427 IRN_VRFY_IRG(res, irg);
432 new_bd_Jmp(dbg_info *db, ir_node *block) {
434 ir_graph *irg = current_ir_graph;
436 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
437 res = optimize_node(res);
438 IRN_VRFY_IRG(res, irg);
443 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
445 ir_graph *irg = current_ir_graph;
447 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
448 res = optimize_node(res);
449 IRN_VRFY_IRG(res, irg);
454 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
456 ir_graph *irg = current_ir_graph;
458 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
459 res->attr.cond.kind = dense;
460 res->attr.cond.default_proj = 0;
461 res->attr.cond.pred = COND_JMP_PRED_NONE;
462 res = optimize_node(res);
463 IRN_VRFY_IRG(res, irg);
468 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
469 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
473 ir_graph *irg = current_ir_graph;
476 NEW_ARR_A(ir_node *, r_in, r_arity);
479 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
481 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
483 assert((get_unknown_type() == tp) || is_Method_type(tp));
484 set_Call_type(res, tp);
485 res->attr.call.exc.pin_state = op_pin_state_pinned;
486 res->attr.call.callee_arr = NULL;
487 res = optimize_node(res);
488 IRN_VRFY_IRG(res, irg);
493 new_bd_Builtin(dbg_info *db, ir_node *block, ir_node *store,
494 ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
498 ir_graph *irg = current_ir_graph;
501 NEW_ARR_A(ir_node *, r_in, r_arity);
503 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
505 res = new_ir_node(db, irg, block, op_Builtin, mode_T, r_arity, r_in);
507 assert((get_unknown_type() == tp) || is_Method_type(tp));
508 res->attr.builtin.exc.pin_state = op_pin_state_pinned;
509 res->attr.builtin.kind = kind;
510 res->attr.builtin.type = tp;
511 res = optimize_node(res);
512 IRN_VRFY_IRG(res, irg);
514 } /* new_bd_Buildin */
517 new_bd_Return(dbg_info *db, ir_node *block,
518 ir_node *store, int arity, ir_node **in) {
522 ir_graph *irg = current_ir_graph;
525 NEW_ARR_A (ir_node *, r_in, r_arity);
527 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
528 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
529 res = optimize_node(res);
530 IRN_VRFY_IRG(res, irg);
532 } /* new_bd_Return */
535 new_bd_Load(dbg_info *db, ir_node *block,
536 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
539 ir_graph *irg = current_ir_graph;
543 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
544 res->attr.load.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
545 res->attr.load.mode = mode;
546 res->attr.load.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
547 res->attr.load.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
548 res = optimize_node(res);
549 IRN_VRFY_IRG(res, irg);
554 new_bd_Store(dbg_info *db, ir_node *block,
555 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
558 ir_graph *irg = current_ir_graph;
563 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
564 res->attr.store.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
565 res->attr.store.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
566 res->attr.store.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
568 res = optimize_node(res);
569 IRN_VRFY_IRG(res, irg);
574 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
575 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
578 ir_graph *irg = current_ir_graph;
582 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
583 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
584 res->attr.alloc.where = where;
585 res->attr.alloc.type = alloc_type;
586 res = optimize_node(res);
587 IRN_VRFY_IRG(res, irg);
592 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
593 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
596 ir_graph *irg = current_ir_graph;
601 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
602 res->attr.free.where = where;
603 res->attr.free.type = free_type;
604 res = optimize_node(res);
605 IRN_VRFY_IRG(res, irg);
611 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
612 int arity, ir_node **in, ir_entity *ent) {
616 ir_graph *irg = current_ir_graph;
617 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
619 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
622 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
625 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
627 * Sel's can select functions which should be of mode mode_P_code.
629 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
630 res->attr.sel.entity = ent;
631 res = optimize_node(res);
632 IRN_VRFY_IRG(res, irg);
637 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
638 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
639 ir_graph *irg = current_ir_graph;
640 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
642 res->attr.symc.kind = symkind;
643 res->attr.symc.sym = value;
644 res->attr.symc.tp = tp;
646 res = optimize_node(res);
647 IRN_VRFY_IRG(res, irg);
649 } /* new_bd_SymConst_type */
652 new_bd_Sync(dbg_info *db, ir_node *block) {
654 ir_graph *irg = current_ir_graph;
656 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
657 /* no need to call optimize node here, Sync are always created with no predecessors */
658 IRN_VRFY_IRG(res, irg);
664 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
665 ir_node *in[2], *res;
666 ir_graph *irg = current_ir_graph;
670 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
671 res->attr.confirm.cmp = cmp;
672 res = optimize_node(res);
673 IRN_VRFY_IRG(res, irg);
675 } /* new_bd_Confirm */
678 new_bd_Unknown(ir_mode *m) {
680 ir_graph *irg = current_ir_graph;
682 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
683 res = optimize_node(res);
685 } /* new_bd_Unknown */
689 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
692 ir_graph *irg = current_ir_graph;
694 in[0] = get_Call_ptr(call);
695 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
696 /* res->attr.callbegin.irg = irg; */
697 res->attr.callbegin.call = call;
698 res = optimize_node(res);
699 IRN_VRFY_IRG(res, irg);
701 } /* new_bd_CallBegin */
704 new_bd_EndReg(dbg_info *db, ir_node *block) {
706 ir_graph *irg = current_ir_graph;
708 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
709 set_irg_end_reg(irg, res);
710 IRN_VRFY_IRG(res, irg);
712 } /* new_bd_EndReg */
715 new_bd_EndExcept(dbg_info *db, ir_node *block) {
717 ir_graph *irg = current_ir_graph;
719 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
720 set_irg_end_except(irg, res);
721 IRN_VRFY_IRG (res, irg);
723 } /* new_bd_EndExcept */
727 new_bd_Break(dbg_info *db, ir_node *block) {
729 ir_graph *irg = current_ir_graph;
731 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
732 res = optimize_node(res);
733 IRN_VRFY_IRG(res, irg);
738 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
741 ir_graph *irg = current_ir_graph;
743 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
744 res->attr.filter.proj = proj;
745 res->attr.filter.in_cg = NULL;
746 res->attr.filter.backedge = NULL;
749 assert(get_Proj_pred(res));
750 assert(get_nodes_block(get_Proj_pred(res)));
752 res = optimize_node(res);
753 IRN_VRFY_IRG(res, irg);
755 } /* new_bd_Filter */
758 new_bd_Mux(dbg_info *db, ir_node *block,
759 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
762 ir_graph *irg = current_ir_graph;
768 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
771 res = optimize_node(res);
772 IRN_VRFY_IRG(res, irg);
778 new_bd_CopyB(dbg_info *db, ir_node *block,
779 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
782 ir_graph *irg = current_ir_graph;
788 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
790 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
791 res->attr.copyb.data_type = data_type;
792 res = optimize_node(res);
793 IRN_VRFY_IRG(res, irg);
798 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
799 ir_node *objptr, ir_type *type) {
802 ir_graph *irg = current_ir_graph;
806 res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
807 res->attr.instof.exc.pin_state = op_pin_state_floats;
808 res->attr.instof.type = type;
809 res = optimize_node(res);
810 IRN_VRFY_IRG(res, irg);
812 } /* new_bd_InstOf */
815 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
818 ir_graph *irg = current_ir_graph;
822 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
823 res = optimize_node(res);
824 IRN_VRFY_IRG(res, irg);
829 new_bd_Bound(dbg_info *db, ir_node *block,
830 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
833 ir_graph *irg = current_ir_graph;
839 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
840 res->attr.bound.exc.pin_state = op_pin_state_pinned;
841 res = optimize_node(res);
842 IRN_VRFY_IRG(res, irg);
848 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
850 ir_graph *irg = current_ir_graph;
852 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
853 res = optimize_node(res);
854 IRN_VRFY_IRG(res, irg);
860 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
861 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
863 ir_graph *irg = current_ir_graph;
865 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
866 res->attr.assem.pin_state = op_pin_state_pinned;
867 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
868 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
869 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
870 res->attr.assem.asm_text = asm_text;
872 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
873 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
874 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
876 res = optimize_node(res);
877 IRN_VRFY_IRG(res, irg);
881 /* --------------------------------------------- */
882 /* private interfaces, for professional use only */
883 /* --------------------------------------------- */
886 /* Constructs a Block with a fixed number of predecessors.
887 Does not set current_block. Can not be used with automatic
888 Phi node construction. */
890 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
891 ir_graph *rem = current_ir_graph;
894 current_ir_graph = irg;
895 res = new_bd_Block(db, arity, in);
896 current_ir_graph = rem;
902 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
903 ir_graph *rem = current_ir_graph;
906 current_ir_graph = irg;
907 res = new_bd_Start(db, block);
908 current_ir_graph = rem;
914 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
916 ir_graph *rem = current_ir_graph;
918 current_ir_graph = irg;
919 res = new_bd_End(db, block);
920 current_ir_graph = rem;
926 /* Creates a Phi node with all predecessors. Calling this constructor
927 is only allowed if the corresponding block is mature. */
929 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
931 ir_graph *rem = current_ir_graph;
933 current_ir_graph = irg;
934 res = new_bd_Phi(db, block,arity, in, mode);
935 current_ir_graph = rem;
941 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
943 ir_graph *rem = current_ir_graph;
945 current_ir_graph = irg;
946 res = new_bd_Const_type(db, con, tp);
947 current_ir_graph = rem;
950 } /* new_rd_Const_type */
953 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
955 //#ifdef USE_ORIGINAL
956 ir_graph *rem = current_ir_graph;
958 current_ir_graph = irg;
959 res = new_bd_Const_type(db, con, firm_unknown_type);
960 current_ir_graph = rem;
962 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
969 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
970 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
971 } /* new_rd_Const_long */
975 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
977 ir_graph *rem = current_ir_graph;
979 current_ir_graph = irg;
980 res = new_bd_Id(db, block, val, mode);
981 current_ir_graph = rem;
987 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
990 ir_graph *rem = current_ir_graph;
992 current_ir_graph = irg;
993 res = new_bd_Proj(db, block, arg, mode, proj);
994 current_ir_graph = rem;
1001 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1004 ir_graph *rem = current_ir_graph;
1006 current_ir_graph = irg;
1007 res = new_bd_defaultProj(db, block, arg, max_proj);
1008 current_ir_graph = rem;
1011 } /* new_rd_defaultProj */
1015 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1017 ir_graph *rem = current_ir_graph;
1019 current_ir_graph = irg;
1020 res = new_bd_Conv(db, block, op, mode, 0);
1021 current_ir_graph = rem;
1027 new_rd_strictConv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1029 ir_graph *rem = current_ir_graph;
1031 current_ir_graph = irg;
1032 res = new_bd_Conv(db, block, op, mode, 1);
1033 current_ir_graph = rem;
1036 } /* new_rd_strictConv */
1039 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1041 ir_graph *rem = current_ir_graph;
1043 current_ir_graph = irg;
1044 res = new_bd_Cast(db, block, op, to_tp);
1045 current_ir_graph = rem;
1051 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1053 ir_graph *rem = current_ir_graph;
1055 current_ir_graph = irg;
1056 res = new_bd_Tuple(db, block, arity, in);
1057 current_ir_graph = rem;
1060 } /* new_rd_Tuple */
1068 NEW_RD_DIVOP(DivMod)
1081 NEW_RD_BINOP(Borrow)
1083 /* creates a rd constructor for an divRL */
1084 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1085 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1088 ir_graph *rem = current_ir_graph;
1089 current_ir_graph = irg;
1090 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1091 current_ir_graph = rem;
1096 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1097 ir_node *op1, ir_node *op2) {
1099 ir_graph *rem = current_ir_graph;
1101 current_ir_graph = irg;
1102 res = new_bd_Cmp(db, block, op1, op2);
1103 current_ir_graph = rem;
1109 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1111 ir_graph *rem = current_ir_graph;
1113 current_ir_graph = irg;
1114 res = new_bd_Jmp(db, block);
1115 current_ir_graph = rem;
1121 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1123 ir_graph *rem = current_ir_graph;
1125 current_ir_graph = irg;
1126 res = new_bd_IJmp(db, block, tgt);
1127 current_ir_graph = rem;
1133 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1135 ir_graph *rem = current_ir_graph;
1137 current_ir_graph = irg;
1138 res = new_bd_Cond(db, block, c);
1139 current_ir_graph = rem;
1145 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1146 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1148 ir_graph *rem = current_ir_graph;
1150 current_ir_graph = irg;
1151 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1152 current_ir_graph = rem;
1158 new_rd_Builtin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1159 ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
1161 ir_graph *rem = current_ir_graph;
1163 current_ir_graph = irg;
1164 res = new_bd_Builtin(db, block, store, kind, arity, in, tp);
1165 current_ir_graph = rem;
1168 } /* new_rd_Builtin */
1171 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1172 ir_node *store, int arity, ir_node **in) {
1174 ir_graph *rem = current_ir_graph;
1176 current_ir_graph = irg;
1177 res = new_bd_Return(db, block, store, arity, in);
1178 current_ir_graph = rem;
1181 } /* new_rd_Return */
1184 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1185 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1187 ir_graph *rem = current_ir_graph;
1189 current_ir_graph = irg;
1190 res = new_bd_Load(db, block, store, adr, mode, flags);
1191 current_ir_graph = rem;
1197 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1198 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1200 ir_graph *rem = current_ir_graph;
1202 current_ir_graph = irg;
1203 res = new_bd_Store(db, block, store, adr, val, flags);
1204 current_ir_graph = rem;
1207 } /* new_rd_Store */
1210 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1211 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1213 ir_graph *rem = current_ir_graph;
1215 current_ir_graph = irg;
1216 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1217 current_ir_graph = rem;
1220 } /* new_rd_Alloc */
1223 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1224 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1226 ir_graph *rem = current_ir_graph;
1228 current_ir_graph = irg;
1229 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1230 current_ir_graph = rem;
1237 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1238 ir_node *store, ir_node *objptr, ir_entity *ent) {
1240 ir_graph *rem = current_ir_graph;
1242 current_ir_graph = irg;
1243 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1244 current_ir_graph = rem;
1247 } /* new_rd_simpleSel */
1251 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1252 int arity, ir_node **in, ir_entity *ent) {
1254 ir_graph *rem = current_ir_graph;
1256 current_ir_graph = irg;
1257 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1258 current_ir_graph = rem;
1265 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1266 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1268 ir_graph *rem = current_ir_graph;
1270 current_ir_graph = irg;
1271 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1272 current_ir_graph = rem;
1275 } /* new_rd_SymConst_type */
1278 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1279 symconst_symbol value, symconst_kind symkind) {
1280 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1281 } /* new_rd_SymConst */
1283 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1284 symconst_symbol sym;
1285 sym.entity_p = symbol;
1286 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1287 } /* new_rd_SymConst_addr_ent */
1289 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1290 symconst_symbol sym;
1291 sym.entity_p = symbol;
1292 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1293 } /* new_rd_SymConst_ofs_ent */
1295 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1296 symconst_symbol sym;
1297 sym.ident_p = symbol;
1298 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1299 } /* new_rd_SymConst_addr_name */
1301 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1302 symconst_symbol sym;
1303 sym.type_p = symbol;
1304 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1305 } /* new_rd_SymConst_type_tag */
1307 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1308 symconst_symbol sym;
1309 sym.type_p = symbol;
1310 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1311 } /* new_rd_SymConst_size */
1313 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1314 symconst_symbol sym;
1315 sym.type_p = symbol;
1316 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1317 } /* new_rd_SymConst_align */
1320 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1322 ir_graph *rem = current_ir_graph;
1325 current_ir_graph = irg;
1326 res = new_bd_Sync(db, block);
1327 current_ir_graph = rem;
1329 for (i = 0; i < arity; ++i)
1330 add_Sync_pred(res, in[i]);
1337 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1339 ir_graph *rem = current_ir_graph;
1341 current_ir_graph = irg;
1342 res = new_bd_Confirm(db, block, val, bound, cmp);
1343 current_ir_graph = rem;
1346 } /* new_rd_Confirm */
1349 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1351 ir_graph *rem = current_ir_graph;
1353 current_ir_graph = irg;
1354 res = new_bd_Unknown(m);
1355 current_ir_graph = rem;
1358 } /* new_rd_Unknown */
1362 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1364 ir_graph *rem = current_ir_graph;
1366 current_ir_graph = irg;
1367 res = new_bd_CallBegin(db, block, call);
1368 current_ir_graph = rem;
1371 } /* new_rd_CallBegin */
1374 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1377 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1378 set_irg_end_reg(irg, res);
1379 IRN_VRFY_IRG(res, irg);
1381 } /* new_rd_EndReg */
1384 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1387 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1388 set_irg_end_except(irg, res);
1389 IRN_VRFY_IRG (res, irg);
1391 } /* new_rd_EndExcept */
1395 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1397 ir_graph *rem = current_ir_graph;
1399 current_ir_graph = irg;
1400 res = new_bd_Break(db, block);
1401 current_ir_graph = rem;
1404 } /* new_rd_Break */
/* Constructs a Filter node (interprocedural Proj replacement) selecting
 * projection `proj` of `arg` with result mode `mode`. */
1407 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1410 ir_graph *rem = current_ir_graph;
1412 current_ir_graph = irg;
1413 res = new_bd_Filter(db, block, arg, mode, proj);
1414 current_ir_graph = rem;
1417 } /* new_rd_Filter */
/* Constructs a Mux node: selects ir_true or ir_false depending on sel. */
1420 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1421 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1423 ir_graph *rem = current_ir_graph;
1425 current_ir_graph = irg;
1426 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1427 current_ir_graph = rem;
/* Constructs a CopyB node: block copy of `data_type` bytes-worth of data
 * from src to dst, threaded through memory state `store`. */
1433 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1434 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1436 ir_graph *rem = current_ir_graph;
1438 current_ir_graph = irg;
1439 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1440 current_ir_graph = rem;
1443 } /* new_rd_CopyB */
/* Constructs an InstOf node: dynamic type test of objptr against `type`,
 * threaded through memory state `store`. */
1446 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1447 ir_node *objptr, ir_type *type) {
1449 ir_graph *rem = current_ir_graph;
1451 current_ir_graph = irg;
1452 res = new_bd_InstOf(db, block, store, objptr, type);
1453 current_ir_graph = rem;
1456 } /* new_rd_InstOf */
/* Constructs a Raise node: raises exception object obj, threading the
 * memory state `store`. */
1459 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1461 ir_graph *rem = current_ir_graph;
1463 current_ir_graph = irg;
1464 res = new_bd_Raise(db, block, store, obj);
1465 current_ir_graph = rem;
1468 } /* new_rd_Raise */
/* Constructs a Bound node: checks lower <= idx < upper (array bounds
 * check), threaded through memory state `store`. */
1470 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1471 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1473 ir_graph *rem = current_ir_graph;
1475 current_ir_graph = irg;
1476 res = new_bd_Bound(db, block, store, idx, lower, upper);
1477 current_ir_graph = rem;
1480 } /* new_rd_Bound */
/* Constructs a Pin node: pins `node` to this block. */
1483 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1485 ir_graph *rem = current_ir_graph;
1487 current_ir_graph = irg;
1488 res = new_bd_Pin(db, block, node);
1489 current_ir_graph = rem;
/* Constructs an ASM node (inline assembler): `arity` inputs with their
 * constraints, n_outs output constraints, n_clobber clobber idents and
 * the assembler text itself. */
1495 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1496 int arity, ir_node *in[], ir_asm_constraint *inputs,
1497 int n_outs, ir_asm_constraint *outputs,
1498 int n_clobber, ident *clobber[], ident *asm_text) {
1500 ir_graph *rem = current_ir_graph;
1502 current_ir_graph = irg;
1503 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1504 current_ir_graph = rem;
/* ------------------------------------------------------------------ */
/* new_r_* convenience constructors: each is a thin wrapper that      */
/* forwards to the corresponding new_rd_* constructor with a NULL     */
/* dbg_info ("r" = with graph/block, no debug info).  new_r_Bad and   */
/* new_r_NoMem instead return the graph's unique Bad/NoMem node.      */
/* ------------------------------------------------------------------ */
1511 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1512 return new_rd_Block(NULL, irg, arity, in);
1514 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1515 return new_rd_Start(NULL, irg, block);
1517 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1518 return new_rd_End(NULL, irg, block);
1520 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1521 return new_rd_Jmp(NULL, irg, block);
1523 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1524 return new_rd_IJmp(NULL, irg, block, tgt);
1526 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1527 return new_rd_Cond(NULL, irg, block, c);
1529 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1530 ir_node *store, int arity, ir_node **in) {
1531 return new_rd_Return(NULL, irg, block, store, arity, in);
1534 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
1535 return new_rd_Const(NULL, irg, con);
1537 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
1538 return new_rd_Const_long(NULL, irg, mode, value);
1540 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
1541 return new_rd_Const_type(NULL, irg, con, tp);
1543 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1544 symconst_symbol value, symconst_kind symkind) {
1545 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
/* simpleSel: Sel with no array indices (n_index = 0, index = NULL). */
1547 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1548 ir_node *objptr, ir_entity *ent) {
1549 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1552 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1553 ir_node *objptr, int n_index, ir_node **index,
1555 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1557 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1558 ir_node *callee, int arity, ir_node **in,
1560 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1562 ir_node *new_r_Builtin(ir_graph *irg, ir_node *block, ir_node *store,
1563 ir_builtin_kind kind, int arity, ir_node **in,
1565 return new_rd_Builtin(NULL, irg, block, store, kind, arity, in, tp);
/* Binary/unary arithmetic and bitwise wrappers. */
1567 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1568 ir_node *op1, ir_node *op2, ir_mode *mode) {
1569 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1571 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1572 ir_node *op1, ir_node *op2, ir_mode *mode) {
1573 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1575 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1576 ir_node *op, ir_mode *mode) {
1577 return new_rd_Minus(NULL, irg, block, op, mode);
1579 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1580 ir_node *op1, ir_node *op2, ir_mode *mode) {
1581 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1583 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1584 ir_node *op1, ir_node *op2, ir_mode *mode) {
1585 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
/* Division-family wrappers additionally thread a memory operand and a
   pin state (exceptions possible: division by zero). */
1587 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1588 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1589 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1591 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1592 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1593 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1595 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1596 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1597 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1599 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1600 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1601 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1603 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1604 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1605 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1607 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1608 ir_node *op, ir_mode *mode) {
1609 return new_rd_Abs(NULL, irg, block, op, mode);
1611 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1612 ir_node *op1, ir_node *op2, ir_mode *mode) {
1613 return new_rd_And(NULL, irg, block, op1, op2, mode);
1615 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1616 ir_node *op1, ir_node *op2, ir_mode *mode) {
1617 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1619 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1620 ir_node *op1, ir_node *op2, ir_mode *mode) {
1621 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1623 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1624 ir_node *op, ir_mode *mode) {
1625 return new_rd_Not(NULL, irg, block, op, mode);
1627 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1628 ir_node *op, ir_node *k, ir_mode *mode) {
1629 return new_rd_Shl(NULL, irg, block, op, k, mode);
1631 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1632 ir_node *op, ir_node *k, ir_mode *mode) {
1633 return new_rd_Shr(NULL, irg, block, op, k, mode);
1635 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1636 ir_node *op, ir_node *k, ir_mode *mode) {
1637 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1639 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1640 ir_node *op, ir_node *k, ir_mode *mode) {
1641 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1643 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1644 ir_node *op, ir_node *k, ir_mode *mode) {
1645 return new_rd_Carry(NULL, irg, block, op, k, mode);
1647 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1648 ir_node *op, ir_node *k, ir_mode *mode) {
1649 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1651 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1652 ir_node *op1, ir_node *op2) {
1653 return new_rd_Cmp(NULL, irg, block, op1, op2);
1655 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1656 ir_node *op, ir_mode *mode) {
1657 return new_rd_Conv(NULL, irg, block, op, mode);
1659 ir_node *new_r_strictConv(ir_graph *irg, ir_node *block,
1660 ir_node *op, ir_mode *mode) {
1661 return new_rd_strictConv(NULL, irg, block, op, mode);
1664 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1665 ir_node **in, ir_mode *mode) {
1666 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1669 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1670 return new_rd_Cast(NULL, irg, block, op, to_tp);
/* Memory-operation wrappers. */
1672 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1673 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1674 return new_rd_Load(NULL, irg, block, store, adr, mode, flags);
1676 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1677 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1678 return new_rd_Store(NULL, irg, block, store, adr, val, flags);
1680 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1681 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1682 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1684 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1685 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1686 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1689 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1690 return new_rd_Sync(NULL, irg, block, arity, in);
1693 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1694 ir_mode *mode, long proj) {
1695 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1698 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1700 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1703 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1704 int arity, ir_node **in) {
1705 return new_rd_Tuple(NULL, irg, block, arity, in );
1707 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1708 ir_node *val, ir_mode *mode) {
1709 return new_rd_Id(NULL, irg, block, val, mode);
/* Bad is unique per graph: just return the cached node. */
1712 ir_node *new_r_Bad(ir_graph *irg) {
1713 return get_irg_bad(irg);
1716 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1717 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1719 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1720 return new_rd_Unknown(irg, m);
1723 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1724 return new_rd_CallBegin(NULL, irg, block, callee);
1726 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1727 return new_rd_EndReg(NULL, irg, block);
1729 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1730 return new_rd_EndExcept(NULL, irg, block);
1733 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1734 return new_rd_Break(NULL, irg, block);
1736 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1737 ir_mode *mode, long proj) {
1738 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
/* NoMem is unique per graph: just return the cached node. */
1741 ir_node *new_r_NoMem(ir_graph *irg) {
1742 return get_irg_no_mem(irg);
1745 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1746 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1747 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1750 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1751 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1752 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1754 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1756 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1758 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1759 ir_node *store, ir_node *obj) {
1760 return new_rd_Raise(NULL, irg, block, store, obj);
1762 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1763 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1764 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1767 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1768 return new_rd_Pin(NULL, irg, block, node);
1771 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1772 int arity, ir_node *in[], ir_asm_constraint *inputs,
1773 int n_outs, ir_asm_constraint *outputs,
1774 int n_clobber, ident *clobber[], ident *asm_text) {
1775 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1778 /** ********************/
1779 /** public interfaces */
1780 /** construction tools */
1786 * - create a new Start node in the current block
1788 * @return s - pointer to the created Start node
/* Creates a Start node in the current block of current_ir_graph
 * ("d" = with debug info, implicit graph/block). */
1793 new_d_Start(dbg_info *db) {
1796 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1797 op_Start, mode_T, 0, NULL);
1799 res = optimize_node(res);
1800 IRN_VRFY_IRG(res, current_ir_graph);
/* Creates the End node in the current block of current_ir_graph.
 * Arity -1: the End node's in-array grows dynamically (keep-alives). */
1805 new_d_End(dbg_info *db) {
1807 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1808 op_End, mode_X, -1, NULL);
1809 res = optimize_node(res);
1810 IRN_VRFY_IRG(res, current_ir_graph);
1815 /* Constructs a Block with a fixed number of predecessors.
1816 Does set current_block. Can be used with automatic Phi
1817 node construction. */
1819 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1822 int has_unknown = 0;
/* NOTE(review): the two constructor calls below appear to be alternative
   paths (presumably guarded by USE_ORIGINAL, elided in this extract). */
1825 res = new_bd_Block(db, arity, in);
1827 res = new_rd_Block(db, current_ir_graph, arity, in);
1830 /* Create and initialize array for Phi-node construction. */
1831 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1832 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1833 current_ir_graph->n_loc);
1834 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Blocks with an Unknown predecessor must not be optimized yet. */
1837 for (i = arity-1; i >= 0; i--)
1838 if (is_Unknown(in[i])) {
1843 if (!has_unknown) res = optimize_node(res);
/* The new block becomes the current block for subsequent construction. */
1845 current_ir_graph->current_block = res;
1847 IRN_VRFY_IRG(res, current_ir_graph);
1853 /* ***********************************************************************/
1854 /* Methods necessary for automatic Phi node creation */
1856 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1857 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1858 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1859 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1861 Call Graph: ( A ---> B == A "calls" B)
1863 get_value mature_immBlock
1871 get_r_value_internal |
1875 new_rd_Phi0 new_rd_Phi_in
1877 * *************************************************************************** */
1879 /** Creates a Phi node with 0 predecessors.  Used as a placeholder while
     the enclosing block is still immature; completed by mature_immBlock(). */
1880 static inline ir_node *
1881 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1884 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1885 IRN_VRFY_IRG(res, irg);
1891 * Internal constructor of a Phi node by a phi_merge operation.
1893 * @param irg the graph on which the Phi will be constructed
1894 * @param block the block in which the Phi will be constructed
1895 * @param mode the mode of the Phi node
1896 * @param in the input array of the phi node
1897 * @param ins number of elements in the input array
1898 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1899 * the value for which the new Phi is constructed
1901 static inline ir_node *
1902 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1903 ir_node **in, int ins, ir_node *phi0) {
1905 ir_node *res, *known;
1907 /* Allocate a new node on the obstack. The allocation copies the in
1909 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1910 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1912 /* This loop checks whether the Phi has more than one predecessor.
1913 If so, it is a real Phi node and we break the loop. Else the
1914 Phi node merges the same definition on several paths and therefore
1916 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
1918 for (i = ins - 1; i >= 0; --i) {
1921 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1923 /* Optimize self referencing Phis: We can't detect them yet properly, as
1924 they still refer to the Phi0 they will replace. So replace right now. */
1925 if (phi0 && in[i] == phi0)
1928 if (in[i] == res || in[i] == known)
1937 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* The freshly allocated Phi is redundant: release it and use `known`. */
1940 edges_node_deleted(res, current_ir_graph);
1941 obstack_free(current_ir_graph->obst, res);
1942 if (is_Phi(known)) {
1943 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1944 order, an enclosing Phi node may become superfluous. */
1945 res = optimize_in_place_2(known);
1947 exchange(known, res);
1952 /* A undefined value, e.g., in unreachable code. */
1956 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1957 IRN_VRFY_IRG(res, irg);
1958 /* Memory Phis in endless loops must be kept alive.
1959 As we can't distinguish these easily we keep all of them alive. */
1960 if (is_Phi(res) && mode == mode_M)
1961 add_End_keepalive(get_irg_end(irg), res);
1965 } /* new_rd_Phi_in */
1968 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1970 #if PRECISE_EXC_CONTEXT
1972 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1975 * Construct a new frag_array for node n.
1976 * Copy the content from the current graph_arr of the corresponding block:
1977 * this is the current state.
1978 * Set ProjM(n) as current memory state.
1979 * Further the last entry in frag_arr of current block points to n. This
1980 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1982 static inline ir_node **new_frag_arr(ir_node *n) {
1986 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1987 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1988 sizeof(ir_node *)*current_ir_graph->n_loc);
1990 /* turn off optimization before allocating Proj nodes, as res isn't
1992 opt = get_opt_optimize(); set_optimize(0);
1993 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* NOTE(review): the condition guarding this branch (presumably is_Call)
   is elided in this extract. */
1995 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1996 } else if (is_CopyB(n)) {
1997 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops share one memory proj number; the asserts
   verify that assumption so pn_Alloc_M can stand in for all of them. */
1999 assert((pn_Quot_M == pn_DivMod_M) &&
2000 (pn_Quot_M == pn_Div_M) &&
2001 (pn_Quot_M == pn_Mod_M) &&
2002 (pn_Quot_M == pn_Load_M) &&
2003 (pn_Quot_M == pn_Store_M) &&
2004 (pn_Quot_M == pn_Alloc_M) &&
2005 (pn_Quot_M == pn_Bound_M));
2006 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain: the block's last slot always points at the newest frag op. */
2010 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2012 } /* new_frag_arr */
2015 * Returns the frag_arr from a node.  Dispatches on the opcode because the
     frag_arr lives at a different attribute offset per node kind.
     NOTE(review): the case labels are elided in this extract; from the
     attribute accesses they are presumably Call/Alloc/Load/Store plus a
     default for the generic except attribute. */
2017 static inline ir_node **get_frag_arr(ir_node *n) {
2018 switch (get_irn_opcode(n)) {
2020 return n->attr.call.exc.frag_arr;
2022 return n->attr.alloc.exc.frag_arr;
2024 return n->attr.load.exc.frag_arr;
2026 return n->attr.store.exc.frag_arr;
2028 return n->attr.except.frag_arr;
2030 } /* get_frag_arr */
/* Write `val` into slot `pos` of a frag array, walking the chain of frag
 * arrays (linked through the last slot, see new_frag_arr) and filling
 * every NULL slot on the way.  The DEBUG loop bounds the walk to detect
 * endless recursion. */
2033 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2034 #ifdef DEBUG_libfirm
2037 for (i = 1024; i >= 0; --i)
/* Only fill empty slots: an existing value must not be overwritten. */
2042 if (frag_arr[pos] == NULL)
2043 frag_arr[pos] = val;
2044 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
2045 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2046 assert(arr != frag_arr && "Endless recursion detected");
2051 assert(!"potential endless recursion in set_frag_value");
2052 } /* set_frag_value */
/* Look up the value at `pos` as seen after the fragile op `cfOp`.
 * Prefers the value recorded in cfOp's frag array; if the block has a
 * newer definition (a set_value() after the cfOp), a Phi must be built. */
2055 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2059 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
2061 frag_arr = get_frag_arr(cfOp);
2062 res = frag_arr[pos];
2064 if (block->attr.block.graph_arr[pos] != NULL) {
2065 /* There was a set_value() after the cfOp and no get_value() before that
2066 set_value(). We must build a Phi node now. */
2067 if (block->attr.block.is_matured) {
2068 int ins = get_irn_arity(block);
2070 NEW_ARR_A(ir_node *, nin, ins);
2071 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: record a Phi0 placeholder on the block's phi list. */
2073 res = new_rd_Phi0(current_ir_graph, block, mode);
2074 res->attr.phi.u.pos = pos;
2075 res->attr.phi.next = block->attr.block.phis;
2076 block->attr.block.phis = res;
2078 assert(res != NULL);
2079 /* It's a Phi, we can write this into all graph_arrs with NULL */
2080 set_frag_value(block->attr.block.graph_arr, pos, res);
2082 res = get_r_value_internal(block, pos, mode);
2083 set_frag_value(block->attr.block.graph_arr, pos, res);
2087 } /* get_r_frag_value_internal */
2088 #endif /* PRECISE_EXC_CONTEXT */
2091 * Check whether a control flow node cf_pred represents an exception flow.
2093 * @param cf_pred the control flow node
2094 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2096 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2098 * Note: all projections from a raise are "exceptional control flow", which is
2099 * why we handle it like a normal Jmp, because there is no "regular" one.
2100 * That's why Raise is no "fragile_op"!
2102 if (is_fragile_op(prev_cf_op)) {
2103 if (is_Proj(cf_pred)) {
2104 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2105 /* the regular control flow, NO exception */
2108 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2111 /* Hmm, exception but not a Proj? */
2112 assert(!"unexpected condition: fragile op without a proj");
2116 } /* is_exception_flow */
2119 * Computes the predecessors for the real phi node, and then
2120 * allocates and returns this node. The routine called to allocate the
2121 * node might optimize it away and return a real value.
2122 * This function must be called with an in-array of proper size.
2125 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2126 ir_node *prevBlock, *res, *phi0, *phi0_all;
2129 /* If this block has no value at pos create a Phi0 and remember it
2130 in graph_arr to break recursions.
2131 Else we may not set graph_arr as there a later value is remembered. */
2133 if (block->attr.block.graph_arr[pos] == NULL) {
2134 ir_graph *irg = current_ir_graph;
2136 if (block == get_irg_start_block(irg)) {
2137 /* Collapsing to Bad tarvals is no good idea.
2138 So we call a user-supplied routine here that deals with this case as
2139 appropriate for the given language. Sorrily the only help we can give
2140 here is the position.
2142 Even if all variables are defined before use, it can happen that
2143 we get to the start block, if a Cond has been replaced by a tuple
2144 (bad, jmp). In this case we call the function needlessly, eventually
2145 generating an non existent error.
2146 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2149 if (default_initialize_local_variable != NULL) {
2150 ir_node *rem = get_cur_block();
2152 set_cur_block(block);
/* pos - 1: slot 0 is reserved (memory), so user variables start at 1. */
2153 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2157 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2158 /* We don't need to care about exception ops in the start block.
2159 There are none by definition. */
2160 return block->attr.block.graph_arr[pos];
/* Not the start block: plant a Phi0 to break the recursion. */
2162 phi0 = new_rd_Phi0(irg, block, mode);
2163 block->attr.block.graph_arr[pos] = phi0;
2164 #if PRECISE_EXC_CONTEXT
2165 if (get_opt_precise_exc_context()) {
2166 /* Set graph_arr for fragile ops. Also here we should break recursion.
2167 We could choose a cyclic path through an cfop. But the recursion would
2168 break at some point. */
2169 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2175 /* This loop goes to all predecessor blocks of the block the Phi node
2176 is in and there finds the operands of the Phi node by calling
2177 get_r_value_internal. */
2178 for (i = 1; i <= ins; ++i) {
2179 ir_node *cf_pred = block->in[i];
2180 ir_node *prevCfOp = skip_Proj(cf_pred);
2182 if (is_Bad(prevCfOp)) {
2183 /* In case a Cond has been optimized we would get right to the start block
2184 with an invalid definition. */
2185 nin[i-1] = new_Bad();
2188 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2190 if (!is_Bad(prevBlock)) {
2191 #if PRECISE_EXC_CONTEXT
2192 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2193 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2194 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2197 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2199 nin[i-1] = new_Bad();
2203 /* We want to pass the Phi0 node to the constructor: this finds additional
2204 optimization possibilities.
2205 The Phi0 node either is allocated in this function, or it comes from
2206 a former call to get_r_value_internal(). In this case we may not yet
2207 exchange phi0, as this is done in mature_immBlock(). */
2209 phi0_all = block->attr.block.graph_arr[pos];
2210 if (! is_Phi0(phi0_all) ||
2211 get_irn_arity(phi0_all) != 0 ||
2212 get_nodes_block(phi0_all) != block)
2218 /* After collecting all predecessors into the array nin a new Phi node
2219 with these predecessors is created. This constructor contains an
2220 optimization: If all predecessors of the Phi node are identical it
2221 returns the only operand instead of a new Phi node. */
2222 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2224 /* In case we allocated a Phi0 node at the beginning of this procedure,
2225 we need to exchange this Phi0 with the real Phi. */
2227 exchange(phi0, res);
2228 block->attr.block.graph_arr[pos] = res;
2229 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2230 only an optimization. */
2237 * This function returns the last definition of a value. In case
2238 * this value was last defined in a previous block, Phi nodes are
2239 * inserted. If the part of the firm graph containing the definition
2240 * is not yet constructed, a dummy Phi node is returned.
2242 * @param block the current block
2243 * @param pos the value number of the value searched
2244 * @param mode the mode of this value (needed for Phi construction)
2247 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2249 /* There are 4 cases to treat.
2251 1. The block is not mature and we visit it the first time. We can not
2252 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2253 predecessors is returned. This node is added to the linked list (block
2254 attribute "phis") of the containing block to be completed when this block is
2255 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2258 2. The value is already known in this block, graph_arr[pos] is set and we
2259 visit the block the first time. We can return the value without
2260 creating any new nodes.
2262 3. The block is mature and we visit it the first time. A Phi node needs
2263 to be created (phi_merge). If the Phi is not needed, as all it's
2264 operands are the same value reaching the block through different
2265 paths, it's optimized away and the value itself is returned.
2267 4. The block is mature, and we visit it the second time. Now two
2268 subcases are possible:
2269 * The value was computed completely the last time we were here. This
2270 is the case if there is no loop. We can return the proper value.
2271 * The recursion that visited this node and set the flag did not
2272 return yet. We are computing a value in a loop and need to
2273 break the recursion. This case only happens if we visited
2274 the same block with phi_merge before, which inserted a Phi0.
2275 So we return the Phi0.
2278 /* case 4 -- already visited. */
2279 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2280 /* As phi_merge allocates a Phi0 this value is always defined. Here
2281 is the critical difference of the two algorithms. */
2282 assert(block->attr.block.graph_arr[pos]);
2283 return block->attr.block.graph_arr[pos];
2286 /* visited the first time */
2287 set_irn_visited(block, get_irg_visited(current_ir_graph));
2289 /* Get the local valid value */
2290 res = block->attr.block.graph_arr[pos];
2292 /* case 2 -- If the value is actually computed, return it. */
2296 if (block->attr.block.is_matured) { /* case 3 */
2298 /* The Phi has the same amount of ins as the corresponding block. */
2299 int ins = get_irn_arity(block);
2301 NEW_ARR_A(ir_node *, nin, ins);
2303 /* Phi merge collects the predecessors and then creates a node. */
2304 res = phi_merge(block, pos, mode, nin, ins);
2306 } else { /* case 1 */
2307 /* The block is not mature, we don't know how many in's are needed. A Phi
2308 with zero predecessors is created. Such a Phi node is called Phi0
2309 node. The Phi0 is then added to the list of Phi0 nodes in this block
2310 to be matured by mature_immBlock later.
2311 The Phi0 has to remember the pos of it's internal value. If the real
2312 Phi is computed, pos is used to update the array with the local
2314 res = new_rd_Phi0(current_ir_graph, block, mode);
2315 res->attr.phi.u.pos = pos;
2316 res->attr.phi.next = block->attr.block.phis;
2317 block->attr.block.phis = res;
2320 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2322 /* The local valid value is available now. */
2323 block->attr.block.graph_arr[pos] = res;
2326 } /* get_r_value_internal */
2328 /* ************************************************************************** */
2331 * Finalize a Block node, when all control flows are known.
2332 * Acceptable parameters are only Block nodes.  Turns the pending Phi0
     placeholders of the block into real Phi nodes via phi_merge().
2335 mature_immBlock(ir_node *block) {
2340 assert(is_Block(block));
2341 if (!get_Block_matured(block)) {
2342 ir_graph *irg = current_ir_graph;
/* -1: block->in[0] is the block's own entry, not a predecessor. */
2344 ins = ARR_LEN(block->in) - 1;
2345 /* Fix block parameters */
2346 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2348 /* An array for building the Phi nodes. */
2349 NEW_ARR_A(ir_node *, nin, ins);
2351 /* Traverse a chain of Phi nodes attached to this block and mature
2353 for (n = block->attr.block.phis; n; n = next) {
2354 inc_irg_visited(irg);
2355 next = n->attr.phi.next;
2356 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2359 block->attr.block.is_matured = 1;
2361 /* Now, as the block is a finished Firm node, we can optimize it.
2362 Since other nodes have been allocated since the block was created
2363 we can not free the node on the obstack. Therefore we have to call
2364 optimize_in_place().
2365 Unfortunately the optimization does not change a lot, as all allocated
2366 nodes refer to the unoptimized node.
2367 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2368 block = optimize_in_place_2(block);
2369 IRN_VRFY_IRG(block, irg);
2371 } /* mature_immBlock */
/* ------------------------------------------------------------------ */
/* new_d_* convenience constructors: forward to the new_bd_* versions */
/* using current_ir_graph->current_block as the target block.         */
/* ------------------------------------------------------------------ */
2374 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2375 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2379 new_d_Const(dbg_info *db, tarval *con) {
2380 return new_bd_Const(db, con);
2384 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2385 return new_bd_Const_long(db, mode, value);
2386 } /* new_d_Const_long */
2389 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
2390 return new_bd_Const_type(db, con, tp);
2391 } /* new_d_Const_type */
2396 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2397 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2401 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2402 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* defaultProj: marks the Cond as fragmentary and builds the default
 * (fall-through) projection with number max_proj. */
2407 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2409 assert(arg->op == op_Cond);
2410 arg->attr.cond.kind = fragmentary;
2411 arg->attr.cond.default_proj = max_proj;
2412 res = new_d_Proj(db, arg, mode_X, max_proj);
2414 } /* new_d_defaultProj */
/* Conv: last argument 0 = normal conversion, 1 = strict conversion. */
2418 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2419 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2423 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2424 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2425 } /* new_d_strictConv */
2428 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2429 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2433 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2434 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2445 * Allocate a frag array for a node if the current graph state is phase_building.
2447 * @param irn the node for which the frag array should be allocated
2448 * @param op the opcode of the (original) node, if does not match opcode of irn,
2450 * @param frag_store the address of the frag store in irn attributes, if this
2451 * address contains a value != NULL, does nothing
/* Lazily attach a "fragment array" (per-memory-state bookkeeping for
   precise exception contexts) to a freshly built fragile node.
   All three guards must hold: precise-exception option on, graph still
   under construction, node not optimized into a different op, and no
   array installed yet (e.g. by CSE returning an existing node). */
2453 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2454 if (get_opt_precise_exc_context()) {
2455 if ((current_ir_graph->phase_state == phase_building) &&
2456 (get_irn_op(irn) == op) && /* Could be optimized away. */
2457 !*frag_store) /* Could be a cse where the arr is already set. */ {
2458 *frag_store = new_frag_arr(irn);
/* NOTE(review): closing braces of the two ifs fall in the listing gap
   before the line below. */
2461 } /* firm_alloc_frag_arr */
/* --- Fragile arithmetic constructors (Quot/DivMod/Div/DivRL/Mod) ---
   Each builds the node in the current block, then (under
   PRECISE_EXC_CONTEXT) attaches a frag array so precise exception
   contexts can track the memory state.  The matching #endif lines fall
   in the listing gaps after each firm_alloc_frag_arr call. */
2465 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2467 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2468 #if PRECISE_EXC_CONTEXT
2469 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2476 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2478 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2479 #if PRECISE_EXC_CONTEXT
2480 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2484 } /* new_d_DivMod */
2487 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2489 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2490 #if PRECISE_EXC_CONTEXT
2491 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
/* Remainderless division.  NOTE(review): passes op_Div (not a DivRL
   opcode) to firm_alloc_frag_arr — presumably new_bd_DivRL builds an
   op_Div node with a no-remainder flag, so the opcode check still
   matches; confirm against new_bd_DivRL. */
2498 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2500 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2501 #if PRECISE_EXC_CONTEXT
2502 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2509 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2511 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2512 #if PRECISE_EXC_CONTEXT
2513 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
/* Build a Cmp of op1 and op2 in the current block. */
2532 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2533 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
/* Build an unconditional Jmp leaving the current block. */
2537 new_d_Jmp(dbg_info *db) {
2538 return new_bd_Jmp(db, current_ir_graph->current_block);
/* Build a computed jump (IJmp) to target address tgt. */
2542 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2543 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
/* Build a Cond on selector c. */
2547 new_d_Cond(dbg_info *db, ir_node *c) {
2548 return new_bd_Cond(db, current_ir_graph->current_block, c);
/* Build a Call; fragile, so a frag array is attached under
   PRECISE_EXC_CONTEXT (the #endif falls in a listing gap). */
2552 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2555 res = new_bd_Call(db, current_ir_graph->current_block,
2556 store, callee, arity, in, tp);
2557 #if PRECISE_EXC_CONTEXT
2558 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
/* Build a Builtin node of the given kind. */
2565 new_d_Builtin(dbg_info *db, ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
2567 return new_bd_Builtin(db, current_ir_graph->current_block, store, kind, arity, in, tp);
2568 } /* new_d_Builtin */
/* Build a Return with arity result values. */
2571 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2572 return new_bd_Return(db, current_ir_graph->current_block,
2574 } /* new_d_Return */
/* Build a Load from addr; fragile, so a frag array is attached under
   PRECISE_EXC_CONTEXT (the #endif falls in a listing gap). */
2577 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
2579 res = new_bd_Load(db, current_ir_graph->current_block,
2580 store, addr, mode, flags);
2581 #if PRECISE_EXC_CONTEXT
2582 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
/* Build a Store of val to addr; fragile like Load. */
2589 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
2591 res = new_bd_Store(db, current_ir_graph->current_block,
2592 store, addr, val, flags);
2593 #if PRECISE_EXC_CONTEXT
2594 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
/* Build an Alloc of size bytes/elements of alloc_type; fragile. */
2601 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2602 ir_where_alloc where) {
2604 res = new_bd_Alloc(db, current_ir_graph->current_block,
2605 store, size, alloc_type, where);
2606 #if PRECISE_EXC_CONTEXT
2607 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
/* Build a Free releasing ptr (no frag array: Free is not fragile here). */
2614 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2615 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2616 return new_bd_Free(db, current_ir_graph->current_block,
2617 store, ptr, size, free_type, where);
/* Entity selection without index operands: a Sel with 0 indices. */
2622 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2623 /* GL: objptr was called frame before. Frame was a bad choice for the name
2624 as the operand could as well be a pointer to a dynamic object. */
2626 return new_bd_Sel(db, current_ir_graph->current_block,
2627 store, objptr, 0, NULL, ent);
2628 } /* new_d_simpleSel */
/* Entity selection with n_index index operands (array access). */
2632 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2633 return new_bd_Sel(db, current_ir_graph->current_block,
2634 store, objptr, n_index, index, sel);
/* SymConsts are placed in the graph's start block, not the current
   block — they are constants valid for the whole graph. */
2639 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2640 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2642 } /* new_d_SymConst_type */
/* SymConst without an explicit type: uses firm_unknown_type. */
2645 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2646 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2647 value, kind, firm_unknown_type);
2648 } /* new_d_SymConst */
/* Build a Sync joining arity memory values.  NOTE(review): forwards to
   new_rd_Sync (graph-explicit layer) rather than new_bd_Sync like its
   siblings — presumably equivalent here; confirm. */
2651 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2652 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
/* Build a Confirm asserting val <cmp> bound on this path. */
2657 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2658 return new_bd_Confirm(db, current_ir_graph->current_block,
2660 } /* new_d_Confirm */
/* Build (or fetch) the Unknown node of mode m; block-independent. */
2663 new_d_Unknown(ir_mode *m) {
2664 return new_bd_Unknown(m);
2665 } /* new_d_Unknown */
/* --- Interprocedural-view constructors (CallBegin/EndReg/EndExcept/
   Break/Filter) and remaining high-level ops, all built in the
   current block via the new_bd_* layer. --- */
2669 new_d_CallBegin(dbg_info *db, ir_node *call) {
2670 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2671 } /* new_d_CallBegin */
2674 new_d_EndReg(dbg_info *db) {
2675 return new_bd_EndReg(db, current_ir_graph->current_block);
2676 } /* new_d_EndReg */
2679 new_d_EndExcept(dbg_info *db) {
2680 return new_bd_EndExcept(db, current_ir_graph->current_block);
2681 } /* new_d_EndExcept */
2685 new_d_Break(dbg_info *db) {
2686 return new_bd_Break(db, current_ir_graph->current_block);
2690 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2691 return new_bd_Filter(db, current_ir_graph->current_block,
2693 } /* new_d_Filter */
/* Build a Mux: sel ? ir_true : ir_false. */
2696 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2697 ir_node *ir_true, ir_mode *mode) {
2698 return new_bd_Mux(db, current_ir_graph->current_block,
2699 sel, ir_false, ir_true, mode);
/* Build a CopyB (block copy src -> dst); fragile, frag array attached
   under PRECISE_EXC_CONTEXT (the #endif falls in a listing gap). */
2703 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2704 ir_node *dst, ir_node *src, ir_type *data_type) {
2706 res = new_bd_CopyB(db, current_ir_graph->current_block,
2707 store, dst, src, data_type);
2708 #if PRECISE_EXC_CONTEXT
2709 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
/* Build an InstOf type test of objptr against type. */
2715 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2716 return new_bd_InstOf(db, current_ir_graph->current_block,
2717 store, objptr, type);
2718 } /* new_d_InstOf */
/* Build a Raise throwing obj. */
2721 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2722 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
/* Build a Bound check (lower <= idx < upper); fragile like CopyB. */
2725 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2726 ir_node *idx, ir_node *lower, ir_node *upper) {
2728 res = new_bd_Bound(db, current_ir_graph->current_block,
2729 store, idx, lower, upper);
2730 #if PRECISE_EXC_CONTEXT
2731 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
/* Build a Pin fixing node to the current block. */
2738 new_d_Pin(dbg_info *db, ir_node *node) {
2739 return new_bd_Pin(db, current_ir_graph->current_block, node);
/* Build an inline-assembler node with constraints and clobbers. */
2744 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2745 int n_outs, ir_asm_constraint *outputs,
2746 int n_clobber, ident *clobber[], ident *asm_text) {
2747 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2750 /* ********************************************************************* */
2751 /* Comfortable interface with automatic Phi node construction. */
2752 /* (Uses also constructors of the other interfaces, except new_Block.) */
2753 /* ********************************************************************* */
2755 /* Block construction */
2756 /* immature Block without predecessors */
/* Create an immature Block: a block whose control-flow predecessors
   are not yet known.  Predecessors are added later via
   add_immBlock_pred() and the block is finished with mature_immBlock().
   Only legal while the graph is in phase_building. */
2758 new_d_immBlock(dbg_info *db) {
2761 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2762 /* creates a new dynamic in-array as length of in is -1 */
2763 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2765 /* macroblock head */
/* Initialize all block attributes: not matured, alive, and (by
   default) the head of its own macroblock. */
2768 res->attr.block.is_matured = 0;
2769 res->attr.block.is_dead = 0;
2770 res->attr.block.is_mb_head = 1;
2771 res->attr.block.has_label = 0;
2772 res->attr.block.irg = current_ir_graph;
2773 res->attr.block.backedge = NULL;
2774 res->attr.block.in_cg = NULL;
2775 res->attr.block.cg_backedge = NULL;
2776 res->attr.block.extblk = NULL;
2777 res->attr.block.region = NULL;
2778 res->attr.block.mb_depth = 0;
2779 res->attr.block.label = 0;
2781 set_Block_block_visited(res, 0);
2783 /* Create and initialize array for Phi-node construction. */
/* graph_arr holds one slot per local variable (n_loc) used by the
   automatic SSA construction; zeroed so unset values read as NULL. */
2784 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2785 current_ir_graph->n_loc)
2786 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2788 /* Immature block may not be optimized! */
2789 IRN_VRFY_IRG(res, current_ir_graph);
2792 } /* new_d_immBlock */
/* Convenience wrapper: immature block without debug info. */
2795 new_immBlock(void) {
2796 return new_d_immBlock(NULL);
2797 } /* new_immBlock */
2799 /* immature PartBlock with its predecessors */
/* Create an immature partial block (part of an existing macroblock):
   inherits the macroblock head from pred_jmp's block (in[0]), gets
   pred_jmp as its first predecessor, and nests one level deeper. */
2801 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2802 ir_node *res = new_d_immBlock(db);
2803 ir_node *blk = get_nodes_block(pred_jmp);
/* in[0] is the macroblock-head slot; copy it from the predecessor's
   block so both blocks belong to the same macroblock. */
2805 res->in[0] = blk->in[0];
2806 assert(res->in[0] != NULL);
2807 add_immBlock_pred(res, pred_jmp);
/* A part block is not a macroblock head (new_d_immBlock set it to 1). */
2809 res->attr.block.is_mb_head = 0;
2810 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2813 } /* new_d_immPartBlock */
/* Convenience wrapper: part block without debug info. */
2816 new_immPartBlock(ir_node *pred_jmp) {
2817 return new_d_immPartBlock(NULL, pred_jmp);
2818 } /* new_immPartBlock */
2820 /* add an edge to a jmp/control flow node */
/* Append control-flow predecessor jmp to the (still immature,
   macroblock-head) block.  in[0] is the macroblock-head slot, so the
   predecessor count is ARR_LEN(in) - 1. */
2822 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2823 int n = ARR_LEN(block->in) - 1;
2825 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2826 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2827 assert(is_ir_node(jmp));
2829 ARR_APP1(ir_node *, block->in, jmp);
/* Notify debug hooks that edge n of the block was set. */
2831 hook_set_irn_n(block, n, jmp, NULL);
2832 } /* add_immBlock_pred */
2834 /* changing the current block */
/* Make target the block into which subsequent new_* constructors
   place their nodes. */
2836 set_cur_block(ir_node *target) {
2837 current_ir_graph->current_block = target;
2838 } /* set_cur_block */
2840 /* ************************ */
2841 /* parameter administration */
2843 /* get a value from the parameter array from the current block by its index */
/* Read local variable pos in the current block, triggering automatic
   Phi construction if needed.  Slot 0 is reserved for the memory
   state, hence pos + 1. */
2845 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2846 ir_graph *irg = current_ir_graph;
2847 assert(get_irg_phase_state(irg) == phase_building);
/* Fresh visited counter so get_r_value_internal's graph walk starts
   clean. */
2848 inc_irg_visited(irg);
2853 return get_r_value_internal(irg->current_block, pos + 1, mode);
2856 /* get a value from the parameter array from the current block by its index */
/* Convenience wrapper without debug info. */
2858 get_value(int pos, ir_mode *mode) {
2859 return get_d_value(NULL, pos, mode);
2862 /* set a value at position pos in the parameter array from the current block */
/* Store value as the current definition of local variable pos in the
   current block (slot 0 is the memory state, hence pos + 1). */
2864 set_value(int pos, ir_node *value) {
2865 ir_graph *irg = current_ir_graph;
2866 assert(get_irg_phase_state(irg) == phase_building);
2868 assert(pos+1 < irg->n_loc);
2869 assert(is_ir_node(value));
2870 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2873 /* Find the value number for a node in the current block.*/
/* Scan the current block's graph_arr backwards (skipping slot 0, the
   memory state) for value.  NOTE(review): the return statements fall
   in the listing gap after the loop — presumably `return i;` on match
   and a not-found sentinel afterwards; confirm against the full file. */
2875 find_value(ir_node *value) {
2877 ir_node *bl = current_ir_graph->current_block;
2879 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2880 if (bl->attr.block.graph_arr[i] == value)
2885 /* get the current store */
/* Return the current memory state of the current block: slot 0 of the
   value array, via the same lookup used by get_value.  NOTE(review):
   the function signature line falls in a listing gap above the first
   statement shown. */
2888 ir_graph *irg = current_ir_graph;
2890 assert(get_irg_phase_state(irg) == phase_building);
2891 /* GL: one could call get_value instead */
2892 inc_irg_visited(irg);
2893 return get_r_value_internal(irg->current_block, 0, mode_M);
2896 /* set the current store: handles automatic Sync construction for Load nodes */
/* Install store as the current memory state (slot 0 of the current
   block's value array).  With the auto-create-sync option on, two
   consecutive non-volatile Loads are rewired to read the same memory
   and joined by a Sync, exposing their independence. */
2898 set_store(ir_node *store) {
2899 ir_node *load, *pload, *pred, *in[2];
2901 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2902 /* Beware: due to dead code elimination, a store might become a Bad node even in
2903 the construction phase. */
2904 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2906 if (get_opt_auto_create_sync()) {
2907 /* handle non-volatile Load nodes by automatically creating Sync's */
2908 load = skip_Proj(store);
2909 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2910 pred = get_Load_mem(load);
2912 if (is_Sync(pred)) {
2913 /* a Load after a Sync: move it up */
2914 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2916 set_Load_mem(load, get_memop_mem(mem));
/* The existing Sync absorbs the new Load's memory result. */
2917 add_Sync_pred(pred, store);
2920 pload = skip_Proj(pred);
2921 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2922 /* a Load after a Load: create a new Sync */
2923 set_Load_mem(load, get_Load_mem(pload));
/* in[] setup falls in the listing gap; the Sync joins both Loads'
   memory results and becomes the new store. */
2927 store = new_Sync(2, in);
2932 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Pin ka as a keep-alive edge of the graph's End node so dead-code
   elimination cannot remove it. */
2936 keep_alive(ir_node *ka) {
2937 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2940 /* --- Useful access routines --- */
2941 /* Returns the current block of the current graph. To set the current
2942 block use set_cur_block. */
2943 ir_node *get_cur_block(void) {
2944 return get_irg_current_block(current_ir_graph);
2945 } /* get_cur_block */
2947 /* Returns the frame type of the current graph */
2948 ir_type *get_cur_frame_type(void) {
2949 return get_irg_frame_type(current_ir_graph);
2950 } /* get_cur_frame_type */
2953 /* ********************************************************************* */
2956 /* call once for each run of the library */
/* Register the language-dependent callback used to produce a value for
   a local variable that is read before being written. */
2958 firm_init_cons(uninitialized_local_variable_func_t *func) {
2959 default_initialize_local_variable = func;
2960 } /* firm_init_cons */
/* Finalize construction of every graph in the program and advance the
   whole program to phase_high. */
2963 irp_finalize_cons(void) {
2965 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2966 irg_finalize_cons(get_irp_irg(i));
2968 irp->phase_state = phase_high;
2969 } /* irp_finalize_cons */
/* ------------------------------------------------------------------ */
/* Comfortable (dbg-info-free) interface: each new_X simply forwards  */
/* to new_d_X with a NULL dbg_info.                                   */
/* ------------------------------------------------------------------ */
2972 ir_node *new_Block(int arity, ir_node **in) {
2973 return new_d_Block(NULL, arity, in);
2975 ir_node *new_Start(void) {
2976 return new_d_Start(NULL);
2978 ir_node *new_End(void) {
2979 return new_d_End(NULL);
2981 ir_node *new_Jmp(void) {
2982 return new_d_Jmp(NULL);
2984 ir_node *new_IJmp(ir_node *tgt) {
2985 return new_d_IJmp(NULL, tgt);
2987 ir_node *new_Cond(ir_node *c) {
2988 return new_d_Cond(NULL, c);
2990 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2991 return new_d_Return(NULL, store, arity, in);
2994 ir_node *new_Const(tarval *con) {
2995 return new_d_Const(NULL, con);
2998 ir_node *new_Const_long(ir_mode *mode, long value) {
2999 return new_d_Const_long(NULL, mode, value);
3002 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3003 return new_d_Const_type(NULL, con, tp);
3006 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
3007 return new_d_SymConst_type(NULL, mode, value, kind, type);
3009 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
3010 return new_d_SymConst(NULL, mode, value, kind);
3012 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3013 return new_d_simpleSel(NULL, store, objptr, ent);
3016 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3018 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3020 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3022 return new_d_Call(NULL, store, callee, arity, in, tp);
3024 ir_node *new_Builtin(ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
3026 return new_d_Builtin(NULL, store, kind, arity, in, tp);
/* Arithmetic / logic convenience constructors: NULL-dbg wrappers
   around the corresponding new_d_* functions. */
3028 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3029 return new_d_Add(NULL, op1, op2, mode);
3031 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3032 return new_d_Sub(NULL, op1, op2, mode);
3034 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3035 return new_d_Minus(NULL, op, mode);
3037 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3038 return new_d_Mul(NULL, op1, op2, mode);
3040 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
3041 return new_d_Mulh(NULL, op1, op2, mode);
3043 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3044 return new_d_Quot(NULL, memop, op1, op2, mode, state);
3046 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3047 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
3049 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3050 return new_d_Div(NULL, memop, op1, op2, mode, state);
3052 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3053 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
3055 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3056 return new_d_Mod(NULL, memop, op1, op2, mode, state);
3058 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3059 return new_d_Abs(NULL, op, mode);
3061 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3062 return new_d_And(NULL, op1, op2, mode);
3064 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3065 return new_d_Or(NULL, op1, op2, mode);
3067 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3068 return new_d_Eor(NULL, op1, op2, mode);
3070 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3071 return new_d_Not(NULL, op, mode);
3073 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3074 return new_d_Shl(NULL, op, k, mode);
3076 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3077 return new_d_Shr(NULL, op, k, mode);
3079 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3080 return new_d_Shrs(NULL, op, k, mode);
3082 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
3083 return new_d_Rotl(NULL, op, k, mode);
3085 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3086 return new_d_Carry(NULL, op1, op2, mode);
3088 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3089 return new_d_Borrow(NULL, op1, op2, mode);
/* Comparison, conversion, memory and interprocedural convenience
   constructors: NULL-dbg wrappers around new_d_*.  Exception:
   new_Bad returns the graph's singleton Bad node directly. */
3091 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3092 return new_d_Cmp(NULL, op1, op2);
3094 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3095 return new_d_Conv(NULL, op, mode);
3097 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3098 return new_d_strictConv(NULL, op, mode);
3101 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3102 return new_d_Phi(NULL, arity, in, mode);
3105 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3106 return new_d_Cast(NULL, op, to_tp);
3108 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
3109 return new_d_Load(NULL, store, addr, mode, flags);
3111 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
3112 return new_d_Store(NULL, store, addr, val, flags);
3114 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3115 ir_where_alloc where) {
3116 return new_d_Alloc(NULL, store, size, alloc_type, where);
3118 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3119 ir_type *free_type, ir_where_alloc where) {
3120 return new_d_Free(NULL, store, ptr, size, free_type, where);
3123 ir_node *new_Sync(int arity, ir_node *in[]) {
3124 return new_d_Sync(NULL, arity, in);
3127 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3128 return new_d_Proj(NULL, arg, mode, proj);
3131 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3132 return new_d_defaultProj(NULL, arg, max_proj);
3135 ir_node *new_Tuple(int arity, ir_node **in) {
3136 return new_d_Tuple(NULL, arity, in);
3138 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3139 return new_d_Id(NULL, val, mode);
/* Bad is a per-graph singleton; no construction needed. */
3142 ir_node *new_Bad(void) {
3143 return get_irg_bad(current_ir_graph);
3146 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3147 return new_d_Confirm(NULL, val, bound, cmp);
3149 ir_node *new_Unknown(ir_mode *m) {
3150 return new_d_Unknown(m);
3153 ir_node *new_CallBegin(ir_node *callee) {
3154 return new_d_CallBegin(NULL, callee);
3156 ir_node *new_EndReg(void) {
3157 return new_d_EndReg(NULL);
3159 ir_node *new_EndExcept(void) {
3160 return new_d_EndExcept(NULL);
3163 ir_node *new_Break(void) {
3164 return new_d_Break(NULL);
3166 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3167 return new_d_Filter(NULL, arg, mode, proj);
/* NoMem is a per-graph singleton; no construction needed. */
3170 ir_node *new_NoMem(void) {
3171 return get_irg_no_mem(current_ir_graph);
3174 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3175 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3178 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3179 return new_d_CopyB(NULL, store, dst, src, data_type);
3181 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3182 return new_d_InstOf(NULL, store, objptr, ent);
3184 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3185 return new_d_Raise(NULL, store, obj);
3187 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3188 return new_d_Bound(NULL, store, idx, lower, upper);
3191 ir_node *new_Pin(ir_node *node) {
3192 return new_d_Pin(NULL, node);
/* Dummy: a placeholder node with no operands, placed directly in the
   start block (bypasses the new_d_* layer and optimization). */
3195 ir_node *new_Dummy(ir_mode *m) {
3196 ir_graph *irg = current_ir_graph;
3197 return new_ir_node(NULL, irg, get_irg_start_block(irg), op_Dummy, m, 0, NULL);
3199 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3200 int n_outs, ir_asm_constraint *outputs,
3201 int n_clobber, ident *clobber[], ident *asm_text) {
3202 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3205 /* create a new anchor node */
3206 ir_node *new_Anchor(ir_graph *irg) {
3207 ir_node *in[anchor_last];
3208 memset(in, 0, sizeof(in));
3209 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);