2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
47 /* Uncomment to use original code instead of generated one */
48 // #define USE_ORIGINAL
50 /* when we need verifying */
52 # define IRN_VRFY_IRG(res, irg)
54 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
58 * Language dependent variable initialization callback.
60 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/*
 * Constructor-generator macros.  Each NEW_BD_* macro expands to a
 * static new_bd_<instr>() that builds a node of op_<instr> in the given
 * block of current_ir_graph, runs it through optimize_node() and
 * IRN_VRFY_IRG().  Each NEW_RD_* macro expands to a new_rd_<instr>()
 * that temporarily switches current_ir_graph to the given irg, delegates
 * to the bd variant, and restores the previous graph.  The NEW_D_*
 * macros delegate to the bd variant using current_ir_graph's
 * current_block.  The DIVOP variants additionally set the divmod
 * attributes (exception pin state, result mode, no_remainder = 0).
 * NOTE(review): this excerpt is line-sampled; declaration/return lines
 * of the macro bodies are not visible here — do not edit from this view.
 */
62 /* creates a bd constructor for a binop */
63 #define NEW_BD_BINOP(instr) \
65 new_bd_##instr(dbg_info *db, ir_node *block, \
66 ir_node *op1, ir_node *op2, ir_mode *mode) \
70 ir_graph *irg = current_ir_graph; \
73 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
74 res = optimize_node(res); \
75 IRN_VRFY_IRG(res, irg); \
79 /* creates a bd constructor for an unop */
80 #define NEW_BD_UNOP(instr) \
82 new_bd_##instr(dbg_info *db, ir_node *block, \
83 ir_node *op, ir_mode *mode) \
86 ir_graph *irg = current_ir_graph; \
87 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
88 res = optimize_node(res); \
89 IRN_VRFY_IRG(res, irg); \
93 /* creates a bd constructor for an divop */
94 #define NEW_BD_DIVOP(instr) \
96 new_bd_##instr(dbg_info *db, ir_node *block, \
97 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
101 ir_graph *irg = current_ir_graph; \
105 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
106 res->attr.divmod.exc.pin_state = state; \
107 res->attr.divmod.resmode = mode; \
108 res->attr.divmod.no_remainder = 0; \
109 res = optimize_node(res); \
110 IRN_VRFY_IRG(res, irg); \
114 /* creates a rd constructor for a binop */
115 #define NEW_RD_BINOP(instr) \
117 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
118 ir_node *op1, ir_node *op2, ir_mode *mode) \
121 ir_graph *rem = current_ir_graph; \
122 current_ir_graph = irg; \
123 res = new_bd_##instr(db, block, op1, op2, mode); \
124 current_ir_graph = rem; \
128 /* creates a rd constructor for an unop */
129 #define NEW_RD_UNOP(instr) \
131 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
132 ir_node *op, ir_mode *mode) \
135 ir_graph *rem = current_ir_graph; \
136 current_ir_graph = irg; \
137 res = new_bd_##instr(db, block, op, mode); \
138 current_ir_graph = rem; \
142 /* creates a rd constructor for an divop */
143 #define NEW_RD_DIVOP(instr) \
145 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
146 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
149 ir_graph *rem = current_ir_graph; \
150 current_ir_graph = irg; \
151 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
152 current_ir_graph = rem; \
156 /* creates a d constructor for an binop */
157 #define NEW_D_BINOP(instr) \
159 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
160 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
163 /* creates a d constructor for an unop */
164 #define NEW_D_UNOP(instr) \
166 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
167 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
171 #include "gen_ir_cons.c.inl"
175 * Constructs a Block with a fixed number of predecessors.
176 * Does not set current_block. Cannot be used with automatic
177 * Phi node construction.
/* Builds the Block in current_ir_graph with a NULL block pointer (Blocks
 * have no enclosing block).  Initializes the full block attribute set:
 * the block starts alive, as a macroblock head, unlabeled, and with a
 * fresh backedge array allocated on the graph's obstack.  It is marked
 * matured immediately, so no further predecessors may be added.
 * NOTE(review): line-sampled excerpt — the res declaration and return
 * are not visible here. */
180 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
182 ir_graph *irg = current_ir_graph;
184 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
186 /* macroblock header */
189 res->attr.block.is_dead = 0;
190 res->attr.block.is_mb_head = 1;
191 res->attr.block.has_label = 0;
192 res->attr.block.irg = irg;
193 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
194 res->attr.block.in_cg = NULL; /* no interprocedural (call-graph) inputs yet */
195 res->attr.block.cg_backedge = NULL;
196 res->attr.block.extblk = NULL;
197 res->attr.block.mb_depth = 0;
198 res->attr.block.label = 0;
200 set_Block_matured(res, 1);
201 set_Block_block_visited(res, 0);
203 IRN_VRFY_IRG(res, irg);
208 new_bd_Start(dbg_info *db, ir_node *block) {
210 ir_graph *irg = current_ir_graph;
212 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
214 IRN_VRFY_IRG(res, irg);
219 new_bd_End(dbg_info *db, ir_node *block) {
221 ir_graph *irg = current_ir_graph;
223 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
225 IRN_VRFY_IRG(res, irg);
231 * Creates a Phi node with all predecessors. Calling this constructor
232 * is only allowed if the corresponding block is mature.
/* If the block is already matured, the Phi arity must match the block's
 * predecessor count.  A Phi with any Unknown input is not optimized
 * (optimize_node is skipped when has_unknown is set), since its value may
 * still change during SSA construction.  Memory-mode Phis are kept alive
 * via an End keepalive edge because memory Phis in endless loops would
 * otherwise be unreachable from End and get removed.
 * NOTE(review): line-sampled excerpt — declarations of res/i/has_unknown
 * and the loop body setting has_unknown are not visible here. */
235 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
237 ir_graph *irg = current_ir_graph;
241 /* Don't assert that block matured: the use of this constructor is strongly
243 if (get_Block_matured(block))
244 assert(get_irn_arity(block) == arity);
246 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
248 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
250 for (i = arity - 1; i >= 0; --i)
251 if (is_Unknown(in[i])) {
256 if (!has_unknown) res = optimize_node(res);
257 IRN_VRFY_IRG(res, irg);
259 /* Memory Phis in endless loops must be kept alive.
260 As we can't distinguish these easily we keep all of them alive. */
261 if (is_Phi(res) && mode == mode_M)
262 add_End_keepalive(get_irg_end(irg), res);
/* Creates a typed Const node from a tarval.  The node is always placed in
 * the start block of current_ir_graph and its mode is taken from the
 * tarval.  set_Const_type() is used (rather than a raw attribute write)
 * for its assertion checks, and the type is re-asserted after
 * optimize_node() possibly replaced the node with an existing one. */
267 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
269 ir_graph *irg = current_ir_graph;
271 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
272 res->attr.con.tv = con;
273 set_Const_type(res, tp);  /* Call method because of complex assertion. */
274 res = optimize_node (res);
275 assert(get_Const_type(res) == tp);
276 IRN_VRFY_IRG(res, irg);
279 }  /* new_bd_Const_type */
282 new_bd_Const(dbg_info *db, tarval *con) {
283 ir_graph *irg = current_ir_graph;
285 return new_rd_Const_type (db, irg, con, firm_unknown_type);
289 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
290 ir_graph *irg = current_ir_graph;
292 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
293 } /* new_bd_Const_long */
297 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
299 ir_graph *irg = current_ir_graph;
301 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
302 res = optimize_node(res);
303 IRN_VRFY_IRG(res, irg);
308 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
311 ir_graph *irg = current_ir_graph;
313 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
314 res->attr.proj = proj;
317 assert(get_Proj_pred(res));
318 assert(get_nodes_block(get_Proj_pred(res)));
320 res = optimize_node(res);
322 IRN_VRFY_IRG(res, irg);
328 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
331 ir_graph *irg = current_ir_graph;
333 assert(arg->op == op_Cond);
334 arg->attr.cond.kind = fragmentary;
335 arg->attr.cond.default_proj = max_proj;
336 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
338 } /* new_bd_defaultProj */
342 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
344 ir_graph *irg = current_ir_graph;
346 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
347 res->attr.conv.strict = strict_flag;
348 res = optimize_node(res);
349 IRN_VRFY_IRG(res, irg);
354 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
356 ir_graph *irg = current_ir_graph;
358 assert(is_atomic_type(to_tp));
360 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
361 res->attr.cast.type = to_tp;
362 res = optimize_node(res);
363 IRN_VRFY_IRG(res, irg);
368 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
370 ir_graph *irg = current_ir_graph;
372 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
373 res = optimize_node (res);
374 IRN_VRFY_IRG(res, irg);
399 /** Creates a remainderless Div node. */
/* Same construction as the NEW_BD_DIVOP-generated Div, except
 * no_remainder is set to 1, marking a division whose remainder result is
 * known to be unused.  The node itself is an op_Div of mode_T (tuple of
 * memory/exception/result projections); the arithmetic result mode is
 * stored in attr.divmod.resmode.
 * NOTE(review): line-sampled excerpt — the in[] setup from
 * memop/op1/op2 and the return are not visible here. */
400 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
401 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
405 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
410 res->attr.divmod.exc.pin_state = state;
411 res->attr.divmod.resmode = mode;
412 res->attr.divmod.no_remainder = 1;
413 res = optimize_node(res);
414 IRN_VRFY_IRG(res, irg);
419 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
422 ir_graph *irg = current_ir_graph;
425 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
426 res = optimize_node(res);
427 IRN_VRFY_IRG(res, irg);
432 new_bd_Jmp(dbg_info *db, ir_node *block) {
434 ir_graph *irg = current_ir_graph;
436 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
437 res = optimize_node(res);
438 IRN_VRFY_IRG(res, irg);
443 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
445 ir_graph *irg = current_ir_graph;
447 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
448 res = optimize_node(res);
449 IRN_VRFY_IRG(res, irg);
454 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
456 ir_graph *irg = current_ir_graph;
458 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
459 res->attr.cond.kind = dense;
460 res->attr.cond.default_proj = 0;
461 res->attr.cond.pred = COND_JMP_PRED_NONE;
462 res = optimize_node(res);
463 IRN_VRFY_IRG(res, irg);
/* Creates a Call node: in[0..1] are the memory store and the callee
 * pointer (copied into r_in before the visible memcpy — not shown in this
 * sampled excerpt), followed by the arity actual arguments.  tp must be
 * the unknown type or a method type.  Calls start pinned and with no
 * callee array (callee analysis fills that in later). */
468 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
469 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
473 ir_graph *irg = current_ir_graph;
476 NEW_ARR_A(ir_node *, r_in, r_arity); /* alloca'd temp array of all inputs */
479 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
481 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
483 assert((get_unknown_type() == tp) || is_Method_type(tp));
484 set_Call_type(res, tp);
485 res->attr.call.exc.pin_state = op_pin_state_pinned;
486 res->attr.call.callee_arr = NULL;
487 res = optimize_node(res);
488 IRN_VRFY_IRG(res, irg);
493 new_bd_Builtin(dbg_info *db, ir_node *block, ir_node *store,
494 int arity, ir_node **in, ir_builtin_kind kind, ir_type *tp) {
498 ir_graph *irg = current_ir_graph;
501 NEW_ARR_A(ir_node *, r_in, r_arity);
503 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
505 res = new_ir_node(db, irg, block, op_Builtin, mode_T, r_arity, r_in);
507 assert((get_unknown_type() == tp) || is_Method_type(tp));
508 res->attr.builtin.exc.pin_state = op_pin_state_pinned;
509 res->attr.builtin.kind = kind;
510 res->attr.builtin.type = tp;
511 res = optimize_node(res);
512 IRN_VRFY_IRG(res, irg);
514 } /* new_bd_Buildin */
517 new_bd_Return(dbg_info *db, ir_node *block,
518 ir_node *store, int arity, ir_node **in) {
522 ir_graph *irg = current_ir_graph;
525 NEW_ARR_A (ir_node *, r_in, r_arity);
527 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
528 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
529 res = optimize_node(res);
530 IRN_VRFY_IRG(res, irg);
532 } /* new_bd_Return */
/* Creates a Load node (mode_T tuple) with inputs store and address
 * (the in[] setup is not visible in this sampled excerpt).  The
 * ir_cons_flags bits translate into attributes: cons_floats -> floating
 * pin state, cons_volatile -> volatile access, cons_unaligned ->
 * non-aligned access.  The loaded value's mode is kept in
 * attr.load.mode. */
535 new_bd_Load(dbg_info *db, ir_node *block,
536 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
539 ir_graph *irg = current_ir_graph;
543 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
544 res->attr.load.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
545 res->attr.load.mode = mode;
546 res->attr.load.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
547 res->attr.load.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
548 res = optimize_node(res);
549 IRN_VRFY_IRG(res, irg);
/* Creates a Store node (mode_T tuple) with inputs store, address and
 * value (the in[] setup is not visible in this sampled excerpt).  Flag
 * handling mirrors new_bd_Load: cons_floats -> floating pin state,
 * cons_volatile -> volatile, cons_unaligned -> non-aligned. */
554 new_bd_Store(dbg_info *db, ir_node *block,
555 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
558 ir_graph *irg = current_ir_graph;
563 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
564 res->attr.store.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
565 res->attr.store.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
566 res->attr.store.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
568 res = optimize_node(res);
569 IRN_VRFY_IRG(res, irg);
574 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
575 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
578 ir_graph *irg = current_ir_graph;
582 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
583 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
584 res->attr.alloc.where = where;
585 res->attr.alloc.type = alloc_type;
586 res = optimize_node(res);
587 IRN_VRFY_IRG(res, irg);
592 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
593 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
596 ir_graph *irg = current_ir_graph;
601 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
602 res->attr.free.where = where;
603 res->attr.free.type = free_type;
604 res = optimize_node(res);
605 IRN_VRFY_IRG(res, irg);
/* Creates a Sel node selecting entity ent from objptr, with arity extra
 * index inputs for array element selection.  The result mode is
 * mode_P_code when the entity has a method type (function selection),
 * otherwise mode_P_data.  in[0..1] are store and objptr (copied into
 * r_in before the visible memcpy — not shown in this sampled excerpt). */
611 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
612 int arity, ir_node **in, ir_entity *ent) {
616 ir_graph *irg = current_ir_graph;
617 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
619 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
622 NEW_ARR_A(ir_node *, r_in, r_arity);  /* uses alloca */
625 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
627 * Sel's can select functions which should be of mode mode_P_code.
629 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
630 res->attr.sel.entity = ent;
631 res = optimize_node(res);
632 IRN_VRFY_IRG(res, irg);
/* Creates a SymConst node of the given mode carrying a symbolic constant
 * (entity address/offset, type tag/size/alignment, or ident — selected by
 * symkind; the payload lives in the symconst_symbol union).  SymConsts
 * have no data inputs; tp records the type the constant belongs to. */
637 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
638 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
639 ir_graph *irg = current_ir_graph;
640 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
642 res->attr.symc.kind = symkind;
643 res->attr.symc.sym = value;
644 res->attr.symc.tp = tp;
646 res = optimize_node(res);
647 IRN_VRFY_IRG(res, irg);
649 }  /* new_bd_SymConst_type */
652 new_bd_Sync(dbg_info *db, ir_node *block) {
654 ir_graph *irg = current_ir_graph;
656 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
657 /* no need to call optimize node here, Sync are always created with no predecessors */
658 IRN_VRFY_IRG(res, irg);
664 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
665 ir_node *in[2], *res;
666 ir_graph *irg = current_ir_graph;
670 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
671 res->attr.confirm.cmp = cmp;
672 res = optimize_node(res);
673 IRN_VRFY_IRG(res, irg);
675 } /* new_bd_Confirm */
678 new_bd_Unknown(ir_mode *m) {
680 ir_graph *irg = current_ir_graph;
682 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
683 res = optimize_node(res);
685 } /* new_bd_Unknown */
689 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
692 ir_graph *irg = current_ir_graph;
694 in[0] = get_Call_ptr(call);
695 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
696 /* res->attr.callbegin.irg = irg; */
697 res->attr.callbegin.call = call;
698 res = optimize_node(res);
699 IRN_VRFY_IRG(res, irg);
701 } /* new_bd_CallBegin */
704 new_bd_EndReg(dbg_info *db, ir_node *block) {
706 ir_graph *irg = current_ir_graph;
708 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
709 set_irg_end_reg(irg, res);
710 IRN_VRFY_IRG(res, irg);
712 } /* new_bd_EndReg */
715 new_bd_EndExcept(dbg_info *db, ir_node *block) {
717 ir_graph *irg = current_ir_graph;
719 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
720 set_irg_end_except(irg, res);
721 IRN_VRFY_IRG (res, irg);
723 } /* new_bd_EndExcept */
727 new_bd_Break(dbg_info *db, ir_node *block) {
729 ir_graph *irg = current_ir_graph;
731 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
732 res = optimize_node(res);
733 IRN_VRFY_IRG(res, irg);
738 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
741 ir_graph *irg = current_ir_graph;
743 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
744 res->attr.filter.proj = proj;
745 res->attr.filter.in_cg = NULL;
746 res->attr.filter.backedge = NULL;
749 assert(get_Proj_pred(res));
750 assert(get_nodes_block(get_Proj_pred(res)));
752 res = optimize_node(res);
753 IRN_VRFY_IRG(res, irg);
755 } /* new_bd_Filter */
758 new_bd_Mux(dbg_info *db, ir_node *block,
759 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
762 ir_graph *irg = current_ir_graph;
768 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
771 res = optimize_node(res);
772 IRN_VRFY_IRG(res, irg);
777 new_bd_CopyB(dbg_info *db, ir_node *block,
778 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
781 ir_graph *irg = current_ir_graph;
787 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
789 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
790 res->attr.copyb.type = data_type;
791 res = optimize_node(res);
792 IRN_VRFY_IRG(res, irg);
797 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
798 ir_node *objptr, ir_type *type) {
801 ir_graph *irg = current_ir_graph;
805 res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
806 res->attr.instof.exc.pin_state = op_pin_state_floats;
807 res->attr.instof.type = type;
808 res = optimize_node(res);
809 IRN_VRFY_IRG(res, irg);
811 } /* new_bd_InstOf */
814 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
817 ir_graph *irg = current_ir_graph;
821 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
822 res = optimize_node(res);
823 IRN_VRFY_IRG(res, irg);
828 new_bd_Bound(dbg_info *db, ir_node *block,
829 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
832 ir_graph *irg = current_ir_graph;
838 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
839 res->attr.bound.exc.pin_state = op_pin_state_pinned;
840 res = optimize_node(res);
841 IRN_VRFY_IRG(res, irg);
846 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
848 ir_graph *irg = current_ir_graph;
850 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
851 res = optimize_node(res);
852 IRN_VRFY_IRG(res, irg);
/* Creates an ASM node (inline assembler) with arity value inputs.  The
 * input/output constraint arrays and the clobber list are copied onto the
 * graph's obstack so the caller's arrays need not outlive the node.  ASM
 * nodes are constructed pinned.
 * NOTE(review): line-sampled excerpt — the res declaration and return
 * are not visible here. */
858 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
859 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
861 ir_graph *irg = current_ir_graph;
863 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
864 res->attr.assem.pin_state = op_pin_state_pinned;
865 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
866 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
867 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
868 res->attr.assem.asm_text = asm_text;
870 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
871 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
872 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
874 res = optimize_node(res);
875 IRN_VRFY_IRG(res, irg);
879 /* --------------------------------------------- */
880 /* private interfaces, for professional use only */
881 /* --------------------------------------------- */
884 /* Constructs a Block with a fixed number of predecessors.
885 Does not set current_block. Can not be used with automatic
886 Phi node construction. */
888 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
889 ir_graph *rem = current_ir_graph;
892 current_ir_graph = irg;
893 res = new_bd_Block(db, arity, in);
894 current_ir_graph = rem;
900 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
901 ir_graph *rem = current_ir_graph;
904 current_ir_graph = irg;
905 res = new_bd_Start(db, block);
906 current_ir_graph = rem;
912 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
914 ir_graph *rem = current_ir_graph;
916 current_ir_graph = irg;
917 res = new_bd_End(db, block);
918 current_ir_graph = rem;
924 /* Creates a Phi node with all predecessors. Calling this constructor
925 is only allowed if the corresponding block is mature. */
927 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
929 ir_graph *rem = current_ir_graph;
931 current_ir_graph = irg;
932 res = new_bd_Phi(db, block,arity, in, mode);
933 current_ir_graph = rem;
939 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
941 ir_graph *rem = current_ir_graph;
943 current_ir_graph = irg;
944 res = new_bd_Const_type(db, con, tp);
945 current_ir_graph = rem;
948 } /* new_rd_Const_type */
951 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
953 //#ifdef USE_ORIGINAL
954 ir_graph *rem = current_ir_graph;
956 current_ir_graph = irg;
957 res = new_bd_Const_type(db, con, firm_unknown_type);
958 current_ir_graph = rem;
960 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
967 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
968 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
969 } /* new_rd_Const_long */
973 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
975 ir_graph *rem = current_ir_graph;
977 current_ir_graph = irg;
978 res = new_bd_Id(db, block, val, mode);
979 current_ir_graph = rem;
985 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
988 ir_graph *rem = current_ir_graph;
990 current_ir_graph = irg;
991 res = new_bd_Proj(db, block, arg, mode, proj);
992 current_ir_graph = rem;
999 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1002 ir_graph *rem = current_ir_graph;
1004 current_ir_graph = irg;
1005 res = new_bd_defaultProj(db, block, arg, max_proj);
1006 current_ir_graph = rem;
1009 } /* new_rd_defaultProj */
1013 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1015 ir_graph *rem = current_ir_graph;
1017 current_ir_graph = irg;
1018 res = new_bd_Conv(db, block, op, mode, 0);
1019 current_ir_graph = rem;
1025 new_rd_strictConv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1027 ir_graph *rem = current_ir_graph;
1029 current_ir_graph = irg;
1030 res = new_bd_Conv(db, block, op, mode, 1);
1031 current_ir_graph = rem;
1034 } /* new_rd_strictConv */
1037 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1039 ir_graph *rem = current_ir_graph;
1041 current_ir_graph = irg;
1042 res = new_bd_Cast(db, block, op, to_tp);
1043 current_ir_graph = rem;
1049 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1051 ir_graph *rem = current_ir_graph;
1053 current_ir_graph = irg;
1054 res = new_bd_Tuple(db, block, arity, in);
1055 current_ir_graph = rem;
1058 } /* new_rd_Tuple */
1066 NEW_RD_DIVOP(DivMod)
1079 NEW_RD_BINOP(Borrow)
1081 /* creates a rd constructor for an divRL */
1082 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1083 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1086 ir_graph *rem = current_ir_graph;
1087 current_ir_graph = irg;
1088 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1089 current_ir_graph = rem;
1094 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1095 ir_node *op1, ir_node *op2) {
1097 ir_graph *rem = current_ir_graph;
1099 current_ir_graph = irg;
1100 res = new_bd_Cmp(db, block, op1, op2);
1101 current_ir_graph = rem;
1107 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1109 ir_graph *rem = current_ir_graph;
1111 current_ir_graph = irg;
1112 res = new_bd_Jmp(db, block);
1113 current_ir_graph = rem;
1119 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1121 ir_graph *rem = current_ir_graph;
1123 current_ir_graph = irg;
1124 res = new_bd_IJmp(db, block, tgt);
1125 current_ir_graph = rem;
1131 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1133 ir_graph *rem = current_ir_graph;
1135 current_ir_graph = irg;
1136 res = new_bd_Cond(db, block, c);
1137 current_ir_graph = rem;
1143 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1144 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1146 ir_graph *rem = current_ir_graph;
1148 current_ir_graph = irg;
1149 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1150 current_ir_graph = rem;
1156 new_rd_Builtin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1157 int arity, ir_node **in, ir_builtin_kind kind, ir_type *tp) {
1159 ir_graph *rem = current_ir_graph;
1161 current_ir_graph = irg;
1162 res = new_bd_Builtin(db, block, store, arity, in, kind, tp);
1163 current_ir_graph = rem;
1166 } /* new_rd_Builtin */
1169 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1170 ir_node *store, int arity, ir_node **in) {
1172 ir_graph *rem = current_ir_graph;
1174 current_ir_graph = irg;
1175 res = new_bd_Return(db, block, store, arity, in);
1176 current_ir_graph = rem;
1179 } /* new_rd_Return */
1182 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1183 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1185 ir_graph *rem = current_ir_graph;
1187 current_ir_graph = irg;
1188 res = new_bd_Load(db, block, store, adr, mode, flags);
1189 current_ir_graph = rem;
1195 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1196 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1198 ir_graph *rem = current_ir_graph;
1200 current_ir_graph = irg;
1201 res = new_bd_Store(db, block, store, adr, val, flags);
1202 current_ir_graph = rem;
1205 } /* new_rd_Store */
1208 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1209 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1211 ir_graph *rem = current_ir_graph;
1213 current_ir_graph = irg;
1214 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1215 current_ir_graph = rem;
1218 } /* new_rd_Alloc */
1221 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1222 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1224 ir_graph *rem = current_ir_graph;
1226 current_ir_graph = irg;
1227 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1228 current_ir_graph = rem;
1235 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1236 ir_node *store, ir_node *objptr, ir_entity *ent) {
1238 ir_graph *rem = current_ir_graph;
1240 current_ir_graph = irg;
1241 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1242 current_ir_graph = rem;
1245 } /* new_rd_simpleSel */
1249 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1250 int arity, ir_node **in, ir_entity *ent) {
1252 ir_graph *rem = current_ir_graph;
1254 current_ir_graph = irg;
1255 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1256 current_ir_graph = rem;
1263 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1264 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1266 ir_graph *rem = current_ir_graph;
1268 current_ir_graph = irg;
1269 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1270 current_ir_graph = rem;
1273 } /* new_rd_SymConst_type */
1276 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1277 symconst_symbol value, symconst_kind symkind) {
1278 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1279 } /* new_rd_SymConst */
1281 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1282 symconst_symbol sym;
1283 sym.entity_p = symbol;
1284 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1285 } /* new_rd_SymConst_addr_ent */
1287 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1288 symconst_symbol sym;
1289 sym.entity_p = symbol;
1290 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1291 } /* new_rd_SymConst_ofs_ent */
1293 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1294 symconst_symbol sym;
1295 sym.ident_p = symbol;
1296 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1297 } /* new_rd_SymConst_addr_name */
1299 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1300 symconst_symbol sym;
1301 sym.type_p = symbol;
1302 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1303 } /* new_rd_SymConst_type_tag */
1305 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1306 symconst_symbol sym;
1307 sym.type_p = symbol;
1308 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1309 } /* new_rd_SymConst_size */
1311 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1312 symconst_symbol sym;
1313 sym.type_p = symbol;
1314 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1315 } /* new_rd_SymConst_align */
/* Creates a Sync node in irg: the Sync is first built empty (new_bd_Sync
 * creates it with a dynamic -1 arity and skips optimize_node), then the
 * arity predecessors are appended one by one with add_Sync_pred.
 * Temporarily redirects current_ir_graph, restoring it afterwards.
 * NOTE(review): line-sampled excerpt — declarations of res/i and the
 * return are not visible here. */
1318 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1320 ir_graph *rem = current_ir_graph;
1323 current_ir_graph = irg;
1324 res = new_bd_Sync(db, block);
1325 current_ir_graph = rem;
1327 for (i = 0; i < arity; ++i)
1328 add_Sync_pred(res, in[i]);
1335 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1337 ir_graph *rem = current_ir_graph;
1339 current_ir_graph = irg;
1340 res = new_bd_Confirm(db, block, val, bound, cmp);
1341 current_ir_graph = rem;
1344 } /* new_rd_Confirm */
1347 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1349 ir_graph *rem = current_ir_graph;
1351 current_ir_graph = irg;
1352 res = new_bd_Unknown(m);
1353 current_ir_graph = rem;
1356 } /* new_rd_Unknown */
1360 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1362 ir_graph *rem = current_ir_graph;
1364 current_ir_graph = irg;
1365 res = new_bd_CallBegin(db, block, call);
1366 current_ir_graph = rem;
1369 } /* new_rd_CallBegin */
1372 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1375 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1376 set_irg_end_reg(irg, res);
1377 IRN_VRFY_IRG(res, irg);
1379 } /* new_rd_EndReg */
1382 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1385 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1386 set_irg_end_except(irg, res);
1387 IRN_VRFY_IRG (res, irg);
1389 } /* new_rd_EndExcept */
/* Construct a Break node on irg by delegating to new_bd_Break with the
   target graph made current. */
1393 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1395 ir_graph *rem = current_ir_graph;
1397 current_ir_graph = irg;
1398 res = new_bd_Break(db, block);
1399 current_ir_graph = rem;
1402 } /* new_rd_Break */
/* Construct a Filter node projecting component `proj` of arg in the given
   mode. NOTE(review): the `long proj` parameter and closing of the signature
   are on lines elided from this excerpt. */
1405 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1408 ir_graph *rem = current_ir_graph;
1410 current_ir_graph = irg;
1411 res = new_bd_Filter(db, block, arg, mode, proj);
1412 current_ir_graph = rem;
1415 } /* new_rd_Filter */
/* Construct a Mux node selecting between ir_false and ir_true on sel. */
1418 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1419 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1421 ir_graph *rem = current_ir_graph;
1423 current_ir_graph = irg;
1424 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1425 current_ir_graph = rem;
/* Construct a CopyB node (block copy of data_type from src to dst),
   delegating to new_bd_CopyB with irg made current. */
1430 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1431 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1433 ir_graph *rem = current_ir_graph;
1435 current_ir_graph = irg;
1436 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1437 current_ir_graph = rem;
1440 } /* new_rd_CopyB */
/* Construct an InstOf node for objptr against type (see new_bd_InstOf). */
1443 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1444 ir_node *objptr, ir_type *type) {
1446 ir_graph *rem = current_ir_graph;
1448 current_ir_graph = irg;
1449 res = new_bd_InstOf(db, block, store, objptr, type);
1450 current_ir_graph = rem;
1453 } /* new_rd_InstOf */
/* Construct a Raise node carrying exception object obj. */
1456 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1458 ir_graph *rem = current_ir_graph;
1460 current_ir_graph = irg;
1461 res = new_bd_Raise(db, block, store, obj);
1462 current_ir_graph = rem;
1465 } /* new_rd_Raise */
/* Construct a Bound node checking idx against the lower/upper bounds. */
1467 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1468 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1470 ir_graph *rem = current_ir_graph;
1472 current_ir_graph = irg;
1473 res = new_bd_Bound(db, block, store, idx, lower, upper);
1474 current_ir_graph = rem;
1477 } /* new_rd_Bound */
/* Construct a Pin node fixing `node` to the given block. */
1479 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1481 ir_graph *rem = current_ir_graph;
1483 current_ir_graph = irg;
1484 res = new_bd_Pin(db, block, node);
1485 current_ir_graph = rem;
/* Construct an ASM node with the given inputs, output constraints, clobber
   list and assembler text, delegating to new_bd_ASM with irg made current. */
1491 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1492 int arity, ir_node *in[], ir_asm_constraint *inputs,
1493 int n_outs, ir_asm_constraint *outputs,
1494 int n_clobber, ident *clobber[], ident *asm_text) {
1496 ir_graph *rem = current_ir_graph;
1498 current_ir_graph = irg;
1499 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1500 current_ir_graph = rem;
/* ------------------------------------------------------------------------ */
/* new_r_* constructors: thin convenience wrappers that forward to the      */
/* corresponding new_rd_* constructor with a NULL dbg_info.                 */
/* ------------------------------------------------------------------------ */
1507 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1508 return new_rd_Block(NULL, irg, arity, in);
1510 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1511 return new_rd_Start(NULL, irg, block);
1513 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1514 return new_rd_End(NULL, irg, block);
1516 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1517 return new_rd_Jmp(NULL, irg, block);
1519 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1520 return new_rd_IJmp(NULL, irg, block, tgt);
1522 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1523 return new_rd_Cond(NULL, irg, block, c);
1525 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1526 ir_node *store, int arity, ir_node **in) {
1527 return new_rd_Return(NULL, irg, block, store, arity, in);
1530 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
1531 return new_rd_Const(NULL, irg, con);
1533 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
1534 return new_rd_Const_long(NULL, irg, mode, value);
1536 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
1537 return new_rd_Const_type(NULL, irg, con, tp);
1539 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1540 symconst_symbol value, symconst_kind symkind) {
1541 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
/* simpleSel: Sel without array indices (n_index = 0, index = NULL). */
1543 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1544 ir_node *objptr, ir_entity *ent) {
1545 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1548 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1549 ir_node *objptr, int n_index, ir_node **index,
1551 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1553 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1554 ir_node *callee, int arity, ir_node **in,
1556 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1558 ir_node *new_r_Builtin(ir_graph *irg, ir_node *block, ir_node *store,
1559 int arity, ir_node **in, ir_builtin_kind kind,
1561 return new_rd_Builtin(NULL, irg, block, store, arity, in, kind, tp);
1563 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1564 ir_node *op1, ir_node *op2, ir_mode *mode) {
1565 return new_rd_Add(NULL, irg, block, op1, op2, mode);
/* new_r_* wrappers for arithmetic, shift and compare nodes: each forwards
   to its new_rd_* counterpart with a NULL dbg_info. */
1567 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1568 ir_node *op1, ir_node *op2, ir_mode *mode) {
1569 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1571 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1572 ir_node *op, ir_mode *mode) {
1573 return new_rd_Minus(NULL, irg, block, op, mode);
1575 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1576 ir_node *op1, ir_node *op2, ir_mode *mode) {
1577 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1579 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1580 ir_node *op1, ir_node *op2, ir_mode *mode) {
1581 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
/* The division family additionally takes a memory operand and a pin state. */
1583 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1584 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1585 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1587 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1588 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1589 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1591 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1592 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1593 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1595 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1596 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1597 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1599 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1600 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1601 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1603 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1604 ir_node *op, ir_mode *mode) {
1605 return new_rd_Abs(NULL, irg, block, op, mode);
1607 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1608 ir_node *op1, ir_node *op2, ir_mode *mode) {
1609 return new_rd_And(NULL, irg, block, op1, op2, mode);
1611 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1612 ir_node *op1, ir_node *op2, ir_mode *mode) {
1613 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1615 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1616 ir_node *op1, ir_node *op2, ir_mode *mode) {
1617 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1619 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1620 ir_node *op, ir_mode *mode) {
1621 return new_rd_Not(NULL, irg, block, op, mode);
/* Shift/rotate nodes: k is the shift amount operand. */
1623 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1624 ir_node *op, ir_node *k, ir_mode *mode) {
1625 return new_rd_Shl(NULL, irg, block, op, k, mode);
1627 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1628 ir_node *op, ir_node *k, ir_mode *mode) {
1629 return new_rd_Shr(NULL, irg, block, op, k, mode);
1631 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1632 ir_node *op, ir_node *k, ir_mode *mode) {
1633 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1635 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1636 ir_node *op, ir_node *k, ir_mode *mode) {
1637 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1639 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1640 ir_node *op, ir_node *k, ir_mode *mode) {
1641 return new_rd_Carry(NULL, irg, block, op, k, mode);
1643 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1644 ir_node *op, ir_node *k, ir_mode *mode) {
1645 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1647 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1648 ir_node *op1, ir_node *op2) {
1649 return new_rd_Cmp(NULL, irg, block, op1, op2);
/* new_r_* wrappers for conversion, Phi, memory and tuple nodes. */
1651 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1652 ir_node *op, ir_mode *mode) {
1653 return new_rd_Conv(NULL, irg, block, op, mode);
1655 ir_node *new_r_strictConv(ir_graph *irg, ir_node *block,
1656 ir_node *op, ir_mode *mode) {
1657 return new_rd_strictConv(NULL, irg, block, op, mode);
1660 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1661 ir_node **in, ir_mode *mode) {
1662 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1665 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1666 return new_rd_Cast(NULL, irg, block, op, to_tp);
1668 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1669 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1670 return new_rd_Load(NULL, irg, block, store, adr, mode, flags);
1672 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1673 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1674 return new_rd_Store(NULL, irg, block, store, adr, val, flags);
1676 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1677 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1678 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1680 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1681 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1682 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1685 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1686 return new_rd_Sync(NULL, irg, block, arity, in);
1689 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1690 ir_mode *mode, long proj) {
1691 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1694 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1696 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1699 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1700 int arity, ir_node **in) {
1701 return new_rd_Tuple(NULL, irg, block, arity, in );
1703 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1704 ir_node *val, ir_mode *mode) {
1705 return new_rd_Id(NULL, irg, block, val, mode);
/* Bad and NoMem are singletons per graph: return the cached node instead of
   constructing a new one. All other wrappers forward with a NULL dbg_info. */
1708 ir_node *new_r_Bad(ir_graph *irg) {
1709 return get_irg_bad(irg);
1712 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1713 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1715 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1716 return new_rd_Unknown(irg, m);
1719 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1720 return new_rd_CallBegin(NULL, irg, block, callee);
1722 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1723 return new_rd_EndReg(NULL, irg, block);
1725 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1726 return new_rd_EndExcept(NULL, irg, block);
1729 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1730 return new_rd_Break(NULL, irg, block);
1732 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1733 ir_mode *mode, long proj) {
1734 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1737 ir_node *new_r_NoMem(ir_graph *irg) {
1738 return get_irg_no_mem(irg);
1741 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1742 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1743 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1745 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1746 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1747 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1749 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1751 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1753 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1754 ir_node *store, ir_node *obj) {
1755 return new_rd_Raise(NULL, irg, block, store, obj);
1757 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1758 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1759 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1761 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1762 return new_rd_Pin(NULL, irg, block, node);
1765 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1766 int arity, ir_node *in[], ir_asm_constraint *inputs,
1767 int n_outs, ir_asm_constraint *outputs,
1768 int n_clobber, ident *clobber[], ident *asm_text) {
1769 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1772 /** ********************/
1773 /** public interfaces */
1774 /** construction tools */
1780 * - create a new Start node in the current block
1782 * @return s - pointer to the created Start node
/* new_d_* constructors operate on current_ir_graph and its current_block. */
1787 new_d_Start(dbg_info *db) {
1790 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1791 op_Start, mode_T, 0, NULL);
1793 res = optimize_node(res);
1794 IRN_VRFY_IRG(res, current_ir_graph);
/* Create the End node (mode_X, dynamic arity -1) in the current block of
   the current graph, then optimize and verify it. */
1799 new_d_End(dbg_info *db) {
1801 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1802 op_End, mode_X, -1, NULL);
1803 res = optimize_node(res);
1804 IRN_VRFY_IRG(res, current_ir_graph);
1809 /* Constructs a Block with a fixed number of predecessors.
1810 Does set current_block. Can be used with automatic Phi
1811 node construction. */
/* Create a Block with `arity` fixed predecessors and make it the current
   block. Allocates the graph_arr used by on-the-fly Phi construction. */
1813 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1816 int has_unknown = 0;
/* NOTE(review): the two constructor calls below are the USE_ORIGINAL and
   generated variants; the selecting #ifdef lines are elided in this excerpt. */
1819 res = new_bd_Block(db, arity, in);
1821 res = new_rd_Block(db, current_ir_graph, arity, in);
1824 /* Create and initialize array for Phi-node construction. */
1825 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1826 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1827 current_ir_graph->n_loc);
1828 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Blocks with an Unknown predecessor must not be optimized away yet. */
1831 for (i = arity-1; i >= 0; i--)
1832 if (is_Unknown(in[i])) {
1837 if (!has_unknown) res = optimize_node(res);
1839 current_ir_graph->current_block = res;
1841 IRN_VRFY_IRG(res, current_ir_graph);
1847 /* ***********************************************************************/
1848 /* Methods necessary for automatic Phi node creation */
1850 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1851 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1852 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1853 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1855 Call Graph: ( A ---> B == A "calls" B)
1857 get_value mature_immBlock
1865 get_r_value_internal |
1869 new_rd_Phi0 new_rd_Phi_in
1871 * *************************************************************************** */
1873 /** Creates a Phi node with 0 predecessors. */
1874 static inline ir_node *
1875 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
/* A Phi0 is a placeholder for a not-yet-known value in an immature block;
   it is completed (or replaced) when the block is matured. */
1878 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1879 IRN_VRFY_IRG(res, irg);
1885 * Internal constructor of a Phi node by a phi_merge operation.
1887 * @param irg the graph on which the Phi will be constructed
1888 * @param block the block in which the Phi will be constructed
1889 * @param mode the mode of the Phi node
1890 * @param in the input array of the phi node
1891 * @param ins number of elements in the input array
1892 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1893 * the value for which the new Phi is constructed
1895 static inline ir_node *
1896 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1897 ir_node **in, int ins, ir_node *phi0) {
1899 ir_node *res, *known;
1901 /* Allocate a new node on the obstack. The allocation copies the in
1903 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1904 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1906 /* This loop checks whether the Phi has more than one predecessor.
1907 If so, it is a real Phi node and we break the loop. Else the
1908 Phi node merges the same definition on several paths and therefore
1910 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
1912 for (i = ins - 1; i >= 0; --i) {
1915 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1917 /* Optimize self referencing Phis: We can't detect them yet properly, as
1918 they still refer to the Phi0 they will replace. So replace right now. */
1919 if (phi0 && in[i] == phi0)
/* A predecessor equal to the Phi itself or to the single known value
   does not make the Phi "real". */
1922 if (in[i] == res || in[i] == known)
1931 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* Undo the speculative allocation: the node goes away again. */
1934 edges_node_deleted(res, current_ir_graph);
1935 obstack_free(current_ir_graph->obst, res);
1936 if (is_Phi(known)) {
1937 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1938 order, an enclosing Phi node may become superfluous. */
1939 res = optimize_in_place_2(known);
1941 exchange(known, res);
1946 /* An undefined value, e.g., in unreachable code. */
1950 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1951 IRN_VRFY_IRG(res, irg);
1952 /* Memory Phis in endless loops must be kept alive.
1953 As we can't distinguish these easily we keep all of them alive. */
1954 if (is_Phi(res) && mode == mode_M)
1955 add_End_keepalive(get_irg_end(irg), res);
1959 } /* new_rd_Phi_in */
1962 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1964 #if PRECISE_EXC_CONTEXT
1966 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1969 * Construct a new frag_array for node n.
1970 * Copy the content from the current graph_arr of the corresponding block:
1971 * this is the current state.
1972 * Set ProjM(n) as current memory state.
1973 * Further the last entry in frag_arr of current block points to n. This
1974 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1976 static inline ir_node **new_frag_arr(ir_node *n) {
/* Snapshot the current block's graph_arr as the frag array of fragile op n,
   and install ProjM(n) as its memory state (slot 0). */
1980 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1981 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1982 sizeof(ir_node *)*current_ir_graph->n_loc);
1984 /* turn off optimization before allocating Proj nodes, as res isn't
1986 opt = get_opt_optimize(); set_optimize(0);
1987 /* Here we rely on the fact that all frag ops have Memory as first result! */
1989 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1990 } else if (is_CopyB(n)) {
1991 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops share the same memory Proj number; the asserts
   document (and enforce) that equality. */
1993 assert((pn_Quot_M == pn_DivMod_M) &&
1994 (pn_Quot_M == pn_Div_M) &&
1995 (pn_Quot_M == pn_Mod_M) &&
1996 (pn_Quot_M == pn_Load_M) &&
1997 (pn_Quot_M == pn_Store_M) &&
1998 (pn_Quot_M == pn_Alloc_M) &&
1999 (pn_Quot_M == pn_Bound_M));
2000 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain: the last slot of the block's graph_arr points at the newest frag op. */
2004 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2006 } /* new_frag_arr */
2009 * Returns the frag_arr from a node.
/* Return the frag array stored in n's attributes; the attribute location
   depends on the opcode (case labels are elided in this excerpt). */
2011 static inline ir_node **get_frag_arr(ir_node *n) {
2012 switch (get_irn_opcode(n)) {
2014 return n->attr.call.exc.frag_arr;
2016 return n->attr.alloc.exc.frag_arr;
2018 return n->attr.load.exc.frag_arr;
2020 return n->attr.store.exc.frag_arr;
2022 return n->attr.except.frag_arr;
2024 } /* get_frag_arr */
/* Record val at position pos in frag_arr and in all chained frag arrays,
   without overwriting existing entries. The loop bound (1024) only guards
   against endless recursion through a cyclic frag chain. */
2027 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2028 #ifdef DEBUG_libfirm
2031 for (i = 1024; i >= 0; --i)
2036 if (frag_arr[pos] == NULL)
2037 frag_arr[pos] = val;
/* Follow the chain to the next fragile op's frag array, if any. */
2038 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
2039 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2040 assert(arr != frag_arr && "Endless recursion detected");
2045 assert(!"potential endless recursion in set_frag_value");
2046 } /* set_frag_value */
/* Return the value at pos as seen after the fragile op cfOp, building a Phi
   (or Phi0 for immature blocks) if a later set_value() shadowed it. */
2049 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2053 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
2055 frag_arr = get_frag_arr(cfOp);
2056 res = frag_arr[pos];
2058 if (block->attr.block.graph_arr[pos] != NULL) {
2059 /* There was a set_value() after the cfOp and no get_value() before that
2060 set_value(). We must build a Phi node now. */
2061 if (block->attr.block.is_matured) {
2062 int ins = get_irn_arity(block);
2064 NEW_ARR_A(ir_node *, nin, ins);
2065 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: chain a Phi0 into the block's phi list, to be
   completed by mature_immBlock(). */
2067 res = new_rd_Phi0(current_ir_graph, block, mode);
2068 res->attr.phi.u.pos = pos;
2069 res->attr.phi.next = block->attr.block.phis;
2070 block->attr.block.phis = res;
2072 assert(res != NULL);
2073 /* It's a Phi, we can write this into all graph_arrs with NULL */
2074 set_frag_value(block->attr.block.graph_arr, pos, res);
2076 res = get_r_value_internal(block, pos, mode);
2077 set_frag_value(block->attr.block.graph_arr, pos, res);
2081 } /* get_r_frag_value_internal */
2082 #endif /* PRECISE_EXC_CONTEXT */
2085 * Check whether a control flownode cf_pred represents an exception flow.
2087 * @param cf_pred the control flow node
2088 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2090 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2092 * Note: all projections from a raise are "exceptional control flow" so we handle it
2093 * like a normal Jmp, because there is no "regular" one.
2094 * That's why Raise is no "fragile_op"!
/* Only fragile ops split control flow into regular/exceptional Projs. */
2096 if (is_fragile_op(prev_cf_op)) {
2097 if (is_Proj(cf_pred)) {
2098 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2099 /* the regular control flow, NO exception */
2102 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2105 /* Hmm, exception but not a Proj? */
2106 assert(!"unexpected condition: fragile op without a proj");
2110 } /* is_exception_flow */
2113 * Computes the predecessors for the real phi node, and then
2114 * allocates and returns this node. The routine called to allocate the
2115 * node might optimize it away and return a real value.
2116 * This function must be called with an in-array of proper size.
2119 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2120 ir_node *prevBlock, *res, *phi0, *phi0_all;
2123 /* If this block has no value at pos create a Phi0 and remember it
2124 in graph_arr to break recursions.
2125 Else we may not set graph_arr as a later value is remembered there. */
2127 if (block->attr.block.graph_arr[pos] == NULL) {
2128 ir_graph *irg = current_ir_graph;
2130 if (block == get_irg_start_block(irg)) {
2131 /* Collapsing to Bad tarvals is no good idea.
2132 So we call a user-supplied routine here that deals with this case as
2133 appropriate for the given language. Sorrily the only help we can give
2134 here is the position.
2136 Even if all variables are defined before use, it can happen that
2137 we get to the start block, if a Cond has been replaced by a tuple
2138 (bad, jmp). In this case we call the function needlessly, eventually
2139 generating a nonexistent error.
2140 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2143 if (default_initialize_local_variable != NULL) {
2144 ir_node *rem = get_cur_block();
2146 set_cur_block(block);
2147 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
/* No language callback registered: fall back to an Unknown value. */
2151 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2152 /* We don't need to care about exception ops in the start block.
2153 There are none by definition. */
2154 return block->attr.block.graph_arr[pos];
/* Non-start block: place a Phi0 to break recursion through loops. */
2156 phi0 = new_rd_Phi0(irg, block, mode);
2157 block->attr.block.graph_arr[pos] = phi0;
2158 #if PRECISE_EXC_CONTEXT
2159 if (get_opt_precise_exc_context()) {
2160 /* Set graph_arr for fragile ops. Also here we should break recursion.
2161 We could choose a cyclic path through an cfop. But the recursion would
2162 break at some point. */
2163 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2169 /* This loop goes to all predecessor blocks of the block the Phi node
2170 is in and there finds the operands of the Phi node by calling
2171 get_r_value_internal. */
2172 for (i = 1; i <= ins; ++i) {
2173 ir_node *cf_pred = block->in[i];
2174 ir_node *prevCfOp = skip_Proj(cf_pred);
2176 if (is_Bad(prevCfOp)) {
2177 /* In case a Cond has been optimized we would get right to the start block
2178 with an invalid definition. */
2179 nin[i-1] = new_Bad();
2182 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2184 if (!is_Bad(prevBlock)) {
2185 #if PRECISE_EXC_CONTEXT
/* Along an exception edge the value must come from the fragile op's
   frag array, not the block's graph_arr. */
2186 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2187 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2188 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2191 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2193 nin[i-1] = new_Bad();
2197 /* We want to pass the Phi0 node to the constructor: this finds additional
2198 optimization possibilities.
2199 The Phi0 node either is allocated in this function, or it comes from
2200 a former call to get_r_value_internal(). In this case we may not yet
2201 exchange phi0, as this is done in mature_immBlock(). */
2203 phi0_all = block->attr.block.graph_arr[pos];
2204 if (! is_Phi0(phi0_all) ||
2205 get_irn_arity(phi0_all) != 0 ||
2206 get_nodes_block(phi0_all) != block)
2212 /* After collecting all predecessors into the array nin a new Phi node
2213 with these predecessors is created. This constructor contains an
2214 optimization: If all predecessors of the Phi node are identical it
2215 returns the only operand instead of a new Phi node. */
2216 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2218 /* In case we allocated a Phi0 node at the beginning of this procedure,
2219 we need to exchange this Phi0 with the real Phi. */
2221 exchange(phi0, res);
2222 block->attr.block.graph_arr[pos] = res;
2223 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2224 only an optimization. */
2231 * This function returns the last definition of a value. In case
2232 * this value was last defined in a previous block, Phi nodes are
2233 * inserted. If the part of the firm graph containing the definition
2234 * is not yet constructed, a dummy Phi node is returned.
2236 * @param block the current block
2237 * @param pos the value number of the value searched
2238 * @param mode the mode of this value (needed for Phi construction)
2241 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2243 /* There are 4 cases to treat.
2245 1. The block is not mature and we visit it the first time. We can not
2246 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2247 predecessors is returned. This node is added to the linked list (block
2248 attribute "phis") of the containing block to be completed when this block is
2249 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2252 2. The value is already known in this block, graph_arr[pos] is set and we
2253 visit the block the first time. We can return the value without
2254 creating any new nodes.
2256 3. The block is mature and we visit it the first time. A Phi node needs
2257 to be created (phi_merge). If the Phi is not needed, as all it's
2258 operands are the same value reaching the block through different
2259 paths, it's optimized away and the value itself is returned.
2261 4. The block is mature, and we visit it the second time. Now two
2262 subcases are possible:
2263 * The value was computed completely the last time we were here. This
2264 is the case if there is no loop. We can return the proper value.
2265 * The recursion that visited this node and set the flag did not
2266 return yet. We are computing a value in a loop and need to
2267 break the recursion. This case only happens if we visited
2268 the same block with phi_merge before, which inserted a Phi0.
2269 So we return the Phi0.
2272 /* case 4 -- already visited. */
2273 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2274 /* As phi_merge allocates a Phi0 this value is always defined. Here
2275 is the critical difference of the two algorithms. */
2276 assert(block->attr.block.graph_arr[pos]);
2277 return block->attr.block.graph_arr[pos];
2280 /* visited the first time */
2281 set_irn_visited(block, get_irg_visited(current_ir_graph));
2283 /* Get the local valid value */
2284 res = block->attr.block.graph_arr[pos];
2286 /* case 2 -- If the value is actually computed, return it. */
2290 if (block->attr.block.is_matured) { /* case 3 */
2292 /* The Phi has the same amount of ins as the corresponding block. */
2293 int ins = get_irn_arity(block);
2295 NEW_ARR_A(ir_node *, nin, ins);
2297 /* Phi merge collects the predecessors and then creates a node. */
2298 res = phi_merge(block, pos, mode, nin, ins);
2300 } else { /* case 1 */
2301 /* The block is not mature, we don't know how many in's are needed. A Phi
2302 with zero predecessors is created. Such a Phi node is called Phi0
2303 node. The Phi0 is then added to the list of Phi0 nodes in this block
2304 to be matured by mature_immBlock later.
2305 The Phi0 has to remember the pos of it's internal value. If the real
2306 Phi is computed, pos is used to update the array with the local
2308 res = new_rd_Phi0(current_ir_graph, block, mode);
2309 res->attr.phi.u.pos = pos;
2310 res->attr.phi.next = block->attr.block.phis;
2311 block->attr.block.phis = res;
2314 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2316 /* The local valid value is available now. */
2317 block->attr.block.graph_arr[pos] = res;
2320 } /* get_r_value_internal */
2322 /* ************************************************************************** */
2325 * Finalize a Block node, when all control flows are known.
2326 * Acceptable parameters are only Block nodes.
/* Finalize an immature Block once all control-flow predecessors are known:
   fix the in-array, complete all pending Phi0 nodes via phi_merge, mark the
   block matured and optimize it in place. */
2329 mature_immBlock(ir_node *block) {
2334 assert(is_Block(block));
2335 if (!get_Block_matured(block)) {
2336 ir_graph *irg = current_ir_graph;
/* block->in[0] is the block's own entry; the preds follow. */
2338 ins = ARR_LEN(block->in) - 1;
2339 /* Fix block parameters */
2340 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2342 /* An array for building the Phi nodes. */
2343 NEW_ARR_A(ir_node *, nin, ins);
2345 /* Traverse a chain of Phi nodes attached to this block and mature
2347 for (n = block->attr.block.phis; n; n = next) {
2348 inc_irg_visited(irg);
2349 next = n->attr.phi.next;
2350 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2353 block->attr.block.is_matured = 1;
2355 /* Now, as the block is a finished Firm node, we can optimize it.
2356 Since other nodes have been allocated since the block was created
2357 we can not free the node on the obstack. Therefore we have to call
2358 optimize_in_place().
2359 Unfortunately the optimization does not change a lot, as all allocated
2360 nodes refer to the unoptimized node.
2361 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2362 block = optimize_in_place_2(block);
2363 IRN_VRFY_IRG(block, irg);
2365 } /* mature_immBlock */
/* new_d_* constructors: build the node in the current block of
   current_ir_graph via the corresponding new_bd_* constructor. */
2368 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2369 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2373 new_d_Const(dbg_info *db, tarval *con) {
2374 return new_bd_Const(db, con);
2378 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2379 return new_bd_Const_long(db, mode, value);
2380 } /* new_d_Const_long */
2383 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
2384 return new_bd_Const_type(db, con, tp);
2385 } /* new_d_Const_type */
2390 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2391 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2395 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2396 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* Build the default Proj of a Cond: marks the Cond as fragmentary and
   records max_proj as its default projection number. */
2401 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2403 assert(arg->op == op_Cond);
2404 arg->attr.cond.kind = fragmentary;
2405 arg->attr.cond.default_proj = max_proj;
2406 res = new_d_Proj(db, arg, mode_X, max_proj);
2408 } /* new_d_defaultProj */
/* Conv: last argument 0 = normal conversion, 1 = strict conversion. */
2412 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2413 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2417 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2418 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2419 } /* new_d_strictConv */
2422 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2423 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2427 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2428 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2439 * Allocate a frag array for a node if the current graph state is phase_building.
2441 * @param irn the node for which the frag array should be allocated
2442 * @param op the opcode of the (original) node, if does not match opcode of irn,
2444 * @param frag_store the address of the frag store in irn attributes, if this
2445 * address contains a value != NULL, does nothing
/* Lazily attaches a fragment array (used for precise exception contexts)
   to a freshly built node.  All three guards exist to avoid allocating
   twice or for a node that optimize_node() replaced with another opcode. */
2447 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2448 if (get_opt_precise_exc_context()) {
2449 if ((current_ir_graph->phase_state == phase_building) &&
2450 (get_irn_op(irn) == op) && /* Could be optimized away. */
2451 !*frag_store) /* Could be a cse where the arr is already set. */ {
2452 *frag_store = new_frag_arr(irn);
2455 } /* firm_alloc_frag_arr */
/* --- Arithmetic constructors that may raise exceptions: each creates the
   node, then (under PRECISE_EXC_CONTEXT) attaches a frag array so the
   memory state at the potential exception point can be tracked. --- */
2459 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2461 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2462 #if PRECISE_EXC_CONTEXT
2463 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2470 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2472 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2473 #if PRECISE_EXC_CONTEXT
2474 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2478 } /* new_d_DivMod */
2481 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2483 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2484 #if PRECISE_EXC_CONTEXT
2485 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
/* NOTE(review): DivRL passes op_Div below — presumably remainderless Div
   nodes share the Div opcode; confirm against new_bd_DivRL. */
2492 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2494 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2495 #if PRECISE_EXC_CONTEXT
2496 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2503 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2505 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2506 #if PRECISE_EXC_CONTEXT
2507 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
/* Comparison and control-flow constructors in the current block. */
2526 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2527 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2531 new_d_Jmp(dbg_info *db) {
2532 return new_bd_Jmp(db, current_ir_graph->current_block);
2536 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2537 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2541 new_d_Cond(dbg_info *db, ir_node *c) {
2542 return new_bd_Cond(db, current_ir_graph->current_block, c);
/* Call may raise an exception -> gets a frag array under
   PRECISE_EXC_CONTEXT, stored in the call-specific attribute. */
2546 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2549 res = new_bd_Call(db, current_ir_graph->current_block,
2550 store, callee, arity, in, tp);
2551 #if PRECISE_EXC_CONTEXT
2552 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
/* NOTE(review): new_bd_Builtin receives (…, tp, kind) — parameter order
   differs from this wrapper's (…, kind, tp); verify against its definition. */
2559 new_d_Builtin(dbg_info *db, ir_node *store, int arity, ir_node **in,
2560 ir_builtin_kind kind, ir_type *tp) {
2561 return new_bd_Builtin(db, current_ir_graph->current_block, store, arity, in, tp, kind);
2562 } /* new_d_Builtin */
/* Memory-touching constructors; Load/Store/Alloc may raise exceptions and
   therefore get frag arrays under PRECISE_EXC_CONTEXT. */
2565 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2566 return new_bd_Return(db, current_ir_graph->current_block,
2568 } /* new_d_Return */
2571 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
2573 res = new_bd_Load(db, current_ir_graph->current_block,
2574 store, addr, mode, flags);
2575 #if PRECISE_EXC_CONTEXT
2576 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2583 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
2585 res = new_bd_Store(db, current_ir_graph->current_block,
2586 store, addr, val, flags);
2587 #if PRECISE_EXC_CONTEXT
2588 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2595 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2596 ir_where_alloc where) {
2598 res = new_bd_Alloc(db, current_ir_graph->current_block,
2599 store, size, alloc_type, where);
2600 #if PRECISE_EXC_CONTEXT
2601 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
/* Free cannot raise an exception, so no frag array is needed. */
2608 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2609 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2610 return new_bd_Free(db, current_ir_graph->current_block,
2611 store, ptr, size, free_type, where);
/* simpleSel: Sel with no array indices (0, NULL) — plain member access. */
2616 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2617 /* GL: objptr was called frame before. Frame was a bad choice for the name
2618 as the operand could as well be a pointer to a dynamic object. */
2620 return new_bd_Sel(db, current_ir_graph->current_block,
2621 store, objptr, 0, NULL, ent);
2622 } /* new_d_simpleSel */
2626 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2627 return new_bd_Sel(db, current_ir_graph->current_block,
2628 store, objptr, n_index, index, sel);
/* SymConsts are placed in the start block, not the current block, since
   they are constants valid for the whole graph. */
2633 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2634 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2636 } /* new_d_SymConst_type */
2639 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2640 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2641 value, kind, firm_unknown_type);
2642 } /* new_d_SymConst */
/* Sync goes through the new_rd_ interface (takes the graph explicitly). */
2645 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2646 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2651 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2652 return new_bd_Confirm(db, current_ir_graph->current_block,
2654 } /* new_d_Confirm */
2657 new_d_Unknown(ir_mode *m) {
2658 return new_bd_Unknown(m);
2659 } /* new_d_Unknown */
/* Interprocedural-view constructors (CallBegin/EndReg/EndExcept/Break/Filter). */
2663 new_d_CallBegin(dbg_info *db, ir_node *call) {
2664 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2665 } /* new_d_CallBegin */
2668 new_d_EndReg(dbg_info *db) {
2669 return new_bd_EndReg(db, current_ir_graph->current_block);
2670 } /* new_d_EndReg */
2673 new_d_EndExcept(dbg_info *db) {
2674 return new_bd_EndExcept(db, current_ir_graph->current_block);
2675 } /* new_d_EndExcept */
2679 new_d_Break(dbg_info *db) {
2680 return new_bd_Break(db, current_ir_graph->current_block);
2684 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2685 return new_bd_Filter(db, current_ir_graph->current_block,
2687 } /* new_d_Filter */
2690 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2691 ir_node *ir_true, ir_mode *mode) {
2692 return new_bd_Mux(db, current_ir_graph->current_block,
2693 sel, ir_false, ir_true, mode);
/* CopyB and Bound may raise exceptions -> frag arrays as above. */
2696 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2697 ir_node *dst, ir_node *src, ir_type *data_type) {
2699 res = new_bd_CopyB(db, current_ir_graph->current_block,
2700 store, dst, src, data_type);
2701 #if PRECISE_EXC_CONTEXT
2702 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2708 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2709 return new_bd_InstOf(db, current_ir_graph->current_block,
2710 store, objptr, type);
2711 } /* new_d_InstOf */
2714 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2715 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2718 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2719 ir_node *idx, ir_node *lower, ir_node *upper) {
2721 res = new_bd_Bound(db, current_ir_graph->current_block,
2722 store, idx, lower, upper);
2723 #if PRECISE_EXC_CONTEXT
2724 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2730 new_d_Pin(dbg_info *db, ir_node *node) {
2731 return new_bd_Pin(db, current_ir_graph->current_block, node);
2736 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2737 int n_outs, ir_asm_constraint *outputs,
2738 int n_clobber, ident *clobber[], ident *asm_text) {
2739 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2742 /* ********************************************************************* */
2743 /* Comfortable interface with automatic Phi node construction. */
2744 /* (Also uses constructors of the other interfaces, except new_Block.) */
2745 /* ********************************************************************* */
2747 /* Block construction */
2748 /* immature Block without predecessors */
/* Creates an immature block: a block whose predecessor list is still open.
   Predecessors are added later with add_immBlock_pred(); the block must be
   finished with mature_immBlock() before the graph is complete. */
2750 new_d_immBlock(dbg_info *db) {
2753 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2754 /* creates a new dynamic in-array as length of in is -1 */
2755 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2757 /* macroblock head */
/* Reset all block attributes: not matured yet, alive, head of its own
   macroblock, no label/backedges/extended-block/region info. */
2760 res->attr.block.is_matured = 0;
2761 res->attr.block.is_dead = 0;
2762 res->attr.block.is_mb_head = 1;
2763 res->attr.block.has_label = 0;
2764 res->attr.block.irg = current_ir_graph;
2765 res->attr.block.backedge = NULL;
2766 res->attr.block.in_cg = NULL;
2767 res->attr.block.cg_backedge = NULL;
2768 res->attr.block.extblk = NULL;
2769 res->attr.block.region = NULL;
2770 res->attr.block.mb_depth = 0;
2771 res->attr.block.label = 0;
2773 set_Block_block_visited(res, 0);
2775 /* Create and initialize array for Phi-node construction. */
/* graph_arr holds one slot per local value number (n_loc); slot 0 is the
   memory state.  All slots start out NULL (= undefined). */
2776 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2777 current_ir_graph->n_loc);
2778 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2780 /* Immature block may not be optimized! */
2781 IRN_VRFY_IRG(res, current_ir_graph);
2784 } /* new_d_immBlock */
/* Convenience wrapper: immature block without debug info. */
2787 new_immBlock(void) {
2788 return new_d_immBlock(NULL);
2789 } /* new_immBlock */
2791 /* immature PartBlock with its predecessors */
/* A PartBlock continues the macroblock of pred_jmp's block: it shares that
   block's macroblock header (in[0]) and sits one level deeper. */
2793 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2794 ir_node *res = new_d_immBlock(db);
2795 ir_node *blk = get_nodes_block(pred_jmp);
/* Inherit the macroblock header from the predecessor's block. */
2797 res->in[0] = blk->in[0];
2798 assert(res->in[0] != NULL);
2799 add_immBlock_pred(res, pred_jmp);
/* Not a macroblock head itself, so undo the flag new_d_immBlock() set. */
2801 res->attr.block.is_mb_head = 0;
2802 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2805 } /* new_d_immPartBlock */
/* Convenience wrapper: immature PartBlock without debug info. */
2808 new_immPartBlock(ir_node *pred_jmp) {
2809 return new_d_immPartBlock(NULL, pred_jmp);
2810 } /* new_immPartBlock */
2812 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new control-flow predecessor of an immature block.
   Only legal before the block is matured, and only on macroblock heads. */
2814 add_immBlock_pred(ir_node *block, ir_node *jmp) {
/* in[0] is the macroblock header, so the predecessor count is len - 1. */
2815 int n = ARR_LEN(block->in) - 1;
2817 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2818 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2819 assert(is_ir_node(jmp));
2821 ARR_APP1(ir_node *, block->in, jmp);
/* Notify edge hooks about the new in-edge at position n. */
2823 hook_set_irn_n(block, n, jmp, NULL);
2824 } /* add_immBlock_pred */
2826 /* changing the current block */
/* Makes target the block in which subsequent new_* constructors place nodes. */
2828 set_cur_block(ir_node *target) {
2829 current_ir_graph->current_block = target;
2830 } /* set_cur_block */
2832 /* ************************ */
2833 /* parameter administration */
2835 /* get a value from the parameter array from the current block by its index */
2837 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2838 ir_graph *irg = current_ir_graph;
2839 assert(get_irg_phase_state(irg) == phase_building);
/* Fresh visited counter for the Phi construction walk below. */
2840 inc_irg_visited(irg);
/* pos is shifted by 1: slot 0 of the value array holds the memory state. */
2845 return get_r_value_internal(irg->current_block, pos + 1, mode);
2848 /* get a value from the parameter array from the current block by its index */
/* Same as get_d_value() but without debug info. */
2850 get_value(int pos, ir_mode *mode) {
2851 return get_d_value(NULL, pos, mode);
2854 /* set a value at position pos in the parameter array from the current block */
2856 set_value(int pos, ir_node *value) {
2857 ir_graph *irg = current_ir_graph;
2858 assert(get_irg_phase_state(irg) == phase_building);
/* pos + 1 because slot 0 is reserved for the memory state. */
2860 assert(pos+1 < irg->n_loc);
2861 assert(is_ir_node(value));
2862 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2865 /* Find the value number for a node in the current block.*/
/* Scans the current block's value array backwards, skipping slot 0 (the
   memory state).  Array index i corresponds to value number i - 1
   (presumably the returned number — return statement not visible here). */
2867 find_value(ir_node *value) {
2869 ir_node *bl = current_ir_graph->current_block;
2871 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2872 if (bl->attr.block.graph_arr[i] == value)
2877 /* get the current store */
/* The memory state lives at value-array slot 0, mode_M. */
2880 ir_graph *irg = current_ir_graph;
2882 assert(get_irg_phase_state(irg) == phase_building);
2883 /* GL: one could call get_value instead */
2884 inc_irg_visited(irg);
2885 return get_r_value_internal(irg->current_block, 0, mode_M);
2888 /* set the current store: handles automatic Sync construction for Load nodes */
2890 set_store(ir_node *store) {
2891 ir_node *load, *pload, *pred, *in[2];
2893 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2894 /* Beware: due to dead code elimination, a store might become a Bad node even in
2895 the construction phase. */
2896 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2898 if (get_opt_auto_create_sync()) {
2899 /* handle non-volatile Load nodes by automatically creating Sync's */
2900 load = skip_Proj(store);
2901 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2902 pred = get_Load_mem(load);
2904 if (is_Sync(pred)) {
2905 /* a Load after a Sync: move it up */
/* Rewire the Load to the Sync's first operand's memory and add this
   Load's result to the existing Sync instead of chaining it. */
2906 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2908 set_Load_mem(load, get_memop_mem(mem));
2909 add_Sync_pred(pred, store);
2912 pload = skip_Proj(pred);
2913 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2914 /* a Load after a Load: create a new Sync */
2915 set_Load_mem(load, get_Load_mem(pload));
/* in[] is presumably filled with both loads' memory Projs in the lines
   not visible here before the Sync is built — verify in full source. */
2919 store = new_Sync(2, in);
/* Finally publish the (possibly replaced) store as the current memory state. */
2924 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keeps ka alive by adding it to the End node's keep-alive list, so dead
   node elimination will not remove it. */
2928 keep_alive(ir_node *ka) {
2929 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2932 /* --- Useful access routines --- */
2933 /* Returns the current block of the current graph. To set the current
2934 block use set_cur_block. */
2935 ir_node *get_cur_block(void) {
2936 return get_irg_current_block(current_ir_graph);
2937 } /* get_cur_block */
2939 /* Returns the frame type of the current graph */
2940 ir_type *get_cur_frame_type(void) {
2941 return get_irg_frame_type(current_ir_graph);
2942 } /* get_cur_frame_type */
2945 /* ********************************************************************* */
2948 /* call once for each run of the library */
/* Registers the frontend callback used to produce a value for a local
   variable that is read before it was written. */
2950 firm_init_cons(uninitialized_local_variable_func_t *func) {
2951 default_initialize_local_variable = func;
2952 } /* firm_init_cons */
/* Finalizes construction of every graph in the program and advances the
   program's phase to phase_high. */
2955 irp_finalize_cons(void) {
2957 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2958 irg_finalize_cons(get_irp_irg(i));
2960 irp->phase_state = phase_high;
2961 } /* irp_finalize_cons */
/* --- Public constructors without debug info: each forwards to its
   new_d_* counterpart with db == NULL. --- */
2964 ir_node *new_Block(int arity, ir_node **in) {
2965 return new_d_Block(NULL, arity, in);
2967 ir_node *new_Start(void) {
2968 return new_d_Start(NULL);
2970 ir_node *new_End(void) {
2971 return new_d_End(NULL);
2973 ir_node *new_Jmp(void) {
2974 return new_d_Jmp(NULL);
2976 ir_node *new_IJmp(ir_node *tgt) {
2977 return new_d_IJmp(NULL, tgt);
2979 ir_node *new_Cond(ir_node *c) {
2980 return new_d_Cond(NULL, c);
2982 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2983 return new_d_Return(NULL, store, arity, in);
2986 ir_node *new_Const(tarval *con) {
2987 return new_d_Const(NULL, con);
2990 ir_node *new_Const_long(ir_mode *mode, long value) {
2991 return new_d_Const_long(NULL, mode, value);
2994 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2995 return new_d_Const_type(NULL, con, tp);
2998 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2999 return new_d_SymConst_type(NULL, mode, value, kind, type);
3001 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
3002 return new_d_SymConst(NULL, mode, value, kind);
3004 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3005 return new_d_simpleSel(NULL, store, objptr, ent);
3008 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3010 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3012 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3014 return new_d_Call(NULL, store, callee, arity, in, tp);
/* Public (no-dbg) Builtin constructor.  The public parameter order is
   (store, kind, arity, in, tp) while new_d_Builtin() expects
   (db, store, arity, in, kind, tp), so the forwarded arguments must be
   reordered here.  The previous code passed (kind, arity, in) straight
   through, handing the enum to the int parameter and the int to the
   ir_node** parameter. */
3016 ir_node *new_Builtin(ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
3018 return new_d_Builtin(NULL, store, arity, in, kind, tp);
/* Arithmetic / logic constructors without debug info. */
3020 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3021 return new_d_Add(NULL, op1, op2, mode);
3023 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3024 return new_d_Sub(NULL, op1, op2, mode);
3026 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3027 return new_d_Minus(NULL, op, mode);
3029 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3030 return new_d_Mul(NULL, op1, op2, mode);
3032 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
3033 return new_d_Mulh(NULL, op1, op2, mode);
3035 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3036 return new_d_Quot(NULL, memop, op1, op2, mode, state);
3038 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3039 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
3041 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3042 return new_d_Div(NULL, memop, op1, op2, mode, state);
3044 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3045 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
3047 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3048 return new_d_Mod(NULL, memop, op1, op2, mode, state);
3050 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3051 return new_d_Abs(NULL, op, mode);
3053 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3054 return new_d_And(NULL, op1, op2, mode);
3056 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3057 return new_d_Or(NULL, op1, op2, mode);
3059 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3060 return new_d_Eor(NULL, op1, op2, mode);
3062 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3063 return new_d_Not(NULL, op, mode);
3065 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3066 return new_d_Shl(NULL, op, k, mode);
3068 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3069 return new_d_Shr(NULL, op, k, mode);
3071 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3072 return new_d_Shrs(NULL, op, k, mode);
3074 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
3075 return new_d_Rotl(NULL, op, k, mode);
3077 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3078 return new_d_Carry(NULL, op1, op2, mode);
3080 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3081 return new_d_Borrow(NULL, op1, op2, mode);
3083 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3084 return new_d_Cmp(NULL, op1, op2);
3086 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3087 return new_d_Conv(NULL, op, mode);
3089 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3090 return new_d_strictConv(NULL, op, mode);
3093 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3094 return new_d_Phi(NULL, arity, in, mode);
3097 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3098 return new_d_Cast(NULL, op, to_tp);
/* Memory operation constructors without debug info. */
3100 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
3101 return new_d_Load(NULL, store, addr, mode, flags);
3103 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
3104 return new_d_Store(NULL, store, addr, val, flags);
3106 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3107 ir_where_alloc where) {
3108 return new_d_Alloc(NULL, store, size, alloc_type, where);
3110 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3111 ir_type *free_type, ir_where_alloc where) {
3112 return new_d_Free(NULL, store, ptr, size, free_type, where);
3115 ir_node *new_Sync(int arity, ir_node *in[]) {
3116 return new_d_Sync(NULL, arity, in);
3119 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3120 return new_d_Proj(NULL, arg, mode, proj);
3123 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3124 return new_d_defaultProj(NULL, arg, max_proj);
3127 ir_node *new_Tuple(int arity, ir_node **in) {
3128 return new_d_Tuple(NULL, arity, in);
3130 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3131 return new_d_Id(NULL, val, mode);
/* Bad and NoMem are singletons per graph, fetched rather than constructed. */
3134 ir_node *new_Bad(void) {
3135 return get_irg_bad(current_ir_graph);
3138 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3139 return new_d_Confirm(NULL, val, bound, cmp);
3141 ir_node *new_Unknown(ir_mode *m) {
3142 return new_d_Unknown(m);
3145 ir_node *new_CallBegin(ir_node *callee) {
3146 return new_d_CallBegin(NULL, callee);
3148 ir_node *new_EndReg(void) {
3149 return new_d_EndReg(NULL);
3151 ir_node *new_EndExcept(void) {
3152 return new_d_EndExcept(NULL);
3155 ir_node *new_Break(void) {
3156 return new_d_Break(NULL);
3158 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3159 return new_d_Filter(NULL, arg, mode, proj);
3162 ir_node *new_NoMem(void) {
3163 return get_irg_no_mem(current_ir_graph);
3166 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3167 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3169 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3170 return new_d_CopyB(NULL, store, dst, src, data_type);
3172 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3173 return new_d_InstOf(NULL, store, objptr, ent);
3175 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3176 return new_d_Raise(NULL, store, obj);
3178 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3179 return new_d_Bound(NULL, store, idx, lower, upper);
3181 ir_node *new_Pin(ir_node *node) {
3182 return new_d_Pin(NULL, node);
/* Dummy: a placeholder node with no inputs, placed in the start block. */
3185 ir_node *new_Dummy(ir_mode *m) {
3186 ir_graph *irg = current_ir_graph;
3187 return new_ir_node(NULL, irg, get_irg_start_block(irg), op_Dummy, m, 0, NULL);
3189 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3190 int n_outs, ir_asm_constraint *outputs,
3191 int n_clobber, ident *clobber[], ident *asm_text) {
3192 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3195 /* create a new anchor node */
3196 ir_node *new_Anchor(ir_graph *irg) {
3197 ir_node *in[anchor_last];
3198 memset(in, 0, sizeof(in));
3199 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);