2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
/*
 * NOTE(review): this file appears to be a partial, line-sampled dump of
 * libFirm's ircons.c -- each line still carries its original line number
 * as stray text and many intermediate lines are missing (e.g. the
 * #ifdef NDEBUG / #else guards that presumably separated the two
 * IRN_VRFY_IRG definitions below -- TODO confirm against upstream).
 * Comments only added; code left byte-identical.
 */
47 /* Uncomment to use original code instead of generated one */
48 // #define USE_ORIGINAL
50 /* when we need verifying */
/* no-op variant of the per-node verification hook */
52 # define IRN_VRFY_IRG(res, irg)
/* verifying variant: checks each freshly constructed node against its graph */
54 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
58 * Language dependent variable initialization callback.
/* Frontend callback invoked when a local variable is read before being
   written; NULL means no language-specific initialization is installed. */
60 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/*
 * "bd" (block-default) constructor templates.  Each macro expands to a
 * static new_bd_<instr>() that builds a node in the given block of
 * current_ir_graph, runs optimize_node() and IRN_VRFY_IRG().
 * NOTE(review): the macro bodies are incomplete in this dump -- the
 * closing lines (the final `return res;` and `}`) are missing.
 */
62 /* creates a bd constructor for a binop */
63 #define NEW_BD_BINOP(instr) \
65 new_bd_##instr(dbg_info *db, ir_node *block, \
66 ir_node *op1, ir_node *op2, ir_mode *mode) \
70 ir_graph *irg = current_ir_graph; \
73 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
74 res = optimize_node(res); \
75 IRN_VRFY_IRG(res, irg); \
79 /* creates a bd constructor for an unop */
80 #define NEW_BD_UNOP(instr) \
82 new_bd_##instr(dbg_info *db, ir_node *block, \
83 ir_node *op, ir_mode *mode) \
86 ir_graph *irg = current_ir_graph; \
87 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
88 res = optimize_node(res); \
89 IRN_VRFY_IRG(res, irg); \
93 /* creates a bd constructor for an divop */
94 #define NEW_BD_DIVOP(instr) \
96 new_bd_##instr(dbg_info *db, ir_node *block, \
97 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
101 ir_graph *irg = current_ir_graph; \
105 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
106 res->attr.divmod.exc.pin_state = state; \
107 res->attr.divmod.res_mode = mode; \
108 res->attr.divmod.no_remainder = 0; \
109 res = optimize_node(res); \
110 IRN_VRFY_IRG(res, irg); \
/*
 * "rd" constructor templates: each wraps the matching new_bd_<instr>()
 * by temporarily switching current_ir_graph to the explicit irg
 * argument and restoring it afterwards.  Bodies are incomplete in this
 * dump (closing `return res;` / `}` lines missing).
 */
114 /* creates a rd constructor for a binop */
115 #define NEW_RD_BINOP(instr) \
117 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
118 ir_node *op1, ir_node *op2, ir_mode *mode) \
121 ir_graph *rem = current_ir_graph; \
122 current_ir_graph = irg; \
123 res = new_bd_##instr(db, block, op1, op2, mode); \
124 current_ir_graph = rem; \
128 /* creates a rd constructor for an unop */
129 #define NEW_RD_UNOP(instr) \
131 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
132 ir_node *op, ir_mode *mode) \
135 ir_graph *rem = current_ir_graph; \
136 current_ir_graph = irg; \
137 res = new_bd_##instr(db, block, op, mode); \
138 current_ir_graph = rem; \
142 /* creates a rd constructor for an divop */
143 #define NEW_RD_DIVOP(instr) \
145 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
146 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
149 ir_graph *rem = current_ir_graph; \
150 current_ir_graph = irg; \
151 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
152 current_ir_graph = rem; \
/*
 * "d" constructor templates: build in current_ir_graph->current_block
 * (the implicit-block API used during SSA construction).  The bulk of
 * the concrete constructors is generated into gen_ir_cons.c.inl.
 */
156 /* creates a d constructor for an binop */
157 #define NEW_D_BINOP(instr) \
159 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
160 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
163 /* creates a d constructor for an unop */
164 #define NEW_D_UNOP(instr) \
166 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
167 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
171 #include "gen_ir_cons.c.inl"
/*
 * new_bd_Block: allocates a Block node in current_ir_graph with the
 * given predecessor array, initializes all block attributes (dead flag,
 * macroblock header info, backedge array, extended-block/label slots)
 * and marks the block matured.  Incomplete in this dump (declaration of
 * `res` and `return res;` missing).
 */
175 * Constructs a Block with a fixed number of predecessors.
176 * Does not set current_block. Cannot be used with automatic
177 * Phi node construction.
180 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
182 ir_graph *irg = current_ir_graph;
184 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
186 /* macroblock header */
189 res->attr.block.is_dead = 0;
190 res->attr.block.is_mb_head = 1;
191 res->attr.block.has_label = 0;
192 res->attr.block.irg = irg;
193 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
194 res->attr.block.in_cg = NULL;
195 res->attr.block.cg_backedge = NULL;
196 res->attr.block.extblk = NULL;
197 res->attr.block.mb_depth = 0;
198 res->attr.block.label = 0;
200 set_Block_matured(res, 1);
201 set_Block_block_visited(res, 0);
203 IRN_VRFY_IRG(res, irg);
/*
 * new_bd_Start / new_bd_End / new_bd_Phi.
 * Start is a 0-ary mode_T node; End uses arity -1 (dynamic keep-alive
 * list).  Phi checks arity against the block (when matured), records a
 * backedge array, skips optimization while any input is Unknown, and
 * keeps mode_M Phis alive via the End node (endless-loop safety).
 * Bodies are incomplete in this dump.
 */
208 new_bd_Start(dbg_info *db, ir_node *block) {
210 ir_graph *irg = current_ir_graph;
212 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
214 IRN_VRFY_IRG(res, irg);
219 new_bd_End(dbg_info *db, ir_node *block) {
221 ir_graph *irg = current_ir_graph;
223 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
225 IRN_VRFY_IRG(res, irg);
231 * Creates a Phi node with all predecessors. Calling this constructor
232 * is only allowed if the corresponding block is mature.
235 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
237 ir_graph *irg = current_ir_graph;
241 /* Don't assert that block matured: the use of this constructor is strongly
243 if (get_Block_matured(block))
244 assert(get_irn_arity(block) == arity);
246 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
248 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
250 for (i = arity - 1; i >= 0; --i)
251 if (is_Unknown(in[i])) {
256 if (!has_unknown) res = optimize_node(res);
257 IRN_VRFY_IRG(res, irg);
259 /* Memory Phis in endless loops must be kept alive.
260 As we can't distinguish these easily we keep all of them alive. */
261 if (is_Phi(res) && mode == mode_M)
262 add_End_keepalive(get_irg_end(irg), res);
/*
 * Const constructors.  Const nodes are always placed in the start
 * block; the mode comes from the tarval.  new_bd_Const and
 * new_bd_Const_long delegate to the rd variants with firm_unknown_type
 * (matches the delegation visible elsewhere in this file).
 */
268 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
270 ir_graph *irg = current_ir_graph;
272 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
273 res->attr.con.tv = con;
274 set_Const_type(res, tp); /* Call method because of complex assertion. */
275 res = optimize_node (res);
276 assert(get_Const_type(res) == tp);
277 IRN_VRFY_IRG(res, irg);
280 } /* new_bd_Const_type */
284 new_bd_Const(dbg_info *db, tarval *con) {
285 ir_graph *irg = current_ir_graph;
287 return new_rd_Const_type (db, irg, con, firm_unknown_type);
291 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
292 ir_graph *irg = current_ir_graph;
294 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
295 } /* new_bd_Const_long */
/*
 * Id / Proj / defaultProj / Conv / Cast / Tuple bd-constructors.
 * defaultProj additionally mutates its Cond predecessor (marks it
 * fragmentary and records max_proj as the default projection number).
 * Cast keeps the operand's mode and only records the target type.
 * Bodies are incomplete in this dump.
 */
299 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
301 ir_graph *irg = current_ir_graph;
303 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
310 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
313 ir_graph *irg = current_ir_graph;
315 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
316 res->attr.proj = proj;
319 assert(get_Proj_pred(res));
320 assert(get_nodes_block(get_Proj_pred(res)));
322 res = optimize_node(res);
324 IRN_VRFY_IRG(res, irg);
330 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
333 ir_graph *irg = current_ir_graph;
335 assert(arg->op == op_Cond);
/* side effect: the Cond predecessor is switched to fragmentary kind */
336 arg->attr.cond.kind = fragmentary;
337 arg->attr.cond.default_proj = max_proj;
338 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
340 } /* new_bd_defaultProj */
343 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
345 ir_graph *irg = current_ir_graph;
347 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
348 res->attr.conv.strict = strict_flag;
349 res = optimize_node(res);
350 IRN_VRFY_IRG(res, irg);
356 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
358 ir_graph *irg = current_ir_graph;
360 assert(is_atomic_type(to_tp));
362 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
363 res->attr.cast.totype = to_tp;
364 res = optimize_node(res);
365 IRN_VRFY_IRG(res, irg);
370 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
372 ir_graph *irg = current_ir_graph;
374 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
375 res = optimize_node (res);
376 IRN_VRFY_IRG(res, irg);
/*
 * DivRL / Cmp / Jmp / IJmp / Cond / Call bd-constructors.
 * DivRL is a Div with no_remainder = 1.  Call prepends store and callee
 * to the argument array (r_in[0], r_in[1]) before the real arguments.
 * Bodies are incomplete in this dump.
 */
404 /** Creates a remainderless Div node. */
405 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
406 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
410 ir_graph *irg = current_ir_graph;
414 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
415 res->attr.divmod.exc.pin_state = state;
416 res->attr.divmod.res_mode = mode;
417 res->attr.divmod.no_remainder = 1;
418 res = optimize_node(res);
419 IRN_VRFY_IRG(res, irg);
425 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
428 ir_graph *irg = current_ir_graph;
431 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
432 res = optimize_node(res);
433 IRN_VRFY_IRG(res, irg);
438 new_bd_Jmp(dbg_info *db, ir_node *block) {
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
443 res = optimize_node(res);
444 IRN_VRFY_IRG(res, irg);
449 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
451 ir_graph *irg = current_ir_graph;
453 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
454 res = optimize_node(res);
455 IRN_VRFY_IRG(res, irg);
460 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
462 ir_graph *irg = current_ir_graph;
464 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
465 res->attr.cond.kind = dense;
466 res->attr.cond.default_proj = 0;
467 res->attr.cond.pred = COND_JMP_PRED_NONE;
468 res = optimize_node(res);
469 IRN_VRFY_IRG(res, irg);
475 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
476 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
480 ir_graph *irg = current_ir_graph;
483 NEW_ARR_A(ir_node *, r_in, r_arity);
486 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
488 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
490 assert((get_unknown_type() == tp) || is_Method_type(tp));
491 set_Call_type(res, tp);
492 res->attr.call.exc.pin_state = op_pin_state_pinned;
493 res->attr.call.callee_arr = NULL;
494 res = optimize_node(res);
495 IRN_VRFY_IRG(res, irg);
/*
 * Builtin / Return bd-constructors.  Both prepend the memory (store)
 * operand at r_in[0] before copying the user-supplied arguments.
 * Bodies are incomplete in this dump.
 */
500 new_bd_Builtin(dbg_info *db, ir_node *block, ir_node *store,
501 ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
505 ir_graph *irg = current_ir_graph;
508 NEW_ARR_A(ir_node *, r_in, r_arity);
510 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
512 res = new_ir_node(db, irg, block, op_Builtin, mode_T, r_arity, r_in);
514 assert((get_unknown_type() == tp) || is_Method_type(tp));
515 res->attr.builtin.exc.pin_state = op_pin_state_pinned;
516 res->attr.builtin.kind = kind;
517 res->attr.builtin.builtin_tp = tp;
518 res = optimize_node(res);
519 IRN_VRFY_IRG(res, irg);
521 } /* new_bd_Builtin */
524 new_bd_Return(dbg_info *db, ir_node *block,
525 ir_node *store, int arity, ir_node **in) {
529 ir_graph *irg = current_ir_graph;
532 NEW_ARR_A (ir_node *, r_in, r_arity);
534 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
535 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
536 res = optimize_node(res);
537 IRN_VRFY_IRG(res, irg);
539 } /* new_bd_Return */
/*
 * Load / Store / Alloc / Free / Sel / SymConst_type / Sync
 * bd-constructors.  Load and Store translate ir_cons_flags into the
 * pin-state, volatility and alignment attributes.  Sel chooses its
 * result mode from the selected entity (mode_P_code for methods).
 * Sync is created with arity -1 and predecessors added later, so it is
 * deliberately not run through optimize_node() here.
 * Bodies are incomplete in this dump.
 */
542 new_bd_Load(dbg_info *db, ir_node *block,
543 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
546 ir_graph *irg = current_ir_graph;
550 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
551 res->attr.load.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
552 res->attr.load.mode = mode;
553 res->attr.load.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
554 res->attr.load.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
555 res = optimize_node(res);
556 IRN_VRFY_IRG(res, irg);
561 new_bd_Store(dbg_info *db, ir_node *block,
562 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
565 ir_graph *irg = current_ir_graph;
570 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
571 res->attr.store.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
572 res->attr.store.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
573 res->attr.store.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
575 res = optimize_node(res);
576 IRN_VRFY_IRG(res, irg);
581 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
582 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
585 ir_graph *irg = current_ir_graph;
589 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
590 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
591 res->attr.alloc.where = where;
592 res->attr.alloc.type = alloc_type;
593 res = optimize_node(res);
594 IRN_VRFY_IRG(res, irg);
599 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
600 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
603 ir_graph *irg = current_ir_graph;
608 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
609 res->attr.free.where = where;
610 res->attr.free.type = free_type;
611 res = optimize_node(res);
612 IRN_VRFY_IRG(res, irg);
617 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
618 int arity, ir_node **in, ir_entity *ent) {
622 ir_graph *irg = current_ir_graph;
623 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
625 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
628 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
631 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
633 * Sel's can select functions which should be of mode mode_P_code.
635 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
636 res->attr.sel.entity = ent;
637 res = optimize_node(res);
638 IRN_VRFY_IRG(res, irg);
643 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
644 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
645 ir_graph *irg = current_ir_graph;
646 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
648 res->attr.symc.kind = symkind;
649 res->attr.symc.sym = value;
650 res->attr.symc.tp = tp;
652 res = optimize_node(res);
653 IRN_VRFY_IRG(res, irg);
655 } /* new_bd_SymConst_type */
658 new_bd_Sync(dbg_info *db, ir_node *block) {
660 ir_graph *irg = current_ir_graph;
662 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
663 /* no need to call optimize node here, Sync are always created with no predecessors */
664 IRN_VRFY_IRG(res, irg);
/*
 * Confirm / Unknown / CallBegin / EndReg / EndExcept / Break / Filter /
 * Mux / CopyB bd-constructors.  Unknown lives in the start block with a
 * NULL dbg_info.  EndReg/EndExcept register themselves with the graph
 * and use dynamic arity (-1).  Bodies are incomplete in this dump.
 */
669 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
670 ir_node *in[2], *res;
671 ir_graph *irg = current_ir_graph;
675 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
676 res->attr.confirm.cmp = cmp;
677 res = optimize_node(res);
678 IRN_VRFY_IRG(res, irg);
680 } /* new_bd_Confirm */
683 new_bd_Unknown(ir_mode *m) {
685 ir_graph *irg = current_ir_graph;
687 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
688 res = optimize_node(res);
690 } /* new_bd_Unknown */
693 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
696 ir_graph *irg = current_ir_graph;
698 in[0] = get_Call_ptr(call);
699 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
700 /* res->attr.callbegin.irg = irg; */
701 res->attr.callbegin.call = call;
702 res = optimize_node(res);
703 IRN_VRFY_IRG(res, irg);
705 } /* new_bd_CallBegin */
708 new_bd_EndReg(dbg_info *db, ir_node *block) {
710 ir_graph *irg = current_ir_graph;
712 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
713 set_irg_end_reg(irg, res);
714 IRN_VRFY_IRG(res, irg);
716 } /* new_bd_EndReg */
719 new_bd_EndExcept(dbg_info *db, ir_node *block) {
721 ir_graph *irg = current_ir_graph;
723 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
724 set_irg_end_except(irg, res);
725 IRN_VRFY_IRG (res, irg);
727 } /* new_bd_EndExcept */
730 new_bd_Break(dbg_info *db, ir_node *block) {
732 ir_graph *irg = current_ir_graph;
734 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
735 res = optimize_node(res);
736 IRN_VRFY_IRG(res, irg);
741 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
744 ir_graph *irg = current_ir_graph;
746 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
747 res->attr.filter.proj = proj;
748 res->attr.filter.in_cg = NULL;
749 res->attr.filter.backedge = NULL;
752 assert(get_Proj_pred(res));
753 assert(get_nodes_block(get_Proj_pred(res)));
755 res = optimize_node(res);
756 IRN_VRFY_IRG(res, irg);
758 } /* new_bd_Filter */
761 new_bd_Mux(dbg_info *db, ir_node *block,
762 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
765 ir_graph *irg = current_ir_graph;
771 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
774 res = optimize_node(res);
775 IRN_VRFY_IRG(res, irg);
780 new_bd_CopyB(dbg_info *db, ir_node *block,
781 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
784 ir_graph *irg = current_ir_graph;
790 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
792 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
793 res->attr.copyb.data_type = data_type;
794 res = optimize_node(res);
795 IRN_VRFY_IRG(res, irg);
800 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
801 ir_node *objptr, ir_type *type) {
804 ir_graph *irg = current_ir_graph;
808 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
809 res->attr.instof.type = type;
810 res = optimize_node(res);
811 IRN_VRFY_IRG(res, irg);
813 } /* new_bd_InstOf */
/*
 * Raise / Bound / Pin / ASM bd-constructors.  ASM copies the
 * constraint and clobber arrays onto the graph's obstack so the node
 * owns its own storage.  Bodies are incomplete in this dump.
 */
816 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
819 ir_graph *irg = current_ir_graph;
823 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
824 res = optimize_node(res);
825 IRN_VRFY_IRG(res, irg);
830 new_bd_Bound(dbg_info *db, ir_node *block,
831 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
834 ir_graph *irg = current_ir_graph;
840 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
841 res->attr.bound.exc.pin_state = op_pin_state_pinned;
842 res = optimize_node(res);
843 IRN_VRFY_IRG(res, irg);
848 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
850 ir_graph *irg = current_ir_graph;
852 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
853 res = optimize_node(res);
854 IRN_VRFY_IRG(res, irg);
859 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
860 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
862 ir_graph *irg = current_ir_graph;
864 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
865 res->attr.assem.pin_state = op_pin_state_pinned;
866 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
867 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
868 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
869 res->attr.assem.asm_text = asm_text;
871 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
872 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
873 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
875 res = optimize_node(res);
876 IRN_VRFY_IRG(res, irg);
880 /* --------------------------------------------- */
881 /* private interfaces, for professional use only */
882 /* --------------------------------------------- */
885 /* Constructs a Block with a fixed number of predecessors.
886 Does not set current_block. Can not be used with automatic
887 Phi node construction. */
/*
 * rd-wrappers Block..Tuple: each saves current_ir_graph, switches to
 * the explicit irg, calls the bd-constructor, and restores the saved
 * graph.  NOTE(review): in new_rd_Const both the USE_ORIGINAL body and
 * the #else delegation to new_rd_Const_type appear together -- the
 * surrounding #ifdef lines are missing from this dump.
 * Bodies are incomplete in this dump.
 */
889 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
890 ir_graph *rem = current_ir_graph;
893 current_ir_graph = irg;
894 res = new_bd_Block(db, arity, in);
895 current_ir_graph = rem;
901 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
902 ir_graph *rem = current_ir_graph;
905 current_ir_graph = irg;
906 res = new_bd_Start(db, block);
907 current_ir_graph = rem;
913 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
915 ir_graph *rem = current_ir_graph;
917 current_ir_graph = irg;
918 res = new_bd_End(db, block);
919 current_ir_graph = rem;
925 /* Creates a Phi node with all predecessors. Calling this constructor
926 is only allowed if the corresponding block is mature. */
928 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
930 ir_graph *rem = current_ir_graph;
932 current_ir_graph = irg;
933 res = new_bd_Phi(db, block,arity, in, mode);
934 current_ir_graph = rem;
941 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
943 ir_graph *rem = current_ir_graph;
945 current_ir_graph = irg;
946 res = new_bd_Const_type(db, con, tp);
947 current_ir_graph = rem;
950 } /* new_rd_Const_type */
954 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
957 ir_graph *rem = current_ir_graph;
959 current_ir_graph = irg;
960 res = new_bd_Const_type(db, con, firm_unknown_type);
961 current_ir_graph = rem;
963 res = new_rd_Const_type(db, irg, con, firm_unknown_type);
970 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
971 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
972 } /* new_rd_Const_long */
976 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
978 ir_graph *rem = current_ir_graph;
980 current_ir_graph = irg;
981 res = new_bd_Id(db, block, val, mode);
982 current_ir_graph = rem;
988 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
991 ir_graph *rem = current_ir_graph;
993 current_ir_graph = irg;
994 res = new_bd_Proj(db, block, arg, mode, proj);
995 current_ir_graph = rem;
1002 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1005 ir_graph *rem = current_ir_graph;
1007 current_ir_graph = irg;
1008 res = new_bd_defaultProj(db, block, arg, max_proj);
1009 current_ir_graph = rem;
1012 } /* new_rd_defaultProj */
1015 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1017 ir_graph *rem = current_ir_graph;
1019 current_ir_graph = irg;
1020 res = new_bd_Conv(db, block, op, mode, 0);
1021 current_ir_graph = rem;
1027 new_rd_strictConv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1029 ir_graph *rem = current_ir_graph;
1031 current_ir_graph = irg;
1032 res = new_bd_Conv(db, block, op, mode, 1);
1033 current_ir_graph = rem;
1036 } /* new_rd_strictConv */
1040 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1042 ir_graph *rem = current_ir_graph;
1044 current_ir_graph = irg;
1045 res = new_bd_Cast(db, block, op, to_tp);
1046 current_ir_graph = rem;
1052 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1054 ir_graph *rem = current_ir_graph;
1056 current_ir_graph = irg;
1057 res = new_bd_Tuple(db, block, arity, in);
1058 current_ir_graph = rem;
1061 } /* new_rd_Tuple */
/*
 * Macro-instantiated rd constructors (DivMod, Borrow, ... -- most
 * instantiations are missing from this dump) plus hand-written
 * wrappers for DivRL, Cmp, Jmp, IJmp and Cond.  Bodies incomplete.
 */
1070 NEW_RD_DIVOP(DivMod)
1085 NEW_RD_BINOP(Borrow)
1087 /* creates a rd constructor for an divRL */
1088 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1089 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1092 ir_graph *rem = current_ir_graph;
1093 current_ir_graph = irg;
1094 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1095 current_ir_graph = rem;
1101 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1102 ir_node *op1, ir_node *op2) {
1104 ir_graph *rem = current_ir_graph;
1106 current_ir_graph = irg;
1107 res = new_bd_Cmp(db, block, op1, op2);
1108 current_ir_graph = rem;
1114 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1116 ir_graph *rem = current_ir_graph;
1118 current_ir_graph = irg;
1119 res = new_bd_Jmp(db, block);
1120 current_ir_graph = rem;
1126 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1128 ir_graph *rem = current_ir_graph;
1130 current_ir_graph = irg;
1131 res = new_bd_IJmp(db, block, tgt);
1132 current_ir_graph = rem;
1138 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1140 ir_graph *rem = current_ir_graph;
1142 current_ir_graph = irg;
1143 res = new_bd_Cond(db, block, c);
1144 current_ir_graph = rem;
/*
 * rd-wrappers Call..SymConst_type: same save/switch/restore pattern
 * around the corresponding bd-constructor.  new_rd_simpleSel is the
 * zero-index convenience form of new_rd_Sel.  Bodies incomplete.
 */
1151 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1152 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1154 ir_graph *rem = current_ir_graph;
1156 current_ir_graph = irg;
1157 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1158 current_ir_graph = rem;
1164 new_rd_Builtin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1165 ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
1167 ir_graph *rem = current_ir_graph;
1169 current_ir_graph = irg;
1170 res = new_bd_Builtin(db, block, store, kind, arity, in, tp);
1171 current_ir_graph = rem;
1174 } /* new_rd_Builtin */
1177 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1178 ir_node *store, int arity, ir_node **in) {
1180 ir_graph *rem = current_ir_graph;
1182 current_ir_graph = irg;
1183 res = new_bd_Return(db, block, store, arity, in);
1184 current_ir_graph = rem;
1187 } /* new_rd_Return */
1190 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1191 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1193 ir_graph *rem = current_ir_graph;
1195 current_ir_graph = irg;
1196 res = new_bd_Load(db, block, store, adr, mode, flags);
1197 current_ir_graph = rem;
1203 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1204 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1206 ir_graph *rem = current_ir_graph;
1208 current_ir_graph = irg;
1209 res = new_bd_Store(db, block, store, adr, val, flags);
1210 current_ir_graph = rem;
1213 } /* new_rd_Store */
1216 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1217 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1219 ir_graph *rem = current_ir_graph;
1221 current_ir_graph = irg;
1222 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1223 current_ir_graph = rem;
1226 } /* new_rd_Alloc */
1229 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1230 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1232 ir_graph *rem = current_ir_graph;
1234 current_ir_graph = irg;
1235 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1236 current_ir_graph = rem;
1242 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1243 ir_node *store, ir_node *objptr, ir_entity *ent) {
1245 ir_graph *rem = current_ir_graph;
1247 current_ir_graph = irg;
1248 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1249 current_ir_graph = rem;
1252 } /* new_rd_simpleSel */
1255 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1256 int arity, ir_node **in, ir_entity *ent) {
1258 ir_graph *rem = current_ir_graph;
1260 current_ir_graph = irg;
1261 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1262 current_ir_graph = rem;
1268 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1269 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1271 ir_graph *rem = current_ir_graph;
1273 current_ir_graph = irg;
1274 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1275 current_ir_graph = rem;
1278 } /* new_rd_SymConst_type */
/* Convenience form of new_rd_SymConst_type with firm_unknown_type. */
1281 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1282 symconst_symbol value, symconst_kind symkind) {
1283 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1284 } /* new_rd_SymConst */
1286 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1287 symconst_symbol sym;
1288 sym.entity_p = symbol;
1289 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1290 } /* new_rd_SymConst_addr_ent */
1292 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1293 symconst_symbol sym;
1294 sym.entity_p = symbol;
1295 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1296 } /* new_rd_SymConst_ofs_ent */
1298 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1299 symconst_symbol sym;
1300 sym.ident_p = symbol;
1301 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1302 } /* new_rd_SymConst_addr_name */
1304 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1305 symconst_symbol sym;
1306 sym.type_p = symbol;
1307 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1308 } /* new_rd_SymConst_type_tag */
1310 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1311 symconst_symbol sym;
1312 sym.type_p = symbol;
1313 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1314 } /* new_rd_SymConst_size */
1316 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1317 symconst_symbol sym;
1318 sym.type_p = symbol;
1319 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1320 } /* new_rd_SymConst_align */
/*
 * rd-wrappers Sync..Pin.  Sync builds an empty Sync node and then
 * appends each predecessor via add_Sync_pred().  EndReg/EndExcept are
 * built directly with the explicit irg (no graph switch needed) and
 * registered with the graph.  Bodies are incomplete in this dump.
 */
1323 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1325 ir_graph *rem = current_ir_graph;
1328 current_ir_graph = irg;
1329 res = new_bd_Sync(db, block);
1330 current_ir_graph = rem;
1332 for (i = 0; i < arity; ++i)
1333 add_Sync_pred(res, in[i]);
1339 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1341 ir_graph *rem = current_ir_graph;
1343 current_ir_graph = irg;
1344 res = new_bd_Confirm(db, block, val, bound, cmp);
1345 current_ir_graph = rem;
1348 } /* new_rd_Confirm */
1351 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1353 ir_graph *rem = current_ir_graph;
1355 current_ir_graph = irg;
1356 res = new_bd_Unknown(m);
1357 current_ir_graph = rem;
1360 } /* new_rd_Unknown */
1363 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1365 ir_graph *rem = current_ir_graph;
1367 current_ir_graph = irg;
1368 res = new_bd_CallBegin(db, block, call);
1369 current_ir_graph = rem;
1372 } /* new_rd_CallBegin */
1375 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1378 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1379 set_irg_end_reg(irg, res);
1380 IRN_VRFY_IRG(res, irg);
1382 } /* new_rd_EndReg */
1385 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1388 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1389 set_irg_end_except(irg, res);
1390 IRN_VRFY_IRG (res, irg);
1392 } /* new_rd_EndExcept */
1395 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1397 ir_graph *rem = current_ir_graph;
1399 current_ir_graph = irg;
1400 res = new_bd_Break(db, block);
1401 current_ir_graph = rem;
1404 } /* new_rd_Break */
1407 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1410 ir_graph *rem = current_ir_graph;
1412 current_ir_graph = irg;
1413 res = new_bd_Filter(db, block, arg, mode, proj);
1414 current_ir_graph = rem;
1417 } /* new_rd_Filter */
1420 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1421 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1423 ir_graph *rem = current_ir_graph;
1425 current_ir_graph = irg;
1426 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1427 current_ir_graph = rem;
1432 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1433 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1435 ir_graph *rem = current_ir_graph;
1437 current_ir_graph = irg;
1438 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1439 current_ir_graph = rem;
1442 } /* new_rd_CopyB */
1445 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1446 ir_node *objptr, ir_type *type) {
1448 ir_graph *rem = current_ir_graph;
1450 current_ir_graph = irg;
1451 res = new_bd_InstOf(db, block, store, objptr, type);
1452 current_ir_graph = rem;
1455 } /* new_rd_InstOf */
1458 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1460 ir_graph *rem = current_ir_graph;
1462 current_ir_graph = irg;
1463 res = new_bd_Raise(db, block, store, obj);
1464 current_ir_graph = rem;
1467 } /* new_rd_Raise */
1469 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1470 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1472 ir_graph *rem = current_ir_graph;
1474 current_ir_graph = irg;
1475 res = new_bd_Bound(db, block, store, idx, lower, upper);
1476 current_ir_graph = rem;
1479 } /* new_rd_Bound */
1481 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1483 ir_graph *rem = current_ir_graph;
1485 current_ir_graph = irg;
1486 res = new_bd_Pin(db, block, node);
1487 current_ir_graph = rem;
1492 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1493 int arity, ir_node *in[], ir_asm_constraint *inputs,
1494 int n_outs, ir_asm_constraint *outputs,
1495 int n_clobber, ident *clobber[], ident *asm_text) {
1497 ir_graph *rem = current_ir_graph;
1499 current_ir_graph = irg;
1500 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1501 current_ir_graph = rem;
1508 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1509 return new_rd_Block(NULL, irg, arity, in);
1511 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1512 return new_rd_Start(NULL, irg, block);
1514 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1515 return new_rd_End(NULL, irg, block);
1517 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1518 return new_rd_Jmp(NULL, irg, block);
1520 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1521 return new_rd_IJmp(NULL, irg, block, tgt);
1523 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1524 return new_rd_Cond(NULL, irg, block, c);
1527 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1528 ir_node *store, int arity, ir_node **in) {
1529 return new_rd_Return(NULL, irg, block, store, arity, in);
1531 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
1532 return new_rd_Const(NULL, irg, con);
1534 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
1535 return new_rd_Const_long(NULL, irg, mode, value);
1538 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
1539 return new_rd_Const_type(NULL, irg, con, tp);
1542 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1543 symconst_symbol value, symconst_kind symkind) {
1544 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
1546 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1547 ir_node *objptr, ir_entity *ent) {
1548 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1550 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1551 ir_node *objptr, int n_index, ir_node **index,
1553 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1555 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1556 ir_node *callee, int arity, ir_node **in,
1558 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1560 ir_node *new_r_Builtin(ir_graph *irg, ir_node *block, ir_node *store,
1561 ir_builtin_kind kind, int arity, ir_node **in,
1563 return new_rd_Builtin(NULL, irg, block, store, kind, arity, in, tp);
1566 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1567 ir_node *op1, ir_node *op2, ir_mode *mode) {
1568 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1571 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1572 ir_node *op1, ir_node *op2, ir_mode *mode) {
1573 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1575 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1576 ir_node *op, ir_mode *mode) {
1577 return new_rd_Minus(NULL, irg, block, op, mode);
1579 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1580 ir_node *op1, ir_node *op2, ir_mode *mode) {
1581 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1583 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1584 ir_node *op1, ir_node *op2, ir_mode *mode) {
1585 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1587 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1588 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1589 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1591 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1592 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1593 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1596 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1597 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1598 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1601 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1602 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1603 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1605 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1606 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1607 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1609 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1610 ir_node *op, ir_mode *mode) {
1611 return new_rd_Abs(NULL, irg, block, op, mode);
1613 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1614 ir_node *op1, ir_node *op2, ir_mode *mode) {
1615 return new_rd_And(NULL, irg, block, op1, op2, mode);
1617 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1618 ir_node *op1, ir_node *op2, ir_mode *mode) {
1619 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1621 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1622 ir_node *op1, ir_node *op2, ir_mode *mode) {
1623 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1625 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1626 ir_node *op, ir_mode *mode) {
1627 return new_rd_Not(NULL, irg, block, op, mode);
1629 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1630 ir_node *op, ir_node *k, ir_mode *mode) {
1631 return new_rd_Shl(NULL, irg, block, op, k, mode);
1633 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1634 ir_node *op, ir_node *k, ir_mode *mode) {
1635 return new_rd_Shr(NULL, irg, block, op, k, mode);
1637 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1638 ir_node *op, ir_node *k, ir_mode *mode) {
1639 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1641 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1642 ir_node *op, ir_node *k, ir_mode *mode) {
1643 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1645 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1646 ir_node *op, ir_node *k, ir_mode *mode) {
1647 return new_rd_Carry(NULL, irg, block, op, k, mode);
1649 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1650 ir_node *op, ir_node *k, ir_mode *mode) {
1651 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1654 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1655 ir_node *op1, ir_node *op2) {
1656 return new_rd_Cmp(NULL, irg, block, op1, op2);
1659 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1660 ir_node *op, ir_mode *mode) {
1661 return new_rd_Conv(NULL, irg, block, op, mode);
1663 ir_node *new_r_strictConv(ir_graph *irg, ir_node *block,
1664 ir_node *op, ir_mode *mode) {
1665 return new_rd_strictConv(NULL, irg, block, op, mode);
1668 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1669 return new_rd_Cast(NULL, irg, block, op, to_tp);
1672 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1673 ir_node **in, ir_mode *mode) {
1674 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1676 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1677 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1678 return new_rd_Load(NULL, irg, block, store, adr, mode, flags);
1680 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1681 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1682 return new_rd_Store(NULL, irg, block, store, adr, val, flags);
1684 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1685 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1686 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1688 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1689 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1690 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1692 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1693 return new_rd_Sync(NULL, irg, block, arity, in);
1696 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1697 ir_mode *mode, long proj) {
1698 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1701 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1703 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1706 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1707 int arity, ir_node **in) {
1708 return new_rd_Tuple(NULL, irg, block, arity, in );
1710 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1711 ir_node *val, ir_mode *mode) {
1712 return new_rd_Id(NULL, irg, block, val, mode);
1715 ir_node *new_r_Bad(ir_graph *irg) {
1716 return get_irg_bad(irg);
1718 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1719 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1721 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1722 return new_rd_Unknown(irg, m);
1724 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1725 return new_rd_CallBegin(NULL, irg, block, callee);
1727 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1728 return new_rd_EndReg(NULL, irg, block);
1730 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1731 return new_rd_EndExcept(NULL, irg, block);
1733 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1734 return new_rd_Break(NULL, irg, block);
1736 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1737 ir_mode *mode, long proj) {
1738 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1740 ir_node *new_r_NoMem(ir_graph *irg) {
1741 return get_irg_no_mem(irg);
1743 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1744 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1745 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1747 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1748 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1749 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1751 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1753 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1755 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1756 ir_node *store, ir_node *obj) {
1757 return new_rd_Raise(NULL, irg, block, store, obj);
1759 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1760 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1761 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1763 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1764 return new_rd_Pin(NULL, irg, block, node);
1766 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1767 int arity, ir_node *in[], ir_asm_constraint *inputs,
1768 int n_outs, ir_asm_constraint *outputs,
1769 int n_clobber, ident *clobber[], ident *asm_text) {
1770 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1773 /** ********************/
1774 /** public interfaces */
1775 /** construction tools */
1781 * - create a new Start node in the current block
1783 * @return s - pointer to the created Start node
1788 new_d_Start(dbg_info *db) {
1791 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1792 op_Start, mode_T, 0, NULL);
1794 res = optimize_node(res);
1795 IRN_VRFY_IRG(res, current_ir_graph);
1800 new_d_End(dbg_info *db) {
1802 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1803 op_End, mode_X, -1, NULL);
1804 res = optimize_node(res);
1805 IRN_VRFY_IRG(res, current_ir_graph);
1810 /* Constructs a Block with a fixed number of predecessors.
1811 Does set current_block. Can be used with automatic Phi
1812 node construction. */
1814 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1817 int has_unknown = 0;
1819 res = new_bd_Block(db, arity, in);
1821 /* Create and initialize array for Phi-node construction. */
1822 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1823 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1824 current_ir_graph->n_loc);
1825 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1828 for (i = arity-1; i >= 0; i--)
1829 if (is_Unknown(in[i])) {
1834 if (!has_unknown) res = optimize_node(res);
1836 IRN_VRFY_IRG(res, current_ir_graph);
1842 /* ***********************************************************************/
1843 /* Methods necessary for automatic Phi node creation */
1845 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1846 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1847 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1848 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1850 Call Graph: ( A ---> B == A "calls" B)
1852 get_value mature_immBlock
1860 get_r_value_internal |
1864 new_rd_Phi0 new_rd_Phi_in
1866 * *************************************************************************** */
1868 /** Creates a Phi node with 0 predecessors. */
1869 static inline ir_node *
1870 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1873 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1874 IRN_VRFY_IRG(res, irg);
1880 * Internal constructor of a Phi node by a phi_merge operation.
1882 * @param irg the graph on which the Phi will be constructed
1883 * @param block the block in which the Phi will be constructed
1884 * @param mode the mod eof the Phi node
1885 * @param in the input array of the phi node
1886 * @param ins number of elements in the input array
1887 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1888 * the value for which the new Phi is constructed
1890 static inline ir_node *
1891 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1892 ir_node **in, int ins, ir_node *phi0) {
1894 ir_node *res, *known;
/* NOTE(review): this fragment is truncated (several original lines are
   missing, e.g. declarations and closing braces); comments below describe
   only what the visible code shows. */
1896 /* Allocate a new node on the obstack. The allocation copies the in
1898 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1899 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1901 /* This loop checks whether the Phi has more than one predecessor.
1902 If so, it is a real Phi node and we break the loop. Else the
1903 Phi node merges the same definition on several paths and therefore
1905 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
1907 for (i = ins - 1; i >= 0; --i) {
1910 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1912 /* Optimize self referencing Phis: We can't detect them yet properly, as
1913 they still refer to the Phi0 they will replace. So replace right now. */
1914 if (phi0 && in[i] == phi0)
1917 if (in[i] == res || in[i] == known)
1926 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* Folding case: the would-be Phi is removed from the obstack and replaced
   by its single known predecessor. */
1929 edges_node_deleted(res, current_ir_graph);
1930 obstack_free(current_ir_graph->obst, res);
1931 if (is_Phi(known)) {
1932 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1933 order, an enclosing Phi know may get superfluous. */
1934 res = optimize_in_place_2(known);
1936 exchange(known, res);
1941 /* A undefined value, e.g., in unreachable code. */
1945 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1946 IRN_VRFY_IRG(res, irg);
1947 /* Memory Phis in endless loops must be kept alive.
1948 As we can't distinguish these easily we keep all of them alive. */
1949 if (is_Phi(res) && mode == mode_M)
1950 add_End_keepalive(get_irg_end(irg), res);
1954 } /* new_rd_Phi_in */
1957 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1959 #if PRECISE_EXC_CONTEXT
1961 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1964 * Construct a new frag_array for node n.
1965 * Copy the content from the current graph_arr of the corresponding block:
1966 * this is the current state.
1967 * Set ProjM(n) as current memory state.
1968 * Further the last entry in frag_arr of current block points to n. This
1969 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Builds a new frag_arr for node n: copies the current block's graph_arr
   and installs a Proj(n, mode_M) as entry 0 (the current memory state).
   NOTE(review): fragment is truncated — the if/else opcode dispatch and
   some declarations are missing from this view. */
1971 static inline ir_node **new_frag_arr(ir_node *n) {
1975 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1976 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1977 sizeof(ir_node *)*current_ir_graph->n_loc);
1979 /* turn off optimization before allocating Proj nodes, as res isn't
1981 opt = get_opt_optimize(); set_optimize(0);
1982 /* Here we rely on the fact that all frag ops have Memory as first result! */
1984 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1985 } else if (is_CopyB(n)) {
1986 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* For all remaining fragile ops the memory Proj number is asserted to be
   identical, so pn_Alloc_M can be used uniformly. */
1988 assert((pn_Quot_M == pn_DivMod_M) &&
1989 (pn_Quot_M == pn_Div_M) &&
1990 (pn_Quot_M == pn_Mod_M) &&
1991 (pn_Quot_M == pn_Load_M) &&
1992 (pn_Quot_M == pn_Store_M) &&
1993 (pn_Quot_M == pn_Alloc_M) &&
1994 (pn_Quot_M == pn_Bound_M));
1995 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Remember n as the last fragile op of the current block (chain of frag ops). */
1999 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2001 } /* new_frag_arr */
2004 * Returns the frag_arr from a node.
/* Returns the frag_arr stored in the attributes of a fragile node.
   NOTE(review): the case labels of the switch are missing from this
   fragment; only the per-opcode attribute accesses are visible. */
2006 static inline ir_node **get_frag_arr(ir_node *n) {
2007 switch (get_irn_opcode(n)) {
2009 return n->attr.call.exc.frag_arr;
2011 return n->attr.alloc.exc.frag_arr;
2013 return n->attr.load.exc.frag_arr;
2015 return n->attr.store.exc.frag_arr;
2017 return n->attr.except.frag_arr;
2019 } /* get_frag_arr */
2022 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2023 #ifdef DEBUG_libfirm
2026 for (i = 1024; i >= 0; --i)
2031 if (frag_arr[pos] == NULL)
2032 frag_arr[pos] = val;
2033 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
2034 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2035 assert(arr != frag_arr && "Endless recursion detected");
2040 assert(!"potential endless recursion in set_frag_value");
2041 } /* set_frag_value */
/* Returns the value at pos as seen after the fragile op cfOp, building a
   Phi if a later set_value() shadowed it.
   NOTE(review): fragment is truncated (declarations, some braces and the
   final return are missing from this view). */
2044 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2048 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
2050 frag_arr = get_frag_arr(cfOp);
2051 res = frag_arr[pos];
2053 if (block->attr.block.graph_arr[pos] != NULL) {
2054 /* There was a set_value() after the cfOp and no get_value() before that
2055 set_value(). We must build a Phi node now. */
2056 if (block->attr.block.is_matured) {
2057 int ins = get_irn_arity(block);
2059 NEW_ARR_A(ir_node *, nin, ins);
2060 res = phi_merge(block, pos, mode, nin, ins);
/* Block not matured yet: queue a Phi0 on the block's phi list, to be
   completed by mature_immBlock(). */
2062 res = new_rd_Phi0(current_ir_graph, block, mode);
2063 res->attr.phi.u.pos = pos;
2064 res->attr.phi.next = block->attr.block.phis;
2065 block->attr.block.phis = res;
2067 assert(res != NULL);
2068 /* It's a Phi, we can write this into all graph_arrs with NULL */
2069 set_frag_value(block->attr.block.graph_arr, pos, res);
2071 res = get_r_value_internal(block, pos, mode);
2072 set_frag_value(block->attr.block.graph_arr, pos, res);
2076 } /* get_r_frag_value_internal */
2077 #endif /* PRECISE_EXC_CONTEXT */
2080 * Check whether a control flownode cf_pred represents an exception flow.
2082 * @param cf_pred the control flow node
2083 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2085 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2087 * Note: all projections from a raise are "exceptional control flow" we we handle it
2088 * like a normal Jmp, because there is no "regular" one.
2089 * That's why Raise is no "fragile_op"!
2091 if (is_fragile_op(prev_cf_op)) {
2092 if (is_Proj(cf_pred)) {
2093 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2094 /* the regular control flow, NO exception */
2097 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2100 /* Hmm, exception but not a Proj? */
2101 assert(!"unexpected condition: fragile op without a proj");
2105 } /* is_exception_flow */
2108 * Computes the predecessors for the real phi node, and then
2109 * allocates and returns this node. The routine called to allocate the
2110 * node might optimize it away and return a real value.
2111 * This function must be called with an in-array of proper size.
/* Collects the predecessors for a real Phi at value slot pos and constructs
   it via new_rd_Phi_in().  May allocate a Phi0 first to break recursion.
   NOTE(review): fragment is truncated (declarations, else-branches and
   closing braces are missing from this view); comments describe only the
   visible code. */
2114 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2115 ir_node *prevBlock, *res, *phi0, *phi0_all;
2118 /* If this block has no value at pos create a Phi0 and remember it
2119 in graph_arr to break recursions.
2120 Else we may not set graph_arr as there a later value is remembered. */
2122 if (block->attr.block.graph_arr[pos] == NULL) {
2123 ir_graph *irg = current_ir_graph;
2125 if (block == get_irg_start_block(irg)) {
2126 /* Collapsing to Bad tarvals is no good idea.
2127 So we call a user-supplied routine here that deals with this case as
2128 appropriate for the given language. Sorrily the only help we can give
2129 here is the position.
2131 Even if all variables are defined before use, it can happen that
2132 we get to the start block, if a Cond has been replaced by a tuple
2133 (bad, jmp). In this case we call the function needlessly, eventually
2134 generating an non existent error.
2135 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2138 if (default_initialize_local_variable != NULL) {
2139 ir_node *rem = get_cur_block();
2141 set_cur_block(block);
2142 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2146 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2147 /* We don't need to care about exception ops in the start block.
2148 There are none by definition. */
2149 return block->attr.block.graph_arr[pos];
2151 phi0 = new_rd_Phi0(irg, block, mode);
2152 block->attr.block.graph_arr[pos] = phi0;
2153 #if PRECISE_EXC_CONTEXT
2154 if (get_opt_precise_exc_context()) {
2155 /* Set graph_arr for fragile ops. Also here we should break recursion.
2156 We could choose a cyclic path through an cfop. But the recursion would
2157 break at some point. */
2158 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2164 /* This loop goes to all predecessor blocks of the block the Phi node
2165 is in and there finds the operands of the Phi node by calling
2166 get_r_value_internal. */
2167 for (i = 1; i <= ins; ++i) {
2168 ir_node *cf_pred = block->in[i];
2169 ir_node *prevCfOp = skip_Proj(cf_pred);
2171 if (is_Bad(prevCfOp)) {
2172 /* In case a Cond has been optimized we would get right to the start block
2173 with an invalid definition. */
2174 nin[i-1] = new_Bad();
2177 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2179 if (!is_Bad(prevBlock)) {
2180 #if PRECISE_EXC_CONTEXT
2181 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2182 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2183 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2186 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2188 nin[i-1] = new_Bad();
2192 /* We want to pass the Phi0 node to the constructor: this finds additional
2193 optimization possibilities.
2194 The Phi0 node either is allocated in this function, or it comes from
2195 a former call to get_r_value_internal(). In this case we may not yet
2196 exchange phi0, as this is done in mature_immBlock(). */
2198 phi0_all = block->attr.block.graph_arr[pos];
2199 if (! is_Phi0(phi0_all) ||
2200 get_irn_arity(phi0_all) != 0 ||
2201 get_nodes_block(phi0_all) != block)
2207 /* After collecting all predecessors into the array nin a new Phi node
2208 with these predecessors is created. This constructor contains an
2209 optimization: If all predecessors of the Phi node are identical it
2210 returns the only operand instead of a new Phi node. */
2211 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2213 /* In case we allocated a Phi0 node at the beginning of this procedure,
2214 we need to exchange this Phi0 with the real Phi. */
2216 exchange(phi0, res);
2217 block->attr.block.graph_arr[pos] = res;
2218 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2219 only an optimization. */
2226 * This function returns the last definition of a value. In case
2227 * this value was last defined in a previous block, Phi nodes are
2228 * inserted. If the part of the firm graph containing the definition
2229 * is not yet constructed, a dummy Phi node is returned.
2231 * @param block the current block
2232 * @param pos the value number of the value searched
2233 * @param mode the mode of this value (needed for Phi construction)
/* Returns the last definition of value slot pos reaching block, inserting
   Phi (or Phi0) nodes as needed.
   NOTE(review): fragment is truncated (declarations, an early return and
   some braces are missing from this view). */
2236 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2238 /* There are 4 cases to treat.
2240 1. The block is not mature and we visit it the first time. We can not
2241 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2242 predecessors is returned. This node is added to the linked list (block
2243 attribute "phis") of the containing block to be completed when this block is
2244 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2247 2. The value is already known in this block, graph_arr[pos] is set and we
2248 visit the block the first time. We can return the value without
2249 creating any new nodes.
2251 3. The block is mature and we visit it the first time. A Phi node needs
2252 to be created (phi_merge). If the Phi is not needed, as all it's
2253 operands are the same value reaching the block through different
2254 paths, it's optimized away and the value itself is returned.
2256 4. The block is mature, and we visit it the second time. Now two
2257 subcases are possible:
2258 * The value was computed completely the last time we were here. This
2259 is the case if there is no loop. We can return the proper value.
2260 * The recursion that visited this node and set the flag did not
2261 return yet. We are computing a value in a loop and need to
2262 break the recursion. This case only happens if we visited
2263 the same block with phi_merge before, which inserted a Phi0.
2264 So we return the Phi0.
2267 /* case 4 -- already visited. */
2268 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2269 /* As phi_merge allocates a Phi0 this value is always defined. Here
2270 is the critical difference of the two algorithms. */
2271 assert(block->attr.block.graph_arr[pos]);
2272 return block->attr.block.graph_arr[pos];
2275 /* visited the first time */
2276 set_irn_visited(block, get_irg_visited(current_ir_graph));
2278 /* Get the local valid value */
2279 res = block->attr.block.graph_arr[pos];
2281 /* case 2 -- If the value is actually computed, return it. */
2285 if (block->attr.block.is_matured) { /* case 3 */
2287 /* The Phi has the same amount of ins as the corresponding block. */
2288 int ins = get_irn_arity(block);
2290 NEW_ARR_A(ir_node *, nin, ins);
2292 /* Phi merge collects the predecessors and then creates a node. */
2293 res = phi_merge(block, pos, mode, nin, ins);
2295 } else { /* case 1 */
2296 /* The block is not mature, we don't know how many in's are needed. A Phi
2297 with zero predecessors is created. Such a Phi node is called Phi0
2298 node. The Phi0 is then added to the list of Phi0 nodes in this block
2299 to be matured by mature_immBlock later.
2300 The Phi0 has to remember the pos of it's internal value. If the real
2301 Phi is computed, pos is used to update the array with the local
2303 res = new_rd_Phi0(current_ir_graph, block, mode);
2304 res->attr.phi.u.pos = pos;
2305 res->attr.phi.next = block->attr.block.phis;
2306 block->attr.block.phis = res;
2309 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2311 /* The local valid value is available now. */
2312 block->attr.block.graph_arr[pos] = res;
2315 } /* get_r_value_internal */
2317 /* ************************************************************************** */
2320 * Finalize a Block node, when all control flows are known.
2321 * Acceptable parameters are only Block nodes.
/* Finalizes a Block when all control-flow predecessors are known: fixes the
   backedge array, matures all queued Phi0 nodes via phi_merge(), and marks
   the block matured.
   NOTE(review): fragment is truncated (declarations and closing braces are
   missing from this view). */
2324 mature_immBlock(ir_node *block) {
2329 assert(is_Block(block));
2330 if (!get_Block_matured(block)) {
2331 ir_graph *irg = current_ir_graph;
2333 ins = ARR_LEN(block->in) - 1;
2334 /* Fix block parameters */
2335 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2337 /* An array for building the Phi nodes. */
2338 NEW_ARR_A(ir_node *, nin, ins);
2340 /* Traverse a chain of Phi nodes attached to this block and mature
2342 for (n = block->attr.block.phis; n; n = next) {
2343 inc_irg_visited(irg);
2344 next = n->attr.phi.next;
2345 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2348 block->attr.block.is_matured = 1;
2350 /* Now, as the block is a finished Firm node, we can optimize it.
2351 Since other nodes have been allocated since the block was created
2352 we can not free the node on the obstack. Therefore we have to call
2353 optimize_in_place().
2354 Unfortunately the optimization does not change a lot, as all allocated
2355 nodes refer to the unoptimized node.
2356 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2357 block = optimize_in_place_2(block);
2358 IRN_VRFY_IRG(block, irg);
2360 } /* mature_immBlock */
2363 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2364 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2368 new_d_Const(dbg_info *db, tarval *con) {
2369 return new_bd_Const(db, con);
2373 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2374 return new_bd_Const_long(db, mode, value);
2375 } /* new_d_Const_long */
2379 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
2380 return new_bd_Const_type(db, con, tp);
2381 } /* new_d_Const_type */
2385 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2386 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2390 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2391 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2396 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2398 assert(arg->op == op_Cond);
2399 arg->attr.cond.kind = fragmentary;
2400 arg->attr.cond.default_proj = max_proj;
2401 res = new_d_Proj(db, arg, mode_X, max_proj);
2403 } /* new_d_defaultProj */
2406 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2407 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2411 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2412 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2413 } /* new_d_strictConv */
/* Cast node in the current block: reinterprets op as type to_tp.
   NOTE(review): return-type lines / closing braces missing from excerpt. */
2417 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2418 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
/* Tuple node in the current block grouping `arity` operands. */
2422 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2423 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2434 * Allocate a frag array for a node if the current graph state is phase_building.
2436 * @param irn the node for which the frag array should be allocated
2437  * @param op         the opcode of the (original) node; if it does not match the opcode of irn, nothing is done
2439 * @param frag_store the address of the frag store in irn attributes, if this
2440 * address contains a value != NULL, does nothing
2442 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2443 if (get_opt_precise_exc_context()) {
2444 if ((current_ir_graph->phase_state == phase_building) &&
2445 (get_irn_op(irn) == op) && /* Could be optimized away. */
2446 !*frag_store) /* Could be a cse where the arr is already set. */ {
2447 *frag_store = new_frag_arr(irn);
2450 } /* firm_alloc_frag_arr */
/* -- Division-family constructors (Quot, DivMod, Div, DivRL, Mod).
   Common pattern: build the node in the current block, then, under
   PRECISE_EXC_CONTEXT, attach a fragment array so the memory state at a
   potential exception can be reconstructed.
   NOTE(review): the `ir_node *res;` declarations, `#endif` lines, and
   `return res;` statements are not visible in this excerpt. -- */
2453 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2455 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2456 #if PRECISE_EXC_CONTEXT
2457 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2464 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2466 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2467 #if PRECISE_EXC_CONTEXT
2468 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2472 } /* new_d_DivMod */
2476 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2478 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2479 #if PRECISE_EXC_CONTEXT
2480 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
/* NOTE(review): DivRL passes op_Div (not a distinct DivRL opcode) to
   firm_alloc_frag_arr -- presumably remainderless division shares the
   Div opcode; confirm against new_bd_DivRL. */
2488 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2490 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2491 #if PRECISE_EXC_CONTEXT
2492 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2499 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2501 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2502 #if PRECISE_EXC_CONTEXT
2503 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
/* -- Comparison and control-flow constructors operating on the current
   block.  NOTE(review): return-type lines and several closing lines are
   not visible in this excerpt. -- */
2523 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2524 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2528 new_d_Jmp(dbg_info *db) {
2529 return new_bd_Jmp(db, current_ir_graph->current_block);
/* Indirect jump to computed target tgt. */
2533 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2534 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2538 new_d_Cond(dbg_info *db, ir_node *c) {
2539 return new_bd_Cond(db, current_ir_graph->current_block, c);
/* Call: builds the node, then attaches an exception frag array under
   PRECISE_EXC_CONTEXT (same pattern as the division constructors).
   NOTE(review): the `ir_type *tp` parameter line, `ir_node *res;`,
   `#endif` and `return res;` are not visible in this excerpt. */
2544 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2547 res = new_bd_Call(db, current_ir_graph->current_block,
2548 store, callee, arity, in, tp);
2549 #if PRECISE_EXC_CONTEXT
2550 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2557 new_d_Builtin(dbg_info *db, ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
2559 return new_bd_Builtin(db, current_ir_graph->current_block, store, kind, arity, in, tp);
2560 } /* new_d_Builtin */
/* NOTE(review): the continuation line passing store/arity/in to
   new_bd_Return is not visible in this excerpt. */
2563 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2564 return new_bd_Return(db, current_ir_graph->current_block,
2566 } /* new_d_Return */
/* -- Memory-operation constructors (Load, Store, Alloc, Free).  Load,
   Store and Alloc attach an exception frag array under
   PRECISE_EXC_CONTEXT; Free cannot raise and forwards directly.
   NOTE(review): `ir_node *res;` declarations, `#endif` lines and
   `return res;` statements are not visible in this excerpt. -- */
2569 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
2571 res = new_bd_Load(db, current_ir_graph->current_block,
2572 store, addr, mode, flags);
2573 #if PRECISE_EXC_CONTEXT
2574 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2581 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
2583 res = new_bd_Store(db, current_ir_graph->current_block,
2584 store, addr, val, flags);
2585 #if PRECISE_EXC_CONTEXT
2586 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2593 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2594 ir_where_alloc where) {
2596 res = new_bd_Alloc(db, current_ir_graph->current_block,
2597 store, size, alloc_type, where);
2598 #if PRECISE_EXC_CONTEXT
2599 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
2606 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2607 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2608 return new_bd_Free(db, current_ir_graph->current_block,
2609 store, ptr, size, free_type, where);
/* -- Entity-selection and symbolic-constant constructors.
   NOTE(review): return-type lines and some continuation/closing lines
   are not visible in this excerpt. -- */
/* simpleSel: Sel without index operands (n_index = 0, index = NULL). */
2613 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2614 /* GL: objptr was called frame before. Frame was a bad choice for the name
2615 as the operand could as well be a pointer to a dynamic object. */
2617 return new_bd_Sel(db, current_ir_graph->current_block,
2618 store, objptr, 0, NULL, ent);
2619 } /* new_d_simpleSel */
2622 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2623 return new_bd_Sel(db, current_ir_graph->current_block,
2624 store, objptr, n_index, index, sel);
/* SymConst nodes are always placed in the start block of the graph.
   NOTE(review): the continuation line passing value/kind/tp is not
   visible in this excerpt. */
2628 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2629 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2631 } /* new_d_SymConst_type */
/* Untyped SymConst delegates to the typed variant with firm_unknown_type. */
2634 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2635 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2636 value, kind, firm_unknown_type);
2637 } /* new_d_SymConst */
/* Sync uses the rd (graph-explicit) constructor, unlike its siblings. */
2640 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2641 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
/* NOTE(review): the continuation line passing val/bound/cmp is not
   visible in this excerpt. */
2645 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2646 return new_bd_Confirm(db, current_ir_graph->current_block,
2648 } /* new_d_Confirm */
/* -- Misc and interprocedural-view constructors.
   NOTE(review): return-type lines and some continuation/closing lines
   are not visible in this excerpt. -- */
/* Unknown takes no block; the bd constructor decides placement. */
2651 new_d_Unknown(ir_mode *m) {
2652 return new_bd_Unknown(m);
2653 } /* new_d_Unknown */
2656 new_d_CallBegin(dbg_info *db, ir_node *call) {
2657 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2658 } /* new_d_CallBegin */
2661 new_d_EndReg(dbg_info *db) {
2662 return new_bd_EndReg(db, current_ir_graph->current_block);
2663 } /* new_d_EndReg */
2666 new_d_EndExcept(dbg_info *db) {
2667 return new_bd_EndExcept(db, current_ir_graph->current_block);
2668 } /* new_d_EndExcept */
2671 new_d_Break(dbg_info *db) {
2672 return new_bd_Break(db, current_ir_graph->current_block);
/* NOTE(review): the continuation line passing arg/mode/proj is not
   visible in this excerpt. */
2676 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2677 return new_bd_Filter(db, current_ir_graph->current_block,
2679 } /* new_d_Filter */
2682 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2683 ir_node *ir_true, ir_mode *mode) {
2684 return new_bd_Mux(db, current_ir_graph->current_block,
2685 sel, ir_false, ir_true, mode);
/* -- Block-copy, type-test and bounds-check constructors.  CopyB and
   Bound attach exception frag arrays under PRECISE_EXC_CONTEXT.
   NOTE(review): `ir_node *res;` declarations, `#endif` lines and
   `return res;` statements are not visible in this excerpt. -- */
2688 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2689 ir_node *dst, ir_node *src, ir_type *data_type) {
2691 res = new_bd_CopyB(db, current_ir_graph->current_block,
2692 store, dst, src, data_type);
2693 #if PRECISE_EXC_CONTEXT
2694 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2700 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2701 return new_bd_InstOf(db, current_ir_graph->current_block,
2702 store, objptr, type);
2703 } /* new_d_InstOf */
2706 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2707 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2710 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2711 ir_node *idx, ir_node *lower, ir_node *upper) {
2713 res = new_bd_Bound(db, current_ir_graph->current_block,
2714 store, idx, lower, upper);
2715 #if PRECISE_EXC_CONTEXT
2716 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2722 new_d_Pin(dbg_info *db, ir_node *node) {
2723 return new_bd_Pin(db, current_ir_graph->current_block, node);
2727 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2728 int n_outs, ir_asm_constraint *outputs,
2729 int n_clobber, ident *clobber[], ident *asm_text) {
2730 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2733 /* ********************************************************************* */
2734 /* Comfortable interface with automatic Phi node construction. */
2735 /* (Uses also constructors of ?? interface, except new_Block. */
2736 /* ********************************************************************* */
2738 /* Block construction */
2739 /* immature Block without predecessors */
/* Creates an immature Block (no predecessors yet) for SSA construction.
   Predecessors are added later with add_immBlock_pred and the block is
   finished with mature_immBlock.
   NOTE(review): the return-type line, `ir_node *res;` declaration and
   `return res;` are not visible in this excerpt. */
2741 new_d_immBlock(dbg_info *db) {
2744 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2745 /* creates a new dynamic in-array as length of in is -1 */
2746 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2748 /* macroblock head */
/* Not matured yet: predecessors may still be added; must not be optimized. */
2751 res->attr.block.is_matured = 0;
2752 res->attr.block.is_dead = 0;
2753 res->attr.block.is_mb_head = 1;
2754 res->attr.block.has_label = 0;
2755 res->attr.block.irg = current_ir_graph;
2756 res->attr.block.backedge = NULL;
2757 res->attr.block.in_cg = NULL;
2758 res->attr.block.cg_backedge = NULL;
2759 res->attr.block.extblk = NULL;
2760 res->attr.block.region = NULL;
2761 res->attr.block.mb_depth = 0;
2762 res->attr.block.label = 0;
2764 set_Block_block_visited(res, 0);
2766 /* Create and initialize array for Phi-node construction. */
/* graph_arr holds one slot per local variable (n_loc); slot 0 is the
   memory state -- zeroed so unset values are recognizable. */
2767 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2768 current_ir_graph->n_loc);
2769 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2771 /* Immature block may not be optimized! */
2772 IRN_VRFY_IRG(res, current_ir_graph);
2775 } /* new_d_immBlock */
2778 new_immBlock(void) {
2779 return new_d_immBlock(NULL);
2780 } /* new_immBlock */
2782 /* immature PartBlock with its predecessors */
/* Creates an immature PartBlock: an immature block that continues the
   macroblock of pred_jmp's block (shares its macroblock header and is
   one level deeper).
   NOTE(review): the return-type line and `return res;` are not visible
   in this excerpt. */
2784 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2785 ir_node *res = new_d_immBlock(db);
2786 ir_node *blk = get_nodes_block(pred_jmp);
/* in[0] is the macroblock header: inherit it from the predecessor's block. */
2788 res->in[0] = blk->in[0];
2789 assert(res->in[0] != NULL);
2790 add_immBlock_pred(res, pred_jmp);
/* A PartBlock is not itself a macroblock head (new_d_immBlock set 1). */
2792 res->attr.block.is_mb_head = 0;
2793 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2796 } /* new_d_immPartBlock */
2799 new_immPartBlock(ir_node *pred_jmp) {
2800 return new_d_immPartBlock(NULL, pred_jmp);
2801 } /* new_immPartBlock */
2803 /* add an edge to a jmp/control flow node */
2805 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2806 int n = ARR_LEN(block->in) - 1;
2808 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2809 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2810 assert(is_ir_node(jmp));
2812 ARR_APP1(ir_node *, block->in, jmp);
2814 hook_set_irn_n(block, n, jmp, NULL);
2815 } /* add_immBlock_pred */
2817 /* changing the current block */
2819 set_cur_block(ir_node *target) {
2820 current_ir_graph->current_block = target;
2821 } /* set_cur_block */
2823 /* ************************ */
2824 /* parameter administration */
2826 /* get a value from the parameter array from the current block by its index */
/* Reads local variable `pos` from the current block, triggering Phi
   construction via get_r_value_internal.  Slot pos+1 is used because
   slot 0 of graph_arr holds the memory state.
   NOTE(review): the return-type line and some assert lines are not
   visible in this excerpt. */
2828 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2829 ir_graph *irg = current_ir_graph;
2830 assert(get_irg_phase_state(irg) == phase_building);
/* Fresh visited epoch so the recursive value lookup terminates. */
2831 inc_irg_visited(irg);
2836 return get_r_value_internal(irg->current_block, pos + 1, mode);
2839 /* get a value from the parameter array from the current block by its index */
2841 get_value(int pos, ir_mode *mode) {
2842 return get_d_value(NULL, pos, mode);
2845 /* set a value at position pos in the parameter array from the current block */
/* Stores `value` as local variable `pos` of the current block; slot 0
   of graph_arr is reserved for the memory state, hence pos + 1.
   NOTE(review): the return-type line and closing brace are not visible
   in this excerpt. */
2847 set_value(int pos, ir_node *value) {
2848 ir_graph *irg = current_ir_graph;
2849 assert(get_irg_phase_state(irg) == phase_building);
2851 assert(pos+1 < irg->n_loc);
2852 assert(is_ir_node(value));
2853 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2856 /* Find the value number for a node in the current block.*/
/* Linear backward scan over graph_arr (slots >= 1; slot 0 is memory).
   NOTE(review): the return type, `int i;` declaration and the return
   statements are not visible in this excerpt. */
2858 find_value(ir_node *value) {
2860 ir_node *bl = current_ir_graph->current_block;
2862 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2863 if (bl->attr.block.graph_arr[i] == value)
2868 /* get the current store */
/* Body of get_store: returns the current memory state (slot 0, mode_M)
   of the current block.
   NOTE(review): the function's signature line is not visible in this
   excerpt -- presumably `ir_node *get_store(void) {`; confirm against
   the full source. */
2871 ir_graph *irg = current_ir_graph;
2873 assert(get_irg_phase_state(irg) == phase_building);
2874 /* GL: one could call get_value instead */
2875 inc_irg_visited(irg);
2876 return get_r_value_internal(irg->current_block, 0, mode_M);
2879 /* set the current store: handles automatic Sync construction for Load nodes */
/* Sets the current memory state (slot 0 of the current block's
   graph_arr).  With get_opt_auto_create_sync() enabled, consecutive
   non-volatile Loads are decoupled via Sync nodes so they do not
   serialize on each other.
   NOTE(review): the return-type line, several closing braces and the
   lines filling in[0]/in[1] before new_Sync are not visible in this
   excerpt. */
2881 set_store(ir_node *store) {
2882 ir_node *load, *pload, *pred, *in[2];
2884 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2885 /* Beware: due to dead code elimination, a store might become a Bad node even in
2886 the construction phase. */
2887 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2889 if (get_opt_auto_create_sync()) {
2890 /* handle non-volatile Load nodes by automatically creating Sync's */
2891 load = skip_Proj(store);
2892 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2893 pred = get_Load_mem(load);
2895 if (is_Sync(pred)) {
2896 /* a Load after a Sync: move it up */
2897 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2899 set_Load_mem(load, get_memop_mem(mem));
2900 add_Sync_pred(pred, store);
2903 pload = skip_Proj(pred);
2904 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2905 /* a Load after a Load: create a new Sync */
2906 set_Load_mem(load, get_Load_mem(pload));
2910 store = new_Sync(2, in);
2915 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Marks ka as kept-alive by adding it to the End node's keep-alive list,
   protecting it from dead-node elimination.
   NOTE(review): the `void` return-type line and closing brace are not
   visible in this excerpt. */
2919 keep_alive(ir_node *ka) {
2920 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2923 /* --- Useful access routines --- */
2924 /* Returns the current block of the current graph. To set the current
2925 block use set_cur_block. */
2926 ir_node *get_cur_block(void) {
2927 return get_irg_current_block(current_ir_graph);
2928 } /* get_cur_block */
2930 /* Returns the frame type of the current graph */
2931 ir_type *get_cur_frame_type(void) {
2932 return get_irg_frame_type(current_ir_graph);
2933 } /* get_cur_frame_type */
2936 /* ********************************************************************* */
2939 /* call once for each run of the library */
2941 firm_init_cons(uninitialized_local_variable_func_t *func) {
2942 default_initialize_local_variable = func;
2943 } /* firm_init_cons */
/* Finalizes construction of every graph in the program and advances the
   program's phase state to phase_high.
   NOTE(review): the `void` return-type line, the `int i;` declaration
   and the loop's closing brace are not visible in this excerpt. */
2946 irp_finalize_cons(void) {
2948 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2949 irg_finalize_cons(get_irp_irg(i));
2951 irp->phase_state = phase_high;
2952 } /* irp_finalize_cons */
/* -- Pinned interface, part 1: thin wrappers that forward to the new_d_*
   constructors with a NULL debug-info argument.
   NOTE(review): fragmentary listing -- most closing braces are not
   visible in this excerpt. -- */
2955 ir_node *new_Block(int arity, ir_node **in) {
2956 return new_d_Block(NULL, arity, in);
2958 ir_node *new_Start(void) {
2959 return new_d_Start(NULL);
2961 ir_node *new_End(void) {
2962 return new_d_End(NULL);
2964 ir_node *new_Jmp(void) {
2965 return new_d_Jmp(NULL);
2967 ir_node *new_IJmp(ir_node *tgt) {
2968 return new_d_IJmp(NULL, tgt);
2970 ir_node *new_Cond(ir_node *c) {
2971 return new_d_Cond(NULL, c);
2974 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2975 return new_d_Return(NULL, store, arity, in);
2977 ir_node *new_Const(tarval *con) {
2978 return new_d_Const(NULL, con);
2981 ir_node *new_Const_long(ir_mode *mode, long value) {
2982 return new_d_Const_long(NULL, mode, value);
2986 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2987 return new_d_Const_type(NULL, con, tp);
2991 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2992 return new_d_SymConst_type(NULL, mode, value, kind, type);
2994 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2995 return new_d_SymConst(NULL, mode, value, kind);
2997 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2998 return new_d_simpleSel(NULL, store, objptr, ent);
/* NOTE(review): the `ir_entity *ent` parameter line of new_Sel and the
   `ir_type *tp` lines of new_Call/new_Builtin are not visible here. */
3000 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3002 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3004 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3006 return new_d_Call(NULL, store, callee, arity, in, tp);
3008 ir_node *new_Builtin(ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
3010 return new_d_Builtin(NULL, store, kind, arity, in, tp);
3013 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3014 return new_d_Add(NULL, op1, op2, mode);
3017 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3018 return new_d_Sub(NULL, op1, op2, mode);
3020 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3021 return new_d_Minus(NULL, op, mode);
3023 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3024 return new_d_Mul(NULL, op1, op2, mode);
3026 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
3027 return new_d_Mulh(NULL, op1, op2, mode);
3029 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3030 return new_d_Quot(NULL, memop, op1, op2, mode, state);
3032 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3033 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
3036 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3037 return new_d_Div(NULL, memop, op1, op2, mode, state);
3040 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3041 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
3043 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3044 return new_d_Mod(NULL, memop, op1, op2, mode, state);
3046 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3047 return new_d_Abs(NULL, op, mode);
3049 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3050 return new_d_And(NULL, op1, op2, mode);
3052 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3053 return new_d_Or(NULL, op1, op2, mode);
3055 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3056 return new_d_Eor(NULL, op1, op2, mode);
3058 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3059 return new_d_Not(NULL, op, mode);
3061 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3062 return new_d_Shl(NULL, op, k, mode);
3064 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3065 return new_d_Shr(NULL, op, k, mode);
3067 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3068 return new_d_Shrs(NULL, op, k, mode);
3070 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
3071 return new_d_Rotl(NULL, op, k, mode);
3073 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3074 return new_d_Carry(NULL, op1, op2, mode);
3076 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3077 return new_d_Borrow(NULL, op1, op2, mode);
3080 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3081 return new_d_Cmp(NULL, op1, op2);
/* -- Pinned interface, part 2: remaining NULL-debug-info wrappers.
   new_Bad and new_NoMem return the graph's singleton nodes instead of
   constructing; new_Dummy builds directly in the start block.
   NOTE(review): fragmentary listing -- most closing braces are not
   visible in this excerpt. -- */
3084 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3085 return new_d_Conv(NULL, op, mode);
3087 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3088 return new_d_strictConv(NULL, op, mode);
3091 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3092 return new_d_Cast(NULL, op, to_tp);
3095 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3096 return new_d_Phi(NULL, arity, in, mode);
3098 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
3099 return new_d_Load(NULL, store, addr, mode, flags);
3101 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
3102 return new_d_Store(NULL, store, addr, val, flags);
3104 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3105 ir_where_alloc where) {
3106 return new_d_Alloc(NULL, store, size, alloc_type, where);
3108 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3109 ir_type *free_type, ir_where_alloc where) {
3110 return new_d_Free(NULL, store, ptr, size, free_type, where);
3112 ir_node *new_Sync(int arity, ir_node *in[]) {
3113 return new_d_Sync(NULL, arity, in);
3116 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3117 return new_d_Proj(NULL, arg, mode, proj);
3120 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3121 return new_d_defaultProj(NULL, arg, max_proj);
3124 ir_node *new_Tuple(int arity, ir_node **in) {
3125 return new_d_Tuple(NULL, arity, in);
3127 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3128 return new_d_Id(NULL, val, mode);
/* Bad is a per-graph singleton; no construction happens here. */
3131 ir_node *new_Bad(void) {
3132 return get_irg_bad(current_ir_graph);
3134 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3135 return new_d_Confirm(NULL, val, bound, cmp);
3137 ir_node *new_Unknown(ir_mode *m) {
3138 return new_d_Unknown(m);
3140 ir_node *new_CallBegin(ir_node *callee) {
3141 return new_d_CallBegin(NULL, callee);
3143 ir_node *new_EndReg(void) {
3144 return new_d_EndReg(NULL);
3146 ir_node *new_EndExcept(void) {
3147 return new_d_EndExcept(NULL);
3149 ir_node *new_Break(void) {
3150 return new_d_Break(NULL);
3152 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3153 return new_d_Filter(NULL, arg, mode, proj);
/* NoMem is a per-graph singleton; no construction happens here. */
3155 ir_node *new_NoMem(void) {
3156 return get_irg_no_mem(current_ir_graph);
3158 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3159 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3161 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3162 return new_d_CopyB(NULL, store, dst, src, data_type);
3164 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3165 return new_d_InstOf(NULL, store, objptr, ent);
3167 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3168 return new_d_Raise(NULL, store, obj);
3170 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3171 return new_d_Bound(NULL, store, idx, lower, upper);
3173 ir_node *new_Pin(ir_node *node) {
3174 return new_d_Pin(NULL, node);
/* Dummy is built directly (no new_d_ wrapper), placed in the start block. */
3176 ir_node *new_Dummy(ir_mode *m) {
3177 ir_graph *irg = current_ir_graph;
3178 return new_ir_node(NULL, irg, get_irg_start_block(irg), op_Dummy, m, 0, NULL);
3180 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3181 int n_outs, ir_asm_constraint *outputs,
3182 int n_clobber, ident *clobber[], ident *asm_text) {
3183 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3186 /* create a new anchor node */
3187 ir_node *new_Anchor(ir_graph *irg) {
3188 ir_node *in[anchor_last];
3189 memset(in, 0, sizeof(in));
3190 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);