2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
47 /* Uncomment to use original code instead of generated one */
48 // #define USE_ORIGINAL
50 /* when we need verifying */
52 # define IRN_VRFY_IRG(res, irg)
54 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
58 * Language dependent variable initialization callback.
60 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
62 /* creates a bd constructor for a binop */
63 #define NEW_BD_BINOP(instr) \
65 new_bd_##instr(dbg_info *db, ir_node *block, \
66 ir_node *op1, ir_node *op2, ir_mode *mode) \
70 ir_graph *irg = current_ir_graph; \
73 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
74 res = optimize_node(res); \
75 IRN_VRFY_IRG(res, irg); \
79 /* creates a bd constructor for an unop */
80 #define NEW_BD_UNOP(instr) \
82 new_bd_##instr(dbg_info *db, ir_node *block, \
83 ir_node *op, ir_mode *mode) \
86 ir_graph *irg = current_ir_graph; \
87 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
88 res = optimize_node(res); \
89 IRN_VRFY_IRG(res, irg); \
93 /* creates a bd constructor for an divop */
94 #define NEW_BD_DIVOP(instr) \
96 new_bd_##instr(dbg_info *db, ir_node *block, \
97 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
101 ir_graph *irg = current_ir_graph; \
105 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
106 res->attr.divmod.exc.pin_state = state; \
107 res->attr.divmod.res_mode = mode; \
108 res->attr.divmod.no_remainder = 0; \
109 res = optimize_node(res); \
110 IRN_VRFY_IRG(res, irg); \
114 /* creates a rd constructor for a binop */
115 #define NEW_RD_BINOP(instr) \
117 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
118 ir_node *op1, ir_node *op2, ir_mode *mode) \
121 ir_graph *rem = current_ir_graph; \
122 current_ir_graph = irg; \
123 res = new_bd_##instr(db, block, op1, op2, mode); \
124 current_ir_graph = rem; \
128 /* creates a rd constructor for an unop */
129 #define NEW_RD_UNOP(instr) \
131 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
132 ir_node *op, ir_mode *mode) \
135 ir_graph *rem = current_ir_graph; \
136 current_ir_graph = irg; \
137 res = new_bd_##instr(db, block, op, mode); \
138 current_ir_graph = rem; \
142 /* creates a rd constructor for an divop */
143 #define NEW_RD_DIVOP(instr) \
145 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
146 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
149 ir_graph *rem = current_ir_graph; \
150 current_ir_graph = irg; \
151 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
152 current_ir_graph = rem; \
156 /* creates a d constructor for an binop */
157 #define NEW_D_BINOP(instr) \
159 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
160 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
163 /* creates a d constructor for an unop */
164 #define NEW_D_UNOP(instr) \
166 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
167 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
171 #include "gen_ir_cons.c.inl"
175 * Constructs a Block with a fixed number of predecessors.
176 * Does not set current_block. Cannot be used with automatic
177 * Phi node construction.
/**
 * Constructs a Block node with a fixed number of predecessors.
 * Does not set current_block; cannot be used together with automatic
 * Phi node construction.
 *
 * NOTE(review): this extraction is missing lines (e.g. the `ir_node *res;`
 * declaration and the trailing `return res;`) — verify against the full file.
 */
new_bd_Block(dbg_info *db, int arity, ir_node **in) {
	ir_graph *irg = current_ir_graph;

	/* a Block node has no enclosing block of its own, hence NULL */
	res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);

	/* macroblock header: a fresh block starts as its own macroblock head */
	res->attr.block.is_dead = 0;
	res->attr.block.is_mb_head = 1;
	res->attr.block.has_label = 0;
	res->attr.block.irg = irg;
	/* backedge flags sized to the predecessor count, allocated on the irg obstack */
	res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
	res->attr.block.in_cg = NULL;
	res->attr.block.cg_backedge = NULL;
	res->attr.block.extblk = NULL;
	res->attr.block.mb_depth = 0;
	res->attr.block.label = 0;

	/* all predecessors are given up front, so the block is born mature */
	set_Block_matured(res, 1);
	set_Block_block_visited(res, 0);
	IRN_VRFY_IRG(res, irg);
208 new_bd_Start(dbg_info *db, ir_node *block) {
210 ir_graph *irg = current_ir_graph;
212 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
214 IRN_VRFY_IRG(res, irg);
219 new_bd_End(dbg_info *db, ir_node *block) {
221 ir_graph *irg = current_ir_graph;
223 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
225 IRN_VRFY_IRG(res, irg);
231 * Creates a Phi node with all predecessors. Calling this constructor
232 * is only allowed if the corresponding block is mature.
/**
 * Creates a Phi node with all predecessors given. Calling this constructor
 * is only allowed if the corresponding block is mature.
 *
 * NOTE(review): sampled extraction — the `has_unknown` declaration, the loop
 * body setting it, and the final `return res;` are not visible here.
 */
new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
	ir_graph *irg = current_ir_graph;

	/* Don't assert that block matured: the use of this constructor is strongly
	   restricted — but if the block claims maturity, arities must agree. */
	if (get_Block_matured(block))
		assert(get_irn_arity(block) == arity);

	res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);

	res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);

	/* scan predecessors for Unknown inputs; optimizing a Phi with Unknown
	   inputs would be premature */
	for (i = arity - 1; i >= 0; --i)
		if (is_Unknown(in[i])) {

	if (!has_unknown) res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);

	/* Memory Phis in endless loops must be kept alive.
	   As we can't distinguish these easily we keep all of them alive. */
	if (is_Phi(res) && mode == mode_M)
		add_End_keepalive(get_irg_end(irg), res);
/**
 * Constructs a typed Const node from tarval @p con; the mode is taken from
 * the tarval. Const nodes live in the start block.
 *
 * NOTE(review): `ir_node *res;` declaration and `return res;` were lost in
 * extraction — confirm against the full file.
 */
new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
	res->attr.con.tv = con;
	set_Const_type(res, tp);  /* Call method because of complex assertion. */
	res = optimize_node (res);
	/* optimization must not change the declared type */
	assert(get_Const_type(res) == tp);
	IRN_VRFY_IRG(res, irg);
} /* new_bd_Const_type */
/**
 * Constructs an untyped Const node (type = firm_unknown_type).
 * NOTE(review): unusually for a bd constructor, this delegates to the *rd*
 * variant rather than new_bd_Const_type — layering quirk, behavior is the
 * same since irg == current_ir_graph here.
 */
new_bd_Const(dbg_info *db, tarval *con) {
	ir_graph *irg = current_ir_graph;

	return new_rd_Const_type (db, irg, con, firm_unknown_type);
291 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
292 ir_graph *irg = current_ir_graph;
294 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
295 } /* new_bd_Const_long */
299 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
301 ir_graph *irg = current_ir_graph;
303 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
310 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
313 ir_graph *irg = current_ir_graph;
315 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
316 res->attr.proj = proj;
319 assert(get_Proj_pred(res));
320 assert(get_nodes_block(get_Proj_pred(res)));
322 res = optimize_node(res);
324 IRN_VRFY_IRG(res, irg);
330 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
333 ir_graph *irg = current_ir_graph;
335 assert(arg->op == op_Cond);
336 arg->attr.cond.kind = fragmentary;
337 arg->attr.cond.default_proj = max_proj;
338 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
340 } /* new_bd_defaultProj */
343 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
345 ir_graph *irg = current_ir_graph;
347 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
348 res->attr.conv.strict = strict_flag;
349 res = optimize_node(res);
350 IRN_VRFY_IRG(res, irg);
356 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
358 ir_graph *irg = current_ir_graph;
360 assert(is_atomic_type(to_tp));
362 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
363 res->attr.cast.totype = to_tp;
364 res = optimize_node(res);
365 IRN_VRFY_IRG(res, irg);
370 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
372 ir_graph *irg = current_ir_graph;
374 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
375 res = optimize_node (res);
376 IRN_VRFY_IRG(res, irg);
/**
 * Creates a remainderless Div node (no_remainder = 1), in contrast to the
 * generic NEW_BD_DIVOP-generated constructors which set no_remainder = 0.
 *
 * NOTE(review): the declarations of `in[3]`/`res` and the assignments filling
 * `in` (memop, op1, op2) plus `return res;` were lost in extraction.
 */
static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
                             ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
	ir_graph *irg = current_ir_graph;

	/* Div is a mode_T node; the result mode is stored in the attribute */
	res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
	res->attr.divmod.exc.pin_state = state;
	res->attr.divmod.res_mode = mode;
	res->attr.divmod.no_remainder = 1;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
425 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
428 ir_graph *irg = current_ir_graph;
431 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
432 res = optimize_node(res);
433 IRN_VRFY_IRG(res, irg);
438 new_bd_Jmp(dbg_info *db, ir_node *block) {
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
443 res = optimize_node(res);
444 IRN_VRFY_IRG(res, irg);
449 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
451 ir_graph *irg = current_ir_graph;
453 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
454 res = optimize_node(res);
455 IRN_VRFY_IRG(res, irg);
460 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
462 ir_graph *irg = current_ir_graph;
464 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
465 res->attr.cond.kind = dense;
466 res->attr.cond.default_proj = 0;
467 res->attr.cond.pred = COND_JMP_PRED_NONE;
468 res = optimize_node(res);
469 IRN_VRFY_IRG(res, irg);
/**
 * Constructs a Call node: in[0] = store, in[1] = callee, in[2..] = arguments.
 *
 * NOTE(review): extraction dropped the `r_in`/`r_arity`/`res` declarations,
 * the `r_arity = arity + 2;` computation and the assignments of r_in[0]/[1],
 * plus `return res;` — verify against the full file.
 */
new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
            ir_node *callee, int arity, ir_node **in, ir_type *tp) {
	ir_graph *irg = current_ir_graph;

	/* build the real in-array: 2 leading slots for store and callee */
	NEW_ARR_A(ir_node *, r_in, r_arity);
	memcpy(&r_in[2], in, sizeof(ir_node *) * arity);

	res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);

	/* the call type must be a method type (or still unknown) */
	assert((get_unknown_type() == tp) || is_Method_type(tp));
	set_Call_type(res, tp);
	res->attr.call.exc.pin_state = op_pin_state_pinned;
	res->attr.call.callee_arr = NULL;  /* no callee analysis info yet */
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
/**
 * Constructs a Builtin node of kind @p kind: in[0] = store, in[1..] = args.
 *
 * NOTE(review): extraction dropped the `r_in`/`r_arity`/`res` declarations,
 * the `r_arity = arity + 1;` computation, the r_in[0] = store assignment and
 * `return res;` — verify against the full file.
 */
new_bd_Builtin(dbg_info *db, ir_node *block, ir_node *store,
               ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
	ir_graph *irg = current_ir_graph;

	/* real in-array: 1 leading slot for the store */
	NEW_ARR_A(ir_node *, r_in, r_arity);
	memcpy(&r_in[1], in, sizeof(ir_node *) * arity);

	res = new_ir_node(db, irg, block, op_Builtin, mode_T, r_arity, r_in);

	assert((get_unknown_type() == tp) || is_Method_type(tp));
	res->attr.builtin.exc.pin_state = op_pin_state_pinned;
	res->attr.builtin.kind = kind;
	res->attr.builtin.builtin_tp = tp;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
} /* new_bd_Builtin */
524 new_bd_Return(dbg_info *db, ir_node *block,
525 ir_node *store, int arity, ir_node **in) {
529 ir_graph *irg = current_ir_graph;
532 NEW_ARR_A (ir_node *, r_in, r_arity);
534 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
535 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
536 res = optimize_node(res);
537 IRN_VRFY_IRG(res, irg);
539 } /* new_bd_Return */
/**
 * Constructs a Load node: in[0] = store, in[1] = address; result mode_T.
 * Pinning, volatility and alignment are decoded from @p flags.
 *
 * NOTE(review): the `in[2]`/`res` declarations, the assignments filling `in`
 * and `return res;` were lost in extraction.
 */
new_bd_Load(dbg_info *db, ir_node *block,
            ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
	res->attr.load.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
	res->attr.load.load_mode = mode;  /* mode of the loaded value, not of the node */
	res->attr.load.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
	res->attr.load.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
561 new_bd_Store(dbg_info *db, ir_node *block,
562 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
565 ir_graph *irg = current_ir_graph;
570 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
571 res->attr.store.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
572 res->attr.store.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
573 res->attr.store.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
575 res = optimize_node(res);
576 IRN_VRFY_IRG(res, irg);
581 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
582 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
585 ir_graph *irg = current_ir_graph;
589 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
590 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
591 res->attr.alloc.where = where;
592 res->attr.alloc.type = alloc_type;
593 res = optimize_node(res);
594 IRN_VRFY_IRG(res, irg);
599 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
600 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
603 ir_graph *irg = current_ir_graph;
608 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
609 res->attr.free.where = where;
610 res->attr.free.type = free_type;
611 res = optimize_node(res);
612 IRN_VRFY_IRG(res, irg);
/**
 * Constructs a Sel node selecting entity @p ent from @p objptr:
 * in[0] = store, in[1] = objptr, in[2..] = array indices.
 * Sel's can select functions, which should be of mode mode_P_code; data
 * selections yield mode_P_data.
 *
 * NOTE(review): extraction dropped the `r_in`/`r_arity`/`res` declarations,
 * `r_arity = arity + 2;`, the r_in[0]/r_in[1] assignments and `return res;`.
 */
new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
           int arity, ir_node **in, ir_entity *ent) {
	ir_graph *irg = current_ir_graph;
	ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;

	assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");

	NEW_ARR_A(ir_node *, r_in, r_arity);	/* uses alloca */
	memcpy(&r_in[2], in, sizeof(ir_node *) * arity);

	res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
	res->attr.sel.ent = ent;
	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
643 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
644 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
645 ir_graph *irg = current_ir_graph;
646 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
648 res->attr.symc.kind = symkind;
649 res->attr.symc.sym = value;
650 res->attr.symc.tp = tp;
652 res = optimize_node(res);
653 IRN_VRFY_IRG(res, irg);
655 } /* new_bd_SymConst_type */
658 new_bd_Sync(dbg_info *db, ir_node *block) {
660 ir_graph *irg = current_ir_graph;
662 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
663 /* no need to call optimize node here, Sync are always created with no predecessors */
664 IRN_VRFY_IRG(res, irg);
669 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
670 ir_node *in[2], *res;
671 ir_graph *irg = current_ir_graph;
675 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
676 res->attr.confirm.cmp = cmp;
677 res = optimize_node(res);
678 IRN_VRFY_IRG(res, irg);
680 } /* new_bd_Confirm */
683 new_bd_Unknown(ir_mode *m) {
685 ir_graph *irg = current_ir_graph;
687 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
688 res = optimize_node(res);
690 } /* new_bd_Unknown */
693 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
696 ir_graph *irg = current_ir_graph;
698 in[0] = get_Call_ptr(call);
699 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
700 /* res->attr.callbegin.irg = irg; */
701 res->attr.callbegin.call = call;
702 res = optimize_node(res);
703 IRN_VRFY_IRG(res, irg);
705 } /* new_bd_CallBegin */
708 new_bd_EndReg(dbg_info *db, ir_node *block) {
710 ir_graph *irg = current_ir_graph;
712 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
713 set_irg_end_reg(irg, res);
714 IRN_VRFY_IRG(res, irg);
716 } /* new_bd_EndReg */
719 new_bd_EndExcept(dbg_info *db, ir_node *block) {
721 ir_graph *irg = current_ir_graph;
723 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
724 set_irg_end_except(irg, res);
725 IRN_VRFY_IRG (res, irg);
727 } /* new_bd_EndExcept */
730 new_bd_Break(dbg_info *db, ir_node *block) {
732 ir_graph *irg = current_ir_graph;
734 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
735 res = optimize_node(res);
736 IRN_VRFY_IRG(res, irg);
741 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
744 ir_graph *irg = current_ir_graph;
746 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
747 res->attr.filter.proj = proj;
748 res->attr.filter.in_cg = NULL;
749 res->attr.filter.backedge = NULL;
752 assert(get_Proj_pred(res));
753 assert(get_nodes_block(get_Proj_pred(res)));
755 res = optimize_node(res);
756 IRN_VRFY_IRG(res, irg);
758 } /* new_bd_Filter */
761 new_bd_Mux(dbg_info *db, ir_node *block,
762 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
765 ir_graph *irg = current_ir_graph;
771 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
774 res = optimize_node(res);
775 IRN_VRFY_IRG(res, irg);
780 new_bd_CopyB(dbg_info *db, ir_node *block,
781 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
784 ir_graph *irg = current_ir_graph;
790 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
792 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
793 res->attr.copyb.data_type = data_type;
794 res = optimize_node(res);
795 IRN_VRFY_IRG(res, irg);
800 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
801 ir_node *objptr, ir_type *type) {
804 ir_graph *irg = current_ir_graph;
808 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
809 res->attr.instof.type = type;
810 res = optimize_node(res);
811 IRN_VRFY_IRG(res, irg);
813 } /* new_bd_InstOf */
816 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
819 ir_graph *irg = current_ir_graph;
823 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
824 res = optimize_node(res);
825 IRN_VRFY_IRG(res, irg);
830 new_bd_Bound(dbg_info *db, ir_node *block,
831 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
834 ir_graph *irg = current_ir_graph;
840 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
841 res->attr.bound.exc.pin_state = op_pin_state_pinned;
842 res = optimize_node(res);
843 IRN_VRFY_IRG(res, irg);
848 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
850 ir_graph *irg = current_ir_graph;
852 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
853 res = optimize_node(res);
854 IRN_VRFY_IRG(res, irg);
/**
 * Constructs an ASM node (inline assembler): copies the input/output
 * constraint and clobber arrays onto the irg obstack so the caller's arrays
 * need not outlive the node.
 *
 * NOTE(review): the `ir_node *res;` declaration and `return res;` were lost
 * in extraction.
 */
new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
           int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
	ir_graph *irg = current_ir_graph;

	res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
	/* inline asm may have arbitrary side effects — keep it pinned */
	res->attr.assem.pin_state = op_pin_state_pinned;
	res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
	res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
	res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
	res->attr.assem.asm_text = asm_text;

	memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
	memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
	memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);

	res = optimize_node(res);
	IRN_VRFY_IRG(res, irg);
881 /* --------------------------------------------- */
882 /* private interfaces, for professional use only */
883 /* --------------------------------------------- */
886 /* Constructs a Block with a fixed number of predecessors.
887 Does not set current_block. Can not be used with automatic
888 Phi node construction. */
890 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
891 ir_graph *rem = current_ir_graph;
894 current_ir_graph = irg;
895 res = new_bd_Block(db, arity, in);
896 current_ir_graph = rem;
902 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
903 ir_graph *rem = current_ir_graph;
906 current_ir_graph = irg;
907 res = new_bd_Start(db, block);
908 current_ir_graph = rem;
914 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
916 ir_graph *rem = current_ir_graph;
918 current_ir_graph = irg;
919 res = new_bd_End(db, block);
920 current_ir_graph = rem;
926 /* Creates a Phi node with all predecessors. Calling this constructor
927 is only allowed if the corresponding block is mature. */
929 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
931 ir_graph *rem = current_ir_graph;
933 current_ir_graph = irg;
934 res = new_bd_Phi(db, block,arity, in, mode);
935 current_ir_graph = rem;
942 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
944 ir_graph *rem = current_ir_graph;
946 current_ir_graph = irg;
947 res = new_bd_Const_type(db, con, tp);
948 current_ir_graph = rem;
951 } /* new_rd_Const_type */
/**
 * Constructs an untyped Const node (type = firm_unknown_type) in graph @p irg.
 *
 * NOTE(review): two alternative construction paths appear back to back here;
 * these are almost certainly the two branches of the `#ifdef USE_ORIGINAL`
 * switch (see the comment near the top of the file) whose preprocessor
 * directives were lost in extraction — only one executes in the real file.
 */
new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;
	res = new_bd_Const_type(db, con, firm_unknown_type);
	current_ir_graph = rem;

	res = new_rd_Const_type(db, irg, con, firm_unknown_type);
971 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
972 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
973 } /* new_rd_Const_long */
977 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
979 ir_graph *rem = current_ir_graph;
981 current_ir_graph = irg;
982 res = new_bd_Id(db, block, val, mode);
983 current_ir_graph = rem;
989 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
992 ir_graph *rem = current_ir_graph;
994 current_ir_graph = irg;
995 res = new_bd_Proj(db, block, arg, mode, proj);
996 current_ir_graph = rem;
1003 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1006 ir_graph *rem = current_ir_graph;
1008 current_ir_graph = irg;
1009 res = new_bd_defaultProj(db, block, arg, max_proj);
1010 current_ir_graph = rem;
1013 } /* new_rd_defaultProj */
1016 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1018 ir_graph *rem = current_ir_graph;
1020 current_ir_graph = irg;
1021 res = new_bd_Conv(db, block, op, mode, 0);
1022 current_ir_graph = rem;
1029 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1031 ir_graph *rem = current_ir_graph;
1033 current_ir_graph = irg;
1034 res = new_bd_Cast(db, block, op, to_tp);
1035 current_ir_graph = rem;
1041 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1043 ir_graph *rem = current_ir_graph;
1045 current_ir_graph = irg;
1046 res = new_bd_Tuple(db, block, arity, in);
1047 current_ir_graph = rem;
1050 } /* new_rd_Tuple */
1059 NEW_RD_DIVOP(DivMod)
1074 NEW_RD_BINOP(Borrow)
1076 /* creates a rd constructor for an divRL */
1077 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1078 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1081 ir_graph *rem = current_ir_graph;
1082 current_ir_graph = irg;
1083 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1084 current_ir_graph = rem;
1090 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1091 ir_node *op1, ir_node *op2) {
1093 ir_graph *rem = current_ir_graph;
1095 current_ir_graph = irg;
1096 res = new_bd_Cmp(db, block, op1, op2);
1097 current_ir_graph = rem;
1103 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1105 ir_graph *rem = current_ir_graph;
1107 current_ir_graph = irg;
1108 res = new_bd_Jmp(db, block);
1109 current_ir_graph = rem;
1115 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1117 ir_graph *rem = current_ir_graph;
1119 current_ir_graph = irg;
1120 res = new_bd_IJmp(db, block, tgt);
1121 current_ir_graph = rem;
1127 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1129 ir_graph *rem = current_ir_graph;
1131 current_ir_graph = irg;
1132 res = new_bd_Cond(db, block, c);
1133 current_ir_graph = rem;
1140 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1141 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1143 ir_graph *rem = current_ir_graph;
1145 current_ir_graph = irg;
1146 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1147 current_ir_graph = rem;
1153 new_rd_Builtin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1154 ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
1156 ir_graph *rem = current_ir_graph;
1158 current_ir_graph = irg;
1159 res = new_bd_Builtin(db, block, store, kind, arity, in, tp);
1160 current_ir_graph = rem;
1163 } /* new_rd_Builtin */
1166 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1167 ir_node *store, int arity, ir_node **in) {
1169 ir_graph *rem = current_ir_graph;
1171 current_ir_graph = irg;
1172 res = new_bd_Return(db, block, store, arity, in);
1173 current_ir_graph = rem;
1176 } /* new_rd_Return */
1179 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1180 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1182 ir_graph *rem = current_ir_graph;
1184 current_ir_graph = irg;
1185 res = new_bd_Load(db, block, store, adr, mode, flags);
1186 current_ir_graph = rem;
1192 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1193 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1195 ir_graph *rem = current_ir_graph;
1197 current_ir_graph = irg;
1198 res = new_bd_Store(db, block, store, adr, val, flags);
1199 current_ir_graph = rem;
1202 } /* new_rd_Store */
1205 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1206 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1208 ir_graph *rem = current_ir_graph;
1210 current_ir_graph = irg;
1211 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1212 current_ir_graph = rem;
1215 } /* new_rd_Alloc */
1218 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1219 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1221 ir_graph *rem = current_ir_graph;
1223 current_ir_graph = irg;
1224 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1225 current_ir_graph = rem;
1231 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1232 ir_node *store, ir_node *objptr, ir_entity *ent) {
1234 ir_graph *rem = current_ir_graph;
1236 current_ir_graph = irg;
1237 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1238 current_ir_graph = rem;
1241 } /* new_rd_simpleSel */
1244 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1245 int arity, ir_node **in, ir_entity *ent) {
1247 ir_graph *rem = current_ir_graph;
1249 current_ir_graph = irg;
1250 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1251 current_ir_graph = rem;
1257 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1258 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1260 ir_graph *rem = current_ir_graph;
1262 current_ir_graph = irg;
1263 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1264 current_ir_graph = rem;
1267 } /* new_rd_SymConst_type */
1270 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1271 symconst_symbol value, symconst_kind symkind) {
1272 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1273 } /* new_rd_SymConst */
1275 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1276 symconst_symbol sym;
1277 sym.entity_p = symbol;
1278 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1279 } /* new_rd_SymConst_addr_ent */
1281 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1282 symconst_symbol sym;
1283 sym.entity_p = symbol;
1284 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1285 } /* new_rd_SymConst_ofs_ent */
1287 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1288 symconst_symbol sym;
1289 sym.ident_p = symbol;
1290 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1291 } /* new_rd_SymConst_addr_name */
1293 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1294 symconst_symbol sym;
1295 sym.type_p = symbol;
1296 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1297 } /* new_rd_SymConst_type_tag */
1299 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1300 symconst_symbol sym;
1301 sym.type_p = symbol;
1302 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1303 } /* new_rd_SymConst_size */
1305 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1306 symconst_symbol sym;
1307 sym.type_p = symbol;
1308 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1309 } /* new_rd_SymConst_align */
1312 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1314 ir_graph *rem = current_ir_graph;
1317 current_ir_graph = irg;
1318 res = new_bd_Sync(db, block);
1319 current_ir_graph = rem;
1321 for (i = 0; i < arity; ++i)
1322 add_Sync_pred(res, in[i]);
/* new_rd_* constructors: each saves current_ir_graph, switches it to the
   given irg, delegates to the matching new_bd_* constructor on the given
   block, and restores the previous graph.  (NOTE(review): this extract is
   fragmentary -- return-type lines, `return res;` lines and some braces are
   missing from this view; see the original-line numbers for the gaps.) */
1328 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1330 ir_graph *rem = current_ir_graph;
1332 current_ir_graph = irg;
1333 res = new_bd_Confirm(db, block, val, bound, cmp);
1334 current_ir_graph = rem;
1337 } /* new_rd_Confirm */
/* Unknown is a placeholder for an undefined value of the given mode. */
1340 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1342 ir_graph *rem = current_ir_graph;
1344 current_ir_graph = irg;
1345 res = new_bd_Unknown(m);
1346 current_ir_graph = rem;
1349 } /* new_rd_Unknown */
1352 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1354 ir_graph *rem = current_ir_graph;
1356 current_ir_graph = irg;
1357 res = new_bd_CallBegin(db, block, call);
1358 current_ir_graph = rem;
1361 } /* new_rd_CallBegin */
/* EndReg/EndExcept build the node directly (mode_T, dynamic arity -1)
   and register it on the graph instead of going through a bd constructor. */
1364 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1367 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1368 set_irg_end_reg(irg, res);
1369 IRN_VRFY_IRG(res, irg);
1371 } /* new_rd_EndReg */
1374 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1377 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1378 set_irg_end_except(irg, res);
1379 IRN_VRFY_IRG (res, irg);
1381 } /* new_rd_EndExcept */
1384 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1386 ir_graph *rem = current_ir_graph;
1388 current_ir_graph = irg;
1389 res = new_bd_Break(db, block);
1390 current_ir_graph = rem;
1393 } /* new_rd_Break */
1396 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1399 ir_graph *rem = current_ir_graph;
1401 current_ir_graph = irg;
1402 res = new_bd_Filter(db, block, arg, mode, proj);
1403 current_ir_graph = rem;
1406 } /* new_rd_Filter */
1409 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1410 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1412 ir_graph *rem = current_ir_graph;
1414 current_ir_graph = irg;
1415 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1416 current_ir_graph = rem;
1421 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1422 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1424 ir_graph *rem = current_ir_graph;
1426 current_ir_graph = irg;
1427 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1428 current_ir_graph = rem;
1431 } /* new_rd_CopyB */
1434 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1435 ir_node *objptr, ir_type *type) {
1437 ir_graph *rem = current_ir_graph;
1439 current_ir_graph = irg;
1440 res = new_bd_InstOf(db, block, store, objptr, type);
1441 current_ir_graph = rem;
1444 } /* new_rd_InstOf */
1447 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1449 ir_graph *rem = current_ir_graph;
1451 current_ir_graph = irg;
1452 res = new_bd_Raise(db, block, store, obj);
1453 current_ir_graph = rem;
1456 } /* new_rd_Raise */
1458 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1459 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1461 ir_graph *rem = current_ir_graph;
1463 current_ir_graph = irg;
1464 res = new_bd_Bound(db, block, store, idx, lower, upper);
1465 current_ir_graph = rem;
1468 } /* new_rd_Bound */
1470 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1472 ir_graph *rem = current_ir_graph;
1474 current_ir_graph = irg;
1475 res = new_bd_Pin(db, block, node);
1476 current_ir_graph = rem;
1481 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1482 int arity, ir_node *in[], ir_asm_constraint *inputs,
1483 int n_outs, ir_asm_constraint *outputs,
1484 int n_clobber, ident *clobber[], ident *asm_text) {
1486 ir_graph *rem = current_ir_graph;
1488 current_ir_graph = irg;
1489 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1490 current_ir_graph = rem;
/* new_r_* convenience wrappers: same as the new_rd_* constructors but
   without debug info -- each forwards to its new_rd_* counterpart with a
   NULL dbg_info.  (NOTE(review): closing braces of these one-line wrappers
   are missing from this fragmentary extract.) */
1497 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1498 return new_rd_Block(NULL, irg, arity, in);
1500 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1501 return new_rd_Start(NULL, irg, block);
1503 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1504 return new_rd_End(NULL, irg, block);
1506 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1507 return new_rd_Jmp(NULL, irg, block);
1509 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1510 return new_rd_IJmp(NULL, irg, block, tgt);
1512 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1513 return new_rd_Cond(NULL, irg, block, c);
1516 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1517 ir_node *store, int arity, ir_node **in) {
1518 return new_rd_Return(NULL, irg, block, store, arity, in);
1520 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
1521 return new_rd_Const(NULL, irg, con);
1523 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
1524 return new_rd_Const_long(NULL, irg, mode, value);
1527 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
1528 return new_rd_Const_type(NULL, irg, con, tp);
1531 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1532 symconst_symbol value, symconst_kind symkind) {
1533 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
/* simpleSel is a Sel with no index array (entity access). */
1535 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1536 ir_node *objptr, ir_entity *ent) {
1537 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1539 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1540 ir_node *objptr, int n_index, ir_node **index,
1542 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1544 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1545 ir_node *callee, int arity, ir_node **in,
1547 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1549 ir_node *new_r_Builtin(ir_graph *irg, ir_node *block, ir_node *store,
1550 ir_builtin_kind kind, int arity, ir_node **in,
1552 return new_rd_Builtin(NULL, irg, block, store, kind, arity, in, tp);
/* Arithmetic and logic constructors. */
1555 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1556 ir_node *op1, ir_node *op2, ir_mode *mode) {
1557 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1560 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1561 ir_node *op1, ir_node *op2, ir_mode *mode) {
1562 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1564 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1565 ir_node *op, ir_mode *mode) {
1566 return new_rd_Minus(NULL, irg, block, op, mode);
1568 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1569 ir_node *op1, ir_node *op2, ir_mode *mode) {
1570 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1572 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1573 ir_node *op1, ir_node *op2, ir_mode *mode) {
1574 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1576 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1577 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1578 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1580 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1581 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1582 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1585 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1586 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1587 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1590 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1591 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1592 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1594 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1595 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1596 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1598 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1599 ir_node *op, ir_mode *mode) {
1600 return new_rd_Abs(NULL, irg, block, op, mode);
1602 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1603 ir_node *op1, ir_node *op2, ir_mode *mode) {
1604 return new_rd_And(NULL, irg, block, op1, op2, mode);
1606 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1607 ir_node *op1, ir_node *op2, ir_mode *mode) {
1608 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1610 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1611 ir_node *op1, ir_node *op2, ir_mode *mode) {
1612 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1614 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1615 ir_node *op, ir_mode *mode) {
1616 return new_rd_Not(NULL, irg, block, op, mode);
1618 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1619 ir_node *op, ir_node *k, ir_mode *mode) {
1620 return new_rd_Shl(NULL, irg, block, op, k, mode);
1622 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1623 ir_node *op, ir_node *k, ir_mode *mode) {
1624 return new_rd_Shr(NULL, irg, block, op, k, mode);
1626 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1627 ir_node *op, ir_node *k, ir_mode *mode) {
1628 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1630 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1631 ir_node *op, ir_node *k, ir_mode *mode) {
1632 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1634 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1635 ir_node *op, ir_node *k, ir_mode *mode) {
1636 return new_rd_Carry(NULL, irg, block, op, k, mode);
1638 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1639 ir_node *op, ir_node *k, ir_mode *mode) {
1640 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1643 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1644 ir_node *op1, ir_node *op2) {
1645 return new_rd_Cmp(NULL, irg, block, op1, op2);
1648 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1649 ir_node *op, ir_mode *mode) {
1650 return new_rd_Conv(NULL, irg, block, op, mode);
1653 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1654 return new_rd_Cast(NULL, irg, block, op, to_tp);
1657 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1658 ir_node **in, ir_mode *mode) {
1659 return new_rd_Phi(NULL, irg, block, arity, in, mode);
/* Memory operations. */
1661 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1662 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1663 return new_rd_Load(NULL, irg, block, store, adr, mode, flags);
1665 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1666 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1667 return new_rd_Store(NULL, irg, block, store, adr, val, flags);
1669 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1670 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1671 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1673 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1674 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1675 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1677 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1678 return new_rd_Sync(NULL, irg, block, arity, in);
1681 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1682 ir_mode *mode, long proj) {
1683 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1686 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1688 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1691 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1692 int arity, ir_node **in) {
1693 return new_rd_Tuple(NULL, irg, block, arity, in );
1695 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1696 ir_node *val, ir_mode *mode) {
1697 return new_rd_Id(NULL, irg, block, val, mode);
/* Bad and NoMem are singletons: fetched from the graph, never constructed. */
1700 ir_node *new_r_Bad(ir_graph *irg) {
1701 return get_irg_bad(irg);
1703 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1704 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1706 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1707 return new_rd_Unknown(irg, m);
1709 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1710 return new_rd_CallBegin(NULL, irg, block, callee);
1712 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1713 return new_rd_EndReg(NULL, irg, block);
1715 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1716 return new_rd_EndExcept(NULL, irg, block);
1718 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1719 return new_rd_Break(NULL, irg, block);
1721 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1722 ir_mode *mode, long proj) {
1723 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1725 ir_node *new_r_NoMem(ir_graph *irg) {
1726 return get_irg_no_mem(irg);
1728 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1729 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1730 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1732 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1733 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1734 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1736 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1738 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1740 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1741 ir_node *store, ir_node *obj) {
1742 return new_rd_Raise(NULL, irg, block, store, obj);
1744 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1745 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1746 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1748 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1749 return new_rd_Pin(NULL, irg, block, node);
1751 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1752 int arity, ir_node *in[], ir_asm_constraint *inputs,
1753 int n_outs, ir_asm_constraint *outputs,
1754 int n_clobber, ident *clobber[], ident *asm_text) {
1755 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1758 /** ********************/
1759 /** public interfaces */
1760 /** construction tools */
/* new_d_Start/new_d_End: build the Start (mode_T) and End (mode_X, dynamic
   arity) nodes directly in the current block of current_ir_graph. */
1766 * - create a new Start node in the current block
1768 * @return s - pointer to the created Start node
1773 new_d_Start(dbg_info *db) {
1776 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1777 op_Start, mode_T, 0, NULL);
1779 res = optimize_node(res);
1780 IRN_VRFY_IRG(res, current_ir_graph);
1785 new_d_End(dbg_info *db) {
1787 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1788 op_End, mode_X, -1, NULL);
1789 res = optimize_node(res);
1790 IRN_VRFY_IRG(res, current_ir_graph);
1795 /* Constructs a Block with a fixed number of predecessors.
1796 Does set current_block. Can be used with automatic Phi
1797 node construction. */
1798 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1802 int has_unknown = 0;
1804 res = new_bd_Block(db, arity, in);
1806 /* Create and initialize array for Phi-node construction. */
1807 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1808 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1809 current_ir_graph->n_loc);
1810 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Do not optimize blocks with Unknown predecessors: the Unknowns would be
   folded away before they can be resolved. */
1813 for (i = arity-1; i >= 0; i--)
1814 if (is_Unknown(in[i])) {
1819 if (!has_unknown) res = optimize_node(res);
1821 IRN_VRFY_IRG(res, current_ir_graph);
1827 /* ***********************************************************************/
1828 /* Methods necessary for automatic Phi node creation */
1830 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1831 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1832 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1833 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1835 Call Graph: ( A ---> B == A "calls" B)
1837 get_value mature_immBlock
1845 get_r_value_internal |
1849 new_rd_Phi0 new_rd_Phi_in
1851 * *************************************************************************** */
1853 /** Creates a Phi node with 0 predecessors. */
/* Phi0 is a placeholder inserted while the block is immature; it is later
   replaced by a real Phi in mature_immBlock()/phi_merge(). */
1854 static inline ir_node *
1855 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1858 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1859 IRN_VRFY_IRG(res, irg);
1865 * Internal constructor of a Phi node by a phi_merge operation.
1867 * @param irg the graph on which the Phi will be constructed
1868 * @param block the block in which the Phi will be constructed
1869 * @param mode the mode of the Phi node
1870 * @param in the input array of the phi node
1871 * @param ins number of elements in the input array
1872 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1873 * the value for which the new Phi is constructed
1875 static inline ir_node *
1876 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1877 ir_node **in, int ins, ir_node *phi0) {
1879 ir_node *res, *known;
1881 /* Allocate a new node on the obstack. The allocation copies the in
1883 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1884 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1886 /* This loop checks whether the Phi has more than one predecessor.
1887 If so, it is a real Phi node and we break the loop. Else the
1888 Phi node merges the same definition on several paths and therefore
1890 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
1892 for (i = ins - 1; i >= 0; --i) {
1895 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1897 /* Optimize self referencing Phis: We can't detect them yet properly, as
1898 they still refer to the Phi0 they will replace. So replace right now. */
1899 if (phi0 && in[i] == phi0)
1902 if (in[i] == res || in[i] == known)
1911 /* i < 0: there is at most one predecessor, we don't need a phi node. */
1914 edges_node_deleted(res, current_ir_graph);
1915 obstack_free(current_ir_graph->obst, res);
1916 if (is_Phi(known)) {
1917 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1918 order, an enclosing Phi node may get superfluous. */
1919 res = optimize_in_place_2(known);
1921 exchange(known, res);
1926 /* A undefined value, e.g., in unreachable code. */
1930 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1931 IRN_VRFY_IRG(res, irg);
1932 /* Memory Phis in endless loops must be kept alive.
1933 As we can't distinguish these easily we keep all of them alive. */
1934 if (is_Phi(res) && mode == mode_M)
1935 add_End_keepalive(get_irg_end(irg), res);
1939 } /* new_rd_Phi_in */
1942 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1944 #if PRECISE_EXC_CONTEXT
1946 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1949 * Construct a new frag_array for node n.
1950 * Copy the content from the current graph_arr of the corresponding block:
1951 * this is the current state.
1952 * Set ProjM(n) as current memory state.
1953 * Further the last entry in frag_arr of current block points to n. This
1954 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
1956 static inline ir_node **new_frag_arr(ir_node *n) {
1960 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1961 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1962 sizeof(ir_node *)*current_ir_graph->n_loc);
1964 /* turn off optimization before allocating Proj nodes, as res isn't
1966 opt = get_opt_optimize(); set_optimize(0);
1967 /* Here we rely on the fact that all frag ops have Memory as first result! */
1969 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1970 } else if (is_CopyB(n)) {
1971 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops must share the same memory Proj number;
   the assert below pins that invariant at compile/run time. */
1973 assert((pn_Quot_M == pn_DivMod_M) &&
1974 (pn_Quot_M == pn_Div_M) &&
1975 (pn_Quot_M == pn_Mod_M) &&
1976 (pn_Quot_M == pn_Load_M) &&
1977 (pn_Quot_M == pn_Store_M) &&
1978 (pn_Quot_M == pn_Alloc_M) &&
1979 (pn_Quot_M == pn_Bound_M));
1980 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1984 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1986 } /* new_frag_arr */
1989 * Returns the frag_arr from a node.
/* Dispatches on the opcode because the frag_arr lives in a different
   attribute union member per node kind; the last case is the generic
   except attribute. */
1991 static inline ir_node **get_frag_arr(ir_node *n) {
1992 switch (get_irn_opcode(n)) {
1994 return n->attr.call.exc.frag_arr;
1996 return n->attr.alloc.exc.frag_arr;
1998 return n->attr.load.exc.frag_arr;
2000 return n->attr.store.exc.frag_arr;
2002 return n->attr.except.frag_arr;
2004 } /* get_frag_arr */
/* Walk the chain of frag arrays (linked through slot n_loc-1) and store val
   in every array whose slot pos is still empty; never overwrites. The 1024
   bound in the debug loop guards against endless recursion. */
2007 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2008 #ifdef DEBUG_libfirm
2011 for (i = 1024; i >= 0; --i)
2016 if (frag_arr[pos] == NULL)
2017 frag_arr[pos] = val;
2018 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
2019 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2020 assert(arr != frag_arr && "Endless recursion detected");
2025 assert(!"potential endless recursion in set_frag_value");
2026 } /* set_frag_value */
/* Look up the value at pos as seen right after fragile op cfOp: first try
   the op's frag array, then fall back to Phi construction or the regular
   get_r_value_internal lookup. */
2029 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2033 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
2035 frag_arr = get_frag_arr(cfOp);
2036 res = frag_arr[pos];
2038 if (block->attr.block.graph_arr[pos] != NULL) {
2039 /* There was a set_value() after the cfOp and no get_value() before that
2040 set_value(). We must build a Phi node now. */
2041 if (block->attr.block.is_matured) {
2042 int ins = get_irn_arity(block);
2044 NEW_ARR_A(ir_node *, nin, ins);
2045 res = phi_merge(block, pos, mode, nin, ins);
/* Block immature: queue a Phi0 on the block's phi list, to be completed
   by mature_immBlock(). */
2047 res = new_rd_Phi0(current_ir_graph, block, mode);
2048 res->attr.phi.u.pos = pos;
2049 res->attr.phi.next = block->attr.block.phis;
2050 block->attr.block.phis = res;
2052 assert(res != NULL);
2053 /* It's a Phi, we can write this into all graph_arrs with NULL */
2054 set_frag_value(block->attr.block.graph_arr, pos, res);
2056 res = get_r_value_internal(block, pos, mode);
2057 set_frag_value(block->attr.block.graph_arr, pos, res);
2061 } /* get_r_frag_value_internal */
2062 #endif /* PRECISE_EXC_CONTEXT */
2065 * Check whether a control flow node cf_pred represents an exception flow.
2067 * @param cf_pred the control flow node
2068 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
2070 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2072 * Note: all projections from a raise are "exceptional control flow" so we handle it
2073 * like a normal Jmp, because there is no "regular" one.
2074 * That's why Raise is no "fragile_op"!
2076 if (is_fragile_op(prev_cf_op)) {
2077 if (is_Proj(cf_pred)) {
2078 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2079 /* the regular control flow, NO exception */
2082 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2085 /* Hmm, exception but not a Proj? */
2086 assert(!"unexpected condition: fragile op without a proj");
2090 } /* is_exception_flow */
2093 * Computes the predecessors for the real phi node, and then
2094 * allocates and returns this node. The routine called to allocate the
2095 * node might optimize it away and return a real value.
2096 * This function must be called with an in-array of proper size.
2099 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2100 ir_node *prevBlock, *res, *phi0, *phi0_all;
2103 /* If this block has no value at pos create a Phi0 and remember it
2104 in graph_arr to break recursions.
2105 Else we may not set graph_arr as there a later value is remembered. */
2107 if (block->attr.block.graph_arr[pos] == NULL) {
2108 ir_graph *irg = current_ir_graph;
2110 if (block == get_irg_start_block(irg)) {
2111 /* Collapsing to Bad tarvals is no good idea.
2112 So we call a user-supplied routine here that deals with this case as
2113 appropriate for the given language. Sorrily the only help we can give
2114 here is the position.
2116 Even if all variables are defined before use, it can happen that
2117 we get to the start block, if a Cond has been replaced by a tuple
2118 (bad, jmp). In this case we call the function needlessly, eventually
2119 generating a non-existent error.
2120 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2123 if (default_initialize_local_variable != NULL) {
2124 ir_node *rem = get_cur_block();
2126 set_cur_block(block);
2127 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2131 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2132 /* We don't need to care about exception ops in the start block.
2133 There are none by definition. */
2134 return block->attr.block.graph_arr[pos];
2136 phi0 = new_rd_Phi0(irg, block, mode);
2137 block->attr.block.graph_arr[pos] = phi0;
2138 #if PRECISE_EXC_CONTEXT
2139 if (get_opt_precise_exc_context()) {
2140 /* Set graph_arr for fragile ops. Also here we should break recursion.
2141 We could choose a cyclic path through a cfop. But the recursion would
2142 break at some point. */
2143 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2149 /* This loop goes to all predecessor blocks of the block the Phi node
2150 is in and there finds the operands of the Phi node by calling
2151 get_r_value_internal. */
2152 for (i = 1; i <= ins; ++i) {
2153 ir_node *cf_pred = block->in[i];
2154 ir_node *prevCfOp = skip_Proj(cf_pred);
2156 if (is_Bad(prevCfOp)) {
2157 /* In case a Cond has been optimized we would get right to the start block
2158 with an invalid definition. */
2159 nin[i-1] = new_Bad();
2162 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2164 if (!is_Bad(prevBlock)) {
2165 #if PRECISE_EXC_CONTEXT
2166 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2167 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2168 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2171 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2173 nin[i-1] = new_Bad();
2177 /* We want to pass the Phi0 node to the constructor: this finds additional
2178 optimization possibilities.
2179 The Phi0 node either is allocated in this function, or it comes from
2180 a former call to get_r_value_internal(). In this case we may not yet
2181 exchange phi0, as this is done in mature_immBlock(). */
2183 phi0_all = block->attr.block.graph_arr[pos];
2184 if (! is_Phi0(phi0_all) ||
2185 get_irn_arity(phi0_all) != 0 ||
2186 get_nodes_block(phi0_all) != block)
2192 /* After collecting all predecessors into the array nin a new Phi node
2193 with these predecessors is created. This constructor contains an
2194 optimization: If all predecessors of the Phi node are identical it
2195 returns the only operand instead of a new Phi node. */
2196 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2198 /* In case we allocated a Phi0 node at the beginning of this procedure,
2199 we need to exchange this Phi0 with the real Phi. */
2201 exchange(phi0, res);
2202 block->attr.block.graph_arr[pos] = res;
2203 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2204 only an optimization. */
2211 * This function returns the last definition of a value. In case
2212 * this value was last defined in a previous block, Phi nodes are
2213 * inserted. If the part of the firm graph containing the definition
2214 * is not yet constructed, a dummy Phi node is returned.
2216 * @param block the current block
2217 * @param pos the value number of the value searched
2218 * @param mode the mode of this value (needed for Phi construction)
2221 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2223 /* There are 4 cases to treat.
2225 1. The block is not mature and we visit it the first time. We can not
2226 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2227 predecessors is returned. This node is added to the linked list (block
2228 attribute "phis") of the containing block to be completed when this block is
2229 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2232 2. The value is already known in this block, graph_arr[pos] is set and we
2233 visit the block the first time. We can return the value without
2234 creating any new nodes.
2236 3. The block is mature and we visit it the first time. A Phi node needs
2237 to be created (phi_merge). If the Phi is not needed, as all its
2238 operands are the same value reaching the block through different
2239 paths, it's optimized away and the value itself is returned.
2241 4. The block is mature, and we visit it the second time. Now two
2242 subcases are possible:
2243 * The value was computed completely the last time we were here. This
2244 is the case if there is no loop. We can return the proper value.
2245 * The recursion that visited this node and set the flag did not
2246 return yet. We are computing a value in a loop and need to
2247 break the recursion. This case only happens if we visited
2248 the same block with phi_merge before, which inserted a Phi0.
2249 So we return the Phi0.
2252 /* case 4 -- already visited. */
2253 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2254 /* As phi_merge allocates a Phi0 this value is always defined. Here
2255 is the critical difference of the two algorithms. */
2256 assert(block->attr.block.graph_arr[pos]);
2257 return block->attr.block.graph_arr[pos];
2260 /* visited the first time */
2261 set_irn_visited(block, get_irg_visited(current_ir_graph));
2263 /* Get the local valid value */
2264 res = block->attr.block.graph_arr[pos];
2266 /* case 2 -- If the value is actually computed, return it. */
2270 if (block->attr.block.is_matured) { /* case 3 */
2272 /* The Phi has the same amount of ins as the corresponding block. */
2273 int ins = get_irn_arity(block);
2275 NEW_ARR_A(ir_node *, nin, ins);
2277 /* Phi merge collects the predecessors and then creates a node. */
2278 res = phi_merge(block, pos, mode, nin, ins);
2280 } else { /* case 1 */
2281 /* The block is not mature, we don't know how many in's are needed. A Phi
2282 with zero predecessors is created. Such a Phi node is called Phi0
2283 node. The Phi0 is then added to the list of Phi0 nodes in this block
2284 to be matured by mature_immBlock later.
2285 The Phi0 has to remember the pos of its internal value. If the real
2286 Phi is computed, pos is used to update the array with the local
2288 res = new_rd_Phi0(current_ir_graph, block, mode);
2289 res->attr.phi.u.pos = pos;
2290 res->attr.phi.next = block->attr.block.phis;
2291 block->attr.block.phis = res;
2294 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2296 /* The local valid value is available now. */
2297 block->attr.block.graph_arr[pos] = res;
2300 } /* get_r_value_internal */
2302 /* ************************************************************************** */
2305 * Finalize a Block node, when all control flows are known.
2306 * Acceptable parameters are only Block nodes.
2309 mature_immBlock(ir_node *block) {
2314 assert(is_Block(block));
2315 if (!get_Block_matured(block)) {
2316 ir_graph *irg = current_ir_graph;
2318 ins = ARR_LEN(block->in) - 1;
2319 /* Fix block parameters */
2320 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2322 /* An array for building the Phi nodes. */
2323 NEW_ARR_A(ir_node *, nin, ins);
2325 /* Traverse a chain of Phi nodes attached to this block and mature
2327 for (n = block->attr.block.phis; n; n = next) {
2328 inc_irg_visited(irg);
2329 next = n->attr.phi.next;
2330 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2333 block->attr.block.is_matured = 1;
2335 /* Now, as the block is a finished Firm node, we can optimize it.
2336 Since other nodes have been allocated since the block was created
2337 we can not free the node on the obstack. Therefore we have to call
2338 optimize_in_place().
2339 Unfortunately the optimization does not change a lot, as all allocated
2340 nodes refer to the unoptimized node.
2341 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2342 block = optimize_in_place_2(block);
2343 IRN_VRFY_IRG(block, irg);
2345 } /* mature_immBlock */
/* new_d_* wrappers: forward to the new_bd_* constructors on the current
   block of current_ir_graph, passing through the debug info. */
2348 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2349 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2353 new_d_Const(dbg_info *db, tarval *con) {
2354 return new_bd_Const(db, con);
2358 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2359 return new_bd_Const_long(db, mode, value);
2360 } /* new_d_Const_long */
2364 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
2365 return new_bd_Const_type(db, con, tp);
2366 } /* new_d_Const_type */
2370 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2371 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2375 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2376 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* Marks the Cond as "fragmentary" (default-proj kind) and emits the Proj
   used when no other case matches. */
2381 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2384 assert(arg->op == op_Cond);
2385 arg->attr.cond.kind = fragmentary;
2386 arg->attr.cond.default_proj = max_proj;
2387 res = new_Proj(arg, mode_X, max_proj);
2389 } /* new_d_defaultProj */
/* The trailing 0/1 flag distinguishes normal from strict Conv. */
2392 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2393 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2397 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2398 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2399 } /* new_d_strictConv */
2403 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2404 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2408 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2409 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2420 * Allocate a frag array for a node if the current graph state is phase_building.
2422 * @param irn the node for which the frag array should be allocated
2423 * @param op the opcode of the (original) node, if does not match opcode of irn,
2425 * @param frag_store the address of the frag store in irn attributes, if this
2426 * address contains a value != NULL, does nothing
2428 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
	/* Frag arrays are only needed when precise exception contexts are
	   requested; otherwise this is a no-op. */
2429 	if (get_opt_precise_exc_context()) {
	/* All three guards must hold: still building, node was not folded
	   into a different opcode, and no CSE partner filled the slot yet. */
2430 		if ((current_ir_graph->phase_state == phase_building) &&
2431 		    (get_irn_op(irn) == op) && /* Could be optimized away. */
2432 		    !*frag_store) /* Could be a cse where the arr is already set. */ {
2433 			*frag_store = new_frag_arr(irn);
2436 } /* firm_alloc_frag_arr */
2439 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2441 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2442 #if PRECISE_EXC_CONTEXT
2443 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2450 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2452 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2453 #if PRECISE_EXC_CONTEXT
2454 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2458 } /* new_d_DivMod */
2462 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2464 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2465 #if PRECISE_EXC_CONTEXT
2466 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2474 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2476 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2477 #if PRECISE_EXC_CONTEXT
2478 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2485 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2487 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2488 #if PRECISE_EXC_CONTEXT
2489 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
2509 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2510 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2514 new_d_Jmp(dbg_info *db) {
2515 return new_bd_Jmp(db, current_ir_graph->current_block);
2519 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2520 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2524 new_d_Cond(dbg_info *db, ir_node *c) {
2525 return new_bd_Cond(db, current_ir_graph->current_block, c);
2530 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2533 res = new_bd_Call(db, current_ir_graph->current_block,
2534 store, callee, arity, in, tp);
2535 #if PRECISE_EXC_CONTEXT
2536 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2543 new_d_Builtin(dbg_info *db, ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
2545 return new_bd_Builtin(db, current_ir_graph->current_block, store, kind, arity, in, tp);
2546 } /* new_d_Builtin */
2549 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2550 return new_bd_Return(db, current_ir_graph->current_block,
2552 } /* new_d_Return */
2555 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
2557 res = new_bd_Load(db, current_ir_graph->current_block,
2558 store, addr, mode, flags);
2559 #if PRECISE_EXC_CONTEXT
2560 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2567 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
2569 res = new_bd_Store(db, current_ir_graph->current_block,
2570 store, addr, val, flags);
2571 #if PRECISE_EXC_CONTEXT
2572 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2579 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2580 ir_where_alloc where) {
2582 res = new_bd_Alloc(db, current_ir_graph->current_block,
2583 store, size, alloc_type, where);
2584 #if PRECISE_EXC_CONTEXT
2585 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
2592 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2593 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2594 return new_bd_Free(db, current_ir_graph->current_block,
2595 store, ptr, size, free_type, where);
/* Construct a Sel node without array indices: selects entity 'ent' from
   the compound object pointed to by objptr (n_index = 0, index = NULL). */
2599 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2600 /* GL: objptr was called frame before. Frame was a bad choice for the name
2601    as the operand could as well be a pointer to a dynamic object. */
2603 	return new_bd_Sel(db, current_ir_graph->current_block,
2604 	                  store, objptr, 0, NULL, ent);
2605 } /* new_d_simpleSel */
2608 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2609 return new_bd_Sel(db, current_ir_graph->current_block,
2610 store, objptr, n_index, index, sel);
2614 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2615 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2617 } /* new_d_SymConst_type */
2620 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2621 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2622 value, kind, firm_unknown_type);
2623 } /* new_d_SymConst */
2626 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2627 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2631 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2632 return new_bd_Confirm(db, current_ir_graph->current_block,
2634 } /* new_d_Confirm */
/* Construct an Unknown node of mode m.  Unlike its siblings, this
   constructor takes no block — new_bd_Unknown is called without
   current_ir_graph->current_block. */
2637 new_d_Unknown(ir_mode *m) {
2638 	return new_bd_Unknown(m);
2639 } /* new_d_Unknown */
2642 new_d_CallBegin(dbg_info *db, ir_node *call) {
2643 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2644 } /* new_d_CallBegin */
2647 new_d_EndReg(dbg_info *db) {
2648 return new_bd_EndReg(db, current_ir_graph->current_block);
2649 } /* new_d_EndReg */
2652 new_d_EndExcept(dbg_info *db) {
2653 return new_bd_EndExcept(db, current_ir_graph->current_block);
2654 } /* new_d_EndExcept */
2657 new_d_Break(dbg_info *db) {
2658 return new_bd_Break(db, current_ir_graph->current_block);
2662 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2663 return new_bd_Filter(db, current_ir_graph->current_block,
2665 } /* new_d_Filter */
2668 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2669 ir_node *ir_true, ir_mode *mode) {
2670 return new_bd_Mux(db, current_ir_graph->current_block,
2671 sel, ir_false, ir_true, mode);
2674 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2675 ir_node *dst, ir_node *src, ir_type *data_type) {
2677 res = new_bd_CopyB(db, current_ir_graph->current_block,
2678 store, dst, src, data_type);
2679 #if PRECISE_EXC_CONTEXT
2680 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2686 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2687 return new_bd_InstOf(db, current_ir_graph->current_block,
2688 store, objptr, type);
2689 } /* new_d_InstOf */
2692 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2693 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2696 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2697 ir_node *idx, ir_node *lower, ir_node *upper) {
2699 res = new_bd_Bound(db, current_ir_graph->current_block,
2700 store, idx, lower, upper);
2701 #if PRECISE_EXC_CONTEXT
2702 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2708 new_d_Pin(dbg_info *db, ir_node *node) {
2709 return new_bd_Pin(db, current_ir_graph->current_block, node);
2713 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2714 int n_outs, ir_asm_constraint *outputs,
2715 int n_clobber, ident *clobber[], ident *asm_text) {
2716 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2719 /* ********************************************************************* */
2720 /* Comfortable interface with automatic Phi node construction. */
2721 /* (Uses also constructors of ?? interface, except new_Block.) */
2722 /* ********************************************************************* */
2724 /* Block construction */
2725 /* immature Block without predecessors */
/* Create an immature Block: a Block whose predecessor list is still open.
   Predecessors are appended later with add_immBlock_pred() and the block
   is finished with mature_immBlock().  Only legal while the graph is in
   phase_building. */
2727 new_d_immBlock(dbg_info *db) {
2730 	assert(get_irg_phase_state(current_ir_graph) == phase_building);
2731 	/* creates a new dynamic in-array as length of in is -1 */
2732 	res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2734 	/* macroblock head */
	/* A fresh immature block is its own macroblock head, unmatured,
	   alive, unlabeled, and attached to the current graph. */
2737 	res->attr.block.is_matured = 0;
2738 	res->attr.block.is_dead = 0;
2739 	res->attr.block.is_mb_head = 1;
2740 	res->attr.block.has_label = 0;
2741 	res->attr.block.irg = current_ir_graph;
2742 	res->attr.block.backedge = NULL;
2743 	res->attr.block.in_cg = NULL;
2744 	res->attr.block.cg_backedge = NULL;
2745 	res->attr.block.extblk = NULL;
2746 	res->attr.block.region = NULL;
2747 	res->attr.block.mb_depth = 0;
2748 	res->attr.block.label = 0;
2750 	set_Block_block_visited(res, 0);
2752 	/* Create and initialize array for Phi-node construction. */
	/* One slot per local variable (n_loc), zero-filled: no values defined
	   in this block yet. */
2753 	res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2754 	                                      current_ir_graph->n_loc);
2755 	memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2757 	/* Immature block may not be optimized! */
2758 	IRN_VRFY_IRG(res, current_ir_graph);
2761 } /* new_d_immBlock */
2764 new_immBlock(void) {
2765 return new_d_immBlock(NULL);
2766 } /* new_immBlock */
2768 /* immature PartBlock with its predecessors */
/* Create an immature PartBlock: a continuation block inside the macroblock
   of pred_jmp's block.  It is not a macroblock head itself and lies one
   level deeper in macroblock depth. */
2770 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2771 	ir_node *res = new_d_immBlock(db);
2772 	ir_node *blk = get_nodes_block(pred_jmp);
	/* NOTE(review): in[0] appears to carry the macroblock header — the
	   new part block inherits it from pred_jmp's block; confirm against
	   the Block node layout. */
2774 	res->in[0] = blk->in[0];
2775 	assert(res->in[0] != NULL);
2776 	add_immBlock_pred(res, pred_jmp);
2778 	res->attr.block.is_mb_head = 0;
2779 	res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2782 } /* new_d_immPartBlock */
2785 new_immPartBlock(ir_node *pred_jmp) {
2786 return new_d_immPartBlock(NULL, pred_jmp);
2787 } /* new_immPartBlock */
2789 /* add an edge to a jmp/control flow node */
/* Append jmp as a new control-flow predecessor of an immature block.
   Only allowed on unmatured blocks that are macroblock heads. */
2791 add_immBlock_pred(ir_node *block, ir_node *jmp) {
	/* Index of the new predecessor.  NOTE(review): the -1 suggests in[0]
	   is reserved (not a predecessor slot) — confirm against the Block
	   node layout. */
2792 	int n = ARR_LEN(block->in) - 1;
2794 	assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2795 	assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2796 	assert(is_ir_node(jmp));
2798 	ARR_APP1(ir_node *, block->in, jmp);
2800 	hook_set_irn_n(block, n, jmp, NULL);
2801 } /* add_immBlock_pred */
2803 /* changing the current block */
/* Make 'target' the block that the block-less new_d_*() constructors
   append to (they all read current_ir_graph->current_block). */
2805 set_cur_block(ir_node *target) {
2806 	current_ir_graph->current_block = target;
2807 } /* set_cur_block */
2809 /* ************************ */
2810 /* parameter administration */
2812 /* get a value from the parameter array from the current block by its index */
2814 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2815 ir_graph *irg = current_ir_graph;
2816 assert(get_irg_phase_state(irg) == phase_building);
2817 inc_irg_visited(irg);
2822 return get_r_value_internal(irg->current_block, pos + 1, mode);
2825 /* get a value from the parameter array from the current block by its index */
2827 get_value(int pos, ir_mode *mode) {
2828 return get_d_value(NULL, pos, mode);
2831 /* set a value at position pos in the parameter array from the current block */
2833 set_value(int pos, ir_node *value) {
2834 ir_graph *irg = current_ir_graph;
2835 assert(get_irg_phase_state(irg) == phase_building);
2837 assert(pos+1 < irg->n_loc);
2838 assert(is_ir_node(value));
2839 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2842 /* Find the value number for a node in the current block.*/
2844 find_value(ir_node *value) {
2846 ir_node *bl = current_ir_graph->current_block;
2848 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2849 if (bl->attr.block.graph_arr[i] == value)
2854 /* get the current store */
2857 ir_graph *irg = current_ir_graph;
2859 assert(get_irg_phase_state(irg) == phase_building);
2860 /* GL: one could call get_value instead */
2861 inc_irg_visited(irg);
2862 return get_r_value_internal(irg->current_block, 0, mode_M);
2865 /* set the current store: handles automatic Sync construction for Load nodes */
2867 set_store(ir_node *store) {
2868 ir_node *load, *pload, *pred, *in[2];
2870 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2871 /* Beware: due to dead code elimination, a store might become a Bad node even in
2872 the construction phase. */
2873 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2875 if (get_opt_auto_create_sync()) {
2876 /* handle non-volatile Load nodes by automatically creating Sync's */
2877 load = skip_Proj(store);
2878 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2879 pred = get_Load_mem(load);
2881 if (is_Sync(pred)) {
2882 /* a Load after a Sync: move it up */
2883 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2885 set_Load_mem(load, get_memop_mem(mem));
2886 add_Sync_pred(pred, store);
2889 pload = skip_Proj(pred);
2890 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2891 /* a Load after a Load: create a new Sync */
2892 set_Load_mem(load, get_Load_mem(pload));
2896 store = new_Sync(2, in);
2901 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2905 keep_alive(ir_node *ka) {
2906 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2909 /* --- Useful access routines --- */
2910 /* Returns the current block of the current graph. To set the current
2911 block use set_cur_block. */
2912 ir_node *get_cur_block(void) {
2913 return get_irg_current_block(current_ir_graph);
2914 } /* get_cur_block */
2916 /* Returns the frame type of the current graph */
2917 ir_type *get_cur_frame_type(void) {
2918 return get_irg_frame_type(current_ir_graph);
2919 } /* get_cur_frame_type */
2922 /* ********************************************************************* */
2925 /* call once for each run of the library */
/* Record the language-dependent callback that supplies values for
   uninitialized local variables (stored in the file-static
   default_initialize_local_variable).  Call once per run of the library. */
2927 firm_init_cons(uninitialized_local_variable_func_t *func) {
2928 	default_initialize_local_variable = func;
2929 } /* firm_init_cons */
/* Finalize construction of every graph in the program representation and
   move the whole program to phase_high.
   NOTE(review): the declaration of 'i' and the loop's closing brace are on
   lines not visible in this chunk. */
2932 irp_finalize_cons(void) {
2934 	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2935 		irg_finalize_cons(get_irp_irg(i));
2937 	irp->phase_state = phase_high;
2938 } /* irp_finalize_cons */
2941 ir_node *new_Block(int arity, ir_node **in) {
2942 return new_d_Block(NULL, arity, in);
2944 ir_node *new_Start(void) {
2945 return new_d_Start(NULL);
2947 ir_node *new_End(void) {
2948 return new_d_End(NULL);
2950 ir_node *new_Jmp(void) {
2951 return new_d_Jmp(NULL);
2953 ir_node *new_IJmp(ir_node *tgt) {
2954 return new_d_IJmp(NULL, tgt);
2956 ir_node *new_Cond(ir_node *c) {
2957 return new_d_Cond(NULL, c);
2960 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2961 return new_d_Return(NULL, store, arity, in);
2963 ir_node *new_Const(tarval *con) {
2964 return new_d_Const(NULL, con);
2967 ir_node *new_Const_long(ir_mode *mode, long value) {
2968 return new_d_Const_long(NULL, mode, value);
2972 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2973 return new_d_Const_type(NULL, con, tp);
2977 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2978 return new_d_SymConst_type(NULL, mode, value, kind, type);
2980 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2981 return new_d_SymConst(NULL, mode, value, kind);
2983 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2984 return new_d_simpleSel(NULL, store, objptr, ent);
2986 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2988 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2990 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2992 return new_d_Call(NULL, store, callee, arity, in, tp);
2994 ir_node *new_Builtin(ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
2996 return new_d_Builtin(NULL, store, kind, arity, in, tp);
2999 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3000 return new_d_Add(NULL, op1, op2, mode);
3003 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3004 return new_d_Sub(NULL, op1, op2, mode);
3006 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3007 return new_d_Minus(NULL, op, mode);
3009 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3010 return new_d_Mul(NULL, op1, op2, mode);
3012 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
3013 return new_d_Mulh(NULL, op1, op2, mode);
3015 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3016 return new_d_Quot(NULL, memop, op1, op2, mode, state);
3018 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3019 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
3022 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3023 return new_d_Div(NULL, memop, op1, op2, mode, state);
3026 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3027 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
3029 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3030 return new_d_Mod(NULL, memop, op1, op2, mode, state);
3032 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3033 return new_d_Abs(NULL, op, mode);
3035 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3036 return new_d_And(NULL, op1, op2, mode);
3038 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3039 return new_d_Or(NULL, op1, op2, mode);
3041 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3042 return new_d_Eor(NULL, op1, op2, mode);
3044 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3045 return new_d_Not(NULL, op, mode);
3047 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3048 return new_d_Shl(NULL, op, k, mode);
3050 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3051 return new_d_Shr(NULL, op, k, mode);
3053 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3054 return new_d_Shrs(NULL, op, k, mode);
3056 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
3057 return new_d_Rotl(NULL, op, k, mode);
3059 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3060 return new_d_Carry(NULL, op1, op2, mode);
3062 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3063 return new_d_Borrow(NULL, op1, op2, mode);
3066 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3067 return new_d_Cmp(NULL, op1, op2);
3070 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3071 return new_d_Conv(NULL, op, mode);
3073 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3074 return new_d_strictConv(NULL, op, mode);
3077 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3078 return new_d_Cast(NULL, op, to_tp);
3081 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3082 return new_d_Phi(NULL, arity, in, mode);
3084 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
3085 return new_d_Load(NULL, store, addr, mode, flags);
3087 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
3088 return new_d_Store(NULL, store, addr, val, flags);
3090 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3091 ir_where_alloc where) {
3092 return new_d_Alloc(NULL, store, size, alloc_type, where);
3094 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3095 ir_type *free_type, ir_where_alloc where) {
3096 return new_d_Free(NULL, store, ptr, size, free_type, where);
3098 ir_node *new_Sync(int arity, ir_node *in[]) {
3099 return new_d_Sync(NULL, arity, in);
3102 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3103 return new_d_Proj(NULL, arg, mode, proj);
3106 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3107 return new_d_defaultProj(NULL, arg, max_proj);
3110 ir_node *new_Tuple(int arity, ir_node **in) {
3111 return new_d_Tuple(NULL, arity, in);
3113 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3114 return new_d_Id(NULL, val, mode);
3117 ir_node *new_Bad(void) {
3118 return get_irg_bad(current_ir_graph);
3120 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3121 return new_d_Confirm(NULL, val, bound, cmp);
3123 ir_node *new_Unknown(ir_mode *m) {
3124 return new_d_Unknown(m);
3126 ir_node *new_CallBegin(ir_node *callee) {
3127 return new_d_CallBegin(NULL, callee);
3129 ir_node *new_EndReg(void) {
3130 return new_d_EndReg(NULL);
3132 ir_node *new_EndExcept(void) {
3133 return new_d_EndExcept(NULL);
3135 ir_node *new_Break(void) {
3136 return new_d_Break(NULL);
3138 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3139 return new_d_Filter(NULL, arg, mode, proj);
3141 ir_node *new_NoMem(void) {
3142 return get_irg_no_mem(current_ir_graph);
3144 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3145 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3147 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3148 return new_d_CopyB(NULL, store, dst, src, data_type);
3150 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3151 return new_d_InstOf(NULL, store, objptr, ent);
3153 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3154 return new_d_Raise(NULL, store, obj);
3156 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3157 return new_d_Bound(NULL, store, idx, lower, upper);
3159 ir_node *new_Pin(ir_node *node) {
3160 return new_d_Pin(NULL, node);
3162 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3163 int n_outs, ir_asm_constraint *outputs,
3164 int n_clobber, ident *clobber[], ident *asm_text) {
3165 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3168 /* create a new anchor node */
3169 ir_node *new_Anchor(ir_graph *irg) {
3170 ir_node *in[anchor_last];
3171 memset(in, 0, sizeof(in));
3172 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);