2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
47 /* Uncomment to use original code instead of generated one */
48 // #define USE_ORIGINAL
/* Node verification hook: verify each freshly built node in debug builds,
 * compile to nothing otherwise.
 * NOTE(review): the #ifdef guard lines were lost in this listing; NDEBUG is
 * the conventional switch — confirm against the upstream file. */
#ifdef NDEBUG
# define IRN_VRFY_IRG(res, irg)
#else
# define IRN_VRFY_IRG(res, irg)  irn_vrfy_irg(res, irg)
#endif
58 * Language dependent variable initialization callback.
60 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* Creates a "bd" (block-direct) constructor for a binary op: allocates the
 * node on current_ir_graph, runs local optimization and verifies the result.
 * (Reconstructed from a garbled listing: locals, in[] setup, return and
 * braces were missing.) */
#define NEW_BD_BINOP(instr)                                       \
static ir_node *                                                  \
new_bd_##instr(dbg_info *db, ir_node *block,                      \
       ir_node *op1, ir_node *op2, ir_mode *mode)                 \
{                                                                 \
  ir_node  *in[2];                                                \
  ir_node  *res;                                                  \
  ir_graph *irg = current_ir_graph;                               \
  in[0] = op1;                                                    \
  in[1] = op2;                                                    \
  res = new_ir_node(db, irg, block, op_##instr, mode, 2, in);     \
  res = optimize_node(res);                                       \
  IRN_VRFY_IRG(res, irg);                                         \
  return res;                                                     \
}
/* Creates a "bd" constructor for a unary op; same shape as NEW_BD_BINOP
 * but with a single operand passed by address. (Reconstructed from a
 * garbled listing.) */
#define NEW_BD_UNOP(instr)                                        \
static ir_node *                                                  \
new_bd_##instr(dbg_info *db, ir_node *block,                      \
       ir_node *op, ir_mode *mode)                                \
{                                                                 \
  ir_node  *res;                                                  \
  ir_graph *irg = current_ir_graph;                               \
  res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op);    \
  res = optimize_node(res);                                       \
  IRN_VRFY_IRG(res, irg);                                         \
  return res;                                                     \
}
/* Creates a "bd" constructor for a Div-like op (memop + two operands,
 * result mode_T; the numeric result mode is stored in the divmod attr).
 * (Reconstructed from a garbled listing.) */
#define NEW_BD_DIVOP(instr)                                       \
static ir_node *                                                  \
new_bd_##instr(dbg_info *db, ir_node *block,                      \
       ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
{                                                                 \
  ir_node  *in[3];                                                \
  ir_node  *res;                                                  \
  ir_graph *irg = current_ir_graph;                               \
  in[0] = memop;                                                  \
  in[1] = op1;                                                    \
  in[2] = op2;                                                    \
  res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in);   \
  res->attr.divmod.exc.pin_state = state;                         \
  res->attr.divmod.res_mode = mode;                               \
  res->attr.divmod.no_remainder = 0;                              \
  res = optimize_node(res);                                       \
  IRN_VRFY_IRG(res, irg);                                         \
  return res;                                                     \
}
/* Creates an "rd" (explicit-graph) constructor for a binary op: temporarily
 * switches current_ir_graph to irg, delegates to the bd constructor, and
 * restores the old graph. (Reconstructed from a garbled listing.) */
#define NEW_RD_BINOP(instr)                                       \
ir_node *                                                         \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,       \
       ir_node *op1, ir_node *op2, ir_mode *mode)                 \
{                                                                 \
  ir_node  *res;                                                  \
  ir_graph *rem = current_ir_graph;                               \
  current_ir_graph = irg;                                         \
  res = new_bd_##instr(db, block, op1, op2, mode);                \
  current_ir_graph = rem;                                         \
  return res;                                                     \
}
/* Creates an "rd" constructor for a unary op: save/switch/restore
 * current_ir_graph around the bd constructor. (Reconstructed from a
 * garbled listing.) */
#define NEW_RD_UNOP(instr)                                        \
ir_node *                                                         \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,       \
       ir_node *op, ir_mode *mode)                                \
{                                                                 \
  ir_node  *res;                                                  \
  ir_graph *rem = current_ir_graph;                               \
  current_ir_graph = irg;                                         \
  res = new_bd_##instr(db, block, op, mode);                      \
  current_ir_graph = rem;                                         \
  return res;                                                     \
}
/* Creates an "rd" constructor for a Div-like op: save/switch/restore
 * current_ir_graph around the bd constructor. (Reconstructed from a
 * garbled listing.) */
#define NEW_RD_DIVOP(instr)                                       \
ir_node *                                                         \
new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block,       \
       ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
{                                                                 \
  ir_node  *res;                                                  \
  ir_graph *rem = current_ir_graph;                               \
  current_ir_graph = irg;                                         \
  res = new_bd_##instr(db, block, memop, op1, op2, mode, state);  \
  current_ir_graph = rem;                                         \
  return res;                                                     \
}
/* Creates a "d" constructor for a binary op: builds in the current block of
 * current_ir_graph (used during automatic SSA construction).
 * (Reconstructed from a garbled listing.) */
#define NEW_D_BINOP(instr)                                                      \
ir_node *                                                                       \
new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) {        \
  return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode);   \
}
/* Creates a "d" constructor for a unary op: builds in the current block of
 * current_ir_graph. (Reconstructed from a garbled listing.) */
#define NEW_D_UNOP(instr)                                                 \
ir_node *                                                                 \
new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) {                 \
  return new_bd_##instr(db, current_ir_graph->current_block, op, mode);   \
}
171 #include "gen_ir_cons.c.inl"
175 * Constructs a Block with a fixed number of predecessors.
176 * Does not set current_block. Cannot be used with automatic
177 * Phi node construction.
180 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
182 ir_graph *irg = current_ir_graph;
184 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
186 /* macroblock header */
189 res->attr.block.is_dead = 0;
190 res->attr.block.is_mb_head = 1;
191 res->attr.block.has_label = 0;
192 res->attr.block.irg = irg;
193 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
194 res->attr.block.in_cg = NULL;
195 res->attr.block.cg_backedge = NULL;
196 res->attr.block.extblk = NULL;
197 res->attr.block.mb_depth = 0;
198 res->attr.block.label = 0;
200 set_Block_matured(res, 1);
201 set_Block_block_visited(res, 0);
203 IRN_VRFY_IRG(res, irg);
208 new_bd_Start(dbg_info *db, ir_node *block) {
210 ir_graph *irg = current_ir_graph;
212 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
214 IRN_VRFY_IRG(res, irg);
219 new_bd_End(dbg_info *db, ir_node *block) {
221 ir_graph *irg = current_ir_graph;
223 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
225 IRN_VRFY_IRG(res, irg);
231 * Creates a Phi node with all predecessors. Calling this constructor
232 * is only allowed if the corresponding block is mature.
235 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
237 ir_graph *irg = current_ir_graph;
241 /* Don't assert that block matured: the use of this constructor is strongly
243 if (get_Block_matured(block))
244 assert(get_irn_arity(block) == arity);
246 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
248 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
250 for (i = arity - 1; i >= 0; --i)
251 if (is_Unknown(in[i])) {
256 if (!has_unknown) res = optimize_node(res);
257 IRN_VRFY_IRG(res, irg);
259 /* Memory Phis in endless loops must be kept alive.
260 As we can't distinguish these easily we keep all of them alive. */
261 if (is_Phi(res) && mode == mode_M)
262 add_End_keepalive(get_irg_end(irg), res);
268 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
270 ir_graph *irg = current_ir_graph;
272 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
273 res->attr.con.tv = con;
274 set_Const_type(res, tp); /* Call method because of complex assertion. */
275 res = optimize_node (res);
276 assert(get_Const_type(res) == tp);
277 IRN_VRFY_IRG(res, irg);
280 } /* new_bd_Const_type */
284 new_bd_Const(dbg_info *db, tarval *con) {
285 ir_graph *irg = current_ir_graph;
287 return new_rd_Const_type (db, irg, con, firm_unknown_type);
291 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
292 ir_graph *irg = current_ir_graph;
294 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
295 } /* new_bd_Const_long */
299 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
301 ir_graph *irg = current_ir_graph;
303 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
310 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
313 ir_graph *irg = current_ir_graph;
315 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
316 res->attr.proj = proj;
319 assert(get_Proj_pred(res));
320 assert(get_nodes_block(get_Proj_pred(res)));
322 res = optimize_node(res);
324 IRN_VRFY_IRG(res, irg);
330 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
333 ir_graph *irg = current_ir_graph;
335 assert(arg->op == op_Cond);
336 arg->attr.cond.kind = fragmentary;
337 arg->attr.cond.default_proj = max_proj;
338 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
340 } /* new_bd_defaultProj */
343 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
345 ir_graph *irg = current_ir_graph;
347 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
348 res->attr.conv.strict = strict_flag;
349 res = optimize_node(res);
350 IRN_VRFY_IRG(res, irg);
356 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
358 ir_graph *irg = current_ir_graph;
360 assert(is_atomic_type(to_tp));
362 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
363 res->attr.cast.totype = to_tp;
364 res = optimize_node(res);
365 IRN_VRFY_IRG(res, irg);
370 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
372 ir_graph *irg = current_ir_graph;
374 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
375 res = optimize_node (res);
376 IRN_VRFY_IRG(res, irg);
404 /** Creates a remainderless Div node. */
405 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
406 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
410 ir_graph *irg = current_ir_graph;
414 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
415 res->attr.divmod.exc.pin_state = state;
416 res->attr.divmod.res_mode = mode;
417 res->attr.divmod.no_remainder = 1;
418 res = optimize_node(res);
419 IRN_VRFY_IRG(res, irg);
425 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
428 ir_graph *irg = current_ir_graph;
431 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
432 res = optimize_node(res);
433 IRN_VRFY_IRG(res, irg);
438 new_bd_Jmp(dbg_info *db, ir_node *block) {
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
443 res = optimize_node(res);
444 IRN_VRFY_IRG(res, irg);
449 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
451 ir_graph *irg = current_ir_graph;
453 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
454 res = optimize_node(res);
455 IRN_VRFY_IRG(res, irg);
460 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
462 ir_graph *irg = current_ir_graph;
464 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
465 res->attr.cond.kind = dense;
466 res->attr.cond.default_proj = 0;
467 res->attr.cond.pred = COND_JMP_PRED_NONE;
468 res = optimize_node(res);
469 IRN_VRFY_IRG(res, irg);
475 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
476 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
480 ir_graph *irg = current_ir_graph;
483 NEW_ARR_A(ir_node *, r_in, r_arity);
486 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
488 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
490 assert((get_unknown_type() == tp) || is_Method_type(tp));
491 set_Call_type(res, tp);
492 res->attr.call.exc.pin_state = op_pin_state_pinned;
493 res->attr.call.callee_arr = NULL;
494 res = optimize_node(res);
495 IRN_VRFY_IRG(res, irg);
500 new_bd_Builtin(dbg_info *db, ir_node *block, ir_node *store,
501 ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
505 ir_graph *irg = current_ir_graph;
508 NEW_ARR_A(ir_node *, r_in, r_arity);
510 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
512 res = new_ir_node(db, irg, block, op_Builtin, mode_T, r_arity, r_in);
514 assert((get_unknown_type() == tp) || is_Method_type(tp));
515 res->attr.builtin.exc.pin_state = op_pin_state_pinned;
516 res->attr.builtin.kind = kind;
517 res->attr.builtin.builtin_tp = tp;
518 res = optimize_node(res);
519 IRN_VRFY_IRG(res, irg);
521 } /* new_bd_Buildin */
524 new_bd_Return(dbg_info *db, ir_node *block,
525 ir_node *store, int arity, ir_node **in) {
529 ir_graph *irg = current_ir_graph;
532 NEW_ARR_A (ir_node *, r_in, r_arity);
534 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
535 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
536 res = optimize_node(res);
537 IRN_VRFY_IRG(res, irg);
539 } /* new_bd_Return */
542 new_bd_Load(dbg_info *db, ir_node *block,
543 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
546 ir_graph *irg = current_ir_graph;
550 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
551 res->attr.load.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
552 res->attr.load.load_mode = mode;
553 res->attr.load.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
554 res->attr.load.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
555 res = optimize_node(res);
556 IRN_VRFY_IRG(res, irg);
561 new_bd_Store(dbg_info *db, ir_node *block,
562 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
565 ir_graph *irg = current_ir_graph;
570 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
571 res->attr.store.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
572 res->attr.store.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
573 res->attr.store.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
575 res = optimize_node(res);
576 IRN_VRFY_IRG(res, irg);
581 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
582 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
585 ir_graph *irg = current_ir_graph;
589 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
590 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
591 res->attr.alloc.where = where;
592 res->attr.alloc.type = alloc_type;
593 res = optimize_node(res);
594 IRN_VRFY_IRG(res, irg);
599 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
600 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
603 ir_graph *irg = current_ir_graph;
608 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
609 res->attr.free.where = where;
610 res->attr.free.type = free_type;
611 res = optimize_node(res);
612 IRN_VRFY_IRG(res, irg);
617 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
618 int arity, ir_node **in, ir_entity *ent) {
622 ir_graph *irg = current_ir_graph;
623 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
625 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
628 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
631 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
633 * Sel's can select functions which should be of mode mode_P_code.
635 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
636 res->attr.sel.ent = ent;
637 res = optimize_node(res);
638 IRN_VRFY_IRG(res, irg);
643 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
644 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
645 ir_graph *irg = current_ir_graph;
646 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
648 res->attr.symc.kind = symkind;
649 res->attr.symc.sym = value;
650 res->attr.symc.tp = tp;
652 res = optimize_node(res);
653 IRN_VRFY_IRG(res, irg);
655 } /* new_bd_SymConst_type */
658 new_bd_Sync(dbg_info *db, ir_node *block) {
660 ir_graph *irg = current_ir_graph;
662 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
663 /* no need to call optimize node here, Sync are always created with no predecessors */
664 IRN_VRFY_IRG(res, irg);
669 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
670 ir_node *in[2], *res;
671 ir_graph *irg = current_ir_graph;
675 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
676 res->attr.confirm.cmp = cmp;
677 res = optimize_node(res);
678 IRN_VRFY_IRG(res, irg);
680 } /* new_bd_Confirm */
683 new_bd_Unknown(ir_mode *m) {
685 ir_graph *irg = current_ir_graph;
687 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
688 res = optimize_node(res);
690 } /* new_bd_Unknown */
693 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
696 ir_graph *irg = current_ir_graph;
698 in[0] = get_Call_ptr(call);
699 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
700 /* res->attr.callbegin.irg = irg; */
701 res->attr.callbegin.call = call;
702 res = optimize_node(res);
703 IRN_VRFY_IRG(res, irg);
705 } /* new_bd_CallBegin */
708 new_bd_EndReg(dbg_info *db, ir_node *block) {
710 ir_graph *irg = current_ir_graph;
712 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
713 set_irg_end_reg(irg, res);
714 IRN_VRFY_IRG(res, irg);
716 } /* new_bd_EndReg */
719 new_bd_EndExcept(dbg_info *db, ir_node *block) {
721 ir_graph *irg = current_ir_graph;
723 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
724 set_irg_end_except(irg, res);
725 IRN_VRFY_IRG (res, irg);
727 } /* new_bd_EndExcept */
730 new_bd_Break(dbg_info *db, ir_node *block) {
732 ir_graph *irg = current_ir_graph;
734 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
735 res = optimize_node(res);
736 IRN_VRFY_IRG(res, irg);
741 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
744 ir_graph *irg = current_ir_graph;
746 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
747 res->attr.filter.proj = proj;
748 res->attr.filter.in_cg = NULL;
749 res->attr.filter.backedge = NULL;
752 assert(get_Proj_pred(res));
753 assert(get_nodes_block(get_Proj_pred(res)));
755 res = optimize_node(res);
756 IRN_VRFY_IRG(res, irg);
758 } /* new_bd_Filter */
761 new_bd_Mux(dbg_info *db, ir_node *block,
762 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
765 ir_graph *irg = current_ir_graph;
771 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
774 res = optimize_node(res);
775 IRN_VRFY_IRG(res, irg);
780 new_bd_CopyB(dbg_info *db, ir_node *block,
781 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
784 ir_graph *irg = current_ir_graph;
790 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
792 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
793 res->attr.copyb.data_type = data_type;
794 res = optimize_node(res);
795 IRN_VRFY_IRG(res, irg);
800 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
801 ir_node *objptr, ir_type *type) {
804 ir_graph *irg = current_ir_graph;
808 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
809 res->attr.instof.type = type;
810 res = optimize_node(res);
811 IRN_VRFY_IRG(res, irg);
813 } /* new_bd_InstOf */
816 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
819 ir_graph *irg = current_ir_graph;
823 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
824 res = optimize_node(res);
825 IRN_VRFY_IRG(res, irg);
830 new_bd_Bound(dbg_info *db, ir_node *block,
831 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
834 ir_graph *irg = current_ir_graph;
840 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
841 res->attr.bound.exc.pin_state = op_pin_state_pinned;
842 res = optimize_node(res);
843 IRN_VRFY_IRG(res, irg);
848 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
850 ir_graph *irg = current_ir_graph;
852 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
853 res = optimize_node(res);
854 IRN_VRFY_IRG(res, irg);
859 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
860 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
862 ir_graph *irg = current_ir_graph;
864 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
865 res->attr.assem.pin_state = op_pin_state_pinned;
866 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
867 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
868 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
869 res->attr.assem.asm_text = asm_text;
871 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
872 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
873 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
875 res = optimize_node(res);
876 IRN_VRFY_IRG(res, irg);
880 /* --------------------------------------------- */
881 /* private interfaces, for professional use only */
882 /* --------------------------------------------- */
885 /* Constructs a Block with a fixed number of predecessors.
886 Does not set current_block. Can not be used with automatic
887 Phi node construction. */
889 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
890 ir_graph *rem = current_ir_graph;
893 current_ir_graph = irg;
894 res = new_bd_Block(db, arity, in);
895 current_ir_graph = rem;
901 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
902 ir_graph *rem = current_ir_graph;
905 current_ir_graph = irg;
906 res = new_bd_Start(db, block);
907 current_ir_graph = rem;
913 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
915 ir_graph *rem = current_ir_graph;
917 current_ir_graph = irg;
918 res = new_bd_End(db, block);
919 current_ir_graph = rem;
925 /* Creates a Phi node with all predecessors. Calling this constructor
926 is only allowed if the corresponding block is mature. */
928 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
930 ir_graph *rem = current_ir_graph;
932 current_ir_graph = irg;
933 res = new_bd_Phi(db, block,arity, in, mode);
934 current_ir_graph = rem;
941 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
943 ir_graph *rem = current_ir_graph;
945 current_ir_graph = irg;
946 res = new_bd_Const_type(db, con, tp);
947 current_ir_graph = rem;
950 } /* new_rd_Const_type */
954 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
957 ir_graph *rem = current_ir_graph;
959 current_ir_graph = irg;
960 res = new_bd_Const_type(db, con, firm_unknown_type);
961 current_ir_graph = rem;
963 res = new_rd_Const_type(db, irg, con, firm_unknown_type);
970 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
971 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
972 } /* new_rd_Const_long */
976 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
978 ir_graph *rem = current_ir_graph;
980 current_ir_graph = irg;
981 res = new_bd_Id(db, block, val, mode);
982 current_ir_graph = rem;
988 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
991 ir_graph *rem = current_ir_graph;
993 current_ir_graph = irg;
994 res = new_bd_Proj(db, block, arg, mode, proj);
995 current_ir_graph = rem;
1002 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1005 ir_graph *rem = current_ir_graph;
1007 current_ir_graph = irg;
1008 res = new_bd_defaultProj(db, block, arg, max_proj);
1009 current_ir_graph = rem;
1012 } /* new_rd_defaultProj */
1015 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
1017 ir_graph *rem = current_ir_graph;
1019 current_ir_graph = irg;
1020 res = new_bd_Conv(db, block, op, mode, strict_flag);
1021 current_ir_graph = rem;
1028 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1030 ir_graph *rem = current_ir_graph;
1032 current_ir_graph = irg;
1033 res = new_bd_Cast(db, block, op, to_tp);
1034 current_ir_graph = rem;
1040 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1042 ir_graph *rem = current_ir_graph;
1044 current_ir_graph = irg;
1045 res = new_bd_Tuple(db, block, arity, in);
1046 current_ir_graph = rem;
1049 } /* new_rd_Tuple */
1058 NEW_RD_DIVOP(DivMod)
1073 NEW_RD_BINOP(Borrow)
1075 /* creates a rd constructor for an divRL */
1076 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1077 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1080 ir_graph *rem = current_ir_graph;
1081 current_ir_graph = irg;
1082 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1083 current_ir_graph = rem;
1089 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1090 ir_node *op1, ir_node *op2) {
1092 ir_graph *rem = current_ir_graph;
1094 current_ir_graph = irg;
1095 res = new_bd_Cmp(db, block, op1, op2);
1096 current_ir_graph = rem;
1102 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1104 ir_graph *rem = current_ir_graph;
1106 current_ir_graph = irg;
1107 res = new_bd_Jmp(db, block);
1108 current_ir_graph = rem;
1114 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1116 ir_graph *rem = current_ir_graph;
1118 current_ir_graph = irg;
1119 res = new_bd_IJmp(db, block, tgt);
1120 current_ir_graph = rem;
1126 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1128 ir_graph *rem = current_ir_graph;
1130 current_ir_graph = irg;
1131 res = new_bd_Cond(db, block, c);
1132 current_ir_graph = rem;
1139 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1140 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1142 ir_graph *rem = current_ir_graph;
1144 current_ir_graph = irg;
1145 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1146 current_ir_graph = rem;
1152 new_rd_Builtin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1153 ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
1155 ir_graph *rem = current_ir_graph;
1157 current_ir_graph = irg;
1158 res = new_bd_Builtin(db, block, store, kind, arity, in, tp);
1159 current_ir_graph = rem;
1162 } /* new_rd_Builtin */
1165 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1166 ir_node *store, int arity, ir_node **in) {
1168 ir_graph *rem = current_ir_graph;
1170 current_ir_graph = irg;
1171 res = new_bd_Return(db, block, store, arity, in);
1172 current_ir_graph = rem;
1175 } /* new_rd_Return */
1178 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1179 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1181 ir_graph *rem = current_ir_graph;
1183 current_ir_graph = irg;
1184 res = new_bd_Load(db, block, store, adr, mode, flags);
1185 current_ir_graph = rem;
1191 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1192 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1194 ir_graph *rem = current_ir_graph;
1196 current_ir_graph = irg;
1197 res = new_bd_Store(db, block, store, adr, val, flags);
1198 current_ir_graph = rem;
1201 } /* new_rd_Store */
1204 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1205 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1207 ir_graph *rem = current_ir_graph;
1209 current_ir_graph = irg;
1210 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1211 current_ir_graph = rem;
1214 } /* new_rd_Alloc */
1217 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1218 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1220 ir_graph *rem = current_ir_graph;
1222 current_ir_graph = irg;
1223 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1224 current_ir_graph = rem;
1230 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1231 ir_node *store, ir_node *objptr, ir_entity *ent) {
1233 ir_graph *rem = current_ir_graph;
1235 current_ir_graph = irg;
1236 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1237 current_ir_graph = rem;
1240 } /* new_rd_simpleSel */
1243 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1244 int arity, ir_node **in, ir_entity *ent) {
1246 ir_graph *rem = current_ir_graph;
1248 current_ir_graph = irg;
1249 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1250 current_ir_graph = rem;
1256 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1257 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1259 ir_graph *rem = current_ir_graph;
1261 current_ir_graph = irg;
1262 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1263 current_ir_graph = rem;
1266 } /* new_rd_SymConst_type */
1269 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1270 symconst_symbol value, symconst_kind symkind) {
1271 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1272 } /* new_rd_SymConst */
1274 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1275 symconst_symbol sym;
1276 sym.entity_p = symbol;
1277 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1278 } /* new_rd_SymConst_addr_ent */
1280 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1281 symconst_symbol sym;
1282 sym.entity_p = symbol;
1283 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1284 } /* new_rd_SymConst_ofs_ent */
1286 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1287 symconst_symbol sym;
1288 sym.ident_p = symbol;
1289 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1290 } /* new_rd_SymConst_addr_name */
1292 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1293 symconst_symbol sym;
1294 sym.type_p = symbol;
1295 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1296 } /* new_rd_SymConst_type_tag */
1298 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1299 symconst_symbol sym;
1300 sym.type_p = symbol;
1301 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1302 } /* new_rd_SymConst_size */
1304 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1305 symconst_symbol sym;
1306 sym.type_p = symbol;
1307 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1308 } /* new_rd_SymConst_align */
1311 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1313 ir_graph *rem = current_ir_graph;
1316 current_ir_graph = irg;
1317 res = new_bd_Sync(db, block);
1318 current_ir_graph = rem;
1320 for (i = 0; i < arity; ++i)
1321 add_Sync_pred(res, in[i]);
1327 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1329 ir_graph *rem = current_ir_graph;
1331 current_ir_graph = irg;
1332 res = new_bd_Confirm(db, block, val, bound, cmp);
1333 current_ir_graph = rem;
1336 } /* new_rd_Confirm */
1339 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1341 ir_graph *rem = current_ir_graph;
1343 current_ir_graph = irg;
1344 res = new_bd_Unknown(m);
1345 current_ir_graph = rem;
1348 } /* new_rd_Unknown */
1351 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1353 ir_graph *rem = current_ir_graph;
1355 current_ir_graph = irg;
1356 res = new_bd_CallBegin(db, block, call);
1357 current_ir_graph = rem;
1360 } /* new_rd_CallBegin */
1363 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1366 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1367 set_irg_end_reg(irg, res);
1368 IRN_VRFY_IRG(res, irg);
1370 } /* new_rd_EndReg */
1373 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1376 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1377 set_irg_end_except(irg, res);
1378 IRN_VRFY_IRG (res, irg);
1380 } /* new_rd_EndExcept */
1383 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1385 ir_graph *rem = current_ir_graph;
1387 current_ir_graph = irg;
1388 res = new_bd_Break(db, block);
1389 current_ir_graph = rem;
1392 } /* new_rd_Break */
/* Filter, Mux, CopyB: explicit-graph constructors following the common
   save/switch/restore pattern around the corresponding new_bd_* call. */
1395 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1398 ir_graph *rem = current_ir_graph;
1400 current_ir_graph = irg;
1401 res = new_bd_Filter(db, block, arg, mode, proj);
1402 current_ir_graph = rem;
1405 } /* new_rd_Filter */
1408 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1409 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1411 ir_graph *rem = current_ir_graph;
1413 current_ir_graph = irg;
1414 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1415 current_ir_graph = rem;
1420 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1421 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1423 ir_graph *rem = current_ir_graph;
1425 current_ir_graph = irg;
1426 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1427 current_ir_graph = rem;
1430 } /* new_rd_CopyB */
/* InstOf, Raise, Bound, Pin: explicit-graph constructors, same
   save/switch/restore pattern delegating to new_bd_*. */
1433 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1434 ir_node *objptr, ir_type *type) {
1436 ir_graph *rem = current_ir_graph;
1438 current_ir_graph = irg;
1439 res = new_bd_InstOf(db, block, store, objptr, type);
1440 current_ir_graph = rem;
1443 } /* new_rd_InstOf */
1446 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1448 ir_graph *rem = current_ir_graph;
1450 current_ir_graph = irg;
1451 res = new_bd_Raise(db, block, store, obj);
1452 current_ir_graph = rem;
1455 } /* new_rd_Raise */
1457 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1458 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1460 ir_graph *rem = current_ir_graph;
1462 current_ir_graph = irg;
1463 res = new_bd_Bound(db, block, store, idx, lower, upper);
1464 current_ir_graph = rem;
1467 } /* new_rd_Bound */
1469 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1471 ir_graph *rem = current_ir_graph;
1473 current_ir_graph = irg;
1474 res = new_bd_Pin(db, block, node);
1475 current_ir_graph = rem;
/* ASM node: inline-assembly constructor. All constraint/clobber arrays and
   the assembler text are forwarded verbatim to new_bd_ASM under the usual
   temporary current_ir_graph switch. */
1480 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1481 int arity, ir_node *in[], ir_asm_constraint *inputs,
1482 int n_outs, ir_asm_constraint *outputs,
1483 int n_clobber, ident *clobber[], ident *asm_text) {
1485 ir_graph *rem = current_ir_graph;
1487 current_ir_graph = irg;
1488 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1489 current_ir_graph = rem;
/* new_r_* constructors: thin convenience wrappers that call the matching
   new_rd_* constructor with a NULL dbg_info (no debug information). */
1496 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1497 return new_rd_Block(NULL, irg, arity, in);
1499 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1500 return new_rd_Start(NULL, irg, block);
1502 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1503 return new_rd_End(NULL, irg, block);
1505 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1506 return new_rd_Jmp(NULL, irg, block);
1508 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1509 return new_rd_IJmp(NULL, irg, block, tgt);
1511 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1512 return new_rd_Cond(NULL, irg, block, c);
1515 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1516 ir_node *store, int arity, ir_node **in) {
1517 return new_rd_Return(NULL, irg, block, store, arity, in);
1519 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
1520 return new_rd_Const(NULL, irg, con);
1522 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
1523 return new_rd_Const_long(NULL, irg, mode, value);
1526 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
1527 return new_rd_Const_type(NULL, irg, con, tp);
1530 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1531 symconst_symbol value, symconst_kind symkind) {
1532 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
/* simpleSel is a Sel with no index operands (n_index == 0, index == NULL). */
1534 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1535 ir_node *objptr, ir_entity *ent) {
1536 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1538 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1539 ir_node *objptr, int n_index, ir_node **index,
1541 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1543 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1544 ir_node *callee, int arity, ir_node **in,
1546 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1548 ir_node *new_r_Builtin(ir_graph *irg, ir_node *block, ir_node *store,
1549 ir_builtin_kind kind, int arity, ir_node **in,
1551 return new_rd_Builtin(NULL, irg, block, store, kind, arity, in, tp);
/* Arithmetic, bitwise, shift and compare constructors without debug info.
   Division-family ops (Quot/DivMod/Div/DivRL/Mod) additionally carry a memory
   operand and an op_pin_state, forwarded unchanged. */
1554 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1555 ir_node *op1, ir_node *op2, ir_mode *mode) {
1556 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1559 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1560 ir_node *op1, ir_node *op2, ir_mode *mode) {
1561 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1563 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1564 ir_node *op, ir_mode *mode) {
1565 return new_rd_Minus(NULL, irg, block, op, mode);
1567 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1568 ir_node *op1, ir_node *op2, ir_mode *mode) {
1569 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1571 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1572 ir_node *op1, ir_node *op2, ir_mode *mode) {
1573 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1575 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1576 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1577 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1579 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1580 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1581 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1584 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1585 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1586 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1589 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1590 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1591 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1593 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1594 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1595 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1597 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1598 ir_node *op, ir_mode *mode) {
1599 return new_rd_Abs(NULL, irg, block, op, mode);
1601 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1602 ir_node *op1, ir_node *op2, ir_mode *mode) {
1603 return new_rd_And(NULL, irg, block, op1, op2, mode);
1605 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1606 ir_node *op1, ir_node *op2, ir_mode *mode) {
1607 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1609 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1610 ir_node *op1, ir_node *op2, ir_mode *mode) {
1611 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1613 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1614 ir_node *op, ir_mode *mode) {
1615 return new_rd_Not(NULL, irg, block, op, mode);
1617 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1618 ir_node *op, ir_node *k, ir_mode *mode) {
1619 return new_rd_Shl(NULL, irg, block, op, k, mode);
1621 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1622 ir_node *op, ir_node *k, ir_mode *mode) {
1623 return new_rd_Shr(NULL, irg, block, op, k, mode);
1625 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1626 ir_node *op, ir_node *k, ir_mode *mode) {
1627 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1629 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1630 ir_node *op, ir_node *k, ir_mode *mode) {
1631 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1633 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1634 ir_node *op, ir_node *k, ir_mode *mode) {
1635 return new_rd_Carry(NULL, irg, block, op, k, mode);
1637 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1638 ir_node *op, ir_node *k, ir_mode *mode) {
1639 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1642 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1643 ir_node *op1, ir_node *op2) {
1644 return new_rd_Cmp(NULL, irg, block, op1, op2);
/* Remaining new_r_* wrappers (NULL dbg_info). Bad and NoMem are special:
   they return the per-graph singleton instead of constructing a node. */
1647 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1648 ir_node *op, ir_mode *mode, int strict_flag) {
1649 return new_rd_Conv(NULL, irg, block, op, mode, strict_flag);
1652 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1653 return new_rd_Cast(NULL, irg, block, op, to_tp);
1656 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1657 ir_node **in, ir_mode *mode) {
1658 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1660 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1661 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1662 return new_rd_Load(NULL, irg, block, store, adr, mode, flags);
1664 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1665 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1666 return new_rd_Store(NULL, irg, block, store, adr, val, flags);
1668 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1669 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1670 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1672 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1673 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1674 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1676 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1677 return new_rd_Sync(NULL, irg, block, arity, in);
1680 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1681 ir_mode *mode, long proj) {
1682 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1685 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1687 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1690 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1691 int arity, ir_node **in) {
1692 return new_rd_Tuple(NULL, irg, block, arity, in );
1694 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1695 ir_node *val, ir_mode *mode) {
1696 return new_rd_Id(NULL, irg, block, val, mode);
/* Bad is unique per graph; just hand out the cached node. */
1699 ir_node *new_r_Bad(ir_graph *irg) {
1700 return get_irg_bad(irg);
1702 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1703 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1705 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1706 return new_rd_Unknown(irg, m);
1708 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1709 return new_rd_CallBegin(NULL, irg, block, callee);
1711 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1712 return new_rd_EndReg(NULL, irg, block);
1714 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1715 return new_rd_EndExcept(NULL, irg, block);
1717 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1718 return new_rd_Break(NULL, irg, block);
1720 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1721 ir_mode *mode, long proj) {
1722 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
/* NoMem is unique per graph, like Bad. */
1724 ir_node *new_r_NoMem(ir_graph *irg) {
1725 return get_irg_no_mem(irg);
1727 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1728 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1729 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1731 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1732 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1733 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1735 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1737 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1739 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1740 ir_node *store, ir_node *obj) {
1741 return new_rd_Raise(NULL, irg, block, store, obj);
1743 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1744 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1745 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1747 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1748 return new_rd_Pin(NULL, irg, block, node);
1750 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1751 int arity, ir_node *in[], ir_asm_constraint *inputs,
1752 int n_outs, ir_asm_constraint *outputs,
1753 int n_clobber, ident *clobber[], ident *asm_text) {
1754 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1757 /** ********************/
1758 /** public interfaces */
1759 /** construction tools */
1765 * - create a new Start node in the current block
1767 * @return s - pointer to the created Start node
/* Create a Start node (mode_T, no operands) in the current block of
   current_ir_graph, then optimize and verify it. */
1772 new_d_Start(dbg_info *db) {
1775 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1776 op_Start, mode_T, 0, NULL);
1778 res = optimize_node(res);
1779 IRN_VRFY_IRG(res, current_ir_graph);
/* Create an End node (mode_X, dynamic arity -1: keep-alive edges are added
   later) in the current block, then optimize and verify it. */
1784 new_d_End(dbg_info *db) {
1786 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1787 op_End, mode_X, -1, NULL);
1788 res = optimize_node(res);
1789 IRN_VRFY_IRG(res, current_ir_graph);
1794 /* Constructs a Block with a fixed number of predecessors.
1795 Does set current_block. Can be used with automatic Phi
1796 node construction. */
/* Create a Block with a fixed number of predecessors and, while the graph is
   still being built, attach the graph_arr used for automatic Phi-node
   construction (one slot per local value, zero-initialized). */
1798 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1801 int has_unknown = 0;
1803 res = new_bd_Block(db, arity, in);
1805 /* Create and initialize array for Phi-node construction. */
1806 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1807 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1808 current_ir_graph->n_loc);
1809 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan predecessors for Unknown nodes; presumably has_unknown is set in the
   (not shown) branch body — a block with Unknown preds is not optimized yet. */
1812 for (i = arity-1; i >= 0; i--)
1813 if (is_Unknown(in[i])) {
1818 if (!has_unknown) res = optimize_node(res);
1820 IRN_VRFY_IRG(res, current_ir_graph);
1826 /* ***********************************************************************/
1827 /* Methods necessary for automatic Phi node creation */
1829 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1830 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1831 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1832 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1834 Call Graph: ( A ---> B == A "calls" B)
1836 get_value mature_immBlock
1844 get_r_value_internal |
1848 new_rd_Phi0 new_rd_Phi_in
1850 * *************************************************************************** */
1852 /** Creates a Phi node with 0 predecessors. */
1853 static inline ir_node *
1854 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
/* Zero-arity placeholder Phi; no dbg_info, no inputs. It stands in for a
   value until the block matures and the real predecessors are known. */
1857 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1858 IRN_VRFY_IRG(res, irg);
1864 * Internal constructor of a Phi node by a phi_merge operation.
1866 * @param irg the graph on which the Phi will be constructed
1867 * @param block the block in which the Phi will be constructed
1868 * @param mode the mode of the Phi node
1869 * @param in the input array of the phi node
1870 * @param ins number of elements in the input array
1871 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1872 * the value for which the new Phi is constructed
1874 static inline ir_node *
1875 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1876 ir_node **in, int ins, ir_node *phi0) {
1878 ir_node *res, *known;
1880 /* Allocate a new node on the obstack. The allocation copies the in
1882 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1883 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1885 /* This loop checks whether the Phi has more than one predecessor.
1886 If so, it is a real Phi node and we break the loop. Else the
1887 Phi node merges the same definition on several paths and therefore
1889 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
1891 for (i = ins - 1; i >= 0; --i) {
1894 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1896 /* Optimize self referencing Phis: We can't detect them yet properly, as
1897 they still refer to the Phi0 they will replace. So replace right now. */
1898 if (phi0 && in[i] == phi0)
1901 if (in[i] == res || in[i] == known)
1910 /* i < 0: there is at most one predecessor, we don't need a phi node. */
1913 edges_node_deleted(res, current_ir_graph);
1914 obstack_free(current_ir_graph->obst, res);
1915 if (is_Phi(known)) {
1916 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1917 order, an enclosing Phi node may become superfluous. */
1918 res = optimize_in_place_2(known);
1920 exchange(known, res);
1925 /* An undefined value, e.g., in unreachable code. */
1929 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1930 IRN_VRFY_IRG(res, irg);
1931 /* Memory Phis in endless loops must be kept alive.
1932 As we can't distinguish these easily we keep all of them alive. */
1933 if (is_Phi(res) && mode == mode_M)
1934 add_End_keepalive(get_irg_end(irg), res);
1938 } /* new_rd_Phi_in */
1941 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1943 #if PRECISE_EXC_CONTEXT
1945 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1948 * Construct a new frag_array for node n.
1949 * Copy the content from the current graph_arr of the corresponding block:
1950 * this is the current state.
1951 * Set ProjM(n) as current memory state.
1952 * Further the last entry in frag_arr of current block points to n. This
1953 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Build the frag_arr for fragile op n: a snapshot of the current block's
   graph_arr, with slot 0 replaced by the op's memory Proj (the op-specific
   M-except projection, or the shared M number for the assert-checked group).
   The last slot of the block's graph_arr is pointed at n, chaining
   block->last_frag_op->...->first_frag_op. */
1955 static inline ir_node **new_frag_arr(ir_node *n) {
1959 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1960 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1961 sizeof(ir_node *)*current_ir_graph->n_loc);
1963 /* turn off optimization before allocating Proj nodes, as res isn't
1965 opt = get_opt_optimize(); set_optimize(0);
1966 /* Here we rely on the fact that all frag ops have Memory as first result! */
1968 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1969 } else if (is_CopyB(n)) {
1970 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops must share the same memory Proj number. */
1972 assert((pn_Quot_M == pn_DivMod_M) &&
1973 (pn_Quot_M == pn_Div_M) &&
1974 (pn_Quot_M == pn_Mod_M) &&
1975 (pn_Quot_M == pn_Load_M) &&
1976 (pn_Quot_M == pn_Store_M) &&
1977 (pn_Quot_M == pn_Alloc_M) &&
1978 (pn_Quot_M == pn_Bound_M));
1979 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1983 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1985 } /* new_frag_arr */
1988 * Returns the frag_arr from a node.
/* Return the frag_arr stored in n's attributes; the attribute union member
   holding it depends on the node's opcode (call/alloc/load/store/generic). */
1990 static inline ir_node **get_frag_arr(ir_node *n) {
1991 switch (get_irn_opcode(n)) {
1993 return n->attr.call.exc.frag_arr;
1995 return n->attr.alloc.exc.frag_arr;
1997 return n->attr.load.exc.frag_arr;
1999 return n->attr.store.exc.frag_arr;
2001 return n->attr.except.frag_arr;
2003 } /* get_frag_arr */
/* Record val at frag_arr[pos] if that slot is still empty, then walk the
   frag-op chain (linked through slot n_loc-1) to propagate into later frag
   arrays. The loop bound of 1024 only caps the walk in debug builds; hitting
   it triggers the "endless recursion" assertion. */
2006 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2007 #ifdef DEBUG_libfirm
2010 for (i = 1024; i >= 0; --i)
2015 if (frag_arr[pos] == NULL)
2016 frag_arr[pos] = val;
2017 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
2018 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2019 assert(arr != frag_arr && "Endless recursion detected");
2024 assert(!"potential endless recursion in set_frag_value");
2025 } /* set_frag_value */
/* Look up the value at pos as seen right after fragile op cfOp. Starts from
   cfOp's frag_arr snapshot; if the block has a newer definition a Phi (or a
   pending Phi0 for immature blocks) is built and written back into all
   still-empty frag arrays via set_frag_value. */
2028 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2032 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
2034 frag_arr = get_frag_arr(cfOp);
2035 res = frag_arr[pos];
2037 if (block->attr.block.graph_arr[pos] != NULL) {
2038 /* There was a set_value() after the cfOp and no get_value() before that
2039 set_value(). We must build a Phi node now. */
2040 if (block->attr.block.is_matured) {
2041 int ins = get_irn_arity(block);
2043 NEW_ARR_A(ir_node *, nin, ins);
2044 res = phi_merge(block, pos, mode, nin, ins);
/* Immature block: queue a Phi0 on the block's phi list for mature_immBlock. */
2046 res = new_rd_Phi0(current_ir_graph, block, mode);
2047 res->attr.phi.u.pos = pos;
2048 res->attr.phi.next = block->attr.block.phis;
2049 block->attr.block.phis = res;
2051 assert(res != NULL);
2052 /* It's a Phi, we can write this into all graph_arrs with NULL */
2053 set_frag_value(block->attr.block.graph_arr, pos, res);
2055 res = get_r_value_internal(block, pos, mode);
2056 set_frag_value(block->attr.block.graph_arr, pos, res);
2060 } /* get_r_frag_value_internal */
2061 #endif /* PRECISE_EXC_CONTEXT */
2064 * Check whether a control flownode cf_pred represents an exception flow.
2066 * @param cf_pred the control flow node
2067 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
/* Decide whether control-flow edge cf_pred coming from prev_cf_op is
   exceptional: only projections of fragile ops count, and of those only the
   non-regular (X_except) projection. */
2069 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2071 * Note: all projections from a raise are "exceptional control flow" so we handle it
2072 * like a normal Jmp, because there is no "regular" one.
2073 * That's why Raise is no "fragile_op"!
2075 if (is_fragile_op(prev_cf_op)) {
2076 if (is_Proj(cf_pred)) {
2077 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2078 /* the regular control flow, NO exception */
2081 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2084 /* Hmm, exception but not a Proj? */
2085 assert(!"unexpected condition: fragile op without a proj");
2089 } /* is_exception_flow */
2092 * Computes the predecessors for the real phi node, and then
2093 * allocates and returns this node. The routine called to allocate the
2094 * node might optimize it away and return a real value.
2095 * This function must be called with an in-array of proper size.
/* Collect the value at pos from every predecessor block into nin and build
   the resulting Phi via new_rd_Phi_in (which may fold it to a single value).
   A Phi0 placeholder is installed first to break recursion through loops. */
2098 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2099 ir_node *prevBlock, *res, *phi0, *phi0_all;
2102 /* If this block has no value at pos create a Phi0 and remember it
2103 in graph_arr to break recursions.
2104 Else we may not set graph_arr as there a later value is remembered. */
2106 if (block->attr.block.graph_arr[pos] == NULL) {
2107 ir_graph *irg = current_ir_graph;
2109 if (block == get_irg_start_block(irg)) {
2110 /* Collapsing to Bad tarvals is no good idea.
2111 So we call a user-supplied routine here that deals with this case as
2112 appropriate for the given language. Sadly the only help we can give
2113 here is the position.
2115 Even if all variables are defined before use, it can happen that
2116 we get to the start block, if a Cond has been replaced by a tuple
2117 (bad, jmp). In this case we call the function needlessly, eventually
2118 generating a non-existent error.
2119 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2122 if (default_initialize_local_variable != NULL) {
2123 ir_node *rem = get_cur_block();
2125 set_cur_block(block);
2126 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2130 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2131 /* We don't need to care about exception ops in the start block.
2132 There are none by definition. */
2133 return block->attr.block.graph_arr[pos];
2135 phi0 = new_rd_Phi0(irg, block, mode);
2136 block->attr.block.graph_arr[pos] = phi0;
2137 #if PRECISE_EXC_CONTEXT
2138 if (get_opt_precise_exc_context()) {
2139 /* Set graph_arr for fragile ops. Also here we should break recursion.
2140 We could choose a cyclic path through a cfop. But the recursion would
2141 break at some point. */
2142 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2148 /* This loop goes to all predecessor blocks of the block the Phi node
2149 is in and there finds the operands of the Phi node by calling
2150 get_r_value_internal. */
2151 for (i = 1; i <= ins; ++i) {
2152 ir_node *cf_pred = block->in[i];
2153 ir_node *prevCfOp = skip_Proj(cf_pred);
2155 if (is_Bad(prevCfOp)) {
2156 /* In case a Cond has been optimized we would get right to the start block
2157 with an invalid definition. */
2158 nin[i-1] = new_Bad();
2161 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2163 if (!is_Bad(prevBlock)) {
2164 #if PRECISE_EXC_CONTEXT
/* Exceptional edges read the value as of the fragile op, not the block end. */
2165 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2166 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2167 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2170 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2172 nin[i-1] = new_Bad();
2176 /* We want to pass the Phi0 node to the constructor: this finds additional
2177 optimization possibilities.
2178 The Phi0 node either is allocated in this function, or it comes from
2179 a former call to get_r_value_internal(). In this case we may not yet
2180 exchange phi0, as this is done in mature_immBlock(). */
2182 phi0_all = block->attr.block.graph_arr[pos];
2183 if (! is_Phi0(phi0_all) ||
2184 get_irn_arity(phi0_all) != 0 ||
2185 get_nodes_block(phi0_all) != block)
2191 /* After collecting all predecessors into the array nin a new Phi node
2192 with these predecessors is created. This constructor contains an
2193 optimization: If all predecessors of the Phi node are identical it
2194 returns the only operand instead of a new Phi node. */
2195 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2197 /* In case we allocated a Phi0 node at the beginning of this procedure,
2198 we need to exchange this Phi0 with the real Phi. */
2200 exchange(phi0, res);
2201 block->attr.block.graph_arr[pos] = res;
2202 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2203 only an optimization. */
2210 * This function returns the last definition of a value. In case
2211 * this value was last defined in a previous block, Phi nodes are
2212 * inserted. If the part of the firm graph containing the definition
2213 * is not yet constructed, a dummy Phi node is returned.
2215 * @param block the current block
2216 * @param pos the value number of the value searched
2217 * @param mode the mode of this value (needed for Phi construction)
/* Return the reaching definition of local value pos (mode needed for Phi
   construction), inserting Phi/Phi0 nodes as required; see the 4-case
   discussion below. Uses the graph's visited flag to detect re-entry. */
2220 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2222 /* There are 4 cases to treat.
2224 1. The block is not mature and we visit it the first time. We can not
2225 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2226 predecessors is returned. This node is added to the linked list (block
2227 attribute "phis") of the containing block to be completed when this block is
2228 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2231 2. The value is already known in this block, graph_arr[pos] is set and we
2232 visit the block the first time. We can return the value without
2233 creating any new nodes.
2235 3. The block is mature and we visit it the first time. A Phi node needs
2236 to be created (phi_merge). If the Phi is not needed, as all it's
2237 operands are the same value reaching the block through different
2238 paths, it's optimized away and the value itself is returned.
2240 4. The block is mature, and we visit it the second time. Now two
2241 subcases are possible:
2242 * The value was computed completely the last time we were here. This
2243 is the case if there is no loop. We can return the proper value.
2244 * The recursion that visited this node and set the flag did not
2245 return yet. We are computing a value in a loop and need to
2246 break the recursion. This case only happens if we visited
2247 the same block with phi_merge before, which inserted a Phi0.
2248 So we return the Phi0.
2251 /* case 4 -- already visited. */
2252 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2253 /* As phi_merge allocates a Phi0 this value is always defined. Here
2254 is the critical difference of the two algorithms. */
2255 assert(block->attr.block.graph_arr[pos]);
2256 return block->attr.block.graph_arr[pos];
2259 /* visited the first time */
2260 set_irn_visited(block, get_irg_visited(current_ir_graph));
2262 /* Get the local valid value */
2263 res = block->attr.block.graph_arr[pos];
2265 /* case 2 -- If the value is actually computed, return it. */
2269 if (block->attr.block.is_matured) { /* case 3 */
2271 /* The Phi has the same amount of ins as the corresponding block. */
2272 int ins = get_irn_arity(block);
2274 NEW_ARR_A(ir_node *, nin, ins);
2276 /* Phi merge collects the predecessors and then creates a node. */
2277 res = phi_merge(block, pos, mode, nin, ins);
2279 } else { /* case 1 */
2280 /* The block is not mature, we don't know how many in's are needed. A Phi
2281 with zero predecessors is created. Such a Phi node is called Phi0
2282 node. The Phi0 is then added to the list of Phi0 nodes in this block
2283 to be matured by mature_immBlock later.
2284 The Phi0 has to remember the pos of it's internal value. If the real
2285 Phi is computed, pos is used to update the array with the local
2287 res = new_rd_Phi0(current_ir_graph, block, mode);
2288 res->attr.phi.u.pos = pos;
2289 res->attr.phi.next = block->attr.block.phis;
2290 block->attr.block.phis = res;
2293 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2295 /* The local valid value is available now. */
2296 block->attr.block.graph_arr[pos] = res;
2299 } /* get_r_value_internal */
2301 /* ************************************************************************** */
2304 * Finalize a Block node, when all control flows are known.
2305 * Acceptable parameters are only Block nodes.
/* Finalize an immature Block once all control-flow predecessors are known:
   allocate backedge info, replace every queued Phi0 on the block's phi list
   with a real Phi via phi_merge, mark the block matured and optimize it
   in place (nodes created meanwhile already reference it, so it cannot be
   freed from the obstack). */
2308 mature_immBlock(ir_node *block) {
2313 assert(is_Block(block));
2314 if (!get_Block_matured(block)) {
2315 ir_graph *irg = current_ir_graph;
2317 ins = ARR_LEN(block->in) - 1;
2318 /* Fix block parameters */
2319 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2321 /* An array for building the Phi nodes. */
2322 NEW_ARR_A(ir_node *, nin, ins);
2324 /* Traverse a chain of Phi nodes attached to this block and mature
2326 for (n = block->attr.block.phis; n; n = next) {
2327 inc_irg_visited(irg);
2328 next = n->attr.phi.next;
2329 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2332 block->attr.block.is_matured = 1;
2334 /* Now, as the block is a finished Firm node, we can optimize it.
2335 Since other nodes have been allocated since the block was created
2336 we can not free the node on the obstack. Therefore we have to call
2337 optimize_in_place().
2338 Unfortunately the optimization does not change a lot, as all allocated
2339 nodes refer to the unoptimized node.
2340 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2341 block = optimize_in_place_2(block);
2342 IRN_VRFY_IRG(block, irg);
2344 } /* mature_immBlock */
/* new_d_* constructors: build in the current block of current_ir_graph by
   delegating to new_bd_*. */
2347 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2348 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2352 new_d_Const(dbg_info *db, tarval *con) {
2353 return new_bd_Const(db, con);
2357 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2358 return new_bd_Const_long(db, mode, value);
2359 } /* new_d_Const_long */
2363 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
2364 return new_bd_Const_type(db, con, tp);
2365 } /* new_d_Const_type */
2369 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2370 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2374 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2375 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* defaultProj mutates the Cond: marks it fragmentary and records max_proj as
   its default projection number before creating the mode_X Proj. */
2380 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2382 assert(arg->op == op_Cond);
2383 arg->attr.cond.kind = fragmentary;
2384 arg->attr.cond.default_proj = max_proj;
2385 res = new_d_Proj(db, arg, mode_X, max_proj);
2387 } /* new_d_defaultProj */
/* Conv vs. strictConv differ only in the strict flag (0 vs. 1). */
2390 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2391 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2395 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2396 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2397 } /* new_d_strictConv */
2401 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2402 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2406 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2407 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2418 * Allocate a frag array for a node if the current graph state is phase_building.
2420 * @param irn the node for which the frag array should be allocated
2421 * @param op the opcode of the (original) node, if does not match opcode of irn,
2423 * @param frag_store the address of the frag store in irn attributes, if this
2424 * address contains a value != NULL, does nothing
2426 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2427 if (get_opt_precise_exc_context()) {
2428 if ((current_ir_graph->phase_state == phase_building) &&
2429 (get_irn_op(irn) == op) && /* Could be optimized away. */
2430 !*frag_store) /* Could be a cse where the arr is already set. */ {
2431 *frag_store = new_frag_arr(irn);
2434 } /* firm_alloc_frag_arr */
/* -- Exception-raising arithmetic constructors (Quot/DivMod/Div/DivRL/Mod).
   After building the node, a fragmentary array is allocated (when
   PRECISE_EXC_CONTEXT is enabled) so the memory state can be tracked per
   exception edge; see firm_alloc_frag_arr(). -- */
2437 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2439 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2440 #if PRECISE_EXC_CONTEXT
2441 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2448 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2450 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2451 #if PRECISE_EXC_CONTEXT
2452 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2456 } /* new_d_DivMod */
2460 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2462 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2463 #if PRECISE_EXC_CONTEXT
2464 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2472 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2474 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2475 #if PRECISE_EXC_CONTEXT
/* NOTE(review): op_Div (not a DivRL-specific opcode) is passed here --
   presumably new_bd_DivRL builds an op_Div node with a no-remainder flag;
   confirm against new_bd_DivRL before "fixing" this. */
2476 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2483 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2485 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2486 #if PRECISE_EXC_CONTEXT
2487 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
/* -- Control-flow, call and memory-operation constructors for the
   current block. Constructors of nodes that may raise exceptions
   (Call/Load/Store/Alloc) additionally allocate a frag array when
   precise exception contexts are enabled. -- */
2507 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2508 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2512 new_d_Jmp(dbg_info *db) {
2513 return new_bd_Jmp(db, current_ir_graph->current_block);
2517 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2518 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2522 new_d_Cond(dbg_info *db, ir_node *c) {
2523 return new_bd_Cond(db, current_ir_graph->current_block, c);
2528 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2531 res = new_bd_Call(db, current_ir_graph->current_block,
2532 store, callee, arity, in, tp);
2533 #if PRECISE_EXC_CONTEXT
2534 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2541 new_d_Builtin(dbg_info *db, ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
2543 return new_bd_Builtin(db, current_ir_graph->current_block, store, kind, arity, in, tp);
2544 } /* new_d_Builtin */
2547 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2548 return new_bd_Return(db, current_ir_graph->current_block,
2550 } /* new_d_Return */
2553 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
2555 res = new_bd_Load(db, current_ir_graph->current_block,
2556 store, addr, mode, flags);
2557 #if PRECISE_EXC_CONTEXT
2558 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2565 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
2567 res = new_bd_Store(db, current_ir_graph->current_block,
2568 store, addr, val, flags);
2569 #if PRECISE_EXC_CONTEXT
2570 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2577 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2578 ir_where_alloc where) {
2580 res = new_bd_Alloc(db, current_ir_graph->current_block,
2581 store, size, alloc_type, where);
2582 #if PRECISE_EXC_CONTEXT
2583 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
2590 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2591 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2592 return new_bd_Free(db, current_ir_graph->current_block,
2593 store, ptr, size, free_type, where);
/* simpleSel: entity selection without array indices (0 indices, NULL array). */
2597 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2598 /* GL: objptr was called frame before. Frame was a bad choice for the name
2599 as the operand could as well be a pointer to a dynamic object. */
2601 return new_bd_Sel(db, current_ir_graph->current_block,
2602 store, objptr, 0, NULL, ent);
2603 } /* new_d_simpleSel */
2606 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2607 return new_bd_Sel(db, current_ir_graph->current_block,
2608 store, objptr, n_index, index, sel);
/* SymConsts are placed in the start block, not the current block:
   they are constants valid for the whole graph. */
2612 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2613 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2615 } /* new_d_SymConst_type */
2618 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2619 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2620 value, kind, firm_unknown_type);
2621 } /* new_d_SymConst */
/* NOTE(review): unlike its siblings this forwards to new_rd_Sync (the
   graph-explicit constructor), not new_bd_Sync -- verify this asymmetry
   is intentional. */
2624 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2625 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
/* -- Remaining debug-info constructors: value annotations (Confirm),
   interprocedural-view nodes (CallBegin/EndReg/EndExcept/Break/Filter),
   and misc operations (Mux/CopyB/InstOf/Raise/Bound/Pin/ASM). -- */
2629 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2630 return new_bd_Confirm(db, current_ir_graph->current_block,
2632 } /* new_d_Confirm */
2635 new_d_Unknown(ir_mode *m) {
2636 return new_bd_Unknown(m);
2637 } /* new_d_Unknown */
2640 new_d_CallBegin(dbg_info *db, ir_node *call) {
2641 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2642 } /* new_d_CallBegin */
2645 new_d_EndReg(dbg_info *db) {
2646 return new_bd_EndReg(db, current_ir_graph->current_block);
2647 } /* new_d_EndReg */
2650 new_d_EndExcept(dbg_info *db) {
2651 return new_bd_EndExcept(db, current_ir_graph->current_block);
2652 } /* new_d_EndExcept */
2655 new_d_Break(dbg_info *db) {
2656 return new_bd_Break(db, current_ir_graph->current_block);
2660 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2661 return new_bd_Filter(db, current_ir_graph->current_block,
2663 } /* new_d_Filter */
2666 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2667 ir_node *ir_true, ir_mode *mode) {
2668 return new_bd_Mux(db, current_ir_graph->current_block,
2669 sel, ir_false, ir_true, mode);
/* CopyB may raise an exception -> frag array as for Load/Store. */
2672 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2673 ir_node *dst, ir_node *src, ir_type *data_type) {
2675 res = new_bd_CopyB(db, current_ir_graph->current_block,
2676 store, dst, src, data_type);
2677 #if PRECISE_EXC_CONTEXT
2678 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2684 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2685 return new_bd_InstOf(db, current_ir_graph->current_block,
2686 store, objptr, type);
2687 } /* new_d_InstOf */
2690 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2691 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
/* Bound check may raise an exception -> frag array. */
2694 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2695 ir_node *idx, ir_node *lower, ir_node *upper) {
2697 res = new_bd_Bound(db, current_ir_graph->current_block,
2698 store, idx, lower, upper);
2699 #if PRECISE_EXC_CONTEXT
2700 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2706 new_d_Pin(dbg_info *db, ir_node *node) {
2707 return new_bd_Pin(db, current_ir_graph->current_block, node);
2711 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2712 int n_outs, ir_asm_constraint *outputs,
2713 int n_clobber, ident *clobber[], ident *asm_text) {
2714 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2717 /* ********************************************************************* */
2718 /* Comfortable interface with automatic Phi node construction. */
2719 /* (Also uses the constructors of the debug-info (new_d_*) interface, except new_Block.) */
2720 /* ********************************************************************* */
2722 /* Block construction */
2723 /* immature Block without predecessors */
/* Creates an immature block: a block whose predecessors are not yet known.
   Predecessors are added later with add_immBlock_pred(); the block must be
   finished with mature_immBlock() before the graph is complete. */
2725 new_d_immBlock(dbg_info *db) {
2728 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2729 /* creates a new dynamic in-array as length of in is -1 */
2730 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2732 /* macroblock head */
/* Initialize the block attributes: not yet matured, alive, and the head
   of its own macroblock; all auxiliary analysis data starts out empty. */
2735 res->attr.block.is_matured = 0;
2736 res->attr.block.is_dead = 0;
2737 res->attr.block.is_mb_head = 1;
2738 res->attr.block.has_label = 0;
2739 res->attr.block.irg = current_ir_graph;
2740 res->attr.block.backedge = NULL;
2741 res->attr.block.in_cg = NULL;
2742 res->attr.block.cg_backedge = NULL;
2743 res->attr.block.extblk = NULL;
2744 res->attr.block.region = NULL;
2745 res->attr.block.mb_depth = 0;
2746 res->attr.block.label = 0;
2748 set_Block_block_visited(res, 0);
2750 /* Create and initialize array for Phi-node construction. */
2751 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2752 current_ir_graph->n_loc);
2753 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2755 /* Immature block may not be optimized! */
2756 IRN_VRFY_IRG(res, current_ir_graph);
2759 } /* new_d_immBlock */
2762 new_immBlock(void) {
2763 return new_d_immBlock(NULL);
2764 } /* new_immBlock */
2766 /* immature PartBlock with its predecessors */
/* A part block continues an existing macroblock: it shares the macroblock
   header of pred_jmp's block (copied via in[0] -- presumably the macroblock
   header slot; confirm) and sits one nesting level deeper. */
2768 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2769 ir_node *res = new_d_immBlock(db);
2770 ir_node *blk = get_nodes_block(pred_jmp);
2772 res->in[0] = blk->in[0];
2773 assert(res->in[0] != NULL);
2774 add_immBlock_pred(res, pred_jmp);
2776 res->attr.block.is_mb_head = 0;
2777 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2780 } /* new_d_immPartBlock */
2783 new_immPartBlock(ir_node *pred_jmp) {
2784 return new_d_immPartBlock(NULL, pred_jmp);
2785 } /* new_immPartBlock */
2787 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new control-flow predecessor of an immature block.
   Only legal before the block is matured, and only on macroblock heads. */
2789 add_immBlock_pred(ir_node *block, ir_node *jmp) {
/* index of the new predecessor (in[0] is reserved, hence -1) */
2790 int n = ARR_LEN(block->in) - 1;
2792 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2793 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2794 assert(is_ir_node(jmp));
2796 ARR_APP1(ir_node *, block->in, jmp);
2798 hook_set_irn_n(block, n, jmp, NULL);
2799 } /* add_immBlock_pred */
2801 /* changing the current block */
/* Sets the block that all subsequent implicit-block constructors target. */
2803 set_cur_block(ir_node *target) {
2804 current_ir_graph->current_block = target;
2805 } /* set_cur_block */
2807 /* ************************ */
2808 /* parameter administration */
2810 /* get a value from the parameter array from the current block by its index */
/* Looks up local variable #pos in the current block, triggering automatic
   Phi construction if the value is defined in a predecessor block.
   Position 0 of the graph_arr is reserved for the memory state, hence the
   pos + 1 offset throughout this section. */
2812 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2813 ir_graph *irg = current_ir_graph;
2814 assert(get_irg_phase_state(irg) == phase_building);
/* fresh visited mark so the recursive Phi search terminates */
2815 inc_irg_visited(irg);
2820 return get_r_value_internal(irg->current_block, pos + 1, mode);
2823 /* get a value from the parameter array from the current block by its index */
2825 get_value(int pos, ir_mode *mode) {
2826 return get_d_value(NULL, pos, mode);
2829 /* set a value at position pos in the parameter array from the current block */
2831 set_value(int pos, ir_node *value) {
2832 ir_graph *irg = current_ir_graph;
2833 assert(get_irg_phase_state(irg) == phase_building);
2835 assert(pos+1 < irg->n_loc);
2836 assert(is_ir_node(value));
2837 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2840 /* Find the value number for a node in the current block.*/
/* Linear scan over the current block's graph_arr; slot 0 (memory) is
   skipped, so the loop stops at index 1. */
2842 find_value(ir_node *value) {
2844 ir_node *bl = current_ir_graph->current_block;
2846 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2847 if (bl->attr.block.graph_arr[i] == value)
2852 /* get the current store */
/* The memory state lives at reserved position 0 of the graph_arr. */
2855 ir_graph *irg = current_ir_graph;
2857 assert(get_irg_phase_state(irg) == phase_building);
2858 /* GL: one could call get_value instead */
2859 inc_irg_visited(irg);
2860 return get_r_value_internal(irg->current_block, 0, mode_M);
2863 /* set the current store: handles automatic Sync construction for Load nodes */
/* Stores the new memory state into slot 0 of the current block's graph_arr.
   When auto-Sync creation is enabled, consecutive non-volatile Loads are
   detected and their memory dependencies merged through a Sync node, so
   independent Loads do not get artificially serialized. */
2865 set_store(ir_node *store) {
2866 ir_node *load, *pload, *pred, *in[2];
2868 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2869 /* Beware: due to dead code elimination, a store might become a Bad node even in
2870 the construction phase. */
2871 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2873 if (get_opt_auto_create_sync()) {
2874 /* handle non-volatile Load nodes by automatically creating Sync's */
2875 load = skip_Proj(store);
2876 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2877 pred = get_Load_mem(load);
2879 if (is_Sync(pred)) {
2880 /* a Load after a Sync: move it up */
2881 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2883 set_Load_mem(load, get_memop_mem(mem));
2884 add_Sync_pred(pred, store);
2887 pload = skip_Proj(pred);
2888 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2889 /* a Load after a Load: create a new Sync */
2890 set_Load_mem(load, get_Load_mem(pload));
/* merge the two Loads' memory outputs into one Sync */
2894 store = new_Sync(2, in);
2899 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Registers ka as a keep-alive edge on the graph's End node so it survives
   dead-node elimination. */
2903 keep_alive(ir_node *ka) {
2904 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2907 /* --- Useful access routines --- */
2908 /* Returns the current block of the current graph. To set the current
2909 block use set_cur_block. */
2910 ir_node *get_cur_block(void) {
2911 return get_irg_current_block(current_ir_graph);
2912 } /* get_cur_block */
2914 /* Returns the frame type of the current graph */
2915 ir_type *get_cur_frame_type(void) {
2916 return get_irg_frame_type(current_ir_graph);
2917 } /* get_cur_frame_type */
2920 /* ********************************************************************* */
2923 /* call once for each run of the library */
/* Installs the frontend callback used to initialize local variables that
   are read before they are written (see default_initialize_local_variable). */
2925 firm_init_cons(uninitialized_local_variable_func_t *func) {
2926 default_initialize_local_variable = func;
2927 } /* firm_init_cons */
/* Finalizes construction of every graph in the program and advances the
   whole program representation to phase_high. */
2930 irp_finalize_cons(void) {
2932 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2933 irg_finalize_cons(get_irp_irg(i));
2935 irp->phase_state = phase_high;
2936 } /* irp_finalize_cons */
/* -- Convenience constructors without debug info: each new_X() forwards to
   new_d_X(NULL, ...), i.e. the same constructor with an empty dbg_info. -- */
2939 ir_node *new_Block(int arity, ir_node **in) {
2940 return new_d_Block(NULL, arity, in);
2942 ir_node *new_Start(void) {
2943 return new_d_Start(NULL);
2945 ir_node *new_End(void) {
2946 return new_d_End(NULL);
2948 ir_node *new_Jmp(void) {
2949 return new_d_Jmp(NULL);
2951 ir_node *new_IJmp(ir_node *tgt) {
2952 return new_d_IJmp(NULL, tgt);
2954 ir_node *new_Cond(ir_node *c) {
2955 return new_d_Cond(NULL, c);
2958 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2959 return new_d_Return(NULL, store, arity, in);
2961 ir_node *new_Const(tarval *con) {
2962 return new_d_Const(NULL, con);
2965 ir_node *new_Const_long(ir_mode *mode, long value) {
2966 return new_d_Const_long(NULL, mode, value);
2970 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2971 return new_d_Const_type(NULL, con, tp);
2975 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2976 return new_d_SymConst_type(NULL, mode, value, kind, type);
2978 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2979 return new_d_SymConst(NULL, mode, value, kind);
2981 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2982 return new_d_simpleSel(NULL, store, objptr, ent);
2984 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2986 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2988 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2990 return new_d_Call(NULL, store, callee, arity, in, tp);
2992 ir_node *new_Builtin(ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
2994 return new_d_Builtin(NULL, store, kind, arity, in, tp);
2997 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2998 return new_d_Add(NULL, op1, op2, mode);
3001 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3002 return new_d_Sub(NULL, op1, op2, mode);
3004 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3005 return new_d_Minus(NULL, op, mode);
3007 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3008 return new_d_Mul(NULL, op1, op2, mode);
3010 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
3011 return new_d_Mulh(NULL, op1, op2, mode);
3013 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3014 return new_d_Quot(NULL, memop, op1, op2, mode, state);
3016 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3017 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
3020 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3021 return new_d_Div(NULL, memop, op1, op2, mode, state);
3024 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3025 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
3027 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3028 return new_d_Mod(NULL, memop, op1, op2, mode, state);
3030 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3031 return new_d_Abs(NULL, op, mode);
3033 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3034 return new_d_And(NULL, op1, op2, mode);
3036 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3037 return new_d_Or(NULL, op1, op2, mode);
3039 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3040 return new_d_Eor(NULL, op1, op2, mode);
3042 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3043 return new_d_Not(NULL, op, mode);
3045 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3046 return new_d_Shl(NULL, op, k, mode);
3048 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3049 return new_d_Shr(NULL, op, k, mode);
3051 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3052 return new_d_Shrs(NULL, op, k, mode);
3054 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
3055 return new_d_Rotl(NULL, op, k, mode);
3057 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3058 return new_d_Carry(NULL, op1, op2, mode);
3060 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3061 return new_d_Borrow(NULL, op1, op2, mode);
3064 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3065 return new_d_Cmp(NULL, op1, op2);
3068 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3069 return new_d_Conv(NULL, op, mode);
3071 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3072 return new_d_strictConv(NULL, op, mode);
3075 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3076 return new_d_Cast(NULL, op, to_tp);
3079 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3080 return new_d_Phi(NULL, arity, in, mode);
3082 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
3083 return new_d_Load(NULL, store, addr, mode, flags);
3085 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
3086 return new_d_Store(NULL, store, addr, val, flags);
3088 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3089 ir_where_alloc where) {
3090 return new_d_Alloc(NULL, store, size, alloc_type, where);
3092 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3093 ir_type *free_type, ir_where_alloc where) {
3094 return new_d_Free(NULL, store, ptr, size, free_type, where);
3096 ir_node *new_Sync(int arity, ir_node *in[]) {
3097 return new_d_Sync(NULL, arity, in);
3100 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3101 return new_d_Proj(NULL, arg, mode, proj);
3104 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3105 return new_d_defaultProj(NULL, arg, max_proj);
3108 ir_node *new_Tuple(int arity, ir_node **in) {
3109 return new_d_Tuple(NULL, arity, in);
3111 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3112 return new_d_Id(NULL, val, mode);
/* Bad and NoMem are unique per graph: return the cached singleton. */
3115 ir_node *new_Bad(void) {
3116 return get_irg_bad(current_ir_graph);
3118 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3119 return new_d_Confirm(NULL, val, bound, cmp);
3121 ir_node *new_Unknown(ir_mode *m) {
3122 return new_d_Unknown(m);
3124 ir_node *new_CallBegin(ir_node *callee) {
3125 return new_d_CallBegin(NULL, callee);
3127 ir_node *new_EndReg(void) {
3128 return new_d_EndReg(NULL);
3130 ir_node *new_EndExcept(void) {
3131 return new_d_EndExcept(NULL);
3133 ir_node *new_Break(void) {
3134 return new_d_Break(NULL);
3136 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3137 return new_d_Filter(NULL, arg, mode, proj);
3139 ir_node *new_NoMem(void) {
3140 return get_irg_no_mem(current_ir_graph);
3142 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3143 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3145 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3146 return new_d_CopyB(NULL, store, dst, src, data_type);
3148 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3149 return new_d_InstOf(NULL, store, objptr, ent);
3151 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3152 return new_d_Raise(NULL, store, obj);
3154 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3155 return new_d_Bound(NULL, store, idx, lower, upper);
3157 ir_node *new_Pin(ir_node *node) {
3158 return new_d_Pin(NULL, node);
/* Dummy: a fresh placeholder node, placed in the start block. */
3160 ir_node *new_Dummy(ir_mode *m) {
3161 ir_graph *irg = current_ir_graph;
3162 return new_ir_node(NULL, irg, get_irg_start_block(irg), op_Dummy, m, 0, NULL);
3164 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3165 int n_outs, ir_asm_constraint *outputs,
3166 int n_clobber, ident *clobber[], ident *asm_text) {
3167 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3170 /* create a new anchor node */
/* The Anchor has a fixed number of slots (anchor_last), all initialized to
   NULL; it has no block (NULL) and belongs to the given graph. */
3171 ir_node *new_Anchor(ir_graph *irg) {
3172 ir_node *in[anchor_last];
3173 memset(in, 0, sizeof(in));
3174 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);