2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
47 #if USE_EXPLICIT_PHI_IN_STACK
48 /* A stack needed for the automatic Phi node construction in constructor
49 Phi_in. Redefinition in irgraph.c!! */
54 typedef struct Phi_in_stack Phi_in_stack;
57 /* when we need verifying */
59 # define IRN_VRFY_IRG(res, irg)
61 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
65 * Language dependent variable initialization callback.
67 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
69 /* creates a bd constructor for a binop */
70 #define NEW_BD_BINOP(instr) \
72 new_bd_##instr(dbg_info *db, ir_node *block, \
73 ir_node *op1, ir_node *op2, ir_mode *mode) \
77 ir_graph *irg = current_ir_graph; \
80 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
81 res = optimize_node(res); \
82 IRN_VRFY_IRG(res, irg); \
86 /* creates a bd constructor for an unop */
87 #define NEW_BD_UNOP(instr) \
89 new_bd_##instr(dbg_info *db, ir_node *block, \
90 ir_node *op, ir_mode *mode) \
93 ir_graph *irg = current_ir_graph; \
94 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
95 res = optimize_node(res); \
96 IRN_VRFY_IRG(res, irg); \
100 /* creates a bd constructor for an divop */
101 #define NEW_BD_DIVOP(instr) \
103 new_bd_##instr(dbg_info *db, ir_node *block, \
104 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
108 ir_graph *irg = current_ir_graph; \
112 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
113 res->attr.divmod.exc.pin_state = state; \
114 res->attr.divmod.res_mode = mode; \
115 res->attr.divmod.no_remainder = 0; \
116 res = optimize_node(res); \
117 IRN_VRFY_IRG(res, irg); \
121 /* creates a rd constructor for a binop */
122 #define NEW_RD_BINOP(instr) \
124 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
125 ir_node *op1, ir_node *op2, ir_mode *mode) \
128 ir_graph *rem = current_ir_graph; \
129 current_ir_graph = irg; \
130 res = new_bd_##instr(db, block, op1, op2, mode); \
131 current_ir_graph = rem; \
135 /* creates a rd constructor for an unop */
136 #define NEW_RD_UNOP(instr) \
138 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
139 ir_node *op, ir_mode *mode) \
142 ir_graph *rem = current_ir_graph; \
143 current_ir_graph = irg; \
144 res = new_bd_##instr(db, block, op, mode); \
145 current_ir_graph = rem; \
149 /* creates a rd constructor for an divop */
150 #define NEW_RD_DIVOP(instr) \
152 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
153 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
156 ir_graph *rem = current_ir_graph; \
157 current_ir_graph = irg; \
158 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
159 current_ir_graph = rem; \
163 /* creates a d constructor for an binop */
164 #define NEW_D_BINOP(instr) \
166 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
167 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
170 /* creates a d constructor for an unop */
171 #define NEW_D_UNOP(instr) \
173 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
174 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
179 * Constructs a Block with a fixed number of predecessors.
180 * Does not set current_block. Cannot be used with automatic
181 * Phi node construction.
184 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
186 ir_graph *irg = current_ir_graph;
188 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
190 /* macroblock header */
193 res->attr.block.is_dead = 0;
194 res->attr.block.is_mb_head = 1;
195 res->attr.block.has_label = 0;
196 res->attr.block.irg = irg;
197 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
198 res->attr.block.in_cg = NULL;
199 res->attr.block.cg_backedge = NULL;
200 res->attr.block.extblk = NULL;
201 res->attr.block.mb_depth = 0;
202 res->attr.block.label = 0;
204 set_Block_matured(res, 1);
205 set_Block_block_visited(res, 0);
207 IRN_VRFY_IRG(res, irg);
212 new_bd_Start(dbg_info *db, ir_node *block) {
214 ir_graph *irg = current_ir_graph;
216 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
218 IRN_VRFY_IRG(res, irg);
223 new_bd_End(dbg_info *db, ir_node *block) {
225 ir_graph *irg = current_ir_graph;
227 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
229 IRN_VRFY_IRG(res, irg);
234 * Creates a Phi node with all predecessors. Calling this constructor
235 * is only allowed if the corresponding block is mature.
238 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
240 ir_graph *irg = current_ir_graph;
244 /* Don't assert that block matured: the use of this constructor is strongly
246 if (get_Block_matured(block))
247 assert(get_irn_arity(block) == arity);
249 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
251 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
253 for (i = arity - 1; i >= 0; --i)
254 if (is_Unknown(in[i])) {
259 if (!has_unknown) res = optimize_node(res);
260 IRN_VRFY_IRG(res, irg);
262 /* Memory Phis in endless loops must be kept alive.
263 As we can't distinguish these easily we keep all of them alive. */
264 if (is_Phi(res) && mode == mode_M)
265 add_End_keepalive(get_irg_end(irg), res);
270 new_bd_Const_type(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
272 ir_graph *irg = current_ir_graph;
275 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
276 res->attr.con.tv = con;
277 set_Const_type(res, tp); /* Call method because of complex assertion. */
278 res = optimize_node (res);
279 assert(get_Const_type(res) == tp);
280 IRN_VRFY_IRG(res, irg);
283 } /* new_bd_Const_type */
286 new_bd_Const(dbg_info *db, ir_node *block, ir_mode *mode, tarval *con) {
287 ir_graph *irg = current_ir_graph;
289 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
293 new_bd_Const_long(dbg_info *db, ir_node *block, ir_mode *mode, long value) {
294 ir_graph *irg = current_ir_graph;
296 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
297 } /* new_bd_Const_long */
300 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
302 ir_graph *irg = current_ir_graph;
304 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
305 res = optimize_node(res);
306 IRN_VRFY_IRG(res, irg);
311 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
314 ir_graph *irg = current_ir_graph;
316 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
317 res->attr.proj = proj;
320 assert(get_Proj_pred(res));
321 assert(get_nodes_block(get_Proj_pred(res)));
323 res = optimize_node(res);
325 IRN_VRFY_IRG(res, irg);
330 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
333 ir_graph *irg = current_ir_graph;
335 assert(arg->op == op_Cond);
336 arg->attr.cond.kind = fragmentary;
337 arg->attr.cond.default_proj = max_proj;
338 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
340 } /* new_bd_defaultProj */
343 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
345 ir_graph *irg = current_ir_graph;
347 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
348 res->attr.conv.strict = strict_flag;
349 res = optimize_node(res);
350 IRN_VRFY_IRG(res, irg);
355 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
357 ir_graph *irg = current_ir_graph;
359 assert(is_atomic_type(to_tp));
361 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
362 res->attr.cast.totype = to_tp;
363 res = optimize_node(res);
364 IRN_VRFY_IRG(res, irg);
369 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
371 ir_graph *irg = current_ir_graph;
373 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
374 res = optimize_node (res);
375 IRN_VRFY_IRG(res, irg);
400 /** Creates a remainderless Div node. */
401 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
402 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
406 ir_graph *irg = current_ir_graph;
410 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
411 res->attr.divmod.exc.pin_state = state;
412 res->attr.divmod.res_mode = mode;
413 res->attr.divmod.no_remainder = 1;
414 res = optimize_node(res);
415 IRN_VRFY_IRG(res, irg);
420 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
423 ir_graph *irg = current_ir_graph;
426 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
427 res = optimize_node(res);
428 IRN_VRFY_IRG(res, irg);
433 new_bd_Jmp(dbg_info *db, ir_node *block) {
435 ir_graph *irg = current_ir_graph;
437 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
438 res = optimize_node(res);
439 IRN_VRFY_IRG(res, irg);
444 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
446 ir_graph *irg = current_ir_graph;
448 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
449 res = optimize_node(res);
450 IRN_VRFY_IRG(res, irg);
455 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
457 ir_graph *irg = current_ir_graph;
459 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
460 res->attr.cond.kind = dense;
461 res->attr.cond.default_proj = 0;
462 res->attr.cond.pred = COND_JMP_PRED_NONE;
463 res = optimize_node(res);
464 IRN_VRFY_IRG(res, irg);
469 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
470 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
474 ir_graph *irg = current_ir_graph;
477 NEW_ARR_A(ir_node *, r_in, r_arity);
480 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
482 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
484 assert((get_unknown_type() == tp) || is_Method_type(tp));
485 set_Call_type(res, tp);
486 res->attr.call.exc.pin_state = op_pin_state_pinned;
487 res->attr.call.callee_arr = NULL;
488 res = optimize_node(res);
489 IRN_VRFY_IRG(res, irg);
494 new_bd_Return(dbg_info *db, ir_node *block,
495 ir_node *store, int arity, ir_node **in) {
499 ir_graph *irg = current_ir_graph;
502 NEW_ARR_A (ir_node *, r_in, r_arity);
504 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
505 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
506 res = optimize_node(res);
507 IRN_VRFY_IRG(res, irg);
509 } /* new_bd_Return */
512 new_bd_Load(dbg_info *db, ir_node *block,
513 ir_node *store, ir_node *adr, ir_mode *mode) {
516 ir_graph *irg = current_ir_graph;
520 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
521 res->attr.load.exc.pin_state = op_pin_state_pinned;
522 res->attr.load.load_mode = mode;
523 res->attr.load.volatility = volatility_non_volatile;
524 res->attr.load.aligned = align_is_aligned;
525 res = optimize_node(res);
526 IRN_VRFY_IRG(res, irg);
531 new_bd_Store(dbg_info *db, ir_node *block,
532 ir_node *store, ir_node *adr, ir_node *val) {
535 ir_graph *irg = current_ir_graph;
540 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
541 res->attr.store.exc.pin_state = op_pin_state_pinned;
542 res->attr.store.volatility = volatility_non_volatile;
543 res->attr.store.aligned = align_is_aligned;
544 res = optimize_node(res);
545 IRN_VRFY_IRG(res, irg);
550 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
551 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
554 ir_graph *irg = current_ir_graph;
558 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
559 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
560 res->attr.alloc.where = where;
561 res->attr.alloc.type = alloc_type;
562 res = optimize_node(res);
563 IRN_VRFY_IRG(res, irg);
568 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
569 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
572 ir_graph *irg = current_ir_graph;
577 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
578 res->attr.free.where = where;
579 res->attr.free.type = free_type;
580 res = optimize_node(res);
581 IRN_VRFY_IRG(res, irg);
586 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
587 int arity, ir_node **in, ir_entity *ent) {
591 ir_graph *irg = current_ir_graph;
592 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
594 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
597 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
600 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
602 * Sel's can select functions which should be of mode mode_P_code.
604 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
605 res->attr.sel.ent = ent;
606 res = optimize_node(res);
607 IRN_VRFY_IRG(res, irg);
612 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
613 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
614 ir_graph *irg = current_ir_graph;
615 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
617 res->attr.symc.kind = symkind;
618 res->attr.symc.sym = value;
619 res->attr.symc.tp = tp;
621 res = optimize_node(res);
622 IRN_VRFY_IRG(res, irg);
624 } /* new_bd_SymConst_type */
627 new_bd_Sync(dbg_info *db, ir_node *block) {
629 ir_graph *irg = current_ir_graph;
631 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
632 /* no need to call optimize node here, Sync are always created with no predecessors */
633 IRN_VRFY_IRG(res, irg);
638 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
639 ir_node *in[2], *res;
640 ir_graph *irg = current_ir_graph;
644 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
645 res->attr.confirm.cmp = cmp;
646 res = optimize_node(res);
647 IRN_VRFY_IRG(res, irg);
649 } /* new_bd_Confirm */
652 new_bd_Unknown(ir_mode *m) {
654 ir_graph *irg = current_ir_graph;
656 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
657 res = optimize_node(res);
659 } /* new_bd_Unknown */
662 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
665 ir_graph *irg = current_ir_graph;
667 in[0] = get_Call_ptr(call);
668 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
669 /* res->attr.callbegin.irg = irg; */
670 res->attr.callbegin.call = call;
671 res = optimize_node(res);
672 IRN_VRFY_IRG(res, irg);
674 } /* new_bd_CallBegin */
677 new_bd_EndReg(dbg_info *db, ir_node *block) {
679 ir_graph *irg = current_ir_graph;
681 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
682 set_irg_end_reg(irg, res);
683 IRN_VRFY_IRG(res, irg);
685 } /* new_bd_EndReg */
688 new_bd_EndExcept(dbg_info *db, ir_node *block) {
690 ir_graph *irg = current_ir_graph;
692 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
693 set_irg_end_except(irg, res);
694 IRN_VRFY_IRG (res, irg);
696 } /* new_bd_EndExcept */
699 new_bd_Break(dbg_info *db, ir_node *block) {
701 ir_graph *irg = current_ir_graph;
703 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
704 res = optimize_node(res);
705 IRN_VRFY_IRG(res, irg);
710 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
713 ir_graph *irg = current_ir_graph;
715 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
716 res->attr.filter.proj = proj;
717 res->attr.filter.in_cg = NULL;
718 res->attr.filter.backedge = NULL;
721 assert(get_Proj_pred(res));
722 assert(get_nodes_block(get_Proj_pred(res)));
724 res = optimize_node(res);
725 IRN_VRFY_IRG(res, irg);
727 } /* new_bd_Filter */
730 new_bd_Mux(dbg_info *db, ir_node *block,
731 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
734 ir_graph *irg = current_ir_graph;
740 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
743 res = optimize_node(res);
744 IRN_VRFY_IRG(res, irg);
749 new_bd_CopyB(dbg_info *db, ir_node *block,
750 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
753 ir_graph *irg = current_ir_graph;
759 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
761 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
762 res->attr.copyb.data_type = data_type;
763 res = optimize_node(res);
764 IRN_VRFY_IRG(res, irg);
769 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
770 ir_node *objptr, ir_type *type) {
773 ir_graph *irg = current_ir_graph;
777 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
778 res->attr.instof.type = type;
779 res = optimize_node(res);
780 IRN_VRFY_IRG(res, irg);
782 } /* new_bd_InstOf */
785 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
788 ir_graph *irg = current_ir_graph;
792 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
793 res = optimize_node(res);
794 IRN_VRFY_IRG(res, irg);
799 new_bd_Bound(dbg_info *db, ir_node *block,
800 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
803 ir_graph *irg = current_ir_graph;
809 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
810 res->attr.bound.exc.pin_state = op_pin_state_pinned;
811 res = optimize_node(res);
812 IRN_VRFY_IRG(res, irg);
817 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
819 ir_graph *irg = current_ir_graph;
821 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
822 res = optimize_node(res);
823 IRN_VRFY_IRG(res, irg);
828 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
829 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
831 ir_graph *irg = current_ir_graph;
834 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
835 res->attr.assem.pin_state = op_pin_state_pinned;
836 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
837 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
838 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
839 res->attr.assem.asm_text = asm_text;
841 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
842 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
843 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
845 res = optimize_node(res);
846 IRN_VRFY_IRG(res, irg);
850 /* --------------------------------------------- */
851 /* private interfaces, for professional use only */
852 /* --------------------------------------------- */
854 /* Constructs a Block with a fixed number of predecessors.
855 Does not set current_block. Can not be used with automatic
856 Phi node construction. */
858 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
859 ir_graph *rem = current_ir_graph;
862 current_ir_graph = irg;
863 res = new_bd_Block(db, arity, in);
864 current_ir_graph = rem;
870 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
871 ir_graph *rem = current_ir_graph;
874 current_ir_graph = irg;
875 res = new_bd_Start(db, block);
876 current_ir_graph = rem;
882 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
884 ir_graph *rem = current_ir_graph;
886 current_ir_graph = irg;
887 res = new_bd_End(db, block);
888 current_ir_graph = rem;
893 /* Creates a Phi node with all predecessors. Calling this constructor
894 is only allowed if the corresponding block is mature. */
896 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
898 ir_graph *rem = current_ir_graph;
900 current_ir_graph = irg;
901 res = new_bd_Phi(db, block,arity, in, mode);
902 current_ir_graph = rem;
908 new_rd_Const_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp) {
910 ir_graph *rem = current_ir_graph;
912 current_ir_graph = irg;
913 res = new_bd_Const_type(db, block, mode, con, tp);
914 current_ir_graph = rem;
917 } /* new_rd_Const_type */
920 new_rd_Const(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) {
922 ir_graph *rem = current_ir_graph;
924 current_ir_graph = irg;
925 res = new_bd_Const_type(db, block, mode, con, firm_unknown_type);
926 current_ir_graph = rem;
932 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) {
933 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
934 } /* new_rd_Const_long */
937 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
939 ir_graph *rem = current_ir_graph;
941 current_ir_graph = irg;
942 res = new_bd_Id(db, block, val, mode);
943 current_ir_graph = rem;
949 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
952 ir_graph *rem = current_ir_graph;
954 current_ir_graph = irg;
955 res = new_bd_Proj(db, block, arg, mode, proj);
956 current_ir_graph = rem;
962 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
965 ir_graph *rem = current_ir_graph;
967 current_ir_graph = irg;
968 res = new_bd_defaultProj(db, block, arg, max_proj);
969 current_ir_graph = rem;
972 } /* new_rd_defaultProj */
975 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
977 ir_graph *rem = current_ir_graph;
979 current_ir_graph = irg;
980 res = new_bd_Conv(db, block, op, mode, 0);
981 current_ir_graph = rem;
987 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
989 ir_graph *rem = current_ir_graph;
991 current_ir_graph = irg;
992 res = new_bd_Cast(db, block, op, to_tp);
993 current_ir_graph = rem;
999 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1001 ir_graph *rem = current_ir_graph;
1003 current_ir_graph = irg;
1004 res = new_bd_Tuple(db, block, arity, in);
1005 current_ir_graph = rem;
1008 } /* new_rd_Tuple */
1016 NEW_RD_DIVOP(DivMod)
1029 NEW_RD_BINOP(Borrow)
1031 /* creates a rd constructor for an divRL */
1032 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1033 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1036 ir_graph *rem = current_ir_graph;
1037 current_ir_graph = irg;
1038 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1039 current_ir_graph = rem;
1044 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1045 ir_node *op1, ir_node *op2) {
1047 ir_graph *rem = current_ir_graph;
1049 current_ir_graph = irg;
1050 res = new_bd_Cmp(db, block, op1, op2);
1051 current_ir_graph = rem;
1057 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1059 ir_graph *rem = current_ir_graph;
1061 current_ir_graph = irg;
1062 res = new_bd_Jmp(db, block);
1063 current_ir_graph = rem;
1069 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1071 ir_graph *rem = current_ir_graph;
1073 current_ir_graph = irg;
1074 res = new_bd_IJmp(db, block, tgt);
1075 current_ir_graph = rem;
1081 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1083 ir_graph *rem = current_ir_graph;
1085 current_ir_graph = irg;
1086 res = new_bd_Cond(db, block, c);
1087 current_ir_graph = rem;
1093 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1094 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1096 ir_graph *rem = current_ir_graph;
1098 current_ir_graph = irg;
1099 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1100 current_ir_graph = rem;
1106 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1107 ir_node *store, int arity, ir_node **in) {
1109 ir_graph *rem = current_ir_graph;
1111 current_ir_graph = irg;
1112 res = new_bd_Return(db, block, store, arity, in);
1113 current_ir_graph = rem;
1116 } /* new_rd_Return */
1119 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1120 ir_node *store, ir_node *adr, ir_mode *mode) {
1122 ir_graph *rem = current_ir_graph;
1124 current_ir_graph = irg;
1125 res = new_bd_Load(db, block, store, adr, mode);
1126 current_ir_graph = rem;
1132 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1133 ir_node *store, ir_node *adr, ir_node *val) {
1135 ir_graph *rem = current_ir_graph;
1137 current_ir_graph = irg;
1138 res = new_bd_Store(db, block, store, adr, val);
1139 current_ir_graph = rem;
1142 } /* new_rd_Store */
1145 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1146 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1148 ir_graph *rem = current_ir_graph;
1150 current_ir_graph = irg;
1151 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1152 current_ir_graph = rem;
1155 } /* new_rd_Alloc */
1158 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1159 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1161 ir_graph *rem = current_ir_graph;
1163 current_ir_graph = irg;
1164 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1165 current_ir_graph = rem;
1171 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1172 ir_node *store, ir_node *objptr, ir_entity *ent) {
1174 ir_graph *rem = current_ir_graph;
1176 current_ir_graph = irg;
1177 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1178 current_ir_graph = rem;
1181 } /* new_rd_simpleSel */
1184 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1185 int arity, ir_node **in, ir_entity *ent) {
1187 ir_graph *rem = current_ir_graph;
1189 current_ir_graph = irg;
1190 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1191 current_ir_graph = rem;
1197 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1198 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1200 ir_graph *rem = current_ir_graph;
1202 current_ir_graph = irg;
1203 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1204 current_ir_graph = rem;
1207 } /* new_rd_SymConst_type */
1210 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1211 symconst_symbol value, symconst_kind symkind) {
1212 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1213 } /* new_rd_SymConst */
1215 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1216 symconst_symbol sym;
1217 sym.entity_p = symbol;
1218 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1219 } /* new_rd_SymConst_addr_ent */
1221 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1222 symconst_symbol sym;
1223 sym.entity_p = symbol;
1224 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1225 } /* new_rd_SymConst_ofs_ent */
1227 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1228 symconst_symbol sym;
1229 sym.ident_p = symbol;
1230 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1231 } /* new_rd_SymConst_addr_name */
1233 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1234 symconst_symbol sym;
1235 sym.type_p = symbol;
1236 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1237 } /* new_rd_SymConst_type_tag */
1239 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1240 symconst_symbol sym;
1241 sym.type_p = symbol;
1242 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1243 } /* new_rd_SymConst_size */
1245 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1246 symconst_symbol sym;
1247 sym.type_p = symbol;
1248 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1249 } /* new_rd_SymConst_align */
1252 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1254 ir_graph *rem = current_ir_graph;
1257 current_ir_graph = irg;
1258 res = new_bd_Sync(db, block);
1259 current_ir_graph = rem;
1261 for (i = 0; i < arity; ++i)
1262 add_Sync_pred(res, in[i]);
1268 new_rd_Bad(ir_graph *irg) {
1269 return get_irg_bad(irg);
1273 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1275 ir_graph *rem = current_ir_graph;
1277 current_ir_graph = irg;
1278 res = new_bd_Confirm(db, block, val, bound, cmp);
1279 current_ir_graph = rem;
1282 } /* new_rd_Confirm */
1285 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1287 ir_graph *rem = current_ir_graph;
1289 current_ir_graph = irg;
1290 res = new_bd_Unknown(m);
1291 current_ir_graph = rem;
1294 } /* new_rd_Unknown */
1297 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1299 ir_graph *rem = current_ir_graph;
1301 current_ir_graph = irg;
1302 res = new_bd_CallBegin(db, block, call);
1303 current_ir_graph = rem;
1306 } /* new_rd_CallBegin */
1309 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1312 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1313 set_irg_end_reg(irg, res);
1314 IRN_VRFY_IRG(res, irg);
1316 } /* new_rd_EndReg */
1319 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1322 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1323 set_irg_end_except(irg, res);
1324 IRN_VRFY_IRG (res, irg);
1326 } /* new_rd_EndExcept */
1329 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1331 ir_graph *rem = current_ir_graph;
1333 current_ir_graph = irg;
1334 res = new_bd_Break(db, block);
1335 current_ir_graph = rem;
1338 } /* new_rd_Break */
1341 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1344 ir_graph *rem = current_ir_graph;
1346 current_ir_graph = irg;
1347 res = new_bd_Filter(db, block, arg, mode, proj);
1348 current_ir_graph = rem;
1351 } /* new_rd_Filter */
1354 new_rd_NoMem(ir_graph *irg) {
1355 return get_irg_no_mem(irg);
1356 } /* new_rd_NoMem */
1359 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1360 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1362 ir_graph *rem = current_ir_graph;
1364 current_ir_graph = irg;
1365 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1366 current_ir_graph = rem;
1371 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1372 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1374 ir_graph *rem = current_ir_graph;
1376 current_ir_graph = irg;
1377 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1378 current_ir_graph = rem;
1381 } /* new_rd_CopyB */
1384 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1385 ir_node *objptr, ir_type *type) {
1387 ir_graph *rem = current_ir_graph;
1389 current_ir_graph = irg;
1390 res = new_bd_InstOf(db, block, store, objptr, type);
1391 current_ir_graph = rem;
1394 } /* new_rd_InstOf */
1397 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1399 ir_graph *rem = current_ir_graph;
1401 current_ir_graph = irg;
1402 res = new_bd_Raise(db, block, store, obj);
1403 current_ir_graph = rem;
1406 } /* new_rd_Raise */
1408 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1409 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1411 ir_graph *rem = current_ir_graph;
1413 current_ir_graph = irg;
1414 res = new_bd_Bound(db, block, store, idx, lower, upper);
1415 current_ir_graph = rem;
1418 } /* new_rd_Bound */
1420 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1422 ir_graph *rem = current_ir_graph;
1424 current_ir_graph = irg;
1425 res = new_bd_Pin(db, block, node);
1426 current_ir_graph = rem;
1431 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1432 int arity, ir_node *in[], ir_asm_constraint *inputs,
1433 int n_outs, ir_asm_constraint *outputs,
1434 int n_clobber, ident *clobber[], ident *asm_text) {
1436 ir_graph *rem = current_ir_graph;
1438 current_ir_graph = irg;
1439 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1440 current_ir_graph = rem;
1446 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1447 return new_rd_Block(NULL, irg, arity, in);
1449 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1450 return new_rd_Start(NULL, irg, block);
1452 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1453 return new_rd_End(NULL, irg, block);
1455 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1456 return new_rd_Jmp(NULL, irg, block);
1458 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1459 return new_rd_IJmp(NULL, irg, block, tgt);
1461 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1462 return new_rd_Cond(NULL, irg, block, c);
1464 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1465 ir_node *store, int arity, ir_node **in) {
1466 return new_rd_Return(NULL, irg, block, store, arity, in);
1468 ir_node *new_r_Const(ir_graph *irg, ir_node *block,
1469 ir_mode *mode, tarval *con) {
1470 return new_rd_Const(NULL, irg, block, mode, con);
1472 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1473 ir_mode *mode, long value) {
1474 return new_rd_Const_long(NULL, irg, block, mode, value);
1476 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1477 ir_mode *mode, tarval *con, ir_type *tp) {
1478 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1480 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1481 symconst_symbol value, symconst_kind symkind) {
1482 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
1484 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1485 ir_node *objptr, ir_entity *ent) {
1486 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1488 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1489 ir_node *objptr, int n_index, ir_node **index,
1491 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1493 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1494 ir_node *callee, int arity, ir_node **in,
1496 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1498 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1499 ir_node *op1, ir_node *op2, ir_mode *mode) {
1500 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1502 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1503 ir_node *op1, ir_node *op2, ir_mode *mode) {
1504 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1506 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1507 ir_node *op, ir_mode *mode) {
1508 return new_rd_Minus(NULL, irg, block, op, mode);
1510 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1511 ir_node *op1, ir_node *op2, ir_mode *mode) {
1512 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1514 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1515 ir_node *op1, ir_node *op2, ir_mode *mode) {
1516 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1518 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1519 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1520 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1522 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1523 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1524 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1526 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1527 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1528 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1530 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1531 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1532 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1534 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1535 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1536 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1538 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1539 ir_node *op, ir_mode *mode) {
1540 return new_rd_Abs(NULL, irg, block, op, mode);
1542 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1543 ir_node *op1, ir_node *op2, ir_mode *mode) {
1544 return new_rd_And(NULL, irg, block, op1, op2, mode);
1546 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1547 ir_node *op1, ir_node *op2, ir_mode *mode) {
1548 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1550 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1551 ir_node *op1, ir_node *op2, ir_mode *mode) {
1552 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1554 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1555 ir_node *op, ir_mode *mode) {
1556 return new_rd_Not(NULL, irg, block, op, mode);
1558 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1559 ir_node *op, ir_node *k, ir_mode *mode) {
1560 return new_rd_Shl(NULL, irg, block, op, k, mode);
1562 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1563 ir_node *op, ir_node *k, ir_mode *mode) {
1564 return new_rd_Shr(NULL, irg, block, op, k, mode);
1566 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1567 ir_node *op, ir_node *k, ir_mode *mode) {
1568 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1570 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1571 ir_node *op, ir_node *k, ir_mode *mode) {
1572 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1574 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1575 ir_node *op, ir_node *k, ir_mode *mode) {
1576 return new_rd_Carry(NULL, irg, block, op, k, mode);
1578 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1579 ir_node *op, ir_node *k, ir_mode *mode) {
1580 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1582 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1583 ir_node *op1, ir_node *op2) {
1584 return new_rd_Cmp(NULL, irg, block, op1, op2);
1586 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1587 ir_node *op, ir_mode *mode) {
1588 return new_rd_Conv(NULL, irg, block, op, mode);
1590 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1591 return new_rd_Cast(NULL, irg, block, op, to_tp);
1593 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1594 ir_node **in, ir_mode *mode) {
1595 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1597 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1598 ir_node *store, ir_node *adr, ir_mode *mode) {
1599 return new_rd_Load(NULL, irg, block, store, adr, mode);
1601 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1602 ir_node *store, ir_node *adr, ir_node *val) {
1603 return new_rd_Store(NULL, irg, block, store, adr, val);
1605 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1606 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1607 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1609 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1610 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1611 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1613 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1614 return new_rd_Sync(NULL, irg, block, arity, in);
1616 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1617 ir_mode *mode, long proj) {
1618 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1620 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1622 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1624 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1625 int arity, ir_node **in) {
1626 return new_rd_Tuple(NULL, irg, block, arity, in );
1628 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1629 ir_node *val, ir_mode *mode) {
1630 return new_rd_Id(NULL, irg, block, val, mode);
1632 ir_node *new_r_Bad(ir_graph *irg) {
1633 return new_rd_Bad(irg);
1635 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1636 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1638 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1639 return new_rd_Unknown(irg, m);
1641 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1642 return new_rd_CallBegin(NULL, irg, block, callee);
1644 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1645 return new_rd_EndReg(NULL, irg, block);
1647 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1648 return new_rd_EndExcept(NULL, irg, block);
1650 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1651 return new_rd_Break(NULL, irg, block);
1653 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1654 ir_mode *mode, long proj) {
1655 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1657 ir_node *new_r_NoMem(ir_graph *irg) {
1658 return new_rd_NoMem(irg);
1660 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1661 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1662 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1664 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1665 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1666 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1668 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1670 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1672 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1673 ir_node *store, ir_node *obj) {
1674 return new_rd_Raise(NULL, irg, block, store, obj);
1676 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1677 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1678 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1680 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1681 return new_rd_Pin(NULL, irg, block, node);
1683 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1684 int arity, ir_node *in[], ir_asm_constraint *inputs,
1685 int n_outs, ir_asm_constraint *outputs,
1686 int n_clobber, ident *clobber[], ident *asm_text) {
1687 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
/* ************************************************************************** */
/* Public interfaces: graph construction tools.                               */
/* ************************************************************************** */
1696 * - create a new Start node in the current block
1698 * @return s - pointer to the created Start node
1703 new_d_Start(dbg_info *db) {
1706 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1707 op_Start, mode_T, 0, NULL);
1709 res = optimize_node(res);
1710 IRN_VRFY_IRG(res, current_ir_graph);
1715 new_d_End(dbg_info *db) {
1717 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1718 op_End, mode_X, -1, NULL);
1719 res = optimize_node(res);
1720 IRN_VRFY_IRG(res, current_ir_graph);
1725 /* Constructs a Block with a fixed number of predecessors.
1726 Does set current_block. Can be used with automatic Phi
1727 node construction. */
1729 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1732 int has_unknown = 0;
1734 res = new_bd_Block(db, arity, in);
1736 /* Create and initialize array for Phi-node construction. */
1737 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1738 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1739 current_ir_graph->n_loc);
1740 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1743 for (i = arity-1; i >= 0; i--)
1744 if (is_Unknown(in[i])) {
1749 if (!has_unknown) res = optimize_node(res);
1750 current_ir_graph->current_block = res;
1752 IRN_VRFY_IRG(res, current_ir_graph);
1757 /* ***********************************************************************/
1758 /* Methods necessary for automatic Phi node creation */
1760 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1761 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1762 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1763 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1765 Call Graph: ( A ---> B == A "calls" B)
1767 get_value mature_immBlock
1775 get_r_value_internal |
1779 new_rd_Phi0 new_rd_Phi_in
1781 * *************************************************************************** */
1783 /** Creates a Phi node with 0 predecessors. */
1784 static INLINE ir_node *
1785 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1788 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1789 IRN_VRFY_IRG(res, irg);
1795 * Internal constructor of a Phi node by a phi_merge operation.
1797 * @param irg the graph on which the Phi will be constructed
1798 * @param block the block in which the Phi will be constructed
1799 * @param mode the mode of the Phi node
1800 * @param in the input array of the phi node
1801 * @param ins number of elements in the input array
1802 * @param phi0 if non-NULL: the Phi0 node in the same block that represents
1803 * the value for which the new Phi is constructed
/* NOTE(review): several interior lines of this function were lost in
   extraction (declaration of the loop index, the bookkeeping that tracks
   `known` across the loop, and the full i < 0 merge branch).  Compare
   against repository history before modifying. */
1805 static INLINE ir_node *
1806 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1807 ir_node **in, int ins, ir_node *phi0) {
1809 ir_node *res, *known;
1811 /* Allocate a new node on the obstack. The allocation copies the in
1813 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1814 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1816 /* This loop checks whether the Phi has more than one predecessor.
1817 If so, it is a real Phi node and we break the loop. Else the
1818 Phi node merges the same definition on several paths and therefore
1819 is not needed. Don't consider Bad nodes! */
1821 for (i = ins - 1; i >= 0; --i) {
1824 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1826 /* Optimize self referencing Phis: We can't detect them yet properly, as
1827 they still refer to the Phi0 they will replace. So replace right now. */
1828 if (phi0 && in[i] == phi0)
1831 if (in[i] == res || in[i] == known || is_Bad(in[i]))
1840 /* i < 0: there is at most one predecessor, we don't need a phi node. */
1843 edges_node_deleted(res, current_ir_graph);
1844 obstack_free(current_ir_graph->obst, res);
1845 if (is_Phi(known)) {
1846 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1847 order, an enclosing Phi node may get superfluous. */
1848 res = optimize_in_place_2(known);
1850 exchange(known, res);
1855 /* A undefined value, e.g., in unreachable code. */
1859 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1860 IRN_VRFY_IRG(res, irg);
1861 /* Memory Phis in endless loops must be kept alive.
1862 As we can't distinguish these easily we keep all of them alive. */
1863 if (is_Phi(res) && mode == mode_M)
1864 add_End_keepalive(get_irg_end(irg), res);
1868 } /* new_rd_Phi_in */
1871 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1873 #if PRECISE_EXC_CONTEXT
1875 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1878 * Construct a new frag_array for node n.
1879 * Copy the content from the current graph_arr of the corresponding block:
1880 * this is the current state.
1881 * Set ProjM(n) as current memory state.
1882 * Further the last entry in frag_arr of current block points to n. This
1883 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* NOTE(review): interior lines were lost in extraction (declarations of
   `arr` and `opt`, the is_Call(n) test opening the if-cascade, the
   set_optimize(opt) restore and the final `return arr;`).  Do not edit
   without consulting repository history. */
1885 static INLINE ir_node **new_frag_arr(ir_node *n) {
1889 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1890 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1891 sizeof(ir_node *)*current_ir_graph->n_loc);
1893 /* turn off optimization before allocating Proj nodes, as res isn't
1895 opt = get_opt_optimize(); set_optimize(0);
1896 /* Here we rely on the fact that all frag ops have Memory as first result! */
1898 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1899 } else if (is_CopyB(n)) {
1900 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
1902 assert((pn_Quot_M == pn_DivMod_M) &&
1903 (pn_Quot_M == pn_Div_M) &&
1904 (pn_Quot_M == pn_Mod_M) &&
1905 (pn_Quot_M == pn_Load_M) &&
1906 (pn_Quot_M == pn_Store_M) &&
1907 (pn_Quot_M == pn_Alloc_M) &&
1908 (pn_Quot_M == pn_Bound_M));
1909 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
1913 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
1915 } /* new_frag_arr */
1918 * Returns the frag_arr from a node.
/* NOTE(review): the `case iro_*:` labels of this switch (and its closing
   brace) were lost in extraction; the return statements presumably map
   Call/Alloc/Load/Store to their opcode-specific attributes and everything
   else to the generic except attribute -- confirm against history. */
1920 static INLINE ir_node **get_frag_arr(ir_node *n) {
1921 switch (get_irn_opcode(n)) {
1923 return n->attr.call.exc.frag_arr;
1925 return n->attr.alloc.exc.frag_arr;
1927 return n->attr.load.exc.frag_arr;
1929 return n->attr.store.exc.frag_arr;
1931 return n->attr.except.frag_arr;
1933 } /* get_frag_arr */
/* Writes val into frag_arr at pos if that slot is still empty, then walks
   the chain of frag arrays (linked through slot n_loc-1) to propagate the
   value; the 1024-iteration bound guards against endless recursion.
   NOTE(review): loop body lines were lost in extraction. */
1936 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1937 #ifdef DEBUG_libfirm
1940 for (i = 1024; i >= 0; --i)
1945 if (frag_arr[pos] == NULL)
1946 frag_arr[pos] = val;
1947 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
1948 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
1949 assert(arr != frag_arr && "Endless recursion detected");
1954 assert(!"potential endless recursion in set_frag_value");
1955 } /* set_frag_value */
/* Returns the definition of value pos valid after the fragile op cfOp:
   either the value recorded in cfOp's frag array, or -- if a set_value()
   happened after cfOp -- a freshly built Phi (Phi0 for immature blocks).
   NOTE(review): declarations of `res`, `frag_arr`, `nin` and several
   braces/else lines were lost in extraction. */
1958 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1962 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
1964 frag_arr = get_frag_arr(cfOp);
1965 res = frag_arr[pos];
1967 if (block->attr.block.graph_arr[pos] != NULL) {
1968 /* There was a set_value() after the cfOp and no get_value() before that
1969 set_value(). We must build a Phi node now. */
1970 if (block->attr.block.is_matured) {
1971 int ins = get_irn_arity(block);
1973 NEW_ARR_A(ir_node *, nin, ins);
1974 res = phi_merge(block, pos, mode, nin, ins);
1976 res = new_rd_Phi0(current_ir_graph, block, mode);
1977 res->attr.phi.u.pos = pos;
1978 res->attr.phi.next = block->attr.block.phis;
1979 block->attr.block.phis = res;
1981 assert(res != NULL);
1982 /* It's a Phi, we can write this into all graph_arrs with NULL */
1983 set_frag_value(block->attr.block.graph_arr, pos, res);
1985 res = get_r_value_internal(block, pos, mode);
1986 set_frag_value(block->attr.block.graph_arr, pos, res);
1990 } /* get_r_frag_value_internal */
1991 #endif /* PRECISE_EXC_CONTEXT */
1994 * Check whether a control flownode cf_pred represents an exception flow.
1996 * @param cf_pred the control flow node
1997 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
1999 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2001 * Note: all projections from a raise are "exceptional control flow" we we handle it
2002 * like a normal Jmp, because there is no "regular" one.
2003 * That's why Raise is no "fragile_op"!
2005 if (is_fragile_op(prev_cf_op)) {
2006 if (is_Proj(cf_pred)) {
2007 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2008 /* the regular control flow, NO exception */
2011 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2014 /* Hmm, exception but not a Proj? */
2015 assert(!"unexpected condition: fragile op without a proj");
2019 } /* is_exception_flow */
2022 * Computes the predecessors for the real phi node, and then
2023 * allocates and returns this node. The routine called to allocate the
2024 * node might optimize it away and return a real value.
2025 * This function must be called with an in-array of proper size.
/* NOTE(review): this function lost interior lines in extraction (return
   type, declaration of `i`, several braces and else-branches, the final
   `return res;`).  The overall shape -- Phi0 placement to break recursion,
   predecessor collection, Phi construction, Phi0 exchange -- is intact;
   consult repository history before any behavioral change. */
2028 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2029 ir_node *prevBlock, *res, *phi0, *phi0_all;
2032 /* If this block has no value at pos create a Phi0 and remember it
2033 in graph_arr to break recursions.
2034 Else we may not set graph_arr as there a later value is remembered. */
2036 if (block->attr.block.graph_arr[pos] == NULL) {
2037 ir_graph *irg = current_ir_graph;
2039 if (block == get_irg_start_block(irg)) {
2040 /* Collapsing to Bad tarvals is no good idea.
2041 So we call a user-supplied routine here that deals with this case as
2042 appropriate for the given language. Sorrily the only help we can give
2043 here is the position.
2045 Even if all variables are defined before use, it can happen that
2046 we get to the start block, if a Cond has been replaced by a tuple
2047 (bad, jmp). In this case we call the function needlessly, eventually
2048 generating an non existent error.
2049 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2052 if (default_initialize_local_variable != NULL) {
2053 ir_node *rem = get_cur_block();
2055 set_cur_block(block);
2056 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2060 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2061 /* We don't need to care about exception ops in the start block.
2062 There are none by definition. */
2063 return block->attr.block.graph_arr[pos];
2065 phi0 = new_rd_Phi0(irg, block, mode);
2066 block->attr.block.graph_arr[pos] = phi0;
2067 #if PRECISE_EXC_CONTEXT
2068 if (get_opt_precise_exc_context()) {
2069 /* Set graph_arr for fragile ops. Also here we should break recursion.
2070 We could choose a cyclic path through an cfop. But the recursion would
2071 break at some point. */
2072 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2078 /* This loop goes to all predecessor blocks of the block the Phi node
2079 is in and there finds the operands of the Phi node by calling
2080 get_r_value_internal. */
2081 for (i = 1; i <= ins; ++i) {
2082 ir_node *cf_pred = block->in[i];
2083 ir_node *prevCfOp = skip_Proj(cf_pred);
2085 if (is_Bad(prevCfOp)) {
2086 /* In case a Cond has been optimized we would get right to the start block
2087 with an invalid definition. */
2088 nin[i-1] = new_Bad();
2091 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2093 if (!is_Bad(prevBlock)) {
2094 #if PRECISE_EXC_CONTEXT
2095 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2096 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2097 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2100 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2102 nin[i-1] = new_Bad();
2106 /* We want to pass the Phi0 node to the constructor: this finds additional
2107 optimization possibilities.
2108 The Phi0 node either is allocated in this function, or it comes from
2109 a former call to get_r_value_internal(). In this case we may not yet
2110 exchange phi0, as this is done in mature_immBlock(). */
2112 phi0_all = block->attr.block.graph_arr[pos];
2113 if (!(is_Phi(phi0_all) &&
2114 (get_irn_arity(phi0_all) == 0) &&
2115 (get_nodes_block(phi0_all) == block)))
2121 /* After collecting all predecessors into the array nin a new Phi node
2122 with these predecessors is created. This constructor contains an
2123 optimization: If all predecessors of the Phi node are identical it
2124 returns the only operand instead of a new Phi node. */
2125 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2127 /* In case we allocated a Phi0 node at the beginning of this procedure,
2128 we need to exchange this Phi0 with the real Phi. */
2130 exchange(phi0, res);
2131 block->attr.block.graph_arr[pos] = res;
2132 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2133 only an optimization. */
2140 * This function returns the last definition of a value. In case
2141 * this value was last defined in a previous block, Phi nodes are
2142 * inserted. If the part of the firm graph containing the definition
2143 * is not yet constructed, a dummy Phi node is returned.
2145 * @param block the current block
2146 * @param pos the value number of the value searched
2147 * @param mode the mode of this value (needed for Phi construction)
/* NOTE(review): return type, declaration of `res`/`nin`, the early
   "case 2" return and some braces were lost in extraction; consult
   repository history before modifying. */
2150 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2152 /* There are 4 cases to treat.
2154 1. The block is not mature and we visit it the first time. We can not
2155 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2156 predecessors is returned. This node is added to the linked list (block
2157 attribute "phis") of the containing block to be completed when this block is
2158 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2161 2. The value is already known in this block, graph_arr[pos] is set and we
2162 visit the block the first time. We can return the value without
2163 creating any new nodes.
2165 3. The block is mature and we visit it the first time. A Phi node needs
2166 to be created (phi_merge). If the Phi is not needed, as all it's
2167 operands are the same value reaching the block through different
2168 paths, it's optimized away and the value itself is returned.
2170 4. The block is mature, and we visit it the second time. Now two
2171 subcases are possible:
2172 * The value was computed completely the last time we were here. This
2173 is the case if there is no loop. We can return the proper value.
2174 * The recursion that visited this node and set the flag did not
2175 return yet. We are computing a value in a loop and need to
2176 break the recursion. This case only happens if we visited
2177 the same block with phi_merge before, which inserted a Phi0.
2178 So we return the Phi0.
2181 /* case 4 -- already visited. */
2182 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2183 /* As phi_merge allocates a Phi0 this value is always defined. Here
2184 is the critical difference of the two algorithms. */
2185 assert(block->attr.block.graph_arr[pos]);
2186 return block->attr.block.graph_arr[pos];
2189 /* visited the first time */
2190 set_irn_visited(block, get_irg_visited(current_ir_graph));
2192 /* Get the local valid value */
2193 res = block->attr.block.graph_arr[pos];
2195 /* case 2 -- If the value is actually computed, return it. */
2199 if (block->attr.block.is_matured) { /* case 3 */
2201 /* The Phi has the same amount of ins as the corresponding block. */
2202 int ins = get_irn_arity(block);
2204 NEW_ARR_A(ir_node *, nin, ins);
2206 /* Phi merge collects the predecessors and then creates a node. */
2207 res = phi_merge(block, pos, mode, nin, ins);
2209 } else { /* case 1 */
2210 /* The block is not mature, we don't know how many in's are needed. A Phi
2211 with zero predecessors is created. Such a Phi node is called Phi0
2212 node. The Phi0 is then added to the list of Phi0 nodes in this block
2213 to be matured by mature_immBlock later.
2214 The Phi0 has to remember the pos of it's internal value. If the real
2215 Phi is computed, pos is used to update the array with the local
2217 res = new_rd_Phi0(current_ir_graph, block, mode);
2218 res->attr.phi.u.pos = pos;
2219 res->attr.phi.next = block->attr.block.phis;
2220 block->attr.block.phis = res;
2223 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2225 /* The local valid value is available now. */
2226 block->attr.block.graph_arr[pos] = res;
2229 } /* get_r_value_internal */
2231 /* ************************************************************************** */
2234 * Finalize a Block node, when all control flows are known.
2235 * Acceptable parameters are only Block nodes.
/* NOTE(review): return type, declarations of `ins`/`nin`/`n`/`next` and a
   few braces were lost in extraction.  The function fixes the block's
   in-array, matures every pending Phi0 attached to the block (one
   phi_merge per Phi0, each under a fresh visited counter), marks the
   block matured and optimizes it in place. */
2238 mature_immBlock(ir_node *block) {
2243 assert(is_Block(block));
2244 if (!get_Block_matured(block)) {
2245 ir_graph *irg = current_ir_graph;
2247 ins = ARR_LEN(block->in) - 1;
2248 /* Fix block parameters */
2249 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2251 /* An array for building the Phi nodes. */
2252 NEW_ARR_A(ir_node *, nin, ins);
2254 /* Traverse a chain of Phi nodes attached to this block and mature
2256 for (n = block->attr.block.phis; n; n = next) {
2257 inc_irg_visited(irg);
2258 next = n->attr.phi.next;
2259 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2262 block->attr.block.is_matured = 1;
2264 /* Now, as the block is a finished Firm node, we can optimize it.
2265 Since other nodes have been allocated since the block was created
2266 we can not free the node on the obstack. Therefore we have to call
2267 optimize_in_place().
2268 Unfortunately the optimization does not change a lot, as all allocated
2269 nodes refer to the unoptimized node.
2270 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2271 block = optimize_in_place_2(block);
2272 IRN_VRFY_IRG(block, irg);
2274 } /* mature_immBlock */
2277 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2278 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2282 new_d_Const(dbg_info *db, ir_mode *mode, tarval *con) {
2283 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2287 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2288 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2289 } /* new_d_Const_long */
2292 new_d_Const_type(dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
2293 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2294 } /* new_d_Const_type */
2298 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2299 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2303 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2304 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
2308 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2311 assert(arg->op == op_Cond);
2312 arg->attr.cond.kind = fragmentary;
2313 arg->attr.cond.default_proj = max_proj;
2314 res = new_Proj(arg, mode_X, max_proj);
2316 } /* new_d_defaultProj */
2319 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2320 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2324 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2325 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2326 } /* new_d_strictConv */
2329 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2330 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2334 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2335 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2345 * Allocate a frag array for a node if the current graph state is phase_building.
2347 * @param irn the node for which the frag array should be allocated
2348 * @param op the opcode of the (original) node, if does not match opcode of irn,
2350 * @param frag_store the address of the frag store in irn attributes, if this
2351 * address contains a value != NULL, does nothing
2353 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2354 if (get_opt_precise_exc_context()) {
2355 if ((current_ir_graph->phase_state == phase_building) &&
2356 (get_irn_op(irn) == op) && /* Could be optimized away. */
2357 !*frag_store) /* Could be a cse where the arr is already set. */ {
2358 *frag_store = new_frag_arr(irn);
2361 } /* firm_alloc_frag_arr */
2364 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2366 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2367 #if PRECISE_EXC_CONTEXT
2368 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2375 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2377 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2378 #if PRECISE_EXC_CONTEXT
2379 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2383 } /* new_d_DivMod */
2386 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2388 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2389 #if PRECISE_EXC_CONTEXT
2390 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2397 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2399 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2400 #if PRECISE_EXC_CONTEXT
2401 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2408 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2410 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2411 #if PRECISE_EXC_CONTEXT
2412 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
2431 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2432 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2436 new_d_Jmp(dbg_info *db) {
2437 return new_bd_Jmp(db, current_ir_graph->current_block);
2441 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2442 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2446 new_d_Cond(dbg_info *db, ir_node *c) {
2447 return new_bd_Cond(db, current_ir_graph->current_block, c);
2451 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2454 res = new_bd_Call(db, current_ir_graph->current_block,
2455 store, callee, arity, in, tp);
2456 #if PRECISE_EXC_CONTEXT
2457 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2464 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2465 return new_bd_Return(db, current_ir_graph->current_block,
2467 } /* new_d_Return */
2470 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
2472 res = new_bd_Load(db, current_ir_graph->current_block,
2474 #if PRECISE_EXC_CONTEXT
2475 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2482 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
2484 res = new_bd_Store(db, current_ir_graph->current_block,
2486 #if PRECISE_EXC_CONTEXT
2487 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
2494 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2495 ir_where_alloc where) {
2497 res = new_bd_Alloc(db, current_ir_graph->current_block,
2498 store, size, alloc_type, where);
2499 #if PRECISE_EXC_CONTEXT
2500 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
2507 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2508 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2509 return new_bd_Free(db, current_ir_graph->current_block,
2510 store, ptr, size, free_type, where);
2514 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2515 /* GL: objptr was called frame before. Frame was a bad choice for the name
2516 as the operand could as well be a pointer to a dynamic object. */
2518 return new_bd_Sel(db, current_ir_graph->current_block,
2519 store, objptr, 0, NULL, ent);
2520 } /* new_d_simpleSel */
2523 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2524 return new_bd_Sel(db, current_ir_graph->current_block,
2525 store, objptr, n_index, index, sel);
2529 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2530 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2532 } /* new_d_SymConst_type */
2535 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2536 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2537 value, kind, firm_unknown_type);
2538 } /* new_d_SymConst */
2541 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2542 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2548 return _new_d_Bad();
2552 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2553 return new_bd_Confirm(db, current_ir_graph->current_block,
2555 } /* new_d_Confirm */
2558 new_d_Unknown(ir_mode *m) {
2559 return new_bd_Unknown(m);
2560 } /* new_d_Unknown */
2563 new_d_CallBegin(dbg_info *db, ir_node *call) {
2564 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2565 } /* new_d_CallBegin */
2568 new_d_EndReg(dbg_info *db) {
2569 return new_bd_EndReg(db, current_ir_graph->current_block);
2570 } /* new_d_EndReg */
2573 new_d_EndExcept(dbg_info *db) {
2574 return new_bd_EndExcept(db, current_ir_graph->current_block);
2575 } /* new_d_EndExcept */
2578 new_d_Break(dbg_info *db) {
2579 return new_bd_Break(db, current_ir_graph->current_block);
2583 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2584 return new_bd_Filter(db, current_ir_graph->current_block,
2586 } /* new_d_Filter */
2589 (new_d_NoMem)(void) {
2590 return _new_d_NoMem();
2594 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2595 ir_node *ir_true, ir_mode *mode) {
2596 return new_bd_Mux(db, current_ir_graph->current_block,
2597 sel, ir_false, ir_true, mode);
2600 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2601 ir_node *dst, ir_node *src, ir_type *data_type) {
2603 res = new_bd_CopyB(db, current_ir_graph->current_block,
2604 store, dst, src, data_type);
2605 #if PRECISE_EXC_CONTEXT
2606 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2612 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2613 return new_bd_InstOf(db, current_ir_graph->current_block,
2614 store, objptr, type);
2615 } /* new_d_InstOf */
2618 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2619 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
2622 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2623 ir_node *idx, ir_node *lower, ir_node *upper) {
2625 res = new_bd_Bound(db, current_ir_graph->current_block,
2626 store, idx, lower, upper);
2627 #if PRECISE_EXC_CONTEXT
2628 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2634 new_d_Pin(dbg_info *db, ir_node *node) {
2635 return new_bd_Pin(db, current_ir_graph->current_block, node);
2639 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2640 int n_outs, ir_asm_constraint *outputs,
2641 int n_clobber, ident *clobber[], ident *asm_text) {
2642 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2645 /* ********************************************************************* */
2646 /* Comfortable interface with automatic Phi node construction. */
2647 /* (Uses also constructors of ?? interface, except new_Block. */
2648 /* ********************************************************************* */
2650 /* Block construction */
2651 /* immature Block without predecessors */
2653 new_d_immBlock(dbg_info *db) {
/* Creates an immature Block (no predecessors yet) and makes it the current
   block.  Predecessors are added later with add_immBlock_pred(); the block
   must be matured before construction of the graph finishes. */
2656 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2657 /* creates a new dynamic in-array as length of in is -1 */
2658 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2659 current_ir_graph->current_block = res;
2661 /* macroblock head */
/* NOTE(review): the extraction seems to have dropped a line here (likely
   the self-reference of the macroblock header, res->in[0]) -- verify
   against the upstream source. */
/* Initialize the block attributes: not yet matured, not dead, and by
   default the head of its own macroblock. */
2664 res->attr.block.is_matured = 0;
2665 res->attr.block.is_dead = 0;
2666 res->attr.block.is_mb_head = 1;
2667 res->attr.block.has_label = 0;
2668 res->attr.block.irg = current_ir_graph;
2669 res->attr.block.backedge = NULL;
2670 res->attr.block.in_cg = NULL;
2671 res->attr.block.cg_backedge = NULL;
2672 res->attr.block.extblk = NULL;
2673 res->attr.block.region = NULL;
2674 res->attr.block.mb_depth = 0;
2675 res->attr.block.label = 0;
2677 set_Block_block_visited(res, 0);
2679 /* Create and initialize array for Phi-node construction. */
/* graph_arr has one slot per local value number; slot 0 holds the memory
   state.  All slots start out empty (NULL). */
2680 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2681 current_ir_graph->n_loc)
2682 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2684 /* Immature block may not be optimized! */
2685 IRN_VRFY_IRG(res, current_ir_graph);
2688 } /* new_d_immBlock */
2691 new_immBlock(void) {
2692 return new_d_immBlock(NULL);
2693 } /* new_immBlock */
2695 /* immature PartBlock with its predecessors */
2697 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2698 ir_node *res = new_d_immBlock(db);
2699 ir_node *blk = get_nodes_block(pred_jmp);
2701 res->in[0] = blk->in[0];
2702 assert(res->in[0] != NULL);
2703 add_immBlock_pred(res, pred_jmp);
2705 res->attr.block.is_mb_head = 0;
2706 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2709 } /* new_d_immPartBlock */
2712 new_immPartBlock(ir_node *pred_jmp) {
2713 return new_d_immPartBlock(NULL, pred_jmp);
2714 } /* new_immPartBlock */
2716 /* add an edge to a jmp/control flow node */
2718 add_immBlock_pred(ir_node *block, ir_node *jmp) {
2719 int n = ARR_LEN(block->in) - 1;
2721 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2722 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2723 assert(is_ir_node(jmp));
2725 ARR_APP1(ir_node *, block->in, jmp);
2727 hook_set_irn_n(block, n, jmp, NULL);
2728 } /* add_immBlock_pred */
2730 /* changing the current block */
2732 set_cur_block(ir_node *target) {
2733 current_ir_graph->current_block = target;
2734 } /* set_cur_block */
2736 /* ************************ */
2737 /* parameter administration */
2739 /* get a value from the parameter array from the current block by its index */
2741 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2742 ir_graph *irg = current_ir_graph;
2743 assert(get_irg_phase_state(irg) == phase_building);
2744 inc_irg_visited(irg);
2747 return get_r_value_internal(irg->current_block, pos + 1, mode);
2750 /* get a value from the parameter array from the current block by its index */
2752 get_value(int pos, ir_mode *mode) {
2753 return get_d_value(NULL, pos, mode);
2756 /* set a value at position pos in the parameter array from the current block */
2758 set_value(int pos, ir_node *value) {
2759 ir_graph *irg = current_ir_graph;
2760 assert(get_irg_phase_state(irg) == phase_building);
2761 assert(pos+1 < irg->n_loc);
2762 assert(is_ir_node(value));
2763 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2766 /* Find the value number for a node in the current block.*/
2768 find_value(ir_node *value) {
2770 ir_node *bl = current_ir_graph->current_block;
2772 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2773 if (bl->attr.block.graph_arr[i] == value)
2778 /* get the current store */
2781 ir_graph *irg = current_ir_graph;
2783 assert(get_irg_phase_state(irg) == phase_building);
2784 /* GL: one could call get_value instead */
2785 inc_irg_visited(irg);
2786 return get_r_value_internal(irg->current_block, 0, mode_M);
2789 /* set the current store: handles automatic Sync construction for Load nodes */
2791 set_store(ir_node *store) {
2792 ir_node *load, *pload, *pred, *in[2];
2794 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2795 /* Beware: due to dead code elimination, a store might become a Bad node even in
2796 the construction phase. */
2797 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2799 if (get_opt_auto_create_sync()) {
2800 /* handle non-volatile Load nodes by automatically creating Sync's */
2801 load = skip_Proj(store);
2802 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2803 pred = get_Load_mem(load);
2805 if (is_Sync(pred)) {
2806 /* a Load after a Sync: move it up */
2807 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2809 set_Load_mem(load, get_memop_mem(mem));
2810 add_Sync_pred(pred, store);
/* NOTE(review): lines appear to be missing from the extraction here
   (the else-branch header and, below, the in[0]/in[1] setup feeding
   new_Sync(2, in)) -- verify against the upstream source before
   touching this function. */
2813 pload = skip_Proj(pred);
2814 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2815 /* a Load after a Load: create a new Sync */
2816 set_Load_mem(load, get_Load_mem(pload));
2820 store = new_Sync(2, in);
/* Finally publish the (possibly replaced) store as the memory state of
   the current block: slot 0 of the graph_arr. */
2825 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2829 keep_alive(ir_node *ka) {
2830 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2833 /* --- Useful access routines --- */
2834 /* Returns the current block of the current graph. To set the current
2835 block use set_cur_block. */
2836 ir_node *get_cur_block(void) {
2837 return get_irg_current_block(current_ir_graph);
2838 } /* get_cur_block */
2840 /* Returns the frame type of the current graph */
2841 ir_type *get_cur_frame_type(void) {
2842 return get_irg_frame_type(current_ir_graph);
2843 } /* get_cur_frame_type */
2846 /* ********************************************************************* */
2849 /* call once for each run of the library */
2851 firm_init_cons(uninitialized_local_variable_func_t *func) {
2852 default_initialize_local_variable = func;
2853 } /* firm_init_cons */
2856 irp_finalize_cons(void) {
2858 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2859 irg_finalize_cons(get_irp_irg(i));
2861 irp->phase_state = phase_high;
2862 } /* irp_finalize_cons */
2865 ir_node *new_Block(int arity, ir_node **in) {
2866 return new_d_Block(NULL, arity, in);
2868 ir_node *new_Start(void) {
2869 return new_d_Start(NULL);
2871 ir_node *new_End(void) {
2872 return new_d_End(NULL);
2874 ir_node *new_Jmp(void) {
2875 return new_d_Jmp(NULL);
2877 ir_node *new_IJmp(ir_node *tgt) {
2878 return new_d_IJmp(NULL, tgt);
2880 ir_node *new_Cond(ir_node *c) {
2881 return new_d_Cond(NULL, c);
2883 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2884 return new_d_Return(NULL, store, arity, in);
2886 ir_node *new_Const(ir_mode *mode, tarval *con) {
2887 return new_d_Const(NULL, mode, con);
2890 ir_node *new_Const_long(ir_mode *mode, long value) {
2891 return new_d_Const_long(NULL, mode, value);
2894 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2895 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
2898 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
2899 return new_d_SymConst_type(NULL, mode, value, kind, type);
2901 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2902 return new_d_SymConst(NULL, mode, value, kind);
2904 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
2905 return new_d_simpleSel(NULL, store, objptr, ent);
2907 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
2909 return new_d_Sel(NULL, store, objptr, arity, in, ent);
2911 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
2913 return new_d_Call(NULL, store, callee, arity, in, tp);
2915 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
2916 return new_d_Add(NULL, op1, op2, mode);
2918 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
2919 return new_d_Sub(NULL, op1, op2, mode);
2921 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
2922 return new_d_Minus(NULL, op, mode);
2924 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
2925 return new_d_Mul(NULL, op1, op2, mode);
2927 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
2928 return new_d_Mulh(NULL, op1, op2, mode);
2930 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2931 return new_d_Quot(NULL, memop, op1, op2, mode, state);
2933 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2934 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
2936 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2937 return new_d_Div(NULL, memop, op1, op2, mode, state);
2939 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2940 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
2942 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2943 return new_d_Mod(NULL, memop, op1, op2, mode, state);
2945 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
2946 return new_d_Abs(NULL, op, mode);
2948 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
2949 return new_d_And(NULL, op1, op2, mode);
2951 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
2952 return new_d_Or(NULL, op1, op2, mode);
2954 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
2955 return new_d_Eor(NULL, op1, op2, mode);
2957 ir_node *new_Not(ir_node *op, ir_mode *mode) {
2958 return new_d_Not(NULL, op, mode);
2960 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
2961 return new_d_Shl(NULL, op, k, mode);
2963 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
2964 return new_d_Shr(NULL, op, k, mode);
2966 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
2967 return new_d_Shrs(NULL, op, k, mode);
2969 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
2970 return new_d_Rotl(NULL, op, k, mode);
2972 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
2973 return new_d_Carry(NULL, op1, op2, mode);
2975 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
2976 return new_d_Borrow(NULL, op1, op2, mode);
2978 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
2979 return new_d_Cmp(NULL, op1, op2);
2981 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
2982 return new_d_Conv(NULL, op, mode);
2984 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
2985 return new_d_strictConv(NULL, op, mode);
2987 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
2988 return new_d_Cast(NULL, op, to_tp);
2990 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
2991 return new_d_Phi(NULL, arity, in, mode);
2993 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
2994 return new_d_Load(NULL, store, addr, mode);
2996 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
2997 return new_d_Store(NULL, store, addr, val);
2999 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3000 ir_where_alloc where) {
3001 return new_d_Alloc(NULL, store, size, alloc_type, where);
3003 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3004 ir_type *free_type, ir_where_alloc where) {
3005 return new_d_Free(NULL, store, ptr, size, free_type, where);
3007 ir_node *new_Sync(int arity, ir_node *in[]) {
3008 return new_d_Sync(NULL, arity, in);
3010 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3011 return new_d_Proj(NULL, arg, mode, proj);
3013 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3014 return new_d_defaultProj(NULL, arg, max_proj);
3016 ir_node *new_Tuple(int arity, ir_node **in) {
3017 return new_d_Tuple(NULL, arity, in);
3019 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3020 return new_d_Id(NULL, val, mode);
3022 ir_node *new_Bad(void) {
3025 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3026 return new_d_Confirm(NULL, val, bound, cmp);
3028 ir_node *new_Unknown(ir_mode *m) {
3029 return new_d_Unknown(m);
3031 ir_node *new_CallBegin(ir_node *callee) {
3032 return new_d_CallBegin(NULL, callee);
3034 ir_node *new_EndReg(void) {
3035 return new_d_EndReg(NULL);
3037 ir_node *new_EndExcept(void) {
3038 return new_d_EndExcept(NULL);
3040 ir_node *new_Break(void) {
3041 return new_d_Break(NULL);
3043 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3044 return new_d_Filter(NULL, arg, mode, proj);
3046 ir_node *new_NoMem(void) {
3047 return new_d_NoMem();
3049 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3050 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3052 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3053 return new_d_CopyB(NULL, store, dst, src, data_type);
3055 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3056 return new_d_InstOf(NULL, store, objptr, ent);
3058 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3059 return new_d_Raise(NULL, store, obj);
3061 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3062 return new_d_Bound(NULL, store, idx, lower, upper);
3064 ir_node *new_Pin(ir_node *node) {
3065 return new_d_Pin(NULL, node);
3067 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3068 int n_outs, ir_asm_constraint *outputs,
3069 int n_clobber, ident *clobber[], ident *asm_text) {
3070 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3073 /* create a new anchor node */
3074 ir_node *new_Anchor(ir_graph *irg) {
3075 ir_node *in[anchor_last];
3076 memset(in, 0, sizeof(in));
3077 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);