2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
35 #include "firm_common_t.h"
42 #include "irbackedge_t.h"
44 #include "iredges_t.h"
/*
 * Build-time configuration for this translation unit.
 * NOTE(review): this chunk is a fragment -- the #if/#else/#endif lines
 * guarding the two IRN_VRFY_IRG definitions are not visible here; both
 * alternatives (no-op vs. call to irn_vrfy_irg) appear back to back.
 */
47 /* Uncomment to use original code instead of generated one */
48 // #define USE_ORIGINAL
50 /* when we need verifying */
/* Debug builds verify each freshly constructed node; release builds make
   IRN_VRFY_IRG a no-op. */
52 # define IRN_VRFY_IRG(res, irg)
54 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
58 * Language dependent variable initialization callback.
/* Frontend-installed hook used to produce a value for a local variable that
   is read before ever being written; NULL until a frontend registers one. */
60 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/*
 * Macro factories for "bd" constructors (build a node in a given Block
 * with Debug info, operating on current_ir_graph).  Each expansion:
 * create the raw node with new_ir_node(), run local optimization with
 * optimize_node(), verify with IRN_VRFY_IRG().
 * NOTE(review): fragment -- the `ir_node *res;` / `in[]` declarations,
 * the return statements and the closing braces of the expansions are
 * not visible in this extract.
 */
62 /* creates a bd constructor for a binop */
63 #define NEW_BD_BINOP(instr) \
65 new_bd_##instr(dbg_info *db, ir_node *block, \
66 ir_node *op1, ir_node *op2, ir_mode *mode) \
70 ir_graph *irg = current_ir_graph; \
73 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
74 res = optimize_node(res); \
75 IRN_VRFY_IRG(res, irg); \
79 /* creates a bd constructor for an unop */
80 #define NEW_BD_UNOP(instr) \
82 new_bd_##instr(dbg_info *db, ir_node *block, \
83 ir_node *op, ir_mode *mode) \
86 ir_graph *irg = current_ir_graph; \
87 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
88 res = optimize_node(res); \
89 IRN_VRFY_IRG(res, irg); \
/* Div-like ops take a memory operand plus two data operands; the node itself
   is mode_T (a tuple: memory result, exception control flow, data result),
   while the requested data mode is stored in attr.divmod.resmode. */
93 /* creates a bd constructor for an divop */
94 #define NEW_BD_DIVOP(instr) \
96 new_bd_##instr(dbg_info *db, ir_node *block, \
97 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
101 ir_graph *irg = current_ir_graph; \
105 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
106 res->attr.divmod.exc.pin_state = state; \
107 res->attr.divmod.resmode = mode; \
108 res->attr.divmod.no_remainder = 0; \
109 res = optimize_node(res); \
110 IRN_VRFY_IRG(res, irg); \
/*
 * Macro factories for "rd" constructors (explicit iR graph + Debug info):
 * each saves current_ir_graph, switches to the caller-supplied irg,
 * delegates to the matching bd constructor, then restores the old graph.
 * The "d" constructors build in current_ir_graph->current_block.
 * NOTE(review): fragment -- result declarations, returns and closing
 * braces of the expansions are not visible in this extract.
 */
114 /* creates a rd constructor for a binop */
115 #define NEW_RD_BINOP(instr) \
117 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
118 ir_node *op1, ir_node *op2, ir_mode *mode) \
121 ir_graph *rem = current_ir_graph; \
122 current_ir_graph = irg; \
123 res = new_bd_##instr(db, block, op1, op2, mode); \
124 current_ir_graph = rem; \
128 /* creates a rd constructor for an unop */
129 #define NEW_RD_UNOP(instr) \
131 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
132 ir_node *op, ir_mode *mode) \
135 ir_graph *rem = current_ir_graph; \
136 current_ir_graph = irg; \
137 res = new_bd_##instr(db, block, op, mode); \
138 current_ir_graph = rem; \
142 /* creates a rd constructor for an divop */
143 #define NEW_RD_DIVOP(instr) \
145 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
146 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
149 ir_graph *rem = current_ir_graph; \
150 current_ir_graph = irg; \
151 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
152 current_ir_graph = rem; \
/* "d" constructors: like bd, but implicitly target the block currently
   under construction (current_ir_graph->current_block). */
156 /* creates a d constructor for an binop */
157 #define NEW_D_BINOP(instr) \
159 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
160 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
163 /* creates a d constructor for an unop */
164 #define NEW_D_UNOP(instr) \
166 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
167 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
171 #include "gen_ir_cons.c.inl"
175 * Constructs a Block with a fixed number of predecessors.
176 * Does not set current_block. Cannot be used with automatic
177 * Phi node construction.
/* NOTE(review): fragment -- `ir_node *res;`, `return res;` and the
   closing brace are not visible in this extract. */
180 new_bd_Block(dbg_info *db, int arity, ir_node **in) {
182 ir_graph *irg = current_ir_graph;
/* A Block has no enclosing block of its own: block argument is NULL. */
184 res = new_ir_node(db, irg, NULL, op_Block, mode_BB, arity, in);
186 /* macroblock header */
/* Initialize all Block attributes to a fresh, live, unlabelled state. */
189 res->attr.block.is_dead = 0;
190 res->attr.block.is_mb_head = 1;
191 res->attr.block.has_label = 0;
192 res->attr.block.irg = irg;
/* One backedge flag per predecessor, allocated on the graph's obstack. */
193 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
194 res->attr.block.in_cg = NULL;
195 res->attr.block.cg_backedge = NULL;
196 res->attr.block.extblk = NULL;
197 res->attr.block.mb_depth = 0;
198 res->attr.block.label = 0;
/* All predecessors were supplied up front, so the block is born mature. */
200 set_Block_matured(res, 1);
201 set_Block_block_visited(res, 0);
203 IRN_VRFY_IRG(res, irg);
/* Start node: a 0-ary tuple (mode_T) marking the entry of the graph.
   NOTE(review): fragment -- declarations/returns/braces elided here and
   in the following constructors. */
209 new_bd_Start(dbg_info *db, ir_node *block) {
211 ir_graph *irg = current_ir_graph;
213 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
215 IRN_VRFY_IRG(res, irg);
/* End node: created with dynamic arity (-1) so keep-alive edges can be
   appended later. */
220 new_bd_End(dbg_info *db, ir_node *block) {
222 ir_graph *irg = current_ir_graph;
224 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
226 IRN_VRFY_IRG(res, irg);
231 * Creates a Phi node with all predecessors. Calling this constructor
232 * is only allowed if the corresponding block is mature.
235 new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
237 ir_graph *irg = current_ir_graph;
241 /* Don't assert that block matured: the use of this constructor is strongly
/* If the block is already mature, its arity must match the Phi's. */
243 if (get_Block_matured(block))
244 assert(get_irn_arity(block) == arity);
246 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
248 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
/* Scan predecessors for Unknown inputs; if any are present, skip local
   optimization (the Phi must stay until the inputs are resolved). */
250 for (i = arity - 1; i >= 0; --i)
251 if (is_Unknown(in[i])) {
256 if (!has_unknown) res = optimize_node(res);
257 IRN_VRFY_IRG(res, irg);
259 /* Memory Phis in endless loops must be kept alive.
260 As we can't distinguish these easily we keep all of them alive. */
261 if (is_Phi(res) && mode == mode_M)
262 add_End_keepalive(get_irg_end(irg), res);
/* Const with an explicit source type: Consts always live in the start
   block; the node's mode is derived from the tarval. */
267 new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
269 ir_graph *irg = current_ir_graph;
271 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
272 res->attr.con.tv = con;
273 set_Const_type(res, tp); /* Call method because of complex assertion. */
274 res = optimize_node (res);
/* optimize_node must not change the type we just set. */
275 assert(get_Const_type(res) == tp);
276 IRN_VRFY_IRG(res, irg);
279 } /* new_bd_Const_type */
/* Convenience wrapper: Const with the unknown type.
   NOTE(review): delegates to the rd variant, passing current_ir_graph
   explicitly -- confirm this indirection is intentional. */
282 new_bd_Const(dbg_info *db, tarval *con) {
283 ir_graph *irg = current_ir_graph;
285 return new_rd_Const_type (db, irg, con, firm_unknown_type);
/* Convenience wrapper: build the tarval from a long, then a Const. */
289 new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
290 ir_graph *irg = current_ir_graph;
292 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
293 } /* new_bd_Const_long */
/* Id node: forwards a single value unchanged (normally optimized away). */
297 new_bd_Id(dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode) {
299 ir_graph *irg = current_ir_graph;
301 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
302 res = optimize_node(res);
303 IRN_VRFY_IRG(res, irg);
/* Proj node: selects one result out of a tuple-valued predecessor. */
308 new_bd_Proj(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
311 ir_graph *irg = current_ir_graph;
313 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
314 res->attr.proj = proj;
/* Sanity: the projected node and its block must exist. */
317 assert(get_Proj_pred(res));
318 assert(get_nodes_block(get_Proj_pred(res)));
320 res = optimize_node(res);
322 IRN_VRFY_IRG(res, irg);
/* Default Proj of a Cond (switch): marks the Cond as fragmentary and
   records max_proj as its default projection number. */
328 new_bd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg,
331 ir_graph *irg = current_ir_graph;
333 assert(arg->op == op_Cond);
/* Side effect on the *argument*: the Cond's kind/default_proj change. */
334 arg->attr.cond.kind = fragmentary;
335 arg->attr.cond.default_proj = max_proj;
336 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
338 } /* new_bd_defaultProj */
/* Conv node: converts op to `mode`; strict_flag marks strict conversions
   that must not be removed (stored in attr.conv.strict). */
342 new_bd_Conv(dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag) {
344 ir_graph *irg = current_ir_graph;
346 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
347 res->attr.conv.strict = strict_flag;
348 res = optimize_node(res);
349 IRN_VRFY_IRG(res, irg);
/* Cast node: reinterprets op's high-level type (mode stays the same);
   only atomic target types are allowed. */
354 new_bd_Cast(dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp) {
356 ir_graph *irg = current_ir_graph;
358 assert(is_atomic_type(to_tp));
360 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
361 res->attr.cast.type = to_tp;
362 res = optimize_node(res);
363 IRN_VRFY_IRG(res, irg);
/* Tuple node: bundles `arity` values into one mode_T node. */
368 new_bd_Tuple(dbg_info *db, ir_node *block, int arity, ir_node **in) {
370 ir_graph *irg = current_ir_graph;
372 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
373 res = optimize_node (res);
374 IRN_VRFY_IRG(res, irg);
399 /** Creates a remainderless Div node. */
/* Same shape as the NEW_BD_DIVOP expansion, but with no_remainder = 1. */
400 static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
401 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
405 ir_graph *irg = current_ir_graph;
409 res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
410 res->attr.divmod.exc.pin_state = state;
411 res->attr.divmod.resmode = mode;
412 res->attr.divmod.no_remainder = 1;
413 res = optimize_node(res);
414 IRN_VRFY_IRG(res, irg);
/* Cmp node: mode_T result; individual relations are selected by Projs. */
419 new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
422 ir_graph *irg = current_ir_graph;
425 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
426 res = optimize_node(res);
427 IRN_VRFY_IRG(res, irg);
/* Jmp node: unconditional control flow out of `block` (mode_X). */
432 new_bd_Jmp(dbg_info *db, ir_node *block) {
434 ir_graph *irg = current_ir_graph;
436 res = new_ir_node(db, irg, block, op_Jmp, mode_X, 0, NULL);
437 res = optimize_node(res);
438 IRN_VRFY_IRG(res, irg);
/* IJmp node: indirect jump to the computed target address `tgt`. */
443 new_bd_IJmp(dbg_info *db, ir_node *block, ir_node *tgt) {
445 ir_graph *irg = current_ir_graph;
447 res = new_ir_node(db, irg, block, op_IJmp, mode_X, 1, &tgt);
448 res = optimize_node(res);
449 IRN_VRFY_IRG(res, irg);
/* Cond node: conditional branch/switch on `c`; starts as a dense jump
   table with no predicted outcome. */
454 new_bd_Cond(dbg_info *db, ir_node *block, ir_node *c) {
456 ir_graph *irg = current_ir_graph;
458 res = new_ir_node(db, irg, block, op_Cond, mode_T, 1, &c);
459 res->attr.cond.kind = dense;
460 res->attr.cond.default_proj = 0;
461 res->attr.cond.pred = COND_JMP_PRED_NONE;
462 res = optimize_node(res);
463 IRN_VRFY_IRG(res, irg);
/* Call node: operands are [store, callee, arg0..argN-1]; r_in is built on
   the stack via NEW_ARR_A and the user args are copied in at offset 2.
   NOTE(review): fragment -- the r_arity computation and the r_in[0]/r_in[1]
   assignments are not visible in this extract. */
468 new_bd_Call(dbg_info *db, ir_node *block, ir_node *store,
469 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
473 ir_graph *irg = current_ir_graph;
476 NEW_ARR_A(ir_node *, r_in, r_arity);
479 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
481 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
/* The call type must be a method type (or the unknown type). */
483 assert((get_unknown_type() == tp) || is_Method_type(tp));
484 set_Call_type(res, tp);
485 res->attr.call.exc.pin_state = op_pin_state_pinned;
486 res->attr.call.callee_arr = NULL;
487 res = optimize_node(res);
488 IRN_VRFY_IRG(res, irg);
/* Builtin node: like Call but for compiler intrinsics; operands are
   [store, arg0..argN-1] (user args copied in at offset 1). */
493 new_bd_Builtin(dbg_info *db, ir_node *block, ir_node *store,
494 int arity, ir_node **in, ir_builtin_kind kind, ir_type *tp) {
498 ir_graph *irg = current_ir_graph;
501 NEW_ARR_A(ir_node *, r_in, r_arity);
503 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
505 res = new_ir_node(db, irg, block, op_Builtin, mode_T, r_arity, r_in);
507 assert((get_unknown_type() == tp) || is_Method_type(tp));
508 res->attr.builtin.exc.pin_state = op_pin_state_pinned;
509 res->attr.builtin.kind = kind;
510 res->attr.builtin.type = tp;
511 res = optimize_node(res);
512 IRN_VRFY_IRG(res, irg);
514 } /* new_bd_Buildin */
/* Return node: operands are [store, res0..resN-1]; control-flow result
   (mode_X) targets the end block. */
517 new_bd_Return(dbg_info *db, ir_node *block,
518 ir_node *store, int arity, ir_node **in) {
522 ir_graph *irg = current_ir_graph;
525 NEW_ARR_A (ir_node *, r_in, r_arity);
527 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
528 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
529 res = optimize_node(res);
530 IRN_VRFY_IRG(res, irg);
532 } /* new_bd_Return */
/* Load node: [store, adr] -> mode_T tuple; ir_cons_flags bits select
   pin state, volatility and alignment of the access. */
535 new_bd_Load(dbg_info *db, ir_node *block,
536 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
539 ir_graph *irg = current_ir_graph;
543 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
544 res->attr.load.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
545 res->attr.load.mode = mode;
546 res->attr.load.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
547 res->attr.load.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
548 res = optimize_node(res);
549 IRN_VRFY_IRG(res, irg);
/* Store node: [store, adr, val] -> mode_T tuple; same flag handling as
   Load, minus the result mode (the value's mode is implicit). */
554 new_bd_Store(dbg_info *db, ir_node *block,
555 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
558 ir_graph *irg = current_ir_graph;
563 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
564 res->attr.store.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
565 res->attr.store.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
566 res->attr.store.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
568 res = optimize_node(res);
569 IRN_VRFY_IRG(res, irg);
/* Alloc node: [store, size] -> mode_T tuple; `where` selects stack vs.
   heap allocation, `alloc_type` the allocated entity type. */
574 new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
575 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
578 ir_graph *irg = current_ir_graph;
582 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
583 res->attr.alloc.exc.pin_state = op_pin_state_pinned;
584 res->attr.alloc.where = where;
585 res->attr.alloc.type = alloc_type;
586 res = optimize_node(res);
587 IRN_VRFY_IRG(res, irg);
/* Free node: [store, ptr, size] -> mode_M; releases memory previously
   allocated with matching `where`/`free_type`. */
592 new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
593 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
596 ir_graph *irg = current_ir_graph;
601 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
602 res->attr.free.where = where;
603 res->attr.free.type = free_type;
604 res = optimize_node(res);
605 IRN_VRFY_IRG(res, irg);
/* Sel node: address computation selecting entity `ent` relative to
   `objptr`, with optional array indices in `in`.  Result mode is
   mode_P_code when the entity is a method, mode_P_data otherwise. */
611 new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
612 int arity, ir_node **in, ir_entity *ent) {
616 ir_graph *irg = current_ir_graph;
617 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
619 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
/* Operands are [store, objptr, index0..indexN-1]; indices copied at
   offset 2 into the stack-allocated r_in array. */
622 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
625 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
627 * Sel's can select functions which should be of mode mode_P_code.
629 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
630 res->attr.sel.entity = ent;
631 res = optimize_node(res);
632 IRN_VRFY_IRG(res, irg);
/* SymConst node: symbolic constant (address, size, alignment, ...) with
   an explicit source type; 0-ary, kind/symbol stored in attributes. */
637 new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
638 symconst_symbol value,symconst_kind symkind, ir_type *tp) {
639 ir_graph *irg = current_ir_graph;
640 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
642 res->attr.symc.kind = symkind;
643 res->attr.symc.sym = value;
644 res->attr.symc.tp = tp;
646 res = optimize_node(res);
647 IRN_VRFY_IRG(res, irg);
649 } /* new_bd_SymConst_type */
/* Sync node: created empty with dynamic arity (-1); predecessors are
   added afterwards via add_Sync_pred (see new_rd_Sync). */
652 new_bd_Sync(dbg_info *db, ir_node *block) {
654 ir_graph *irg = current_ir_graph;
656 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
657 /* no need to call optimize node here, Sync are always created with no predecessors */
658 IRN_VRFY_IRG(res, irg);
/* Confirm node: asserts the relation `cmp` between val and bound; value
   mode is inherited from val. */
664 new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
665 ir_node *in[2], *res;
666 ir_graph *irg = current_ir_graph;
670 res = new_ir_node(db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
671 res->attr.confirm.cmp = cmp;
672 res = optimize_node(res);
673 IRN_VRFY_IRG(res, irg);
675 } /* new_bd_Confirm */
/* Unknown node: placeholder value of mode m; lives in the start block
   and carries no debug info. */
678 new_bd_Unknown(ir_mode *m) {
680 ir_graph *irg = current_ir_graph;
682 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
683 res = optimize_node(res);
685 } /* new_bd_Unknown */
/* CallBegin node (interprocedural view): takes the Call's function
   pointer as its single operand and remembers the Call itself. */
689 new_bd_CallBegin(dbg_info *db, ir_node *block, ir_node *call) {
692 ir_graph *irg = current_ir_graph;
694 in[0] = get_Call_ptr(call);
695 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
696 /* res->attr.callbegin.irg = irg; */
697 res->attr.callbegin.call = call;
698 res = optimize_node(res);
699 IRN_VRFY_IRG(res, irg);
701 } /* new_bd_CallBegin */
/* EndReg node: interprocedural regular end; registered on the graph,
   dynamic arity, not optimized. */
704 new_bd_EndReg(dbg_info *db, ir_node *block) {
706 ir_graph *irg = current_ir_graph;
708 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
709 set_irg_end_reg(irg, res);
710 IRN_VRFY_IRG(res, irg);
712 } /* new_bd_EndReg */
/* EndExcept node: interprocedural exceptional end; same shape as EndReg. */
715 new_bd_EndExcept(dbg_info *db, ir_node *block) {
717 ir_graph *irg = current_ir_graph;
719 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
720 set_irg_end_except(irg, res);
721 IRN_VRFY_IRG (res, irg);
723 } /* new_bd_EndExcept */
/* Break node: control flow leaving an interprocedural region (mode_X). */
727 new_bd_Break(dbg_info *db, ir_node *block) {
729 ir_graph *irg = current_ir_graph;
731 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
732 res = optimize_node(res);
733 IRN_VRFY_IRG(res, irg);
/* Filter node: interprocedural analogue of Proj, selecting `proj` from
   `arg`; in_cg/backedge start out unset. */
738 new_bd_Filter(dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
741 ir_graph *irg = current_ir_graph;
743 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
744 res->attr.filter.proj = proj;
745 res->attr.filter.in_cg = NULL;
746 res->attr.filter.backedge = NULL;
749 assert(get_Proj_pred(res));
750 assert(get_nodes_block(get_Proj_pred(res)));
752 res = optimize_node(res);
753 IRN_VRFY_IRG(res, irg);
755 } /* new_bd_Filter */
/* Mux node: value select -- yields ir_false or ir_true depending on sel.
   NOTE(review): fragment -- the in[] assignments are not visible here. */
758 new_bd_Mux(dbg_info *db, ir_node *block,
759 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
762 ir_graph *irg = current_ir_graph;
768 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
771 res = optimize_node(res);
772 IRN_VRFY_IRG(res, irg);
/* CopyB node: block copy of `data_type` bytes from src to dst;
   [store, dst, src] -> mode_T tuple, pinned. */
777 new_bd_CopyB(dbg_info *db, ir_node *block,
778 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
781 ir_graph *irg = current_ir_graph;
787 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
789 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
790 res->attr.copyb.type = data_type;
791 res = optimize_node(res);
792 IRN_VRFY_IRG(res, irg);
/* InstOf node: runtime type test of objptr against `type`; note the
   floating (not pinned) exception state. */
797 new_bd_InstOf(dbg_info *db, ir_node *block, ir_node *store,
798 ir_node *objptr, ir_type *type) {
801 ir_graph *irg = current_ir_graph;
805 res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
806 res->attr.instof.exc.pin_state = op_pin_state_floats;
807 res->attr.instof.type = type;
808 res = optimize_node(res);
809 IRN_VRFY_IRG(res, irg);
811 } /* new_bd_InstOf */
/* Raise node: raises exception object `obj`; [store, obj] -> mode_T. */
814 new_bd_Raise(dbg_info *db, ir_node *block, ir_node *store, ir_node *obj) {
817 ir_graph *irg = current_ir_graph;
821 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
822 res = optimize_node(res);
823 IRN_VRFY_IRG(res, irg);
/* Bound node: bounds check lower <= idx < upper; pinned, mode_T. */
828 new_bd_Bound(dbg_info *db, ir_node *block,
829 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
832 ir_graph *irg = current_ir_graph;
838 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
839 res->attr.bound.exc.pin_state = op_pin_state_pinned;
840 res = optimize_node(res);
841 IRN_VRFY_IRG(res, irg);
/* Pin node: pins `node` to this block; inherits the node's mode. */
846 new_bd_Pin(dbg_info *db, ir_node *block, ir_node *node) {
848 ir_graph *irg = current_ir_graph;
850 res = new_ir_node(db, irg, block, op_Pin, get_irn_mode(node), 1, &node);
851 res = optimize_node(res);
852 IRN_VRFY_IRG(res, irg);
/* ASM node: inline assembler with `arity` inputs, n_outs outputs and
   n_clobber clobbered registers.  Constraint/clobber arrays are copied
   onto the graph's obstack so the caller's arrays need not outlive the
   node. */
858 new_bd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[], ir_asm_constraint *inputs,
859 int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
861 ir_graph *irg = current_ir_graph;
863 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
864 res->attr.assem.pin_state = op_pin_state_pinned;
/* Allocate obstack-backed copies of the three descriptor arrays ... */
865 res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
866 res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
867 res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
868 res->attr.assem.asm_text = asm_text;
/* ... and fill them from the caller-provided data. */
870 memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
871 memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
872 memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
874 res = optimize_node(res);
875 IRN_VRFY_IRG(res, irg);
879 /* --------------------------------------------- */
880 /* private interfaces, for professional use only */
881 /* --------------------------------------------- */
/*
 * "rd" constructors: public wrappers taking an explicit ir_graph.  Each
 * saves current_ir_graph, switches to `irg`, delegates to the matching
 * bd constructor, restores the old graph and returns the node.
 * NOTE(review): fragment -- return-type lines, `ir_node *res;`
 * declarations, returns and closing braces are elided throughout.
 */
884 /* Constructs a Block with a fixed number of predecessors.
885 Does not set current_block. Can not be used with automatic
886 Phi node construction. */
888 new_rd_Block(dbg_info *db, ir_graph *irg, int arity, ir_node **in) {
889 ir_graph *rem = current_ir_graph;
892 current_ir_graph = irg;
893 res = new_bd_Block(db, arity, in);
894 current_ir_graph = rem;
902 new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
903 ir_graph *rem = current_ir_graph;
906 current_ir_graph = irg;
907 res = new_bd_Start(db, block);
908 current_ir_graph = rem;
914 new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
916 ir_graph *rem = current_ir_graph;
918 current_ir_graph = irg;
919 res = new_bd_End(db, block);
920 current_ir_graph = rem;
925 /* Creates a Phi node with all predecessors. Calling this constructor
926 is only allowed if the corresponding block is mature. */
928 new_rd_Phi(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
930 ir_graph *rem = current_ir_graph;
932 current_ir_graph = irg;
933 res = new_bd_Phi(db, block,arity, in, mode);
934 current_ir_graph = rem;
940 new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
942 ir_graph *rem = current_ir_graph;
944 current_ir_graph = irg;
945 res = new_bd_Const_type(db, con, tp);
946 current_ir_graph = rem;
949 } /* new_rd_Const_type */
/* new_rd_Const: inlined copy of the Const_type path (see the commented
   delegation below, kept from an earlier refactoring). */
952 new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
954 //#ifdef USE_ORIGINAL
955 ir_graph *rem = current_ir_graph;
957 current_ir_graph = irg;
958 res = new_bd_Const_type(db, con, firm_unknown_type);
959 current_ir_graph = rem;
961 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
968 new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
969 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
970 } /* new_rd_Const_long */
/* rd wrappers for Id/Proj/defaultProj/Conv/Cast/Tuple -- all follow the
   same save/switch/delegate/restore pattern around current_ir_graph.
   NOTE(review): fragment -- declarations/returns/braces elided. */
974 new_rd_Id(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) {
976 ir_graph *rem = current_ir_graph;
978 current_ir_graph = irg;
979 res = new_bd_Id(db, block, val, mode);
980 current_ir_graph = rem;
986 new_rd_Proj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
989 ir_graph *rem = current_ir_graph;
991 current_ir_graph = irg;
992 res = new_bd_Proj(db, block, arg, mode, proj);
993 current_ir_graph = rem;
1000 new_rd_defaultProj(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
1003 ir_graph *rem = current_ir_graph;
1005 current_ir_graph = irg;
1006 res = new_bd_defaultProj(db, block, arg, max_proj);
1007 current_ir_graph = rem;
1010 } /* new_rd_defaultProj */
/* Non-strict Conv: strict_flag = 0. */
1014 new_rd_Conv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1016 ir_graph *rem = current_ir_graph;
1018 current_ir_graph = irg;
1019 res = new_bd_Conv(db, block, op, mode, 0);
1020 current_ir_graph = rem;
/* Strict Conv: strict_flag = 1 -- may not be optimized away. */
1026 new_rd_strictConv(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) {
1028 ir_graph *rem = current_ir_graph;
1030 current_ir_graph = irg;
1031 res = new_bd_Conv(db, block, op, mode, 1);
1032 current_ir_graph = rem;
1035 } /* new_rd_strictConv */
1038 new_rd_Cast(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1040 ir_graph *rem = current_ir_graph;
1042 current_ir_graph = irg;
1043 res = new_bd_Cast(db, block, op, to_tp);
1044 current_ir_graph = rem;
1050 new_rd_Tuple(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1052 ir_graph *rem = current_ir_graph;
1054 current_ir_graph = irg;
1055 res = new_bd_Tuple(db, block, arity, in);
1056 current_ir_graph = rem;
1059 } /* new_rd_Tuple */
/* Generated rd constructors for arithmetic ops (macro expansions). */
1067 NEW_RD_DIVOP(DivMod)
1080 NEW_RD_BINOP(Borrow)
/* Remainderless Div cannot use NEW_RD_DIVOP (it delegates to the static
   new_bd_DivRL), so it is written out by hand. */
1082 /* creates a rd constructor for an divRL */
1083 ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
1084 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
1087 ir_graph *rem = current_ir_graph;
1088 current_ir_graph = irg;
1089 res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
1090 current_ir_graph = rem;
/* rd wrappers for Cmp/Jmp/IJmp/Cond/Call/Builtin -- standard
   save/switch/delegate/restore pattern.
   NOTE(review): fragment -- declarations/returns/braces elided. */
1095 new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
1096 ir_node *op1, ir_node *op2) {
1098 ir_graph *rem = current_ir_graph;
1100 current_ir_graph = irg;
1101 res = new_bd_Cmp(db, block, op1, op2);
1102 current_ir_graph = rem;
1108 new_rd_Jmp(dbg_info *db, ir_graph *irg, ir_node *block) {
1110 ir_graph *rem = current_ir_graph;
1112 current_ir_graph = irg;
1113 res = new_bd_Jmp(db, block);
1114 current_ir_graph = rem;
1120 new_rd_IJmp(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt) {
1122 ir_graph *rem = current_ir_graph;
1124 current_ir_graph = irg;
1125 res = new_bd_IJmp(db, block, tgt);
1126 current_ir_graph = rem;
1132 new_rd_Cond(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c) {
1134 ir_graph *rem = current_ir_graph;
1136 current_ir_graph = irg;
1137 res = new_bd_Cond(db, block, c);
1138 current_ir_graph = rem;
1144 new_rd_Call(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1145 ir_node *callee, int arity, ir_node **in, ir_type *tp) {
1147 ir_graph *rem = current_ir_graph;
1149 current_ir_graph = irg;
1150 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1151 current_ir_graph = rem;
1157 new_rd_Builtin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1158 int arity, ir_node **in, ir_builtin_kind kind, ir_type *tp) {
1160 ir_graph *rem = current_ir_graph;
1162 current_ir_graph = irg;
1163 res = new_bd_Builtin(db, block, store, arity, in, kind, tp);
1164 current_ir_graph = rem;
1167 } /* new_rd_Builtin */
/* rd wrappers for Return/Load/Store/Alloc/Free/simpleSel -- standard
   save/switch/delegate/restore pattern around current_ir_graph.
   NOTE(review): fragment -- declarations/returns/braces elided. */
1170 new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
1171 ir_node *store, int arity, ir_node **in) {
1173 ir_graph *rem = current_ir_graph;
1175 current_ir_graph = irg;
1176 res = new_bd_Return(db, block, store, arity, in);
1177 current_ir_graph = rem;
1180 } /* new_rd_Return */
1183 new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
1184 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1186 ir_graph *rem = current_ir_graph;
1188 current_ir_graph = irg;
1189 res = new_bd_Load(db, block, store, adr, mode, flags);
1190 current_ir_graph = rem;
1196 new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
1197 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1199 ir_graph *rem = current_ir_graph;
1201 current_ir_graph = irg;
1202 res = new_bd_Store(db, block, store, adr, val, flags);
1203 current_ir_graph = rem;
1206 } /* new_rd_Store */
1209 new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1210 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1212 ir_graph *rem = current_ir_graph;
1214 current_ir_graph = irg;
1215 res = new_bd_Alloc(db, block, store, size, alloc_type, where);
1216 current_ir_graph = rem;
1219 } /* new_rd_Alloc */
1222 new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1223 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1225 ir_graph *rem = current_ir_graph;
1227 current_ir_graph = irg;
1228 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1229 current_ir_graph = rem;
/* simpleSel: Sel without array indices (arity 0, in == NULL). */
1236 new_rd_simpleSel(dbg_info *db, ir_graph *irg, ir_node *block,
1237 ir_node *store, ir_node *objptr, ir_entity *ent) {
1239 ir_graph *rem = current_ir_graph;
1241 current_ir_graph = irg;
1242 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1243 current_ir_graph = rem;
1246 } /* new_rd_simpleSel */
/* rd wrapper for Sel with explicit array indices. */
1250 new_rd_Sel(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1251 int arity, ir_node **in, ir_entity *ent) {
1253 ir_graph *rem = current_ir_graph;
1255 current_ir_graph = irg;
1256 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1257 current_ir_graph = rem;
/* SymConst family: the generic _type wrapper plus convenience wrappers
   that fill the symconst_symbol union and pick the symconst_kind.  The
   entity/ident/type variants all build in the start block. */
1264 new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1265 symconst_symbol value, symconst_kind symkind, ir_type *tp) {
1267 ir_graph *rem = current_ir_graph;
1269 current_ir_graph = irg;
1270 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
1271 current_ir_graph = rem;
1274 } /* new_rd_SymConst_type */
1277 new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
1278 symconst_symbol value, symconst_kind symkind) {
1279 return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
1280 } /* new_rd_SymConst */
/* Address of an entity. */
1282 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1283 symconst_symbol sym;
1284 sym.entity_p = symbol;
1285 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
1286 } /* new_rd_SymConst_addr_ent */
/* Offset of an entity. */
1288 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
1289 symconst_symbol sym;
1290 sym.entity_p = symbol;
1291 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
1292 } /* new_rd_SymConst_ofs_ent */
/* Address of a named (not yet resolved) symbol. */
1294 ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
1295 symconst_symbol sym;
1296 sym.ident_p = symbol;
1297 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
1298 } /* new_rd_SymConst_addr_name */
/* Runtime type tag of a type. */
1300 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1301 symconst_symbol sym;
1302 sym.type_p = symbol;
1303 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
1304 } /* new_rd_SymConst_type_tag */
/* Size of a type. */
1306 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1307 symconst_symbol sym;
1308 sym.type_p = symbol;
1309 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
1310 } /* new_rd_SymConst_size */
/* Alignment of a type. */
1312 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
1313 symconst_symbol sym;
1314 sym.type_p = symbol;
1315 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
1316 } /* new_rd_SymConst_align */
/* rd Sync: builds an empty Sync via the bd constructor, then appends the
   `arity` predecessors one by one with add_Sync_pred. */
1319 new_rd_Sync(dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1321 ir_graph *rem = current_ir_graph;
1324 current_ir_graph = irg;
1325 res = new_bd_Sync(db, block);
1326 current_ir_graph = rem;
1328 for (i = 0; i < arity; ++i)
1329 add_Sync_pred(res, in[i]);
/* rd wrappers below follow the standard save/switch/delegate/restore
   pattern.  NOTE(review): fragment -- declarations/returns/braces
   elided throughout. */
1336 new_rd_Confirm(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1338 ir_graph *rem = current_ir_graph;
1340 current_ir_graph = irg;
1341 res = new_bd_Confirm(db, block, val, bound, cmp);
1342 current_ir_graph = rem;
1345 } /* new_rd_Confirm */
1348 new_rd_Unknown(ir_graph *irg, ir_mode *m) {
1350 ir_graph *rem = current_ir_graph;
1352 current_ir_graph = irg;
1353 res = new_bd_Unknown(m);
1354 current_ir_graph = rem;
1357 } /* new_rd_Unknown */
1361 new_rd_CallBegin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) {
1363 ir_graph *rem = current_ir_graph;
1365 current_ir_graph = irg;
1366 res = new_bd_CallBegin(db, block, call);
1367 current_ir_graph = rem;
1370 } /* new_rd_CallBegin */
/* EndReg/EndExcept build the node directly with the explicit irg instead
   of delegating to the bd constructor (no graph switch needed). */
1373 new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
1376 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1377 set_irg_end_reg(irg, res);
1378 IRN_VRFY_IRG(res, irg);
1380 } /* new_rd_EndReg */
1383 new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
1386 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1387 set_irg_end_except(irg, res);
1388 IRN_VRFY_IRG (res, irg);
1390 } /* new_rd_EndExcept */
1394 new_rd_Break(dbg_info *db, ir_graph *irg, ir_node *block) {
1396 ir_graph *rem = current_ir_graph;
1398 current_ir_graph = irg;
1399 res = new_bd_Break(db, block);
1400 current_ir_graph = rem;
1403 } /* new_rd_Break */
1406 new_rd_Filter(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1409 ir_graph *rem = current_ir_graph;
1411 current_ir_graph = irg;
1412 res = new_bd_Filter(db, block, arg, mode, proj);
1413 current_ir_graph = rem;
1416 } /* new_rd_Filter */
/* Constructs a Mux node (conditional select: sel ? ir_true : ir_false) in
   graph irg; current_ir_graph is saved and restored around the call. */
1419 new_rd_Mux(dbg_info *db, ir_graph *irg, ir_node *block,
1420 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1422 ir_graph *rem = current_ir_graph;
1424 current_ir_graph = irg;
1425 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1426 current_ir_graph = rem;
/* Constructs a CopyB node (block copy of `data_type` bytes from src to dst,
   threaded through memory `store`) in graph irg; current_ir_graph is saved
   and restored around the call. */
1431 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1432 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1434 ir_graph *rem = current_ir_graph;
1436 current_ir_graph = irg;
1437 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1438 current_ir_graph = rem;
1441 } /* new_rd_CopyB */
/* Constructs an InstOf node (dynamic type test of objptr against `type`)
   in graph irg; current_ir_graph is saved and restored around the call. */
1444 new_rd_InstOf(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1445 ir_node *objptr, ir_type *type) {
1447 ir_graph *rem = current_ir_graph;
1449 current_ir_graph = irg;
1450 res = new_bd_InstOf(db, block, store, objptr, type);
1451 current_ir_graph = rem;
1454 } /* new_rd_InstOf */
/* Constructs a Raise node (raise exception object `obj`, memory `store`)
   in graph irg; current_ir_graph is saved and restored around the call. */
1457 new_rd_Raise(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) {
1459 ir_graph *rem = current_ir_graph;
1461 current_ir_graph = irg;
1462 res = new_bd_Raise(db, block, store, obj);
1463 current_ir_graph = rem;
1466 } /* new_rd_Raise */
/* Constructs a Bound node (checks lower <= idx < upper) in graph irg;
   current_ir_graph is saved and restored around the call. */
1468 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1469 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1471 ir_graph *rem = current_ir_graph;
1473 current_ir_graph = irg;
1474 res = new_bd_Bound(db, block, store, idx, lower, upper);
1475 current_ir_graph = rem;
1478 } /* new_rd_Bound */
/* Constructs a Pin node (pins `node` to this block) in graph irg;
   current_ir_graph is saved and restored around the call. */
1480 ir_node *new_rd_Pin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *node) {
1482 ir_graph *rem = current_ir_graph;
1484 current_ir_graph = irg;
1485 res = new_bd_Pin(db, block, node);
1486 current_ir_graph = rem;
/* Constructs an ASM node (inline assembler `asm_text` with input/output
   constraints and clobber list) in graph irg; current_ir_graph is saved and
   restored around the call. */
1492 ir_node *new_rd_ASM(dbg_info *db, ir_graph *irg, ir_node *block,
1493 int arity, ir_node *in[], ir_asm_constraint *inputs,
1494 int n_outs, ir_asm_constraint *outputs,
1495 int n_clobber, ident *clobber[], ident *asm_text) {
1497 ir_graph *rem = current_ir_graph;
1499 current_ir_graph = irg;
1500 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1501 current_ir_graph = rem;
/* ------------------------------------------------------------------------
   The "r" constructor family: identical to the new_rd_* variants but with
   dbg_info fixed to NULL (no debug information attached).  Each function is
   a one-line delegation; only new_r_Bad and new_r_NoMem differ -- they
   return the graph's pre-allocated singleton nodes instead of building one.
   ------------------------------------------------------------------------ */
1508 ir_node *new_r_Block(ir_graph *irg, int arity, ir_node **in) {
1509 return new_rd_Block(NULL, irg, arity, in);
1511 ir_node *new_r_Jmp(ir_graph *irg, ir_node *block) {
1512 return new_rd_Jmp(NULL, irg, block);
1514 ir_node *new_r_IJmp(ir_graph *irg, ir_node *block, ir_node *tgt) {
1515 return new_rd_IJmp(NULL, irg, block, tgt);
1517 ir_node *new_r_Cond(ir_graph *irg, ir_node *block, ir_node *c) {
1518 return new_rd_Cond(NULL, irg, block, c);
1520 ir_node *new_r_Return(ir_graph *irg, ir_node *block,
1521 ir_node *store, int arity, ir_node **in) {
1522 return new_rd_Return(NULL, irg, block, store, arity, in);
1525 ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
1526 return new_rd_Start(NULL, irg, block);
1528 ir_node *new_r_End(ir_graph *irg, ir_node *block) {
1529 return new_rd_End(NULL, irg, block);
1531 ir_node *new_r_Const(ir_graph *irg, tarval *con) {
1532 return new_rd_Const(NULL, irg, con);
1534 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
1535 return new_rd_Const_long(NULL, irg, mode, value);
1537 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
1538 return new_rd_Const_type(NULL, irg, con, tp);
1540 ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
1541 symconst_symbol value, symconst_kind symkind) {
1542 return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
/* simpleSel: entity selection without array indices (0 indices, NULL array). */
1544 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1545 ir_node *objptr, ir_entity *ent) {
1546 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1549 ir_node *new_r_Sel(ir_graph *irg, ir_node *block, ir_node *store,
1550 ir_node *objptr, int n_index, ir_node **index,
1552 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1554 ir_node *new_r_Call(ir_graph *irg, ir_node *block, ir_node *store,
1555 ir_node *callee, int arity, ir_node **in,
1557 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1559 ir_node *new_r_Builtin(ir_graph *irg, ir_node *block, ir_node *store,
1560 int arity, ir_node **in, ir_builtin_kind kind,
1562 return new_rd_Builtin(NULL, irg, block, store, arity, in, kind, tp);
1564 ir_node *new_r_Add(ir_graph *irg, ir_node *block,
1565 ir_node *op1, ir_node *op2, ir_mode *mode) {
1566 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1568 ir_node *new_r_Sub(ir_graph *irg, ir_node *block,
1569 ir_node *op1, ir_node *op2, ir_mode *mode) {
1570 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1572 ir_node *new_r_Minus(ir_graph *irg, ir_node *block,
1573 ir_node *op, ir_mode *mode) {
1574 return new_rd_Minus(NULL, irg, block, op, mode);
1576 ir_node *new_r_Mul(ir_graph *irg, ir_node *block,
1577 ir_node *op1, ir_node *op2, ir_mode *mode) {
1578 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1580 ir_node *new_r_Mulh(ir_graph *irg, ir_node *block,
1581 ir_node *op1, ir_node *op2, ir_mode *mode) {
1582 return new_rd_Mulh(NULL, irg, block, op1, op2, mode);
1584 ir_node *new_r_Quot(ir_graph *irg, ir_node *block,
1585 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1586 return new_rd_Quot(NULL, irg, block, memop, op1, op2, mode, state);
1588 ir_node *new_r_DivMod(ir_graph *irg, ir_node *block,
1589 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1590 return new_rd_DivMod(NULL, irg, block, memop, op1, op2, mode, state);
1592 ir_node *new_r_Div(ir_graph *irg, ir_node *block,
1593 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1594 return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
1596 ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
1597 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1598 return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
1600 ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
1601 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
1602 return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
1604 ir_node *new_r_Abs(ir_graph *irg, ir_node *block,
1605 ir_node *op, ir_mode *mode) {
1606 return new_rd_Abs(NULL, irg, block, op, mode);
1608 ir_node *new_r_And(ir_graph *irg, ir_node *block,
1609 ir_node *op1, ir_node *op2, ir_mode *mode) {
1610 return new_rd_And(NULL, irg, block, op1, op2, mode);
1612 ir_node *new_r_Or(ir_graph *irg, ir_node *block,
1613 ir_node *op1, ir_node *op2, ir_mode *mode) {
1614 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1616 ir_node *new_r_Eor(ir_graph *irg, ir_node *block,
1617 ir_node *op1, ir_node *op2, ir_mode *mode) {
1618 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1620 ir_node *new_r_Not(ir_graph *irg, ir_node *block,
1621 ir_node *op, ir_mode *mode) {
1622 return new_rd_Not(NULL, irg, block, op, mode);
1624 ir_node *new_r_Shl(ir_graph *irg, ir_node *block,
1625 ir_node *op, ir_node *k, ir_mode *mode) {
1626 return new_rd_Shl(NULL, irg, block, op, k, mode);
1628 ir_node *new_r_Shr(ir_graph *irg, ir_node *block,
1629 ir_node *op, ir_node *k, ir_mode *mode) {
1630 return new_rd_Shr(NULL, irg, block, op, k, mode);
1632 ir_node *new_r_Shrs(ir_graph *irg, ir_node *block,
1633 ir_node *op, ir_node *k, ir_mode *mode) {
1634 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1636 ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
1637 ir_node *op, ir_node *k, ir_mode *mode) {
1638 return new_rd_Rotl(NULL, irg, block, op, k, mode);
1640 ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
1641 ir_node *op, ir_node *k, ir_mode *mode) {
1642 return new_rd_Carry(NULL, irg, block, op, k, mode);
1644 ir_node *new_r_Borrow(ir_graph *irg, ir_node *block,
1645 ir_node *op, ir_node *k, ir_mode *mode) {
1646 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1648 ir_node *new_r_Cmp(ir_graph *irg, ir_node *block,
1649 ir_node *op1, ir_node *op2) {
1650 return new_rd_Cmp(NULL, irg, block, op1, op2);
1652 ir_node *new_r_Conv(ir_graph *irg, ir_node *block,
1653 ir_node *op, ir_mode *mode) {
1654 return new_rd_Conv(NULL, irg, block, op, mode);
1656 ir_node *new_r_strictConv(ir_graph *irg, ir_node *block,
1657 ir_node *op, ir_mode *mode) {
1658 return new_rd_strictConv(NULL, irg, block, op, mode);
1661 ir_node *new_r_Phi(ir_graph *irg, ir_node *block, int arity,
1662 ir_node **in, ir_mode *mode) {
1663 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1666 ir_node *new_r_Cast(ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1667 return new_rd_Cast(NULL, irg, block, op, to_tp);
1669 ir_node *new_r_Load(ir_graph *irg, ir_node *block,
1670 ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
1671 return new_rd_Load(NULL, irg, block, store, adr, mode, flags);
1673 ir_node *new_r_Store(ir_graph *irg, ir_node *block,
1674 ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
1675 return new_rd_Store(NULL, irg, block, store, adr, val, flags);
1677 ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
1678 ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
1679 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1681 ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
1682 ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
1683 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1686 ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1687 return new_rd_Sync(NULL, irg, block, arity, in);
1690 ir_node *new_r_Proj(ir_graph *irg, ir_node *block, ir_node *arg,
1691 ir_mode *mode, long proj) {
1692 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1695 ir_node *new_r_defaultProj(ir_graph *irg, ir_node *block, ir_node *arg,
1697 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1700 ir_node *new_r_Tuple(ir_graph *irg, ir_node *block,
1701 int arity, ir_node **in) {
1702 return new_rd_Tuple(NULL, irg, block, arity, in );
1704 ir_node *new_r_Id(ir_graph *irg, ir_node *block,
1705 ir_node *val, ir_mode *mode) {
1706 return new_rd_Id(NULL, irg, block, val, mode);
/* Bad is unique per graph: hand out the singleton, do not allocate. */
1709 ir_node *new_r_Bad(ir_graph *irg) {
1710 return get_irg_bad(irg);
1713 ir_node *new_r_Confirm(ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1714 return new_rd_Confirm(NULL, irg, block, val, bound, cmp);
1716 ir_node *new_r_Unknown(ir_graph *irg, ir_mode *m) {
1717 return new_rd_Unknown(irg, m);
1720 ir_node *new_r_CallBegin(ir_graph *irg, ir_node *block, ir_node *callee) {
1721 return new_rd_CallBegin(NULL, irg, block, callee);
1723 ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
1724 return new_rd_EndReg(NULL, irg, block);
1726 ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
1727 return new_rd_EndExcept(NULL, irg, block);
1730 ir_node *new_r_Break(ir_graph *irg, ir_node *block) {
1731 return new_rd_Break(NULL, irg, block);
1733 ir_node *new_r_Filter(ir_graph *irg, ir_node *block, ir_node *arg,
1734 ir_mode *mode, long proj) {
1735 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
/* NoMem is unique per graph: hand out the singleton, do not allocate. */
1738 ir_node *new_r_NoMem(ir_graph *irg) {
1739 return get_irg_no_mem(irg);
1742 ir_node *new_r_Mux(ir_graph *irg, ir_node *block,
1743 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1744 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1746 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1747 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1748 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1750 ir_node *new_r_InstOf(ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1752 return new_rd_InstOf(NULL, irg, block, store, objptr, type);
1754 ir_node *new_r_Raise(ir_graph *irg, ir_node *block,
1755 ir_node *store, ir_node *obj) {
1756 return new_rd_Raise(NULL, irg, block, store, obj);
1758 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1759 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1760 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1762 ir_node *new_r_Pin(ir_graph *irg, ir_node *block, ir_node *node) {
1763 return new_rd_Pin(NULL, irg, block, node);
1766 ir_node *new_r_ASM(ir_graph *irg, ir_node *block,
1767 int arity, ir_node *in[], ir_asm_constraint *inputs,
1768 int n_outs, ir_asm_constraint *outputs,
1769 int n_clobber, ident *clobber[], ident *asm_text) {
1770 return new_rd_ASM(NULL, irg, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
1773 /** ********************/
1774 /** public interfaces */
1775 /** construction tools */
1781 * - create a new Start node in the current block
1783 * @return s - pointer to the created Start node
1787 /* Constructs a Block with a fixed number of predecessors.
1788 Does set current_block. Can be used with automatic Phi
1789 node construction. */
/* Constructs a Block with `arity` fixed predecessors in the current graph
   and makes it the current block (enables automatic Phi construction).
   NOTE(review): lines 1797 and 1799 are the two alternatives of the
   USE_ORIGINAL #ifdef (preprocessor lines not visible in this extract). */
1791 new_d_Block(dbg_info *db, int arity, ir_node **in) {
1794 int has_unknown = 0;
1797 res = new_bd_Block(db, arity, in);
1799 res = new_rd_Block(db, current_ir_graph, arity, in);
1802 /* Create and initialize array for Phi-node construction. */
1803 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1804 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1805 current_ir_graph->n_loc);
1806 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* A block fed by an Unknown must not be optimized yet -- record that. */
1809 for (i = arity-1; i >= 0; i--)
1810 if (is_Unknown(in[i])) {
1815 if (!has_unknown) res = optimize_node(res);
1817 current_ir_graph->current_block = res;
1819 IRN_VRFY_IRG(res, current_ir_graph);
/* Constructs a Start node (mode_T, no predecessors) in the current block of
   the current graph, optimizes and verifies it. */
1826 new_d_Start(dbg_info *db) {
1829 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1830 op_Start, mode_T, 0, NULL);
1832 res = optimize_node(res);
1833 IRN_VRFY_IRG(res, current_ir_graph);
/* Constructs an End node (mode_X, dynamic arity -1 for keep-alive edges) in
   the current block of the current graph, optimizes and verifies it. */
1838 new_d_End(dbg_info *db) {
1840 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1841 op_End, mode_X, -1, NULL);
1842 res = optimize_node(res);
1843 IRN_VRFY_IRG(res, current_ir_graph);
1848 /* ***********************************************************************/
1849 /* Methods necessary for automatic Phi node creation */
1851 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1852 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1853 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1854 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1856 Call Graph: ( A ---> B == A "calls" B)
1858 get_value mature_immBlock
1866 get_r_value_internal |
1870 new_rd_Phi0 new_rd_Phi_in
1872 * *************************************************************************** */
1874 /** Creates a Phi node with 0 predecessors.
 *  Placeholder used while a block is immature; it is later exchanged for
 *  the real Phi by mature_immBlock() / phi_merge(). */
1875 static inline ir_node *
1876 new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
1879 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1880 IRN_VRFY_IRG(res, irg);
1886 * Internal constructor of a Phi node by a phi_merge operation.
1888 * @param irg the graph on which the Phi will be constructed
1889 * @param block the block in which the Phi will be constructed
1890 * @param mode the mode of the Phi node
1891 * @param in the input array of the phi node
1892 * @param ins number of elements in the input array
1893 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
1894 * the value for which the new Phi is constructed
1896 static inline ir_node *
1897 new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
1898 ir_node **in, int ins, ir_node *phi0) {
1900 ir_node *res, *known;
1902 /* Allocate a new node on the obstack. The allocation copies the in
1904 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
1905 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
1907 /* This loop checks whether the Phi has more than one predecessor.
1908 If so, it is a real Phi node and we break the loop. Else the
1909 Phi node merges the same definition on several paths and therefore
1911 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
1913 for (i = ins - 1; i >= 0; --i) {
1916 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
1918 /* Optimize self referencing Phis: We can't detect them yet properly, as
1919 they still refer to the Phi0 they will replace. So replace right now. */
1920 if (phi0 && in[i] == phi0)
1923 if (in[i] == res || in[i] == known)
1932 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* The freshly allocated Phi is superfluous: undo the allocation. */
1935 edges_node_deleted(res, current_ir_graph);
1936 obstack_free(current_ir_graph->obst, res);
1937 if (is_Phi(known)) {
1938 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
1939 order, an enclosing Phi node may become superfluous. */
1940 res = optimize_in_place_2(known);
1942 exchange(known, res);
1947 /* A undefined value, e.g., in unreachable code. */
1951 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
1952 IRN_VRFY_IRG(res, irg);
1953 /* Memory Phis in endless loops must be kept alive.
1954 As we can't distinguish these easily we keep all of them alive. */
1955 if (is_Phi(res) && mode == mode_M)
1956 add_End_keepalive(get_irg_end(irg), res);
1960 } /* new_rd_Phi_in */
1963 get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
1965 #if PRECISE_EXC_CONTEXT
1967 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1970 * Construct a new frag_array for node n.
1971 * Copy the content from the current graph_arr of the corresponding block:
1972 * this is the current state.
1973 * Set ProjM(n) as current memory state.
1974 * Further the last entry in frag_arr of current block points to n. This
1975 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
/* Builds the frag array for fragile op n (see block comment above):
   snapshot of the current block's graph_arr with ProjM(n) as entry 0.
   NOTE(review): the is_Call(n) branch header and the set_optimize(opt)
   restore are among the lines dropped from this extract. */
1977 static inline ir_node **new_frag_arr(ir_node *n) {
1981 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1982 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1983 sizeof(ir_node *)*current_ir_graph->n_loc);
1985 /* turn off optimization before allocating Proj nodes, as res isn't
1987 opt = get_opt_optimize(); set_optimize(0);
1988 /* Here we rely on the fact that all frag ops have Memory as first result! */
1990 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
1991 } else if (is_CopyB(n)) {
1992 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
/* All remaining fragile ops share one memory Proj number -- assert that,
   then any of the equal constants may be used. */
1994 assert((pn_Quot_M == pn_DivMod_M) &&
1995 (pn_Quot_M == pn_Div_M) &&
1996 (pn_Quot_M == pn_Mod_M) &&
1997 (pn_Quot_M == pn_Load_M) &&
1998 (pn_Quot_M == pn_Store_M) &&
1999 (pn_Quot_M == pn_Alloc_M) &&
2000 (pn_Quot_M == pn_Bound_M));
2001 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain: the block's last slot points at the most recent frag op. */
2005 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2007 } /* new_frag_arr */
2010 * Returns the frag_arr from a node.
/* Dispatches on the opcode to the attribute slot that stores the frag array.
   NOTE(review): the case labels of this switch were dropped from this
   extract; see the repository copy for the opcode -> attribute mapping. */
2012 static inline ir_node **get_frag_arr(ir_node *n) {
2013 switch (get_irn_opcode(n)) {
2015 return n->attr.call.exc.frag_arr;
2017 return n->attr.alloc.exc.frag_arr;
2019 return n->attr.load.exc.frag_arr;
2021 return n->attr.store.exc.frag_arr;
2023 return n->attr.except.frag_arr;
2025 } /* get_frag_arr */
/* Writes `val` at position `pos` into frag_arr and every frag array chained
   behind it (via the last slot), stopping at the end of the chain.  Never
   overwrites an existing value.  The bounded loop (1024 iterations) is a
   debug guard against endless recursion through the chain. */
2028 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2029 #ifdef DEBUG_libfirm
2032 for (i = 1024; i >= 0; --i)
2037 if (frag_arr[pos] == NULL)
2038 frag_arr[pos] = val;
2039 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
2040 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2041 assert(arr != frag_arr && "Endless recursion detected");
2046 assert(!"potential endless recursion in set_frag_value");
2047 } /* set_frag_value */
/* Returns the definition of value `pos` valid after the fragile op cfOp,
   looking it up in cfOp's frag array.  If the slot is empty, the value is
   reconstructed from the block: either by Phi construction (if a later
   set_value() left a definition in graph_arr) or via the regular
   get_r_value_internal() lookup; the result is written back into all
   chained frag arrays. */
2050 get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2054 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
2056 frag_arr = get_frag_arr(cfOp);
2057 res = frag_arr[pos];
2059 if (block->attr.block.graph_arr[pos] != NULL) {
2060 /* There was a set_value() after the cfOp and no get_value() before that
2061 set_value(). We must build a Phi node now. */
2062 if (block->attr.block.is_matured) {
2063 int ins = get_irn_arity(block);
2065 NEW_ARR_A(ir_node *, nin, ins);
2066 res = phi_merge(block, pos, mode, nin, ins);
/* Block not matured yet: queue a Phi0 on the block's phi list. */
2068 res = new_rd_Phi0(current_ir_graph, block, mode);
2069 res->attr.phi.u.pos = pos;
2070 res->attr.phi.next = block->attr.block.phis;
2071 block->attr.block.phis = res;
2073 assert(res != NULL);
2074 /* It's a Phi, we can write this into all graph_arrs with NULL */
2075 set_frag_value(block->attr.block.graph_arr, pos, res);
2077 res = get_r_value_internal(block, pos, mode);
2078 set_frag_value(block->attr.block.graph_arr, pos, res);
2082 } /* get_r_frag_value_internal */
2083 #endif /* PRECISE_EXC_CONTEXT */
2086 * Check whether a control flownode cf_pred represents an exception flow.
2088 * @param cf_pred the control flow node
2089 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
 * @return non-zero iff cf_pred is the exceptional projection of a fragile op
2091 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
2093 * Note: all projections from a raise are "exceptional control flow", so we handle it
2094 * like a normal Jmp, because there is no "regular" one.
2095 * That's why Raise is no "fragile_op"!
2097 if (is_fragile_op(prev_cf_op)) {
2098 if (is_Proj(cf_pred)) {
2099 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
2100 /* the regular control flow, NO exception */
2103 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
2106 /* Hmm, exception but not a Proj? */
2107 assert(!"unexpected condition: fragile op without a proj");
2111 } /* is_exception_flow */
2114 * Computes the predecessors for the real phi node, and then
2115 * allocates and returns this node. The routine called to allocate the
2116 * node might optimize it away and return a real value.
2117 * This function must be called with an in-array of proper size.
 * @param block  block the Phi belongs to
 * @param pos    value number of the variable being merged
 * @param mode   mode of the value
 * @param nin    scratch array of length ins for the predecessors
 * @param ins    number of control-flow predecessors of block
2120 phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
2121 ir_node *prevBlock, *res, *phi0, *phi0_all;
2124 /* If this block has no value at pos create a Phi0 and remember it
2125 in graph_arr to break recursions.
2126 Else we may not set graph_arr as a later value is remembered there. */
2128 if (block->attr.block.graph_arr[pos] == NULL) {
2129 ir_graph *irg = current_ir_graph;
2131 if (block == get_irg_start_block(irg)) {
2132 /* Collapsing to Bad tarvals is no good idea.
2133 So we call a user-supplied routine here that deals with this case as
2134 appropriate for the given language. Sorrily the only help we can give
2135 here is the position.
2137 Even if all variables are defined before use, it can happen that
2138 we get to the start block, if a Cond has been replaced by a tuple
2139 (bad, jmp). In this case we call the function needlessly, eventually
2140 generating a non-existent error.
2141 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2144 if (default_initialize_local_variable != NULL) {
2145 ir_node *rem = get_cur_block();
2147 set_cur_block(block);
2148 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
2152 block->attr.block.graph_arr[pos] = new_Unknown(mode);
2153 /* We don't need to care about exception ops in the start block.
2154 There are none by definition. */
2155 return block->attr.block.graph_arr[pos];
/* Not the start block: break recursion with a Phi0 placeholder. */
2157 phi0 = new_rd_Phi0(irg, block, mode);
2158 block->attr.block.graph_arr[pos] = phi0;
2159 #if PRECISE_EXC_CONTEXT
2160 if (get_opt_precise_exc_context()) {
2161 /* Set graph_arr for fragile ops. Also here we should break recursion.
2162 We could choose a cyclic path through an cfop. But the recursion would
2163 break at some point. */
2164 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2170 /* This loop goes to all predecessor blocks of the block the Phi node
2171 is in and there finds the operands of the Phi node by calling
2172 get_r_value_internal. */
2173 for (i = 1; i <= ins; ++i) {
2174 ir_node *cf_pred = block->in[i];
2175 ir_node *prevCfOp = skip_Proj(cf_pred);
2177 if (is_Bad(prevCfOp)) {
2178 /* In case a Cond has been optimized we would get right to the start block
2179 with an invalid definition. */
2180 nin[i-1] = new_Bad();
2183 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
2185 if (!is_Bad(prevBlock)) {
2186 #if PRECISE_EXC_CONTEXT
2187 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
2188 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
2189 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
2192 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
2194 nin[i-1] = new_Bad();
2198 /* We want to pass the Phi0 node to the constructor: this finds additional
2199 optimization possibilities.
2200 The Phi0 node either is allocated in this function, or it comes from
2201 a former call to get_r_value_internal(). In this case we may not yet
2202 exchange phi0, as this is done in mature_immBlock(). */
2204 phi0_all = block->attr.block.graph_arr[pos];
2205 if (! is_Phi0(phi0_all) ||
2206 get_irn_arity(phi0_all) != 0 ||
2207 get_nodes_block(phi0_all) != block)
2213 /* After collecting all predecessors into the array nin a new Phi node
2214 with these predecessors is created. This constructor contains an
2215 optimization: If all predecessors of the Phi node are identical it
2216 returns the only operand instead of a new Phi node. */
2217 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
2219 /* In case we allocated a Phi0 node at the beginning of this procedure,
2220 we need to exchange this Phi0 with the real Phi. */
2222 exchange(phi0, res);
2223 block->attr.block.graph_arr[pos] = res;
2224 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2225 only an optimization. */
2232 * This function returns the last definition of a value. In case
2233 * this value was last defined in a previous block, Phi nodes are
2234 * inserted. If the part of the firm graph containing the definition
2235 * is not yet constructed, a dummy Phi node is returned.
2237 * @param block the current block
2238 * @param pos the value number of the value searched
2239 * @param mode the mode of this value (needed for Phi construction)
2242 get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
2244 /* There are 4 cases to treat.
2246 1. The block is not mature and we visit it the first time. We can not
2247 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2248 predecessors is returned. This node is added to the linked list (block
2249 attribute "phis") of the containing block to be completed when this block is
2250 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2253 2. The value is already known in this block, graph_arr[pos] is set and we
2254 visit the block the first time. We can return the value without
2255 creating any new nodes.
2257 3. The block is mature and we visit it the first time. A Phi node needs
2258 to be created (phi_merge). If the Phi is not needed, as all it's
2259 operands are the same value reaching the block through different
2260 paths, it's optimized away and the value itself is returned.
2262 4. The block is mature, and we visit it the second time. Now two
2263 subcases are possible:
2264 * The value was computed completely the last time we were here. This
2265 is the case if there is no loop. We can return the proper value.
2266 * The recursion that visited this node and set the flag did not
2267 return yet. We are computing a value in a loop and need to
2268 break the recursion. This case only happens if we visited
2269 the same block with phi_merge before, which inserted a Phi0.
2270 So we return the Phi0.
2273 /* case 4 -- already visited. */
2274 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2275 /* As phi_merge allocates a Phi0 this value is always defined. Here
2276 is the critical difference of the two algorithms. */
2277 assert(block->attr.block.graph_arr[pos]);
2278 return block->attr.block.graph_arr[pos];
2281 /* visited the first time */
2282 set_irn_visited(block, get_irg_visited(current_ir_graph));
2284 /* Get the local valid value */
2285 res = block->attr.block.graph_arr[pos];
2287 /* case 2 -- If the value is actually computed, return it. */
2291 if (block->attr.block.is_matured) { /* case 3 */
2293 /* The Phi has the same amount of ins as the corresponding block. */
2294 int ins = get_irn_arity(block);
2296 NEW_ARR_A(ir_node *, nin, ins);
2298 /* Phi merge collects the predecessors and then creates a node. */
2299 res = phi_merge(block, pos, mode, nin, ins);
2301 } else { /* case 1 */
2302 /* The block is not mature, we don't know how many in's are needed. A Phi
2303 with zero predecessors is created. Such a Phi node is called Phi0
2304 node. The Phi0 is then added to the list of Phi0 nodes in this block
2305 to be matured by mature_immBlock later.
2306 The Phi0 has to remember the pos of it's internal value. If the real
2307 Phi is computed, pos is used to update the array with the local
2309 res = new_rd_Phi0(current_ir_graph, block, mode);
2310 res->attr.phi.u.pos = pos;
2311 res->attr.phi.next = block->attr.block.phis;
2312 block->attr.block.phis = res;
2315 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
2317 /* The local valid value is available now. */
2318 block->attr.block.graph_arr[pos] = res;
2321 } /* get_r_value_internal */
2323 /* ************************************************************************** */
2326 * Finalize a Block node, when all control flows are known.
2327 * Acceptable parameters are only Block nodes.
 * Matures the block: fixes its backedge array, turns every queued Phi0 on
 * the block's phi list into a real Phi via phi_merge(), marks the block
 * matured and finally optimizes it in place.
2330 mature_immBlock(ir_node *block) {
2335 assert(is_Block(block));
2336 if (!get_Block_matured(block)) {
2337 ir_graph *irg = current_ir_graph;
/* in[0] is the block itself, hence the -1. */
2339 ins = ARR_LEN(block->in) - 1;
2340 /* Fix block parameters */
2341 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
2343 /* An array for building the Phi nodes. */
2344 NEW_ARR_A(ir_node *, nin, ins);
2346 /* Traverse a chain of Phi nodes attached to this block and mature
2348 for (n = block->attr.block.phis; n; n = next) {
2349 inc_irg_visited(irg);
2350 next = n->attr.phi.next;
2351 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
2354 block->attr.block.is_matured = 1;
2356 /* Now, as the block is a finished Firm node, we can optimize it.
2357 Since other nodes have been allocated since the block was created
2358 we can not free the node on the obstack. Therefore we have to call
2359 optimize_in_place().
2360 Unfortunately the optimization does not change a lot, as all allocated
2361 nodes refer to the unoptimized node.
2362 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
2363 block = optimize_in_place_2(block);
2364 IRN_VRFY_IRG(block, irg);
2366 } /* mature_immBlock */
/* ------------------------------------------------------------------------
   The "d" constructor family: build nodes in the CURRENT block of the
   current graph by delegating to the block-based new_bd_* constructors.
   ------------------------------------------------------------------------ */
2369 new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
2370 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2374 new_d_Const(dbg_info *db, tarval *con) {
2375 return new_bd_Const(db, con);
2379 new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
2380 return new_bd_Const_long(db, mode, value);
2381 } /* new_d_Const_long */
2384 new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
2385 return new_bd_Const_type(db, con, tp);
2386 } /* new_d_Const_type */
2391 new_d_Id(dbg_info *db, ir_node *val, ir_mode *mode) {
2392 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2396 new_d_Proj(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2397 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* Note: mutates the Cond `arg` (marks it fragmentary and records the
   default projection number) before creating the Proj. */
2402 new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
2404 assert(arg->op == op_Cond);
2405 arg->attr.cond.kind = fragmentary;
2406 arg->attr.cond.default_proj = max_proj;
2407 res = new_d_Proj(db, arg, mode_X, max_proj);
2409 } /* new_d_defaultProj */
/* Conv vs strictConv differ only in the trailing strict flag (0 vs 1). */
2413 new_d_Conv(dbg_info *db, ir_node *op, ir_mode *mode) {
2414 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
2418 new_d_strictConv(dbg_info *db, ir_node *op, ir_mode *mode) {
2419 return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
2420 } /* new_d_strictConv */
2423 new_d_Cast(dbg_info *db, ir_node *op, ir_type *to_tp) {
2424 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2428 new_d_Tuple(dbg_info *db, int arity, ir_node **in) {
2429 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2440 * Allocate a frag array for a node if the current graph state is phase_building.
2442 * @param irn the node for which the frag array should be allocated
2443 * @param op the opcode of the (original) node, if does not match opcode of irn,
2445 * @param frag_store the address of the frag store in irn attributes, if this
2446 * address contains a value != NULL, does nothing
2448 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
2449 if (get_opt_precise_exc_context()) {
/* Allocate only while constructing: the node may already have been
   optimized into a different opcode (then no frag array is wanted), and
   CSE may have returned a node whose frag array is already set. */
2450 if ((current_ir_graph->phase_state == phase_building) &&
2451 (get_irn_op(irn) == op) && /* Could be optimized away. */
2452 !*frag_store) /* Could be a cse where the arr is already set. */ {
2453 *frag_store = new_frag_arr(irn);
2456 } /* firm_alloc_frag_arr */
/* --- Constructors for nodes that may raise exceptions (division family,
   Call, Load, Store): after building the node they attach a frag array so
   precise exception contexts can be tracked (PRECISE_EXC_CONTEXT). --- */
2460 new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2462 res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2463 #if PRECISE_EXC_CONTEXT
2464 firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
2471 new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2473 res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2474 #if PRECISE_EXC_CONTEXT
2475 firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
2479 } /* new_d_DivMod */
2482 new_d_Div(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2484 res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2485 #if PRECISE_EXC_CONTEXT
2486 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2493 new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2495 res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2496 #if PRECISE_EXC_CONTEXT
/* NOTE(review): op_Div (not a distinct op_DivRL) is passed here —
   presumably new_bd_DivRL constructs an op_Div node; verify against
   new_bd_DivRL before changing. */
2497 firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
2504 new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
2506 res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
2507 #if PRECISE_EXC_CONTEXT
2508 firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
/* --- Exception-free compare / control-flow constructors. --- */
2527 new_d_Cmp(dbg_info *db, ir_node *op1, ir_node *op2) {
2528 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2532 new_d_Jmp(dbg_info *db) {
2533 return new_bd_Jmp(db, current_ir_graph->current_block);
2537 new_d_IJmp(dbg_info *db, ir_node *tgt) {
2538 return new_bd_IJmp(db, current_ir_graph->current_block, tgt);
2542 new_d_Cond(dbg_info *db, ir_node *c) {
2543 return new_bd_Cond(db, current_ir_graph->current_block, c);
2547 new_d_Call(dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2550 res = new_bd_Call(db, current_ir_graph->current_block,
2551 store, callee, arity, in, tp);
2552 #if PRECISE_EXC_CONTEXT
2553 firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
2560 new_d_Builtin(dbg_info *db, ir_node *store, int arity, ir_node **in,
2561 ir_builtin_kind kind, ir_type *tp) {
/* NOTE(review): arguments are forwarded as (..., tp, kind) — looks like
   new_bd_Builtin takes the type before the kind; confirm its signature. */
2562 return new_bd_Builtin(db, current_ir_graph->current_block, store, arity, in, tp, kind);
2563 } /* new_d_Builtin */
2566 new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
2567 return new_bd_Return(db, current_ir_graph->current_block,
2569 } /* new_d_Return */
2572 new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
2574 res = new_bd_Load(db, current_ir_graph->current_block,
2575 store, addr, mode, flags);
2576 #if PRECISE_EXC_CONTEXT
2577 firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
2584 new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
2586 res = new_bd_Store(db, current_ir_graph->current_block,
2587 store, addr, val, flags);
2588 #if PRECISE_EXC_CONTEXT
2589 firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
/* Alloc may raise an exception (out of memory) — gets a frag array. */
2596 new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2597 ir_where_alloc where) {
2599 res = new_bd_Alloc(db, current_ir_graph->current_block,
2600 store, size, alloc_type, where);
2601 #if PRECISE_EXC_CONTEXT
2602 firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
2609 new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
2610 ir_node *size, ir_type *free_type, ir_where_alloc where) {
2611 return new_bd_Free(db, current_ir_graph->current_block,
2612 store, ptr, size, free_type, where);
/* Sel with no array indices (simple member access). */
2617 new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
2618 /* GL: objptr was called frame before. Frame was a bad choice for the name
2619 as the operand could as well be a pointer to a dynamic object. */
2621 return new_bd_Sel(db, current_ir_graph->current_block,
2622 store, objptr, 0, NULL, ent);
2623 } /* new_d_simpleSel */
2627 new_d_Sel(dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, ir_entity *sel) {
2628 return new_bd_Sel(db, current_ir_graph->current_block,
2629 store, objptr, n_index, index, sel);
/* SymConst nodes are placed in the start block, not the current block. */
2634 new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
2635 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2637 } /* new_d_SymConst_type */
2640 new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
2641 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
2642 value, kind, firm_unknown_type);
2643 } /* new_d_SymConst */
2646 new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
2647 return new_rd_Sync(db, current_ir_graph, current_ir_graph->current_block, arity, in);
2652 new_d_Confirm(dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
2653 return new_bd_Confirm(db, current_ir_graph->current_block,
2655 } /* new_d_Confirm */
2658 new_d_Unknown(ir_mode *m) {
2659 return new_bd_Unknown(m);
2660 } /* new_d_Unknown */
/* --- Interprocedural-view constructors. --- */
2664 new_d_CallBegin(dbg_info *db, ir_node *call) {
2665 return new_bd_CallBegin(db, current_ir_graph->current_block, call);
2666 } /* new_d_CallBegin */
2669 new_d_EndReg(dbg_info *db) {
2670 return new_bd_EndReg(db, current_ir_graph->current_block);
2671 } /* new_d_EndReg */
2674 new_d_EndExcept(dbg_info *db) {
2675 return new_bd_EndExcept(db, current_ir_graph->current_block);
2676 } /* new_d_EndExcept */
2680 new_d_Break(dbg_info *db) {
2681 return new_bd_Break(db, current_ir_graph->current_block);
2685 new_d_Filter(dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
2686 return new_bd_Filter(db, current_ir_graph->current_block,
2688 } /* new_d_Filter */
2691 new_d_Mux(dbg_info *db, ir_node *sel, ir_node *ir_false,
2692 ir_node *ir_true, ir_mode *mode) {
2693 return new_bd_Mux(db, current_ir_graph->current_block,
2694 sel, ir_false, ir_true, mode);
/* CopyB may raise an exception — gets a frag array. */
2697 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2698 ir_node *dst, ir_node *src, ir_type *data_type) {
2700 res = new_bd_CopyB(db, current_ir_graph->current_block,
2701 store, dst, src, data_type);
2702 #if PRECISE_EXC_CONTEXT
2703 firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2709 new_d_InstOf(dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
2710 return new_bd_InstOf(db, current_ir_graph->current_block,
2711 store, objptr, type);
2712 } /* new_d_InstOf */
2715 new_d_Raise(dbg_info *db, ir_node *store, ir_node *obj) {
2716 return new_bd_Raise(db, current_ir_graph->current_block, store, obj);
/* Bound (array bounds check) may raise an exception — gets a frag array. */
2719 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2720 ir_node *idx, ir_node *lower, ir_node *upper) {
2722 res = new_bd_Bound(db, current_ir_graph->current_block,
2723 store, idx, lower, upper);
2724 #if PRECISE_EXC_CONTEXT
2725 firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2731 new_d_Pin(dbg_info *db, ir_node *node) {
2732 return new_bd_Pin(db, current_ir_graph->current_block, node);
2737 new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
2738 int n_outs, ir_asm_constraint *outputs,
2739 int n_clobber, ident *clobber[], ident *asm_text) {
2740 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
2743 /* ********************************************************************* */
2744 /* Comfortable interface with automatic Phi node construction. */
2745 /* (Uses also constructors of the explicit-block interface, except new_Block.) */
2746 /* ********************************************************************* */
2748 /* Block construction */
2749 /* immature Block without predecessors */
/* Creates an immature block: a block whose predecessors are not yet known.
   Predecessors are appended with add_immBlock_pred(); the block must be
   finished with mature_immBlock() before the graph is complete. */
2751 new_d_immBlock(dbg_info *db) {
2754 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2755 /* creates a new dynamic in-array as length of in is -1 */
2756 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2758 /* macroblock head */
/* Initialize the block attributes: not yet matured, alive, and acting as
   its own macroblock head. */
2761 res->attr.block.is_matured = 0;
2762 res->attr.block.is_dead = 0;
2763 res->attr.block.is_mb_head = 1;
2764 res->attr.block.has_label = 0;
2765 res->attr.block.irg = current_ir_graph;
2766 res->attr.block.backedge = NULL;
2767 res->attr.block.in_cg = NULL;
2768 res->attr.block.cg_backedge = NULL;
2769 res->attr.block.extblk = NULL;
2770 res->attr.block.region = NULL;
2771 res->attr.block.mb_depth = 0;
2772 res->attr.block.label = 0;
2774 set_Block_block_visited(res, 0);
2776 /* Create and initialize array for Phi-node construction. */
/* graph_arr holds one slot per local variable (n_loc); slot 0 is the
   memory state. All slots start out empty (NULL). */
2777 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
2778 current_ir_graph->n_loc)
2779 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2781 /* Immature block may not be optimized! */
2782 IRN_VRFY_IRG(res, current_ir_graph);
2785 } /* new_d_immBlock */
/* Convenience wrapper: immature block without debug info. */
2788 new_immBlock(void) {
2789 return new_d_immBlock(NULL);
2790 } /* new_immBlock */
2792 /* immature PartBlock with its predecessors */
/* A part block belongs to the same macroblock as the block containing
   pred_jmp: it shares that block's macroblock head and is one level
   deeper in macroblock nesting. */
2794 new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
2795 ir_node *res = new_d_immBlock(db);
2796 ir_node *blk = get_nodes_block(pred_jmp);
/* in[0] of a block is its macroblock head; inherit it from pred's block. */
2798 res->in[0] = blk->in[0];
2799 assert(res->in[0] != NULL);
2800 add_immBlock_pred(res, pred_jmp);
2802 res->attr.block.is_mb_head = 0;
2803 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
2806 } /* new_d_immPartBlock */
2809 new_immPartBlock(ir_node *pred_jmp) {
2810 return new_d_immPartBlock(NULL, pred_jmp);
2811 } /* new_immPartBlock */
2813 /* add an edge to a jmp/control flow node */
/* Appends jmp as a new control-flow predecessor of an immature block.
   Only legal before mature_immBlock() and only on macroblock heads. */
2815 add_immBlock_pred(ir_node *block, ir_node *jmp) {
/* in[0] is the macroblock head, so the predecessor count is length-1. */
2816 int n = ARR_LEN(block->in) - 1;
2818 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
2819 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
2820 assert(is_ir_node(jmp));
2822 ARR_APP1(ir_node *, block->in, jmp);
2824 hook_set_irn_n(block, n, jmp, NULL);
2825 } /* add_immBlock_pred */
2827 /* changing the current block */
2829 set_cur_block(ir_node *target) {
2830 current_ir_graph->current_block = target;
2831 } /* set_cur_block */
2833 /* ************************ */
2834 /* parameter administration */
2836 /* get a value from the parameter array from the current block by its index */
/* Reads local variable 'pos' in the current block, triggering automatic
   Phi construction if the value must be merged from predecessors. */
2838 get_d_value(dbg_info *db, int pos, ir_mode *mode) {
2839 ir_graph *irg = current_ir_graph;
2840 assert(get_irg_phase_state(irg) == phase_building);
2841 inc_irg_visited(irg);
/* Slot 0 is reserved for the memory state, hence pos + 1. */
2846 return get_r_value_internal(irg->current_block, pos + 1, mode);
2849 /* get a value from the parameter array from the current block by its index */
2851 get_value(int pos, ir_mode *mode) {
2852 return get_d_value(NULL, pos, mode);
2855 /* set a value at position pos in the parameter array from the current block */
2857 set_value(int pos, ir_node *value) {
2858 ir_graph *irg = current_ir_graph;
2859 assert(get_irg_phase_state(irg) == phase_building);
2861 assert(pos+1 < irg->n_loc);
2862 assert(is_ir_node(value));
2863 irg->current_block->attr.block.graph_arr[pos + 1] = value;
2866 /* Find the value number for a node in the current block.*/
/* Linear search over the current block's value slots; index 0 (memory)
   is deliberately excluded. */
2868 find_value(ir_node *value) {
2870 ir_node *bl = current_ir_graph->current_block;
2872 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
2873 if (bl->attr.block.graph_arr[i] == value)
2878 /* get the current store */
/* Returns the current memory state (slot 0) of the current block,
   constructing Phi nodes for memory where necessary. */
2881 ir_graph *irg = current_ir_graph;
2883 assert(get_irg_phase_state(irg) == phase_building);
2884 /* GL: one could call get_value instead */
2885 inc_irg_visited(irg);
2886 return get_r_value_internal(irg->current_block, 0, mode_M);
2889 /* set the current store: handles automatic Sync construction for Load nodes */
2891 set_store(ir_node *store) {
2892 ir_node *load, *pload, *pred, *in[2];
2894 assert(get_irg_phase_state(current_ir_graph) == phase_building);
2895 /* Beware: due to dead code elimination, a store might become a Bad node even in
2896 the construction phase. */
2897 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
2899 if (get_opt_auto_create_sync()) {
2900 /* handle non-volatile Load nodes by automatically creating Sync's */
2901 load = skip_Proj(store);
2902 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
2903 pred = get_Load_mem(load);
2905 if (is_Sync(pred)) {
2906 /* a Load after a Sync: move it up */
2907 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
2909 set_Load_mem(load, get_memop_mem(mem));
2910 add_Sync_pred(pred, store);
2913 pload = skip_Proj(pred);
2914 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
2915 /* a Load after a Load: create a new Sync */
2916 set_Load_mem(load, get_Load_mem(pload));
/* The new Sync joins both loads' memory results into one state. */
2920 store = new_Sync(2, in);
/* Finally install the (possibly rewritten) store as the block's memory state. */
2925 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Pins ka to the End node so dead-code elimination cannot remove it. */
2929 keep_alive(ir_node *ka) {
2930 add_End_keepalive(get_irg_end(current_ir_graph), ka);
2933 /* --- Useful access routines --- */
2934 /* Returns the current block of the current graph. To set the current
2935 block use set_cur_block. */
2936 ir_node *get_cur_block(void) {
2937 return get_irg_current_block(current_ir_graph);
2938 } /* get_cur_block */
2940 /* Returns the frame type of the current graph */
2941 ir_type *get_cur_frame_type(void) {
2942 return get_irg_frame_type(current_ir_graph);
2943 } /* get_cur_frame_type */
2946 /* ********************************************************************* */
2949 /* call once for each run of the library */
/* Installs the frontend callback used to initialize local variables that
   are read before being written. */
2951 firm_init_cons(uninitialized_local_variable_func_t *func) {
2952 default_initialize_local_variable = func;
2953 } /* firm_init_cons */
/* Finalizes construction of every graph in the program and moves the
   whole program to the high-level phase. */
2956 irp_finalize_cons(void) {
2958 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2959 irg_finalize_cons(get_irp_irg(i));
2961 irp->phase_state = phase_high;
2962 } /* irp_finalize_cons */
/* --- Plain constructors without debug info: thin wrappers that forward
   to the new_d_* variants with db == NULL. --- */
2965 ir_node *new_Block(int arity, ir_node **in) {
2966 return new_d_Block(NULL, arity, in);
2968 ir_node *new_Jmp(void) {
2969 return new_d_Jmp(NULL);
2971 ir_node *new_IJmp(ir_node *tgt) {
2972 return new_d_IJmp(NULL, tgt);
2974 ir_node *new_Cond(ir_node *c) {
2975 return new_d_Cond(NULL, c);
2977 ir_node *new_Return(ir_node *store, int arity, ir_node *in[]) {
2978 return new_d_Return(NULL, store, arity, in);
2981 ir_node *new_Start(void) {
2982 return new_d_Start(NULL);
2984 ir_node *new_End(void) {
2985 return new_d_End(NULL);
2987 ir_node *new_Const(tarval *con) {
2988 return new_d_Const(NULL, con);
2991 ir_node *new_Const_long(ir_mode *mode, long value) {
2992 return new_d_Const_long(NULL, mode, value);
2995 ir_node *new_Const_type(tarval *con, ir_type *tp) {
2996 return new_d_Const_type(NULL, con, tp);
2999 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
3000 return new_d_SymConst_type(NULL, mode, value, kind, type);
3002 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
3003 return new_d_SymConst(NULL, mode, value, kind);
3005 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
3006 return new_d_simpleSel(NULL, store, objptr, ent);
3009 ir_node *new_Sel(ir_node *store, ir_node *objptr, int arity, ir_node **in,
3011 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3013 ir_node *new_Call(ir_node *store, ir_node *callee, int arity, ir_node **in,
3015 return new_d_Call(NULL, store, callee, arity, in, tp);
/* Builtin without debug info.
   Fix: new_d_Builtin takes (db, store, arity, in, kind, tp); the old call
   passed (kind, arity, in, ...), putting the enum in the int slot and the
   int where the ir_node** belongs. Forward in the declared order. */
3017 ir_node *new_Builtin(ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
3019 return new_d_Builtin(NULL, store, arity, in, kind, tp);
/* --- Arithmetic, logic, memory and miscellaneous constructors without
   debug info: all forward to their new_d_* counterpart with db == NULL. --- */
3021 ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
3022 return new_d_Add(NULL, op1, op2, mode);
3024 ir_node *new_Sub(ir_node *op1, ir_node *op2, ir_mode *mode) {
3025 return new_d_Sub(NULL, op1, op2, mode);
3027 ir_node *new_Minus(ir_node *op, ir_mode *mode) {
3028 return new_d_Minus(NULL, op, mode);
3030 ir_node *new_Mul(ir_node *op1, ir_node *op2, ir_mode *mode) {
3031 return new_d_Mul(NULL, op1, op2, mode);
3033 ir_node *new_Mulh(ir_node *op1, ir_node *op2, ir_mode *mode) {
3034 return new_d_Mulh(NULL, op1, op2, mode);
3036 ir_node *new_Quot(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3037 return new_d_Quot(NULL, memop, op1, op2, mode, state);
3039 ir_node *new_DivMod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3040 return new_d_DivMod(NULL, memop, op1, op2, mode, state);
3042 ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3043 return new_d_Div(NULL, memop, op1, op2, mode, state);
3045 ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3046 return new_d_DivRL(NULL, memop, op1, op2, mode, state);
3048 ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
3049 return new_d_Mod(NULL, memop, op1, op2, mode, state);
3051 ir_node *new_Abs(ir_node *op, ir_mode *mode) {
3052 return new_d_Abs(NULL, op, mode);
3054 ir_node *new_And(ir_node *op1, ir_node *op2, ir_mode *mode) {
3055 return new_d_And(NULL, op1, op2, mode);
3057 ir_node *new_Or(ir_node *op1, ir_node *op2, ir_mode *mode) {
3058 return new_d_Or(NULL, op1, op2, mode);
3060 ir_node *new_Eor(ir_node *op1, ir_node *op2, ir_mode *mode) {
3061 return new_d_Eor(NULL, op1, op2, mode);
3063 ir_node *new_Not(ir_node *op, ir_mode *mode) {
3064 return new_d_Not(NULL, op, mode);
3066 ir_node *new_Shl(ir_node *op, ir_node *k, ir_mode *mode) {
3067 return new_d_Shl(NULL, op, k, mode);
3069 ir_node *new_Shr(ir_node *op, ir_node *k, ir_mode *mode) {
3070 return new_d_Shr(NULL, op, k, mode);
3072 ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
3073 return new_d_Shrs(NULL, op, k, mode);
3075 ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
3076 return new_d_Rotl(NULL, op, k, mode);
3078 ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
3079 return new_d_Carry(NULL, op1, op2, mode);
3081 ir_node *new_Borrow(ir_node *op1, ir_node *op2, ir_mode *mode) {
3082 return new_d_Borrow(NULL, op1, op2, mode);
3084 ir_node *new_Cmp(ir_node *op1, ir_node *op2) {
3085 return new_d_Cmp(NULL, op1, op2);
3087 ir_node *new_Conv(ir_node *op, ir_mode *mode) {
3088 return new_d_Conv(NULL, op, mode);
3090 ir_node *new_strictConv(ir_node *op, ir_mode *mode) {
3091 return new_d_strictConv(NULL, op, mode);
3094 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
3095 return new_d_Phi(NULL, arity, in, mode);
3098 ir_node *new_Cast(ir_node *op, ir_type *to_tp) {
3099 return new_d_Cast(NULL, op, to_tp);
3101 ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
3102 return new_d_Load(NULL, store, addr, mode, flags);
3104 ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
3105 return new_d_Store(NULL, store, addr, val, flags);
3107 ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
3108 ir_where_alloc where) {
3109 return new_d_Alloc(NULL, store, size, alloc_type, where);
3111 ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
3112 ir_type *free_type, ir_where_alloc where) {
3113 return new_d_Free(NULL, store, ptr, size, free_type, where);
3116 ir_node *new_Sync(int arity, ir_node *in[]) {
3117 return new_d_Sync(NULL, arity, in);
3120 ir_node *new_Proj(ir_node *arg, ir_mode *mode, long proj) {
3121 return new_d_Proj(NULL, arg, mode, proj);
3124 ir_node *new_defaultProj(ir_node *arg, long max_proj) {
3125 return new_d_defaultProj(NULL, arg, max_proj);
3128 ir_node *new_Tuple(int arity, ir_node **in) {
3129 return new_d_Tuple(NULL, arity, in);
3131 ir_node *new_Id(ir_node *val, ir_mode *mode) {
3132 return new_d_Id(NULL, val, mode);
/* Bad and NoMem are singletons per graph; return the existing node. */
3135 ir_node *new_Bad(void) {
3136 return get_irg_bad(current_ir_graph);
3139 ir_node *new_Confirm(ir_node *val, ir_node *bound, pn_Cmp cmp) {
3140 return new_d_Confirm(NULL, val, bound, cmp);
3142 ir_node *new_Unknown(ir_mode *m) {
3143 return new_d_Unknown(m);
3146 ir_node *new_CallBegin(ir_node *callee) {
3147 return new_d_CallBegin(NULL, callee);
3149 ir_node *new_EndReg(void) {
3150 return new_d_EndReg(NULL);
3152 ir_node *new_EndExcept(void) {
3153 return new_d_EndExcept(NULL);
3156 ir_node *new_Break(void) {
3157 return new_d_Break(NULL);
3159 ir_node *new_Filter(ir_node *arg, ir_mode *mode, long proj) {
3160 return new_d_Filter(NULL, arg, mode, proj);
3163 ir_node *new_NoMem(void) {
3164 return get_irg_no_mem(current_ir_graph);
3167 ir_node *new_Mux(ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3168 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3170 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3171 return new_d_CopyB(NULL, store, dst, src, data_type);
3173 ir_node *new_InstOf(ir_node *store, ir_node *objptr, ir_type *ent) {
3174 return new_d_InstOf(NULL, store, objptr, ent);
3176 ir_node *new_Raise(ir_node *store, ir_node *obj) {
3177 return new_d_Raise(NULL, store, obj);
3179 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3180 return new_d_Bound(NULL, store, idx, lower, upper);
3182 ir_node *new_Pin(ir_node *node) {
3183 return new_d_Pin(NULL, node);
/* Dummy nodes are placeholders placed in the start block. */
3186 ir_node *new_rd_Dummy(dbg_info *db, ir_graph *irg, ir_mode *mode) {
3187 return new_ir_node(db, irg, get_irg_start_block(irg), op_Dummy, mode, 0, NULL);
3189 ir_node *new_r_Dummy(ir_graph *irg, ir_mode *mode) {
3190 return new_rd_Dummy(NULL, irg, mode);
3192 ir_node *new_Dummy(ir_mode *mode) {
3193 return new_r_Dummy(current_ir_graph, mode);
3195 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
3196 int n_outs, ir_asm_constraint *outputs,
3197 int n_clobber, ident *clobber[], ident *asm_text) {
3198 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, asm_text);
3201 /* create a new anchor node */
3202 ir_node *new_Anchor(ir_graph *irg) {
3203 ir_node *in[anchor_last];
3204 memset(in, 0, sizeof(in));
3205 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);