 * File name:   ir/ir/ircons.c
 * Purpose:     Various irnode constructors.  Automatic construction
 *              of SSA representation.
 * Author:      Martin Trapp, Christian Schaefer
 * Modified by: Goetz Lindenmaier, Boris Boesler
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
#include "irgraph_t.h"
#include "firm_common_t.h"
#include "irbackedge_t.h"
#include "iredges_t.h"
#if USE_EXPLICIT_PHI_IN_STACK
/* A stack needed for the automatic Phi node construction in constructor
   Phi_in.  Redefinition in irgraph.c!! */
typedef struct Phi_in_stack Phi_in_stack;
/* when we need verifying */
#ifdef NDEBUG
# define IRN_VRFY_IRG(res, irg)
#else
# define IRN_VRFY_IRG(res, irg)  irn_vrfy_irg(res, irg)
#endif
/**
 * Language dependent variable initialization callback.
 */
static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
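/* A minimal sketch of such a callback (hypothetical, illustration only):
   a frontend could zero-initialize every local variable that is read
   before being written.  The signature follows the call site in
   phi_merge() further down in this file. */
#if 0
static ir_node *zero_initialize_local(ir_graph *irg, ir_mode *mode, int pos) {
  (void)pos;  /* number of the local variable; unused in this sketch */
  /* place the constant in the start block, like other mode constants */
  return new_r_Const(irg, get_irg_start_block(irg), mode, get_mode_null(mode));
}
#endif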
/* creates a bd constructor for a binop */
#define NEW_BD_BINOP(instr)                                       \
new_bd_##instr (dbg_info *db, ir_node *block,                     \
       ir_node *op1, ir_node *op2, ir_mode *mode)                 \
  ir_graph *irg = current_ir_graph;                               \
  res = new_ir_node(db, irg, block, op_##instr, mode, 2, in);     \
  res = optimize_node(res);                                       \
  IRN_VRFY_IRG(res, irg);                                         \

/* creates a bd constructor for an unop */
#define NEW_BD_UNOP(instr)                                        \
new_bd_##instr (dbg_info *db, ir_node *block,                     \
              ir_node *op, ir_mode *mode)                         \
  ir_graph *irg = current_ir_graph;                               \
  res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op);    \
  res = optimize_node(res);                                       \
  IRN_VRFY_IRG(res, irg);                                         \

/* creates a bd constructor for a divop */
#define NEW_BD_DIVOP(instr)                                       \
new_bd_##instr (dbg_info *db, ir_node *block,                     \
            ir_node *memop, ir_node *op1, ir_node *op2)           \
  ir_graph *irg = current_ir_graph;                               \
  res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in);   \
  res = optimize_node(res);                                       \
  IRN_VRFY_IRG(res, irg);                                         \
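/* For example, NEW_BD_BINOP(Add) defines a constructor

     new_bd_Add(dbg_info *db, ir_node *block,
                ir_node *op1, ir_node *op2, ir_mode *mode)

   that packs op1/op2 into an in[] array, allocates the node with
   new_ir_node(), runs optimize_node() and the verifier on it, and
   returns the (possibly optimized) result. */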
/* creates a rd constructor for a binop */
#define NEW_RD_BINOP(instr)                                       \
new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block,      \
       ir_node *op1, ir_node *op2, ir_mode *mode)                 \
  ir_graph *rem = current_ir_graph;                               \
  current_ir_graph = irg;                                         \
  res = new_bd_##instr(db, block, op1, op2, mode);                \
  current_ir_graph = rem;                                         \

/* creates a rd constructor for an unop */
#define NEW_RD_UNOP(instr)                                        \
new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block,      \
              ir_node *op, ir_mode *mode)                         \
  ir_graph *rem = current_ir_graph;                               \
  current_ir_graph = irg;                                         \
  res = new_bd_##instr(db, block, op, mode);                      \
  current_ir_graph = rem;                                         \

/* creates a rd constructor for a divop */
#define NEW_RD_DIVOP(instr)                                       \
new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block,      \
            ir_node *memop, ir_node *op1, ir_node *op2)           \
  ir_graph *rem = current_ir_graph;                               \
  current_ir_graph = irg;                                         \
  res = new_bd_##instr(db, block, memop, op1, op2);               \
  current_ir_graph = rem;                                         \
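/* Likewise, NEW_RD_BINOP(Add) defines new_rd_Add(db, irg, block, op1, op2,
   mode), which temporarily makes irg the current_ir_graph, delegates to the
   corresponding new_bd_ constructor, and restores the previous graph. */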
/* creates a d constructor for a binop */
#define NEW_D_BINOP(instr)                                                     \
new_d_##instr (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) {     \
  return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \

/* creates a d constructor for an unop */
#define NEW_D_UNOP(instr)                                                      \
new_d_##instr (dbg_info *db, ir_node *op, ir_mode *mode) {                     \
  return new_bd_##instr(db, current_ir_graph->current_block, op, mode);        \
/* Constructs a Block with a fixed number of predecessors.
   Does not set current_block.  Cannot be used with automatic
   Phi node construction. */
new_bd_Block (dbg_info *db, int arity, ir_node **in)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
  set_Block_matured(res, 1);
  set_Block_block_visited(res, 0);
  /* res->attr.block.exc = exc_normal; */
  /* res->attr.block.handler_entry = 0; */
  res->attr.block.dead        = 0;
  res->attr.block.irg         = irg;
  res->attr.block.backedge    = new_backedge_arr(irg->obst, arity);
  res->attr.block.in_cg       = NULL;
  res->attr.block.cg_backedge = NULL;
  res->attr.block.extblk      = NULL;
  IRN_VRFY_IRG(res, irg);
new_bd_Start (dbg_info *db, ir_node *block)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
  /* res->attr.start.irg = irg; */
  IRN_VRFY_IRG(res, irg);

new_bd_End (dbg_info *db, ir_node *block)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
  IRN_VRFY_IRG(res, irg);
/* Creates a Phi node with all predecessors.  Calling this constructor
   is only allowed if the corresponding block is mature. */
new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
  ir_graph *irg = current_ir_graph;
  /* Don't assert that block matured: the use of this constructor is strongly ... */
  if ( get_Block_matured(block) )
    assert( get_irn_arity(block) == arity );
  res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
  for (i = arity-1; i >= 0; i--)
    if (get_irn_op(in[i]) == op_Unknown) {
  if (!has_unknown) res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);
  /* Memory Phis in endless loops must be kept alive.
     As we can't distinguish these easily we keep all of them alive. */
  if ((res->op == op_Phi) && (mode == mode_M))
    add_End_keepalive(get_irg_end(irg), res);
new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node (db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
  res->attr.con.tv = con;
  set_Const_type(res, tp);  /* Call method because of complex assertion. */
  res = optimize_node (res);
  assert(get_Const_type(res) == tp);
  IRN_VRFY_IRG(res, irg);

new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
  ir_graph *irg = current_ir_graph;
  return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);

new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
  ir_graph *irg = current_ir_graph;
  return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
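/* E.g. new_bd_Const_long(NULL, block, mode_Is, 42) is shorthand for
   building the tarval with new_tarval_from_long() and calling the
   Const constructor with it. */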
new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
             long proj)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
  res->attr.proj = proj;
  assert(get_Proj_pred(res));
  assert(get_nodes_block(get_Proj_pred(res)));
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
                    long max_proj)
  ir_graph *irg = current_ir_graph;
  assert(arg->op == op_Cond);
  arg->attr.cond.kind         = fragmentary;
  arg->attr.cond.default_proj = max_proj;
  res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode, int strict_flag)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
  res->attr.conv.strict = strict_flag;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
  ir_graph *irg = current_ir_graph;
  assert(is_atomic_type(to_tp));
  res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
  res->attr.cast.totype = to_tp;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);
new_bd_Cmp (dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_bd_Jmp (dbg_info *db, ir_node *block)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
  res = optimize_node (res);
  IRN_VRFY_IRG (res, irg);

new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
  res = optimize_node (res);
  IRN_VRFY_IRG (res, irg);
  if (get_irn_op(res) == op_IJmp) /* still an IJmp */

new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
  res->attr.cond.kind         = dense;
  res->attr.cond.default_proj = 0;
  res->attr.cond.pred         = COND_JMP_PRED_NONE;
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);
new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
             ir_node *callee, int arity, ir_node **in, ir_type *tp)
  ir_graph *irg = current_ir_graph;
  NEW_ARR_A(ir_node *, r_in, r_arity);
  memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
  res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
  assert((get_unknown_type() == tp) || is_Method_type(tp));
  set_Call_type(res, tp);
  res->attr.call.exc.pin_state = op_pin_state_pinned;
  res->attr.call.callee_arr    = NULL;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_bd_Return (dbg_info *db, ir_node *block,
               ir_node *store, int arity, ir_node **in)
  ir_graph *irg = current_ir_graph;
  NEW_ARR_A (ir_node *, r_in, r_arity);
  memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
  res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_bd_Load (dbg_info *db, ir_node *block,
             ir_node *store, ir_node *adr, ir_mode *mode)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
  res->attr.load.exc.pin_state = op_pin_state_pinned;
  res->attr.load.load_mode     = mode;
  res->attr.load.volatility    = volatility_non_volatile;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_bd_Store (dbg_info *db, ir_node *block,
              ir_node *store, ir_node *adr, ir_node *val)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
  res->attr.store.exc.pin_state = op_pin_state_pinned;
  res->attr.store.volatility    = volatility_non_volatile;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
              ir_node *size, ir_type *alloc_type, where_alloc where)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
  res->attr.alloc.exc.pin_state = op_pin_state_pinned;
  res->attr.alloc.where         = where;
  res->attr.alloc.type          = alloc_type;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
             ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
  res->attr.free.where = where;
  res->attr.free.type  = free_type;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
            int arity, ir_node **in, entity *ent)
  ir_graph *irg = current_ir_graph;
  assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
  NEW_ARR_A(ir_node *, r_in, r_arity);  /* uses alloca */
  memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
  /*
   * FIXME: Sels can select functions, which should be of mode mode_P_code.
   */
  res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
  res->attr.sel.ent = ent;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
                      symconst_kind symkind, ir_type *tp) {
  ir_graph *irg = current_ir_graph;
  if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
    mode = mode_P_data;   /* FIXME: can be mode_P_code */
  res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
  res->attr.symc.num = symkind;
  res->attr.symc.sym = value;
  res->attr.symc.tp  = tp;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
                 symconst_kind symkind)
  ir_graph *irg = current_ir_graph;
  ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
new_bd_Sync (dbg_info *db, ir_node *block)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
  /* no need to call optimize_node() here: Syncs are always created
     without predecessors */
  IRN_VRFY_IRG(res, irg);
new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
  ir_node  *in[2], *res;
  ir_graph *irg = current_ir_graph;
  res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
  res->attr.confirm_cmp = cmp;
  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

/* this function is often called with current_ir_graph unset */
new_bd_Unknown (ir_mode *m)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
  res = optimize_node(res);
new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
  ir_graph *irg = current_ir_graph;
  in[0] = get_Call_ptr(call);
  res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
  /* res->attr.callbegin.irg = irg; */
  res->attr.callbegin.call = call;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_bd_EndReg (dbg_info *db, ir_node *block)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
  set_irg_end_reg(irg, res);
  IRN_VRFY_IRG(res, irg);

new_bd_EndExcept (dbg_info *db, ir_node *block)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
  set_irg_end_except(irg, res);
  IRN_VRFY_IRG (res, irg);

new_bd_Break (dbg_info *db, ir_node *block)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
               long proj)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
  res->attr.filter.proj     = proj;
  res->attr.filter.in_cg    = NULL;
  res->attr.filter.backedge = NULL;
  assert(get_Proj_pred(res));
  assert(get_nodes_block(get_Proj_pred(res)));
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_bd_Mux (dbg_info *db, ir_node *block,
            ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_bd_Psi (dbg_info *db, ir_node *block,
            int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
  ir_graph *irg = current_ir_graph;
  NEW_ARR_A(ir_node *, in, 2 * arity + 1);
  for (i = 0; i < arity; ++i) {
    in[2 * i + 1] = vals[i];
  res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
new_bd_CopyB (dbg_info *db, ir_node *block,
              ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
  res->attr.copyb.exc.pin_state = op_pin_state_pinned;
  res->attr.copyb.data_type     = data_type;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
               ir_node *objptr, ir_type *type)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
  res->attr.instof.type = type;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);

new_bd_Bound (dbg_info *db, ir_node *block,
              ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
  ir_graph *irg = current_ir_graph;
  res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
  res->attr.bound.exc.pin_state = op_pin_state_pinned;
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
/* --------------------------------------------- */
/* private interfaces, for professional use only */
/* --------------------------------------------- */

/* Constructs a Block with a fixed number of predecessors.
   Does not set current_block.  Cannot be used with automatic
   Phi node construction. */
new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Block (db, arity, in);
  current_ir_graph = rem;

new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Start (db, block);
  current_ir_graph = rem;

new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_End (db, block);
  current_ir_graph = rem;

/* Creates a Phi node with all predecessors.  Calling this constructor
   is only allowed if the corresponding block is mature. */
new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Phi (db, block, arity, in, mode);
  current_ir_graph = rem;
new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Const_type (db, block, mode, con, tp);
  current_ir_graph = rem;

new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
  current_ir_graph = rem;

new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
  return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));

new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Id(db, block, val, mode);
  current_ir_graph = rem;
new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
             long proj)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Proj(db, block, arg, mode, proj);
  current_ir_graph = rem;

new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
                    long max_proj)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_defaultProj(db, block, arg, max_proj);
  current_ir_graph = rem;

new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Conv(db, block, op, mode, 0);
  current_ir_graph = rem;

new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Cast(db, block, op, to_tp);
  current_ir_graph = rem;

new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Tuple(db, block, arity, in);
  current_ir_graph = rem;
NEW_RD_DIVOP(DivMod)
NEW_RD_BINOP(Borrow)

new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
            ir_node *op1, ir_node *op2)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Cmp(db, block, op1, op2);
  current_ir_graph = rem;
new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Jmp(db, block);
  current_ir_graph = rem;

new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_IJmp(db, block, tgt);
  current_ir_graph = rem;

new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Cond(db, block, c);
  current_ir_graph = rem;

new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
             ir_node *callee, int arity, ir_node **in, ir_type *tp)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Call(db, block, store, callee, arity, in, tp);
  current_ir_graph = rem;
new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
               ir_node *store, int arity, ir_node **in)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Return(db, block, store, arity, in);
  current_ir_graph = rem;

new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
             ir_node *store, ir_node *adr, ir_mode *mode)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Load(db, block, store, adr, mode);
  current_ir_graph = rem;

new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
              ir_node *store, ir_node *adr, ir_node *val)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Store(db, block, store, adr, val);
  current_ir_graph = rem;

new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
              ir_node *size, ir_type *alloc_type, where_alloc where)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Alloc (db, block, store, size, alloc_type, where);
  current_ir_graph = rem;

new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
             ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Free(db, block, store, ptr, size, free_type, where);
  current_ir_graph = rem;
new_rd_simpleSel (dbg_info *db, ir_graph *irg, ir_node *block,
                  ir_node *store, ir_node *objptr, entity *ent)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
  current_ir_graph = rem;

new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
            int arity, ir_node **in, entity *ent)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
  current_ir_graph = rem;

new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
                      symconst_kind symkind, ir_type *tp)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_SymConst_type(db, block, value, symkind, tp);
  current_ir_graph = rem;

new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
                 symconst_kind symkind)
  ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, ir_type *tp)
  symconst_symbol sym = {(ir_type *)symbol};
  return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);

ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
  symconst_symbol sym = {(ir_type *)symbol};
  return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);

ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
  symconst_symbol sym = {symbol};
  return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);

ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
  symconst_symbol sym = {symbol};
  return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);

ir_node *new_rd_SymConst_align (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
  symconst_symbol sym = {symbol};
  return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
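/* Note: all of the wrappers above place the SymConst in the start block of
   irg, presumably because address, size, and alignment constants do not
   depend on control flow. */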
new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[])
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Sync(db, block);
  current_ir_graph = rem;
  for (i = 0; i < arity; ++i)
    add_Sync_pred(res, in[i]);

new_rd_Bad (ir_graph *irg) {
  return get_irg_bad(irg);
new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Confirm(db, block, val, bound, cmp);
  current_ir_graph = rem;

/* this function is often called with current_ir_graph unset */
new_rd_Unknown (ir_graph *irg, ir_mode *m)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Unknown(m);
  current_ir_graph = rem;
new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_CallBegin(db, block, call);
  current_ir_graph = rem;

new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
  res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
  set_irg_end_reg(irg, res);
  IRN_VRFY_IRG(res, irg);

new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
  res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
  set_irg_end_except(irg, res);
  IRN_VRFY_IRG (res, irg);
new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Break(db, block);
  current_ir_graph = rem;

new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
               long proj)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Filter(db, block, arg, mode, proj);
  current_ir_graph = rem;

new_rd_NoMem (ir_graph *irg) {
  return get_irg_no_mem(irg);
new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
            ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
  current_ir_graph = rem;

new_rd_Psi (dbg_info *db, ir_graph *irg, ir_node *block,
            int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Psi(db, block, arity, cond, vals, mode);
  current_ir_graph = rem;

ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
                      ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_CopyB(db, block, store, dst, src, data_type);
  current_ir_graph = rem;

new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
               ir_node *objptr, ir_type *type)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_InstOf(db, block, store, objptr, type);
  current_ir_graph = rem;

new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Raise(db, block, store, obj);
  current_ir_graph = rem;

ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
                      ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;
  res = new_bd_Bound(db, block, store, idx, lower, upper);
  current_ir_graph = rem;
ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
  return new_rd_Block(NULL, irg, arity, in);

ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
  return new_rd_Start(NULL, irg, block);

ir_node *new_r_End (ir_graph *irg, ir_node *block) {
  return new_rd_End(NULL, irg, block);

ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
  return new_rd_Jmp(NULL, irg, block);

ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
  return new_rd_IJmp(NULL, irg, block, tgt);

ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
  return new_rd_Cond(NULL, irg, block, c);

ir_node *new_r_Return (ir_graph *irg, ir_node *block,
                       ir_node *store, int arity, ir_node **in) {
  return new_rd_Return(NULL, irg, block, store, arity, in);

ir_node *new_r_Const (ir_graph *irg, ir_node *block,
                      ir_mode *mode, tarval *con) {
  return new_rd_Const(NULL, irg, block, mode, con);

ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
                          ir_mode *mode, long value) {
  return new_rd_Const_long(NULL, irg, block, mode, value);

ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
                          ir_mode *mode, tarval *con, ir_type *tp) {
  return new_rd_Const_type(NULL, irg, block, mode, con, tp);

ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
                         symconst_symbol value, symconst_kind symkind) {
  return new_rd_SymConst(NULL, irg, block, value, symkind);

ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
                         ir_node *objptr, entity *ent) {
  return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);

ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
                    ir_node *objptr, int n_index, ir_node **index,
                    entity *ent) {
  return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);

ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
                     ir_node *callee, int arity, ir_node **in,
                     ir_type *tp) {
  return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
ir_node *new_r_Add (ir_graph *irg, ir_node *block,
                    ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Add(NULL, irg, block, op1, op2, mode);

ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
                    ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Sub(NULL, irg, block, op1, op2, mode);

ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
                      ir_node *op, ir_mode *mode) {
  return new_rd_Minus(NULL, irg, block, op, mode);

ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
                    ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Mul(NULL, irg, block, op1, op2, mode);

ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
                     ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_rd_Quot(NULL, irg, block, memop, op1, op2);

ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
                       ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_rd_DivMod(NULL, irg, block, memop, op1, op2);

ir_node *new_r_Div (ir_graph *irg, ir_node *block,
                    ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_rd_Div(NULL, irg, block, memop, op1, op2);

ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
                    ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_rd_Mod(NULL, irg, block, memop, op1, op2);

ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
                    ir_node *op, ir_mode *mode) {
  return new_rd_Abs(NULL, irg, block, op, mode);

ir_node *new_r_And (ir_graph *irg, ir_node *block,
                    ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_And(NULL, irg, block, op1, op2, mode);

ir_node *new_r_Or (ir_graph *irg, ir_node *block,
                   ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Or(NULL, irg, block, op1, op2, mode);

ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
                    ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_rd_Eor(NULL, irg, block, op1, op2, mode);

ir_node *new_r_Not (ir_graph *irg, ir_node *block,
                    ir_node *op, ir_mode *mode) {
  return new_rd_Not(NULL, irg, block, op, mode);

ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
                    ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shl(NULL, irg, block, op, k, mode);

ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
                    ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shr(NULL, irg, block, op, k, mode);

ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
                     ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Shrs(NULL, irg, block, op, k, mode);

ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
                    ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Rot(NULL, irg, block, op, k, mode);

ir_node *new_r_Carry (ir_graph *irg, ir_node *block,
                      ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Carry(NULL, irg, block, op, k, mode);

ir_node *new_r_Borrow (ir_graph *irg, ir_node *block,
                       ir_node *op, ir_node *k, ir_mode *mode) {
  return new_rd_Borrow(NULL, irg, block, op, k, mode);

ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
                    ir_node *op1, ir_node *op2) {
  return new_rd_Cmp(NULL, irg, block, op1, op2);
ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
                     ir_node *op, ir_mode *mode) {
  return new_rd_Conv(NULL, irg, block, op, mode);

ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
  return new_rd_Cast(NULL, irg, block, op, to_tp);

ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
                    ir_node **in, ir_mode *mode) {
  return new_rd_Phi(NULL, irg, block, arity, in, mode);

ir_node *new_r_Load (ir_graph *irg, ir_node *block,
                     ir_node *store, ir_node *adr, ir_mode *mode) {
  return new_rd_Load(NULL, irg, block, store, adr, mode);

ir_node *new_r_Store (ir_graph *irg, ir_node *block,
                      ir_node *store, ir_node *adr, ir_node *val) {
  return new_rd_Store(NULL, irg, block, store, adr, val);

ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
                      ir_node *size, ir_type *alloc_type, where_alloc where) {
  return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);

ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
                     ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
  return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);

ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
  return new_rd_Sync(NULL, irg, block, arity, in);

ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
                     ir_mode *mode, long proj) {
  return new_rd_Proj(NULL, irg, block, arg, mode, proj);

ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
                            long max_proj) {
  return new_rd_defaultProj(NULL, irg, block, arg, max_proj);

ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
                      int arity, ir_node **in) {
  return new_rd_Tuple(NULL, irg, block, arity, in);

ir_node *new_r_Id (ir_graph *irg, ir_node *block,
                   ir_node *val, ir_mode *mode) {
  return new_rd_Id(NULL, irg, block, val, mode);

ir_node *new_r_Bad (ir_graph *irg) {
  return new_rd_Bad(irg);

ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_rd_Confirm (NULL, irg, block, val, bound, cmp);

ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
  return new_rd_Unknown(irg, m);
ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
  return new_rd_CallBegin(NULL, irg, block, callee);

ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
  return new_rd_EndReg(NULL, irg, block);

ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
  return new_rd_EndExcept(NULL, irg, block);

ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
  return new_rd_Break(NULL, irg, block);

ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
                       ir_mode *mode, long proj) {
  return new_rd_Filter(NULL, irg, block, arg, mode, proj);

ir_node *new_r_NoMem (ir_graph *irg) {
  return new_rd_NoMem(irg);

ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
                    ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
  return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);

ir_node *new_r_Psi (ir_graph *irg, ir_node *block,
                    int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
  return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);

ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
                     ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
  return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);

ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
                       ir_type *type) {
  return new_rd_InstOf(NULL, irg, block, store, objptr, type);

ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
                      ir_node *store, ir_node *obj) {
  return new_rd_Raise(NULL, irg, block, store, obj);

ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
                     ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
  return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
/** ********************/
/** public interfaces  */
/** construction tools */
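/* A minimal usage sketch of this interface (illustration only, assuming
   the usual ircons.h frontend API): build "x = a + b" with automatic SSA
   construction.  The integers are the frontend's local-variable numbers;
   Phi nodes are created automatically wherever definitions meet. */
#if 0
  ir_node *blk = new_immBlock();                /* open an immature block  */
  ir_node *a   = get_value(0, mode_Is);         /* last definition of 'a'  */
  ir_node *b   = get_value(1, mode_Is);
  set_value(2, new_Add(a, b, mode_Is));         /* define 'x'              */
  mature_immBlock(blk);                         /* all predecessors known  */
#endif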
/**
 * - create a new Start node in the current block
 *
 * @return s - pointer to the created Start node
 */
new_d_Start (dbg_info *db)
  res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
                     op_Start, mode_T, 0, NULL);
  /* res->attr.start.irg = current_ir_graph; */
  res = optimize_node(res);
  IRN_VRFY_IRG(res, current_ir_graph);
new_d_End (dbg_info *db)
  res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
                    op_End, mode_X, -1, NULL);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, current_ir_graph);

/* Constructs a Block with a fixed number of predecessors.
   Does set current_block.  Can be used with automatic Phi
   node construction. */
new_d_Block (dbg_info *db, int arity, ir_node **in)
  int has_unknown = 0;
  res = new_bd_Block(db, arity, in);

  /* Create and initialize array for Phi-node construction. */
  if (get_irg_phase_state(current_ir_graph) == phase_building) {
    res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
                                          current_ir_graph->n_loc);
    memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

  for (i = arity-1; i >= 0; i--)
    if (get_irn_op(in[i]) == op_Unknown) {

  if (!has_unknown) res = optimize_node(res);
  current_ir_graph->current_block = res;

  IRN_VRFY_IRG(res, current_ir_graph);
/* ***********************************************************************/
/* Methods necessary for automatic Phi node creation                     */
/*
  ir_node *phi_merge            (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
  ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
  ir_node *new_rd_Phi0          (ir_graph *irg, ir_node *block, ir_mode *mode)
  ir_node *new_rd_Phi_in        (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)

  Call Graph (A ---> B means A "calls" B):  get_value and mature_immBlock
  both end up in get_r_value_internal and phi_merge, which allocate the
  actual nodes through new_rd_Phi0 and new_rd_Phi_in.

* *************************************************************************** */
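/* Example: for a diamond where local #0 is assigned on both branches,

       then-block:  set_value(0, t);      else-block:  set_value(0, e);
       join-block:  get_value(0, mode)

   get_value() finds no definition in the join block, phi_merge() fetches
   the operands from both predecessors via get_r_value_internal(), and
   new_rd_Phi_in() builds Phi(t, e) -- or returns the operand directly if
   both paths deliver the same node. */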
/** Creates a Phi node with 0 predecessors */
static INLINE ir_node *
new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
  res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
  IRN_VRFY_IRG(res, irg);
/* There are two implementations of the Phi node construction.  The first
   is faster, but does not work for blocks with more than 2 predecessors.
   The second always works but is slower and causes more unnecessary Phi
   nodes.
   Select the implementation by the following preprocessor flag set in ... */
#if USE_FAST_PHI_CONSTRUCTION

/* This is a stack used for allocating and deallocating nodes in
   new_rd_Phi_in.  The original implementation used the obstack
   to model this stack, now it is explicit.  This reduces side effects.
*/
#if USE_EXPLICIT_PHI_IN_STACK

new_Phi_in_stack(void) {
  res = (Phi_in_stack *) malloc(sizeof(Phi_in_stack));
  res->stack = NEW_ARR_F (ir_node *, 0);
free_Phi_in_stack(Phi_in_stack *s) {
  DEL_ARR_F(s->stack);

free_to_Phi_in_stack(ir_node *phi) {
  if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
      current_ir_graph->Phi_in_stack->pos)
    ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
  else
    current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;

  (current_ir_graph->Phi_in_stack->pos)++;
static INLINE ir_node *
alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
                               int arity, ir_node **in) {
  ir_node **stack = current_ir_graph->Phi_in_stack->stack;
  int       pos   = current_ir_graph->Phi_in_stack->pos;

  if (pos == 0) {
    /* We need to allocate a new node */
    res = new_ir_node (NULL, irg, block, op_Phi, mode, arity, in);
    res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
  } else {
    /* reuse the old node and initialize it again. */
    res = stack[pos-1];
    assert (res->kind == k_ir_node);
    assert (res->op == op_Phi);
    assert (arity >= 0);
    /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
    res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
    memcpy (&res->in[1], in, sizeof (ir_node *) * arity);

    (current_ir_graph->Phi_in_stack->pos)--;
1877 /* Creates a Phi node with a given, fixed array **in of predecessors.
1878 If the Phi node is unnecessary, as the same value reaches the block
1879 through all control flow paths, it is eliminated and the value
1880 returned directly. This constructor is only intended for use in
1881 the automatic Phi node generation triggered by get_value or mature.
1882 The implementation is quite tricky and depends on the fact, that
1883 the nodes are allocated on a stack:
1884 The in array contains predecessors and NULLs. The NULLs appear,
1885 if get_r_value_internal, that computed the predecessors, reached
1886 the same block on two paths. In this case the same value reaches
1887 this block on both paths, there is no definition in between. We need
1888 not allocate a Phi where these path's merge, but we have to communicate
1889 this fact to the caller. This happens by returning a pointer to the
1890 node the caller _will_ allocate. (Yes, we predict the address. We can
1891 do so because the nodes are allocated on the obstack.) The caller then
1892 finds a pointer to itself and, when this routine is called again,
1895 static INLINE ir_node *
1896 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
  ir_node *res, *known;

  /* Allocate a new node on the obstack.  This can return a node to
     which some of the pointers in the in-array already point.
     Attention: the constructor copies the in array, i.e., the later
     changes to the array in this routine do not affect the
     constructed node!  If the in array contains NULLs, there will be
     missing predecessors in the returned node.  Is this a possible
     internal state of the Phi node generation? */
#if USE_EXPLICIT_PHI_IN_STACK
  res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
#else
  res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
#endif

  /* The in-array can contain NULLs.  These were returned by
     get_r_value_internal if it reached the same block/definition on a
     second path.  The NULLs are replaced by the node itself to
     simplify the test in the next loop. */
  for (i = 0; i < ins; ++i) {

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop.  Else the Phi
     node merges the same definition on several paths and therefore is ... */
  for (i = 0; i < ins; ++i) {
    if (in[i] == res || in[i] == known)

  /* i==ins: there is at most one predecessor, we don't need a phi node. */
#if USE_EXPLICIT_PHI_IN_STACK
    free_to_Phi_in_stack(res);
#else
    edges_node_deleted(res, current_ir_graph);
    obstack_free(current_ir_graph->obst, res);
#endif

  res = optimize_node (res);
  IRN_VRFY_IRG(res, irg);

  /* return the pointer to the Phi node.  This node might be deallocated! */
static ir_node *
get_r_value_internal (ir_node *block, int pos, ir_mode *mode);

/** This function computes the predecessors for the real phi node, and then
    allocates and returns this node.  The routine called to allocate the
    node might optimize it away and return a real value, or even a pointer
    to a deallocated Phi node on top of the obstack!
    This function is called with an in-array of proper size. **/
static ir_node *
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
  ir_node *prevBlock, *res;

  /* This loop goes to all predecessor blocks of the block the Phi node is in
     and there finds the operands of the Phi node by calling
     get_r_value_internal. */
  for (i = 1; i <= ins; ++i) {
    assert (block->in[i]);
    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
    nin[i-1] = get_r_value_internal (prevBlock, pos, mode);

  /* After collecting all predecessors into the array nin, a new Phi node
     with these predecessors is created.  This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node.  If the value
     passes two different control flow edges without being defined, and
     this is the second path treated, a pointer to the node that will be
     allocated for the first path (recursion) is returned.  We already
     know the address of this node, as it is the next node to be allocated
     and will be placed on top of the obstack. (The obstack is a _stack_!) */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);

  /* Now we know the value for "pos" and can enter it in the array with
     all known local variables.  Attention: this might be a pointer to
     a node that will later be allocated!!!  See new_rd_Phi_in().
     If this is called in mature, after some set_value() in the same block,
     the proper value must not be overwritten:
       get_value        (makes Phi0, puts it into graph_arr)
       set_value        (overwrites Phi0 in graph_arr)
       mature_immBlock  (upgrades Phi0, puts it again into graph_arr, overwriting ... */
  if (!block->attr.block.graph_arr[pos]) {
    block->attr.block.graph_arr[pos] = res;
  } else {
    /* printf(" value already computed by %s\n",
       get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
/* This function returns the last definition of a variable.  In case
   this variable was last defined in a previous block, Phi nodes are
   inserted.  If the part of the firm graph containing the definition
   is not yet constructed, a dummy Phi node is returned. */
static ir_node *
get_r_value_internal (ir_node *block, int pos, ir_mode *mode)

  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time.  We cannot
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned.  This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured.  (Completion will add a new Phi and turn the Phi0 into an Id
        node.)

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time.  We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time.  A Phi node needs
        to be created (phi_merge).  If the Phi is not needed, as all its
        operands are the same value reaching the block through different
        paths, it is optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time.  Now two
        subcases are possible:
        * The value was computed completely the last time we were here.  This
          is the case if there is no loop.  We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet.  We are computing a value in a loop and need to
          break the recursion without knowing the result yet.
          @@@ strange case.  Straightforwardly, we would create a Phi before
          starting the computation of its predecessors.  In this case we would
          always find a Phi here.  The problem is that this implementation
          only creates a Phi after computing the predecessors, so that it is
          hard to compute self references of this Phi.  @@@
        There is no simple check for the second subcase.  Therefore we check
        for a second visit and treat all such cases as the second subcase.
        Anyway, the basic situation is the same: we reached a block
        on two paths without finding a definition of the value, so no Phi
        nodes are needed on either path.
        We return this information "Two paths, no Phi needed" by a very tricky
        implementation that relies on the fact that an obstack is a stack and
        will return a node with the same address on different allocations.
        Look also at phi_merge and new_rd_phi_in to understand this.
        @@@ Unfortunately this does not work, see testprogram
        three_cfpred_example.
  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) return res;

  if (block->attr.block.matured) { /* case 3 */

    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);
    NEW_ARR_A (ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge (block, pos, mode, nin, ins);

  } else {  /* case 1 */
    /* The block is not mature, we don't know how many in's are needed.  A Phi
       with zero predecessors is created.  Such a Phi node is called a Phi0
       node.  (There is also an obsolete Phi0 opcode.)  The Phi0 is then added
       to the list of Phi0 nodes in this block to be matured by mature_immBlock

       The Phi0 has to remember the pos of its internal value.  If the real
       Phi is computed, pos is used to update the array with the local
       variables. */
    res = new_rd_Phi0 (current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;

  /* If we get here, the frontend missed a use-before-definition error */
  printf("Error: no value set.  Use of undefined variable.  Initializing to zero.\n");
  assert (mode->code >= irm_F && mode->code <= irm_P);
  res = new_rd_Const (NULL, current_ir_graph, block, mode,
                      tarval_mode_null[mode->code]);

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;
   it starts the recursion.  This causes an Id at the entry of
   every block that has no definition of the value! **/

#if USE_EXPLICIT_PHI_IN_STACK
Phi_in_stack * new_Phi_in_stack() { return NULL; }
void free_Phi_in_stack(Phi_in_stack *s) { }
static INLINE ir_node *
new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
               ir_node **in, int ins, ir_node *phi0)
  ir_node *res, *known;

  /* Allocate a new node on the obstack.  The allocation copies the in
     array. */
  res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop.  Else the
     Phi node merges the same definition on several paths and therefore
     is not needed.  Don't consider Bad nodes! */
  for (i = 0; i < ins; ++i) {
    in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */

    /* Optimize self referencing Phis:  We can't detect them yet properly, as
       they still refer to the Phi0 they will replace.  So replace right now. */
    if (phi0 && in[i] == phi0) in[i] = res;

    if (in[i] == res || in[i] == known || is_Bad(in[i])) continue;

  /* i==ins: there is at most one predecessor, we don't need a phi node. */
    edges_node_deleted(res, current_ir_graph);
    obstack_free (current_ir_graph->obst, res);
    if (is_Phi(known)) {
      /* If pred is a phi node we want to optimize it: If loops are matured in a bad
         order, an enclosing Phi node may become superfluous. */
      res = optimize_in_place_2(known);
      exchange(known, res);

  /* An undefined value, e.g., in unreachable code. */

  res = optimize_node (res);  /* This is necessary to add the node to the hash table for cse. */
  IRN_VRFY_IRG(res, irg);

  /* Memory Phis in endless loops must be kept alive.
     As we can't distinguish these easily we keep all of them alive. */
  if ((res->op == op_Phi) && (mode == mode_M))
    add_End_keepalive(get_irg_end(irg), res);
static ir_node *
get_r_value_internal (ir_node *block, int pos, ir_mode *mode);

#if PRECISE_EXC_CONTEXT
static ir_node *
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);

/* Construct a new frag_array for node n.
   Copy the content from the current graph_arr of the corresponding block:
   this is the current state.
   Set ProjM(n) as current memory state.
   Further, the last entry in frag_arr of the current block points to n.  This
   constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
*/
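/* Illustration (hypothetical): after two fragile ops  Load1; Load2;  in one
   block, graph_arr[n_loc-1] points to Load2, while Load2's frag_arr (the
   copy of graph_arr taken when Load2 was created) still holds Load1 in that
   slot -- giving the chain block -> Load2 -> Load1 that set_frag_value()
   walks below. */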
static INLINE ir_node ** new_frag_arr (ir_node *n)
  arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
  memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
         sizeof(ir_node *)*current_ir_graph->n_loc);

  /* turn off optimization before allocating Proj nodes, as res isn't ... */
  opt = get_opt_optimize(); set_optimize(0);
  /* Here we rely on the fact that all frag ops have Memory as first result! */
  if (get_irn_op(n) == op_Call)
    arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
  else if (get_irn_op(n) == op_CopyB)
    arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
  else {
    assert((pn_Quot_M == pn_DivMod_M) &&
           (pn_Quot_M == pn_Div_M)    &&
           (pn_Quot_M == pn_Mod_M)    &&
           (pn_Quot_M == pn_Load_M)   &&
           (pn_Quot_M == pn_Store_M)  &&
           (pn_Quot_M == pn_Alloc_M)  &&
           (pn_Quot_M == pn_Bound_M));
    arr[0] = new_Proj(n, mode_M, pn_Alloc_M);

  current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/**
 * Returns the frag_arr from a node.
 */
static INLINE ir_node **
get_frag_arr (ir_node *n) {
  switch (get_irn_opcode(n)) {
  case iro_Call:
    return n->attr.call.exc.frag_arr;
  case iro_Alloc:
    return n->attr.alloc.exc.frag_arr;
  case iro_Load:
    return n->attr.load.exc.frag_arr;
  case iro_Store:
    return n->attr.store.exc.frag_arr;
  default:
    return n->attr.except.frag_arr;
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
  if (!frag_arr[pos]) frag_arr[pos] = val;
  if (frag_arr[current_ir_graph->n_loc - 1]) {
    ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
    assert(arr != frag_arr && "Endless recursion detected");
    set_frag_value(arr, pos, val);

  for (i = 0; i < 1000; ++i) {
    if (!frag_arr[pos]) {
      frag_arr[pos] = val;
    }
    if (frag_arr[current_ir_graph->n_loc - 1]) {
      ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);

  assert(0 && "potential endless recursion");
static ir_node *
get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
  ir_node *res;
  ir_node **frag_arr;

  assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));

  frag_arr = get_frag_arr(cfOp);
  res = frag_arr[pos];
  if (!res) {
    if (block->attr.block.graph_arr[pos]) {
      /* There was a set_value() after the cfOp and no get_value() before that
         set_value().  We must build a Phi node now. */
      if (block->attr.block.matured) {
        int ins = get_irn_arity(block);
        ir_node **nin;
        NEW_ARR_A (ir_node *, nin, ins);
        res = phi_merge(block, pos, mode, nin, ins);
      } else {
        res = new_rd_Phi0 (current_ir_graph, block, mode);
        res->attr.phi0_pos = pos;
        res->link = block->link;
        block->link = res;
      }
      assert(res);
      /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
         but this should be better: (remove comment if this works) */
      /* It's a Phi, we can write this into all graph_arrs with NULL */
      set_frag_value(block->attr.block.graph_arr, pos, res);
    } else {
      res = get_r_value_internal(block, pos, mode);
      set_frag_value(block->attr.block.graph_arr, pos, res);
    }
  }
  return res;
}
#endif /* PRECISE_EXC_CONTEXT */
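
/* Illustrative sketch (not from the original source): what the frag arrays
 * buy us.  With precise exception context a fragile op splits the memory
 * state.  For a front end doing
 *
 *     ir_node *mem = get_store();
 *     ir_node *ld  = new_Load(mem, addr, mode_Is);     -- may throw
 *     set_store(new_Proj(ld, mode_M, pn_Load_M));      -- regular path
 *
 * the exception handler must not see values set after the Load.  The frag
 * array of ld freezes the graph_arr state at the Load (plus the Load's own
 * memory Proj), and get_r_frag_value_internal() above answers lookups from
 * that snapshot when the handler block asks for a value.
 */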
/**
    Computes the predecessors for the real phi node, and then
    allocates and returns this node.  The routine called to allocate the
    node might optimize it away and return a real value.
    This function must be called with an in-array of proper size. **/
static ir_node *
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
{
  ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
  int i;

  /* If this block has no value at pos create a Phi0 and remember it
     in graph_arr to break recursions.
     Else we may not set graph_arr, as a later value is already remembered
     there. */
  phi0 = NULL;
  if (!block->attr.block.graph_arr[pos]) {
    if (block == get_irg_start_block(current_ir_graph)) {
      /* Collapsing to Bad tarvals is no good idea.
         So we call a user-supplied routine here that deals with this case as
         appropriate for the given language.  Unfortunately the only help we
         can give here is the position.

         Even if all variables are defined before use, it can happen that
         we get to the start block, if a Cond has been replaced by a tuple
         (bad, jmp).  In this case we call the function needlessly,
         eventually generating a non-existent error.
         However, this SHOULD NOT HAPPEN, as bad control flow nodes are
         intercepted before recurring.
       */
      if (default_initialize_local_variable) {
        ir_node *rem = get_cur_block();

        set_cur_block(block);
        block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
        set_cur_block(rem);
      }
      else
        block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
      /* We don't need to care about exception ops in the start block.
         There are none by definition. */
      return block->attr.block.graph_arr[pos];
    } else {
      phi0 = new_rd_Phi0(current_ir_graph, block, mode);
      block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
      if (get_opt_precise_exc_context()) {
        /* Set graph_arr for fragile ops.  Also here we should break recursion.
           We could choose a cyclic path through a cfop.  But the recursion
           would break at some point. */
        set_frag_value(block->attr.block.graph_arr, pos, phi0);
      }
#endif
    }
  }
  /* This loop goes to all predecessor blocks of the block the Phi node
     is in and there finds the operands of the Phi node by calling
     get_r_value_internal.  */
  for (i = 1; i <= ins; ++i) {
    prevCfOp = skip_Proj(block->in[i]);
    assert(prevCfOp);
    if (is_Bad(prevCfOp)) {
      /* In case a Cond has been optimized we would get right to the start block
         with an invalid definition. */
      nin[i-1] = new_Bad();
      continue;
    }
    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
    assert(prevBlock);
    if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
      if (get_opt_precise_exc_context() &&
          is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
        assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
        nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
      } else
#endif
        nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
    } else {
      nin[i-1] = new_Bad();
    }
  }
  /* We want to pass the Phi0 node to the constructor: this finds additional
     optimization possibilities.
     The Phi0 node either is allocated in this function, or it comes from
     a former call to get_r_value_internal.  In this case we may not yet
     exchange phi0, as this is done in mature_immBlock. */
  if (!phi0) {
    phi0_all = block->attr.block.graph_arr[pos];
    if (!((get_irn_op(phi0_all) == op_Phi) &&
          (get_irn_arity(phi0_all) == 0)   &&
          (get_nodes_block(phi0_all) == block)))
      phi0_all = NULL;
  } else {
    phi0_all = phi0;
  }

  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created.  This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node.  */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);

  /* In case we allocated a Phi0 node at the beginning of this procedure,
     we need to exchange this Phi0 with the real Phi. */
  if (phi0) {
    exchange(phi0, res);
    block->attr.block.graph_arr[pos] = res;
    /* Don't set_frag_value as it does not overwrite.  Doesn't matter, is
       only an optimization. */
  }

  return res;
}
/* This function returns the last definition of a variable.  In case
   this variable was last defined in a previous block, Phi nodes are
   inserted.  If the part of the Firm graph containing the definition
   is not yet constructed, a dummy Phi node is returned. */
static ir_node *
get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
{
  ir_node *res;
  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time.  We can not
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned.  This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured.  (Completion will add a new Phi and turn the Phi0 into an Id
        node.)

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time.  We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time.  A Phi node needs
        to be created (phi_merge).  If the Phi is not needed, as all its
        operands are the same value reaching the block through different
        paths, it is optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time.  Now two
        subcases are possible:
        * The value was computed completely the last time we were here.  This
          is the case if there is no loop.  We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet.  We are computing a value in a loop and need to
          break the recursion.  This case only happens if we visited
          the same block with phi_merge before, which inserted a Phi0.
          So we return the Phi0.
  */
  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
    /* As phi_merge allocates a Phi0 this value is always defined.  Here
       is the critical difference of the two algorithms. */
    assert(block->attr.block.graph_arr[pos]);
    return block->attr.block.graph_arr[pos];
  }

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) return res;

  if (block->attr.block.matured) { /* case 3 */

    /* The Phi has the same number of ins as the corresponding block. */
    int ins = get_irn_arity(block);
    ir_node **nin;
    NEW_ARR_A (ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge (block, pos, mode, nin, ins);

  } else { /* case 1 */
    /* The block is not mature, we don't know how many in's are needed.  A Phi
       with zero predecessors is created.  Such a Phi node is called Phi0
       node.  The Phi0 is then added to the list of Phi0 nodes in this block
       to be matured by mature_immBlock later.
       The Phi0 has to remember the pos of its internal value.  If the real
       Phi is computed, pos is used to update the array with the local
       values. */
    res = new_rd_Phi0 (current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;
    block->link = res;
  }

  /* If we get here, the frontend missed a use-before-definition error. */
  if (!res) {
    /* Error Message */
    printf("Error: no value set.  Use of undefined variable.  Initializing to zero.\n");
    assert (mode->code >= irm_F && mode->code <= irm_P);
    res = new_rd_Const (NULL, current_ir_graph, block, mode,
                        get_mode_null(mode));
  }

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;

  return res;
}
#endif /* USE_FAST_PHI_CONSTRUCTION */
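
/* Illustrative trace (not from the original source): for a loop variable
 *
 *     x = 0; while (c) { x = x + 1; }
 *
 * the first get_value(x) in the still immature loop header hits case 1 and
 * returns a Phi0; the body then computes x+1 against that Phi0.  Once the
 * back edge is added and the header matured, phi_merge() collects the real
 * operands (0 from the entry, x+1 from the body); lookups that run into the
 * header again during that recursion are stopped by the visited flag
 * (case 4) and answered with the value recorded in graph_arr.
 * mature_immBlock() finally exchanges the Phi0 for the real Phi(0, x+1).
 */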
/* ************************************************************************** */

/**
 * Finalize a Block node, when all control flows are known.
 * Acceptable parameters are only Block nodes.
 */
void
mature_immBlock (ir_node *block)
{
  int ins;
  ir_node *n, **nin;
  ir_node *next;

  assert (get_irn_opcode(block) == iro_Block);
  /* @@@ should be commented in
     assert (!get_Block_matured(block) && "Block already matured"); */

  if (!get_Block_matured(block)) {
    ins = ARR_LEN (block->in)-1;
    /* Fix block parameters */
    block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);

    /* An array for building the Phi nodes. */
    NEW_ARR_A (ir_node *, nin, ins);

    /* Traverse a chain of Phi nodes attached to this block and mature
       these, too. */
    for (n = block->link; n; n = next) {
      inc_irg_visited(current_ir_graph);
      next = n->link;
      exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
    }

    block->attr.block.matured = 1;

    /* Now, as the block is a finished Firm node, we can optimize it.
       Since other nodes have been allocated since the block was created
       we can not free the node on the obstack.  Therefore we have to call
       optimize_in_place().
       Unfortunately the optimization does not change a lot, as all allocated
       nodes refer to the unoptimized node.
       We can call optimize_in_place_2(), as global cse has no effect on blocks. */
    block = optimize_in_place_2(block);
    IRN_VRFY_IRG(block, current_ir_graph);
  }
}
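
/* Illustrative usage sketch (not part of the library): building a counting
 * loop with immature blocks.  Assumes a graph is under construction
 * (current_ir_graph set up by new_ir_graph with at least one local variable
 * slot) and that slot 0 holds a mode_Is counter. */
#if 0
static void example_build_count_loop(void)
{
  ir_node *header, *body, *exit_bl, *cmp, *cond, *proj_true, *proj_false, *jmp;

  set_value(0, new_Const_long(mode_Is, 0));           /* i = 0 */
  jmp = new_Jmp();

  header = new_immBlock();                            /* immature loop header */
  add_immBlock_pred(header, jmp);                     /* entry edge */

  /* get_value() on the immature header yields a Phi0 placeholder. */
  cmp  = new_Cmp(get_value(0, mode_Is), new_Const_long(mode_Is, 10));
  cond = new_Cond(new_Proj(cmp, mode_b, pn_Cmp_Lt));
  proj_true  = new_Proj(cond, mode_X, pn_Cond_true);
  proj_false = new_Proj(cond, mode_X, pn_Cond_false);

  body = new_immBlock();                              /* loop body */
  add_immBlock_pred(body, proj_true);
  mature_immBlock(body);
  set_value(0, new_Add(get_value(0, mode_Is),
                       new_Const_long(mode_Is, 1), mode_Is));
  add_immBlock_pred(header, new_Jmp());               /* back edge */
  mature_immBlock(header);                            /* Phi0 becomes Phi(0, i+1) */

  exit_bl = new_immBlock();
  add_immBlock_pred(exit_bl, proj_false);
  mature_immBlock(exit_bl);
}
#endif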
ir_node *
new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
  return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
}

ir_node *
new_d_Const (dbg_info *db, ir_mode *mode, tarval *con) {
  return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
}

ir_node *
new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
  return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
}

ir_node *
new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp) {
  return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
}

ir_node *
new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode) {
  return new_bd_Id(db, current_ir_graph->current_block, val, mode);
}

ir_node *
new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
  return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
}
ir_node *
new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj) {
  ir_node *res;

  assert(arg->op == op_Cond);
  arg->attr.cond.kind = fragmentary;
  arg->attr.cond.default_proj = max_proj;
  res = new_Proj (arg, mode_X, max_proj);
  return res;
}
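
/* Illustrative sketch (not part of the library): a front end lowering a
 * switch uses defaultProj together with case Projs on the same Cond.
 * Assumes sel is a mode_Is selector value in the current block and that
 * case Proj numbers correspond to selector values. */
#if 0
static void example_build_switch(ir_node *sel)
{
  ir_node *cond  = new_Cond(sel);                /* multiway branch */
  ir_node *case0 = new_Proj(cond, mode_X, 0);    /* jump for selector value 0 */
  ir_node *case1 = new_Proj(cond, mode_X, 1);    /* jump for selector value 1 */
  ir_node *dflt  = new_defaultProj(cond, 2);     /* all other selector values */
  /* case0/case1/dflt then become predecessors of the case blocks
     via add_immBlock_pred(). */
  (void)case0; (void)case1; (void)dflt;
}
#endif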
ir_node *
new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode) {
  return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 0);
}

ir_node *
new_d_strictConv (dbg_info *db, ir_node *op, ir_mode *mode) {
  return new_bd_Conv(db, current_ir_graph->current_block, op, mode, 1);
}

ir_node *
new_d_Cast (dbg_info *db, ir_node *op, ir_type *to_tp) {
  return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
}

ir_node *
new_d_Tuple (dbg_info *db, int arity, ir_node **in) {
  return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
}
#if PRECISE_EXC_CONTEXT
/**
 * Allocate the frag array for a fragile op.
 */
static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
  if (get_opt_precise_exc_context()) {
    if ((current_ir_graph->phase_state == phase_building) &&
        (get_irn_op(res) == op) &&  /* Could be optimized away. */
        !*frag_store) {             /* Could be a cse where the arr is already set. */
      *frag_store = new_frag_arr(res);
    }
  }
}
#endif
ir_node *
new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_bd_Quot (db, current_ir_graph->current_block, memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_bd_DivMod (db, current_ir_graph->current_block, memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_bd_Div (db, current_ir_graph->current_block, memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
  ir_node *res;
  res = new_bd_Mod (db, current_ir_graph->current_block, memop, op1, op2);
  res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}
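
/* Illustrative note (not from the original source): Quot, DivMod, Div and
 * Mod take a memory operand and produce mode_T tuples; the caller selects
 * the memory and data results with Projs.  A hedged sketch, assuming a and
 * b are mode_Is values in the current block: */
#if 0
static ir_node *example_divide(ir_node *a, ir_node *b)
{
  ir_node *div = new_Div(get_store(), a, b);
  set_store(new_Proj(div, mode_M, pn_Div_M));    /* thread memory past the Div */
  return new_Proj(div, mode_Is, pn_Div_res);     /* the quotient */
}
#endif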
ir_node *
new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2) {
  return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
}

ir_node *
new_d_Jmp (dbg_info *db) {
  return new_bd_Jmp (db, current_ir_graph->current_block);
}

ir_node *
new_d_IJmp (dbg_info *db, ir_node *tgt) {
  return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
}

ir_node *
new_d_Cond (dbg_info *db, ir_node *c) {
  return new_bd_Cond (db, current_ir_graph->current_block, c);
}
ir_node *
new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
            ir_type *tp) {
  ir_node *res;
  res = new_bd_Call (db, current_ir_graph->current_block,
                     store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Return (dbg_info *db, ir_node *store, int arity, ir_node **in) {
  return new_bd_Return (db, current_ir_graph->current_block,
                        store, arity, in);
}
ir_node *
new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
  ir_node *res;
  res = new_bd_Load (db, current_ir_graph->current_block,
                     store, addr, mode);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
  ir_node *res;
  res = new_bd_Store (db, current_ir_graph->current_block,
                      store, addr, val);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}
ir_node *
new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
             where_alloc where) {
  ir_node *res;
  res = new_bd_Alloc (db, current_ir_graph->current_block,
                      store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);  /* Could be optimized away. */
#endif
  return res;
}

ir_node *
new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
            ir_node *size, ir_type *free_type, where_alloc where) {
  return new_bd_Free (db, current_ir_graph->current_block,
                      store, ptr, size, free_type, where);
}
ir_node *
new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
/* GL: objptr was called frame before.  Frame was a bad choice for the name
   as the operand could as well be a pointer to a dynamic object. */
{
  return new_bd_Sel (db, current_ir_graph->current_block,
                     store, objptr, 0, NULL, ent);
}

ir_node *
new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel) {
  return new_bd_Sel (db, current_ir_graph->current_block,
                     store, objptr, n_index, index, sel);
}
ir_node *
new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
  return new_bd_SymConst_type (db, get_irg_start_block(current_ir_graph),
                               value, kind, tp);
}

ir_node *
new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind) {
  return new_bd_SymConst (db, get_irg_start_block(current_ir_graph),
                          value, kind);
}

ir_node *
new_d_Sync (dbg_info *db, int arity, ir_node *in[]) {
  return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block, arity, in);
}
ir_node *
new_d_Bad (void) {
  return _new_d_Bad();
}

ir_node *
new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_bd_Confirm (db, current_ir_graph->current_block,
                         val, bound, cmp);
}

ir_node *
new_d_Unknown (ir_mode *m) {
  return new_bd_Unknown(m);
}
ir_node *
new_d_CallBegin (dbg_info *db, ir_node *call) {
  return new_bd_CallBegin (db, current_ir_graph->current_block, call);
}

ir_node *
new_d_EndReg (dbg_info *db) {
  return new_bd_EndReg(db, current_ir_graph->current_block);
}

ir_node *
new_d_EndExcept (dbg_info *db) {
  return new_bd_EndExcept(db, current_ir_graph->current_block);
}

ir_node *
new_d_Break (dbg_info *db) {
  return new_bd_Break (db, current_ir_graph->current_block);
}
ir_node *
new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj) {
  return new_bd_Filter (db, current_ir_graph->current_block,
                        arg, mode, proj);
}

ir_node *
new_d_NoMem (void) {
  return _new_d_NoMem();
}

ir_node *
new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
           ir_node *ir_true, ir_mode *mode) {
  return new_bd_Mux (db, current_ir_graph->current_block,
                     sel, ir_false, ir_true, mode);
}

ir_node *
new_d_Psi (dbg_info *db, int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
  return new_bd_Psi (db, current_ir_graph->current_block,
                     arity, conds, vals, mode);
}

ir_node *new_d_CopyB(dbg_info *db, ir_node *store,
                     ir_node *dst, ir_node *src, ir_type *data_type) {
  ir_node *res;
  res = new_bd_CopyB(db, current_ir_graph->current_block,
                     store, dst, src, data_type);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
#endif
  return res;
}
ir_node *
new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type) {
  return new_bd_InstOf (db, current_ir_graph->current_block,
                        store, objptr, type);
}

ir_node *
new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj) {
  return new_bd_Raise (db, current_ir_graph->current_block,
                       store, obj);
}

ir_node *new_d_Bound(dbg_info *db, ir_node *store,
                     ir_node *idx, ir_node *lower, ir_node *upper) {
  ir_node *res;
  res = new_bd_Bound(db, current_ir_graph->current_block,
                     store, idx, lower, upper);
#if PRECISE_EXC_CONTEXT
  allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
#endif
  return res;
}
/* ********************************************************************* */
/* Comfortable interface with automatic Phi node construction.           */
/* (Also uses constructors of the ?? interface, except new_Block.)       */
/* ********************************************************************* */

/* Block construction */
/* immature Block without predecessors */
ir_node *new_d_immBlock (dbg_info *db) {
  ir_node *res;

  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* creates a new dynamic in-array as length of in is -1 */
  res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
  current_ir_graph->current_block = res;
  res->attr.block.matured     = 0;
  res->attr.block.dead        = 0;
  /* res->attr.block.exc = exc_normal; */
  /* res->attr.block.handler_entry = 0; */
  res->attr.block.irg         = current_ir_graph;
  res->attr.block.backedge    = NULL;
  res->attr.block.in_cg       = NULL;
  res->attr.block.cg_backedge = NULL;
  set_Block_block_visited(res, 0);

  /* Create and initialize array for Phi-node construction. */
  res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
                                         current_ir_graph->n_loc);
  memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

  /* Immature block may not be optimized! */
  IRN_VRFY_IRG(res, current_ir_graph);

  return res;
}

ir_node *
new_immBlock (void) {
  return new_d_immBlock(NULL);
}
/* add an edge to a jmp/control flow node */
void
add_immBlock_pred (ir_node *block, ir_node *jmp) {
  if (block->attr.block.matured) {
    assert(0 && "Error: Block already matured!\n");
  }
  else {
    assert(jmp != NULL);
    ARR_APP1(ir_node *, block->in, jmp);
  }
}

/* changing the current block */
void
set_cur_block (ir_node *target) {
  current_ir_graph->current_block = target;
}
/* ************************ */
/* parameter administration */

/* get a value from the value array of the current block by its index */
ir_node *
get_d_value (dbg_info *db, int pos, ir_mode *mode) {
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  inc_irg_visited(current_ir_graph);

  return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
}

/* get a value from the value array of the current block by its index */
ir_node *
get_value (int pos, ir_mode *mode) {
  return get_d_value(NULL, pos, mode);
}

/* set a value at position pos in the value array of the current block */
void
set_value (int pos, ir_node *value) {
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  assert(pos+1 < current_ir_graph->n_loc);
  current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
}
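
/* Illustrative sketch (not part of the library): set_value/get_value give
 * SSA construction for free.  For an if/else diamond the join block's
 * get_value yields a Phi automatically.  Assumes slot 0 is free and that
 * t_jmp/f_jmp are the true/false Projs of a Cond in an already finished
 * block. */
#if 0
static ir_node *example_diamond(ir_node *t_jmp, ir_node *f_jmp)
{
  ir_node *b_true, *b_false, *join, *jmp_t, *jmp_f;

  b_true = new_immBlock();
  add_immBlock_pred(b_true, t_jmp);
  mature_immBlock(b_true);
  set_value(0, new_Const_long(mode_Is, 1));    /* x = 1 */
  jmp_t = new_Jmp();

  b_false = new_immBlock();
  add_immBlock_pred(b_false, f_jmp);
  mature_immBlock(b_false);
  set_value(0, new_Const_long(mode_Is, 2));    /* x = 2 */
  jmp_f = new_Jmp();

  join = new_immBlock();
  add_immBlock_pred(join, jmp_t);
  add_immBlock_pred(join, jmp_f);
  mature_immBlock(join);
  return get_value(0, mode_Is);                /* Phi(1, 2) */
}
#endif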
/* Find the value number for a node in the current block. */
int
find_value(ir_node *value) {
  int i;
  ir_node *bl = current_ir_graph->current_block;

  for (i = 1; i < ARR_LEN(bl->attr.block.graph_arr); ++i)
    if (bl->attr.block.graph_arr[i] == value)
      return i - 1;
  return -1;
}

/* get the current store */
ir_node *
get_store (void) {
  assert(get_irg_phase_state (current_ir_graph) == phase_building);
  /* GL: one could call get_value instead */
  inc_irg_visited(current_ir_graph);
  return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
}
/* set the current store: handles automatic Sync construction for Load nodes */
void
set_store (ir_node *store)
{
  ir_node *load, *pload, *pred, *in[2];

  assert(get_irg_phase_state (current_ir_graph) == phase_building);

  if (get_opt_auto_create_sync()) {
    /* handle non-volatile Load nodes by automatically creating Sync's */
    load = skip_Proj(store);
    if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
      pred = get_Load_mem(load);

      if (is_Sync(pred)) {
        /* a Load after a Sync: move it up */
        ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));

        set_Load_mem(load, get_memop_mem(mem));
        add_Sync_pred(pred, store);
        store = pred;
      }
      else {
        pload = skip_Proj(pred);
        if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
          /* a Load after a Load: create a new Sync */
          set_Load_mem(load, get_Load_mem(pload));

          in[0] = pred;
          in[1] = store;
          store = new_Sync(2, in);
        }
      }
    }
  }
  current_ir_graph->current_block->attr.block.graph_arr[0] = store;
}
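
/* Illustrative sketch (not part of the library): how a front end threads
 * the memory state through get_store/set_store.  With
 * get_opt_auto_create_sync() enabled, consecutive non-volatile Loads are
 * merged into a Sync by set_store() above. */
#if 0
static ir_node *example_store_then_load(ir_node *addr, ir_node *val)
{
  ir_node *st, *ld;

  st = new_Store(get_store(), addr, val);
  set_store(new_Proj(st, mode_M, pn_Store_M));   /* memory state after the Store */

  ld = new_Load(get_store(), addr, mode_Is);
  set_store(new_Proj(ld, mode_M, pn_Load_M));    /* memory state after the Load */
  return new_Proj(ld, mode_Is, pn_Load_res);     /* the loaded value */
}
#endif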
void
keep_alive (ir_node *ka) {
  add_End_keepalive(get_irg_end(current_ir_graph), ka);
}

/* --- Useful access routines --- */
/* Returns the current block of the current graph.  To set the current
   block use set_cur_block(). */
ir_node *get_cur_block(void) {
  return get_irg_current_block(current_ir_graph);
}

/* Returns the frame type of the current graph */
ir_type *get_cur_frame_type(void) {
  return get_irg_frame_type(current_ir_graph);
}
/* ********************************************************************* */
/* initialize ir construction                                            */

/* call once for each run of the library */
void
init_cons(uninitialized_local_variable_func_t *func) {
  default_initialize_local_variable = func;
}

void
irp_finalize_cons (void) {
  int i;
  for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
    irg_finalize_cons(get_irp_irg(i));
  }
  irp->phase_state = phase_high;
}
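
/* Illustrative note (not from the original source): the intended lifecycle
 * is roughly
 *
 *     init_cons(my_uninit_var_handler);   -- once per run of the library
 *     ... build each ir_graph with the constructors above ...
 *     irg_finalize_cons(irg);             -- per finished graph, or
 *     irp_finalize_cons();                -- once for all graphs
 *
 * where my_uninit_var_handler is a hypothetical callback a front end
 * supplies for reads of uninitialized variables.  After finalizing, the
 * program is in phase_high and the Phi construction machinery must not be
 * used any more.
 */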
ir_node *new_Block(int arity, ir_node **in) {
  return new_d_Block(NULL, arity, in);
}
ir_node *new_Start (void) {
  return new_d_Start(NULL);
}
ir_node *new_End (void) {
  return new_d_End(NULL);
}
ir_node *new_Jmp (void) {
  return new_d_Jmp(NULL);
}
ir_node *new_IJmp (ir_node *tgt) {
  return new_d_IJmp(NULL, tgt);
}
ir_node *new_Cond (ir_node *c) {
  return new_d_Cond(NULL, c);
}
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
  return new_d_Return(NULL, store, arity, in);
}
ir_node *new_Const (ir_mode *mode, tarval *con) {
  return new_d_Const(NULL, mode, con);
}
ir_node *new_Const_long(ir_mode *mode, long value) {
  return new_d_Const_long(NULL, mode, value);
}
ir_node *new_Const_type(tarval *con, ir_type *tp) {
  return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
}
ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
  return new_d_SymConst(NULL, value, kind);
}
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
  return new_d_simpleSel(NULL, store, objptr, ent);
}
ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
                  entity *ent) {
  return new_d_Sel(NULL, store, objptr, arity, in, ent);
}
ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
                   ir_type *tp) {
  return new_d_Call(NULL, store, callee, arity, in, tp);
}
ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Add(NULL, op1, op2, mode);
}
ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Sub(NULL, op1, op2, mode);
}
ir_node *new_Minus (ir_node *op, ir_mode *mode) {
  return new_d_Minus(NULL, op, mode);
}
ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Mul(NULL, op1, op2, mode);
}
ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Quot(NULL, memop, op1, op2);
}
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_DivMod(NULL, memop, op1, op2);
}
ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Div(NULL, memop, op1, op2);
}
ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
  return new_d_Mod(NULL, memop, op1, op2);
}
ir_node *new_Abs (ir_node *op, ir_mode *mode) {
  return new_d_Abs(NULL, op, mode);
}
ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_And(NULL, op1, op2, mode);
}
ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Or(NULL, op1, op2, mode);
}
ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Eor(NULL, op1, op2, mode);
}
ir_node *new_Not (ir_node *op, ir_mode *mode) {
  return new_d_Not(NULL, op, mode);
}
ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shl(NULL, op, k, mode);
}
ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shr(NULL, op, k, mode);
}
ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Shrs(NULL, op, k, mode);
}
ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
  return new_d_Rot(NULL, op, k, mode);
}
ir_node *new_Carry (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Carry(NULL, op1, op2, mode);
}
ir_node *new_Borrow (ir_node *op1, ir_node *op2, ir_mode *mode) {
  return new_d_Borrow(NULL, op1, op2, mode);
}
ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
  return new_d_Cmp(NULL, op1, op2);
}
ir_node *new_Conv (ir_node *op, ir_mode *mode) {
  return new_d_Conv(NULL, op, mode);
}
ir_node *new_strictConv (ir_node *op, ir_mode *mode) {
  return new_d_strictConv(NULL, op, mode);
}
ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
  return new_d_Cast(NULL, op, to_tp);
}
ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
  return new_d_Phi(NULL, arity, in, mode);
}
ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
  return new_d_Load(NULL, store, addr, mode);
}
ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
  return new_d_Store(NULL, store, addr, val);
}
ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
                    where_alloc where) {
  return new_d_Alloc(NULL, store, size, alloc_type, where);
}
ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
                   ir_type *free_type, where_alloc where) {
  return new_d_Free(NULL, store, ptr, size, free_type, where);
}
ir_node *new_Sync (int arity, ir_node *in[]) {
  return new_d_Sync(NULL, arity, in);
}
ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Proj(NULL, arg, mode, proj);
}
ir_node *new_defaultProj (ir_node *arg, long max_proj) {
  return new_d_defaultProj(NULL, arg, max_proj);
}
ir_node *new_Tuple (int arity, ir_node **in) {
  return new_d_Tuple(NULL, arity, in);
}
ir_node *new_Id (ir_node *val, ir_mode *mode) {
  return new_d_Id(NULL, val, mode);
}
ir_node *new_Bad (void) {
  return new_d_Bad();
}
ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
  return new_d_Confirm (NULL, val, bound, cmp);
}
ir_node *new_Unknown(ir_mode *m) {
  return new_d_Unknown(m);
}
ir_node *new_CallBegin (ir_node *callee) {
  return new_d_CallBegin(NULL, callee);
}
ir_node *new_EndReg (void) {
  return new_d_EndReg(NULL);
}
ir_node *new_EndExcept (void) {
  return new_d_EndExcept(NULL);
}
ir_node *new_Break (void) {
  return new_d_Break(NULL);
}
ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
  return new_d_Filter(NULL, arg, mode, proj);
}
ir_node *new_NoMem (void) {
  return new_d_NoMem();
}
ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
  return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
}
ir_node *new_Psi (int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
  return new_d_Psi(NULL, arity, conds, vals, mode);
}
ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
  return new_d_CopyB(NULL, store, dst, src, data_type);
}
ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
  return new_d_InstOf (NULL, store, objptr, ent);
}
ir_node *new_Raise (ir_node *store, ir_node *obj) {
  return new_d_Raise(NULL, store, obj);
}
ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
  return new_d_Bound(NULL, store, idx, lower, upper);
}