3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verifying */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * Language dependent variable initialization callback.
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
65 /* creates a bd constructor for a binop */
66 #define NEW_BD_BINOP(instr) \
68 new_bd_##instr (dbg_info *db, ir_node *block, \
69 ir_node *op1, ir_node *op2, ir_mode *mode) \
73 ir_graph *irg = current_ir_graph; \
76 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
77 res = optimize_node(res); \
78 IRN_VRFY_IRG(res, irg); \
82 /* creates a bd constructor for an unop */
83 #define NEW_BD_UNOP(instr) \
85 new_bd_##instr (dbg_info *db, ir_node *block, \
86 ir_node *op, ir_mode *mode) \
89 ir_graph *irg = current_ir_graph; \
90 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
91 res = optimize_node(res); \
92 IRN_VRFY_IRG(res, irg); \
96 /* creates a bd constructor for a divop */
97 #define NEW_BD_DIVOP(instr) \
99 new_bd_##instr (dbg_info *db, ir_node *block, \
100 ir_node *memop, ir_node *op1, ir_node *op2) \
104 ir_graph *irg = current_ir_graph; \
108 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
109 res = optimize_node(res); \
110 IRN_VRFY_IRG(res, irg); \
114 /* creates a rd constructor for a binop */
115 #define NEW_RD_BINOP(instr) \
117 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
118 ir_node *op1, ir_node *op2, ir_mode *mode) \
121 ir_graph *rem = current_ir_graph; \
122 current_ir_graph = irg; \
123 res = new_bd_##instr(db, block, op1, op2, mode); \
124 current_ir_graph = rem; \
128 /* creates a rd constructor for an unop */
129 #define NEW_RD_UNOP(instr) \
131 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
132 ir_node *op, ir_mode *mode) \
135 ir_graph *rem = current_ir_graph; \
136 current_ir_graph = irg; \
137 res = new_bd_##instr(db, block, op, mode); \
138 current_ir_graph = rem; \
142 /* creates a rd constructor for a divop */
143 #define NEW_RD_DIVOP(instr) \
145 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
146 ir_node *memop, ir_node *op1, ir_node *op2) \
149 ir_graph *rem = current_ir_graph; \
150 current_ir_graph = irg; \
151 res = new_bd_##instr(db, block, memop, op1, op2); \
152 current_ir_graph = rem; \
156 /* creates a d constructor for a binop */
157 #define NEW_D_BINOP(instr) \
159 new_d_##instr (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
160 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
163 /* creates a d constructor for an unop */
164 #define NEW_D_UNOP(instr) \
166 new_d_##instr (dbg_info *db, ir_node *op, ir_mode *mode) { \
167 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
171 /* Constructs a Block with a fixed number of predecessors.
172 Does not set current_block. Can not be used with automatic
173 Phi node construction. */
175 new_bd_Block (dbg_info *db, int arity, ir_node **in)
178 ir_graph *irg = current_ir_graph;
180 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
181 set_Block_matured(res, 1);
182 set_Block_block_visited(res, 0);
184 /* res->attr.block.exc = exc_normal; */
185 /* res->attr.block.handler_entry = 0; */
186 res->attr.block.dead = 0;
187 res->attr.block.irg = irg;
188 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
189 res->attr.block.in_cg = NULL;
190 res->attr.block.cg_backedge = NULL;
191 res->attr.block.extblk = NULL;
193 IRN_VRFY_IRG(res, irg);
198 new_bd_Start (dbg_info *db, ir_node *block)
201 ir_graph *irg = current_ir_graph;
203 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
204 /* res->attr.start.irg = irg; */
206 IRN_VRFY_IRG(res, irg);
211 new_bd_End (dbg_info *db, ir_node *block)
214 ir_graph *irg = current_ir_graph;
216 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
218 IRN_VRFY_IRG(res, irg);
222 /* Creates a Phi node with all predecessors. Calling this constructor
223 is only allowed if the corresponding block is mature. */
225 new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
228 ir_graph *irg = current_ir_graph;
232 /* Don't assert that block matured: the use of this constructor is strongly
234 if ( get_Block_matured(block) )
235 assert( get_irn_arity(block) == arity );
237 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
239 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
241 for (i = arity-1; i >= 0; i--)
242 if (get_irn_op(in[i]) == op_Unknown) {
247 if (!has_unknown) res = optimize_node (res);
248 IRN_VRFY_IRG(res, irg);
250 /* Memory Phis in endless loops must be kept alive.
251 As we can't distinguish these easily we keep all of them alive. */
252 if ((res->op == op_Phi) && (mode == mode_M))
253 add_End_keepalive(get_irg_end(irg), res);
258 new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
261 ir_graph *irg = current_ir_graph;
263 res = new_ir_node (db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL);
264 res->attr.con.tv = con;
265 set_Const_type(res, tp); /* Call method because of complex assertion. */
266 res = optimize_node (res);
267 assert(get_Const_type(res) == tp);
268 IRN_VRFY_IRG(res, irg);
274 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
276 ir_graph *irg = current_ir_graph;
278 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
282 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
284 ir_graph *irg = current_ir_graph;
286 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
290 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
293 ir_graph *irg = current_ir_graph;
295 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
296 res = optimize_node(res);
297 IRN_VRFY_IRG(res, irg);
302 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
306 ir_graph *irg = current_ir_graph;
308 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
309 res->attr.proj = proj;
312 assert(get_Proj_pred(res));
313 assert(get_nodes_block(get_Proj_pred(res)));
315 res = optimize_node(res);
317 IRN_VRFY_IRG(res, irg);
323 new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
327 ir_graph *irg = current_ir_graph;
329 assert(arg->op == op_Cond);
330 arg->attr.c.kind = fragmentary;
331 arg->attr.c.default_proj = max_proj;
332 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
337 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
340 ir_graph *irg = current_ir_graph;
342 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
343 res = optimize_node(res);
344 IRN_VRFY_IRG(res, irg);
349 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
352 ir_graph *irg = current_ir_graph;
354 assert(is_atomic_type(to_tp));
356 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
357 res->attr.cast.totype = to_tp;
358 res = optimize_node(res);
359 IRN_VRFY_IRG(res, irg);
364 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
367 ir_graph *irg = current_ir_graph;
369 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
370 res = optimize_node (res);
371 IRN_VRFY_IRG(res, irg);
396 new_bd_Cmp (dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2)
400 ir_graph *irg = current_ir_graph;
403 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
404 res = optimize_node(res);
405 IRN_VRFY_IRG(res, irg);
410 new_bd_Jmp (dbg_info *db, ir_node *block)
413 ir_graph *irg = current_ir_graph;
415 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
416 res = optimize_node (res);
417 IRN_VRFY_IRG (res, irg);
422 new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
425 ir_graph *irg = current_ir_graph;
427 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
428 res = optimize_node (res);
429 IRN_VRFY_IRG (res, irg);
431 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
437 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
443 res->attr.c.kind = dense;
444 res->attr.c.default_proj = 0;
445 res->attr.c.pred = COND_JMP_PRED_NONE;
446 res = optimize_node (res);
447 IRN_VRFY_IRG(res, irg);
452 new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
453 ir_node *callee, int arity, ir_node **in, ir_type *tp)
458 ir_graph *irg = current_ir_graph;
461 NEW_ARR_A(ir_node *, r_in, r_arity);
464 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
466 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
468 assert((get_unknown_type() == tp) || is_Method_type(tp));
469 set_Call_type(res, tp);
470 res->attr.call.exc.pin_state = op_pin_state_pinned;
471 res->attr.call.callee_arr = NULL;
472 res = optimize_node(res);
473 IRN_VRFY_IRG(res, irg);
478 new_bd_Return (dbg_info *db, ir_node *block,
479 ir_node *store, int arity, ir_node **in)
484 ir_graph *irg = current_ir_graph;
487 NEW_ARR_A (ir_node *, r_in, r_arity);
489 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
490 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
491 res = optimize_node(res);
492 IRN_VRFY_IRG(res, irg);
497 new_bd_Load (dbg_info *db, ir_node *block,
498 ir_node *store, ir_node *adr, ir_mode *mode)
502 ir_graph *irg = current_ir_graph;
506 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
507 res->attr.load.exc.pin_state = op_pin_state_pinned;
508 res->attr.load.load_mode = mode;
509 res->attr.load.volatility = volatility_non_volatile;
510 res = optimize_node(res);
511 IRN_VRFY_IRG(res, irg);
516 new_bd_Store (dbg_info *db, ir_node *block,
517 ir_node *store, ir_node *adr, ir_node *val)
521 ir_graph *irg = current_ir_graph;
526 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
527 res->attr.store.exc.pin_state = op_pin_state_pinned;
528 res->attr.store.volatility = volatility_non_volatile;
529 res = optimize_node(res);
530 IRN_VRFY_IRG(res, irg);
535 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
536 ir_node *size, ir_type *alloc_type, where_alloc where)
540 ir_graph *irg = current_ir_graph;
544 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
545 res->attr.a.exc.pin_state = op_pin_state_pinned;
546 res->attr.a.where = where;
547 res->attr.a.type = alloc_type;
548 res = optimize_node(res);
549 IRN_VRFY_IRG(res, irg);
554 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
555 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
559 ir_graph *irg = current_ir_graph;
564 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
565 res->attr.f.where = where;
566 res->attr.f.type = free_type;
567 res = optimize_node(res);
568 IRN_VRFY_IRG(res, irg);
573 new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
574 int arity, ir_node **in, entity *ent)
579 ir_graph *irg = current_ir_graph;
581 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
584 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
587 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
589 * FIXM: Sel's can select functions which should be of mode mode_P_code.
591 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
592 res->attr.s.ent = ent;
593 res = optimize_node(res);
594 IRN_VRFY_IRG(res, irg);
599 new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
600 symconst_kind symkind, ir_type *tp) {
603 ir_graph *irg = current_ir_graph;
605 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
606 mode = mode_P_data; /* FIXME: can be mode_P_code */
610 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
612 res->attr.i.num = symkind;
613 res->attr.i.sym = value;
616 res = optimize_node(res);
617 IRN_VRFY_IRG(res, irg);
622 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
623 symconst_kind symkind)
625 ir_graph *irg = current_ir_graph;
627 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
632 new_bd_Sync (dbg_info *db, ir_node *block)
635 ir_graph *irg = current_ir_graph;
637 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
638 /* no need to call optimize node here, Sync are always created with no predecessors */
639 IRN_VRFY_IRG(res, irg);
644 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
646 ir_node *in[2], *res;
647 ir_graph *irg = current_ir_graph;
651 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
652 res->attr.confirm_cmp = cmp;
653 res = optimize_node (res);
654 IRN_VRFY_IRG(res, irg);
658 /* this function is often called with current_ir_graph unset */
660 new_bd_Unknown (ir_mode *m)
663 ir_graph *irg = current_ir_graph;
665 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL);
666 res = optimize_node(res);
671 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
675 ir_graph *irg = current_ir_graph;
677 in[0] = get_Call_ptr(call);
678 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
679 /* res->attr.callbegin.irg = irg; */
680 res->attr.callbegin.call = call;
681 res = optimize_node(res);
682 IRN_VRFY_IRG(res, irg);
687 new_bd_EndReg (dbg_info *db, ir_node *block)
690 ir_graph *irg = current_ir_graph;
692 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
693 set_irg_end_reg(irg, res);
694 IRN_VRFY_IRG(res, irg);
699 new_bd_EndExcept (dbg_info *db, ir_node *block)
702 ir_graph *irg = current_ir_graph;
704 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
705 set_irg_end_except(irg, res);
706 IRN_VRFY_IRG (res, irg);
711 new_bd_Break (dbg_info *db, ir_node *block)
714 ir_graph *irg = current_ir_graph;
716 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
717 res = optimize_node(res);
718 IRN_VRFY_IRG(res, irg);
723 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
727 ir_graph *irg = current_ir_graph;
729 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
730 res->attr.filter.proj = proj;
731 res->attr.filter.in_cg = NULL;
732 res->attr.filter.backedge = NULL;
735 assert(get_Proj_pred(res));
736 assert(get_nodes_block(get_Proj_pred(res)));
738 res = optimize_node(res);
739 IRN_VRFY_IRG(res, irg);
744 new_bd_Mux (dbg_info *db, ir_node *block,
745 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
749 ir_graph *irg = current_ir_graph;
755 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
758 res = optimize_node(res);
759 IRN_VRFY_IRG(res, irg);
764 new_bd_Psi (dbg_info *db, ir_node *block,
765 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
769 ir_graph *irg = current_ir_graph;
772 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
774 for (i = 0; i < arity; ++i) {
776 in[2 * i + 1] = vals[i];
780 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
783 res = optimize_node(res);
784 IRN_VRFY_IRG(res, irg);
789 new_bd_CopyB (dbg_info *db, ir_node *block,
790 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
794 ir_graph *irg = current_ir_graph;
800 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
802 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
803 res->attr.copyb.data_type = data_type;
804 res = optimize_node(res);
805 IRN_VRFY_IRG(res, irg);
810 new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
811 ir_node *objptr, ir_type *type)
815 ir_graph *irg = current_ir_graph;
819 res = new_ir_node(db, irg, block, op_Sel, mode_T, 2, in);
820 res->attr.io.type = type;
821 res = optimize_node(res);
822 IRN_VRFY_IRG(res, irg);
827 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
831 ir_graph *irg = current_ir_graph;
835 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
836 res = optimize_node(res);
837 IRN_VRFY_IRG(res, irg);
842 new_bd_Bound (dbg_info *db, ir_node *block,
843 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
847 ir_graph *irg = current_ir_graph;
853 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
854 res->attr.bound.exc.pin_state = op_pin_state_pinned;
855 res = optimize_node(res);
856 IRN_VRFY_IRG(res, irg);
860 /* --------------------------------------------- */
861 /* private interfaces, for professional use only */
862 /* --------------------------------------------- */
864 /* Constructs a Block with a fixed number of predecessors.
865 Does not set current_block. Can not be used with automatic
866 Phi node construction. */
868 new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
870 ir_graph *rem = current_ir_graph;
873 current_ir_graph = irg;
874 res = new_bd_Block (db, arity, in);
875 current_ir_graph = rem;
881 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
883 ir_graph *rem = current_ir_graph;
886 current_ir_graph = irg;
887 res = new_bd_Start (db, block);
888 current_ir_graph = rem;
894 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
897 ir_graph *rem = current_ir_graph;
/* BUG FIX: this line previously read `current_ir_graph = rem;`, which
   left current_ir_graph unchanged (rem was just saved from it), so the
   End node was built in whatever graph happened to be current instead
   of in `irg`.  Every sibling new_rd_* wrapper (e.g. new_rd_Start,
   new_rd_Phi) switches to `irg` here before delegating. */
899 current_ir_graph = irg;
900 res = new_bd_End (db, block);
901 current_ir_graph = rem;
906 /* Creates a Phi node with all predecessors. Calling this constructor
907 is only allowed if the corresponding block is mature. */
909 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
912 ir_graph *rem = current_ir_graph;
914 current_ir_graph = irg;
915 res = new_bd_Phi (db, block,arity, in, mode);
916 current_ir_graph = rem;
922 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
925 ir_graph *rem = current_ir_graph;
927 current_ir_graph = irg;
928 res = new_bd_Const_type (db, block, mode, con, tp);
929 current_ir_graph = rem;
935 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
938 ir_graph *rem = current_ir_graph;
940 current_ir_graph = irg;
941 res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
942 current_ir_graph = rem;
948 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
950 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
954 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
957 ir_graph *rem = current_ir_graph;
959 current_ir_graph = irg;
960 res = new_bd_Id(db, block, val, mode);
961 current_ir_graph = rem;
967 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
971 ir_graph *rem = current_ir_graph;
973 current_ir_graph = irg;
974 res = new_bd_Proj(db, block, arg, mode, proj);
975 current_ir_graph = rem;
981 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
985 ir_graph *rem = current_ir_graph;
987 current_ir_graph = irg;
988 res = new_bd_defaultProj(db, block, arg, max_proj);
989 current_ir_graph = rem;
995 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
998 ir_graph *rem = current_ir_graph;
1000 current_ir_graph = irg;
1001 res = new_bd_Conv(db, block, op, mode);
1002 current_ir_graph = rem;
1008 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1011 ir_graph *rem = current_ir_graph;
1013 current_ir_graph = irg;
1014 res = new_bd_Cast(db, block, op, to_tp);
1015 current_ir_graph = rem;
1021 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1024 ir_graph *rem = current_ir_graph;
1026 current_ir_graph = irg;
1027 res = new_bd_Tuple(db, block, arity, in);
1028 current_ir_graph = rem;
1038 NEW_RD_DIVOP(DivMod)
1051 NEW_RD_BINOP(Borrow)
1054 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1055 ir_node *op1, ir_node *op2)
1058 ir_graph *rem = current_ir_graph;
1060 current_ir_graph = irg;
1061 res = new_bd_Cmp(db, block, op1, op2);
1062 current_ir_graph = rem;
1068 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1071 ir_graph *rem = current_ir_graph;
1073 current_ir_graph = irg;
1074 res = new_bd_Jmp(db, block);
1075 current_ir_graph = rem;
1081 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1084 ir_graph *rem = current_ir_graph;
1086 current_ir_graph = irg;
1087 res = new_bd_IJmp(db, block, tgt);
1088 current_ir_graph = rem;
1094 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1097 ir_graph *rem = current_ir_graph;
1099 current_ir_graph = irg;
1100 res = new_bd_Cond(db, block, c);
1101 current_ir_graph = rem;
1107 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1108 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1111 ir_graph *rem = current_ir_graph;
1113 current_ir_graph = irg;
1114 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1115 current_ir_graph = rem;
1121 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1122 ir_node *store, int arity, ir_node **in)
1125 ir_graph *rem = current_ir_graph;
1127 current_ir_graph = irg;
1128 res = new_bd_Return(db, block, store, arity, in);
1129 current_ir_graph = rem;
1135 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1136 ir_node *store, ir_node *adr, ir_mode *mode)
1139 ir_graph *rem = current_ir_graph;
1141 current_ir_graph = irg;
1142 res = new_bd_Load(db, block, store, adr, mode);
1143 current_ir_graph = rem;
1149 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1150 ir_node *store, ir_node *adr, ir_node *val)
1153 ir_graph *rem = current_ir_graph;
1155 current_ir_graph = irg;
1156 res = new_bd_Store(db, block, store, adr, val);
1157 current_ir_graph = rem;
1163 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1164 ir_node *size, ir_type *alloc_type, where_alloc where)
1167 ir_graph *rem = current_ir_graph;
1169 current_ir_graph = irg;
1170 res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1171 current_ir_graph = rem;
1177 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1178 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1181 ir_graph *rem = current_ir_graph;
1183 current_ir_graph = irg;
1184 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1185 current_ir_graph = rem;
1191 new_rd_simpleSel (dbg_info *db, ir_graph *irg, ir_node *block,
1192 ir_node *store, ir_node *objptr, entity *ent)
1195 ir_graph *rem = current_ir_graph;
1197 current_ir_graph = irg;
1198 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1199 current_ir_graph = rem;
1205 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1206 int arity, ir_node **in, entity *ent)
1209 ir_graph *rem = current_ir_graph;
1211 current_ir_graph = irg;
1212 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1213 current_ir_graph = rem;
1219 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1220 symconst_kind symkind, ir_type *tp)
1223 ir_graph *rem = current_ir_graph;
1225 current_ir_graph = irg;
1226 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1227 current_ir_graph = rem;
1233 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1234 symconst_kind symkind)
1236 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1240 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, ir_type *tp)
1242 symconst_symbol sym = {(ir_type *)symbol};
1243 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1246 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1247 symconst_symbol sym = {(ir_type *)symbol};
1248 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1251 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1252 symconst_symbol sym = {symbol};
1253 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1256 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1257 symconst_symbol sym = {symbol};
1258 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
1261 ir_node *new_rd_SymConst_align (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1262 symconst_symbol sym = {symbol};
1263 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
1267 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[])
1270 ir_graph *rem = current_ir_graph;
1273 current_ir_graph = irg;
1274 res = new_bd_Sync(db, block);
1275 current_ir_graph = rem;
1277 for (i = 0; i < arity; ++i) add_Sync_pred(res, in[i]);
1283 new_rd_Bad (ir_graph *irg) {
1284 return get_irg_bad(irg);
1288 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1291 ir_graph *rem = current_ir_graph;
1293 current_ir_graph = irg;
1294 res = new_bd_Confirm(db, block, val, bound, cmp);
1295 current_ir_graph = rem;
1300 /* this function is often called with current_ir_graph unset */
1302 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1305 ir_graph *rem = current_ir_graph;
1307 current_ir_graph = irg;
1308 res = new_bd_Unknown(m);
1309 current_ir_graph = rem;
1315 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1318 ir_graph *rem = current_ir_graph;
1320 current_ir_graph = irg;
1321 res = new_bd_CallBegin(db, block, call);
1322 current_ir_graph = rem;
1328 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1332 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1333 set_irg_end_reg(irg, res);
1334 IRN_VRFY_IRG(res, irg);
1339 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1343 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1344 set_irg_end_except(irg, res);
1345 IRN_VRFY_IRG (res, irg);
1350 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1353 ir_graph *rem = current_ir_graph;
1355 current_ir_graph = irg;
1356 res = new_bd_Break(db, block);
1357 current_ir_graph = rem;
1363 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1367 ir_graph *rem = current_ir_graph;
1369 current_ir_graph = irg;
1370 res = new_bd_Filter(db, block, arg, mode, proj);
1371 current_ir_graph = rem;
1377 new_rd_NoMem (ir_graph *irg) {
1378 return get_irg_no_mem(irg);
1382 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
1383 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1386 ir_graph *rem = current_ir_graph;
1388 current_ir_graph = irg;
1389 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1390 current_ir_graph = rem;
1396 new_rd_Psi (dbg_info *db, ir_graph *irg, ir_node *block,
1397 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
1400 ir_graph *rem = current_ir_graph;
1402 current_ir_graph = irg;
1403 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1404 current_ir_graph = rem;
1409 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1410 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1413 ir_graph *rem = current_ir_graph;
1415 current_ir_graph = irg;
1416 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1417 current_ir_graph = rem;
1423 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1424 ir_node *objptr, ir_type *type)
1427 ir_graph *rem = current_ir_graph;
1429 current_ir_graph = irg;
1430 res = new_bd_InstOf(db, block, store, objptr, type);
1431 current_ir_graph = rem;
1437 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1440 ir_graph *rem = current_ir_graph;
1442 current_ir_graph = irg;
1443 res = new_bd_Raise(db, block, store, obj);
1444 current_ir_graph = rem;
1449 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1450 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1453 ir_graph *rem = current_ir_graph;
1455 current_ir_graph = irg;
1456 res = new_bd_Bound(db, block, store, idx, lower, upper);
1457 current_ir_graph = rem;
1462 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1463 return new_rd_Block(NULL, irg, arity, in);
1465 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1466 return new_rd_Start(NULL, irg, block);
1468 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1469 return new_rd_End(NULL, irg, block);
1471 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1472 return new_rd_Jmp(NULL, irg, block);
1474 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1475 return new_rd_IJmp(NULL, irg, block, tgt);
1477 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1478 return new_rd_Cond(NULL, irg, block, c);
1480 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1481 ir_node *store, int arity, ir_node **in) {
1482 return new_rd_Return(NULL, irg, block, store, arity, in);
1484 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1485 ir_mode *mode, tarval *con) {
1486 return new_rd_Const(NULL, irg, block, mode, con);
1488 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1489 ir_mode *mode, long value) {
1490 return new_rd_Const_long(NULL, irg, block, mode, value);
1492 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1493 ir_mode *mode, tarval *con, ir_type *tp) {
1494 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1496 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1497 symconst_symbol value, symconst_kind symkind) {
1498 return new_rd_SymConst(NULL, irg, block, value, symkind);
1500 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1501 ir_node *objptr, entity *ent) {
1502 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1504 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1505 ir_node *objptr, int n_index, ir_node **index,
1507 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1509 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1510 ir_node *callee, int arity, ir_node **in,
1512 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
/* Arithmetic constructors without debug info.  The division family
   (Quot/DivMod/Div/Mod) additionally takes a memory operand. */
1514 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1515 ir_node *op1, ir_node *op2, ir_mode *mode) {
1516 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1518 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1519 ir_node *op1, ir_node *op2, ir_mode *mode) {
1520 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1522 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1523 ir_node *op, ir_mode *mode) {
1524 return new_rd_Minus(NULL, irg, block, op, mode);
1526 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1527 ir_node *op1, ir_node *op2, ir_mode *mode) {
1528 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1530 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1531 ir_node *memop, ir_node *op1, ir_node *op2) {
1532 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1534 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1535 ir_node *memop, ir_node *op1, ir_node *op2) {
1536 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1538 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1539 ir_node *memop, ir_node *op1, ir_node *op2) {
1540 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1542 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1543 ir_node *memop, ir_node *op1, ir_node *op2) {
1544 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1546 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1547 ir_node *op, ir_mode *mode) {
1548 return new_rd_Abs(NULL, irg, block, op, mode);
/* Bitwise, shift and compare constructors without debug info. */
1550 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1551 ir_node *op1, ir_node *op2, ir_mode *mode) {
1552 return new_rd_And(NULL, irg, block, op1, op2, mode);
1554 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1555 ir_node *op1, ir_node *op2, ir_mode *mode) {
1556 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1558 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1559 ir_node *op1, ir_node *op2, ir_mode *mode) {
1560 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1562 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1563 ir_node *op, ir_mode *mode) {
1564 return new_rd_Not(NULL, irg, block, op, mode);
/* Shift/rotate constructors: k is the shift amount node. */
1566 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1567 ir_node *op, ir_node *k, ir_mode *mode) {
1568 return new_rd_Shl(NULL, irg, block, op, k, mode);
1570 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1571 ir_node *op, ir_node *k, ir_mode *mode) {
1572 return new_rd_Shr(NULL, irg, block, op, k, mode);
1574 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1575 ir_node *op, ir_node *k, ir_mode *mode) {
1576 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1578 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1579 ir_node *op, ir_node *k, ir_mode *mode) {
1580 return new_rd_Rot(NULL, irg, block, op, k, mode);
1582 ir_node *new_r_Carry (ir_graph *irg, ir_node *block,
1583 ir_node *op, ir_node *k, ir_mode *mode) {
1584 return new_rd_Carry(NULL, irg, block, op, k, mode);
1586 ir_node *new_r_Borrow (ir_graph *irg, ir_node *block,
1587 ir_node *op, ir_node *k, ir_mode *mode) {
1588 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1590 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1591 ir_node *op1, ir_node *op2) {
1592 return new_rd_Cmp(NULL, irg, block, op1, op2);
/* Conversion, Phi and memory-operation constructors without debug info. */
1594 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1595 ir_node *op, ir_mode *mode) {
1596 return new_rd_Conv(NULL, irg, block, op, mode);
1598 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1599 return new_rd_Cast(NULL, irg, block, op, to_tp);
1601 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1602 ir_node **in, ir_mode *mode) {
1603 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1605 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1606 ir_node *store, ir_node *adr, ir_mode *mode) {
1607 return new_rd_Load(NULL, irg, block, store, adr, mode);
1609 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1610 ir_node *store, ir_node *adr, ir_node *val) {
1611 return new_rd_Store(NULL, irg, block, store, adr, val);
1613 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1614 ir_node *size, ir_type *alloc_type, where_alloc where) {
1615 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1617 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1618 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1619 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1621 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1622 return new_rd_Sync(NULL, irg, block, arity, in);
1624 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1625 ir_mode *mode, long proj) {
1626 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1628 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1630 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1632 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1633 int arity, ir_node **in) {
1634 return new_rd_Tuple(NULL, irg, block, arity, in );
1636 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1637 ir_node *val, ir_mode *mode) {
1638 return new_rd_Id(NULL, irg, block, val, mode);
1640 ir_node *new_r_Bad (ir_graph *irg) {
1641 return new_rd_Bad(irg);
/* Control-flow and miscellaneous constructors without debug info. */
1643 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1644 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1646 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1647 return new_rd_Unknown(irg, m);
1649 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1650 return new_rd_CallBegin(NULL, irg, block, callee);
1652 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1653 return new_rd_EndReg(NULL, irg, block);
1655 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1656 return new_rd_EndExcept(NULL, irg, block);
1658 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1659 return new_rd_Break(NULL, irg, block);
1661 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1662 ir_mode *mode, long proj) {
1663 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1665 ir_node *new_r_NoMem (ir_graph *irg) {
1666 return new_rd_NoMem(irg);
1668 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1669 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1670 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1672 ir_node *new_r_Psi (ir_graph *irg, ir_node *block,
1673 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1674 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1676 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1677 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1678 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1680 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1682 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1684 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1685 ir_node *store, ir_node *obj) {
1686 return new_rd_Raise(NULL, irg, block, store, obj);
1688 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1689 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1690 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
/* **************************************************************** */
/* Public interface: construction tools for the current graph       */
/* (the "new_d_*" constructors operating on current_ir_graph).      */
/* **************************************************************** */
/**
 * Creates a Start node (mode_T, no predecessors) in the current block
 * of current_ir_graph, then runs the optimizer and verifier over it.
 *
 * @param db  debug info, may be NULL
 * @return pointer to the created Start node
 */
1706 new_d_Start (dbg_info *db)
1710 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1711 op_Start, mode_T, 0, NULL);
1712 /* res->attr.start.irg = current_ir_graph; */
1714 res = optimize_node(res);
1715 IRN_VRFY_IRG(res, current_ir_graph);
/**
 * Creates an End node (mode_X) in the current block.  Arity -1 marks
 * a node with a dynamic predecessor list; keep-alive edges are added
 * later via add_End_keepalive().
 */
1720 new_d_End (dbg_info *db)
1723 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1724 op_End, mode_X, -1, NULL);
1725 res = optimize_node(res);
1726 IRN_VRFY_IRG(res, current_ir_graph);
1731 /* Constructs a Block with a fixed number of predecessors.
1732 Does set current_block. Can be used with automatic Phi
1733 node construction. */
1735 new_d_Block (dbg_info *db, int arity, ir_node **in)
1739 int has_unknown = 0;
1741 res = new_bd_Block(db, arity, in);
1743 /* Create and initialize array for Phi-node construction. */
1744 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1745 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1746 current_ir_graph->n_loc);
1747 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan predecessors for Unknown nodes; blocks with Unknown
   predecessors are not optimized yet (see use of has_unknown below). */
1750 for (i = arity-1; i >= 0; i--)
1751 if (get_irn_op(in[i]) == op_Unknown) {
1756 if (!has_unknown) res = optimize_node(res);
1757 current_ir_graph->current_block = res;
1759 IRN_VRFY_IRG(res, current_ir_graph);
1764 /* ***********************************************************************/
1765 /* Methods necessary for automatic Phi node creation */
1767 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1768 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1769 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1770 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1772 Call Graph: ( A ---> B == A "calls" B)
1774 get_value mature_immBlock
1782 get_r_value_internal |
1786 new_rd_Phi0 new_rd_Phi_in
1788 * *************************************************************************** */
/**
 * Creates a Phi node with 0 predecessors (a "Phi0").  Used as a
 * placeholder for a value in a block that is not yet matured; it is
 * later completed (or turned into an Id) when the block matures.
 * Note: the result is deliberately NOT optimized here.
 */
1791 static INLINE ir_node *
1792 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1796 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1797 IRN_VRFY_IRG(res, irg);
1801 /* There are two implementations of the Phi node construction. The first
1802 is faster, but does not work for blocks with more than 2 predecessors.
1803 The second works always but is slower and causes more unnecessary Phi
1805 Select the implementations by the following preprocessor flag set in
1807 #if USE_FAST_PHI_CONSTRUCTION
1809 /* This is a stack used for allocating and deallocating nodes in
1810 new_rd_Phi_in. The original implementation used the obstack
1811 to model this stack, now it is explicit. This reduces side effects.
1813 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in_stack.
   NOTE(review): the malloc result is used unchecked here. */
1815 new_Phi_in_stack(void) {
1818 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1820 res->stack = NEW_ARR_F (ir_node *, 0);
/* Frees the dynamic array of a Phi_in_stack. */
1827 free_Phi_in_stack(Phi_in_stack *s) {
1828 DEL_ARR_F(s->stack);
/* Returns a Phi node to the stack for later reuse: append if the
   stack is full up to pos, otherwise overwrite the slot at pos. */
1832 free_to_Phi_in_stack(ir_node *phi) {
1833 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1834 current_ir_graph->Phi_in_stack->pos)
1835 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1837 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1839 (current_ir_graph->Phi_in_stack->pos)++;
/* Either pops a previously freed Phi node from the Phi_in_stack and
   re-initializes it, or allocates a fresh Phi node.
   NOTE(review): `db` used below is not a parameter of this function
   and not visible in this scope; this code is only compiled when
   USE_EXPLICIT_PHI_IN_STACK is set — presumably it should pass NULL.
   Verify before enabling that configuration. */
1842 static INLINE ir_node *
1843 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1844 int arity, ir_node **in) {
1846 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1847 int pos = current_ir_graph->Phi_in_stack->pos;
1851 /* We need to allocate a new node */
1852 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1853 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1855 /* reuse the old node and initialize it again. */
1858 assert (res->kind == k_ir_node);
1859 assert (res->op == op_Phi);
1863 assert (arity >= 0);
1864 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1865 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1867 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1869 (current_ir_graph->Phi_in_stack->pos)--;
1875 /* Creates a Phi node with a given, fixed array **in of predecessors.
1876 If the Phi node is unnecessary, as the same value reaches the block
1877 through all control flow paths, it is eliminated and the value
1878 returned directly. This constructor is only intended for use in
1879 the automatic Phi node generation triggered by get_value or mature.
1880 The implementation is quite tricky and depends on the fact, that
1881 the nodes are allocated on a stack:
1882 The in array contains predecessors and NULLs. The NULLs appear,
1883 if get_r_value_internal, that computed the predecessors, reached
1884 the same block on two paths. In this case the same value reaches
1885 this block on both paths, there is no definition in between. We need
1886 not allocate a Phi where these path's merge, but we have to communicate
1887 this fact to the caller. This happens by returning a pointer to the
1888 node the caller _will_ allocate. (Yes, we predict the address. We can
1889 do so because the nodes are allocated on the obstack.) The caller then
1890 finds a pointer to itself and, when this routine is called again,
/* Fast-construction variant of new_rd_Phi_in (USE_FAST_PHI_CONSTRUCTION):
   builds a Phi for the given predecessors, or eliminates it when only
   one distinct predecessor remains. */
1893 static INLINE ir_node *
1894 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1897 ir_node *res, *known;
1899 /* Allocate a new node on the obstack. This can return a node to
1900 which some of the pointers in the in-array already point.
1901 Attention: the constructor copies the in array, i.e., the later
1902 changes to the array in this routine do not affect the
1903 constructed node! If the in array contains NULLs, there will be
1904 missing predecessors in the returned node. Is this a possible
1905 internal state of the Phi node generation? */
1906 #if USE_EXPLICIT_PHI_IN_STACK
1907 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1909 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1910 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1913 /* The in-array can contain NULLs. These were returned by
1914 get_r_value_internal if it reached the same block/definition on a
1915 second path. The NULLs are replaced by the node itself to
1916 simplify the test in the next loop. */
1917 for (i = 0; i < ins; ++i) {
1922 /* This loop checks whether the Phi has more than one predecessor.
1923 If so, it is a real Phi node and we break the loop. Else the Phi
1924 node merges the same definition on several paths and therefore is
1926 for (i = 0; i < ins; ++i) {
1927 if (in[i] == res || in[i] == known)
1936 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1938 #if USE_EXPLICIT_PHI_IN_STACK
1939 free_to_Phi_in_stack(res);
1941 edges_node_deleted(res, current_ir_graph);
1942 obstack_free(current_ir_graph->obst, res);
1946 res = optimize_node (res);
1947 IRN_VRFY_IRG(res, irg);
1950 /* return the pointer to the Phi node. This node might be deallocated! */
1955 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1958 allocates and returns this node. The routine called to allocate the
1959 node might optimize it away and return a real value, or even a pointer
1960 to a deallocated Phi node on top of the obstack!
1961 This function is called with an in-array of proper size. **/
/* Fast-construction variant of phi_merge: collects the Phi operands
   from all predecessor blocks and builds the node via new_rd_Phi_in. */
1963 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1965 ir_node *prevBlock, *res;
1968 /* This loop goes to all predecessor blocks of the block the Phi node is in
1969 and there finds the operands of the Phi node by calling
1970 get_r_value_internal. */
1971 for (i = 1; i <= ins; ++i) {
1972 assert (block->in[i]);
1973 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1975 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1978 /* After collecting all predecessors into the array nin a new Phi node
1979 with these predecessors is created. This constructor contains an
1980 optimization: If all predecessors of the Phi node are identical it
1981 returns the only operand instead of a new Phi node. If the value
1982 passes two different control flow edges without being defined, and
1983 this is the second path treated, a pointer to the node that will be
1984 allocated for the first path (recursion) is returned. We already
1985 know the address of this node, as it is the next node to be allocated
1986 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1987 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1989 /* Now we know the value for "pos" and can enter it in the array with
1990 all known local variables. Attention: this might be a pointer to
1991 a node, that later will be allocated!!! See new_rd_Phi_in().
1992 If this is called in mature, after some set_value() in the same block,
1993 the proper value must not be overwritten:
1995 get_value (makes Phi0, puts it into graph_arr)
1996 set_value (overwrites Phi0 in graph_arr)
1997 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
2000 if (!block->attr.block.graph_arr[pos]) {
2001 block->attr.block.graph_arr[pos] = res;
2003 /* printf(" value already computed by %s\n",
2004 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2010 /* This function returns the last definition of a variable. In case
2011 this variable was last defined in a previous block, Phi nodes are
2012 inserted. If the part of the firm graph containing the definition
2013 is not yet constructed, a dummy Phi node is returned. */
2015 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2018 /* There are 4 cases to treat.
2020 1. The block is not mature and we visit it the first time. We can not
2021 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2022 predecessors is returned. This node is added to the linked list (field
2023 "link") of the containing block to be completed when this block is
2024 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2027 2. The value is already known in this block, graph_arr[pos] is set and we
2028 visit the block the first time. We can return the value without
2029 creating any new nodes.
2031 3. The block is mature and we visit it the first time. A Phi node needs
2032 to be created (phi_merge). If the Phi is not needed, as all its
2033 operands are the same value reaching the block through different
2034 paths, it is optimized away and the value itself is returned.
2036 4. The block is mature, and we visit it the second time. Now two
2037 subcases are possible:
2038 * The value was computed completely the last time we were here. This
2039 is the case if there is no loop. We can return the proper value.
2040 * The recursion that visited this node and set the flag did not
2041 return yet. We are computing a value in a loop and need to
2042 break the recursion without knowing the result yet.
2043 @@@ strange case. Straight forward we would create a Phi before
2044 starting the computation of its predecessors. In this case we will
2045 find a Phi here in any case. The problem is that this implementation
2046 only creates a Phi after computing the predecessors, so that it is
2047 hard to compute self references of this Phi. @@@
2048 There is no simple check for the second subcase. Therefore we check
2049 for a second visit and treat all such cases as the second subcase.
2050 Anyways, the basic situation is the same: we reached a block
2051 on two paths without finding a definition of the value: No Phi
2052 nodes are needed on both paths.
2053 We return this information "Two paths, no Phi needed" by a very tricky
2054 implementation that relies on the fact that an obstack is a stack and
2055 will return a node with the same address on different allocations.
2056 Look also at phi_merge and new_rd_phi_in to understand this.
2057 @@@ Unfortunately this does not work, see testprogram
2058 three_cfpred_example.
2062 /* case 4 -- already visited. */
2063 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2065 /* visited the first time */
2066 set_irn_visited(block, get_irg_visited(current_ir_graph));
2068 /* Get the local valid value */
2069 res = block->attr.block.graph_arr[pos];
2071 /* case 2 -- If the value is actually computed, return it. */
2072 if (res) return res;
2074 if (block->attr.block.matured) { /* case 3 */
2076 /* The Phi has the same amount of ins as the corresponding block. */
2077 int ins = get_irn_arity(block);
2079 NEW_ARR_A (ir_node *, nin, ins);
2081 /* Phi merge collects the predecessors and then creates a node. */
2082 res = phi_merge (block, pos, mode, nin, ins);
2084 } else { /* case 1 */
2085 /* The block is not mature, we don't know how many in's are needed. A Phi
2086 with zero predecessors is created. Such a Phi node is called Phi0
2087 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2088 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2090 The Phi0 has to remember the pos of its internal value. If the real
2091 Phi is computed, pos is used to update the array with the local
2094 res = new_rd_Phi0 (current_ir_graph, block, mode);
2095 res->attr.phi0_pos = pos;
2096 res->link = block->link;
2100 /* If we get here, the frontend missed a use-before-definition error */
2103 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2104 assert (mode->code >= irm_F && mode->code <= irm_P);
2105 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2106 tarval_mode_null[mode->code]);
2109 /* The local valid value is available now. */
2110 block->attr.block.graph_arr[pos] = res;
2118 it starts the recursion. This causes an Id at the entry of
2119 every block that has no definition of the value! **/
2121 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy implementations: the general (slow) Phi construction does not
   use the explicit Phi_in_stack, but callers still link against these. */
2123 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2124 void free_Phi_in_stack(Phi_in_stack *s) { }
/* General variant of new_rd_Phi_in: additionally receives the Phi0
   placeholder (phi0, may be NULL) so self-references through the not
   yet exchanged Phi0 can be folded immediately. */
2127 static INLINE ir_node *
2128 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
2129 ir_node **in, int ins, ir_node *phi0)
2132 ir_node *res, *known;
2134 /* Allocate a new node on the obstack. The allocation copies the in
2136 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2137 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2139 /* This loop checks whether the Phi has more than one predecessor.
2140 If so, it is a real Phi node and we break the loop. Else the
2141 Phi node merges the same definition on several paths and therefore
2142 is not needed. Don't consider Bad nodes! */
2144 for (i=0; i < ins; ++i)
2148 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2150 /* Optimize self referencing Phis: We can't detect them yet properly, as
2151 they still refer to the Phi0 they will replace. So replace right now. */
2152 if (phi0 && in[i] == phi0) in[i] = res;
2154 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2162 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2165 edges_node_deleted(res, current_ir_graph);
2166 obstack_free (current_ir_graph->obst, res);
2167 if (is_Phi(known)) {
2168 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2169 order, an enclosing Phi node may become superfluous. */
2170 res = optimize_in_place_2(known);
2172 exchange(known, res);
2178 /* An undefined value, e.g., in unreachable code. */
2182 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2183 IRN_VRFY_IRG(res, irg);
2184 /* Memory Phis in endless loops must be kept alive.
2185 As we can't distinguish these easily we keep all of them alive. */
2186 if ((res->op == op_Phi) && (mode == mode_M))
2187 add_End_keepalive(get_irg_end(irg), res);
2194 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2196 #if PRECISE_EXC_CONTEXT
2198 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2200 /* Construct a new frag_array for node n.
2201 Copy the content from the current graph_arr of the corresponding block:
2202 this is the current state.
2203 Set ProjM(n) as current memory state.
2204 Further the last entry in frag_arr of current block points to n. This
2205 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2207 static INLINE ir_node ** new_frag_arr (ir_node *n)
2212 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2213 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2214 sizeof(ir_node *)*current_ir_graph->n_loc);
2216 /* turn off optimization before allocating Proj nodes, as res isn't
2218 opt = get_opt_optimize(); set_optimize(0);
2219 /* Here we rely on the fact that all frag ops have Memory as first result! */
/* NOTE(review): the Projs below use the *_M_except numbers for
   Call/CopyB/Bound — confirm these equal the plain memory result
   numbers, as the comment above assumes memory is the first result. */
2220 if (get_irn_op(n) == op_Call)
2221 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2222 else if (get_irn_op(n) == op_CopyB)
2223 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2224 else if (get_irn_op(n) == op_Bound)
2225 arr[0] = new_Proj(n, mode_M, pn_Bound_M_except);
/* All remaining fragile ops share one memory proj number. */
2227 assert((pn_Quot_M == pn_DivMod_M) &&
2228 (pn_Quot_M == pn_Div_M) &&
2229 (pn_Quot_M == pn_Mod_M) &&
2230 (pn_Quot_M == pn_Load_M) &&
2231 (pn_Quot_M == pn_Store_M) &&
2232 (pn_Quot_M == pn_Alloc_M) );
2233 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Chain: the last frag-array slot of the current block points to n. */
2237 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2242 * returns the frag_arr from a node
2244 static INLINE ir_node **
2245 get_frag_arr (ir_node *n) {
/* Dispatch on opcode to the matching attribute union member.
   The field names suggest: call -> Call, a -> Alloc (presumably),
   load -> Load, store -> Store, except -> other fragile ops. */
2246 switch (get_irn_opcode(n)) {
2248 return n->attr.call.exc.frag_arr;
2250 return n->attr.a.exc.frag_arr;
2252 return n->attr.load.exc.frag_arr;
2254 return n->attr.store.exc.frag_arr;
2256 return n->attr.except.frag_arr;
/* Records val at position pos in a frag_arr without overwriting an
   existing entry, then propagates along the frag-op chain (the last
   slot of a frag_arr links to the next fragile op).
   NOTE(review): two implementations appear below — a recursive one
   and an iteration-bounded one (limit 1000); presumably they are
   selected by a preprocessor conditional not visible here. */
2261 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2263 if (!frag_arr[pos]) frag_arr[pos] = val;
2264 if (frag_arr[current_ir_graph->n_loc - 1]) {
2265 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2266 assert(arr != frag_arr && "Endless recursion detected");
2267 set_frag_value(arr, pos, val);
/* Iterative variant: walks at most 1000 chain links. */
2272 for (i = 0; i < 1000; ++i) {
2273 if (!frag_arr[pos]) {
2274 frag_arr[pos] = val;
2276 if (frag_arr[current_ir_graph->n_loc - 1]) {
2277 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2283 assert(0 && "potential endless recursion");
/* Returns the definition of value pos as seen *after* the fragile op
   cfOp, consulting cfOp's frag_arr first and falling back to the
   normal block-local lookup. */
2288 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2292 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2294 frag_arr = get_frag_arr(cfOp);
2295 res = frag_arr[pos];
2297 if (block->attr.block.graph_arr[pos]) {
2298 /* There was a set_value() after the cfOp and no get_value before that
2299 set_value(). We must build a Phi node now. */
2300 if (block->attr.block.matured) {
2301 int ins = get_irn_arity(block);
2303 NEW_ARR_A (ir_node *, nin, ins);
2304 res = phi_merge(block, pos, mode, nin, ins);
/* Block not matured: fall back to a Phi0 placeholder, linked into
   the block's Phi0 list for later maturation. */
2306 res = new_rd_Phi0 (current_ir_graph, block, mode);
2307 res->attr.phi0_pos = pos;
2308 res->link = block->link;
2312 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2313 but this should be better: (remove comment if this works) */
2314 /* It's a Phi, we can write this into all graph_arrs with NULL */
2315 set_frag_value(block->attr.block.graph_arr, pos, res);
2317 res = get_r_value_internal(block, pos, mode);
2318 set_frag_value(block->attr.block.graph_arr, pos, res);
2326 computes the predecessors for the real phi node, and then
2327 allocates and returns this node. The routine called to allocate the
2328 node might optimize it away and return a real value.
2329 This function must be called with an in-array of proper size. **/
/* General variant of phi_merge: collects the Phi operands from all
   predecessor blocks (honoring precise exception context for fragile
   ops) and builds the node via new_rd_Phi_in.  Breaks recursion by
   pre-seeding graph_arr with a Phi0 placeholder. */
2331 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2333 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2336 /* If this block has no value at pos create a Phi0 and remember it
2337 in graph_arr to break recursions.
2338 Else we may not set graph_arr as there a later value is remembered. */
2340 if (!block->attr.block.graph_arr[pos]) {
2341 if (block == get_irg_start_block(current_ir_graph)) {
2342 /* Collapsing to Bad tarvals is no good idea.
2343 So we call a user-supplied routine here that deals with this case as
2344 appropriate for the given language. Sorrily the only help we can give
2345 here is the position.
2347 Even if all variables are defined before use, it can happen that
2348 we get to the start block, if a Cond has been replaced by a tuple
2349 (bad, jmp). In this case we call the function needlessly, eventually
2350 generating a non-existent error.
2351 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2354 if (default_initialize_local_variable) {
2355 ir_node *rem = get_cur_block();
2357 set_cur_block(block);
2358 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2362 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2363 /* We don't need to care about exception ops in the start block.
2364 There are none by definition. */
2365 return block->attr.block.graph_arr[pos];
2367 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2368 block->attr.block.graph_arr[pos] = phi0;
2369 #if PRECISE_EXC_CONTEXT
2370 if (get_opt_precise_exc_context()) {
2371 /* Set graph_arr for fragile ops. Also here we should break recursion.
2372 We could choose a cyclic path through an cfop. But the recursion would
2373 break at some point. */
2374 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2380 /* This loop goes to all predecessor blocks of the block the Phi node
2381 is in and there finds the operands of the Phi node by calling
2382 get_r_value_internal. */
2383 for (i = 1; i <= ins; ++i) {
2384 prevCfOp = skip_Proj(block->in[i]);
2386 if (is_Bad(prevCfOp)) {
2387 /* In case a Cond has been optimized we would get right to the start block
2388 with an invalid definition. */
2389 nin[i-1] = new_Bad();
2392 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2394 if (!is_Bad(prevBlock)) {
2395 #if PRECISE_EXC_CONTEXT
2396 if (get_opt_precise_exc_context() &&
2397 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
2398 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2399 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2402 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2404 nin[i-1] = new_Bad();
2408 /* We want to pass the Phi0 node to the constructor: this finds additional
2409 optimization possibilities.
2410 The Phi0 node either is allocated in this function, or it comes from
2411 a former call to get_r_value_internal. In this case we may not yet
2412 exchange phi0, as this is done in mature_immBlock. */
2414 phi0_all = block->attr.block.graph_arr[pos];
/* Only pass it on if it really is a Phi0 (arity-0 Phi) of this block. */
2415 if (!((get_irn_op(phi0_all) == op_Phi) &&
2416 (get_irn_arity(phi0_all) == 0) &&
2417 (get_nodes_block(phi0_all) == block)))
2423 /* After collecting all predecessors into the array nin a new Phi node
2424 with these predecessors is created. This constructor contains an
2425 optimization: If all predecessors of the Phi node are identical it
2426 returns the only operand instead of a new Phi node. */
2427 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2429 /* In case we allocated a Phi0 node at the beginning of this procedure,
2430 we need to exchange this Phi0 with the real Phi. */
2432 exchange(phi0, res);
2433 block->attr.block.graph_arr[pos] = res;
2434 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2435 only an optimization. */
2441 /* This function returns the last definition of a variable. In case
2442 this variable was last defined in a previous block, Phi nodes are
2443 inserted. If the part of the firm graph containing the definition
2444 is not yet constructed, a dummy Phi node is returned. */
2446 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2449 /* There are 4 cases to treat.
2451 1. The block is not mature and we visit it the first time. We can not
2452 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2453 predecessors is returned. This node is added to the linked list (field
2454 "link") of the containing block to be completed when this block is
2455 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2458 2. The value is already known in this block, graph_arr[pos] is set and we
2459 visit the block the first time. We can return the value without
2460 creating any new nodes.
2462 3. The block is mature and we visit it the first time. A Phi node needs
2463 to be created (phi_merge). If the Phi is not needed, as all its
2464 operands are the same value reaching the block through different
2465 paths, it is optimized away and the value itself is returned.
2467 4. The block is mature, and we visit it the second time. Now two
2468 subcases are possible:
2469 * The value was computed completely the last time we were here. This
2470 is the case if there is no loop. We can return the proper value.
2471 * The recursion that visited this node and set the flag did not
2472 return yet. We are computing a value in a loop and need to
2473 break the recursion. This case only happens if we visited
2474 the same block with phi_merge before, which inserted a Phi0.
2475 So we return the Phi0.
2478 /* case 4 -- already visited. */
2479 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2480 /* As phi_merge allocates a Phi0 this value is always defined. Here
2481 is the critical difference of the two algorithms. */
2482 assert(block->attr.block.graph_arr[pos]);
2483 return block->attr.block.graph_arr[pos];
2486 /* visited the first time */
2487 set_irn_visited(block, get_irg_visited(current_ir_graph));
2489 /* Get the local valid value */
2490 res = block->attr.block.graph_arr[pos];
2492 /* case 2 -- If the value is actually computed, return it. */
2493 if (res) { return res; };
2495 if (block->attr.block.matured) { /* case 3 */
2497 /* The Phi has the same amount of ins as the corresponding block. */
2498 int ins = get_irn_arity(block);
2500 NEW_ARR_A (ir_node *, nin, ins);
2502 /* Phi merge collects the predecessors and then creates a node. */
2503 res = phi_merge (block, pos, mode, nin, ins);
2505 } else { /* case 1 */
2506 /* The block is not mature, we don't know how many in's are needed. A Phi
2507 with zero predecessors is created. Such a Phi node is called Phi0
2508 node. The Phi0 is then added to the list of Phi0 nodes in this block
2509 to be matured by mature_immBlock later.
2510 The Phi0 has to remember the pos of its internal value. If the real
2511 Phi is computed, pos is used to update the array with the local
2513 res = new_rd_Phi0 (current_ir_graph, block, mode);
2514 res->attr.phi0_pos = pos;
2515 res->link = block->link;
2519 /* If we get here, the frontend missed a use-before-definition error */
2522 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2523 assert (mode->code >= irm_F && mode->code <= irm_P);
2524 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2525 get_mode_null(mode));
2528 /* The local valid value is available now. */
2529 block->attr.block.graph_arr[pos] = res;
2534 #endif /* USE_FAST_PHI_CONSTRUCTION */
2536 /* ************************************************************************** */
2539 * Finalize a Block node, when all control flows are known.
2540 * Acceptable parameters are only Block nodes.
/* Finalizes an immature Block node once all control-flow predecessors are
   known: installs the backedge array for the final arity, replaces every
   Phi0 queued on block->link by a real Phi via phi_merge, marks the block
   matured and optimizes it in place.
   NOTE(review): lines are missing from this listing (e.g. where `next` is
   read from the Phi0 chain and where declarations of ins/nin/n appear). */
2543 mature_immBlock (ir_node *block)
2549 assert (get_irn_opcode(block) == iro_Block);
2550 /* @@@ should be commented in
2551 assert (!get_Block_matured(block) && "Block already matured"); */
2553 if (!get_Block_matured(block)) {
/* ARR_LEN(block->in) counts the in-array including the head slot,
   hence the -1 for the number of control-flow predecessors. */
2554 ins = ARR_LEN (block->in)-1;
2555 /* Fix block parameters */
2556 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2558 /* An array for building the Phi nodes. */
2559 NEW_ARR_A (ir_node *, nin, ins);
2561 /* Traverse a chain of Phi nodes attached to this block and mature
2563 for (n = block->link; n; n=next) {
2564 inc_irg_visited(current_ir_graph);
/* Replace the placeholder Phi0 by the real Phi computed for its
   recorded variable slot (phi0_pos). */
2566 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2569 block->attr.block.matured = 1;
2571 /* Now, as the block is a finished firm node, we can optimize it.
2572 Since other nodes have been allocated since the block was created
2573 we can not free the node on the obstack. Therefore we have to call
2575 Unfortunately the optimization does not change a lot, as all allocated
2576 nodes refer to the unoptimized node.
2577 We can call _2, as global cse has no effect on blocks. */
2578 block = optimize_in_place_2(block);
2579 IRN_VRFY_IRG(block, current_ir_graph);
/* --- Debug-info constructors: each new_d_X() forwards to the matching
   new_bd_X() using the graph's current block.  Exception: the Const
   variants are always placed in the start block of the graph. --- */
2584 new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
2586 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2590 new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
2592 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2596 new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
2598 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2602 new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp)
2604 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2609 new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
2611 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2615 new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2617 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* Builds the default Proj of a Cond: marks the Cond fragmentary, records
   max_proj as the default projection number and creates a mode_X Proj. */
2621 new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
2624 assert(arg->op == op_Cond);
2625 arg->attr.c.kind = fragmentary;
2626 arg->attr.c.default_proj = max_proj;
2627 res = new_Proj (arg, mode_X, max_proj);
2632 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
2634 return new_bd_Conv(db, current_ir_graph->current_block, op, mode);
2638 new_d_Cast (dbg_info *db, ir_node *op, ir_type *to_tp)
2640 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2644 new_d_Tuple (dbg_info *db, int arity, ir_node **in)
2646 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2655 * allocate the frag array
/* Lazily installs the "fragment" array used for precise exception contexts
   on node `res`.  The array is only allocated while the graph is being
   built, only if `res` still is an `op` node (it might have been optimized
   away), and only if no array was installed yet (CSE may have returned an
   already-initialized node). */
2657 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2658 if (get_opt_precise_exc_context()) {
2659 if ((current_ir_graph->phase_state == phase_building) &&
2660 (get_irn_op(res) == op) && /* Could be optimized away. */
2661 !*frag_store) /* Could be a cse where the arr is already set. */ {
2662 *frag_store = new_frag_arr(res);
/* --- Division-like constructors (Quot, DivMod, Div, Mod): built in the
   current block, pinned (they may raise a division exception), and with
   precise exception contexts enabled they get a fragment array. --- */
2668 new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2671 res = new_bd_Quot (db, current_ir_graph->current_block, memop, op1, op2);
2672 res->attr.except.pin_state = op_pin_state_pinned;
2673 #if PRECISE_EXC_CONTEXT
2674 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2681 new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2684 res = new_bd_DivMod (db, current_ir_graph->current_block, memop, op1, op2);
2685 res->attr.except.pin_state = op_pin_state_pinned;
2686 #if PRECISE_EXC_CONTEXT
2687 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2694 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2697 res = new_bd_Div (db, current_ir_graph->current_block, memop, op1, op2);
2698 res->attr.except.pin_state = op_pin_state_pinned;
2699 #if PRECISE_EXC_CONTEXT
2700 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2707 new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2710 res = new_bd_Mod (db, current_ir_graph->current_block, memop, op1, op2);
2711 res->attr.except.pin_state = op_pin_state_pinned;
2712 #if PRECISE_EXC_CONTEXT
2713 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* --- Remaining new_d_* constructors: thin forwards to new_bd_* in the
   current block.  Nodes that can raise exceptions (Call, Load, Store,
   Alloc, CopyB, Bound) additionally allocate a fragment array when precise
   exception contexts are enabled.  NOTE(review): the listing drops lines
   throughout this region (return types, braces, some argument lines). --- */
2732 new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
2734 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2738 new_d_Jmp (dbg_info *db)
2740 return new_bd_Jmp (db, current_ir_graph->current_block);
2744 new_d_IJmp (dbg_info *db, ir_node *tgt)
2746 return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
2750 new_d_Cond (dbg_info *db, ir_node *c)
2752 return new_bd_Cond (db, current_ir_graph->current_block, c);
2756 new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2760 res = new_bd_Call (db, current_ir_graph->current_block,
2761 store, callee, arity, in, tp);
2762 #if PRECISE_EXC_CONTEXT
2763 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2770 new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
2772 return new_bd_Return (db, current_ir_graph->current_block,
2777 new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
2780 res = new_bd_Load (db, current_ir_graph->current_block,
2782 #if PRECISE_EXC_CONTEXT
2783 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2790 new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
2793 res = new_bd_Store (db, current_ir_graph->current_block,
2795 #if PRECISE_EXC_CONTEXT
2796 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2803 new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2807 res = new_bd_Alloc (db, current_ir_graph->current_block,
2808 store, size, alloc_type, where);
2809 #if PRECISE_EXC_CONTEXT
2810 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
2817 new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
2818 ir_node *size, ir_type *free_type, where_alloc where)
2820 return new_bd_Free (db, current_ir_graph->current_block,
2821 store, ptr, size, free_type, where);
/* simpleSel: entity selection without index operands (n_index = 0). */
2825 new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
2826 /* GL: objptr was called frame before. Frame was a bad choice for the name
2827 as the operand could as well be a pointer to a dynamic object. */
2829 return new_bd_Sel (db, current_ir_graph->current_block,
2830 store, objptr, 0, NULL, ent);
2834 new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2836 return new_bd_Sel (db, current_ir_graph->current_block,
2837 store, objptr, n_index, index, sel);
/* SymConsts are placed in the start block, like Consts. */
2841 new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
2843 return new_bd_SymConst_type (db, get_irg_start_block(current_ir_graph),
2848 new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
2850 return new_bd_SymConst (db, get_irg_start_block(current_ir_graph),
2855 new_d_Sync (dbg_info *db, int arity, ir_node *in[])
2857 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block, arity, in);
/* (body of new_d_Bad -- its signature line is missing from this listing) */
2863 return _new_d_Bad();
2867 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2869 return new_bd_Confirm (db, current_ir_graph->current_block,
2874 new_d_Unknown (ir_mode *m)
2876 return new_bd_Unknown(m);
2880 new_d_CallBegin (dbg_info *db, ir_node *call)
2883 res = new_bd_CallBegin (db, current_ir_graph->current_block, call);
2888 new_d_EndReg (dbg_info *db)
2891 res = new_bd_EndReg(db, current_ir_graph->current_block);
2896 new_d_EndExcept (dbg_info *db)
2899 res = new_bd_EndExcept(db, current_ir_graph->current_block);
2904 new_d_Break (dbg_info *db)
2906 return new_bd_Break (db, current_ir_graph->current_block);
2910 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2912 return new_bd_Filter (db, current_ir_graph->current_block,
/* (body of new_d_NoMem -- its signature line is missing from this listing) */
2919 return _new_d_NoMem();
2923 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
2924 ir_node *ir_true, ir_mode *mode) {
2925 return new_bd_Mux (db, current_ir_graph->current_block,
2926 sel, ir_false, ir_true, mode);
2930 new_d_Psi (dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2931 return new_bd_Psi (db, current_ir_graph->current_block,
2932 arity, conds, vals, mode);
2935 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2936 ir_node *dst, ir_node *src, ir_type *data_type) {
2938 res = new_bd_CopyB(db, current_ir_graph->current_block,
2939 store, dst, src, data_type);
2940 #if PRECISE_EXC_CONTEXT
2941 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2947 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type)
2949 return new_bd_InstOf (db, current_ir_graph->current_block,
2950 store, objptr, type);
2954 new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
2956 return new_bd_Raise (db, current_ir_graph->current_block,
2960 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2961 ir_node *idx, ir_node *lower, ir_node *upper) {
2963 res = new_bd_Bound(db, current_ir_graph->current_block,
2964 store, idx, lower, upper);
2965 #if PRECISE_EXC_CONTEXT
2966 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2971 /* ********************************************************************* */
2972 /* Comfortable interface with automatic Phi node construction. */
2973 /* (Uses also constructors of ?? interface, except new_Block. */
2974 /* ********************************************************************* */
2976 /* Block construction */
2977 /* immature Block without predecessors */
/* Creates a new immature Block (no predecessors yet; the in-array is
   dynamic, length -1), makes it the graph's current block and installs the
   zero-initialized per-block value array (graph_arr) used by the automatic
   Phi-node construction.  Immature blocks are not optimized or hashed. */
2978 ir_node *new_d_immBlock (dbg_info *db) {
2981 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2982 /* creates a new dynamic in-array as length of in is -1 */
2983 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2984 current_ir_graph->current_block = res;
2985 res->attr.block.matured = 0;
2986 res->attr.block.dead = 0;
2987 /* res->attr.block.exc = exc_normal; */
2988 /* res->attr.block.handler_entry = 0; */
2989 res->attr.block.irg = current_ir_graph;
2990 res->attr.block.backedge = NULL;
2991 res->attr.block.in_cg = NULL;
2992 res->attr.block.cg_backedge = NULL;
2993 set_Block_block_visited(res, 0);
2995 /* Create and initialize array for Phi-node construction. */
2996 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2997 current_ir_graph->n_loc);
2998 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
3000 /* Immature block may not be optimized! */
3001 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience wrapper: immature block without debug info. */
3007 new_immBlock (void) {
3008 return new_d_immBlock(NULL);
3011 /* add an edge to a jmp/control flow node */
/* Appends one control-flow predecessor (jmp) to a still-immature block's
   growing in-array.  Must not be called after mature_immBlock. */
3013 add_immBlock_pred (ir_node *block, ir_node *jmp)
3015 if (block->attr.block.matured) {
3016 assert(0 && "Error: Block already matured!\n");
3019 assert(jmp != NULL);
3020 ARR_APP1(ir_node *, block->in, jmp);
3024 /* changing the current block */
/* Makes `target` the block into which subsequent new_d_* nodes are built. */
3026 set_cur_block (ir_node *target) {
3027 current_ir_graph->current_block = target;
3030 /* ************************ */
3031 /* parameter administration */
3033 /* get a value from the parameter array from the current block by its index */
/* --- Local-variable administration.  Index 0 of graph_arr holds the
   memory state (see the pos 0 / mode_M use in get_store), hence the
   `pos + 1` offset for ordinary values. --- */
3035 get_d_value (dbg_info *db, int pos, ir_mode *mode)
3037 assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* Fresh visited mark so get_r_value_internal's traversal is per-query. */
3038 inc_irg_visited(current_ir_graph);
3040 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3042 /* get a value from the parameter array from the current block by its index */
3044 get_value (int pos, ir_mode *mode)
3046 return get_d_value(NULL, pos, mode);
3049 /* set a value at position pos in the parameter array from the current block */
3051 set_value (int pos, ir_node *value)
3053 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3054 assert(pos+1 < current_ir_graph->n_loc);
3055 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
/* Linear search for `value` in the current block's value array; starts at
   index 1 because slot 0 holds the memory state.
   NOTE(review): the return statements are missing from this listing;
   presumably it returns the found index (or a not-found marker). */
3059 find_value(ir_node *value)
3062 ir_node *bl = current_ir_graph->current_block;
3064 for (i = 1; i < ARR_LEN(bl->attr.block.graph_arr); ++i)
3065 if (bl->attr.block.graph_arr[i] == value)
3070 /* get the current store */
/* Returns the current memory state: graph_arr slot 0, queried with mode_M
   through the same Phi-construction machinery as ordinary values. */
3074 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3075 /* GL: one could call get_value instead */
3076 inc_irg_visited(current_ir_graph);
3077 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3080 /* set the current store: handles automatic Sync construction for Load nodes */
/* Installs `store` as the current memory state (graph_arr slot 0).
   Non-volatile Loads are special-cased: a Load after a Sync is hoisted
   above the Sync, and a Load after another Load produces a new Sync so
   independent Loads do not serialize.
   NOTE(review): lines are missing from this listing, e.g. where in[0] and
   in[1] are assigned before the new_Sync(2, in) call. */
3082 set_store (ir_node *store)
3084 ir_node *load, *pload, *pred, *in[2];
3086 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3088 /* handle non-volatile Load nodes by automatically creating Sync's */
3089 load = skip_Proj(store);
3090 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3091 pred = get_Load_mem(load);
3093 if (is_Sync(pred)) {
3094 /* a Load after a Sync: move it up */
3095 set_Load_mem(load, get_Sync_pred(pred, 0));
3099 pload = skip_Proj(pred);
3100 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3101 /* a Load after a Load: create a new Sync */
3102 set_Load_mem(load, get_Load_mem(pload));
3106 store = new_Sync(2, in);
3110 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Adds `ka` as a keep-alive edge to the graph's End node so it survives
   dead-node elimination. */
3114 keep_alive (ir_node *ka) {
3115 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3118 /* --- Useful access routines --- */
3119 /* Returns the current block of the current graph. To set the current
3120 block use set_cur_block. */
3121 ir_node *get_cur_block(void) {
3122 return get_irg_current_block(current_ir_graph);
3125 /* Returns the frame type of the current graph */
3126 ir_type *get_cur_frame_type(void) {
3127 return get_irg_frame_type(current_ir_graph);
3131 /* ********************************************************************* */
3134 /* call once for each run of the library */
/* Library initialization: registers the callback used to initialize
   uninitialized local variables.  Call once per run of the library. */
3136 init_cons(uninitialized_local_variable_func_t *func)
3138 default_initialize_local_variable = func;
/* Finalizes construction of every graph in the program and advances the
   whole program to phase_high. */
3142 irp_finalize_cons (void) {
3144 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3145 irg_finalize_cons(get_irp_irg(i));
3147 irp->phase_state = phase_high;
/* ------------------------------------------------------------------ */
/* Convenience constructors without debug info: each new_X() simply    */
/* forwards to the matching new_d_X() with a NULL dbg_info.            */
/* ------------------------------------------------------------------ */
3151 ir_node *new_Block(int arity, ir_node **in) {
3152 return new_d_Block(NULL, arity, in);
3154 ir_node *new_Start (void) {
3155 return new_d_Start(NULL);
3157 ir_node *new_End (void) {
3158 return new_d_End(NULL);
3160 ir_node *new_Jmp (void) {
3161 return new_d_Jmp(NULL);
3163 ir_node *new_IJmp (ir_node *tgt) {
3164 return new_d_IJmp(NULL, tgt);
3166 ir_node *new_Cond (ir_node *c) {
3167 return new_d_Cond(NULL, c);
3169 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3170 return new_d_Return(NULL, store, arity, in);
3172 ir_node *new_Const (ir_mode *mode, tarval *con) {
3173 return new_d_Const(NULL, mode, con);
3176 ir_node *new_Const_long(ir_mode *mode, long value)
3178 return new_d_Const_long(NULL, mode, value);
/* Derives the mode from the given type. */
3181 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3182 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3185 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3186 return new_d_SymConst(NULL, value, kind);
3188 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3189 return new_d_simpleSel(NULL, store, objptr, ent);
3191 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3193 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3195 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3197 return new_d_Call(NULL, store, callee, arity, in, tp);
3199 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3200 return new_d_Add(NULL, op1, op2, mode);
3202 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3203 return new_d_Sub(NULL, op1, op2, mode);
3205 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3206 return new_d_Minus(NULL, op, mode);
3208 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3209 return new_d_Mul(NULL, op1, op2, mode);
3211 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3212 return new_d_Quot(NULL, memop, op1, op2);
3214 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3215 return new_d_DivMod(NULL, memop, op1, op2);
3217 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3218 return new_d_Div(NULL, memop, op1, op2);
3220 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3221 return new_d_Mod(NULL, memop, op1, op2);
3223 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3224 return new_d_Abs(NULL, op, mode);
3226 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3227 return new_d_And(NULL, op1, op2, mode);
3229 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3230 return new_d_Or(NULL, op1, op2, mode);
3232 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3233 return new_d_Eor(NULL, op1, op2, mode);
3235 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3236 return new_d_Not(NULL, op, mode);
3238 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3239 return new_d_Shl(NULL, op, k, mode);
3241 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3242 return new_d_Shr(NULL, op, k, mode);
3244 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3245 return new_d_Shrs(NULL, op, k, mode);
3247 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3248 return new_d_Rot(NULL, op, k, mode);
3250 ir_node *new_Carry (ir_node *op1, ir_node *op2, ir_mode *mode) {
3251 return new_d_Carry(NULL, op1, op2, mode);
3253 ir_node *new_Borrow (ir_node *op1, ir_node *op2, ir_mode *mode) {
3254 return new_d_Borrow(NULL, op1, op2, mode);
3256 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3257 return new_d_Cmp(NULL, op1, op2);
3259 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3260 return new_d_Conv(NULL, op, mode);
3262 ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
3263 return new_d_Cast(NULL, op, to_tp);
3265 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3266 return new_d_Phi(NULL, arity, in, mode);
3268 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3269 return new_d_Load(NULL, store, addr, mode);
3271 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3272 return new_d_Store(NULL, store, addr, val);
3274 ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
3275 where_alloc where) {
3276 return new_d_Alloc(NULL, store, size, alloc_type, where);
3278 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3279 ir_type *free_type, where_alloc where) {
3280 return new_d_Free(NULL, store, ptr, size, free_type, where);
3282 ir_node *new_Sync (int arity, ir_node *in[]) {
3283 return new_d_Sync(NULL, arity, in);
3285 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3286 return new_d_Proj(NULL, arg, mode, proj);
3288 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3289 return new_d_defaultProj(NULL, arg, max_proj);
3291 ir_node *new_Tuple (int arity, ir_node **in) {
3292 return new_d_Tuple(NULL, arity, in);
3294 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3295 return new_d_Id(NULL, val, mode);
/* NOTE(review): the body of new_Bad is missing from this listing;
   presumably it returns new_d_Bad() -- TODO confirm against full source. */
3297 ir_node *new_Bad (void) {
3300 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3301 return new_d_Confirm (NULL, val, bound, cmp);
3303 ir_node *new_Unknown(ir_mode *m) {
3304 return new_d_Unknown(m);
3306 ir_node *new_CallBegin (ir_node *callee) {
3307 return new_d_CallBegin(NULL, callee);
3309 ir_node *new_EndReg (void) {
3310 return new_d_EndReg(NULL);
3312 ir_node *new_EndExcept (void) {
3313 return new_d_EndExcept(NULL);
3315 ir_node *new_Break (void) {
3316 return new_d_Break(NULL);
3318 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3319 return new_d_Filter(NULL, arg, mode, proj);
3321 ir_node *new_NoMem (void) {
3322 return new_d_NoMem();
3324 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3325 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3327 ir_node *new_Psi (int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3328 return new_d_Psi(NULL, arity, conds, vals, mode);
3330 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3331 return new_d_CopyB(NULL, store, dst, src, data_type);
3333 ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
3334 return new_d_InstOf (NULL, store, objptr, ent);
3336 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3337 return new_d_Raise(NULL, store, obj);
3339 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3340 return new_d_Bound(NULL, store, idx, lower, upper);