3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verifying */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * Language dependent variable initialization callback.
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
65 /* creates a bd constructor for a binop */
66 #define NEW_BD_BINOP(instr) \
68 new_bd_##instr (dbg_info *db, ir_node *block, \
69 ir_node *op1, ir_node *op2, ir_mode *mode) \
73 ir_graph *irg = current_ir_graph; \
76 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
77 res = optimize_node(res); \
78 IRN_VRFY_IRG(res, irg); \
82 /* creates a bd constructor for an unop */
83 #define NEW_BD_UNOP(instr) \
85 new_bd_##instr (dbg_info *db, ir_node *block, \
86 ir_node *op, ir_mode *mode) \
89 ir_graph *irg = current_ir_graph; \
90 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
91 res = optimize_node(res); \
92 IRN_VRFY_IRG(res, irg); \
96 /* creates a bd constructor for a divop */
97 #define NEW_BD_DIVOP(instr) \
99 new_bd_##instr (dbg_info *db, ir_node *block, \
100 ir_node *memop, ir_node *op1, ir_node *op2) \
104 ir_graph *irg = current_ir_graph; \
108 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
109 res = optimize_node(res); \
110 IRN_VRFY_IRG(res, irg); \
114 /* creates a rd constructor for a binop */
115 #define NEW_RD_BINOP(instr) \
117 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
118 ir_node *op1, ir_node *op2, ir_mode *mode) \
121 ir_graph *rem = current_ir_graph; \
122 current_ir_graph = irg; \
123 res = new_bd_##instr(db, block, op1, op2, mode); \
124 current_ir_graph = rem; \
128 /* creates a rd constructor for an unop */
129 #define NEW_RD_UNOP(instr) \
131 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
132 ir_node *op, ir_mode *mode) \
135 ir_graph *rem = current_ir_graph; \
136 current_ir_graph = irg; \
137 res = new_bd_##instr(db, block, op, mode); \
138 current_ir_graph = rem; \
142 /* creates a rd constructor for a divop */
143 #define NEW_RD_DIVOP(instr) \
145 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
146 ir_node *memop, ir_node *op1, ir_node *op2) \
149 ir_graph *rem = current_ir_graph; \
150 current_ir_graph = irg; \
151 res = new_bd_##instr(db, block, memop, op1, op2); \
152 current_ir_graph = rem; \
156 /* creates a d constructor for a binop */
157 #define NEW_D_BINOP(instr) \
159 new_d_##instr (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
160 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
163 /* creates a d constructor for an unop */
164 #define NEW_D_UNOP(instr) \
166 new_d_##instr (dbg_info *db, ir_node *op, ir_mode *mode) { \
167 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
171 /* Constructs a Block with a fixed number of predecessors.
172 Does not set current_block. Can not be used with automatic
173 Phi node construction. */
175 new_bd_Block (dbg_info *db, int arity, ir_node **in)
178 ir_graph *irg = current_ir_graph;
180 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in); /* a Block has no enclosing block: pass NULL */
181 set_Block_matured(res, 1); /* all `arity` predecessors are known up front */
182 set_Block_block_visited(res, 0);
184 /* res->attr.block.exc = exc_normal; */
185 /* res->attr.block.handler_entry = 0; */
186 res->attr.block.dead = 0;
187 res->attr.block.irg = irg;
188 res->attr.block.backedge = new_backedge_arr(irg->obst, arity); /* loop back-edge flags, one per predecessor */
189 res->attr.block.in_cg = NULL; /* interprocedural (call-graph) predecessors: unset */
190 res->attr.block.cg_backedge = NULL;
191 res->attr.block.extblk = NULL; /* extended basic block: not yet computed */
193 IRN_VRFY_IRG(res, irg);
198 new_bd_Start (dbg_info *db, ir_node *block)
201 ir_graph *irg = current_ir_graph;
203 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
204 /* res->attr.start.irg = irg; */
206 IRN_VRFY_IRG(res, irg);
211 new_bd_End (dbg_info *db, ir_node *block)
214 ir_graph *irg = current_ir_graph;
216 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
218 IRN_VRFY_IRG(res, irg);
222 /* Creates a Phi node with all predecessors. Calling this constructor
223 is only allowed if the corresponding block is mature. */
225 new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
228 ir_graph *irg = current_ir_graph;
232 /* Don't assert that block matured: the use of this constructor is strongly
234 if ( get_Block_matured(block) )
235 assert( get_irn_arity(block) == arity ); /* one Phi operand per control predecessor */
237 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
239 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
241 for (i = arity-1; i >= 0; i--) /* scan operands for Unknown inputs */
242 if (get_irn_op(in[i]) == op_Unknown) {
247 if (!has_unknown) res = optimize_node (res); /* don't optimize while Unknown operands remain */
248 IRN_VRFY_IRG(res, irg);
250 /* Memory Phis in endless loops must be kept alive.
251 As we can't distinguish these easily we keep all of them alive. */
252 if ((res->op == op_Phi) && (mode == mode_M))
253 add_End_keepalive(get_irg_end(irg), res);
258 new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
261 ir_graph *irg = current_ir_graph;
263 res = new_ir_node (db, irg, get_irg_start_block(irg), op_Const, mode, 0, NULL); /* NOTE: `block` is ignored; Const nodes are always placed in the start block */
264 res->attr.con.tv = con;
265 set_Const_type(res, tp); /* Call method because of complex assertion. */
266 res = optimize_node (res);
267 assert(get_Const_type(res) == tp);
268 IRN_VRFY_IRG(res, irg);
274 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
276 ir_graph *irg = current_ir_graph;
278 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
282 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
284 ir_graph *irg = current_ir_graph;
286 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
290 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
293 ir_graph *irg = current_ir_graph;
295 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
296 res = optimize_node(res);
297 IRN_VRFY_IRG(res, irg);
302 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
306 ir_graph *irg = current_ir_graph;
308 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
309 res->attr.proj = proj;
312 assert(get_Proj_pred(res));
313 assert(get_nodes_block(get_Proj_pred(res)));
315 res = optimize_node(res);
317 IRN_VRFY_IRG(res, irg);
323 new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
327 ir_graph *irg = current_ir_graph;
329 assert(arg->op == op_Cond);
330 arg->attr.c.kind = fragmentary; /* side effect on arg: marks the Cond's projection set as incomplete */
331 arg->attr.c.default_proj = max_proj; /* remember which Proj number is the default case */
332 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
337 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
340 ir_graph *irg = current_ir_graph;
342 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
343 res = optimize_node(res);
344 IRN_VRFY_IRG(res, irg);
349 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
352 ir_graph *irg = current_ir_graph;
354 assert(is_atomic_type(to_tp));
356 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
357 res->attr.cast.totype = to_tp;
358 res = optimize_node(res);
359 IRN_VRFY_IRG(res, irg);
364 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
367 ir_graph *irg = current_ir_graph;
369 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
370 res = optimize_node (res);
371 IRN_VRFY_IRG(res, irg);
396 new_bd_Cmp (dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2)
400 ir_graph *irg = current_ir_graph;
403 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
404 res = optimize_node(res);
405 IRN_VRFY_IRG(res, irg);
410 new_bd_Jmp (dbg_info *db, ir_node *block)
413 ir_graph *irg = current_ir_graph;
415 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
416 res = optimize_node (res);
417 IRN_VRFY_IRG (res, irg);
422 new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
425 ir_graph *irg = current_ir_graph;
427 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
428 res = optimize_node (res);
429 IRN_VRFY_IRG (res, irg);
431 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
437 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
443 res->attr.c.kind = dense;
444 res->attr.c.default_proj = 0;
445 res->attr.c.pred = COND_JMP_PRED_NONE;
446 res = optimize_node (res);
447 IRN_VRFY_IRG(res, irg);
452 new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
453 ir_node *callee, int arity, ir_node **in, ir_type *tp)
458 ir_graph *irg = current_ir_graph;
461 NEW_ARR_A(ir_node *, r_in, r_arity);
464 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
466 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
468 assert((get_unknown_type() == tp) || is_Method_type(tp));
469 set_Call_type(res, tp);
470 res->attr.call.exc.pin_state = op_pin_state_pinned;
471 res->attr.call.callee_arr = NULL;
472 res = optimize_node(res);
473 IRN_VRFY_IRG(res, irg);
478 new_bd_Return (dbg_info *db, ir_node *block,
479 ir_node *store, int arity, ir_node **in)
484 ir_graph *irg = current_ir_graph;
487 NEW_ARR_A (ir_node *, r_in, r_arity);
489 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
490 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
491 res = optimize_node(res);
492 IRN_VRFY_IRG(res, irg);
497 new_bd_Load (dbg_info *db, ir_node *block,
498 ir_node *store, ir_node *adr, ir_mode *mode)
502 ir_graph *irg = current_ir_graph;
506 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
507 res->attr.load.exc.pin_state = op_pin_state_pinned;
508 res->attr.load.load_mode = mode;
509 res->attr.load.volatility = volatility_non_volatile;
510 res = optimize_node(res);
511 IRN_VRFY_IRG(res, irg);
516 new_bd_Store (dbg_info *db, ir_node *block,
517 ir_node *store, ir_node *adr, ir_node *val)
521 ir_graph *irg = current_ir_graph;
526 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
527 res->attr.store.exc.pin_state = op_pin_state_pinned;
528 res->attr.store.volatility = volatility_non_volatile;
529 res = optimize_node(res);
530 IRN_VRFY_IRG(res, irg);
535 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
536 ir_node *size, ir_type *alloc_type, where_alloc where)
540 ir_graph *irg = current_ir_graph;
544 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
545 res->attr.a.exc.pin_state = op_pin_state_pinned;
546 res->attr.a.where = where;
547 res->attr.a.type = alloc_type;
548 res = optimize_node(res);
549 IRN_VRFY_IRG(res, irg);
554 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
555 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
559 ir_graph *irg = current_ir_graph;
564 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
565 res->attr.f.where = where;
566 res->attr.f.type = free_type;
567 res = optimize_node(res);
568 IRN_VRFY_IRG(res, irg);
573 new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
574 int arity, ir_node **in, entity *ent)
579 ir_graph *irg = current_ir_graph;
581 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
584 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
587 memcpy(&r_in[2], in, sizeof(ir_node *) * arity); /* index operands follow store and objptr */
589 * FIXME: Sel's can select functions which should be of mode mode_P_code.
591 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
592 res->attr.s.ent = ent;
593 res = optimize_node(res);
594 IRN_VRFY_IRG(res, irg);
599 new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
600 symconst_kind symkind, ir_type *tp) {
603 ir_graph *irg = current_ir_graph;
605 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
606 mode = mode_P_data; /* FIXME: can be mode_P_code */
610 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
612 res->attr.i.num = symkind;
613 res->attr.i.sym = value;
616 res = optimize_node(res);
617 IRN_VRFY_IRG(res, irg);
622 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
623 symconst_kind symkind)
625 ir_graph *irg = current_ir_graph;
627 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
632 new_bd_Sync (dbg_info *db, ir_node *block)
635 ir_graph *irg = current_ir_graph;
637 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
638 /* no need to call optimize node here, Sync are always created with no predecessors */
639 IRN_VRFY_IRG(res, irg);
644 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
646 ir_node *in[2], *res;
647 ir_graph *irg = current_ir_graph;
651 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
652 res->attr.confirm_cmp = cmp;
653 res = optimize_node (res);
654 IRN_VRFY_IRG(res, irg);
658 /* this function is often called with current_ir_graph unset */
660 new_bd_Unknown (ir_mode *m)
663 ir_graph *irg = current_ir_graph;
665 res = new_ir_node(NULL, irg, get_irg_start_block(irg), op_Unknown, m, 0, NULL); /* Unknowns are placed in the start block, with no debug info */
666 res = optimize_node(res); /* NOTE(review): unlike sibling constructors there is no IRN_VRFY_IRG here -- confirm intentional */
671 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
675 ir_graph *irg = current_ir_graph;
677 in[0] = get_Call_ptr(call);
678 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
679 /* res->attr.callbegin.irg = irg; */
680 res->attr.callbegin.call = call;
681 res = optimize_node(res);
682 IRN_VRFY_IRG(res, irg);
687 new_bd_EndReg (dbg_info *db, ir_node *block)
690 ir_graph *irg = current_ir_graph;
692 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
693 set_irg_end_reg(irg, res);
694 IRN_VRFY_IRG(res, irg);
699 new_bd_EndExcept (dbg_info *db, ir_node *block)
702 ir_graph *irg = current_ir_graph;
704 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
705 set_irg_end_except(irg, res);
706 IRN_VRFY_IRG (res, irg);
711 new_bd_Break (dbg_info *db, ir_node *block)
714 ir_graph *irg = current_ir_graph;
716 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
717 res = optimize_node(res);
718 IRN_VRFY_IRG(res, irg);
723 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
727 ir_graph *irg = current_ir_graph;
729 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
730 res->attr.filter.proj = proj;
731 res->attr.filter.in_cg = NULL;
732 res->attr.filter.backedge = NULL;
735 assert(get_Proj_pred(res));
736 assert(get_nodes_block(get_Proj_pred(res)));
738 res = optimize_node(res);
739 IRN_VRFY_IRG(res, irg);
744 new_bd_Mux (dbg_info *db, ir_node *block,
745 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
749 ir_graph *irg = current_ir_graph;
755 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
758 res = optimize_node(res);
759 IRN_VRFY_IRG(res, irg);
764 new_bd_Psi (dbg_info *db, ir_node *block,
765 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
769 ir_graph *irg = current_ir_graph;
772 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
774 for (i = 0; i < arity; ++i) {
776 in[2 * i + 1] = vals[i];
780 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
783 res = optimize_node(res);
784 IRN_VRFY_IRG(res, irg);
789 new_bd_CopyB (dbg_info *db, ir_node *block,
790 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
794 ir_graph *irg = current_ir_graph;
800 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
802 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
803 res->attr.copyb.data_type = data_type;
804 res = optimize_node(res);
805 IRN_VRFY_IRG(res, irg);
810 new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
811 ir_node *objptr, ir_type *type)
815 ir_graph *irg = current_ir_graph;
819 res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in); /* BUG FIX: was op_Sel, which mismatches the InstOf attribute (attr.io) set below */
820 res->attr.io.type = type; /* the type tested against */
821 res = optimize_node(res);
822 IRN_VRFY_IRG(res, irg);
827 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
831 ir_graph *irg = current_ir_graph;
835 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
836 res = optimize_node(res);
837 IRN_VRFY_IRG(res, irg);
842 new_bd_Bound (dbg_info *db, ir_node *block,
843 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
847 ir_graph *irg = current_ir_graph;
853 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
854 res->attr.bound.exc.pin_state = op_pin_state_pinned;
855 res = optimize_node(res);
856 IRN_VRFY_IRG(res, irg);
860 /* --------------------------------------------- */
861 /* private interfaces, for professional use only */
862 /* --------------------------------------------- */
864 /* Constructs a Block with a fixed number of predecessors.
865 Does not set current_block. Can not be used with automatic
866 Phi node construction. */
868 new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
870 ir_graph *rem = current_ir_graph;
873 current_ir_graph = irg;
874 res = new_bd_Block (db, arity, in);
875 current_ir_graph = rem;
881 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
883 ir_graph *rem = current_ir_graph;
886 current_ir_graph = irg;
887 res = new_bd_Start (db, block);
888 current_ir_graph = rem;
894 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
897 ir_graph *rem = current_ir_graph;
899 current_ir_graph = irg; /* BUG FIX: was `= rem` (a no-op); new_bd_End() reads current_ir_graph, so it must be switched to `irg` like in every other new_rd_* wrapper */
900 res = new_bd_End (db, block);
901 current_ir_graph = rem; /* restore the caller's graph */
906 /* Creates a Phi node with all predecessors. Calling this constructor
907 is only allowed if the corresponding block is mature. */
909 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
912 ir_graph *rem = current_ir_graph;
914 current_ir_graph = irg;
915 res = new_bd_Phi (db, block,arity, in, mode);
916 current_ir_graph = rem;
922 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
925 ir_graph *rem = current_ir_graph;
927 current_ir_graph = irg;
928 res = new_bd_Const_type (db, block, mode, con, tp);
929 current_ir_graph = rem;
935 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
938 ir_graph *rem = current_ir_graph;
940 current_ir_graph = irg;
941 res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
942 current_ir_graph = rem;
948 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
950 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
954 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
957 ir_graph *rem = current_ir_graph;
959 current_ir_graph = irg;
960 res = new_bd_Id(db, block, val, mode);
961 current_ir_graph = rem;
967 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
971 ir_graph *rem = current_ir_graph;
973 current_ir_graph = irg;
974 res = new_bd_Proj(db, block, arg, mode, proj);
975 current_ir_graph = rem;
981 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
985 ir_graph *rem = current_ir_graph;
987 current_ir_graph = irg;
988 res = new_bd_defaultProj(db, block, arg, max_proj);
989 current_ir_graph = rem;
995 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
998 ir_graph *rem = current_ir_graph;
1000 current_ir_graph = irg;
1001 res = new_bd_Conv(db, block, op, mode);
1002 current_ir_graph = rem;
1008 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1011 ir_graph *rem = current_ir_graph;
1013 current_ir_graph = irg;
1014 res = new_bd_Cast(db, block, op, to_tp);
1015 current_ir_graph = rem;
1021 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1024 ir_graph *rem = current_ir_graph;
1026 current_ir_graph = irg;
1027 res = new_bd_Tuple(db, block, arity, in);
1028 current_ir_graph = rem;
1038 NEW_RD_DIVOP(DivMod)
1051 NEW_RD_BINOP(Borrow)
1054 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1055 ir_node *op1, ir_node *op2)
1058 ir_graph *rem = current_ir_graph;
1060 current_ir_graph = irg;
1061 res = new_bd_Cmp(db, block, op1, op2);
1062 current_ir_graph = rem;
1068 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1071 ir_graph *rem = current_ir_graph;
1073 current_ir_graph = irg;
1074 res = new_bd_Jmp(db, block);
1075 current_ir_graph = rem;
1081 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1084 ir_graph *rem = current_ir_graph;
1086 current_ir_graph = irg;
1087 res = new_bd_IJmp(db, block, tgt);
1088 current_ir_graph = rem;
1094 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1097 ir_graph *rem = current_ir_graph;
1099 current_ir_graph = irg;
1100 res = new_bd_Cond(db, block, c);
1101 current_ir_graph = rem;
1107 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1108 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1111 ir_graph *rem = current_ir_graph;
1113 current_ir_graph = irg;
1114 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1115 current_ir_graph = rem;
1121 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1122 ir_node *store, int arity, ir_node **in)
1125 ir_graph *rem = current_ir_graph;
1127 current_ir_graph = irg;
1128 res = new_bd_Return(db, block, store, arity, in);
1129 current_ir_graph = rem;
1135 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1136 ir_node *store, ir_node *adr, ir_mode *mode)
1139 ir_graph *rem = current_ir_graph;
1141 current_ir_graph = irg;
1142 res = new_bd_Load(db, block, store, adr, mode);
1143 current_ir_graph = rem;
1149 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1150 ir_node *store, ir_node *adr, ir_node *val)
1153 ir_graph *rem = current_ir_graph;
1155 current_ir_graph = irg;
1156 res = new_bd_Store(db, block, store, adr, val);
1157 current_ir_graph = rem;
1163 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1164 ir_node *size, ir_type *alloc_type, where_alloc where)
1167 ir_graph *rem = current_ir_graph;
1169 current_ir_graph = irg;
1170 res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1171 current_ir_graph = rem;
1177 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1178 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1181 ir_graph *rem = current_ir_graph;
1183 current_ir_graph = irg;
1184 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1185 current_ir_graph = rem;
1191 new_rd_simpleSel (dbg_info *db, ir_graph *irg, ir_node *block,
1192 ir_node *store, ir_node *objptr, entity *ent)
1195 ir_graph *rem = current_ir_graph;
1197 current_ir_graph = irg;
1198 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1199 current_ir_graph = rem;
1205 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1206 int arity, ir_node **in, entity *ent)
1209 ir_graph *rem = current_ir_graph;
1211 current_ir_graph = irg;
1212 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1213 current_ir_graph = rem;
1219 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1220 symconst_kind symkind, ir_type *tp)
1223 ir_graph *rem = current_ir_graph;
1225 current_ir_graph = irg;
1226 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1227 current_ir_graph = rem;
1233 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1234 symconst_kind symkind)
1236 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
1240 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, ir_type *tp)
1242 symconst_symbol sym = {(ir_type *)symbol};
1243 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
1246 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1247 symconst_symbol sym = {(ir_type *)symbol};
1248 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
1251 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1252 symconst_symbol sym = {symbol};
1253 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
1256 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1257 symconst_symbol sym = {symbol};
1258 return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_size, tp);
1262 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node *in[])
1265 ir_graph *rem = current_ir_graph;
1268 current_ir_graph = irg;
1269 res = new_bd_Sync(db, block); /* Sync is built with no predecessors ... */
1270 current_ir_graph = rem;
1272 for (i = 0; i < arity; ++i) add_Sync_pred(res, in[i]); /* ... then the `arity` memory predecessors are appended one by one */
1278 new_rd_Bad (ir_graph *irg) {
1279 return get_irg_bad(irg);
1283 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1286 ir_graph *rem = current_ir_graph;
1288 current_ir_graph = irg;
1289 res = new_bd_Confirm(db, block, val, bound, cmp);
1290 current_ir_graph = rem;
1295 /* this function is often called with current_ir_graph unset */
1297 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1300 ir_graph *rem = current_ir_graph;
1302 current_ir_graph = irg;
1303 res = new_bd_Unknown(m);
1304 current_ir_graph = rem;
1310 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1313 ir_graph *rem = current_ir_graph;
1315 current_ir_graph = irg;
1316 res = new_bd_CallBegin(db, block, call);
1317 current_ir_graph = rem;
1323 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1327 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1328 set_irg_end_reg(irg, res);
1329 IRN_VRFY_IRG(res, irg);
1334 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1338 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1339 set_irg_end_except(irg, res);
1340 IRN_VRFY_IRG (res, irg);
1345 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1348 ir_graph *rem = current_ir_graph;
1350 current_ir_graph = irg;
1351 res = new_bd_Break(db, block);
1352 current_ir_graph = rem;
1358 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1362 ir_graph *rem = current_ir_graph;
1364 current_ir_graph = irg;
1365 res = new_bd_Filter(db, block, arg, mode, proj);
1366 current_ir_graph = rem;
1372 new_rd_NoMem (ir_graph *irg) {
1373 return get_irg_no_mem(irg);
1377 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
1378 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1381 ir_graph *rem = current_ir_graph;
1383 current_ir_graph = irg;
1384 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1385 current_ir_graph = rem;
1391 new_rd_Psi (dbg_info *db, ir_graph *irg, ir_node *block,
1392 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
1395 ir_graph *rem = current_ir_graph;
1397 current_ir_graph = irg;
1398 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1399 current_ir_graph = rem;
1404 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1405 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1408 ir_graph *rem = current_ir_graph;
1410 current_ir_graph = irg;
1411 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1412 current_ir_graph = rem;
1418 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1419 ir_node *objptr, ir_type *type)
1422 ir_graph *rem = current_ir_graph;
1424 current_ir_graph = irg;
1425 res = new_bd_InstOf(db, block, store, objptr, type);
1426 current_ir_graph = rem;
1432 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1435 ir_graph *rem = current_ir_graph;
1437 current_ir_graph = irg;
1438 res = new_bd_Raise(db, block, store, obj);
1439 current_ir_graph = rem;
1444 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1445 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1448 ir_graph *rem = current_ir_graph;
1450 current_ir_graph = irg;
1451 res = new_bd_Bound(db, block, store, idx, lower, upper);
1452 current_ir_graph = rem;
1457 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1458 return new_rd_Block(NULL, irg, arity, in);
1460 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1461 return new_rd_Start(NULL, irg, block);
1463 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1464 return new_rd_End(NULL, irg, block);
1466 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1467 return new_rd_Jmp(NULL, irg, block);
1469 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1470 return new_rd_IJmp(NULL, irg, block, tgt);
1472 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1473 return new_rd_Cond(NULL, irg, block, c);
1475 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1476 ir_node *store, int arity, ir_node **in) {
1477 return new_rd_Return(NULL, irg, block, store, arity, in);
1479 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1480 ir_mode *mode, tarval *con) {
1481 return new_rd_Const(NULL, irg, block, mode, con);
1483 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1484 ir_mode *mode, long value) {
1485 return new_rd_Const_long(NULL, irg, block, mode, value);
1487 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1488 ir_mode *mode, tarval *con, ir_type *tp) {
1489 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1491 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1492 symconst_symbol value, symconst_kind symkind) {
1493 return new_rd_SymConst(NULL, irg, block, value, symkind);
1495 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1496 ir_node *objptr, entity *ent) {
1497 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1499 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1500 ir_node *objptr, int n_index, ir_node **index,
1502 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1504 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1505 ir_node *callee, int arity, ir_node **in,
1507 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1509 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1510 ir_node *op1, ir_node *op2, ir_mode *mode) {
1511 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1513 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1514 ir_node *op1, ir_node *op2, ir_mode *mode) {
1515 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1517 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1518 ir_node *op, ir_mode *mode) {
1519 return new_rd_Minus(NULL, irg, block, op, mode);
1521 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1522 ir_node *op1, ir_node *op2, ir_mode *mode) {
1523 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1525 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1526 ir_node *memop, ir_node *op1, ir_node *op2) {
1527 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1529 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1530 ir_node *memop, ir_node *op1, ir_node *op2) {
1531 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1533 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1534 ir_node *memop, ir_node *op1, ir_node *op2) {
1535 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1537 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1538 ir_node *memop, ir_node *op1, ir_node *op2) {
1539 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1541 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1542 ir_node *op, ir_mode *mode) {
1543 return new_rd_Abs(NULL, irg, block, op, mode);
1545 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1546 ir_node *op1, ir_node *op2, ir_mode *mode) {
1547 return new_rd_And(NULL, irg, block, op1, op2, mode);
1549 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1550 ir_node *op1, ir_node *op2, ir_mode *mode) {
1551 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1553 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1554 ir_node *op1, ir_node *op2, ir_mode *mode) {
1555 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1557 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1558 ir_node *op, ir_mode *mode) {
1559 return new_rd_Not(NULL, irg, block, op, mode);
1561 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1562 ir_node *op, ir_node *k, ir_mode *mode) {
1563 return new_rd_Shl(NULL, irg, block, op, k, mode);
1565 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1566 ir_node *op, ir_node *k, ir_mode *mode) {
1567 return new_rd_Shr(NULL, irg, block, op, k, mode);
1569 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1570 ir_node *op, ir_node *k, ir_mode *mode) {
1571 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1573 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1574 ir_node *op, ir_node *k, ir_mode *mode) {
1575 return new_rd_Rot(NULL, irg, block, op, k, mode);
1577 ir_node *new_r_Carry (ir_graph *irg, ir_node *block,
1578 ir_node *op, ir_node *k, ir_mode *mode) {
1579 return new_rd_Carry(NULL, irg, block, op, k, mode);
1581 ir_node *new_r_Borrow (ir_graph *irg, ir_node *block,
1582 ir_node *op, ir_node *k, ir_mode *mode) {
1583 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1585 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1586 ir_node *op1, ir_node *op2) {
1587 return new_rd_Cmp(NULL, irg, block, op1, op2);
1589 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1590 ir_node *op, ir_mode *mode) {
1591 return new_rd_Conv(NULL, irg, block, op, mode);
1593 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1594 return new_rd_Cast(NULL, irg, block, op, to_tp);
1596 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1597 ir_node **in, ir_mode *mode) {
1598 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1600 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1601 ir_node *store, ir_node *adr, ir_mode *mode) {
1602 return new_rd_Load(NULL, irg, block, store, adr, mode);
1604 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1605 ir_node *store, ir_node *adr, ir_node *val) {
1606 return new_rd_Store(NULL, irg, block, store, adr, val);
1608 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1609 ir_node *size, ir_type *alloc_type, where_alloc where) {
1610 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1612 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1613 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1614 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1616 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
1617 return new_rd_Sync(NULL, irg, block, arity, in);
1619 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1620 ir_mode *mode, long proj) {
1621 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1623 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1625 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1627 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1628 int arity, ir_node **in) {
1629 return new_rd_Tuple(NULL, irg, block, arity, in );
1631 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1632 ir_node *val, ir_mode *mode) {
1633 return new_rd_Id(NULL, irg, block, val, mode);
1635 ir_node *new_r_Bad (ir_graph *irg) {
1636 return new_rd_Bad(irg);
1638 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1639 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1641 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1642 return new_rd_Unknown(irg, m);
1644 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1645 return new_rd_CallBegin(NULL, irg, block, callee);
1647 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1648 return new_rd_EndReg(NULL, irg, block);
1650 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1651 return new_rd_EndExcept(NULL, irg, block);
1653 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1654 return new_rd_Break(NULL, irg, block);
1656 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1657 ir_mode *mode, long proj) {
1658 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1660 ir_node *new_r_NoMem (ir_graph *irg) {
1661 return new_rd_NoMem(irg);
1663 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1664 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1665 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1667 ir_node *new_r_Psi (ir_graph *irg, ir_node *block,
1668 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1669 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1671 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1672 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1673 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1675 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1677 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1679 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1680 ir_node *store, ir_node *obj) {
1681 return new_rd_Raise(NULL, irg, block, store, obj);
1683 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1684 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1685 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
/* ************************************************** */
/*  Public interfaces: graph construction tools       */
/* ************************************************** */
1694 * - create a new Start node in the current block
1696 * @return s - pointer to the created Start node
1701 new_d_Start (dbg_info *db)
1705 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1706 op_Start, mode_T, 0, NULL);
1707 /* res->attr.start.irg = current_ir_graph; */
1709 res = optimize_node(res);
1710 IRN_VRFY_IRG(res, current_ir_graph);
1715 new_d_End (dbg_info *db)
1718 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1719 op_End, mode_X, -1, NULL);
1720 res = optimize_node(res);
1721 IRN_VRFY_IRG(res, current_ir_graph);
1726 /* Constructs a Block with a fixed number of predecessors.
1727 Does set current_block. Can be used with automatic Phi
1728 node construction. */
1730 new_d_Block (dbg_info *db, int arity, ir_node **in)
1734 int has_unknown = 0;
1736 res = new_bd_Block(db, arity, in);
1738 /* Create and initialize array for Phi-node construction. */
1739 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1740 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1741 current_ir_graph->n_loc);
1742 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1745 for (i = arity-1; i >= 0; i--)
1746 if (get_irn_op(in[i]) == op_Unknown) {
1751 if (!has_unknown) res = optimize_node(res);
1752 current_ir_graph->current_block = res;
1754 IRN_VRFY_IRG(res, current_ir_graph);
/* ***********************************************************************
   Methods necessary for automatic Phi node creation:

     ir_node *phi_merge            (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
     ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
     ir_node *new_rd_Phi0          (ir_graph *irg, ir_node *block, ir_mode *mode)
     ir_node *new_rd_Phi_in        (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)

   Call Graph (A ---> B means A "calls" B):

       get_value            mature_immBlock
           |                      |
           v                      v
       get_r_value_internal <---> phi_merge
           |                      |
           v                      v
       new_rd_Phi0           new_rd_Phi_in

   *************************************************************************** */
1785 /** Creates a Phi node with 0 predecessors */
1786 static INLINE ir_node *
1787 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1791 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1792 IRN_VRFY_IRG(res, irg);
/* There are two implementations of the Phi node construction. The first
   is faster, but does not work for blocks with more than 2 predecessors.
   The second works always but is slower and causes more unnecessary Phi
   nodes.  Select the implementation by the following preprocessor flag. */
#if USE_FAST_PHI_CONSTRUCTION

/* This is a stack used for allocating and deallocating nodes in
   new_rd_Phi_in. The original implementation used the obstack
   to model this stack, now it is explicit. This reduces side effects. */
#if USE_EXPLICIT_PHI_IN_STACK

/* NOTE(review): several lines (return types, braces, else-branches,
   declarations of `res`) were lost from this region during extraction —
   restore from upstream ircons.c before compiling.  The comments below
   document only the statements that are visible here. */

/* Allocates a fresh, empty Phi_in stack on the heap. */
new_Phi_in_stack(void) {
  res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));

  res->stack = NEW_ARR_F (ir_node *, 0);

/* Releases the dynamic array owned by the stack. */
free_Phi_in_stack(Phi_in_stack *s) {
  DEL_ARR_F(s->stack);

/* Returns a no-longer-needed Phi node to the reuse stack: append when the
   array is filled exactly up to pos, otherwise overwrite the slot at pos;
   in both cases pos is advanced. */
free_to_Phi_in_stack(ir_node *phi) {
  if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
      current_ir_graph->Phi_in_stack->pos)
    ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);

    current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;

  (current_ir_graph->Phi_in_stack->pos)++;

/* Pops a recyclable Phi node from the stack if one is available, otherwise
   allocates a new one.  A reused node gets a fresh in-array on the obstack
   (slot [0] is the block predecessor, operands follow from [1]). */
static INLINE ir_node *
alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
                               int arity, ir_node **in) {
  ir_node **stack = current_ir_graph->Phi_in_stack->stack;
  int pos = current_ir_graph->Phi_in_stack->pos;

  /* We need to allocate a new node */
  /* NOTE(review): `db` is used here but not declared in the visible
     signature — presumably NULL (or a dbg_info parameter) upstream; confirm. */
  res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);

  /* reuse the old node and initialize it again. */
  assert (res->kind == k_ir_node);
  assert (res->op == op_Phi);

  assert (arity >= 0);
  /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
  res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));

  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);

  (current_ir_graph->Phi_in_stack->pos)--;

#endif /* USE_EXPLICIT_PHI_IN_STACK */
/* Creates a Phi node with a given, fixed array **in of predecessors.
   If the Phi node is unnecessary, as the same value reaches the block
   through all control flow paths, it is eliminated and the value
   returned directly. This constructor is only intended for use in
   the automatic Phi node generation triggered by get_value or mature.
   The implementation is quite tricky and depends on the fact, that
   the nodes are allocated on a stack:
   The in array contains predecessors and NULLs. The NULLs appear,
   if get_r_value_internal, that computed the predecessors, reached
   the same block on two paths. In this case the same value reaches
   this block on both paths, there is no definition in between. We need
   not allocate a Phi where these path's merge, but we have to communicate
   this fact to the caller. This happens by returning a pointer to the
   node the caller _will_ allocate. (Yes, we predict the address. We can
   do so because the nodes are allocated on the obstack.) The caller then
   finds a pointer to itself and, when this routine is called again,
   eliminates it. */
/* NOTE(review): this function lost lines in extraction (declaration of `i`,
   `#else` branches, loop bodies, braces) — restore from upstream ircons.c. */
static INLINE ir_node *
new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
  ir_node *res, *known;

  /* Allocate a new node on the obstack. This can return a node to
     which some of the pointers in the in-array already point.
     Attention: the constructor copies the in array, i.e., the later
     changes to the array in this routine do not affect the
     constructed node! If the in array contains NULLs, there will be
     missing predecessors in the returned node. Is this a possible
     internal state of the Phi node generation? */
#if USE_EXPLICIT_PHI_IN_STACK
  res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
  /* presumably the #else (non-stack) allocation path: */
  res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

  /* The in-array can contain NULLs. These were returned by
     get_r_value_internal if it reached the same block/definition on a
     second path. The NULLs are replaced by the node itself to
     simplify the test in the next loop. */
  for (i = 0; i < ins; ++i) {

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop. Else the Phi
     node merges the same definition on several paths and therefore is
     not needed. */
  for (i = 0; i < ins; ++i) {
    if (in[i] == res || in[i] == known)

  /* i==ins: there is at most one predecessor, we don't need a phi node. */
#if USE_EXPLICIT_PHI_IN_STACK
    free_to_Phi_in_stack(res);
    /* presumably the #else cleanup path frees from the obstack: */
    edges_node_deleted(res, current_ir_graph);
    obstack_free(current_ir_graph->obst, res);

    res = optimize_node (res);
    IRN_VRFY_IRG(res, irg);

  /* return the pointer to the Phi node. This node might be deallocated! */
/* Forward declaration: mutually recursive with phi_merge. */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode);

/** Computes the predecessors for the real phi node, then allocates and
    returns this node.  The routine called to allocate the node might
    optimize it away and return a real value, or even a pointer
    to a deallocated Phi node on top of the obstack!
    This function is called with an in-array of proper size.
    NOTE(review): the return-type line, `int i;` and closing braces were
    lost in extraction — restore from upstream ircons.c. **/
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
  ir_node *prevBlock, *res;

  /* This loop goes to all predecessor blocks of the block the Phi node is in
     and there finds the operands of the Phi node by calling
     get_r_value_internal. */
  for (i = 1; i <= ins; ++i) {
    assert (block->in[i]);
    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */

    nin[i-1] = get_r_value_internal (prevBlock, pos, mode);

  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created. This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node. If the value
     passes two different control flow edges without being defined, and
     this is the second path treated, a pointer to the node that will be
     allocated for the first path (recursion) is returned. We already
     know the address of this node, as it is the next node to be allocated
     and will be placed on top of the obstack. (The obstack is a _stack_!) */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);

  /* Now we know the value for "pos" and can enter it in the array with
     all known local variables. Attention: this might be a pointer to
     a node, that later will be allocated!!! See new_rd_Phi_in().
     If this is called in mature, after some set_value() in the same block,
     the proper value must not be overwritten:
       get_value       (makes Phi0, put's it into graph_arr)
       set_value       (overwrites Phi0 in graph_arr)
       mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
                        the proper value) */
  if (!block->attr.block.graph_arr[pos]) {
    block->attr.block.graph_arr[pos] = res;
    /* printf(" value already computed by %s\n",
       get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
/* This function returns the last definition of a variable. In case
   this variable was last defined in a previous block, Phi nodes are
   inserted. If the part of the firm graph containing the definition
   is not yet constructed, a dummy Phi node is returned.
   NOTE(review): the signature/return-type line, local declarations and
   several braces were lost in extraction — restore from upstream. */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode)

  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time. We can not
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned. This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured. (Completion will add a new Phi and turn the Phi0 into an Id
        node.)

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time. We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time. A Phi node needs
        to be created (phi_merge). If the Phi is not needed, as all it's
        operands are the same value reaching the block through different
        paths, it's optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time. Now two
        subcases are possible:
        * The value was computed completely the last time we were here. This
          is the case if there is no loop. We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet. We are computing a value in a loop and need to
          break the recursion without knowing the result yet.
          @@@ strange case. Straight forward we would create a Phi before
          starting the computation of it's predecessors. In this case we will
          find a Phi here in any case. The problem is that this implementation
          only creates a Phi after computing the predecessors, so that it is
          hard to compute self references of this Phi. @@@
        There is no simple check for the second subcase. Therefore we check
        for a second visit and treat all such cases as the second subcase.
        Anyways, the basic situation is the same: we reached a block
        on two paths without finding a definition of the value: No Phi
        nodes are needed on both paths.
        We return this information "Two paths, no Phi needed" by a very tricky
        implementation that relies on the fact that an obstack is a stack and
        will return a node with the same address on different allocations.
        Look also at phi_merge and new_rd_phi_in to understand this.
        @@@ Unfortunately this does not work, see testprogram
        three_cfpred_example. */

  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) return res;

  if (block->attr.block.matured) { /* case 3 */

    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);

    NEW_ARR_A (ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge (block, pos, mode, nin, ins);

  } else {  /* case 1 */
    /* The block is not mature, we don't know how many in's are needed. A Phi
       with zero predecessors is created. Such a Phi node is called Phi0
       node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
       to the list of Phi0 nodes in this block to be matured by mature_immBlock
       later.
       The Phi0 has to remember the pos of it's internal value. If the real
       Phi is computed, pos is used to update the array with the local
       values. */
    res = new_rd_Phi0 (current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;

  /* If we get here, the frontend missed a use-before-definition error */

  printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
  assert (mode->code >= irm_F && mode->code <= irm_P);
  res = new_rd_Const (NULL, current_ir_graph, block, mode,
                      tarval_mode_null[mode->code]);

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;
/* ... it starts the recursion. This causes an Id at the entry of
   every block that has no definition of the value!
   (Tail of a comment whose beginning was lost in extraction.) */

#if USE_EXPLICIT_PHI_IN_STACK
/* Dummy stubs: the slow Phi construction does not recycle nodes. */
Phi_in_stack * new_Phi_in_stack() { return NULL; }
void free_Phi_in_stack(Phi_in_stack *s) { }

/* Slow-path variant of new_rd_Phi_in: additionally receives phi0, the
   placeholder Phi0 this Phi will eventually replace, enabling the
   self-reference optimization below.
   NOTE(review): declaration of `i`, loop braces and some surrounding
   statements were lost in extraction — restore from upstream ircons.c. */
static INLINE ir_node *
new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
               ir_node **in, int ins, ir_node *phi0)
  ir_node *res, *known;

  /* Allocate a new node on the obstack. The allocation copies the in
     array. */
  res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop. Else the
     Phi node merges the same definition on several paths and therefore
     is not needed. Don't consider Bad nodes! */
  for (i=0; i < ins; ++i)

    in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */

    /* Optimize self referencing Phis: We can't detect them yet properly, as
       they still refer to the Phi0 they will replace. So replace right now. */
    if (phi0 && in[i] == phi0) in[i] = res;

    if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;

  /* i==ins: there is at most one predecessor, we don't need a phi node. */

    edges_node_deleted(res, current_ir_graph);
    obstack_free (current_ir_graph->obst, res);
    if (is_Phi(known)) {
      /* If pred is a phi node we want to optimize it: If loops are matured in a bad
         order, an enclosing Phi know may get superfluous. */
      res = optimize_in_place_2(known);

        exchange(known, res);

    /* A undefined value, e.g., in unreachable code. */

    res = optimize_node (res);  /* This is necessary to add the node to the hash table for cse. */
    IRN_VRFY_IRG(res, irg);
    /* Memory Phis in endless loops must be kept alive.
       As we can't distinguish these easily we keep all of them alive. */
    if ((res->op == op_Phi) && (mode == mode_M))
      add_End_keepalive(get_irg_end(irg), res);
/* Forward declarations for the mutually recursive construction routines. */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode);

#if PRECISE_EXC_CONTEXT
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);

/* Construct a new frag_array for node n.
   Copy the content from the current graph_arr of the corresponding block:
   this is the current state.
   Set ProjM(n) as current memory state.
   Further the last entry in frag_arr of current block points to n. This
   constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
   NOTE(review): declarations of `arr`/`opt` and the trailing
   set_optimize(opt)/return were lost in extraction — restore upstream. */
static INLINE ir_node ** new_frag_arr (ir_node *n)

  arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
  memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
         sizeof(ir_node *)*current_ir_graph->n_loc);

  /* turn off optimization before allocating Proj nodes, as res isn't
     finished yet. */
  opt = get_opt_optimize(); set_optimize(0);
  /* Here we rely on the fact that all frag ops have Memory as first result! */
  if (get_irn_op(n) == op_Call)
    arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
  else if (get_irn_op(n) == op_CopyB)
    arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
  else if (get_irn_op(n) == op_Bound)
    arr[0] = new_Proj(n, mode_M, pn_Bound_M_except);
  /* presumably an else-branch for the remaining fragile ops — the assert
     checks that they all share the same memory Proj number: */
    assert((pn_Quot_M == pn_DivMod_M) &&
           (pn_Quot_M == pn_Div_M)    &&
           (pn_Quot_M == pn_Mod_M)    &&
           (pn_Quot_M == pn_Load_M)   &&
           (pn_Quot_M == pn_Store_M)  &&
           (pn_Quot_M == pn_Alloc_M)  );
    arr[0] = new_Proj(n, mode_M, pn_Alloc_M);

  /* Link n as the last fragile op of the current block. */
  current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/**
 * Returns the frag_arr from a node.
 *
 * NOTE(review): the case labels of this switch were lost in extraction;
 * each return below belonged to one opcode case (Call, Load, Store, and
 * the other fragile ops) — restore from upstream ircons.c.
 */
static INLINE ir_node **
get_frag_arr (ir_node *n) {
  switch (get_irn_opcode(n)) {

    return n->attr.call.exc.frag_arr;

    return n->attr.a.exc.frag_arr;

    return n->attr.load.exc.frag_arr;

    return n->attr.store.exc.frag_arr;

    return n->attr.except.frag_arr;

/* Stores val at pos in frag_arr if that slot is still empty, then follows
   the chain of fragile ops (the last entry of each frag_arr points to the
   next fragile op) propagating the value.
   NOTE(review): two variants — a recursive one and a bounded-iterative one —
   appear to have been merged here by the extraction; keep exactly one. */
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {

  if (!frag_arr[pos]) frag_arr[pos] = val;
  if (frag_arr[current_ir_graph->n_loc - 1]) {
    ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
    assert(arr != frag_arr && "Endless recursion detected");
    set_frag_value(arr, pos, val);

  for (i = 0; i < 1000; ++i) {
    if (!frag_arr[pos]) {
      frag_arr[pos] = val;

    if (frag_arr[current_ir_graph->n_loc - 1]) {
      ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);

  assert(0 && "potential endless recursion");
/* Returns the value at pos as seen after the fragile op cfOp: looks it up in
   cfOp's frag_arr, and if a set_value() happened in the block after cfOp,
   builds the required Phi.  (The return-type line and some braces were lost
   in extraction — restore from upstream ircons.c.) */
get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {

  assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));

  frag_arr = get_frag_arr(cfOp);
  res = frag_arr[pos];

  if (block->attr.block.graph_arr[pos]) {
    /* There was a set_value() after the cfOp and no get_value before that
       set_value(). We must build a Phi node now. */
    if (block->attr.block.matured) {
      int ins = get_irn_arity(block);

      NEW_ARR_A (ir_node *, nin, ins);
      res = phi_merge(block, pos, mode, nin, ins);
      /* presumably the immature-block branch (Phi0) follows: */
      res = new_rd_Phi0 (current_ir_graph, block, mode);
      res->attr.phi0_pos = pos;
      res->link = block->link;

    /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
       but this should be better: (remove comment if this works) */
    /* It's a Phi, we can write this into all graph_arrs with NULL */
    set_frag_value(block->attr.block.graph_arr, pos, res);

    res = get_r_value_internal(block, pos, mode);
    set_frag_value(block->attr.block.graph_arr, pos, res);

#endif /* PRECISE_EXC_CONTEXT */
/** Computes the predecessors for the real phi node, and then
    allocates and returns this node. The routine called to allocate the
    node might optimize it away and return a real value.
    This function must be called with an in-array of proper size.
    NOTE(review): the return-type line, `int i;` and a number of braces/else
    lines were lost in extraction — restore from upstream ircons.c. **/
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
  ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;

  /* If this block has no value at pos create a Phi0 and remember it
     in graph_arr to break recursions.
     Else we may not set graph_arr as there a later value is remembered. */

  if (!block->attr.block.graph_arr[pos]) {
    if (block == get_irg_start_block(current_ir_graph)) {
      /* Collapsing to Bad tarvals is no good idea.
         So we call a user-supplied routine here that deals with this case as
         appropriate for the given language. Sorrily the only help we can give
         here is the position.

         Even if all variables are defined before use, it can happen that
         we get to the start block, if a Cond has been replaced by a tuple
         (bad, jmp). In this case we call the function needlessly, eventually
         generating an non existent error.
         However, this SHOULD NOT HAPPEN, as bad control flow nodes are
         intercepted before recurring. */
      if (default_initialize_local_variable) {
        ir_node *rem = get_cur_block();

        set_cur_block(block);
        block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
        /* presumably: set_cur_block(rem); and an else assigning tarval_bad: */
        block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
      /* We don't need to care about exception ops in the start block.
         There are none by definition. */
      return block->attr.block.graph_arr[pos];
      /* presumably the non-start-block branch: */
      phi0 = new_rd_Phi0(current_ir_graph, block, mode);
      block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
      if (get_opt_precise_exc_context()) {
        /* Set graph_arr for fragile ops. Also here we should break recursion.
           We could choose a cyclic path through an cfop. But the recursion would
           break at some point. */
        set_frag_value(block->attr.block.graph_arr, pos, phi0);

  /* This loop goes to all predecessor blocks of the block the Phi node
     is in and there finds the operands of the Phi node by calling
     get_r_value_internal. */
  for (i = 1; i <= ins; ++i) {
    prevCfOp = skip_Proj(block->in[i]);

    if (is_Bad(prevCfOp)) {
      /* In case a Cond has been optimized we would get right to the start block
         with an invalid definition. */
      nin[i-1] = new_Bad();

    prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */

    if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
      if (get_opt_precise_exc_context() &&
          is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
        assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
        nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);

        nin[i-1] = get_r_value_internal (prevBlock, pos, mode);

      nin[i-1] = new_Bad();

  /* We want to pass the Phi0 node to the constructor: this finds additional
     optimization possibilities.
     The Phi0 node either is allocated in this function, or it comes from
     a former call to get_r_value_internal. In this case we may not yet
     exchange phi0, as this is done in mature_immBlock. */

  phi0_all = block->attr.block.graph_arr[pos];
  if (!((get_irn_op(phi0_all) == op_Phi) &&
        (get_irn_arity(phi0_all) == 0)   &&
        (get_nodes_block(phi0_all) == block)))

  /* After collecting all predecessors into the array nin a new Phi node
     with these predecessors is created. This constructor contains an
     optimization: If all predecessors of the Phi node are identical it
     returns the only operand instead of a new Phi node. */
  res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);

  /* In case we allocated a Phi0 node at the beginning of this procedure,
     we need to exchange this Phi0 with the real Phi. */

    exchange(phi0, res);
    block->attr.block.graph_arr[pos] = res;
    /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
       only an optimization. */
/* This function returns the last definition of a variable. In case
   this variable was last defined in a previous block, Phi nodes are
   inserted. If the part of the firm graph containing the definition
   is not yet constructed, a dummy Phi node is returned.
   NOTE(review): the signature/return-type line, local declarations and
   several braces were lost in extraction — restore from upstream. */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode)

  /* There are 4 cases to treat.

     1. The block is not mature and we visit it the first time. We can not
        create a proper Phi node, therefore a Phi0, i.e., a Phi without
        predecessors is returned. This node is added to the linked list (field
        "link") of the containing block to be completed when this block is
        matured. (Completion will add a new Phi and turn the Phi0 into an Id
        node.)

     2. The value is already known in this block, graph_arr[pos] is set and we
        visit the block the first time. We can return the value without
        creating any new nodes.

     3. The block is mature and we visit it the first time. A Phi node needs
        to be created (phi_merge). If the Phi is not needed, as all it's
        operands are the same value reaching the block through different
        paths, it's optimized away and the value itself is returned.

     4. The block is mature, and we visit it the second time. Now two
        subcases are possible:
        * The value was computed completely the last time we were here. This
          is the case if there is no loop. We can return the proper value.
        * The recursion that visited this node and set the flag did not
          return yet. We are computing a value in a loop and need to
          break the recursion. This case only happens if we visited
          the same block with phi_merge before, which inserted a Phi0.
          So we return the Phi0. */

  /* case 4 -- already visited. */
  if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
    /* As phi_merge allocates a Phi0 this value is always defined. Here
       is the critical difference of the two algorithms. */
    assert(block->attr.block.graph_arr[pos]);
    return block->attr.block.graph_arr[pos];

  /* visited the first time */
  set_irn_visited(block, get_irg_visited(current_ir_graph));

  /* Get the local valid value */
  res = block->attr.block.graph_arr[pos];

  /* case 2 -- If the value is actually computed, return it. */
  if (res) { return res; };

  if (block->attr.block.matured) { /* case 3 */

    /* The Phi has the same amount of ins as the corresponding block. */
    int ins = get_irn_arity(block);

    NEW_ARR_A (ir_node *, nin, ins);

    /* Phi merge collects the predecessors and then creates a node. */
    res = phi_merge (block, pos, mode, nin, ins);

  } else {  /* case 1 */
    /* The block is not mature, we don't know how many in's are needed. A Phi
       with zero predecessors is created. Such a Phi node is called Phi0
       node. The Phi0 is then added to the list of Phi0 nodes in this block
       to be matured by mature_immBlock later.
       The Phi0 has to remember the pos of it's internal value. If the real
       Phi is computed, pos is used to update the array with the local
       values. */
    res = new_rd_Phi0 (current_ir_graph, block, mode);
    res->attr.phi0_pos = pos;
    res->link = block->link;

  /* If we get here, the frontend missed a use-before-definition error */

  printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
  assert (mode->code >= irm_F && mode->code <= irm_P);
  res = new_rd_Const (NULL, current_ir_graph, block, mode,
                      get_mode_null(mode));

  /* The local valid value is available now. */
  block->attr.block.graph_arr[pos] = res;
2529 #endif /* USE_FAST_PHI_CONSTRUCTION */
2531 /* ************************************************************************** */
2534 * Finalize a Block node, when all control flows are known.
2535 * Acceptable parameters are only Block nodes.
/* Matures an immature block: freezes its predecessor count, completes every
   Phi0 queued on block->link via phi_merge, then optimizes the block in
   place.  (Listing note: the return type, local declarations and some
   braces of this function are elided in this excerpt.) */
2538 mature_immBlock (ir_node *block)
2544 assert (get_irn_opcode(block) == iro_Block);
2545 /* @@@ should be commented in
2546 assert (!get_Block_matured(block) && "Block already matured"); */
2548 if (!get_Block_matured(block)) {
/* block->in is a dynamic array; the -1 presumably skips a reserved
   slot 0 of the in-array -- TODO confirm against the ARR layout. */
2549 ins = ARR_LEN (block->in)-1;
2550 /* Fix block parameters */
2551 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2553 /* An array for building the Phi nodes. */
2554 NEW_ARR_A (ir_node *, nin, ins);
2556 /* Traverse a chain of Phi nodes attached to this block and mature
2558 for (n = block->link; n; n=next) {
2559 inc_irg_visited(current_ir_graph);
/* Replace the placeholder Phi0 with a proper Phi over all predecessors. */
2561 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2564 block->attr.block.matured = 1;
2566 /* Now, as the block is a finished firm node, we can optimize it.
2567 Since other nodes have been allocated since the block was created
2568 we can not free the node on the obstack. Therefore we have to call
2570 Unfortunately the optimization does not change a lot, as all allocated
2571 nodes refer to the unoptimized node.
2572 We can call _2, as global cse has no effect on blocks. */
2573 block = optimize_in_place_2(block);
2574 IRN_VRFY_IRG(block, current_ir_graph);
/* --- new_d_* constructors ---------------------------------------------
   Thin wrappers that build a node in current_ir_graph->current_block,
   attaching debug info db.  Const-like nodes are placed in the graph's
   start block instead (presumably so they dominate every use -- confirm).
   Listing note: return-type lines and closing braces of these one-line
   wrappers are elided in this excerpt. */
2579 new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
2581 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
2585 new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
2587 return new_bd_Const(db, get_irg_start_block(current_ir_graph), mode, con);
2591 new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
2593 return new_bd_Const_long(db, get_irg_start_block(current_ir_graph), mode, value);
2597 new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp)
2599 return new_bd_Const_type(db, get_irg_start_block(current_ir_graph), mode, con, tp);
2604 new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
2606 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2610 new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2612 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* defaultProj: marks the Cond fragmentary, records max_proj as its default
   projection number, then builds a control-flow (mode_X) Proj for it. */
2616 new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
2619 assert(arg->op == op_Cond);
2620 arg->attr.c.kind = fragmentary;
2621 arg->attr.c.default_proj = max_proj;
2622 res = new_Proj (arg, mode_X, max_proj);
2627 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
2629 return new_bd_Conv(db, current_ir_graph->current_block, op, mode);
2633 new_d_Cast (dbg_info *db, ir_node *op, ir_type *to_tp)
2635 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2639 new_d_Tuple (dbg_info *db, int arity, ir_node **in)
2641 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2650 * allocate the frag array
/* Lazily attaches a fragment array to freshly built node res when precise
   exception contexts are enabled and the graph is still being built.
   Skipped when the node was optimized into a different op, or when a CSE
   hit means the array already exists.  (Closing braces elided in listing.) */
2652 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2653 if (get_opt_precise_exc_context()) {
2654 if ((current_ir_graph->phase_state == phase_building) &&
2655 (get_irn_op(res) == op) && /* Could be optimized away. */
2656 !*frag_store) /* Could be a cse where the arr is already set. */ {
2657 *frag_store = new_frag_arr(res);
/* Division-family constructors (Quot, DivMod, Div, Mod): each builds the
   node in the current block, pins it (it may raise a division exception),
   and under PRECISE_EXC_CONTEXT allocates its fragment array.
   Listing note: return types, '#endif' and closing braces are elided. */
2663 new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2666 res = new_bd_Quot (db, current_ir_graph->current_block, memop, op1, op2);
2667 res->attr.except.pin_state = op_pin_state_pinned;
2668 #if PRECISE_EXC_CONTEXT
2669 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2676 new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2679 res = new_bd_DivMod (db, current_ir_graph->current_block, memop, op1, op2);
2680 res->attr.except.pin_state = op_pin_state_pinned;
2681 #if PRECISE_EXC_CONTEXT
2682 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2689 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2692 res = new_bd_Div (db, current_ir_graph->current_block, memop, op1, op2);
2693 res->attr.except.pin_state = op_pin_state_pinned;
2694 #if PRECISE_EXC_CONTEXT
2695 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2702 new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2705 res = new_bd_Mod (db, current_ir_graph->current_block, memop, op1, op2);
2706 res->attr.except.pin_state = op_pin_state_pinned;
2707 #if PRECISE_EXC_CONTEXT
2708 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* Comparison, control-flow and memory-op constructors for the current
   block.  Call/Load/Store additionally allocate fragment arrays under
   PRECISE_EXC_CONTEXT.  Listing note: return types, some argument lines
   and closing braces are elided in this excerpt. */
2727 new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
2729 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2733 new_d_Jmp (dbg_info *db)
2735 return new_bd_Jmp (db, current_ir_graph->current_block);
2739 new_d_IJmp (dbg_info *db, ir_node *tgt)
2741 return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
2745 new_d_Cond (dbg_info *db, ir_node *c)
2747 return new_bd_Cond (db, current_ir_graph->current_block, c);
2751 new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2755 res = new_bd_Call (db, current_ir_graph->current_block,
2756 store, callee, arity, in, tp);
2757 #if PRECISE_EXC_CONTEXT
2758 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2765 new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
2767 return new_bd_Return (db, current_ir_graph->current_block,
2772 new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
2775 res = new_bd_Load (db, current_ir_graph->current_block,
2777 #if PRECISE_EXC_CONTEXT
2778 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2785 new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
2788 res = new_bd_Store (db, current_ir_graph->current_block,
2790 #if PRECISE_EXC_CONTEXT
2791 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
/* Heap/entity-related constructors.  SymConst nodes are placed in the
   start block; everything else goes to the current block.
   Listing note: return types, some trailing argument lines and closing
   braces are elided in this excerpt. */
2798 new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2802 res = new_bd_Alloc (db, current_ir_graph->current_block,
2803 store, size, alloc_type, where);
2804 #if PRECISE_EXC_CONTEXT
2805 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
2812 new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
2813 ir_node *size, ir_type *free_type, where_alloc where)
2815 return new_bd_Free (db, current_ir_graph->current_block,
2816 store, ptr, size, free_type, where);
/* simpleSel: a Sel with no index operands (n_index == 0, index == NULL). */
2820 new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
2821 /* GL: objptr was called frame before. Frame was a bad choice for the name
2822 as the operand could as well be a pointer to a dynamic object. */
2824 return new_bd_Sel (db, current_ir_graph->current_block,
2825 store, objptr, 0, NULL, ent);
2829 new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2831 return new_bd_Sel (db, current_ir_graph->current_block,
2832 store, objptr, n_index, index, sel);
2836 new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
2838 return new_bd_SymConst_type (db, get_irg_start_block(current_ir_graph),
2843 new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
2845 return new_bd_SymConst (db, get_irg_start_block(current_ir_graph),
2850 new_d_Sync (dbg_info *db, int arity, ir_node *in[])
2852 return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block, arity, in);
/* Remaining new_d_* constructors (Bad/NoMem are per-graph singletons
   obtained via the _new_d_* inline helpers; their signatures are elided
   in this listing).  CopyB and Bound allocate fragment arrays under
   PRECISE_EXC_CONTEXT like the other exception-raising ops above. */
2858 return _new_d_Bad();
2862 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2864 return new_bd_Confirm (db, current_ir_graph->current_block,
2869 new_d_Unknown (ir_mode *m)
2871 return new_bd_Unknown(m);
2875 new_d_CallBegin (dbg_info *db, ir_node *call)
2878 res = new_bd_CallBegin (db, current_ir_graph->current_block, call);
2883 new_d_EndReg (dbg_info *db)
2886 res = new_bd_EndReg(db, current_ir_graph->current_block);
2891 new_d_EndExcept (dbg_info *db)
2894 res = new_bd_EndExcept(db, current_ir_graph->current_block);
2899 new_d_Break (dbg_info *db)
2901 return new_bd_Break (db, current_ir_graph->current_block);
2905 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2907 return new_bd_Filter (db, current_ir_graph->current_block,
2914 return _new_d_NoMem();
2918 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
2919 ir_node *ir_true, ir_mode *mode) {
2920 return new_bd_Mux (db, current_ir_graph->current_block,
2921 sel, ir_false, ir_true, mode);
2925 new_d_Psi (dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2926 return new_bd_Psi (db, current_ir_graph->current_block,
2927 arity, conds, vals, mode);
2930 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2931 ir_node *dst, ir_node *src, ir_type *data_type) {
2933 res = new_bd_CopyB(db, current_ir_graph->current_block,
2934 store, dst, src, data_type);
2935 #if PRECISE_EXC_CONTEXT
2936 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2942 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type)
2944 return new_bd_InstOf (db, current_ir_graph->current_block,
2945 store, objptr, type);
2949 new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
2951 return new_bd_Raise (db, current_ir_graph->current_block,
2955 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2956 ir_node *idx, ir_node *lower, ir_node *upper) {
2958 res = new_bd_Bound(db, current_ir_graph->current_block,
2959 store, idx, lower, upper);
2960 #if PRECISE_EXC_CONTEXT
2961 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2966 /* ********************************************************************* */
2967 /* Comfortable interface with automatic Phi node construction. */
2968 /* (Uses also constructors of ?? interface, except new_Block. */
2969 /* ********************************************************************* */
2971 /* Block construction */
2972 /* immature Block without predecessors */
/* Creates an immature block (no predecessors yet; they are appended later
   with add_immBlock_pred, and the block is finished by mature_immBlock)
   and makes it the current block.  Listing note: local declarations, the
   return statement and closing brace are elided in this excerpt. */
2973 ir_node *new_d_immBlock (dbg_info *db) {
2976 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2977 /* creates a new dynamic in-array as length of in is -1 */
2978 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2979 current_ir_graph->current_block = res;
/* Initialize the block attributes; backedge/in_cg/cg_backedge are filled
   in later phases and start out NULL. */
2980 res->attr.block.matured = 0;
2981 res->attr.block.dead = 0;
2982 /* res->attr.block.exc = exc_normal; */
2983 /* res->attr.block.handler_entry = 0; */
2984 res->attr.block.irg = current_ir_graph;
2985 res->attr.block.backedge = NULL;
2986 res->attr.block.in_cg = NULL;
2987 res->attr.block.cg_backedge = NULL;
2988 set_Block_block_visited(res, 0);
2990 /* Create and initialize array for Phi-node construction. */
2991 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2992 current_ir_graph->n_loc)
2993 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2995 /* Immature block may not be optimized! */
2996 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience variant without debug info. */
3002 new_immBlock (void) {
3003 return new_d_immBlock(NULL);
3006 /* add an edge to a jmp/control flow node */
/* Appends control-flow predecessor jmp to an immature block's dynamic
   in-array.  Asserts (rather than silently failing) if the block was
   already matured.  Listing note: return type and braces are elided. */
3008 add_immBlock_pred (ir_node *block, ir_node *jmp)
3010 if (block->attr.block.matured) {
3011 assert(0 && "Error: Block already matured!\n");
3014 assert(jmp != NULL);
3015 ARR_APP1(ir_node *, block->in, jmp);
3019 /* changing the current block */
/* Makes target the block into which subsequent new_* constructors build. */
3021 set_cur_block (ir_node *target) {
3022 current_ir_graph->current_block = target;
3025 /* ************************ */
3026 /* parameter administration */
3028 /* get a value from the parameter array from the current block by its index */
/* graph_arr slot 0 holds the memory state (see get_store below), so user
   variable pos maps to slot pos + 1 throughout these routines. */
3030 get_d_value (dbg_info *db, int pos, ir_mode *mode)
3032 assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* Fresh visited counter so get_r_value_internal's first/second-visit
   logic starts from a clean state. */
3033 inc_irg_visited(current_ir_graph);
3035 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3037 /* get a value from the parameter array from the current block by its index */
3039 get_value (int pos, ir_mode *mode)
3041 return get_d_value(NULL, pos, mode);
3044 /* set a value at position pos in the parameter array from the current block */
3046 set_value (int pos, ir_node *value)
3048 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3049 assert(pos+1 < current_ir_graph->n_loc);
3050 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
/* Linear scan of the current block's graph_arr for value; starts at slot 1
   to skip the memory slot.  (Return statements elided in this listing.) */
3054 find_value(ir_node *value)
3057 ir_node *bl = current_ir_graph->current_block;
3059 for (i = 1; i < ARR_LEN(bl->attr.block.graph_arr); ++i)
3060 if (bl->attr.block.graph_arr[i] == value)
3065 /* get the current store */
/* The memory state lives in graph_arr slot 0; fetch it like any other
   value.  (Function signature elided in this listing.) */
3069 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3070 /* GL: one could call get_value instead */
3071 inc_irg_visited(current_ir_graph);
3072 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3075 /* set the current store: handles automatic Sync construction for Load nodes */
3077 set_store (ir_node *store)
3079 ir_node *load, *pload, *pred, *in[2];
3081 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3083 /* handle non-volatile Load nodes by automatically creating Sync's */
3084 load = skip_Proj(store);
3085 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
3086 pred = get_Load_mem(load);
3088 if (is_Sync(pred)) {
3089 /* a Load after a Sync: move it up */
3090 set_Load_mem(load, get_Sync_pred(pred, 0));
3094 pload = skip_Proj(pred);
3095 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
3096 /* a Load after a Load: create a new Sync */
3097 set_Load_mem(load, get_Load_mem(pload));
/* in[] is presumably filled with store and pred before the Sync is built;
   those assignment lines are elided in this listing -- confirm in the
   full source. */
3101 store = new_Sync(2, in);
3105 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Registers ka with the graph's End node so it survives dead-node removal. */
3109 keep_alive (ir_node *ka) {
3110 add_End_keepalive(get_irg_end(current_ir_graph), ka);
3113 /* --- Useful access routines --- */
3114 /* Returns the current block of the current graph. To set the current
3115 block use set_cur_block. */
3116 ir_node *get_cur_block(void) {
3117 return get_irg_current_block(current_ir_graph);
3120 /* Returns the frame type of the current graph */
3121 ir_type *get_cur_frame_type(void) {
3122 return get_irg_frame_type(current_ir_graph);
3126 /* ********************************************************************* */
3129 /* call once for each run of the library */
/* Installs the language-dependent callback used to initialize local
   variables that are read before being written. */
3131 init_cons(uninitialized_local_variable_func_t *func)
3133 default_initialize_local_variable = func;
/* Finalizes construction of every graph in the program and advances the
   whole irp to phase_high.  (Loop braces elided in this listing.) */
3137 irp_finalize_cons (void) {
3139 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
3140 irg_finalize_cons(get_irp_irg(i));
3142 irp->phase_state = phase_high;
/* --- Convenience constructors without debug info -----------------------
   Each forwards to its new_d_* counterpart with db == NULL.  Closing
   braces of these one-line wrappers are elided in this listing. */
3146 ir_node *new_Block(int arity, ir_node **in) {
3147 return new_d_Block(NULL, arity, in);
3149 ir_node *new_Start (void) {
3150 return new_d_Start(NULL);
3152 ir_node *new_End (void) {
3153 return new_d_End(NULL);
3155 ir_node *new_Jmp (void) {
3156 return new_d_Jmp(NULL);
3158 ir_node *new_IJmp (ir_node *tgt) {
3159 return new_d_IJmp(NULL, tgt);
3161 ir_node *new_Cond (ir_node *c) {
3162 return new_d_Cond(NULL, c);
3164 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3165 return new_d_Return(NULL, store, arity, in);
3167 ir_node *new_Const (ir_mode *mode, tarval *con) {
3168 return new_d_Const(NULL, mode, con);
3171 ir_node *new_Const_long(ir_mode *mode, long value)
3173 return new_d_Const_long(NULL, mode, value);
/* Derives the mode from the given type instead of taking it explicitly. */
3176 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3177 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3180 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3181 return new_d_SymConst(NULL, value, kind);
3183 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3184 return new_d_simpleSel(NULL, store, objptr, ent);
3186 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3188 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3190 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3192 return new_d_Call(NULL, store, callee, arity, in, tp);
3194 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3195 return new_d_Add(NULL, op1, op2, mode);
3197 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3198 return new_d_Sub(NULL, op1, op2, mode);
3200 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3201 return new_d_Minus(NULL, op, mode);
3203 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3204 return new_d_Mul(NULL, op1, op2, mode);
3206 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3207 return new_d_Quot(NULL, memop, op1, op2);
3209 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3210 return new_d_DivMod(NULL, memop, op1, op2);
3212 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3213 return new_d_Div(NULL, memop, op1, op2);
3215 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3216 return new_d_Mod(NULL, memop, op1, op2);
3218 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3219 return new_d_Abs(NULL, op, mode);
3221 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3222 return new_d_And(NULL, op1, op2, mode);
3224 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3225 return new_d_Or(NULL, op1, op2, mode);
3227 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3228 return new_d_Eor(NULL, op1, op2, mode);
3230 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3231 return new_d_Not(NULL, op, mode);
3233 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3234 return new_d_Shl(NULL, op, k, mode);
3236 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3237 return new_d_Shr(NULL, op, k, mode);
3239 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3240 return new_d_Shrs(NULL, op, k, mode);
3242 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3243 return new_d_Rot(NULL, op, k, mode);
3245 ir_node *new_Carry (ir_node *op1, ir_node *op2, ir_mode *mode) {
3246 return new_d_Carry(NULL, op1, op2, mode);
3248 ir_node *new_Borrow (ir_node *op1, ir_node *op2, ir_mode *mode) {
3249 return new_d_Borrow(NULL, op1, op2, mode);
3251 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3252 return new_d_Cmp(NULL, op1, op2);
/* Convenience constructors without debug info (continued): forward to the
   new_d_* versions with db == NULL.  Closing braces of these one-line
   wrappers are elided in this listing. */
3254 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3255 return new_d_Conv(NULL, op, mode);
3257 ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
3258 return new_d_Cast(NULL, op, to_tp);
3260 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3261 return new_d_Phi(NULL, arity, in, mode);
3263 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3264 return new_d_Load(NULL, store, addr, mode);
3266 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3267 return new_d_Store(NULL, store, addr, val);
3269 ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
3270 where_alloc where) {
3271 return new_d_Alloc(NULL, store, size, alloc_type, where);
3273 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3274 ir_type *free_type, where_alloc where) {
3275 return new_d_Free(NULL, store, ptr, size, free_type, where);
3277 ir_node *new_Sync (int arity, ir_node *in[]) {
3278 return new_d_Sync(NULL, arity, in);
3280 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3281 return new_d_Proj(NULL, arg, mode, proj);
3283 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3284 return new_d_defaultProj(NULL, arg, max_proj);
3286 ir_node *new_Tuple (int arity, ir_node **in) {
3287 return new_d_Tuple(NULL, arity, in);
3289 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3290 return new_d_Id(NULL, val, mode);
/* Body elided in this listing; presumably returns new_d_Bad() -- confirm. */
3292 ir_node *new_Bad (void) {
3295 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3296 return new_d_Confirm (NULL, val, bound, cmp);
3298 ir_node *new_Unknown(ir_mode *m) {
3299 return new_d_Unknown(m);
3301 ir_node *new_CallBegin (ir_node *callee) {
3302 return new_d_CallBegin(NULL, callee);
3304 ir_node *new_EndReg (void) {
3305 return new_d_EndReg(NULL);
3307 ir_node *new_EndExcept (void) {
3308 return new_d_EndExcept(NULL);
3310 ir_node *new_Break (void) {
3311 return new_d_Break(NULL);
3313 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3314 return new_d_Filter(NULL, arg, mode, proj);
3316 ir_node *new_NoMem (void) {
3317 return new_d_NoMem();
3319 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3320 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3322 ir_node *new_Psi (int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3323 return new_d_Psi(NULL, arity, conds, vals, mode);
3325 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3326 return new_d_CopyB(NULL, store, dst, src, data_type);
3328 ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
3329 return new_d_InstOf (NULL, store, objptr, ent);
3331 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3332 return new_d_Raise(NULL, store, obj);
3334 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3335 return new_d_Bound(NULL, store, idx, lower, upper);