3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 # include "irprog_t.h"
29 # include "irgraph_t.h"
30 # include "irnode_t.h"
31 # include "irmode_t.h"
32 # include "ircons_t.h"
33 # include "firm_common_t.h"
39 # include "irbackedge_t.h"
40 # include "irflag_t.h"
41 # include "iredges_t.h"
43 #if USE_EXPLICIT_PHI_IN_STACK
44 /* A stack needed for the automatic Phi node construction in constructor
45 Phi_in. Redefinition in irgraph.c!! */
50 typedef struct Phi_in_stack Phi_in_stack;
53 /* when we need verifying */
/* Two variants of IRN_VRFY_IRG: a no-op stub and one that calls the real
   verifier irn_vrfy_irg().  NOTE(review): the #ifdef/#else/#endif lines
   selecting between them appear to have been elided from this dump. */
55 # define IRN_VRFY_IRG(res, irg)
57 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
61 * Language dependent variable initialization callback.
/* Frontend-installed callback used to produce a value for a local variable
   that is read before being written; NULL until a frontend registers one. */
63 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/*
 * Constructor-generating macros.  Three families:
 *  - NEW_BD_*  "block, debug" constructors: build the node on
 *    current_ir_graph, run the local optimizer, verify.
 *  - NEW_RD_*  "raw, debug" constructors: temporarily switch
 *    current_ir_graph to the given irg and delegate to the bd variant.
 *  - NEW_D_*   "debug" constructors: delegate to the bd variant using
 *    current_ir_graph->current_block.
 * NOTE(review): some continuation lines of each macro body (local
 * declarations, return statement, closing brace) are elided in this dump.
 */
65 /* creates a bd constructor for a binop */
66 #define NEW_BD_BINOP(instr) \
68 new_bd_##instr (dbg_info *db, ir_node *block, \
69 ir_node *op1, ir_node *op2, ir_mode *mode) \
73 ir_graph *irg = current_ir_graph; \
76 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
77 res = optimize_node(res); \
78 IRN_VRFY_IRG(res, irg); \
82 /* creates a bd constructor for an unop */
83 #define NEW_BD_UNOP(instr) \
85 new_bd_##instr (dbg_info *db, ir_node *block, \
86 ir_node *op, ir_mode *mode) \
89 ir_graph *irg = current_ir_graph; \
90 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
91 res = optimize_node(res); \
92 IRN_VRFY_IRG(res, irg); \
96 /* creates a bd constructor for an divop */
97 #define NEW_BD_DIVOP(instr) \
99 new_bd_##instr (dbg_info *db, ir_node *block, \
100 ir_node *memop, ir_node *op1, ir_node *op2) \
104 ir_graph *irg = current_ir_graph; \
108 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
109 res = optimize_node(res); \
110 IRN_VRFY_IRG(res, irg); \
114 /* creates a rd constructor for a binop */
115 #define NEW_RD_BINOP(instr) \
117 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
118 ir_node *op1, ir_node *op2, ir_mode *mode) \
121 ir_graph *rem = current_ir_graph; \
122 current_ir_graph = irg; \
123 res = new_bd_##instr(db, block, op1, op2, mode); \
124 current_ir_graph = rem; \
128 /* creates a rd constructor for an unop */
129 #define NEW_RD_UNOP(instr) \
131 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
132 ir_node *op, ir_mode *mode) \
135 ir_graph *rem = current_ir_graph; \
136 current_ir_graph = irg; \
137 res = new_bd_##instr(db, block, op, mode); \
138 current_ir_graph = rem; \
142 /* creates a rd constructor for an divop */
143 #define NEW_RD_DIVOP(instr) \
145 new_rd_##instr (dbg_info *db, ir_graph *irg, ir_node *block, \
146 ir_node *memop, ir_node *op1, ir_node *op2) \
149 ir_graph *rem = current_ir_graph; \
150 current_ir_graph = irg; \
151 res = new_bd_##instr(db, block, memop, op1, op2); \
152 current_ir_graph = rem; \
156 /* creates a d constructor for an binop */
157 #define NEW_D_BINOP(instr) \
159 new_d_##instr (dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
160 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
163 /* creates a d constructor for an unop */
164 #define NEW_D_UNOP(instr) \
166 new_d_##instr (dbg_info *db, ir_node *op, ir_mode *mode) { \
167 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
171 /* Constructs a Block with a fixed number of predecessors.
172 Does not set current_block. Can not be used with automatic
173 Phi node construction. */
/* Block constructor: the node has no enclosing block (NULL), is marked
   mature immediately, and gets a fresh backedge array on the graph obstack.
   Interprocedural fields (in_cg, cg_backedge) start NULL. */
175 new_bd_Block (dbg_info *db, int arity, ir_node **in)
178 ir_graph *irg = current_ir_graph;
180 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
181 set_Block_matured(res, 1);
182 set_Block_block_visited(res, 0);
184 /* res->attr.block.exc = exc_normal; */
185 /* res->attr.block.handler_entry = 0; */
186 res->attr.block.dead = 0;
187 res->attr.block.irg = irg;
188 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
189 res->attr.block.in_cg = NULL;
190 res->attr.block.cg_backedge = NULL;
191 res->attr.block.extblk = NULL;
193 IRN_VRFY_IRG(res, irg);
/* Start node: mode_T tuple with no predecessors. */
198 new_bd_Start (dbg_info *db, ir_node *block)
201 ir_graph *irg = current_ir_graph;
203 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
204 /* res->attr.start.irg = irg; */
206 IRN_VRFY_IRG(res, irg);
/* End node: arity -1 requests a dynamic in-array (keep-alive edges are
   added later via add_End_keepalive). */
211 new_bd_End (dbg_info *db, ir_node *block)
214 ir_graph *irg = current_ir_graph;
216 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
218 IRN_VRFY_IRG(res, irg);
222 /* Creates a Phi node with all predecessors. Calling this constructor
223 is only allowed if the corresponding block is mature. */
225 new_bd_Phi (dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
228 ir_graph *irg = current_ir_graph;
232 /* Don't assert that block matured: the use of this constructor is strongly
234 if ( get_Block_matured(block) )
235 assert( get_irn_arity(block) == arity );
237 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
239 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Scan operands for Unknowns; a Phi with Unknown inputs is not optimized
   here (optimizing it could fold away the placeholder). */
241 for (i = arity-1; i >= 0; i--)
242 if (get_irn_op(in[i]) == op_Unknown) {
247 if (!has_unknown) res = optimize_node (res);
248 IRN_VRFY_IRG(res, irg);
250 /* Memory Phis in endless loops must be kept alive.
251 As we can't distinguish these easily we keep all of them alive. */
252 if ((res->op == op_Phi) && (mode == mode_M))
253 add_End_keepalive(irg->end, res);
/* Const is always placed in the start block; the `block` parameter is
   ignored here by design. */
258 new_bd_Const_type (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
261 ir_graph *irg = current_ir_graph;
263 res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
264 res->attr.con.tv = con;
265 set_Const_type(res, tp); /* Call method because of complex assertion. */
266 res = optimize_node (res);
267 assert(get_Const_type(res) == tp);
268 IRN_VRFY_IRG(res, irg);
/* Convenience wrappers: Const with unknown type / Const from a long. */
274 new_bd_Const (dbg_info *db, ir_node *block, ir_mode *mode, tarval *con)
276 ir_graph *irg = current_ir_graph;
278 return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type);
282 new_bd_Const_long (dbg_info *db, ir_node *block, ir_mode *mode, long value)
284 ir_graph *irg = current_ir_graph;
286 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
/* Id node: single-input identity, usually folded away by optimize_node. */
290 new_bd_Id (dbg_info *db, ir_node *block, ir_node *val, ir_mode *mode)
293 ir_graph *irg = current_ir_graph;
295 res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
296 res = optimize_node(res);
297 IRN_VRFY_IRG(res, irg);
/* Proj: selects component `proj` of a mode_T predecessor. */
302 new_bd_Proj (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
306 ir_graph *irg = current_ir_graph;
308 res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
309 res->attr.proj = proj;
312 assert(get_Proj_pred(res));
313 assert(get_nodes_block(get_Proj_pred(res)));
315 res = optimize_node(res);
317 IRN_VRFY_IRG(res, irg);
/* defaultProj: marks the Cond fragmentary and builds the default-target
   Proj (mode_X) for switch-like Conds. */
323 new_bd_defaultProj (dbg_info *db, ir_node *block, ir_node *arg,
327 ir_graph *irg = current_ir_graph;
329 assert(arg->op == op_Cond);
330 arg->attr.c.kind = fragmentary;
331 arg->attr.c.default_proj = max_proj;
332 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Conv: mode conversion of a single operand. */
337 new_bd_Conv (dbg_info *db, ir_node *block, ir_node *op, ir_mode *mode)
340 ir_graph *irg = current_ir_graph;
342 res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
343 res = optimize_node(res);
344 IRN_VRFY_IRG(res, irg);
/* Cast: changes only the high-level type (mode is inherited from op). */
349 new_bd_Cast (dbg_info *db, ir_node *block, ir_node *op, ir_type *to_tp)
352 ir_graph *irg = current_ir_graph;
354 assert(is_atomic_type(to_tp));
356 res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
357 res->attr.cast.totype = to_tp;
358 res = optimize_node(res);
359 IRN_VRFY_IRG(res, irg);
/* Tuple: groups `arity` values into one mode_T node. */
364 new_bd_Tuple (dbg_info *db, ir_node *block, int arity, ir_node **in)
367 ir_graph *irg = current_ir_graph;
369 res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
370 res = optimize_node (res);
371 IRN_VRFY_IRG(res, irg);
/* Cmp: compares two operands; result is a mode_T tuple of relation
   projections. */
396 new_bd_Cmp (dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2)
400 ir_graph *irg = current_ir_graph;
403 res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
404 res = optimize_node(res);
405 IRN_VRFY_IRG(res, irg);
/* Jmp: unconditional control flow, no data predecessors. */
410 new_bd_Jmp (dbg_info *db, ir_node *block)
413 ir_graph *irg = current_ir_graph;
415 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
416 res = optimize_node (res);
417 IRN_VRFY_IRG (res, irg);
/* IJmp: computed jump to target address `tgt`; optimize_node may replace
   it, hence the opcode re-check afterwards. */
422 new_bd_IJmp (dbg_info *db, ir_node *block, ir_node *tgt)
425 ir_graph *irg = current_ir_graph;
427 res = new_ir_node (db, irg, block, op_IJmp, mode_X, 1, &tgt);
428 res = optimize_node (res);
429 IRN_VRFY_IRG (res, irg);
431 if (get_irn_op(res) == op_IJmp) /* still an IJmp */
/* Cond: conditional branch on `c`; starts as a dense jump table with no
   branch prediction. */
437 new_bd_Cond (dbg_info *db, ir_node *block, ir_node *c)
440 ir_graph *irg = current_ir_graph;
442 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
443 res->attr.c.kind = dense;
444 res->attr.c.default_proj = 0;
445 res->attr.c.pred = COND_JMP_PRED_NONE;
446 res = optimize_node (res);
447 IRN_VRFY_IRG(res, irg);
/* Call: in-array layout is [store, callee, arg0..argN-1]; the argument
   slice is copied in starting at index 2.  Exceptions start pinned and the
   possibly-called-entities array is unset. */
452 new_bd_Call (dbg_info *db, ir_node *block, ir_node *store,
453 ir_node *callee, int arity, ir_node **in, ir_type *tp)
458 ir_graph *irg = current_ir_graph;
461 NEW_ARR_A(ir_node *, r_in, r_arity);
464 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
466 res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
468 assert((get_unknown_type() == tp) || is_Method_type(tp));
469 set_Call_type(res, tp);
470 res->attr.call.exc.pin_state = op_pin_state_pinned;
471 res->attr.call.callee_arr = NULL;
472 res = optimize_node(res);
473 IRN_VRFY_IRG(res, irg);
/* Return: in-array layout is [store, res0..resN-1]; results copied in
   starting at index 1. */
478 new_bd_Return (dbg_info *db, ir_node *block,
479 ir_node *store, int arity, ir_node **in)
484 ir_graph *irg = current_ir_graph;
487 NEW_ARR_A (ir_node *, r_in, r_arity);
489 memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
490 res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
491 res = optimize_node(res);
492 IRN_VRFY_IRG(res, irg);
/* Load: (store, adr) -> mode_T tuple; remembers the loaded mode, starts
   pinned and non-volatile. */
497 new_bd_Load (dbg_info *db, ir_node *block,
498 ir_node *store, ir_node *adr, ir_mode *mode)
502 ir_graph *irg = current_ir_graph;
506 res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
507 res->attr.load.exc.pin_state = op_pin_state_pinned;
508 res->attr.load.load_mode = mode;
509 res->attr.load.volatility = volatility_non_volatile;
510 res = optimize_node(res);
511 IRN_VRFY_IRG(res, irg);
/* Store: (store, adr, val) -> mode_T tuple; pinned, non-volatile. */
516 new_bd_Store (dbg_info *db, ir_node *block,
517 ir_node *store, ir_node *adr, ir_node *val)
521 ir_graph *irg = current_ir_graph;
526 res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
527 res->attr.store.exc.pin_state = op_pin_state_pinned;
528 res->attr.store.volatility = volatility_non_volatile;
529 res = optimize_node(res);
530 IRN_VRFY_IRG(res, irg);
/* Alloc: (store, size) -> mode_T tuple; records allocation placement
   (stack/heap) and the allocated type. */
535 new_bd_Alloc (dbg_info *db, ir_node *block, ir_node *store,
536 ir_node *size, ir_type *alloc_type, where_alloc where)
540 ir_graph *irg = current_ir_graph;
544 res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
545 res->attr.a.exc.pin_state = op_pin_state_pinned;
546 res->attr.a.where = where;
547 res->attr.a.type = alloc_type;
548 res = optimize_node(res);
549 IRN_VRFY_IRG(res, irg);
/* Free: (store, ptr, size) -> mode_M; records placement and freed type. */
554 new_bd_Free (dbg_info *db, ir_node *block, ir_node *store,
555 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
559 ir_graph *irg = current_ir_graph;
564 res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in);
565 res->attr.f.where = where;
566 res->attr.f.type = free_type;
567 res = optimize_node(res);
568 IRN_VRFY_IRG(res, irg);
/* Sel: entity selection.  In-array layout is [store, objptr, idx0..idxN-1];
   indices copied in starting at index 2.  Result mode is mode_P_data. */
573 new_bd_Sel (dbg_info *db, ir_node *block, ir_node *store, ir_node *objptr,
574 int arity, ir_node **in, entity *ent)
579 ir_graph *irg = current_ir_graph;
581 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
584 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
587 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
589 * FIXM: Sel's can select functions which should be of mode mode_P_code.
591 res = new_ir_node(db, irg, block, op_Sel, mode_P_data, r_arity, r_in);
592 res->attr.s.ent = ent;
593 res = optimize_node(res);
594 IRN_VRFY_IRG(res, irg);
/* SymConst: zero-input constant; address kinds get a pointer mode
   (presumably the non-address kinds get an integer mode on an elided
   branch -- confirm against the full source). */
599 new_bd_SymConst_type (dbg_info *db, ir_node *block, symconst_symbol value,
600 symconst_kind symkind, ir_type *tp) {
603 ir_graph *irg = current_ir_graph;
605 if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
606 mode = mode_P_data; /* FIXME: can be mode_P_code */
610 res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
612 res->attr.i.num = symkind;
613 res->attr.i.sym = value;
616 res = optimize_node(res);
617 IRN_VRFY_IRG(res, irg);
/* SymConst with unknown type. */
622 new_bd_SymConst (dbg_info *db, ir_node *block, symconst_symbol value,
623 symconst_kind symkind)
625 ir_graph *irg = current_ir_graph;
627 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* Sync: joins several memory values into one mode_M value. */
632 new_bd_Sync (dbg_info *db, ir_node *block, int arity, ir_node **in)
635 ir_graph *irg = current_ir_graph;
637 res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
638 res = optimize_node(res);
639 IRN_VRFY_IRG(res, irg);
/* Confirm: asserts the relation `cmp` between val and bound; the node's
   mode is inherited from val. */
644 new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
646 ir_node *in[2], *res;
647 ir_graph *irg = current_ir_graph;
651 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
652 res->attr.confirm_cmp = cmp;
653 res = optimize_node (res);
654 IRN_VRFY_IRG(res, irg);
658 /* this function is often called with current_ir_graph unset */
/* Unknown: placeholder value of mode m, always placed in the start block;
   has no debug info by design (db is NULL). */
660 new_bd_Unknown (ir_mode *m)
663 ir_graph *irg = current_ir_graph;
665 res = new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
666 res = optimize_node(res);
/* CallBegin (interprocedural view): takes the callee pointer of `call` as
   its only predecessor and remembers the originating Call. */
671 new_bd_CallBegin (dbg_info *db, ir_node *block, ir_node *call)
675 ir_graph *irg = current_ir_graph;
677 in[0] = get_Call_ptr(call);
678 res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
679 /* res->attr.callbegin.irg = irg; */
680 res->attr.callbegin.call = call;
681 res = optimize_node(res);
682 IRN_VRFY_IRG(res, irg);
/* EndReg / EndExcept: interprocedural end nodes with dynamic in-arrays
   (arity -1); EndExcept is also registered on the graph. */
687 new_bd_EndReg (dbg_info *db, ir_node *block)
690 ir_graph *irg = current_ir_graph;
692 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
694 IRN_VRFY_IRG(res, irg);
699 new_bd_EndExcept (dbg_info *db, ir_node *block)
702 ir_graph *irg = current_ir_graph;
704 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
705 irg->end_except = res;
706 IRN_VRFY_IRG (res, irg);
/* Break: control flow node, no predecessors. */
711 new_bd_Break (dbg_info *db, ir_node *block)
714 ir_graph *irg = current_ir_graph;
716 res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
717 res = optimize_node(res);
718 IRN_VRFY_IRG(res, irg);
/* Filter: interprocedural Proj-like node; interprocedural fields start
   NULL. */
723 new_bd_Filter (dbg_info *db, ir_node *block, ir_node *arg, ir_mode *mode,
727 ir_graph *irg = current_ir_graph;
729 res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
730 res->attr.filter.proj = proj;
731 res->attr.filter.in_cg = NULL;
732 res->attr.filter.backedge = NULL;
735 assert(get_Proj_pred(res));
736 assert(get_nodes_block(get_Proj_pred(res)));
738 res = optimize_node(res);
739 IRN_VRFY_IRG(res, irg);
/* Mux: 3-input select (sel, false-value, true-value). */
744 new_bd_Mux (dbg_info *db, ir_node *block,
745 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
749 ir_graph *irg = current_ir_graph;
755 res = new_ir_node(db, irg, block, op_Mux, mode, 3, in);
758 res = optimize_node(res);
759 IRN_VRFY_IRG(res, irg);
/* Psi: generalized Mux.  In-array interleaves conditions and values
   (cond[i] / vals[i]) plus one trailing default, 2*arity+1 inputs total. */
764 new_bd_Psi (dbg_info *db, ir_node *block,
765 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
769 ir_graph *irg = current_ir_graph;
772 NEW_ARR_A(ir_node *, in, 2 * arity + 1);
774 for (i = 0; i < arity; ++i) {
776 in[2 * i + 1] = vals[i];
780 res = new_ir_node(db, irg, block, op_Psi, mode, 2 * arity + 1, in);
783 res = optimize_node(res);
784 IRN_VRFY_IRG(res, irg);
/* CopyB: block copy (store, dst, src) of `data_type`; pinned exceptions. */
789 new_bd_CopyB (dbg_info *db, ir_node *block,
790 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
794 ir_graph *irg = current_ir_graph;
800 res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
802 res->attr.copyb.exc.pin_state = op_pin_state_pinned;
803 res->attr.copyb.data_type = data_type;
804 res = optimize_node(res);
805 IRN_VRFY_IRG(res, irg);
/* InstOf: runtime type test of objptr against `type`.
   Predecessors are (store, objptr); result is a mode_T tuple. */
810 new_bd_InstOf (dbg_info *db, ir_node *block, ir_node *store,
811 ir_node *objptr, ir_type *type)
815 ir_graph *irg = current_ir_graph;
/* BUG FIX: this constructor previously created an op_Sel node.  It is the
   InstOf constructor and writes the InstOf attributes (attr.io) below;
   writing attr.io through a Sel node would clobber the Sel attribute union
   (the real Sel constructor above uses attr.s.ent and mode_P_data). */
819 res = new_ir_node(db, irg, block, op_InstOf, mode_T, 2, in);
820 res->attr.io.type = type;
821 res = optimize_node(res);
822 IRN_VRFY_IRG(res, irg);
/* Raise: throws `obj`; predecessors (store, obj), mode_T result. */
827 new_bd_Raise (dbg_info *db, ir_node *block, ir_node *store, ir_node *obj)
831 ir_graph *irg = current_ir_graph;
835 res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
836 res = optimize_node(res);
837 IRN_VRFY_IRG(res, irg);
/* Bound: checks lower <= idx < upper; (store, idx, lower, upper), pinned
   exceptions, mode_T result. */
842 new_bd_Bound (dbg_info *db, ir_node *block,
843 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
847 ir_graph *irg = current_ir_graph;
853 res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
854 res->attr.bound.exc.pin_state = op_pin_state_pinned;
855 res = optimize_node(res);
856 IRN_VRFY_IRG(res, irg);
860 /* --------------------------------------------- */
861 /* private interfaces, for professional use only */
862 /* --------------------------------------------- */
864 /* Constructs a Block with a fixed number of predecessors.
865 Does not set current_block. Can not be used with automatic
866 Phi node construction. */
/* new_rd_* wrappers: save current_ir_graph, switch to the given irg,
   delegate to the corresponding new_bd_* constructor, then restore. */
868 new_rd_Block (dbg_info *db, ir_graph *irg, int arity, ir_node **in)
870 ir_graph *rem = current_ir_graph;
873 current_ir_graph = irg;
874 res = new_bd_Block (db, arity, in);
875 current_ir_graph = rem;
881 new_rd_Start (dbg_info *db, ir_graph *irg, ir_node *block)
883 ir_graph *rem = current_ir_graph;
886 current_ir_graph = irg;
887 res = new_bd_Start (db, block);
888 current_ir_graph = rem;
/* End node constructor on an explicit graph: switch current_ir_graph to
   `irg`, delegate to new_bd_End, restore the previous graph. */
894 new_rd_End (dbg_info *db, ir_graph *irg, ir_node *block)
897 ir_graph *rem = current_ir_graph;
/* BUG FIX: previously assigned `rem` (the saved old value) back to
   current_ir_graph here, so new_bd_End ran on the wrong graph.  Every
   sibling new_rd_* wrapper sets current_ir_graph = irg before delegating
   to its new_bd_* constructor. */
899 current_ir_graph = irg;
900 res = new_bd_End (db, block);
901 current_ir_graph = rem;
906 /* Creates a Phi node with all predecessors. Calling this constructor
907 is only allowed if the corresponding block is mature. */
/* The following new_rd_* constructors all follow the same pattern: save
   current_ir_graph, switch to `irg`, call the new_bd_* constructor,
   restore.  NOTE(review): the `ir_node *res;` declarations, returns and
   closing braces are elided in this dump. */
909 new_rd_Phi (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
912 ir_graph *rem = current_ir_graph;
914 current_ir_graph = irg;
915 res = new_bd_Phi (db, block,arity, in, mode);
916 current_ir_graph = rem;
922 new_rd_Const_type (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, ir_type *tp)
925 ir_graph *rem = current_ir_graph;
927 current_ir_graph = irg;
928 res = new_bd_Const_type (db, block, mode, con, tp);
929 current_ir_graph = rem;
935 new_rd_Const (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
938 ir_graph *rem = current_ir_graph;
940 current_ir_graph = irg;
941 res = new_bd_Const_type (db, block, mode, con, firm_unknown_type);
942 current_ir_graph = rem;
948 new_rd_Const_long (dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode, long value)
950 return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode));
954 new_rd_Id (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
957 ir_graph *rem = current_ir_graph;
959 current_ir_graph = irg;
960 res = new_bd_Id(db, block, val, mode);
961 current_ir_graph = rem;
967 new_rd_Proj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
971 ir_graph *rem = current_ir_graph;
973 current_ir_graph = irg;
974 res = new_bd_Proj(db, block, arg, mode, proj);
975 current_ir_graph = rem;
981 new_rd_defaultProj (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg,
985 ir_graph *rem = current_ir_graph;
987 current_ir_graph = irg;
988 res = new_bd_defaultProj(db, block, arg, max_proj);
989 current_ir_graph = rem;
995 new_rd_Conv (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
998 ir_graph *rem = current_ir_graph;
1000 current_ir_graph = irg;
1001 res = new_bd_Conv(db, block, op, mode);
1002 current_ir_graph = rem;
1008 new_rd_Cast (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp)
1011 ir_graph *rem = current_ir_graph;
1013 current_ir_graph = irg;
1014 res = new_bd_Cast(db, block, op, to_tp);
1015 current_ir_graph = rem;
1021 new_rd_Tuple (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1024 ir_graph *rem = current_ir_graph;
1026 current_ir_graph = irg;
1027 res = new_bd_Tuple(db, block, arity, in);
1028 current_ir_graph = rem;
/* Bulk instantiation of arithmetic constructors via the NEW_RD_* macros
   (most of the instantiation list is elided in this dump). */
1038 NEW_RD_DIVOP(DivMod)
1051 NEW_RD_BINOP(Borrow)
1054 new_rd_Cmp (dbg_info *db, ir_graph *irg, ir_node *block,
1055 ir_node *op1, ir_node *op2)
1058 ir_graph *rem = current_ir_graph;
1060 current_ir_graph = irg;
1061 res = new_bd_Cmp(db, block, op1, op2);
1062 current_ir_graph = rem;
1068 new_rd_Jmp (dbg_info *db, ir_graph *irg, ir_node *block)
1071 ir_graph *rem = current_ir_graph;
1073 current_ir_graph = irg;
1074 res = new_bd_Jmp(db, block);
1075 current_ir_graph = rem;
1081 new_rd_IJmp (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *tgt)
1084 ir_graph *rem = current_ir_graph;
1086 current_ir_graph = irg;
1087 res = new_bd_IJmp(db, block, tgt);
1088 current_ir_graph = rem;
1094 new_rd_Cond (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *c)
1097 ir_graph *rem = current_ir_graph;
1099 current_ir_graph = irg;
1100 res = new_bd_Cond(db, block, c);
1101 current_ir_graph = rem;
1107 new_rd_Call (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1108 ir_node *callee, int arity, ir_node **in, ir_type *tp)
1111 ir_graph *rem = current_ir_graph;
1113 current_ir_graph = irg;
1114 res = new_bd_Call(db, block, store, callee, arity, in, tp);
1115 current_ir_graph = rem;
1121 new_rd_Return (dbg_info *db, ir_graph *irg, ir_node *block,
1122 ir_node *store, int arity, ir_node **in)
1125 ir_graph *rem = current_ir_graph;
1127 current_ir_graph = irg;
1128 res = new_bd_Return(db, block, store, arity, in);
1129 current_ir_graph = rem;
1135 new_rd_Load (dbg_info *db, ir_graph *irg, ir_node *block,
1136 ir_node *store, ir_node *adr, ir_mode *mode)
1139 ir_graph *rem = current_ir_graph;
1141 current_ir_graph = irg;
1142 res = new_bd_Load(db, block, store, adr, mode);
1143 current_ir_graph = rem;
1149 new_rd_Store (dbg_info *db, ir_graph *irg, ir_node *block,
1150 ir_node *store, ir_node *adr, ir_node *val)
1153 ir_graph *rem = current_ir_graph;
1155 current_ir_graph = irg;
1156 res = new_bd_Store(db, block, store, adr, val);
1157 current_ir_graph = rem;
1163 new_rd_Alloc (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1164 ir_node *size, ir_type *alloc_type, where_alloc where)
1167 ir_graph *rem = current_ir_graph;
1169 current_ir_graph = irg;
1170 res = new_bd_Alloc (db, block, store, size, alloc_type, where);
1171 current_ir_graph = rem;
1177 new_rd_Free (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1178 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where)
1181 ir_graph *rem = current_ir_graph;
1183 current_ir_graph = irg;
1184 res = new_bd_Free(db, block, store, ptr, size, free_type, where);
1185 current_ir_graph = rem;
/* simpleSel: Sel without index inputs (arity 0). */
1191 new_rd_simpleSel (dbg_info *db, ir_graph *irg, ir_node *block,
1192 ir_node *store, ir_node *objptr, entity *ent)
1195 ir_graph *rem = current_ir_graph;
1197 current_ir_graph = irg;
1198 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
1199 current_ir_graph = rem;
1205 new_rd_Sel (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1206 int arity, ir_node **in, entity *ent)
1209 ir_graph *rem = current_ir_graph;
1211 current_ir_graph = irg;
1212 res = new_bd_Sel(db, block, store, objptr, arity, in, ent);
1213 current_ir_graph = rem;
1219 new_rd_SymConst_type (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1220 symconst_kind symkind, ir_type *tp)
1223 ir_graph *rem = current_ir_graph;
1225 current_ir_graph = irg;
1226 res = new_bd_SymConst_type(db, block, value, symkind, tp);
1227 current_ir_graph = rem;
1233 new_rd_SymConst (dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
1234 symconst_kind symkind)
1236 ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
/* SymConst convenience constructors.  The casts below force the entity/
   ident pointer into the first union member of symconst_symbol; all four
   place the node in the start block. */
1240 ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, ir_type *tp)
1242 symconst_symbol sym = {(ir_type *)symbol};
1243 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
1246 ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
1247 symconst_symbol sym = {(ir_type *)symbol};
1248 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
1251 ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1252 symconst_symbol sym = {symbol};
1253 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
1256 ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
1257 symconst_symbol sym = {symbol};
1258 return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
/* More new_rd_* wrappers, same save/switch/delegate/restore pattern. */
1262 new_rd_Sync (dbg_info *db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
1265 ir_graph *rem = current_ir_graph;
1267 current_ir_graph = irg;
1268 res = new_bd_Sync(db, block, arity, in);
1269 current_ir_graph = rem;
/* Bad: returns the graph's singleton Bad node (body elided in this dump). */
1275 new_rd_Bad (ir_graph *irg)
1281 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
1284 ir_graph *rem = current_ir_graph;
1286 current_ir_graph = irg;
1287 res = new_bd_Confirm(db, block, val, bound, cmp);
1288 current_ir_graph = rem;
1293 /* this function is often called with current_ir_graph unset */
1295 new_rd_Unknown (ir_graph *irg, ir_mode *m)
1298 ir_graph *rem = current_ir_graph;
1300 current_ir_graph = irg;
1301 res = new_bd_Unknown(m);
1302 current_ir_graph = rem;
1308 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
1311 ir_graph *rem = current_ir_graph;
1313 current_ir_graph = irg;
1314 res = new_bd_CallBegin(db, block, call);
1315 current_ir_graph = rem;
/* EndReg/EndExcept build directly on `irg` (no graph switch needed since
   new_ir_node takes the graph explicitly). */
1321 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
1325 res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
1327 IRN_VRFY_IRG(res, irg);
1332 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
1336 res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
1337 irg->end_except = res;
1338 IRN_VRFY_IRG (res, irg);
1343 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
1346 ir_graph *rem = current_ir_graph;
1348 current_ir_graph = irg;
1349 res = new_bd_Break(db, block);
1350 current_ir_graph = rem;
1356 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
1360 ir_graph *rem = current_ir_graph;
1362 current_ir_graph = irg;
1363 res = new_bd_Filter(db, block, arg, mode, proj);
1364 current_ir_graph = rem;
/* NoMem: returns the graph's singleton NoMem node (body elided). */
1370 new_rd_NoMem (ir_graph *irg) {
1375 new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block,
1376 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode)
1379 ir_graph *rem = current_ir_graph;
1381 current_ir_graph = irg;
1382 res = new_bd_Mux(db, block, sel, ir_false, ir_true, mode);
1383 current_ir_graph = rem;
1389 new_rd_Psi (dbg_info *db, ir_graph *irg, ir_node *block,
1390 int arity, ir_node *cond[], ir_node *vals[], ir_mode *mode)
1393 ir_graph *rem = current_ir_graph;
1395 current_ir_graph = irg;
1396 res = new_bd_Psi(db, block, arity, cond, vals, mode);
1397 current_ir_graph = rem;
1402 ir_node *new_rd_CopyB(dbg_info *db, ir_graph *irg, ir_node *block,
1403 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type)
1406 ir_graph *rem = current_ir_graph;
1408 current_ir_graph = irg;
1409 res = new_bd_CopyB(db, block, store, dst, src, data_type);
1410 current_ir_graph = rem;
1416 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
1417 ir_node *objptr, ir_type *type)
1420 ir_graph *rem = current_ir_graph;
1422 current_ir_graph = irg;
1423 res = new_bd_InstOf(db, block, store, objptr, type);
1424 current_ir_graph = rem;
1430 new_rd_Raise (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
1433 ir_graph *rem = current_ir_graph;
1435 current_ir_graph = irg;
1436 res = new_bd_Raise(db, block, store, obj);
1437 current_ir_graph = rem;
1442 ir_node *new_rd_Bound(dbg_info *db, ir_graph *irg, ir_node *block,
1443 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper)
1446 ir_graph *rem = current_ir_graph;
1448 current_ir_graph = irg;
1449 res = new_bd_Bound(db, block, store, idx, lower, upper);
1450 current_ir_graph = rem;
/* new_r_* convenience wrappers: the new_rd_* constructors without debug
   info (db == NULL).  Pure one-line forwarders. */
1455 ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
1456 return new_rd_Block(NULL, irg, arity, in);
1458 ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
1459 return new_rd_Start(NULL, irg, block);
1461 ir_node *new_r_End (ir_graph *irg, ir_node *block) {
1462 return new_rd_End(NULL, irg, block);
1464 ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
1465 return new_rd_Jmp(NULL, irg, block);
1467 ir_node *new_r_IJmp (ir_graph *irg, ir_node *block, ir_node *tgt) {
1468 return new_rd_IJmp(NULL, irg, block, tgt);
1470 ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
1471 return new_rd_Cond(NULL, irg, block, c);
1473 ir_node *new_r_Return (ir_graph *irg, ir_node *block,
1474 ir_node *store, int arity, ir_node **in) {
1475 return new_rd_Return(NULL, irg, block, store, arity, in);
1477 ir_node *new_r_Const (ir_graph *irg, ir_node *block,
1478 ir_mode *mode, tarval *con) {
1479 return new_rd_Const(NULL, irg, block, mode, con);
1481 ir_node *new_r_Const_long(ir_graph *irg, ir_node *block,
1482 ir_mode *mode, long value) {
1483 return new_rd_Const_long(NULL, irg, block, mode, value);
1485 ir_node *new_r_Const_type(ir_graph *irg, ir_node *block,
1486 ir_mode *mode, tarval *con, ir_type *tp) {
1487 return new_rd_Const_type(NULL, irg, block, mode, con, tp);
1489 ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
1490 symconst_symbol value, symconst_kind symkind) {
1491 return new_rd_SymConst(NULL, irg, block, value, symkind);
1493 ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
1494 ir_node *objptr, entity *ent) {
1495 return new_rd_Sel(NULL, irg, block, store, objptr, 0, NULL, ent);
1497 ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
1498 ir_node *objptr, int n_index, ir_node **index,
1500 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
1502 ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
1503 ir_node *callee, int arity, ir_node **in,
1505 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
1507 ir_node *new_r_Add (ir_graph *irg, ir_node *block,
1508 ir_node *op1, ir_node *op2, ir_mode *mode) {
1509 return new_rd_Add(NULL, irg, block, op1, op2, mode);
1511 ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
1512 ir_node *op1, ir_node *op2, ir_mode *mode) {
1513 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
1515 ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
1516 ir_node *op, ir_mode *mode) {
1517 return new_rd_Minus(NULL, irg, block, op, mode);
1519 ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
1520 ir_node *op1, ir_node *op2, ir_mode *mode) {
1521 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
1523 ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
1524 ir_node *memop, ir_node *op1, ir_node *op2) {
1525 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
1527 ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
1528 ir_node *memop, ir_node *op1, ir_node *op2) {
1529 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
1531 ir_node *new_r_Div (ir_graph *irg, ir_node *block,
1532 ir_node *memop, ir_node *op1, ir_node *op2) {
1533 return new_rd_Div(NULL, irg, block, memop, op1, op2);
1535 ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
1536 ir_node *memop, ir_node *op1, ir_node *op2) {
1537 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
1539 ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
1540 ir_node *op, ir_mode *mode) {
1541 return new_rd_Abs(NULL, irg, block, op, mode);
1543 ir_node *new_r_And (ir_graph *irg, ir_node *block,
1544 ir_node *op1, ir_node *op2, ir_mode *mode) {
1545 return new_rd_And(NULL, irg, block, op1, op2, mode);
1547 ir_node *new_r_Or (ir_graph *irg, ir_node *block,
1548 ir_node *op1, ir_node *op2, ir_mode *mode) {
1549 return new_rd_Or(NULL, irg, block, op1, op2, mode);
1551 ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
1552 ir_node *op1, ir_node *op2, ir_mode *mode) {
1553 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
1555 ir_node *new_r_Not (ir_graph *irg, ir_node *block,
1556 ir_node *op, ir_mode *mode) {
1557 return new_rd_Not(NULL, irg, block, op, mode);
1559 ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
1560 ir_node *op, ir_node *k, ir_mode *mode) {
1561 return new_rd_Shl(NULL, irg, block, op, k, mode);
1563 ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
1564 ir_node *op, ir_node *k, ir_mode *mode) {
1565 return new_rd_Shr(NULL, irg, block, op, k, mode);
1567 ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
1568 ir_node *op, ir_node *k, ir_mode *mode) {
1569 return new_rd_Shrs(NULL, irg, block, op, k, mode);
1571 ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
1572 ir_node *op, ir_node *k, ir_mode *mode) {
1573 return new_rd_Rot(NULL, irg, block, op, k, mode);
1575 ir_node *new_r_Carry (ir_graph *irg, ir_node *block,
1576 ir_node *op, ir_node *k, ir_mode *mode) {
1577 return new_rd_Carry(NULL, irg, block, op, k, mode);
1579 ir_node *new_r_Borrow (ir_graph *irg, ir_node *block,
1580 ir_node *op, ir_node *k, ir_mode *mode) {
1581 return new_rd_Borrow(NULL, irg, block, op, k, mode);
1583 ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
1584 ir_node *op1, ir_node *op2) {
1585 return new_rd_Cmp(NULL, irg, block, op1, op2);
1587 ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
1588 ir_node *op, ir_mode *mode) {
1589 return new_rd_Conv(NULL, irg, block, op, mode);
1591 ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, ir_type *to_tp) {
1592 return new_rd_Cast(NULL, irg, block, op, to_tp);
1594 ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
1595 ir_node **in, ir_mode *mode) {
1596 return new_rd_Phi(NULL, irg, block, arity, in, mode);
1598 ir_node *new_r_Load (ir_graph *irg, ir_node *block,
1599 ir_node *store, ir_node *adr, ir_mode *mode) {
1600 return new_rd_Load(NULL, irg, block, store, adr, mode);
1602 ir_node *new_r_Store (ir_graph *irg, ir_node *block,
1603 ir_node *store, ir_node *adr, ir_node *val) {
1604 return new_rd_Store(NULL, irg, block, store, adr, val);
1606 ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
1607 ir_node *size, ir_type *alloc_type, where_alloc where) {
1608 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
1610 ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
1611 ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
1612 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
1614 ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
1615 return new_rd_Sync(NULL, irg, block, arity, in);
1617 ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
1618 ir_mode *mode, long proj) {
1619 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
1621 ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
1623 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
1625 ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
1626 int arity, ir_node **in) {
1627 return new_rd_Tuple(NULL, irg, block, arity, in );
1629 ir_node *new_r_Id (ir_graph *irg, ir_node *block,
1630 ir_node *val, ir_mode *mode) {
1631 return new_rd_Id(NULL, irg, block, val, mode);
1633 ir_node *new_r_Bad (ir_graph *irg) {
1634 return new_rd_Bad(irg);
1636 ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
1637 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
1639 ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
1640 return new_rd_Unknown(irg, m);
1642 ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
1643 return new_rd_CallBegin(NULL, irg, block, callee);
1645 ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
1646 return new_rd_EndReg(NULL, irg, block);
1648 ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
1649 return new_rd_EndExcept(NULL, irg, block);
1651 ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
1652 return new_rd_Break(NULL, irg, block);
1654 ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
1655 ir_mode *mode, long proj) {
1656 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1658 ir_node *new_r_NoMem (ir_graph *irg) {
1659 return new_rd_NoMem(irg);
1661 ir_node *new_r_Mux (ir_graph *irg, ir_node *block,
1662 ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
1663 return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode);
1665 ir_node *new_r_Psi (ir_graph *irg, ir_node *block,
1666 int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
1667 return new_rd_Psi(NULL, irg, block, arity, conds, vals, mode);
1669 ir_node *new_r_CopyB(ir_graph *irg, ir_node *block,
1670 ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
1671 return new_rd_CopyB(NULL, irg, block, store, dst, src, data_type);
1673 ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
1675 return (new_rd_InstOf (NULL, irg, block, store, objptr, type));
1677 ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
1678 ir_node *store, ir_node *obj) {
1679 return new_rd_Raise(NULL, irg, block, store, obj);
1681 ir_node *new_r_Bound(ir_graph *irg, ir_node *block,
1682 ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
1683 return new_rd_Bound(NULL, irg, block, store, idx, lower, upper);
1686 /** ********************/
1687 /** public interfaces */
1688 /** construction tools */
1692 * - create a new Start node in the current block
1694 * @return s - pointer to the created Start node
/* Creates a Start node (op_Start, mode_T, no predecessors) in the
   current block of current_ir_graph, then optimizes and verifies it. */
1699 new_d_Start (dbg_info *db)
1703 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1704 op_Start, mode_T, 0, NULL);
1705 /* res->attr.start.irg = current_ir_graph; */
1707 res = optimize_node(res);
1708 IRN_VRFY_IRG(res, current_ir_graph);
/* Creates the End node (op_End, mode_X) in the current block.
   NOTE(review): arity -1 presumably requests a dynamic in-array, since End
   collects its predecessors/keep-alives later -- confirm against new_ir_node. */
1713 new_d_End (dbg_info *db)
1716 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
1717 op_End, mode_X, -1, NULL);
1718 res = optimize_node(res);
1719 IRN_VRFY_IRG(res, current_ir_graph);
1724 /* Constructs a Block with a fixed number of predecessors.
1725 Does set current_block. Can be used with automatic Phi
1726 node construction. */
1728 new_d_Block (dbg_info *db, int arity, ir_node **in)
1732 int has_unknown = 0;
1734 res = new_bd_Block(db, arity, in);
1736 /* Create and initialize array for Phi-node construction. */
1737 if (get_irg_phase_state(current_ir_graph) == phase_building) {
1738 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1739 current_ir_graph->n_loc);
1740 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Scan the predecessors: a Block with an Unknown predecessor must not be
   optimized yet (the CFG is still under construction there). */
1743 for (i = arity-1; i >= 0; i--)
1744 if (get_irn_op(in[i]) == op_Unknown) {
1749 if (!has_unknown) res = optimize_node(res);
/* Make this the block new nodes are placed in from now on. */
1750 current_ir_graph->current_block = res;
1752 IRN_VRFY_IRG(res, current_ir_graph);
1757 /* ***********************************************************************/
1758 /* Methods necessary for automatic Phi node creation */
1760 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1761 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1762 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1763 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1765 Call Graph: ( A ---> B == A "calls" B)
1767 get_value mature_immBlock
1775 get_r_value_internal |
1779 new_rd_Phi0 new_rd_Phi_in
1781 * *************************************************************************** */
1783 /** Creates a Phi node with 0 predecessors */
/* NOTE(review): deliberately built raw with new_ir_node and not passed through
   optimize_node in the visible lines -- a Phi0 is a placeholder that is
   completed (and possibly removed) when the block is matured. */
1784 static INLINE ir_node *
1785 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1789 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
1790 IRN_VRFY_IRG(res, irg);
1794 /* There are two implementations of the Phi node construction. The first
1795 is faster, but does not work for blocks with more than 2 predecessors.
1796 The second works always but is slower and causes more unnecessary Phi
1798 Select the implementations by the following preprocessor flag set in
1800 #if USE_FAST_PHI_CONSTRUCTION
1802 /* This is a stack used for allocating and deallocating nodes in
1803 new_rd_Phi_in. The original implementation used the obstack
1804 to model this stack, now it is explicit. This reduces side effects.
1806 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in_stack.
   NOTE(review): the malloc result is used without a NULL check in the lines
   visible here -- confirm an out-of-memory policy before enabling this path. */
1808 new_Phi_in_stack(void) {
1811 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1813 res->stack = NEW_ARR_F (ir_node *, 0);
/* Releases the flexible array owned by the stack (and the stack itself). */
1820 free_Phi_in_stack(Phi_in_stack *s) {
1821 DEL_ARR_F(s->stack);
/* Pushes a reclaimed Phi node onto the free-stack of current_ir_graph:
   append if the stack is full up to pos, otherwise overwrite the free slot. */
1825 free_to_Phi_in_stack(ir_node *phi) {
1826 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1827 current_ir_graph->Phi_in_stack->pos)
1828 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1830 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1832 (current_ir_graph->Phi_in_stack->pos)++;
/* Returns a Phi node with the given arity/mode in `block`: either a freshly
   allocated one, or one popped from the explicit Phi-in free-stack and
   re-initialized in place (the old in-array stays on the obstack). */
1835 static INLINE ir_node *
1836 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1837 int arity, ir_node **in) {
1839 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1840 int pos = current_ir_graph->Phi_in_stack->pos;
1844 /* We need to allocate a new node */
/* NOTE(review): `db` is not a parameter of this function and no local of that
   name is visible in this scope -- this cannot compile as written.  A NULL
   dbg_info was presumably intended (as in the non-stack path); fix before
   building with USE_EXPLICIT_PHI_IN_STACK. */
1845 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1846 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1848 /* reuse the old node and initialize it again. */
1851 assert (res->kind == k_ir_node);
1852 assert (res->op == op_Phi);
1856 assert (arity >= 0);
1857 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1858 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
/* in[0] is the block; copy the arity data predecessors behind it. */
1860 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1862 (current_ir_graph->Phi_in_stack->pos)--;
1866 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1868 /* Creates a Phi node with a given, fixed array **in of predecessors.
1869 If the Phi node is unnecessary, as the same value reaches the block
1870 through all control flow paths, it is eliminated and the value
1871 returned directly. This constructor is only intended for use in
1872 the automatic Phi node generation triggered by get_value or mature.
1873 The implementation is quite tricky and depends on the fact, that
1874 the nodes are allocated on a stack:
1875 The in array contains predecessors and NULLs. The NULLs appear,
1876 if get_r_value_internal, that computed the predecessors, reached
1877 the same block on two paths. In this case the same value reaches
1878 this block on both paths, there is no definition in between. We need
1879 not allocate a Phi where these path's merge, but we have to communicate
1880 this fact to the caller. This happens by returning a pointer to the
1881 node the caller _will_ allocate. (Yes, we predict the address. We can
1882 do so because the nodes are allocated on the obstack.) The caller then
1883 finds a pointer to itself and, when this routine is called again,
/* Fast-variant Phi constructor: builds a Phi from `in` (which may contain
   NULLs, see below), and collapses it to its single real predecessor when no
   true merge happens, freeing the speculatively allocated node again. */
1886 static INLINE ir_node *
1887 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1890 ir_node *res, *known;
1892 /* Allocate a new node on the obstack. This can return a node to
1893 which some of the pointers in the in-array already point.
1894 Attention: the constructor copies the in array, i.e., the later
1895 changes to the array in this routine do not affect the
1896 constructed node! If the in array contains NULLs, there will be
1897 missing predecessors in the returned node. Is this a possible
1898 internal state of the Phi node generation? */
1899 #if USE_EXPLICIT_PHI_IN_STACK
1900 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1902 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1903 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1906 /* The in-array can contain NULLs. These were returned by
1907 get_r_value_internal if it reached the same block/definition on a
1908 second path. The NULLs are replaced by the node itself to
1909 simplify the test in the next loop. */
1910 for (i = 0; i < ins; ++i) {
1915 /* This loop checks whether the Phi has more than one predecessor.
1916 If so, it is a real Phi node and we break the loop. Else the Phi
1917 node merges the same definition on several paths and therefore is
1919 for (i = 0; i < ins; ++i) {
1920 if (in[i] == res || in[i] == known)
1929 /* i==ins: there is at most one predecessor, we don't need a phi node. */
1931 #if USE_EXPLICIT_PHI_IN_STACK
1932 free_to_Phi_in_stack(res);
/* Without the explicit stack: drop the node from the edge info and give its
   obstack memory back (valid because it is the top-most allocation). */
1934 edges_node_deleted(res, current_ir_graph);
1935 obstack_free(current_ir_graph->obst, res);
1939 res = optimize_node (res);
1940 IRN_VRFY_IRG(res, irg);
1943 /* return the pointer to the Phi node. This node might be deallocated! */
1948 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1951 allocates and returns this node. The routine called to allocate the
1952 node might optimize it away and return a real value, or even a pointer
1953 to a deallocated Phi node on top of the obstack!
1954 This function is called with an in-array of proper size. **/
/* Fast-variant phi_merge: collects the value at `pos` from every predecessor
   block into nin[] and builds (or folds away) the Phi via new_rd_Phi_in. */
1956 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1958 ir_node *prevBlock, *res;
1961 /* This loop goes to all predecessor blocks of the block the Phi node is in
1962 and there finds the operands of the Phi node by calling
1963 get_r_value_internal. */
1964 for (i = 1; i <= ins; ++i) {
1965 assert (block->in[i]);
1966 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1968 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1971 /* After collecting all predecessors into the array nin a new Phi node
1972 with these predecessors is created. This constructor contains an
1973 optimization: If all predecessors of the Phi node are identical it
1974 returns the only operand instead of a new Phi node. If the value
1975 passes two different control flow edges without being defined, and
1976 this is the second path treated, a pointer to the node that will be
1977 allocated for the first path (recursion) is returned. We already
1978 know the address of this node, as it is the next node to be allocated
1979 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1980 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1982 /* Now we know the value for "pos" and can enter it in the array with
1983 all known local variables. Attention: this might be a pointer to
1984 a node, that later will be allocated!!! See new_rd_Phi_in.
1985 If this is called in mature, after some set_value in the same block,
1986 the proper value must not be overwritten:
1988 get_value (makes Phi0, put's it into graph_arr)
1989 set_value (overwrites Phi0 in graph_arr)
1990 mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
1993 if (!block->attr.block.graph_arr[pos]) {
1994 block->attr.block.graph_arr[pos] = res;
1996 /* printf(" value already computed by %s\n",
1997 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
2003 /* This function returns the last definition of a variable. In case
2004 this variable was last defined in a previous block, Phi nodes are
2005 inserted. If the part of the firm graph containing the definition
2006 is not yet constructed, a dummy Phi node is returned. */
2008 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2011 /* There are 4 cases to treat.
2013 1. The block is not mature and we visit it the first time. We can not
2014 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2015 predecessors is returned. This node is added to the linked list (field
2016 "link") of the containing block to be completed when this block is
2017 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2020 2. The value is already known in this block, graph_arr[pos] is set and we
2021 visit the block the first time. We can return the value without
2022 creating any new nodes.
2024 3. The block is mature and we visit it the first time. A Phi node needs
2025 to be created (phi_merge). If the Phi is not needed, as all it's
2026 operands are the same value reaching the block through different
2027 paths, it's optimized away and the value itself is returned.
2029 4. The block is mature, and we visit it the second time. Now two
2030 subcases are possible:
2031 * The value was computed completely the last time we were here. This
2032 is the case if there is no loop. We can return the proper value.
2033 * The recursion that visited this node and set the flag did not
2034 return yet. We are computing a value in a loop and need to
2035 break the recursion without knowing the result yet.
2036 @@@ strange case. Straight forward we would create a Phi before
2037 starting the computation of it's predecessors. In this case we will
2038 find a Phi here in any case. The problem is that this implementation
2039 only creates a Phi after computing the predecessors, so that it is
2040 hard to compute self references of this Phi. @@@
2041 There is no simple check for the second subcase. Therefore we check
2042 for a second visit and treat all such cases as the second subcase.
2043 Anyways, the basic situation is the same: we reached a block
2044 on two paths without finding a definition of the value: No Phi
2045 nodes are needed on both paths.
2046 We return this information "Two paths, no Phi needed" by a very tricky
2047 implementation that relies on the fact that an obstack is a stack and
2048 will return a node with the same address on different allocations.
2049 Look also at phi_merge and new_rd_phi_in to understand this.
2050 @@@ Unfortunately this does not work, see testprogram
2051 three_cfpred_example.
2055 /* case 4 -- already visited. */
2056 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
2058 /* visited the first time */
2059 set_irn_visited(block, get_irg_visited(current_ir_graph));
2061 /* Get the local valid value */
2062 res = block->attr.block.graph_arr[pos];
2064 /* case 2 -- If the value is actually computed, return it. */
2065 if (res) return res;
2067 if (block->attr.block.matured) { /* case 3 */
2069 /* The Phi has the same amount of ins as the corresponding block. */
2070 int ins = get_irn_arity(block);
2072 NEW_ARR_A (ir_node *, nin, ins);
2074 /* Phi merge collects the predecessors and then creates a node. */
2075 res = phi_merge (block, pos, mode, nin, ins);
2077 } else { /* case 1 */
2078 /* The block is not mature, we don't know how many in's are needed. A Phi
2079 with zero predecessors is created. Such a Phi node is called Phi0
2080 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
2081 to the list of Phi0 nodes in this block to be matured by mature_immBlock
2083 The Phi0 has to remember the pos of it's internal value. If the real
2084 Phi is computed, pos is used to update the array with the local
2087 res = new_rd_Phi0 (current_ir_graph, block, mode);
2088 res->attr.phi0_pos = pos;
2089 res->link = block->link;
2093 /* If we get here, the frontend missed a use-before-definition error */
2096 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2097 assert (mode->code >= irm_F && mode->code <= irm_P);
/* NOTE(review): this fast variant indexes tarval_mode_null[] directly while
   the slow variant below uses get_mode_null(mode) -- presumably equivalent,
   but consider unifying on the accessor. */
2098 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2099 tarval_mode_null[mode->code]);
2102 /* The local valid value is available now. */
2103 block->attr.block.graph_arr[pos] = res;
2111 it starts the recursion. This causes an Id at the entry of
2112 every block that has no definition of the value! **/
2114 #if USE_EXPLICIT_PHI_IN_STACK
/* Dummy stubs: the slow Phi construction does not use an explicit
   Phi-in stack, but callers may still create/free one. */
2116 Phi_in_stack * new_Phi_in_stack() { return NULL; }
2117 void free_Phi_in_stack(Phi_in_stack *s) { }
/* Slow-variant Phi constructor: like the fast variant, but additionally skips
   Id predecessors, ignores Bad predecessors, and resolves references to the
   Phi0 placeholder `phi0` that this Phi will replace. */
2120 static INLINE ir_node *
2121 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
2122 ir_node **in, int ins, ir_node *phi0)
2125 ir_node *res, *known;
2127 /* Allocate a new node on the obstack. The allocation copies the in
2129 res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
2130 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
2132 /* This loop checks whether the Phi has more than one predecessor.
2133 If so, it is a real Phi node and we break the loop. Else the
2134 Phi node merges the same definition on several paths and therefore
2135 is not needed. Don't consider Bad nodes! */
2137 for (i=0; i < ins; ++i)
2141 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
2143 /* Optimize self referencing Phis: We can't detect them yet properly, as
2144 they still refer to the Phi0 they will replace. So replace right now. */
2145 if (phi0 && in[i] == phi0) in[i] = res;
2147 if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
2155 /* i==ins: there is at most one predecessor, we don't need a phi node. */
2158 edges_node_deleted(res, current_ir_graph);
2159 obstack_free (current_ir_graph->obst, res);
2160 if (is_Phi(known)) {
2161 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
2162 order, an enclosing Phi node may become superfluous. */
2163 res = optimize_in_place_2(known);
2165 exchange(known, res);
2171 /* An undefined value, e.g., in unreachable code. */
2175 res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
2176 IRN_VRFY_IRG(res, irg);
2177 /* Memory Phis in endless loops must be kept alive.
2178 As we can't distinguish these easily we keep all of them alive. */
2179 if ((res->op == op_Phi) && (mode == mode_M))
2180 add_End_keepalive(irg->end, res);
2187 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
2189 #if PRECISE_EXC_CONTEXT
2191 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
2193 /* Construct a new frag_array for node n.
2194 Copy the content from the current graph_arr of the corresponding block:
2195 this is the current state.
2196 Set ProjM(n) as current memory state.
2197 Further the last entry in frag_arr of current block points to n. This
2198 constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
2200 static INLINE ir_node ** new_frag_arr (ir_node *n)
2205 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
2206 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
2207 sizeof(ir_node *)*current_ir_graph->n_loc);
2209 /* turn off optimization before allocating Proj nodes, as res isn't
2211 opt = get_opt_optimize(); set_optimize(0);
/* NOTE(review): the matching set_optimize(opt) restore is not visible in this
   excerpt -- presumably it follows the Proj allocation; confirm. */
2212 /* Here we rely on the fact that all frag ops have Memory as first result! */
2213 if (get_irn_op(n) == op_Call)
2214 arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
2215 else if (get_irn_op(n) == op_CopyB)
2216 arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
2217 else if (get_irn_op(n) == op_Bound)
2218 arr[0] = new_Proj(n, mode_M, pn_Bound_M_except);
/* For all remaining fragile ops the memory Proj numbers must coincide, so
   one Proj constant covers them; the asserts pin that invariant. */
2220 assert((pn_Quot_M == pn_DivMod_M) &&
2221 (pn_Quot_M == pn_Div_M) &&
2222 (pn_Quot_M == pn_Mod_M) &&
2223 (pn_Quot_M == pn_Load_M) &&
2224 (pn_Quot_M == pn_Store_M) &&
2225 (pn_Quot_M == pn_Alloc_M) );
2226 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
/* Link n into the block's chain of fragile ops via the last graph_arr slot. */
2230 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
2235 * returns the frag_arr from a node
/* Dispatches on the opcode because each fragile op stores its frag_arr in a
   different member of the attribute union.  (The case labels themselves are
   on lines elided from this excerpt.) */
2237 static INLINE ir_node **
2238 get_frag_arr (ir_node *n) {
2239 switch (get_irn_opcode(n)) {
2241 return n->attr.call.exc.frag_arr;
2243 return n->attr.a.exc.frag_arr;
2245 return n->attr.load.exc.frag_arr;
2247 return n->attr.store.exc.frag_arr;
2249 return n->attr.except.frag_arr;
/* Writes `val` at slot `pos` into frag_arr and into every frag_arr further
   down the block's fragile-op chain that has not set the slot yet.
   NOTE(review): both a recursive formulation and a bounded-iterative one
   (loop capped at 1000 with a "potential endless recursion" assert) appear in
   these lines; presumably one of them is selected by an #if on lines elided
   from this excerpt -- confirm against the full file. */
2254 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
2256 if (!frag_arr[pos]) frag_arr[pos] = val;
2257 if (frag_arr[current_ir_graph->n_loc - 1]) {
2258 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2259 assert(arr != frag_arr && "Endless recursion detected");
2260 set_frag_value(arr, pos, val);
2265 for (i = 0; i < 1000; ++i) {
2266 if (!frag_arr[pos]) {
2267 frag_arr[pos] = val;
2269 if (frag_arr[current_ir_graph->n_loc - 1]) {
2270 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
2276 assert(0 && "potential endless recursion");
/* Like get_r_value_internal, but reads the value as seen after the fragile
   op `cfOp` (precise exception context): prefer the value recorded in cfOp's
   frag_arr; build a Phi/Phi0 if the block redefined the slot after cfOp. */
2281 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
2285 assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
2287 frag_arr = get_frag_arr(cfOp);
2288 res = frag_arr[pos];
2290 if (block->attr.block.graph_arr[pos]) {
2291 /* There was a set_value after the cfOp and no get_value before that
2292 set_value. We must build a Phi node now. */
2293 if (block->attr.block.matured) {
2294 int ins = get_irn_arity(block);
2296 NEW_ARR_A (ir_node *, nin, ins);
2297 res = phi_merge(block, pos, mode, nin, ins);
/* Block not yet matured: fall back to a Phi0 placeholder, remembering the
   slot so mature_immBlock can complete it. */
2299 res = new_rd_Phi0 (current_ir_graph, block, mode);
2300 res->attr.phi0_pos = pos;
2301 res->link = block->link;
2305 /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
2306 but this should be better: (remove comment if this works) */
2307 /* It's a Phi, we can write this into all graph_arrs with NULL */
2308 set_frag_value(block->attr.block.graph_arr, pos, res);
2310 res = get_r_value_internal(block, pos, mode);
2311 set_frag_value(block->attr.block.graph_arr, pos, res);
2316 #endif /* PRECISE_EXC_CONTEXT */
2319 computes the predecessors for the real phi node, and then
2320 allocates and returns this node. The routine called to allocate the
2321 node might optimize it away and return a real value.
2322 This function must be called with an in-array of proper size. **/
2324 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
2326 ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
2329 /* If this block has no value at pos create a Phi0 and remember it
2330 in graph_arr to break recursions.
2331 Else we may not set graph_arr as there a later value is remembered. */
2333 if (!block->attr.block.graph_arr[pos]) {
2334 if (block == get_irg_start_block(current_ir_graph)) {
2335 /* Collapsing to Bad tarvals is no good idea.
2336 So we call a user-supplied routine here that deals with this case as
2337 appropriate for the given language. Sorrily the only help we can give
2338 here is the position.
2340 Even if all variables are defined before use, it can happen that
2341 we get to the start block, if a Cond has been replaced by a tuple
2342 (bad, jmp). In this case we call the function needlessly, eventually
2343 generating a non-existent error.
2344 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
2347 if (default_initialize_local_variable) {
2348 ir_node *rem = get_cur_block();
/* The callback builds nodes, so temporarily make `block` the current block. */
2350 set_cur_block(block);
2351 block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
2355 block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
2356 /* We don't need to care about exception ops in the start block.
2357 There are none by definition. */
2358 return block->attr.block.graph_arr[pos];
2360 phi0 = new_rd_Phi0(current_ir_graph, block, mode);
2361 block->attr.block.graph_arr[pos] = phi0;
2362 #if PRECISE_EXC_CONTEXT
2363 if (get_opt_precise_exc_context()) {
2364 /* Set graph_arr for fragile ops. Also here we should break recursion.
2365 We could choose a cyclic path through a cfop. But the recursion would
2366 break at some point. */
2367 set_frag_value(block->attr.block.graph_arr, pos, phi0);
2373 /* This loop goes to all predecessor blocks of the block the Phi node
2374 is in and there finds the operands of the Phi node by calling
2375 get_r_value_internal. */
2376 for (i = 1; i <= ins; ++i) {
2377 prevCfOp = skip_Proj(block->in[i]);
2379 if (is_Bad(prevCfOp)) {
2380 /* In case a Cond has been optimized we would get right to the start block
2381 with an invalid definition. */
2382 nin[i-1] = new_Bad();
2385 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
2387 if (!is_Bad(prevBlock)) {
2388 #if PRECISE_EXC_CONTEXT
2389 if (get_opt_precise_exc_context() &&
2390 is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
/* NOTE(review): with asserts enabled get_r_frag_value_internal runs twice
   (once inside the assert, once for the assignment) -- presumably idempotent,
   but hoisting the result into a local would avoid the double walk. */
2391 assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
2392 nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
2395 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
2397 nin[i-1] = new_Bad();
2401 /* We want to pass the Phi0 node to the constructor: this finds additional
2402 optimization possibilities.
2403 The Phi0 node either is allocated in this function, or it comes from
2404 a former call to get_r_value_internal. In this case we may not yet
2405 exchange phi0, as this is done in mature_immBlock. */
2407 phi0_all = block->attr.block.graph_arr[pos];
/* Only treat it as the Phi0 placeholder if it really is a 0-ary Phi of this
   very block; anything else means a later set_value overwrote the slot. */
2408 if (!((get_irn_op(phi0_all) == op_Phi) &&
2409 (get_irn_arity(phi0_all) == 0) &&
2410 (get_nodes_block(phi0_all) == block)))
2416 /* After collecting all predecessors into the array nin a new Phi node
2417 with these predecessors is created. This constructor contains an
2418 optimization: If all predecessors of the Phi node are identical it
2419 returns the only operand instead of a new Phi node. */
2420 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
2422 /* In case we allocated a Phi0 node at the beginning of this procedure,
2423 we need to exchange this Phi0 with the real Phi. */
2425 exchange(phi0, res);
2426 block->attr.block.graph_arr[pos] = res;
2427 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
2428 only an optimization. */
2434 /* This function returns the last definition of a variable. In case
2435 this variable was last defined in a previous block, Phi nodes are
2436 inserted. If the part of the firm graph containing the definition
2437 is not yet constructed, a dummy Phi node is returned. */
2439 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
2442 /* There are 4 cases to treat.
2444 1. The block is not mature and we visit it the first time. We can not
2445 create a proper Phi node, therefore a Phi0, i.e., a Phi without
2446 predecessors is returned. This node is added to the linked list (field
2447 "link") of the containing block to be completed when this block is
2448 matured. (Completion will add a new Phi and turn the Phi0 into an Id
2451 2. The value is already known in this block, graph_arr[pos] is set and we
2452 visit the block the first time. We can return the value without
2453 creating any new nodes.
2455 3. The block is mature and we visit it the first time. A Phi node needs
2456 to be created (phi_merge). If the Phi is not needed, as all it's
2457 operands are the same value reaching the block through different
2458 paths, it's optimized away and the value itself is returned.
2460 4. The block is mature, and we visit it the second time. Now two
2461 subcases are possible:
2462 * The value was computed completely the last time we were here. This
2463 is the case if there is no loop. We can return the proper value.
2464 * The recursion that visited this node and set the flag did not
2465 return yet. We are computing a value in a loop and need to
2466 break the recursion. This case only happens if we visited
2467 the same block with phi_merge before, which inserted a Phi0.
2468 So we return the Phi0.
2471 /* case 4 -- already visited. */
2472 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
2473 /* As phi_merge allocates a Phi0 this value is always defined. Here
2474 is the critical difference of the two algorithms. */
2475 assert(block->attr.block.graph_arr[pos]);
2476 return block->attr.block.graph_arr[pos];
2479 /* visited the first time */
2480 set_irn_visited(block, get_irg_visited(current_ir_graph));
2482 /* Get the local valid value */
2483 res = block->attr.block.graph_arr[pos];
2485 /* case 2 -- If the value is actually computed, return it. */
2486 if (res) { return res; };
2488 if (block->attr.block.matured) { /* case 3 */
2490 /* The Phi has the same amount of ins as the corresponding block. */
2491 int ins = get_irn_arity(block);
2493 NEW_ARR_A (ir_node *, nin, ins);
2495 /* Phi merge collects the predecessors and then creates a node. */
2496 res = phi_merge (block, pos, mode, nin, ins);
2498 } else { /* case 1 */
2499 /* The block is not mature, we don't know how many in's are needed. A Phi
2500 with zero predecessors is created. Such a Phi node is called Phi0
2501 node. The Phi0 is then added to the list of Phi0 nodes in this block
2502 to be matured by mature_immBlock later.
2503 The Phi0 has to remember the pos of its internal value. If the real
2504 Phi is computed, pos is used to update the array with the local
2506 res = new_rd_Phi0 (current_ir_graph, block, mode);
2507 res->attr.phi0_pos = pos;
2508 res->link = block->link;
2512 /* If we get here, the frontend missed a use-before-definition error */
2515 printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
2516 assert (mode->code >= irm_F && mode->code <= irm_P);
2517 res = new_rd_Const (NULL, current_ir_graph, block, mode,
2518 get_mode_null(mode));
2521 /* The local valid value is available now. */
2522 block->attr.block.graph_arr[pos] = res;
2527 #endif /* USE_FAST_PHI_CONSTRUCTION */
2529 /* ************************************************************************** */
2532 * Finalize a Block node, when all control flows are known.
2533 * Acceptable parameters are only Block nodes.
/* Finalize an immature Block once all of its control-flow predecessors
   have been added: fix the in-array length, complete all pending Phi0
   nodes attached to the block (via its link list), and mark it mature. */
2536 mature_immBlock (ir_node *block)
2542 assert (get_irn_opcode(block) == iro_Block);
2543 /* @@@ should be commented in
2544 assert (!get_Block_matured(block) && "Block already matured"); */
2546 if (!get_Block_matured(block)) {
/* ARR_LEN counts the block itself in slot 0, hence -1 for the real arity. */
2547 ins = ARR_LEN (block->in)-1;
2548 /* Fix block parameters */
2549 block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
2551 /* An array for building the Phi nodes. */
2552 NEW_ARR_A (ir_node *, nin, ins);
2554 /* Traverse a chain of Phi nodes attached to this block and mature
/* Each Phi0 on the chain is replaced by a proper Phi built by phi_merge. */
2556 for (n = block->link; n; n=next) {
2557 inc_irg_visited(current_ir_graph);
2559 exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
2562 block->attr.block.matured = 1;
2564 /* Now, as the block is a finished firm node, we can optimize it.
2565 Since other nodes have been allocated since the block was created
2566 we cannot free the node on the obstack. Therefore we have to call
2568 Unfortunately the optimization does not change a lot, as all allocated
2569 nodes refer to the unoptimized node.
2570 We can call _2, as global cse has no effect on blocks. */
2571 block = optimize_in_place_2(block);
2572 IRN_VRFY_IRG(block, current_ir_graph);
/* Constructors with debug info that build nodes in the graph's current
   block; they forward to the corresponding new_bd_* block constructors. */
2577 new_d_Phi (dbg_info *db, int arity, ir_node **in, ir_mode *mode)
2579 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
/* Const nodes are placed in the start block (see start_block below),
   not in the current block. */
2583 new_d_Const (dbg_info *db, ir_mode *mode, tarval *con)
2585 return new_bd_Const(db, current_ir_graph->start_block, mode, con);
2589 new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
2591 return new_bd_Const_long(db, current_ir_graph->start_block, mode, value);
2595 new_d_Const_type (dbg_info *db, ir_mode *mode, tarval *con, ir_type *tp)
2597 return new_bd_Const_type(db, current_ir_graph->start_block, mode, con, tp);
2602 new_d_Id (dbg_info *db, ir_node *val, ir_mode *mode)
2604 return new_bd_Id(db, current_ir_graph->current_block, val, mode);
2608 new_d_Proj (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2610 return new_bd_Proj(db, current_ir_graph->current_block, arg, mode, proj);
/* Default Proj of a Cond: marks the Cond fragmentary and records
   max_proj as its default projection number. */
2614 new_d_defaultProj (dbg_info *db, ir_node *arg, long max_proj)
2617 assert(arg->op == op_Cond);
2618 arg->attr.c.kind = fragmentary;
2619 arg->attr.c.default_proj = max_proj;
2620 res = new_Proj (arg, mode_X, max_proj);
2625 new_d_Conv (dbg_info *db, ir_node *op, ir_mode *mode)
2627 return new_bd_Conv(db, current_ir_graph->current_block, op, mode);
2631 new_d_Cast (dbg_info *db, ir_node *op, ir_type *to_tp)
2633 return new_bd_Cast(db, current_ir_graph->current_block, op, to_tp);
2637 new_d_Tuple (dbg_info *db, int arity, ir_node **in)
2639 return new_bd_Tuple(db, current_ir_graph->current_block, arity, in);
2648 * allocate the frag array
/* Lazily attach a fragment array (per-exception memory state) to res.
   Done only when precise exception contexts are requested, only while
   the graph is still being built, only if res was not optimized into a
   different op, and only if no array was set already (e.g. by CSE). */
2650 static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
2651 if (get_opt_precise_exc_context()) {
2652 if ((current_ir_graph->phase_state == phase_building) &&
2653 (get_irn_op(res) == op) && /* Could be optimized away. */
2654 !*frag_store) /* Could be a cse where the arr is already set. */ {
2655 *frag_store = new_frag_arr(res);
/* Division-like constructors (Quot, DivMod, Div, Mod): the result is
   pinned, since these ops may raise an exception, and under
   PRECISE_EXC_CONTEXT it additionally gets a fragment array. */
2661 new_d_Quot (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2664 res = new_bd_Quot (db, current_ir_graph->current_block, memop, op1, op2);
2665 res->attr.except.pin_state = op_pin_state_pinned;
2666 #if PRECISE_EXC_CONTEXT
2667 allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
2674 new_d_DivMod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2677 res = new_bd_DivMod (db, current_ir_graph->current_block, memop, op1, op2);
2678 res->attr.except.pin_state = op_pin_state_pinned;
2679 #if PRECISE_EXC_CONTEXT
2680 allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
2687 new_d_Div (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2690 res = new_bd_Div (db, current_ir_graph->current_block, memop, op1, op2);
2691 res->attr.except.pin_state = op_pin_state_pinned;
2692 #if PRECISE_EXC_CONTEXT
2693 allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
2700 new_d_Mod (dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2)
2703 res = new_bd_Mod (db, current_ir_graph->current_block, memop, op1, op2);
2704 res->attr.except.pin_state = op_pin_state_pinned;
2705 #if PRECISE_EXC_CONTEXT
2706 allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
/* Control-flow and memory-op constructors with debug info; each builds
   its node in the current block.  Ops that can raise (Call, Load, Store,
   Alloc) get a fragment array under PRECISE_EXC_CONTEXT. */
2725 new_d_Cmp (dbg_info *db, ir_node *op1, ir_node *op2)
2727 return new_bd_Cmp(db, current_ir_graph->current_block, op1, op2);
2731 new_d_Jmp (dbg_info *db)
2733 return new_bd_Jmp (db, current_ir_graph->current_block);
2737 new_d_IJmp (dbg_info *db, ir_node *tgt)
2739 return new_bd_IJmp (db, current_ir_graph->current_block, tgt);
2743 new_d_Cond (dbg_info *db, ir_node *c)
2745 return new_bd_Cond (db, current_ir_graph->current_block, c);
2749 new_d_Call (dbg_info *db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2753 res = new_bd_Call (db, current_ir_graph->current_block,
2754 store, callee, arity, in, tp);
2755 #if PRECISE_EXC_CONTEXT
2756 allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
2763 new_d_Return (dbg_info *db, ir_node* store, int arity, ir_node **in)
2765 return new_bd_Return (db, current_ir_graph->current_block,
2770 new_d_Load (dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode)
2773 res = new_bd_Load (db, current_ir_graph->current_block,
2775 #if PRECISE_EXC_CONTEXT
2776 allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
2783 new_d_Store (dbg_info *db, ir_node *store, ir_node *addr, ir_node *val)
2786 res = new_bd_Store (db, current_ir_graph->current_block,
2788 #if PRECISE_EXC_CONTEXT
2789 allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
2796 new_d_Alloc (dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
2800 res = new_bd_Alloc (db, current_ir_graph->current_block,
2801 store, size, alloc_type, where);
2802 #if PRECISE_EXC_CONTEXT
2803 allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
2810 new_d_Free (dbg_info *db, ir_node *store, ir_node *ptr,
2811 ir_node *size, ir_type *free_type, where_alloc where)
2813 return new_bd_Free (db, current_ir_graph->current_block,
2814 store, ptr, size, free_type, where);
/* simpleSel: a Sel with no index operands (scalar entity selection). */
2818 new_d_simpleSel (dbg_info *db, ir_node *store, ir_node *objptr, entity *ent)
2819 /* GL: objptr was called frame before. Frame was a bad choice for the name
2820 as the operand could as well be a pointer to a dynamic object. */
2822 return new_bd_Sel (db, current_ir_graph->current_block,
2823 store, objptr, 0, NULL, ent);
2827 new_d_Sel (dbg_info *db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2829 return new_bd_Sel (db, current_ir_graph->current_block,
2830 store, objptr, n_index, index, sel);
/* SymConst nodes are placed in the start block, like Const. */
2834 new_d_SymConst_type (dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp)
2836 return new_bd_SymConst_type (db, current_ir_graph->start_block,
2841 new_d_SymConst (dbg_info *db, symconst_symbol value, symconst_kind kind)
2843 return new_bd_SymConst (db, current_ir_graph->start_block,
2848 new_d_Sync (dbg_info *db, int arity, ir_node** in)
2850 return new_bd_Sync (db, current_ir_graph->current_block, arity, in);
2856 return _new_d_Bad();
2860 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2862 return new_bd_Confirm (db, current_ir_graph->current_block,
2867 new_d_Unknown (ir_mode *m)
2869 return new_bd_Unknown(m);
/* Interprocedural-view constructors (CallBegin, EndReg, EndExcept,
   Break, Filter) built in the current block. */
2873 new_d_CallBegin (dbg_info *db, ir_node *call)
2876 res = new_bd_CallBegin (db, current_ir_graph->current_block, call);
2881 new_d_EndReg (dbg_info *db)
2884 res = new_bd_EndReg(db, current_ir_graph->current_block);
2889 new_d_EndExcept (dbg_info *db)
2892 res = new_bd_EndExcept(db, current_ir_graph->current_block);
2897 new_d_Break (dbg_info *db)
2899 return new_bd_Break (db, current_ir_graph->current_block);
2903 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2905 return new_bd_Filter (db, current_ir_graph->current_block,
2912 return _new_d_NoMem();
2916 new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false,
2917 ir_node *ir_true, ir_mode *mode) {
2918 return new_bd_Mux (db, current_ir_graph->current_block,
2919 sel, ir_false, ir_true, mode);
2923 new_d_Psi (dbg_info *db,int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
2924 return new_bd_Psi (db, current_ir_graph->current_block,
2925 arity, conds, vals, mode);
/* CopyB and Bound can raise; under PRECISE_EXC_CONTEXT they also get a
   fragment array. */
2928 ir_node *new_d_CopyB(dbg_info *db,ir_node *store,
2929 ir_node *dst, ir_node *src, ir_type *data_type) {
2931 res = new_bd_CopyB(db, current_ir_graph->current_block,
2932 store, dst, src, data_type);
2933 #if PRECISE_EXC_CONTEXT
2934 allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
2940 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, ir_type *type)
2942 return new_bd_InstOf (db, current_ir_graph->current_block,
2943 store, objptr, type);
2947 new_d_Raise (dbg_info *db, ir_node *store, ir_node *obj)
2949 return new_bd_Raise (db, current_ir_graph->current_block,
2953 ir_node *new_d_Bound(dbg_info *db,ir_node *store,
2954 ir_node *idx, ir_node *lower, ir_node *upper) {
2956 res = new_bd_Bound(db, current_ir_graph->current_block,
2957 store, idx, lower, upper);
2958 #if PRECISE_EXC_CONTEXT
2959 allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
2964 /* ********************************************************************* */
2965 /* Comfortable interface with automatic Phi node construction. */
2966 /* (Also uses constructors of the ?? interface, except new_Block.) */
2967 /* ********************************************************************* */
2969 /* Block construction */
2970 /* immature Block without predecessors */
/* Create an immature Block: no predecessors yet (dynamic in-array),
   becomes the current block, and gets a zeroed graph_arr of n_loc slots
   for the automatic Phi-node construction.  Finish with mature_immBlock. */
2971 ir_node *new_d_immBlock (dbg_info *db) {
2974 assert(get_irg_phase_state (current_ir_graph) == phase_building);
2975 /* creates a new dynamic in-array as length of in is -1 */
2976 res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2977 current_ir_graph->current_block = res;
2978 res->attr.block.matured = 0;
2979 res->attr.block.dead = 0;
2980 /* res->attr.block.exc = exc_normal; */
2981 /* res->attr.block.handler_entry = 0; */
2982 res->attr.block.irg = current_ir_graph;
2983 res->attr.block.backedge = NULL;
2984 res->attr.block.in_cg = NULL;
2985 res->attr.block.cg_backedge = NULL;
2986 set_Block_block_visited(res, 0);
2988 /* Create and initialize array for Phi-node construction. */
2989 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2990 current_ir_graph->n_loc);
2991 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2993 /* Immature block may not be optimized! */
2994 IRN_VRFY_IRG(res, current_ir_graph);
/* Convenience variant without debug info. */
3000 new_immBlock (void) {
3001 return new_d_immBlock(NULL);
3004 /* add an edge to a jmp/control flow node */
/* Append a control-flow predecessor to a still-immature block. */
3006 add_immBlock_pred (ir_node *block, ir_node *jmp)
3008 if (block->attr.block.matured) {
3009 assert(0 && "Error: Block already matured!\n");
3012 assert(jmp != NULL);
3013 ARR_APP1(ir_node *, block->in, jmp);
3017 /* changing the current block */
3019 set_cur_block (ir_node *target) {
3020 current_ir_graph->current_block = target;
3023 /* ************************ */
3024 /* parameter administration */
3026 /* get a value from the parameter array from the current block by its index */
3028 get_d_value (dbg_info *db, int pos, ir_mode *mode)
3030 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3031 inc_irg_visited(current_ir_graph);
/* pos + 1: slot 0 of graph_arr holds the memory state (see set_store). */
3033 return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
3035 /* get a value from the parameter array from the current block by its index */
3037 get_value (int pos, ir_mode *mode)
3039 return get_d_value(NULL, pos, mode);
3042 /* set a value at position pos in the parameter array from the current block */
3044 set_value (int pos, ir_node *value)
3046 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3047 assert(pos+1 < current_ir_graph->n_loc);
3048 current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
/* Search the current block's value array for a node; starts at 1 to
   skip the memory-state slot. */
3052 find_value(ir_node *value)
3055 ir_node *bl = current_ir_graph->current_block;
3057 for (i = 1; i < ARR_LEN(bl->attr.block.graph_arr); ++i)
3058 if (bl->attr.block.graph_arr[i] == value)
3063 /* get the current store */
3067 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3068 /* GL: one could call get_value instead */
3069 inc_irg_visited(current_ir_graph);
3070 return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
3073 /* set the current store */
3075 set_store (ir_node *store)
3077 /* GL: one could call set_value instead */
3078 assert(get_irg_phase_state (current_ir_graph) == phase_building);
3079 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Keep a node alive past optimization by attaching it to End. */
3083 keep_alive (ir_node *ka) {
3084 add_End_keepalive(current_ir_graph->end, ka);
3087 /* --- Useful access routines --- */
3088 /* Returns the current block of the current graph. To set the current
3089 block use set_cur_block. */
3090 ir_node *get_cur_block(void) {
3091 return get_irg_current_block(current_ir_graph);
3094 /* Returns the frame type of the current graph */
3095 ir_type *get_cur_frame_type(void) {
3096 return get_irg_frame_type(current_ir_graph);
3100 /* ********************************************************************* */
3103 /* call once for each run of the library */
/* Install the language-dependent callback used to initialize local
   variables that are read before being written. */
3105 init_cons(uninitialized_local_variable_func_t *func)
3107 default_initialize_local_variable = func;
3110 /* call for each graph */
/* End the construction phase for one graph. */
3112 irg_finalize_cons (ir_graph *irg) {
3113 irg->phase_state = phase_high;
/* End the construction phase for all graphs and the program itself. */
3117 irp_finalize_cons (void) {
3118 int i, n_irgs = get_irp_n_irgs();
3119 for (i = 0; i < n_irgs; i++) {
3120 irg_finalize_cons(get_irp_irg(i));
3122 irp->phase_state = phase_high;
/* Convenience constructors without debug info: each simply forwards to
   the matching new_d_* constructor with a NULL dbg_info. */
3126 ir_node *new_Block(int arity, ir_node **in) {
3127 return new_d_Block(NULL, arity, in);
3129 ir_node *new_Start (void) {
3130 return new_d_Start(NULL);
3132 ir_node *new_End (void) {
3133 return new_d_End(NULL);
3135 ir_node *new_Jmp (void) {
3136 return new_d_Jmp(NULL);
3138 ir_node *new_IJmp (ir_node *tgt) {
3139 return new_d_IJmp(NULL, tgt);
3141 ir_node *new_Cond (ir_node *c) {
3142 return new_d_Cond(NULL, c);
3144 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
3145 return new_d_Return(NULL, store, arity, in);
3147 ir_node *new_Const (ir_mode *mode, tarval *con) {
3148 return new_d_Const(NULL, mode, con);
3151 ir_node *new_Const_long(ir_mode *mode, long value)
3153 return new_d_Const_long(NULL, mode, value);
/* Derives the mode from the given type. */
3156 ir_node *new_Const_type(tarval *con, ir_type *tp) {
3157 return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
3160 ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
3161 return new_d_SymConst(NULL, value, kind);
3163 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
3164 return new_d_simpleSel(NULL, store, objptr, ent);
3166 ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
3168 return new_d_Sel(NULL, store, objptr, arity, in, ent);
3170 ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
3172 return new_d_Call(NULL, store, callee, arity, in, tp);
/* Arithmetic and logic ops. */
3174 ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
3175 return new_d_Add(NULL, op1, op2, mode);
3177 ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
3178 return new_d_Sub(NULL, op1, op2, mode);
3180 ir_node *new_Minus (ir_node *op, ir_mode *mode) {
3181 return new_d_Minus(NULL, op, mode);
3183 ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
3184 return new_d_Mul(NULL, op1, op2, mode);
3186 ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
3187 return new_d_Quot(NULL, memop, op1, op2);
3189 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
3190 return new_d_DivMod(NULL, memop, op1, op2);
3192 ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
3193 return new_d_Div(NULL, memop, op1, op2);
3195 ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
3196 return new_d_Mod(NULL, memop, op1, op2);
3198 ir_node *new_Abs (ir_node *op, ir_mode *mode) {
3199 return new_d_Abs(NULL, op, mode);
3201 ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
3202 return new_d_And(NULL, op1, op2, mode);
3204 ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
3205 return new_d_Or(NULL, op1, op2, mode);
3207 ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
3208 return new_d_Eor(NULL, op1, op2, mode);
3210 ir_node *new_Not (ir_node *op, ir_mode *mode) {
3211 return new_d_Not(NULL, op, mode);
3213 ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
3214 return new_d_Shl(NULL, op, k, mode);
3216 ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
3217 return new_d_Shr(NULL, op, k, mode);
3219 ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
3220 return new_d_Shrs(NULL, op, k, mode);
3222 ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
3223 return new_d_Rot(NULL, op, k, mode);
3225 ir_node *new_Carry (ir_node *op1, ir_node *op2, ir_mode *mode) {
3226 return new_d_Carry(NULL, op1, op2, mode);
3228 ir_node *new_Borrow (ir_node *op1, ir_node *op2, ir_mode *mode) {
3229 return new_d_Borrow(NULL, op1, op2, mode);
3231 ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
3232 return new_d_Cmp(NULL, op1, op2);
3234 ir_node *new_Conv (ir_node *op, ir_mode *mode) {
3235 return new_d_Conv(NULL, op, mode);
3237 ir_node *new_Cast (ir_node *op, ir_type *to_tp) {
3238 return new_d_Cast(NULL, op, to_tp);
3240 ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
3241 return new_d_Phi(NULL, arity, in, mode);
/* Memory ops. */
3243 ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
3244 return new_d_Load(NULL, store, addr, mode);
3246 ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
3247 return new_d_Store(NULL, store, addr, val);
3249 ir_node *new_Alloc (ir_node *store, ir_node *size, ir_type *alloc_type,
3250 where_alloc where) {
3251 return new_d_Alloc(NULL, store, size, alloc_type, where);
3253 ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
3254 ir_type *free_type, where_alloc where) {
3255 return new_d_Free(NULL, store, ptr, size, free_type, where);
3257 ir_node *new_Sync (int arity, ir_node **in) {
3258 return new_d_Sync(NULL, arity, in);
3260 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
3261 return new_d_Proj(NULL, arg, mode, proj);
3263 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
3264 return new_d_defaultProj(NULL, arg, max_proj);
3266 ir_node *new_Tuple (int arity, ir_node **in) {
3267 return new_d_Tuple(NULL, arity, in);
3269 ir_node *new_Id (ir_node *val, ir_mode *mode) {
3270 return new_d_Id(NULL, val, mode);
3272 ir_node *new_Bad (void) {
3275 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
3276 return new_d_Confirm (NULL, val, bound, cmp);
3278 ir_node *new_Unknown(ir_mode *m) {
3279 return new_d_Unknown(m);
/* Interprocedural-view ops. */
3281 ir_node *new_CallBegin (ir_node *callee) {
3282 return new_d_CallBegin(NULL, callee);
3284 ir_node *new_EndReg (void) {
3285 return new_d_EndReg(NULL);
3287 ir_node *new_EndExcept (void) {
3288 return new_d_EndExcept(NULL);
3290 ir_node *new_Break (void) {
3291 return new_d_Break(NULL);
3293 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
3294 return new_d_Filter(NULL, arg, mode, proj);
3296 ir_node *new_NoMem (void) {
3297 return new_d_NoMem();
3299 ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) {
3300 return new_d_Mux(NULL, sel, ir_false, ir_true, mode);
3302 ir_node *new_Psi (int arity, ir_node *conds[], ir_node *vals[], ir_mode *mode) {
3303 return new_d_Psi(NULL, arity, conds, vals, mode);
3305 ir_node *new_CopyB(ir_node *store, ir_node *dst, ir_node *src, ir_type *data_type) {
3306 return new_d_CopyB(NULL, store, dst, src, data_type);
3308 ir_node *new_InstOf (ir_node *store, ir_node *objptr, ir_type *ent) {
3309 return new_d_InstOf (NULL, store, objptr, ent);
3311 ir_node *new_Raise (ir_node *store, ir_node *obj) {
3312 return new_d_Raise(NULL, store, obj);
3314 ir_node *new_Bound(ir_node *store, ir_node *idx, ir_node *lower, ir_node *upper) {
3315 return new_d_Bound(NULL, store, idx, lower, upper);