3 * File name: ir/ir/ircons.c
4 * Purpose: Various irnode constructors. Automatic construction
5 * of SSA representation.
6 * Author: Martin Trapp, Christian Schaefer
7 * Modified by: Goetz Lindenmaier, Boris Boesler
10 * Copyright: (c) 1998-2003 Universität Karlsruhe
11 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
19 # include "irgraph_t.h"
20 # include "irnode_t.h"
21 # include "irmode_t.h"
23 # include "firm_common_t.h"
29 /* memset belongs to string.h */
31 # include "irbackedge_t.h"
33 #if USE_EXPLICIT_PHI_IN_STACK
34 /* A stack needed for the automatic Phi node construction in constructor
35 Phi_in. Redefinition in irgraph.c!! */
40 typedef struct Phi_in_stack Phi_in_stack;
/* NOTE(review): the following line is the tail of a doc comment whose
   opening marker is not visible in this extract. */
44 * language dependent initialization variable
/* Front-end supplied callback that produces a default value for a local
   variable that is read before it was written; NULL means "none set". */
46 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
48 /*** ******************************************** */
49 /** private interfaces, for professional use only */
51 /* Constructs a Block with a fixed number of predecessors.
52 Does not set current_block. Can not be used with automatic
53 Phi node construction. */
55 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
/* Block nodes carry mode_BB; the block input of a Block is NULL. */
59 res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* All predecessors are known up front, so the block is born mature. */
60 set_Block_matured(res, 1);
61 set_Block_block_visited(res, 0);
63 //res->attr.block.exc = exc_normal;
64 //res->attr.block.handler_entry = 0;
65 res->attr.block.irg = irg;
/* Back-edge flags, one per predecessor, allocated on the graph obstack. */
66 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
/* Interprocedural-view fields stay unset until cg construction. */
67 res->attr.block.in_cg = NULL;
68 res->attr.block.cg_backedge = NULL;
70 irn_vrfy_irg (res, irg);
/* Constructs a Start node (mode_T, no data predecessors) in the given block. */
75 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
79 res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
80 //res->attr.start.irg = irg;
82 irn_vrfy_irg (res, irg);
/* Constructs the End node. Arity -1 requests a dynamic (growable) in-array,
   as keep-alive edges are added later via add_End_keepalive(). */
87 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
91 res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
93 irn_vrfy_irg (res, irg);
97 /* Creates a Phi node with all predecessors. Calling this constructor
98 is only allowed if the corresponding block is mature. */
100 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
104 bool has_unknown = false;
/* A Phi needs exactly one operand per control-flow predecessor. */
106 assert( get_Block_matured(block) );
107 assert( get_irn_arity(block) == arity );
109 res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
111 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
/* Do not optimize a Phi that still has Unknown operands: those operands
   may be replaced later, invalidating the optimization. */
113 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
114 if (!has_unknown) res = optimize_node (res);
115 irn_vrfy_irg (res, irg);
117 /* Memory Phis in endless loops must be kept alive.
118 As we can't distinguish these easily we keep all of them alive. */
119 if ((res->op == op_Phi) && (mode == mode_M))
120 add_End_keepalive(irg->end, res);
/* Constructs a Const node carrying tarval `con`, with an explicit type `tp`
   attached (used for typed constant propagation). */
125 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
128 res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
129 res->attr.con.tv = con;
130 set_Const_type(res, tp); /* Call method because of complex assertion. */
/* optimize_node may CSE this Const with an existing identical one; the
   assertion checks that the surviving node carries the same type. */
131 res = optimize_node (res);
132 assert(get_Const_type(res) == tp);
133 irn_vrfy_irg (res, irg);
/* NOTE(review): second optimization pass — presumably constant folding
   hook; its definition is not visible in this extract. */
136 res = local_optimize_newby (res);
/* Convenience wrapper: derives the type from the tarval. Entity-valued
   tarvals get a pointer-to-entity-type; everything else gets unknown_type. */
143 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
145 type *tp = unknown_type;
146 if (tarval_is_entity(con))
147 tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
148 return new_rd_Const_type (db, irg, block, mode, con, tp);
/* Constructs an Id node forwarding `val`; usually optimized away immediately
   by optimize_node. */
152 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
157 res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
158 res = optimize_node (res);
159 irn_vrfy_irg (res, irg);
/* Constructs a Proj selecting result number `proj` from the tuple `arg`. */
164 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
170 res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
171 res->attr.proj = proj;
/* Sanity: the projected node exists and is placed in a block. */
174 assert(get_Proj_pred(res));
175 assert(get_nodes_Block(get_Proj_pred(res)));
177 res = optimize_node (res);
179 irn_vrfy_irg (res, irg);
/* Constructs the default Proj of a Cond switch: marks the Cond as having a
   fragmentary (sparse) projection set and records the default projection
   number, then builds an X-mode Proj for it. */
185 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
/* Only valid on a Cond over an unsigned selector (switch, not boolean). */
189 assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
190 arg->attr.c.kind = fragmentary;
191 arg->attr.c.default_proj = max_proj;
192 res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Constructs a Conv node: converts `op` to mode `mode`. */
197 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
202 res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
203 res = optimize_node (res);
204 irn_vrfy_irg (res, irg);
/* Constructs a Cast node: reinterprets `op` as type `to_tp`; the mode is
   unchanged (high-level type cast only). */
209 new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
212 res = new_ir_node (db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
213 res->attr.cast.totype = to_tp;
214 res = optimize_node (res);
215 irn_vrfy_irg (res, irg);
/* Constructs a Tuple node bundling `arity` values into one mode_T result. */
220 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
224 res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
225 res = optimize_node (res);
226 irn_vrfy_irg (res, irg);
/* --- Arithmetic, logic, shift and compare constructors ------------------
   All follow the same pattern: collect operands into a local `in` array
   (allocation not visible in this extract), build the node, run
   optimize_node (CSE / local folding, may return a different node), and
   verify.  Div/Mod/DivMod/Quot additionally take a memory operand and
   produce a mode_T tuple; Cmp produces a mode_T tuple of relations. */
231 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
232 ir_node *op1, ir_node *op2, ir_mode *mode)
238 res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
239 res = optimize_node (res);
240 irn_vrfy_irg (res, irg);
245 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
246 ir_node *op1, ir_node *op2, ir_mode *mode)
252 res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
253 res = optimize_node (res);
254 irn_vrfy_irg (res, irg);
259 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
260 ir_node *op, ir_mode *mode)
265 res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
266 res = optimize_node (res);
267 irn_vrfy_irg (res, irg);
272 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
273 ir_node *op1, ir_node *op2, ir_mode *mode)
279 res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
280 res = optimize_node (res);
281 irn_vrfy_irg (res, irg);
/* Division family: memop + two operands, mode_T result tuple. */
286 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
287 ir_node *memop, ir_node *op1, ir_node *op2)
294 res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
295 res = optimize_node (res);
296 irn_vrfy_irg (res, irg);
301 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
302 ir_node *memop, ir_node *op1, ir_node *op2)
309 res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
310 res = optimize_node (res);
311 irn_vrfy_irg (res, irg);
316 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
317 ir_node *memop, ir_node *op1, ir_node *op2)
324 res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
325 res = optimize_node (res);
326 irn_vrfy_irg (res, irg);
331 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
332 ir_node *memop, ir_node *op1, ir_node *op2)
339 res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
340 res = optimize_node (res);
341 irn_vrfy_irg (res, irg);
/* Bitwise logic. */
346 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
347 ir_node *op1, ir_node *op2, ir_mode *mode)
353 res = new_ir_node (db, irg, block, op_And, mode, 2, in);
354 res = optimize_node (res);
355 irn_vrfy_irg (res, irg);
360 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
361 ir_node *op1, ir_node *op2, ir_mode *mode)
367 res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
368 res = optimize_node (res);
369 irn_vrfy_irg (res, irg);
374 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
375 ir_node *op1, ir_node *op2, ir_mode *mode)
381 res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
382 res = optimize_node (res);
383 irn_vrfy_irg (res, irg);
388 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
389 ir_node *op, ir_mode *mode)
394 res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
395 res = optimize_node (res);
396 irn_vrfy_irg (res, irg);
/* Shifts and rotate: second operand `k` is the shift amount. */
401 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
402 ir_node *op, ir_node *k, ir_mode *mode)
408 res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
409 res = optimize_node (res);
410 irn_vrfy_irg (res, irg);
415 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
416 ir_node *op, ir_node *k, ir_mode *mode)
422 res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
423 res = optimize_node (res);
424 irn_vrfy_irg (res, irg);
429 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
430 ir_node *op, ir_node *k, ir_mode *mode)
436 res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
437 res = optimize_node (res);
438 irn_vrfy_irg (res, irg);
443 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
444 ir_node *op, ir_node *k, ir_mode *mode)
450 res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
451 res = optimize_node (res);
452 irn_vrfy_irg (res, irg);
457 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
458 ir_node *op, ir_mode *mode)
463 res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
464 res = optimize_node (res);
465 irn_vrfy_irg (res, irg);
/* Cmp yields a mode_T node; individual relations are selected by Projs. */
470 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
471 ir_node *op1, ir_node *op2)
477 res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
478 res = optimize_node (res);
479 irn_vrfy_irg (res, irg);
/* Constructs an unconditional Jmp (mode_X control flow). */
484 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
487 res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
488 res = optimize_node (res);
489 irn_vrfy_irg (res, irg);
/* Constructs a Cond on selector `c`; starts as a dense switch with default
   projection 0 (new_rd_defaultProj may later switch it to fragmentary). */
494 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
499 res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
500 res->attr.c.kind = dense;
501 res->attr.c.default_proj = 0;
502 res = optimize_node (res);
503 irn_vrfy_irg (res, irg);
/* Constructs a Call: predecessors are [store, callee, arg0..argN-1].
   `tp` must be the callee's method type. */
508 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
509 ir_node *callee, int arity, ir_node **in, type *tp)
/* r_in holds store + callee + the arity actual arguments (alloca'd). */
516 NEW_ARR_A (ir_node *, r_in, r_arity);
519 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
521 res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
523 assert(is_method_type(tp));
524 set_Call_type(res, tp);
/* No statically known callees yet; filled in by call-graph analysis. */
525 res->attr.call.callee_arr = NULL;
526 res = optimize_node (res);
527 irn_vrfy_irg (res, irg);
/* Constructs a Return: predecessors are [store, res0..resN-1], mode_X. */
532 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
533 ir_node *store, int arity, ir_node **in)
540 NEW_ARR_A (ir_node *, r_in, r_arity);
542 memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
543 res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
544 res = optimize_node (res);
545 irn_vrfy_irg (res, irg);
/* Constructs a Raise (exception throw) with [store, obj] predecessors. */
550 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
556 res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
557 res = optimize_node (res);
558 irn_vrfy_irg (res, irg);
/* Constructs a Load from address `adr` with memory predecessor `store`;
   results (memory, value, exception flow) are selected by Projs. */
563 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
564 ir_node *store, ir_node *adr)
570 res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
572 res = optimize_node (res);
573 irn_vrfy_irg (res, irg);
/* Constructs a Store of `val` to address `adr`, memory predecessor `store`. */
578 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
579 ir_node *store, ir_node *adr, ir_node *val)
586 res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
588 res = optimize_node (res);
590 irn_vrfy_irg (res, irg);
/* Constructs an Alloc of `size` bytes of `alloc_type`; `where` selects
   stack vs. heap allocation. */
595 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
596 ir_node *size, type *alloc_type, where_alloc where)
602 res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
604 res->attr.a.where = where;
605 res->attr.a.type = alloc_type;
607 res = optimize_node (res);
608 irn_vrfy_irg (res, irg);
/* Constructs a Free of `size` bytes at `ptr`, with type annotation. */
613 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
614 ir_node *ptr, ir_node *size, type *free_type)
621 res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
623 res->attr.f = free_type;
625 res = optimize_node (res);
626 irn_vrfy_irg (res, irg);
/* Constructs a Sel: computes the address of entity `ent` relative to
   `objptr`, with optional array indices. Predecessors are
   [store, objptr, index0..indexN-1]; result mode is the machine pointer. */
631 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
632 int arity, ir_node **in, entity *ent)
639 NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
642 memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
643 res = new_ir_node (db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
645 res->attr.s.ent = ent;
647 res = optimize_node (res);
648 irn_vrfy_irg (res, irg);
/* Constructs an InstOf (dynamic type test of `objptr` against type `ent`).
   Predecessors gathered into r_in (assembly not visible in this extract). */
653 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
654 ir_node *objptr, type *ent)
661 NEW_ARR_A (ir_node *, r_in, r_arity);
/* FIX(review): original passed op_Sel here although the node is an InstOf
   (it sets the InstOf attribute attr.io.ent below) -- use op_InstOf. */
665 res = new_ir_node (db, irg, block, op_InstOf, mode_T, r_arity, r_in);
667 res->attr.io.ent = ent;
/* Optimization/verification deliberately disabled for InstOf so far. */
669 /* res = optimize (res);
670 * irn_vrfy_irg (res, irg); */
/* Constructs a SymConst: a symbolic constant, either a linkage pointer
   (ident) or a type_tag/size constant (type). The result mode depends on
   the kind (selection logic partly not visible in this extract). */
675 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
676 symconst_kind symkind)
680 if (symkind == linkage_ptr_info)
684 res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
686 res->attr.i.num = symkind;
/* The value union is interpreted according to the kind. */
687 if (symkind == linkage_ptr_info) {
688 res->attr.i.tori.ptrinfo = (ident *)value;
690 assert ( ( (symkind == type_tag)
691 || (symkind == size))
692 && (is_type(value)));
693 res->attr.i.tori.typ = (type *)value;
695 res = optimize_node (res);
696 irn_vrfy_irg (res, irg);
/* Constructs a Sync joining `arity` memory values into one (mode_M). */
701 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
705 res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
707 res = optimize_node (res);
708 irn_vrfy_irg (res, irg);
/* Returns the graph's Bad node (body not visible in this extract). */
713 new_rd_Bad (ir_graph *irg)
/* Constructs a Confirm: asserts that `val` stands in relation `cmp` to
   `bound`; carries the mode of `val`. */
719 new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
721 ir_node *in[2], *res;
725 res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
727 res->attr.confirm_cmp = cmp;
729 res = optimize_node (res);
730 irn_vrfy_irg(res, irg);
/* Constructs an Unknown value of mode `m`, placed in the start block.
   Not optimized/verified: it may be replaced during construction. */
735 new_rd_Unknown (ir_graph *irg, ir_mode *m)
737 return new_ir_node (NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
/* Constructs a CallBegin (interprocedural view): control-flow entry of an
   inlined call; its single predecessor is the call's function pointer. */
741 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
745 in[0] = get_Call_ptr(call);
746 res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
747 //res->attr.callbegin.irg = irg;
748 res->attr.callbegin.call = call;
749 res = optimize_node (res);
750 irn_vrfy_irg (res, irg);
/* Constructs an EndReg (interprocedural view): joins regular control flow
   of a called graph. Arity -1 requests a dynamic in-array. */
755 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
759 res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
760 //res->attr.end.irg = irg;
762 irn_vrfy_irg (res, irg);
/* Constructs an EndExcept: joins exceptional control flow of a called graph. */
767 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
771 res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
772 //res->attr.end.irg = irg;
774 irn_vrfy_irg (res, irg);
/* Constructs a Break: control-flow node used in the interprocedural view. */
779 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
782 res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
783 res = optimize_node (res);
784 irn_vrfy_irg (res, irg);
/* Constructs a Filter: the interprocedural counterpart of Proj, selecting
   result `proj` from `arg`; gets additional in_cg edges during cg
   construction. */
789 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
795 res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
796 res->attr.filter.proj = proj;
797 res->attr.filter.in_cg = NULL;
798 res->attr.filter.backedge = NULL;
801 assert(get_Proj_pred(res));
802 assert(get_nodes_Block(get_Proj_pred(res)));
804 res = optimize_node (res);
806 irn_vrfy_irg (res, irg);
/* --- new_r_* wrappers ---------------------------------------------------
   Thin forwarders to the corresponding new_rd_* constructors with a NULL
   debug-info argument. No logic of their own. */
811 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
812 return new_rd_Block(NULL, irg, arity, in);
814 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
815 return new_rd_Start(NULL, irg, block);
817 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
818 return new_rd_End(NULL, irg, block);
820 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
821 return new_rd_Jmp(NULL, irg, block);
823 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
824 return new_rd_Cond(NULL, irg, block, c);
826 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
827 ir_node *store, int arity, ir_node **in) {
828 return new_rd_Return(NULL, irg, block, store, arity, in);
830 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
831 ir_node *store, ir_node *obj) {
832 return new_rd_Raise(NULL, irg, block, store, obj);
834 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
835 ir_mode *mode, tarval *con) {
836 return new_rd_Const(NULL, irg, block, mode, con);
838 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
839 type_or_id_p value, symconst_kind symkind) {
840 return new_rd_SymConst(NULL, irg, block, value, symkind);
842 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
843 ir_node *objptr, int n_index, ir_node **index,
845 return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
847 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
849 return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
851 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
852 ir_node *callee, int arity, ir_node **in,
854 return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
856 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
857 ir_node *op1, ir_node *op2, ir_mode *mode) {
858 return new_rd_Add(NULL, irg, block, op1, op2, mode);
860 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
861 ir_node *op1, ir_node *op2, ir_mode *mode) {
862 return new_rd_Sub(NULL, irg, block, op1, op2, mode);
864 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
865 ir_node *op, ir_mode *mode) {
866 return new_rd_Minus(NULL, irg, block, op, mode);
868 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
869 ir_node *op1, ir_node *op2, ir_mode *mode) {
870 return new_rd_Mul(NULL, irg, block, op1, op2, mode);
872 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
873 ir_node *memop, ir_node *op1, ir_node *op2) {
874 return new_rd_Quot(NULL, irg, block, memop, op1, op2);
876 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
877 ir_node *memop, ir_node *op1, ir_node *op2) {
878 return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
880 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
881 ir_node *memop, ir_node *op1, ir_node *op2) {
882 return new_rd_Div(NULL, irg, block, memop, op1, op2);
884 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
885 ir_node *memop, ir_node *op1, ir_node *op2) {
886 return new_rd_Mod(NULL, irg, block, memop, op1, op2);
888 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
889 ir_node *op, ir_mode *mode) {
890 return new_rd_Abs(NULL, irg, block, op, mode);
892 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
893 ir_node *op1, ir_node *op2, ir_mode *mode) {
894 return new_rd_And(NULL, irg, block, op1, op2, mode);
896 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
897 ir_node *op1, ir_node *op2, ir_mode *mode) {
898 return new_rd_Or(NULL, irg, block, op1, op2, mode);
900 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
901 ir_node *op1, ir_node *op2, ir_mode *mode) {
902 return new_rd_Eor(NULL, irg, block, op1, op2, mode);
904 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
905 ir_node *op, ir_mode *mode) {
906 return new_rd_Not(NULL, irg, block, op, mode);
908 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
909 ir_node *op1, ir_node *op2) {
910 return new_rd_Cmp(NULL, irg, block, op1, op2);
912 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
913 ir_node *op, ir_node *k, ir_mode *mode) {
914 return new_rd_Shl(NULL, irg, block, op, k, mode);
916 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
917 ir_node *op, ir_node *k, ir_mode *mode) {
918 return new_rd_Shr(NULL, irg, block, op, k, mode);
920 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
921 ir_node *op, ir_node *k, ir_mode *mode) {
922 return new_rd_Shrs(NULL, irg, block, op, k, mode);
924 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
925 ir_node *op, ir_node *k, ir_mode *mode) {
926 return new_rd_Rot(NULL, irg, block, op, k, mode);
928 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
929 ir_node *op, ir_mode *mode) {
930 return new_rd_Conv(NULL, irg, block, op, mode);
932 INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
933 return new_rd_Cast(NULL, irg, block, op, to_tp);
935 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
936 ir_node **in, ir_mode *mode) {
937 return new_rd_Phi(NULL, irg, block, arity, in, mode);
939 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
940 ir_node *store, ir_node *adr) {
941 return new_rd_Load(NULL, irg, block, store, adr);
943 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
944 ir_node *store, ir_node *adr, ir_node *val) {
945 return new_rd_Store(NULL, irg, block, store, adr, val);
947 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
948 ir_node *size, type *alloc_type, where_alloc where) {
949 return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
951 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
952 ir_node *ptr, ir_node *size, type *free_type) {
953 return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
955 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
956 return new_rd_Sync(NULL, irg, block, arity, in);
958 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
959 ir_mode *mode, long proj) {
960 return new_rd_Proj(NULL, irg, block, arg, mode, proj);
962 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
964 return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
966 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
967 int arity, ir_node **in) {
968 return new_rd_Tuple(NULL, irg, block, arity, in );
970 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
971 ir_node *val, ir_mode *mode) {
972 return new_rd_Id(NULL, irg, block, val, mode);
974 INLINE ir_node *new_r_Bad (ir_graph *irg) {
975 return new_rd_Bad(irg);
977 INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
978 return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
980 INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
981 return new_rd_Unknown(irg, m);
983 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
984 return new_rd_CallBegin(NULL, irg, block, callee);
986 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
987 return new_rd_EndReg(NULL, irg, block);
989 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
990 return new_rd_EndExcept(NULL, irg, block);
992 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
993 return new_rd_Break(NULL, irg, block);
995 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
996 ir_mode *mode, long proj) {
997 return new_rd_Filter(NULL, irg, block, arg, mode, proj);
1001 /** ********************/
1002 /** public interfaces */
1003 /** construction tools */
/* Constructs a Start node in the graph's current block (debug variant). */
1007 * - create a new Start node in the current block
1009 * @return s - pointer to the created Start node
1014 new_d_Start (dbg_info* db)
1018 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1019 op_Start, mode_T, 0, NULL);
1020 //res->attr.start.irg = current_ir_graph;
1022 res = optimize_node (res);
1023 irn_vrfy_irg (res, current_ir_graph);
/* Constructs the End node in the current block; dynamic in-array for
   keep-alive edges added later. */
1028 new_d_End (dbg_info* db)
1031 res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
1032 op_End, mode_X, -1, NULL);
1033 res = optimize_node (res);
1034 irn_vrfy_irg (res, current_ir_graph);
1039 /* Constructs a Block with a fixed number of predecessors.
1040 Does set current_block. Can be used with automatic Phi
1041 node construction. */
1043 new_d_Block (dbg_info* db, int arity, ir_node **in)
1047 bool has_unknown = false;
1049 res = new_rd_Block (db, current_ir_graph, arity, in);
1051 /* Create and initialize array for Phi-node construction. */
/* graph_arr[pos] caches the current value of local variable `pos` in this
   block; zeroed so unset slots read as NULL. */
1052 res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
1053 current_ir_graph->n_loc);
1054 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Same rule as new_rd_Phi: don't optimize while Unknown preds remain. */
1056 for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) has_unknown = true;
1058 if (!has_unknown) res = optimize_node (res);
1059 current_ir_graph->current_block = res;
1061 irn_vrfy_irg (res, current_ir_graph);
1066 /* ***********************************************************************/
1067 /* Methods necessary for automatic Phi node creation */
1069 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1070 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1071 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1072 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
1074 Call Graph: ( A ---> B == A "calls" B)
1076 get_value mature_block
1084 get_r_value_internal |
1088 new_rd_Phi0 new_rd_Phi_in
1090 * *************************************************************************** */
1092 /* Creates a Phi node with 0 predecessors */
/* Placeholder Phi for a value read in an immature block; completed (turned
   into a real Phi / Id) when the block is matured. Not optimized. */
1093 static INLINE ir_node *
1094 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
1097 res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
1098 irn_vrfy_irg (res, irg);
1102 /* There are two implementations of the Phi node construction. The first
1103 is faster, but does not work for blocks with more than 2 predecessors.
1104 The second works always but is slower and causes more unnecessary Phi
1106 Select the implementations by the following preprocessor flag set in
1108 #if USE_FAST_PHI_CONSTRUCTION
1110 /* This is a stack used for allocating and deallocating nodes in
1111 new_rd_Phi_in. The original implementation used the obstack
1112 to model this stack, now it is explicit. This reduces side effects.
1114 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocates a fresh, empty Phi_in stack (flexible array + position). */
1115 INLINE Phi_in_stack *
1116 new_Phi_in_stack() {
1119 res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1121 res->stack = NEW_ARR_F (ir_node *, 1);
/* Releases the stack's array storage (struct free not visible here). */
1128 free_Phi_in_stack(Phi_in_stack *s) {
1129 DEL_ARR_F(s->stack);
/* Returns a speculatively allocated Phi to the stack for reuse: append if
   the array is full up to pos, otherwise overwrite the slot at pos. */
1133 free_to_Phi_in_stack(ir_node *phi) {
1134 assert(get_irn_opcode(phi) == iro_Phi);
1136 if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1137 current_ir_graph->Phi_in_stack->pos)
1138 ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1140 current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1142 (current_ir_graph->Phi_in_stack->pos)++;
/* Pops a reusable Phi node from the Phi_in stack, or allocates a fresh one
   if the stack is empty; in either case (re)initializes the in-array. */
1145 static INLINE ir_node *
1146 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1147 int arity, ir_node **in) {
1149 ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1150 int pos = current_ir_graph->Phi_in_stack->pos;
1154 /* We need to allocate a new node */
/* FIX(review): original passed `db`, which is not a parameter of this
   function and not declared locally; there is no debug info here, so
   pass NULL (as the non-stack path in new_rd_Phi_in does). */
1155 res = new_ir_node (NULL, irg, block, op_Phi, mode, arity, in);
1156 res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1158 /* reuse the old node and initialize it again. */
1161 assert (res->kind == k_ir_node);
1162 assert (res->op == op_Phi);
1166 assert (arity >= 0);
1167 /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
/* Slot 0 of the in-array is the block; operands start at index 1. */
1168 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1170 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1172 (current_ir_graph->Phi_in_stack->pos)--;
1178 /* Creates a Phi node with a given, fixed array **in of predecessors.
1179 If the Phi node is unnecessary, as the same value reaches the block
1180 through all control flow paths, it is eliminated and the value
1181 returned directly. This constructor is only intended for use in
1182 the automatic Phi node generation triggered by get_value or mature.
1183 The implementation is quite tricky and depends on the fact, that
1184 the nodes are allocated on a stack:
1185 The in array contains predecessors and NULLs. The NULLs appear,
1186 if get_r_value_internal, that computed the predecessors, reached
1187 the same block on two paths. In this case the same value reaches
1188 this block on both paths, there is no definition in between. We need
1189 not allocate a Phi where these path's merge, but we have to communicate
1190 this fact to the caller. This happens by returning a pointer to the
1191 node the caller _will_ allocate. (Yes, we predict the address. We can
1192 do so because the nodes are allocated on the obstack.) The caller then
1193 finds a pointer to itself and, when this routine is called again,
1196 static INLINE ir_node *
1197 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1198 ir_node **in, int ins)
1201 ir_node *res, *known;
1203 /* allocate a new node on the obstack.
1204 This can return a node to which some of the pointers in the in-array
1206 Attention: the constructor copies the in array, i.e., the later changes
1207 to the array in this routine do not affect the constructed node! If
1208 the in array contains NULLs, there will be missing predecessors in the
1210 Is this a possible internal state of the Phi node generation? */
1211 #if USE_EXPLICIT_PHI_IN_STACK
1212 res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1214 res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1215 res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1217 /* The in-array can contain NULLs. These were returned by
1218 get_r_value_internal if it reached the same block/definition on a
1220 The NULLs are replaced by the node itself to simplify the test in the
1222 for (i=0; i < ins; ++i)
1223 if (in[i] == NULL) in[i] = res;
1225 /* This loop checks whether the Phi has more than one predecessor.
1226 If so, it is a real Phi node and we break the loop. Else the
1227 Phi node merges the same definition on several paths and therefore
1229 for (i=0; i < ins; ++i)
1231 if (in[i]==res || in[i]==known) continue;
1239 /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* Degenerate Phi: give the speculative node back (stack or obstack). */
1241 #if USE_EXPLICIT_PHI_IN_STACK
1242 free_to_Phi_in_stack(res);
1244 obstack_free (current_ir_graph->obst, res);
/* Real Phi: optimize and verify as usual. */
1248 res = optimize_node (res);
1249 irn_vrfy_irg (res, irg);
1252 /* return the pointer to the Phi node. This node might be deallocated! */
1252 /* return the pointer to the Phi node. This node might be deallocated! */
/* Forward declaration: mutual recursion with phi_merge. */
1257 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1260 allocates and returns this node. The routine called to allocate the
1261 node might optimize it away and return a real value, or even a pointer
1262 to a deallocated Phi node on top of the obstack!
1263 This function is called with an in-array of proper size. **/
1265 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1267 ir_node *prevBlock, *res;
1270 /* This loop goes to all predecessor blocks of the block the Phi node is in
1271 and there finds the operands of the Phi node by calling
1272 get_r_value_internal. */
1273 for (i = 1; i <= ins; ++i) {
1274 assert (block->in[i]);
/* block->in[i] is a control-flow node (Jmp/Proj); its in[0] is the block
   it lives in, i.e. the predecessor block. */
1275 prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1277 nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1280 /* After collecting all predecessors into the array nin a new Phi node
1281 with these predecessors is created. This constructor contains an
1282 optimization: If all predecessors of the Phi node are identical it
1283 returns the only operand instead of a new Phi node. If the value
1284 passes two different control flow edges without being defined, and
1285 this is the second path treated, a pointer to the node that will be
1286 allocated for the first path (recursion) is returned. We already
1287 know the address of this node, as it is the next node to be allocated
1288 and will be placed on top of the obstack. (The obstack is a _stack_!) */
1289 res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1291 /* Now we know the value for "pos" and can enter it in the array with
1292 all known local variables. Attention: this might be a pointer to
1293 a node, that later will be allocated!!! See new_rd_Phi_in.
1294 If this is called in mature, after some set_value in the same block,
1295 the proper value must not be overwritten:
1297 get_value (makes Phi0, put's it into graph_arr)
1298 set_value (overwrites Phi0 in graph_arr)
1299 mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1302 if (!block->attr.block.graph_arr[pos]) {
1303 block->attr.block.graph_arr[pos] = res;
1305 /* printf(" value already computed by %s\n",
1306 get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
/* get_r_value_internal (fast-construction variant): return the last
   definition of local variable `pos` valid in `block`, inserting Phi/Phi0
   nodes as needed. Uses the irg visited flag to detect the second visit in
   a cycle (case 4 below).
   NOTE(review): sampled extract — the return-type line, local declarations
   (`res`, `nin`), several closing braces and the final `return res;` are
   missing from view. */
1312 /* This function returns the last definition of a variable. In case
1313    this variable was last defined in a previous block, Phi nodes are
1314    inserted. If the part of the firm graph containing the definition
1315    is not yet constructed, a dummy Phi node is returned. */
1317 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1320   /* There are 4 cases to treat.
1322      1. The block is not mature and we visit it the first time. We can not
1323      create a proper Phi node, therefore a Phi0, i.e., a Phi without
1324      predecessors is returned. This node is added to the linked list (field
1325      "link") of the containing block to be completed when this block is
1326      matured. (Completion will add a new Phi and turn the Phi0 into an Id
1329      2. The value is already known in this block, graph_arr[pos] is set and we
1330      visit the block the first time. We can return the value without
1331      creating any new nodes.
1333      3. The block is mature and we visit it the first time. A Phi node needs
1334      to be created (phi_merge). If the Phi is not needed, as all its
1335      operands are the same value reaching the block through different
1336      paths, it's optimized away and the value itself is returned.
1338      4. The block is mature, and we visit it the second time. Now two
1339      subcases are possible:
1340      * The value was computed completely the last time we were here. This
1341      is the case if there is no loop. We can return the proper value.
1342      * The recursion that visited this node and set the flag did not
1343      return yet. We are computing a value in a loop and need to
1344      break the recursion without knowing the result yet.
1345      @@@ strange case. Straight forward we would create a Phi before
1346      starting the computation of its predecessors. In this case we will
1347      find a Phi here in any case. The problem is that this implementation
1348      only creates a Phi after computing the predecessors, so that it is
1349      hard to compute self references of this Phi. @@@
1350      There is no simple check for the second subcase. Therefore we check
1351      for a second visit and treat all such cases as the second subcase.
1352      Anyways, the basic situation is the same: we reached a block
1353      on two paths without finding a definition of the value: No Phi
1354      nodes are needed on both paths.
1355      We return this information "Two paths, no Phi needed" by a very tricky
1356      implementation that relies on the fact that an obstack is a stack and
1357      will return a node with the same address on different allocations.
1358      Look also at phi_merge and new_rd_phi_in to understand this.
1359      @@@ Unfortunately this does not work, see testprogram
1360      three_cfpred_example.
1364   /* case 4 -- already visited. */
1365   if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
1367   /* visited the first time */
1368   set_irn_visited(block, get_irg_visited(current_ir_graph));
1370   /* Get the local valid value */
1371   res = block->attr.block.graph_arr[pos];
1373   /* case 2 -- If the value is actually computed, return it. */
1374   if (res) { return res;};
1376   if (block->attr.block.matured) { /* case 3 */
1378     /* The Phi has the same amount of ins as the corresponding block. */
1379     int ins = get_irn_arity(block);
1381     NEW_ARR_A (ir_node *, nin, ins);
1383     /* Phi merge collects the predecessors and then creates a node. */
1384     res = phi_merge (block, pos, mode, nin, ins);
1386   } else { /* case 1 */
1387     /* The block is not mature, we don't know how many in's are needed. A Phi
1388        with zero predecessors is created. Such a Phi node is called Phi0
1389        node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
1390        to the list of Phi0 nodes in this block to be matured by mature_block
1392        The Phi0 has to remember the pos of its internal value. If the real
1393        Phi is computed, pos is used to update the array with the local
1396     res = new_rd_Phi0 (current_ir_graph, block, mode);
1397     res->attr.phi0_pos = pos;
1398     res->link = block->link;
     /* Fallback: undefined variable — emit a diagnostic and a zero constant
        so construction can continue. */
1402   /* If we get here, the frontend missed a use-before-definition error */
1405   printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1406   assert (mode->code >= irm_F && mode->code <= irm_P);
1407   res = new_rd_Const (NULL, current_ir_graph, block, mode,
1408        tarval_mode_null[mode->code]);
1411   /* The local valid value is available now. */
1412   block->attr.block.graph_arr[pos] = res;
/* new_rd_Phi_in: allocate a Phi node for `block` over `in[0..ins-1]`; if all
   non-Bad, non-self predecessors are one and the same node, free the fresh
   allocation again (obstack top) and return that node instead. The
   Phi_in_stack helpers are empty stubs when the explicit stack is unused.
   NOTE(review): sampled extract — the opening brace, the `known` tracking
   logic inside the loop, the else-branches and the final `return res;` are
   missing from view. */
1420    it starts the recursion. This causes an Id at the entry of
1421    every block that has no definition of the value! **/
1423 #if USE_EXPLICIT_PHI_IN_STACK
1425 INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
1426 INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
1429 static INLINE ir_node *
1430 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1431        ir_node **in, int ins)
1434   ir_node *res, *known;
1436   /* Allocate a new node on the obstack. The allocation copies the in
1438   res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1439   res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1441   /* This loop checks whether the Phi has more than one predecessor.
1442      If so, it is a real Phi node and we break the loop. Else the
1443      Phi node merges the same definition on several paths and therefore
1444      is not needed. Don't consider Bad nodes! */
1446   for (i=0; i < ins; ++i)
1450     if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
1458   /* i==ins: there is at most one predecessor, we don't need a phi node. */
1461     obstack_free (current_ir_graph->obst, res);
1464       /* An undefined value, e.g., in unreachable code. */
1468     res = optimize_node (res);
1469     irn_vrfy_irg (res, irg);
1470     /* Memory Phis in endless loops must be kept alive.
1471        As we can't distinguish these easily we keep all of them alive. */
1472     if ((res->op == op_Phi) && (mode == mode_M))
1473       add_End_keepalive(irg->end, res);
/* Forward declaration: needed by the mutually recursive phi_merge below. */
1480 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
/* new_frag_arr: snapshot the current block's graph_arr for a fragile op `n`
   (one that may raise an exception), so the value state at the point of `n`
   can be recovered on the exceptional path. Entry 0 (the store) is replaced
   by a Proj of `n`'s memory result. The last graph_arr slot is used to chain
   fragile ops (see set_frag_value).
   NOTE(review): sampled extract — the `arr`/`opt` declarations, the `else`
   before the second Proj, the set_optimize(opt) restore and `return arr;`
   are missing from view. */
1482 #if PRECISE_EXC_CONTEXT
1484 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
1486 static INLINE ir_node **
1487 new_frag_arr (ir_node *n) {
1490   arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
1491   memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
1492      sizeof(ir_node *)*current_ir_graph->n_loc);
1493   /* turn off optimization before allocating Proj nodes, as res isn't
1495   opt = get_optimize(); set_optimize(0);
1496   /* Here we rely on the fact that all frag ops have Memory as first result! */
1497   if (get_irn_op(n) == op_Call)
1498     arr[0] = new_Proj(n, mode_M, 3);
1500     arr[0] = new_Proj(n, mode_M, 0);
     /* Remember this fragile op in the reserved last slot, forming a chain. */
1502   current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* get_frag_arr: return the frag_arr stored on a fragile op; Call and Alloc
   keep it in op-specific attribute unions, everything else in the generic
   attr.frag_arr.
   NOTE(review): sampled extract — an `else` and closing braces are missing
   from view. */
1506 static INLINE ir_node **
1507 get_frag_arr (ir_node *n) {
1508   if (get_irn_op(n) == op_Call) {
1509     return n->attr.call.frag_arr;
1510   } else if (get_irn_op(n) == op_Alloc) {
1511     return n->attr.a.frag_arr;
1513     return n->attr.frag_arr;
/* set_frag_value: store `val` at `pos` in `frag_arr` unless a value is
   already present (never overwrites), then recurse through the chain of
   fragile ops recorded in the reserved last slot so every later snapshot
   also learns the value.
   NOTE(review): sampled extract — the return-type line and closing brace
   are missing from view. */
1518 set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
1519   if (!frag_arr[pos]) frag_arr[pos] = val;
1520   if (frag_arr[current_ir_graph->n_loc - 1])
1521     set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* get_r_frag_value_internal: like get_r_value_internal, but for the value
   valid on the exceptional edge of fragile op `cfOp` — reads it from the
   op's frag_arr snapshot rather than from the block.
   NOTE(review): sampled extract — the return-type line, local declarations,
   intermediate braces and the final `return res;` are missing from view. */
1525 get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
1529   assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
1531   frag_arr = get_frag_arr(cfOp);
1532   res = frag_arr[pos];
1534     if (block->attr.block.graph_arr[pos]) {
1535       /* There was a set_value after the cfOp and no get_value before that
1536          set_value. We must build a Phi node now. */
1537       if (block->attr.block.matured) {
1538     int ins = get_irn_arity(block);
1540     NEW_ARR_A (ir_node *, nin, ins);
1541     res = phi_merge(block, pos, mode, nin, ins);
1543     res = new_rd_Phi0 (current_ir_graph, block, mode);
1544     res->attr.phi0_pos = pos;
1545     res->link = block->link;
1549       /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
1550      but this should be better: (remove comment if this works) */
1551       /* It's a Phi, we can write this into all graph_arrs with NULL */
1552       set_frag_value(block->attr.block.graph_arr, pos, res);
1554       res = get_r_value_internal(block, pos, mode);
1555       set_frag_value(block->attr.block.graph_arr, pos, res);
/* phi_merge (PRECISE_EXC_CONTEXT variant): first plant a Phi0 placeholder in
   graph_arr[pos] to break recursion cycles, then gather each predecessor's
   reaching definition (using frag_arr snapshots for fragile control-flow
   ops), build the real Phi via new_rd_Phi_in, and exchange the Phi0 for it.
   NOTE(review): sampled extract — the return-type line, local `i`, several
   `else` keywords, closing braces, the `#endif` after set_frag_value, the
   `if (phi0)` guard before exchange, and `return res;` are missing from
   view. */
1563    computes the predecessors for the real phi node, and then
1564    allocates and returns this node. The routine called to allocate the
1565    node might optimize it away and return a real value.
1566    This function must be called with an in-array of proper size. **/
1568 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1570   ir_node *prevBlock, *prevCfOp, *res, *phi0;
1573   /* If this block has no value at pos create a Phi0 and remember it
1574      in graph_arr to break recursions.
1575      Else we may not set graph_arr as there a later value is remembered. */
1577   if (!block->attr.block.graph_arr[pos]) {
1578     if (block == get_irg_start_block(current_ir_graph)) {
1579       /* Collapsing to Bad tarvals is no good idea.
1580          So we call a user-supplied routine here that deals with this case as
1581          appropriate for the given language. Sadly the only help we can give
1582          here is the position.
1584          Even if all variables are defined before use, it can happen that
1585          we get to the start block, if a cond has been replaced by a tuple
1586          (bad, jmp). In this case we call the function needlessly, eventually
1587          generating a non-existent error.
1588          However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
1591       if (default_initialize_local_variable)
1592     block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
1594     block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
1595       /* We don't need to care about exception ops in the start block.
1596      There are none by definition. */
1597       return block->attr.block.graph_arr[pos];
1599       phi0 = new_rd_Phi0(current_ir_graph, block, mode);
1600       block->attr.block.graph_arr[pos] = phi0;
1601 #if PRECISE_EXC_CONTEXT
1602       /* Set graph_arr for fragile ops. Also here we should break recursion.
1603          We could choose a cyclic path through an cfop. But the recursion would
1604          break at some point. */
1605       set_frag_value(block->attr.block.graph_arr, pos, phi0);
1610   /* This loop goes to all predecessor blocks of the block the Phi node
1611      is in and there finds the operands of the Phi node by calling
1612      get_r_value_internal. */
1613   for (i = 1; i <= ins; ++i) {
1614     prevCfOp = skip_Proj(block->in[i]);
1616     if (is_Bad(prevCfOp)) {
1617       /* In case a Cond has been optimized we would get right to the start block
1618      with an invalid definition. */
1619       nin[i-1] = new_Bad();
1622     prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1624     if (!is_Bad(prevBlock)) {
1625 #if PRECISE_EXC_CONTEXT
1626       if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
1627     assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
1628     nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
1631     nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1633       nin[i-1] = new_Bad();
1637   /* After collecting all predecessors into the array nin a new Phi node
1638      with these predecessors is created. This constructor contains an
1639      optimization: If all predecessors of the Phi node are identical it
1640      returns the only operand instead of a new Phi node. */
1641   res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1643   /* In case we allocated a Phi0 node at the beginning of this procedure,
1644      we need to exchange this Phi0 with the real Phi. */
1646     exchange(phi0, res);
1647     block->attr.block.graph_arr[pos] = res;
1648     /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1649        only an optimization. */
/* get_r_value_internal (second variant): same contract as the fast variant
   above, but case 4 (second visit in a cycle) is resolved by returning the
   Phi0 that phi_merge is guaranteed to have planted in graph_arr[pos] —
   instead of the obstack-address trick.
   NOTE(review): sampled extract — the return-type line, local declarations,
   closing braces and the final `return res;` are missing from view. */
1655 /* This function returns the last definition of a variable. In case
1656    this variable was last defined in a previous block, Phi nodes are
1657    inserted. If the part of the firm graph containing the definition
1658    is not yet constructed, a dummy Phi node is returned. */
1660 get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
1663   /* There are 4 cases to treat.
1665      1. The block is not mature and we visit it the first time. We can not
1666      create a proper Phi node, therefore a Phi0, i.e., a Phi without
1667      predecessors is returned. This node is added to the linked list (field
1668      "link") of the containing block to be completed when this block is
1669      matured. (Completion will add a new Phi and turn the Phi0 into an Id
1672      2. The value is already known in this block, graph_arr[pos] is set and we
1673      visit the block the first time. We can return the value without
1674      creating any new nodes.
1676      3. The block is mature and we visit it the first time. A Phi node needs
1677      to be created (phi_merge). If the Phi is not needed, as all its
1678      operands are the same value reaching the block through different
1679      paths, it's optimized away and the value itself is returned.
1681      4. The block is mature, and we visit it the second time. Now two
1682      subcases are possible:
1683      * The value was computed completely the last time we were here. This
1684      is the case if there is no loop. We can return the proper value.
1685      * The recursion that visited this node and set the flag did not
1686      return yet. We are computing a value in a loop and need to
1687      break the recursion. This case only happens if we visited
1688      the same block with phi_merge before, which inserted a Phi0.
1689      So we return the Phi0.
1692   /* case 4 -- already visited. */
1693   if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1694     /* As phi_merge allocates a Phi0 this value is always defined. Here
1695        is the critical difference of the two algorithms. */
1696     assert(block->attr.block.graph_arr[pos]);
1697     return block->attr.block.graph_arr[pos];
1700   /* visited the first time */
1701   set_irn_visited(block, get_irg_visited(current_ir_graph));
1703   /* Get the local valid value */
1704   res = block->attr.block.graph_arr[pos];
1706   /* case 2 -- If the value is actually computed, return it. */
1707   if (res) { return res; };
1709   if (block->attr.block.matured) { /* case 3 */
1711     /* The Phi has the same amount of ins as the corresponding block. */
1712     int ins = get_irn_arity(block);
1714     NEW_ARR_A (ir_node *, nin, ins);
1716     /* Phi merge collects the predecessors and then creates a node. */
1717     res = phi_merge (block, pos, mode, nin, ins);
1719   } else {  /* case 1 */
1720     /* The block is not mature, we don't know how many in's are needed. A Phi
1721        with zero predecessors is created. Such a Phi node is called Phi0
1722        node. The Phi0 is then added to the list of Phi0 nodes in this block
1723        to be matured by mature_block later.
1724        The Phi0 has to remember the pos of its internal value. If the real
1725        Phi is computed, pos is used to update the array with the local
1727     res = new_rd_Phi0 (current_ir_graph, block, mode);
1728     res->attr.phi0_pos = pos;
1729     res->link = block->link;
     /* Fallback for use-before-definition, analogous to the fast variant
        but using get_mode_null for the zero constant. */
1733   /* If we get here, the frontend missed a use-before-definition error */
1736   printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
1737   assert (mode->code >= irm_F && mode->code <= irm_P);
1738   res = new_rd_Const (NULL, current_ir_graph, block, mode,
1739               get_mode_null(mode));
1742   /* The local valid value is available now. */
1743   block->attr.block.graph_arr[pos] = res;
1748 #endif /* USE_FAST_PHI_CONSTRUCTION */
/* mature_block: declare `block` complete — its predecessor set is final.
   Converts every pending Phi0 chained on block->link into a real Phi via
   phi_merge/exchange, marks the block matured, then optimizes and verifies
   it in place.
   NOTE(review): sampled extract — the return-type line, locals (`ins`, `nin`,
   `n`, `next`), the `next = n->link` step of the loop, and closing braces
   are missing from view. */
1750 /* ************************************************************************** */
1752 /** Finalize a Block node, when all control flows are known. */
1753 /** Acceptable parameters are only Block nodes. */
1755 mature_block (ir_node *block)
1762   assert (get_irn_opcode(block) == iro_Block);
1763   /* @@@ should be commented in
1764      assert (!get_Block_matured(block) && "Block already matured"); */
1766   if (!get_Block_matured(block)) {
1767     ins = ARR_LEN (block->in)-1;
1768     /* Fix block parameters */
1769     block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
1771     /* An array for building the Phi nodes. */
1772     NEW_ARR_A (ir_node *, nin, ins);
1774     /* Traverse a chain of Phi nodes attached to this block and mature
1776     for (n = block->link; n; n=next) {
1777       inc_irg_visited(current_ir_graph);
1779       exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
1782     block->attr.block.matured = 1;
1784     /* Now, as the block is a finished firm node, we can optimize it.
1785        Since other nodes have been allocated since the block was created
1786        we can not free the node on the obstack. Therefore we have to call
1788        Unfortunately the optimization does not change a lot, as all allocated
1789        nodes refer to the unoptimized node.
1790        We can call _2, as global cse has no effect on blocks. */
1791     block = optimize_in_place_2(block);
1792     irn_vrfy_irg(block, current_ir_graph);
/* new_d_* constructors, part 1 (Phi .. Quot): thin wrappers that forward to
   the corresponding new_rd_* constructor with current_ir_graph and its
   current block (Const/Const_type use the start block — constants are
   block-independent). new_d_Quot additionally attaches a frag_arr snapshot
   under PRECISE_EXC_CONTEXT, since Quot can raise.
   NOTE(review): sampled extract — return-type lines, opening/closing braces,
   trailing argument lines of multi-line calls and `return res;` statements
   are missing from view. */
1797 new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
1799   return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
1804 new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
1806   return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
1811 new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
1813   return new_rd_Const_type (db, current_ir_graph, current_ir_graph->start_block,
1819 new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
1821   return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
1826 new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
1828   return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
1833 new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
1836   assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
1837   arg->attr.c.kind = fragmentary;
1838   arg->attr.c.default_proj = max_proj;
1839   res = new_Proj (arg, mode_X, max_proj);
1844 new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
1846   return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
1851 new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
1853   return new_rd_Cast (db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
1857 new_d_Tuple (dbg_info* db, int arity, ir_node **in)
1859   return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
1864 new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1866   return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
1871 new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1873   return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
1879 new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
1881   return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
1886 new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1888   return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
1893 new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1896   res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
1898 #if PRECISE_EXC_CONTEXT
1899   if ((current_ir_graph->phase_state == phase_building) &&
1900       (get_irn_op(res) == op_Quot))  /* Could be optimized away. */
1901     res->attr.frag_arr = new_frag_arr(res);
/* new_d_* constructors, part 2 (DivMod .. Store): same wrapper pattern as
   part 1. The fragile ops (DivMod, Div, Mod, Call, Load, Store) attach a
   frag_arr snapshot under PRECISE_EXC_CONTEXT, guarded by an op check since
   the constructor may have optimized the node away; Call stores it in
   attr.call.frag_arr.
   NOTE(review): sampled extract — return-type lines, braces, trailing
   argument lines, `#endif` lines and `return` statements are missing from
   view. */
1908 new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1911   res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
1913 #if PRECISE_EXC_CONTEXT
1914   if ((current_ir_graph->phase_state == phase_building) &&
1915       (get_irn_op(res) == op_DivMod))   /* Could be optimized away. */
1916     res->attr.frag_arr = new_frag_arr(res);
1923 new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1926   res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
1928 #if PRECISE_EXC_CONTEXT
1929   if ((current_ir_graph->phase_state == phase_building) &&
1930       (get_irn_op(res) == op_Div))  /* Could be optimized away. */
1931     res->attr.frag_arr = new_frag_arr(res);
1938 new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
1941   res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
1943 #if PRECISE_EXC_CONTEXT
1944   if ((current_ir_graph->phase_state == phase_building) &&
1945       (get_irn_op(res) == op_Mod))  /* Could be optimized away. */
1946     res->attr.frag_arr = new_frag_arr(res);
1953 new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1955   return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
1960 new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1962   return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
1967 new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
1969   return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
1974 new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
1976   return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
1981 new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1983   return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
1988 new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1990   return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
1995 new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
1997   return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
2002 new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
2004   return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
2009 new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
2011   return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
2016 new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
2018   return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
2023 new_d_Jmp (dbg_info* db)
2025   return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
2029 new_d_Cond (dbg_info* db, ir_node *c)
2031   return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
2035 new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
2039   res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
2040              store, callee, arity, in, tp);
2041 #if PRECISE_EXC_CONTEXT
2042   if ((current_ir_graph->phase_state == phase_building) &&
2043       (get_irn_op(res) == op_Call))  /* Could be optimized away. */
2044     res->attr.call.frag_arr = new_frag_arr(res);
2051 new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
2053   return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
2058 new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
2060   return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
2065 new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
2068   res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
2070 #if PRECISE_EXC_CONTEXT
2071   if ((current_ir_graph->phase_state == phase_building) &&
2072       (get_irn_op(res) == op_Load))  /* Could be optimized away. */
2073     res->attr.frag_arr = new_frag_arr(res);
2080 new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
2083   res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
2085 #if PRECISE_EXC_CONTEXT
2086   if ((current_ir_graph->phase_state == phase_building) &&
2087       (get_irn_op(res) == op_Store))  /* Could be optimized away. */
2088     res->attr.frag_arr = new_frag_arr(res);
/* new_d_* constructors, part 3 (Alloc .. Filter): same wrapper pattern.
   Alloc is fragile (frag_arr in attr.a.frag_arr under PRECISE_EXC_CONTEXT);
   SymConst goes to the start block like Const; new_d_simpleSel is Sel with
   zero index operands; the bare `return current_ir_graph->bad;` line belongs
   to a new_d_Bad whose signature is not visible in this extract.
   NOTE(review): sampled extract — return-type lines, braces, trailing
   argument lines and `return` statements are missing from view. */
2095 new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
2099   res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
2100              store, size, alloc_type, where);
2101 #if PRECISE_EXC_CONTEXT
2102   if ((current_ir_graph->phase_state == phase_building) &&
2103       (get_irn_op(res) == op_Alloc))  /* Could be optimized away. */
2104     res->attr.a.frag_arr = new_frag_arr(res);
2111 new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
2113   return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
2114              store, ptr, size, free_type);
2118 new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
2119 /* GL: objptr was called frame before.  Frame was a bad choice for the name
2120    as the operand could as well be a pointer to a dynamic object. */
2122   return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2123              store, objptr, 0, NULL, ent);
2127 new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
2129   return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
2130              store, objptr, n_index, index, sel);
2134 new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
2136   return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
2137              store, objptr, ent));
2141 new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
2143   return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
2148 new_d_Sync (dbg_info* db, int arity, ir_node** in)
2150   return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
2158   return current_ir_graph->bad;
2162 new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
2164   return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
2169 new_d_Unknown (ir_mode *m)
2171   return new_rd_Unknown(current_ir_graph, m);
2175 new_d_CallBegin (dbg_info *db, ir_node *call)
2178   res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
2183 new_d_EndReg (dbg_info *db)
2186   res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
2191 new_d_EndExcept (dbg_info *db)
2194   res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
2199 new_d_Break (dbg_info *db)
2201   return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
2205 new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
2207   return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
/* new_d_immBlock: create an immature block (dynamic in-array, arity -1),
   make it the current block, and allocate its zeroed graph_arr for
   automatic Phi construction. Immature blocks must not be optimized yet.
   new_immBlock is the debug-info-free convenience wrapper.
   NOTE(review): sampled extract — the `ir_node *res;` declaration,
   `return res;`, and the new_immBlock signature line are missing from
   view. */
2211 /* ********************************************************************* */
2212 /* Comfortable interface with automatic Phi node construction.           */
2213 /* (Uses also constructors of ?? interface, except new_Block.            */
2214 /* ********************************************************************* */
2216 /** Block construction **/
2217 /* immature Block without predecessors */
2218 ir_node *new_d_immBlock (dbg_info* db) {
2221   assert(get_irg_phase_state (current_ir_graph) == phase_building);
2222   /* creates a new dynamic in-array as length of in is -1 */
2223   res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
2224   current_ir_graph->current_block = res;
2225   res->attr.block.matured = 0;
2226   //res->attr.block.exc = exc_normal;
2227   //res->attr.block.handler_entry = 0;
2228   res->attr.block.irg = current_ir_graph;
2229   res->attr.block.backedge = NULL;
2230   res->attr.block.in_cg = NULL;
2231   res->attr.block.cg_backedge = NULL;
2232   set_Block_block_visited(res, 0);
2234   /* Create and initialize array for Phi-node construction. */
2235   res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
2236                      current_ir_graph->n_loc);
2237   memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
2239   /* Immature block may not be optimized! */
2240   irn_vrfy_irg (res, current_ir_graph);
2247   return new_d_immBlock(NULL);
/* add_in_edge: append control-flow predecessor `jmp` to an immature
   `block`'s dynamic in-array; asserts the block has not been matured.
   switch_block: make `target` the current block for the comfortable
   interface.
   NOTE(review): sampled extract — return-type lines, braces and an
   intervening comment/closing lines are missing from view. */
2250 /* add an edge to a jmp/control flow node */
2252 add_in_edge (ir_node *block, ir_node *jmp)
2254   if (block->attr.block.matured) {
2255     assert(0 && "Error: Block already matured!\n");
2258     assert (jmp != NULL);
2259     ARR_APP1 (ir_node *, block->in, jmp);
2263 /* changing the current block */
2265 switch_block (ir_node *target)
2267   current_ir_graph->current_block = target;
/* Parameter administration: user-level access to local-variable slots.
   Slot 0 of graph_arr is reserved for the store (memory state), so user
   position `pos` maps to array index pos + 1. get_d_value/get_store bump
   the visited counter before walking (get_r_value_internal relies on it);
   set_value/set_store write the current block's slot directly.
   keep_alive adds a node to the End keep-alive list.
   NOTE(review): sampled extract — return-type lines, braces, the get_store
   signature and `return` statements are missing from view. */
2270 /* ************************ */
2271 /* parameter administration */
2273 /* get a value from the parameter array from the current block by its index */
2275 get_d_value (dbg_info* db, int pos, ir_mode *mode)
2277   assert(get_irg_phase_state (current_ir_graph) == phase_building);
2278   inc_irg_visited(current_ir_graph);
2280   return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
2282 /* get a value from the parameter array from the current block by its index */
2284 get_value (int pos, ir_mode *mode)
2286   return get_d_value(NULL, pos, mode);
2289 /* set a value at position pos in the parameter array from the current block */
2291 set_value (int pos, ir_node *value)
2293   assert(get_irg_phase_state (current_ir_graph) == phase_building);
2294   assert(pos+1 < current_ir_graph->n_loc);
2295   current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
2298 /* get the current store */
2302   assert(get_irg_phase_state (current_ir_graph) == phase_building);
2303   /* GL: one could call get_value instead */
2304   inc_irg_visited(current_ir_graph);
2305   return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
2308 /* set the current store */
2310 set_store (ir_node *store)
2312   /* GL: one could call set_value instead */
2313   assert(get_irg_phase_state (current_ir_graph) == phase_building);
2314   current_ir_graph->current_block->attr.block.graph_arr[0] = store;
2318 keep_alive (ir_node *ka)
2320   add_End_keepalive(current_ir_graph->end, ka);
/* Access routines and construction lifecycle: get_cur_block/
   get_cur_frame_type read from current_ir_graph; init_cons registers the
   frontend's callback for initializing undefined local variables (used by
   phi_merge in the start block); finalize_cons marks a graph's construction
   phase as finished (phase_high).
   NOTE(review): sampled extract — closing braces and some signature lines
   are missing from view. */
2323 /** Useful access routines **/
2324 /* Returns the current block of the current graph.  To set the current
2325    block use switch_block(). */
2326 ir_node *get_cur_block() {
2327   return get_irg_current_block(current_ir_graph);
2330 /* Returns the frame type of the current graph */
2331 type *get_cur_frame_type() {
2332   return get_irg_frame_type(current_ir_graph);
2336 /* ********************************************************************* */
2339 /* call once for each run of the library */
2341 init_cons (default_initialize_local_variable_func_t *func)
2343   default_initialize_local_variable = func;
2346 /* call for each graph */
2348 finalize_cons (ir_graph *irg) {
2349   irg->phase_state = phase_high;
/* Convenience constructors without debug info: each new_X simply forwards
   to new_d_X with a NULL dbg_info. new_Rotate is a compatibility alias for
   new_Rot.
   NOTE(review): sampled extract — closing braces and the second parameter
   lines of some multi-line signatures (new_Sel, new_Call, new_Free) are
   missing from view. */
2353 ir_node *new_Block(int arity, ir_node **in) {
2354   return new_d_Block(NULL, arity, in);
2356 ir_node *new_Start  (void) {
2357   return new_d_Start(NULL);
2359 ir_node *new_End    (void) {
2360   return new_d_End(NULL);
2362 ir_node *new_Jmp    (void) {
2363   return new_d_Jmp(NULL);
2365 ir_node *new_Cond   (ir_node *c) {
2366   return new_d_Cond(NULL, c);
2368 ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
2369   return new_d_Return(NULL, store, arity, in);
2371 ir_node *new_Raise  (ir_node *store, ir_node *obj) {
2372   return new_d_Raise(NULL, store, obj);
2374 ir_node *new_Const  (ir_mode *mode, tarval *con) {
2375   return new_d_Const(NULL, mode, con);
2377 ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
2378   return new_d_SymConst(NULL, value, kind);
2380 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
2381   return new_d_simpleSel(NULL, store, objptr, ent);
2383 ir_node *new_Sel    (ir_node *store, ir_node *objptr, int arity, ir_node **in,
2385   return new_d_Sel(NULL, store, objptr, arity, in, ent);
2387 ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
2388   return new_d_InstOf (NULL, store, objptr, ent);
2390 ir_node *new_Call   (ir_node *store, ir_node *callee, int arity, ir_node **in,
2392   return new_d_Call(NULL, store, callee, arity, in, tp);
2394 ir_node *new_Add    (ir_node *op1, ir_node *op2, ir_mode *mode) {
2395   return new_d_Add(NULL, op1, op2, mode);
2397 ir_node *new_Sub    (ir_node *op1, ir_node *op2, ir_mode *mode) {
2398   return new_d_Sub(NULL, op1, op2, mode);
2400 ir_node *new_Minus  (ir_node *op,  ir_mode *mode) {
2401   return new_d_Minus(NULL, op, mode);
2403 ir_node *new_Mul    (ir_node *op1, ir_node *op2, ir_mode *mode) {
2404   return new_d_Mul(NULL, op1, op2, mode);
2406 ir_node *new_Quot   (ir_node *memop, ir_node *op1, ir_node *op2) {
2407   return new_d_Quot(NULL, memop, op1, op2);
2409 ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
2410   return new_d_DivMod(NULL, memop, op1, op2);
2412 ir_node *new_Div    (ir_node *memop, ir_node *op1, ir_node *op2) {
2413   return new_d_Div(NULL, memop, op1, op2);
2415 ir_node *new_Mod    (ir_node *memop, ir_node *op1, ir_node *op2) {
2416   return new_d_Mod(NULL, memop, op1, op2);
2418 ir_node *new_Abs    (ir_node *op, ir_mode *mode) {
2419   return new_d_Abs(NULL, op, mode);
2421 ir_node *new_And    (ir_node *op1, ir_node *op2, ir_mode *mode) {
2422   return new_d_And(NULL, op1, op2, mode);
2424 ir_node *new_Or     (ir_node *op1, ir_node *op2, ir_mode *mode) {
2425   return new_d_Or(NULL, op1, op2, mode);
2427 ir_node *new_Eor    (ir_node *op1, ir_node *op2, ir_mode *mode) {
2428   return new_d_Eor(NULL, op1, op2, mode);
2430 ir_node *new_Not    (ir_node *op,                ir_mode *mode) {
2431   return new_d_Not(NULL, op, mode);
2433 ir_node *new_Shl    (ir_node *op,  ir_node *k,   ir_mode *mode) {
2434   return new_d_Shl(NULL, op, k, mode);
2436 ir_node *new_Shr    (ir_node *op,  ir_node *k,   ir_mode *mode) {
2437   return new_d_Shr(NULL, op, k, mode);
2439 ir_node *new_Shrs   (ir_node *op,  ir_node *k,   ir_mode *mode) {
2440   return new_d_Shrs(NULL, op, k, mode);
2442 #define new_Rotate new_Rot
2443 ir_node *new_Rot    (ir_node *op,  ir_node *k,   ir_mode *mode) {
2444   return new_d_Rot(NULL, op, k, mode);
2446 ir_node *new_Cmp    (ir_node *op1, ir_node *op2) {
2447   return new_d_Cmp(NULL, op1, op2);
2449 ir_node *new_Conv   (ir_node *op, ir_mode *mode) {
2450   return new_d_Conv(NULL, op, mode);
2452 ir_node *new_Cast   (ir_node *op, type *to_tp) {
2453   return new_d_Cast(NULL, op, to_tp);
2455 ir_node *new_Phi    (int arity, ir_node **in, ir_mode *mode) {
2456   return new_d_Phi(NULL, arity, in, mode);
2458 ir_node *new_Load   (ir_node *store, ir_node *addr) {
2459   return new_d_Load(NULL, store, addr);
2461 ir_node *new_Store  (ir_node *store, ir_node *addr, ir_node *val) {
2462   return new_d_Store(NULL, store, addr, val);
2464 ir_node *new_Alloc  (ir_node *store, ir_node *size, type *alloc_type,
2465                      where_alloc where) {
2466   return new_d_Alloc(NULL, store, size, alloc_type, where);
2468 ir_node *new_Free   (ir_node *store, ir_node *ptr, ir_node *size,
2470   return new_d_Free(NULL, store, ptr, size, free_type);
2472 ir_node *new_Sync (int arity, ir_node **in) {
2473 return new_d_Sync(NULL, arity, in);
2475 ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
2476 return new_d_Proj(NULL, arg, mode, proj);
2478 ir_node *new_defaultProj (ir_node *arg, long max_proj) {
2479 return new_d_defaultProj(NULL, arg, max_proj);
2481 ir_node *new_Tuple (int arity, ir_node **in) {
2482 return new_d_Tuple(NULL, arity, in);
2484 ir_node *new_Id (ir_node *val, ir_mode *mode) {
2485 return new_d_Id(NULL, val, mode);
2487 ir_node *new_Bad (void) {
2490 ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
2491 return new_d_Confirm (NULL, val, bound, cmp);
2493 ir_node *new_Unknown(ir_mode *m) {
2494 return new_d_Unknown(m);
2496 ir_node *new_CallBegin (ir_node *callee) {
2497 return new_d_CallBegin(NULL, callee);
2499 ir_node *new_EndReg (void) {
2500 return new_d_EndReg(NULL);
2502 ir_node *new_EndExcept (void) {
2503 return new_d_EndExcept(NULL);
2505 ir_node *new_Break (void) {
2506 return new_d_Break(NULL);
2508 ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
2509 return new_d_Filter(NULL, arg, mode, proj);