1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
4 * Authors: Martin Trapp, Christian Schaefer
6 * ircons.c: basic and more detailed irnode constructors
7 * store, block and parameter administration.
8 * Adapted to extended FIRM nodes (exceptions...) and commented
18 # include "irgraph_t.h"
19 # include "irnode_t.h"
20 # include "irmode_t.h"
22 # include "firm_common_t.h"
28 /* memset belongs to string.h */
30 # include "irbackedge_t.h"
32 #if USE_EXPLICIT_PHI_IN_STACK
33 /* A stack needed for the automatic Phi node construction in constructor
34 Phi_in. Redefinition in irgraph.c!! */
39 typedef struct Phi_in_stack Phi_in_stack;
43 * language dependent initialization variable
45 static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
47 /*** ******************************************** */
48 /** private interfaces, for professional use only */
50 /* Constructs a Block with a fixed number of predecessors.
51    Does not set current_block. Can not be used with automatic
52    Phi node construction. */
/* NOTE(review): this listing appears elided -- return types, `ir_node *res;`
   declarations, `return res;` statements and closing braces are not visible.
   Comments below only describe what the visible lines do. */
54 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
58   res = new_ir_node (db, irg, NULL, op_Block, mode_BB, arity, in);
/* A raw Block is created mature and with a cleared block-visited counter. */
59   set_Block_matured(res, 1);
60   set_Block_block_visited(res, 0);
/* No exception handling and no interprocedural (call graph) info yet. */
62   res->attr.block.exc = exc_normal;
63   res->attr.block.handler_entry = 0;
64   res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
65   res->attr.block.in_cg = NULL;
66   res->attr.block.cg_backedge = NULL;
68   irn_vrfy_irg (res, irg);
/* Start node: no predecessors; mode_T, i.e. it produces a tuple of results
   (projected out elsewhere). */
73 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
77   res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
79   irn_vrfy_irg (res, irg);
/* End node: arity -1 requests a dynamic in-array so keep-alive edges can be
   appended later (see add_End_keepalive). */
84 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
88   res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
90   irn_vrfy_irg (res, irg);
94 /* Creates a Phi node with all predecessors. Calling this constructor
95    is only allowed if the corresponding block is mature. */
97 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
/* A Phi needs exactly one data predecessor per control predecessor of its
   block; both conditions are asserted here. */
101   assert( get_Block_matured(block) );
102   assert( get_irn_arity(block) == arity );
104   res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
106   res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
108   res = optimize_node (res);
109   irn_vrfy_irg (res, irg);
111   /* Memory Phis in endless loops must be kept alive.
112      As we can't distinguish these easily we keep all of them alive. */
/* Re-check res->op: optimize_node may have replaced the Phi with something
   else, in which case no keep-alive edge is needed. */
113   if ((res->op == op_Phi) && (mode == mode_M))
114     add_End_keepalive(irg->end, res);
/* Const node: no predecessors; the tarval `con` is presumably attached in an
   elided line -- TODO confirm against the full source. */
119 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
122   res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
124   res = optimize_node (res);
125   irn_vrfy_irg (res, irg);
128   res = local_optimize_newby (res);
/* Id node: a transparent copy of `val`; normally optimized away. */
135 new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
137   ir_node *in[1] = {val};
139   res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
140   res = optimize_node (res);
141   irn_vrfy_irg (res, irg);
/* Proj node: selects one result (`proj`) out of a mode_T predecessor. */
146 new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
149   ir_node *in[1] = {arg};
151   res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
152   res->attr.proj = proj;
/* Sanity: the projected node and its block must exist. */
155   assert(get_Proj_pred(res));
156   assert(get_nodes_Block(get_Proj_pred(res)));
158   res = optimize_node (res);
160   irn_vrfy_irg (res, irg);
/* Default Proj of a Cond (switch): marks the Cond as fragmentary and records
   max_proj as its default projection number before building the Proj. */
166 new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
170   assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
171   arg->attr.c.kind = fragmentary;
172   arg->attr.c.default_proj = max_proj;
173   res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
/* Conv node: converts `op` to `mode`. */
178 new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
180   ir_node *in[1] = {op};
182   res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
183   res = optimize_node (res);
184   irn_vrfy_irg (res, irg);
/* Tuple node: bundles `arity` values into one mode_T node. */
190 new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
194   res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
195   res = optimize_node (res);
196   irn_vrfy_irg (res, irg);
/* Arithmetic, bitwise, shift and compare constructors. All follow the same
   pattern: build the node with its operand array, run optimize_node (which
   may return a different, equivalent node), then verify.
   Quot/DivMod/Div/Mod additionally take a memory operand and are mode_T
   (results -- memory, value(s), exception -- are selected via Proj). */
201 new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
202 ir_node *op1, ir_node *op2, ir_mode *mode)
204   ir_node *in[2] = {op1, op2};
206   res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
207   res = optimize_node (res);
208   irn_vrfy_irg (res, irg);
213 new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
214 ir_node *op1, ir_node *op2, ir_mode *mode)
216   ir_node *in[2] = {op1, op2};
218   res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
219   res = optimize_node (res);
220   irn_vrfy_irg (res, irg);
225 new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
226 ir_node *op, ir_mode *mode)
228   ir_node *in[1] = {op};
230   res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
231   res = optimize_node (res);
232   irn_vrfy_irg (res, irg);
237 new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
238 ir_node *op1, ir_node *op2, ir_mode *mode)
240   ir_node *in[2] = {op1, op2};
242   res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
243   res = optimize_node (res);
244   irn_vrfy_irg (res, irg);
249 new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
250 ir_node *memop, ir_node *op1, ir_node *op2)
252   ir_node *in[3] = {memop, op1, op2};
254   res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
255   res = optimize_node (res);
256   irn_vrfy_irg (res, irg);
261 new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
262 ir_node *memop, ir_node *op1, ir_node *op2)
264   ir_node *in[3] = {memop, op1, op2};
266   res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
267   res = optimize_node (res);
268   irn_vrfy_irg (res, irg);
273 new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
274 ir_node *memop, ir_node *op1, ir_node *op2)
276   ir_node *in[3] = {memop, op1, op2};
278   res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
279   res = optimize_node (res);
280   irn_vrfy_irg (res, irg);
285 new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
286 ir_node *memop, ir_node *op1, ir_node *op2)
288   ir_node *in[3] = {memop, op1, op2};
290   res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
291   res = optimize_node (res);
292   irn_vrfy_irg (res, irg);
297 new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
298 ir_node *op1, ir_node *op2, ir_mode *mode)
300   ir_node *in[2] = {op1, op2};
302   res = new_ir_node (db, irg, block, op_And, mode, 2, in);
303   res = optimize_node (res);
304   irn_vrfy_irg (res, irg);
309 new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
310 ir_node *op1, ir_node *op2, ir_mode *mode)
312   ir_node *in[2] = {op1, op2};
314   res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
315   res = optimize_node (res);
316   irn_vrfy_irg (res, irg);
321 new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
322 ir_node *op1, ir_node *op2, ir_mode *mode)
324   ir_node *in[2] = {op1, op2};
326   res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
327   res = optimize_node (res);
328   irn_vrfy_irg (res, irg);
333 new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
334 ir_node *op, ir_mode *mode)
336   ir_node *in[1] = {op};
338   res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
339   res = optimize_node (res);
340   irn_vrfy_irg (res, irg);
/* Shift/rotate constructors: `k` is the shift amount. */
345 new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
346 ir_node *op, ir_node *k, ir_mode *mode)
348   ir_node *in[2] = {op, k};
350   res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
351   res = optimize_node (res);
352   irn_vrfy_irg (res, irg);
357 new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
358 ir_node *op, ir_node *k, ir_mode *mode)
360   ir_node *in[2] = {op, k};
362   res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
363   res = optimize_node (res);
364   irn_vrfy_irg (res, irg);
369 new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
370 ir_node *op, ir_node *k, ir_mode *mode)
372   ir_node *in[2] = {op, k};
374   res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
375   res = optimize_node (res);
376   irn_vrfy_irg (res, irg);
381 new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
382 ir_node *op, ir_node *k, ir_mode *mode)
384   ir_node *in[2] = {op, k};
386   res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
387   res = optimize_node (res);
388   irn_vrfy_irg (res, irg);
393 new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
394 ir_node *op, ir_mode *mode)
396   ir_node *in[1] = {op};
398   res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
399   res = optimize_node (res);
400   irn_vrfy_irg (res, irg);
/* Cmp is mode_T: individual relations are selected with Proj nodes. */
405 new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
406 ir_node *op1, ir_node *op2)
408   ir_node *in[2] = {op1, op2};
410   res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
411   res = optimize_node (res);
412   irn_vrfy_irg (res, irg);
/* Jmp: unconditional control flow successor (mode_X, no predecessors). */
417 new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
420   res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
421   res = optimize_node (res);
422   irn_vrfy_irg (res, irg);
/* Cond: conditional branch on `c`; starts as a dense jump table with
   default projection 0 (changed by new_rd_defaultProj for switches). */
427 new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
429   ir_node *in[1] = {c};
431   res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
432   res->attr.c.kind = dense;
433   res->attr.c.default_proj = 0;
434   res = optimize_node (res);
435   irn_vrfy_irg (res, irg);
/* Call: in-array layout is [store, callee, arg0..argN-1]; the actual
   arguments are copied behind the two fixed slots (r_in[0], r_in[1],
   set in elided lines -- TODO confirm). `tp` must be a method type. */
440 new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
441 ir_node *callee, int arity, ir_node **in, type *tp)
448   NEW_ARR_A (ir_node *, r_in, r_arity);
451   memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
453   res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
455   assert(is_method_type(tp));
456   set_Call_type(res, tp);
457   res->attr.call.callee_arr = NULL;
458   res = optimize_node (res);
459   irn_vrfy_irg (res, irg);
/* Return: in-array layout is [store, res0..resN-1]. */
464 new_rd_Return (dbg_info* db, ir_graph *irg, ir_node *block,
465 ir_node *store, int arity, ir_node **in)
472   NEW_ARR_A (ir_node *, r_in, r_arity);
474   memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
475   res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
476   res = optimize_node (res);
477   irn_vrfy_irg (res, irg);
/* Raise: throws exception object `obj`; mode_T result. */
482 new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
484   ir_node *in[2] = {store, obj};
486   res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
487   res = optimize_node (res);
488   irn_vrfy_irg (res, irg);
/* Load: in = [store, address]; mode_T (memory/value/exception via Proj). */
493 new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
494 ir_node *store, ir_node *adr)
496   ir_node *in[2] = {store, adr};
498   res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
500   res = optimize_node (res);
501   irn_vrfy_irg (res, irg);
/* Store: in = [store, address, value]. */
506 new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
507 ir_node *store, ir_node *adr, ir_node *val)
509   ir_node *in[3] = {store, adr, val};
511   res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
513   res = optimize_node (res);
515   irn_vrfy_irg (res, irg);
/* Alloc: allocates `size` bytes of `alloc_type`; `where` selects stack vs.
   heap allocation. */
520 new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
521 ir_node *size, type *alloc_type, where_alloc where)
523   ir_node *in[2] = {store, size};
525   res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
527   res->attr.a.where = where;
528   res->attr.a.type = alloc_type;
530   res = optimize_node (res);
531   irn_vrfy_irg (res, irg);
/* Free: releases `size` bytes at `ptr` of `free_type`. */
536 new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
537 ir_node *ptr, ir_node *size, type *free_type)
539   ir_node *in[3] = {store, ptr, size};
541   res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
543   res->attr.f = free_type;
545   res = optimize_node (res);
546   irn_vrfy_irg (res, irg);
/* Sel: entity selection; in = [store, objptr, index0..indexN-1]; the index
   array is copied behind the two fixed slots. Result mode is pointer. */
551 new_rd_Sel (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
552 int arity, ir_node **in, entity *ent)
559   NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
562   memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
563   res = new_ir_node (db, irg, block, op_Sel, mode_P, r_arity, r_in);
565   res->attr.s.ent = ent;
567   res = optimize_node (res);
568   irn_vrfy_irg (res, irg);
/* InstOf: runtime type test of `objptr` against `ent`. */
573 new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
574 ir_node *objptr, type *ent)
581   NEW_ARR_A (ir_node *, r_in, r_arity);
/* NOTE(review): this creates the node with op_Sel, not an op_InstOf opcode,
   although it fills InstOf attributes (attr.io.ent) below -- looks like a
   copy/paste placeholder; verify against the current opcode table. */
585   res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
587   res->attr.io.ent = ent;
589   /* res = optimize (res);
590   * irn_vrfy_irg (res, irg); */
/* SymConst: symbolic constant. For linkage_ptr_info the payload is an ident
   (pointer info); for type_tag/size it is a type. The node mode is chosen
   in an elided line depending on symkind -- TODO confirm. */
595 new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
596 symconst_kind symkind)
600   if (symkind == linkage_ptr_info)
604   res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
606   res->attr.i.num = symkind;
607   if (symkind == linkage_ptr_info) {
608     res->attr.i.tori.ptrinfo = (ident *)value;
610     assert ( ( (symkind == type_tag)
611     || (symkind == size))
612     && (is_type(value)));
613     res->attr.i.tori.typ = (type *)value;
615   res = optimize_node (res);
616   irn_vrfy_irg (res, irg);
/* Sync: joins several memory values into one (mode_M). */
621 new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
625   res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
627   res = optimize_node (res);
628   irn_vrfy_irg (res, irg);
/* Bad / Unknown: bodies elided here; presumably return the graph's unique
   bad/unknown nodes -- TODO confirm. */
633 new_rd_Bad (ir_graph *irg)
639 new_rd_Unknown (ir_graph *irg)
/* CallBegin (interprocedural view): predecessor is the called address of
   `call`; records graph and Call node in the attributes. */
645 new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
647   ir_node *in[1] = { get_Call_ptr(call) };
649   res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
650   res->attr.callbegin.irg = irg;
651   res->attr.callbegin.call = call;
652   res = optimize_node (res);
653   irn_vrfy_irg (res, irg);
/* EndReg / EndExcept: interprocedural end nodes; arity -1 for a dynamic
   in-array, not run through optimize_node. */
658 new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block)
662   res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
663   res->attr.end.irg = irg;
665   irn_vrfy_irg (res, irg);
670 new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block)
674   res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
675   res->attr.end.irg = irg;
677   irn_vrfy_irg (res, irg);
682 new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
685   res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
686   res = optimize_node (res);
687   irn_vrfy_irg (res, irg);
/* Filter: interprocedural analogue of Proj; selects `proj` from `arg` and
   carries call-graph predecessor/backedge info (initialized empty). */
692 new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
695   ir_node *in[1] = {arg};
697   res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
698   res->attr.filter.proj = proj;
699   res->attr.filter.in_cg = NULL;
700   res->attr.filter.backedge = NULL;
703   assert(get_Proj_pred(res));
704   assert(get_nodes_Block(get_Proj_pred(res)));
706   res = optimize_node (res);
708   irn_vrfy_irg (res, irg);
/* Convenience wrappers: the new_r_* family forwards to the corresponding
   new_rd_* constructor with a NULL dbg_info. No other logic. */
713 INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
714   return new_rd_Block(NULL, irg, arity, in);
716 INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) {
717   return new_rd_Start(NULL, irg, block);
719 INLINE ir_node *new_r_End (ir_graph *irg, ir_node *block) {
720   return new_rd_End(NULL, irg, block);
722 INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) {
723   return new_rd_Jmp(NULL, irg, block);
725 INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) {
726   return new_rd_Cond(NULL, irg, block, c);
728 INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
729 ir_node *store, int arity, ir_node **in) {
730   return new_rd_Return(NULL, irg, block, store, arity, in);
732 INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
733 ir_node *store, ir_node *obj) {
734   return new_rd_Raise(NULL, irg, block, store, obj);
736 INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
737 ir_mode *mode, tarval *con) {
738   return new_rd_Const(NULL, irg, block, mode, con);
740 INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
741 type_or_id_p value, symconst_kind symkind) {
742   return new_rd_SymConst(NULL, irg, block, value, symkind);
744 INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
745 ir_node *objptr, int n_index, ir_node **index,
747   return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
749 INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
751   return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
753 INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
754 ir_node *callee, int arity, ir_node **in,
756   return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
758 INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
759 ir_node *op1, ir_node *op2, ir_mode *mode) {
760   return new_rd_Add(NULL, irg, block, op1, op2, mode);
762 INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
763 ir_node *op1, ir_node *op2, ir_mode *mode) {
764   return new_rd_Sub(NULL, irg, block, op1, op2, mode);
766 INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
767 ir_node *op, ir_mode *mode) {
768   return new_rd_Minus(NULL, irg, block, op, mode);
770 INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
771 ir_node *op1, ir_node *op2, ir_mode *mode) {
772   return new_rd_Mul(NULL, irg, block, op1, op2, mode);
774 INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
775 ir_node *memop, ir_node *op1, ir_node *op2) {
776   return new_rd_Quot(NULL, irg, block, memop, op1, op2);
778 INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
779 ir_node *memop, ir_node *op1, ir_node *op2) {
780   return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
782 INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
783 ir_node *memop, ir_node *op1, ir_node *op2) {
784   return new_rd_Div(NULL, irg, block, memop, op1, op2);
786 INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
787 ir_node *memop, ir_node *op1, ir_node *op2) {
788   return new_rd_Mod(NULL, irg, block, memop, op1, op2);
790 INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
791 ir_node *op, ir_mode *mode) {
792   return new_rd_Abs(NULL, irg, block, op, mode);
794 INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
795 ir_node *op1, ir_node *op2, ir_mode *mode) {
796   return new_rd_And(NULL, irg, block, op1, op2, mode);
798 INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
799 ir_node *op1, ir_node *op2, ir_mode *mode) {
800   return new_rd_Or(NULL, irg, block, op1, op2, mode);
802 INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
803 ir_node *op1, ir_node *op2, ir_mode *mode) {
804   return new_rd_Eor(NULL, irg, block, op1, op2, mode);
806 INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
807 ir_node *op, ir_mode *mode) {
808   return new_rd_Not(NULL, irg, block, op, mode);
810 INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
811 ir_node *op1, ir_node *op2) {
812   return new_rd_Cmp(NULL, irg, block, op1, op2);
814 INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
815 ir_node *op, ir_node *k, ir_mode *mode) {
816   return new_rd_Shl(NULL, irg, block, op, k, mode);
818 INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
819 ir_node *op, ir_node *k, ir_mode *mode) {
820   return new_rd_Shr(NULL, irg, block, op, k, mode);
822 INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
823 ir_node *op, ir_node *k, ir_mode *mode) {
824   return new_rd_Shrs(NULL, irg, block, op, k, mode);
826 INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
827 ir_node *op, ir_node *k, ir_mode *mode) {
828   return new_rd_Rot(NULL, irg, block, op, k, mode);
830 INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
831 ir_node *op, ir_mode *mode) {
832   return new_rd_Conv(NULL, irg, block, op, mode);
834 INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
835 ir_node **in, ir_mode *mode) {
836   return new_rd_Phi(NULL, irg, block, arity, in, mode);
838 INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
839 ir_node *store, ir_node *adr) {
840   return new_rd_Load(NULL, irg, block, store, adr);
842 INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
843 ir_node *store, ir_node *adr, ir_node *val) {
844   return new_rd_Store(NULL, irg, block, store, adr, val);
846 INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
847 ir_node *size, type *alloc_type, where_alloc where) {
848   return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
850 INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
851 ir_node *ptr, ir_node *size, type *free_type) {
852   return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
854 INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
855   return new_rd_Sync(NULL, irg, block, arity, in);
857 INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
858 ir_mode *mode, long proj) {
859   return new_rd_Proj(NULL, irg, block, arg, mode, proj);
861 INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
863   return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
865 INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
866 int arity, ir_node **in) {
867   return new_rd_Tuple(NULL, irg, block, arity, in );
869 INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
870 ir_node *val, ir_mode *mode) {
871   return new_rd_Id(NULL, irg, block, val, mode);
873 INLINE ir_node *new_r_Bad (ir_graph *irg) {
874   return new_rd_Bad(irg);
876 INLINE ir_node *new_r_Unknown (ir_graph *irg) {
877   return new_rd_Unknown(irg);
879 INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
880   return new_rd_CallBegin(NULL, irg, block, callee);
882 INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) {
883   return new_rd_EndReg(NULL, irg, block);
885 INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) {
886   return new_rd_EndExcept(NULL, irg, block);
888 INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) {
889   return new_rd_Break(NULL, irg, block);
891 INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
892 ir_mode *mode, long proj) {
893   return new_rd_Filter(NULL, irg, block, arg, mode, proj);
897 /** ********************/
898 /** public interfaces */
899 /** construction tools */
903  * - create a new Start node in the current block
905  * @return s - pointer to the created Start node
/* new_d_* constructors operate on current_ir_graph/current_block (the state
   used by the automatic Phi construction interface). */
910 new_d_Start (dbg_info* db)
914   res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
915   op_Start, mode_T, 0, NULL);
917   res = optimize_node (res);
918   irn_vrfy_irg (res, current_ir_graph);
923 new_d_End (dbg_info* db)
926   res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
927   op_End, mode_X, -1, NULL);
928   res = optimize_node (res);
929   irn_vrfy_irg (res, current_ir_graph);
934 /* Constructs a Block with a fixed number of predecessors.
935    Does set current_block. Can be used with automatic Phi
936    node construction. */
938 new_d_Block (dbg_info* db, int arity, ir_node **in)
942   res = new_rd_Block (db, current_ir_graph, arity, in);
944   /* Create and initialize array for Phi-node construction. */
/* graph_arr holds the current value of each local variable (n_loc slots);
   zeroed so "no definition yet" is represented by NULL. */
945   res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
946   current_ir_graph->n_loc);
947   memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
949   res = optimize_node (res);
/* The new block becomes the current block for subsequent constructors. */
950   current_ir_graph->current_block = res;
952   irn_vrfy_irg (res, current_ir_graph);
957 /* ***********************************************************************/
958 /* Methods necessary for automatic Phi node creation */
960 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
961 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
962 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
963 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
965 Call Graph: ( A ---> B == A "calls" B)
967 get_value mature_block
975 get_r_value_internal |
979 new_rd_Phi0 new_rd_Phi_in
981 * *************************************************************************** */
983 /* Creates a Phi node with 0 predecessors */
/* Placeholder Phi for a value read in an immature block; completed (turned
   into a real Phi / Id) when the block is matured. Not optimized. */
984 static INLINE ir_node *
985 new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
988   res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
989   irn_vrfy_irg (res, irg);
993 /* There are two implementations of the Phi node construction. The first
994    is faster, but does not work for blocks with more than 2 predecessors.
995    The second works always but is slower and causes more unnecessary Phi
997    Select the implementations by the following preprocessor flag set in
999 #if USE_FAST_PHI_CONSTRUCTION
1001 /* This is a stack used for allocating and deallocating nodes in
1002    new_rd_Phi_in. The original implementation used the obstack
1003    to model this stack, now it is explicit. This reduces side effects.
1005 #if USE_EXPLICIT_PHI_IN_STACK
/* Allocate a fresh, empty Phi-in stack (growable array of node pointers). */
1006 INLINE Phi_in_stack *
1007 new_Phi_in_stack() {
1010   res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
1012   res->stack = NEW_ARR_F (ir_node *, 1);
/* Release the flexible array; the struct itself is presumably freed in an
   elided line -- TODO confirm. */
1019 free_Phi_in_stack(Phi_in_stack *s) {
1020   DEL_ARR_F(s->stack);
/* Push a discarded Phi onto the per-graph free stack for later reuse;
   grows the array only when pos has reached its current length. */
1024 free_to_Phi_in_stack(ir_node *phi) {
1025   assert(get_irn_opcode(phi) == iro_Phi);
1027   if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
1028   current_ir_graph->Phi_in_stack->pos)
1029   ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
1031   current_ir_graph->Phi_in_stack->stack[current_ir_graph->Phi_in_stack->pos] = phi;
1033   (current_ir_graph->Phi_in_stack->pos)++;
/* Pop a recycled Phi from the stack (re-initializing its in-array) or, if
   the stack is empty, allocate a brand new Phi node. */
1036 static INLINE ir_node *
1037 alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
1038 int arity, ir_node **in) {
1040   ir_node **stack = current_ir_graph->Phi_in_stack->stack;
1041   int pos = current_ir_graph->Phi_in_stack->pos;
1045   /* We need to allocate a new node */
/* NOTE(review): `db` is used here but is not among the visible parameters
   of this function -- verify against the full source (may be an elided
   local or a latent bug). */
1046   res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
1047   res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
1049   /* reuse the old node and initialize it again. */
1052   assert (res->kind == k_ir_node);
1053   assert (res->op == op_Phi);
1057   assert (arity >= 0);
1058   /* ???!!! How to free the old in array?? Not at all: on obstack ?!! */
1059   res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
1061   memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
1063   (current_ir_graph->Phi_in_stack->pos)--;
1067 #endif /* USE_EXPLICIT_PHI_IN_STACK */
1069 /* Creates a Phi node with a given, fixed array **in of predecessors.
1070    If the Phi node is unnecessary, as the same value reaches the block
1071    through all control flow paths, it is eliminated and the value
1072    returned directly. This constructor is only intended for use in
1073    the automatic Phi node generation triggered by get_value or mature.
1074    The implementation is quite tricky and depends on the fact, that
1075    the nodes are allocated on a stack:
1076    The in array contains predecessors and NULLs. The NULLs appear,
1077    if get_r_value_internal, that computed the predecessors, reached
1078    the same block on two paths. In this case the same value reaches
1079    this block on both paths, there is no definition in between. We need
1080    not allocate a Phi where these path's merge, but we have to communicate
1081    this fact to the caller. This happens by returning a pointer to the
1082    node the caller _will_ allocate. (Yes, we predict the address. We can
1083    do so because the nodes are allocated on the obstack.) The caller then
1084    finds a pointer to itself and, when this routine is called again,
1087 static INLINE ir_node *
1088 new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
1089 ir_node **in, int ins)
1092   ir_node *res, *known;
1094   /* allocate a new node on the obstack.
1095      This can return a node to which some of the pointers in the in-array
1097      Attention: the constructor copies the in array, i.e., the later changes
1098      to the array in this routine do not affect the constructed node!  If
1099      the in array contains NULLs, there will be missing predecessors in the
1101      Is this a possible internal state of the Phi node generation? */
1102 #if USE_EXPLICIT_PHI_IN_STACK
1103   res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
1105   res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
1106   res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
1108   /* The in-array can contain NULLs. These were returned by
1109      get_r_value_internal if it reached the same block/definition on a
1111      The NULLs are replaced by the node itself to simplify the test in the
1113   for (i=0; i < ins; ++i)
1114   if (in[i] == NULL) in[i] = res;
1116   /* This loop checks whether the Phi has more than one predecessor.
1117      If so, it is a real Phi node and we break the loop. Else the
1118      Phi node merges the same definition on several paths and therefore
1120   for (i=0; i < ins; ++i)
/* Skip self-references and the (single) known predecessor seen so far. */
1122   if (in[i]==res || in[i]==known) continue;
1130   /* i==ins: there is at most one predecessor, we don't need a phi node. */
/* The unneeded Phi is given back: either to the explicit reuse stack or to
   the obstack (possible because it is the most recent allocation). */
1132 #if USE_EXPLICIT_PHI_IN_STACK
1133   free_to_Phi_in_stack(res);
1135   obstack_free (current_ir_graph->obst, res);
1139   res = optimize_node (res);
1140   irn_vrfy_irg (res, irg);
1143   /* return the pointer to the Phi node. This node might be deallocated! */
1148 get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
1151 allocates and returns this node. The routine called to allocate the
1152 node might optimize it away and return a real value, or even a pointer
1153 to a deallocated Phi node on top of the obstack!
1154 This function is called with an in-array of proper size. **/
/* Collect, for local variable `pos`, the reaching definition from every
   control predecessor of `block` and build the merging Phi (which may
   collapse to a single value inside new_rd_Phi_in). */
1156 phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
1158   ir_node *prevBlock, *res;
1161   /* This loop goes to all predecessor blocks of the block the Phi node is in
1162      and there finds the operands of the Phi node by calling
1163      get_r_value_internal. */
1164   for (i = 1; i <= ins; ++i) {
1165   assert (block->in[i]);
1166   prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
1168   nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
1171   /* After collecting all predecessors into the array nin a new Phi node
1172      with these predecessors is created. This constructor contains an
1173      optimization: If all predecessors of the Phi node are identical it
1174      returns the only operand instead of a new Phi node. If the value
1175      passes two different control flow edges without being defined, and
1176      this is the second path treated, a pointer to the node that will be
1177      allocated for the first path (recursion) is returned. We already
1178      know the address of this node, as it is the next node to be allocated
1179      and will be placed on top of the obstack. (The obstack is a _stack_!) */
1180   res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
1182   /* Now we know the value for "pos" and can enter it in the array with
1183      all known local variables. Attention: this might be a pointer to
1184      a node, that later will be allocated!!! See new_rd_Phi_in.
1185      If this is called in mature, after some set_value in the same block,
1186      the proper value must not be overwritten:
1188      get_value (makes Phi0, puts it into graph_arr)
1189      set_value (overwrites Phi0 in graph_arr)
1190      mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
1193   if (!block->attr.block.graph_arr[pos]) {
1194   block->attr.block.graph_arr[pos] = res;
1196   /* printf(" value already computed by %s\n",
1197      id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
/* This function returns the last definition of a variable. In case
this variable was last defined in a previous block, Phi nodes are
inserted. If the part of the firm graph containing the definition
is not yet constructed, a dummy Phi node is returned. */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
/* There are 4 cases to treat.
1. The block is not mature and we visit it the first time. We can not
create a proper Phi node, therefore a Phi0, i.e., a Phi without
predecessors is returned. This node is added to the linked list (field
"link") of the containing block to be completed when this block is
matured. (Completion will add a new Phi and turn the Phi0 into an Id
2. The value is already known in this block, graph_arr[pos] is set and we
visit the block the first time. We can return the value without
creating any new nodes.
3. The block is mature and we visit it the first time. A Phi node needs
to be created (phi_merge). If the Phi is not needed, as all it's
operands are the same value reaching the block through different
paths, it's optimized away and the value itself is returned.
4. The block is mature, and we visit it the second time. Now two
subcases are possible:
* The value was computed completely the last time we were here. This
is the case if there is no loop. We can return the proper value.
* The recursion that visited this node and set the flag did not
return yet. We are computing a value in a loop and need to
break the recursion without knowing the result yet.
@@@ strange case. Straight forward we would create a Phi before
starting the computation of it's predecessors. In this case we will
find a Phi here in any case. The problem is that this implementation
only creates a Phi after computing the predecessors, so that it is
hard to compute self references of this Phi. @@@
There is no simple check for the second subcase. Therefore we check
for a second visit and treat all such cases as the second subcase.
Anyways, the basic situation is the same: we reached a block
on two paths without finding a definition of the value: No Phi
nodes are needed on both paths.
We return this information "Two paths, no Phi needed" by a very tricky
implementation that relies on the fact that an obstack is a stack and
will return a node with the same address on different allocations.
Look also at phi_merge and new_rd_phi_in to understand this.
@@@ Unfortunately this does not work, see testprogram
three_cfpred_example.
/* case 4 -- already visited. */
if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) return NULL;
/* visited the first time */
set_irn_visited(block, get_irg_visited(current_ir_graph));
/* Get the local valid value */
res = block->attr.block.graph_arr[pos];
/* case 2 -- If the value is actually computed, return it. */
if (res) { return res;};
if (block->attr.block.matured) { /* case 3 */
/* The Phi has the same amount of ins as the corresponding block. */
int ins = get_irn_arity(block);
NEW_ARR_A (ir_node *, nin, ins);
/* Phi merge collects the predecessors and then creates a node. */
res = phi_merge (block, pos, mode, nin, ins);
} else { /* case 1 */
/* The block is not mature, we don't know how many in's are needed. A Phi
with zero predecessors is created. Such a Phi node is called Phi0
node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
to the list of Phi0 nodes in this block to be matured by mature_block
The Phi0 has to remember the pos of it's internal value. If the real
Phi is computed, pos is used to update the array with the local
res = new_rd_Phi0 (current_ir_graph, block, mode);
res->attr.phi0_pos = pos;
res->link = block->link;
/* If we get here, the frontend missed a use-before-definition error */
/* Fallback: fabricate a null constant of the requested mode so graph
   construction can continue despite the missing definition. */
printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
assert (mode->code >= irm_F && mode->code <= irm_P);
res = new_rd_Const (NULL, current_ir_graph, block, mode,
tarval_mode_null[mode->code]);
/* The local valid value is available now. */
block->attr.block.graph_arr[pos] = res;
1311 it starts the recursion. This causes an Id at the entry of
1312 every block that has no definition of the value! **/
#if USE_EXPLICIT_PHI_IN_STACK
/* Dummy Phi_in_stack management: this variant does not use the explicit
   stack, so construction/destruction are no-ops. */
INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; }
INLINE void free_Phi_in_stack(Phi_in_stack *s) { }
/* Creates a Phi node in the given block from the predecessor values in[].
   If at most one distinct non-Bad predecessor remains, the freshly
   allocated node is freed again and that value is returned instead. */
static INLINE ir_node *
new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
ir_node **in, int ins)
ir_node *res, *known;
/* Allocate a new node on the obstack. The allocation copies the in
res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
/* This loop checks whether the Phi has more than one predecessor.
If so, it is a real Phi node and we break the loop. Else the
Phi node merges the same definition on several paths and therefore
is not needed. Don't consider Bad nodes! */
for (i=0; i < ins; ++i)
if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
/* i==ins: there is at most one predecessor, we don't need a phi node. */
obstack_free (current_ir_graph->obst, res);
/* An undefined value, e.g., in unreachable code. */
res = optimize_node (res);
irn_vrfy_irg (res, irg);
/* Memory Phis in endless loops must be kept alive.
As we can't distinguish these easily we keep all of them alive. */
if ((res->op == op_Phi) && (mode == mode_M))
add_End_keepalive(irg->end, res);
get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
#if PRECISE_EXC_CONTEXT
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Builds the "frag array" for a fragile (exception-raising) op n: a
   snapshot of the current block's graph_arr with slot 0 replaced by the
   memory Proj of n. Additionally, n is remembered in slot n_loc-1 of the
   current block's graph_arr, chaining the most recent fragile op (used by
   set_frag_value to propagate values). */
static INLINE ir_node **
new_frag_arr (ir_node *n) {
arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
sizeof(ir_node *)*current_ir_graph->n_loc);
/* turn off optimization before allocating Proj nodes, as res isn't
opt = get_optimize(); set_optimize(0);
/* Here we rely on the fact that all frag ops have Memory as first result! */
/* NOTE(review): a Call's memory result apparently lives at Proj position 3
   rather than 0 -- confirm against the Call result Proj numbering. */
if (get_irn_op(n) == op_Call)
arr[0] = new_Proj(n, mode_M, 3);
arr[0] = new_Proj(n, mode_M, 0);
current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Returns the frag array of a fragile op n. The attribute that stores it
   depends on the opcode: Call and Alloc keep it in their opcode-specific
   attribute structs, all other ops in the generic frag_arr attribute. */
static INLINE ir_node **
get_frag_arr (ir_node *n) {
if (get_irn_op(n) == op_Call) {
return n->attr.call.frag_arr;
} else if (get_irn_op(n) == op_Alloc) {
return n->attr.a.frag_arr;
return n->attr.frag_arr;
/* Records val at position pos in frag_arr, without overwriting an existing
   entry, then recursively propagates it into the frag arrays of chained
   fragile ops (linked via slot n_loc - 1, see new_frag_arr). */
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
if (!frag_arr[pos]) frag_arr[pos] = val;
if (frag_arr[current_ir_graph->n_loc - 1])
set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
/* Returns the definition of value pos as recorded in the frag array of the
   fragile op cfOp (presumably the value valid along its control flow edge
   -- confirm). Falls back to building a Phi / fetching the block's regular
   definition if the frag array has no entry yet. */
get_r_frag_value_internal (ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
frag_arr = get_frag_arr(cfOp);
res = frag_arr[pos];
if (block->attr.block.graph_arr[pos]) {
/* There was a set_value after the cfOp and no get_value before that
set_value. We must build a Phi node now. */
if (block->attr.block.matured) {
int ins = get_irn_arity(block);
NEW_ARR_A (ir_node *, nin, ins);
res = phi_merge(block, pos, mode, nin, ins);
/* Block not mature: create a Phi0 and queue it on the block's link
   list, exactly as get_r_value_internal does. */
res = new_rd_Phi0 (current_ir_graph, block, mode);
res->attr.phi0_pos = pos;
res->link = block->link;
/* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
but this should be better: (remove comment if this works) */
/* It's a Phi, we can write this into all graph_arrs with NULL */
set_frag_value(block->attr.block.graph_arr, pos, res);
res = get_r_value_internal(block, pos, mode);
set_frag_value(block->attr.block.graph_arr, pos, res);
computes the predecessors for the real phi node, and then
allocates and returns this node. The routine called to allocate the
node might optimize it away and return a real value.
This function must be called with an in-array of proper size. **/
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
ir_node *prevBlock, *prevCfOp, *res, *phi0;
/* If this block has no value at pos create a Phi0 and remember it
in graph_arr to break recursions.
Else we may not set graph_arr as there a later value is remembered. */
if (!block->attr.block.graph_arr[pos]) {
if (block == get_irg_start_block(current_ir_graph)) {
/* Collapsing to Bad tarvals is no good idea.
So we call a user-supplied routine here that deals with this case as
appropriate for the given language. Sadly the only help we can give
here is the position.
Even if all variables are defined before use, it can happen that
we get to the start block, if a cond has been replaced by a tuple
(bad, jmp). In this case we call the function needlessly, eventually
generating a non-existent error.
However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
if (default_initialize_local_variable)
block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos);
block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
/* We don't need to care about exception ops in the start block.
There are none by definition. */
return block->attr.block.graph_arr[pos];
/* Not the start block: a Phi0 placeholder breaks the recursion. */
phi0 = new_rd_Phi0(current_ir_graph, block, mode);
block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
/* Set graph_arr for fragile ops. Also here we should break recursion.
We could choose a cyclic path through an cfop. But the recursion would
break at some point. */
set_frag_value(block->attr.block.graph_arr, pos, phi0);
/* This loop goes to all predecessor blocks of the block the Phi node
is in and there finds the operands of the Phi node by calling
get_r_value_internal. */
for (i = 1; i <= ins; ++i) {
prevCfOp = skip_Proj(block->in[i]);
if (is_Bad(prevCfOp)) {
/* In case a Cond has been optimized we would get right to the start block
with an invalid definition. */
nin[i-1] = new_Bad();
prevBlock = block->in[i]->in[0]; /* go past control flow op to prev block */
if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
nin[i-1] = new_Bad();
/* After collecting all predecessors into the array nin a new Phi node
with these predecessors is created. This constructor contains an
optimization: If all predecessors of the Phi node are identical it
returns the only operand instead of a new Phi node. */
res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
/* In case we allocated a Phi0 node at the beginning of this procedure,
we need to exchange this Phi0 with the real Phi. */
exchange(phi0, res);
block->attr.block.graph_arr[pos] = res;
/* Don't set_frag_value as it does not overwrite. Doesn't matter, is
only an optimization. */
/* This function returns the last definition of a variable. In case
this variable was last defined in a previous block, Phi nodes are
inserted. If the part of the firm graph containing the definition
is not yet constructed, a dummy Phi node is returned. */
get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
/* There are 4 cases to treat.
1. The block is not mature and we visit it the first time. We can not
create a proper Phi node, therefore a Phi0, i.e., a Phi without
predecessors is returned. This node is added to the linked list (field
"link") of the containing block to be completed when this block is
matured. (Completion will add a new Phi and turn the Phi0 into an Id
2. The value is already known in this block, graph_arr[pos] is set and we
visit the block the first time. We can return the value without
creating any new nodes.
3. The block is mature and we visit it the first time. A Phi node needs
to be created (phi_merge). If the Phi is not needed, as all it's
operands are the same value reaching the block through different
paths, it's optimized away and the value itself is returned.
4. The block is mature, and we visit it the second time. Now two
subcases are possible:
* The value was computed completely the last time we were here. This
is the case if there is no loop. We can return the proper value.
* The recursion that visited this node and set the flag did not
return yet. We are computing a value in a loop and need to
break the recursion. This case only happens if we visited
the same block with phi_merge before, which inserted a Phi0.
So we return the Phi0.
/* case 4 -- already visited. */
if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
/* As phi_merge allocates a Phi0 this value is always defined. Here
is the critical difference of the two algorithms. */
assert(block->attr.block.graph_arr[pos]);
return block->attr.block.graph_arr[pos];
/* visited the first time */
set_irn_visited(block, get_irg_visited(current_ir_graph));
/* Get the local valid value */
res = block->attr.block.graph_arr[pos];
/* case 2 -- If the value is actually computed, return it. */
if (res) { return res; };
if (block->attr.block.matured) { /* case 3 */
/* The Phi has the same amount of ins as the corresponding block. */
int ins = get_irn_arity(block);
NEW_ARR_A (ir_node *, nin, ins);
/* Phi merge collects the predecessors and then creates a node. */
res = phi_merge (block, pos, mode, nin, ins);
} else { /* case 1 */
/* The block is not mature, we don't know how many in's are needed. A Phi
with zero predecessors is created. Such a Phi node is called Phi0
node. The Phi0 is then added to the list of Phi0 nodes in this block
to be matured by mature_block later.
The Phi0 has to remember the pos of it's internal value. If the real
Phi is computed, pos is used to update the array with the local
res = new_rd_Phi0 (current_ir_graph, block, mode);
res->attr.phi0_pos = pos;
res->link = block->link;
/* If we get here, the frontend missed a use-before-definition error */
/* Fallback: fabricate a null constant of the requested mode so graph
   construction can continue despite the missing definition. */
printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
assert (mode->code >= irm_F && mode->code <= irm_P);
res = new_rd_Const (NULL, current_ir_graph, block, mode,
get_mode_null(mode));
/* The local valid value is available now. */
block->attr.block.graph_arr[pos] = res;
1639 #endif /* USE_FAST_PHI_CONSTRUCTION */
1641 /* ************************************************************************** */
/** Finalize a Block node, when all control flows are known. */
/** Acceptable parameters are only Block nodes. */
mature_block (ir_node *block)
assert (get_irn_opcode(block) == iro_Block);
/* @@@ should be commented in
assert (!get_Block_matured(block) && "Block already matured"); */
if (!get_Block_matured(block)) {
/* NOTE(review): the -1 presumably skips in[0], which is not a control
   flow predecessor in Firm's in-array convention -- confirm. */
ins = ARR_LEN (block->in)-1;
/* Fix block parameters */
block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
/* An array for building the Phi nodes. */
NEW_ARR_A (ir_node *, nin, ins);
/* Traverse a chain of Phi nodes attached to this block and mature
for (n = block->link; n; n=next) {
inc_irg_visited(current_ir_graph);
/* Replace each queued Phi0 with a real Phi built over all (now known)
   predecessors. */
exchange (n, phi_merge (block, n->attr.phi0_pos, n->mode, nin, ins));
block->attr.block.matured = 1;
/* Now, as the block is a finished firm node, we can optimize it.
Since other nodes have been allocated since the block was created
we can not free the node on the obstack. Therefore we have to call
Unfortunately the optimization does not change a lot, as all allocated
nodes refer to the unoptimized node.
We can call _2, as global cse has no effect on blocks. */
block = optimize_in_place_2(block);
irn_vrfy_irg(block, current_ir_graph);
/* ** Convenience constructors with debug info: build the node in the
   current block of current_ir_graph by delegating to the corresponding
   new_rd_* constructor (which takes graph and block explicitly). ** */
new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
/* Note: constants are always placed in the start block, not the current block. */
new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
/* Builds the default Proj of a Cond and records max_proj as the Cond's
   default_proj; marks the Cond attribute kind as fragmentary. */
new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
arg->attr.c.kind = fragmentary;
arg->attr.c.default_proj = max_proj;
res = new_Proj (arg, mode_X, max_proj);
new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
new_d_Tuple (dbg_info* db, int arity, ir_node **in)
return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
/* For fragile ops (Quot/DivMod/Div/Mod/Call/Load/Store/Alloc) a frag array
   snapshot is attached while the graph is in phase_building, to support
   PRECISE_EXC_CONTEXT -- see new_frag_arr(). */
new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(get_irn_op(res) == op_Quot)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(get_irn_op(res) == op_Div)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(get_irn_op(res) == op_Mod)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
new_d_Jmp (dbg_info* db)
return new_rd_Jmp (db, current_ir_graph, current_ir_graph->current_block);
new_d_Cond (dbg_info* db, ir_node *c)
return new_rd_Cond (db, current_ir_graph, current_ir_graph->current_block, c);
new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(get_irn_op(res) == op_Call)) /* Could be optimized away. */
res->attr.call.frag_arr = new_frag_arr(res);
new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(get_irn_op(res) == op_Load)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val)
res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(get_irn_op(res) == op_Store)) /* Could be optimized away. */
res->attr.frag_arr = new_frag_arr(res);
new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type,
res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
res->attr.a.frag_arr = new_frag_arr(res);
new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
store, ptr, size, free_type);
new_d_simpleSel (dbg_info* db, ir_node *store, ir_node *objptr, entity *ent)
/* GL: objptr was called frame before. Frame was a bad choice for the name
as the operand could as well be a pointer to a dynamic object. */
return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
store, objptr, 0, NULL, ent);
new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
store, objptr, n_index, index, sel);
new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
store, objptr, ent));
new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
return new_rd_SymConst (db, current_ir_graph, current_ir_graph->current_block,
new_d_Sync (dbg_info* db, int arity, ir_node** in)
return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
/* Bad and Unknown are singletons per graph: return the cached node. */
return current_ir_graph->bad;
new_d_Unknown (void)
return current_ir_graph->unknown;
new_d_CallBegin (dbg_info *db, ir_node *call)
res = new_rd_CallBegin (db, current_ir_graph, current_ir_graph->current_block, call);
new_d_EndReg (dbg_info *db)
res = new_rd_EndReg(db, current_ir_graph, current_ir_graph->current_block);
new_d_EndExcept (dbg_info *db)
res = new_rd_EndExcept(db, current_ir_graph, current_ir_graph->current_block);
new_d_Break (dbg_info *db)
return new_rd_Break (db, current_ir_graph, current_ir_graph->current_block);
new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
2081 /* ********************************************************************* */
2082 /* Comfortable interface with automatic Phi node construction. */
/* (Uses also constructors of the ?? interface, except new_Block.) */
2084 /* ********************************************************************* */
2086 /** Block construction **/
2087 /* immature Block without predecessors */
/* Creates an immature block with a dynamic, initially empty predecessor
   array and makes it the current block. Control flow predecessors are
   added later with add_in_edge(); mature_block() finishes the block. */
ir_node *new_d_immBlock (dbg_info* db) {
assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* creates a new dynamic in-array as length of in is -1 */
res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
current_ir_graph->current_block = res;
res->attr.block.matured = 0;
res->attr.block.exc = exc_normal;
res->attr.block.handler_entry = 0;
res->attr.block.backedge = NULL;
res->attr.block.in_cg = NULL;
res->attr.block.cg_backedge = NULL;
set_Block_block_visited(res, 0);
/* Create and initialize array for Phi-node construction. */
res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
current_ir_graph->n_loc);
memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Immature block may not be optimized! */
irn_vrfy_irg (res, current_ir_graph);
/* new_immBlock: convenience variant without debug info. */
return new_d_immBlock(NULL);
/* add an edge to a jmp/control flow node */
add_in_edge (ir_node *block, ir_node *jmp)
if (block->attr.block.matured) {
assert(0 && "Error: Block already matured!\n");
assert (jmp != NULL);
ARR_APP1 (ir_node *, block->in, jmp);
/* changing the current block */
switch_block (ir_node *target)
current_ir_graph->current_block = target;
/* ************************ */
/* parameter administration */
/* get a value from the parameter array from the current block by its index */
get_d_value (dbg_info* db, int pos, ir_mode *mode)
assert(get_irg_phase_state (current_ir_graph) == phase_building);
inc_irg_visited(current_ir_graph);
/* pos is shifted by 1: slot 0 of graph_arr holds the current store
   (see get_store/set_store below). */
return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode);
/* get a value from the parameter array from the current block by its index */
get_value (int pos, ir_mode *mode)
return get_d_value(NULL, pos, mode);
/* set a value at position pos in the parameter array from the current block */
set_value (int pos, ir_node *value)
assert(get_irg_phase_state (current_ir_graph) == phase_building);
assert(pos+1 < current_ir_graph->n_loc);
/* pos is shifted by 1: slot 0 of graph_arr holds the current store. */
current_ir_graph->current_block->attr.block.graph_arr[pos + 1] = value;
/* get the current store */
assert(get_irg_phase_state (current_ir_graph) == phase_building);
/* GL: one could call get_value instead */
inc_irg_visited(current_ir_graph);
return get_r_value_internal (current_ir_graph->current_block, 0, mode_M);
/* set the current store */
set_store (ir_node *store)
/* GL: one could call set_value instead */
assert(get_irg_phase_state (current_ir_graph) == phase_building);
current_ir_graph->current_block->attr.block.graph_arr[0] = store;
/* Attaches ka to the End node's keep-alive list (presumably to protect it
   from later removal -- confirm against add_End_keepalive semantics). */
keep_alive (ir_node *ka)
add_End_keepalive(current_ir_graph->end, ka);
/** Useful access routines **/
/* Returns the current block of the current graph. To set the current
block use switch_block(). */
ir_node *get_cur_block() {
return get_irg_current_block(current_ir_graph);
/* Returns the frame type of the current graph */
type *get_cur_frame_type() {
return get_irg_frame_type(current_ir_graph);
/* ********************************************************************* */
/* call once for each run of the library */
init_cons (default_initialize_local_variable_func_t *func)
/* Registers the language-dependent callback that supplies a value for a
   variable read before any definition (used by phi_merge). */
default_initialize_local_variable = func;
/* call for each graph */
finalize_cons (ir_graph *irg) {
/* Construction of this graph is finished: leave phase_building. */
irg->phase_state = phase_high;
/* ** Plain constructors without debug information: thin wrappers that pass
   NULL as dbg_info to the corresponding new_d_* constructor. ** */
ir_node *new_Block(int arity, ir_node **in) {
return new_d_Block(NULL, arity, in);
ir_node *new_Start (void) {
return new_d_Start(NULL);
ir_node *new_End (void) {
return new_d_End(NULL);
ir_node *new_Jmp (void) {
return new_d_Jmp(NULL);
ir_node *new_Cond (ir_node *c) {
return new_d_Cond(NULL, c);
ir_node *new_Return (ir_node *store, int arity, ir_node *in[]) {
return new_d_Return(NULL, store, arity, in);
ir_node *new_Raise (ir_node *store, ir_node *obj) {
return new_d_Raise(NULL, store, obj);
ir_node *new_Const (ir_mode *mode, tarval *con) {
return new_d_Const(NULL, mode, con);
ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
return new_d_SymConst(NULL, value, kind);
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
return new_d_simpleSel(NULL, store, objptr, ent);
ir_node *new_Sel (ir_node *store, ir_node *objptr, int arity, ir_node **in,
return new_d_Sel(NULL, store, objptr, arity, in, ent);
ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
return (new_d_InstOf (NULL, store, objptr, ent));
ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
return new_d_Call(NULL, store, callee, arity, in, tp);
ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_d_Add(NULL, op1, op2, mode);
ir_node *new_Sub (ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_d_Sub(NULL, op1, op2, mode);
ir_node *new_Minus (ir_node *op, ir_mode *mode) {
return new_d_Minus(NULL, op, mode);
ir_node *new_Mul (ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_d_Mul(NULL, op1, op2, mode);
ir_node *new_Quot (ir_node *memop, ir_node *op1, ir_node *op2) {
return new_d_Quot(NULL, memop, op1, op2);
ir_node *new_DivMod (ir_node *memop, ir_node *op1, ir_node *op2) {
return new_d_DivMod(NULL, memop, op1, op2);
ir_node *new_Div (ir_node *memop, ir_node *op1, ir_node *op2) {
return new_d_Div(NULL, memop, op1, op2);
ir_node *new_Mod (ir_node *memop, ir_node *op1, ir_node *op2) {
return new_d_Mod(NULL, memop, op1, op2);
ir_node *new_Abs (ir_node *op, ir_mode *mode) {
return new_d_Abs(NULL, op, mode);
ir_node *new_And (ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_d_And(NULL, op1, op2, mode);
ir_node *new_Or (ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_d_Or(NULL, op1, op2, mode);
ir_node *new_Eor (ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_d_Eor(NULL, op1, op2, mode);
ir_node *new_Not (ir_node *op, ir_mode *mode) {
return new_d_Not(NULL, op, mode);
ir_node *new_Shl (ir_node *op, ir_node *k, ir_mode *mode) {
return new_d_Shl(NULL, op, k, mode);
ir_node *new_Shr (ir_node *op, ir_node *k, ir_mode *mode) {
return new_d_Shr(NULL, op, k, mode);
ir_node *new_Shrs (ir_node *op, ir_node *k, ir_mode *mode) {
return new_d_Shrs(NULL, op, k, mode);
/* new_Rotate is an alias for new_Rot. */
#define new_Rotate new_Rot
ir_node *new_Rot (ir_node *op, ir_node *k, ir_mode *mode) {
return new_d_Rot(NULL, op, k, mode);
ir_node *new_Cmp (ir_node *op1, ir_node *op2) {
return new_d_Cmp(NULL, op1, op2);
ir_node *new_Conv (ir_node *op, ir_mode *mode) {
return new_d_Conv(NULL, op, mode);
ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
return new_d_Phi(NULL, arity, in, mode);
ir_node *new_Load (ir_node *store, ir_node *addr) {
return new_d_Load(NULL, store, addr);
ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
return new_d_Store(NULL, store, addr, val);
ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type,
where_alloc where) {
return new_d_Alloc(NULL, store, size, alloc_type, where);
ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
return new_d_Free(NULL, store, ptr, size, free_type);
ir_node *new_Sync (int arity, ir_node **in) {
return new_d_Sync(NULL, arity, in);
ir_node *new_Proj (ir_node *arg, ir_mode *mode, long proj) {
return new_d_Proj(NULL, arg, mode, proj);
ir_node *new_defaultProj (ir_node *arg, long max_proj) {
return new_d_defaultProj(NULL, arg, max_proj);
ir_node *new_Tuple (int arity, ir_node **in) {
return new_d_Tuple(NULL, arity, in);
ir_node *new_Id (ir_node *val, ir_mode *mode) {
return new_d_Id(NULL, val, mode);
ir_node *new_Bad (void) {
ir_node *new_Unknown(void) {
return new_d_Unknown();
ir_node *new_CallBegin (ir_node *callee) {
return new_d_CallBegin(NULL, callee);
ir_node *new_EndReg (void) {
return new_d_EndReg(NULL);
ir_node *new_EndExcept (void) {
return new_d_EndExcept(NULL);
ir_node *new_Break (void) {
return new_d_Break(NULL);
ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
return new_d_Filter(NULL, arg, mode, proj);